diff --git a/pbs-client/src/backup_reader.rs b/pbs-client/src/backup_reader.rs
index 4706abc7..88cba599 100644
--- a/pbs-client/src/backup_reader.rs
+++ b/pbs-client/src/backup_reader.rs
@@ -6,13 +6,12 @@ use std::sync::Arc;
 use futures::future::AbortHandle;
 use serde_json::{json, Value};
 
-use pbs_api_types::{BackupDir, BackupNamespace};
+use pbs_api_types::{BackupArchiveName, BackupDir, BackupNamespace, MANIFEST_BLOB_NAME};
 use pbs_datastore::data_blob::DataBlob;
 use pbs_datastore::data_blob_reader::DataBlobReader;
 use pbs_datastore::dynamic_index::DynamicIndexReader;
 use pbs_datastore::fixed_index::FixedIndexReader;
 use pbs_datastore::index::IndexFile;
-use pbs_datastore::manifest::MANIFEST_BLOB_NAME;
 use pbs_datastore::{BackupManifest, PROXMOX_BACKUP_READER_PROTOCOL_ID_V1};
 use pbs_tools::crypt_config::CryptConfig;
 use pbs_tools::sha::sha256;
@@ -127,7 +126,8 @@ impl BackupReader {
     /// The manifest signature is verified if we have a crypt_config.
     pub async fn download_manifest(&self) -> Result<(BackupManifest, Vec<u8>), Error> {
         let mut raw_data = Vec::with_capacity(64 * 1024);
-        self.download(MANIFEST_BLOB_NAME, &mut raw_data).await?;
+        self.download(MANIFEST_BLOB_NAME.as_ref(), &mut raw_data)
+            .await?;
         let blob = DataBlob::load_from_reader(&mut &raw_data[..])?;
         // no expected digest available
         let data = blob.decode(None, None)?;
@@ -145,11 +145,11 @@ impl BackupReader {
     pub async fn download_blob(
         &self,
         manifest: &BackupManifest,
-        name: &str,
+        name: &BackupArchiveName,
     ) -> Result<DataBlobReader<'_, File>, Error> {
         let mut tmpfile = crate::tools::create_tmp_file()?;
 
-        self.download(name, &mut tmpfile).await?;
+        self.download(name.as_ref(), &mut tmpfile).await?;
 
         tmpfile.seek(SeekFrom::Start(0))?;
         let (csum, size) = sha256(&mut tmpfile)?;
@@ -167,11 +167,11 @@ impl BackupReader {
     pub async fn download_dynamic_index(
         &self,
         manifest: &BackupManifest,
-        name: &str,
+        name: &BackupArchiveName,
     ) -> Result<DynamicIndexReader, Error> {
         let mut tmpfile = crate::tools::create_tmp_file()?;
 
-        self.download(name, &mut tmpfile).await?;
+        self.download(name.as_ref(), &mut tmpfile).await?;
 
         let index = DynamicIndexReader::new(tmpfile)
             .map_err(|err| format_err!("unable to read dynamic index '{}' - {}", name, err))?;
@@ -190,11 +190,11 @@ impl BackupReader {
     pub async fn download_fixed_index(
         &self,
         manifest: &BackupManifest,
-        name: &str,
+        name: &BackupArchiveName,
     ) -> Result<FixedIndexReader, Error> {
         let mut tmpfile = crate::tools::create_tmp_file()?;
 
-        self.download(name, &mut tmpfile).await?;
+        self.download(name.as_ref(), &mut tmpfile).await?;
 
         let index = FixedIndexReader::new(tmpfile)
             .map_err(|err| format_err!("unable to read fixed index '{}' - {}", name, err))?;
diff --git a/pbs-client/src/backup_writer.rs b/pbs-client/src/backup_writer.rs
index 2ffd0b9b..baf2aebb 100644
--- a/pbs-client/src/backup_writer.rs
+++ b/pbs-client/src/backup_writer.rs
@@ -13,13 +13,15 @@ use tokio::io::AsyncReadExt;
 use tokio::sync::{mpsc, oneshot};
 use tokio_stream::wrappers::ReceiverStream;
 
-use pbs_api_types::{ArchiveType, BackupDir, BackupNamespace};
+use pbs_api_types::{
+    ArchiveType, BackupArchiveName, BackupDir, BackupNamespace, CATALOG_NAME, MANIFEST_BLOB_NAME,
+};
 use pbs_datastore::data_blob::{ChunkInfo, DataBlob, DataChunkBuilder};
 use pbs_datastore::dynamic_index::DynamicIndexReader;
 use pbs_datastore::fixed_index::FixedIndexReader;
 use pbs_datastore::index::IndexFile;
-use pbs_datastore::manifest::{BackupManifest, MANIFEST_BLOB_NAME};
-use pbs_datastore::{CATALOG_NAME, PROXMOX_BACKUP_PROTOCOL_ID_V1};
+use pbs_datastore::manifest::BackupManifest;
+use pbs_datastore::PROXMOX_BACKUP_PROTOCOL_ID_V1;
 use pbs_tools::crypt_config::CryptConfig;
 use proxmox_human_byte::HumanByte;
@@ -269,7 +271,7 @@ impl BackupWriter {
     /// Upload chunks and index
     pub async fn upload_index_chunk_info(
         &self,
-        archive_name: &str,
+        archive_name: &BackupArchiveName,
         stream: impl Stream<Item = Result<MergedChunkInfo, Error>>,
         options: UploadOptions,
     ) -> Result<BackupStats, Error> {
@@ -361,7 +363,7 @@ impl BackupWriter {
 
     pub async fn upload_stream(
         &self,
-        archive_name: &str,
+        archive_name: &BackupArchiveName,
         stream: impl Stream<Item = Result<bytes::BytesMut, Error>>,
         options: UploadOptions,
         injections: Option<std::sync::mpsc::Receiver<InjectChunks>>,
@@ -387,13 +389,13 @@ impl BackupWriter {
             if !manifest
                 .files()
                 .iter()
-                .any(|file| file.filename == archive_name)
+                .any(|file| file.filename == archive_name.as_ref())
             {
                 log::info!("Previous manifest does not contain an archive called '{archive_name}', skipping download..");
             } else {
                 // try, but ignore errors
-                match ArchiveType::from_path(archive_name) {
-                    Ok(ArchiveType::FixedIndex) => {
+                match archive_name.archive_type() {
+                    ArchiveType::FixedIndex => {
                         if let Err(err) = self
                             .download_previous_fixed_index(
                                 archive_name,
@@ -405,7 +407,7 @@ impl BackupWriter {
                             log::warn!("Error downloading .fidx from previous manifest: {}", err);
                         }
                     }
-                    Ok(ArchiveType::DynamicIndex) => {
+                    ArchiveType::DynamicIndex => {
                         if let Err(err) = self
                             .download_previous_dynamic_index(
                                 archive_name,
@@ -429,12 +431,6 @@ impl BackupWriter {
             .as_u64()
             .unwrap();
 
-        let archive = if log::log_enabled!(log::Level::Debug) {
-            archive_name
-        } else {
-            pbs_tools::format::strip_server_file_extension(archive_name)
-        };
-
         let upload_stats = Self::upload_chunk_info_stream(
             self.h2.clone(),
             wid,
@@ -448,12 +444,17 @@ impl BackupWriter {
             },
             options.compress,
             injections,
-            archive,
+            archive_name,
         )
         .await?;
 
         let size_dirty = upload_stats.size - upload_stats.size_reused;
         let size: HumanByte = upload_stats.size.into();
+        let archive = if log::log_enabled!(log::Level::Debug) {
+            archive_name.to_string()
+        } else {
+            archive_name.without_type_extension()
+        };
 
         if upload_stats.chunk_injected > 0 {
             log::info!(
@@ -463,7 +464,7 @@ impl BackupWriter {
             );
         }
 
-        if archive_name != CATALOG_NAME {
+        if *archive_name != *CATALOG_NAME {
             let speed: HumanByte =
                 ((size_dirty * 1_000_000) / (upload_stats.duration.as_micros() as usize)).into();
             let size_dirty: HumanByte = size_dirty.into();
@@ -629,7 +630,7 @@ impl BackupWriter {
 
     pub async fn download_previous_fixed_index(
         &self,
-        archive_name: &str,
+        archive_name: &BackupArchiveName,
         manifest: &BackupManifest,
         known_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
     ) -> Result<FixedIndexReader, Error> {
@@ -664,7 +665,7 @@ impl BackupWriter {
 
     pub async fn download_previous_dynamic_index(
         &self,
-        archive_name: &str,
+        archive_name: &BackupArchiveName,
         manifest: &BackupManifest,
         known_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
     ) -> Result<DynamicIndexReader, Error> {
@@ -711,7 +712,7 @@ impl BackupWriter {
     pub async fn download_previous_manifest(&self) -> Result<BackupManifest, Error> {
         let mut raw_data = Vec::with_capacity(64 * 1024);
 
-        let param = json!({ "archive-name": MANIFEST_BLOB_NAME });
+        let param = json!({ "archive-name": MANIFEST_BLOB_NAME.to_string() });
         self.h2
             .download("previous", Some(param), &mut raw_data)
             .await?;
@@ -739,7 +740,7 @@ impl BackupWriter {
         crypt_config: Option<Arc<CryptConfig>>,
         compress: bool,
         injections: Option<std::sync::mpsc::Receiver<InjectChunks>>,
-        archive: &str,
+        archive: &BackupArchiveName,
     ) -> impl Future<Output = Result<UploadStats, Error>> {
         let mut counters = UploadCounters::new();
         let counters_readonly = counters.clone();
@@ -831,7 +832,7 @@ impl BackupWriter {
     fn upload_merged_chunk_stream(
         h2: H2Client,
         wid: u64,
-        archive: &str,
+        archive: &BackupArchiveName,
         prefix: &str,
         stream: impl Stream<Item = Result<MergedChunkInfo, Error>>,
         index_csum: Arc<Mutex<Option<Sha256>>>,
diff --git a/pbs-client/src/pxar/tools.rs b/pbs-client/src/pxar/tools.rs
index b076daf6..483ef19b 100644
--- a/pbs-client/src/pxar/tools.rs
+++ b/pbs-client/src/pxar/tools.rs
@@ -14,6 +14,7 @@ use pxar::accessor::ReadAt;
 use pxar::format::StatxTimestamp;
 use pxar::{mode, Entry, EntryKind, Metadata};
 
+use pbs_api_types::BackupArchiveName;
 use pbs_datastore::catalog::{ArchiveEntry, CatalogEntryType, DirEntryAttribute};
 use pbs_datastore::dynamic_index::{BufferedDynamicReader, LocalDynamicReadAt};
 
@@ -330,7 +331,7 @@ pub fn handle_root_with_optional_format_version_prelude<R: pxar::decoder::SeqRead>(
 
 pub async fn get_remote_pxar_reader(
-    archive_name: &str,
+    archive_name: &BackupArchiveName,
     client: Arc<BackupReader>,
     manifest: &BackupManifest,
     crypt_config: Option<Arc<CryptConfig>>,
diff --git a/pbs-client/src/tools/mod.rs b/pbs-client/src/tools/mod.rs
index 28db6f34..8068dc00 100644
--- a/pbs-client/src/tools/mod.rs
+++ b/pbs-client/src/tools/mod.rs
@@ -17,7 +17,9 @@ use proxmox_router::cli::{complete_file_name, shellword_split};
 use proxmox_schema::*;
 use proxmox_sys::fs::file_get_json;
 
-use pbs_api_types::{Authid, BackupNamespace, RateLimitConfig, UserWithTokens, BACKUP_REPO_URL};
+use pbs_api_types::{
+    Authid, BackupArchiveName, BackupNamespace, RateLimitConfig, UserWithTokens, BACKUP_REPO_URL,
+};
 use pbs_datastore::BackupManifest;
 
 use crate::{BackupRepository, HttpClient, HttpClientOptions};
@@ -548,19 +550,18 @@ pub fn place_xdg_file(
 }
 
 pub fn get_pxar_archive_names(
-    archive_name: &str,
+    archive_name: &BackupArchiveName,
     manifest: &BackupManifest,
-) -> Result<(String, Option<String>), Error> {
-    let (filename, ext) = match archive_name.strip_suffix(".didx") {
-        Some(filename) => (filename, ".didx"),
-        None => (archive_name, ""),
-    };
+) -> Result<(BackupArchiveName, Option<BackupArchiveName>), Error> {
+    let filename = archive_name.without_type_extension();
+    let ext = archive_name.archive_type().extension();
 
-    // Check if archive with given extension is present
+    // Check if archive is given as split archive or regular archive and is present in manifest,
+    // otherwise goto fallback below
     if manifest
         .files()
         .iter()
-        .any(|fileinfo| fileinfo.filename == format!("{filename}.didx"))
+        .any(|fileinfo| fileinfo.filename == archive_name.as_ref())
     {
         // check if already given as one of split archive name variants
         if let Some(base) = filename
@@ -568,8 +569,8 @@ pub fn get_pxar_archive_names(
             .or_else(|| filename.strip_suffix(".ppxar"))
         {
             return Ok((
-                format!("{base}.mpxar{ext}"),
-                Some(format!("{base}.ppxar{ext}")),
+                format!("{base}.mpxar.{ext}").as_str().try_into()?,
+                Some(format!("{base}.ppxar.{ext}").as_str().try_into()?),
             ));
         }
         return Ok((archive_name.to_owned(), None));
@@ -577,7 +578,10 @@ pub fn get_pxar_archive_names(
 
     // if not, try fallback from regular to split archive
     if let Some(base) = filename.strip_suffix(".pxar") {
-        return get_pxar_archive_names(&format!("{base}.mpxar{ext}"), manifest);
+        return get_pxar_archive_names(
+            &format!("{base}.mpxar.{ext}").as_str().try_into()?,
+            manifest,
+        );
     }
 
     bail!("archive not found in manifest");
diff --git a/pbs-datastore/src/backup_info.rs b/pbs-datastore/src/backup_info.rs
index 62d12b11..4c17692d 100644
--- a/pbs-datastore/src/backup_info.rs
+++ b/pbs-datastore/src/backup_info.rs
@@ -9,13 +9,11 @@ use proxmox_sys::fs::{lock_dir_noblock, replace_file, CreateOptions};
 
 use pbs_api_types::{
     Authid, BackupGroupDeleteStats, BackupNamespace, BackupType, GroupFilter, BACKUP_DATE_REGEX,
-    BACKUP_FILE_REGEX,
+    BACKUP_FILE_REGEX, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME,
 };
 use pbs_config::{open_backup_lockfile, BackupLockGuard};
 
-use crate::manifest::{
-    BackupManifest, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME, MANIFEST_LOCK_NAME,
-};
+use crate::manifest::{BackupManifest, MANIFEST_LOCK_NAME};
 use crate::{DataBlob, DataStore};
 
 /// BackupGroup is a directory containing a list of BackupDir
@@ -139,7 +137,7 @@ impl BackupGroup {
         }
 
         let mut manifest_path = PathBuf::from(backup_time);
-        manifest_path.push(MANIFEST_BLOB_NAME);
+        manifest_path.push(MANIFEST_BLOB_NAME.as_ref());
 
         use nix::fcntl::{openat, OFlag};
         match openat(
@@ -492,7 +490,7 @@ impl BackupDir {
     /// Load the manifest without a lock. Must not be written back.
     pub fn load_manifest(&self) -> Result<(BackupManifest, u64), Error> {
-        let blob = self.load_blob(MANIFEST_BLOB_NAME)?;
+        let blob = self.load_blob(MANIFEST_BLOB_NAME.as_ref())?;
         let raw_size = blob.raw_size();
         let manifest = BackupManifest::try_from(blob)?;
         Ok((manifest, raw_size))
@@ -515,7 +513,7 @@ impl BackupDir {
         let raw_data = blob.raw_data();
 
         let mut path = self.full_path();
-        path.push(MANIFEST_BLOB_NAME);
+        path.push(MANIFEST_BLOB_NAME.as_ref());
 
         // atomic replace invalidates flock - no other writes past this point!
         replace_file(&path, raw_data, CreateOptions::new(), false)?;
@@ -636,7 +634,9 @@ impl BackupInfo {
 
     pub fn is_finished(&self) -> bool {
         // backup is considered unfinished if there is no manifest
-        self.files.iter().any(|name| name == MANIFEST_BLOB_NAME)
+        self.files
+            .iter()
+            .any(|name| name == MANIFEST_BLOB_NAME.as_ref())
     }
 }
diff --git a/pbs-datastore/src/lib.rs b/pbs-datastore/src/lib.rs
index 202b0955..8050cf4d 100644
--- a/pbs-datastore/src/lib.rs
+++ b/pbs-datastore/src/lib.rs
@@ -144,9 +144,6 @@
 
 #![deny(unsafe_op_in_unsafe_fn)]
 
-// Note: .pcat1 => Proxmox Catalog Format version 1
-pub const CATALOG_NAME: &str = "catalog.pcat1.didx";
-
 /// Directory path where active operations counters are saved.
 pub const ACTIVE_OPERATIONS_DIR: &str = concat!(
     pbs_buildcfg::PROXMOX_BACKUP_RUN_DIR_M!(),
diff --git a/pbs-datastore/src/manifest.rs b/pbs-datastore/src/manifest.rs
index 823c8500..51ec117e 100644
--- a/pbs-datastore/src/manifest.rs
+++ b/pbs-datastore/src/manifest.rs
@@ -3,13 +3,10 @@ use anyhow::{bail, format_err, Error};
 use serde::{Deserialize, Serialize};
 use serde_json::{json, Value};
 
-use pbs_api_types::{ArchiveType, BackupType, CryptMode, Fingerprint};
+use pbs_api_types::{BackupArchiveName, BackupType, CryptMode, Fingerprint};
 use pbs_tools::crypt_config::CryptConfig;
 
-pub const MANIFEST_BLOB_NAME: &str = "index.json.blob";
 pub const MANIFEST_LOCK_NAME: &str = ".index.json.lck";
-pub const CLIENT_LOG_BLOB_NAME: &str = "client.log.blob";
-pub const ENCRYPTED_KEY_BLOB_NAME: &str = "rsa-encrypted.key.blob";
 
 fn crypt_mode_none() -> CryptMode {
     CryptMode::None
@@ -68,14 +65,13 @@ impl BackupManifest {
 
     pub fn add_file(
         &mut self,
-        filename: String,
+        filename: &BackupArchiveName,
         size: u64,
         csum: [u8; 32],
         crypt_mode: CryptMode,
     ) -> Result<(), Error> {
-        let _archive_type = ArchiveType::from_path(&filename)?; // check type
         self.files.push(FileInfo {
-            filename,
+            filename: filename.to_string(),
             size,
             csum,
             crypt_mode,
@@ -87,8 +83,11 @@ impl BackupManifest {
         &self.files[..]
     }
 
-    pub fn lookup_file_info(&self, name: &str) -> Result<&FileInfo, Error> {
-        let info = self.files.iter().find(|item| item.filename == name);
+    pub fn lookup_file_info(&self, name: &BackupArchiveName) -> Result<&FileInfo, Error> {
+        let info = self
+            .files
+            .iter()
+            .find(|item| item.filename == name.as_ref());
 
         match info {
             None => bail!("manifest does not contain file '{}'", name),
@@ -96,7 +95,12 @@ impl BackupManifest {
     }
 
-    pub fn verify_file(&self, name: &str, csum: &[u8; 32], size: u64) -> Result<(), Error> {
+    pub fn verify_file(
+        &self,
+        name: &BackupArchiveName,
+        csum: &[u8; 32],
+        size: u64,
+    ) -> Result<(), Error> {
         let info = self.lookup_file_info(name)?;
 
         if size != info.size {
@@ -256,8 +260,13 @@ fn test_manifest_signature() -> Result<(), Error> {
 
     let mut manifest = BackupManifest::new("host/elsa/2020-06-26T13:56:05Z".parse()?);
 
-    manifest.add_file("test1.img.fidx".into(), 200, [1u8; 32], CryptMode::Encrypt)?;
-    manifest.add_file("abc.blob".into(), 200, [2u8; 32], CryptMode::None)?;
+    manifest.add_file(
+        &"test1.img.fidx".try_into()?,
+        200,
+        [1u8; 32],
+        CryptMode::Encrypt,
+    )?;
+    manifest.add_file(&"abc.blob".try_into()?, 200, [2u8; 32], CryptMode::None)?;
 
     manifest.unprotected["note"] = "This is not protected by the signature.".into();
diff --git a/pbs-datastore/src/snapshot_reader.rs b/pbs-datastore/src/snapshot_reader.rs
index 432701ea..95e59a42 100644
--- a/pbs-datastore/src/snapshot_reader.rs
+++ b/pbs-datastore/src/snapshot_reader.rs
@@ -8,13 +8,15 @@ use nix::dir::Dir;
 
 use proxmox_sys::fs::lock_dir_noblock_shared;
 
-use pbs_api_types::{print_store_and_ns, ArchiveType, BackupNamespace, Operation};
+use pbs_api_types::{
+    print_store_and_ns, ArchiveType, BackupNamespace, Operation, CLIENT_LOG_BLOB_NAME,
+    MANIFEST_BLOB_NAME,
+};
 
 use crate::backup_info::BackupDir;
 use crate::dynamic_index::DynamicIndexReader;
 use crate::fixed_index::FixedIndexReader;
 use crate::index::IndexFile;
-use crate::manifest::{CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME};
 use crate::DataStore;
 
 /// Helper to access the contents of a datastore backup snapshot
@@ -62,7 +64,7 @@ impl SnapshotReader {
         };
 
         let mut client_log_path = snapshot_path;
-        client_log_path.push(CLIENT_LOG_BLOB_NAME);
+        client_log_path.push(CLIENT_LOG_BLOB_NAME.as_ref());
 
         let mut file_list = vec![MANIFEST_BLOB_NAME.to_string()];
         for item in manifest.files() {
diff --git a/proxmox-backup-client/src/catalog.rs b/proxmox-backup-client/src/catalog.rs
index a55c9eff..0e20886a 100644
--- a/proxmox-backup-client/src/catalog.rs
+++ b/proxmox-backup-client/src/catalog.rs
@@ -7,9 +7,8 @@ use serde_json::Value;
 use proxmox_router::cli::*;
 use proxmox_schema::api;
 
-use pbs_api_types::BackupNamespace;
+use pbs_api_types::{BackupArchiveName, BackupNamespace, CATALOG_NAME};
 use pbs_client::pxar::tools::get_remote_pxar_reader;
-use pbs_client::tools::has_pxar_filename_extension;
 use pbs_client::tools::key_source::get_encryption_key_password;
 use pbs_client::{BackupReader, RemoteChunkReader};
 use pbs_tools::crypt_config::CryptConfig;
@@ -22,7 +21,7 @@ use crate::{
     complete_pxar_archive_name, complete_repository, connect, crypto_parameters, decrypt_key,
     dir_or_last_from_group, extract_repository_from_value, format_key_source, optional_ns_param,
     record_repository, BackupDir, BufferedDynamicReader, CatalogReader, DynamicIndexReader,
-    IndexFile, Shell, CATALOG_NAME, KEYFD_SCHEMA, REPO_URL_SCHEMA,
+    IndexFile, Shell, KEYFD_SCHEMA, REPO_URL_SCHEMA,
 };
 
 #[api(
@@ -90,7 +89,7 @@ async fn dump_catalog(param: Value) -> Result<Value, Error> {
         let (manifest, _) = client.download_manifest().await?;
         manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref))?;
 
-        let file_info = match manifest.lookup_file_info(CATALOG_NAME) {
+        let file_info = match manifest.lookup_file_info(&CATALOG_NAME) {
             Ok(file_info) => file_info,
             Err(err) => {
                 let mut metadata_archives = Vec::new();
@@ -104,7 +103,7 @@ async fn dump_catalog(param: Value) -> Result<Value, Error> {
 
                 for archive in &metadata_archives {
                     let (reader, archive_size) = get_remote_pxar_reader(
-                        &archive,
+                        &archive.as_str().try_into()?,
                         client.clone(),
                         &manifest,
                         crypt_config.clone(),
@@ -128,7 +127,7 @@ async fn dump_catalog(param: Value) -> Result<Value, Error> {
         };
 
         let index = client
-            .download_dynamic_index(&manifest, CATALOG_NAME)
+            .download_dynamic_index(&manifest, &CATALOG_NAME)
             .await?;
 
         let most_used = index.find_most_used_chunks(8);
@@ -170,8 +169,7 @@
                 description: "Group/Snapshot path.",
             },
             "archive-name": {
-                type: String,
-                description: "Backup archive name.",
+                type: BackupArchiveName,
             },
             "repository": {
                 optional: true,
@@ -195,7 +193,8 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
     let client = connect(&repo)?;
     let backup_ns = optional_ns_param(&param)?;
     let path = required_string_param(&param, "snapshot")?;
-    let archive_name = required_string_param(&param, "archive-name")?;
+    let server_archive_name: BackupArchiveName =
+        required_string_param(&param, "archive-name")?.try_into()?;
 
     let backup_dir = dir_or_last_from_group(&client, &repo, &backup_ns, path).await?;
 
@@ -214,9 +213,7 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
         }
     };
 
-    let server_archive_name = if has_pxar_filename_extension(archive_name, false) {
-        format!("{}.didx", archive_name)
-    } else {
+    if !server_archive_name.has_pxar_filename_extension() {
         bail!("Can only mount pxar archives.");
     };
 
@@ -233,7 +230,7 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
     let (manifest, _) = client.download_manifest().await?;
     manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref))?;
 
-    if let Err(_err) = manifest.lookup_file_info(CATALOG_NAME) {
+    if let Err(_err) = manifest.lookup_file_info(&CATALOG_NAME) {
         // No catalog, fallback to pxar archive accessor if present
         let accessor = helper::get_pxar_fuse_accessor(
             &server_archive_name,
@@ -243,7 +240,7 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
         )
         .await?;
 
-        let state = Shell::new(None, &server_archive_name, accessor).await?;
+        let state = Shell::new(None, &server_archive_name.as_ref(), accessor).await?;
         log::info!("Starting interactive shell");
         state.shell().await?;
         record_repository(&repo);
@@ -261,17 +258,17 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
     )
     .await?;
 
-    client.download(CATALOG_NAME, &mut tmpfile).await?;
+    client.download(CATALOG_NAME.as_ref(), &mut tmpfile).await?;
     let index = DynamicIndexReader::new(tmpfile)
         .map_err(|err| format_err!("unable to read catalog index - {}", err))?;
 
     // Note: do not use values stored in index (not trusted) - instead, computed them again
     let (csum, size) = index.compute_csum();
-    manifest.verify_file(CATALOG_NAME, &csum, size)?;
+    manifest.verify_file(&CATALOG_NAME, &csum, size)?;
 
     let most_used = index.find_most_used_chunks(8);
 
-    let file_info = manifest.lookup_file_info(CATALOG_NAME)?;
+    let file_info = manifest.lookup_file_info(&CATALOG_NAME)?;
     let chunk_reader = RemoteChunkReader::new(
         client.clone(),
         crypt_config,
@@ -286,7 +283,7 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
     catalogfile.seek(SeekFrom::Start(0))?;
     let catalog_reader = CatalogReader::new(catalogfile);
 
-    let state = Shell::new(Some(catalog_reader), &server_archive_name, decoder).await?;
+    let state = Shell::new(Some(catalog_reader), &server_archive_name.as_ref(), decoder).await?;
 
     log::info!("Starting interactive shell");
     state.shell().await?;
diff --git a/proxmox-backup-client/src/helper.rs b/proxmox-backup-client/src/helper.rs
index 60355d7d..642d66a7 100644
--- a/proxmox-backup-client/src/helper.rs
+++ b/proxmox-backup-client/src/helper.rs
@@ -1,6 +1,7 @@
 use std::sync::Arc;
 
 use anyhow::Error;
+use pbs_api_types::BackupArchiveName;
 use pbs_client::{BackupReader, RemoteChunkReader};
 use pbs_datastore::BackupManifest;
 use pbs_tools::crypt_config::CryptConfig;
@@ -8,7 +9,7 @@ use pbs_tools::crypt_config::CryptConfig;
 use crate::{BufferedDynamicReadAt, BufferedDynamicReader, IndexFile};
 
 pub(crate) async fn get_pxar_fuse_accessor(
-    archive_name: &str,
+    archive_name: &BackupArchiveName,
     client: Arc<BackupReader>,
     manifest: &BackupManifest,
     crypt_config: Option<Arc<CryptConfig>>,
@@ -44,7 +45,7 @@ pub(crate) async fn get_pxar_fuse_accessor(
 }
 
 pub(crate) async fn get_pxar_fuse_reader(
-    archive_name: &str,
+    archive_name: &BackupArchiveName,
     client: Arc<BackupReader>,
     manifest: &BackupManifest,
     crypt_config: Option<Arc<CryptConfig>>,
@@ -57,7 +58,7 @@ pub(crate) async fn get_pxar_fuse_reader(
 }
 
 pub(crate) async fn get_buffered_pxar_reader(
-    archive_name: &str,
+    archive_name: &BackupArchiveName,
     client: Arc<BackupReader>,
     manifest: &BackupManifest,
     crypt_config: Option<Arc<CryptConfig>>,
diff --git a/proxmox-backup-client/src/main.rs b/proxmox-backup-client/src/main.rs
index f6fb3555..7e305e1d 100644
--- a/proxmox-backup-client/src/main.rs
+++ b/proxmox-backup-client/src/main.rs
@@ -7,6 +7,7 @@ use std::task::Context;
 
 use anyhow::{bail, format_err, Error};
 use futures::stream::{StreamExt, TryStreamExt};
+use pbs_client::tools::has_pxar_filename_extension;
 use serde::Deserialize;
 use serde_json::{json, Value};
 use tokio::sync::mpsc;
@@ -25,10 +26,11 @@ use pxar::accessor::aio::Accessor;
 use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};
 
 use pbs_api_types::{
-    ArchiveType, Authid, BackupDir, BackupGroup, BackupNamespace, BackupPart, BackupType,
-    ClientRateLimitConfig, CryptMode, Fingerprint, GroupListItem, PruneJobOptions, PruneListItem,
-    RateLimitConfig, SnapshotListItem, StorageStatus, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA,
-    BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA,
+    ArchiveType, Authid, BackupArchiveName, BackupDir, BackupGroup, BackupNamespace, BackupPart,
+    BackupType, ClientRateLimitConfig, CryptMode, Fingerprint, GroupListItem, PruneJobOptions,
+    PruneListItem, RateLimitConfig, SnapshotListItem, StorageStatus, BACKUP_ID_SCHEMA,
+    BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, CATALOG_NAME,
+    ENCRYPTED_KEY_BLOB_NAME, MANIFEST_BLOB_NAME,
 };
 use pbs_client::catalog_shell::Shell;
 use pbs_client::pxar::{ErrorHandler as PxarErrorHandler, MetadataArchiveReader, PxarPrevRef};
@@ -36,7 +38,7 @@ use pbs_client::tools::{
     complete_archive_name, complete_auth_id, complete_backup_group, complete_backup_snapshot,
     complete_backup_source, complete_chunk_size, complete_group_or_snapshot,
     complete_img_archive_name, complete_namespace, complete_pxar_archive_name, complete_repository,
-    connect, connect_rate_limited, extract_repository_from_value, has_pxar_filename_extension,
+    connect, connect_rate_limited, extract_repository_from_value,
     key_source::{
         crypto_parameters, format_key_source, get_encryption_key_password, KEYFD_SCHEMA,
         KEYFILE_SCHEMA, MASTER_PUBKEY_FD_SCHEMA, MASTER_PUBKEY_FILE_SCHEMA,
@@ -54,9 +56,8 @@
 use pbs_datastore::chunk_store::verify_chunk_size;
 use pbs_datastore::dynamic_index::{BufferedDynamicReader, DynamicIndexReader, LocalDynamicReadAt};
 use pbs_datastore::fixed_index::FixedIndexReader;
 use pbs_datastore::index::IndexFile;
-use pbs_datastore::manifest::{BackupManifest, ENCRYPTED_KEY_BLOB_NAME, MANIFEST_BLOB_NAME};
+use pbs_datastore::manifest::BackupManifest;
 use pbs_datastore::read_chunk::AsyncReadChunk;
-use pbs_datastore::CATALOG_NAME;
 use pbs_key_config::{decrypt_key, rsa_encrypt_key_config, KeyConfig};
 use pbs_tools::crypt_config::CryptConfig;
 use pbs_tools::json;
@@ -196,8 +197,8 @@ pub async fn dir_or_last_from_group(
 
 async fn backup_directory<P: AsRef<Path>>(
     client: &BackupWriter,
     dir_path: P,
-    archive_name: &str,
-    payload_target: Option<&str>,
+    archive_name: &BackupArchiveName,
+    payload_target: Option<&BackupArchiveName>,
     chunk_size: Option<usize>,
     catalog: Option<Arc<Mutex<CatalogWriter<TokioWriterAdapter<StdChannelWriter<Error>>>>>>,
     pxar_create_options: pbs_client::pxar::PxarCreateOptions,
@@ -276,7 +277,7 @@
 async fn backup_image<P: AsRef<Path>>(
     client: &BackupWriter,
     image_path: P,
-    archive_name: &str,
+    archive_name: &BackupArchiveName,
     chunk_size: Option<usize>,
     upload_options: UploadOptions,
 ) -> Result<BackupStats, Error> {
@@ -606,7 +607,7 @@ fn spawn_catalog_upload(
     tokio::spawn(async move {
         let catalog_upload_result = client
-            .upload_stream(CATALOG_NAME, catalog_chunk_stream, upload_options, None)
+            .upload_stream(&CATALOG_NAME, catalog_chunk_stream, upload_options, None)
             .await;
 
         if let Err(ref err) = catalog_upload_result {
@@ -1005,13 +1006,21 @@
     for (backup_type, filename, target_base, extension, size) in upload_list {
-        let target = format!("{target_base}.{extension}");
+        let target: BackupArchiveName = format!("{target_base}.{extension}").as_str().try_into()?;
         match (backup_type, dry_run) {
             // dry-run
-            (BackupSpecificationType::CONFIG, true) => log_file("config file", &filename, &target),
-            (BackupSpecificationType::LOGFILE, true) => log_file("log file", &filename, &target),
-            (BackupSpecificationType::PXAR, true) => log_file("directory", &filename, &target),
-            (BackupSpecificationType::IMAGE, true) => log_file("image", &filename, &target),
+            (BackupSpecificationType::CONFIG, true) => {
+                log_file("config file", &filename, target.as_ref())
+            }
+            (BackupSpecificationType::LOGFILE, true) => {
+                log_file("log file", &filename, target.as_ref())
+            }
+            (BackupSpecificationType::PXAR, true) => {
+                log_file("directory", &filename, target.as_ref())
+            }
+            (BackupSpecificationType::IMAGE, true) => {
+                log_file("image", &filename, &target.as_ref())
+            }
             // no dry-run
             (BackupSpecificationType::CONFIG, false) => {
                 let upload_options = UploadOptions {
@@ -1020,11 +1029,11 @@
                     ..UploadOptions::default()
                 };
 
-                log_file("config file", &filename, &target);
+                log_file("config file", &filename, target.as_ref());
                 let stats = client
-                    .upload_blob_from_file(&filename, &target, upload_options)
+                    .upload_blob_from_file(&filename, target.as_ref(), upload_options)
                     .await?;
-                manifest.add_file(target, stats.size, stats.csum, crypto.mode)?;
+                manifest.add_file(&target, stats.size, stats.csum, crypto.mode)?;
             }
             (BackupSpecificationType::LOGFILE, false) => {
                 // fixme: remove - not needed anymore ?
@@ -1034,11 +1043,11 @@
                     ..UploadOptions::default()
                 };
 
-                log_file("log file", &filename, &target);
+                log_file("log file", &filename, target.as_ref());
                 let stats = client
-                    .upload_blob_from_file(&filename, &target, upload_options)
+                    .upload_blob_from_file(&filename, target.as_ref(), upload_options)
                     .await?;
-                manifest.add_file(target, stats.size, stats.csum, crypto.mode)?;
+                manifest.add_file(&target, stats.size, stats.csum, crypto.mode)?;
             }
             (BackupSpecificationType::PXAR, false) => {
                 let target_base = if let Some(base) = target_base.strip_suffix(".pxar") {
@@ -1050,8 +1059,14 @@
                 let (target, payload_target) =
                     if detection_mode.is_metadata() || detection_mode.is_data() {
                         (
-                            format!("{target_base}.mpxar.{extension}"),
-                            Some(format!("{target_base}.ppxar.{extension}")),
+                            format!("{target_base}.mpxar.{extension}")
+                                .as_str()
+                                .try_into()?,
+                            Some(
+                                format!("{target_base}.ppxar.{extension}")
+                                    .as_str()
+                                    .try_into()?,
+                            ),
                         )
                     } else {
                         (target, None)
@@ -1065,12 +1080,12 @@
                     catalog_result_rx = Some(catalog_upload_res.result);
                 }
 
-                log_file("directory", &filename, &target);
+                log_file("directory", &filename, target.as_ref());
                 if let Some(catalog) = catalog.as_ref() {
                     catalog
                         .lock()
                         .unwrap()
-                        .start_directory(std::ffi::CString::new(target.as_str())?.as_c_str())?;
+                        .start_directory(std::ffi::CString::new(target.as_ref())?.as_c_str())?;
                 }
 
                 let mut previous_ref = None;
@@ -1137,7 +1152,7 @@
                     &client,
                     &filename,
                     &target,
-                    payload_target.as_deref(),
+                    payload_target.as_ref().as_deref(),
                     chunk_size_opt,
                     catalog.as_ref().cloned(),
                     pxar_options,
@@ -1147,20 +1162,20 @@
 
                 if let Some(payload_stats) = payload_stats {
                     manifest.add_file(
-                        payload_target
+                        &payload_target
                             .ok_or_else(|| format_err!("missing payload target archive"))?,
                         payload_stats.size,
                         payload_stats.csum,
                         crypto.mode,
                     )?;
                 }
-                manifest.add_file(target, stats.size, stats.csum, crypto.mode)?;
+                manifest.add_file(&target, stats.size, stats.csum, crypto.mode)?;
                 if let Some(catalog) = catalog.as_ref() {
                     catalog.lock().unwrap().end_directory()?;
                 }
             }
             (BackupSpecificationType::IMAGE, false) => {
-                log_file("image", &filename, &target);
+                log_file("image", &filename, target.as_ref());
 
                 let upload_options = UploadOptions {
                     previous_manifest: previous_manifest.clone(),
@@ -1172,7 +1187,7 @@
 
                 let stats =
                     backup_image(&client, &filename, &target, chunk_size_opt, upload_options)
                         .await?;
-                manifest.add_file(target, stats.size, stats.csum, crypto.mode)?;
+                manifest.add_file(&target, stats.size, stats.csum, crypto.mode)?;
             }
         }
     }
@@ -1194,22 +1209,30 @@
 
         if let Some(catalog_result_rx) = catalog_result_rx {
             let stats = catalog_result_rx.await??;
-            manifest.add_file(CATALOG_NAME.to_owned(), stats.size, stats.csum, crypto.mode)?;
+            manifest.add_file(&CATALOG_NAME, stats.size, stats.csum, crypto.mode)?;
         }
     }
 
     if let Some(rsa_encrypted_key) = rsa_encrypted_key {
-        let target = ENCRYPTED_KEY_BLOB_NAME;
-        log::info!("Upload RSA encoded key to '{}' as {}", repo, target);
+        log::info!(
+            "Upload RSA encoded key to '{}' as {}",
+            repo,
+            *ENCRYPTED_KEY_BLOB_NAME
+        );
         let options = UploadOptions {
             compress: false,
             encrypt: false,
             ..UploadOptions::default()
         };
         let stats = client
-            .upload_blob_from_data(rsa_encrypted_key, target, options)
+            .upload_blob_from_data(rsa_encrypted_key, ENCRYPTED_KEY_BLOB_NAME.as_ref(), options)
             .await?;
-        manifest.add_file(target.to_string(), stats.size, stats.csum, crypto.mode)?;
+        manifest.add_file(
+            &ENCRYPTED_KEY_BLOB_NAME,
+            stats.size,
+            stats.csum,
+            crypto.mode,
+        )?;
     }
     // create manifest (index.json)
     // manifests are never encrypted, but include a signature
@@ -1225,7 +1248,7 @@
         ..UploadOptions::default()
     };
     client
-        .upload_blob_from_data(manifest.into_bytes(), MANIFEST_BLOB_NAME, options)
+        .upload_blob_from_data(manifest.into_bytes(), MANIFEST_BLOB_NAME.as_ref(), options)
         .await?;
 
     client.finish().await?;
@@ -1238,7 +1261,7 @@
 }
 
 async fn prepare_reference(
-    target: &str,
+    target: &BackupArchiveName,
     manifest: Arc<BackupManifest>,
     backup_writer: &BackupWriter,
     backup_reader: Arc<BackupReader>,
@@ -1250,7 +1273,11 @@ async fn prepare_reference(
         Ok((target, payload_target)) => (target, payload_target),
         Err(_) => return Ok(None),
     };
-    let payload_target = payload_target.unwrap_or_default();
+    let payload_target = if let Some(payload_target) = payload_target {
+        payload_target
+    } else {
+        return Ok(None);
+    };
 
     let metadata_ref_index = if let Ok(index) = backup_reader
         .download_dynamic_index(&manifest, &target)
@@ -1299,7 +1326,7 @@ async fn prepare_reference(
     Ok(Some(pbs_client::pxar::PxarPrevRef {
         accessor,
         payload_index: payload_ref_index,
-        archive_name: target,
+        archive_name: target.to_string(),
     }))
 }
@@ -1486,7 +1513,8 @@ async fn restore(
 ) -> Result<Value, Error> {
     let repo = extract_repository_from_value(&param)?;
 
-    let archive_name = json::required_string_param(&param, "archive-name")?;
+    let archive_name: BackupArchiveName =
+        json::required_string_param(&param, "archive-name")?.try_into()?;
 
     let rate_limit = RateLimitConfig::from_client_config(limit);
 
@@ -1525,11 +1553,9 @@ async fn restore(
     )
     .await?;
 
-    let (archive_name, archive_type) = parse_archive_type(archive_name);
-
     let (manifest, backup_index_data) = client.download_manifest().await?;
 
-    if archive_name == ENCRYPTED_KEY_BLOB_NAME && crypt_config.is_none() {
+    if archive_name == *ENCRYPTED_KEY_BLOB_NAME && crypt_config.is_none() {
         log::info!("Restoring encrypted key blob without original key - skipping manifest fingerprint check!")
     } else {
         if manifest.signature.is_some() {
@@ -1543,7 +1569,7 @@ async fn restore(
         manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref))?;
     }
 
-    if archive_name == MANIFEST_BLOB_NAME {
+    if archive_name == *MANIFEST_BLOB_NAME {
         if let Some(target) = target {
             replace_file(target, &backup_index_data, CreateOptions::new(), false)?;
         } else {
@@ -1557,7 +1583,7 @@ async fn restore(
         return Ok(Value::Null);
     }
 
-    if archive_type == ArchiveType::Blob {
+    if archive_name.archive_type() == ArchiveType::Blob {
         let mut reader = client.download_blob(&manifest, &archive_name).await?;
 
         if let Some(target) = target {
@@ -1576,7 +1602,7 @@ async fn restore(
             std::io::copy(&mut reader, &mut writer)
                 .map_err(|err| format_err!("unable to pipe data - {}", err))?;
         }
-    } else if archive_type == ArchiveType::DynamicIndex {
+    } else if archive_name.archive_type() == ArchiveType::DynamicIndex {
         let (archive_name, payload_archive_name) =
             pbs_client::tools::get_pxar_archive_names(&archive_name, &manifest)?;
 
@@ -1680,7 +1706,7 @@ async fn restore(
             std::io::copy(&mut reader, &mut writer)
                 .map_err(|err| format_err!("unable to pipe data - {}", err))?;
         }
-    } else if archive_type == ArchiveType::FixedIndex {
+    } else if archive_name.archive_type() == ArchiveType::FixedIndex {
         let file_info = manifest.lookup_file_info(&archive_name)?;
         let index = client
             .download_fixed_index(&manifest, &archive_name)
diff --git a/proxmox-backup-client/src/mount.rs b/proxmox-backup-client/src/mount.rs
index c15e030f..0048a8ad 100644
--- a/proxmox-backup-client/src/mount.rs
+++ b/proxmox-backup-client/src/mount.rs
@@ -18,8 +18,7 @@ use proxmox_schema::*;
 use proxmox_sortable_macro::sortable;
 use proxmox_systemd;
 
-use pbs_api_types::BackupNamespace;
-use pbs_client::tools::has_pxar_filename_extension;
+use pbs_api_types::{ArchiveType, BackupArchiveName, BackupNamespace};
 use pbs_client::tools::key_source::get_encryption_key_password;
 use pbs_client::{BackupReader, RemoteChunkReader};
 use pbs_datastore::cached_chunk_reader::CachedChunkReader;
@@ -47,11 +46,7 @@ const API_METHOD_MOUNT: ApiMethod = ApiMethod::new(
             false,
             &StringSchema::new("Group/Snapshot path.").schema()
         ),
-        (
-            "archive-name",
-            false,
-            &StringSchema::new("Backup archive name.").schema()
-        ),
+        ("archive-name", false, &BackupArchiveName::API_SCHEMA),
         (
             "target",
             false,
@@ -87,11 +82,7 @@ WARNING: Only do this with *trusted* backups!",
             false,
             &StringSchema::new("Group/Snapshot path.").schema()
        ),
-        (
-            "archive-name",
-            false,
-            &StringSchema::new("Backup archive name.").schema()
-        ),
+        ("archive-name", false, &BackupArchiveName::API_SCHEMA),
         ("repository", true, &REPO_URL_SCHEMA),
         (
             "keyfile",
@@ -208,7 +199,8 @@ fn mount(
 
 async fn mount_do(param: Value, pipe: Option<OwnedFd>) -> Result<Value, Error> {
     let repo = extract_repository_from_value(&param)?;
-    let archive_name = required_string_param(&param, "archive-name")?;
+    let server_archive_name: BackupArchiveName =
+        required_string_param(&param, "archive-name")?.try_into()?;
     let client = connect(&repo)?;
     let target = param["target"].as_str();
 
@@ -230,16 +222,14 @@ async fn mount_do(param: Value, pipe: Option<OwnedFd>) -> Result<Value, Error> {
         }
     };
 
-    let server_archive_name = if has_pxar_filename_extension(archive_name, false) {
+    if server_archive_name.has_pxar_filename_extension() {
         if target.is_none() {
             bail!("use the 'mount' command to mount pxar archives");
         }
-        format!("{}.didx", archive_name)
-    } else if archive_name.ends_with(".img") {
+    } else if server_archive_name.ends_with(".img.fidx") {
         if target.is_some() {
             bail!("use the 'map' command to map drive images");
         }
-        format!("{}.fidx", archive_name)
     } else {
         bail!("Can only mount/map pxar archives and drive images.");
     };
@@ -291,7 +281,7 @@ async fn mount_do(param: Value, pipe: Option<OwnedFd>) -> Result<Value, Error> {
     let mut interrupt =
         futures::future::select(interrupt_int.recv().boxed(), interrupt_term.recv().boxed());
 
-    if server_archive_name.ends_with(".didx") {
+    if server_archive_name.archive_type() == ArchiveType::DynamicIndex {
         let decoder = helper::get_pxar_fuse_accessor(
             &server_archive_name,
             client.clone(),
@@ -312,7 +302,7 @@ async fn mount_do(param: Value, pipe: Option<OwnedFd>) -> Result<Value, Error> {
             // exit on interrupted
         }
         }
-    } else if server_archive_name.ends_with(".fidx") {
+    } else if server_archive_name.archive_type() == ArchiveType::FixedIndex {
         let file_info = manifest.lookup_file_info(&server_archive_name)?;
         let index = client
             .download_fixed_index(&manifest, &server_archive_name)
@@ -326,7 +316,10 @@ async fn mount_do(param: Value, pipe: Option<OwnedFd>) -> Result<Value, Error> {
         );
         let reader = CachedChunkReader::new(chunk_reader, index, 8).seekable();
 
-        let name = &format!("{}:{}/{}", repo, path, archive_name);
+        let name = &format!(
+            "{repo}:{path}/{}",
+            server_archive_name.without_type_extension(),
+        );
         let name_escaped = proxmox_systemd::escape_unit(name, false);
 
         let mut session =
diff --git a/proxmox-file-restore/src/main.rs b/proxmox-file-restore/src/main.rs
index 08354b45..0a60d69f 100644
--- a/proxmox-file-restore/src/main.rs
+++ b/proxmox-file-restore/src/main.rs
@@ -5,6 +5,7 @@ use std::sync::Arc;
 
 use anyhow::{bail, format_err, Error};
 use futures::StreamExt;
+use pbs_api_types::{BackupArchiveName, CATALOG_NAME};
 use serde_json::{json, Value};
 use tokio::io::AsyncWriteExt;
 
@@ -37,7 +38,6 @@ use pbs_client::{BackupReader, BackupRepository, RemoteChunkReader};
 use pbs_datastore::catalog::{ArchiveEntry, CatalogReader, DirEntryAttribute};
 use pbs_datastore::dynamic_index::BufferedDynamicReader;
 use pbs_datastore::index::IndexFile;
-use pbs_datastore::CATALOG_NAME;
 use pbs_key_config::decrypt_key;
 use pbs_tools::crypt_config::CryptConfig;
 
@@ -149,9 +149,9 @@ async fn list_files(
             Ok(entries)
         }
         ExtractPath::Pxar(file, mut path) => {
-            if let Ok(file_info) = manifest.lookup_file_info(CATALOG_NAME) {
+            if let Ok(file_info) = manifest.lookup_file_info(&CATALOG_NAME) {
                 let index = client
-                    .download_dynamic_index(&manifest, CATALOG_NAME)
+                    .download_dynamic_index(&manifest, &CATALOG_NAME)
                     .await?;
                 let most_used = index.find_most_used_chunks(8);
                 let chunk_reader = RemoteChunkReader::new(
@@ -172,6 +172,7 @@ async fn list_files(
                 path = vec![b'/'];
             }
 
+            let file: BackupArchiveName = file.as_str().try_into()?;
             let (archive_name, _payload_archive_name) =
                 pbs_client::tools::get_pxar_archive_names(&file, &manifest)?;
 
@@ -191,7 +192,7 @@ async fn list_files(
             pbs_client::pxar::tools::pxar_metadata_catalog_lookup(
                 accessor,
                 path,
-                Some(&archive_name),
+                Some(archive_name.as_ref()),
             )
             .await
         }
@@ -476,10 +477,11 @@ async fn extract(
 
     match path {
         ExtractPath::Pxar(archive_name, path) => {
+            let archive_name: BackupArchiveName = archive_name.as_str().try_into()?;
             let (archive_name, payload_archive_name) =
                 pbs_client::tools::get_pxar_archive_names(&archive_name, &manifest)?;
             let (reader, archive_size) = get_remote_pxar_reader(
-                &archive_name,
+                &archive_name.try_into()?,
                 client.clone(),
                 &manifest,
                 crypt_config.clone(),
diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs
index 99b579f0..3b863c06 100644
--- a/src/api2/admin/datastore.rs
+++ b/src/api2/admin/datastore.rs
@@ -2,6 +2,7 @@
 
 use std::collections::HashSet;
 use std::ffi::OsStr;
+use std::ops::Deref;
 use std::os::unix::ffi::OsStrExt;
 use std::path::PathBuf;
 use std::sync::Arc;
@@ -34,12 +35,13 @@ use pxar::accessor::aio::Accessor;
 use pxar::EntryKind;
 
 use pbs_api_types::{
-    print_ns_and_snapshot, print_store_and_ns, Authid, BackupContent, BackupGroupDeleteStats,
-    BackupNamespace, BackupType, Counts, CryptMode, DataStoreConfig, DataStoreListItem,
-    DataStoreStatus, GarbageCollectionJobStatus, GroupListItem, JobScheduleStatus, KeepOptions,
-    Operation, PruneJobOptions, SnapshotListItem, SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA,
-    BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA,
-    DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, MAX_NAMESPACE_DEPTH, NS_MAX_DEPTH_SCHEMA,
+    print_ns_and_snapshot, print_store_and_ns, ArchiveType, Authid, BackupArchiveName,
+    BackupContent, BackupGroupDeleteStats, BackupNamespace, BackupType, Counts, CryptMode,
+    DataStoreConfig, DataStoreListItem, DataStoreStatus, GarbageCollectionJobStatus, GroupListItem,
+    JobScheduleStatus, KeepOptions, Operation, PruneJobOptions, SnapshotListItem,
+    SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA,
+    BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, CATALOG_NAME, CLIENT_LOG_BLOB_NAME, DATASTORE_SCHEMA,
+    IGNORE_VERIFIED_BACKUPS_SCHEMA, MANIFEST_BLOB_NAME, MAX_NAMESPACE_DEPTH, NS_MAX_DEPTH_SCHEMA,
     PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE,
     PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID, UPID_SCHEMA,
     VERIFICATION_OUTDATED_AFTER_SCHEMA,
@@ -54,11 +56,11 @@ use pbs_datastore::data_blob_reader::DataBlobReader;
 use pbs_datastore::dynamic_index::{BufferedDynamicReader, DynamicIndexReader, LocalDynamicReadAt};
 use pbs_datastore::fixed_index::FixedIndexReader;
 use pbs_datastore::index::IndexFile;
-use pbs_datastore::manifest::{BackupManifest, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME};
+use pbs_datastore::manifest::BackupManifest;
 use pbs_datastore::prune::compute_prune_info;
 use pbs_datastore::{
     check_backup_owner, task_tracking, BackupDir, BackupGroup, DataStore, LocalChunkReader,
-    StoreProgress, CATALOG_NAME,
+    StoreProgress,
 };
 use pbs_tools::json::required_string_param;
 use proxmox_rest_server::{formatter, WorkerTask};
@@ -1481,12 +1483,13 @@ pub fn download_file_decoded(
             &backup_dir_api.group,
         )?;
 
-        let file_name = required_string_param(&param, "file-name")?.to_owned();
+        let file_name: BackupArchiveName =
+            required_string_param(&param, "file-name")?.try_into()?;
         let backup_dir = datastore.backup_dir(backup_ns.clone(), backup_dir_api.clone())?;
 
         let (manifest, files) = read_backup_index(&backup_dir)?;
         for file in files {
-            if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
+            if file.filename == file_name.as_ref() && file.crypt_mode == Some(CryptMode::Encrypt) {
                 bail!("cannot decode '{}' - is encrypted", file_name);
             }
         }
@@ -1501,12 +1504,10 @@ pub fn download_file_decoded(
 
         let mut path = datastore.base_path();
         path.push(backup_dir.relative_path());
-        path.push(&file_name);
+        path.push(file_name.as_ref());
 
-        let (_, extension) = file_name.rsplit_once('.').unwrap();
-
-        let body = match extension {
-            "didx" => {
+        let body = match file_name.archive_type() {
+            ArchiveType::DynamicIndex => {
                 let index = DynamicIndexReader::open(&path).map_err(|err| {
                     format_err!("unable to read dynamic index '{:?}' - {}", &path, err)
                 })?;
@@ -1520,7 +1521,7 @@ pub fn download_file_decoded(
                     err
                 }))
             }
-            "fidx" => {
+            ArchiveType::FixedIndex => {
                 let index = FixedIndexReader::open(&path).map_err(|err| {
                     format_err!("unable to read fixed index '{:?}' - {}", &path, err)
                 })?;
@@ -1539,7 +1540,7 @@ pub fn download_file_decoded(
                     ),
                 )
             }
-            "blob" => {
+            ArchiveType::Blob => {
                 let file = std::fs::File::open(&path)
                     .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;
 
@@ -1554,9 +1555,6 @@ pub fn download_file_decoded(
                     ),
                 )
             }
-            extension => {
-                bail!("cannot download '{}' files", extension);
-            }
         };
 
         // fixme: set other headers ?
@@ -1613,10 +1611,10 @@ pub fn upload_backup_log(
         )?;
         let backup_dir = datastore.backup_dir(backup_ns.clone(), backup_dir_api.clone())?;
 
-        let file_name = CLIENT_LOG_BLOB_NAME;
+        let file_name = &CLIENT_LOG_BLOB_NAME;
 
         let mut path = backup_dir.full_path();
-        path.push(file_name);
+        path.push(file_name.as_ref());
 
         if path.exists() {
             bail!("backup already contains a log.");
@@ -1625,6 +1623,7 @@ pub fn upload_backup_log(
         println!(
             "Upload backup log to {} {backup_dir_api}/{file_name}",
             print_store_and_ns(store, &backup_ns),
+            file_name = file_name.deref(),
         );
 
         let data = req_body
@@ -1671,7 +1670,7 @@ fn decode_path(path: &str) -> Result<Vec<u8>, Error> {
                 type: String,
             },
             "archive-name": {
-                schema: BACKUP_ARCHIVE_NAME_SCHEMA,
+                type: BackupArchiveName,
                 optional: true,
             },
         },
@@ -1688,12 +1687,10 @@ pub async fn catalog(
     ns: Option<BackupNamespace>,
     backup_dir: pbs_api_types::BackupDir,
     filepath: String,
-    archive_name: Option<String>,
+    archive_name: Option<BackupArchiveName>,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Vec<ArchiveEntry>, Error> {
-    let file_name = archive_name
-        .clone()
-        .unwrap_or_else(|| CATALOG_NAME.to_string());
+    let file_name = archive_name.clone().unwrap_or_else(|| CATALOG_NAME.clone());
 
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
 
@@ -1713,7 +1710,7 @@ pub async fn catalog(
     let (manifest, files) = read_backup_index(&backup_dir)?;
     for file in files {
-        if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
+        if file.filename == file_name.as_ref() && file.crypt_mode == Some(CryptMode::Encrypt) {
             bail!("cannot decode '{file_name}' - is encrypted");
         }
     }
@@ -1722,7 +1719,7 @@ pub async fn catalog(
     tokio::task::spawn_blocking(move || {
         let mut path = datastore.base_path();
         path.push(backup_dir.relative_path());
-        path.push(&file_name);
+        path.push(file_name.as_ref());
 
         let index = DynamicIndexReader::open(&path)
             .map_err(|err| format_err!("unable to read dynamic index '{path:?}' - {err}"))?;
@@ -1772,7 +1769,7 @@ pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
             ("backup-time", false, &BACKUP_TIME_SCHEMA),
             ("filepath", false, &StringSchema::new("Base64 encoded path").schema()),
             ("tar", true, &BooleanSchema::new("Download as .tar.zst").schema()),
-            ("archive-name", true, &BACKUP_ARCHIVE_NAME_SCHEMA),
+            ("archive-name", true, &BackupArchiveName::API_SCHEMA),
         ]),
     )
 ).access(
@@ -1787,11 +1784,11 @@ fn get_local_pxar_reader(
     datastore: Arc<DataStore>,
     manifest: &BackupManifest,
     backup_dir: &BackupDir,
-    pxar_name: &str,
+    pxar_name: &BackupArchiveName,
 ) -> Result<(LocalDynamicReadAt<LocalChunkReader>, u64), Error> {
     let mut path = datastore.base_path();
     path.push(backup_dir.relative_path());
-    path.push(pxar_name);
+    path.push(pxar_name.as_ref());
 
     let index = DynamicIndexReader::open(&path)
         .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
@@ -1849,16 +1846,16 @@ pub fn pxar_file_download(
             let file_path = split.next().unwrap_or(b"/");
             (pxar_name.to_owned(), file_path.to_owned())
         };
-        let pxar_name = std::str::from_utf8(&pxar_name)?;
+        let pxar_name: BackupArchiveName = std::str::from_utf8(&pxar_name)?.try_into()?;
         let (manifest, files) = read_backup_index(&backup_dir)?;
         for file in files {
-            if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {
+            if file.filename == pxar_name.as_ref() && file.crypt_mode == Some(CryptMode::Encrypt) {
                 bail!("cannot decode '{}' - is encrypted", pxar_name);
            }
         }
 
         let (pxar_name, payload_archive_name) =
-            pbs_client::tools::get_pxar_archive_names(pxar_name, &manifest)?;
+            pbs_client::tools::get_pxar_archive_names(&pxar_name, &manifest)?;
         let (reader, archive_size) =
             get_local_pxar_reader(datastore.clone(), &manifest, &backup_dir, &pxar_name)?;
diff --git a/src/api2/tape/restore.rs b/src/api2/tape/restore.rs
index a180a4b0..93a6053b 100644
--- a/src/api2/tape/restore.rs
+++ b/src/api2/tape/restore.rs
@@ -21,16 +21,16 @@ use proxmox_worker_task::WorkerTaskContext;
 
 use pbs_api_types::{
     parse_ns_and_snapshot, print_ns_and_snapshot, ArchiveType, Authid, BackupDir, BackupNamespace,
     CryptMode, NotificationMode, Operation, TapeRestoreNamespace, Userid,
-    DATASTORE_MAP_ARRAY_SCHEMA, DATASTORE_MAP_LIST_SCHEMA, DRIVE_NAME_SCHEMA, MAX_NAMESPACE_DEPTH,
-    PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_TAPE_READ, TAPE_RESTORE_NAMESPACE_SCHEMA,
-    TAPE_RESTORE_SNAPSHOT_SCHEMA, UPID_SCHEMA,
+    DATASTORE_MAP_ARRAY_SCHEMA, DATASTORE_MAP_LIST_SCHEMA, DRIVE_NAME_SCHEMA, MANIFEST_BLOB_NAME,
+    MAX_NAMESPACE_DEPTH, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_TAPE_READ,
+    TAPE_RESTORE_NAMESPACE_SCHEMA, TAPE_RESTORE_SNAPSHOT_SCHEMA, UPID_SCHEMA,
 };
 use pbs_client::pxar::tools::handle_root_with_optional_format_version_prelude;
 use pbs_config::CachedUserInfo;
 use pbs_datastore::dynamic_index::DynamicIndexReader;
 use pbs_datastore::fixed_index::FixedIndexReader;
 use pbs_datastore::index::IndexFile;
-use pbs_datastore::manifest::{BackupManifest, MANIFEST_BLOB_NAME};
+use pbs_datastore::manifest::BackupManifest;
 use pbs_datastore::{DataBlob, DataStore};
 use pbs_tape::{
     BlockReadError, MediaContentHeader, TapeRead, PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0,
@@ -1652,7 +1652,7 @@ fn try_restore_snapshot_archive(
     }
 
     let root_path = Path::new("/");
-    let manifest_file_name = OsStr::new(MANIFEST_BLOB_NAME);
+    let manifest_file_name = OsStr::new(MANIFEST_BLOB_NAME.as_ref());
 
     let mut manifest = None;
 
@@ -1732,7 +1732,7 @@ fn try_restore_snapshot_archive(
 
     // commit manifest
     let mut manifest_path = snapshot_path.to_owned();
-    manifest_path.push(MANIFEST_BLOB_NAME);
+    manifest_path.push(MANIFEST_BLOB_NAME.as_ref());
     let mut tmp_manifest_path = manifest_path.clone();
     tmp_manifest_path.set_extension("tmp");
diff --git a/src/backup/mod.rs b/src/backup/mod.rs
index 8c84b8ce..c5dae69a 100644
--- a/src/backup/mod.rs
+++ b/src/backup/mod.rs
@@ -1,8 +1,5 @@
 //! Server/client-specific parts for what's otherwise in pbs-datastore.
 
-// Note: .pcat1 => Proxmox Catalog Format version 1
-pub const CATALOG_NAME: &str = "catalog.pcat1.didx";
-
 mod verify;
 pub use verify::*;
diff --git a/src/bin/proxmox_backup_debug/diff.rs b/src/bin/proxmox_backup_debug/diff.rs
index b0436d04..dcd351d9 100644
--- a/src/bin/proxmox_backup_debug/diff.rs
+++ b/src/bin/proxmox_backup_debug/diff.rs
@@ -13,7 +13,7 @@ use proxmox_human_byte::HumanByte;
 use proxmox_router::cli::{CliCommand, CliCommandMap, CommandLineInterface};
 use proxmox_schema::api;
 
-use pbs_api_types::{BackupNamespace, BackupPart};
+use pbs_api_types::{BackupArchiveName, BackupNamespace, BackupPart};
 use pbs_client::tools::key_source::{
     crypto_parameters, format_key_source, get_encryption_key_password, KEYFD_SCHEMA,
 };
@@ -70,8 +70,7 @@ pub fn diff_commands() -> CommandLineInterface {
                 type: String,
             },
             "archive-name": {
-                description: "Name of the .pxar archive",
-                type: String,
+                type: BackupArchiveName,
             },
             "repository": {
                 optional: true,
@@ -106,7 +105,7 @@ pub fn diff_commands() -> CommandLineInterface {
 async fn diff_archive_cmd(
     prev_snapshot: String,
     snapshot: String,
-    archive_name: String,
+    archive_name: BackupArchiveName,
     compare_content: bool,
     color: Option<String>,
     ns: Option<BackupNamespace>,
@@ -140,12 +139,11 @@ async fn diff_archive_cmd(
 
     let output_params = OutputParams { color };
 
-    if archive_name.ends_with(".pxar") {
-        let file_name = format!("{}.didx", archive_name);
+    if archive_name.ends_with(".pxar.didx") {
         diff_archive(
             &prev_snapshot,
             &snapshot,
-            &file_name,
+            &archive_name,
             &repo_params,
             compare_content,
             &output_params,
@@ -161,7 +159,7 @@ async fn diff_archive_cmd(
 async fn diff_archive(
     snapshot_a: &str,
     snapshot_b: &str,
-    file_name: &str,
+    file_name: &BackupArchiveName,
     repo_params: &RepoParams,
     compare_contents: bool,
     output_params: &OutputParams,
@@ -249,7 +247,7 @@ struct OutputParams {
 
 async fn open_dynamic_index(
     snapshot: &str,
-    archive_name: &str,
+    archive_name: &BackupArchiveName,
     params: &RepoParams,
 ) -> Result<(DynamicIndexReader, Accessor), Error> {
     let backup_reader = create_backup_reader(snapshot, params).await?;
diff --git a/src/server/pull.rs b/src/server/pull.rs
index 62c27917..4951ccfb 100644
--- a/src/server/pull.rs
+++ b/src/server/pull.rs
@@ -11,9 +11,9 @@ use proxmox_human_byte::HumanByte;
 use tracing::info;
 
 use pbs_api_types::{
-    print_store_and_ns, ArchiveType, Authid, BackupDir, BackupGroup, BackupNamespace, GroupFilter,
-    Operation, RateLimitConfig, Remote, MAX_NAMESPACE_DEPTH, PRIV_DATASTORE_AUDIT,
-    PRIV_DATASTORE_BACKUP,
+    print_store_and_ns, ArchiveType, Authid, BackupArchiveName, BackupDir, BackupGroup,
+    BackupNamespace, GroupFilter, Operation, RateLimitConfig, Remote, CLIENT_LOG_BLOB_NAME,
+    MANIFEST_BLOB_NAME, MAX_NAMESPACE_DEPTH, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP,
 };
 use pbs_client::BackupRepository;
 use pbs_config::CachedUserInfo;
@@ -21,7 +21,7 @@ use pbs_datastore::data_blob::DataBlob;
 use pbs_datastore::dynamic_index::DynamicIndexReader;
 use pbs_datastore::fixed_index::FixedIndexReader;
 use pbs_datastore::index::IndexFile;
-use pbs_datastore::manifest::{BackupManifest, FileInfo, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME};
+use pbs_datastore::manifest::{BackupManifest, FileInfo};
 use pbs_datastore::read_chunk::AsyncReadChunk;
 use pbs_datastore::{check_backup_owner, DataStore, StoreProgress};
 use pbs_tools::sha::sha256;
@@ -334,16 +334,16 @@ async fn pull_snapshot<'a>(
 ) -> Result<SyncStats, Error> {
     let mut sync_stats = SyncStats::default();
     let mut manifest_name = snapshot.full_path();
-    manifest_name.push(MANIFEST_BLOB_NAME);
+    manifest_name.push(MANIFEST_BLOB_NAME.as_ref());
 
     let mut client_log_name = snapshot.full_path();
-    client_log_name.push(CLIENT_LOG_BLOB_NAME);
+    client_log_name.push(CLIENT_LOG_BLOB_NAME.as_ref());
 
     let mut tmp_manifest_name = manifest_name.clone();
     tmp_manifest_name.set_extension("tmp");
     let tmp_manifest_blob;
     if let Some(data) = reader
-        .load_file_into(MANIFEST_BLOB_NAME, &tmp_manifest_name)
+        .load_file_into(MANIFEST_BLOB_NAME.as_ref(), &tmp_manifest_name)
         .await?
     {
         tmp_manifest_blob = data;
@@ -381,11 +381,12 @@ async fn pull_snapshot<'a>(
             path.push(&item.filename);
 
             if path.exists() {
-                match ArchiveType::from_path(&item.filename)? {
+                let filename: BackupArchiveName = item.filename.as_str().try_into()?;
+                match filename.archive_type() {
                     ArchiveType::DynamicIndex => {
                         let index = DynamicIndexReader::open(&path)?;
                         let (csum, size) = index.compute_csum();
-                        match manifest.verify_file(&item.filename, &csum, size) {
+                        match manifest.verify_file(&filename, &csum, size) {
                             Ok(_) => continue,
                             Err(err) => {
                                 info!("detected changed file {path:?} - {err}");
@@ -395,7 +396,7 @@ async fn pull_snapshot<'a>(
                     ArchiveType::FixedIndex => {
                         let index = FixedIndexReader::open(&path)?;
                         let (csum, size) = index.compute_csum();
-                        match manifest.verify_file(&item.filename, &csum, size) {
+                        match manifest.verify_file(&filename, &csum, size) {
                             Ok(_) => continue,
                             Err(err) => {
                                 info!("detected changed file {path:?} - {err}");
@@ -405,7 +406,7 @@ async fn pull_snapshot<'a>(
                     ArchiveType::Blob => {
                         let mut tmpfile = std::fs::File::open(&path)?;
                         let (csum, size) = sha256(&mut tmpfile)?;
-                        match manifest.verify_file(&item.filename, &csum, size) {
+                        match manifest.verify_file(&filename, &csum, size) {
                             Ok(_) => continue,
                             Err(err) => {
                                 info!("detected changed file {path:?} - {err}");
diff --git a/src/server/push.rs b/src/server/push.rs
index 288792e0..1fc744d2 100644
--- a/src/server/push.rs
+++ b/src/server/push.rs
@@ -10,11 +10,11 @@ use tokio_stream::wrappers::ReceiverStream;
 use tracing::{info, warn};
 
 use pbs_api_types::{
-    print_store_and_ns, ApiVersion, ApiVersionInfo, ArchiveType, Authid, BackupDir, BackupGroup,
-    BackupGroupDeleteStats, BackupNamespace, GroupFilter, GroupListItem, NamespaceListItem,
-    Operation, RateLimitConfig, Remote, SnapshotListItem, PRIV_DATASTORE_BACKUP,
-    PRIV_DATASTORE_READ, PRIV_REMOTE_DATASTORE_BACKUP, PRIV_REMOTE_DATASTORE_MODIFY,
-    PRIV_REMOTE_DATASTORE_PRUNE,
+    print_store_and_ns, ApiVersion, ApiVersionInfo, ArchiveType, Authid, BackupArchiveName,
+    BackupDir, BackupGroup, BackupGroupDeleteStats, BackupNamespace, GroupFilter, GroupListItem,
+    NamespaceListItem, Operation, RateLimitConfig, Remote, SnapshotListItem, CLIENT_LOG_BLOB_NAME,
+    MANIFEST_BLOB_NAME, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_READ, PRIV_REMOTE_DATASTORE_BACKUP,
+    PRIV_REMOTE_DATASTORE_MODIFY, PRIV_REMOTE_DATASTORE_PRUNE,
 };
 use pbs_client::{BackupRepository, BackupWriter, HttpClient, MergedChunkInfo, UploadOptions};
 use pbs_config::CachedUserInfo;
@@ -22,7 +22,6 @@ use pbs_datastore::data_blob::ChunkInfo;
 use pbs_datastore::dynamic_index::DynamicIndexReader;
 use pbs_datastore::fixed_index::FixedIndexReader;
 use pbs_datastore::index::IndexFile;
-use pbs_datastore::manifest::{CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME};
 use pbs_datastore::read_chunk::AsyncReadChunk;
 use pbs_datastore::{DataStore, StoreProgress};
 
@@ -805,10 +804,13 @@ pub(crate) async fn push_snapshot(
         let mut path = backup_dir.full_path();
         path.push(&entry.filename);
         if path.try_exists()? {
-            match ArchiveType::from_path(&entry.filename)? {
+            let archive_name = BackupArchiveName::from_path(&entry.filename)?;
+            match archive_name.archive_type() {
                 ArchiveType::Blob => {
                     let file = std::fs::File::open(path.clone())?;
-                    let backup_stats = backup_writer.upload_blob(file, &entry.filename).await?;
+                    let backup_stats = backup_writer
+                        .upload_blob(file, archive_name.as_ref())
+                        .await?;
                     stats.add(SyncStats {
                         chunk_count: backup_stats.chunk_count as usize,
                         bytes: backup_stats.size as usize,
@@ -821,7 +823,7 @@ pub(crate) async fn push_snapshot(
                     // Add known chunks, ignore errors since archive might not be present
                     let _res = backup_writer
                         .download_previous_dynamic_index(
-                            &entry.filename,
+                            &archive_name,
                             manifest,
                             known_chunks.clone(),
                         )
@@ -830,7 +832,7 @@ pub(crate) async fn push_snapshot(
                     let index = DynamicIndexReader::open(&path)?;
                     let chunk_reader = reader.chunk_reader(entry.chunk_crypt_mode());
                     let sync_stats = push_index(
-                        &entry.filename,
+                        &archive_name,
                         index,
                         chunk_reader,
                         &backup_writer,
@@ -845,7 +847,7 @@ pub(crate) async fn push_snapshot(
                     // Add known chunks, ignore errors since archive might not be present
                     let _res = backup_writer
                         .download_previous_fixed_index(
-                            &entry.filename,
+                            &archive_name,
                             manifest,
                             known_chunks.clone(),
                         )
@@ -855,7 +857,7 @@ pub(crate) async fn push_snapshot(
                     let chunk_reader = reader.chunk_reader(entry.chunk_crypt_mode());
                     let size = index.index_bytes();
                     let sync_stats = push_index(
-                        &entry.filename,
+                        &archive_name,
                         index,
                         chunk_reader,
                         &backup_writer,
@@ -874,12 +876,13 @@ pub(crate) async fn push_snapshot(
     // Fetch client log from source and push to target
     // this has to be handled individually since the log is never part of the manifest
     let mut client_log_path = backup_dir.full_path();
-    client_log_path.push(CLIENT_LOG_BLOB_NAME);
+    let client_log_name = &CLIENT_LOG_BLOB_NAME;
+    client_log_path.push(client_log_name.as_ref());
     if client_log_path.is_file() {
         backup_writer
             .upload_blob_from_file(
                 &client_log_path,
-                CLIENT_LOG_BLOB_NAME,
+                client_log_name.as_ref(),
                 upload_options.clone(),
             )
             .await?;
@@ -891,7 +894,7 @@ pub(crate) async fn push_snapshot(
     let backup_stats = backup_writer
         .upload_blob_from_data(
             manifest_string.into_bytes(),
-            MANIFEST_BLOB_NAME,
+            MANIFEST_BLOB_NAME.as_ref(),
             upload_options,
         )
         .await?;
@@ -912,7 +915,7 @@ pub(crate) async fn push_snapshot(
 // For fixed indexes, the size must be provided as given by the index reader.
 #[allow(clippy::too_many_arguments)]
 async fn push_index<'a>(
-    filename: &'a str,
+    filename: &'a BackupArchiveName,
     index: impl IndexFile + Send + 'static,
     chunk_reader: Arc<dyn AsyncReadChunk>,
     backup_writer: &BackupWriter,
diff --git a/src/server/sync.rs b/src/server/sync.rs
index a0157ab2..4c6b43d2 100644
--- a/src/server/sync.rs
+++ b/src/server/sync.rs
@@ -2,6 +2,7 @@
 
 use std::collections::HashMap;
 use std::io::{Seek, Write};
+use std::ops::Deref;
 use std::path::{Path, PathBuf};
 use std::sync::{Arc, Mutex};
 use std::time::Duration;
@@ -18,11 +19,11 @@ use proxmox_router::HttpError;
 
 use pbs_api_types::{
     Authid, BackupDir, BackupGroup, BackupNamespace, CryptMode, GroupListItem, SnapshotListItem,
-    SyncDirection, SyncJobConfig, MAX_NAMESPACE_DEPTH, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_READ,
+    SyncDirection, SyncJobConfig, CLIENT_LOG_BLOB_NAME, MAX_NAMESPACE_DEPTH, PRIV_DATASTORE_BACKUP,
+    PRIV_DATASTORE_READ,
 };
 use pbs_client::{BackupReader, BackupRepository, HttpClient, RemoteChunkReader};
 use pbs_datastore::data_blob::DataBlob;
-use pbs_datastore::manifest::CLIENT_LOG_BLOB_NAME;
 use pbs_datastore::read_chunk::AsyncReadChunk;
 use pbs_datastore::{DataStore, ListNamespacesRecursive, LocalChunkReader};
 
@@ -162,15 +163,19 @@ impl SyncSourceReader for RemoteSourceReader {
             .open(&tmp_path)?;
 
         // Note: be silent if there is no log - only log successful download
+        let client_log_name = &CLIENT_LOG_BLOB_NAME;
         if let Ok(()) = self
             .backup_reader
-            .download(CLIENT_LOG_BLOB_NAME, tmpfile)
+            .download(client_log_name.as_ref(), tmpfile)
             .await
         {
             if let Err(err) = std::fs::rename(&tmp_path, to_path) {
                 bail!("Atomic rename file {to_path:?} failed - {err}");
             }
-            info!("got backup log file {CLIENT_LOG_BLOB_NAME:?}");
+            info!(
+                "got backup log file {client_log_name}",
+                client_log_name = client_log_name.deref()
+            );
         }
 
         Ok(())
diff --git a/tests/prune.rs b/tests/prune.rs
index 3b320969..b11449ca 100644
--- a/tests/prune.rs
+++ b/tests/prune.rs
@@ -2,8 +2,7 @@ use std::path::PathBuf;
 
 use anyhow::Error;
 
-use pbs_api_types::PruneJobOptions;
-use pbs_datastore::manifest::MANIFEST_BLOB_NAME;
+use pbs_api_types::{PruneJobOptions, MANIFEST_BLOB_NAME};
 use pbs_datastore::prune::compute_prune_info;
 use pbs_datastore::{BackupDir, BackupInfo};
 
@@ -34,7 +33,7 @@ fn create_info(snapshot: &str, partial: bool) -> BackupInfo {
     let mut files = Vec::new();
 
     if !partial {
-        files.push(String::from(MANIFEST_BLOB_NAME));
+        files.push(MANIFEST_BLOB_NAME.to_string());
     }
 
     BackupInfo {