client/server: use dedicated api type for all archive names

Instead of using the plain String or slices of it for archive names,
use the dedicated api type and its methods to parse and check the
archive type based on the archive filename extension.

This keeps the checks and mappings in the api type and restricts
function parameters to the narrower wrapper type, reducing potential
misuse.

Further, instead of declaring and using archive name constants
throughout the codebase, use the `BackupArchiveName` helpers to
generate the archive names for the manifest, client logs and
encryption keys.

This allows for easy archive name comparisons using the same
`BackupArchiveName` type, at the cost of some extra allocations, and
avoids the previous duplicate declaration of the `CATALOG_NAME`
constant.
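
A minimal sketch of the pattern (the actual constants live in
`pbs-api-types`; the declaration below is illustrative only):
```
use std::sync::LazyLock;

use pbs_api_types::BackupArchiveName;

// One shared, lazily parsed constant instead of duplicated &str constants.
pub static MANIFEST_BLOB_NAME: LazyLock<BackupArchiveName> =
    LazyLock::new(|| "index.json.blob".try_into().unwrap());

// Callers compare against the wrapper type or borrow the inner &str:
//     if archive_name == *MANIFEST_BLOB_NAME { ... }
//     manifest_path.push(MANIFEST_BLOB_NAME.as_ref());
```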

A positive ergonomic side effect is that commands now optionally
accept the archive type extension when passing the archive name.

E.g.
```
proxmox-backup-client restore <snapshot> <name>.pxar.didx <target>
```
is equal to
```
proxmox-backup-client restore <snapshot> <name>.pxar <target>
```

The previous default of mapping any other archive name extension to a
blob has been dropped in favor of consistent mapping by the api type
helpers.
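
A rough sketch of the resulting behaviour (illustrative asserts based
on the method names used in this patch, not the exact implementation):
```
use pbs_api_types::{ArchiveType, BackupArchiveName};

fn example() -> Result<(), anyhow::Error> {
    // both spellings resolve to the same server archive name
    let name: BackupArchiveName = "data.pxar".try_into()?;
    let explicit: BackupArchiveName = "data.pxar.didx".try_into()?;
    assert_eq!(name, explicit);

    assert_eq!(name.archive_type(), ArchiveType::DynamicIndex);
    assert_eq!(name.without_type_extension(), "data.pxar");

    // an unknown extension is now a parse error instead of silently
    // being treated as a blob
    assert!(BackupArchiveName::try_from("data.unknown").is_err());
    Ok(())
}
```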

Signed-off-by: Christian Ebner <c.ebner@proxmox.com>

FG: use LazyLock for constant archive names
FG: add missing import

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
Christian Ebner 2024-11-22 11:30:09 +01:00 committed by Fabian Grünbichler
parent addfae26cf
commit 6771869cc1
21 changed files with 284 additions and 251 deletions

View File

@@ -6,13 +6,12 @@ use std::sync::Arc;
 use futures::future::AbortHandle;
 use serde_json::{json, Value};
 
-use pbs_api_types::{BackupDir, BackupNamespace};
+use pbs_api_types::{BackupArchiveName, BackupDir, BackupNamespace, MANIFEST_BLOB_NAME};
 use pbs_datastore::data_blob::DataBlob;
 use pbs_datastore::data_blob_reader::DataBlobReader;
 use pbs_datastore::dynamic_index::DynamicIndexReader;
 use pbs_datastore::fixed_index::FixedIndexReader;
 use pbs_datastore::index::IndexFile;
-use pbs_datastore::manifest::MANIFEST_BLOB_NAME;
 use pbs_datastore::{BackupManifest, PROXMOX_BACKUP_READER_PROTOCOL_ID_V1};
 use pbs_tools::crypt_config::CryptConfig;
 use pbs_tools::sha::sha256;
@@ -127,7 +126,8 @@ impl BackupReader {
     /// The manifest signature is verified if we have a crypt_config.
     pub async fn download_manifest(&self) -> Result<(BackupManifest, Vec<u8>), Error> {
         let mut raw_data = Vec::with_capacity(64 * 1024);
-        self.download(MANIFEST_BLOB_NAME, &mut raw_data).await?;
+        self.download(MANIFEST_BLOB_NAME.as_ref(), &mut raw_data)
+            .await?;
         let blob = DataBlob::load_from_reader(&mut &raw_data[..])?;
         // no expected digest available
         let data = blob.decode(None, None)?;
@@ -145,11 +145,11 @@ impl BackupReader {
     pub async fn download_blob(
         &self,
         manifest: &BackupManifest,
-        name: &str,
+        name: &BackupArchiveName,
     ) -> Result<DataBlobReader<'_, File>, Error> {
         let mut tmpfile = crate::tools::create_tmp_file()?;
 
-        self.download(name, &mut tmpfile).await?;
+        self.download(name.as_ref(), &mut tmpfile).await?;
         tmpfile.seek(SeekFrom::Start(0))?;
 
         let (csum, size) = sha256(&mut tmpfile)?;
@@ -167,11 +167,11 @@ impl BackupReader {
     pub async fn download_dynamic_index(
         &self,
         manifest: &BackupManifest,
-        name: &str,
+        name: &BackupArchiveName,
     ) -> Result<DynamicIndexReader, Error> {
         let mut tmpfile = crate::tools::create_tmp_file()?;
 
-        self.download(name, &mut tmpfile).await?;
+        self.download(name.as_ref(), &mut tmpfile).await?;
 
         let index = DynamicIndexReader::new(tmpfile)
             .map_err(|err| format_err!("unable to read dynamic index '{}' - {}", name, err))?;
@@ -190,11 +190,11 @@ impl BackupReader {
     pub async fn download_fixed_index(
         &self,
         manifest: &BackupManifest,
-        name: &str,
+        name: &BackupArchiveName,
     ) -> Result<FixedIndexReader, Error> {
         let mut tmpfile = crate::tools::create_tmp_file()?;
 
-        self.download(name, &mut tmpfile).await?;
+        self.download(name.as_ref(), &mut tmpfile).await?;
 
         let index = FixedIndexReader::new(tmpfile)
             .map_err(|err| format_err!("unable to read fixed index '{}' - {}", name, err))?;

View File

@@ -13,13 +13,15 @@ use tokio::io::AsyncReadExt;
 use tokio::sync::{mpsc, oneshot};
 use tokio_stream::wrappers::ReceiverStream;
 
-use pbs_api_types::{ArchiveType, BackupDir, BackupNamespace};
+use pbs_api_types::{
+    ArchiveType, BackupArchiveName, BackupDir, BackupNamespace, CATALOG_NAME, MANIFEST_BLOB_NAME,
+};
 use pbs_datastore::data_blob::{ChunkInfo, DataBlob, DataChunkBuilder};
 use pbs_datastore::dynamic_index::DynamicIndexReader;
 use pbs_datastore::fixed_index::FixedIndexReader;
 use pbs_datastore::index::IndexFile;
-use pbs_datastore::manifest::{BackupManifest, MANIFEST_BLOB_NAME};
-use pbs_datastore::{CATALOG_NAME, PROXMOX_BACKUP_PROTOCOL_ID_V1};
+use pbs_datastore::manifest::BackupManifest;
+use pbs_datastore::PROXMOX_BACKUP_PROTOCOL_ID_V1;
 use pbs_tools::crypt_config::CryptConfig;
 
 use proxmox_human_byte::HumanByte;
@@ -269,7 +271,7 @@ impl BackupWriter {
     /// Upload chunks and index
     pub async fn upload_index_chunk_info(
         &self,
-        archive_name: &str,
+        archive_name: &BackupArchiveName,
         stream: impl Stream<Item = Result<MergedChunkInfo, Error>>,
         options: UploadOptions,
     ) -> Result<BackupStats, Error> {
@@ -361,7 +363,7 @@
     pub async fn upload_stream(
         &self,
-        archive_name: &str,
+        archive_name: &BackupArchiveName,
         stream: impl Stream<Item = Result<bytes::BytesMut, Error>>,
         options: UploadOptions,
         injections: Option<std::sync::mpsc::Receiver<InjectChunks>>,
@@ -387,13 +389,13 @@ impl BackupWriter {
         if !manifest
             .files()
             .iter()
-            .any(|file| file.filename == archive_name)
+            .any(|file| file.filename == archive_name.as_ref())
         {
             log::info!("Previous manifest does not contain an archive called '{archive_name}', skipping download..");
         } else {
             // try, but ignore errors
-            match ArchiveType::from_path(archive_name) {
-                Ok(ArchiveType::FixedIndex) => {
+            match archive_name.archive_type() {
+                ArchiveType::FixedIndex => {
                     if let Err(err) = self
                         .download_previous_fixed_index(
                             archive_name,
@@ -405,7 +407,7 @@ impl BackupWriter {
                         log::warn!("Error downloading .fidx from previous manifest: {}", err);
                     }
                 }
-                Ok(ArchiveType::DynamicIndex) => {
+                ArchiveType::DynamicIndex => {
                     if let Err(err) = self
                         .download_previous_dynamic_index(
                             archive_name,
@@ -429,12 +431,6 @@ impl BackupWriter {
             .as_u64()
             .unwrap();
 
-        let archive = if log::log_enabled!(log::Level::Debug) {
-            archive_name
-        } else {
-            pbs_tools::format::strip_server_file_extension(archive_name)
-        };
-
         let upload_stats = Self::upload_chunk_info_stream(
             self.h2.clone(),
             wid,
@@ -448,12 +444,17 @@ impl BackupWriter {
             },
             options.compress,
             injections,
-            archive,
+            archive_name,
         )
         .await?;
 
         let size_dirty = upload_stats.size - upload_stats.size_reused;
         let size: HumanByte = upload_stats.size.into();
+        let archive = if log::log_enabled!(log::Level::Debug) {
+            archive_name.to_string()
+        } else {
+            archive_name.without_type_extension()
+        };
 
         if upload_stats.chunk_injected > 0 {
             log::info!(
@@ -463,7 +464,7 @@ impl BackupWriter {
             );
         }
 
-        if archive_name != CATALOG_NAME {
+        if *archive_name != *CATALOG_NAME {
             let speed: HumanByte =
                 ((size_dirty * 1_000_000) / (upload_stats.duration.as_micros() as usize)).into();
             let size_dirty: HumanByte = size_dirty.into();
@@ -629,7 +630,7 @@ impl BackupWriter {
     pub async fn download_previous_fixed_index(
         &self,
-        archive_name: &str,
+        archive_name: &BackupArchiveName,
         manifest: &BackupManifest,
         known_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
     ) -> Result<FixedIndexReader, Error> {
@@ -664,7 +665,7 @@ impl BackupWriter {
     pub async fn download_previous_dynamic_index(
         &self,
-        archive_name: &str,
+        archive_name: &BackupArchiveName,
         manifest: &BackupManifest,
         known_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
     ) -> Result<DynamicIndexReader, Error> {
@@ -711,7 +712,7 @@ impl BackupWriter {
     pub async fn download_previous_manifest(&self) -> Result<BackupManifest, Error> {
         let mut raw_data = Vec::with_capacity(64 * 1024);
 
-        let param = json!({ "archive-name": MANIFEST_BLOB_NAME });
+        let param = json!({ "archive-name": MANIFEST_BLOB_NAME.to_string() });
         self.h2
             .download("previous", Some(param), &mut raw_data)
             .await?;
@@ -739,7 +740,7 @@ impl BackupWriter {
         crypt_config: Option<Arc<CryptConfig>>,
         compress: bool,
         injections: Option<std::sync::mpsc::Receiver<InjectChunks>>,
-        archive: &str,
+        archive: &BackupArchiveName,
     ) -> impl Future<Output = Result<UploadStats, Error>> {
         let mut counters = UploadCounters::new();
         let counters_readonly = counters.clone();
@@ -831,7 +832,7 @@ impl BackupWriter {
     fn upload_merged_chunk_stream(
         h2: H2Client,
         wid: u64,
-        archive: &str,
+        archive: &BackupArchiveName,
         prefix: &str,
         stream: impl Stream<Item = Result<MergedChunkInfo, Error>>,
         index_csum: Arc<Mutex<Option<Sha256>>>,

View File

@@ -14,6 +14,7 @@ use pxar::accessor::ReadAt;
 use pxar::format::StatxTimestamp;
 use pxar::{mode, Entry, EntryKind, Metadata};
 
+use pbs_api_types::BackupArchiveName;
 use pbs_datastore::catalog::{ArchiveEntry, CatalogEntryType, DirEntryAttribute};
 use pbs_datastore::dynamic_index::{BufferedDynamicReader, LocalDynamicReadAt};
@@ -330,7 +331,7 @@ pub fn handle_root_with_optional_format_version_prelude<R: pxar::decoder::SeqRead
 }
 
 pub async fn get_remote_pxar_reader(
-    archive_name: &str,
+    archive_name: &BackupArchiveName,
     client: Arc<BackupReader>,
     manifest: &BackupManifest,
     crypt_config: Option<Arc<CryptConfig>>,

View File

@@ -17,7 +17,9 @@ use proxmox_router::cli::{complete_file_name, shellword_split};
 use proxmox_schema::*;
 use proxmox_sys::fs::file_get_json;
 
-use pbs_api_types::{Authid, BackupNamespace, RateLimitConfig, UserWithTokens, BACKUP_REPO_URL};
+use pbs_api_types::{
+    Authid, BackupArchiveName, BackupNamespace, RateLimitConfig, UserWithTokens, BACKUP_REPO_URL,
+};
 use pbs_datastore::BackupManifest;
 
 use crate::{BackupRepository, HttpClient, HttpClientOptions};
@@ -548,19 +550,18 @@
 }
 
 pub fn get_pxar_archive_names(
-    archive_name: &str,
+    archive_name: &BackupArchiveName,
     manifest: &BackupManifest,
-) -> Result<(String, Option<String>), Error> {
-    let (filename, ext) = match archive_name.strip_suffix(".didx") {
-        Some(filename) => (filename, ".didx"),
-        None => (archive_name, ""),
-    };
+) -> Result<(BackupArchiveName, Option<BackupArchiveName>), Error> {
+    let filename = archive_name.without_type_extension();
+    let ext = archive_name.archive_type().extension();
 
-    // Check if archive with given extension is present
+    // Check if archive is given as split archive or regular archive and is present in manifest,
+    // otherwise goto fallback below
     if manifest
         .files()
         .iter()
-        .any(|fileinfo| fileinfo.filename == format!("{filename}.didx"))
+        .any(|fileinfo| fileinfo.filename == archive_name.as_ref())
     {
         // check if already given as one of split archive name variants
         if let Some(base) = filename
@@ -568,8 +569,8 @@ pub fn get_pxar_archive_names(
             .or_else(|| filename.strip_suffix(".ppxar"))
         {
             return Ok((
-                format!("{base}.mpxar{ext}"),
-                Some(format!("{base}.ppxar{ext}")),
+                format!("{base}.mpxar.{ext}").as_str().try_into()?,
+                Some(format!("{base}.ppxar.{ext}").as_str().try_into()?),
             ));
         }
         return Ok((archive_name.to_owned(), None));
@@ -577,7 +578,10 @@ pub fn get_pxar_archive_names(
 
     // if not, try fallback from regular to split archive
    if let Some(base) = filename.strip_suffix(".pxar") {
-        return get_pxar_archive_names(&format!("{base}.mpxar{ext}"), manifest);
+        return get_pxar_archive_names(
+            &format!("{base}.mpxar.{ext}").as_str().try_into()?,
+            manifest,
+        );
    }
 
    bail!("archive not found in manifest");

View File

@@ -9,13 +9,11 @@ use proxmox_sys::fs::{lock_dir_noblock, replace_file, CreateOptions};
 
 use pbs_api_types::{
     Authid, BackupGroupDeleteStats, BackupNamespace, BackupType, GroupFilter, BACKUP_DATE_REGEX,
-    BACKUP_FILE_REGEX,
+    BACKUP_FILE_REGEX, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME,
 };
 use pbs_config::{open_backup_lockfile, BackupLockGuard};
 
-use crate::manifest::{
-    BackupManifest, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME, MANIFEST_LOCK_NAME,
-};
+use crate::manifest::{BackupManifest, MANIFEST_LOCK_NAME};
 use crate::{DataBlob, DataStore};
 
 /// BackupGroup is a directory containing a list of BackupDir
@@ -139,7 +137,7 @@ impl BackupGroup {
         }
 
         let mut manifest_path = PathBuf::from(backup_time);
-        manifest_path.push(MANIFEST_BLOB_NAME);
+        manifest_path.push(MANIFEST_BLOB_NAME.as_ref());
 
         use nix::fcntl::{openat, OFlag};
         match openat(
@@ -492,7 +490,7 @@ impl BackupDir {
     /// Load the manifest without a lock. Must not be written back.
     pub fn load_manifest(&self) -> Result<(BackupManifest, u64), Error> {
-        let blob = self.load_blob(MANIFEST_BLOB_NAME)?;
+        let blob = self.load_blob(MANIFEST_BLOB_NAME.as_ref())?;
         let raw_size = blob.raw_size();
         let manifest = BackupManifest::try_from(blob)?;
         Ok((manifest, raw_size))
@@ -515,7 +513,7 @@ impl BackupDir {
         let raw_data = blob.raw_data();
 
         let mut path = self.full_path();
-        path.push(MANIFEST_BLOB_NAME);
+        path.push(MANIFEST_BLOB_NAME.as_ref());
 
         // atomic replace invalidates flock - no other writes past this point!
         replace_file(&path, raw_data, CreateOptions::new(), false)?;
@@ -636,7 +634,9 @@ impl BackupInfo {
     pub fn is_finished(&self) -> bool {
         // backup is considered unfinished if there is no manifest
-        self.files.iter().any(|name| name == MANIFEST_BLOB_NAME)
+        self.files
+            .iter()
+            .any(|name| name == MANIFEST_BLOB_NAME.as_ref())
     }
 }

View File

@@ -144,9 +144,6 @@
 #![deny(unsafe_op_in_unsafe_fn)]
 
-// Note: .pcat1 => Proxmox Catalog Format version 1
-pub const CATALOG_NAME: &str = "catalog.pcat1.didx";
-
 /// Directory path where active operations counters are saved.
 pub const ACTIVE_OPERATIONS_DIR: &str = concat!(
     pbs_buildcfg::PROXMOX_BACKUP_RUN_DIR_M!(),

View File

@@ -3,13 +3,10 @@ use anyhow::{bail, format_err, Error};
 use serde::{Deserialize, Serialize};
 use serde_json::{json, Value};
 
-use pbs_api_types::{ArchiveType, BackupType, CryptMode, Fingerprint};
+use pbs_api_types::{BackupArchiveName, BackupType, CryptMode, Fingerprint};
 use pbs_tools::crypt_config::CryptConfig;
 
-pub const MANIFEST_BLOB_NAME: &str = "index.json.blob";
 pub const MANIFEST_LOCK_NAME: &str = ".index.json.lck";
-pub const CLIENT_LOG_BLOB_NAME: &str = "client.log.blob";
-pub const ENCRYPTED_KEY_BLOB_NAME: &str = "rsa-encrypted.key.blob";
 
 fn crypt_mode_none() -> CryptMode {
     CryptMode::None
@@ -68,14 +65,13 @@ impl BackupManifest {
     pub fn add_file(
         &mut self,
-        filename: String,
+        filename: &BackupArchiveName,
         size: u64,
         csum: [u8; 32],
         crypt_mode: CryptMode,
     ) -> Result<(), Error> {
-        let _archive_type = ArchiveType::from_path(&filename)?; // check type
         self.files.push(FileInfo {
-            filename,
+            filename: filename.to_string(),
            size,
            csum,
            crypt_mode,
@@ -87,8 +83,11 @@ impl BackupManifest {
         &self.files[..]
     }
 
-    pub fn lookup_file_info(&self, name: &str) -> Result<&FileInfo, Error> {
-        let info = self.files.iter().find(|item| item.filename == name);
+    pub fn lookup_file_info(&self, name: &BackupArchiveName) -> Result<&FileInfo, Error> {
+        let info = self
+            .files
+            .iter()
+            .find(|item| item.filename == name.as_ref());
 
         match info {
             None => bail!("manifest does not contain file '{}'", name),
@@ -96,7 +95,12 @@ impl BackupManifest {
         }
     }
 
-    pub fn verify_file(&self, name: &str, csum: &[u8; 32], size: u64) -> Result<(), Error> {
+    pub fn verify_file(
+        &self,
+        name: &BackupArchiveName,
+        csum: &[u8; 32],
+        size: u64,
+    ) -> Result<(), Error> {
         let info = self.lookup_file_info(name)?;
 
         if size != info.size {
@@ -256,8 +260,13 @@ fn test_manifest_signature() -> Result<(), Error> {
     let mut manifest = BackupManifest::new("host/elsa/2020-06-26T13:56:05Z".parse()?);
 
-    manifest.add_file("test1.img.fidx".into(), 200, [1u8; 32], CryptMode::Encrypt)?;
-    manifest.add_file("abc.blob".into(), 200, [2u8; 32], CryptMode::None)?;
+    manifest.add_file(
+        &"test1.img.fidx".try_into()?,
+        200,
+        [1u8; 32],
+        CryptMode::Encrypt,
+    )?;
+    manifest.add_file(&"abc.blob".try_into()?, 200, [2u8; 32], CryptMode::None)?;
 
     manifest.unprotected["note"] = "This is not protected by the signature.".into();

View File

@@ -8,13 +8,15 @@ use nix::dir::Dir;
 
 use proxmox_sys::fs::lock_dir_noblock_shared;
 
-use pbs_api_types::{print_store_and_ns, ArchiveType, BackupNamespace, Operation};
+use pbs_api_types::{
+    print_store_and_ns, ArchiveType, BackupNamespace, Operation, CLIENT_LOG_BLOB_NAME,
+    MANIFEST_BLOB_NAME,
+};
 
 use crate::backup_info::BackupDir;
 use crate::dynamic_index::DynamicIndexReader;
 use crate::fixed_index::FixedIndexReader;
 use crate::index::IndexFile;
-use crate::manifest::{CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME};
 use crate::DataStore;
 
 /// Helper to access the contents of a datastore backup snapshot
@@ -62,7 +64,7 @@ impl SnapshotReader {
         };
 
         let mut client_log_path = snapshot_path;
-        client_log_path.push(CLIENT_LOG_BLOB_NAME);
+        client_log_path.push(CLIENT_LOG_BLOB_NAME.as_ref());
 
         let mut file_list = vec![MANIFEST_BLOB_NAME.to_string()];
         for item in manifest.files() {

View File

@@ -7,9 +7,8 @@ use serde_json::Value;
 use proxmox_router::cli::*;
 use proxmox_schema::api;
 
-use pbs_api_types::BackupNamespace;
+use pbs_api_types::{BackupArchiveName, BackupNamespace, CATALOG_NAME};
 use pbs_client::pxar::tools::get_remote_pxar_reader;
-use pbs_client::tools::has_pxar_filename_extension;
 use pbs_client::tools::key_source::get_encryption_key_password;
 use pbs_client::{BackupReader, RemoteChunkReader};
 use pbs_tools::crypt_config::CryptConfig;
@@ -22,7 +21,7 @@ use crate::{
     complete_pxar_archive_name, complete_repository, connect, crypto_parameters, decrypt_key,
     dir_or_last_from_group, extract_repository_from_value, format_key_source, optional_ns_param,
     record_repository, BackupDir, BufferedDynamicReader, CatalogReader, DynamicIndexReader,
-    IndexFile, Shell, CATALOG_NAME, KEYFD_SCHEMA, REPO_URL_SCHEMA,
+    IndexFile, Shell, KEYFD_SCHEMA, REPO_URL_SCHEMA,
 };
 
 #[api(
@@ -90,7 +89,7 @@ async fn dump_catalog(param: Value) -> Result<Value, Error> {
     let (manifest, _) = client.download_manifest().await?;
     manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref))?;
 
-    let file_info = match manifest.lookup_file_info(CATALOG_NAME) {
+    let file_info = match manifest.lookup_file_info(&CATALOG_NAME) {
         Ok(file_info) => file_info,
         Err(err) => {
             let mut metadata_archives = Vec::new();
@@ -104,7 +103,7 @@ async fn dump_catalog(param: Value) -> Result<Value, Error> {
             for archive in &metadata_archives {
                 let (reader, archive_size) = get_remote_pxar_reader(
-                    &archive,
+                    &archive.as_str().try_into()?,
                     client.clone(),
                     &manifest,
                     crypt_config.clone(),
@@ -128,7 +127,7 @@ async fn dump_catalog(param: Value) -> Result<Value, Error> {
     };
 
     let index = client
-        .download_dynamic_index(&manifest, CATALOG_NAME)
+        .download_dynamic_index(&manifest, &CATALOG_NAME)
         .await?;
 
     let most_used = index.find_most_used_chunks(8);
@@ -170,8 +169,7 @@ async fn dump_catalog(param: Value) -> Result<Value, Error> {
                 description: "Group/Snapshot path.",
             },
             "archive-name": {
-                type: String,
-                description: "Backup archive name.",
+                type: BackupArchiveName,
             },
             "repository": {
                 optional: true,
@@ -195,7 +193,8 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
     let client = connect(&repo)?;
     let backup_ns = optional_ns_param(&param)?;
     let path = required_string_param(&param, "snapshot")?;
-    let archive_name = required_string_param(&param, "archive-name")?;
+    let server_archive_name: BackupArchiveName =
+        required_string_param(&param, "archive-name")?.try_into()?;
 
     let backup_dir = dir_or_last_from_group(&client, &repo, &backup_ns, path).await?;
@@ -214,9 +213,7 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
         }
     };
 
-    let server_archive_name = if has_pxar_filename_extension(archive_name, false) {
-        format!("{}.didx", archive_name)
-    } else {
+    if !server_archive_name.has_pxar_filename_extension() {
         bail!("Can only mount pxar archives.");
     };
@@ -233,7 +230,7 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
     let (manifest, _) = client.download_manifest().await?;
     manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref))?;
 
-    if let Err(_err) = manifest.lookup_file_info(CATALOG_NAME) {
+    if let Err(_err) = manifest.lookup_file_info(&CATALOG_NAME) {
         // No catalog, fallback to pxar archive accessor if present
         let accessor = helper::get_pxar_fuse_accessor(
             &server_archive_name,
@@ -243,7 +240,7 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
         )
         .await?;
 
-        let state = Shell::new(None, &server_archive_name, accessor).await?;
+        let state = Shell::new(None, &server_archive_name.as_ref(), accessor).await?;
         log::info!("Starting interactive shell");
         state.shell().await?;
         record_repository(&repo);
@@ -261,17 +258,17 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
     )
     .await?;
 
-    client.download(CATALOG_NAME, &mut tmpfile).await?;
+    client.download(CATALOG_NAME.as_ref(), &mut tmpfile).await?;
     let index = DynamicIndexReader::new(tmpfile)
         .map_err(|err| format_err!("unable to read catalog index - {}", err))?;
 
     // Note: do not use values stored in index (not trusted) - instead, computed them again
     let (csum, size) = index.compute_csum();
-    manifest.verify_file(CATALOG_NAME, &csum, size)?;
+    manifest.verify_file(&CATALOG_NAME, &csum, size)?;
 
     let most_used = index.find_most_used_chunks(8);
-    let file_info = manifest.lookup_file_info(CATALOG_NAME)?;
+    let file_info = manifest.lookup_file_info(&CATALOG_NAME)?;
     let chunk_reader = RemoteChunkReader::new(
         client.clone(),
         crypt_config,
@@ -286,7 +283,7 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
     catalogfile.seek(SeekFrom::Start(0))?;
     let catalog_reader = CatalogReader::new(catalogfile);
 
-    let state = Shell::new(Some(catalog_reader), &server_archive_name, decoder).await?;
+    let state = Shell::new(Some(catalog_reader), &server_archive_name.as_ref(), decoder).await?;
     log::info!("Starting interactive shell");
     state.shell().await?;

View File

@@ -1,6 +1,7 @@
 use std::sync::Arc;
 
 use anyhow::Error;
 
+use pbs_api_types::BackupArchiveName;
 use pbs_client::{BackupReader, RemoteChunkReader};
 use pbs_datastore::BackupManifest;
 use pbs_tools::crypt_config::CryptConfig;
@@ -8,7 +9,7 @@ use pbs_tools::crypt_config::CryptConfig;
 use crate::{BufferedDynamicReadAt, BufferedDynamicReader, IndexFile};
 
 pub(crate) async fn get_pxar_fuse_accessor(
-    archive_name: &str,
+    archive_name: &BackupArchiveName,
     client: Arc<BackupReader>,
     manifest: &BackupManifest,
     crypt_config: Option<Arc<CryptConfig>>,
@@ -44,7 +45,7 @@ pub(crate) async fn get_pxar_fuse_accessor(
 }
 
 pub(crate) async fn get_pxar_fuse_reader(
-    archive_name: &str,
+    archive_name: &BackupArchiveName,
     client: Arc<BackupReader>,
     manifest: &BackupManifest,
     crypt_config: Option<Arc<CryptConfig>>,
@@ -57,7 +58,7 @@ pub(crate) async fn get_pxar_fuse_reader(
 }
 
 pub(crate) async fn get_buffered_pxar_reader(
-    archive_name: &str,
+    archive_name: &BackupArchiveName,
     client: Arc<BackupReader>,
     manifest: &BackupManifest,
     crypt_config: Option<Arc<CryptConfig>>,

View File

@@ -7,6 +7,7 @@ use std::task::Context;
 
 use anyhow::{bail, format_err, Error};
 use futures::stream::{StreamExt, TryStreamExt};
+use pbs_client::tools::has_pxar_filename_extension;
 use serde::Deserialize;
 use serde_json::{json, Value};
 use tokio::sync::mpsc;
@@ -25,10 +26,11 @@ use pxar::accessor::aio::Accessor;
 use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};
 
 use pbs_api_types::{
-    ArchiveType, Authid, BackupDir, BackupGroup, BackupNamespace, BackupPart, BackupType,
-    ClientRateLimitConfig, CryptMode, Fingerprint, GroupListItem, PruneJobOptions, PruneListItem,
-    RateLimitConfig, SnapshotListItem, StorageStatus, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA,
-    BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA,
+    ArchiveType, Authid, BackupArchiveName, BackupDir, BackupGroup, BackupNamespace, BackupPart,
+    BackupType, ClientRateLimitConfig, CryptMode, Fingerprint, GroupListItem, PruneJobOptions,
+    PruneListItem, RateLimitConfig, SnapshotListItem, StorageStatus, BACKUP_ID_SCHEMA,
+    BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, CATALOG_NAME,
+    ENCRYPTED_KEY_BLOB_NAME, MANIFEST_BLOB_NAME,
 };
 use pbs_client::catalog_shell::Shell;
 use pbs_client::pxar::{ErrorHandler as PxarErrorHandler, MetadataArchiveReader, PxarPrevRef};
@@ -36,7 +38,7 @@ use pbs_client::tools::{
     complete_archive_name, complete_auth_id, complete_backup_group, complete_backup_snapshot,
     complete_backup_source, complete_chunk_size, complete_group_or_snapshot,
     complete_img_archive_name, complete_namespace, complete_pxar_archive_name, complete_repository,
-    connect, connect_rate_limited, extract_repository_from_value, has_pxar_filename_extension,
+    connect, connect_rate_limited, extract_repository_from_value,
     key_source::{
         crypto_parameters, format_key_source, get_encryption_key_password, KEYFD_SCHEMA,
         KEYFILE_SCHEMA, MASTER_PUBKEY_FD_SCHEMA, MASTER_PUBKEY_FILE_SCHEMA,
@@ -54,9 +56,8 @@ use pbs_datastore::chunk_store::verify_chunk_size;
 use pbs_datastore::dynamic_index::{BufferedDynamicReader, DynamicIndexReader, LocalDynamicReadAt};
 use pbs_datastore::fixed_index::FixedIndexReader;
 use pbs_datastore::index::IndexFile;
-use pbs_datastore::manifest::{BackupManifest, ENCRYPTED_KEY_BLOB_NAME, MANIFEST_BLOB_NAME};
+use pbs_datastore::manifest::BackupManifest;
 use pbs_datastore::read_chunk::AsyncReadChunk;
-use pbs_datastore::CATALOG_NAME;
 use pbs_key_config::{decrypt_key, rsa_encrypt_key_config, KeyConfig};
 use pbs_tools::crypt_config::CryptConfig;
 use pbs_tools::json;
@@ -196,8 +197,8 @@ pub async fn dir_or_last_from_group(
 async fn backup_directory<P: AsRef<Path>>(
     client: &BackupWriter,
     dir_path: P,
-    archive_name: &str,
-    payload_target: Option<&str>,
+    archive_name: &BackupArchiveName,
+    payload_target: Option<&BackupArchiveName>,
     chunk_size: Option<usize>,
     catalog: Option<Arc<Mutex<CatalogWriter<TokioWriterAdapter<StdChannelWriter<Error>>>>>>,
     pxar_create_options: pbs_client::pxar::PxarCreateOptions,
@@ -276,7 +277,7 @@
 async fn backup_image<P: AsRef<Path>>(
     client: &BackupWriter,
     image_path: P,
-    archive_name: &str,
+    archive_name: &BackupArchiveName,
     chunk_size: Option<usize>,
     upload_options: UploadOptions,
 ) -> Result<BackupStats, Error> {
@@ -606,7 +607,7 @@ fn spawn_catalog_upload(
     tokio::spawn(async move {
         let catalog_upload_result = client
-            .upload_stream(CATALOG_NAME, catalog_chunk_stream, upload_options, None)
+            .upload_stream(&CATALOG_NAME, catalog_chunk_stream, upload_options, None)
             .await;
 
         if let Err(ref err) = catalog_upload_result {
@@ -1005,13 +1006,21 @@ async fn create_backup(
     };
 
     for (backup_type, filename, target_base, extension, size) in upload_list {
-        let target = format!("{target_base}.{extension}");
+        let target: BackupArchiveName = format!("{target_base}.{extension}").as_str().try_into()?;
         match (backup_type, dry_run) {
             // dry-run
-            (BackupSpecificationType::CONFIG, true) => log_file("config file", &filename, &target),
-            (BackupSpecificationType::LOGFILE, true) => log_file("log file", &filename, &target),
-            (BackupSpecificationType::PXAR, true) => log_file("directory", &filename, &target),
-            (BackupSpecificationType::IMAGE, true) => log_file("image", &filename, &target),
+            (BackupSpecificationType::CONFIG, true) => {
+                log_file("config file", &filename, target.as_ref())
+            }
+            (BackupSpecificationType::LOGFILE, true) => {
+                log_file("log file", &filename, target.as_ref())
+            }
+            (BackupSpecificationType::PXAR, true) => {
+                log_file("directory", &filename, target.as_ref())
+            }
+            (BackupSpecificationType::IMAGE, true) => {
+                log_file("image", &filename, &target.as_ref())
+            }
             // no dry-run
             (BackupSpecificationType::CONFIG, false) => {
                 let upload_options = UploadOptions {
@@ -1020,11 +1029,11 @@
                     ..UploadOptions::default()
                 };
 
-                log_file("config file", &filename, &target);
+                log_file("config file", &filename, target.as_ref());
                 let stats = client
-                    .upload_blob_from_file(&filename, &target, upload_options)
+                    .upload_blob_from_file(&filename, target.as_ref(), upload_options)
                     .await?;
-                manifest.add_file(target, stats.size, stats.csum, crypto.mode)?;
+                manifest.add_file(&target, stats.size, stats.csum, crypto.mode)?;
             }
             (BackupSpecificationType::LOGFILE, false) => {
                 // fixme: remove - not needed anymore ?
@@ -1034,11 +1043,11 @@
                     ..UploadOptions::default()
                 };
 
-                log_file("log file", &filename, &target);
+                log_file("log file", &filename, target.as_ref());
                 let stats = client
-                    .upload_blob_from_file(&filename, &target, upload_options)
+                    .upload_blob_from_file(&filename, target.as_ref(), upload_options)
                     .await?;
-                manifest.add_file(target, stats.size, stats.csum, crypto.mode)?;
+                manifest.add_file(&target, stats.size, stats.csum, crypto.mode)?;
             }
             (BackupSpecificationType::PXAR, false) => {
                 let target_base = if let Some(base) = target_base.strip_suffix(".pxar") {
@@ -1050,8 +1059,14 @@
                 let (target, payload_target) =
                     if detection_mode.is_metadata() || detection_mode.is_data() {
                         (
-                            format!("{target_base}.mpxar.{extension}"),
-                            Some(format!("{target_base}.ppxar.{extension}")),
+                            format!("{target_base}.mpxar.{extension}")
+                                .as_str()
+                                .try_into()?,
+                            Some(
+                                format!("{target_base}.ppxar.{extension}")
+                                    .as_str()
+                                    .try_into()?,
+                            ),
                         )
                     } else {
                         (target, None)
@@ -1065,12 +1080,12 @@
                     catalog_result_rx = Some(catalog_upload_res.result);
                 }
 
-                log_file("directory", &filename, &target);
+                log_file("directory", &filename, target.as_ref());
                 if let Some(catalog) = catalog.as_ref() {
                     catalog
                         .lock()
                         .unwrap()
-                        .start_directory(std::ffi::CString::new(target.as_str())?.as_c_str())?;
+                        .start_directory(std::ffi::CString::new(target.as_ref())?.as_c_str())?;
                 }
 
                 let mut previous_ref = None;
@@ -1137,7 +1152,7 @@
                     &client,
                     &filename,
                     &target,
-                    payload_target.as_deref(),
+                    payload_target.as_ref().as_deref(),
                     chunk_size_opt,
                     catalog.as_ref().cloned(),
                     pxar_options,
@@ -1147,20 +1162,20 @@
                 if let Some(payload_stats) = payload_stats {
                     manifest.add_file(
-                        payload_target
+                        &payload_target
                             .ok_or_else(|| format_err!("missing payload target archive"))?,
                         payload_stats.size,
                         payload_stats.csum,
                         crypto.mode,
                     )?;
                 }
-                manifest.add_file(target, stats.size, stats.csum, crypto.mode)?;
+                manifest.add_file(&target, stats.size, stats.csum, crypto.mode)?;
                 if let Some(catalog) = catalog.as_ref() {
                     catalog.lock().unwrap().end_directory()?;
                 }
             }
             (BackupSpecificationType::IMAGE, false) => {
-                log_file("image", &filename, &target);
+                log_file("image", &filename, target.as_ref());
 
                 let upload_options = UploadOptions {
                     previous_manifest: previous_manifest.clone(),
@@ -1172,7 +1187,7 @@
                 let stats =
                     backup_image(&client, &filename, &target, chunk_size_opt, upload_options)
                         .await?;
-                manifest.add_file(target, stats.size, stats.csum, crypto.mode)?;
+                manifest.add_file(&target, stats.size, stats.csum, crypto.mode)?;
             }
         }
     }
@@ -1194,22 +1209,30 @@
         if let Some(catalog_result_rx) = catalog_result_rx {
             let stats = catalog_result_rx.await??;
-            manifest.add_file(CATALOG_NAME.to_owned(), stats.size, stats.csum, crypto.mode)?;
+            manifest.add_file(&CATALOG_NAME, stats.size, stats.csum, crypto.mode)?;
         }
     }
 
     if let Some(rsa_encrypted_key) = rsa_encrypted_key {
-        let target = ENCRYPTED_KEY_BLOB_NAME;
-        log::info!("Upload RSA encoded key to '{}' as {}", repo, target);
+        log::info!(
+            "Upload RSA encoded key to '{}' as {}",
+            repo,
+            *ENCRYPTED_KEY_BLOB_NAME
+        );
         let options = UploadOptions {
             compress: false,
             encrypt: false,
             ..UploadOptions::default()
        };
        let stats = client
-            .upload_blob_from_data(rsa_encrypted_key, target, options)
+            .upload_blob_from_data(rsa_encrypted_key, ENCRYPTED_KEY_BLOB_NAME.as_ref(), options)
            .await?;
-        manifest.add_file(target.to_string(), stats.size, stats.csum, crypto.mode)?;
+        manifest.add_file(
+            &ENCRYPTED_KEY_BLOB_NAME,
+            stats.size,
+            stats.csum,
+            crypto.mode,
+        )?;
    }
 
    // create manifest (index.json)
    // manifests are never encrypted, but include a signature
@@ -1225,7 +1248,7 @@
         ..UploadOptions::default()
     };
     client
-        .upload_blob_from_data(manifest.into_bytes(), MANIFEST_BLOB_NAME, options)
+        .upload_blob_from_data(manifest.into_bytes(), MANIFEST_BLOB_NAME.as_ref(), options)
         .await?;
 
     client.finish().await?;
@@ -1238,7 +1261,7 @@
 }
 
 async fn prepare_reference(
-    target: &str,
+    target: &BackupArchiveName,
     manifest: Arc<BackupManifest>,
     backup_writer: &BackupWriter,
     backup_reader: Arc<BackupReader>,
@@ -1250,7 +1273,11 @@
         Ok((target, payload_target)) => (target, payload_target),
         Err(_) => return Ok(None),
     };
-    let payload_target = payload_target.unwrap_or_default();
+    let payload_target = if let Some(payload_target) = payload_target {
+        payload_target
+    } else {
+        return Ok(None);
+    };
 
     let metadata_ref_index = if let Ok(index) = backup_reader
         .download_dynamic_index(&manifest, &target)
@@ -1299,7 +1326,7 @@
     Ok(Some(pbs_client::pxar::PxarPrevRef {
         accessor,
         payload_index: payload_ref_index,
-        archive_name: target,
+        archive_name: target.to_string(),
     }))
 }
@@ -1486,7 +1513,8 @@ async fn restore(
 ) -> Result<Value, Error> {
     let repo = extract_repository_from_value(&param)?;
 
-    let archive_name = json::required_string_param(&param, "archive-name")?;
+    let archive_name: BackupArchiveName =
+        json::required_string_param(&param, "archive-name")?.try_into()?;
 
     let rate_limit = RateLimitConfig::from_client_config(limit);
@@ -1525,11 +1553,9 @@
     )
     .await?;
 
-    let (archive_name, archive_type) = parse_archive_type(archive_name);
-
     let (manifest, backup_index_data) = client.download_manifest().await?;
 
-    if archive_name == ENCRYPTED_KEY_BLOB_NAME && crypt_config.is_none() {
+    if archive_name == *ENCRYPTED_KEY_BLOB_NAME && crypt_config.is_none() {
         log::info!("Restoring encrypted key blob without original key - skipping manifest fingerprint check!")
     } else {
         if manifest.signature.is_some() {
@@ -1543,7 +1569,7 @@
         manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref))?;
     }
 
-    if archive_name == MANIFEST_BLOB_NAME {
+    if archive_name == *MANIFEST_BLOB_NAME {
         if let Some(target) = target {
             replace_file(target, &backup_index_data, CreateOptions::new(), false)?;
         } else {
@@ -1557,7 +1583,7 @@
         return Ok(Value::Null);
     }
 
-    if archive_type == ArchiveType::Blob {
+    if archive_name.archive_type() == ArchiveType::Blob {
         let mut reader = client.download_blob(&manifest, &archive_name).await?;
 
         if let Some(target) = target {
@@ -1576,7 +1602,7 @@
             std::io::copy(&mut reader, &mut writer)
                 .map_err(|err| format_err!("unable to pipe data - {}", err))?;
         }
-    } else if archive_type == ArchiveType::DynamicIndex {
+    } else if archive_name.archive_type() == ArchiveType::DynamicIndex {
         let (archive_name, payload_archive_name) =
             pbs_client::tools::get_pxar_archive_names(&archive_name, &manifest)?;
@@ -1680,7 +1706,7 @@
             std::io::copy(&mut reader, &mut writer)
                 .map_err(|err| format_err!("unable to pipe data - {}", err))?;
         }
-    } else if archive_type == ArchiveType::FixedIndex {
+    } else if archive_name.archive_type() == ArchiveType::FixedIndex {
         let file_info = manifest.lookup_file_info(&archive_name)?;
         let index = client
             .download_fixed_index(&manifest, &archive_name)

View File

@@ -18,8 +18,7 @@ use proxmox_schema::*;
 use proxmox_sortable_macro::sortable;
 use proxmox_systemd;
 
-use pbs_api_types::BackupNamespace;
-use pbs_client::tools::has_pxar_filename_extension;
+use pbs_api_types::{ArchiveType, BackupArchiveName, BackupNamespace};
 use pbs_client::tools::key_source::get_encryption_key_password;
 use pbs_client::{BackupReader, RemoteChunkReader};
 use pbs_datastore::cached_chunk_reader::CachedChunkReader;
@@ -47,11 +46,7 @@ const API_METHOD_MOUNT: ApiMethod = ApiMethod::new(
            false,
            &StringSchema::new("Group/Snapshot path.").schema()
        ),
-        (
-            "archive-name",
-            false,
-            &StringSchema::new("Backup archive name.").schema()
-        ),
+        ("archive-name", false, &BackupArchiveName::API_SCHEMA),
        (
            "target",
            false,
@@ -87,11 +82,7 @@ WARNING: Only do this with *trusted* backups!",
            false,
            &StringSchema::new("Group/Snapshot path.").schema()
        ),
-        (
-            "archive-name",
-            false,
-            &StringSchema::new("Backup archive name.").schema()
-        ),
+        ("archive-name", false, &BackupArchiveName::API_SCHEMA),
        ("repository", true, &REPO_URL_SCHEMA),
        (
            "keyfile",
@@ -208,7 +199,8 @@ fn mount(
 async fn mount_do(param: Value, pipe: Option<OwnedFd>) -> Result<Value, Error> {
     let repo = extract_repository_from_value(&param)?;
-    let archive_name = required_string_param(&param, "archive-name")?;
+    let server_archive_name: BackupArchiveName =
+        required_string_param(&param, "archive-name")?.try_into()?;
     let client = connect(&repo)?;
 
     let target = param["target"].as_str();
@@ -230,16 +222,14 @@ async fn mount_do(param: Value, pipe: Option<OwnedFd>) -> Result<Value, Error> {
         }
     };
 
-    let server_archive_name = if has_pxar_filename_extension(archive_name, false) {
+    if server_archive_name.has_pxar_filename_extension() {
         if target.is_none() {
             bail!("use the 'mount' command to mount pxar archives");
         }
-        format!("{}.didx", archive_name)
-    } else if archive_name.ends_with(".img") {
+    } else if server_archive_name.ends_with(".img.fidx") {
         if target.is_some() {
             bail!("use the 'map' command to map drive images");
         }
-        format!("{}.fidx", archive_name)
     } else {
         bail!("Can only mount/map pxar archives and drive images.");
     };
@@ -291,7 +281,7 @@ async fn mount_do(param: Value, pipe: Option<OwnedFd>) -> Result<Value, Error> {
     let mut interrupt =
         futures::future::select(interrupt_int.recv().boxed(), interrupt_term.recv().boxed());
 
-    if server_archive_name.ends_with(".didx") {
+    if server_archive_name.archive_type() == ArchiveType::DynamicIndex {
         let decoder = helper::get_pxar_fuse_accessor(
             &server_archive_name,
             client.clone(),
@@ -312,7 +302,7 @@ async fn mount_do(param: Value, pipe: Option<OwnedFd>) -> Result<Value, Error> {
                 // exit on interrupted
             }
         }
-    } else if server_archive_name.ends_with(".fidx") {
+    } else if server_archive_name.archive_type() == ArchiveType::FixedIndex {
         let file_info = manifest.lookup_file_info(&server_archive_name)?;
         let index = client
             .download_fixed_index(&manifest, &server_archive_name)
@@ -326,7 +316,10 @@ async fn mount_do(param: Value, pipe: Option<OwnedFd>) -> Result<Value, Error> {
        );
        let reader = CachedChunkReader::new(chunk_reader, index, 8).seekable();
 
-        let name = &format!("{}:{}/{}", repo, path, archive_name);
+        let name = &format!(
+            "{repo}:{path}/{}",
+            server_archive_name.without_type_extension(),
+        );
        let name_escaped = proxmox_systemd::escape_unit(name, false);
 
        let mut session =

View File

@@ -5,6 +5,7 @@ use std::sync::Arc;
 use anyhow::{bail, format_err, Error};
 use futures::StreamExt;
+use pbs_api_types::{BackupArchiveName, CATALOG_NAME};
 use serde_json::{json, Value};
 use tokio::io::AsyncWriteExt;
@@ -37,7 +38,6 @@ use pbs_client::{BackupReader, BackupRepository, RemoteChunkReader};
 use pbs_datastore::catalog::{ArchiveEntry, CatalogReader, DirEntryAttribute};
 use pbs_datastore::dynamic_index::BufferedDynamicReader;
 use pbs_datastore::index::IndexFile;
-use pbs_datastore::CATALOG_NAME;
 use pbs_key_config::decrypt_key;
 use pbs_tools::crypt_config::CryptConfig;
@@ -149,9 +149,9 @@ async fn list_files(
             Ok(entries)
         }
         ExtractPath::Pxar(file, mut path) => {
-            if let Ok(file_info) = manifest.lookup_file_info(CATALOG_NAME) {
+            if let Ok(file_info) = manifest.lookup_file_info(&CATALOG_NAME) {
                 let index = client
-                    .download_dynamic_index(&manifest, CATALOG_NAME)
+                    .download_dynamic_index(&manifest, &CATALOG_NAME)
                     .await?;
                 let most_used = index.find_most_used_chunks(8);
                 let chunk_reader = RemoteChunkReader::new(
@@ -172,6 +172,7 @@ async fn list_files(
                 path = vec![b'/'];
             }
+            let file: BackupArchiveName = file.as_str().try_into()?;
             let (archive_name, _payload_archive_name) =
                 pbs_client::tools::get_pxar_archive_names(&file, &manifest)?;
@@ -191,7 +192,7 @@ async fn list_files(
             pbs_client::pxar::tools::pxar_metadata_catalog_lookup(
                 accessor,
                 path,
-                Some(&archive_name),
+                Some(archive_name.as_ref()),
             )
             .await
         }
@@ -476,10 +477,11 @@ async fn extract(
     match path {
         ExtractPath::Pxar(archive_name, path) => {
+            let archive_name: BackupArchiveName = archive_name.as_str().try_into()?;
             let (archive_name, payload_archive_name) =
                 pbs_client::tools::get_pxar_archive_names(&archive_name, &manifest)?;
             let (reader, archive_size) = get_remote_pxar_reader(
-                &archive_name,
+                &archive_name.try_into()?,
                 client.clone(),
                 &manifest,
                 crypt_config.clone(),
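
The `get_pxar_archive_names` helper used in both hunks above resolves the
requested name against the manifest. A hedged sketch of the call pattern, with
the signature inferred from these call sites (the payload name is assumed to
be optional, for split pxar archives):

```rust
use anyhow::Error;
use pbs_api_types::BackupArchiveName;
use pbs_datastore::BackupManifest;

// Inferred shape: resolve the requested archive name against the manifest,
// returning the metadata archive plus an optional payload archive name.
fn resolve(
    requested: &str,
    manifest: &BackupManifest,
) -> Result<(BackupArchiveName, Option<BackupArchiveName>), Error> {
    let requested: BackupArchiveName = requested.try_into()?;
    pbs_client::tools::get_pxar_archive_names(&requested, manifest)
}
```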

@@ -2,6 +2,7 @@
 use std::collections::HashSet;
 use std::ffi::OsStr;
+use std::ops::Deref;
 use std::os::unix::ffi::OsStrExt;
 use std::path::PathBuf;
 use std::sync::Arc;
@@ -34,12 +35,13 @@ use pxar::accessor::aio::Accessor;
 use pxar::EntryKind;
 use pbs_api_types::{
-    print_ns_and_snapshot, print_store_and_ns, Authid, BackupContent, BackupGroupDeleteStats,
-    BackupNamespace, BackupType, Counts, CryptMode, DataStoreConfig, DataStoreListItem,
-    DataStoreStatus, GarbageCollectionJobStatus, GroupListItem, JobScheduleStatus, KeepOptions,
-    Operation, PruneJobOptions, SnapshotListItem, SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA,
-    BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA,
-    DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, MAX_NAMESPACE_DEPTH, NS_MAX_DEPTH_SCHEMA,
+    print_ns_and_snapshot, print_store_and_ns, ArchiveType, Authid, BackupArchiveName,
+    BackupContent, BackupGroupDeleteStats, BackupNamespace, BackupType, Counts, CryptMode,
+    DataStoreConfig, DataStoreListItem, DataStoreStatus, GarbageCollectionJobStatus, GroupListItem,
+    JobScheduleStatus, KeepOptions, Operation, PruneJobOptions, SnapshotListItem,
+    SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA,
+    BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, CATALOG_NAME, CLIENT_LOG_BLOB_NAME, DATASTORE_SCHEMA,
+    IGNORE_VERIFIED_BACKUPS_SCHEMA, MANIFEST_BLOB_NAME, MAX_NAMESPACE_DEPTH, NS_MAX_DEPTH_SCHEMA,
     PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE,
     PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID, UPID_SCHEMA,
     VERIFICATION_OUTDATED_AFTER_SCHEMA,
@@ -54,11 +56,11 @@ use pbs_datastore::data_blob_reader::DataBlobReader;
 use pbs_datastore::dynamic_index::{BufferedDynamicReader, DynamicIndexReader, LocalDynamicReadAt};
 use pbs_datastore::fixed_index::FixedIndexReader;
 use pbs_datastore::index::IndexFile;
-use pbs_datastore::manifest::{BackupManifest, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME};
+use pbs_datastore::manifest::BackupManifest;
 use pbs_datastore::prune::compute_prune_info;
 use pbs_datastore::{
     check_backup_owner, task_tracking, BackupDir, BackupGroup, DataStore, LocalChunkReader,
-    StoreProgress, CATALOG_NAME,
+    StoreProgress,
 };
 use pbs_tools::json::required_string_param;
 use proxmox_rest_server::{formatter, WorkerTask};
@@ -1481,12 +1483,13 @@ pub fn download_file_decoded(
         &backup_dir_api.group,
     )?;
-    let file_name = required_string_param(&param, "file-name")?.to_owned();
+    let file_name: BackupArchiveName =
+        required_string_param(&param, "file-name")?.try_into()?;
     let backup_dir = datastore.backup_dir(backup_ns.clone(), backup_dir_api.clone())?;
     let (manifest, files) = read_backup_index(&backup_dir)?;
     for file in files {
-        if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
+        if file.filename == file_name.as_ref() && file.crypt_mode == Some(CryptMode::Encrypt) {
             bail!("cannot decode '{}' - is encrypted", file_name);
         }
     }
@@ -1501,12 +1504,10 @@ pub fn download_file_decoded(
     let mut path = datastore.base_path();
     path.push(backup_dir.relative_path());
-    path.push(&file_name);
-
-    let (_, extension) = file_name.rsplit_once('.').unwrap();
-    let body = match extension {
-        "didx" => {
+    path.push(file_name.as_ref());
+    let body = match file_name.archive_type() {
+        ArchiveType::DynamicIndex => {
             let index = DynamicIndexReader::open(&path).map_err(|err| {
                 format_err!("unable to read dynamic index '{:?}' - {}", &path, err)
             })?;
@@ -1520,7 +1521,7 @@ pub fn download_file_decoded(
                 err
             }))
         }
-        "fidx" => {
+        ArchiveType::FixedIndex => {
             let index = FixedIndexReader::open(&path).map_err(|err| {
                 format_err!("unable to read fixed index '{:?}' - {}", &path, err)
             })?;
@@ -1539,7 +1540,7 @@ pub fn download_file_decoded(
                 ),
             )
         }
-        "blob" => {
+        ArchiveType::Blob => {
             let file = std::fs::File::open(&path)
                 .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?;
@@ -1554,9 +1555,6 @@ pub fn download_file_decoded(
                 ),
             )
         }
-        extension => {
-            bail!("cannot download '{}' files", extension);
-        }
     };
     // fixme: set other headers ?
@@ -1613,10 +1611,10 @@ pub fn upload_backup_log(
     )?;
     let backup_dir = datastore.backup_dir(backup_ns.clone(), backup_dir_api.clone())?;
-    let file_name = CLIENT_LOG_BLOB_NAME;
+    let file_name = &CLIENT_LOG_BLOB_NAME;
     let mut path = backup_dir.full_path();
-    path.push(file_name);
+    path.push(file_name.as_ref());
     if path.exists() {
         bail!("backup already contains a log.");
@@ -1625,6 +1623,7 @@ pub fn upload_backup_log(
     println!(
         "Upload backup log to {} {backup_dir_api}/{file_name}",
         print_store_and_ns(store, &backup_ns),
+        file_name = file_name.deref(),
     );
     let data = req_body
@@ -1671,7 +1670,7 @@ fn decode_path(path: &str) -> Result<Vec<u8>, Error> {
                 type: String,
             },
             "archive-name": {
-                schema: BACKUP_ARCHIVE_NAME_SCHEMA,
+                type: BackupArchiveName,
                 optional: true,
             },
         },
@@ -1688,12 +1687,10 @@ pub async fn catalog(
     ns: Option<BackupNamespace>,
     backup_dir: pbs_api_types::BackupDir,
     filepath: String,
-    archive_name: Option<String>,
+    archive_name: Option<BackupArchiveName>,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Vec<ArchiveEntry>, Error> {
-    let file_name = archive_name
-        .clone()
-        .unwrap_or_else(|| CATALOG_NAME.to_string());
+    let file_name = archive_name.clone().unwrap_or_else(|| CATALOG_NAME.clone());
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
@@ -1713,7 +1710,7 @@ pub async fn catalog(
     let (manifest, files) = read_backup_index(&backup_dir)?;
     for file in files {
-        if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) {
+        if file.filename == file_name.as_ref() && file.crypt_mode == Some(CryptMode::Encrypt) {
             bail!("cannot decode '{file_name}' - is encrypted");
         }
     }
@@ -1722,7 +1719,7 @@ pub async fn catalog(
     tokio::task::spawn_blocking(move || {
         let mut path = datastore.base_path();
         path.push(backup_dir.relative_path());
-        path.push(&file_name);
+        path.push(file_name.as_ref());
         let index = DynamicIndexReader::open(&path)
             .map_err(|err| format_err!("unable to read dynamic index '{path:?}' - {err}"))?;
@@ -1772,7 +1769,7 @@ pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new(
             ("backup-time", false, &BACKUP_TIME_SCHEMA),
             ("filepath", false, &StringSchema::new("Base64 encoded path").schema()),
             ("tar", true, &BooleanSchema::new("Download as .tar.zst").schema()),
-            ("archive-name", true, &BACKUP_ARCHIVE_NAME_SCHEMA),
+            ("archive-name", true, &BackupArchiveName::API_SCHEMA),
         ]),
     )
 ).access(
@@ -1787,11 +1784,11 @@ fn get_local_pxar_reader(
     datastore: Arc<DataStore>,
     manifest: &BackupManifest,
     backup_dir: &BackupDir,
-    pxar_name: &str,
+    pxar_name: &BackupArchiveName,
 ) -> Result<(LocalDynamicReadAt<LocalChunkReader>, u64), Error> {
     let mut path = datastore.base_path();
     path.push(backup_dir.relative_path());
-    path.push(pxar_name);
+    path.push(pxar_name.as_ref());
     let index = DynamicIndexReader::open(&path)
         .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?;
@@ -1849,16 +1846,16 @@ pub fn pxar_file_download(
         let file_path = split.next().unwrap_or(b"/");
         (pxar_name.to_owned(), file_path.to_owned())
     };
-    let pxar_name = std::str::from_utf8(&pxar_name)?;
+    let pxar_name: BackupArchiveName = std::str::from_utf8(&pxar_name)?.try_into()?;
     let (manifest, files) = read_backup_index(&backup_dir)?;
     for file in files {
-        if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) {
+        if file.filename == pxar_name.as_ref() && file.crypt_mode == Some(CryptMode::Encrypt) {
             bail!("cannot decode '{}' - is encrypted", pxar_name);
         }
     }
     let (pxar_name, payload_archive_name) =
-        pbs_client::tools::get_pxar_archive_names(pxar_name, &manifest)?;
+        pbs_client::tools::get_pxar_archive_names(&pxar_name, &manifest)?;
     let (reader, archive_size) =
         get_local_pxar_reader(datastore.clone(), &manifest, &backup_dir, &pxar_name)?;

@@ -21,16 +21,16 @@ use proxmox_worker_task::WorkerTaskContext;
 use pbs_api_types::{
     parse_ns_and_snapshot, print_ns_and_snapshot, ArchiveType, Authid, BackupDir, BackupNamespace,
     CryptMode, NotificationMode, Operation, TapeRestoreNamespace, Userid,
-    DATASTORE_MAP_ARRAY_SCHEMA, DATASTORE_MAP_LIST_SCHEMA, DRIVE_NAME_SCHEMA, MAX_NAMESPACE_DEPTH,
-    PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_TAPE_READ, TAPE_RESTORE_NAMESPACE_SCHEMA,
-    TAPE_RESTORE_SNAPSHOT_SCHEMA, UPID_SCHEMA,
+    DATASTORE_MAP_ARRAY_SCHEMA, DATASTORE_MAP_LIST_SCHEMA, DRIVE_NAME_SCHEMA, MANIFEST_BLOB_NAME,
+    MAX_NAMESPACE_DEPTH, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_TAPE_READ,
+    TAPE_RESTORE_NAMESPACE_SCHEMA, TAPE_RESTORE_SNAPSHOT_SCHEMA, UPID_SCHEMA,
 };
 use pbs_client::pxar::tools::handle_root_with_optional_format_version_prelude;
 use pbs_config::CachedUserInfo;
 use pbs_datastore::dynamic_index::DynamicIndexReader;
 use pbs_datastore::fixed_index::FixedIndexReader;
 use pbs_datastore::index::IndexFile;
-use pbs_datastore::manifest::{BackupManifest, MANIFEST_BLOB_NAME};
+use pbs_datastore::manifest::BackupManifest;
 use pbs_datastore::{DataBlob, DataStore};
 use pbs_tape::{
     BlockReadError, MediaContentHeader, TapeRead, PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0,
@@ -1652,7 +1652,7 @@ fn try_restore_snapshot_archive<R: pxar::decoder::SeqRead>(
     }
     let root_path = Path::new("/");
-    let manifest_file_name = OsStr::new(MANIFEST_BLOB_NAME);
+    let manifest_file_name = OsStr::new(MANIFEST_BLOB_NAME.as_ref());
     let mut manifest = None;
@@ -1732,7 +1732,7 @@ fn try_restore_snapshot_archive<R: pxar::decoder::SeqRead>(
     // commit manifest
     let mut manifest_path = snapshot_path.to_owned();
-    manifest_path.push(MANIFEST_BLOB_NAME);
+    manifest_path.push(MANIFEST_BLOB_NAME.as_ref());
     let mut tmp_manifest_path = manifest_path.clone();
     tmp_manifest_path.set_extension("tmp");

@@ -1,8 +1,5 @@
 //! Server/client-specific parts for what's otherwise in pbs-datastore.

-// Note: .pcat1 => Proxmox Catalog Format version 1
-pub const CATALOG_NAME: &str = "catalog.pcat1.didx";
-
 mod verify;
 pub use verify::*;
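
With the duplicate constant gone, the remaining definitions live in
`pbs_api_types` as typed, lazily initialized values (see the LazyLock fixup in
the commit message). A sketch of the assumed shape, reusing the literal from
the removed constant:

```rust
use std::sync::LazyLock;

use pbs_api_types::BackupArchiveName;

// Assumed definitions; parsing each literal once into the typed value is
// what makes `&CATALOG_NAME`, `CATALOG_NAME.clone()` and `.deref()` work
// at the call sites in this diff.
pub static CATALOG_NAME: LazyLock<BackupArchiveName> =
    LazyLock::new(|| "catalog.pcat1.didx".try_into().unwrap());
pub static MANIFEST_BLOB_NAME: LazyLock<BackupArchiveName> =
    LazyLock::new(|| "index.json.blob".try_into().unwrap());
pub static CLIENT_LOG_BLOB_NAME: LazyLock<BackupArchiveName> =
    LazyLock::new(|| "client.log.blob".try_into().unwrap());
```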

@@ -13,7 +13,7 @@ use proxmox_human_byte::HumanByte;
 use proxmox_router::cli::{CliCommand, CliCommandMap, CommandLineInterface};
 use proxmox_schema::api;
-use pbs_api_types::{BackupNamespace, BackupPart};
+use pbs_api_types::{BackupArchiveName, BackupNamespace, BackupPart};
 use pbs_client::tools::key_source::{
     crypto_parameters, format_key_source, get_encryption_key_password, KEYFD_SCHEMA,
 };
@@ -70,8 +70,7 @@ pub fn diff_commands() -> CommandLineInterface {
                 type: String,
             },
             "archive-name": {
-                description: "Name of the .pxar archive",
-                type: String,
+                type: BackupArchiveName,
             },
             "repository": {
                 optional: true,
@@ -106,7 +105,7 @@ pub fn diff_commands() -> CommandLineInterface {
 async fn diff_archive_cmd(
     prev_snapshot: String,
     snapshot: String,
-    archive_name: String,
+    archive_name: BackupArchiveName,
     compare_content: bool,
     color: Option<ColorMode>,
     ns: Option<BackupNamespace>,
@@ -140,12 +139,11 @@ async fn diff_archive_cmd(
     let output_params = OutputParams { color };
-    if archive_name.ends_with(".pxar") {
-        let file_name = format!("{}.didx", archive_name);
+    if archive_name.ends_with(".pxar.didx") {
         diff_archive(
             &prev_snapshot,
             &snapshot,
-            &file_name,
+            &archive_name,
             &repo_params,
             compare_content,
             &output_params,
@@ -161,7 +159,7 @@ async fn diff_archive(
 async fn diff_archive(
     snapshot_a: &str,
     snapshot_b: &str,
-    file_name: &str,
+    file_name: &BackupArchiveName,
     repo_params: &RepoParams,
     compare_contents: bool,
     output_params: &OutputParams,
@@ -249,7 +247,7 @@ struct OutputParams {
 async fn open_dynamic_index(
     snapshot: &str,
-    archive_name: &str,
+    archive_name: &BackupArchiveName,
     params: &RepoParams,
 ) -> Result<(DynamicIndexReader, Accessor), Error> {
     let backup_reader = create_backup_reader(snapshot, params).await?;

@@ -11,9 +11,9 @@ use proxmox_human_byte::HumanByte;
 use tracing::info;
 use pbs_api_types::{
-    print_store_and_ns, ArchiveType, Authid, BackupDir, BackupGroup, BackupNamespace, GroupFilter,
-    Operation, RateLimitConfig, Remote, MAX_NAMESPACE_DEPTH, PRIV_DATASTORE_AUDIT,
-    PRIV_DATASTORE_BACKUP,
+    print_store_and_ns, ArchiveType, Authid, BackupArchiveName, BackupDir, BackupGroup,
+    BackupNamespace, GroupFilter, Operation, RateLimitConfig, Remote, CLIENT_LOG_BLOB_NAME,
+    MANIFEST_BLOB_NAME, MAX_NAMESPACE_DEPTH, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP,
 };
 use pbs_client::BackupRepository;
 use pbs_config::CachedUserInfo;
@@ -21,7 +21,7 @@ use pbs_datastore::data_blob::DataBlob;
 use pbs_datastore::dynamic_index::DynamicIndexReader;
 use pbs_datastore::fixed_index::FixedIndexReader;
 use pbs_datastore::index::IndexFile;
-use pbs_datastore::manifest::{BackupManifest, FileInfo, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME};
+use pbs_datastore::manifest::{BackupManifest, FileInfo};
 use pbs_datastore::read_chunk::AsyncReadChunk;
 use pbs_datastore::{check_backup_owner, DataStore, StoreProgress};
 use pbs_tools::sha::sha256;
@@ -334,16 +334,16 @@ async fn pull_snapshot<'a>(
 ) -> Result<SyncStats, Error> {
     let mut sync_stats = SyncStats::default();
     let mut manifest_name = snapshot.full_path();
-    manifest_name.push(MANIFEST_BLOB_NAME);
+    manifest_name.push(MANIFEST_BLOB_NAME.as_ref());
     let mut client_log_name = snapshot.full_path();
-    client_log_name.push(CLIENT_LOG_BLOB_NAME);
+    client_log_name.push(CLIENT_LOG_BLOB_NAME.as_ref());
     let mut tmp_manifest_name = manifest_name.clone();
     tmp_manifest_name.set_extension("tmp");
     let tmp_manifest_blob;
     if let Some(data) = reader
-        .load_file_into(MANIFEST_BLOB_NAME, &tmp_manifest_name)
+        .load_file_into(MANIFEST_BLOB_NAME.as_ref(), &tmp_manifest_name)
         .await?
     {
         tmp_manifest_blob = data;
@@ -381,11 +381,12 @@ async fn pull_snapshot<'a>(
         path.push(&item.filename);
         if path.exists() {
-            match ArchiveType::from_path(&item.filename)? {
+            let filename: BackupArchiveName = item.filename.as_str().try_into()?;
+            match filename.archive_type() {
                 ArchiveType::DynamicIndex => {
                     let index = DynamicIndexReader::open(&path)?;
                     let (csum, size) = index.compute_csum();
-                    match manifest.verify_file(&item.filename, &csum, size) {
+                    match manifest.verify_file(&filename, &csum, size) {
                         Ok(_) => continue,
                         Err(err) => {
                             info!("detected changed file {path:?} - {err}");
@@ -395,7 +396,7 @@ async fn pull_snapshot<'a>(
                 ArchiveType::FixedIndex => {
                     let index = FixedIndexReader::open(&path)?;
                     let (csum, size) = index.compute_csum();
-                    match manifest.verify_file(&item.filename, &csum, size) {
+                    match manifest.verify_file(&filename, &csum, size) {
                         Ok(_) => continue,
                         Err(err) => {
                             info!("detected changed file {path:?} - {err}");
@@ -405,7 +406,7 @@ async fn pull_snapshot<'a>(
                 ArchiveType::Blob => {
                     let mut tmpfile = std::fs::File::open(&path)?;
                     let (csum, size) = sha256(&mut tmpfile)?;
-                    match manifest.verify_file(&item.filename, &csum, size) {
+                    match manifest.verify_file(&filename, &csum, size) {
                         Ok(_) => continue,
                         Err(err) => {
                             info!("detected changed file {path:?} - {err}");

@@ -10,11 +10,11 @@ use tokio_stream::wrappers::ReceiverStream;
 use tracing::{info, warn};
 use pbs_api_types::{
-    print_store_and_ns, ApiVersion, ApiVersionInfo, ArchiveType, Authid, BackupDir, BackupGroup,
-    BackupGroupDeleteStats, BackupNamespace, GroupFilter, GroupListItem, NamespaceListItem,
-    Operation, RateLimitConfig, Remote, SnapshotListItem, PRIV_DATASTORE_BACKUP,
-    PRIV_DATASTORE_READ, PRIV_REMOTE_DATASTORE_BACKUP, PRIV_REMOTE_DATASTORE_MODIFY,
-    PRIV_REMOTE_DATASTORE_PRUNE,
+    print_store_and_ns, ApiVersion, ApiVersionInfo, ArchiveType, Authid, BackupArchiveName,
+    BackupDir, BackupGroup, BackupGroupDeleteStats, BackupNamespace, GroupFilter, GroupListItem,
+    NamespaceListItem, Operation, RateLimitConfig, Remote, SnapshotListItem, CLIENT_LOG_BLOB_NAME,
+    MANIFEST_BLOB_NAME, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_READ, PRIV_REMOTE_DATASTORE_BACKUP,
+    PRIV_REMOTE_DATASTORE_MODIFY, PRIV_REMOTE_DATASTORE_PRUNE,
 };
 use pbs_client::{BackupRepository, BackupWriter, HttpClient, MergedChunkInfo, UploadOptions};
 use pbs_config::CachedUserInfo;
@@ -22,7 +22,6 @@ use pbs_datastore::data_blob::ChunkInfo;
 use pbs_datastore::dynamic_index::DynamicIndexReader;
 use pbs_datastore::fixed_index::FixedIndexReader;
 use pbs_datastore::index::IndexFile;
-use pbs_datastore::manifest::{CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME};
 use pbs_datastore::read_chunk::AsyncReadChunk;
 use pbs_datastore::{DataStore, StoreProgress};
@@ -805,10 +804,13 @@ pub(crate) async fn push_snapshot(
         let mut path = backup_dir.full_path();
         path.push(&entry.filename);
         if path.try_exists()? {
-            match ArchiveType::from_path(&entry.filename)? {
+            let archive_name = BackupArchiveName::from_path(&entry.filename)?;
+            match archive_name.archive_type() {
                 ArchiveType::Blob => {
                     let file = std::fs::File::open(path.clone())?;
-                    let backup_stats = backup_writer.upload_blob(file, &entry.filename).await?;
+                    let backup_stats = backup_writer
+                        .upload_blob(file, archive_name.as_ref())
+                        .await?;
                     stats.add(SyncStats {
                         chunk_count: backup_stats.chunk_count as usize,
                         bytes: backup_stats.size as usize,
@@ -821,7 +823,7 @@ pub(crate) async fn push_snapshot(
                     // Add known chunks, ignore errors since archive might not be present
                     let _res = backup_writer
                         .download_previous_dynamic_index(
-                            &entry.filename,
+                            &archive_name,
                             manifest,
                             known_chunks.clone(),
                         )
@@ -830,7 +832,7 @@ pub(crate) async fn push_snapshot(
                     let index = DynamicIndexReader::open(&path)?;
                     let chunk_reader = reader.chunk_reader(entry.chunk_crypt_mode());
                     let sync_stats = push_index(
-                        &entry.filename,
+                        &archive_name,
                         index,
                         chunk_reader,
                         &backup_writer,
@@ -845,7 +847,7 @@ pub(crate) async fn push_snapshot(
                     // Add known chunks, ignore errors since archive might not be present
                     let _res = backup_writer
                         .download_previous_fixed_index(
-                            &entry.filename,
+                            &archive_name,
                             manifest,
                             known_chunks.clone(),
                         )
@@ -855,7 +857,7 @@ pub(crate) async fn push_snapshot(
                     let chunk_reader = reader.chunk_reader(entry.chunk_crypt_mode());
                     let size = index.index_bytes();
                     let sync_stats = push_index(
-                        &entry.filename,
+                        &archive_name,
                         index,
                         chunk_reader,
                         &backup_writer,
@@ -874,12 +876,13 @@ pub(crate) async fn push_snapshot(
     // Fetch client log from source and push to target
     // this has to be handled individually since the log is never part of the manifest
     let mut client_log_path = backup_dir.full_path();
-    client_log_path.push(CLIENT_LOG_BLOB_NAME);
+    let client_log_name = &CLIENT_LOG_BLOB_NAME;
+    client_log_path.push(client_log_name.as_ref());
     if client_log_path.is_file() {
         backup_writer
             .upload_blob_from_file(
                 &client_log_path,
-                CLIENT_LOG_BLOB_NAME,
+                client_log_name.as_ref(),
                 upload_options.clone(),
             )
             .await?;
@@ -891,7 +894,7 @@ pub(crate) async fn push_snapshot(
     let backup_stats = backup_writer
         .upload_blob_from_data(
             manifest_string.into_bytes(),
-            MANIFEST_BLOB_NAME,
+            MANIFEST_BLOB_NAME.as_ref(),
             upload_options,
         )
         .await?;
@@ -912,7 +915,7 @@ pub(crate) async fn push_snapshot(
 // For fixed indexes, the size must be provided as given by the index reader.
 #[allow(clippy::too_many_arguments)]
 async fn push_index<'a>(
-    filename: &'a str,
+    filename: &'a BackupArchiveName,
     index: impl IndexFile + Send + 'static,
     chunk_reader: Arc<dyn AsyncReadChunk>,
     backup_writer: &BackupWriter,

@@ -2,6 +2,7 @@
 use std::collections::HashMap;
 use std::io::{Seek, Write};
+use std::ops::Deref;
 use std::path::{Path, PathBuf};
 use std::sync::{Arc, Mutex};
 use std::time::Duration;
@@ -18,11 +19,11 @@ use proxmox_router::HttpError;
 use pbs_api_types::{
     Authid, BackupDir, BackupGroup, BackupNamespace, CryptMode, GroupListItem, SnapshotListItem,
-    SyncDirection, SyncJobConfig, MAX_NAMESPACE_DEPTH, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_READ,
+    SyncDirection, SyncJobConfig, CLIENT_LOG_BLOB_NAME, MAX_NAMESPACE_DEPTH, PRIV_DATASTORE_BACKUP,
+    PRIV_DATASTORE_READ,
 };
 use pbs_client::{BackupReader, BackupRepository, HttpClient, RemoteChunkReader};
 use pbs_datastore::data_blob::DataBlob;
-use pbs_datastore::manifest::CLIENT_LOG_BLOB_NAME;
 use pbs_datastore::read_chunk::AsyncReadChunk;
 use pbs_datastore::{DataStore, ListNamespacesRecursive, LocalChunkReader};
@@ -162,15 +163,19 @@ impl SyncSourceReader for RemoteSourceReader {
             .open(&tmp_path)?;
         // Note: be silent if there is no log - only log successful download
+        let client_log_name = &CLIENT_LOG_BLOB_NAME;
         if let Ok(()) = self
             .backup_reader
-            .download(CLIENT_LOG_BLOB_NAME, tmpfile)
+            .download(client_log_name.as_ref(), tmpfile)
             .await
         {
             if let Err(err) = std::fs::rename(&tmp_path, to_path) {
                 bail!("Atomic rename file {to_path:?} failed - {err}");
             }
-            info!("got backup log file {CLIENT_LOG_BLOB_NAME:?}");
+            info!(
+                "got backup log file {client_log_name}",
+                client_log_name = client_log_name.deref()
+            );
         }
         Ok(())

@@ -2,8 +2,7 @@ use std::path::PathBuf;
 use anyhow::Error;
-use pbs_api_types::PruneJobOptions;
-use pbs_datastore::manifest::MANIFEST_BLOB_NAME;
+use pbs_api_types::{PruneJobOptions, MANIFEST_BLOB_NAME};
 use pbs_datastore::prune::compute_prune_info;
 use pbs_datastore::{BackupDir, BackupInfo};
@@ -34,7 +33,7 @@ fn create_info(snapshot: &str, partial: bool) -> BackupInfo {
     let mut files = Vec::new();
     if !partial {
-        files.push(String::from(MANIFEST_BLOB_NAME));
+        files.push(MANIFEST_BLOB_NAME.to_string());
     }
     BackupInfo {