diff --git a/pbs-datastore/examples/ls-snapshots.rs b/pbs-datastore/examples/ls-snapshots.rs index e81c9017..d87d4484 100644 --- a/pbs-datastore/examples/ls-snapshots.rs +++ b/pbs-datastore/examples/ls-snapshots.rs @@ -2,7 +2,7 @@ use std::path::PathBuf; use anyhow::{bail, Error}; -use pbs_datastore::{ListGroups, ListSnapshots}; +use pbs_datastore::DataStore; fn run() -> Result<(), Error> { let base: PathBuf = match std::env::args().skip(1).next() { @@ -10,12 +10,13 @@ fn run() -> Result<(), Error> { None => bail!("no path passed"), }; - for group in ListGroups::new(base.to_owned())? { + let store = unsafe { DataStore::open_path("", &base, None)? }; + + for group in store.iter_backup_groups()? { let group = group?; println!("found group {}", group); - let group_path = base.as_path().join(group.to_string()); - for snapshot in ListSnapshots::new(group, group_path)? { + for snapshot in group.iter_snapshots()? { println!("\t{}", snapshot?); } } diff --git a/pbs-datastore/src/backup_info.rs b/pbs-datastore/src/backup_info.rs index df9708c7..b8c8fcfc 100644 --- a/pbs-datastore/src/backup_info.rs +++ b/pbs-datastore/src/backup_info.rs @@ -1,21 +1,35 @@ +use std::fmt; use std::os::unix::io::RawFd; -use std::path::{Path, PathBuf}; +use std::path::PathBuf; +use std::sync::Arc; use anyhow::{bail, Error}; use pbs_api_types::{BackupType, GroupFilter, BACKUP_DATE_REGEX, BACKUP_FILE_REGEX}; -use super::manifest::MANIFEST_BLOB_NAME; +use crate::manifest::MANIFEST_BLOB_NAME; +use crate::DataStore; /// BackupGroup is a directory containing a list of BackupDir -#[derive(Debug, Clone)] +#[derive(Clone)] pub struct BackupGroup { + store: Arc, + group: pbs_api_types::BackupGroup, } +impl fmt::Debug for BackupGroup { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("BackupGroup") + .field("store", &self.store.name()) + .field("group", &self.group) + .finish() + } +} + impl BackupGroup { - pub(crate) fn new(group: pbs_api_types::BackupGroup) -> Self { - Self { 
group } + pub(crate) fn new(store: Arc<DataStore>, group: pbs_api_types::BackupGroup) -> Self { + Self { store, group } } /// Access the underlying [`BackupGroup`](pbs_api_types::BackupGroup). @@ -32,13 +46,18 @@ impl BackupGroup { &self.group.id } + pub fn full_group_path(&self) -> PathBuf { + self.store.base_path().join(self.group.to_string()) + } + pub fn relative_group_path(&self) -> PathBuf { self.group.to_string().into() } - pub fn list_backups(&self, base_path: &Path) -> Result<Vec<BackupInfo>, Error> { + pub fn list_backups(&self) -> Result<Vec<BackupInfo>, Error> { let mut list = vec![]; + let base_path = self.store.base_path(); let mut path = base_path.to_owned(); path.push(self.relative_group_path()); @@ -54,7 +73,7 @@ impl BackupGroup { let backup_dir = self.backup_dir_with_rfc3339(backup_time)?; let files = list_backup_files(l2_fd, backup_time)?; - let protected = backup_dir.is_protected(base_path.to_owned()); + let protected = backup_dir.is_protected(); list.push(BackupInfo { backup_dir, @@ -69,22 +88,18 @@ } /// Finds the latest backup inside a backup group - pub fn last_backup( - &self, - base_path: &Path, - only_finished: bool, - ) -> Result<Option<BackupInfo>, Error> { - let backups = self.list_backups(base_path)?; + pub fn last_backup(&self, only_finished: bool) -> Result<Option<BackupInfo>, Error> { + let backups = self.list_backups()?; Ok(backups .into_iter() .filter(|item| !only_finished || item.is_finished()) .max_by_key(|item| item.backup_dir.backup_time())) } - pub fn last_successful_backup(&self, base_path: &Path) -> Result<Option<i64>, Error> { + pub fn last_successful_backup(&self) -> Result<Option<i64>, Error> { let mut last = None; - let mut path = base_path.to_owned(); + let mut path = self.store.base_path(); path.push(self.relative_group_path()); proxmox_sys::fs::scandir( @@ -149,6 +164,10 @@ impl BackupGroup { ) -> Result<BackupDir, Error> { BackupDir::with_rfc3339(self.clone(), time_string.into()) } + + pub fn iter_snapshots(&self) -> Result<crate::ListSnapshots, Error> { + crate::ListSnapshots::new(self.clone()) + } } impl AsRef<pbs_api_types::BackupGroup> for BackupGroup { @@ -181,6
+200,7 @@ impl std::fmt::Display for BackupGroup { impl From<BackupDir> for BackupGroup { fn from(dir: BackupDir) -> BackupGroup { BackupGroup { + store: dir.store, group: dir.dir.group, } } @@ -189,6 +209,7 @@ impl From<BackupDir> for BackupGroup { impl From<&BackupDir> for BackupGroup { fn from(dir: &BackupDir) -> BackupGroup { BackupGroup { + store: Arc::clone(&dir.store), group: dir.dir.group.clone(), } } @@ -197,18 +218,30 @@ impl From<&BackupDir> for BackupGroup { /// Uniquely identify a Backup (relative to data store) /// /// We also call this a backup snaphost. -#[derive(Debug, Clone)] +#[derive(Clone)] pub struct BackupDir { + store: Arc<DataStore>, dir: pbs_api_types::BackupDir, // backup_time as rfc3339 backup_time_string: String, } +impl fmt::Debug for BackupDir { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("BackupDir") + .field("store", &self.store.name()) + .field("dir", &self.dir) + .field("backup_time_string", &self.backup_time_string) + .finish() + } +} + impl BackupDir { /// Temporarily used for tests. #[doc(hidden)] pub fn new_test(dir: pbs_api_types::BackupDir) -> Self { Self { + store: unsafe { DataStore::new_test() }, backup_time_string: Self::backup_time_to_string(dir.time).unwrap(), dir, } @@ -217,6 +250,7 @@ impl BackupDir { pub(crate) fn with_group(group: BackupGroup, backup_time: i64) -> Result<Self, Error> { let backup_time_string = Self::backup_time_to_string(backup_time)?; Ok(Self { + store: group.store, dir: (group.group, backup_time).into(), backup_time_string, }) @@ -228,6 +262,7 @@ ) -> Result<Self, Error> { let backup_time = proxmox_time::parse_rfc3339(&backup_time_string)?; Ok(Self { + store: group.store, dir: (group.group, backup_time).into(), backup_time_string, }) @@ -257,21 +292,23 @@ } /// Returns the absolute path for backup_dir, using the cached formatted time string.
- pub fn full_path(&self, mut base_path: PathBuf) -> PathBuf { + pub fn full_path(&self) -> PathBuf { + let mut base_path = self.store.base_path(); base_path.push(self.dir.group.ty.as_str()); base_path.push(&self.dir.group.id); base_path.push(&self.backup_time_string); base_path } - pub fn protected_file(&self, mut path: PathBuf) -> PathBuf { + pub fn protected_file(&self) -> PathBuf { + let mut path = self.store.base_path(); path.push(self.relative_path()); path.push(".protected"); path } - pub fn is_protected(&self, base_path: PathBuf) -> bool { - let path = self.protected_file(base_path); + pub fn is_protected(&self) -> bool { + let path = self.protected_file(); path.exists() } @@ -324,7 +361,7 @@ impl std::fmt::Display for BackupDir { } /// Detailed Backup Information, lists files inside a BackupDir -#[derive(Debug, Clone)] +#[derive(Clone, Debug)] pub struct BackupInfo { /// the backup directory pub backup_dir: BackupDir, @@ -335,12 +372,13 @@ pub struct BackupInfo { } impl BackupInfo { - pub fn new(base_path: &Path, backup_dir: BackupDir) -> Result<BackupInfo, Error> { - let mut path = base_path.to_owned(); + pub fn new(backup_dir: BackupDir) -> Result<BackupInfo, Error> { + let base_path = backup_dir.store.base_path(); + let mut path = base_path.clone(); path.push(backup_dir.relative_path()); let files = list_backup_files(libc::AT_FDCWD, &path)?; - let protected = backup_dir.is_protected(base_path.to_owned()); + let protected = backup_dir.is_protected(); Ok(BackupInfo { backup_dir, diff --git a/pbs-datastore/src/chunk_store.rs b/pbs-datastore/src/chunk_store.rs index 739e349d..94fe1dfe 100644 --- a/pbs-datastore/src/chunk_store.rs +++ b/pbs-datastore/src/chunk_store.rs @@ -21,7 +21,7 @@ pub struct ChunkStore { pub(crate) base: PathBuf, chunk_dir: PathBuf, mutex: Mutex<()>, - locker: Arc<Mutex<ProcessLocker>>, + locker: Option<Arc<Mutex<ProcessLocker>>>, } // TODO: what about sysctl setting vm.vfs_cache_pressure (0 - 100) ?
@@ -60,6 +60,17 @@ fn digest_to_prefix(digest: &[u8]) -> PathBuf { } impl ChunkStore { + #[doc(hidden)] + pub unsafe fn panic_store() -> Self { + Self { + name: String::new(), + base: PathBuf::new(), + chunk_dir: PathBuf::new(), + mutex: Mutex::new(()), + locker: None, + } + } + fn chunk_dir>(path: P) -> PathBuf { let mut chunk_dir: PathBuf = PathBuf::from(path.as_ref()); chunk_dir.push(".chunks"); @@ -180,12 +191,15 @@ impl ChunkStore { name: name.to_owned(), base, chunk_dir, - locker, + locker: Some(locker), mutex: Mutex::new(()), }) } pub fn touch_chunk(&self, digest: &[u8; 32]) -> Result<(), Error> { + // unwrap: only `None` in unit tests + assert!(self.locker.is_some()); + self.cond_touch_chunk(digest, true)?; Ok(()) } @@ -195,11 +209,17 @@ impl ChunkStore { digest: &[u8; 32], fail_if_not_exist: bool, ) -> Result { + // unwrap: only `None` in unit tests + assert!(self.locker.is_some()); + let (chunk_path, _digest_str) = self.chunk_path(digest); self.cond_touch_path(&chunk_path, fail_if_not_exist) } pub fn cond_touch_path(&self, path: &Path, fail_if_not_exist: bool) -> Result { + // unwrap: only `None` in unit tests + assert!(self.locker.is_some()); + const UTIME_NOW: i64 = (1 << 30) - 1; const UTIME_OMIT: i64 = (1 << 30) - 2; @@ -239,6 +259,9 @@ impl ChunkStore { + std::iter::FusedIterator, Error, > { + // unwrap: only `None` in unit tests + assert!(self.locker.is_some()); + use nix::dir::Dir; use nix::fcntl::OFlag; use nix::sys::stat::Mode; @@ -325,7 +348,8 @@ impl ChunkStore { } pub fn oldest_writer(&self) -> Option { - ProcessLocker::oldest_shared_lock(self.locker.clone()) + // unwrap: only `None` in unit tests + ProcessLocker::oldest_shared_lock(self.locker.clone().unwrap()) } pub fn sweep_unused_chunks( @@ -335,6 +359,9 @@ impl ChunkStore { status: &mut GarbageCollectionStatus, worker: &dyn WorkerTaskContext, ) -> Result<(), Error> { + // unwrap: only `None` in unit tests + assert!(self.locker.is_some()); + use nix::sys::stat::fstatat; use 
nix::unistd::{unlinkat, UnlinkatFlags}; @@ -426,6 +453,9 @@ impl ChunkStore { } pub fn insert_chunk(&self, chunk: &DataBlob, digest: &[u8; 32]) -> Result<(bool, u64), Error> { + // unwrap: only `None` in unit tests + assert!(self.locker.is_some()); + //println!("DIGEST {}", hex::encode(digest)); let (chunk_path, digest_str) = self.chunk_path(digest); @@ -485,6 +515,9 @@ impl ChunkStore { } pub fn chunk_path(&self, digest: &[u8; 32]) -> (PathBuf, String) { + // unwrap: only `None` in unit tests + assert!(self.locker.is_some()); + let mut chunk_path = self.chunk_dir.clone(); let prefix = digest_to_prefix(digest); chunk_path.push(&prefix); @@ -494,6 +527,9 @@ impl ChunkStore { } pub fn relative_path(&self, path: &Path) -> PathBuf { + // unwrap: only `None` in unit tests + assert!(self.locker.is_some()); + let mut full_path = self.base.clone(); full_path.push(path); full_path @@ -504,15 +540,20 @@ impl ChunkStore { } pub fn base_path(&self) -> PathBuf { + // unwrap: only `None` in unit tests + assert!(self.locker.is_some()); + self.base.clone() } pub fn try_shared_lock(&self) -> Result { - ProcessLocker::try_shared_lock(self.locker.clone()) + // unwrap: only `None` in unit tests + ProcessLocker::try_shared_lock(self.locker.clone().unwrap()) } pub fn try_exclusive_lock(&self) -> Result { - ProcessLocker::try_exclusive_lock(self.locker.clone()) + // unwrap: only `None` in unit tests + ProcessLocker::try_exclusive_lock(self.locker.clone().unwrap()) } } diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs index d9f3bd12..e7682856 100644 --- a/pbs-datastore/src/datastore.rs +++ b/pbs-datastore/src/datastore.rs @@ -65,6 +65,22 @@ pub struct DataStoreImpl { last_update: i64, } +impl DataStoreImpl { + // This one just panics on everything + #[doc(hidden)] + pub unsafe fn new_test() -> Arc { + Arc::new(Self { + chunk_store: Arc::new(unsafe { ChunkStore::panic_store() }), + gc_mutex: Mutex::new(()), + last_gc_status: 
Mutex::new(GarbageCollectionStatus::default()), + verify_new: false, + chunk_order: ChunkOrder::None, + last_generation: 0, + last_update: 0, + }) + } +} + pub struct DataStore { inner: Arc, operation: Option, @@ -98,6 +114,15 @@ impl Drop for DataStore { } impl DataStore { + // This one just panics on everything + #[doc(hidden)] + pub unsafe fn new_test() -> Arc { + Arc::new(Self { + inner: unsafe { DataStoreImpl::new_test() }, + operation: None, + }) + } + pub fn lookup_datastore( name: &str, operation: Option, @@ -108,7 +133,6 @@ impl DataStore { let (config, _digest) = pbs_config::datastore::config()?; let config: DataStoreConfig = config.lookup("datastore", name)?; - let path = PathBuf::from(&config.path); if let Some(maintenance_mode) = config.get_maintenance_mode() { if let Err(error) = maintenance_mode.check(operation) { @@ -132,7 +156,8 @@ impl DataStore { } } - let datastore = DataStore::open_with_path(name, &path, config, generation, now)?; + let chunk_store = ChunkStore::open(name, &config.path)?; + let datastore = DataStore::with_store_and_config(chunk_store, config, generation, now)?; let datastore = Arc::new(datastore); map.insert(name.to_string(), datastore.clone()); @@ -153,15 +178,43 @@ impl DataStore { Ok(()) } - fn open_with_path( - store_name: &str, - path: &Path, + /// Open a raw database given a name and a path. + pub unsafe fn open_path( + name: &str, + path: impl AsRef, + operation: Option, + ) -> Result, Error> { + let path = path + .as_ref() + .to_str() + .ok_or_else(|| format_err!("non-utf8 paths not supported"))? + .to_owned(); + unsafe { Self::open_from_config(DataStoreConfig::new(name.to_owned(), path), operation) } + } + + /// Open a datastore given a raw configuration. 
+ pub unsafe fn open_from_config( + config: DataStoreConfig, + operation: Option, + ) -> Result, Error> { + let name = config.name.clone(); + + let chunk_store = ChunkStore::open(&name, &config.path)?; + let inner = Arc::new(Self::with_store_and_config(chunk_store, config, 0, 0)?); + + if let Some(operation) = operation { + update_active_operations(&name, operation, 1)?; + } + + Ok(Arc::new(Self { inner, operation })) + } + + fn with_store_and_config( + chunk_store: ChunkStore, config: DataStoreConfig, last_generation: usize, last_update: i64, ) -> Result { - let chunk_store = ChunkStore::open(store_name, path)?; - let mut gc_status_path = chunk_store.base_path(); gc_status_path.push(".gc-status"); @@ -363,7 +416,7 @@ impl DataStore { /// Remove a complete backup group including all snapshots, returns true /// if all snapshots were removed, and false if some were protected pub fn remove_backup_group( - &self, + self: &Arc, backup_group: &pbs_api_types::BackupGroup, ) -> Result { let backup_group = self.backup_group(backup_group.clone()); @@ -381,8 +434,8 @@ impl DataStore { let mut removed_all = true; // remove all individual backup dirs first to ensure nothing is using them - for snap in backup_group.list_backups(&self.base_path())? { - if snap.backup_dir.is_protected(self.base_path()) { + for snap in backup_group.list_backups()? 
{ + if snap.backup_dir.is_protected() { removed_all = false; continue; } @@ -405,13 +458,13 @@ impl DataStore { /// Remove a backup directory including all content pub fn remove_backup_dir( - &self, + self: &Arc, backup_dir: &pbs_api_types::BackupDir, force: bool, ) -> Result<(), Error> { let backup_dir = self.backup_dir(backup_dir.clone())?; - let full_path = backup_dir.full_path(self.base_path()); + let full_path = backup_dir.full_path(); let (_guard, _manifest_guard); if !force { @@ -419,7 +472,7 @@ impl DataStore { _manifest_guard = self.lock_manifest(&backup_dir)?; } - if backup_dir.is_protected(self.base_path()) { + if backup_dir.is_protected() { bail!("cannot remove protected snapshot"); } @@ -441,7 +494,7 @@ impl DataStore { /// /// Or None if there is no backup in the group (or the group dir does not exist). pub fn last_successful_backup( - &self, + self: &Arc, backup_group: &pbs_api_types::BackupGroup, ) -> Result, Error> { let backup_group = self.backup_group(backup_group.clone()); @@ -451,7 +504,7 @@ impl DataStore { group_path.push(backup_group.relative_group_path()); if group_path.exists() { - backup_group.last_successful_backup(&base_path) + backup_group.last_successful_backup() } else { Ok(None) } @@ -584,20 +637,23 @@ impl DataStore { /// /// The iterated item is still a Result that can contain errors from rather unexptected FS or /// parsing errors. - pub fn iter_backup_groups(&self) -> Result { - ListGroups::new(self.base_path()) + pub fn iter_backup_groups(self: &Arc) -> Result { + ListGroups::new(Arc::clone(self)) } /// Get a streaming iter over top-level backup groups of a datatstore, filtered by Ok results /// /// The iterated item's result is already unwrapped, if it contained an error it will be /// logged. 
Can be useful in iterator chain commands - pub fn iter_backup_groups_ok(&self) -> Result + '_, Error> { + pub fn iter_backup_groups_ok( + self: &Arc, + ) -> Result + 'static, Error> { + let this = Arc::clone(self); Ok( - ListGroups::new(self.base_path())?.filter_map(move |group| match group { + ListGroups::new(Arc::clone(&self))?.filter_map(move |group| match group { Ok(group) => Some(group), Err(err) => { - log::error!("list groups error on datastore {} - {}", self.name(), err); + log::error!("list groups error on datastore {} - {}", this.name(), err); None } }), @@ -607,8 +663,8 @@ impl DataStore { /// Get a in-memory vector for all top-level backup groups of a datatstore /// /// NOTE: using the iterator directly is most often more efficient w.r.t. memory usage - pub fn list_backup_groups(&self) -> Result, Error> { - ListGroups::new(self.base_path())?.collect() + pub fn list_backup_groups(self: &Arc) -> Result, Error> { + ListGroups::new(Arc::clone(self))?.collect() } pub fn list_images(&self) -> Result, Error> { @@ -1022,11 +1078,11 @@ impl DataStore { /// Updates the protection status of the specified snapshot. pub fn update_protection(&self, backup_dir: &BackupDir, protection: bool) -> Result<(), Error> { - let full_path = backup_dir.full_path(self.base_path()); + let full_path = backup_dir.full_path(); let _guard = lock_dir_noblock(&full_path, "snapshot", "possibly running or in use")?; - let protected_path = backup_dir.protected_file(self.base_path()); + let protected_path = backup_dir.protected_file(); if protection { std::fs::File::create(protected_path) .map_err(|err| format_err!("could not create protection file: {}", err))?; @@ -1093,12 +1149,12 @@ impl DataStore { } /// Open a backup group from this datastore. 
- pub fn backup_group(&self, group: pbs_api_types::BackupGroup) -> BackupGroup { - BackupGroup::new(group) + pub fn backup_group(self: &Arc, group: pbs_api_types::BackupGroup) -> BackupGroup { + BackupGroup::new(Arc::clone(&self), group) } /// Open a backup group from this datastore. - pub fn backup_group_from_parts(&self, ty: BackupType, id: T) -> BackupGroup + pub fn backup_group_from_parts(self: &Arc, ty: BackupType, id: T) -> BackupGroup where T: Into, { @@ -1108,18 +1164,18 @@ impl DataStore { /// Open a backup group from this datastore by backup group path such as `vm/100`. /// /// Convenience method for `store.backup_group(path.parse()?)` - pub fn backup_group_from_path(&self, path: &str) -> Result { + pub fn backup_group_from_path(self: &Arc, path: &str) -> Result { Ok(self.backup_group(path.parse()?)) } /// Open a snapshot (backup directory) from this datastore. - pub fn backup_dir(&self, dir: pbs_api_types::BackupDir) -> Result { + pub fn backup_dir(self: &Arc, dir: pbs_api_types::BackupDir) -> Result { BackupDir::with_group(self.backup_group(dir.group), dir.time) } /// Open a snapshot (backup directory) from this datastore. pub fn backup_dir_from_parts( - &self, + self: &Arc, ty: BackupType, id: T, time: i64, @@ -1132,7 +1188,7 @@ impl DataStore { /// Open a snapshot (backup directory) from this datastore with a cached rfc3339 time string. pub fn backup_dir_with_rfc3339>( - &self, + self: &Arc, group: BackupGroup, time_string: T, ) -> Result { @@ -1140,7 +1196,7 @@ impl DataStore { } /// Open a snapshot (backup directory) from this datastore by a snapshot path. - pub fn backup_dir_from_path(&self, path: &str) -> Result { + pub fn backup_dir_from_path(self: &Arc, path: &str) -> Result { self.backup_dir(path.parse()?) 
} } @@ -1152,9 +1208,9 @@ pub struct ListSnapshots { } impl ListSnapshots { - pub fn new(group: BackupGroup, group_path: PathBuf) -> Result<Self, Error> { + pub fn new(group: BackupGroup) -> Result<Self, Error> { Ok(ListSnapshots { - fd: proxmox_sys::fs::read_subdir(libc::AT_FDCWD, &group_path)?, + fd: proxmox_sys::fs::read_subdir(libc::AT_FDCWD, &group.full_group_path())?, group, }) } @@ -1192,14 +1248,16 @@ impl Iterator for ListSnapshots { /// A iterator for a (single) level of Backup Groups pub struct ListGroups { + store: Arc<DataStore>, type_fd: proxmox_sys::fs::ReadDir, id_state: Option<(BackupType, proxmox_sys::fs::ReadDir)>, } impl ListGroups { - pub fn new(base_path: PathBuf) -> Result<Self, Error> { + pub fn new(store: Arc<DataStore>) -> Result<Self, Error> { Ok(ListGroups { - type_fd: proxmox_sys::fs::read_subdir(libc::AT_FDCWD, &base_path)?, + type_fd: proxmox_sys::fs::read_subdir(libc::AT_FDCWD, &store.base_path())?, + store, id_state: None, }) } @@ -1227,6 +1285,7 @@ impl Iterator for ListGroups { } if BACKUP_ID_REGEX.is_match(name) { return Some(Ok(BackupGroup::new( + Arc::clone(&self.store), (group_type, name.to_owned()).into(), ))); } diff --git a/pbs-datastore/src/lib.rs b/pbs-datastore/src/lib.rs index bb6b99a6..f08d92f8 100644 --- a/pbs-datastore/src/lib.rs +++ b/pbs-datastore/src/lib.rs @@ -142,6 +142,8 @@ //! * / = no interaction //!
* shared/exclusive from POV of 'starting' process +#![deny(unsafe_op_in_unsafe_fn)] + // Note: .pcat1 => Proxmox Catalog Format version 1 pub const CATALOG_NAME: &str = "catalog.pcat1.didx"; diff --git a/pbs-datastore/src/snapshot_reader.rs b/pbs-datastore/src/snapshot_reader.rs index a3d1a8cb..7c99c9e9 100644 --- a/pbs-datastore/src/snapshot_reader.rs +++ b/pbs-datastore/src/snapshot_reader.rs @@ -34,7 +34,7 @@ impl SnapshotReader { ) -> Result { let snapshot = datastore.backup_dir(snapshot)?; - let snapshot_path = snapshot.full_path(datastore.base_path()); + let snapshot_path = snapshot.full_path(); let locked_dir = lock_dir_noblock_shared(&snapshot_path, "snapshot", "locked by another operation")?; diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs index 6de577e4..9f7acd0d 100644 --- a/src/api2/admin/datastore.rs +++ b/src/api2/admin/datastore.rs @@ -4,6 +4,7 @@ use std::collections::HashSet; use std::ffi::OsStr; use std::os::unix::ffi::OsStrExt; use std::path::PathBuf; +use std::sync::Arc; use anyhow::{bail, format_err, Error}; use futures::*; @@ -182,7 +183,7 @@ pub fn list_groups( return Ok(group_info); } - let snapshots = match group.list_backups(&datastore.base_path()) { + let snapshots = match group.list_backups() { Ok(snapshots) => snapshots, Err(_) => return Ok(group_info), }; @@ -294,7 +295,7 @@ pub fn list_snapshot_files( PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ, )?; - let info = BackupInfo::new(&datastore.base_path(), snapshot)?; + let info = BackupInfo::new(snapshot)?; let (_manifest, files) = get_all_snapshot_files(&datastore, &info)?; @@ -405,7 +406,7 @@ pub fn list_snapshots( group: group.into(), time: info.backup_dir.backup_time(), }; - let protected = info.backup_dir.is_protected(datastore.base_path()); + let protected = info.backup_dir.is_protected(); match get_all_snapshot_files(&datastore, &info) { Ok((manifest, files)) => { @@ -488,7 +489,7 @@ pub fn list_snapshots( return Ok(snapshots); } - let group_backups = 
group.list_backups(&datastore.base_path())?; + let group_backups = group.list_backups()?; snapshots.extend( group_backups @@ -500,7 +501,10 @@ pub fn list_snapshots( }) } -fn get_snapshots_count(store: &DataStore, filter_owner: Option<&Authid>) -> Result { +fn get_snapshots_count( + store: &Arc, + filter_owner: Option<&Authid>, +) -> Result { store .iter_backup_groups_ok()? .filter(|group| { @@ -519,7 +523,7 @@ fn get_snapshots_count(store: &DataStore, filter_owner: Option<&Authid>) -> Resu } }) .try_fold(Counts::default(), |mut counts, group| { - let snapshot_count = group.list_backups(&store.base_path())?.len() as u64; + let snapshot_count = group.list_backups()?.len() as u64; // only include groups with snapshots, counting/displaying emtpy groups can confuse if snapshot_count > 0 { @@ -788,7 +792,7 @@ pub fn prune( let mut prune_result = Vec::new(); - let list = group.list_backups(&datastore.base_path())?; + let list = group.list_backups()?; let mut prune_info = compute_prune_info(list, &prune_options)?; @@ -1797,7 +1801,7 @@ pub fn get_protection( PRIV_DATASTORE_AUDIT, )?; - Ok(backup_dir.is_protected(datastore.base_path())) + Ok(backup_dir.is_protected()) } #[api( diff --git a/src/api2/backup/mod.rs b/src/api2/backup/mod.rs index 811c4a73..9effc494 100644 --- a/src/api2/backup/mod.rs +++ b/src/api2/backup/mod.rs @@ -133,9 +133,7 @@ fn upgrade_to_backup_protocol( } let last_backup = { - let info = backup_group - .last_backup(&datastore.base_path(), true) - .unwrap_or(None); + let info = backup_group.last_backup(true).unwrap_or(None); if let Some(info) = info { let (manifest, _) = datastore.load_manifest(&info.backup_dir)?; let verify = manifest.unprotected["verify_state"].clone(); diff --git a/src/api2/reader/mod.rs b/src/api2/reader/mod.rs index ec01eac8..6bde4ccb 100644 --- a/src/api2/reader/mod.rs +++ b/src/api2/reader/mod.rs @@ -123,7 +123,7 @@ fn upgrade_to_backup_reader_protocol( } let _guard = lock_dir_noblock_shared( - 
&backup_dir.full_path(datastore.base_path()), + &backup_dir.full_path(), "snapshot", "locked by another operation", )?; diff --git a/src/api2/tape/backup.rs b/src/api2/tape/backup.rs index d0ae07a8..9a0c208a 100644 --- a/src/api2/tape/backup.rs +++ b/src/api2/tape/backup.rs @@ -458,7 +458,7 @@ fn backup_worker( progress.done_snapshots = 0; progress.group_snapshots = 0; - let snapshot_list = group.list_backups(&datastore.base_path())?; + let snapshot_list = group.list_backups()?; // filter out unfinished backups let mut snapshot_list: Vec<_> = snapshot_list diff --git a/src/backup/verify.rs b/src/backup/verify.rs index b355f151..d8be501e 100644 --- a/src/backup/verify.rs +++ b/src/backup/verify.rs @@ -450,7 +450,7 @@ pub fn verify_backup_group( filter: Option<&dyn Fn(&BackupManifest) -> bool>, ) -> Result, Error> { let mut errors = Vec::new(); - let mut list = match group.list_backups(&verify_worker.datastore.base_path()) { + let mut list = match group.list_backups() { Ok(list) => list, Err(err) => { task_log!( diff --git a/src/server/prune_job.rs b/src/server/prune_job.rs index 660359f7..2f8461eb 100644 --- a/src/server/prune_job.rs +++ b/src/server/prune_job.rs @@ -44,7 +44,7 @@ pub fn prune_datastore( for group in datastore.iter_backup_groups()? { let group = group?; - let list = group.list_backups(&datastore.base_path())?; + let list = group.list_backups()?; if !has_privs && !datastore.owns_backup(group.as_ref(), &auth_id)? 
{ continue; diff --git a/src/server/pull.rs b/src/server/pull.rs index f28662b9..09d3e45a 100644 --- a/src/server/pull.rs +++ b/src/server/pull.rs @@ -663,13 +663,13 @@ pub async fn pull_group( if params.remove_vanished { let group = params.store.backup_group(group.clone()); - let local_list = group.list_backups(¶ms.store.base_path())?; + let local_list = group.list_backups()?; for info in local_list { let backup_time = info.backup_dir.backup_time(); if remote_snapshots.contains(&backup_time) { continue; } - if info.backup_dir.is_protected(params.store.base_path()) { + if info.backup_dir.is_protected() { task_log!( worker, "don't delete vanished snapshot {:?} (protected)",