From eb5e0ae65a95e3b92d8202e458859990742fb884 Mon Sep 17 00:00:00 2001 From: Wolfgang Bumiller Date: Wed, 21 Jul 2021 14:12:22 +0200 Subject: [PATCH] move remaining client tools to pbs-tools/datastore pbs-datastore now ended up depending on tokio after all, but that's fine for now for the fuse code I added pbs-fuse-loop (has the old fuse_loop and its 'loopdev' module) ultimately only binaries should depend on this to avoid the library link the only things remaining to move out of the client binary are the api method return types, those will need to be moved to pbs-api-types... Signed-off-by: Wolfgang Bumiller --- Cargo.toml | 3 +- Makefile | 1 + pbs-api-types/Cargo.toml | 3 +- pbs-api-types/src/lib.rs | 39 +++ .../src}/dynamic_index.rs | 4 +- pbs-client/src/lib.rs | 1 + pbs-datastore/Cargo.toml | 4 +- pbs-datastore/src/dynamic_index.rs | 224 +++++++++++++++++- pbs-datastore/src/lib.rs | 1 + {src/tools => pbs-datastore/src}/paperkey.rs | 4 +- pbs-fuse-loop/Cargo.toml | 20 ++ {src/tools => pbs-fuse-loop/src}/fuse_loop.rs | 2 +- pbs-fuse-loop/src/lib.rs | 5 + {src/tools => pbs-fuse-loop/src}/loopdev.rs | 3 +- pbs-tools/src/io.rs | 22 ++ pbs-tools/src/lib.rs | 1 + src/api2/types/mod.rs | 38 --- src/backup/mod.rs | 4 - src/bin/proxmox-backup-client.rs | 6 +- src/bin/proxmox_backup_client/key.rs | 11 +- src/bin/proxmox_backup_client/mount.rs | 36 ++- src/bin/proxmox_backup_client/snapshot.rs | 14 +- src/bin/proxmox_backup_client/task.rs | 6 +- src/bin/proxmox_tape/encryption_key.rs | 7 +- src/tools/mod.rs | 20 -- 25 files changed, 353 insertions(+), 126 deletions(-) rename {src/backup => pbs-client/src}/dynamic_index.rs (98%) rename {src/tools => pbs-datastore/src}/paperkey.rs (98%) create mode 100644 pbs-fuse-loop/Cargo.toml rename {src/tools => pbs-fuse-loop/src}/fuse_loop.rs (99%) create mode 100644 pbs-fuse-loop/src/lib.rs rename {src/tools => pbs-fuse-loop/src}/loopdev.rs (99%) create mode 100644 pbs-tools/src/io.rs diff --git a/Cargo.toml b/Cargo.toml index
f1a748af..0bc48bb9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -23,6 +23,7 @@ members = [ "pbs-buildcfg", "pbs-client", "pbs-datastore", + "pbs-fuse-loop", "pbs-runtime", "pbs-systemd", "pbs-tools", @@ -90,7 +91,6 @@ pxar = { version = "0.10.1", features = [ "tokio-io" ] } proxmox = { version = "0.12.0", features = [ "sortable-macro", "api-macro", "cli", "router", "tfa" ] } proxmox-acme-rs = "0.2.1" proxmox-apt = "0.5.1" -proxmox-fuse = "0.1.1" proxmox-http = { version = "0.3.0", features = [ "client", "http-helpers", "websocket" ] } proxmox-openid = "0.6.1" @@ -98,6 +98,7 @@ pbs-api-types = { path = "pbs-api-types" } pbs-buildcfg = { path = "pbs-buildcfg" } pbs-client = { path = "pbs-client" } pbs-datastore = { path = "pbs-datastore" } +pbs-fuse-loop = { path = "pbs-fuse-loop" } pbs-runtime = { path = "pbs-runtime" } pbs-systemd = { path = "pbs-systemd" } pbs-tools = { path = "pbs-tools" } diff --git a/Makefile b/Makefile index 4c398219..ed7182bc 100644 --- a/Makefile +++ b/Makefile @@ -35,6 +35,7 @@ SUBCRATES := \ pbs-buildcfg \ pbs-client \ pbs-datastore \ + pbs-fuse-loop \ pbs-runtime \ pbs-systemd \ pbs-tools \ diff --git a/pbs-api-types/Cargo.toml b/pbs-api-types/Cargo.toml index 564a2101..c8372ba4 100644 --- a/pbs-api-types/Cargo.toml +++ b/pbs-api-types/Cargo.toml @@ -8,8 +8,9 @@ description = "general API type helpers for PBS" [dependencies] anyhow = "1.0" lazy_static = "1.4" -nix = "0.19.1" libc = "0.2" +nix = "0.19.1" +openssl = "0.10" regex = "1.2" serde = { version = "1.0", features = ["derive"] } diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index a95cbf6a..576099eb 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -525,3 +525,42 @@ pub struct Counts { /// The counts for other backup types pub other: Option, } + +pub const PASSWORD_HINT_SCHEMA: Schema = StringSchema::new("Password hint.") + .format(&SINGLE_LINE_COMMENT_FORMAT) + .min_length(1) + .max_length(64) + .schema(); + + +#[api] +#[derive(Deserialize, 
Serialize)] +/// RSA public key information +pub struct RsaPubKeyInfo { + /// Path to key (if stored in a file) + #[serde(skip_serializing_if="Option::is_none")] + pub path: Option, + /// RSA exponent + pub exponent: String, + /// Hex-encoded RSA modulus + pub modulus: String, + /// Key (modulus) length in bits + pub length: usize, +} + +impl std::convert::TryFrom> for RsaPubKeyInfo { + type Error = anyhow::Error; + + fn try_from(value: openssl::rsa::Rsa) -> Result { + let modulus = value.n().to_hex_str()?.to_string(); + let exponent = value.e().to_dec_str()?.to_string(); + let length = value.size() as usize * 8; + + Ok(Self { + path: None, + exponent, + modulus, + length, + }) + } +} diff --git a/src/backup/dynamic_index.rs b/pbs-client/src/dynamic_index.rs similarity index 98% rename from src/backup/dynamic_index.rs rename to pbs-client/src/dynamic_index.rs index f7e758d2..3857f2c2 100644 --- a/src/backup/dynamic_index.rs +++ b/pbs-client/src/dynamic_index.rs @@ -101,7 +101,7 @@ impl BufferedDynamicReader { } } -impl crate::tools::BufferedRead for BufferedDynamicReader { +impl pbs_tools::io::BufferedRead for BufferedDynamicReader { fn buffered_read(&mut self, offset: u64) -> Result<&[u8], Error> { if offset == self.archive_size { return Ok(&self.read_buffer[0..0]); @@ -141,7 +141,7 @@ impl crate::tools::BufferedRead for BufferedDynamicReader { impl std::io::Read for BufferedDynamicReader { fn read(&mut self, buf: &mut [u8]) -> Result { - use crate::tools::BufferedRead; + use pbs_tools::io::BufferedRead; use std::io::{Error, ErrorKind}; let data = match self.buffered_read(self.read_offset) { diff --git a/pbs-client/src/lib.rs b/pbs-client/src/lib.rs index 87a911c2..d14a3617 100644 --- a/pbs-client/src/lib.rs +++ b/pbs-client/src/lib.rs @@ -11,6 +11,7 @@ use pbs_tools::cert::CertInfo; use pbs_tools::auth::private_auth_key; pub mod catalog_shell; +pub mod dynamic_index; pub mod pxar; pub mod tools; diff --git a/pbs-datastore/Cargo.toml b/pbs-datastore/Cargo.toml 
index 12e097fa..c5eb2353 100644 --- a/pbs-datastore/Cargo.toml +++ b/pbs-datastore/Cargo.toml @@ -7,6 +7,7 @@ description = "low level pbs data storage access" [dependencies] anyhow = "1.0" +base64 = "0.12" crc32fast = "1" endian_trait = { version = "0.6", features = [ "arrays" ] } libc = "0.2" @@ -15,10 +16,11 @@ nix = "0.19.1" openssl = "0.10" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" +tokio = { version = "1.6", features = [] } zstd = { version = "0.6", features = [ "bindgen" ] } pathpatterns = "0.1.2" -pxar = { version = "0.10.1", features = [ "tokio-io" ] } +pxar = "0.10.1" proxmox = { version = "0.12.0", default-features = false, features = [ "api-macro" ] } diff --git a/pbs-datastore/src/dynamic_index.rs b/pbs-datastore/src/dynamic_index.rs index 28b71d57..bdddc138 100644 --- a/pbs-datastore/src/dynamic_index.rs +++ b/pbs-datastore/src/dynamic_index.rs @@ -1,15 +1,20 @@ use std::fs::File; use std::io::{BufWriter, Seek, SeekFrom, Write}; +use std::ops::Range; use std::os::unix::io::AsRawFd; use std::path::{Path, PathBuf}; -use std::sync::Arc; +use std::pin::Pin; +use std::sync::{Arc, Mutex}; +use std::task::Context; use anyhow::{bail, format_err, Error}; use proxmox::tools::io::ReadExt; use proxmox::tools::uuid::Uuid; use proxmox::tools::mmap::Mmap; +use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation}; +use pbs_tools::lru_cache::LruCache; use pbs_tools::process_locker::ProcessLockSharedGuard; use crate::Chunker; @@ -18,6 +23,7 @@ use crate::chunk_store::ChunkStore; use crate::data_blob::{DataBlob, DataChunkBuilder}; use crate::file_formats; use crate::index::{IndexFile, ChunkReadInfo}; +use crate::read_chunk::ReadChunk; /// Header format definition for dynamic index files (`.dixd`) #[repr(C)] @@ -506,3 +512,219 @@ impl Write for DynamicChunkWriter { )) } } + +struct CachedChunk { + range: Range, + data: Vec, +} + +impl CachedChunk { + /// Perform sanity checks on the range and data size: + pub fn new(range: Range, data: Vec) 
-> Result { + if data.len() as u64 != range.end - range.start { + bail!( + "read chunk with wrong size ({} != {})", + data.len(), + range.end - range.start, + ); + } + Ok(Self { range, data }) + } +} + +pub struct BufferedDynamicReader { + store: S, + index: DynamicIndexReader, + archive_size: u64, + read_buffer: Vec, + buffered_chunk_idx: usize, + buffered_chunk_start: u64, + read_offset: u64, + lru_cache: LruCache, +} + +struct ChunkCacher<'a, S> { + store: &'a mut S, + index: &'a DynamicIndexReader, +} + +impl<'a, S: ReadChunk> pbs_tools::lru_cache::Cacher for ChunkCacher<'a, S> { + fn fetch(&mut self, index: usize) -> Result, Error> { + let info = match self.index.chunk_info(index) { + Some(info) => info, + None => bail!("chunk index out of range"), + }; + let range = info.range; + let data = self.store.read_chunk(&info.digest)?; + CachedChunk::new(range, data).map(Some) + } +} + +impl BufferedDynamicReader { + pub fn new(index: DynamicIndexReader, store: S) -> Self { + let archive_size = index.index_bytes(); + Self { + store, + index, + archive_size, + read_buffer: Vec::with_capacity(1024 * 1024), + buffered_chunk_idx: 0, + buffered_chunk_start: 0, + read_offset: 0, + lru_cache: LruCache::new(32), + } + } + + pub fn archive_size(&self) -> u64 { + self.archive_size + } + + fn buffer_chunk(&mut self, idx: usize) -> Result<(), Error> { + //let (start, end, data) = self.lru_cache.access( + let cached_chunk = self.lru_cache.access( + idx, + &mut ChunkCacher { + store: &mut self.store, + index: &self.index, + }, + )?.ok_or_else(|| format_err!("chunk not found by cacher"))?; + + // fixme: avoid copy + self.read_buffer.clear(); + self.read_buffer.extend_from_slice(&cached_chunk.data); + + self.buffered_chunk_idx = idx; + + self.buffered_chunk_start = cached_chunk.range.start; + //println!("BUFFER {} {}", self.buffered_chunk_start, end); + Ok(()) + } +} + +impl pbs_tools::io::BufferedRead for BufferedDynamicReader { + fn buffered_read(&mut self, offset: u64) -> 
Result<&[u8], Error> { + if offset == self.archive_size { + return Ok(&self.read_buffer[0..0]); + } + + let buffer_len = self.read_buffer.len(); + let index = &self.index; + + // optimization for sequential read + if buffer_len > 0 + && ((self.buffered_chunk_idx + 1) < index.index().len()) + && (offset >= (self.buffered_chunk_start + (self.read_buffer.len() as u64))) + { + let next_idx = self.buffered_chunk_idx + 1; + let next_end = index.chunk_end(next_idx); + if offset < next_end { + self.buffer_chunk(next_idx)?; + let buffer_offset = (offset - self.buffered_chunk_start) as usize; + return Ok(&self.read_buffer[buffer_offset..]); + } + } + + if (buffer_len == 0) + || (offset < self.buffered_chunk_start) + || (offset >= (self.buffered_chunk_start + (self.read_buffer.len() as u64))) + { + let end_idx = index.index().len() - 1; + let end = index.chunk_end(end_idx); + let idx = index.binary_search(0, 0, end_idx, end, offset)?; + self.buffer_chunk(idx)?; + } + + let buffer_offset = (offset - self.buffered_chunk_start) as usize; + Ok(&self.read_buffer[buffer_offset..]) + } +} + +impl std::io::Read for BufferedDynamicReader { + fn read(&mut self, buf: &mut [u8]) -> Result { + use pbs_tools::io::BufferedRead; + use std::io::{Error, ErrorKind}; + + let data = match self.buffered_read(self.read_offset) { + Ok(v) => v, + Err(err) => return Err(Error::new(ErrorKind::Other, err.to_string())), + }; + + let n = if data.len() > buf.len() { + buf.len() + } else { + data.len() + }; + + buf[0..n].copy_from_slice(&data[0..n]); + + self.read_offset += n as u64; + + Ok(n) + } +} + +impl std::io::Seek for BufferedDynamicReader { + fn seek(&mut self, pos: SeekFrom) -> Result { + let new_offset = match pos { + SeekFrom::Start(start_offset) => start_offset as i64, + SeekFrom::End(end_offset) => (self.archive_size as i64) + end_offset, + SeekFrom::Current(offset) => (self.read_offset as i64) + offset, + }; + + use std::io::{Error, ErrorKind}; + if (new_offset < 0) || (new_offset > 
(self.archive_size as i64)) { + return Err(Error::new( + ErrorKind::Other, + format!( + "seek is out of range {} ([0..{}])", + new_offset, self.archive_size + ), + )); + } + self.read_offset = new_offset as u64; + + Ok(self.read_offset) + } +} + +/// This is a workaround until we have cleaned up the chunk/reader/... infrastructure for better +/// async use! +/// +/// Ideally BufferedDynamicReader gets replaced so the LruCache maps to `BroadcastFuture`, +/// so that we can properly access it from multiple threads simultaneously while not issuing +/// duplicate simultaneous reads over http. +#[derive(Clone)] +pub struct LocalDynamicReadAt { + inner: Arc>>, +} + +impl LocalDynamicReadAt { + pub fn new(inner: BufferedDynamicReader) -> Self { + Self { + inner: Arc::new(Mutex::new(inner)), + } + } +} + +impl ReadAt for LocalDynamicReadAt { + fn start_read_at<'a>( + self: Pin<&'a Self>, + _cx: &mut Context, + buf: &'a mut [u8], + offset: u64, + ) -> MaybeReady, ReadAtOperation<'a>> { + use std::io::Read; + MaybeReady::Ready(tokio::task::block_in_place(move || { + let mut reader = self.inner.lock().unwrap(); + reader.seek(SeekFrom::Start(offset))?; + Ok(reader.read(buf)?) 
+ })) + } + + fn poll_complete<'a>( + self: Pin<&'a Self>, + _op: ReadAtOperation<'a>, + ) -> MaybeReady, ReadAtOperation<'a>> { + panic!("LocalDynamicReadAt::start_read_at returned Pending"); + } +} diff --git a/pbs-datastore/src/lib.rs b/pbs-datastore/src/lib.rs index 3034ec4e..f0765514 100644 --- a/pbs-datastore/src/lib.rs +++ b/pbs-datastore/src/lib.rs @@ -195,6 +195,7 @@ pub mod file_formats; pub mod index; pub mod key_derivation; pub mod manifest; +pub mod paperkey; pub mod prune; pub mod read_chunk; pub mod store_progress; diff --git a/src/tools/paperkey.rs b/pbs-datastore/src/paperkey.rs similarity index 98% rename from src/tools/paperkey.rs rename to pbs-datastore/src/paperkey.rs index 2dc185a8..d90fd83a 100644 --- a/src/tools/paperkey.rs +++ b/pbs-datastore/src/paperkey.rs @@ -6,7 +6,7 @@ use serde::{Deserialize, Serialize}; use proxmox::api::api; -use crate::backup::KeyConfig; +use crate::KeyConfig; #[api()] #[derive(Debug, Serialize, Deserialize)] @@ -247,7 +247,7 @@ fn generate_qr_code(output_type: &str, lines: &[String]) -> Result, Erro .wait_with_output() .map_err(|_| format_err!("Failed to read stdout"))?; - let output = crate::tools::command_output(output, None)?; + let output = pbs_tools::command_output(output, None)?; Ok(output) } diff --git a/pbs-fuse-loop/Cargo.toml b/pbs-fuse-loop/Cargo.toml new file mode 100644 index 00000000..90fffe0b --- /dev/null +++ b/pbs-fuse-loop/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "pbs-fuse-loop" +version = "0.1.0" +authors = ["Proxmox Support Team "] +edition = "2018" +description = "fuse and loop device helpers" + +[dependencies] +anyhow = "1.0" +futures = "0.3" +lazy_static = "1.4" +libc = "0.2" +nix = "0.19.1" +regex = "1.2" +tokio = { version = "1.6", features = [] } + +proxmox = "0.12.0" +proxmox-fuse = "0.1.1" + +pbs-tools = { path = "../pbs-tools" } diff --git a/src/tools/fuse_loop.rs b/pbs-fuse-loop/src/fuse_loop.rs similarity index 99% rename from src/tools/fuse_loop.rs rename to 
pbs-fuse-loop/src/fuse_loop.rs index 68d8b0a9..3836d11c 100644 --- a/src/tools/fuse_loop.rs +++ b/pbs-fuse-loop/src/fuse_loop.rs @@ -295,7 +295,7 @@ fn emerg_cleanup (loopdev: Option<&str>, mut backing_file: PathBuf) { let mut command = std::process::Command::new("fusermount"); command.arg("-u"); command.arg(&backing_file); - let _ = crate::tools::run_command(command, None); + let _ = pbs_tools::run_command(command, None); let _ = remove_file(&backing_file); backing_file.set_extension("pid"); diff --git a/pbs-fuse-loop/src/lib.rs b/pbs-fuse-loop/src/lib.rs new file mode 100644 index 00000000..18e1e8c6 --- /dev/null +++ b/pbs-fuse-loop/src/lib.rs @@ -0,0 +1,5 @@ +pub mod loopdev; + + +mod fuse_loop; +pub use fuse_loop::*; diff --git a/src/tools/loopdev.rs b/pbs-fuse-loop/src/loopdev.rs similarity index 99% rename from src/tools/loopdev.rs rename to pbs-fuse-loop/src/loopdev.rs index 68918dfd..db041f3b 100644 --- a/src/tools/loopdev.rs +++ b/pbs-fuse-loop/src/loopdev.rs @@ -1,10 +1,11 @@ //! Helpers to work with /dev/loop* devices -use anyhow::Error; use std::fs::{File, OpenOptions}; use std::path::Path; use std::os::unix::io::{RawFd, AsRawFd}; +use anyhow::Error; + const LOOP_CONTROL: &str = "/dev/loop-control"; const LOOP_NAME: &str = "/dev/loop"; diff --git a/pbs-tools/src/io.rs b/pbs-tools/src/io.rs new file mode 100644 index 00000000..13e1d9b7 --- /dev/null +++ b/pbs-tools/src/io.rs @@ -0,0 +1,22 @@ +//! I/O utilities. + +use anyhow::Error; + +use proxmox::tools::fd::Fd; + +/// The `BufferedRead` trait provides a single function +/// `buffered_read`. It returns a reference to an internal buffer. The +/// purpose of this traid is to avoid unnecessary data copies. +pub trait BufferedRead { + /// This functions tries to fill the internal buffers, then + /// returns a reference to the available data. It returns an empty + /// buffer if `offset` points to the end of the file. 
+ fn buffered_read(&mut self, offset: u64) -> Result<&[u8], Error>; +} + +/// safe wrapper for `nix::unistd::pipe2` defaulting to `O_CLOEXEC` and guarding the file +/// descriptors. +pub fn pipe() -> Result<(Fd, Fd), Error> { + let (pin, pout) = nix::unistd::pipe2(nix::fcntl::OFlag::O_CLOEXEC)?; + Ok((Fd(pin), Fd(pout))) +} diff --git a/pbs-tools/src/lib.rs b/pbs-tools/src/lib.rs index c64615fd..683afbba 100644 --- a/pbs-tools/src/lib.rs +++ b/pbs-tools/src/lib.rs @@ -7,6 +7,7 @@ pub mod cert; pub mod compression; pub mod format; pub mod fs; +pub mod io; pub mod json; pub mod lru_cache; pub mod nom; diff --git a/src/api2/types/mod.rs b/src/api2/types/mod.rs index 02a538fa..bd3c7ac5 100644 --- a/src/api2/types/mod.rs +++ b/src/api2/types/mod.rs @@ -933,44 +933,6 @@ pub const DATASTORE_NOTIFY_STRING_SCHEMA: Schema = StringSchema::new( .schema(); -pub const PASSWORD_HINT_SCHEMA: Schema = StringSchema::new("Password hint.") - .format(&SINGLE_LINE_COMMENT_FORMAT) - .min_length(1) - .max_length(64) - .schema(); - -#[api] -#[derive(Deserialize, Serialize)] -/// RSA public key information -pub struct RsaPubKeyInfo { - /// Path to key (if stored in a file) - #[serde(skip_serializing_if="Option::is_none")] - pub path: Option, - /// RSA exponent - pub exponent: String, - /// Hex-encoded RSA modulus - pub modulus: String, - /// Key (modulus) length in bits - pub length: usize, -} - -impl std::convert::TryFrom> for RsaPubKeyInfo { - type Error = anyhow::Error; - - fn try_from(value: openssl::rsa::Rsa) -> Result { - let modulus = value.n().to_hex_str()?.to_string(); - let exponent = value.e().to_dec_str()?.to_string(); - let length = value.size() as usize * 8; - - Ok(Self { - path: None, - exponent, - modulus, - length, - }) - } -} - #[api( properties: { "next-run": { diff --git a/src/backup/mod.rs b/src/backup/mod.rs index 20b6b3ca..6ab3278d 100644 --- a/src/backup/mod.rs +++ b/src/backup/mod.rs @@ -85,10 +85,6 @@ pub use pbs_datastore::read_chunk::*; mod read_chunk; pub use 
read_chunk::*; -// Split -mod dynamic_index; -pub use dynamic_index::*; - mod datastore; pub use datastore::*; diff --git a/src/bin/proxmox-backup-client.rs b/src/bin/proxmox-backup-client.rs index 7e1879dd..faf2c0a3 100644 --- a/src/bin/proxmox-backup-client.rs +++ b/src/bin/proxmox-backup-client.rs @@ -64,7 +64,7 @@ use pbs_datastore::{CATALOG_NAME, CryptConfig, KeyConfig, decrypt_key, rsa_encry use pbs_datastore::backup_info::{BackupDir, BackupGroup}; use pbs_datastore::catalog::{BackupCatalogWriter, CatalogReader, CatalogWriter}; use pbs_datastore::chunk_store::verify_chunk_size; -use pbs_datastore::dynamic_index::DynamicIndexReader; +use pbs_datastore::dynamic_index::{BufferedDynamicReader, DynamicIndexReader}; use pbs_datastore::fixed_index::FixedIndexReader; use pbs_datastore::index::IndexFile; use pbs_datastore::manifest::{ @@ -76,10 +76,6 @@ use pbs_tools::sync::StdChannelWriter; use pbs_tools::tokio::TokioWriterAdapter; use pbs_tools::json; -use proxmox_backup::backup::{ - BufferedDynamicReader, -}; - mod proxmox_backup_client; use proxmox_backup_client::*; diff --git a/src/bin/proxmox_backup_client/key.rs b/src/bin/proxmox_backup_client/key.rs index 7ca028bc..cd2958ba 100644 --- a/src/bin/proxmox_backup_client/key.rs +++ b/src/bin/proxmox_backup_client/key.rs @@ -13,19 +13,14 @@ use proxmox::api::router::ReturnType; use proxmox::sys::linux::tty; use proxmox::tools::fs::{file_get_contents, replace_file, CreateOptions}; -use pbs_datastore::{KeyInfo, Kdf}; +use pbs_api_types::{RsaPubKeyInfo, PASSWORD_HINT_SCHEMA}; +use pbs_datastore::{KeyConfig, KeyInfo, Kdf, rsa_decrypt_key_config}; +use pbs_datastore::paperkey::{generate_paper_key, PaperkeyFormat}; use pbs_client::tools::key_source::{ find_default_encryption_key, find_default_master_pubkey, get_encryption_key_password, place_default_encryption_key, place_default_master_pubkey, }; - -use proxmox_backup::{ - api2::types::{RsaPubKeyInfo, PASSWORD_HINT_SCHEMA}, - backup::{rsa_decrypt_key_config, KeyConfig}, - 
tools::paperkey::{generate_paper_key, PaperkeyFormat}, -}; - #[api( input: { properties: { diff --git a/src/bin/proxmox_backup_client/mount.rs b/src/bin/proxmox_backup_client/mount.rs index dc2b720a..6a80b558 100644 --- a/src/bin/proxmox_backup_client/mount.rs +++ b/src/bin/proxmox_backup_client/mount.rs @@ -17,20 +17,14 @@ use proxmox::{sortable, identity}; use proxmox::api::{ApiHandler, ApiMethod, RpcEnvironment, schema::*, cli::*}; use proxmox::tools::fd::Fd; +use pbs_datastore::{BackupDir, BackupGroup, CryptConfig, load_and_decrypt_key}; +use pbs_datastore::index::IndexFile; +use pbs_datastore::dynamic_index::BufferedDynamicReader; use pbs_client::tools::key_source::get_encryption_key_password; use pbs_client::{BackupReader, RemoteChunkReader}; use pbs_tools::json::required_string_param; -use proxmox_backup::tools; -use proxmox_backup::backup::{ - load_and_decrypt_key, - CryptConfig, - IndexFile, - BackupDir, - BackupGroup, - BufferedDynamicReader, - CachedChunkReader, -}; +use proxmox_backup::backup::CachedChunkReader; use crate::{ REPO_URL_SCHEMA, @@ -120,10 +114,10 @@ pub fn unmap_cmd_def() -> CliCommand { fn complete_mapping_names(_arg: &str, _param: &HashMap) -> Vec { - match tools::fuse_loop::find_all_mappings() { + match pbs_fuse_loop::find_all_mappings() { Ok(mappings) => mappings .filter_map(|(name, _)| { - tools::systemd::unescape_unit(&name).ok() + pbs_systemd::unescape_unit(&name).ok() }).collect(), Err(_) => Vec::new() } @@ -144,7 +138,7 @@ fn mount( // Process should be daemonized. // Make sure to fork before the async runtime is instantiated to avoid troubles. - let (pr, pw) = proxmox_backup::tools::pipe()?; + let (pr, pw) = pbs_tools::io::pipe()?; match unsafe { fork() } { Ok(ForkResult::Parent { .. 
}) => { drop(pw); @@ -284,9 +278,9 @@ async fn mount_do(param: Value, pipe: Option) -> Result { let reader = CachedChunkReader::new(chunk_reader, index, 8).seekable(); let name = &format!("{}:{}/{}", repo.to_string(), path, archive_name); - let name_escaped = tools::systemd::escape_unit(name, false); + let name_escaped = pbs_systemd::escape_unit(name, false); - let mut session = tools::fuse_loop::FuseLoopSession::map_loop(size, reader, &name_escaped, options).await?; + let mut session = pbs_fuse_loop::FuseLoopSession::map_loop(size, reader, &name_escaped, options).await?; let loopdev = session.loopdev_path.clone(); let (st_send, st_recv) = futures::channel::mpsc::channel(1); @@ -343,10 +337,10 @@ fn unmap( let mut name = match param["name"].as_str() { Some(name) => name.to_owned(), None => { - tools::fuse_loop::cleanup_unused_run_files(None); + pbs_fuse_loop::cleanup_unused_run_files(None); let mut any = false; - for (backing, loopdev) in tools::fuse_loop::find_all_mappings()? { - let name = tools::systemd::unescape_unit(&backing)?; + for (backing, loopdev) in pbs_fuse_loop::find_all_mappings()? 
{ + let name = pbs_systemd::unescape_unit(&backing)?; println!("{}:\t{}", loopdev.unwrap_or_else(|| "(unmapped)".to_string()), name); any = true; } @@ -363,10 +357,10 @@ fn unmap( } if name.starts_with("/dev/loop") { - tools::fuse_loop::unmap_loopdev(name)?; + pbs_fuse_loop::unmap_loopdev(name)?; } else { - let name = tools::systemd::escape_unit(&name, false); - tools::fuse_loop::unmap_name(name)?; + let name = pbs_systemd::escape_unit(&name, false); + pbs_fuse_loop::unmap_name(name)?; } Ok(Value::Null) diff --git a/src/bin/proxmox_backup_client/snapshot.rs b/src/bin/proxmox_backup_client/snapshot.rs index c5fd79f5..631a3a53 100644 --- a/src/bin/proxmox_backup_client/snapshot.rs +++ b/src/bin/proxmox_backup_client/snapshot.rs @@ -8,20 +8,12 @@ use proxmox::{ tools::fs::file_get_contents, }; +use pbs_api_types::SnapshotListItem; use pbs_client::tools::key_source::get_encryption_key_password; +use pbs_datastore::{BackupGroup, CryptMode, CryptConfig, decrypt_key}; +use pbs_datastore::data_blob::DataBlob; use pbs_tools::json::required_string_param; -use proxmox_backup::{ - api2::types::*, - backup::{ - CryptMode, - CryptConfig, - DataBlob, - BackupGroup, - decrypt_key, - } -}; - use crate::{ REPO_URL_SCHEMA, KEYFILE_SCHEMA, diff --git a/src/bin/proxmox_backup_client/task.rs b/src/bin/proxmox_backup_client/task.rs index 42c8c4c0..121f084b 100644 --- a/src/bin/proxmox_backup_client/task.rs +++ b/src/bin/proxmox_backup_client/task.rs @@ -7,7 +7,7 @@ use pbs_client::display_task_log; use pbs_tools::percent_encoding::percent_encode_component; use pbs_tools::json::required_string_param; -use proxmox_backup::api2::types::UPID_SCHEMA; +use pbs_api_types::UPID; use crate::{ REPO_URL_SCHEMA, @@ -87,7 +87,7 @@ async fn task_list(param: Value) -> Result { optional: true, }, upid: { - schema: UPID_SCHEMA, + type: UPID, }, } } @@ -113,7 +113,7 @@ async fn task_log(param: Value) -> Result { optional: true, }, upid: { - schema: UPID_SCHEMA, + type: UPID, }, } } diff --git 
a/src/bin/proxmox_tape/encryption_key.rs b/src/bin/proxmox_tape/encryption_key.rs index e3c93704..a161ab6d 100644 --- a/src/bin/proxmox_tape/encryption_key.rs +++ b/src/bin/proxmox_tape/encryption_key.rs @@ -12,14 +12,9 @@ use proxmox::{ }; use pbs_datastore::Kdf; +use pbs_datastore::paperkey::{PaperkeyFormat, generate_paper_key}; use proxmox_backup::{ - tools::{ - paperkey::{ - PaperkeyFormat, - generate_paper_key, - }, - }, config, api2::{ self, diff --git a/src/tools/mod.rs b/src/tools/mod.rs index 2d2d923a..b6c55ac2 100644 --- a/src/tools/mod.rs +++ b/src/tools/mod.rs @@ -30,20 +30,17 @@ pub mod config; pub mod cpio; pub mod daemon; pub mod disks; -pub mod fuse_loop; mod memcom; pub use memcom::Memcom; pub mod logrotate; -pub mod loopdev; pub mod serde_filter; pub mod statistics; pub mod subscription; pub mod systemd; pub mod ticket; pub mod sgutils2; -pub mod paperkey; pub mod parallel_handler; pub use parallel_handler::ParallelHandler; @@ -54,16 +51,6 @@ pub use file_logger::{FileLogger, FileLogOptions}; pub use pbs_tools::broadcast_future::{BroadcastData, BroadcastFuture}; pub use pbs_tools::ops::ControlFlow; -/// The `BufferedRead` trait provides a single function -/// `buffered_read`. It returns a reference to an internal buffer. The -/// purpose of this traid is to avoid unnecessary data copies. -pub trait BufferedRead { - /// This functions tries to fill the internal buffers, then - /// returns a reference to the available data. It returns an empty - /// buffer if `offset` points to the end of the file. - fn buffered_read(&mut self, offset: u64) -> Result<&[u8], Error>; -} - /// Shortcut for md5 sums. pub fn md5sum(data: &[u8]) -> Result { hash(MessageDigest::md5(), data).map_err(Error::from) @@ -174,13 +161,6 @@ pub fn fail_on_shutdown() -> Result<(), Error> { Ok(()) } -/// safe wrapper for `nix::unistd::pipe2` defaulting to `O_CLOEXEC` and guarding the file -/// descriptors. 
-pub fn pipe() -> Result<(Fd, Fd), Error> { - let (pin, pout) = nix::unistd::pipe2(nix::fcntl::OFlag::O_CLOEXEC)?; - Ok((Fd(pin), Fd(pout))) -} - /// safe wrapper for `nix::sys::socket::socketpair` defaulting to `O_CLOEXEC` and guarding the file /// descriptors. pub fn socketpair() -> Result<(Fd, Fd), Error> {