diff --git a/examples/upload-speed.rs b/examples/upload-speed.rs
index bbabb37d..ed181330 100644
--- a/examples/upload-speed.rs
+++ b/examples/upload-speed.rs
@@ -26,6 +26,7 @@ async fn upload_speed() -> Result<f64, Error> {
             crypt_config: None,
             debug: false,
             benchmark: true,
+            no_cache: false,
         },
     )
     .await?;
diff --git a/pbs-client/src/backup_writer.rs b/pbs-client/src/backup_writer.rs
index 853b1cb4..abe7c79e 100644
--- a/pbs-client/src/backup_writer.rs
+++ b/pbs-client/src/backup_writer.rs
@@ -77,6 +77,8 @@ pub struct BackupWriterOptions<'a> {
     pub debug: bool,
     /// Start benchmark
     pub benchmark: bool,
+    /// Skip datastore cache
+    pub no_cache: bool,
 }
 
 impl BackupWriter {
@@ -100,6 +102,7 @@ impl BackupWriter {
             "store": writer_options.datastore,
             "debug": writer_options.debug,
             "benchmark": writer_options.benchmark,
+            "no-cache": writer_options.no_cache,
         });
 
         if !writer_options.ns.is_root() {
diff --git a/proxmox-backup-client/src/benchmark.rs b/proxmox-backup-client/src/benchmark.rs
index 6b11e216..463c2e61 100644
--- a/proxmox-backup-client/src/benchmark.rs
+++ b/proxmox-backup-client/src/benchmark.rs
@@ -237,6 +237,7 @@ async fn test_upload_speed(
             crypt_config: crypt_config.clone(),
             debug: false,
             benchmark: true,
+            no_cache: true,
         },
     )
     .await?;
diff --git a/proxmox-backup-client/src/main.rs b/proxmox-backup-client/src/main.rs
index 44c07662..3f6c5adb 100644
--- a/proxmox-backup-client/src/main.rs
+++ b/proxmox-backup-client/src/main.rs
@@ -742,6 +742,12 @@ fn spawn_catalog_upload(
                 optional: true,
                 default: false,
             },
+            "no-cache": {
+                type: Boolean,
+                description: "Bypass local datastore cache for network storages.",
+                optional: true,
+                default: false,
+            },
         }
     }
 )]
@@ -754,6 +760,7 @@ async fn create_backup(
     change_detection_mode: Option<BackupDetectionMode>,
     dry_run: bool,
     skip_e2big_xattr: bool,
+    no_cache: bool,
     limit: ClientRateLimitConfig,
     _info: &ApiMethod,
     _rpcenv: &mut dyn RpcEnvironment,
@@ -961,6 +968,7 @@ async fn create_backup(
             crypt_config: crypt_config.clone(),
             debug: true,
             benchmark: false,
+            no_cache,
         },
     )
     .await?;
diff --git a/src/api2/backup/environment.rs b/src/api2/backup/environment.rs
index 6a265bcc..d5e6869c 100644
--- a/src/api2/backup/environment.rs
+++ b/src/api2/backup/environment.rs
@@ -112,6 +112,7 @@ pub struct BackupEnvironment {
     result_attributes: Value,
     auth_id: Authid,
     pub debug: bool,
+    pub no_cache: bool,
     pub formatter: &'static dyn OutputFormatter,
     pub worker: Arc<WorkerTask>,
     pub datastore: Arc<DataStore>,
@@ -128,6 +129,7 @@ impl BackupEnvironment {
         worker: Arc<WorkerTask>,
         datastore: Arc<DataStore>,
         backup_dir: BackupDir,
+        no_cache: bool,
     ) -> Result<Self, Error> {
         let state = SharedBackupState {
             finished: false,
@@ -148,6 +150,7 @@ impl BackupEnvironment {
             worker,
             datastore,
             debug: tracing::enabled!(tracing::Level::DEBUG),
+            no_cache,
             formatter: JSON_FORMATTER,
             backup_dir,
             last_backup: None,
diff --git a/src/api2/backup/mod.rs b/src/api2/backup/mod.rs
index 026f1f10..ae61ff69 100644
--- a/src/api2/backup/mod.rs
+++ b/src/api2/backup/mod.rs
@@ -53,6 +53,7 @@ pub const API_METHOD_UPGRADE_BACKUP: ApiMethod = ApiMethod::new(
             ("backup-time", false, &BACKUP_TIME_SCHEMA),
             ("debug", true, &BooleanSchema::new("Enable verbose debug logging.").schema()),
             ("benchmark", true, &BooleanSchema::new("Job is a benchmark (do not keep data).").schema()),
+            ("no-cache", true, &BooleanSchema::new("Disable local datastore cache for network storages").schema()),
         ]),
     )
 ).access(
@@ -79,6 +80,7 @@ fn upgrade_to_backup_protocol(
     async move {
         let debug = param["debug"].as_bool().unwrap_or(false);
         let benchmark = param["benchmark"].as_bool().unwrap_or(false);
+        let no_cache = param["no-cache"].as_bool().unwrap_or(false);
 
         let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
 
@@ -214,6 +216,7 @@ fn upgrade_to_backup_protocol(
                 worker.clone(),
                 datastore,
                 backup_dir,
+                no_cache,
             )?;
 
             env.debug = debug;
diff --git a/src/api2/backup/upload_chunk.rs b/src/api2/backup/upload_chunk.rs
index 4514e3b9..35378377 100644
--- a/src/api2/backup/upload_chunk.rs
+++ b/src/api2/backup/upload_chunk.rs
@@ -262,6 +262,15 @@ async fn upload_to_backend(
                 );
             }
 
+            if env.no_cache {
+                let object_key = pbs_datastore::s3::object_key_from_digest(&digest)?;
+                let is_duplicate = s3_client
+                    .upload_no_replace_with_retry(object_key, data)
+                    .await
+                    .context("failed to upload chunk to s3 backend")?;
+                return Ok((digest, size, encoded_size, is_duplicate));
+            }
+
             // Avoid re-upload to S3 if the chunk is either present in the LRU cache or the chunk
             // file exists on filesystem. The latter means that the chunk has been present in the
             // past an was not cleaned up by garbage collection, so contained in the S3 object store.
diff --git a/src/server/push.rs b/src/server/push.rs
index c7806366..4a25d51c 100644
--- a/src/server/push.rs
+++ b/src/server/push.rs
@@ -831,6 +831,7 @@ pub(crate) async fn push_snapshot(
             crypt_config: None,
             debug: false,
             benchmark: false,
+            no_cache: false,
         },
     )
     .await?;