api: backup: add no-cache flag to bypass local datastore cache

Adds the `no-cache` flag so the client can request to bypass the local
datastore cache for chunk uploads. This is mainly intended for debugging
and benchmarking, but it can also be used in cases where caching is known
to be ineffective (no possible deduplication).

Signed-off-by: Christian Ebner <c.ebner@proxmox.com>
Reviewed-by: Hannes Laimer <h.laimer@proxmox.com>
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
commit 9072382886 (parent 014a049033)
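
For context, here is a minimal, self-contained sketch of the client-side option this commit adds. The struct is a trimmed stand-in for the real `BackupWriterOptions` (which carries more fields, e.g. `crypt_config` and the backup namespace), so anything beyond the field names visible in the diff below is hypothetical:

```rust
/// Trimmed stand-in for pbs-client's BackupWriterOptions; only the
/// fields touched by this commit are mirrored here.
struct BackupWriterOptions<'a> {
    datastore: &'a str,
    debug: bool,
    benchmark: bool,
    /// Skip the local datastore cache for chunk uploads
    no_cache: bool,
}

fn main() {
    // A caller that knows deduplication cannot help (unique,
    // non-repeating data) may bypass the cache entirely.
    let opts = BackupWriterOptions {
        datastore: "store1",
        debug: false,
        benchmark: false,
        no_cache: true,
    };
    println!("datastore={} no_cache={}", opts.datastore, opts.no_cache);
}
```
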
@@ -26,6 +26,7 @@ async fn upload_speed() -> Result<f64, Error> {
             crypt_config: None,
             debug: false,
             benchmark: true,
+            no_cache: false,
         },
     )
     .await?;
@@ -77,6 +77,8 @@ pub struct BackupWriterOptions<'a> {
     pub debug: bool,
     /// Start benchmark
     pub benchmark: bool,
+    /// Skip datastore cache
+    pub no_cache: bool,
 }
 
 impl BackupWriter {
@@ -100,6 +102,7 @@ impl BackupWriter {
             "store": writer_options.datastore,
             "debug": writer_options.debug,
             "benchmark": writer_options.benchmark,
+            "no-cache": writer_options.no_cache,
         });
 
         if !writer_options.ns.is_root() {
@@ -237,6 +237,7 @@ async fn test_upload_speed(
             crypt_config: crypt_config.clone(),
             debug: false,
             benchmark: true,
+            no_cache: true,
         },
     )
     .await?;
@@ -742,6 +742,12 @@ fn spawn_catalog_upload(
                 optional: true,
                 default: false,
             },
+            "no-cache": {
+                type: Boolean,
+                description: "Bypass local datastore cache for network storages.",
+                optional: true,
+                default: false,
+            },
         }
     }
 )]
@@ -754,6 +760,7 @@ async fn create_backup(
     change_detection_mode: Option<BackupDetectionMode>,
     dry_run: bool,
     skip_e2big_xattr: bool,
+    no_cache: bool,
     limit: ClientRateLimitConfig,
     _info: &ApiMethod,
     _rpcenv: &mut dyn RpcEnvironment,
@@ -961,6 +968,7 @@ async fn create_backup(
             crypt_config: crypt_config.clone(),
             debug: true,
             benchmark: false,
+            no_cache,
         },
     )
     .await?;
@@ -112,6 +112,7 @@ pub struct BackupEnvironment {
     result_attributes: Value,
     auth_id: Authid,
     pub debug: bool,
+    pub no_cache: bool,
     pub formatter: &'static dyn OutputFormatter,
     pub worker: Arc<WorkerTask>,
     pub datastore: Arc<DataStore>,
@@ -128,6 +129,7 @@ impl BackupEnvironment {
         worker: Arc<WorkerTask>,
         datastore: Arc<DataStore>,
         backup_dir: BackupDir,
+        no_cache: bool,
     ) -> Result<Self, Error> {
         let state = SharedBackupState {
             finished: false,
@@ -148,6 +150,7 @@ impl BackupEnvironment {
             worker,
             datastore,
             debug: tracing::enabled!(tracing::Level::DEBUG),
+            no_cache,
             formatter: JSON_FORMATTER,
             backup_dir,
             last_backup: None,
@@ -53,6 +53,7 @@ pub const API_METHOD_UPGRADE_BACKUP: ApiMethod = ApiMethod::new(
         ("backup-time", false, &BACKUP_TIME_SCHEMA),
         ("debug", true, &BooleanSchema::new("Enable verbose debug logging.").schema()),
         ("benchmark", true, &BooleanSchema::new("Job is a benchmark (do not keep data).").schema()),
+        ("no-cache", true, &BooleanSchema::new("Disable local datastore cache for network storages").schema()),
     ]),
 )
 ).access(
@@ -79,6 +80,7 @@ fn upgrade_to_backup_protocol(
     async move {
         let debug = param["debug"].as_bool().unwrap_or(false);
         let benchmark = param["benchmark"].as_bool().unwrap_or(false);
+        let no_cache = param["no-cache"].as_bool().unwrap_or(false);
 
         let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
 
@@ -214,6 +216,7 @@ fn upgrade_to_backup_protocol(
             worker.clone(),
             datastore,
             backup_dir,
+            no_cache,
         )?;
 
         env.debug = debug;
@@ -262,6 +262,15 @@ async fn upload_to_backend(
             );
         }
 
+        if env.no_cache {
+            let object_key = pbs_datastore::s3::object_key_from_digest(&digest)?;
+            let is_duplicate = s3_client
+                .upload_no_replace_with_retry(object_key, data)
+                .await
+                .context("failed to upload chunk to s3 backend")?;
+            return Ok((digest, size, encoded_size, is_duplicate));
+        }
+
         // Avoid re-upload to S3 if the chunk is either present in the LRU cache or the chunk
         // file exists on filesystem. The latter means that the chunk has been present in the
         // past an was not cleaned up by garbage collection, so contained in the S3 object store.
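
To make the branch above easier to follow, here is a hedged, self-contained sketch of the decision flow in `upload_to_backend`. The helpers are placeholders, not the real S3 client or LRU-cache API, and the sketch is synchronous although the real upload path is async:

```rust
/// Placeholder for the real S3 upload (upload_no_replace_with_retry),
/// which reports whether the object already existed on the backend.
fn upload_to_s3(_digest: &[u8; 32], _data: &[u8]) -> Result<bool, String> {
    Ok(false)
}

/// Placeholder for the LRU-cache lookup / chunk-file existence check.
fn cached_or_on_disk(_digest: &[u8; 32]) -> bool {
    false
}

/// Decision flow sketched from the hunk above: with `no_cache` set,
/// every chunk is pushed straight to the backend; otherwise a chunk
/// already known locally is assumed to exist in the object store and
/// its re-upload is skipped.
fn upload_chunk(no_cache: bool, digest: [u8; 32], data: &[u8]) -> Result<bool, String> {
    if no_cache {
        // Bypass: skip the cache lookup and the local insert entirely.
        return upload_to_s3(&digest, data);
    }
    if cached_or_on_disk(&digest) {
        // Chunk known locally, so assumed present in the object store.
        return Ok(true);
    }
    upload_to_s3(&digest, data)
}

fn main() {
    let digest = [0u8; 32];
    let dup = upload_chunk(true, digest, b"chunk data").unwrap();
    println!("duplicate: {dup}");
}
```
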
@@ -831,6 +831,7 @@ pub(crate) async fn push_snapshot(
             crypt_config: None,
             debug: false,
             benchmark: false,
+            no_cache: false,
         },
     )
     .await?;