api: sync jobs: expose optional sync-direction parameter

Expose an optional `sync-direction` parameter on the relevant API endpoints
and switch the config type used for sync job operations based on it.

If not set, the config type defaults to `sync` and the sync direction to
`pull`, for full backwards compatibility. Whenever possible, determine the
sync direction and config type from the sync job config directly rather
than requiring the direction as an optional API parameter.
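
For illustration, a minimal sketch of how the direction-to-config-type mapping could look. `SyncDirection`, `as_config_type_str` and `from_config_type_str` appear in the diff below; the `sync-push` config type string for the push direction is an assumption inferred from the notification matchable-value list further down, and the real definition lives in `pbs-api-types`.

```rust
use anyhow::{bail, Error};

/// Sketch of the sync direction type; the real definition lives in pbs-api-types.
#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
pub enum SyncDirection {
    /// Pull from a remote source (backwards-compatible default).
    #[default]
    Pull,
    /// Push to a remote target.
    Push,
}

impl SyncDirection {
    /// Map the direction to the section config type stored in the sync job config.
    pub fn as_config_type_str(&self) -> &'static str {
        match self {
            SyncDirection::Pull => "sync",
            SyncDirection::Push => "sync-push", // assumed push config type
        }
    }

    /// Derive the direction back from a section config type, e.g. while iterating sections.
    pub fn from_config_type_str(config_type: &str) -> Result<Self, Error> {
        match config_type {
            "sync" => Ok(SyncDirection::Pull),
            "sync-push" => Ok(SyncDirection::Push), // assumed push config type
            other => bail!("invalid config type '{other}' for sync job"),
        }
    }
}
```

Keeping `Pull` as the `Default` variant is what makes the `unwrap_or_default()` calls in the endpoints below backwards compatible.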

Further, extend the read and modify access checks to take the sync
direction into account, conditionally checking for the permissions
required for the pull or push direction.
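
The per-job handlers in the diff below (run, read, update, delete) all resolve the direction inline by looking up the raw config section and deriving the direction from its config type. A condensed sketch of that pattern, with a hypothetical helper name and the crate paths as assumptions:

```rust
use anyhow::{format_err, Error};
use pbs_api_types::{SyncDirection, SyncJobConfig};
use proxmox_section_config::SectionConfigData;
use serde::Deserialize;

/// Hypothetical helper mirroring the inline lookup pattern: fetch the raw section by job
/// id, derive the sync direction from its config type, then deserialize the job config.
fn lookup_sync_job(
    config: &SectionConfigData,
    id: &str,
) -> Result<(SyncJobConfig, SyncDirection), Error> {
    let (config_type, config_section) = config
        .sections
        .get(id)
        .ok_or_else(|| format_err!("No sync job with id '{id}' found in config"))?;
    let sync_direction = SyncDirection::from_config_type_str(config_type)?;
    let sync_job = SyncJobConfig::deserialize(config_section)?;
    Ok((sync_job, sync_direction))
}
```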

Signed-off-by: Christian Ebner <c.ebner@proxmox.com>
Authored by Christian Ebner on 2024-11-11 16:43:43 +01:00, committed by Fabian Grünbichler
parent c9078b189c
commit 397e9c9991
5 changed files with 261 additions and 96 deletions

View File

@ -1,6 +1,7 @@
//! Datastore Synchronization Job Management
use anyhow::{bail, format_err, Error};
use serde::Deserialize;
use serde_json::Value;
use proxmox_router::{
@ -29,6 +30,10 @@ use crate::{
schema: DATASTORE_SCHEMA,
optional: true,
},
"sync-direction": {
type: SyncDirection,
optional: true,
},
},
},
returns: {
@ -44,6 +49,7 @@ use crate::{
/// List all sync jobs
pub fn list_sync_jobs(
store: Option<String>,
sync_direction: Option<SyncDirection>,
_param: Value,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<SyncJobStatus>, Error> {
@ -52,8 +58,9 @@ pub fn list_sync_jobs(
let (config, digest) = sync::config()?;
let sync_direction = sync_direction.unwrap_or_default();
let job_config_iter = config
.convert_to_typed_array("sync")?
.convert_to_typed_array(sync_direction.as_config_type_str())?
.into_iter()
.filter(|job: &SyncJobConfig| {
if let Some(store) = &store {
@ -62,7 +69,9 @@ pub fn list_sync_jobs(
true
}
})
.filter(|job: &SyncJobConfig| check_sync_job_read_access(&user_info, &auth_id, job));
.filter(|job: &SyncJobConfig| {
check_sync_job_read_access(&user_info, &auth_id, job, sync_direction)
});
let mut list = Vec::new();
@ -106,24 +115,23 @@ pub fn run_sync_job(
let user_info = CachedUserInfo::new()?;
let (config, _digest) = sync::config()?;
let sync_job: SyncJobConfig = config.lookup("sync", &id)?;
let (config_type, config_section) = config
.sections
.get(&id)
.ok_or_else(|| format_err!("No sync job with id '{id}' found in config"))?;
if !check_sync_job_modify_access(&user_info, &auth_id, &sync_job) {
bail!("permission check failed");
let sync_direction = SyncDirection::from_config_type_str(config_type)?;
let sync_job = SyncJobConfig::deserialize(config_section)?;
if !check_sync_job_modify_access(&user_info, &auth_id, &sync_job, sync_direction) {
bail!("permission check failed, '{auth_id}' is missing access");
}
let job = Job::new("syncjob", &id)?;
let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
let upid_str = do_sync_job(
job,
sync_job,
&auth_id,
None,
SyncDirection::Pull,
to_stdout,
)?;
let upid_str = do_sync_job(job, sync_job, &auth_id, None, sync_direction, to_stdout)?;
Ok(upid_str)
}

View File

@ -13,8 +13,9 @@ use proxmox_uuid::Uuid;
use pbs_api_types::{
Authid, DataStoreConfig, DataStoreConfigUpdater, DatastoreNotify, DatastoreTuning, KeepOptions,
MaintenanceMode, PruneJobConfig, PruneJobOptions, DATASTORE_SCHEMA, PRIV_DATASTORE_ALLOCATE,
PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_MODIFY, PROXMOX_CONFIG_DIGEST_SCHEMA, UPID_SCHEMA,
MaintenanceMode, PruneJobConfig, PruneJobOptions, SyncDirection, DATASTORE_SCHEMA,
PRIV_DATASTORE_ALLOCATE, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_MODIFY,
PROXMOX_CONFIG_DIGEST_SCHEMA, UPID_SCHEMA,
};
use pbs_config::BackupLockGuard;
use pbs_datastore::chunk_store::ChunkStore;
@ -523,9 +524,11 @@ pub async fn delete_datastore(
for job in list_verification_jobs(Some(name.clone()), Value::Null, rpcenv)? {
delete_verification_job(job.config.id, None, rpcenv)?
}
for job in list_sync_jobs(Some(name.clone()), Value::Null, rpcenv)? {
for direction in [SyncDirection::Pull, SyncDirection::Push] {
for job in list_sync_jobs(Some(name.clone()), Some(direction), Value::Null, rpcenv)? {
delete_sync_job(job.config.id, None, rpcenv)?
}
}
for job in list_prune_jobs(Some(name.clone()), Value::Null, rpcenv)? {
delete_prune_job(job.config.id, None, rpcenv)?
}

View File

@ -9,7 +9,7 @@ use proxmox_schema::api;
use proxmox_sortable_macro::sortable;
use crate::api2::admin::datastore::get_datastore_list;
use pbs_api_types::PRIV_SYS_AUDIT;
use pbs_api_types::{SyncDirection, PRIV_SYS_AUDIT};
use crate::api2::admin::prune::list_prune_jobs;
use crate::api2::admin::sync::list_sync_jobs;
@ -154,7 +154,8 @@ pub fn get_values(
});
}
let sync_jobs = list_sync_jobs(None, param.clone(), rpcenv)?;
for direction in [SyncDirection::Pull, SyncDirection::Push] {
let sync_jobs = list_sync_jobs(None, Some(direction), param.clone(), rpcenv)?;
for job in sync_jobs {
values.push(MatchableValue {
field: "job-id".into(),
@ -162,6 +163,7 @@ pub fn get_values(
comment: job.config.comment,
});
}
}
let verify_jobs = list_verification_jobs(None, param.clone(), rpcenv)?;
for job in verify_jobs {
@ -184,6 +186,7 @@ pub fn get_values(
"package-updates",
"prune",
"sync",
"sync-push",
"system-mail",
"tape-backup",
"tape-load",

View File

@ -1,6 +1,7 @@
use ::serde::{Deserialize, Serialize};
use anyhow::{bail, Error};
use hex::FromHex;
use pbs_api_types::SyncDirection;
use serde_json::Value;
use proxmox_router::{http_bail, Permission, Router, RpcEnvironment};
@ -8,8 +9,9 @@ use proxmox_schema::{api, param_bail};
use pbs_api_types::{
Authid, SyncJobConfig, SyncJobConfigUpdater, JOB_ID_SCHEMA, PRIV_DATASTORE_AUDIT,
PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_REMOTE_AUDIT,
PRIV_REMOTE_READ, PROXMOX_CONFIG_DIGEST_SCHEMA,
PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ,
PRIV_REMOTE_AUDIT, PRIV_REMOTE_DATASTORE_BACKUP, PRIV_REMOTE_DATASTORE_PRUNE, PRIV_REMOTE_READ,
PROXMOX_CONFIG_DIGEST_SCHEMA,
};
use pbs_config::sync;
@ -20,19 +22,36 @@ pub fn check_sync_job_read_access(
user_info: &CachedUserInfo,
auth_id: &Authid,
job: &SyncJobConfig,
sync_direction: SyncDirection,
) -> bool {
// check for audit access on datastore/namespace, applies for pull and push direction
let ns_anchor_privs = user_info.lookup_privs(auth_id, &job.acl_path());
if ns_anchor_privs & PRIV_DATASTORE_AUDIT == 0 {
return false;
}
match sync_direction {
SyncDirection::Pull => {
if let Some(remote) = &job.remote {
let remote_privs = user_info.lookup_privs(auth_id, &["remote", remote]);
remote_privs & PRIV_REMOTE_AUDIT != 0
} else {
let source_ds_privs = user_info.lookup_privs(auth_id, &["datastore", &job.remote_store]);
let source_ds_privs =
user_info.lookup_privs(auth_id, &["datastore", &job.remote_store]);
source_ds_privs & PRIV_DATASTORE_AUDIT != 0
}
}
SyncDirection::Push => {
// check for audit access on remote/datastore/namespace
if let Some(target_acl_path) = job.remote_acl_path() {
let remote_privs = user_info.lookup_privs(auth_id, &target_acl_path);
remote_privs & PRIV_REMOTE_AUDIT != 0
} else {
// Remote must always be present for sync in push direction, fail otherwise
false
}
}
}
}
fn is_correct_owner(auth_id: &Authid, job: &SyncJobConfig) -> bool {
@ -43,17 +62,23 @@ fn is_correct_owner(auth_id: &Authid, job: &SyncJobConfig) -> bool {
}
}
/// checks whether user can run the corresponding pull job
/// checks whether user can run the corresponding sync job, depending on sync direction
///
/// namespace creation/deletion ACL and backup group ownership checks happen in the pull code directly.
/// namespace creation/deletion ACL and backup group ownership checks happen in the pull/push code
/// directly.
/// remote side checks/filters remote datastore/namespace/group access.
pub fn check_sync_job_modify_access(
user_info: &CachedUserInfo,
auth_id: &Authid,
job: &SyncJobConfig,
sync_direction: SyncDirection,
) -> bool {
match sync_direction {
SyncDirection::Pull => {
let ns_anchor_privs = user_info.lookup_privs(auth_id, &job.acl_path());
if ns_anchor_privs & PRIV_DATASTORE_BACKUP == 0 || ns_anchor_privs & PRIV_DATASTORE_AUDIT == 0 {
if ns_anchor_privs & PRIV_DATASTORE_BACKUP == 0
|| ns_anchor_privs & PRIV_DATASTORE_AUDIT == 0
{
return false;
}
@ -69,15 +94,59 @@ pub fn check_sync_job_modify_access(
}
if let Some(remote) = &job.remote {
let remote_privs = user_info.lookup_privs(auth_id, &["remote", remote, &job.remote_store]);
let remote_privs =
user_info.lookup_privs(auth_id, &["remote", remote, &job.remote_store]);
return remote_privs & PRIV_REMOTE_READ != 0;
}
true
}
SyncDirection::Push => {
// Remote must always be present for sync in push direction, fail otherwise
let target_privs = if let Some(target_acl_path) = job.remote_acl_path() {
user_info.lookup_privs(auth_id, &target_acl_path)
} else {
return false;
};
// check user is allowed to create backups on remote datastore
if target_privs & PRIV_REMOTE_DATASTORE_BACKUP == 0 {
return false;
}
if let Some(true) = job.remove_vanished {
// check user is allowed to prune backup snapshots on remote datastore
if target_privs & PRIV_REMOTE_DATASTORE_PRUNE == 0 {
return false;
}
}
let source_privs = user_info.lookup_privs(auth_id, &job.acl_path());
// check user is allowed to read from (local) source datastore/namespace, independent
// of job ownership
if source_privs & PRIV_DATASTORE_READ != 0 {
return true;
}
// check user is not the owner of the sync job, but has datastore modify permissions,
// which implies permissions to change group ownership
if !is_correct_owner(auth_id, job) && source_privs & PRIV_DATASTORE_MODIFY == 0 {
return false;
}
// user has Datastore.Modify, check also for Datastore.Backup to allow modify access
source_privs & PRIV_DATASTORE_BACKUP != 0
}
}
}
#[api(
input: {
properties: {},
properties: {
"sync-direction": {
type: SyncDirection,
optional: true,
},
},
},
returns: {
description: "List configured jobs.",
@ -92,6 +161,7 @@ pub fn check_sync_job_modify_access(
/// List all sync jobs
pub fn list_sync_jobs(
_param: Value,
sync_direction: Option<SyncDirection>,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<SyncJobConfig>, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
@ -99,13 +169,16 @@ pub fn list_sync_jobs(
let (config, digest) = sync::config()?;
let list = config.convert_to_typed_array("sync")?;
let sync_direction = sync_direction.unwrap_or_default();
let list = config.convert_to_typed_array(sync_direction.as_config_type_str())?;
rpcenv["digest"] = hex::encode(digest).into();
let list = list
.into_iter()
.filter(|sync_job| check_sync_job_read_access(&user_info, &auth_id, sync_job))
.filter(|sync_job| {
check_sync_job_read_access(&user_info, &auth_id, sync_job, sync_direction)
})
.collect();
Ok(list)
}
@ -118,6 +191,10 @@ pub fn list_sync_jobs(
type: SyncJobConfig,
flatten: true,
},
"sync-direction": {
type: SyncDirection,
optional: true,
},
},
},
access: {
@ -128,14 +205,16 @@ pub fn list_sync_jobs(
/// Create a new sync job.
pub fn create_sync_job(
config: SyncJobConfig,
sync_direction: Option<SyncDirection>,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
let sync_direction = sync_direction.unwrap_or_default();
let _lock = sync::lock_config()?;
if !check_sync_job_modify_access(&user_info, &auth_id, &config) {
if !check_sync_job_modify_access(&user_info, &auth_id, &config, sync_direction) {
bail!("permission check failed");
}
@ -158,7 +237,7 @@ pub fn create_sync_job(
param_bail!("id", "job '{}' already exists.", config.id);
}
section_config.set_data(&config.id, "sync", &config)?;
section_config.set_data(&config.id, sync_direction.as_config_type_str(), &config)?;
sync::save_config(&section_config)?;
@ -188,8 +267,17 @@ pub fn read_sync_job(id: String, rpcenv: &mut dyn RpcEnvironment) -> Result<Sync
let (config, digest) = sync::config()?;
let sync_job = config.lookup("sync", &id)?;
if !check_sync_job_read_access(&user_info, &auth_id, &sync_job) {
let (sync_job, sync_direction) =
if let Some((config_type, config_section)) = config.sections.get(&id) {
(
SyncJobConfig::deserialize(config_section)?,
SyncDirection::from_config_type_str(config_type)?,
)
} else {
http_bail!(NOT_FOUND, "job '{id}' does not exist.")
};
if !check_sync_job_read_access(&user_info, &auth_id, &sync_job, sync_direction) {
bail!("permission check failed");
}
@ -284,7 +372,15 @@ pub fn update_sync_job(
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}
let mut data: SyncJobConfig = config.lookup("sync", &id)?;
let (mut data, sync_direction) =
if let Some((config_type, config_section)) = config.sections.get(&id) {
(
SyncJobConfig::deserialize(config_section)?,
SyncDirection::from_config_type_str(config_type)?,
)
} else {
http_bail!(NOT_FOUND, "job '{id}' does not exist.")
};
if let Some(delete) = delete {
for delete_prop in delete {
@ -405,11 +501,11 @@ pub fn update_sync_job(
}
}
if !check_sync_job_modify_access(&user_info, &auth_id, &data) {
if !check_sync_job_modify_access(&user_info, &auth_id, &data, sync_direction) {
bail!("permission check failed");
}
config.set_data(&id, "sync", &data)?;
config.set_data(&id, sync_direction.as_config_type_str(), &data)?;
sync::save_config(&config)?;
@ -456,17 +552,16 @@ pub fn delete_sync_job(
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}
match config.lookup("sync", &id) {
Ok(job) => {
if !check_sync_job_modify_access(&user_info, &auth_id, &job) {
if let Some((config_type, config_section)) = config.sections.get(&id) {
let sync_direction = SyncDirection::from_config_type_str(config_type)?;
let job = SyncJobConfig::deserialize(config_section)?;
if !check_sync_job_modify_access(&user_info, &auth_id, &job, sync_direction) {
bail!("permission check failed");
}
config.sections.remove(&id);
}
Err(_) => {
} else {
http_bail!(NOT_FOUND, "job '{}' does not exist.", id)
}
};
sync::save_config(&config)?;
@ -536,39 +631,67 @@ acl:1:/remote/remote1/remotestore1:write@pbs:RemoteSyncOperator
};
// should work without ACLs
assert!(check_sync_job_read_access(&user_info, root_auth_id, &job));
assert!(check_sync_job_modify_access(&user_info, root_auth_id, &job));
assert!(check_sync_job_read_access(
&user_info,
root_auth_id,
&job,
SyncDirection::Pull,
));
assert!(check_sync_job_modify_access(
&user_info,
root_auth_id,
&job,
SyncDirection::Pull,
));
// user without permissions must fail
assert!(!check_sync_job_read_access(
&user_info,
&no_perm_auth_id,
&job
&job,
SyncDirection::Pull,
));
assert!(!check_sync_job_modify_access(
&user_info,
&no_perm_auth_id,
&job
&job,
SyncDirection::Pull,
));
// reading without proper read permissions on either remote or local must fail
assert!(!check_sync_job_read_access(&user_info, &read_auth_id, &job));
assert!(!check_sync_job_read_access(
&user_info,
&read_auth_id,
&job,
SyncDirection::Pull,
));
// reading without proper read permissions on local end must fail
job.remote = Some("remote1".to_string());
assert!(!check_sync_job_read_access(&user_info, &read_auth_id, &job));
assert!(!check_sync_job_read_access(
&user_info,
&read_auth_id,
&job,
SyncDirection::Pull,
));
// reading without proper read permissions on remote end must fail
job.remote = Some("remote0".to_string());
job.store = "localstore1".to_string();
assert!(!check_sync_job_read_access(&user_info, &read_auth_id, &job));
assert!(!check_sync_job_read_access(
&user_info,
&read_auth_id,
&job,
SyncDirection::Pull,
));
// writing without proper write permissions on either end must fail
job.store = "localstore0".to_string();
assert!(!check_sync_job_modify_access(
&user_info,
&write_auth_id,
&job
&job,
SyncDirection::Pull,
));
// writing without proper write permissions on local end must fail
@ -580,39 +703,54 @@ acl:1:/remote/remote1/remotestore1:write@pbs:RemoteSyncOperator
assert!(!check_sync_job_modify_access(
&user_info,
&write_auth_id,
&job
&job,
SyncDirection::Pull,
));
// reset remote to one where users have access
job.remote = Some("remote1".to_string());
// user with read permission can only read, but not modify/run
assert!(check_sync_job_read_access(&user_info, &read_auth_id, &job));
assert!(check_sync_job_read_access(
&user_info,
&read_auth_id,
&job,
SyncDirection::Pull,
));
job.owner = Some(read_auth_id.clone());
assert!(!check_sync_job_modify_access(
&user_info,
&read_auth_id,
&job
&job,
SyncDirection::Pull,
));
job.owner = None;
assert!(!check_sync_job_modify_access(
&user_info,
&read_auth_id,
&job
&job,
SyncDirection::Pull,
));
job.owner = Some(write_auth_id.clone());
assert!(!check_sync_job_modify_access(
&user_info,
&read_auth_id,
&job
&job,
SyncDirection::Pull,
));
// user with simple write permission can modify/run
assert!(check_sync_job_read_access(&user_info, &write_auth_id, &job));
assert!(check_sync_job_read_access(
&user_info,
&write_auth_id,
&job,
SyncDirection::Pull,
));
assert!(check_sync_job_modify_access(
&user_info,
&write_auth_id,
&job
&job,
SyncDirection::Pull,
));
// but can't modify/run with deletion
@ -620,7 +758,8 @@ acl:1:/remote/remote1/remotestore1:write@pbs:RemoteSyncOperator
assert!(!check_sync_job_modify_access(
&user_info,
&write_auth_id,
&job
&job,
SyncDirection::Pull,
));
// unless they have Datastore.Prune as well
@ -628,7 +767,8 @@ acl:1:/remote/remote1/remotestore1:write@pbs:RemoteSyncOperator
assert!(check_sync_job_modify_access(
&user_info,
&write_auth_id,
&job
&job,
SyncDirection::Pull,
));
// changing owner is not possible
@ -636,7 +776,8 @@ acl:1:/remote/remote1/remotestore1:write@pbs:RemoteSyncOperator
assert!(!check_sync_job_modify_access(
&user_info,
&write_auth_id,
&job
&job,
SyncDirection::Pull,
));
// also not to the default 'root@pam'
@ -644,7 +785,8 @@ acl:1:/remote/remote1/remotestore1:write@pbs:RemoteSyncOperator
assert!(!check_sync_job_modify_access(
&user_info,
&write_auth_id,
&job
&job,
SyncDirection::Pull,
));
// unless they have Datastore.Modify as well
@ -653,13 +795,15 @@ acl:1:/remote/remote1/remotestore1:write@pbs:RemoteSyncOperator
assert!(check_sync_job_modify_access(
&user_info,
&write_auth_id,
&job
&job,
SyncDirection::Pull,
));
job.owner = None;
assert!(check_sync_job_modify_access(
&user_info,
&write_auth_id,
&job
&job,
SyncDirection::Pull,
));
Ok(())

View File

@ -589,7 +589,14 @@ async fn schedule_datastore_sync_jobs() {
Ok((config, _digest)) => config,
};
for (job_id, (_, job_config)) in config.sections {
for (job_id, (job_type, job_config)) in config.sections {
let sync_direction = match SyncDirection::from_config_type_str(&job_type) {
Ok(direction) => direction,
Err(err) => {
eprintln!("unexpected config type in sync job config - {err}");
continue;
}
};
let job_config: SyncJobConfig = match serde_json::from_value(job_config) {
Ok(c) => c,
Err(err) => {
@ -616,7 +623,7 @@ async fn schedule_datastore_sync_jobs() {
job_config,
&auth_id,
Some(event_str),
SyncDirection::Pull,
sync_direction,
false,
) {
eprintln!("unable to start datastore sync job {job_id} - {err}");