diff --git a/src/api2/tape/backup.rs b/src/api2/tape/backup.rs
index d5dbb0b7..eeef5528 100644
--- a/src/api2/tape/backup.rs
+++ b/src/api2/tape/backup.rs
@@ -19,6 +19,7 @@ use crate::{
         self,
         tape_job::{
             TapeBackupJobConfig,
+            TapeBackupJobSetup,
             TapeBackupJobStatus,
         },
     },
@@ -36,9 +37,6 @@ use crate::{
     },
     api2::types::{
         Authid,
-        DATASTORE_SCHEMA,
-        MEDIA_POOL_NAME_SCHEMA,
-        DRIVE_NAME_SCHEMA,
         UPID_SCHEMA,
         JOB_ID_SCHEMA,
         MediaPoolConfig,
@@ -109,28 +107,28 @@ pub fn list_tape_backup_jobs(

 pub fn do_tape_backup_job(
     mut job: Job,
-    tape_job: TapeBackupJobConfig,
+    setup: TapeBackupJobSetup,
     auth_id: &Authid,
     schedule: Option<String>,
 ) -> Result<String, Error> {

     let job_id = format!("{}:{}:{}:{}",
-                         tape_job.store,
-                         tape_job.pool,
-                         tape_job.drive,
+                         setup.store,
+                         setup.pool,
+                         setup.drive,
                          job.jobname());

     let worker_type = job.jobtype().to_string();

-    let datastore = DataStore::lookup_datastore(&tape_job.store)?;
+    let datastore = DataStore::lookup_datastore(&setup.store)?;

     let (config, _digest) = config::media_pool::config()?;
-    let pool_config: MediaPoolConfig = config.lookup("pool", &tape_job.pool)?;
+    let pool_config: MediaPoolConfig = config.lookup("pool", &setup.pool)?;

     let (drive_config, _digest) = config::drive::config()?;

     // early check/lock before starting worker
-    let drive_lock = lock_tape_device(&drive_config, &tape_job.drive)?;
+    let drive_lock = lock_tape_device(&drive_config, &setup.drive)?;

     let upid_str = WorkerTask::new_thread(
         &worker_type,
@@ -140,7 +138,7 @@ pub fn do_tape_backup_job(
         move |worker| {
             let _drive_lock = drive_lock; // keep lock guard

-            set_tape_device_state(&tape_job.drive, &worker.upid().to_string())?;
+            set_tape_device_state(&setup.drive, &worker.upid().to_string())?;
             job.start(&worker.upid().to_string())?;

             task_log!(worker,"Starting tape backup job '{}'", job_id);
@@ -151,11 +149,8 @@ pub fn do_tape_backup_job(
             let job_result = backup_worker(
                 &worker,
                 datastore,
-                &tape_job.drive,
                 &pool_config,
-                tape_job.eject_media.unwrap_or(false),
-                tape_job.export_media_set.unwrap_or(false),
-                tape_job.latest_only.unwrap_or(false),
+                &setup,
             );

             let status = worker.create_state(&job_result);
@@ -168,10 +163,10 @@ pub fn do_tape_backup_job(
                 );
             }

-            if let Err(err) = set_tape_device_state(&tape_job.drive, "") {
+            if let Err(err) = set_tape_device_state(&setup.drive, "") {
                 eprintln!(
                     "could not unset drive state for {}: {}",
-                    tape_job.drive,
+                    setup.drive,
                     err
                 );
             }
@@ -204,7 +199,7 @@ pub fn run_tape_backup_job(

     let job = Job::new("tape-backup-job", &id)?;

-    let upid_str = do_tape_backup_job(job, backup_job, &auth_id, None)?;
+    let upid_str = do_tape_backup_job(job, backup_job.setup, &auth_id, None)?;

     Ok(upid_str)
 }
@@ -212,29 +207,9 @@ pub fn run_tape_backup_job(
 #[api(
     input: {
         properties: {
-            store: {
-                schema: DATASTORE_SCHEMA,
-            },
-            pool: {
-                schema: MEDIA_POOL_NAME_SCHEMA,
-            },
-            drive: {
-                schema: DRIVE_NAME_SCHEMA,
-            },
-            "eject-media": {
-                description: "Eject media upon job completion.",
-                type: bool,
-                optional: true,
-            },
-            "export-media-set": {
-                description: "Export media set upon job completion.",
-                type: bool,
-                optional: true,
-            },
-            "latest-only": {
-                description: "Backup latest snapshots only.",
-                type: bool,
-                optional: true,
+            setup: {
+                type: TapeBackupJobSetup,
+                flatten: true,
             },
         },
     },
@@ -244,34 +219,25 @@ pub fn run_tape_backup_job(
 )]
 /// Backup datastore to tape media pool
 pub fn backup(
-    store: String,
-    pool: String,
-    drive: String,
-    eject_media: Option<bool>,
-    export_media_set: Option<bool>,
-    latest_only: Option<bool>,
+    setup: TapeBackupJobSetup,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {

     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

-    let datastore = DataStore::lookup_datastore(&store)?;
+    let datastore = DataStore::lookup_datastore(&setup.store)?;

     let (config, _digest) = config::media_pool::config()?;
-    let pool_config: MediaPoolConfig = config.lookup("pool", &pool)?;
+    let pool_config: MediaPoolConfig = config.lookup("pool", &setup.pool)?;

     let (drive_config, _digest) = config::drive::config()?;

     // early check/lock before starting worker
-    let drive_lock = lock_tape_device(&drive_config, &drive)?;
+    let drive_lock = lock_tape_device(&drive_config, &setup.drive)?;

     let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

-    let eject_media = eject_media.unwrap_or(false);
-    let export_media_set = export_media_set.unwrap_or(false);
-    let latest_only = latest_only.unwrap_or(false);
-
-    let job_id = format!("{}:{}:{}", store, pool, drive);
+    let job_id = format!("{}:{}:{}", setup.store, setup.pool, setup.drive);

     let upid_str = WorkerTask::new_thread(
         "tape-backup",
@@ -280,19 +246,16 @@ pub fn backup(
         to_stdout,
         move |worker| {
             let _drive_lock = drive_lock; // keep lock guard
-            set_tape_device_state(&drive, &worker.upid().to_string())?;
+            set_tape_device_state(&setup.drive, &worker.upid().to_string())?;
             backup_worker(
                 &worker,
                 datastore,
-                &drive,
                 &pool_config,
-                eject_media,
-                export_media_set,
-                latest_only,
+                &setup,
             )?;

             // ignore errors
-            let _ = set_tape_device_state(&drive, "");
+            let _ = set_tape_device_state(&setup.drive, "");
             Ok(())
         }
     )?;
@@ -303,11 +266,8 @@ pub fn backup(
 fn backup_worker(
     worker: &WorkerTask,
     datastore: Arc<DataStore>,
-    drive: &str,
     pool_config: &MediaPoolConfig,
-    eject_media: bool,
-    export_media_set: bool,
-    latest_only: bool,
+    setup: &TapeBackupJobSetup,
 ) -> Result<(), Error> {

     let status_path = Path::new(TAPE_STATUS_DIR);
@@ -315,20 +275,22 @@ fn backup_worker(
     let _lock = MediaPool::lock(status_path, &pool_config.name)?;

     task_log!(worker, "update media online status");
-    let changer_name = update_media_online_status(drive)?;
+    let changer_name = update_media_online_status(&setup.drive)?;

     let pool = MediaPool::with_config(status_path, &pool_config, changer_name)?;

-    let mut pool_writer = PoolWriter::new(pool, drive)?;
+    let mut pool_writer = PoolWriter::new(pool, &setup.drive)?;

     let mut group_list = BackupInfo::list_backup_groups(&datastore.base_path())?;

     group_list.sort_unstable();

+    let latest_only = setup.latest_only.unwrap_or(false);
+
     if latest_only {
         task_log!(worker, "latest-only: true (only considering latest snapshots)");
     }
- 
+
     for group in group_list {
         let mut snapshot_list = group.list_backups(&datastore.base_path())?;

@@ -355,9 +317,9 @@ fn backup_worker(

     pool_writer.commit()?;

-    if export_media_set {
+    if setup.export_media_set.unwrap_or(false) {
         pool_writer.export_media_set(worker)?;
-    } else if eject_media {
+    } else if setup.eject_media.unwrap_or(false) {
         pool_writer.eject_media(worker)?;
     }
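A note on `flatten: true` above: the `#[api]` macro splices the properties of `TapeBackupJobSetup` into the method's own input schema, so clients keep passing `store`, `pool`, `drive`, `eject-media`, and so on as top-level parameters; only the Rust-side grouping changes. A minimal sketch of the equivalent serde behaviour, using simplified stand-in types (`Setup` and `Params` are illustrative names, not part of this patch):

```rust
use serde::{Deserialize, Serialize};

// Stand-in for TapeBackupJobSetup: kebab-case keys, optional flags.
#[derive(Serialize, Deserialize, Debug)]
#[serde(rename_all = "kebab-case")]
struct Setup {
    store: String,
    pool: String,
    drive: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    eject_media: Option<bool>,
}

// Stand-in for the generated method input: the setup is flattened in.
#[derive(Serialize, Deserialize, Debug)]
struct Params {
    #[serde(flatten)]
    setup: Setup,
}

fn main() -> Result<(), serde_json::Error> {
    // Callers still send flat, top-level keys, exactly as before the refactor.
    let json = r#"{"store":"store1","pool":"pool1","drive":"drive1","eject-media":true}"#;
    let params: Params = serde_json::from_str(json)?;
    assert_eq!(params.setup.eject_media, Some(true));
    Ok(())
}
```

The same mechanism is what keeps existing API clients and job configurations working unchanged after this refactor.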
diff --git a/src/bin/docgen.rs b/src/bin/docgen.rs
index 8d018813..9df9f33b 100644
--- a/src/bin/docgen.rs
+++ b/src/bin/docgen.rs
@@ -6,7 +6,6 @@ use proxmox::{
     schema::{
         Schema,
         ObjectSchemaType,
-        SchemaPropertyEntry,
         ApiStringFormat,
     },
     router::{
diff --git a/src/bin/proxmox-backup-proxy.rs b/src/bin/proxmox-backup-proxy.rs
index 987944da..68fbbaad 100644
--- a/src/bin/proxmox-backup-proxy.rs
+++ b/src/bin/proxmox-backup-proxy.rs
@@ -586,7 +586,7 @@ async fn schedule_tape_backup_jobs() {
             Ok(job) => job,
             Err(_) => continue, // could not get lock
         };
-        if let Err(err) = do_tape_backup_job(job, job_config, &auth_id, Some(event_str)) {
+        if let Err(err) = do_tape_backup_job(job, job_config.setup, &auth_id, Some(event_str)) {
            eprintln!("unable to start tape bvackup job {} - {}", &job_id, err);
        }
    };
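Both call sites that consume a full `TapeBackupJobConfig` (`run_tape_backup_job` earlier and the scheduler here) now move `job_config.setup` into `do_tape_backup_job`, so the spawned worker owns the setup outright instead of borrowing from the config. A minimal sketch of that partial move, with simplified stand-in types (field sets reduced for illustration):

```rust
// Simplified stand-ins; the real structs carry more fields.
struct Setup { store: String, pool: String, drive: String }
struct JobConfig { id: String, setup: Setup }

// Stand-in for do_tape_backup_job: taking the setup by value means the
// worker thread it would spawn owns the data, with no borrowed lifetimes.
fn do_job(setup: Setup) -> String {
    format!("{}:{}:{}", setup.store, setup.pool, setup.drive)
}

fn main() {
    let job_config = JobConfig {
        id: "job1".to_string(),
        setup: Setup {
            store: "store1".to_string(),
            pool: "pool1".to_string(),
            drive: "drive1".to_string(),
        },
    };
    // `job_config.setup` is moved out; the remaining fields (here `id`)
    // of the partially moved struct stay usable.
    let job_id = do_job(job_config.setup);
    println!("started {} for {}", job_id, job_config.id);
}
```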
diff --git a/src/bin/proxmox-tape.rs b/src/bin/proxmox-tape.rs
index 74b9c128..ae12cab9 100644
--- a/src/bin/proxmox-tape.rs
+++ b/src/bin/proxmox-tape.rs
@@ -38,6 +38,7 @@ use proxmox_backup::{
         datastore::complete_datastore_name,
         drive::complete_drive_name,
         media_pool::complete_pool_name,
+        tape_job::TapeBackupJobSetup,
     },
     tape::{
         drive::{
@@ -790,27 +791,11 @@ async fn clean_drive(mut param: Value) -> Result<(), Error> {
 }

 #[api(
-   input: {
+    input: {
        properties: {
-            store: {
-                schema: DATASTORE_SCHEMA,
-            },
-            pool: {
-                schema: MEDIA_POOL_NAME_SCHEMA,
-            },
-            drive: {
-                schema: DRIVE_NAME_SCHEMA,
-                optional: true,
-            },
-            "eject-media": {
-                description: "Eject media upon job completion.",
-                type: bool,
-                optional: true,
-            },
-            "export-media-set": {
-                description: "Export media set upon job completion.",
-                type: bool,
-                optional: true,
+            setup: {
+                type: TapeBackupJobSetup,
+                flatten: true,
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
@@ -926,6 +911,7 @@ fn main() {
             "backup",
             CliCommand::new(&API_METHOD_BACKUP)
                 .arg_param(&["store", "pool"])
+                .completion_cb("drive", complete_drive_name)
                 .completion_cb("store", complete_datastore_name)
                 .completion_cb("pool", complete_pool_name)
         )
diff --git a/src/config/tape_job.rs b/src/config/tape_job.rs
index 1d0c37ed..7e1ad37d 100644
--- a/src/config/tape_job.rs
+++ b/src/config/tape_job.rs
@@ -31,9 +31,6 @@ lazy_static! {

 #[api(
     properties: {
-        id: {
-            schema: JOB_ID_SCHEMA,
-        },
         store: {
             schema: DATASTORE_SCHEMA,
         },
@@ -58,6 +55,31 @@ lazy_static! {
             type: bool,
             optional: true,
         },
+    }
+)]
+#[serde(rename_all="kebab-case")]
+#[derive(Updater,Serialize,Deserialize,Clone)]
+/// Tape Backup Job Setup
+pub struct TapeBackupJobSetup {
+    pub store: String,
+    pub pool: String,
+    pub drive: String,
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub eject_media: Option<bool>,
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub export_media_set: Option<bool>,
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub latest_only: Option<bool>,
+}
+
+#[api(
+    properties: {
+        id: {
+            schema: JOB_ID_SCHEMA,
+        },
+        setup: {
+            type: TapeBackupJobSetup,
+        },
         comment: {
             optional: true,
             schema: SINGLE_LINE_COMMENT_SCHEMA,
@@ -74,15 +96,8 @@ lazy_static! {
 pub struct TapeBackupJobConfig {
     #[updater(fixed)]
     pub id: String,
-    pub store: String,
-    pub pool: String,
-    pub drive: String,
-    #[serde(skip_serializing_if="Option::is_none")]
-    pub eject_media: Option<bool>,
-    #[serde(skip_serializing_if="Option::is_none")]
-    pub export_media_set: Option<bool>,
-    #[serde(skip_serializing_if="Option::is_none")]
-    pub latest_only: Option<bool>,
+    #[serde(flatten)]
+    pub setup: TapeBackupJobSetup,
     #[serde(skip_serializing_if="Option::is_none")]
     pub comment: Option<String>,
     #[serde(skip_serializing_if="Option::is_none")]
@@ -111,7 +126,7 @@ pub struct TapeBackupJobStatus {

 fn init() -> SectionConfig {
     let obj_schema = match TapeBackupJobConfig::API_SCHEMA {
-        Schema::Object(ref obj_schema) => obj_schema,
+        Schema::AllOf(ref allof_schema) => allof_schema,
         _ => unreachable!(),
     };
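On the last hunk: once a struct contains a flattened member, the `#[api]` macro no longer describes it as a single object schema but as a composition of its own properties plus the flattened type's schema, which is why `TapeBackupJobConfig::API_SCHEMA` now matches `Schema::AllOf` and the old `Schema::Object` arm would hit `unreachable!()` at startup. A toy model of that variant shift (the real `Schema` enum in the proxmox crate carries richer payloads; the shapes below are illustrative assumptions, not its actual definition):

```rust
// Toy model of the two schema shapes relevant to init().
enum Schema {
    // plain object: the struct's own properties only
    Object(&'static str),
    // composition: own properties plus flattened parts
    AllOf(&'static [&'static str]),
}

fn init(schema: &Schema) {
    match schema {
        // the arm init() must match after this patch
        Schema::AllOf(parts) => println!("registering section type from {} schema parts", parts.len()),
        // matching only the old Object variant would panic here at startup
        _ => unreachable!(),
    }
}

fn main() {
    // what the macro now conceptually generates for TapeBackupJobConfig
    let generated = Schema::AllOf(&["own fields (id, comment, schedule)", "TapeBackupJobSetup"]);
    init(&generated);
}
```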