diff --git a/PVE/API2/Qemu.pm b/PVE/API2/Qemu.pm
index d8d3f3e6..21a0eae1 100644
--- a/PVE/API2/Qemu.pm
+++ b/PVE/API2/Qemu.pm
@@ -2938,6 +2938,7 @@ __PACKAGE__->register_method({
 	    foreach my $opt (keys %$drives) {
 		my $drive = $drives->{$opt};
 		my $skipcomplete = ($total_jobs != $i); # finish after last drive
+		my $completion = $skipcomplete ? 'skip' : 'wait';
 
 		my $src_sid = PVE::Storage::parse_volume_id($drive->{file});
 		my $storage_list = [ $src_sid ];
@@ -2946,7 +2947,7 @@ __PACKAGE__->register_method({
 
 		my $newdrive = PVE::QemuServer::clone_disk($storecfg, $vmid, $running, $opt, $drive, $snapname,
 							    $newid, $storage, $format, $fullclone->{$opt}, $newvollist,
-							    $jobs, $skipcomplete, $oldconf->{agent}, $clonelimit);
+							    $jobs, $completion, $oldconf->{agent}, $clonelimit);
 
 		$newconf->{$opt} = PVE::QemuServer::print_drive($newdrive);
diff --git a/PVE/QemuMigrate.pm b/PVE/QemuMigrate.pm
index 44e4c57d..10c0ff24 100644
--- a/PVE/QemuMigrate.pm
+++ b/PVE/QemuMigrate.pm
@@ -703,7 +703,7 @@ sub phase2 {
 	    my $bwlimit = PVE::Storage::get_bandwidth_limit('migration', [$source_sid, $target_sid], $opt_bwlimit);
 
 	    $self->log('info', "$drive: start migration to $nbd_uri");
-	    PVE::QemuServer::qemu_drive_mirror($vmid, $drive, $nbd_uri, $vmid, undef, $self->{storage_migration_jobs}, 1, undef, $bwlimit);
+	    PVE::QemuServer::qemu_drive_mirror($vmid, $drive, $nbd_uri, $vmid, undef, $self->{storage_migration_jobs}, 'skip', undef, $bwlimit);
 	}
     }
 
@@ -968,7 +968,7 @@ sub phase3_cleanup {
 
     if ($self->{storage_migration}) {
 	# finish block-job
-	eval { PVE::QemuServer::qemu_drive_mirror_monitor($vmid, undef, $self->{storage_migration_jobs}); };
+	eval { PVE::QemuServer::qemu_drive_mirror_monitor($vmid, undef, $self->{storage_migration_jobs}, 'wait_noswap'); };
 
 	if (my $err = $@) {
 	    eval { PVE::QemuServer::qemu_blockjobs_cancel($vmid, $self->{storage_migration_jobs}) };
diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm
index b2ff5159..429ec05c 100644
--- a/PVE/QemuServer.pm
+++ b/PVE/QemuServer.pm
@@ -6521,7 +6521,7 @@ sub qemu_img_format {
 }
 
 sub qemu_drive_mirror {
-    my ($vmid, $drive, $dst_volid, $vmiddst, $is_zero_initialized, $jobs, $skipcomplete, $qga, $bwlimit) = @_;
+    my ($vmid, $drive, $dst_volid, $vmiddst, $is_zero_initialized, $jobs, $completion, $qga, $bwlimit) = @_;
 
     $jobs = {} if !$jobs;
 
@@ -6563,11 +6563,13 @@ sub qemu_drive_mirror {
 	die "mirroring error: $err\n";
     }
 
-    qemu_drive_mirror_monitor ($vmid, $vmiddst, $jobs, $skipcomplete, $qga);
+    qemu_drive_mirror_monitor ($vmid, $vmiddst, $jobs, $completion, $qga);
 }
 
 sub qemu_drive_mirror_monitor {
-    my ($vmid, $vmiddst, $jobs, $skipcomplete, $qga) = @_;
+    my ($vmid, $vmiddst, $jobs, $completion, $qga) = @_;
+
+    $completion //= 'wait'; # same semantic as with 'skipcomplete' before
 
     eval {
 	my $err_complete = 0;
@@ -6612,7 +6614,7 @@ sub qemu_drive_mirror_monitor {
 
 	    if ($readycounter == scalar(keys %$jobs)) {
 		print "all mirroring jobs are ready \n";
-		last if $skipcomplete; #do the complete later
+		last if $completion eq 'skip'; #do the complete later
 
 		if ($vmiddst && $vmiddst != $vmid) {
 		    my $agent_running = $qga && qga_check_running($vmid);
@@ -6642,7 +6644,15 @@ sub qemu_drive_mirror_monitor {
 		    # try to switch the disk if source and destination are on the same guest
 		    print "$job: Completing block job...\n";
 
-		    eval { mon_cmd($vmid, "block-job-complete", device => $job) };
+		    my $op;
+		    if ($completion eq 'wait') {
+			$op = 'block-job-complete';
+		    } elsif ($completion eq 'wait_noswap') {
+			$op = 'block-job-cancel';
+		    } else {
+			die "invalid completion value: $completion\n";
+		    }
+		    eval { mon_cmd($vmid, $op, device => $job) };
 		    if ($@ =~ m/cannot be completed/) {
 			print "$job: Block job cannot be completed, try again.\n";
 			$err_complete++;
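
A short usage sketch of the new $completion argument, assuming the call sites from the hunks above; the comments summarize the intended semantics and are illustrative only, not part of the patch:

    # 'skip' - return as soon as all mirror jobs are ready; the caller
    #          completes them later (same as the old $skipcomplete = 1)
    PVE::QemuServer::qemu_drive_mirror($vmid, $drive, $nbd_uri, $vmid, undef,
	$self->{storage_migration_jobs}, 'skip', undef, $bwlimit);

    # 'wait' - wait until ready, then issue block-job-complete, switching the
    #          VM over to the destination disk (old default, $skipcomplete falsy)
    PVE::QemuServer::qemu_drive_mirror_monitor($vmid, $vmiddst, $jobs, 'wait', $qga);

    # 'wait_noswap' - wait until ready, then issue block-job-cancel instead of
    #          block-job-complete, so the mirror is finished without swapping
    #          the VM over to the destination disk
    PVE::QemuServer::qemu_drive_mirror_monitor($vmid, undef,
	$self->{storage_migration_jobs}, 'wait_noswap');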