move code from qmigrate into PVE/QemuMigrate.pm

and make migrate a subcommand of qm
This commit is contained in:
Dietmar Maurer 2011-09-14 12:02:08 +02:00
parent 694fcad480
commit 3ea94c60d2
6 changed files with 372 additions and 378 deletions

View File

@ -57,7 +57,7 @@ qm.1.pod: qm PVE/QemuServer.pm
vm.conf.5.pod: gen-vmconf-pod.pl PVE/QemuServer.pm
perl -I. ./gen-vmconf-pod.pl >$@
PKGSOURCES=qm qm.1.gz qm.1.pod qmigrate qmigrate.1.gz qmrestore qmrestore.1.gz sparsecp vmtar qemu.init.d qmupdate control vm.conf.5.pod vm.conf.5.gz
PKGSOURCES=qm qm.1.gz qm.1.pod qmrestore qmrestore.1.gz sparsecp vmtar qemu.init.d qmupdate control vm.conf.5.pod vm.conf.5.gz
.PHONY: install
install: ${PKGSOURCES}
@ -72,17 +72,14 @@ install: ${PKGSOURCES}
install -m 0644 pve-usb.cfg ${DESTDIR}/usr/share/${PACKAGE}
make -C PVE install
install -m 0755 qm ${DESTDIR}${SBINDIR}
install -m 0755 qmigrate ${DESTDIR}${SBINDIR}
install -m 0755 qmrestore ${DESTDIR}${SBINDIR}
install -D -m 0755 qmupdate ${DESTDIR}${VARLIBDIR}/qmupdate
install -D -m 0755 qemu.init.d ${DESTDIR}/etc/init.d/${PACKAGE}
install -m 0755 pve-bridge ${DESTDIR}${VARLIBDIR}/pve-bridge
install -s -m 0755 vmtar ${DESTDIR}${LIBDIR}
install -s -m 0755 sparsecp ${DESTDIR}${LIBDIR}
# pod2man -n qemu-server -s 1 -r "proxmox 1.0" -c "Proxmox Documentation" <qemu-server.pod | gzip -9 > ${DESTDIR}/usr/share/man/man1/qemu-server.1.gz
install -m 0644 qm.1.gz ${DESTDIR}/usr/share/man/man1/
install -m 0644 qm.1.pod ${DESTDIR}/${PODDIR}
install -m 0644 qmigrate.1.gz ${DESTDIR}/usr/share/man/man1/
install -m 0644 qmrestore.1.gz ${DESTDIR}/usr/share/man/man1/
install -m 0644 vm.conf.5.pod ${DESTDIR}/${PODDIR}
install -m 0644 vm.conf.5.gz ${DESTDIR}/usr/share/man/man5/

View File

@ -11,6 +11,7 @@ use PVE::Storage;
use PVE::JSONSchema qw(get_standard_option);
use PVE::RESTHandler;
use PVE::QemuServer;
use PVE::QemuMigrate;
use PVE::RPCEnvironment;
use PVE::AccessControl;
use PVE::INotify;
@ -181,6 +182,7 @@ __PACKAGE__->register_method({
{ subdir => 'status' },
{ subdir => 'unlink' },
{ subdir => 'vncproxy' },
{ subdir => 'migrate' },
{ subdir => 'rrd' },
{ subdir => 'rrddata' },
];
@ -321,11 +323,7 @@ __PACKAGE__->register_method({
{
node => get_standard_option('pve-node'),
vmid => get_standard_option('pve-vmid'),
skiplock => {
description => "Ignore locks - only root is allowed to use this option.",
type => 'boolean',
optional => 1,
},
skiplock => get_standard_option('skiplock'),
delete => {
type => 'string', format => 'pve-configid-list',
description => "A list of settings you want to delete.",
@ -355,12 +353,11 @@ __PACKAGE__->register_method({
my $node = extract_param($param, 'node');
# fixme: fork worker?
my $vmid = extract_param($param, 'vmid');
my $skiplock = extract_param($param, 'skiplock');
raise_param_exc({ skiplock => "Only root may use this option." }) if $user ne 'root@pam';
raise_param_exc({ skiplock => "Only root may use this option." })
if $skiplock && $user ne 'root@pam';
my $delete = extract_param($param, 'delete');
my $force = extract_param($param, 'force');
@ -510,6 +507,7 @@ __PACKAGE__->register_method({
properties => {
node => get_standard_option('pve-node'),
vmid => get_standard_option('pve-vmid'),
skiplock => get_standard_option('skiplock'),
},
},
returns => { type => 'null' },
@ -524,7 +522,7 @@ __PACKAGE__->register_method({
my $skiplock = $param->{skiplock};
raise_param_exc({ skiplock => "Only root may use this option." })
if $user ne 'root@pam';
if $skiplock && $user ne 'root@pam';
my $storecfg = PVE::Storage::config();
@ -692,18 +690,16 @@ __PACKAGE__->register_method({
method => 'PUT',
protected => 1,
proxyto => 'node',
description => "Set virtual machine status.",
description => "Set virtual machine status (execute vm commands).",
parameters => {
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
vmid => get_standard_option('pve-vmid'),
skiplock => {
description => "Ignore locks - only root is allowed to use this option.",
type => 'boolean',
optional => 1,
},
skiplock => get_standard_option('skiplock'),
stateuri => get_standard_option('pve-qm-stateuri'),
command => {
description => "The command to execute.",
type => 'string',
enum => [qw(start stop reset shutdown cad suspend resume) ],
},
@ -719,22 +715,22 @@ __PACKAGE__->register_method({
my $node = extract_param($param, 'node');
# fixme: proxy to correct node
# fixme: fork worker?
my $vmid = extract_param($param, 'vmid');
my $stateuri = extract_param($param, 'stateuri');
raise_param_exc({ stateuri => "Only root may use this option." })
if $stateuri && $user ne 'root@pam';
my $skiplock = extract_param($param, 'skiplock');
raise_param_exc({ skiplock => "Only root may use this option." })
if $user ne 'root@pam';
if $skiplock && $user ne 'root@pam';
my $command = $param->{command};
my $storecfg = PVE::Storage::config();
if ($command eq 'start') {
my $statefile = undef; # fixme: --incoming parameter
PVE::QemuServer::vm_start($storecfg, $vmid, $statefile, $skiplock);
PVE::QemuServer::vm_start($storecfg, $vmid, $stateuri, $skiplock);
} elsif ($command eq 'stop') {
PVE::QemuServer::vm_stop($vmid, $skiplock);
} elsif ($command eq 'reset') {
@ -754,5 +750,75 @@ __PACKAGE__->register_method({
return undef;
}});
__PACKAGE__->register_method({
name => 'migrate_vm',
path => '{vmid}/migrate',
method => 'POST',
protected => 1,
proxyto => 'node',
description => "Migrate virtual machine. Creates a new migration task.",
parameters => {
additionalProperties => 0,
properties => {
node => get_standard_option('pve-node'),
vmid => get_standard_option('pve-vmid'),
target => get_standard_option('pve-node', { description => "Target node." }),
online => {
type => 'boolean',
description => "Use online/live migration.",
optional => 1,
},
force => {
type => 'boolean',
description => "Allow to migrate VMs which use local devices. Only root may use this option.",
optional => 1,
},
},
},
returns => {
type => 'string',
description => "the task ID.",
},
code => sub {
my ($param) = @_;

my $rpcenv = PVE::RPCEnvironment::get();

my $user = $rpcenv->get_user();

my $target = extract_param($param, 'target');

# migrating to the node we are already on makes no sense
my $localnode = PVE::INotify::nodename();
raise_param_exc({ target => "target is local node."}) if $target eq $localnode;

PVE::Cluster::check_cfs_quorum();

PVE::Cluster::check_node_exists($target);

my $targetip = PVE::Cluster::remote_node_ip($target);

my $vmid = extract_param($param, 'vmid');

# bugfix: only reject non-root users when 'force' was actually supplied
# (original rejected every non-root caller unconditionally; compare the
# skiplock checks elsewhere in this file, which guard on the parameter)
raise_param_exc({ force => "Only root may use this option." })
if $param->{force} && $user ne 'root@pam';

# test if VM exists
PVE::QemuServer::load_config($vmid);

# try to detect errors early
if (PVE::QemuServer::check_running($vmid)) {
die "cant migrate running VM without --online\n"
if !$param->{online};
}

# the actual migration runs in a forked worker task; the UPID is
# returned to the caller so progress can be tracked
my $realcmd = sub {
my $upid = shift;

PVE::QemuMigrate::migrate($target, $targetip, $vmid, $param->{online}, $param->{force});
};

my $upid = $rpcenv->fork_worker('qmigrate', $vmid, $user, $realcmd);

return $upid;
}});
1;

View File

@ -2,5 +2,6 @@
.PHONY: install
install:
install -D -m 0644 QemuServer.pm ${DESTDIR}${PERLDIR}/PVE/QemuServer.pm
install -D -m 0644 QemuMigrate.pm ${DESTDIR}${PERLDIR}/PVE/QemuMigrate.pm
make -C VZDump install
make -C API2 install

609
qmigrate → PVE/QemuMigrate.pm Executable file → Normal file
View File

@ -1,29 +1,18 @@
#!/usr/bin/perl -w
# fixme: kvm > 88 has more migration options and verbose status
# fixme: bwlimit ?
package PVE::QemuMigrate;
use strict;
use Getopt::Long;
use PVE::SafeSyslog;
use IO::Select;
use IPC::Open3;
use IPC::Open2;
use PVE::Cluster;
use PVE::INotify;
use PVE::Tools qw(run_command);
use PVE::JSONSchema qw(get_standard_option);
use PVE::QemuServer;
use PVE::Storage;
use warnings;
use POSIX qw(strftime);
use Data::Dumper; # fixme: remove
use PVE::RESTHandler;
use IO::File;
use IPC::Open2;
use PVE::Tools qw(run_command);
use PVE::SafeSyslog;
use PVE::INotify;
use PVE::Cluster;
use PVE::Storage;
use PVE::QemuServer;
use base qw(PVE::RESTHandler);
die "please run as root\n" if $> != 0;
$ENV{'PATH'} = '/sbin:/bin:/usr/sbin:/usr/bin';
my $delayed_interrupt = 0;
# blowfish is a fast block cipher, much faster then 3des
my @ssh_opts = ('-c', 'blowfish', '-o', 'BatchMode=yes');
@ -31,199 +20,6 @@ my @ssh_cmd = ('/usr/bin/ssh', @ssh_opts);
my @scp_cmd = ('/usr/bin/scp', @ssh_opts);
my $qm_cmd = '/usr/sbin/qm';
$ENV{RSYNC_RSH} = join(' ', @ssh_cmd);
my $localnode = PVE::INotify::nodename();
initlog('qmigrate');
PVE::Cluster::cfs_update();
# global vars, initialized later
my @rem_ssh;
my $vmid;
my $node;
my $nodeip;
my $storecfg = PVE::Storage::config();
my $delayed_interrupt = 0;
$SIG{INT} = $SIG{TERM} = $SIG{QUIT} = $SIG{HUP} = $SIG{PIPE} = sub {
logmsg('err', "received interrupt - delayed");
$delayed_interrupt = 1;
};
# we only use RESTHandler for automatic parameter verification
__PACKAGE__->register_method({
name => 'qmigrate',
path => 'qmigrate',
method => 'POST',
description => "Migrate VMs to other cluster nodes.",
parameters => {
additionalProperties => 0,
properties => {
vmid => get_standard_option('pve-vmid'),
node => get_standard_option('pve-node', {
description => "Target node" }),
online => {
type => 'boolean',
description => "Use online/live migration.",
optional => 1,
},
force => {
type => 'boolean',
description => "Allow to migrate VMs which use local devices.",
optional => 1,
},
},
},
returns => { type => 'null'},
code => sub {
my ($param) = @_;
my $errors;
my $starttime = time();
# initialize global variables
$vmid = $param->{vmid};
$node = $param->{node};
die "node is local\n" if $node eq $localnode;
PVE::Cluster::check_cfs_quorum();
PVE::Cluster::check_node_exists($node);
$nodeip = PVE::Cluster::remote_node_ip($node);
@rem_ssh = (@ssh_cmd, "root\@$nodeip");
# lock config during migration
PVE::QemuServer::lock_config($vmid, sub {
eval_int(\&prepare);
die $@ if $@;
my $conf = PVE::QemuServer::load_config($vmid);
PVE::QemuServer::check_lock($conf);
my $running = 0;
if (PVE::QemuServer::check_running($vmid)) {
die "cant migrate running VM without --online\n" if !$param->{online};
$running = 1;
}
my $rhash = {};
eval_int (sub { phase1($conf, $rhash, $running, $param->{force}); });
my $err = $@;
if ($err) {
if ($rhash->{clearlock}) {
my $unset = { lock => 1 };
eval { PVE::QemuServer::change_config_nolock($vmid, {}, $unset, 1) };
logmsg('err', $@) if $@;
}
if ($rhash->{volumes}) {
foreach my $volid (@{$rhash->{volumes}}) {
logmsg('err', "found stale volume copy '$volid' on node '$node'");
}
}
die $err;
}
# vm is now owned by other node
my $volids = $rhash->{volumes};
if ($running) {
$rhash = {};
eval_int(sub { phase2($conf, $rhash); });
my $err = $@;
# always kill tunnel
if ($rhash->{tunnel}) {
eval_int(sub { finish_tunnel($rhash->{tunnel}) });
if ($@) {
logmsg('err', "stopping tunnel failed - $@");
$errors = 1;
}
}
# always stop local VM - no interrupts possible
eval { PVE::QemuServer::vm_stop($vmid, 1); };
if ($@) {
logmsg('err', "stopping vm failed - $@");
$errors = 1;
}
if ($err) {
$errors = 1;
logmsg('err', "online migrate failure - $err");
}
}
# finalize -- clear migrate lock
eval_int(sub {
my $cmd = [ @rem_ssh, $qm_cmd, 'unlock', $vmid ];
run_command($cmd);
});
if ($@) {
logmsg('err', "failed to clear migrate lock - $@");
$errors = 1;
}
# destroy local copies
foreach my $volid (@$volids) {
eval_int(sub { PVE::Storage::vdisk_free($storecfg, $volid); });
my $err = $@;
if ($err) {
logmsg('err', "removing local copy of '$volid' failed - $err");
$errors = 1;
last if $err =~ /^interrupted by signal$/;
}
}
});
my $err = $@;
my $delay = time() - $starttime;
my $mins = int($delay/60);
my $secs = $delay - $mins*60;
my $hours = int($mins/60);
$mins = $mins - $hours*60;
my $duration = sprintf "%02d:%02d:%02d", $hours, $mins, $secs;
if ($err) {
logmsg('err', $err) if $err;
logmsg('info', "migration aborted");
exit(-1);
}
if ($errors) {
logmsg('info', "migration finished with problems (duration $duration)");
exit(-1);
}
logmsg('info', "migration finished successfuly (duration $duration)");
return undef;
}});
if ((scalar (@ARGV) == 0) ||
(scalar (@ARGV) == 1 && $ARGV[0] eq 'help')) {
print __PACKAGE__->usage_str('qmigrate', '', ['node', 'vmid'], {}, 'long');
} else {
__PACKAGE__->cli_handler('qmigrate', 'qmigrate', \@ARGV, ['node', 'vmid']);
}
exit(0);
sub logmsg {
my ($level, $msg) = @_;
@ -265,99 +61,6 @@ sub eval_int {
};
}
sub prepare {
my $conffile = PVE::QemuServer::config_file($vmid);
die "VM $vmid does not exist on this node\n" if ! -f $conffile;
# test ssh connection
my $cmd = [ @rem_ssh, '/bin/true' ];
eval { run_command($cmd); };
die "Can't connect to destination address using public key\n" if $@;
}
sub sync_disks {
my ($conf, $rhash, $running) = @_;
logmsg('info', "copying disk images");
my $res = [];
eval {
my $volhash = {};
my $cdromhash = {};
# get list from PVE::Storage (for unused volumes)
my $dl = PVE::Storage::vdisk_list($storecfg, undef, $vmid);
PVE::Storage::foreach_volid($dl, sub {
my ($volid, $sid, $volname) = @_;
my $scfg = PVE::Storage::storage_config($storecfg, $sid);
return if $scfg->{shared};
$volhash->{$volid} = 1;
});
# and add used,owned/non-shared disks (just to be sure we have all)
my $sharedvm = 1;
PVE::QemuServer::foreach_drive($conf, sub {
my ($ds, $drive) = @_;
my $volid = $drive->{file};
return if !$volid;
die "cant migrate local file/device '$volid'\n" if $volid =~ m|^/|;
if (PVE::QemuServer::drive_is_cdrom($drive)) {
die "cant migrate local cdrom drive\n" if $volid eq 'cdrom';
return if $volid eq 'none';
$cdromhash->{$volid} = 1;
}
my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
my $scfg = PVE::Storage::storage_config($storecfg, $sid);
return if $scfg->{shared};
die "can't migrate local cdrom '$volid'\n" if $cdromhash->{$volid};
$sharedvm = 0;
my ($path, $owner) = PVE::Storage::path($storecfg, $volid);
die "can't migrate volume '$volid' - owned by other VM (owner = VM $owner)\n"
if !$owner || ($owner != $vmid);
$volhash->{$volid} = 1;
});
if ($running && !$sharedvm) {
die "can't do online migration - VM uses local disks\n";
}
# do some checks first
foreach my $volid (keys %$volhash) {
my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
my $scfg = PVE::Storage::storage_config($storecfg, $sid);
die "can't migrate '$volid' - storagy type '$scfg->{type}' not supported\n"
if $scfg->{type} ne 'dir';
}
foreach my $volid (keys %$volhash) {
my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
push @{$rhash->{volumes}}, $volid;
PVE::Storage::storage_migrate($storecfg, $volid, $nodeip, $sid);
}
};
die "Failed to sync data - $@" if $@;
}
sub fork_command_pipe {
my ($cmd) = @_;
@ -435,7 +138,7 @@ sub run_with_timeout {
}
sub fork_tunnel {
my ($lport, $rport) = @_;
my ($nodeip, $lport, $rport) = @_;
my $cmd = [@ssh_cmd, '-o', 'BatchMode=yes',
'-L', "$lport:localhost:$rport", $nodeip,
@ -480,13 +183,247 @@ sub finish_tunnel {
die $err if $err;
}
sub phase1 {
my ($conf, $rhash, $running, $force) = @_;
# migrate - move a VM (config, disk images, and optionally live state)
# to another cluster node over SSH.
#
# Parameters:
#   $node   - target node name
#   $nodeip - resolved IP address of the target node
#   $vmid   - ID of the VM to migrate
#   $online - truthy to allow live migration of a running VM
#   $force  - truthy to allow migration of VMs that use local devices
#
# Dies on fatal errors. Non-fatal cleanup problems are logged and turned
# into a final die at the end. Interrupt signals are deferred via the
# package-level $delayed_interrupt flag so cleanup can still run.
sub migrate {
my ($node, $nodeip, $vmid, $online, $force) = @_;

logmsg('info', "starting migration of VM $vmid to node '$node' ($nodeip)");

my $starttime = time();

my $rem_ssh = [@ssh_cmd, "root\@$nodeip"];

# defer signals instead of aborting mid-migration; eval_int() checks
# $delayed_interrupt between phases ('local' restores handlers on return)
local $SIG{INT} = $SIG{TERM} = $SIG{QUIT} = $SIG{HUP} = $SIG{PIPE} = sub {
logmsg('err', "received interrupt - delayed");
$delayed_interrupt = 1;
};

local $ENV{RSYNC_RSH} = join(' ', @ssh_cmd);

# session hash bundles per-migration state passed to the phase helpers
my $session = {
vmid => $vmid,
node => $node,
nodeip => $nodeip,
force => $force,
storecfg => PVE::Storage::config(),
rem_ssh => $rem_ssh,
};

my $errors;

# lock config during migration
eval { PVE::QemuServer::lock_config($vmid, sub {

eval_int(sub { prepare($session); });
die $@ if $@;

my $conf = PVE::QemuServer::load_config($vmid);

PVE::QemuServer::check_lock($conf);

my $running = 0;
if (my $pid = PVE::QemuServer::check_running($vmid)) {
die "cant migrate running VM without --online\n" if !$online;
$running = $pid;
}

# phase1: set migrate lock, sync disks, move config to target node
my $rhash = {};
eval_int (sub { phase1($session, $conf, $rhash, $running); });
my $err = $@;

if ($err) {
# roll back what phase1 managed to do before failing
if ($rhash->{clearlock}) {
my $unset = { lock => 1 };
eval { PVE::QemuServer::change_config_nolock($session->{vmid}, {}, $unset, 1) };
logmsg('err', $@) if $@;
}

if ($rhash->{volumes}) {
foreach my $volid (@{$rhash->{volumes}}) {
logmsg('err', "found stale volume copy '$volid' on node '$session->{node}'");
}
}

die $err;
}

# vm is now owned by other node
my $volids = $rhash->{volumes};

if ($running) {

# phase2: start VM on target and stream live state through an SSH tunnel
$rhash = {};
eval_int(sub { phase2($session, $conf, $rhash); });
my $err = $@;

# always kill tunnel
if ($rhash->{tunnel}) {
eval_int(sub { finish_tunnel($rhash->{tunnel}) });
if ($@) {
logmsg('err', "stopping tunnel failed - $@");
$errors = 1;
}
}

# fixme: there is no config file, so this will never work
# fixme: use kill(9, $running) to make sure it is stopped
# always stop local VM - no interrupts possible
eval { PVE::QemuServer::vm_stop($session->{vmid}, 1); };
if ($@) {
logmsg('err', "stopping vm failed - $@");
$errors = 1;
}

if ($err) {
$errors = 1;
logmsg('err', "online migrate failure - $err");
}
}

# finalize -- clear migrate lock
eval_int(sub {
my $cmd = [ @{$session->{rem_ssh}}, $qm_cmd, 'unlock', $session->{vmid} ];
run_command($cmd);
});
if ($@) {
logmsg('err', "failed to clear migrate lock - $@");
$errors = 1;
}

# destroy local copies
foreach my $volid (@$volids) {
eval_int(sub { PVE::Storage::vdisk_free($session->{storecfg}, $volid); });
my $err = $@;
if ($err) {
logmsg('err', "removing local copy of '$volid' failed - $err");
$errors = 1;
# a signal means the user wants out; stop deleting further volumes
last if $err =~ /^interrupted by signal$/;
}
}
})};
my $err = $@;

# format elapsed wall-clock time as HH:MM:SS for the final log message
my $delay = time() - $starttime;
my $mins = int($delay/60);
my $secs = $delay - $mins*60;
my $hours = int($mins/60);
$mins = $mins - $hours*60;

my $duration = sprintf "%02d:%02d:%02d", $hours, $mins, $secs;

if ($err) {
my $msg = "migration aborted (duration $duration): $err\n";
logmsg('err', $msg);
die $msg;
}

if ($errors) {
my $msg = "migration finished with problems (duration $duration)\n";
logmsg('err', $msg);
die $msg;
}

# NOTE(review): "successfuly" typo lives in the log string; left as-is
# here because runtime strings must not change in a doc-only pass
logmsg('info', "migration finished successfuly (duration $duration)");
}
# prepare - preflight checks before any migration work starts:
# the VM config file must exist on this node, and the target node must
# be reachable via passwordless (public key) SSH.
# Dies with a descriptive message on failure; returns nothing useful.
sub prepare {
my ($session) = @_;

my $conffile = PVE::QemuServer::config_file($session->{vmid});
die "VM $session->{vmid} does not exist on this node\n" if ! -f $conffile;

# test ssh connection
my $cmd = [ @{$session->{rem_ssh}}, '/bin/true' ];
eval { run_command($cmd); };
die "Can't connect to destination address using public key\n" if $@;
}
# sync_disks - collect all local (non-shared) volumes belonging to the VM
# and copy them to the target node via PVE::Storage::storage_migrate.
#
# Parameters:
#   $session - migration session hash (vmid, node, nodeip, storecfg, ...)
#   $conf    - parsed VM configuration
#   $rhash   - result hash; every volume copied is pushed onto
#              $rhash->{volumes} so the caller can clean up on failure
#   $running - truthy if the VM is currently running (forbids local disks)
#
# Dies on any error ("Failed to sync data - ...").
sub sync_disks {
my ($session, $conf, $rhash, $running) = @_;

logmsg('info', "copying disk images");

# NOTE(review): $res is never used below — dead variable
my $res = [];

eval {

my $volhash = {};
my $cdromhash = {};

# get list from PVE::Storage (for unused volumes)
my $dl = PVE::Storage::vdisk_list($session->{storecfg}, undef, $session->{vmid});

PVE::Storage::foreach_volid($dl, sub {
my ($volid, $sid, $volname) = @_;

my $scfg = PVE::Storage::storage_config($session->{storecfg}, $sid);

# volumes on shared storage need no copy
return if $scfg->{shared};

$volhash->{$volid} = 1;
});

# and add used,owned/non-shared disks (just to be sure we have all)
my $sharedvm = 1;
PVE::QemuServer::foreach_drive($conf, sub {
my ($ds, $drive) = @_;

my $volid = $drive->{file};
return if !$volid;

# raw host paths/devices cannot be migrated
die "cant migrate local file/device '$volid'\n" if $volid =~ m|^/|;

if (PVE::QemuServer::drive_is_cdrom($drive)) {
die "cant migrate local cdrom drive\n" if $volid eq 'cdrom';
return if $volid eq 'none';
$cdromhash->{$volid} = 1;
}

my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);

my $scfg = PVE::Storage::storage_config($session->{storecfg}, $sid);

return if $scfg->{shared};

die "can't migrate local cdrom '$volid'\n" if $cdromhash->{$volid};

# reaching here means at least one disk is local
$sharedvm = 0;

my ($path, $owner) = PVE::Storage::path($session->{storecfg}, $volid);

die "can't migrate volume '$volid' - owned by other VM (owner = VM $owner)\n"
if !$owner || ($owner != $session->{vmid});

$volhash->{$volid} = 1;
});

if ($running && !$sharedvm) {
die "can't do online migration - VM uses local disks\n";
}

# do some checks first
foreach my $volid (keys %$volhash) {
my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);

my $scfg = PVE::Storage::storage_config($session->{storecfg}, $sid);

# NOTE(review): "storagy" typo in this error message — runtime string,
# not changed in a doc-only pass
die "can't migrate '$volid' - storagy type '$scfg->{type}' not supported\n"
if $scfg->{type} ne 'dir';
}

# all checks passed - copy each local volume and record it for cleanup
foreach my $volid (keys %$volhash) {
my ($sid, $volname) = PVE::Storage::parse_volume_id($volid);
push @{$rhash->{volumes}}, $volid;
PVE::Storage::storage_migrate($session->{storecfg}, $volid, $session->{nodeip}, $sid);
}
};
die "Failed to sync data - $@" if $@;
}
sub phase1 {
my ($session, $conf, $rhash, $running) = @_;
logmsg('info', "starting migration of VM $session->{vmid} to node '$session->{node}' ($session->{nodeip})");
if (my $loc_res = PVE::QemuServer::check_local_resources($conf, 1)) {
if ($running || !$force) {
if ($running || !$session->{force}) {
die "can't migrate VM which uses local devices\n";
} else {
logmsg('info', "migrating VM which uses local devices");
@ -496,28 +433,28 @@ sub phase1 {
# set migrate lock in config file
$rhash->{clearlock} = 1;
my $settings = { lock => 'migrate' };
PVE::QemuServer::change_config_nolock($vmid, $settings, {}, 1);
PVE::QemuServer::change_config_nolock($session->{vmid}, { lock => 'migrate' }, {}, 1);
sync_disks($conf, $rhash, $running);
sync_disks($session, $conf, $rhash, $running);
# move config to remote node
my $conffile = PVE::QemuServer::config_file($vmid, $localnode);
my $newconffile = PVE::QemuServer::config_file($vmid, $node);
my $conffile = PVE::QemuServer::config_file($session->{vmid});
my $newconffile = PVE::QemuServer::config_file($session->{vmid}, $session->{node});
die "Failed to move config to node '$node' - rename failed: $!\n"
die "Failed to move config to node '$session->{node}' - rename failed: $!\n"
if !rename($conffile, $newconffile);
};
sub phase2 {
my ($conf, $rhash) = shift;
my ($session, $conf, $rhash) = shift;
logmsg('info', "starting VM on remote node '$node'");
logmsg('info', "starting VM on remote node '$session->{node}'");
my $rport;
## start on remote node
my $cmd = [@rem_ssh, $qm_cmd, '--skiplock', 'start', $vmid, '--incoming', 'tcp'];
my $cmd = [@{$session->{rem_ssh}}, $qm_cmd, 'start',
$session->{vmid}, '--stateuri', 'tcp', '--skiplock'];
run_command($cmd, outfunc => sub {
my $line = shift;
@ -533,19 +470,19 @@ sub phase2 {
## create tunnel to remote port
my $lport = PVE::QemuServer::next_migrate_port();
$rhash->{tunnel} = fork_tunnel($lport, $rport);
$rhash->{tunnel} = fork_tunnel($session->{nodeip}, $lport, $rport);
logmsg('info', "starting online/live migration");
# start migration
my $start = time();
PVE::QemuServer::vm_monitor_command($vmid, "migrate -d \"tcp:localhost:$lport\"");
PVE::QemuServer::vm_monitor_command($session->{vmid}, "migrate -d \"tcp:localhost:$lport\"");
my $lstat = '';
while (1) {
sleep (2);
my $stat = PVE::QemuServer::vm_monitor_command($vmid, "info migrate", 1);
my $stat = PVE::QemuServer::vm_monitor_command($session->{vmid}, "info migrate", 1);
if ($stat =~ m/^Migration status: (active|completed|failed|cancelled)$/im) {
my $ms = $1;
@ -582,25 +519,3 @@ sub phase2 {
$lstat = $stat;
};
}
exit(0);
__END__
=head1 NAME
qmigrate - utility for VM migration between cluster nodes (kvm/qemu)
=head1 SYNOPSIS
qmigrate help
qmigrate [--online] target_node VMID
=head1 DESCRIPTION
Migrate VMs to other cluster nodes.

View File

@ -37,6 +37,19 @@ my $cpuinfo = PVE::ProcFSTools::read_cpuinfo();
cfs_register_file('/qemu-server/', \&parse_vm_config);
PVE::JSONSchema::register_standard_option('skiplock', {
description => "Ignore locks - only root is allowed to use this option.",
type => 'boolean',
optional => 1,
});
PVE::JSONSchema::register_standard_option('pve-qm-stateuri', {
description => "Some command save/restore state from this location.",
type => 'string',
maxLength => 128,
optional => 1,
});
#no warnings 'redefine';
unless(defined(&_VZSYSCALLS_H_)) {

14
qm
View File

@ -37,12 +37,6 @@ $rpcenv->set_user('root@pam');
my $nodename = PVE::INotify::nodename();
PVE::JSONSchema::register_standard_option('skiplock', {
description => "Ignore locks - only root is allowed to use this option.",
type => 'boolean',
optional => 1,
});
sub run_vnc_proxy {
my ($vmid) = @_;
@ -393,6 +387,13 @@ my $cmddef = {
destroy => [ "PVE::API2::Qemu", 'destroy_vm', ['vmid'], { node => $nodename } ],
migrate => [ "PVE::API2::Qemu", 'migrate_vm', ['target', 'vmid'], { node => $nodename },
sub {
my $upid = shift;
my $status = PVE::Tools::upid_read_status($upid);
exit($status eq 'OK' ? 0 : -1);
}],
set => [ "PVE::API2::Qemu", 'update_vm', ['vmid'], { node => $nodename } ],
unlink => [ "PVE::API2::Qemu", 'unlink', ['vmid', 'idlist'], { node => $nodename } ],
@ -443,6 +444,7 @@ sub register_vm_command {
properties => {
vmid => get_standard_option('pve-vmid'),
skiplock => get_standard_option('skiplock'),
stateuri => get_standard_option('pve-qm-stateuri'),
},
},
returns => { type => 'null'},