mirror of https://git.proxmox.com/git/qemu-server
qemu-server: add support for unsecure migration (setting in datacenter.cfg)
This patch adds support for unsecure migration using a direct tcp connection
KVM <=> KVM instead of an extra SSH tunnel. Without SSH, the limit is just the
bandwidth, no longer the CPU / a single core.

You can enable this by adding:

    migration_unsecure: 1

to datacenter.cfg.

Examples use qemu 1.4, as migration with qemu 1.3 still does not work for me.

Current default, with SSH tunnel (VM uses 2GB mem):

Dec 27 21:10:32 starting migration of VM 105 to node 'cloud1-1202' (10.255.0.20)
Dec 27 21:10:32 copying disk images
Dec 27 21:10:32 starting VM 105 on remote node 'cloud1-1202'
Dec 27 21:10:35 starting ssh migration tunnel
Dec 27 21:10:36 starting online/live migration on localhost:60000
Dec 27 21:10:36 migrate_set_speed: 8589934592
Dec 27 21:10:36 migrate_set_downtime: 1
Dec 27 21:10:38 migration status: active (transferred 152481002, remaining 1938546688), total 2156396544) , expected downtime 0
Dec 27 21:10:40 migration status: active (transferred 279836995, remaining 1811140608), total 2156396544) , expected downtime 0
Dec 27 21:10:42 migration status: active (transferred 421265271, remaining 1669840896), total 2156396544) , expected downtime 0
Dec 27 21:10:44 migration status: active (transferred 570987974, remaining 1520152576), total 2156396544) , expected downtime 0
Dec 27 21:10:46 migration status: active (transferred 721469404, remaining 1369939968), total 2156396544) , expected downtime 0
Dec 27 21:10:48 migration status: active (transferred 875595258, remaining 1216057344), total 2156396544) , expected downtime 0
Dec 27 21:10:50 migration status: active (transferred 1034654822, remaining 1056931840), total 2156396544) , expected downtime 0
Dec 27 21:10:54 migration status: active (transferred 1176288424, remaining 915369984), total 2156396544) , expected downtime 0
Dec 27 21:10:56 migration status: active (transferred 1339734759, remaining 752050176), total 2156396544) , expected downtime 0
Dec 27 21:10:58 migration status: active (transferred 1503743261, remaining 588206080), total 2156396544) , expected downtime 0
Dec 27 21:11:02 migration status: active (transferred 1645097827, remaining 446906368), total 2156396544) , expected downtime 0
Dec 27 21:11:04 migration status: active (transferred 1810562934, remaining 281751552), total 2156396544) , expected downtime 0
Dec 27 21:11:06 migration status: active (transferred 1964377505, remaining 126033920), total 2156396544) , expected downtime 0
Dec 27 21:11:08 migration status: active (transferred 2077930417, remaining 0), total 2156396544) , expected downtime 0
Dec 27 21:11:09 migration speed: 62.06 MB/s - downtime 37 ms
Dec 27 21:11:09 migration status: completed
Dec 27 21:11:13 migration finished successfuly (duration 00:00:41)
TASK OK

With unsecure migration, without SSH tunnel:

Dec 27 22:43:14 starting migration of VM 105 to node 'cloud1-1203' (10.255.0.22)
Dec 27 22:43:14 copying disk images
Dec 27 22:43:14 starting VM 105 on remote node 'cloud1-1203'
Dec 27 22:43:17 starting online/live migration on 10.255.0.22:60000
Dec 27 22:43:17 migrate_set_speed: 8589934592
Dec 27 22:43:17 migrate_set_downtime: 1
Dec 27 22:43:19 migration speed: 1024.00 MB/s - downtime 1100 ms
Dec 27 22:43:19 migration status: completed
Dec 27 22:43:22 migration finished successfuly (duration 00:00:09)
TASK OK
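For reference, the resulting cluster-wide config might look like this (the
keyboard entry is only an illustrative pre-existing line, not part of this
change):

    # /etc/pve/datacenter.cfg
    keyboard: en-us
    migration_unsecure: 1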
parent 7c14dcae1f
commit 5bc1e0397e
PVE/QemuMigrate.pm

@@ -76,7 +76,9 @@ sub finish_command_pipe {
 sub fork_tunnel {
     my ($self, $nodeip, $lport, $rport) = @_;
 
-    my $cmd = [@{$self->{rem_ssh}}, '-L', "$lport:localhost:$rport",
+    my @localtunnelinfo = (defined $lport) ? qw(-L $lport:localhost:$rport) : ();
+
+    my $cmd = [@{$self->{rem_ssh}}, @localtunnelinfo,
                'qm', 'mtunnel' ];
 
     my $tunnel = $self->fork_command_pipe($cmd);
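One caveat when reading this hunk: Perl's qw() does not interpolate variables,
so the -L option above receives the literal string '$lport:localhost:$rport'.
A sketch of an interpolating equivalent (not what this commit ships):

    my @localtunnelinfo = (defined $lport) ? ('-L', "$lport:localhost:$rport") : ();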
@@ -307,8 +309,8 @@ sub phase2 {
 
     $self->log('info', "starting VM $vmid on remote node '$self->{node}'");
 
+    my $raddr;
     my $rport;
-
     my $nodename = PVE::INotify::nodename();
 
     ## start on remote node
@@ -333,9 +335,15 @@ sub phase2 {
     PVE::Tools::run_command($cmd, input => $spice_ticket, outfunc => sub {
         my $line = shift;
 
-        if ($line =~ m/^migration listens on port (\d+)$/) {
+        if ($line =~ m/^migration listens on tcp:([\d\.]+|localhost):(\d+)$/) {
+            $raddr = $1;
+            $rport = int($2);
+        }
+        elsif ($line =~ m/^migration listens on port (\d+)$/) {
+            $raddr = "localhost";
             $rport = int($1);
-        }elsif ($line =~ m/^spice listens on port (\d+)$/) {
+        }
+        elsif ($line =~ m/^spice listens on port (\d+)$/) {
             $spice_port = int($1);
         }
     }, errfunc => sub {
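The two branches above match the new and the old announcement lines printed by
qm start on the target node, e.g. (values are illustrative, taken from the
logs in the commit message):

    migration listens on tcp:10.255.0.22:60000
    migration listens on port 60000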
@@ -343,18 +351,16 @@ sub phase2 {
         $self->log('info', $line);
     });
 
-    die "unable to detect remote migration port\n" if !$rport;
-
-    $self->log('info', "starting migration tunnel");
+    die "unable to detect remote migration address\n" if !$raddr;
 
     ## create tunnel to remote port
-    my $lport = PVE::Tools::next_migrate_port();
+    $self->log('info', "starting ssh migration tunnel");
+    my $lport = ($raddr eq "localhost") ? PVE::Tools::next_migrate_port() : undef;
     $self->{tunnel} = $self->fork_tunnel($self->{nodeip}, $lport, $rport);
 
-    $self->log('info', "starting online/live migration on port $lport");
-    # start migration
-
     my $start = time();
+    $self->log('info', "starting online/live migration on $raddr:$rport");
+    $self->{livemigration} = 1;
 
     # load_defaults
     my $defaults = PVE::QemuServer::load_defaults();
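A short summary of the two code paths this hunk creates (illustrative notes,
not part of the commit):

    # secure (default):  $raddr eq 'localhost', so $lport is allocated and the
    #                    tunnel runs: ssh ... -L $lport:localhost:$rport qm mtunnel
    # unsecure:          $raddr is the target node's IP, $lport stays undef,
    #                    fork_tunnel starts a plain 'qm mtunnel', and QEMU later
    #                    connects directly to tcp:$raddr:$rport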
@@ -415,9 +421,10 @@ sub phase2 {
     }
 
     eval {
-        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate", uri => "tcp:localhost:$lport");
+        PVE::QemuServer::vm_mon_cmd_nocheck($vmid, "migrate", uri => "tcp:$raddr:$rport");
     };
     my $merr = $@;
+    $self->log('info', "migrate uri => tcp:$raddr:$rport failed: $merr") if $merr;
 
     my $lstat = 0;
     my $usleep = 2000000;
@@ -569,8 +576,8 @@ sub phase3_cleanup {
     die "Failed to move config to node '$self->{node}' - rename failed: $!\n"
         if !rename($conffile, $newconffile);
 
-    # now that config file is move, we can resume vm on target if livemigrate
-    if ($self->{tunnel}) {
+    if ($self->{livemigration}) {
+        # now that config file is move, we can resume vm on target if livemigrate
         my $cmd = [@{$self->{rem_ssh}}, 'qm', 'resume', $vmid, '--skiplock'];
         eval{ PVE::Tools::run_command($cmd, outfunc => sub {},
             errfunc => sub {
PVE/QemuServer.pm

@@ -3053,11 +3053,17 @@ sub vm_start {
     my ($cmd, $vollist, $spice_port) = config_to_command($storecfg, $vmid, $conf, $defaults, $forcemachine);
 
     my $migrate_port = 0;
-
+    my $migrate_uri;
     if ($statefile) {
         if ($statefile eq 'tcp') {
+            my $localip = "localhost";
+            my $datacenterconf = PVE::Cluster::cfs_read_file('datacenter.cfg');
+            if ($datacenterconf->{migration_unsecure}) {
+                my $nodename = PVE::INotify::nodename();
+                $localip = PVE::Cluster::remote_node_ip($nodename, 1);
+            }
             $migrate_port = PVE::Tools::next_migrate_port();
-            my $migrate_uri = "tcp:localhost:${migrate_port}";
+            $migrate_uri = "tcp:${localip}:${migrate_port}";
             push @$cmd, '-incoming', $migrate_uri;
             push @$cmd, '-S';
         } else {
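Put together, when migration_unsecure is set the target's kvm process is
started with something like the following (IP and port are illustrative):

    kvm ... -incoming tcp:10.255.0.22:60000 -S

and vm_start then prints the corresponding "migration listens on
tcp:10.255.0.22:60000" line, which phase2 on the source parses to pick the
direct tcp URI (see the next hunk).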
@@ -3085,7 +3091,7 @@ sub vm_start {
     my $err = $@;
     die "start failed: $err" if $err;
 
-    print "migration listens on port $migrate_port\n" if $migrate_port;
+    print "migration listens on $migrate_uri\n" if $migrate_uri;
 
     if ($statefile && $statefile ne 'tcp') {
         eval { vm_mon_cmd_nocheck($vmid, "cont"); };