activate LVM LVs more carefully

And remove syslog whenever possible (we run most commands as tasks, so
the output is logged anyway)
This commit is contained in:
Dietmar Maurer 2011-11-25 08:05:36 +01:00
parent 48e1a963f4
commit ff1a2432c8
6 changed files with 230 additions and 216 deletions

View File

@ -2,7 +2,7 @@ RELEASE=2.0
VERSION=2.0
PACKAGE=qemu-server
PKGREL=6
PKGREL=7
DESTDIR=
PREFIX=/usr

View File

@ -622,6 +622,10 @@ __PACKAGE__->register_method({
my $storecfg = PVE::Storage::config();
my $realcmd = sub {
my $upid = shift;
syslog('info', "destroy VM $vmid: $upid\n");
PVE::QemuServer::vm_destroy($storecfg, $vmid, $skiplock);
};
@ -807,7 +811,7 @@ __PACKAGE__->register_method({
# test if VM exists
my $conf = PVE::QemuServer::load_config($param->{vmid});
my $vmstatus = PVE::QemuServer::vmstatus($param->{vmid});
my $vmstatus = PVE::QemuServer::vmstatus($param->{vmid});
return $vmstatus->{$param->{vmid}};
}});
@ -850,6 +854,8 @@ __PACKAGE__->register_method({
raise_param_exc({ skiplock => "Only root may use this option." })
if $skiplock && $user ne 'root@pam';
die "VM $vmid already running\n" if PVE::QemuServer::check_running($vmid);
my $storecfg = PVE::Storage::config();
my $realcmd = sub {
@ -904,27 +910,16 @@ __PACKAGE__->register_method({
raise_param_exc({ skiplock => "Only root may use this option." })
if $skiplock && $user ne 'root@pam';
die "VM $vmid not running\n" if !PVE::QemuServer::check_running($vmid);
my $storecfg = PVE::Storage::config();
my $realcmd = sub {
my $upid = shift;
syslog('info', "stop VM $vmid: $upid\n");
PVE::QemuServer::vm_stop($vmid, $skiplock);
my $pid = PVE::QemuServer::check_running ($vmid);
if ($pid && $param->{timeout}) {
print "waiting until VM $vmid stopps (PID $pid)\n";
my $count = 0;
while (($count < $param->{timeout}) &&
PVE::QemuServer::check_running($vmid)) {
$count++;
sleep 1;
}
die "wait failed - got timeout\n" if PVE::QemuServer::check_running($vmid);
}
PVE::QemuServer::vm_stop($storecfg, $vmid, $skiplock, 0, $param->{timeout});
return;
};
@ -965,11 +960,11 @@ __PACKAGE__->register_method({
raise_param_exc({ skiplock => "Only root may use this option." })
if $skiplock && $user ne 'root@pam';
die "VM $vmid not running\n" if !PVE::QemuServer::check_running($vmid);
my $realcmd = sub {
my $upid = shift;
syslog('info', "reset VM $vmid: $upid\n");
PVE::QemuServer::vm_reset($vmid, $skiplock);
return;
@ -1017,27 +1012,14 @@ __PACKAGE__->register_method({
raise_param_exc({ skiplock => "Only root may use this option." })
if $skiplock && $user ne 'root@pam';
die "VM $vmid not running\n" if !PVE::QemuServer::check_running($vmid);
my $realcmd = sub {
my $upid = shift;
syslog('info', "shutdown VM $vmid: $upid\n");
PVE::QemuServer::vm_shutdown($vmid, $skiplock);
my $pid = PVE::QemuServer::check_running ($vmid);
if ($pid && $param->{timeout}) {
print "waiting until VM $vmid stopps (PID $pid)\n";
my $count = 0;
while (($count < $param->{timeout}) &&
PVE::QemuServer::check_running($vmid)) {
$count++;
sleep 1;
}
die "wait failed - got timeout\n" if PVE::QemuServer::check_running($vmid);
}
PVE::QemuServer::vm_shutdown($vmid, $skiplock, $param->{timeout});
return;
};
@ -1078,6 +1060,8 @@ __PACKAGE__->register_method({
raise_param_exc({ skiplock => "Only root may use this option." })
if $skiplock && $user ne 'root@pam';
die "VM $vmid not running\n" if !PVE::QemuServer::check_running($vmid);
my $realcmd = sub {
my $upid = shift;
@ -1124,6 +1108,8 @@ __PACKAGE__->register_method({
raise_param_exc({ skiplock => "Only root may use this option." })
if $skiplock && $user ne 'root@pam';
die "VM $vmid already running\n" if PVE::QemuServer::check_running($vmid);
my $realcmd = sub {
my $upid = shift;

View File

@ -6,7 +6,6 @@ use POSIX qw(strftime);
use IO::File;
use IPC::Open2;
use PVE::Tools qw(run_command);
use PVE::SafeSyslog;
use PVE::INotify;
use PVE::Cluster;
use PVE::Storage;
@ -29,10 +28,12 @@ sub logmsg {
my $tstr = strftime("%b %d %H:%M:%S", localtime);
syslog($level, $msg);
foreach my $line (split (/\n/, $msg)) {
print STDOUT "$tstr $line\n";
if ($level eq 'err') {
print STDOUT "$tstr ERROR: $line\n";
} else {
print STDOUT "$tstr $line\n";
}
}
\*STDOUT->flush();
}
@ -43,12 +44,10 @@ sub eval_int {
eval {
local $SIG{INT} = $SIG{TERM} = $SIG{QUIT} = $SIG{HUP} = sub {
$delayed_interrupt = 0;
logmsg('err', "received interrupt");
die "interrupted by signal\n";
};
local $SIG{PIPE} = sub {
$delayed_interrupt = 0;
logmsg('err', "received broken pipe interrupt");
die "interrupted by signal\n";
};
@ -211,13 +210,10 @@ sub migrate {
# lock config during migration
eval { PVE::QemuServer::lock_config($vmid, sub {
eval_int(sub { prepare($session); });
my $conf;
eval_int(sub { $conf = prepare($session); });
die $@ if $@;
my $conf = PVE::QemuServer::load_config($vmid);
PVE::QemuServer::check_lock($conf);
my $running = 0;
if (my $pid = PVE::QemuServer::check_running($vmid)) {
die "cant migrate running VM without --online\n" if !$online;
@ -232,7 +228,9 @@ sub migrate {
if ($rhash->{clearlock}) {
my $unset = { lock => 1 };
eval { PVE::QemuServer::change_config_nolock($session->{vmid}, {}, $unset, 1) };
logmsg('err', $@) if $@;
if (my $tmperr = $@) {
logmsg('err', $tmperr);
}
}
if ($rhash->{volumes}) {
foreach my $volid (@{$rhash->{volumes}}) {
@ -257,16 +255,16 @@ sub migrate {
# always kill tunnel
if ($rhash->{tunnel}) {
eval_int(sub { finish_tunnel($rhash->{tunnel}) });
if ($@) {
logmsg('err', "stopping tunnel failed - $@");
if (my $tmperr = $@) {
logmsg('err', "stopping tunnel failed - $tmperr");
$errors = 1;
}
}
# always stop local VM - no interrupts possible
eval { PVE::QemuServer::vm_stop($session->{vmid}, 1, 1); };
if ($@) {
logmsg('err', "stopping vm failed - $@");
eval { PVE::QemuServer::vm_stop($session->{storecfg}, $session->{vmid}, 1, 1); };
if (my $tmperr = $@) {
logmsg('err', "stopping vm failed - $tmperr");
$errors = 1;
}
@ -281,8 +279,8 @@ sub migrate {
my $cmd = [ @{$session->{rem_ssh}}, $qm_cmd, 'unlock', $session->{vmid} ];
run_command($cmd);
});
if ($@) {
logmsg('err', "failed to clear migrate lock - $@");
if (my $tmperr = $@) {
logmsg('err', "failed to clear migrate lock - $tmperr");
$errors = 1;
}
@ -298,6 +296,18 @@ sub migrate {
last if $err =~ /^interrupted by signal$/;
}
}
# always deactivate volumes - avoid lvm LVs to be active on
# several nodes
eval {
my $vollist = PVE::QemuServer::get_vm_volumes($conf);
PVE::Storage::deactivate_volumes($session->{storecfg}, $vollist);
};
if (my $tmperr = $@) {
logmsg('err', $tmperr);
$errors = 1;
}
})};
my $err = $@;
@ -311,15 +321,13 @@ sub migrate {
my $duration = sprintf "%02d:%02d:%02d", $hours, $mins, $secs;
if ($err) {
my $msg = "migration aborted (duration $duration): $err\n";
logmsg('err', $msg);
die $msg;
logmsg('err', "migration aborted (duration $duration): $err");
die "migration aborted";
}
if ($errors) {
my $msg = "migration finished with problems (duration $duration)\n";
logmsg('err', $msg);
die $msg;
logmsg('err', "migration finished with problems (duration $duration)");
die "migration problems"
}
logmsg('info', "migration finished successfuly (duration $duration)");
@ -328,13 +336,21 @@ sub migrate {
sub prepare {
my ($session) = @_;
my $conffile = PVE::QemuServer::config_file($session->{vmid});
die "VM $session->{vmid} does not exist on this node\n" if ! -f $conffile;
# test if VM exists
my $conf = PVE::QemuServer::load_config($session->{vmid});
PVE::QemuServer::check_lock($conf);
# activate volumes
my $vollist = PVE::QemuServer::get_vm_volumes($conf);
PVE::Storage::activate_volumes($session->{storecfg}, $vollist);
# test ssh connection
my $cmd = [ @{$session->{rem_ssh}}, '/bin/true' ];
eval { run_command($cmd); };
die "Can't connect to destination address using public key\n" if $@;
return $conf;
}
sub sync_disks {
@ -478,12 +494,12 @@ sub phase2 {
my $start = time();
PVE::QemuServer::vm_monitor_command($session->{vmid}, "migrate -d \"tcp:localhost:$lport\"", 0, 1);
PVE::QemuServer::vm_monitor_command($session->{vmid}, "migrate -d \"tcp:localhost:$lport\"", 1);
my $lstat = '';
while (1) {
sleep (2);
my $stat = PVE::QemuServer::vm_monitor_command($session->{vmid}, "info migrate", 1, 1);
my $stat = PVE::QemuServer::vm_monitor_command($session->{vmid}, "info migrate", 1);
if ($stat =~ m/^Migration status: (active|completed|failed|cancelled)$/im) {
my $ms = $1;

View File

@ -1294,24 +1294,6 @@ sub create_disks {
return $vollist;
}
sub unlink_image {
my ($storecfg, $vmid, $volid) = @_;
die "reject to unlink absolute path '$volid'"
if $volid =~ m|^/|;
my ($path, $owner) = PVE::Storage::path($storecfg, $volid);
die "reject to unlink '$volid' - not owned by this VM"
if !$owner || ($owner != $vmid);
syslog('info', "VM $vmid deleting volume '$volid'");
PVE::Storage::vdisk_free($storecfg, $volid);
touch_config($vmid);
}
sub destroy_vm {
my ($storecfg, $vmid, $keep_empty_config) = @_;
@ -1328,10 +1310,10 @@ sub destroy_vm {
return if drive_is_cdrom($drive);
my $volid = $drive->{file};
next if !$volid || $volid =~ m|^/|;
return if !$volid || $volid =~ m|^/|;
my ($path, $owner) = PVE::Storage::path($storecfg, $volid);
next if !$path || !$owner || ($owner != $vmid);
return if !$path || !$owner || ($owner != $vmid);
PVE::Storage::vdisk_free($storecfg, $volid);
});
@ -2099,11 +2081,10 @@ sub config_to_command {
foreach_drive($conf, sub {
my ($ds, $drive) = @_;
eval {
PVE::Storage::parse_volume_id($drive->{file});
if (PVE::Storage::parse_volume_id($drive->{file}, 1)) {
push @$vollist, $drive->{file};
}; # ignore errors
}
$use_virtio = 1 if $ds =~ m/^virtio/;
if ($drive->{interface} eq 'scsi') {
my $maxdev = 7;
@ -2222,7 +2203,7 @@ sub next_migrate_port {
sub vm_devices_list {
my ($vmid) = @_;
my $res = vm_monitor_command ($vmid, "info pci", 1);
my $res = vm_monitor_command ($vmid, "info pci");
my @lines = split ("\n", $res);
my $devices;
@ -2253,14 +2234,14 @@ sub vm_deviceadd {
if($deviceid =~ m/^(virtio)(\d+)$/) {
my $drive = print_drive_full($storecfg, $vmid, $device);
my $ret = vm_monitor_command($vmid, "drive_add auto $drive", 1);
my $ret = vm_monitor_command($vmid, "drive_add auto $drive");
# If the command succeeds qemu prints: "OK"
if ($ret !~ m/OK/s) {
die "adding drive failed: $ret";
}
my $devicefull = print_drivedevice_full($storecfg, $vmid, $device);
$ret = vm_monitor_command($vmid, "device_add $devicefull", 1);
$ret = vm_monitor_command($vmid, "device_add $devicefull");
$ret =~ s/^\s+//;
# Otherwise, if the command succeeds, no output is sent. So any non-empty string shows an error
die 'error on hotplug device : $ret' if $ret ne "";
@ -2284,7 +2265,7 @@ sub vm_devicedel {
if($deviceid =~ m/^(virtio)(\d+)$/){
my $ret = vm_monitor_command($vmid, "drive_del drive-$deviceid", 1);
my $ret = vm_monitor_command($vmid, "drive_del drive-$deviceid");
$ret =~ s/^\s+//;
if ($ret =~ m/Device \'.*?\' not found/s) {
# NB: device not found errors mean the drive was auto-deleted and we ignore the error
@ -2293,7 +2274,7 @@ sub vm_devicedel {
die "deleting drive $deviceid failed : $ret";
}
$ret = vm_monitor_command($vmid, "device_del $deviceid", 1);
$ret = vm_monitor_command($vmid, "device_del $deviceid");
$ret =~ s/^\s+//;
die 'detaching device $deviceid failed : $ret' if $ret ne "";
@ -2318,13 +2299,7 @@ sub vm_start {
check_lock($conf) if !$skiplock;
if (check_running($vmid)) {
my $msg = "VM $vmid already running - start failed\n" ;
syslog('err', $msg);
die $msg;
} else {
syslog('info', "VM $vmid start");
}
die "VM $vmid already running\n" if check_running($vmid);
my $migrate_uri;
my $migrate_port = 0;
@ -2359,14 +2334,8 @@ sub vm_start {
PVE::Storage::activate_volumes($storecfg, $vollist);
eval { run_command($cmd, timeout => $migrate_uri ? undef : 30); };
my $err = $@;
if ($err) {
my $msg = "start failed: $err";
syslog('err', "VM $vmid $msg");
die $msg;
}
die "start failed: $err" if $err;
if ($statefile) {
@ -2375,7 +2344,7 @@ sub vm_start {
} else {
unlink $statefile;
# fixme: send resume - is that necessary ?
eval { vm_monitor_command($vmid, "cont", 1) };
eval { vm_monitor_command($vmid, "cont"); };
}
}
@ -2385,13 +2354,13 @@ sub vm_start {
$migrate_speed = $conf->{migrate_speed} || $migrate_speed;
eval {
my $cmd = "migrate_set_speed ${migrate_speed}m";
vm_monitor_command($vmid, $cmd, 1);
vm_monitor_command($vmid, $cmd);
};
if (my $migrate_downtime =
$conf->{migrate_downtime} || $defaults->{migrate_downtime}) {
my $cmd = "migrate_set_downtime ${migrate_downtime}";
eval { vm_monitor_command($vmid, $cmd, 1); };
eval { vm_monitor_command($vmid, $cmd); };
}
vm_balloonset($vmid, $conf->{balloon}) if $conf->{balloon};
@ -2431,14 +2400,12 @@ sub __read_avail {
}
sub vm_monitor_command {
my ($vmid, $cmdstr, $nolog, $nocheck) = @_;
my ($vmid, $cmdstr, $nocheck) = @_;
my $res;
syslog("info", "VM $vmid monitor command '$cmdstr'") if !$nolog;
eval {
die "VM not running\n" if !check_running($vmid, $nocheck);
die "VM $vmid not running\n" if !check_running($vmid, $nocheck);
my $sname = monitor_socket($vmid);
@ -2525,14 +2492,42 @@ sub vm_reset {
check_lock($conf) if !$skiplock;
syslog("info", "VM $vmid sending 'reset'");
vm_monitor_command($vmid, "system_reset", 1);
vm_monitor_command($vmid, "system_reset");
});
}
sub get_vm_volumes {
my ($conf) = @_;
my $vollist = [];
foreach_drive($conf, sub {
my ($ds, $drive) = @_;
my ($sid, $volname) = PVE::Storage::parse_volume_id($drive->{file}, 1);
return if !$sid;
my $volid = $drive->{file};
return if !$volid || $volid =~ m|^/|;
push @$vollist, $volid;
});
return $vollist;
}
sub vm_stop_cleanup {
my ($storecfg, $vmid, $conf) = @_;
fairsched_rmnod($vmid); # try to destroy group
my $vollist = get_vm_volumes($conf);
PVE::Storage::deactivate_volumes($storecfg, $vollist);
}
sub vm_shutdown {
my ($vmid, $skiplock) = @_;
my ($storecfg, $vmid, $skiplock, $timeout) = @_;
$timeout = 60 if !$timeout;
lock_config($vmid, sub {
@ -2540,41 +2535,48 @@ sub vm_shutdown {
check_lock($conf) if !$skiplock;
syslog("info", "VM $vmid sending 'shutdown'");
vm_monitor_command($vmid, "system_powerdown");
vm_monitor_command($vmid, "system_powerdown", 1);
my $pid = check_running($vmid);
if ($pid && $timeout) {
print "waiting until VM $vmid stopps (PID $pid)\n";
my $count = 0;
while (($count < $timeout) && check_running($vmid)) {
$count++;
sleep 1;
}
die "shutdown failed - got timeout\n" if check_running($vmid);
}
vm_stop_cleanup($storecfg, $vmid, $conf);
});
}
# Note: use $nocheck to skip tests if VM configuration file exists.
# We need that when migrating VMs to other nodes (files already moved)
sub vm_stop {
my ($vmid, $skiplock, $nocheck) = @_;
my ($storecfg, $vmid, $skiplock, $nocheck, $timeout) = @_;
$timeout = 60 if !$timeout;
lock_config($vmid, sub {
my $pid = check_running($vmid, $nocheck);
return if !$pid;
if (!$pid) {
syslog('info', "VM $vmid already stopped");
return;
}
my $conf;
if (!$nocheck) {
my $conf = load_config($vmid);
$conf = load_config($vmid);
check_lock($conf) if !$skiplock;
}
syslog("info", "VM $vmid stopping");
eval { vm_monitor_command($vmid, "quit", 1, $nocheck); };
eval { vm_monitor_command($vmid, "quit", $nocheck); };
my $err = $@;
if (!$err) {
# wait some time
my $timeout = 50; # fixme: how long?
my $count = 0;
while (($count < $timeout) && check_running($vmid, $nocheck)) {
$count++;
@ -2582,16 +2584,16 @@ sub vm_stop {
}
if ($count >= $timeout) {
syslog('info', "VM $vmid still running - terminating now with SIGTERM");
warn "VM still running - terminating now with SIGTERM\n";
kill 15, $pid;
}
} else {
syslog('info', "VM $vmid quit failed - terminating now with SIGTERM");
warn "VM quit failed - terminating now with SIGTERM\n";
kill 15, $pid;
}
# wait again
my $timeout = 10;
$timeout = 10;
my $count = 0;
while (($count < $timeout) && check_running($vmid, $nocheck)) {
@ -2600,12 +2602,13 @@ sub vm_stop {
}
if ($count >= $timeout) {
syslog('info', "VM $vmid still running - terminating now with SIGKILL\n");
warn "VM still running - terminating now with SIGKILL\n";
kill 9, $pid;
sleep 1;
}
fairsched_rmnod($vmid); # try to destroy group
});
vm_stop_cleanup($storecfg, $vmid, $conf) if $conf;
});
}
sub vm_suspend {
@ -2617,9 +2620,7 @@ sub vm_suspend {
check_lock($conf) if !$skiplock;
syslog("info", "VM $vmid suspend");
vm_monitor_command($vmid, "stop", 1);
vm_monitor_command($vmid, "stop");
});
}
@ -2632,9 +2633,7 @@ sub vm_resume {
check_lock($conf) if !$skiplock;
syslog("info", "VM $vmid resume");
vm_monitor_command($vmid, "cont", 1);
vm_monitor_command($vmid, "cont");
});
}
@ -2647,9 +2646,7 @@ sub vm_sendkey {
check_lock($conf) if !$skiplock;
syslog("info", "VM $vmid sending key $key");
vm_monitor_command($vmid, "sendkey $key", 1);
vm_monitor_command($vmid, "sendkey $key");
});
}
@ -2662,78 +2659,71 @@ sub vm_destroy {
check_lock($conf) if !$skiplock;
syslog("info", "VM $vmid destroy called (removing all data)");
eval {
if (!check_running($vmid)) {
fairsched_rmnod($vmid); # try to destroy group
destroy_vm($storecfg, $vmid);
} else {
die "VM is running\n";
}
};
my $err = $@;
if ($err) {
syslog("err", "VM $vmid destroy failed - $err");
die $err;
if (!check_running($vmid)) {
fairsched_rmnod($vmid); # try to destroy group
destroy_vm($storecfg, $vmid);
} else {
die "VM $vmid is running - destroy failed\n";
}
});
}
sub vm_stopall {
my ($timeout) = @_;
my ($storecfg, $timeout) = @_;
$timeout = 3*60 if !$timeout;
my $cleanuphash = {};
my $vzlist = vzlist();
my $count = 0;
foreach my $vmid (keys %$vzlist) {
next if !$vzlist->{$vmid}->{pid};
$count++;
$cleanuphash->{$vmid} = 1;
}
return if !$count;
my $msg = "Stopping Qemu Server - sending shutdown requests to all VMs\n";
syslog('info', $msg);
warn $msg;
foreach my $vmid (keys %$vzlist) {
next if !$vzlist->{$vmid}->{pid};
eval { vm_shutdown($storecfg, $vmid, 1); };
my $err = $@;
if ($err) {
warn $err;
} else {
delete $cleanuphash->{$vmid};
}
}
my $wt = 5;
my $maxtries = int(($timeout + $wt -1)/$wt);
my $try = 0;
while (($try < $maxtries) && $count) {
$try++;
sleep $wt;
$vzlist = vzlist();
$count = 0;
foreach my $vmid (keys %$vzlist) {
next if !$vzlist->{$vmid}->{pid};
$count++;
}
last if !$count;
}
if ($count) {
my $msg = "Stopping Qemu Server - sending shutdown requests to all VMs\n";
syslog('info', $msg);
print STDERR $msg;
foreach my $vmid (keys %$vzlist) {
next if !$vzlist->{$vmid}->{pid};
eval { vm_shutdown($vmid, 1); };
print STDERR $@ if $@;
}
my $wt = 5;
my $maxtries = int(($timeout + $wt -1)/$wt);
my $try = 0;
while (($try < $maxtries) && $count) {
$try++;
sleep $wt;
$vzlist = vzlist();
$count = 0;
foreach my $vmid (keys %$vzlist) {
next if !$vzlist->{$vmid}->{pid};
$count++;
}
last if !$count;
}
return if !$count;
foreach my $vmid (keys %$vzlist) {
next if !$vzlist->{$vmid}->{pid};
$msg = "VM $vmid still running - sending stop now\n";
syslog('info', $msg);
print $msg;
eval { vm_monitor_command($vmid, "quit", 1); };
print STDERR $@ if $@;
warn "VM $vmid still running - sending stop now\n";
eval { vm_monitor_command($vmid, "quit"); };
warn $@ if $@;
}
$timeout = 30;
@ -2742,7 +2732,7 @@ sub vm_stopall {
while (($try < $maxtries) && $count) {
$try++;
sleep $wt;
$vzlist = vzlist();
$count = 0;
foreach my $vmid (keys %$vzlist) {
@ -2752,24 +2742,34 @@ sub vm_stopall {
last if !$count;
}
return if !$count;
if ($count) {
foreach my $vmid (keys %$vzlist) {
next if !$vzlist->{$vmid}->{pid};
foreach my $vmid (keys %$vzlist) {
next if !$vzlist->{$vmid}->{pid};
$msg = "VM $vmid still running - terminating now with SIGTERM\n";
syslog('info', $msg);
print $msg;
kill 15, $vzlist->{$vmid}->{pid};
warn "VM $vmid still running - terminating now with SIGTERM\n";
kill 15, $vzlist->{$vmid}->{pid};
}
sleep 1;
}
# this is called by system shutdown scripts, so remaining
# processes gets killed anyways (no need to send kill -9 here)
$msg = "Qemu Server stopped\n";
syslog('info', $msg);
print STDERR $msg;
}
$vzlist = vzlist();
foreach my $vmid (keys %$cleanuphash) {
next if $vzlist->{$vmid}->{pid};
eval {
my $conf = load_config($vmid);
vm_stop_cleanup($storecfg, $vmid, $conf);
};
warn $@ if $@;
}
$msg = "Qemu Server stopped\n";
syslog('info', $msg);
print $msg;
}
# pci helpers
@ -2892,7 +2892,7 @@ sub print_pci_addr {
sub vm_balloonset {
my ($vmid, $value) = @_;
vm_monitor_command($vmid, "balloon $value", 1);
vm_monitor_command($vmid, "balloon $value");
}
# vzdump restore implementation

View File

@ -1,3 +1,13 @@
qemu-server (2.0-7) unstable; urgency=low
* activate/deactivate LVs more carefully
* avoid syslog whenever possible
* code cleanups
-- Proxmox Support Team <support@proxmox.com> Fri, 25 Nov 2011 08:08:04 +0100
qemu-server (2.0-6) unstable; urgency=low
* set correct migrate speed

4
qm
View File

@ -276,7 +276,9 @@ __PACKAGE__->register_method ({
my $timeout = $param->{timeout};
PVE::QemuServer::vm_stopall($timeout);
my $storecfg = PVE::Storage::config();
PVE::QemuServer::vm_stopall($storecfg, $timeout);
return undef;
}});