diff --git a/PVE/API2/Qemu.pm b/PVE/API2/Qemu.pm
index a0fcd28a..0787074d 100644
--- a/PVE/API2/Qemu.pm
+++ b/PVE/API2/Qemu.pm
@@ -512,7 +512,7 @@ __PACKAGE__->register_method({
     }});
 
 __PACKAGE__->register_method ({
-    subclass => "PVE::API2::Firewall::VM",
+    subclass => "PVE::API2::Firewall::VM",
     path => '{vmid}/firewall',
 });
 
@@ -1004,7 +1004,7 @@ my $update_vm_api = sub {
 		} elsif($opt eq 'tablet' && $param->{$opt} == 0){
 		    PVE::QemuServer::vm_deviceunplug($vmid, $conf, $opt);
 		}
-
+
 		if($opt eq 'cores' && $conf->{maxcpus}){
 		    PVE::QemuServer::qemu_cpu_hotplug($vmid, $conf, $param->{$opt});
 		}
@@ -1429,7 +1429,7 @@ __PACKAGE__->register_method({
 	# check is done by verifying the VNC ticket (inside VNC protocol).
 
 	my $port = $param->{port};
-
+
 	return { port => $port };
     }});
 
@@ -1468,12 +1468,12 @@ __PACKAGE__->register_method({
 	my $port = PVE::QemuServer::spice_port($vmid);
 
-	my ($ticket, undef, $remote_viewer_config) =
+	my ($ticket, undef, $remote_viewer_config) =
 	    PVE::AccessControl::remote_viewer_config($authuser, $vmid, $node, $proxy, $title, $port);
-
+
 	PVE::QemuServer::vm_mon_cmd($vmid, "set_password", protocol => 'spice', password => $ticket);
 	PVE::QemuServer::vm_mon_cmd($vmid, "expire_password", protocol => 'spice', time => "+30");
-
+
 	return $remote_viewer_config;
     }});
 
@@ -2284,7 +2284,7 @@ __PACKAGE__->register_method({
 	    UUID::generate($uuid);
 	    UUID::unparse($uuid, $uuid_str);
 	    my $smbios1 = PVE::QemuServer::parse_smbios1($newconf->{smbios1} || '');
-	    $smbios1->{uuid} = $uuid_str;
+	    $smbios1->{uuid} = $uuid_str;
 	    $newconf->{smbios1} = PVE::QemuServer::print_smbios1($smbios1);
 
 	    delete $newconf->{template};
@@ -2484,9 +2484,9 @@ __PACKAGE__->register_method({
 	    PVE::QemuServer::update_config_nolock($vmid, $conf, 1);
 
-	    eval {
+	    eval {
 		# try to deactivate volumes - avoid lvm LVs to be active on several nodes
-		PVE::Storage::deactivate_volumes($storecfg, [ $newdrive->{file} ])
+		PVE::Storage::deactivate_volumes($storecfg, [ $newdrive->{file} ])
 		    if !$running;
 	    };
 	    warn $@ if $@;
diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm
index a4fa2784..3044b7f3 100644
--- a/PVE/QemuServer.pm
+++ b/PVE/QemuServer.pm
@@ -2591,7 +2591,7 @@ sub config_to_command {
     my $total_cores = $sockets * $cores;
     my $allowed_cores = $cpuinfo->{cpus};
 
-    die "MAX $allowed_cores Cores allowed per VM on this Node"
+    die "MAX $allowed_cores cores allowed per VM on this node\n"
 	if ($allowed_cores < $total_cores);
 
     if ($maxcpus) {
@@ -4935,9 +4935,9 @@ sub snapshot_create {
     eval { vm_mon_cmd($vmid, "savevm-end") if $running; };
     warn $@ if $@;
-    #savevm-end is async, we need to wait
-    if($running) {
-	for(;;) {
+    # savevm-end is async, we need to wait
+    if ($running) {
+	for (;;) {
 	    my $stat = vm_mon_cmd_nocheck($vmid, "query-savevm");
 	    if (!$stat->{bytes}) {
 		last;
 	    }
@@ -5190,7 +5190,7 @@ sub qemu_drive_mirror {
     #fixme : sometime drive-mirror timeout, but works fine after.
     # (I have see the problem with big volume > 200GB), so we need to eval
-    eval { vm_mon_cmd($vmid, "drive-mirror", %$opts); };
+    eval { vm_mon_cmd($vmid, "drive-mirror", %$opts); };
     # ignore errors here
 
     eval {
@@ -5207,12 +5207,12 @@ sub qemu_drive_mirror {
 	    my $busy = $stat->{busy};
 
 	    print "transferred: $transferred bytes remaining: $remaining bytes total: $total bytes progression: $percent % busy: $busy\n";
-
+
 	    if ($stat->{len} == $stat->{offset}) {
 		if ($busy eq 'false') {
 
 		    last if $vmiddst != $vmid;
-
+
 		    # try to switch the disk if source and destination are on the same guest
 		    eval { vm_mon_cmd($vmid, "block-job-complete", device => "drive-$drive") };
 		    last if !$@;
@@ -5247,7 +5247,7 @@ sub qemu_drive_mirror {
     };
 
     if ($err) {
-	eval { &$cancel_job(); };
+	eval { &$cancel_job(); };
 	die "mirroring error: $err";
     }