forked from proxmox-mirrors/proxmox-backup
Compare commits: 961c81bdeb...c18ad8de2e (341 commits)
Cargo.toml (107 changed lines)
@@ -1,5 +1,5 @@
[workspace.package]
version = "3.3.2"
version = "4.0.6"
authors = [
"Dietmar Maurer <dietmar@proxmox.com>",
"Dominik Csapak <d.csapak@proxmox.com>",
@@ -13,7 +13,7 @@ authors = [
edition = "2021"
license = "AGPL-3"
repository = "https://git.proxmox.com/?p=proxmox-backup.git"
rust-version = "1.80"
rust-version = "1.81"

[package]
name = "proxmox-backup"
@@ -53,49 +53,51 @@ path = "src/lib.rs"

[workspace.dependencies]
# proxmox workspace
proxmox-apt = { version = "0.11", features = [ "cache" ] }
proxmox-apt-api-types = "1.0.1"
proxmox-async = "0.4"
proxmox-auth-api = "0.4"
proxmox-apt = { version = "0.99", features = [ "cache" ] }
proxmox-apt-api-types = "2"
proxmox-async = "0.5"
proxmox-auth-api = "1.0.2"
proxmox-base64 = "1"
proxmox-borrow = "1"
proxmox-compression = "0.2"
proxmox-config-digest = "0.1.0"
proxmox-daemon = "0.1.0"
proxmox-fuse = "0.1.3"
proxmox-http = { version = "0.9.0", features = [ "client", "http-helpers", "websocket" ] } # see below
proxmox-human-byte = "0.1"
proxmox-compression = "1"
proxmox-config-digest = "1"
proxmox-daemon = "1"
proxmox-fuse = "1"
proxmox-http = { version = "1", features = [ "client", "http-helpers", "websocket" ] } # see below
proxmox-human-byte = "1"
proxmox-io = "1.0.1" # tools and client use "tokio" feature
proxmox-lang = "1.1"
proxmox-log = "0.2.6"
proxmox-ldap = "0.2.1"
proxmox-metrics = "0.3.1"
proxmox-notify = "0.5.1"
proxmox-openid = "0.10.0"
proxmox-rest-server = { version = "0.8.5", features = [ "templates" ] }
proxmox-log = "1"
proxmox-ldap = "1"
proxmox-metrics = "1"
proxmox-notify = "1"
proxmox-openid = "1"
proxmox-rest-server = { version = "1.0.1", features = [ "templates" ] }
# some use "cli", some use "cli" and "server", pbs-config uses nothing
proxmox-router = { version = "3.0.0", default-features = false }
proxmox-rrd = "0.4"
proxmox-router = { version = "3.2.2", default-features = false }
proxmox-rrd = "1"
proxmox-rrd-api-types = "1.0.2"
proxmox-s3-client = { version = "1.0.5", features = [ "impl" ] }
# everything but pbs-config and pbs-client use "api-macro"
proxmox-schema = "4"
proxmox-section-config = "2"
proxmox-serde = "0.1.1"
proxmox-shared-cache = "0.1"
proxmox-shared-memory = "0.3.0"
proxmox-sortable-macro = "0.1.2"
proxmox-subscription = { version = "0.5.0", features = [ "api-types" ] }
proxmox-sys = "0.6.5"
proxmox-systemd = "0.1"
proxmox-tfa = { version = "5", features = [ "api", "api-types" ] }
proxmox-section-config = "3"
proxmox-serde = "1"
proxmox-shared-cache = "1"
proxmox-shared-memory = "1"
proxmox-sortable-macro = "1"
proxmox-subscription = { version = "1", features = [ "api-types" ] }
proxmox-sys = "1"
proxmox-systemd = "1"
proxmox-tfa = { version = "6", features = [ "api", "api-types" ] }
proxmox-time = "2"
proxmox-uuid = "1"
proxmox-worker-task = "0.1"
pbs-api-types = "0.2.0"
proxmox-uuid = { version = "1", features = [ "serde" ] }
proxmox-worker-task = "1"
pbs-api-types = "1.0.2"

# other proxmox crates
pathpatterns = "0.3"
proxmox-acme = "0.5.3"
pxar = "0.12.1"
pathpatterns = "1"
proxmox-acme = "1"
pxar = "1"

# PBS workspace
pbs-buildcfg = { path = "pbs-buildcfg" }
@@ -112,25 +114,27 @@ pbs-tools = { path = "pbs-tools" }
anyhow = "1.0"
async-trait = "0.1.56"
apt-pkg-native = "0.3.2"
base64 = "0.13"
bitflags = "2.4"
bytes = "1.0"
cidr = "0.2.1"
cidr = "0.3"
crc32fast = "1"
const_format = "0.2"
crossbeam-channel = "0.5"
endian_trait = { version = "0.6", features = ["arrays"] }
env_logger = "0.10"
env_logger = "0.11"
flate2 = "1.0"
foreign-types = "0.3"
futures = "0.3"
h2 = { version = "0.4", features = [ "stream" ] }
handlebars = "3.0"
hex = "0.4.3"
hyper = { version = "0.14", features = [ "full" ] }
hickory-resolver = { version = "0.24.1", default-features = false, features = [ "system-config", "tokio-runtime" ] }
http-body-util = "0.1"
hyper-util = "0.1"
hyper = { version = "1", features = [ "full" ] }
libc = "0.2"
log = "0.4.17"
nix = "0.26.1"
nix = "0.29"
nom = "7"
num-traits = "0.2"
once_cell = "1.3.1"
@@ -138,32 +142,30 @@ openssl = "0.10.40"
percent-encoding = "2.1"
pin-project-lite = "0.2"
regex = "1.5.5"
rustyline = "9"
rustyline = "14"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
serde_plain = "1"
siphasher = "0.3"
syslog = "6"
tar = "0.4"
termcolor = "1.1.2"
thiserror = "1.0"
thiserror = "2"
tokio = "1.6"
tokio-openssl = "0.6.1"
tokio-stream = "0.1.0"
tokio-util = { version = "0.7", features = [ "io" ] }
tracing = "0.1"
tower-service = "0.3.0"
udev = "0.4"
udev = "0.9"
url = "2.1"
walkdir = "2"
xdg = "2.2"
zstd = { version = "0.12", features = [ "bindgen" ] }
zstd-safe = "6.0"
zstd = "0.13"
zstd-safe = "7"

[dependencies]
anyhow.workspace = true
async-trait.workspace = true
base64.workspace = true
bytes.workspace = true
cidr.workspace = true
const_format.workspace = true
@@ -173,7 +175,9 @@ endian_trait.workspace = true
futures.workspace = true
h2.workspace = true
hex.workspace = true
http-body-util.workspace = true
hyper.workspace = true
hyper-util = { workspace = true, features = ["server", "server-auto", "server-graceful"] }
libc.workspace = true
log.workspace = true
nix.workspace = true
@@ -206,10 +210,11 @@ proxmox-apt.workspace = true
proxmox-apt-api-types.workspace = true
proxmox-async.workspace = true
proxmox-auth-api = { workspace = true, features = [ "api", "pam-authenticator" ] }
proxmox-base64.workspace = true
proxmox-compression.workspace = true
proxmox-config-digest.workspace = true
proxmox-daemon.workspace = true
proxmox-http = { workspace = true, features = [ "client-trait", "proxmox-async", "rate-limited-stream" ] } # pbs-client doesn't use these
proxmox-http = { workspace = true, features = [ "body", "client-trait", "proxmox-async", "rate-limited-stream" ] } # pbs-client doesn't use these
proxmox-human-byte.workspace = true
proxmox-io.workspace = true
proxmox-lang.workspace = true
@@ -220,6 +225,7 @@ proxmox-notify = { workspace = true, features = [ "pbs-context" ] }
proxmox-openid.workspace = true
proxmox-rest-server = { workspace = true, features = [ "rate-limited-stream" ] }
proxmox-router = { workspace = true, features = [ "cli", "server"] }
proxmox-s3-client.workspace = true
proxmox-schema = { workspace = true, features = [ "api-macro" ] }
proxmox-section-config.workspace = true
proxmox-serde = { workspace = true, features = [ "serde_json" ] }
@@ -253,11 +259,13 @@ proxmox-rrd-api-types.workspace = true
# Local path overrides
# NOTE: You must run `cargo update` after changing this for it to take effect!
[patch.crates-io]

#pbs-api-types = { path = "../proxmox/pbs-api-types" }
#proxmox-acme = { path = "../proxmox/proxmox-acme" }
#proxmox-apt = { path = "../proxmox/proxmox-apt" }
#proxmox-apt-api-types = { path = "../proxmox/proxmox-apt-api-types" }
#proxmox-async = { path = "../proxmox/proxmox-async" }
#proxmox-auth-api = { path = "../proxmox/proxmox-auth-api" }
#proxmox-base64 = { path = "../proxmox/proxmox-base64" }
#proxmox-borrow = { path = "../proxmox/proxmox-borrow" }
#proxmox-compression = { path = "../proxmox/proxmox-compression" }
#proxmox-config-digest = { path = "../proxmox/proxmox-config-digest" }
@@ -289,7 +297,6 @@ proxmox-rrd-api-types.workspace = true
#proxmox-uuid = { path = "../proxmox/proxmox-uuid" }
#proxmox-worker-task = { path = "../proxmox/proxmox-worker-task" }

#proxmox-acme = { path = "../proxmox/proxmox-acme" }
#pathpatterns = {path = "../pathpatterns" }
#pxar = { path = "../pxar" }
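The commented-out `[patch.crates-io]` entries are the hook for building against local checkouts of the proxmox crates. A minimal sketch of that workflow, assuming the proxmox workspace is checked out next to this repository as `../proxmox` (the layout the commented paths imply):

```sh
# Enable one override, then refresh the lock file as the NOTE above requires.
sed -i 's|^#proxmox-async = |proxmox-async = |' Cargo.toml
cargo update   # re-resolve dependencies so the [patch.crates-io] entry takes effect
cargo build --package proxmox-backup --bin proxmox-backup-api
```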
Makefile (53 changed lines)
@@ -1,8 +1,10 @@
include /usr/share/dpkg/default.mk
include /usr/share/rustc/architecture.mk
include defines.mk

PACKAGE := proxmox-backup
ARCH := $(DEB_BUILD_ARCH)
export DEB_HOST_RUST_TYPE

SUBDIRS := etc www docs templates

@@ -36,13 +38,24 @@ SUBCRATES != cargo metadata --no-deps --format-version=1 \
| grep "$$PWD/" \
| sed -e "s!.*$$PWD/!!g" -e 's/\#.*$$//g' -e 's/)$$//g'

# sync with debian/rules!
STATIC_TARGET_DIR := target/static-build
ifeq ($(BUILD_MODE), release)
CARGO_BUILD_ARGS += --release
CARGO_BUILD_ARGS += --release --target $(DEB_HOST_RUST_TYPE)
CARGO_STATIC_CONFIG ?= --config debian/cargo_home/config.static.toml
CARGO_STATIC_BUILD_ARGS += $(CARGO_STATIC_CONFIG) --release --target $(DEB_HOST_RUST_TYPE) --target-dir $(STATIC_TARGET_DIR)
COMPILEDIR := target/$(DEB_HOST_RUST_TYPE)/release
STATIC_COMPILEDIR := $(STATIC_TARGET_DIR)/$(DEB_HOST_RUST_TYPE)/release
else
CARGO_BUILD_ARGS += --target $(DEB_HOST_RUST_TYPE)
CARGO_STATIC_BUILD_ARGS += --target $(DEB_HOST_RUST_TYPE) --target-dir $(STATIC_TARGET_DIR)
COMPILEDIR := target/$(DEB_HOST_RUST_TYPE)/debug
STATIC_COMPILEDIR := $(STATIC_TARGET_DIR)/$(DEB_HOST_RUST_TYPE)/debug
endif

STATIC_RUSTC_FLAGS := -C target-feature=+crt-static -L $(STATIC_COMPILEDIR)/deps-stubs/
# end sync with debian/rules

ifeq ($(valgrind), yes)
CARGO_BUILD_ARGS += --features valgrind
endif
@@ -52,6 +65,9 @@ CARGO ?= cargo
COMPILED_BINS := \
$(addprefix $(COMPILEDIR)/,$(USR_BIN) $(USR_SBIN) $(SERVICE_BIN) $(RESTORE_BIN))

STATIC_BINS := \
$(addprefix $(STATIC_COMPILEDIR)/,proxmox-backup-client-static pxar-static)

export DEB_VERSION DEB_VERSION_UPSTREAM

SERVER_DEB=$(PACKAGE)-server_$(DEB_VERSION)_$(ARCH).deb
@@ -60,10 +76,12 @@ CLIENT_DEB=$(PACKAGE)-client_$(DEB_VERSION)_$(ARCH).deb
CLIENT_DBG_DEB=$(PACKAGE)-client-dbgsym_$(DEB_VERSION)_$(ARCH).deb
RESTORE_DEB=proxmox-backup-file-restore_$(DEB_VERSION)_$(ARCH).deb
RESTORE_DBG_DEB=proxmox-backup-file-restore-dbgsym_$(DEB_VERSION)_$(ARCH).deb
STATIC_CLIENT_DEB=$(PACKAGE)-client-static_$(DEB_VERSION)_$(ARCH).deb
STATIC_CLIENT_DBG_DEB=$(PACKAGE)-client-static-dbgsym_$(DEB_VERSION)_$(ARCH).deb
DOC_DEB=$(PACKAGE)-docs_$(DEB_VERSION)_all.deb

DEBS=$(SERVER_DEB) $(SERVER_DBG_DEB) $(CLIENT_DEB) $(CLIENT_DBG_DEB) \
$(RESTORE_DEB) $(RESTORE_DBG_DEB)
$(RESTORE_DEB) $(RESTORE_DBG_DEB) $(STATIC_CLIENT_DEB) $(STATIC_CLIENT_DBG_DEB)

DSC = rust-$(PACKAGE)_$(DEB_VERSION).dsc

@@ -71,7 +89,7 @@ DESTDIR=

tests ?= --workspace

all: $(SUBDIRS)
all: proxmox-backup-client-static $(SUBDIRS)

.PHONY: $(SUBDIRS)
$(SUBDIRS):
@@ -141,7 +159,6 @@ clean: clean-deb
$(foreach i,$(SUBDIRS), \
$(MAKE) -C $(i) clean ;)
$(CARGO) clean
rm -f .do-cargo-build

# allows one to avoid running cargo clean when one just wants to tidy up after a package build
clean-deb:
@@ -157,11 +174,9 @@ docs: $(COMPILEDIR)/dump-catalog-shell-cli $(COMPILEDIR)/docgen

.PHONY: cargo-build
cargo-build:
rm -f .do-cargo-build
$(MAKE) $(COMPILED_BINS)

$(COMPILED_BINS) $(COMPILEDIR)/dump-catalog-shell-cli $(COMPILEDIR)/docgen: .do-cargo-build
.do-cargo-build:
$(COMPILED_BINS) $(COMPILEDIR)/dump-catalog-shell-cli $(COMPILEDIR)/docgen &:
$(CARGO) build $(CARGO_BUILD_ARGS) \
--package proxmox-backup-banner \
--bin proxmox-backup-banner \
@@ -180,7 +195,7 @@ $(COMPILED_BINS) $(COMPILEDIR)/dump-catalog-shell-cli $(COMPILEDIR)/docgen: .do-
--bin proxmox-restore-daemon \
--package proxmox-backup \
--bin docgen \
--bin pbs2to3 \
--bin pbs3to4 \
--bin proxmox-backup-api \
--bin proxmox-backup-manager \
--bin proxmox-backup-proxy \
@@ -190,12 +205,25 @@ $(COMPILED_BINS) $(COMPILEDIR)/dump-catalog-shell-cli $(COMPILEDIR)/docgen: .do-
--bin sg-tape-cmd
touch "$@"

.PHONY: proxmox-backup-client-static
proxmox-backup-client-static:
$(MAKE) $(STATIC_BINS)

$(STATIC_BINS) &:
mkdir -p $(STATIC_COMPILEDIR)/deps-stubs/ && \
echo '!<arch>' > $(STATIC_COMPILEDIR)/deps-stubs/libsystemd.a # workaround for too greedy linkage and proxmox-systemd
OPENSSL_STATIC=1 \
RUSTFLAGS="$(STATIC_RUSTC_FLAGS)" \
$(CARGO) build $(CARGO_STATIC_BUILD_ARGS) --package pxar-bin --bin pxar
OPENSSL_STATIC=1 \
RUSTFLAGS="$(STATIC_RUSTC_FLAGS)" \
$(CARGO) build $(CARGO_STATIC_BUILD_ARGS) --package proxmox-backup-client --bin proxmox-backup-client

.PHONY: lint
lint:
cargo clippy -- -A clippy::all -D clippy::correctness

install: $(COMPILED_BINS)
install: $(COMPILED_BINS) $(STATIC_BINS)
install -dm755 $(DESTDIR)$(BINDIR)
install -dm755 $(DESTDIR)$(ZSH_COMPL_DEST)
$(foreach i,$(USR_BIN), \
@@ -205,7 +233,7 @@ install: $(COMPILED_BINS)
$(foreach i,$(USR_SBIN), \
install -m755 $(COMPILEDIR)/$(i) $(DESTDIR)$(SBINDIR)/ ; \
install -m644 zsh-completions/_$(i) $(DESTDIR)$(ZSH_COMPL_DEST)/ ;)
install -m755 $(COMPILEDIR)/pbs2to3 $(DESTDIR)$(SBINDIR)/
install -m755 $(COMPILEDIR)/pbs3to4 $(DESTDIR)$(SBINDIR)/
install -dm755 $(DESTDIR)$(LIBEXECDIR)/proxmox-backup
install -dm755 $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/file-restore
$(foreach i,$(RESTORE_BIN), \
@@ -214,16 +242,19 @@ install: $(COMPILED_BINS)
install -m4755 -o root -g root $(COMPILEDIR)/sg-tape-cmd $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/sg-tape-cmd
$(foreach i,$(SERVICE_BIN), \
install -m755 $(COMPILEDIR)/$(i) $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/ ;)
install -m755 $(STATIC_COMPILEDIR)/proxmox-backup-client $(DESTDIR)$(BINDIR)/proxmox-backup-client-static
install -m755 $(STATIC_COMPILEDIR)/pxar $(DESTDIR)$(BINDIR)/pxar-static
$(MAKE) -C www install
$(MAKE) -C docs install
$(MAKE) -C templates install

.PHONY: upload
upload: UPLOAD_DIST ?= $(DEB_DISTRIBUTION)
upload: $(SERVER_DEB) $(CLIENT_DEB) $(RESTORE_DEB) $(DOC_DEB)
upload: $(SERVER_DEB) $(CLIENT_DEB) $(RESTORE_DEB) $(DOC_DEB) $(STATIC_CLIENT_DEB)
# check if working directory is clean
git diff --exit-code --stat && git diff --exit-code --stat --staged
tar cf - $(SERVER_DEB) $(SERVER_DBG_DEB) $(DOC_DEB) $(CLIENT_DEB) $(CLIENT_DBG_DEB) \
| ssh -X repoman@repo.proxmox.com upload --product pbs --dist $(UPLOAD_DIST)
tar cf - $(CLIENT_DEB) $(CLIENT_DBG_DEB) | ssh -X repoman@repo.proxmox.com upload --product "pve,pmg,pbs-client" --dist $(UPLOAD_DIST)
tar cf - $(STATIC_CLIENT_DEB) $(STATIC_CLIENT_DBG_DEB) | ssh -X repoman@repo.proxmox.com upload --product "pbs-client" --dist $(UPLOAD_DIST)
tar cf - $(RESTORE_DEB) $(RESTORE_DBG_DEB) | ssh -X repoman@repo.proxmox.com upload --product "pve" --dist $(UPLOAD_DIST)
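The new static-build plumbing above is driven through the `proxmox-backup-client-static` target. A rough usage sketch, assuming an amd64 build host where `DEB_HOST_RUST_TYPE` resolves to `x86_64-unknown-linux-gnu`:

```sh
# Build only the statically linked client binaries in release mode.
make BUILD_MODE=release proxmox-backup-client-static
# The binaries land in the separate static target dir (kept in sync with debian/rules):
ls target/static-build/x86_64-unknown-linux-gnu/release/proxmox-backup-client \
   target/static-build/x86_64-unknown-linux-gnu/release/pxar
```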
debian/changelog (vendored, 402 changed lines)
@@ -1,3 +1,405 @@
rust-proxmox-backup (4.0.6-2) trixie; urgency=medium

* d/postinst: fix setting up pbs-test repo on fresh installation during the
beta.

-- Proxmox Support Team <support@proxmox.com> Thu, 24 Jul 2025 15:31:20 +0200

rust-proxmox-backup (4.0.6-1) trixie; urgency=medium

* ui: add beta text with link to bugtracker.

* d/postinst: drop migration steps from PBS 2.x times, there is no
single-step upgrade path from PBS 2 to PBS 4, one will always need to
upgrade to PBS 3 in between.

* d/postinst: add pbs-test repo on fresh installation during the beta.

* api: datastore: fix cache store creation when reusing s3 backend.

* client: benchmark: fix no-cache flag backwards compatibility.

* api: admin s3: switch request method from GET to PUT for s3 check.

-- Proxmox Support Team <support@proxmox.com> Thu, 24 Jul 2025 13:12:29 +0200

rust-proxmox-backup (4.0.5-1) trixie; urgency=medium

* s3 docs: add warning of potential costs when using s3 backends.

* s3 docs: add object store provider specific configuration examples.

* client: backup writer: make no-cache parameter backwards compatible to
avoid confusing older Proxmox Backup Server needlessly if it's not
configured.

* notifications docs: adapt to latest notification system UI changes.

* api: access: add opt-in HTTP-only cookie based ticket authentication flow.

* ui: opt into the new HTTP-only ticket authentication flow.

* client: adapt pbs client to also handle HTTP-only authentication flows
correctly.

* tape: forbid operations on an s3 datastore for now.

* avoid leaking internal s3client struct name to the outside, favor using
the 's3-endpoint' term where sensible and easy to adapt.

-- Proxmox Support Team <support@proxmox.com> Wed, 23 Jul 2025 21:53:47 +0200

rust-proxmox-backup (4.0.4-1) trixie; urgency=medium

* Add new S3 backend for datastores. This allows you to save all data on any
S3 compatible object storage. A local cache for core index files is used
to reduce access to slow(er) and potentially costly S3 endpoints. See the
documentation for more details.

* ui: do not show consent banner twice for OIDC login.

* api: node system services: postfix is again a non-templated systemd unit.

* tape: ignore error on clearing the encryption mode when the changer forces
encryption. This is a legitimate mode of operation, e.g., due to policy
reasons where the hardware must be in control of the encryption mode.

* removable datastore: allow one to configure sync jobs to trigger when the
underlying datastore gets mounted.

-- Proxmox Support Team <support@proxmox.com> Tue, 22 Jul 2025 22:05:59 +0200

rust-proxmox-backup (4.0.3-1) trixie; urgency=medium

* docs: update repository chapter to reflect new deb822 format.

* docs: update apt key installation guide to use the new release key rings.

* pxar extract: fix regression within extraction of hardlinks that caused a
"failed to extract hardlink: EINVAL: Invalid argument" error.

-- Proxmox Support Team <support@proxmox.com> Sat, 19 Jul 2025 20:03:38 +0200

rust-proxmox-backup (4.0.2-1) trixie; urgency=medium

* ui: utils: add missing task description override for 'create-datastore'.

* bin: add pbs3to4 upgrade check-list script.

* fix #6188: smtp: quote sender's display name if needed due to special
characters being used.

* proxmox-file-restore: allocate at least 256MB of RAM for the restore VM
due to increased resource demand of the restore-image build on Trixie.

* docs: rephrase and extend rate limiting description for sync jobs.

* manager cli: move legacy update-to-prune-jobs command to new
migrate-config sub-command.

* manager cli: add 'migrate-config default-notification-mode' command.

* ui: datastore options view: switch to new notification-mode default of
using the modern notification-system.

* ui: tape backup job: move notification settings to a separate tab.

* ui: one-shot tape backup: use same wording as tape-backup jobs.

* ui: datastore options: drop notify and notify-user rows.

* ui: datastore options: notification: use radio controls to select mode.

* d/postinst: migrate notification mode default on update.

* garbage collection: track chunk cache hit and miss stats and show in task
log.

-- Proxmox Support Team <support@proxmox.com> Wed, 16 Jul 2025 01:28:11 +0200

rust-proxmox-backup (4.0.1-2) trixie; urgency=medium

* proxmox-backup-server package: update SCSI library to libsgutils2-1.48
from Debian Trixie.

-- Proxmox Support Team <support@proxmox.com> Tue, 08 Jul 2025 20:02:55 +0200

rust-proxmox-backup (4.0.1-1) trixie; urgency=medium

* proxmox-backup-client: fix static build.

* datastore: ignore missing owner file when removing group directory.

* update proxmox-apt to fix target paths for repository sources files when
using the modern deb822 based format.

-- Proxmox Support Team <support@proxmox.com> Tue, 08 Jul 2025 19:28:26 +0200

rust-proxmox-backup (4.0.0-1) trixie; urgency=medium

* re-build for Debian 13 Trixie based Proxmox Backup Server 4 release.

-- Proxmox Support Team <support@proxmox.com> Mon, 16 Jun 2025 14:10:24 +0200

rust-proxmox-backup (3.4.2-1) bookworm; urgency=medium

* datastore: various small perf optimizations

* restore-daemon: adapt to zpool output changes in 2.3

* fix #6358: remove group note file if present on group destroy

* docs: tuning: list default and maximum values for `gc-cache-capacity`

* backup info: avoid additional stat syscall for protected check

* tools: lru cache: document limitations for cache capacity

* garbage collection: bypass cache if gc-cache-capacity is 0

-- Proxmox Support Team <support@proxmox.com> Wed, 04 Jun 2025 14:42:05 +0200

rust-proxmox-backup (3.4.1-1) bookworm; urgency=medium

* ui: token view: fix typo in 'lose' and rephrase token regenerate dialog
message for more clarity.

* restrict consent-banner text length to 64 KiB.

* docs: describe the intent of the statically linked pbs client.

* api: backup: include previous snapshot name in log message.

* garbage collection: account for created/deleted index files concurrently
to GC to avoid potentially confusing log messages.

* garbage collection: fix rare race in chunk marking phase for setups doing
high-frequency backups in quick succession while immediately pruning to a
single backup snapshot being left over after each such backup.

* tape: wait for calibration of LTO-9 tapes in general, not just in the
initial tape format procedure.

-- Proxmox Support Team <support@proxmox.com> Wed, 16 Apr 2025 14:45:37 +0200

rust-proxmox-backup (3.4.0-1) bookworm; urgency=medium

* fix #4788: build statically linked version of the proxmox-backup-client
package.

* ui: sync job: change the rate limit direction based on sync direction.

* docs: mention how to set the push sync jobs rate limit

* ui: set error mask: ensure that message is html-encoded to avoid visual
glitches.

* api server: increase maximal request body size from 64 kiB to 512 kiB,
similar to a recent change for our perl based projects.

* notifications: include Content-Length header for broader compatibility in
the webhook and gotify targets.

* notifications: allow overriding notification templates.

* docs: notifications: add section about how to use custom templates

* sync: print whole error chain per group on failure for more context.

* ui: options-view: fix typo in empty-text for GC tuning option.

* memory info: use the "MemAvailable" field from '/proc/meminfo' to compute
used memory to fix overestimation of that metric and to better align with
what modern versions of tools like `free` do and to future proof against
changes in how the kernel accounts for memory usage.

* add "MemAvailable" field to ProcFsMemInfo to promote its usage over the
existing "MemFree" field, which is almost never the right choice. This new
field will be provided to external metric servers.

* docs: mention different name resolution for statically linked binary.

* docs: add basic info for how to install the statically linked client.

* docs: mention new verify-only and encrypted-only flags for sync jobs.

-- Proxmox Support Team <support@proxmox.com> Wed, 09 Apr 2025 17:41:38 +0200

rust-proxmox-backup (3.3.7-1) bookworm; urgency=medium

* fix #5982: garbage collection: add a check to ensure that the underlying
file system supports and honors file access time (atime) updates.
The check is performed once on datastore creation and on start of every
garbage collection (GC) task, just to be sure. It can be disabled in the
datastore tuning options.

* garbage collection: support setting a custom access time cutoff,
overriding the default of one day and five minutes.

* ui: expose flag for GC access time support safety check and the access
time cutoff override in datastore tuning options.

* docs: describe rationale for new GC access time update check setting and
the access time cutoff check in tuning options.

* access control: add support to mark a specific authentication realm as
default selected realm for the login user interface.

* fix #4382: api: access control: remove permissions of token on deletion.

* fix #3887: api: access control: allow users to regenerate the secret of an
API token without changing any existing ACLs.

* fix #6072: sync jobs: support flags to limit sync to only encrypted and/or
verified snapshots.

* ui: datastore tuning options: expose overriding GC cache capacity so that
admins can either restrict the peak memory usage during GC or allow GC to
use more memory to reduce file system IO even for huge (multiple TiB)
referenced data in backup groups.

* ui: datastore tuning options: increase width and rework labels to provide
a tiny bit more context about what these options are.

* ui: sync job: increase edit window width to 720px to make it less cramped.

* ui: sync job: small field label casing consistency fixes.

-- Proxmox Support Team <support@proxmox.com> Sat, 05 Apr 2025 17:54:31 +0200

rust-proxmox-backup (3.3.6-1) bookworm; urgency=medium

* datastore: ignore group locking errors when removing snapshots, they
normally happen only due to old-locking, and the underlying snapshot is
deleted in any case at this point, so it's no help to confuse the user.

* api: datastore: add error message on failed removal due to old locking and
tell any admin what they can do to switch to the new locking.

* ui: only add delete parameter on token edit, not when creating tokens.

* pbs-client: allow reading fingerprint from system credential.

* docs: client: add section about system credentials integration.

-- Proxmox Support Team <support@proxmox.com> Thu, 03 Apr 2025 17:57:02 +0200

rust-proxmox-backup (3.3.5-1) bookworm; urgency=medium

* api: config: use guard for unmounting on failed datastore creation

* client: align description for backup specification to docs, using
`archive-name` and `type` over `label` and `ext`.

* client: read credentials from CREDENTIALS_DIRECTORY environment variable
following the "System and Service Credentials" specification. This allows
users to use native systemd capabilities for credential management if the
proxmox-backup-client is used in systemd units or, e.g., through a wrapper
like systemd-run.

* fix #3935: datastore/api/backup: move datastore locking to '/run' to avoid
that lock-files can block deleting backup groups or snapshots on the
datastore and to decouple locking from the underlying datastore
file-system.

* api: fix race when changing the owner of a backup-group.

* fix #3336: datastore: remove group if the last snapshot is removed to
avoid confusing situations where the group directory still exists and
blocks re-creating a group with another owner even though the empty group
was not visible in the web UI.

* notifications: clean-up and add dedicated types for all templates to
allow declaring that interface stable in preparation for allowing
overriding them in the future (not included in this release).

* tape: introduce a tape backup job worker-thread option for restores.
Depending on the underlying storage, using more threads can dramatically
improve the restore speed. Especially fast storage with a low penalty for
random access, like flash storage (SSDs), can profit from using more
worker threads. But on file systems backed by spinning disks (HDDs) the
performance can even degrade with more threads. This is why for now the
default is left at a single thread and the admin needs to tune this for
their storage.

* garbage collection: generate index file list via datastore iterators in a
structured manner.

* fix #5331: garbage collection: avoid multiple chunk atime updates by
keeping track of the recently marked chunks in phase 1 of garbage
collection, avoiding repeated atime updates via relatively expensive
utimensat (touch) calls. Use an LRU cache with size 32 MiB for tracking
already processed chunks; this fully covers backup groups referencing up
to 4 TiB of actual chunks, and even bigger ones can still benefit from
the cache. On some real-world benchmarks of a datastore with 1.5 million
chunks, an original data usage of 120 TiB and a referenced data usage of
2.7 TiB (high deduplication count due to long-term history) we measured
21.1 times fewer file updates (31.6 million) and a 6.1 times reduction in
total GC runtime (155.4 s to 22.8 s) on a ZFS RAID 10 system consisting
of spinning HDDs and a special device mirror backed by datacenter SSDs.

* logging helper: use new builder initializer – no functional change
intended.

-- Proxmox Support Team <support@proxmox.com> Wed, 02 Apr 2025 19:42:38 +0200

rust-proxmox-backup (3.3.4-1) bookworm; urgency=medium

* fix #6185: client/docs: explicitly mention archive name restrictions

* docs: using-the-installer: adapt to raised root password length requirement

* disks: wipe: replace dd with write_all_at for zeroing disk

* fix #5946: disks: wipe: ensure GPT header backup is wiped

* docs: fix hash collision probability comparison

-- Proxmox Support Team <support@proxmox.com> Thu, 13 Mar 2025 13:04:05 +0100

rust-proxmox-backup (3.3.3-1) bookworm; urgency=medium

* api: datastore list: move checking whether a datastore is mounted to after
we ensured that the user may actually access it. While this had no effect
security-wise, it could significantly increase the cost of this API
endpoint in big setups with many datastores and many tenants that each
have only access to one, or a small set, of datastores.

* Revert "fix #5710: api: backup: stat known chunks on backup finish" due to
a big performance impact relative to what this is protecting against. We
will work out a more efficient fix for this issue in the future.

* prune simulator: show backup entries that are kept also in the flat list
of backups, not just in the calendar view.

* docs: improve the description for the garbage collection's cut-off time

* pxar extract: correctly honor the overwrite flag

* api: datastore: add missing log context for prune to avoid a case where
the worker state is unknown after it finished.

* docs: add synopsis and basic docs for prune job configuration

* backup verification: handle manifest update errors as non-fatal to avoid
that the job fails, as we want to continue with verifying the rest to
ensure we uncover as many potential problems as possible.

* fix #4408: docs: add 'disaster recovery' section for tapes

* fix #6069: prune simulator: correctly handle schedules that mix both a
range and a step size at once.

* client: pxar: fix a race condition where the backup upload stream can miss
an error from the create archive function, because the error state is only
set after the backup stream was already polled. This avoids an edge case
where a file-based backup was incorrectly marked as having succeeded while
there was an error.

-- Proxmox Support Team <support@proxmox.com> Tue, 11 Feb 2025 20:24:27 +0100

rust-proxmox-backup (3.3.2-2) bookworm; urgency=medium

* file-restore: fix regression with the new blockdev method used to pass
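The 3.3.5 entry above mentions that the client can pick up secrets via systemd's "System and Service Credentials" mechanism through `CREDENTIALS_DIRECTORY`. An illustrative invocation via `systemd-run`; the credential name `fingerprint`, the file path, and the repository string are placeholders, not values taken from this changelog:

```sh
# systemd places the credential under $CREDENTIALS_DIRECTORY inside the transient unit,
# where proxmox-backup-client can read it instead of taking the secret from the CLI or environment.
systemd-run --pipe --wait \
    --property=LoadCredential=fingerprint:/etc/proxmox-backup-client/fingerprint \
    proxmox-backup-client list --repository 'user@pbs@backup-server:store1'
```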
debian/control (vendored, 162 changed lines)
@ -16,109 +16,124 @@ Build-Depends: bash-completion,
|
||||
libfuse3-dev,
|
||||
librust-anyhow-1+default-dev,
|
||||
librust-async-trait-0.1+default-dev (>= 0.1.56-~~),
|
||||
librust-base64-0.13+default-dev,
|
||||
librust-bitflags-2+default-dev (>= 2.4-~~),
|
||||
librust-bytes-1+default-dev,
|
||||
librust-cidr-0.2+default-dev (>= 0.2.1-~~),
|
||||
librust-cidr-0.3+default-dev,
|
||||
librust-const-format-0.2+default-dev,
|
||||
librust-crc32fast-1+default-dev,
|
||||
librust-crossbeam-channel-0.5+default-dev,
|
||||
librust-endian-trait-0.6+arrays-dev,
|
||||
librust-endian-trait-0.6+default-dev,
|
||||
librust-env-logger-0.10+default-dev,
|
||||
librust-env-logger-0.11+default-dev,
|
||||
librust-foreign-types-0.3+default-dev,
|
||||
librust-futures-0.3+default-dev,
|
||||
librust-h2-0.4+default-dev,
|
||||
librust-h2-0.4+stream-dev,
|
||||
librust-hex-0.4+default-dev (>= 0.4.3-~~),
|
||||
librust-hex-0.4+serde-dev (>= 0.4.3-~~),
|
||||
librust-http-0.2+default-dev,
|
||||
librust-hyper-0.14+default-dev,
|
||||
librust-hyper-0.14+full-dev,
|
||||
librust-hickory-resolver-0.24+system-config-dev (>= 0.24.1-~~),
|
||||
librust-hickory-resolver-0.24+tokio-runtime-dev (>= 0.24.1-~~),
|
||||
librust-http-body-util-0.1+default-dev,
|
||||
librust-hyper-1+default-dev,
|
||||
librust-hyper-1+full-dev,
|
||||
librust-hyper-util-0.1+client-dev,
|
||||
librust-hyper-util-0.1+client-legacy-dev,
|
||||
librust-hyper-util-0.1+default-dev,
|
||||
librust-hyper-util-0.1+http1-dev,
|
||||
librust-hyper-util-0.1+http2-dev,
|
||||
librust-hyper-util-0.1+server-auto-dev,
|
||||
librust-hyper-util-0.1+server-dev,
|
||||
librust-hyper-util-0.1+server-graceful-dev,
|
||||
librust-hyper-util-0.1+service-dev,
|
||||
librust-hyper-util-0.1+tokio-dev,
|
||||
librust-libc-0.2+default-dev,
|
||||
librust-log-0.4+default-dev (>= 0.4.17-~~),
|
||||
librust-nix-0.26+default-dev (>= 0.26.1-~~),
|
||||
librust-nix-0.29+default-dev,
|
||||
librust-nix-0.29+feature-dev,
|
||||
librust-nix-0.29+mount-dev,
|
||||
librust-nix-0.29+reboot-dev,
|
||||
librust-nom-7+default-dev,
|
||||
librust-num-traits-0.2+default-dev,
|
||||
librust-once-cell-1+default-dev (>= 1.3.1-~~),
|
||||
librust-openssl-0.10+default-dev (>= 0.10.40-~~),
|
||||
librust-pathpatterns-0.3+default-dev,
|
||||
librust-pbs-api-types-0.2+default-dev,
|
||||
librust-pathpatterns-1+default-dev,
|
||||
librust-pbs-api-types-1+default-dev (>= 1.0.2~~),
|
||||
librust-percent-encoding-2+default-dev (>= 2.1-~~),
|
||||
librust-pin-project-lite-0.2+default-dev,
|
||||
librust-proxmox-acme-0.5+default-dev (>= 0.5.3-~~),
|
||||
librust-proxmox-apt-0.11+cache-dev,
|
||||
librust-proxmox-apt-0.11+default-dev,
|
||||
librust-proxmox-apt-api-types-1+default-dev (>= 1.0.1-~~),
|
||||
librust-proxmox-async-0.4+default-dev,
|
||||
librust-proxmox-auth-api-0.4+api-dev,
|
||||
librust-proxmox-auth-api-0.4+api-types-dev,
|
||||
librust-proxmox-auth-api-0.4+default-dev,
|
||||
librust-proxmox-auth-api-0.4+pam-authenticator-dev,
|
||||
librust-proxmox-acme-1+default-dev,
|
||||
librust-proxmox-apt-0.99+cache-dev,
|
||||
librust-proxmox-apt-0.99+default-dev,
|
||||
librust-proxmox-apt-api-types-2+default-dev,
|
||||
librust-proxmox-async-0.5+default-dev,
|
||||
librust-proxmox-auth-api-1+api-dev (>= 1.0.2),
|
||||
librust-proxmox-auth-api-1+default-dev,
|
||||
librust-proxmox-auth-api-1+pam-authenticator-dev,
|
||||
librust-proxmox-base64-1+default-dev,
|
||||
librust-proxmox-borrow-1+default-dev,
|
||||
librust-proxmox-compression-0.2+default-dev,
|
||||
librust-proxmox-config-digest-0.1+default-dev,
|
||||
librust-proxmox-daemon-0.1+default-dev,
|
||||
librust-proxmox-fuse-0.1+default-dev (>= 0.1.3-~~),
|
||||
librust-proxmox-http-0.9+client-dev,
|
||||
librust-proxmox-http-0.9+client-trait-dev,
|
||||
librust-proxmox-http-0.9+default-dev,
|
||||
librust-proxmox-http-0.9+http-helpers-dev,
|
||||
librust-proxmox-http-0.9+proxmox-async-dev,
|
||||
librust-proxmox-http-0.9+rate-limited-stream-dev,
|
||||
librust-proxmox-http-0.9+rate-limiter-dev,
|
||||
librust-proxmox-http-0.9+websocket-dev,
|
||||
librust-proxmox-human-byte-0.1+default-dev,
|
||||
librust-proxmox-compression-1+default-dev,
|
||||
librust-proxmox-config-digest-1+default-dev,
|
||||
librust-proxmox-daemon-1+default-dev,
|
||||
librust-proxmox-fuse-1+default-dev,
|
||||
librust-proxmox-http-1+body-dev,
|
||||
librust-proxmox-http-1+client-dev,
|
||||
librust-proxmox-http-1+client-trait-dev,
|
||||
librust-proxmox-http-1+default-dev,
|
||||
librust-proxmox-http-1+http-helpers-dev,
|
||||
librust-proxmox-http-1+proxmox-async-dev,
|
||||
librust-proxmox-http-1+rate-limited-stream-dev,
|
||||
librust-proxmox-http-1+rate-limiter-dev,
|
||||
librust-proxmox-http-1+websocket-dev,
|
||||
librust-proxmox-human-byte-1+default-dev,
|
||||
librust-proxmox-io-1+default-dev (>= 1.0.1-~~),
|
||||
librust-proxmox-io-1+tokio-dev (>= 1.0.1-~~),
|
||||
librust-proxmox-lang-1+default-dev (>= 1.1-~~),
|
||||
librust-proxmox-ldap-0.2+default-dev (>= 0.2.1-~~),
|
||||
librust-proxmox-log-0.2+default-dev (>= 0.2.6-~~),
|
||||
librust-proxmox-metrics-0.3+default-dev (>= 0.3.1-~~),
|
||||
librust-proxmox-notify-0.5+default-dev (>= 0.5.1-~~),
|
||||
librust-proxmox-notify-0.5+pbs-context-dev (>= 0.5.1-~~),
|
||||
librust-proxmox-openid-0.10+default-dev,
|
||||
librust-proxmox-rest-server-0.8+default-dev (>= 0.8.5-~~),
|
||||
librust-proxmox-rest-server-0.8+rate-limited-stream-dev (>= 0.8.5-~~),
|
||||
librust-proxmox-rest-server-0.8+templates-dev (>= 0.8.5-~~),
|
||||
librust-proxmox-router-3+cli-dev,
|
||||
librust-proxmox-ldap-1+default-dev,
|
||||
librust-proxmox-log-1+default-dev,
|
||||
librust-proxmox-metrics-1+default-dev,
|
||||
librust-proxmox-notify-1+default-dev,
|
||||
librust-proxmox-notify-1+pbs-context-dev,
|
||||
librust-proxmox-openid-1+default-dev,
|
||||
librust-proxmox-rest-server-1+default-dev (>= 1.0.1),
|
||||
librust-proxmox-rest-server-1+rate-limited-stream-dev,
|
||||
librust-proxmox-rest-server-1+templates-dev,
|
||||
librust-proxmox-router-3+cli-dev (>= 3.2.2-~),
|
||||
librust-proxmox-router-3+server-dev,
|
||||
librust-proxmox-rrd-0.4+default-dev,
|
||||
librust-proxmox-rrd-1+default-dev,
|
||||
librust-proxmox-rrd-api-types-1+default-dev (>= 1.0.2-~~),
|
||||
librust-proxmox-s3-client-1-dev (>= 1.0.5),
|
||||
librust-proxmox-schema-4+api-macro-dev,
|
||||
librust-proxmox-schema-4+default-dev,
|
||||
librust-proxmox-section-config-2+default-dev,
|
||||
librust-proxmox-serde-0.1+default-dev (>= 0.1.1-~~),
|
||||
librust-proxmox-serde-0.1+serde-json-dev (>= 0.1.1-~~),
|
||||
librust-proxmox-shared-cache-0.1+default-dev,
|
||||
librust-proxmox-shared-memory-0.3+default-dev,
|
||||
librust-proxmox-sortable-macro-0.1+default-dev (>= 0.1.2-~~),
|
||||
librust-proxmox-subscription-0.5+api-types-dev,
|
||||
librust-proxmox-subscription-0.5+default-dev,
|
||||
librust-proxmox-sys-0.6+acl-dev (>= 0.6.5-~~),
|
||||
librust-proxmox-sys-0.6+crypt-dev (>= 0.6.5-~~),
|
||||
librust-proxmox-sys-0.6+default-dev (>= 0.6.5-~~),
|
||||
librust-proxmox-sys-0.6+logrotate-dev (>= 0.6.5-~~),
|
||||
librust-proxmox-sys-0.6+timer-dev (>= 0.6.5-~~),
|
||||
librust-proxmox-systemd-0.1+default-dev,
|
||||
librust-proxmox-tfa-5+api-dev,
|
||||
librust-proxmox-tfa-5+api-types-dev,
|
||||
librust-proxmox-tfa-5+default-dev,
|
||||
librust-proxmox-section-config-3+default-dev,
|
||||
librust-proxmox-serde-1+default-dev,
|
||||
librust-proxmox-serde-1+serde-json-dev,
|
||||
librust-proxmox-shared-cache-1+default-dev,
|
||||
librust-proxmox-shared-memory-1+default-dev,
|
||||
librust-proxmox-sortable-macro-1+default-dev,
|
||||
librust-proxmox-subscription-1+api-types-dev,
|
||||
librust-proxmox-subscription-1+default-dev,
|
||||
librust-proxmox-sys-1+acl-dev,
|
||||
librust-proxmox-sys-1+crypt-dev,
|
||||
librust-proxmox-sys-1+default-dev,
|
||||
librust-proxmox-sys-1+logrotate-dev,
|
||||
librust-proxmox-sys-1+timer-dev,
|
||||
librust-proxmox-systemd-1+default-dev,
|
||||
librust-proxmox-tfa-6+api-dev,
|
||||
librust-proxmox-tfa-6+api-types-dev,
|
||||
librust-proxmox-tfa-6+default-dev,
|
||||
librust-proxmox-time-2+default-dev,
|
||||
librust-proxmox-uuid-1+default-dev,
|
||||
librust-proxmox-uuid-1+serde-dev,
|
||||
librust-proxmox-worker-task-0.1+default-dev,
|
||||
librust-pxar-0.12+default-dev (>= 0.12.1-~~),
|
||||
librust-proxmox-worker-task-1+default-dev,
|
||||
librust-pxar-1+default-dev,
|
||||
librust-regex-1+default-dev (>= 1.5.5-~~),
|
||||
librust-rustyline-9+default-dev,
|
||||
librust-rustyline-14+default-dev,
|
||||
librust-serde-1+default-dev,
|
||||
librust-serde-1+derive-dev,
|
||||
librust-serde-json-1+default-dev,
|
||||
librust-serde-plain-1+default-dev,
|
||||
librust-syslog-6+default-dev,
|
||||
librust-tar-0.4+default-dev,
|
||||
librust-termcolor-1+default-dev (>= 1.1.2-~~),
|
||||
librust-thiserror-1+default-dev,
|
||||
librust-thiserror-2+default-dev,
|
||||
librust-tokio-1+default-dev (>= 1.6-~~),
|
||||
librust-tokio-1+fs-dev (>= 1.6-~~),
|
||||
librust-tokio-1+io-std-dev (>= 1.6-~~),
|
||||
@ -139,19 +154,18 @@ Build-Depends: bash-completion,
|
||||
librust-tokio-util-0.7+io-dev,
|
||||
librust-tower-service-0.3+default-dev,
|
||||
librust-tracing-0.1+default-dev,
|
||||
librust-udev-0.4+default-dev,
|
||||
librust-udev-0.9+default-dev,
|
||||
librust-url-2+default-dev (>= 2.1-~~),
|
||||
librust-walkdir-2+default-dev,
|
||||
librust-xdg-2+default-dev (>= 2.2-~~),
|
||||
librust-zstd-0.12+bindgen-dev,
|
||||
librust-zstd-0.12+default-dev,
|
||||
librust-zstd-safe-6+default-dev,
|
||||
librust-zstd-0.13+default-dev,
|
||||
librust-zstd-safe-7+default-dev,
|
||||
libsgutils2-dev,
|
||||
libstd-rust-dev,
|
||||
libsystemd-dev (>= 246-~~),
|
||||
patchelf,
|
||||
proxmox-widget-toolkit-dev <!nodoc>,
|
||||
pve-eslint (>= 7.18.0~),
|
||||
proxmox-biome,
|
||||
python3-docutils,
|
||||
python3-pygments,
|
||||
python3-sphinx <!nodoc>,
|
||||
@ -176,7 +190,7 @@ Depends: fonts-font-awesome,
|
||||
libjs-extjs (>= 7~),
|
||||
libjs-qrcodejs (>= 1.20201119),
|
||||
libproxmox-acme-plugins,
|
||||
libsgutils2-1.46-2,
|
||||
libsgutils2-1.48,
|
||||
libzstd1 (>= 1.3.8),
|
||||
lvm2,
|
||||
openssh-server,
|
||||
@ -205,6 +219,14 @@ Description: Proxmox Backup Client tools
|
||||
This package contains the Proxmox Backup client, which provides a
|
||||
simple command line tool to create and restore backups.
|
||||
|
||||
Package: proxmox-backup-client-static
|
||||
Architecture: any
|
||||
Depends: qrencode, ${misc:Depends},
|
||||
Conflicts: proxmox-backup-client,
|
||||
Description: Proxmox Backup Client tools (statically linked)
|
||||
This package contains the Proxmox Backup client, which provides a
|
||||
simple command line tool to create and restore backups.
|
||||
|
||||
Package: proxmox-backup-docs
|
||||
Build-Profiles: <!nodoc>
|
||||
Section: doc
|
||||
|
debian/lintian-overrides (vendored, 8 changed lines)
@@ -1,8 +1,8 @@
proxmox-backup-server: mail-transport-agent-dependency-does-not-specify-default-mta
proxmox-backup-server: package-installs-apt-sources [etc/apt/sources.list.d/pbs-enterprise.list]
proxmox-backup-server: elevated-privileges 4755 root/root [usr/lib/x86_64-linux-gnu/proxmox-backup/sg-tape-cmd]
proxmox-backup-server: systemd-service-file-refers-to-unusual-wantedby-target getty.target [lib/systemd/system/proxmox-backup-banner.service]
proxmox-backup-server: mail-transport-agent-dependency-does-not-specify-default-mta
proxmox-backup-server: package-installs-apt-sources [etc/apt/sources.list.d/pbs-enterprise.sources]
proxmox-backup-server: systemd-service-file-refers-to-unusual-wantedby-target getty.target [usr/lib/systemd/system/proxmox-backup-banner.service]
proxmox-backup-server: uses-dpkg-database-directly [usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-api]
proxmox-backup-server: uses-dpkg-database-directly [usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-proxy]
proxmox-backup-server: uses-dpkg-database-directly [usr/sbin/pbs2to3]
proxmox-backup-server: uses-dpkg-database-directly [usr/sbin/pbs3to4]
proxmox-backup-server: uses-dpkg-database-directly [usr/sbin/proxmox-backup-debug]
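Since the overrides now point at the new `.sources` file, the `/usr/lib/systemd` unit path and the added `pbs3to4` binary, it is worth re-checking that each entry still matches a tag lintian actually emits. A quick way to do that on a freshly built package (the file name is only an example):

```sh
# --show-overrides also lists the tags that debian/lintian-overrides would normally hide
lintian --show-overrides ../proxmox-backup-server_4.0.6-2_amd64.deb
```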
debian/postinst (vendored, 76 changed lines)
@@ -4,31 +4,23 @@ set -e

#DEBHELPER#

update_sync_job() {
job="$1"

echo "Updating sync job '$job' to make old 'remove-vanished' default explicit.."
proxmox-backup-manager sync-job update "$job" --remove-vanished true \
|| echo "Failed, please check sync.cfg manually!"
}

case "$1" in
configure)
# need to have user backup in the tape group
usermod -a -G tape backup

# FIXME: remove after beta is over and add hunk to actively remove the repo
BETA_SOURCES="/etc/apt/sources.list.d/pbs-test-for-beta.sources"
if test -e /proxmox_install_mode && ! test -f "$BETA_SOURCES"; then
echo "Adding pbs-test repo to '$BETA_SOURCES' to enable updates during Proxmox Backup Server 4.0 BETA"
printf 'Types: deb\nURIs: http://download.proxmox.com/debian/pbs\nSuites: trixie\nComponents: pbs-test\nSigned-By: /usr/share/keyrings/proxmox-archive-keyring.gpg\n' \
| tee "$BETA_SOURCES"
fi

# modeled after dh_systemd_start output
systemctl --system daemon-reload >/dev/null || true
if [ -n "$2" ]; then
if dpkg --compare-versions "$2" 'lt' '1.0.7-1'; then
# there was an issue with reloading and systemd being confused in older daemon versions
# so restart instead of reload if upgrading from there, see commit 0ec79339f7aebf9
# FIXME: remove with PBS 2.1
echo "Upgrading from older proxmox-backup-server: restart (not reload) daemons"
_dh_action=try-restart
else
_dh_action=try-reload-or-restart
fi
_dh_action=try-reload-or-restart
else
_dh_action=start
fi
@@ -41,45 +33,25 @@ case "$1" in
flock -w 30 /var/log/proxmox-backup/tasks/active.lock sed -i 's/:termproxy::\([^@]\+\): /:termproxy::\1@pam: /' /var/log/proxmox-backup/tasks/active || true
fi

if dpkg --compare-versions "$2" 'lt' '2.2.2~'; then
echo "moving prune schedule from datacenter config to new prune job config"
proxmox-backup-manager update-to-prune-jobs-config \
|| echo "Failed to move prune jobs, please check manually"
true
fi

if dpkg --compare-versions "$2" 'lt' '2.3.1~' && test -e /etc/proxmox-backup/.datastore.lck; then
lock_user="$(stat --format '%U' /etc/proxmox-backup/.datastore.lck)"
if [ "${lock_user}" != "backup" ]; then
echo "updating /etc/proxmox-backup/.datastore.lck from wrong user '${lock_user}' to 'backup'"
chown backup:backup /etc/proxmox-backup/.datastore.lck \
|| printf "Failed to fix datastore.lck user, please retry manually with:\n\n\t%s\n\n" \
"chown backup:backup /etc/proxmox-backup/.datastore.lck"
# TODO: remove with 4.1+, this was just exposed on internal testing.
if dpkg --compare-versions "$2" 'lt' '4.0.5~'; then
if [ -e /etc/proxmox-backup/s3.cfg ]; then
sed -i 's/^s3client:/s3-endpoint:/' /etc/proxmox-backup/s3.cfg \
|| echo "Failed to rename 's3client' config section-type to 's3-endpoint' in /etc/proxmox-backup/s3.cfg."
fi
fi

if dpkg --compare-versions "$2" 'lt' '2.1.3~' && test -e /etc/proxmox-backup/sync.cfg; then
prev_job=""

# read from HERE doc because of POSIX sh limitations
while read -r key value; do
if test "$key" = "sync:"; then
if test -n "$prev_job"; then
# previous job doesn't have an explicit value
update_sync_job "$prev_job"
fi
prev_job=$value
else
prev_job=""
fi
done <<EOF
$(grep -e '^sync:' -e 'remove-vanished' /etc/proxmox-backup/sync.cfg)
EOF
if test -n "$prev_job"; then
# last job doesn't have an explicit value
update_sync_job "$prev_job"
fi
# TODO: remove with PBS 5+
if dpkg --compare-versions "$2" 'lt' '4.0.2~'; then
proxmox-backup-manager migrate-config default-notification-mode \
|| echo "Failed to migrate tape-job/datastore notification mode, please check manually"
fi

if dpkg --compare-versions "$2" 'lt' '3.3.5~'; then
# ensure old locking is used by the daemon until a reboot happened
touch "/run/proxmox-backup/old-locking"
fi

fi
;;
2
debian/proxmox-backup-client-static.bash-completion
vendored
Normal file
@ -0,0 +1,2 @@
|
||||
debian/proxmox-backup-client.bc proxmox-backup-client
|
||||
debian/pxar.bc pxar
|
4
debian/proxmox-backup-client-static.install
vendored
Normal file
@ -0,0 +1,4 @@
|
||||
usr/share/man/man1/proxmox-backup-client.1
|
||||
usr/share/man/man1/pxar.1
|
||||
usr/share/zsh/vendor-completions/_proxmox-backup-client
|
||||
usr/share/zsh/vendor-completions/_pxar
|
2
debian/proxmox-backup-client-static.lintian-overrides
vendored
Normal file
@ -0,0 +1,2 @@
|
||||
proxmox-backup-client-static: embedded-library zlib [usr/bin/proxmox-backup-client]
|
||||
proxmox-backup-client-static: embedded-library zlib [usr/bin/pxar]
|
21
debian/proxmox-backup-server.install
vendored
@ -1,10 +1,10 @@
|
||||
etc/pbs-enterprise.list /etc/apt/sources.list.d/
|
||||
etc/proxmox-backup-banner.service /lib/systemd/system/
|
||||
etc/proxmox-backup-daily-update.service /lib/systemd/system/
|
||||
etc/proxmox-backup-daily-update.timer /lib/systemd/system/
|
||||
etc/proxmox-backup-proxy.service /lib/systemd/system/
|
||||
etc/proxmox-backup.service /lib/systemd/system/
|
||||
etc/removable-device-attach@.service /lib/systemd/system/
|
||||
etc/pbs-enterprise.sources /etc/apt/sources.list.d/
|
||||
etc/proxmox-backup-banner.service /usr/lib/systemd/system/
|
||||
etc/proxmox-backup-daily-update.service /usr/lib/systemd/system/
|
||||
etc/proxmox-backup-daily-update.timer /usr/lib/systemd/system/
|
||||
etc/proxmox-backup-proxy.service /usr/lib/systemd/system/
|
||||
etc/proxmox-backup.service /usr/lib/systemd/system/
|
||||
etc/removable-device-attach@.service /usr/lib/systemd/system/
|
||||
usr/bin/pmt
|
||||
usr/bin/pmtx
|
||||
usr/bin/proxmox-tape
|
||||
@ -13,14 +13,14 @@ usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-banner
|
||||
usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-proxy
|
||||
usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-daily-update
|
||||
usr/lib/x86_64-linux-gnu/proxmox-backup/sg-tape-cmd
|
||||
usr/sbin/pbs2to3
|
||||
usr/sbin/pbs3to4
|
||||
usr/sbin/proxmox-backup-debug
|
||||
usr/sbin/proxmox-backup-manager
|
||||
usr/share/javascript/proxmox-backup/css/ext6-pbs.css
|
||||
usr/share/javascript/proxmox-backup/images
|
||||
usr/share/javascript/proxmox-backup/index.hbs
|
||||
usr/share/javascript/proxmox-backup/js/proxmox-backup-gui.js
|
||||
usr/share/man/man1/pbs2to3.1
|
||||
usr/share/man/man1/pbs3to4.1
|
||||
usr/share/man/man1/pmt.1
|
||||
usr/share/man/man1/pmtx.1
|
||||
usr/share/man/man1/proxmox-backup-debug.1
|
||||
@ -34,13 +34,13 @@ usr/share/man/man5/media-pool.cfg.5
|
||||
usr/share/man/man5/notifications-priv.cfg.5
|
||||
usr/share/man/man5/notifications.cfg.5
|
||||
usr/share/man/man5/proxmox-backup.node.cfg.5
|
||||
usr/share/man/man5/prune.cfg.5
|
||||
usr/share/man/man5/remote.cfg.5
|
||||
usr/share/man/man5/sync.cfg.5
|
||||
usr/share/man/man5/tape-job.cfg.5
|
||||
usr/share/man/man5/tape.cfg.5
|
||||
usr/share/man/man5/user.cfg.5
|
||||
usr/share/man/man5/verification.cfg.5
|
||||
usr/share/man/man5/prune.cfg.5
|
||||
usr/share/proxmox-backup/templates/default/acme-err-body.txt.hbs
|
||||
usr/share/proxmox-backup/templates/default/acme-err-subject.txt.hbs
|
||||
usr/share/proxmox-backup/templates/default/gc-err-body.txt.hbs
|
||||
@ -63,7 +63,6 @@ usr/share/proxmox-backup/templates/default/tape-backup-ok-body.txt.hbs
|
||||
usr/share/proxmox-backup/templates/default/tape-backup-ok-subject.txt.hbs
|
||||
usr/share/proxmox-backup/templates/default/tape-load-body.txt.hbs
|
||||
usr/share/proxmox-backup/templates/default/tape-load-subject.txt.hbs
|
||||
usr/share/proxmox-backup/templates/default/test-body.html.hbs
|
||||
usr/share/proxmox-backup/templates/default/test-body.txt.hbs
|
||||
usr/share/proxmox-backup/templates/default/test-subject.txt.hbs
|
||||
usr/share/proxmox-backup/templates/default/verify-err-body.txt.hbs
|
||||
|
4
debian/proxmox-backup-server.udev
vendored
@ -5,7 +5,7 @@
|
||||
ACTION=="remove", GOTO="persistent_storage_tape_end"
|
||||
ENV{UDEV_DISABLE_PERSISTENT_STORAGE_RULES_FLAG}=="1", GOTO="persistent_storage_tape_end"
|
||||
|
||||
# also see: /lib/udev/rules.d/60-persistent-storage-tape.rules
|
||||
# also see: /usr/lib/udev/rules.d/60-persistent-storage-tape.rules
|
||||
|
||||
SUBSYSTEM=="scsi_generic", SUBSYSTEMS=="scsi", ATTRS{type}=="1", IMPORT{program}="scsi_id --sg-version=3 --export --whitelisted -d $devnode", \
|
||||
SYMLINK+="tape/by-id/scsi-$env{ID_SERIAL}-sg"
|
||||
@ -18,4 +18,4 @@ SUBSYSTEM=="scsi_generic", SUBSYSTEMS=="scsi", ATTRS{type}=="1", ENV{ID_SCSI_SER
|
||||
LABEL="persistent_storage_tape_end"
|
||||
|
||||
# triggers the mounting of a removable device
|
||||
ACTION=="add", SUBSYSTEM=="block", ENV{ID_FS_UUID}!="", TAG+="systemd", ENV{SYSTEMD_WANTS}="removable-device-attach@$env{ID_FS_UUID}"
|
||||
ACTION=="add", SUBSYSTEM=="block", ENV{ID_FS_UUID}!="", TAG+="systemd", ENV{SYSTEMD_WANTS}="removable-device-attach@$env{ID_FS_UUID}"
|
||||
|
16
debian/rules
vendored
@ -7,6 +7,16 @@ include /usr/share/dpkg/pkg-info.mk
|
||||
include /usr/share/rustc/architecture.mk
|
||||
|
||||
export BUILD_MODE=release
|
||||
export CARGO_STATIC_CONFIG=--config debian/cargo_home/config.static.toml
|
||||
|
||||
# sync with Makefile!
|
||||
STATIC_TARGET_DIR := target/static-build
|
||||
ifeq ($(BUILD_MODE), release)
|
||||
STATIC_COMPILEDIR := $(STATIC_TARGET_DIR)/$(DEB_HOST_RUST_TYPE)/release
|
||||
else
|
||||
STATIC_COMPILEDIR := $(STATIC_TARGET_DIR)/$(DEB_HOST_RUST_TYPE)/debug
|
||||
endif
|
||||
# end sync with Makefile!
|
||||
|
||||
export CARGO=/usr/share/cargo/bin/cargo
|
||||
|
||||
@ -28,6 +38,9 @@ override_dh_auto_configure:
|
||||
@perl -ne 'if (/^version\s*=\s*"(\d+(?:\.\d+)+)"/) { my $$v_cargo = $$1; my $$v_deb = "$(DEB_VERSION_UPSTREAM)"; \
|
||||
die "ERROR: d/changelog <-> Cargo.toml version mismatch: $$v_cargo != $$v_deb\n" if $$v_cargo ne $$v_deb; exit(0); }' Cargo.toml
|
||||
$(CARGO) prepare-debian $(CURDIR)/debian/cargo_registry --link-from-system
|
||||
# add a new config for static building, sync with Makefile!
|
||||
cp debian/cargo_home/config.toml debian/cargo_home/config.static.toml
|
||||
sed -ri -e 's!^(rustflags = .*)\]$$!\1, "-C", "target-feature=+crt-static", "-L", "$(STATIC_COMPILEDIR)/deps-stubs/"\]!' debian/cargo_home/config.static.toml
|
||||
# `cargo build` and `cargo install` have different config precedence, symlink
|
||||
# the wrapper config into a place where `build` picks it up as well..
|
||||
# https://doc.rust-lang.org/cargo/commands/cargo-install.html#configuration-discovery
|
||||
@ -47,6 +60,9 @@ override_dh_auto_install:
|
||||
dh_auto_install -- \
|
||||
PROXY_USER=backup \
|
||||
LIBDIR=/usr/lib/$(DEB_HOST_MULTIARCH)
|
||||
mkdir -p debian/proxmox-backup-client-static/usr/bin
|
||||
mv debian/tmp/usr/bin/proxmox-backup-client-static debian/proxmox-backup-client-static/usr/bin/proxmox-backup-client
|
||||
mv debian/tmp/usr/bin/pxar-static debian/proxmox-backup-client-static/usr/bin/pxar
|
||||
|
||||
override_dh_installsystemd:
|
||||
dh_installsystemd -pproxmox-backup-server proxmox-backup-daily-update.timer
|
||||
|
@ -25,7 +25,7 @@ GENERATED_SYNOPSIS := \
|
||||
pxar/synopsis.rst \
|
||||
|
||||
MAN1_PAGES := \
|
||||
pbs2to3.1 \
|
||||
pbs3to4.1 \
|
||||
pmt.1 \
|
||||
pmtx.1 \
|
||||
proxmox-backup-client.1 \
|
||||
|
@ -88,7 +88,7 @@ class ReflabelMapper(Builder):
|
||||
#pprint(vars(node))
|
||||
|
||||
if hasattr(node, 'expect_referenced_by_id') and len(node['ids']) > 1: # explicit labels
|
||||
filename = self.env.doc2path(docname)
|
||||
filename = str(self.env.doc2path(docname))
|
||||
filename_html = re.sub('.rst', '.html', filename)
|
||||
|
||||
# node['ids'][0] contains a normalized version of the
|
||||
|
@ -1,3 +1,5 @@
|
||||
.. _client_usage:
|
||||
|
||||
Backup Client Usage
|
||||
===================
|
||||
|
||||
@ -44,6 +46,24 @@ user\@pbs!token@host:store ``user@pbs!token`` host:8007 store
|
||||
[ff80::51]:1234:mydatastore ``root@pam`` [ff80::51]:1234 mydatastore
|
||||
================================ ================== ================== ===========
|
||||
|
||||
.. _statically_linked_client:
|
||||
|
||||
Statically Linked Backup Client
|
||||
-------------------------------
|
||||
|
||||
A statically linked version of the Proxmox Backup client is available for
Linux-based systems where the regular client cannot be installed. Please note
that using the regular client is recommended whenever possible, as the
statically linked client is not a full replacement. For example, name
resolution is not performed via the mechanisms provided by libc, but by a
resolver written purely in the Rust programming language. Therefore, features
and modules provided by the Name Service Switch cannot be used.
|
||||
|
||||
The statically linked client is available via the ``pbs-client`` repository as
|
||||
described in the :ref:`installation <install_pbc>` section.
|
||||
|
||||
.. _environment-variables:
|
||||
|
||||
Environment Variables
|
||||
---------------------
|
||||
|
||||
@ -89,6 +109,43 @@ Environment Variables
|
||||
you can add arbitrary comments after the first newline.
|
||||
|
||||
|
||||
System and Service Credentials
|
||||
------------------------------
|
||||
|
||||
Some of the :ref:`environment variables <environment-variables>` above can be
|
||||
set using `system and service credentials <https://systemd.io/CREDENTIALS/>`_
|
||||
instead.
|
||||
|
||||
============================ ==============================================
|
||||
Environment Variable Credential Name Equivalent
|
||||
============================ ==============================================
|
||||
``PBS_REPOSITORY`` ``proxmox-backup-client.repository``
|
||||
``PBS_PASSWORD`` ``proxmox-backup-client.password``
|
||||
``PBS_ENCRYPTION_PASSWORD`` ``proxmox-backup-client.encryption-password``
|
||||
``PBS_FINGERPRINT`` ``proxmox-backup-client.fingerprint``
|
||||
============================ ==============================================
|
||||
|
||||
For example, the repository password can be stored in an encrypted file as
|
||||
follows:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# systemd-ask-password -n | systemd-creds encrypt --name=proxmox-backup-client.password - my-api-token.cred
|
||||
|
||||
The credential can then be reused inside of unit files or in a transient scope
|
||||
unit as follows:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# systemd-run --pipe --wait \
|
||||
--property=LoadCredentialEncrypted=proxmox-backup-client.password:/full/path/to/my-api-token.cred \
|
||||
--property=SetCredential=proxmox-backup-client.repository:'my_default_repository' \
|
||||
proxmox-backup-client ...
|
||||
|
||||
Additionally, system credentials (e.g. passed down from the hypervisor to a
|
||||
virtual machine via SMBIOS type 11) can be loaded on a service via
|
||||
`LoadCredential=` as described in the manual page ``systemd.exec(5)``.
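
As a minimal sketch, a service could load such credentials from a drop-in
file; the unit name, credential file path and repository value below are
placeholders and not part of the official documentation:

.. code-block:: console

   # cat /etc/systemd/system/nightly-backup.service.d/credentials.conf
   [Service]
   LoadCredentialEncrypted=proxmox-backup-client.password:/etc/credstore.encrypted/my-api-token.cred
   SetCredential=proxmox-backup-client.repository:my_default_repository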
|
||||
|
||||
Output Format
|
||||
-------------
|
||||
|
||||
@ -169,6 +226,7 @@ the client. The format is:
|
||||
|
||||
<archive-name>.<type>:<source-path>
|
||||
|
||||
The ``archive-name`` must contain alphanumerics, hyphens and underscores only.
|
||||
Common types are ``.pxar`` for file archives and ``.img`` for block
|
||||
device images. To create a backup of a block device, run the following command:
|
||||
|
||||
@ -470,6 +528,8 @@ version of your master key. The following command sends the output of the
|
||||
proxmox-backup-client key paperkey --output-format text > qrkey.txt
|
||||
|
||||
|
||||
.. _client_restoring_data:
|
||||
|
||||
Restoring Data
|
||||
--------------
|
||||
|
||||
|
@ -102,7 +102,7 @@ man_pages = [
|
||||
('pxar/man1', 'pxar', 'Proxmox File Archive CLI Tool', [author], 1),
|
||||
('pmt/man1', 'pmt', 'Control Linux Tape Devices', [author], 1),
|
||||
('pmtx/man1', 'pmtx', 'Control SCSI media changer devices (tape autoloaders)', [author], 1),
|
||||
('pbs2to3/man1', 'pbs2to3', 'Proxmox Backup Server upgrade checker script for 2.4+ to current 3.x major upgrades', [author], 1),
|
||||
('pbs3to4/man1', 'pbs3to4', 'Proxmox Backup Server upgrade checker script for 3.4+ to current 4.x major upgrades', [author], 1),
|
||||
# configs
|
||||
('config/acl/man5', 'acl.cfg', 'Access Control Configuration', [author], 5),
|
||||
('config/datastore/man5', 'datastore.cfg', 'Datastore Configuration', [author], 5),
|
||||
|
@ -138,7 +138,26 @@ you need to run:
|
||||
# apt update
|
||||
# apt install proxmox-backup-client
|
||||
|
||||
.. note:: The client-only repository should be usable by most recent Debian and
|
||||
Ubuntu derivatives.
|
||||
Install Statically Linked Proxmox Backup Client
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Proxmox provides a statically linked build of the Proxmox Backup client that
|
||||
should run on any modern x86-64 Linux system.
|
||||
|
||||
It is currently available as a Debian package. After configuring the
|
||||
:ref:`package_repositories_client_only_apt`, you need to run:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# apt update
|
||||
# apt install proxmox-backup-client-static
|
||||
|
||||
This package conflicts with the `proxmox-backup-client` package, as both
|
||||
provide the client as an executable in the `/usr/bin/proxmox-backup-client`
|
||||
path.
|
||||
|
||||
You can copy this executable to other Linux systems, e.g. ones that are not based on Debian.
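
For example, the binary could be copied to another machine and run there
directly; the host name and target path below are placeholders, and the
``version`` call is just a plausible smoke test:

.. code-block:: console

   # scp /usr/bin/proxmox-backup-client root@other-host:/usr/local/bin/proxmox-backup-client
   # ssh root@other-host proxmox-backup-client version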
|
||||
|
||||
For details on using the Proxmox Backup Client, see :ref:`client_usage`.
|
||||
|
||||
.. include:: package-repositories.rst
|
||||
|
@ -72,6 +72,10 @@ either start it manually from the GUI or provide it with a schedule (see
|
||||
Backup snapshots, groups and namespaces which are no longer available on the
|
||||
**Remote** datastore can be removed from the local datastore as well by setting
|
||||
the ``remove-vanished`` option for the sync job.
|
||||
Setting the ``verified-only`` or ``encrypted-only`` flags allows you to limit
the sync job to backup snapshots which have been verified or encrypted,
respectively. This is particularly of interest when sending backups to a less
trusted remote backup server.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
@ -141,6 +145,11 @@ job needs to be run before a sync job with 'resync-corrupt' can be carried out.
|
||||
that a 'resync-corrupt'-job needs to check the manifests of all snapshots in a datastore
|
||||
and might take much longer than regular sync jobs.
|
||||
|
||||
If the ``run-on-mount`` flag is set, the sync job will be automatically started whenever a
|
||||
relevant removable datastore is mounted. If mounting a removable datastore would start
|
||||
multiple sync jobs, these jobs will be run sequentially in alphabetical order based on
|
||||
their ID.
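
As a sketch, the flag could be enabled on an existing sync job via the CLI; the
job ID used here is a placeholder:

.. code-block:: console

   # proxmox-backup-manager sync-job update sync-usb-drive --run-on-mount true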
|
||||
|
||||
Namespace Support
|
||||
^^^^^^^^^^^^^^^^^
|
||||
|
||||
@ -227,13 +236,18 @@ Bandwidth Limit
|
||||
|
||||
Syncing a datastore to an archive can produce a lot of traffic and impact other
|
||||
users of the network. In order to avoid network or storage congestion, you can
|
||||
limit the bandwidth of the sync job by setting the ``rate-in`` option either in
|
||||
the web interface or using the ``proxmox-backup-manager`` command-line tool:
|
||||
limit the bandwidth of a sync job in pull direction by setting the ``rate-in``
|
||||
option either in the web interface or using the ``proxmox-backup-manager``
|
||||
command-line tool:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-manager sync-job update ID --rate-in 20MiB
|
||||
|
||||
For sync jobs in push direction, use the ``rate-out`` option instead. To allow
for traffic bursts, you can set the size of the token bucket filter used for
traffic limiting via the ``burst-in`` or ``burst-out`` parameters.
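
For example, a push sync job could be capped at 20 MiB/s while still allowing
short bursts; the values are purely illustrative:

.. code-block:: console

   # proxmox-backup-manager sync-job update ID --rate-out 20MiB --burst-out 64MiB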
|
||||
|
||||
Sync Direction Push
|
||||
^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
|
@ -7,26 +7,20 @@ Overview
|
||||
--------
|
||||
|
||||
* Proxmox Backup Server emits :ref:`notification_events` in case of noteworthy
|
||||
events in the system. These events are handled by the notification system.
|
||||
A notification event has metadata, for example a timestamp, a severity level,
|
||||
a type and other metadata fields.
|
||||
* :ref:`notification_matchers` route a notification event to one or more notification
|
||||
targets. A matcher can have match rules to selectively route based on the metadata
|
||||
of a notification event.
|
||||
events in the system. These events are processed based on the global
|
||||
notification settings. Each notification event includes metadata, such as a
|
||||
timestamp, severity level, type, and additional event-specific fields.
|
||||
* :ref:`notification_matchers` route a notification event to one or more
|
||||
notification targets. A matcher can have match rules to selectively route
|
||||
based on the metadata of a notification event.
|
||||
* :ref:`notification_targets` are destinations to which a notification event
  is routed by a matcher. There are multiple types of targets: mail-based
  (Sendmail and SMTP) and Gotify.
|
||||
|
||||
Datastores and tape backup jobs have a configurable :ref:`notification_mode`.
|
||||
It allows you to choose between the notification system and a legacy mode
|
||||
for sending notification emails. The legacy mode is equivalent to the
|
||||
way notifications were handled before Proxmox Backup Server 3.2.
|
||||
|
||||
The notification system can be configured in the GUI under
|
||||
*Configuration → Notifications*. The configuration is stored in
|
||||
:ref:`notifications.cfg` and :ref:`notifications_priv.cfg` -
|
||||
the latter contains sensitive configuration options such as
|
||||
passwords or authentication tokens for notification targets and
|
||||
Global notification settings can be configured in the GUI under *Configuration →
|
||||
Notifications*. The configuration is stored in :ref:`notifications.cfg` and
|
||||
:ref:`notifications_priv.cfg` - the latter contains sensitive configuration
|
||||
options such as passwords or authentication tokens for notification targets and
|
||||
can only be read by ``root``.
|
||||
|
||||
.. _notification_targets:
|
||||
@ -41,22 +35,23 @@ Proxmox Backup Server offers multiple types of notification targets.
|
||||
Sendmail
|
||||
^^^^^^^^
|
||||
The sendmail binary is a program commonly found on Unix-like operating systems
|
||||
that handles the sending of email messages.
|
||||
It is a command-line utility that allows users and applications to send emails
|
||||
directly from the command line or from within scripts.
|
||||
that handles the sending of email messages. It is a command-line utility that
|
||||
allows users and applications to send emails directly from the command line or
|
||||
from within scripts.
|
||||
|
||||
The sendmail notification target uses the ``sendmail`` binary to send emails to a
|
||||
list of configured users or email addresses. If a user is selected as a recipient,
|
||||
the email address configured in user's settings will be used.
|
||||
For the ``root@pam`` user, this is the email address entered during installation.
|
||||
A user's email address can be configured in ``Configuration -> Access Control -> User Management``.
|
||||
If a user has no associated email address, no email will be sent.
|
||||
The sendmail notification target uses the ``sendmail`` binary to send emails to
|
||||
a list of configured users or email addresses. If a user is selected as a
|
||||
recipient, the email address configured in the user's settings will be used. For
|
||||
the ``root@pam`` user, this is the email address entered during installation. A
|
||||
user's email address can be configured in ``Configuration → Access Control →
|
||||
User Management``. If a user has no associated email address, no email will be
|
||||
sent.
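
For instance, assuming the ``--email`` option of ``proxmox-backup-manager user
update``, a recipient address could also be set from the command line; the user
and address below are placeholders:

.. code-block:: console

   # proxmox-backup-manager user update john@pbs --email john@example.com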
|
||||
|
||||
.. NOTE:: In standard Proxmox Backup Server installations, the ``sendmail`` binary is provided by
|
||||
Postfix. It may be necessary to configure Postfix so that it can deliver
|
||||
mails correctly - for example by setting an external mail relay (smart host).
|
||||
In case of failed delivery, check the system logs for messages logged by
|
||||
the Postfix daemon.
|
||||
.. NOTE:: In standard Proxmox Backup Server installations, the ``sendmail``
|
||||
binary is provided by Postfix. It may be necessary to configure Postfix so
|
||||
that it can deliver mails correctly - for example by setting an external
|
||||
mail relay (smart host). In case of failed delivery, check the system logs
|
||||
for messages logged by the Postfix daemon.
|
||||
|
||||
See :ref:`notifications.cfg` for all configuration options.
|
||||
|
||||
@ -64,13 +59,13 @@ See :ref:`notifications.cfg` for all configuration options.
|
||||
|
||||
SMTP
|
||||
^^^^
|
||||
SMTP notification targets can send emails directly to an SMTP mail relay.
|
||||
This target does not use the system's MTA to deliver emails.
|
||||
Similar to sendmail targets, if a user is selected as a recipient, the user's configured
|
||||
email address will be used.
|
||||
SMTP notification targets can send emails directly to an SMTP mail relay. This
|
||||
target does not use the system's MTA to deliver emails. Similar to sendmail
|
||||
targets, if a user is selected as a recipient, the user's configured email
|
||||
address will be used.
|
||||
|
||||
.. NOTE:: Unlike sendmail targets, SMTP targets do not have any queuing/retry mechanism
|
||||
in case of a failed mail delivery.
|
||||
.. NOTE:: Unlike sendmail targets, SMTP targets do not have any queuing/retry
|
||||
mechanism in case of a failed mail delivery.
|
||||
|
||||
See :ref:`notifications.cfg` for all configuration options.
|
||||
|
||||
@ -78,10 +73,10 @@ See :ref:`notifications.cfg` for all configuration options.
|
||||
|
||||
Gotify
|
||||
^^^^^^
|
||||
`Gotify <http://gotify.net>`_ is an open-source self-hosted notification server that
|
||||
allows you to send push notifications to various devices and
|
||||
applications. It provides a simple API and web interface, making it easy to
|
||||
integrate with different platforms and services.
|
||||
`Gotify <http://gotify.net>`_ is an open-source self-hosted notification server
|
||||
that allows you to send push notifications to various devices and applications.
|
||||
It provides a simple API and web interface, making it easy to integrate with
|
||||
different platforms and services.
|
||||
|
||||
.. NOTE:: Gotify targets will respect the HTTP proxy settings from
|
||||
Configuration → Other → HTTP proxy
|
||||
@ -95,27 +90,28 @@ Webhook notification targets perform HTTP requests to a configurable URL.
|
||||
|
||||
The following configuration options are available:
|
||||
|
||||
* ``url``: The URL to which to perform the HTTP requests.
|
||||
Supports templating to inject message contents, metadata and secrets.
|
||||
* ``url``: The URL to which the HTTP requests are sent. Supports templating
  to inject message contents, metadata and secrets.
|
||||
* ``method``: HTTP Method to use (POST/PUT/GET)
|
||||
* ``header``: Array of HTTP headers that should be set for the request.
|
||||
Supports templating to inject message contents, metadata and secrets.
|
||||
* ``body``: HTTP body that should be sent.
|
||||
Supports templating to inject message contents, metadata and secrets.
|
||||
* ``secret``: Array of secret key-value pairs. These will be stored in
|
||||
a protected configuration file only readable by root. Secrets can be
|
||||
* ``body``: HTTP body that should be sent. Supports templating to inject
|
||||
message contents, metadata and secrets.
|
||||
* ``secret``: Array of secret key-value pairs. These will be stored in a
|
||||
protected configuration file only readable by root. Secrets can be
|
||||
accessed in body/header/URL templates via the ``secrets`` namespace.
|
||||
* ``comment``: Comment for this target.
|
||||
|
||||
For configuration options that support templating, the
|
||||
`Handlebars <https://handlebarsjs.com>`_ syntax can be used to
|
||||
access the following properties:
|
||||
For configuration options that support templating, the `Handlebars
|
||||
<https://handlebarsjs.com>`_ syntax can be used to access the following
|
||||
properties:
|
||||
|
||||
* ``{{ title }}``: The rendered notification title
|
||||
* ``{{ message }}``: The rendered notification body
|
||||
* ``{{ severity }}``: The severity of the notification (``info``, ``notice``,
|
||||
``warning``, ``error``, ``unknown``)
|
||||
* ``{{ timestamp }}``: The notification's timestamp as a UNIX epoch (in seconds).
|
||||
* ``{{ timestamp }}``: The notification's timestamp as a UNIX epoch (in
|
||||
seconds).
|
||||
* ``{{ fields.<name> }}``: Sub-namespace for any metadata fields of the
|
||||
notification. For instance, ``fields.type`` contains the notification
|
||||
type - for all available fields refer to :ref:`notification_events`.
|
||||
@ -197,20 +193,19 @@ Example - Slack
|
||||
Notification Matchers
|
||||
---------------------
|
||||
|
||||
Notification matchers route notifications to notification targets based
|
||||
on their matching rules. These rules can match certain properties of a
|
||||
notification, such as the timestamp (``match-calendar``), the severity of
|
||||
the notification (``match-severity``) or metadata fields (``match-field``).
|
||||
If a notification is matched by a matcher, all targets configured for the
|
||||
matcher will receive the notification.
|
||||
Notification matchers route notifications to notification targets based on
|
||||
their matching rules. These rules can match certain properties of a
|
||||
notification, such as the timestamp (``match-calendar``), the severity of the
|
||||
notification (``match-severity``) or metadata fields (``match-field``). If a
|
||||
notification is matched by a matcher, all targets configured for the matcher
|
||||
will receive the notification.
|
||||
|
||||
An arbitrary number of matchers can be created, each with their own
|
||||
matching rules and targets to notify.
|
||||
Every target is notified at most once for every notification, even if
|
||||
the target is used in multiple matchers.
|
||||
matching rules and targets to notify. Every target is notified at most once for
|
||||
every notification, even if the target is used in multiple matchers.
|
||||
|
||||
A matcher without rules matches any notification; the configured targets
|
||||
will always be notified.
|
||||
A matcher without rules matches any notification; the configured targets will
|
||||
always be notified.
|
||||
|
||||
See :ref:`notifications.cfg` for all configuration options.
|
||||
|
||||
@ -227,20 +222,24 @@ Examples:
|
||||
|
||||
Field Matching Rules
|
||||
^^^^^^^^^^^^^^^^^^^^
|
||||
Notifications have a selection of metadata fields that can be matched.
|
||||
When using ``exact`` as a matching mode, a ``,`` can be used as a separator.
|
||||
The matching rule then matches if the metadata field has **any** of the specified
|
||||
Notifications have a selection of metadata fields that can be matched. When
|
||||
using ``exact`` as a matching mode, a ``,`` can be used as a separator. The
|
||||
matching rule then matches if the metadata field has **any** of the specified
|
||||
values.
|
||||
|
||||
Examples:
|
||||
|
||||
* ``match-field exact:type=gc`` Only match notifications for garbage collection jobs
|
||||
* ``match-field exact:type=prune,verify`` Match prune job and verification job notifications.
|
||||
* ``match-field regex:datastore=^backup-.*$`` Match any datastore starting with ``backup``.
|
||||
* ``match-field exact:type=gc`` Only match notifications for garbage collection
|
||||
jobs
|
||||
* ``match-field exact:type=prune,verify`` Match prune job and verification job
|
||||
notifications.
|
||||
* ``match-field regex:datastore=^backup-.*$`` Match any datastore starting with
|
||||
``backup``.
|
||||
|
||||
If a notification does not have the matched field, the rule will **not** match.
|
||||
For instance, a ``match-field regex:datastore=.*`` directive will match any notification that has
|
||||
a ``datastore`` metadata field, but will not match if the field does not exist.
|
||||
For instance, a ``match-field regex:datastore=.*`` directive will match any
|
||||
notification that has a ``datastore`` metadata field, but will not match if the
|
||||
field does not exist.
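
As a rough sketch of how such rules can be combined, a matcher stanza in
``notifications.cfg`` might look like the following; the matcher and target
names are invented and the exact stanza layout is assumed from the
section-config format used by other configuration files:

.. code-block:: console

   matcher: backup-failures
       match-severity error
       match-field exact:type=gc,prune,verify
       target mail-to-admins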
|
||||
|
||||
Severity Matching Rules
|
||||
^^^^^^^^^^^^^^^^^^^^^^^
|
||||
@ -259,9 +258,9 @@ The following severities are in use:
|
||||
Notification Events
|
||||
-------------------
|
||||
|
||||
The following table contains a list of all notification events in Proxmox Backup server, their
|
||||
type, severity and additional metadata fields. ``type`` as well as any other metadata field
|
||||
may be used in ``match-field`` match rules.
|
||||
The following table contains a list of all notification events in Proxmox
|
||||
Backup Server, their type, severity and additional metadata fields. ``type`` as
|
||||
well as any other metadata field may be used in ``match-field`` match rules.
|
||||
|
||||
================================ ==================== ========== ==============================================================
|
||||
Event ``type`` Severity Metadata fields (in addition to ``type``)
|
||||
@ -281,8 +280,8 @@ Verification job failure ``verification`` ``error`` ``datastore``,
|
||||
Verification job success ``verification`` ``info`` ``datastore``, ``hostname``, ``job-id``
|
||||
================================ ==================== ========== ==============================================================
|
||||
|
||||
The following table contains a description of all use metadata fields. All of these
|
||||
can be used in ``match-field`` match rules.
|
||||
The following table contains a description of all used metadata fields. All of
|
||||
these can be used in ``match-field`` match rules.
|
||||
|
||||
==================== ===================================
|
||||
Metadata field Description
|
||||
@ -299,59 +298,48 @@ Metadata field Description
|
||||
|
||||
System Mail Forwarding
|
||||
----------------------
|
||||
Certain local system daemons, such as ``smartd``, send notification emails
|
||||
to the local ``root`` user. Proxmox Backup Server will feed these mails
|
||||
into the notification system as a notification of type ``system-mail``
|
||||
and with severity ``unknown``.
|
||||
Certain local system daemons, such as ``smartd``, send notification emails to
|
||||
the local ``root`` user. These mails are converted into notification events
|
||||
with the type ``system-mail`` and with a severity of ``unknown``.
|
||||
|
||||
When the email is forwarded to a sendmail target, the mail's content and headers
|
||||
are forwarded as-is. For all other targets,
|
||||
the system tries to extract both a subject line and the main text body
|
||||
from the email content. In instances where emails solely consist of HTML
|
||||
content, they will be transformed into plain text format during this process.
|
||||
When the email is forwarded to a sendmail target, the mail's content and
|
||||
headers are forwarded as-is. For all other targets, the system tries to extract
|
||||
both a subject line and the main text body from the email content. In instances
|
||||
where emails solely consist of HTML content, they will be transformed into
|
||||
plain text format during this process.
|
||||
|
||||
Permissions
|
||||
-----------
|
||||
In order to modify/view the configuration for notification targets,
|
||||
the ``Sys.Modify/Sys.Audit`` permissions are required for the
|
||||
In order to modify/view the configuration for notification targets, the
|
||||
``Sys.Modify/Sys.Audit`` permissions are required for the
|
||||
``/system/notifications`` ACL node.
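
As a sketch, such permissions could be granted on the command line; the user ID
is a placeholder and the ``Admin`` role is assumed to include the required
privileges:

.. code-block:: console

   # proxmox-backup-manager acl update /system/notifications Admin --auth-id jane@pbs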
|
||||
|
||||
.. _notification_mode:
|
||||
|
||||
Notification Mode
|
||||
-----------------
|
||||
Datastores and tape backup/restore job configuration have a ``notification-mode``
|
||||
option which can have one of two values:
|
||||
Datastores and tape backup/restore job configuration have a
|
||||
``notification-mode`` option which can have one of two values:
|
||||
|
||||
* ``legacy-sendmail``: Send notification emails via the system's ``sendmail`` command.
|
||||
The notification system will be bypassed and any configured targets/matchers will be ignored.
|
||||
This mode is equivalent to the notification behavior for version before
|
||||
Proxmox Backup Server 3.2.
|
||||
* Send notifications based on the global notification settings (``notification-system``).
|
||||
|
||||
* ``notification-system``: Use the new, flexible notification system.
|
||||
* Send notification emails via the system's ``sendmail`` command
|
||||
(``legacy-sendmail``). Any targets or matchers from the global notification
|
||||
settings are ignored. This mode is equivalent to the notification behavior
|
||||
for Proxmox Backup Server versions before 3.2. It might be removed in a
|
||||
later release of Proxmox Backup Server.
|
||||
|
||||
If the ``notification-mode`` option is not set, Proxmox Backup Server will default
|
||||
to ``legacy-sendmail``.
|
||||
|
||||
Starting with Proxmox Backup Server 3.2, a datastore created in the UI will
|
||||
automatically opt in to the new notification system. If the datastore is created
|
||||
via the API or the ``proxmox-backup-manager`` CLI, the ``notification-mode``
|
||||
option has to be set explicitly to ``notification-system`` if the
notification system is to be used.
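
For instance, an existing datastore could be switched over explicitly; the
datastore name below is a placeholder:

.. code-block:: console

   # proxmox-backup-manager datastore update my-datastore --notification-mode notification-system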
|
||||
|
||||
The ``legacy-sendmail`` mode might be removed in a later release of
|
||||
Proxmox Backup Server.
|
||||
|
||||
Settings for ``legacy-sendmail`` notification mode
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
If ``notification-mode`` is set to ``legacy-sendmail``, Proxmox Backup Server
|
||||
will send notification emails via the system's ``sendmail`` command to the email
|
||||
address configured for the user set in the ``notify-user`` option
|
||||
will send notification emails via the system's ``sendmail`` command to the
|
||||
email address configured for the user set in the ``notify-user`` option
|
||||
(falling back to ``root@pam`` if not set).
|
||||
|
||||
For datastores, you can also change the level of notifications received per task
|
||||
type via the ``notify`` option.
|
||||
For datastores, you can also change the level of notifications received per
|
||||
task type via the ``notify`` option.
|
||||
|
||||
* Always: send a notification for any scheduled task, independent of the
|
||||
outcome
|
||||
@ -360,5 +348,26 @@ type via the ``notify`` option.
|
||||
|
||||
* Never: do not send any notification at all
|
||||
|
||||
The ``notify-user`` and ``notify`` options are ignored if ``notification-mode``
|
||||
is set to ``notification-system``.
|
||||
The ``notify-user`` and ``notify`` options are ignored when using the global
|
||||
notification settings (``notification-mode`` is set to
|
||||
``notification-system``).
|
||||
|
||||
Overriding Notification Templates
|
||||
---------------------------------
|
||||
|
||||
Proxmox Backup Server uses Handlebars templates to render notifications. The
|
||||
original templates provided by Proxmox Backup Server are stored in
|
||||
``/usr/share/proxmox-backup/templates/default/``.
|
||||
|
||||
Notification templates can be overridden by providing a custom template file in
|
||||
the override directory at
|
||||
``/etc/proxmox-backup/notification-templates/default/``. When rendering a
|
||||
notification of a given type, Proxmox Backup Server will first attempt to load
|
||||
a template from the override directory. If this one does not exist or fails to
|
||||
render, the original template will be used.
|
||||
|
||||
The template files follow the naming convention of
|
||||
``<type>-<body|subject>.txt.hbs``. For instance, the file
|
||||
``gc-err-body.txt.hbs`` contains the template for rendering notifications for
|
||||
garbage collection errors, while ``package-updates-subject.txt.hbs`` is used to
|
||||
render the subject line of notifications for available package updates.
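
A plausible workflow for overriding, for example, the garbage collection error
template; the override directory may need to be created first:

.. code-block:: console

   # mkdir -p /etc/proxmox-backup/notification-templates/default
   # cp /usr/share/proxmox-backup/templates/default/gc-err-body.txt.hbs \
       /etc/proxmox-backup/notification-templates/default/
   # editor /etc/proxmox-backup/notification-templates/default/gc-err-body.txt.hbs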
|
||||
|
@ -4,25 +4,62 @@ Debian Package Repositories
|
||||
---------------------------
|
||||
|
||||
All Debian based systems use APT_ as a package management tool. The lists of
|
||||
repositories are defined in ``/etc/apt/sources.list`` and the ``.list`` files found
|
||||
in the ``/etc/apt/sources.d/`` directory. Updates can be installed directly
|
||||
with the ``apt`` command-line tool, or via the GUI.
|
||||
repositories are defined in ``/etc/apt/sources.list`` and the ``.list`` or
``.sources`` files found in the ``/etc/apt/sources.list.d/`` directory. Updates
can be installed directly with the ``apt`` command-line tool, or via the GUI.
|
||||
|
||||
APT_ ``sources.list`` files list one package repository per line, with the most
|
||||
preferred source listed first. Empty lines are ignored and a ``#`` character
|
||||
anywhere on a line marks the remainder of that line as a comment. The
|
||||
information available from the configured sources is acquired by ``apt
|
||||
update``.
|
||||
.. _package_repos_repository_formats:
|
||||
|
||||
.. code-block:: sources.list
|
||||
:caption: File: ``/etc/apt/sources.list``
|
||||
Repository Formats
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
|
||||
deb http://deb.debian.org/debian bookworm main contrib
|
||||
deb http://deb.debian.org/debian bookworm-updates main contrib
|
||||
APT_ repositories can be configured in two distinct formats: the older
single-line format and the newer deb822 format. No matter which format you
choose, ``apt update`` will fetch the information from all configured sources.
|
||||
|
||||
# security updates
|
||||
deb http://security.debian.org/debian-security bookworm-security main contrib
|
||||
Single Line
|
||||
^^^^^^^^^^^
|
||||
|
||||
Single-line repositories are defined in ``.list`` files, which list one package
repository per line, with the most preferred source listed first. Empty lines
are ignored and a ``#`` character anywhere on a line marks the remainder of
that line as a comment.
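
For illustration, a single-line entry for the Debian base repository looks like
this:

.. code-block:: sources.list

   deb http://deb.debian.org/debian trixie main contrib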
|
||||
|
||||
deb822 Style
|
||||
^^^^^^^^^^^^
|
||||
|
||||
The newer deb822 multi-line format is used in ``.sources`` files. Each
repository consists of a stanza with multiple key-value pairs. A stanza is
simply a group of consecutive lines; one file can contain multiple stanzas,
separated by blank lines. You can still use ``#`` to comment out lines.
|
||||
|
||||
.. note:: Modernizing your repositories is recommended under Debian Trixie, as
|
||||
``apt`` will complain about older repository definitions otherwise. You can
|
||||
run the command ``apt modernize-sources`` to modernize your existing
|
||||
repositories automatically.
|
||||
|
||||
.. _package_repos_debian_base_repositories:
|
||||
|
||||
Debian Base Repositories
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
You will need a Debian base repository as a minimum to get updates for all
|
||||
packages provided by Debian itself:
|
||||
|
||||
.. code-block:: debian.sources
|
||||
:caption: File: ``/etc/apt/sources.list.d/debian.sources``
|
||||
|
||||
Types: deb
|
||||
URIs: http://deb.debian.org/debian/
|
||||
Suites: trixie trixie-updates
|
||||
Components: main contrib non-free-firmware
|
||||
Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg
|
||||
|
||||
Types: deb
|
||||
URIs: http://security.debian.org/debian-security/
|
||||
Suites: trixie-security
|
||||
Components: main contrib non-free-firmware
|
||||
Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg
|
||||
|
||||
In addition, you need a package repository from Proxmox to get Proxmox Backup
|
||||
updates.
|
||||
@ -32,38 +69,6 @@ updates.
|
||||
:align: right
|
||||
:alt: APT Repository Management in the Web Interface
|
||||
|
||||
.. _package_repos_secure_apt:
|
||||
|
||||
SecureApt
|
||||
~~~~~~~~~
|
||||
|
||||
The `Release` files in the repositories are signed with GnuPG. APT is using
|
||||
these signatures to verify that all packages are from a trusted source.
|
||||
|
||||
If you install Proxmox Backup Server from an official ISO image, the
|
||||
verification key is already installed.
|
||||
|
||||
If you install Proxmox Backup Server on top of Debian, download and install the
|
||||
key with the following commands:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# wget https://enterprise.proxmox.com/debian/proxmox-release-bookworm.gpg -O /etc/apt/trusted.gpg.d/proxmox-release-bookworm.gpg
|
||||
|
||||
Verify the SHA512 checksum afterwards with the expected output below:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# sha512sum /etc/apt/trusted.gpg.d/proxmox-release-bookworm.gpg
|
||||
7da6fe34168adc6e479327ba517796d4702fa2f8b4f0a9833f5ea6e6b48f6507a6da403a274fe201595edc86a84463d50383d07f64bdde2e3658108db7d6dc87 /etc/apt/trusted.gpg.d/proxmox-release-bookworm.gpg
|
||||
|
||||
and the md5sum, with the expected output below:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# md5sum /etc/apt/trusted.gpg.d/proxmox-release-bookworm.gpg
|
||||
41558dc019ef90bd0f6067644a51cf5b /etc/apt/trusted.gpg.d/proxmox-release-bookworm.gpg
|
||||
|
||||
.. _sysadmin_package_repos_enterprise:
|
||||
|
||||
`Proxmox Backup`_ Enterprise Repository
|
||||
@ -74,11 +79,14 @@ all Proxmox Backup subscription users. It contains the most stable packages,
|
||||
and is suitable for production use. The ``pbs-enterprise`` repository is
|
||||
enabled by default:
|
||||
|
||||
.. code-block:: sources.list
|
||||
:caption: File: ``/etc/apt/sources.list.d/pbs-enterprise.list``
|
||||
|
||||
deb https://enterprise.proxmox.com/debian/pbs bookworm pbs-enterprise
|
||||
.. code-block:: debian.sources
|
||||
:caption: File: ``/etc/apt/sources.list.d/pbs-enterprise.sources``
|
||||
|
||||
Types: deb
|
||||
URIs: https://enterprise.proxmox.com/debian/pbs
|
||||
Suites: trixie
|
||||
Components: pbs-enterprise
|
||||
Signed-By: /usr/share/keyrings/proxmox-archive-keyring.gpg
|
||||
|
||||
To never miss important security fixes, the superuser (``root@pam`` user) is
|
||||
notified via email about new packages as soon as they are available. The
|
||||
@ -88,11 +96,8 @@ Please note that you need a valid subscription key to access this
|
||||
repository. More information regarding subscription levels and pricing can be
|
||||
found at https://www.proxmox.com/en/proxmox-backup-server/pricing
|
||||
|
||||
.. note:: You can disable this repository by commenting out the above line
|
||||
using a `#` (at the start of the line). This prevents error messages if you do
|
||||
not have a subscription key. Please configure the ``pbs-no-subscription``
|
||||
repository in that case.
|
||||
|
||||
.. note:: You can disable this repository by adding the line ``Enabled: false``
|
||||
to the stanza.
|
||||
|
||||
`Proxmox Backup`_ No-Subscription Repository
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
@ -102,21 +107,17 @@ this repository. It can be used for testing and non-production
|
||||
use. It is not recommended to use it on production servers, because these
|
||||
packages are not always heavily tested and validated.
|
||||
|
||||
We recommend to configure this repository in ``/etc/apt/sources.list``.
|
||||
We recommend configuring this repository in
``/etc/apt/sources.list.d/proxmox.sources``.
|
||||
|
||||
.. code-block:: sources.list
|
||||
:caption: File: ``/etc/apt/sources.list``
|
||||
|
||||
deb http://deb.debian.org/debian bookworm main contrib
|
||||
deb http://deb.debian.org/debian bookworm-updates main contrib
|
||||
|
||||
# Proxmox Backup Server pbs-no-subscription repository provided by proxmox.com,
|
||||
# NOT recommended for production use
|
||||
deb http://download.proxmox.com/debian/pbs bookworm pbs-no-subscription
|
||||
|
||||
# security updates
|
||||
deb http://security.debian.org/debian-security bookworm-security main contrib
|
||||
.. code-block:: debian.sources
|
||||
:caption: File: ``/etc/apt/sources.list.d/proxmox.sources``
|
||||
|
||||
Types: deb
|
||||
URIs: http://download.proxmox.com/debian/pbs
|
||||
Suites: trixie
|
||||
Components: pbs-no-subscription
|
||||
Signed-By: /usr/share/keyrings/proxmox-archive-keyring.gpg
|
||||
|
||||
`Proxmox Backup`_ Test Repository
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
@ -127,13 +128,17 @@ to test new features.
|
||||
.. .. warning:: the ``pbstest`` repository should (as the name implies)
|
||||
only be used to test new features or bug fixes.
|
||||
|
||||
You can access this repository by adding the following line to
|
||||
``/etc/apt/sources.list``:
|
||||
You can access this repository by adding the following stanza to
|
||||
``/etc/apt/sources.list.d/proxmox.sources``:
|
||||
|
||||
.. code-block:: sources.list
|
||||
.. code-block:: debian.sources
|
||||
:caption: .sources entry for ``pbs-test``
|
||||
|
||||
deb http://download.proxmox.com/debian/pbs bookworm pbstest
|
||||
Types: deb
|
||||
URIs: http://download.proxmox.com/debian/pbs
|
||||
Suites: trixie
|
||||
Components: pbs-test
|
||||
Signed-By: /usr/share/keyrings/proxmox-archive-keyring.gpg
|
||||
|
||||
.. _package_repositories_client_only:
|
||||
|
||||
@ -158,6 +163,24 @@ In order to configure this repository you need to first :ref:`setup the Proxmox
|
||||
release key <package_repos_secure_apt>`. After that, add the repository URL to
|
||||
the APT sources lists.
|
||||
|
||||
**Repositories for Debian 13 (Trixie) based releases**
|
||||
|
||||
This repository is tested with:
|
||||
|
||||
- Debian Trixie
|
||||
|
||||
Edit the file ``/etc/apt/sources.list.d/pbs-client.sources`` and add the following
|
||||
snippet
|
||||
|
||||
.. code-block:: debian.sources
|
||||
:caption: File: ``/etc/apt/sources.list.d/pbs-client.sources``
|
||||
|
||||
Types: deb
|
||||
URIs: http://download.proxmox.com/debian/pbs-client
|
||||
Suites: trixie
|
||||
Components: main
|
||||
Signed-By: /usr/share/keyrings/proxmox-archive-keyring.gpg
|
||||
|
||||
**Repositories for Debian 12 (Bookworm) based releases**
|
||||
|
||||
This repository is tested with:
|
||||
@ -203,6 +226,50 @@ snippet
|
||||
|
||||
deb http://download.proxmox.com/debian/pbs-client buster main
|
||||
|
||||
.. _package_repos_secure_apt:
|
||||
|
||||
SecureApt
|
||||
~~~~~~~~~
|
||||
|
||||
The `Release` files in the repositories are signed with GnuPG. APT is using
|
||||
these signatures to verify that all packages are from a trusted source.
|
||||
|
||||
If you install Proxmox Backup Server from an official ISO image, the
|
||||
verification key is already installed.
|
||||
|
||||
If you install Proxmox Backup Server on top of Debian, download and install the
|
||||
key with the following commands:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# wget https://enterprise.proxmox.com/debian/proxmox-archive-keyring-trixie.gpg -O /usr/share/keyrings/proxmox-archive-keyring.gpg
|
||||
|
||||
.. note:: The `wget` command above adds the keyring for Proxmox releases based
|
||||
on Debian Trixie. Once the `proxmox-archive-keyring` package is installed,
|
||||
it will manage this file. At that point, the hashes below may no longer
|
||||
match the hashes of this file, as keys for new Proxmox releases get added or
|
||||
removed. This is intended; `apt` will ensure that only trusted keys are
being used. **Modifying this file is discouraged once
|
||||
`proxmox-archive-keyring` is installed.**
|
||||
|
||||
Verify the SHA256 checksum afterwards with the expected output below:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# sha256sum /usr/share/keyrings/proxmox-archive-keyring.gpg
|
||||
136673be77aba35dcce385b28737689ad64fd785a797e57897589aed08db6e45 /usr/share/keyrings/proxmox-archive-keyring.gpg
|
||||
|
||||
and the md5sum, with the expected output below:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# md5sum /usr/share/keyrings/proxmox-archive-keyring.gpg
|
||||
77c8b1166d15ce8350102ab1bca2fcbf /usr/share/keyrings/proxmox-archive-keyring.gpg
|
||||
|
||||
.. note:: Make sure that the path you download the key to matches the path
   specified in the ``Signed-By:`` lines in your repository stanzas from
   above.
|
||||
|
||||
.. _node_options_http_proxy:
|
||||
|
||||
Repository Access Behind HTTP Proxy
|
||||
|
@ -2,7 +2,7 @@
|
||||
|
||||
|
||||
=======
|
||||
pbs2to3
|
||||
pbs3to4
|
||||
=======
|
||||
|
||||
Description
|
@ -354,12 +354,17 @@ Ext.onReady(function() {
|
||||
specValues.forEach(function(value) {
|
||||
if (value.includes('..')) {
|
||||
let [start, end] = value.split('..');
|
||||
let step = 1;
|
||||
if (end.includes('/')) {
|
||||
[end, step] = end.split('/');
|
||||
step = assertValid(step);
|
||||
}
|
||||
start = assertValid(start);
|
||||
end = assertValid(end);
|
||||
if (start > end) {
|
||||
throw "interval start is bigger then interval end '" + start + " > " + end + "'";
|
||||
}
|
||||
for (let i = start; i <= end; i++) {
|
||||
for (let i = start; i <= end; i += step) {
|
||||
matches[i] = 1;
|
||||
}
|
||||
} else if (value.includes('/')) {
|
||||
|
180
docs/storage.rst
@ -233,6 +233,161 @@ datastore is not mounted when they are scheduled. Sync jobs start, but fail
|
||||
with an error saying the datastore was not mounted. The reason is that syncs
|
||||
not happening as scheduled should at least be noticeable.
|
||||
|
||||
.. _datastore_s3_backend:
|
||||
|
||||
Datastores with S3 Backend
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Proxmox Backup Server supports S3-compatible object stores as a storage backend for datastores. For
this, an S3 endpoint needs to be set up under "Configuration" > "Remotes" > "S3 Endpoints".
|
||||
|
||||
.. important:: The S3 datastore backend is currently a technology preview.
|
||||
|
||||
.. important:: Keep in mind that using an S3-backed object store might incur additional costs.
   Providers might charge you for storage space and for API requests performed against the buckets;
   egress and bandwidth fees might apply as well. Therefore, monitoring these values and the
   resulting costs is highly recommended.
|
||||
|
||||
In the endpoint configuration, provide the REST API endpoint for the object store. The endpoint
is provider dependent and allows for bucket and region templating. For example, an endpoint
configured as ``{{bucket}}.s3.{{region}}.amazonaws.com`` will be expanded to
``my-pbs-bucket.s3.eu-central-1.amazonaws.com`` for a bucket named ``my-pbs-bucket``
located in region ``eu-central-1``.
|
||||
|
||||
The bucket name is part of the datastore backend configuration rather than the endpoint
configuration, as the same endpoint might be reused for multiple buckets. Objects placed in the
bucket are prefixed by the datastore name; therefore, it is possible to create multiple datastores
using the same bucket.
|
||||
|
||||
.. note:: Proxmox Backup Server does not handle bucket creation and access control. The bucket used
   to store the datastore's objects, as well as the access key, have to be set up beforehand in your
   S3 provider's interface. Proxmox Backup Server acts as a client and requires permissions to get,
   put, list and delete objects in the bucket.
|
||||
|
||||
Most providers allow accessing buckets either via vhost-style addressing, where the bucket name is
part of the endpoint address, or via path-style addressing, where the bucket name is the prefix of
the path components of requests. Proxmox Backup Server supports both styles, favoring vhost-style
URLs over path style. To use path-style addresses, set the corresponding configuration flag.
|
||||
|
||||
Proxmox Backup Server does not support plain text communication with the S3 API; all communication
is encrypted using HTTPS in transit. Therefore, for self-hosted S3 object stores using a self-signed
certificate, the matching fingerprint has to be provided in the endpoint configuration. Otherwise,
the client refuses connections to the S3 object store.
|
||||
|
||||
The following example shows the setup of a new s3 endpoint configuration:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-manager s3 endpoint create my-s3-ep --access-key 'my-access-key' --secret-key 'my-secret-key' --endpoint '{{bucket}}.s3.{{region}}.amazonaws.com' --region eu-central-1
|
||||
|
||||
To list your s3 endpoint configuration, run:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-manager s3 endpoint list
|
||||
|
||||
A new datastore with an S3 backend can be created using one of the configured S3 endpoints. Although
all contents are stored on the S3 object store, the datastore nevertheless requires a local cache
store, used to increase performance and reduce the number of requests to the backend. For this, a
local filesystem path has to be provided during datastore creation, just like for regular datastore
setup. A minimum size of a few GiB of storage is recommended, given that the cache also holds data
chunks.
|
||||
|
||||
To setup a new datastore called ``my-s3-store`` placed in a bucket called ``pbs-s3-bucket``, run:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-manager datastore create my-s3-store /mnt/datastore/my-s3-store-cache --backend type=s3,client=my-s3-ep,bucket=pbs-s3-bucket
|
||||
|
||||
A datastore cannot be shared between multiple Proxmox Backup Server instances, only one instance can
|
||||
operate on the datastore at a time. However, datastore contents used on an instance which is no
|
||||
longer available can be reused on a fresh installation. To recreate the datastore, you must pass the
|
||||
``reuse-datastore`` and ``overwrite-in-use`` flags. Since the datastore name is used as prefix, the
|
||||
same datastore name must be used.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
# proxmox-backup-manager datastore create my-s3-store /mnt/datastore/my-new-s3-store-cache --backend type=s3,client=my-s3-ep,bucket=pbs-s3-bucket --reuse-datastore true --overwrite-in-use true
|
||||
|
||||
.. note:: If your S3 object store runs out of space while performing write operation to it, most
|
||||
likely cleanup operations such as cleaning up of contents within a snapshot directory will fail
|
||||
as well. The recommended procedure is to cleanup any stray objects corresponding to this snapshot
|
||||
on the S3 object store manually and refresh the contents via an ``S3 refresh``, either via the
|
||||
CLI or UI.
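
A minimal sketch of the CLI variant (the exact subcommand name is an assumption here; check
``proxmox-backup-manager datastore help`` for the commands available on your installation):

.. code-block:: console

  # proxmox-backup-manager datastore s3-refresh my-s3-store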


.. _datastore_s3_endpoint_examples:

S3 Datastore Backend Configuration Examples
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

The following shows example configurations for some typical S3 object store providers, as excerpts
(data relevant to the S3 config only) from ``/etc/proxmox-backup/s3.cfg`` and
``/etc/proxmox-backup/datastore.cfg``.

Self-hosted S3 object store with Ceph RADOS Gateway, using a plain IP address, custom port,
self-signed certificate and path-style bucket addressing:

.. code-block:: console

  # cat /etc/proxmox-backup/s3.cfg
  s3-endpoint: ceph-s3-rados-gw
      access-key XXXXXXXXXXXXXXXXXXXX
      endpoint 172.16.0.200
      fingerprint XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX:XX
      path-style true
      port 7480
      secret-key XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX

  # cat /etc/proxmox-backup/datastore.cfg
  datastore: ceph-s3-rgw-store
      backend bucket=pbs-ceph-bucket,client=ceph-s3-rados-gw,type=s3
      path /mnt/datastore/ceph-s3-rgw-store-local-cache

AWS S3 with vhost-style bucket addressing, using bucket name and region templating for the endpoint
URL:

.. code-block:: console

  # cat /etc/proxmox-backup/s3.cfg
  s3-endpoint: aws-s3
      access-key XXXXXXXXXXXXXXXXXXXX
      endpoint {{bucket}}.s3.{{region}}.amazonaws.com
      region eu-central-1
      secret-key XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX

  # cat /etc/proxmox-backup/datastore.cfg
  datastore: aws-s3-store
      backend bucket=pbs-s3-bucket,client=aws-s3,type=s3
      path /mnt/datastore/aws-s3-store-local-cache

Cloudflare R2 with path-style bucket addressing; note that the region must be set to ``auto``, as
request authentication might fail otherwise:

.. code-block:: console

  # cat /etc/proxmox-backup/s3.cfg
  s3-endpoint: cloudflare-r2
      access-key XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
      endpoint XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX.r2.cloudflarestorage.com
      path-style true
      region auto
      secret-key XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX

  # cat /etc/proxmox-backup/datastore.cfg
  datastore: r2-s3-store
      backend bucket=pbs-r2-bucket,client=cloudflare-r2,type=s3
      path /mnt/datastore/r2-s3-store-local-cache


Managing Datastores
^^^^^^^^^^^^^^^^^^^
@ -435,9 +590,30 @@ There are some tuning related options for the datastore that are more advanced:

This can be set with:

.. code-block:: console

  # proxmox-backup-manager datastore update <storename> --tuning 'sync-level=filesystem'

* ``gc-atime-safety-check``: Datastore GC atime update safety check:
  You can explicitly `enable` or `disable` the atime update safety check
  performed on datastore creation and garbage collection. This checks whether atime
  updates are handled as expected by garbage collection and therefore avoids the
  risk of data loss caused by unexpected filesystem behavior. It is recommended to
  leave this enabled, which is also the default value.

* ``gc-atime-cutoff``: Datastore GC atime cutoff for chunk cleanup:
  This allows setting the cutoff for which a chunk is still considered in-use
  during phase 2 of garbage collection (given no older writers). If the
  ``atime`` of the chunk is outside the range, it will be removed.

* ``gc-cache-capacity``: Datastore GC least recently used cache capacity:
  Allows controlling the cache capacity used to keep track of chunks for which
  the access time has already been updated during phase 1 of garbage collection.
  This avoids multiple updates and increases GC runtime performance. Higher
  values can reduce GC runtime at the cost of increased memory usage; setting the
  value to 0 disables caching. The given value sets the number of available
  cache slots, with 1048576 (= 1024 * 1024) being the default and 8388608
  (= 8192 * 1024) the maximum value.

If you want to set multiple tuning options simultaneously, you can separate them
with a comma, like this:
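.. code-block:: console

  # proxmox-backup-manager datastore update <storename> --tuning 'sync-level=filesystem,gc-cache-capacity=8388608'

The combination above is only an illustrative sketch; any of the tuning keys described in this
section can be combined in the same comma-separated way.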
@ -61,6 +61,7 @@ In general, LTO tapes offer the following advantages:

Note that `Proxmox Backup Server` already stores compressed data, so using the
tape compression feature has no advantage.

.. _tape-supported-hardware:

Supported Hardware
------------------
@ -969,6 +970,8 @@ You can restore from a tape even without an existing catalog, but only the

whole media set. If you do this, the catalog will be automatically created.


.. _tape_key_management:

Encryption Key Management
~~~~~~~~~~~~~~~~~~~~~~~~~
@ -1180,3 +1183,159 @@ In combination with fitting prune settings and tape backup schedules, this

achieves long-term storage of some backups, while keeping the recent
backups on smaller media sets that expire roughly every 4 weeks (that is, three
plus the current week).


Disaster Recovery
-----------------

.. _Command-line Tools: command-line-tools.html

In case of major disasters, important data, or even whole servers might be
destroyed or at least damaged up to the point where everything - sometimes
including the backup server - has to be restored from a backup. For such cases,
the following step-by-step guide will help you to set up the Proxmox Backup
Server and restore everything from tape backups.

The following guide will explain the necessary steps using both the web GUI and
the command line tools. For an overview of the command line tools, see
`Command-line Tools`_.


Setting Up a Datastore
~~~~~~~~~~~~~~~~~~~~~~

.. _proxmox-backup-manager: proxmox-backup-manager/man1.html

.. _Installation: installation.html

After you set up a new Proxmox Backup Server, as outlined in the `Installation`_
chapter, first set up a datastore so a tape can be restored to it:

#. Go to **Administration -> Storage / Disks** and make sure that the disk that
   will be used as a datastore shows up.

#. Under the **Directory** or **ZFS** tabs, you can either choose to create a
   directory or create a ZFS ``zpool``, respectively. Here you can also directly
   add the newly created directory or ZFS ``zpool`` as a datastore.

Alternatively, the `proxmox-backup-manager`_ can be used to perform the same
tasks. For more information, check the :ref:`datastore_intro` documentation.


Setting Up the Tape Drive
~~~~~~~~~~~~~~~~~~~~~~~~~

#. Make sure you have a properly working tape drive and/or changer matching the
   medium you want to restore from.

#. Connect the tape changer(s) and the tape drive(s) to the backup server. These
   should be detected automatically by Linux. You can get a list of available
   drives using:

   .. code-block:: console

     # proxmox-tape drive scan
     ┌────────────────────────────────┬────────┬─────────────┬────────┐
     │ path                           │ vendor │ model       │ serial │
     ╞════════════════════════════════╪════════╪═════════════╪════════╡
     │ /dev/tape/by-id/scsi-12345-sg  │ IBM    │ ULT3580-TD4 │ 12345  │
     └────────────────────────────────┴────────┴─────────────┴────────┘

   You can get a list of available changers with:

   .. code-block:: console

     # proxmox-tape changer scan
     ┌─────────────────────────────┬─────────┬──────────────┬────────┐
     │ path                        │ vendor  │ model        │ serial │
     ╞═════════════════════════════╪═════════╪══════════════╪════════╡
     │ /dev/tape/by-id/scsi-CC2C52 │ Quantum │ Superloader3 │ CC2C52 │
     └─────────────────────────────┴─────────┴──────────────┴────────┘

   For more information, please read the chapters
   on :ref:`tape_changer_config` and :ref:`tape_drive_config`.

#. If you have a tape changer, go to the web interface of the Proxmox Backup
   Server, go to **Tape Backup -> Changers** and add it. For examples using the
   command line, read the chapter on :ref:`tape_changer_config`. If the changer
   has been detected correctly by Linux, the changer should show up in the list.

#. In the web interface, go to **Tape Backup -> Drives** and add the tape drive
   that will be used to read the tapes. For examples using the command line,
   read the chapter on :ref:`tape_drive_config`. If the tape drive has been
   detected correctly by Linux, the drive should show up in the list. If the
   drive also has a tape changer, make sure to select the changer as well and
   assign it the correct drive number.


Restoring Data From the Tape
~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. _proxmox-tape: proxmox-tape/man1.html

.. _proxmox-backup-client: proxmox-backup-client/man1.html

.. _Restore: https://pve.proxmox.com/pve-docs/chapter-vzdump.html#vzdump_restore

The following guide will explain the steps necessary to restore data from a
tape, which can be done over either the web GUI or the command line. For details
on the command line, read the documentation on the `proxmox-tape`_ tool.

To restore data from tapes, do the following:

#. Insert the first tape (as displayed on the label) into the tape drive or, if
   a tape changer is available, use the tape changer to insert the tape into the
   right drive. The web GUI can also be used to load or transfer tapes between
   tape drives by selecting the changer.

#. If the backup has been encrypted, the encryption keys need to be restored as
   well. In the **Encryption Keys** tab, press **Restore Key**. For more
   details or examples that use the command line, read the
   :ref:`tape_key_management` chapter.

#. The procedure for restoring data is slightly different depending on whether
   you are using a standalone tape drive or a changer:

   * For changers, the procedure is simple:

     #. Insert all tapes from the media set you want to restore from.

     #. Click on the changer in the web GUI, click **Inventory**, make sure
        **Restore Catalog** is selected and press OK.

   * For standalone drives, the procedure would be:

     #. Insert the first tape of the media set.

     #. Click **Catalog**.

     #. Eject the tape, then repeat the steps for the remaining tapes of the
        media set.

#. Go back to **Tape Backup**. In the **Content** tab, press **Restore** and
   select the desired media set. Choose the snapshot you want to restore, press
   **Next**, select the drive and target datastore and press **Restore**.

#. By going to the datastore where the data has been restored, under the
   **Content** tab you should be able to see the restored snapshots. In order to
   access the backups from another machine, you will need to configure
   access to the backup server. Go to **Configuration -> Access Control** and
   either create a new user or a new API token (API tokens allow easy
   revocation if the token is compromised). Under **Permissions**, add the
   desired permissions, e.g. **DatastoreBackup**.

#. You can now perform virtual machine, container or file restores, with the
   following options:

   * If you want to restore files on Linux distributions that are not based on
     Proxmox products, or you prefer using a command line tool, you can use the
     `proxmox-backup-client`_, as explained in the
     :ref:`client_restoring_data` chapter. Use the newly created API token to
     be able to access the data. You can then restore individual files or
     mount an archive to your system.

   * If you want to restore virtual machines or containers on a Proxmox VE
     server, add the datastore of the backup server as storage and go to
     **Backups**. Here you can restore VMs and containers, including their
     configuration. For more information on restoring backups in Proxmox VE,
     visit the `Restore`_ chapter of the Proxmox VE documentation.
@ -298,8 +298,8 @@ will see that the probability of a collision in that scenario is:

For context, in a lottery game of guessing 6 numbers out of 45, the chance to
correctly guess all 6 numbers is only :math:`1.2277 * 10^{-7}`. This means the
chance of a collision is lower than winning 8 such lottery games *in a row*:
:math:`(1.2277 * 10^{-7})^{8} = 5.1623 * 10^{-56}`.

In conclusion, it is extremely unlikely that such a collision would occur by
accident in a normal datastore.
@ -19,11 +19,12 @@ certain hosts.

You can manage the traffic controls either via the web-interface or using the
``traffic-control`` commands of the ``proxmox-backup-manager`` command-line
tool. Traffic is limited by rate (``rate-in`` and ``rate-out``) and allows for
short bursts by setting the token bucket size (``burst-in`` and ``burst-out``).

.. note:: Sync jobs on the server are not affected by the configured rate limits.
   If you want to limit the incoming traffic of pull-based or the outgoing traffic
   of push-based sync jobs, you need to set up a job-specific rate limit. See
   :ref:`syncjobs`.

The following command adds a traffic control rule to limit all IPv4 clients:
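A minimal sketch of such a rule (rule name, network and limits are placeholders; adapt them to your
environment):

.. code-block:: console

  # proxmox-backup-manager traffic-control create rule0 --network '0.0.0.0/0' \
    --rate-in 100MB --rate-out 100MB \
    --comment "Default rate limit (100MB/s) for all clients"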
@ -16,8 +16,8 @@ User Configuration

choose the realm when you add a new user. Possible realms are:

:pam: Linux PAM standard authentication. Use this if you want to
   authenticate as a Linux system user. The user needs to already exist on
   the host system.

:pbs: Proxmox Backup Server realm. This type stores hashed passwords in
   ``/etc/proxmox-backup/shadow.json``.
@ -599,6 +599,32 @@ list view in the web UI, or using the command line:

Authentication Realms
---------------------

.. _user_realms_pam:

Linux PAM
~~~~~~~~~

Linux PAM is a framework for system-wide user authentication. These users are
created on the host system with commands such as ``adduser``.

If PAM users exist on the host system, corresponding entries can be added to
Proxmox Backup Server, to allow these users to log in via their system username
and password.
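
For example, a system user created with ``adduser`` can afterwards be added to the ``pam`` realm.
This is a sketch; the exact ``user create`` options you want to set may differ:

.. code-block:: console

  # adduser bob
  # proxmox-backup-manager user create bob@pam --comment "Backup operator"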

.. _user_realms_pbs:

Proxmox Backup authentication server
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

This is a Unix-like password store, which stores hashed passwords in
``/etc/proxmox-backup/shadow.json``. Passwords are hashed using the SHA-256
hashing algorithm.

This is the most convenient realm for small-scale (or even mid-scale)
installations, where users do not need access to anything outside of Proxmox
Backup Server. In this case, users are fully managed by Proxmox Backup Server
and are able to change their own passwords via the GUI.
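
A user in this realm can also be created on the command line, for example (a sketch, assuming the
``--email`` option; the initial password can be set with ``--password`` or changed later via the
GUI):

.. code-block:: console

  # proxmox-backup-manager user create john@pbs --email john@example.com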

.. _user_realms_ldap:

LDAP
@ -152,7 +152,7 @@ not commonly used in your country.

   :alt: Proxmox Backup Server Installer - Password and email configuration

Next, the password of the superuser (``root``) and an email address need to be
specified. The password must consist of at least 8 characters. It's highly
recommended to use a stronger password. Some guidelines are:
@ -10,7 +10,7 @@ DYNAMIC_UNITS := \
|
||||
proxmox-backup.service \
|
||||
proxmox-backup-proxy.service
|
||||
|
||||
all: $(UNITS) $(DYNAMIC_UNITS) pbs-enterprise.list
|
||||
all: $(UNITS) $(DYNAMIC_UNITS) pbs-enterprise.sources
|
||||
|
||||
clean:
|
||||
rm -f $(DYNAMIC_UNITS)
|
||||
|
@ -1 +0,0 @@
|
||||
deb https://enterprise.proxmox.com/debian/pbs bookworm pbs-enterprise
|
5
etc/pbs-enterprise.sources
Normal file
5
etc/pbs-enterprise.sources
Normal file
@ -0,0 +1,5 @@
|
||||
Types: deb
|
||||
URIs: https://enterprise.proxmox.com/debian/pbs
|
||||
Suites: trixie
|
||||
Components: pbs-enterprise
|
||||
Signed-By: /usr/share/keyrings/proxmox-archive-keyring.gpg
|
@ -1,4 +1,5 @@
|
||||
use anyhow::Error;
|
||||
use rustyline::history::MemHistory;
|
||||
|
||||
use proxmox_router::cli::*;
|
||||
use proxmox_schema::*;
|
||||
@ -71,7 +72,10 @@ fn cli_definition() -> CommandLineInterface {
|
||||
fn main() -> Result<(), Error> {
|
||||
let helper = CliHelper::new(cli_definition());
|
||||
|
||||
let mut rl = rustyline::Editor::<CliHelper>::new();
|
||||
let mut rl = rustyline::Editor::<CliHelper, MemHistory>::with_history(
|
||||
Default::default(),
|
||||
MemHistory::new(),
|
||||
)?;
|
||||
rl.set_helper(Some(helper));
|
||||
|
||||
while let Ok(line) = rl.readline("# prompt: ") {
|
||||
@ -82,7 +86,7 @@ fn main() -> Result<(), Error> {
|
||||
let rpcenv = CliEnvironment::new();
|
||||
let _ = handle_command(helper.cmd_def(), "", args, rpcenv, None);
|
||||
|
||||
rl.add_history_entry(line);
|
||||
rl.add_history_entry(line)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
|
@ -1,109 +0,0 @@
|
||||
use std::future::Future;
|
||||
use std::pin::Pin;
|
||||
use std::task::{Context, Poll};
|
||||
|
||||
use anyhow::Error;
|
||||
use futures::future::TryFutureExt;
|
||||
use futures::stream::Stream;
|
||||
use tokio::net::TcpStream;
|
||||
|
||||
// Simple H2 client to test H2 download speed using h2server.rs
|
||||
|
||||
struct Process {
|
||||
body: h2::RecvStream,
|
||||
trailers: bool,
|
||||
bytes: usize,
|
||||
}
|
||||
|
||||
impl Future for Process {
|
||||
type Output = Result<usize, Error>;
|
||||
|
||||
fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
|
||||
let this = self.get_mut();
|
||||
|
||||
loop {
|
||||
if this.trailers {
|
||||
match futures::ready!(this.body.poll_trailers(cx)) {
|
||||
Ok(Some(trailers)) => println!("trailers: {:?}", trailers),
|
||||
Ok(None) => (),
|
||||
Err(err) => return Poll::Ready(Err(Error::from(err))),
|
||||
}
|
||||
|
||||
println!("Received {} bytes", this.bytes);
|
||||
|
||||
return Poll::Ready(Ok(this.bytes));
|
||||
} else {
|
||||
match futures::ready!(Pin::new(&mut this.body).poll_next(cx)) {
|
||||
Some(Ok(chunk)) => {
|
||||
this.body.flow_control().release_capacity(chunk.len())?;
|
||||
this.bytes += chunk.len();
|
||||
// println!("GOT FRAME {}", chunk.len());
|
||||
}
|
||||
Some(Err(err)) => return Poll::Ready(Err(Error::from(err))),
|
||||
None => {
|
||||
this.trailers = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn send_request(
|
||||
mut client: h2::client::SendRequest<bytes::Bytes>,
|
||||
) -> impl Future<Output = Result<usize, Error>> {
|
||||
println!("sending request");
|
||||
|
||||
let request = hyper::http::Request::builder()
|
||||
.uri("http://localhost/")
|
||||
.body(())
|
||||
.unwrap();
|
||||
|
||||
let (response, _stream) = client.send_request(request, true).unwrap();
|
||||
|
||||
response.map_err(Error::from).and_then(|response| Process {
|
||||
body: response.into_body(),
|
||||
trailers: false,
|
||||
bytes: 0,
|
||||
})
|
||||
}
|
||||
|
||||
fn main() -> Result<(), Error> {
|
||||
proxmox_async::runtime::main(run())
|
||||
}
|
||||
|
||||
async fn run() -> Result<(), Error> {
|
||||
let start = std::time::SystemTime::now();
|
||||
|
||||
let conn = TcpStream::connect(std::net::SocketAddr::from(([127, 0, 0, 1], 8008))).await?;
|
||||
conn.set_nodelay(true).unwrap();
|
||||
|
||||
let (client, h2) = h2::client::Builder::new()
|
||||
.initial_connection_window_size(1024 * 1024 * 1024)
|
||||
.initial_window_size(1024 * 1024 * 1024)
|
||||
.max_frame_size(4 * 1024 * 1024)
|
||||
.handshake(conn)
|
||||
.await?;
|
||||
|
||||
tokio::spawn(async move {
|
||||
if let Err(err) = h2.await {
|
||||
println!("GOT ERR={:?}", err);
|
||||
}
|
||||
});
|
||||
|
||||
let mut bytes = 0;
|
||||
for _ in 0..2000 {
|
||||
bytes += send_request(client.clone()).await?;
|
||||
}
|
||||
|
||||
let elapsed = start.elapsed().unwrap();
|
||||
let elapsed = (elapsed.as_secs() as f64) + (elapsed.subsec_millis() as f64) / 1000.0;
|
||||
|
||||
println!(
|
||||
"Downloaded {} bytes, {} MB/s",
|
||||
bytes,
|
||||
(bytes as f64) / (elapsed * 1024.0 * 1024.0)
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
@ -1,125 +0,0 @@
|
||||
use std::future::Future;
|
||||
use std::pin::Pin;
|
||||
use std::task::{Context, Poll};
|
||||
|
||||
use anyhow::{format_err, Error};
|
||||
use futures::future::TryFutureExt;
|
||||
use futures::stream::Stream;
|
||||
use tokio::net::TcpStream;
|
||||
|
||||
// Simple H2 client to test H2 download speed using h2s-server.rs
|
||||
|
||||
struct Process {
|
||||
body: h2::RecvStream,
|
||||
trailers: bool,
|
||||
bytes: usize,
|
||||
}
|
||||
|
||||
impl Future for Process {
|
||||
type Output = Result<usize, Error>;
|
||||
|
||||
fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
|
||||
let this = self.get_mut();
|
||||
|
||||
loop {
|
||||
if this.trailers {
|
||||
match futures::ready!(this.body.poll_trailers(cx)) {
|
||||
Ok(Some(trailers)) => println!("trailers: {:?}", trailers),
|
||||
Ok(None) => (),
|
||||
Err(err) => return Poll::Ready(Err(Error::from(err))),
|
||||
}
|
||||
|
||||
println!("Received {} bytes", this.bytes);
|
||||
|
||||
return Poll::Ready(Ok(this.bytes));
|
||||
} else {
|
||||
match futures::ready!(Pin::new(&mut this.body).poll_next(cx)) {
|
||||
Some(Ok(chunk)) => {
|
||||
this.body.flow_control().release_capacity(chunk.len())?;
|
||||
this.bytes += chunk.len();
|
||||
// println!("GOT FRAME {}", chunk.len());
|
||||
}
|
||||
Some(Err(err)) => return Poll::Ready(Err(Error::from(err))),
|
||||
None => {
|
||||
this.trailers = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn send_request(
|
||||
mut client: h2::client::SendRequest<bytes::Bytes>,
|
||||
) -> impl Future<Output = Result<usize, Error>> {
|
||||
println!("sending request");
|
||||
|
||||
let request = hyper::http::Request::builder()
|
||||
.uri("http://localhost/")
|
||||
.body(())
|
||||
.unwrap();
|
||||
|
||||
let (response, _stream) = client.send_request(request, true).unwrap();
|
||||
|
||||
response.map_err(Error::from).and_then(|response| Process {
|
||||
body: response.into_body(),
|
||||
trailers: false,
|
||||
bytes: 0,
|
||||
})
|
||||
}
|
||||
|
||||
fn main() -> Result<(), Error> {
|
||||
proxmox_async::runtime::main(run())
|
||||
}
|
||||
|
||||
async fn run() -> Result<(), Error> {
|
||||
let start = std::time::SystemTime::now();
|
||||
|
||||
let conn = TcpStream::connect(std::net::SocketAddr::from(([127, 0, 0, 1], 8008))).await?;
|
||||
conn.set_nodelay(true).unwrap();
|
||||
|
||||
use openssl::ssl::{SslConnector, SslMethod};
|
||||
|
||||
let mut ssl_connector_builder = SslConnector::builder(SslMethod::tls()).unwrap();
|
||||
ssl_connector_builder.set_verify(openssl::ssl::SslVerifyMode::NONE);
|
||||
let ssl = ssl_connector_builder
|
||||
.build()
|
||||
.configure()?
|
||||
.into_ssl("localhost")?;
|
||||
|
||||
let conn = tokio_openssl::SslStream::new(ssl, conn)?;
|
||||
let mut conn = Box::pin(conn);
|
||||
conn.as_mut()
|
||||
.connect()
|
||||
.await
|
||||
.map_err(|err| format_err!("connect failed - {}", err))?;
|
||||
|
||||
let (client, h2) = h2::client::Builder::new()
|
||||
.initial_connection_window_size(1024 * 1024 * 1024)
|
||||
.initial_window_size(1024 * 1024 * 1024)
|
||||
.max_frame_size(4 * 1024 * 1024)
|
||||
.handshake(conn)
|
||||
.await?;
|
||||
|
||||
tokio::spawn(async move {
|
||||
if let Err(err) = h2.await {
|
||||
println!("GOT ERR={:?}", err);
|
||||
}
|
||||
});
|
||||
|
||||
let mut bytes = 0;
|
||||
for _ in 0..2000 {
|
||||
bytes += send_request(client.clone()).await?;
|
||||
}
|
||||
|
||||
let elapsed = start.elapsed().unwrap();
|
||||
let elapsed = (elapsed.as_secs() as f64) + (elapsed.subsec_millis() as f64) / 1000.0;
|
||||
|
||||
println!(
|
||||
"Downloaded {} bytes, {} MB/s",
|
||||
bytes,
|
||||
(bytes as f64) / (elapsed * 1024.0 * 1024.0)
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
@ -1,82 +0,0 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use anyhow::{format_err, Error};
|
||||
use futures::*;
|
||||
use hyper::{Body, Request, Response};
|
||||
use openssl::ssl::{SslAcceptor, SslFiletype, SslMethod};
|
||||
use tokio::net::{TcpListener, TcpStream};
|
||||
|
||||
use pbs_buildcfg::configdir;
|
||||
|
||||
fn main() -> Result<(), Error> {
|
||||
proxmox_async::runtime::main(run())
|
||||
}
|
||||
|
||||
async fn run() -> Result<(), Error> {
|
||||
let key_path = configdir!("/proxy.key");
|
||||
let cert_path = configdir!("/proxy.pem");
|
||||
|
||||
let mut acceptor = SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap();
|
||||
acceptor
|
||||
.set_private_key_file(key_path, SslFiletype::PEM)
|
||||
.map_err(|err| format_err!("unable to read proxy key {} - {}", key_path, err))?;
|
||||
acceptor
|
||||
.set_certificate_chain_file(cert_path)
|
||||
.map_err(|err| format_err!("unable to read proxy cert {} - {}", cert_path, err))?;
|
||||
acceptor.check_private_key().unwrap();
|
||||
|
||||
let acceptor = Arc::new(acceptor.build());
|
||||
|
||||
let listener = TcpListener::bind(std::net::SocketAddr::from(([127, 0, 0, 1], 8008))).await?;
|
||||
|
||||
println!("listening on {:?}", listener.local_addr());
|
||||
|
||||
loop {
|
||||
let (socket, _addr) = listener.accept().await?;
|
||||
tokio::spawn(handle_connection(socket, Arc::clone(&acceptor)).map(|res| {
|
||||
if let Err(err) = res {
|
||||
eprintln!("Error: {}", err);
|
||||
}
|
||||
}));
|
||||
}
|
||||
}
|
||||
|
||||
async fn handle_connection(socket: TcpStream, acceptor: Arc<SslAcceptor>) -> Result<(), Error> {
|
||||
socket.set_nodelay(true).unwrap();
|
||||
|
||||
let ssl = openssl::ssl::Ssl::new(acceptor.context())?;
|
||||
let stream = tokio_openssl::SslStream::new(ssl, socket)?;
|
||||
let mut stream = Box::pin(stream);
|
||||
|
||||
stream.as_mut().accept().await?;
|
||||
|
||||
let mut http = hyper::server::conn::Http::new();
|
||||
http.http2_only(true);
|
||||
// increase window size: todo - find optiomal size
|
||||
let max_window_size = (1 << 31) - 2;
|
||||
http.http2_initial_stream_window_size(max_window_size);
|
||||
http.http2_initial_connection_window_size(max_window_size);
|
||||
|
||||
let service = hyper::service::service_fn(|_req: Request<Body>| {
|
||||
println!("Got request");
|
||||
let buffer = vec![65u8; 4 * 1024 * 1024]; // nonsense [A,A,A,A...]
|
||||
let body = Body::from(buffer);
|
||||
|
||||
let response = Response::builder()
|
||||
.status(hyper::http::StatusCode::OK)
|
||||
.header(
|
||||
hyper::http::header::CONTENT_TYPE,
|
||||
"application/octet-stream",
|
||||
)
|
||||
.body(body)
|
||||
.unwrap();
|
||||
future::ok::<_, Error>(response)
|
||||
});
|
||||
|
||||
http.serve_connection(stream, service)
|
||||
.map_err(Error::from)
|
||||
.await?;
|
||||
|
||||
println!("H2 connection CLOSE !");
|
||||
Ok(())
|
||||
}
|
@ -1,58 +0,0 @@
|
||||
use anyhow::Error;
|
||||
use futures::*;
|
||||
use hyper::{Body, Request, Response};
|
||||
|
||||
use tokio::net::{TcpListener, TcpStream};
|
||||
|
||||
fn main() -> Result<(), Error> {
|
||||
proxmox_async::runtime::main(run())
|
||||
}
|
||||
|
||||
async fn run() -> Result<(), Error> {
|
||||
let listener = TcpListener::bind(std::net::SocketAddr::from(([127, 0, 0, 1], 8008))).await?;
|
||||
|
||||
println!("listening on {:?}", listener.local_addr());
|
||||
|
||||
loop {
|
||||
let (socket, _addr) = listener.accept().await?;
|
||||
tokio::spawn(handle_connection(socket).map(|res| {
|
||||
if let Err(err) = res {
|
||||
eprintln!("Error: {}", err);
|
||||
}
|
||||
}));
|
||||
}
|
||||
}
|
||||
|
||||
async fn handle_connection(socket: TcpStream) -> Result<(), Error> {
|
||||
socket.set_nodelay(true).unwrap();
|
||||
|
||||
let mut http = hyper::server::conn::Http::new();
|
||||
http.http2_only(true);
|
||||
// increase window size: todo - find optiomal size
|
||||
let max_window_size = (1 << 31) - 2;
|
||||
http.http2_initial_stream_window_size(max_window_size);
|
||||
http.http2_initial_connection_window_size(max_window_size);
|
||||
|
||||
let service = hyper::service::service_fn(|_req: Request<Body>| {
|
||||
println!("Got request");
|
||||
let buffer = vec![65u8; 4 * 1024 * 1024]; // nonsense [A,A,A,A...]
|
||||
let body = Body::from(buffer);
|
||||
|
||||
let response = Response::builder()
|
||||
.status(hyper::http::StatusCode::OK)
|
||||
.header(
|
||||
hyper::http::header::CONTENT_TYPE,
|
||||
"application/octet-stream",
|
||||
)
|
||||
.body(body)
|
||||
.unwrap();
|
||||
future::ok::<_, Error>(response)
|
||||
});
|
||||
|
||||
http.serve_connection(socket, service)
|
||||
.map_err(Error::from)
|
||||
.await?;
|
||||
|
||||
println!("H2 connection CLOSE !");
|
||||
Ok(())
|
||||
}
|
@ -1,7 +1,7 @@
|
||||
use anyhow::Error;
|
||||
|
||||
use pbs_api_types::{Authid, BackupNamespace, BackupType};
|
||||
use pbs_client::{BackupWriter, HttpClient, HttpClientOptions};
|
||||
use pbs_client::{BackupWriter, BackupWriterOptions, HttpClient, HttpClientOptions};
|
||||
|
||||
async fn upload_speed() -> Result<f64, Error> {
|
||||
let host = "localhost";
|
||||
@ -19,12 +19,15 @@ async fn upload_speed() -> Result<f64, Error> {
|
||||
|
||||
let client = BackupWriter::start(
|
||||
&client,
|
||||
None,
|
||||
datastore,
|
||||
&BackupNamespace::root(),
|
||||
&(BackupType::Host, "speedtest".to_string(), backup_time).into(),
|
||||
false,
|
||||
true,
|
||||
BackupWriterOptions {
|
||||
datastore,
|
||||
ns: &BackupNamespace::root(),
|
||||
backup: &(BackupType::Host, "speedtest".to_string(), backup_time).into(),
|
||||
crypt_config: None,
|
||||
debug: false,
|
||||
benchmark: true,
|
||||
no_cache: false,
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
|
||||
|
@ -12,6 +12,8 @@ bytes.workspace = true
|
||||
futures.workspace = true
|
||||
h2.workspace = true
|
||||
hex.workspace = true
|
||||
http-body-util.workspace = true
|
||||
hyper-util = { workspace = true, features = ["client", "client-legacy", "http1", "http2", "tokio" ]}
|
||||
hyper.workspace = true
|
||||
libc.workspace = true
|
||||
nix.workspace = true
|
||||
@ -27,13 +29,14 @@ tokio = { workspace = true, features = [ "fs", "signal" ] }
|
||||
tokio-stream.workspace = true
|
||||
tower-service.workspace = true
|
||||
xdg.workspace = true
|
||||
hickory-resolver.workspace = true
|
||||
|
||||
pathpatterns.workspace = true
|
||||
|
||||
proxmox-async.workspace = true
|
||||
proxmox-auth-api.workspace = true
|
||||
proxmox-compression.workspace = true
|
||||
proxmox-http = { workspace = true, features = [ "rate-limiter" ] }
|
||||
proxmox-http = { workspace = true, features = [ "body", "rate-limiter" ] }
|
||||
proxmox-human-byte.workspace = true
|
||||
proxmox-io = { workspace = true, features = [ "tokio" ] }
|
||||
proxmox-log = { workspace = true }
|
||||
|
@ -7,10 +7,13 @@ const_regex! {
|
||||
BACKUPSPEC_REGEX = r"^([a-zA-Z0-9_-]+\.(pxar|img|conf|log)):(.+)$";
|
||||
}
|
||||
|
||||
pub const BACKUP_SOURCE_SCHEMA: Schema =
|
||||
StringSchema::new("Backup source specification ([<label>:<path>]).")
|
||||
.format(&ApiStringFormat::Pattern(&BACKUPSPEC_REGEX))
|
||||
.schema();
|
||||
pub const BACKUP_SOURCE_SCHEMA: Schema = StringSchema::new(
|
||||
"Backup source specification ([<archive-name>.<type>:<source-path>]), the \
|
||||
'archive-name' must contain alphanumerics, hyphens and underscores only. \
|
||||
The 'type' must be either 'pxar', 'img', 'conf' or 'log'.",
|
||||
)
|
||||
.format(&ApiStringFormat::Pattern(&BACKUPSPEC_REGEX))
|
||||
.schema();
|
||||
|
||||
pub enum BackupSpecificationType {
|
||||
PXAR,
|
||||
@ -35,7 +38,7 @@ pub fn parse_backup_specification(value: &str) -> Result<BackupSpecification, Er
|
||||
"img" => BackupSpecificationType::IMAGE,
|
||||
"conf" => BackupSpecificationType::CONFIG,
|
||||
"log" => BackupSpecificationType::LOGFILE,
|
||||
_ => bail!("unknown backup source type '{}'", extension),
|
||||
_ => bail!("unknown backup source type '{extension}'"),
|
||||
};
|
||||
return Ok(BackupSpecification {
|
||||
archive_name,
|
||||
@ -44,7 +47,7 @@ pub fn parse_backup_specification(value: &str) -> Result<BackupSpecification, Er
|
||||
});
|
||||
}
|
||||
|
||||
bail!("unable to parse backup source specification '{}'", value);
|
||||
bail!("unable to parse backup source specification '{value}'");
|
||||
}
|
||||
|
||||
#[api]
|
||||
|
@ -63,6 +63,24 @@ struct ChunkUploadResponse {
|
||||
type UploadQueueSender = mpsc::Sender<(MergedChunkInfo, Option<ChunkUploadResponse>)>;
|
||||
type UploadResultReceiver = oneshot::Receiver<Result<(), Error>>;
|
||||
|
||||
/// Additional configuration options for BackupWriter instance
|
||||
pub struct BackupWriterOptions<'a> {
|
||||
/// Target datastore
|
||||
pub datastore: &'a str,
|
||||
/// Target namespace
|
||||
pub ns: &'a BackupNamespace,
|
||||
/// Target snapshot
|
||||
pub backup: &'a BackupDir,
|
||||
/// Crypto configuration
|
||||
pub crypt_config: Option<Arc<CryptConfig>>,
|
||||
/// Run in debug mode
|
||||
pub debug: bool,
|
||||
/// Start benchmark
|
||||
pub benchmark: bool,
|
||||
/// Skip datastore cache
|
||||
pub no_cache: bool,
|
||||
}
|
||||
|
||||
impl BackupWriter {
|
||||
fn new(h2: H2Client, abort: AbortHandle, crypt_config: Option<Arc<CryptConfig>>) -> Arc<Self> {
|
||||
Arc::new(Self {
|
||||
@ -72,28 +90,25 @@ impl BackupWriter {
|
||||
})
|
||||
}
|
||||
|
||||
// FIXME: extract into (flattened) parameter struct?
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub async fn start(
|
||||
pub async fn start<'a>(
|
||||
client: &HttpClient,
|
||||
crypt_config: Option<Arc<CryptConfig>>,
|
||||
datastore: &str,
|
||||
ns: &BackupNamespace,
|
||||
backup: &BackupDir,
|
||||
debug: bool,
|
||||
benchmark: bool,
|
||||
writer_options: BackupWriterOptions<'a>,
|
||||
) -> Result<Arc<BackupWriter>, Error> {
|
||||
let mut param = json!({
|
||||
"backup-type": backup.ty(),
|
||||
"backup-id": backup.id(),
|
||||
"backup-time": backup.time,
|
||||
"store": datastore,
|
||||
"debug": debug,
|
||||
"benchmark": benchmark
|
||||
"backup-type": writer_options.backup.ty(),
|
||||
"backup-id": writer_options.backup.id(),
|
||||
"backup-time": writer_options.backup.time,
|
||||
"store": writer_options.datastore,
|
||||
"debug": writer_options.debug,
|
||||
"benchmark": writer_options.benchmark,
|
||||
});
|
||||
if writer_options.no_cache {
|
||||
param["no-cache"] = serde_json::to_value(writer_options.no_cache)?;
|
||||
}
|
||||
|
||||
if !ns.is_root() {
|
||||
param["ns"] = serde_json::to_value(ns)?;
|
||||
if !writer_options.ns.is_root() {
|
||||
param["ns"] = serde_json::to_value(writer_options.ns)?;
|
||||
}
|
||||
|
||||
let req = HttpClient::request_builder(
|
||||
@ -109,7 +124,7 @@ impl BackupWriter {
|
||||
.start_h2_connection(req, String::from(PROXMOX_BACKUP_PROTOCOL_ID_V1!()))
|
||||
.await?;
|
||||
|
||||
Ok(BackupWriter::new(h2, abort, crypt_config))
|
||||
Ok(BackupWriter::new(h2, abort, writer_options.crypt_config))
|
||||
}
|
||||
|
||||
pub async fn get(&self, path: &str, param: Option<Value>) -> Result<Value, Error> {
|
||||
|
@ -306,7 +306,7 @@ async fn restore_command(target: String, pattern: Option<String>) -> Result<(),
|
||||
/// here:
|
||||
pub struct Shell {
|
||||
/// Readline instance handling input and callbacks
|
||||
rl: rustyline::Editor<CliHelper>,
|
||||
rl: rustyline::Editor<CliHelper, rustyline::history::MemHistory>,
|
||||
|
||||
/// Interactive prompt.
|
||||
prompt: String,
|
||||
@ -352,7 +352,10 @@ impl Shell {
|
||||
archive: Accessor,
|
||||
) -> Result<Self, Error> {
|
||||
let cli_helper = CliHelper::new(catalog_shell_cli());
|
||||
let mut rl = rustyline::Editor::<CliHelper>::new();
|
||||
let mut rl = rustyline::Editor::<CliHelper, _>::with_history(
|
||||
rustyline::Config::default(),
|
||||
rustyline::history::MemHistory::new(),
|
||||
)?;
|
||||
rl.set_helper(Some(cli_helper));
|
||||
|
||||
let mut position = Vec::new();
|
||||
@ -426,7 +429,7 @@ impl Shell {
|
||||
let _ =
|
||||
cli::handle_command_future(helper.cmd_def(), "", args, cli::CliEnvironment::new())
|
||||
.await;
|
||||
this.rl.add_history_entry(line);
|
||||
let _ = this.rl.add_history_entry(line);
|
||||
this.update_prompt();
|
||||
}
|
||||
Ok(())
|
||||
|
@ -3,12 +3,18 @@ use std::sync::{Arc, Mutex, RwLock};
|
||||
use std::time::Duration;
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use bytes::Bytes;
|
||||
use futures::*;
|
||||
use hyper::client::{Client, HttpConnector};
|
||||
use http_body_util::{BodyDataStream, BodyExt};
|
||||
use hyper::body::Incoming;
|
||||
use hyper::header::SET_COOKIE;
|
||||
use hyper::http::header::HeaderValue;
|
||||
use hyper::http::Uri;
|
||||
use hyper::http::{Request, Response};
|
||||
use hyper::Body;
|
||||
#[cfg(not(target_feature = "crt-static"))]
|
||||
use hyper_util::client::legacy::connect::dns::GaiResolver;
|
||||
use hyper_util::client::legacy::{connect::HttpConnector, Client};
|
||||
use hyper_util::rt::{TokioExecutor, TokioIo};
|
||||
use openssl::{
|
||||
ssl::{SslConnector, SslMethod},
|
||||
x509::X509StoreContextRef,
|
||||
@ -24,6 +30,7 @@ use proxmox_sys::linux::tty;
|
||||
use proxmox_async::broadcast_future::BroadcastFuture;
|
||||
use proxmox_http::client::HttpsConnector;
|
||||
use proxmox_http::uri::{build_authority, json_object_to_query};
|
||||
use proxmox_http::Body;
|
||||
use proxmox_http::{ProxyConfig, RateLimiter};
|
||||
use proxmox_log::{error, info, warn};
|
||||
|
||||
@ -33,15 +40,88 @@ use pbs_api_types::{Authid, RateLimitConfig, Userid};
|
||||
use super::pipe_to_stream::PipeToSendStream;
|
||||
use super::PROXMOX_BACKUP_TCP_KEEPALIVE_TIME;
|
||||
|
||||
#[cfg(not(target_feature = "crt-static"))]
|
||||
type DnsResolver = GaiResolver;
|
||||
|
||||
#[cfg(target_feature = "crt-static")]
|
||||
type DnsResolver = resolver::HickoryDnsResolver;
|
||||
|
||||
#[cfg(target_feature = "crt-static")]
|
||||
mod resolver {
|
||||
use std::net::SocketAddr;
|
||||
use std::pin::Pin;
|
||||
use std::sync::Arc;
|
||||
use std::task::{Context, Poll};
|
||||
|
||||
use futures::Future;
|
||||
use hickory_resolver::error::ResolveError;
|
||||
use hickory_resolver::lookup_ip::LookupIpIntoIter;
|
||||
use hickory_resolver::TokioAsyncResolver;
|
||||
use hyper_util::client::legacy::connect::dns::Name;
|
||||
use tower_service::Service;
|
||||
|
||||
pub(crate) struct SocketAddrIter {
|
||||
inner: LookupIpIntoIter,
|
||||
}
|
||||
|
||||
impl Iterator for SocketAddrIter {
|
||||
type Item = SocketAddr;
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
self.inner.next().map(|ip_addr| SocketAddr::new(ip_addr, 0))
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub(crate) struct HickoryDnsResolver {
|
||||
inner: Arc<TokioAsyncResolver>,
|
||||
}
|
||||
|
||||
impl HickoryDnsResolver {
|
||||
pub(crate) fn new() -> Self {
|
||||
Self {
|
||||
inner: Arc::new(TokioAsyncResolver::tokio_from_system_conf().unwrap()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Service<Name> for HickoryDnsResolver {
|
||||
type Response = SocketAddrIter;
|
||||
type Error = ResolveError;
|
||||
type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send>>;
|
||||
|
||||
fn poll_ready(&mut self, _ctx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
Poll::Ready(Ok(()))
|
||||
}
|
||||
|
||||
fn call(&mut self, name: Name) -> Self::Future {
|
||||
let inner = self.inner.clone();
|
||||
Box::pin(async move {
|
||||
inner
|
||||
.lookup_ip(name.as_str())
|
||||
.await
|
||||
.map(|r| SocketAddrIter {
|
||||
inner: r.into_iter(),
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Timeout used for several HTTP operations that are expected to finish quickly but may block in
|
||||
/// certain error conditions. Keep it generous, to avoid false positives under high load.
|
||||
const HTTP_TIMEOUT: Duration = Duration::from_secs(2 * 60);
|
||||
|
||||
const PROXMOX_BACKUP_AUTH_COOKIE: &str = "PBSAuthCookie";
|
||||
const PROXMOX_BACKUP_PREFIXED_AUTH_COOKIE: &str = "__Host-PBSAuthCookie";
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct AuthInfo {
|
||||
pub auth_id: Authid,
|
||||
pub ticket: String,
|
||||
pub token: String,
|
||||
// Whether the server uses HttpOnly cookies for authentication
|
||||
pub http_only: bool,
|
||||
}
|
||||
|
||||
pub struct HttpClientOptions {
|
||||
@ -134,7 +214,7 @@ impl Default for HttpClientOptions {
|
||||
|
||||
/// HTTP(S) API client
|
||||
pub struct HttpClient {
|
||||
client: Client<HttpsConnector>,
|
||||
client: Client<HttpsConnector<DnsResolver>, Body>,
|
||||
server: String,
|
||||
port: u16,
|
||||
fingerprint: Arc<Mutex<Option<String>>>,
|
||||
@ -365,7 +445,8 @@ impl HttpClient {
|
||||
ssl_connector_builder.set_verify(openssl::ssl::SslVerifyMode::NONE);
|
||||
}
|
||||
|
||||
let mut httpc = HttpConnector::new();
|
||||
let resolver = DnsResolver::new();
|
||||
let mut httpc = HttpConnector::new_with_resolver(resolver);
|
||||
httpc.set_nodelay(true); // important for h2 download performance!
|
||||
httpc.enforce_http(false); // we want https...
|
||||
|
||||
@ -398,7 +479,7 @@ impl HttpClient {
|
||||
https.set_proxy(config);
|
||||
}
|
||||
|
||||
let client = Client::builder()
|
||||
let client = Client::builder(TokioExecutor::new())
|
||||
//.http2_initial_stream_window_size( (1 << 31) - 2)
|
||||
//.http2_initial_connection_window_size( (1 << 31) - 2)
|
||||
.build::<_, Body>(https);
|
||||
@ -429,6 +510,7 @@ impl HttpClient {
|
||||
auth_id: auth_id.clone(),
|
||||
ticket: password.clone(),
|
||||
token: "".to_string(),
|
||||
http_only: false,
|
||||
}));
|
||||
|
||||
let server2 = server.to_string();
|
||||
@ -526,7 +608,9 @@ impl HttpClient {
|
||||
_options: options,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl HttpClient {
|
||||
/// Login
|
||||
///
|
||||
/// Login is done on demand, so this is only required if you need
|
||||
@ -648,10 +732,18 @@ impl HttpClient {
|
||||
HeaderValue::from_str(&enc_api_token).unwrap(),
|
||||
);
|
||||
} else {
|
||||
let cookie_name = if auth.http_only {
|
||||
// server has switched to http only flow, provide ticket in properly prefixed cookie
|
||||
PROXMOX_BACKUP_PREFIXED_AUTH_COOKIE
|
||||
} else {
|
||||
PROXMOX_BACKUP_AUTH_COOKIE
|
||||
};
|
||||
|
||||
let enc_ticket = format!(
|
||||
"PBSAuthCookie={}",
|
||||
"{cookie_name}={}",
|
||||
percent_encode(auth.ticket.as_bytes(), DEFAULT_ENCODE_SET)
|
||||
);
|
||||
|
||||
req.headers_mut()
|
||||
.insert("Cookie", HeaderValue::from_str(&enc_ticket).unwrap());
|
||||
req.headers_mut().insert(
|
||||
@ -690,10 +782,18 @@ impl HttpClient {
|
||||
|
||||
let auth = self.login().await?;
|
||||
|
||||
let cookie_name = if auth.http_only {
|
||||
// server has switched to http only flow, provide ticket in properly prefixed cookie
|
||||
PROXMOX_BACKUP_PREFIXED_AUTH_COOKIE
|
||||
} else {
|
||||
PROXMOX_BACKUP_AUTH_COOKIE
|
||||
};
|
||||
|
||||
let enc_ticket = format!(
|
||||
"PBSAuthCookie={}",
|
||||
"{cookie_name}={}",
|
||||
percent_encode(auth.ticket.as_bytes(), DEFAULT_ENCODE_SET)
|
||||
);
|
||||
|
||||
req.headers_mut()
|
||||
.insert("Cookie", HeaderValue::from_str(&enc_ticket).unwrap());
|
||||
|
||||
@ -706,8 +806,7 @@ impl HttpClient {
|
||||
.map(|_| Err(format_err!("unknown error")))
|
||||
.await?
|
||||
} else {
|
||||
resp.into_body()
|
||||
.map_err(Error::from)
|
||||
futures::TryStreamExt::map_err(BodyDataStream::new(resp.into_body()), Error::from)
|
||||
.try_fold(output, move |acc, chunk| async move {
|
||||
acc.write_all(&chunk)?;
|
||||
Ok::<_, Error>(acc)
|
||||
@ -760,10 +859,18 @@ impl HttpClient {
|
||||
HeaderValue::from_str(&enc_api_token).unwrap(),
|
||||
);
|
||||
} else {
|
||||
let cookie_name = if auth.http_only {
|
||||
// server has switched to http only flow, provide ticket in properly prefixed cookie
|
||||
PROXMOX_BACKUP_PREFIXED_AUTH_COOKIE
|
||||
} else {
|
||||
PROXMOX_BACKUP_AUTH_COOKIE
|
||||
};
|
||||
|
||||
let enc_ticket = format!(
|
||||
"PBSAuthCookie={}",
|
||||
"{cookie_name}={}",
|
||||
percent_encode(auth.ticket.as_bytes(), DEFAULT_ENCODE_SET)
|
||||
);
|
||||
|
||||
req.headers_mut()
|
||||
.insert("Cookie", HeaderValue::from_str(&enc_ticket).unwrap());
|
||||
req.headers_mut().insert(
|
||||
@ -787,7 +894,7 @@ impl HttpClient {
|
||||
bail!("unknown error");
|
||||
}
|
||||
|
||||
let upgraded = hyper::upgrade::on(resp).await?;
|
||||
let upgraded = TokioIo::new(hyper::upgrade::on(resp).await?);
|
||||
|
||||
let max_window_size = (1 << 31) - 2;
|
||||
|
||||
@ -815,7 +922,7 @@ impl HttpClient {
|
||||
}
|
||||
|
||||
async fn credentials(
|
||||
client: Client<HttpsConnector>,
|
||||
client: Client<HttpsConnector<DnsResolver>, Body>,
|
||||
server: String,
|
||||
port: u16,
|
||||
username: Userid,
|
||||
@ -829,22 +936,51 @@ impl HttpClient {
|
||||
"/api2/json/access/ticket",
|
||||
Some(data),
|
||||
)?;
|
||||
let cred = Self::api_request(client, req).await?;
|
||||
|
||||
let res = tokio::time::timeout(HTTP_TIMEOUT, client.request(req))
|
||||
.await
|
||||
.map_err(|_| format_err!("http request timed out"))??;
|
||||
|
||||
// check if the headers contain a newer HttpOnly cookie
|
||||
let http_only_ticket = res
|
||||
.headers()
|
||||
.get_all(SET_COOKIE)
|
||||
.iter()
|
||||
.filter_map(|c| c.to_str().ok())
|
||||
.filter_map(|c| match (c.find('='), c.find(';')) {
|
||||
(Some(begin), Some(end))
|
||||
if begin < end && &c[..begin] == PROXMOX_BACKUP_PREFIXED_AUTH_COOKIE =>
|
||||
{
|
||||
Some(c[begin + 1..end].to_string())
|
||||
}
|
||||
_ => None,
|
||||
})
|
||||
.next();
|
||||
|
||||
// if the headers contained a new HttpOnly cookie, the server switched to providing these
|
||||
// by default. this means that older cookies may no longer be supported, so switch to using
|
||||
// the new cookie name.
|
||||
let http_only = http_only_ticket.is_some();
|
||||
|
||||
let cred = Self::api_response(res).await?;
|
||||
let auth = AuthInfo {
|
||||
auth_id: cred["data"]["username"].as_str().unwrap().parse()?,
|
||||
ticket: cred["data"]["ticket"].as_str().unwrap().to_owned(),
|
||||
ticket: http_only_ticket
|
||||
.or(cred["data"]["ticket"].as_str().map(|t| t.to_string()))
|
||||
.unwrap(),
|
||||
token: cred["data"]["CSRFPreventionToken"]
|
||||
.as_str()
|
||||
.unwrap()
|
||||
.to_owned(),
|
||||
http_only,
|
||||
};
|
||||
|
||||
Ok(auth)
|
||||
}
|
||||
|
||||
async fn api_response(response: Response<Body>) -> Result<Value, Error> {
|
||||
async fn api_response(response: Response<Incoming>) -> Result<Value, Error> {
|
||||
let status = response.status();
|
||||
let data = hyper::body::to_bytes(response.into_body()).await?;
|
||||
let data = response.into_body().collect().await?.to_bytes();
|
||||
|
||||
let text = String::from_utf8(data.to_vec()).unwrap();
|
||||
if status.is_success() {
|
||||
@ -860,7 +996,7 @@ impl HttpClient {
|
||||
}
|
||||
|
||||
async fn api_request(
|
||||
client: Client<HttpsConnector>,
|
||||
client: Client<HttpsConnector<DnsResolver>, Body>,
|
||||
req: Request<Body>,
|
||||
) -> Result<Value, Error> {
|
||||
Self::api_response(
|
||||
@ -895,7 +1031,7 @@ impl HttpClient {
|
||||
.uri(url)
|
||||
.header("User-Agent", "proxmox-backup-client/1.0")
|
||||
.header(hyper::header::CONTENT_TYPE, "application/json")
|
||||
.body(Body::from(data.to_string()))?;
|
||||
.body(data.to_string().into())?;
|
||||
Ok(request)
|
||||
} else {
|
||||
let query = json_object_to_query(data)?;
|
||||
@ -936,11 +1072,11 @@ impl Drop for HttpClient {
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct H2Client {
|
||||
h2: h2::client::SendRequest<bytes::Bytes>,
|
||||
h2: h2::client::SendRequest<Bytes>,
|
||||
}
|
||||
|
||||
impl H2Client {
|
||||
pub fn new(h2: h2::client::SendRequest<bytes::Bytes>) -> Self {
|
||||
pub fn new(h2: h2::client::SendRequest<Bytes>) -> Self {
|
||||
Self { h2 }
|
||||
}
|
||||
|
||||
|
@ -630,7 +630,11 @@ impl Archiver {
|
||||
let mut stat_results: Option<FileStat> = None;
|
||||
|
||||
let get_file_mode = || {
|
||||
nix::sys::stat::fstatat(dir_fd, file_name, nix::fcntl::AtFlags::AT_SYMLINK_NOFOLLOW)
|
||||
nix::sys::stat::fstatat(
|
||||
Some(dir_fd),
|
||||
file_name,
|
||||
nix::fcntl::AtFlags::AT_SYMLINK_NOFOLLOW,
|
||||
)
|
||||
};
|
||||
|
||||
let match_result = self
|
||||
@ -871,6 +875,7 @@ impl Archiver {
|
||||
.await
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
async fn add_entry_to_archive<T: SeqWrite + Send>(
|
||||
&mut self,
|
||||
encoder: &mut Encoder<'_, T>,
|
||||
@ -1308,7 +1313,7 @@ impl Archiver {
|
||||
file_name: &Path,
|
||||
metadata: &Metadata,
|
||||
) -> Result<(), Error> {
|
||||
let dest = match nix::fcntl::readlinkat(fd.as_raw_fd(), &b""[..]) {
|
||||
let dest = match nix::fcntl::readlinkat(Some(fd.as_raw_fd()), &b""[..]) {
|
||||
Ok(dest) => dest,
|
||||
Err(Errno::ESTALE) => {
|
||||
self.report_stale_file_handle(None);
|
||||
|
@ -41,7 +41,7 @@ impl PxarDir {
|
||||
allow_existing_dirs: bool,
|
||||
) -> Result<BorrowedFd, Error> {
|
||||
if let Err(err) = mkdirat(
|
||||
parent,
|
||||
Some(parent),
|
||||
self.file_name.as_os_str(),
|
||||
perms_from_metadata(&self.metadata)?,
|
||||
) {
|
||||
@ -55,7 +55,7 @@ impl PxarDir {
|
||||
|
||||
fn open_dir(&mut self, parent: RawFd) -> Result<BorrowedFd, Error> {
|
||||
let dir = Dir::openat(
|
||||
parent,
|
||||
Some(parent),
|
||||
self.file_name.as_os_str(),
|
||||
OFlag::O_DIRECTORY | OFlag::O_CLOEXEC,
|
||||
Mode::empty(),
|
||||
|
@ -627,7 +627,7 @@ impl Extractor {
|
||||
target.as_c_str(),
|
||||
Some(parent),
|
||||
file_name,
|
||||
nix::unistd::LinkatFlags::NoSymlinkFollow,
|
||||
nix::fcntl::AtFlags::empty(),
|
||||
)
|
||||
};
|
||||
|
||||
@ -697,8 +697,13 @@ impl Extractor {
|
||||
}
|
||||
let mut file = unsafe {
|
||||
std::fs::File::from_raw_fd(
|
||||
nix::fcntl::openat(parent, file_name, oflags, Mode::from_bits(0o600).unwrap())
|
||||
.with_context(|| format!("failed to create file {file_name:?}"))?,
|
||||
nix::fcntl::openat(
|
||||
Some(parent),
|
||||
file_name,
|
||||
oflags,
|
||||
Mode::from_bits(0o600).unwrap(),
|
||||
)
|
||||
.with_context(|| format!("failed to create file {file_name:?}"))?,
|
||||
)
|
||||
};
|
||||
|
||||
@ -722,7 +727,7 @@ impl Extractor {
|
||||
}
|
||||
|
||||
if result.seeked_last {
|
||||
while match nix::unistd::ftruncate(file.as_raw_fd(), size as i64) {
|
||||
while match nix::unistd::ftruncate(&file, size as i64) {
|
||||
Ok(_) => false,
|
||||
Err(nix::errno::Errno::EINTR) => true,
|
||||
Err(err) => return Err(err).context("error setting file size"),
|
||||
@ -758,8 +763,13 @@ impl Extractor {
|
||||
}
|
||||
let mut file = tokio::fs::File::from_std(unsafe {
|
||||
std::fs::File::from_raw_fd(
|
||||
nix::fcntl::openat(parent, file_name, oflags, Mode::from_bits(0o600).unwrap())
|
||||
.with_context(|| format!("failed to create file {file_name:?}"))?,
|
||||
nix::fcntl::openat(
|
||||
Some(parent),
|
||||
file_name,
|
||||
oflags,
|
||||
Mode::from_bits(0o600).unwrap(),
|
||||
)
|
||||
.with_context(|| format!("failed to create file {file_name:?}"))?,
|
||||
)
|
||||
});
|
||||
|
||||
@ -784,7 +794,7 @@ impl Extractor {
|
||||
}
|
||||
|
||||
if result.seeked_last {
|
||||
while match nix::unistd::ftruncate(file.as_raw_fd(), size as i64) {
|
||||
while match nix::unistd::ftruncate(&file, size as i64) {
|
||||
Ok(_) => false,
|
||||
Err(nix::errno::Errno::EINTR) => true,
|
||||
Err(err) => return Err(err).context("error setting file size"),
|
||||
|
@ -11,6 +11,7 @@ use futures::stream::Stream;
|
||||
use nix::dir::Dir;
|
||||
use nix::fcntl::OFlag;
|
||||
use nix::sys::stat::Mode;
|
||||
use tokio::sync::Notify;
|
||||
|
||||
use proxmox_async::blocking::TokioWriterAdapter;
|
||||
use proxmox_io::StdChannelWriter;
|
||||
@ -31,6 +32,8 @@ pub struct PxarBackupStream {
|
||||
pub suggested_boundaries: Option<std::sync::mpsc::Receiver<u64>>,
|
||||
handle: Option<AbortHandle>,
|
||||
error: Arc<Mutex<Option<Error>>>,
|
||||
finished: bool,
|
||||
archiver_finished_notification: Arc<Notify>,
|
||||
}
|
||||
|
||||
impl Drop for PxarBackupStream {
|
||||
@ -80,6 +83,10 @@ impl PxarBackupStream {
|
||||
|
||||
let error = Arc::new(Mutex::new(None));
|
||||
let error2 = Arc::clone(&error);
|
||||
let stream_notifier = Arc::new(Notify::new());
|
||||
let stream_notification_receiver = stream_notifier.clone();
|
||||
let payload_stream_notifier = Arc::new(Notify::new());
|
||||
let payload_stream_notification_receiver = payload_stream_notifier.clone();
|
||||
let handler = async move {
|
||||
if let Err(err) = crate::pxar::create_archive(
|
||||
dir,
|
||||
@ -101,6 +108,10 @@ impl PxarBackupStream {
|
||||
let mut error = error2.lock().unwrap();
|
||||
*error = Some(err);
|
||||
}
|
||||
|
||||
// Notify upload streams that archiver is finished (with or without error)
|
||||
stream_notifier.notify_one();
|
||||
payload_stream_notifier.notify_one();
|
||||
};
|
||||
|
||||
let (handle, registration) = AbortHandle::new_pair();
|
||||
@ -112,6 +123,8 @@ impl PxarBackupStream {
|
||||
suggested_boundaries: None,
|
||||
handle: Some(handle.clone()),
|
||||
error: Arc::clone(&error),
|
||||
finished: false,
|
||||
archiver_finished_notification: stream_notification_receiver,
|
||||
};
|
||||
|
||||
let backup_payload_stream = payload_rx.map(|rx| Self {
|
||||
@ -119,6 +132,8 @@ impl PxarBackupStream {
|
||||
suggested_boundaries: suggested_boundaries_rx,
|
||||
handle: Some(handle),
|
||||
error,
|
||||
finished: false,
|
||||
archiver_finished_notification: payload_stream_notification_receiver,
|
||||
});
|
||||
|
||||
Ok((backup_stream, backup_payload_stream))
|
||||
@ -141,18 +156,31 @@ impl Stream for PxarBackupStream {
|
||||
type Item = Result<Vec<u8>, Error>;
|
||||
|
||||
fn poll_next(self: Pin<&mut Self>, _cx: &mut Context) -> Poll<Option<Self::Item>> {
|
||||
let this = self.get_mut();
|
||||
if this.finished {
|
||||
// Channel has already been finished and eventual errors propagated,
|
||||
// early return to avoid blocking on further archiver finished notifications
|
||||
// by subsequent polls.
|
||||
return Poll::Ready(None);
|
||||
}
|
||||
{
|
||||
// limit lock scope
|
||||
let mut error = self.error.lock().unwrap();
|
||||
let mut error = this.error.lock().unwrap();
|
||||
if let Some(err) = error.take() {
|
||||
return Poll::Ready(Some(Err(err)));
|
||||
}
|
||||
}
|
||||
|
||||
match proxmox_async::runtime::block_in_place(|| self.rx.as_ref().unwrap().recv()) {
|
||||
match proxmox_async::runtime::block_in_place(|| this.rx.as_ref().unwrap().recv()) {
|
||||
Ok(data) => Poll::Ready(Some(data)),
|
||||
Err(_) => {
|
||||
let mut error = self.error.lock().unwrap();
|
||||
// Wait for archiver to finish
|
||||
proxmox_async::runtime::block_on(this.archiver_finished_notification.notified());
|
||||
// Never block for archiver finished notification on subsequent calls.
|
||||
// Any error will already have been propagated.
|
||||
this.finished = true;
|
||||
|
||||
let mut error = this.error.lock().unwrap();
|
||||
if let Some(err) = error.take() {
|
||||
return Poll::Ready(Some(Err(err)));
|
||||
}
|
||||
|
@ -345,8 +345,8 @@ pub(crate) unsafe fn set_test_default_master_pubkey(value: Result<Option<Vec<u8>
|
||||
pub fn get_encryption_key_password() -> Result<Vec<u8>, Error> {
|
||||
// fixme: implement other input methods
|
||||
|
||||
if let Some(password) = super::get_secret_from_env("PBS_ENCRYPTION_PASSWORD")? {
|
||||
return Ok(password.as_bytes().to_vec());
|
||||
if let Some(password) = super::get_encryption_password()? {
|
||||
return Ok(password.into_bytes());
|
||||
}
|
||||
|
||||
// If we're on a TTY, query the user for a password
|
||||
|
@ -28,6 +28,21 @@ pub mod key_source;
|
||||
|
||||
const ENV_VAR_PBS_FINGERPRINT: &str = "PBS_FINGERPRINT";
|
||||
const ENV_VAR_PBS_PASSWORD: &str = "PBS_PASSWORD";
|
||||
const ENV_VAR_PBS_ENCRYPTION_PASSWORD: &str = "PBS_ENCRYPTION_PASSWORD";
|
||||
const ENV_VAR_PBS_REPOSITORY: &str = "PBS_REPOSITORY";
|
||||
|
||||
/// Directory with system [credential]s. See systemd-creds(1).
|
||||
///
|
||||
/// [credential]: https://systemd.io/CREDENTIALS/
|
||||
const ENV_VAR_CREDENTIALS_DIRECTORY: &str = "CREDENTIALS_DIRECTORY";
|
||||
/// Credential name of the encryption password.
|
||||
const CRED_PBS_ENCRYPTION_PASSWORD: &str = "proxmox-backup-client.encryption-password";
|
||||
/// Credential name of the password.
|
||||
const CRED_PBS_PASSWORD: &str = "proxmox-backup-client.password";
|
||||
/// Credential name of the repository.
|
||||
const CRED_PBS_REPOSITORY: &str = "proxmox-backup-client.repository";
|
||||
/// Credential name of the fingerprint.
|
||||
const CRED_PBS_FINGERPRINT: &str = "proxmox-backup-client.fingerprint";
|
||||
|
||||
pub const REPO_URL_SCHEMA: Schema = StringSchema::new("Repository URL.")
|
||||
.format(&BACKUP_REPO_URL)
|
||||
@ -40,6 +55,30 @@ pub const CHUNK_SIZE_SCHEMA: Schema = IntegerSchema::new("Chunk size in KB. Must
|
||||
.default(4096)
|
||||
.schema();
|
||||
|
||||
/// Retrieves a secret stored in a [credential] provided by systemd.
|
||||
///
|
||||
/// Returns `Ok(None)` if the credential does not exist.
|
||||
///
|
||||
/// [credential]: https://systemd.io/CREDENTIALS/
|
||||
fn get_credential(cred_name: &str) -> std::io::Result<Option<Vec<u8>>> {
|
||||
let Some(creds_dir) = std::env::var_os(ENV_VAR_CREDENTIALS_DIRECTORY) else {
|
||||
return Ok(None);
|
||||
};
|
||||
let path = std::path::Path::new(&creds_dir).join(cred_name);
|
||||
|
||||
proxmox_log::debug!("attempting to use credential {cred_name} from {creds_dir:?}",);
|
||||
// We read the whole contents without a BufRead. As per systemd-creds(1):
|
||||
// Credentials are limited-size binary or textual objects.
|
||||
match std::fs::read(&path) {
|
||||
Ok(bytes) => Ok(Some(bytes)),
|
||||
Err(err) if err.kind() == std::io::ErrorKind::NotFound => {
|
||||
proxmox_log::debug!("no {cred_name} credential found in {creds_dir:?}");
|
||||
Ok(None)
|
||||
}
|
||||
Err(err) => Err(err),
|
||||
}
|
||||
}
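A quick illustration of the lookup behavior above (a sketch only, not part of the change set; the temporary directory handling and the safe, pre-2024-edition `std::env::set_var` call are assumptions): a unit test in the same module can exercise both the found and the not-found path.

#[cfg(test)]
mod credential_tests {
    use super::get_credential;

    #[test]
    fn credential_lookup_found_and_missing() {
        // Point CREDENTIALS_DIRECTORY at a temporary directory containing one credential.
        let dir = std::env::temp_dir().join("pbs-cred-test");
        std::fs::create_dir_all(&dir).unwrap();
        std::fs::write(dir.join("proxmox-backup-client.password"), b"secret\n").unwrap();
        std::env::set_var("CREDENTIALS_DIRECTORY", &dir);

        // Existing credential: the raw bytes are returned as-is.
        let value = get_credential("proxmox-backup-client.password").unwrap();
        assert_eq!(value.as_deref(), Some(b"secret\n".as_slice()));

        // Missing credential: the NotFound error is mapped to Ok(None).
        assert!(get_credential("does-not-exist").unwrap().is_none());
    }
}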
|
||||
|
||||
/// Helper to read a secret through an environment variable (ENV).
|
||||
///
|
||||
/// Tries the following variable names in order and returns the value
|
||||
@ -51,7 +90,7 @@ pub const CHUNK_SIZE_SCHEMA: Schema = IntegerSchema::new("Chunk size in KB. Must
|
||||
/// BASE_NAME_CMD => read the secret from the first line of stdout of the specified command
|
||||
///
|
||||
/// Only return the first line of data (without CRLF).
|
||||
pub fn get_secret_from_env(base_name: &str) -> Result<Option<String>, Error> {
|
||||
fn get_secret_from_env(base_name: &str) -> Result<Option<String>, Error> {
|
||||
let firstline = |data: String| -> String {
|
||||
match data.lines().next() {
|
||||
Some(line) => line.to_string(),
|
||||
@ -118,8 +157,80 @@ pub fn get_secret_from_env(base_name: &str) -> Result<Option<String>, Error> {
|
||||
Ok(None)
|
||||
}
|
||||
|
||||
/// Gets a secret or value from the environment.
|
||||
///
|
||||
/// Checks for an environment variable named `env_variable`, and if missing, it
|
||||
/// checks for a system [credential] named `credential_name`. Assumes the secret
|
||||
/// is UTF-8 encoded.
|
||||
///
|
||||
/// [credential]: https://systemd.io/CREDENTIALS/
|
||||
fn get_secret_impl(env_variable: &str, credential_name: &str) -> Result<Option<String>, Error> {
|
||||
if let Some(password) = get_secret_from_env(env_variable)? {
|
||||
Ok(Some(password))
|
||||
} else if let Some(password) = get_credential(credential_name)? {
|
||||
String::from_utf8(password)
|
||||
.map(Option::Some)
|
||||
.map_err(|_err| format_err!("credential {credential_name} is not utf8 encoded"))
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
|
||||
/// Gets the backup server's password.
|
||||
///
|
||||
/// Looks for a password in the `PBS_PASSWORD` environment variable, if there
|
||||
/// isn't one it reads the `proxmox-backup-client.password` [credential].
|
||||
///
|
||||
/// Returns `Ok(None)` if neither the environment variable nor the credential is
|
||||
/// present.
|
||||
///
|
||||
/// [credential]: https://systemd.io/CREDENTIALS/
|
||||
pub fn get_password() -> Result<Option<String>, Error> {
|
||||
get_secret_impl(ENV_VAR_PBS_PASSWORD, CRED_PBS_PASSWORD)
|
||||
}
|
||||
|
||||
/// Gets an encryption password.
|
||||
///
|
||||
/// Looks for a password in the `PBS_ENCRYPTION_PASSWORD` environment variable,
|
||||
/// if there isn't one it reads the `proxmox-backup-client.encryption-password`
|
||||
/// [credential].
|
||||
///
|
||||
/// Returns `Ok(None)` if neither the environment variable nor the credential is
|
||||
/// present.
|
||||
///
|
||||
/// [credential]: https://systemd.io/CREDENTIALS/
|
||||
pub fn get_encryption_password() -> Result<Option<String>, Error> {
|
||||
get_secret_impl(
|
||||
ENV_VAR_PBS_ENCRYPTION_PASSWORD,
|
||||
CRED_PBS_ENCRYPTION_PASSWORD,
|
||||
)
|
||||
}
|
||||
|
||||
pub fn get_default_repository() -> Option<String> {
|
||||
std::env::var("PBS_REPOSITORY").ok()
|
||||
get_secret_impl(ENV_VAR_PBS_REPOSITORY, CRED_PBS_REPOSITORY)
|
||||
.inspect_err(|err| {
|
||||
proxmox_log::error!("could not read default repository: {err:#}");
|
||||
})
|
||||
.unwrap_or_default()
|
||||
}
|
||||
|
||||
/// Gets the repository fingerprint.
|
||||
///
|
||||
/// Looks for the fingerprint in the `PBS_FINGERPRINT` environment variable, if
|
||||
/// there isn't one it reads the `proxmox-backup-client.fingerprint`
|
||||
/// [credential].
|
||||
///
|
||||
/// Returns `None` if neither the environment variable nor the credential is
|
||||
/// present.
|
||||
///
|
||||
/// [credential]: https://systemd.io/CREDENTIALS/
|
||||
pub fn get_fingerprint() -> Option<String> {
|
||||
get_secret_impl(ENV_VAR_PBS_FINGERPRINT, CRED_PBS_FINGERPRINT)
|
||||
.inspect_err(|err| {
|
||||
proxmox_log::error!("could not read fingerprint: {err:#}");
|
||||
})
|
||||
.unwrap_or_default()
|
||||
}
|
||||
|
||||
pub fn remove_repository_from_value(param: &mut Value) -> Result<BackupRepository, Error> {
|
||||
@ -179,9 +290,9 @@ fn connect_do(
|
||||
auth_id: &Authid,
|
||||
rate_limit: RateLimitConfig,
|
||||
) -> Result<HttpClient, Error> {
|
||||
let fingerprint = std::env::var(ENV_VAR_PBS_FINGERPRINT).ok();
|
||||
let fingerprint = get_fingerprint();
|
||||
|
||||
let password = get_secret_from_env(ENV_VAR_PBS_PASSWORD)?;
|
||||
let password = get_password()?;
|
||||
let options = HttpClientOptions::new_interactive(password, fingerprint).rate_limit(rate_limit);
|
||||
|
||||
HttpClient::new(server, port, auth_id, options)
|
||||
@ -189,8 +300,8 @@ fn connect_do(
|
||||
|
||||
/// like get, but simply ignore errors and return Null instead
|
||||
pub async fn try_get(repo: &BackupRepository, url: &str) -> Value {
|
||||
let fingerprint = std::env::var(ENV_VAR_PBS_FINGERPRINT).ok();
|
||||
let password = get_secret_from_env(ENV_VAR_PBS_PASSWORD).unwrap_or(None);
|
||||
let fingerprint = get_fingerprint();
|
||||
let password = get_password().unwrap_or(None);
|
||||
|
||||
// ticket cache, but no questions asked
|
||||
let options = HttpClientOptions::new_interactive(password, fingerprint).interactive(false);
|
||||
|
@ -1,19 +1,23 @@
|
||||
use std::os::fd::{AsRawFd, IntoRawFd};
|
||||
use std::pin::Pin;
|
||||
use std::task::{Context, Poll};
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use futures::*;
|
||||
use hyper::client::connect::{Connected, Connection};
|
||||
use hyper::client::Client;
|
||||
use http_body_util::{BodyDataStream, BodyExt};
|
||||
use hyper::body::Incoming;
|
||||
use hyper::http::Uri;
|
||||
use hyper::http::{Request, Response};
|
||||
use hyper::Body;
|
||||
use hyper_util::client::legacy::connect::{Connected, Connection};
|
||||
use hyper_util::client::legacy::Client;
|
||||
use hyper_util::rt::{TokioExecutor, TokioIo};
|
||||
use pin_project_lite::pin_project;
|
||||
use serde_json::Value;
|
||||
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt, ReadBuf};
|
||||
use tokio::net::UnixStream;
|
||||
|
||||
use proxmox_http::uri::json_object_to_query;
|
||||
use proxmox_http::Body;
|
||||
use proxmox_router::HttpError;
|
||||
|
||||
pub const DEFAULT_VSOCK_PORT: u16 = 807;
|
||||
@ -30,9 +34,9 @@ pin_project! {
|
||||
}
|
||||
|
||||
impl tower_service::Service<Uri> for VsockConnector {
|
||||
type Response = UnixConnection;
|
||||
type Response = TokioIo<UnixConnection>;
|
||||
type Error = Error;
|
||||
type Future = Pin<Box<dyn Future<Output = Result<UnixConnection, Error>> + Send>>;
|
||||
type Future = Pin<Box<dyn Future<Output = Result<TokioIo<UnixConnection>, Error>> + Send>>;
|
||||
|
||||
fn poll_ready(&mut self, _cx: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
Poll::Ready(Ok(()))
|
||||
@ -73,16 +77,17 @@ impl tower_service::Service<Uri> for VsockConnector {
|
||||
)?;
|
||||
|
||||
let sock_addr = VsockAddr::new(cid, port as u32);
|
||||
connect(sock_fd, &sock_addr)?;
|
||||
connect(sock_fd.as_raw_fd(), &sock_addr)?;
|
||||
|
||||
// connect sync, but set nonblock after (tokio requires it)
|
||||
let std_stream = unsafe { std::os::unix::net::UnixStream::from_raw_fd(sock_fd) };
|
||||
let std_stream =
|
||||
unsafe { std::os::unix::net::UnixStream::from_raw_fd(sock_fd.into_raw_fd()) };
|
||||
std_stream.set_nonblocking(true)?;
|
||||
|
||||
let stream = tokio::net::UnixStream::from_std(std_stream)?;
|
||||
let connection = UnixConnection { stream };
|
||||
|
||||
Ok(connection)
|
||||
Ok(TokioIo::new(connection))
|
||||
})
|
||||
// unravel the thread JoinHandle to a usable future
|
||||
.map(|res| match res {
|
||||
@ -133,7 +138,7 @@ impl AsyncWrite for UnixConnection {
|
||||
|
||||
/// Slimmed down version of HttpClient for virtio-vsock connections (file restore daemon)
|
||||
pub struct VsockClient {
|
||||
client: Client<VsockConnector>,
|
||||
client: Client<VsockConnector, Body>,
|
||||
cid: i32,
|
||||
port: u16,
|
||||
auth: Option<String>,
|
||||
@ -142,7 +147,7 @@ pub struct VsockClient {
|
||||
impl VsockClient {
|
||||
pub fn new(cid: i32, port: u16, auth: Option<String>) -> Self {
|
||||
let conn = VsockConnector {};
|
||||
let client = Client::builder().build::<_, Body>(conn);
|
||||
let client = Client::builder(TokioExecutor::new()).build::<_, Body>(conn);
|
||||
Self {
|
||||
client,
|
||||
cid,
|
||||
@ -179,8 +184,7 @@ impl VsockClient {
|
||||
if !status.is_success() {
|
||||
Self::api_response(resp).await.map(|_| ())?
|
||||
} else {
|
||||
resp.into_body()
|
||||
.map_err(Error::from)
|
||||
futures::TryStreamExt::map_err(BodyDataStream::new(resp.into_body()), Error::from)
|
||||
.try_fold(output, move |acc, chunk| async move {
|
||||
acc.write_all(&chunk).await?;
|
||||
Ok::<_, Error>(acc)
|
||||
@ -190,9 +194,9 @@ impl VsockClient {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn api_response(response: Response<Body>) -> Result<Value, Error> {
|
||||
async fn api_response(response: Response<Incoming>) -> Result<Value, Error> {
|
||||
let status = response.status();
|
||||
let data = hyper::body::to_bytes(response.into_body()).await?;
|
||||
let data = response.into_body().collect().await?.to_bytes();
|
||||
|
||||
let text = String::from_utf8(data.to_vec()).unwrap();
|
||||
if status.is_success() {
|
||||
@ -238,7 +242,7 @@ impl VsockClient {
|
||||
if let Some(data) = data {
|
||||
if method == "POST" {
|
||||
let builder = make_builder("application/json", &url);
|
||||
let request = builder.body(Body::from(data.to_string()))?;
|
||||
let request = builder.body(data.to_string().into())?;
|
||||
return Ok(request);
|
||||
} else {
|
||||
let query = json_object_to_query(data)?;
|
||||
|
@ -19,11 +19,13 @@ serde_json.workspace = true
|
||||
|
||||
proxmox-notify.workspace = true
|
||||
proxmox-router = { workspace = true, default-features = false }
|
||||
proxmox-s3-client.workspace = true
|
||||
proxmox-schema.workspace = true
|
||||
proxmox-section-config.workspace = true
|
||||
proxmox-shared-memory.workspace = true
|
||||
proxmox-sys = { workspace = true, features = [ "acl", "crypt", "timer" ] }
|
||||
proxmox-time.workspace = true
|
||||
proxmox-uuid.workspace = true
|
||||
|
||||
pbs-api-types.workspace = true
|
||||
pbs-buildcfg.workspace = true
|
||||
|
@ -101,7 +101,7 @@ impl ConfigVersionCache {
|
||||
let file_path = Path::new(FILE_PATH);
|
||||
let dir_path = file_path.parent().unwrap();
|
||||
|
||||
create_path(dir_path, Some(dir_opts.clone()), Some(dir_opts))?;
|
||||
create_path(dir_path, Some(dir_opts), Some(dir_opts))?;
|
||||
|
||||
let file_opts = CreateOptions::new()
|
||||
.perm(Mode::from_bits_truncate(0o660))
|
||||
|
@ -113,3 +113,16 @@ pub fn complete_calendar_event(_arg: &str, _param: &HashMap<String, String>) ->
|
||||
.map(|s| String::from(*s))
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Returns the datastore backend type from its name
|
||||
pub fn datastore_backend_type(store: &str) -> Result<pbs_api_types::DatastoreBackendType, Error> {
|
||||
let (config, _) = config()?;
|
||||
let store_config: DataStoreConfig = config.lookup("datastore", &store)?;
|
||||
|
||||
let backend_config: pbs_api_types::DatastoreBackendConfig = serde_json::from_value(
|
||||
pbs_api_types::DatastoreBackendConfig::API_SCHEMA
|
||||
.parse_property_string(store_config.backend.as_deref().unwrap_or(""))?,
|
||||
)?;
|
||||
|
||||
Ok(backend_config.ty.unwrap_or_default())
|
||||
}
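A hedged usage sketch for the new helper (the `pbs_config::datastore` module path and the `S3` variant name are assumptions; only `datastore_backend_type` itself comes from the hunk above): callers that need to branch on the backend can match on the returned enum.

fn uses_s3_backend(store: &str) -> Result<bool, anyhow::Error> {
    // Falls back to the default (local filesystem) backend type when the
    // datastore has no explicit `backend` property configured.
    let backend = pbs_config::datastore::datastore_backend_type(store)?;
    Ok(matches!(backend, pbs_api_types::DatastoreBackendType::S3))
}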
|
||||
|
@ -8,17 +8,34 @@ use proxmox_schema::{ApiType, ObjectSchema};
|
||||
use proxmox_section_config::{SectionConfig, SectionConfigData, SectionConfigPlugin};
|
||||
|
||||
use crate::{open_backup_lockfile, replace_backup_config, BackupLockGuard};
|
||||
use pbs_api_types::{AdRealmConfig, LdapRealmConfig, OpenIdRealmConfig, REALM_ID_SCHEMA};
|
||||
use pbs_api_types::{
|
||||
AdRealmConfig, LdapRealmConfig, OpenIdRealmConfig, PamRealmConfig, PbsRealmConfig,
|
||||
REALM_ID_SCHEMA,
|
||||
};
|
||||
|
||||
pub static CONFIG: LazyLock<SectionConfig> = LazyLock::new(init);
|
||||
|
||||
fn init() -> SectionConfig {
|
||||
const PAM_SCHEMA: &ObjectSchema = PamRealmConfig::API_SCHEMA.unwrap_object_schema();
|
||||
const PBS_SCHEMA: &ObjectSchema = PbsRealmConfig::API_SCHEMA.unwrap_object_schema();
|
||||
const AD_SCHEMA: &ObjectSchema = AdRealmConfig::API_SCHEMA.unwrap_object_schema();
|
||||
const LDAP_SCHEMA: &ObjectSchema = LdapRealmConfig::API_SCHEMA.unwrap_object_schema();
|
||||
const OPENID_SCHEMA: &ObjectSchema = OpenIdRealmConfig::API_SCHEMA.unwrap_object_schema();
|
||||
|
||||
let mut config = SectionConfig::new(&REALM_ID_SCHEMA);
|
||||
|
||||
config.register_plugin(SectionConfigPlugin::new(
|
||||
"pam".to_owned(),
|
||||
Some("realm".to_owned()),
|
||||
PAM_SCHEMA,
|
||||
));
|
||||
|
||||
config.register_plugin(SectionConfigPlugin::new(
|
||||
"pbs".to_owned(),
|
||||
Some("realm".to_owned()),
|
||||
PBS_SCHEMA,
|
||||
));
|
||||
|
||||
let plugin = SectionConfigPlugin::new(
|
||||
"openid".to_string(),
|
||||
Some(String::from("realm")),
|
||||
@ -61,9 +78,24 @@ pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
|
||||
replace_backup_config(DOMAINS_CFG_FILENAME, raw.as_bytes())
|
||||
}
|
||||
|
||||
/// Unsets the default login realm for users by deleting the `default` property
|
||||
/// from the respective realm.
|
||||
///
|
||||
/// This only updates the configuration as given in `config`; making it
|
||||
/// permanent is left to the caller.
|
||||
pub fn unset_default_realm(config: &mut SectionConfigData) -> Result<(), Error> {
|
||||
for (_, data) in &mut config.sections.values_mut() {
|
||||
if let Some(obj) = data.as_object_mut() {
|
||||
obj.remove("default");
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
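A usage sketch for making the change permanent (assumptions: the `pbs_config::domains` module path and the `lock_config()`/`config()` helpers, which follow the pattern of the other section-config modules; only `unset_default_realm` and `save_config` appear in the hunks above): the caller takes the config lock, loads, mutates, and writes back.

fn clear_default_realm() -> Result<(), anyhow::Error> {
    let _lock = pbs_config::domains::lock_config()?; // assumed lock helper
    let (mut domains, _digest) = pbs_config::domains::config()?;
    pbs_config::domains::unset_default_realm(&mut domains)?;
    pbs_config::domains::save_config(&domains)
}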
|
||||
|
||||
/// Check if a realm with the given name exists
|
||||
pub fn exists(domains: &SectionConfigData, realm: &str) -> bool {
|
||||
realm == "pbs" || realm == "pam" || domains.sections.contains_key(realm)
|
||||
domains.sections.contains_key(realm)
|
||||
}
|
||||
|
||||
// shell completion helper
|
||||
|
@ -6,10 +6,10 @@
|
||||
//!
|
||||
//! Drive type [`VirtualTapeDrive`] is only useful for debugging.
|
||||
//!
|
||||
//! [LtoTapeDrive]: crate::api2::types::LtoTapeDrive
|
||||
//! [VirtualTapeDrive]: crate::api2::types::VirtualTapeDrive
|
||||
//! [ScsiTapeChanger]: crate::api2::types::ScsiTapeChanger
|
||||
//! [SectionConfig]: proxmox::api::section_config::SectionConfig
|
||||
//! [LtoTapeDrive]: pbs_api_types::LtoTapeDrive
|
||||
//! [VirtualTapeDrive]: pbs_api_types::VirtualTapeDrive
|
||||
//! [ScsiTapeChanger]: pbs_api_types::ScsiTapeChanger
|
||||
//! [SectionConfig]: proxmox_section_config::SectionConfig
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::sync::LazyLock;
|
||||
|
@ -10,6 +10,7 @@ pub mod network;
|
||||
pub mod notifications;
|
||||
pub mod prune;
|
||||
pub mod remote;
|
||||
pub mod s3;
|
||||
pub mod sync;
|
||||
pub mod tape_job;
|
||||
pub mod token_shadow;
|
||||
@ -22,6 +23,8 @@ pub use config_version_cache::ConfigVersionCache;
|
||||
|
||||
use anyhow::{format_err, Error};
|
||||
use nix::unistd::{Gid, Group, Uid, User};
|
||||
use proxmox_sys::fs::DirLockGuard;
|
||||
use std::os::unix::prelude::AsRawFd;
|
||||
|
||||
pub use pbs_buildcfg::{BACKUP_GROUP_NAME, BACKUP_USER_NAME};
|
||||
|
||||
@ -46,13 +49,34 @@ pub fn backup_group() -> Result<nix::unistd::Group, Error> {
|
||||
}
|
||||
|
||||
pub struct BackupLockGuard {
|
||||
_file: Option<std::fs::File>,
|
||||
file: Option<std::fs::File>,
|
||||
// TODO: Remove `_legacy_dir` with PBS 5
|
||||
_legacy_dir: Option<DirLockGuard>,
|
||||
}
|
||||
|
||||
impl AsRawFd for BackupLockGuard {
|
||||
fn as_raw_fd(&self) -> i32 {
|
||||
self.file.as_ref().map_or(-1, |f| f.as_raw_fd())
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: Remove with PBS 5
|
||||
impl From<DirLockGuard> for BackupLockGuard {
|
||||
fn from(value: DirLockGuard) -> Self {
|
||||
Self {
|
||||
file: None,
|
||||
_legacy_dir: Some(value),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[doc(hidden)]
|
||||
/// Note: do not use for production code, this is only intended for tests
|
||||
pub unsafe fn create_mocked_lock() -> BackupLockGuard {
|
||||
BackupLockGuard { _file: None }
|
||||
BackupLockGuard {
|
||||
file: None,
|
||||
_legacy_dir: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Open or create a lock file owned by user "backup" and lock it.
|
||||
@ -76,7 +100,10 @@ pub fn open_backup_lockfile<P: AsRef<std::path::Path>>(
|
||||
let timeout = timeout.unwrap_or(std::time::Duration::new(10, 0));
|
||||
|
||||
let file = proxmox_sys::fs::open_file_locked(&path, timeout, exclusive, options)?;
|
||||
Ok(BackupLockGuard { _file: Some(file) })
|
||||
Ok(BackupLockGuard {
|
||||
file: Some(file),
|
||||
_legacy_dir: None,
|
||||
})
|
||||
}
|
||||
|
||||
/// Atomically write data to file owned by "root:backup" with permission "0640"
|
||||
|
@ -3,7 +3,7 @@
|
||||
//! This configuration module is based on [`SectionConfig`], and
|
||||
//! provides a type safe interface to store [`MediaPoolConfig`],
|
||||
//!
|
||||
//! [MediaPoolConfig]: crate::api2::types::MediaPoolConfig
|
||||
//! [MediaPoolConfig]: pbs_api_types::MediaPoolConfig
|
||||
//! [SectionConfig]: proxmox_section_config::SectionConfig
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
@ -1,5 +1,5 @@
|
||||
use std::collections::HashMap;
|
||||
use std::os::unix::io::{AsRawFd, FromRawFd, OwnedFd};
|
||||
use std::os::unix::io::AsRawFd;
|
||||
use std::path::Path;
|
||||
use std::process::Command;
|
||||
use std::sync::LazyLock;
|
||||
@ -137,24 +137,20 @@ pub fn get_network_interfaces() -> Result<HashMap<String, bool>, Error> {
|
||||
|
||||
let lines = raw.lines();
|
||||
|
||||
let sock = unsafe {
|
||||
OwnedFd::from_raw_fd(
|
||||
socket(
|
||||
AddressFamily::Inet,
|
||||
SockType::Datagram,
|
||||
SockFlag::empty(),
|
||||
None,
|
||||
)
|
||||
.or_else(|_| {
|
||||
socket(
|
||||
AddressFamily::Inet6,
|
||||
SockType::Datagram,
|
||||
SockFlag::empty(),
|
||||
None,
|
||||
)
|
||||
})?,
|
||||
let sock = socket(
|
||||
AddressFamily::Inet,
|
||||
SockType::Datagram,
|
||||
SockFlag::empty(),
|
||||
None,
|
||||
)
|
||||
.or_else(|_| {
|
||||
socket(
|
||||
AddressFamily::Inet6,
|
||||
SockType::Datagram,
|
||||
SockFlag::empty(),
|
||||
None,
|
||||
)
|
||||
};
|
||||
})?;
|
||||
|
||||
let mut interface_list = HashMap::new();
|
||||
|
||||
|
pbs-config/src/s3.rs (new file, 68 lines)
@ -0,0 +1,68 @@
|
||||
use std::collections::HashMap;
|
||||
use std::sync::LazyLock;
|
||||
|
||||
use anyhow::Error;
|
||||
|
||||
use proxmox_s3_client::{S3ClientConf, S3_CLIENT_ID_SCHEMA};
|
||||
use proxmox_schema::*;
|
||||
use proxmox_section_config::{SectionConfig, SectionConfigData, SectionConfigPlugin};
|
||||
|
||||
use pbs_buildcfg::configdir;
|
||||
|
||||
use crate::{open_backup_lockfile, replace_backup_config, BackupLockGuard};
|
||||
|
||||
pub static CONFIG: LazyLock<SectionConfig> = LazyLock::new(init);
|
||||
|
||||
fn init() -> SectionConfig {
|
||||
let obj_schema = S3ClientConf::API_SCHEMA.unwrap_all_of_schema();
|
||||
let plugin = SectionConfigPlugin::new(
|
||||
"s3-endpoint".to_string(),
|
||||
Some(String::from("id")),
|
||||
obj_schema,
|
||||
);
|
||||
let mut config = SectionConfig::new(&S3_CLIENT_ID_SCHEMA);
|
||||
config.register_plugin(plugin);
|
||||
|
||||
config
|
||||
}
|
||||
|
||||
/// Configuration file location for S3 client.
|
||||
pub const S3_CFG_FILENAME: &str = configdir!("/s3.cfg");
|
||||
/// Configuration lock file used to prevent concurrent configuration update operations.
|
||||
pub const S3_CFG_LOCKFILE: &str = configdir!("/.s3.lck");
|
||||
|
||||
/// Config type for s3 client config entries
|
||||
pub const S3_CFG_TYPE_ID: &str = "s3-endpoint";
|
||||
|
||||
/// Get exclusive lock for S3 client configuration update.
|
||||
pub fn lock_config() -> Result<BackupLockGuard, Error> {
|
||||
open_backup_lockfile(S3_CFG_LOCKFILE, None, true)
|
||||
}
|
||||
|
||||
/// Load s3 client configuration from file.
|
||||
pub fn config() -> Result<(SectionConfigData, [u8; 32]), Error> {
|
||||
parse_config(S3_CFG_FILENAME)
|
||||
}
|
||||
|
||||
/// Save given s3 client configuration to file.
|
||||
pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
|
||||
let raw = CONFIG.write(S3_CFG_FILENAME, config)?;
|
||||
replace_backup_config(S3_CFG_FILENAME, raw.as_bytes())?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Shell completion helper to complete s3 client id's as found in the config.
|
||||
pub fn complete_s3_client_id(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
|
||||
match config() {
|
||||
Ok((data, _digest)) => data.sections.keys().map(|id| id.to_string()).collect(),
|
||||
Err(_) => Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_config(path: &str) -> Result<(SectionConfigData, [u8; 32]), Error> {
|
||||
let content = proxmox_sys::fs::file_read_optional_string(path)?;
|
||||
let content = content.unwrap_or_default();
|
||||
let digest = openssl::sha::sha256(content.as_bytes());
|
||||
let data = CONFIG.parse(path, &content)?;
|
||||
Ok((data, digest))
|
||||
}
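As a minimal sketch of how these helpers are meant to be consumed (the `pbs_config::s3` module path is an assumption based on the file location): read-only callers can load the section config directly, without taking the lock file.

fn list_s3_endpoint_ids() -> Result<Vec<String>, anyhow::Error> {
    // config() returns the parsed sections plus a digest for optimistic
    // concurrency checks; the digest is not needed for a plain listing.
    let (data, _digest) = pbs_config::s3::config()?;
    Ok(data.sections.keys().cloned().collect())
}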
|
@ -61,8 +61,16 @@ pub fn verify_secret(tokenid: &Authid, secret: &str) -> Result<(), Error> {
|
||||
}
|
||||
}
|
||||
|
||||
/// Generates a new secret for the given tokenid / API token, sets it, then returns it.
|
||||
/// The secret is stored as salted hash.
|
||||
pub fn generate_and_set_secret(tokenid: &Authid) -> Result<String, Error> {
|
||||
let secret = format!("{:x}", proxmox_uuid::Uuid::generate());
|
||||
set_secret(tokenid, &secret)?;
|
||||
Ok(secret)
|
||||
}
|
||||
|
||||
/// Adds a new entry for the given tokenid / API token secret. The secret is stored as salted hash.
|
||||
pub fn set_secret(tokenid: &Authid, secret: &str) -> Result<(), Error> {
|
||||
fn set_secret(tokenid: &Authid, secret: &str) -> Result<(), Error> {
|
||||
if !tokenid.is_token() {
|
||||
bail!("not an API token ID");
|
||||
}
|
||||
|
@ -8,15 +8,18 @@ rust-version.workspace = true
|
||||
|
||||
[dependencies]
|
||||
anyhow.workspace = true
|
||||
base64.workspace = true
|
||||
const_format.workspace = true
|
||||
crc32fast.workspace = true
|
||||
endian_trait.workspace = true
|
||||
futures.workspace = true
|
||||
hex = { workspace = true, features = [ "serde" ] }
|
||||
http-body-util.workspace = true
|
||||
hyper.workspace = true
|
||||
libc.workspace = true
|
||||
log.workspace = true
|
||||
nix.workspace = true
|
||||
openssl.workspace = true
|
||||
regex.workspace = true
|
||||
serde.workspace = true
|
||||
serde_json.workspace = true
|
||||
tokio = { workspace = true, features = [] }
|
||||
@ -28,13 +31,18 @@ zstd-safe.workspace = true
|
||||
pathpatterns.workspace = true
|
||||
pxar.workspace = true
|
||||
|
||||
proxmox-async.workspace = true
|
||||
proxmox-base64.workspace = true
|
||||
proxmox-borrow.workspace = true
|
||||
proxmox-http.workspace = true
|
||||
proxmox-human-byte.workspace = true
|
||||
proxmox-io.workspace = true
|
||||
proxmox-lang.workspace=true
|
||||
proxmox-s3-client = { workspace = true, features = [ "impl" ] }
|
||||
proxmox-schema = { workspace = true, features = [ "api-macro" ] }
|
||||
proxmox-serde = { workspace = true, features = [ "serde_json" ] }
|
||||
proxmox-sys.workspace = true
|
||||
proxmox-systemd.workspace = true
|
||||
proxmox-time.workspace = true
|
||||
proxmox-uuid.workspace = true
|
||||
proxmox-worker-task.workspace = true
|
||||
|
@ -1,20 +1,45 @@
|
||||
use std::fmt;
|
||||
use std::os::unix::io::RawFd;
|
||||
use std::os::unix::io::{AsRawFd, RawFd};
|
||||
use std::os::unix::prelude::OsStrExt;
|
||||
use std::path::Path;
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
use std::sync::{Arc, LazyLock};
|
||||
use std::time::Duration;
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use anyhow::{bail, format_err, Context, Error};
|
||||
use const_format::concatcp;
|
||||
|
||||
use proxmox_sys::fs::{lock_dir_noblock, replace_file, CreateOptions};
|
||||
use proxmox_s3_client::S3PathPrefix;
|
||||
use proxmox_sys::fs::{lock_dir_noblock, lock_dir_noblock_shared, replace_file, CreateOptions};
|
||||
use proxmox_systemd::escape_unit;
|
||||
|
||||
use pbs_api_types::{
|
||||
Authid, BackupGroupDeleteStats, BackupNamespace, BackupType, GroupFilter, VerifyState,
|
||||
BACKUP_DATE_REGEX, BACKUP_FILE_REGEX, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME,
|
||||
BACKUP_DATE_REGEX, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME,
|
||||
};
|
||||
use pbs_config::{open_backup_lockfile, BackupLockGuard};
|
||||
|
||||
use crate::datastore::{GROUP_NOTES_FILE_NAME, GROUP_OWNER_FILE_NAME};
|
||||
use crate::manifest::{BackupManifest, MANIFEST_LOCK_NAME};
|
||||
use crate::{DataBlob, DataStore};
|
||||
use crate::s3::S3_CONTENT_PREFIX;
|
||||
use crate::{DataBlob, DataStore, DatastoreBackend};
|
||||
|
||||
pub const DATASTORE_LOCKS_DIR: &str = "/run/proxmox-backup/locks";
|
||||
pub const PROTECTED_MARKER_FILENAME: &str = ".protected";
|
||||
|
||||
proxmox_schema::const_regex! {
|
||||
pub BACKUP_FILES_AND_PROTECTED_REGEX = concatcp!(r"^(.*\.([fd]idx|blob)|\", PROTECTED_MARKER_FILENAME, ")$");
|
||||
}
|
||||
|
||||
// TODO: Remove with PBS 5
|
||||
// Note: The `expect()` call here will only happen if we can neither confirm nor deny the existence
|
||||
// of the file. This should only happen if a user messes with the `/run/proxmox-backup` directory.
|
||||
// If that happens, a lot more should fail as we rely on the existence of the directory throughout
|
||||
// the code. So just panic with a reasonable message.
|
||||
pub(crate) static OLD_LOCKING: LazyLock<bool> = LazyLock::new(|| {
|
||||
std::fs::exists("/run/proxmox-backup/old-locking")
|
||||
.expect("cannot read `/run/proxmox-backup`, please check permissions")
|
||||
});
|
||||
|
||||
/// BackupGroup is a directory containing a list of BackupDir
|
||||
#[derive(Clone)]
|
||||
@ -97,9 +122,7 @@ impl BackupGroup {
|
||||
}
|
||||
|
||||
let backup_dir = self.backup_dir_with_rfc3339(backup_time)?;
|
||||
let files = list_backup_files(l2_fd, backup_time)?;
|
||||
|
||||
let protected = backup_dir.is_protected();
|
||||
let (files, protected) = list_backup_files(l2_fd, backup_time)?;
|
||||
|
||||
list.push(BackupInfo {
|
||||
backup_dir,
|
||||
@ -141,7 +164,7 @@ impl BackupGroup {
|
||||
|
||||
use nix::fcntl::{openat, OFlag};
|
||||
match openat(
|
||||
l2_fd,
|
||||
Some(l2_fd),
|
||||
&manifest_path,
|
||||
OFlag::O_RDONLY | OFlag::O_CLOEXEC,
|
||||
nix::sys::stat::Mode::empty(),
|
||||
@ -198,10 +221,11 @@ impl BackupGroup {
|
||||
///
|
||||
/// Returns `BackupGroupDeleteStats`, containing the number of deleted snapshots
|
||||
/// and the number of protected snapshots, which therefore were not removed.
|
||||
pub fn destroy(&self) -> Result<BackupGroupDeleteStats, Error> {
|
||||
pub fn destroy(&self, backend: &DatastoreBackend) -> Result<BackupGroupDeleteStats, Error> {
|
||||
let _guard = self
|
||||
.lock()
|
||||
.with_context(|| format!("while destroying group '{self:?}'"))?;
|
||||
let path = self.full_group_path();
|
||||
let _guard =
|
||||
proxmox_sys::fs::lock_dir_noblock(&path, "backup group", "possible running backup")?;
|
||||
|
||||
log::info!("removing backup group {:?}", path);
|
||||
let mut delete_stats = BackupGroupDeleteStats::default();
|
||||
@ -211,20 +235,67 @@ impl BackupGroup {
|
||||
delete_stats.increment_protected_snapshots();
|
||||
continue;
|
||||
}
|
||||
snap.destroy(false)?;
|
||||
// also for S3 cleanup local only, the actual S3 objects will be removed below,
|
||||
// reducing the number of required API calls.
|
||||
snap.destroy(false, &DatastoreBackend::Filesystem)?;
|
||||
delete_stats.increment_removed_snapshots();
|
||||
}
|
||||
|
||||
if delete_stats.all_removed() {
|
||||
std::fs::remove_dir_all(&path).map_err(|err| {
|
||||
format_err!("removing group directory {:?} failed - {}", path, err)
|
||||
})?;
|
||||
if let DatastoreBackend::S3(s3_client) = backend {
|
||||
let path = self.relative_group_path();
|
||||
let group_prefix = path
|
||||
.to_str()
|
||||
.ok_or_else(|| format_err!("invalid group path prefix"))?;
|
||||
let prefix = format!("{S3_CONTENT_PREFIX}/{group_prefix}");
|
||||
let delete_objects_error = proxmox_async::runtime::block_on(
|
||||
s3_client.delete_objects_by_prefix_with_suffix_filter(
|
||||
&S3PathPrefix::Some(prefix),
|
||||
PROTECTED_MARKER_FILENAME,
|
||||
&[GROUP_OWNER_FILE_NAME, GROUP_NOTES_FILE_NAME],
|
||||
),
|
||||
)?;
|
||||
if delete_objects_error {
|
||||
bail!("deleting objects failed");
|
||||
}
|
||||
}
|
||||
|
||||
// Note: make sure the old locking mechanism isn't used as `remove_dir_all` is not safe in
|
||||
// that case
|
||||
if delete_stats.all_removed() && !*OLD_LOCKING {
|
||||
self.remove_group_dir()?;
|
||||
delete_stats.increment_removed_groups();
|
||||
}
|
||||
|
||||
Ok(delete_stats)
|
||||
}
|
||||
|
||||
/// Helper function, assumes that no more snapshots are present in the group.
|
||||
fn remove_group_dir(&self) -> Result<(), Error> {
|
||||
let note_path = self.store.group_notes_path(&self.ns, &self.group);
|
||||
if let Err(err) = std::fs::remove_file(¬e_path) {
|
||||
if err.kind() != std::io::ErrorKind::NotFound {
|
||||
bail!("removing the note file '{note_path:?}' failed - {err}")
|
||||
}
|
||||
}
|
||||
|
||||
let owner_path = self.store.owner_path(&self.ns, &self.group);
|
||||
|
||||
if let Err(err) = std::fs::remove_file(&owner_path) {
|
||||
if err.kind() != std::io::ErrorKind::NotFound {
|
||||
bail!("removing the owner file '{owner_path:?}' failed - {err}");
|
||||
}
|
||||
}
|
||||
|
||||
let path = self.full_group_path();
|
||||
|
||||
std::fs::remove_dir(&path)
|
||||
.map_err(|err| format_err!("removing group directory {path:?} failed - {err}"))?;
|
||||
|
||||
let _ = std::fs::remove_file(self.lock_path());
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Returns the backup owner.
|
||||
///
|
||||
/// The backup owner is the entity who first created the backup group.
|
||||
@ -237,6 +308,36 @@ impl BackupGroup {
|
||||
self.store
|
||||
.set_owner(&self.ns, self.as_ref(), auth_id, force)
|
||||
}
|
||||
|
||||
/// Returns a file name for locking a group.
|
||||
///
|
||||
/// The lock file will be located in:
|
||||
/// `${DATASTORE_LOCKS_DIR}/${datastore name}/${lock_file_path_helper(rpath)}`
|
||||
/// where `rpath` is the relative path of the group.
|
||||
fn lock_path(&self) -> PathBuf {
|
||||
let path = Path::new(DATASTORE_LOCKS_DIR).join(self.store.name());
|
||||
|
||||
let rpath = Path::new(self.group.ty.as_str()).join(&self.group.id);
|
||||
|
||||
path.join(lock_file_path_helper(&self.ns, rpath))
|
||||
}
|
||||
|
||||
/// Locks a group exclusively.
|
||||
pub fn lock(&self) -> Result<BackupLockGuard, Error> {
|
||||
if *OLD_LOCKING {
|
||||
lock_dir_noblock(
|
||||
&self.full_group_path(),
|
||||
"backup group",
|
||||
"possible runing backup, group is in use",
|
||||
)
|
||||
.map(BackupLockGuard::from)
|
||||
} else {
|
||||
lock_helper(self.store.name(), &self.lock_path(), |p| {
|
||||
open_backup_lockfile(p, Some(Duration::from_secs(0)), true)
|
||||
.with_context(|| format!("unable to acquire backup group lock {p:?}"))
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl AsRef<pbs_api_types::BackupNamespace> for BackupGroup {
|
||||
@ -392,7 +493,7 @@ impl BackupDir {
|
||||
|
||||
pub fn protected_file(&self) -> PathBuf {
|
||||
let mut path = self.full_path();
|
||||
path.push(".protected");
|
||||
path.push(PROTECTED_MARKER_FILENAME);
|
||||
path
|
||||
}
|
||||
|
||||
@ -421,36 +522,101 @@ impl BackupDir {
|
||||
/// Returns the filename to lock a manifest
|
||||
///
|
||||
/// Also creates the basedir. The lockfile is located in
|
||||
/// '/run/proxmox-backup/locks/{datastore}/[ns/{ns}/]+{type}/{id}/{timestamp}.index.json.lck'
|
||||
fn manifest_lock_path(&self) -> Result<PathBuf, Error> {
|
||||
let mut path = PathBuf::from(&format!("/run/proxmox-backup/locks/{}", self.store.name()));
|
||||
path.push(self.relative_path());
|
||||
/// `${DATASTORE_LOCKS_DIR}/${datastore name}/${lock_file_path_helper(rpath)}.index.json.lck`
|
||||
/// where rpath is the relative path of the snapshot.
|
||||
fn manifest_lock_path(&self) -> PathBuf {
|
||||
let path = Path::new(DATASTORE_LOCKS_DIR).join(self.store.name());
|
||||
|
||||
std::fs::create_dir_all(&path)?;
|
||||
let ts = self.backup_time_string();
|
||||
path.push(format!("{ts}{MANIFEST_LOCK_NAME}"));
|
||||
let rpath = Path::new(self.dir.group.ty.as_str())
|
||||
.join(&self.dir.group.id)
|
||||
.join(&self.backup_time_string)
|
||||
.join(MANIFEST_LOCK_NAME);
|
||||
|
||||
Ok(path)
|
||||
path.join(lock_file_path_helper(&self.ns, rpath))
|
||||
}
|
||||
|
||||
/// Locks the manifest of a snapshot, for example, to update or delete it.
|
||||
pub(crate) fn lock_manifest(&self) -> Result<BackupLockGuard, Error> {
|
||||
let path = self.manifest_lock_path()?;
|
||||
let path = if *OLD_LOCKING {
|
||||
// old manifest lock path
|
||||
let path = Path::new(DATASTORE_LOCKS_DIR)
|
||||
.join(self.store.name())
|
||||
.join(self.relative_path());
|
||||
|
||||
// actions locking the manifest should be relatively short, only wait a few seconds
|
||||
open_backup_lockfile(&path, Some(std::time::Duration::from_secs(5)), true)
|
||||
.map_err(|err| format_err!("unable to acquire manifest lock {:?} - {}", &path, err))
|
||||
std::fs::create_dir_all(&path)?;
|
||||
|
||||
path.join(format!("{}{MANIFEST_LOCK_NAME}", self.backup_time_string()))
|
||||
} else {
|
||||
self.manifest_lock_path()
|
||||
};
|
||||
|
||||
lock_helper(self.store.name(), &path, |p| {
|
||||
// update_manifest should never take a long time, so if
|
||||
// someone else has the lock we can simply block a bit
|
||||
// and should get it soon
|
||||
open_backup_lockfile(p, Some(Duration::from_secs(5)), true)
|
||||
.with_context(|| format_err!("unable to acquire manifest lock {p:?}"))
|
||||
})
|
||||
}
|
||||
|
||||
/// Returns a file name for locking a snapshot.
|
||||
///
|
||||
/// The lock file will be located in:
|
||||
/// `${DATASTORE_LOCKS_DIR}/${datastore name}/${lock_file_path_helper(rpath)}`
|
||||
/// where `rpath` is the relative path of the snapshot.
|
||||
fn lock_path(&self) -> PathBuf {
|
||||
let path = Path::new(DATASTORE_LOCKS_DIR).join(self.store.name());
|
||||
|
||||
let rpath = Path::new(self.dir.group.ty.as_str())
|
||||
.join(&self.dir.group.id)
|
||||
.join(&self.backup_time_string);
|
||||
|
||||
path.join(lock_file_path_helper(&self.ns, rpath))
|
||||
}
|
||||
|
||||
/// Locks a snapshot exclusively.
|
||||
pub fn lock(&self) -> Result<BackupLockGuard, Error> {
|
||||
if *OLD_LOCKING {
|
||||
lock_dir_noblock(
|
||||
&self.full_path(),
|
||||
"snapshot",
|
||||
"backup is running or snapshot is in use",
|
||||
)
|
||||
.map(BackupLockGuard::from)
|
||||
} else {
|
||||
lock_helper(self.store.name(), &self.lock_path(), |p| {
|
||||
open_backup_lockfile(p, Some(Duration::from_secs(0)), true)
|
||||
.with_context(|| format!("unable to acquire snapshot lock {p:?}"))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Acquires a shared lock on a snapshot.
|
||||
pub fn lock_shared(&self) -> Result<BackupLockGuard, Error> {
|
||||
if *OLD_LOCKING {
|
||||
lock_dir_noblock_shared(
|
||||
&self.full_path(),
|
||||
"snapshot",
|
||||
"backup is running or snapshot is in use, could not acquire shared lock",
|
||||
)
|
||||
.map(BackupLockGuard::from)
|
||||
} else {
|
||||
lock_helper(self.store.name(), &self.lock_path(), |p| {
|
||||
open_backup_lockfile(p, Some(Duration::from_secs(0)), false)
|
||||
.with_context(|| format!("unable to acquire shared snapshot lock {p:?}"))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Destroy the whole snapshot, bails if it's protected
|
||||
///
|
||||
/// Setting `force` to true skips locking and thus ignores if the backup is currently in use.
|
||||
pub fn destroy(&self, force: bool) -> Result<(), Error> {
|
||||
let full_path = self.full_path();
|
||||
|
||||
pub fn destroy(&self, force: bool, backend: &DatastoreBackend) -> Result<(), Error> {
|
||||
let (_guard, _manifest_guard);
|
||||
if !force {
|
||||
_guard = lock_dir_noblock(&full_path, "snapshot", "possibly running or in use")?;
|
||||
_guard = self
|
||||
.lock()
|
||||
.with_context(|| format!("while destroying snapshot '{self:?}'"))?;
|
||||
_manifest_guard = self.lock_manifest()?;
|
||||
}
|
||||
|
||||
@ -458,14 +624,57 @@ impl BackupDir {
|
||||
bail!("cannot remove protected snapshot"); // use special error type?
|
||||
}
|
||||
|
||||
if let DatastoreBackend::S3(s3_client) = backend {
|
||||
let path = self.relative_path();
|
||||
let snapshot_prefix = path
|
||||
.to_str()
|
||||
.ok_or_else(|| format_err!("invalid snapshot path"))?;
|
||||
let prefix = format!("{S3_CONTENT_PREFIX}/{snapshot_prefix}");
|
||||
let delete_objects_error = proxmox_async::runtime::block_on(
|
||||
s3_client.delete_objects_by_prefix(&S3PathPrefix::Some(prefix)),
|
||||
)?;
|
||||
if delete_objects_error {
|
||||
bail!("deleting objects failed");
|
||||
}
|
||||
}
|
||||
|
||||
let full_path = self.full_path();
|
||||
log::info!("removing backup snapshot {:?}", full_path);
|
||||
std::fs::remove_dir_all(&full_path).map_err(|err| {
|
||||
format_err!("removing backup snapshot {:?} failed - {}", full_path, err,)
|
||||
})?;
|
||||
|
||||
// the manifest doesn't exist anymore, no need to keep the lock (already done by guard?)
|
||||
if let Ok(path) = self.manifest_lock_path() {
|
||||
let _ = std::fs::remove_file(path); // ignore errors
|
||||
// remove no longer needed lock files
|
||||
let _ = std::fs::remove_file(self.manifest_lock_path()); // ignore errors
|
||||
let _ = std::fs::remove_file(self.lock_path()); // ignore errors
|
||||
|
||||
let group = BackupGroup::from(self);
|
||||
let guard = group.lock().with_context(|| {
|
||||
format!("while checking if group '{group:?}' is empty during snapshot destruction")
|
||||
});
|
||||
|
||||
// Only remove the group if all of the following is true:
|
||||
//
|
||||
// - we can lock it: if we can't lock the group, it is still in use (either by another
|
||||
// backup process or a parent caller (who needs to take care that empty groups are
|
||||
// removed themselves).
|
||||
// - it is now empty: if the group isn't empty, removing it will fail (to avoid removing
|
||||
// backups that might still be used).
|
||||
// - the new locking mechanism is used: if the old mechanism is used, a group removal here
|
||||
// could lead to a race condition.
|
||||
//
|
||||
// Do not error out, as we have already removed the snapshot, there is nothing a user could
|
||||
// do to rectify the situation.
|
||||
if guard.is_ok() && group.list_backups()?.is_empty() && !*OLD_LOCKING {
|
||||
group.remove_group_dir()?;
|
||||
if let DatastoreBackend::S3(s3_client) = backend {
|
||||
let object_key =
|
||||
super::s3::object_key_from_path(&group.relative_group_path(), "owner")
|
||||
.context("invalid owner file object key")?;
|
||||
proxmox_async::runtime::block_on(s3_client.delete_object(object_key))?;
|
||||
}
|
||||
} else if let Err(err) = guard {
|
||||
log::debug!("{err:#}");
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@ -500,6 +709,7 @@ impl BackupDir {
|
||||
/// only use this method - anything else may break locking guarantees.
|
||||
pub fn update_manifest(
|
||||
&self,
|
||||
backend: &DatastoreBackend,
|
||||
update_fn: impl FnOnce(&mut BackupManifest),
|
||||
) -> Result<(), Error> {
|
||||
let _guard = self.lock_manifest()?;
|
||||
@ -512,6 +722,15 @@ impl BackupDir {
|
||||
let blob = DataBlob::encode(manifest.as_bytes(), None, true)?;
|
||||
let raw_data = blob.raw_data();
|
||||
|
||||
if let DatastoreBackend::S3(s3_client) = backend {
|
||||
let object_key =
|
||||
super::s3::object_key_from_path(&self.relative_path(), MANIFEST_BLOB_NAME.as_ref())
|
||||
.context("invalid manifest object key")?;
|
||||
let data = hyper::body::Bytes::copy_from_slice(raw_data);
|
||||
proxmox_async::runtime::block_on(s3_client.upload_replace_with_retry(object_key, data))
|
||||
.context("failed to update manifest on s3 backend")?;
|
||||
}
|
||||
|
||||
let mut path = self.full_path();
|
||||
path.push(MANIFEST_BLOB_NAME.as_ref());
|
||||
|
||||
@ -617,8 +836,7 @@ impl BackupInfo {
|
||||
pub fn new(backup_dir: BackupDir) -> Result<BackupInfo, Error> {
|
||||
let path = backup_dir.full_path();
|
||||
|
||||
let files = list_backup_files(libc::AT_FDCWD, &path)?;
|
||||
let protected = backup_dir.is_protected();
|
||||
let (files, protected) = list_backup_files(libc::AT_FDCWD, &path)?;
|
||||
|
||||
Ok(BackupInfo {
|
||||
backup_dir,
|
||||
@ -648,16 +866,99 @@ impl BackupInfo {
|
||||
fn list_backup_files<P: ?Sized + nix::NixPath>(
|
||||
dirfd: RawFd,
|
||||
path: &P,
|
||||
) -> Result<Vec<String>, Error> {
|
||||
) -> Result<(Vec<String>, bool), Error> {
|
||||
let mut files = vec![];
|
||||
let mut protected = false;
|
||||
|
||||
proxmox_sys::fs::scandir(dirfd, path, &BACKUP_FILE_REGEX, |_, filename, file_type| {
|
||||
if file_type != nix::dir::Type::File {
|
||||
return Ok(());
|
||||
}
|
||||
files.push(filename.to_owned());
|
||||
Ok(())
|
||||
})?;
|
||||
proxmox_sys::fs::scandir(
|
||||
dirfd,
|
||||
path,
|
||||
&BACKUP_FILES_AND_PROTECTED_REGEX,
|
||||
|_, filename, file_type| {
|
||||
if file_type != nix::dir::Type::File {
|
||||
return Ok(());
|
||||
}
|
||||
// avoids more expensive check via `BackupDir::is_protected`
|
||||
if filename == ".protected" {
|
||||
protected = true;
|
||||
} else {
|
||||
files.push(filename.to_owned());
|
||||
}
|
||||
Ok(())
|
||||
},
|
||||
)?;
|
||||
|
||||
Ok(files)
|
||||
Ok((files, protected))
|
||||
}
|
||||
|
||||
/// Creates a path to a lock file depending on the relative path of an object (snapshot, group,
|
||||
/// manifest) in a datastore. First all namespaces will be concatenated with a colon (ns-folder).
|
||||
/// Then the actual file name will depend on the length of the relative path without namespaces. If
|
||||
/// it is shorter than 255 characters in its unit encoded form, then the unit encoded form will be
|
||||
/// used directly. If not, the file name will consist of the first 80 characters, the last 80
|
||||
/// characters and the hash of the unit encoded relative path without namespaces. It will also be
|
||||
/// placed into a "hashed" subfolder in the namespace folder.
|
||||
///
|
||||
/// Examples:
|
||||
///
|
||||
/// - vm-100
|
||||
/// - vm-100-2022\x2d05\x2d02T08\x3a11\x3a33Z
|
||||
/// - ns1:ns2:ns3:ns4:ns5:ns6:ns7/vm-100-2022\x2d05\x2d02T08\x3a11\x3a33Z
|
||||
///
|
||||
/// A "hashed" lock file would look like this:
|
||||
/// - ns1:ns2:ns3/hashed/$first_eighty...$last_eighty-$hash
|
||||
fn lock_file_path_helper(ns: &BackupNamespace, path: PathBuf) -> PathBuf {
|
||||
let to_return = PathBuf::from(
|
||||
ns.components()
|
||||
.map(String::from)
|
||||
.reduce(|acc, n| format!("{acc}:{n}"))
|
||||
.unwrap_or_default(),
|
||||
);
|
||||
|
||||
let path_bytes = path.as_os_str().as_bytes();
|
||||
|
||||
let enc = escape_unit(path_bytes, true);
|
||||
|
||||
if enc.len() < 255 {
|
||||
return to_return.join(enc);
|
||||
}
|
||||
|
||||
let to_return = to_return.join("hashed");
|
||||
|
||||
let first_eighty = &enc[..80];
|
||||
let last_eighty = &enc[enc.len() - 80..];
|
||||
let hash = hex::encode(openssl::sha::sha256(path_bytes));
|
||||
|
||||
to_return.join(format!("{first_eighty}...{last_eighty}-{hash}"))
|
||||
}
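The doc comment above already lists example lock file names; as an additional sketch (assumptions: `BackupNamespace::default()` is the root namespace, the test lives in the same module as the helper, and the exact escaped form is whatever `escape_unit` produces), a small unit test can pin down the "short paths are used verbatim" case.

#[test]
fn short_lock_paths_are_used_verbatim() {
    let ns = pbs_api_types::BackupNamespace::default();
    let rpath = std::path::PathBuf::from("vm/100/2022-05-02T08:11:33Z");

    let lock_name = lock_file_path_helper(&ns, rpath);

    // Short relative paths stay below the 255 byte limit after escaping and
    // therefore must not be diverted into the "hashed" subfolder.
    assert!(!lock_name.to_string_lossy().contains("hashed"));
}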
|
||||
|
||||
/// Helps implement the double stat'ing procedure. It avoids certain race conditions upon lock
|
||||
/// deletion.
|
||||
///
|
||||
/// It also creates the base directory for lock files.
|
||||
fn lock_helper<F>(
|
||||
store_name: &str,
|
||||
path: &std::path::Path,
|
||||
lock_fn: F,
|
||||
) -> Result<BackupLockGuard, Error>
|
||||
where
|
||||
F: Fn(&std::path::Path) -> Result<BackupLockGuard, Error>,
|
||||
{
|
||||
let mut lock_dir = Path::new(DATASTORE_LOCKS_DIR).join(store_name);
|
||||
|
||||
if let Some(parent) = path.parent() {
|
||||
lock_dir = lock_dir.join(parent);
|
||||
};
|
||||
|
||||
std::fs::create_dir_all(&lock_dir)?;
|
||||
|
||||
let lock = lock_fn(path)?;
|
||||
|
||||
let inode = nix::sys::stat::fstat(lock.as_raw_fd())?.st_ino;
|
||||
|
||||
if nix::sys::stat::stat(path).map_or(true, |st| inode != st.st_ino) {
|
||||
bail!("could not acquire lock, another thread modified the lock file");
|
||||
}
|
||||
|
||||
Ok(lock)
|
||||
}
|
||||
|
@ -81,7 +81,11 @@ impl<I: IndexFile, R: AsyncReadChunk + Send + Sync + 'static> CachedChunkReader<
|
||||
let info = self.index.chunk_info(chunk.0).unwrap();
|
||||
|
||||
// will never be None, see AsyncChunkCacher
|
||||
let data = self.cache.access(info.digest, &self.cacher).await?.unwrap();
|
||||
let data = self
|
||||
.cache
|
||||
.access(info.digest, &self.cacher, |_| Ok(()))
|
||||
.await?
|
||||
.unwrap();
|
||||
|
||||
let want_bytes = ((info.range.end - cur_offset) as usize).min(size - read);
|
||||
let slice = &mut buf[read..(read + want_bytes)];
|
||||
|
@ -973,7 +973,7 @@ impl ArchiveEntry {
|
||||
size: Option<u64>,
|
||||
) -> Self {
|
||||
Self {
|
||||
filepath: base64::encode(filepath),
|
||||
filepath: proxmox_base64::encode(filepath),
|
||||
text: String::from_utf8_lossy(filepath.split(|x| *x == b'/').last().unwrap())
|
||||
.to_string(),
|
||||
entry_type: match entry_type {
|
||||
|
@ -1,18 +1,22 @@
|
||||
use std::os::unix::fs::MetadataExt;
|
||||
use std::os::unix::io::AsRawFd;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::time::Duration;
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use tracing::info;
|
||||
use anyhow::{bail, format_err, Context, Error};
|
||||
use tracing::{info, warn};
|
||||
|
||||
use pbs_api_types::{DatastoreFSyncLevel, GarbageCollectionStatus};
|
||||
use proxmox_io::ReadExt;
|
||||
use proxmox_s3_client::S3Client;
|
||||
use proxmox_sys::fs::{create_dir, create_path, file_type_from_file_stat, CreateOptions};
|
||||
use proxmox_sys::process_locker::{
|
||||
ProcessLockExclusiveGuard, ProcessLockSharedGuard, ProcessLocker,
|
||||
};
|
||||
use proxmox_worker_task::WorkerTaskContext;
|
||||
|
||||
use crate::data_blob::DataChunkBuilder;
|
||||
use crate::file_formats::{
|
||||
COMPRESSED_BLOB_MAGIC_1_0, ENCRYPTED_BLOB_MAGIC_1_0, UNCOMPRESSED_BLOB_MAGIC_1_0,
|
||||
};
|
||||
@ -109,7 +113,7 @@ impl ChunkStore {
|
||||
|
||||
let default_options = CreateOptions::new();
|
||||
|
||||
match create_path(&base, Some(default_options), Some(options.clone())) {
|
||||
match create_path(&base, Some(default_options), Some(options)) {
|
||||
Err(err) => bail!("unable to create chunk store '{name}' at {base:?} - {err}"),
|
||||
Ok(res) => {
|
||||
if !res {
|
||||
@ -118,13 +122,13 @@ impl ChunkStore {
|
||||
}
|
||||
}
|
||||
|
||||
if let Err(err) = create_dir(&chunk_dir, options.clone()) {
|
||||
if let Err(err) = create_dir(&chunk_dir, options) {
|
||||
bail!("unable to create chunk store '{name}' subdir {chunk_dir:?} - {err}");
|
||||
}
|
||||
|
||||
// create lock file with correct owner/group
|
||||
let lockfile_path = Self::lockfile_path(&base);
|
||||
proxmox_sys::fs::replace_file(lockfile_path, b"", options.clone(), false)?;
|
||||
proxmox_sys::fs::replace_file(lockfile_path, b"", options, false)?;
|
||||
|
||||
// create 64*1024 subdirs
|
||||
let mut last_percentage = 0;
|
||||
@ -132,7 +136,7 @@ impl ChunkStore {
|
||||
for i in 0..64 * 1024 {
|
||||
let mut l1path = chunk_dir.clone();
|
||||
l1path.push(format!("{:04x}", i));
|
||||
if let Err(err) = create_dir(&l1path, options.clone()) {
|
||||
if let Err(err) = create_dir(&l1path, options) {
|
||||
bail!(
|
||||
"unable to create chunk store '{}' subdir {:?} - {}",
|
||||
name,
|
||||
@ -177,7 +181,7 @@ impl ChunkStore {
|
||||
/// Note that this must be used with care, as it's dangerous to create two instances on the
|
||||
/// same base path, as closing the underlying ProcessLocker drops all locks from this process
|
||||
/// on the lockfile (even if separate FDs)
|
||||
pub(crate) fn open<P: Into<PathBuf>>(
|
||||
pub fn open<P: Into<PathBuf>>(
|
||||
name: &str,
|
||||
base: P,
|
||||
sync_level: DatastoreFSyncLevel,
|
||||
@ -220,19 +224,16 @@ impl ChunkStore {
|
||||
// unwrap: only `None` in unit tests
|
||||
assert!(self.locker.is_some());
|
||||
|
||||
const UTIME_NOW: i64 = (1 << 30) - 1;
|
||||
const UTIME_OMIT: i64 = (1 << 30) - 2;
|
||||
|
||||
let times: [libc::timespec; 2] = [
|
||||
// access time -> update to now
|
||||
libc::timespec {
|
||||
tv_sec: 0,
|
||||
tv_nsec: UTIME_NOW,
|
||||
tv_nsec: libc::UTIME_NOW,
|
||||
},
|
||||
// modification time -> keep as is
|
||||
libc::timespec {
|
||||
tv_sec: 0,
|
||||
tv_nsec: UTIME_OMIT,
|
||||
tv_nsec: libc::UTIME_OMIT,
|
||||
},
|
||||
];
|
||||
|
||||
@ -353,10 +354,14 @@ impl ChunkStore {
|
||||
ProcessLocker::oldest_shared_lock(self.locker.clone().unwrap())
|
||||
}
|
||||
|
||||
pub fn mutex(&self) -> &std::sync::Mutex<()> {
|
||||
&self.mutex
|
||||
}
|
||||
|
||||
pub fn sweep_unused_chunks(
|
||||
&self,
|
||||
oldest_writer: i64,
|
||||
phase1_start_time: i64,
|
||||
min_atime: i64,
|
||||
status: &mut GarbageCollectionStatus,
|
||||
worker: &dyn WorkerTaskContext,
|
||||
) -> Result<(), Error> {
|
||||
@ -366,14 +371,6 @@ impl ChunkStore {
|
||||
use nix::sys::stat::fstatat;
|
||||
use nix::unistd::{unlinkat, UnlinkatFlags};
|
||||
|
||||
let mut min_atime = phase1_start_time - 3600 * 24; // at least 24h (see mount option relatime)
|
||||
|
||||
if oldest_writer < min_atime {
|
||||
min_atime = oldest_writer;
|
||||
}
|
||||
|
||||
min_atime -= 300; // add 5 mins gap for safety
|
||||
|
||||
let mut last_percentage = 0;
|
||||
let mut chunk_count = 0;
|
||||
|
||||
@ -398,7 +395,11 @@ impl ChunkStore {
|
||||
|
||||
let lock = self.mutex.lock();
|
||||
|
||||
if let Ok(stat) = fstatat(dirfd, filename, nix::fcntl::AtFlags::AT_SYMLINK_NOFOLLOW) {
|
||||
if let Ok(stat) = fstatat(
|
||||
Some(dirfd),
|
||||
filename,
|
||||
nix::fcntl::AtFlags::AT_SYMLINK_NOFOLLOW,
|
||||
) {
|
||||
let file_type = file_type_from_file_stat(&stat);
|
||||
if file_type != Some(nix::dir::Type::File) {
|
||||
drop(lock);
|
||||
@ -445,6 +446,87 @@ impl ChunkStore {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Check if atime updates are honored by the filesystem backing the chunk store.
|
||||
///
|
||||
/// Checks if the atime is always updated by utimensat taking into consideration the Linux
|
||||
/// kernel timestamp granularity.
|
||||
/// If `retry_on_file_changed` is set to true, the check is performed again on the changed file
|
||||
/// if a file change during the test is detected via differences in birth time or inode number.
|
||||
/// Uses a 4 MiB fixed size, compressed but unencrypted chunk to test. The chunk is inserted in
|
||||
/// the chunk store if not yet present.
|
||||
/// Returns with error if the check could not be performed.
|
||||
pub fn check_fs_atime_updates(
|
||||
&self,
|
||||
retry_on_file_changed: bool,
|
||||
s3_client: Option<Arc<S3Client>>,
|
||||
) -> Result<(), Error> {
|
||||
let (zero_chunk, digest) = DataChunkBuilder::build_zero_chunk(None, 4096 * 1024, true)?;
|
||||
let (path, _digest) = self.chunk_path(&digest);
|
||||
|
||||
if let Some(ref s3_client) = s3_client {
|
||||
if let Err(err) = std::fs::metadata(&path) {
|
||||
if err.kind() == std::io::ErrorKind::NotFound {
|
||||
let object_key = crate::s3::object_key_from_digest(&digest)?;
|
||||
proxmox_async::runtime::block_on(s3_client.upload_no_replace_with_retry(
|
||||
object_key,
|
||||
zero_chunk.raw_data().to_vec().into(),
|
||||
))
|
||||
.context("failed to upload chunk to s3 backend")?;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let (pre_existing, _) = self.insert_chunk(&zero_chunk, &digest)?;
|
||||
|
||||
// Take into account timestamp update granularity in the kernel
|
||||
// Blocking the thread is fine here since this runs in a worker.
|
||||
std::thread::sleep(Duration::from_secs(1));
|
||||
|
||||
let metadata_before = std::fs::metadata(&path).context(format!(
|
||||
"failed to get metadata for {path:?} before atime update"
|
||||
))?;
|
||||
|
||||
// Second atime update if chunk pre-existed, insert_chunk already updates pre-existing ones
|
||||
self.cond_touch_path(&path, true)?;
|
||||
|
||||
let metadata_now = std::fs::metadata(&path).context(format!(
|
||||
"failed to get metadata for {path:?} after atime update"
|
||||
))?;
|
||||
|
||||
// Check for the unlikely case that the file changed in-between the
|
||||
// two metadata calls, try to check once again on changed file
|
||||
if metadata_before.ino() != metadata_now.ino() {
|
||||
if retry_on_file_changed {
|
||||
return self.check_fs_atime_updates(false, s3_client);
|
||||
}
|
||||
bail!("chunk {path:?} changed twice during access time safety check, cannot proceed.");
|
||||
}
|
||||
|
||||
if metadata_before.accessed()? >= metadata_now.accessed()? {
|
||||
let chunk_info_str = if pre_existing {
|
||||
"pre-existing"
|
||||
} else {
|
||||
"newly inserted"
|
||||
};
|
||||
warn!("Chunk metadata was not correctly updated during access time safety check:");
|
||||
info!(
|
||||
"Timestamps before update: accessed {:?}, modified {:?}, created {:?}",
|
||||
metadata_before.accessed().ok(),
|
||||
metadata_before.modified().ok(),
|
||||
metadata_before.created().ok(),
|
||||
);
|
||||
info!(
|
||||
"Timestamps after update: accessed {:?}, modified {:?}, created {:?}",
|
||||
metadata_now.accessed().ok(),
|
||||
metadata_now.modified().ok(),
|
||||
metadata_now.created().ok(),
|
||||
);
|
||||
bail!("access time safety check using {chunk_info_str} chunk failed, aborting GC!");
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
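A minimal sketch of a call site, assuming an existing `ChunkStore` handle and a plain filesystem backend; the wrapper function below is hypothetical, only the `check_fs_atime_updates` signature comes from the hunk above.

use anyhow::Error;
use pbs_datastore::chunk_store::ChunkStore;

/// Hypothetical helper: run the access-time safety check once before GC phase 1.
fn ensure_atime_updates_work(store: &ChunkStore) -> Result<(), Error> {
    // `true`: retry once if the probed chunk file changes concurrently;
    // `None`: no S3 backend, the probe chunk only needs to exist locally.
    store.check_fs_atime_updates(true, None)
}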

    pub fn insert_chunk(&self, chunk: &DataBlob, digest: &[u8; 32]) -> Result<(bool, u64), Error> {
        // unwrap: only `None` in unit tests
        assert!(self.locker.is_some());

@ -506,10 +588,16 @@ impl ChunkStore {
            .parent()
            .ok_or_else(|| format_err!("unable to get chunk dir"))?;

        let mut create_options = CreateOptions::new();
        if nix::unistd::Uid::effective().is_root() {
            let uid = pbs_config::backup_user()?.uid;
            let gid = pbs_config::backup_group()?.gid;
            create_options = create_options.owner(uid).group(gid);
        }
        proxmox_sys::fs::replace_file(
            &chunk_path,
            raw_data,
            CreateOptions::new(),
            create_options,
            self.sync_level == DatastoreFSyncLevel::File,
        )
        .map_err(|err| {
File diff suppressed because it is too large
@ -134,7 +134,7 @@ impl DynamicIndexReader {
|
||||
|
||||
let index = unsafe {
|
||||
Mmap::map_fd(
|
||||
rawfd,
|
||||
&file,
|
||||
header_size as u64,
|
||||
index_count,
|
||||
nix::sys::mman::ProtFlags::PROT_READ,
|
||||
@ -599,6 +599,7 @@ impl<S: ReadChunk> BufferedDynamicReader<S> {
|
||||
store: &mut self.store,
|
||||
index: &self.index,
|
||||
},
|
||||
|_| Ok(()),
|
||||
)?
|
||||
.ok_or_else(|| format_err!("chunk not found by cacher"))?;
|
||||
|
||||
|
@ -3,6 +3,7 @@ use std::io::Write;
|
||||
use std::io::{Seek, SeekFrom};
|
||||
use std::os::unix::io::AsRawFd;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::ptr::NonNull;
|
||||
use std::sync::Arc;
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
@ -109,10 +110,12 @@ impl FixedIndexReader {
|
||||
.ok_or_else(|| format_err!("invalid index size"))?,
|
||||
nix::sys::mman::ProtFlags::PROT_READ,
|
||||
nix::sys::mman::MapFlags::MAP_PRIVATE,
|
||||
file.as_raw_fd(),
|
||||
&file,
|
||||
header_size as i64,
|
||||
)
|
||||
}? as *mut u8;
|
||||
}?
|
||||
.as_ptr()
|
||||
.cast::<u8>();
|
||||
|
||||
Ok(Self {
|
||||
_file: file,
|
||||
@ -127,15 +130,13 @@ impl FixedIndexReader {
|
||||
}
|
||||
|
||||
fn unmap(&mut self) -> Result<(), Error> {
|
||||
if self.index.is_null() {
|
||||
let Some(index) = NonNull::new(self.index as *mut std::ffi::c_void) else {
|
||||
return Ok(());
|
||||
}
|
||||
};
|
||||
|
||||
let index_size = self.index_length * 32;
|
||||
|
||||
if let Err(err) =
|
||||
unsafe { nix::sys::mman::munmap(self.index as *mut std::ffi::c_void, index_size) }
|
||||
{
|
||||
if let Err(err) = unsafe { nix::sys::mman::munmap(index, index_size) } {
|
||||
bail!("unmap file failed - {}", err);
|
||||
}
|
||||
|
||||
@ -287,7 +288,7 @@ impl FixedIndexWriter {
|
||||
|
||||
let index_length = size.div_ceil(chunk_size);
|
||||
let index_size = index_length * 32;
|
||||
nix::unistd::ftruncate(file.as_raw_fd(), (header_size + index_size) as i64)?;
|
||||
nix::unistd::ftruncate(&file, (header_size + index_size) as i64)?;
|
||||
|
||||
let data = unsafe {
|
||||
nix::sys::mman::mmap(
|
||||
@ -296,10 +297,12 @@ impl FixedIndexWriter {
|
||||
.ok_or_else(|| format_err!("invalid index size"))?,
|
||||
nix::sys::mman::ProtFlags::PROT_READ | nix::sys::mman::ProtFlags::PROT_WRITE,
|
||||
nix::sys::mman::MapFlags::MAP_SHARED,
|
||||
file.as_raw_fd(),
|
||||
&file,
|
||||
header_size as i64,
|
||||
)
|
||||
}? as *mut u8;
|
||||
}?
|
||||
.as_ptr()
|
||||
.cast::<u8>();
|
||||
|
||||
Ok(Self {
|
||||
store,
|
||||
@ -321,15 +324,13 @@ impl FixedIndexWriter {
|
||||
}
|
||||
|
||||
fn unmap(&mut self) -> Result<(), Error> {
|
||||
if self.index.is_null() {
|
||||
let Some(index) = NonNull::new(self.index as *mut std::ffi::c_void) else {
|
||||
return Ok(());
|
||||
}
|
||||
};
|
||||
|
||||
let index_size = self.index_length * 32;
|
||||
|
||||
if let Err(err) =
|
||||
unsafe { nix::sys::mman::munmap(self.index as *mut std::ffi::c_void, index_size) }
|
||||
{
|
||||
if let Err(err) = unsafe { nix::sys::mman::munmap(index, index_size) } {
|
||||
bail!("unmap file {:?} failed - {}", self.tmp_filename, err);
|
||||
}
|
||||
|
||||
|
@ -1,4 +1,3 @@
|
||||
use std::os::unix::io::RawFd;
|
||||
use std::path::PathBuf;
|
||||
use std::str::FromStr;
|
||||
use std::sync::Arc;
|
||||
@ -56,12 +55,7 @@ impl Iterator for ListSnapshots {
|
||||
};
|
||||
if let Ok(name) = entry.file_name().to_str() {
|
||||
if BACKUP_DATE_REGEX.is_match(name) {
|
||||
let backup_time = match proxmox_time::parse_rfc3339(name) {
|
||||
Ok(time) => time,
|
||||
Err(err) => return Some(Err(err)),
|
||||
};
|
||||
|
||||
return Some(BackupDir::with_group(self.group.clone(), backup_time));
|
||||
return Some(BackupDir::with_rfc3339(self.group.clone(), name.to_owned()));
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -78,17 +72,8 @@ pub struct ListGroupsType {
|
||||
|
||||
impl ListGroupsType {
|
||||
pub fn new(store: Arc<DataStore>, ns: BackupNamespace, ty: BackupType) -> Result<Self, Error> {
|
||||
Self::new_at(libc::AT_FDCWD, store, ns, ty)
|
||||
}
|
||||
|
||||
fn new_at(
|
||||
fd: RawFd,
|
||||
store: Arc<DataStore>,
|
||||
ns: BackupNamespace,
|
||||
ty: BackupType,
|
||||
) -> Result<Self, Error> {
|
||||
Ok(Self {
|
||||
dir: proxmox_sys::fs::read_subdir(fd, &store.type_path(&ns, ty))?,
|
||||
dir: proxmox_sys::fs::read_subdir(libc::AT_FDCWD, &store.type_path(&ns, ty))?,
|
||||
store,
|
||||
ns,
|
||||
ty,
|
||||
@ -202,15 +187,16 @@ impl Iterator for ListGroups {
|
||||
if let Ok(group_type) = BackupType::from_str(name) {
|
||||
// found a backup group type, descend into it to scan all IDs in it
|
||||
// by switching to the id-state branch
|
||||
match ListGroupsType::new_at(
|
||||
entry.parent_fd(),
|
||||
Arc::clone(&self.store),
|
||||
self.ns.clone(),
|
||||
group_type,
|
||||
) {
|
||||
Ok(ty) => self.id_state = Some(ty),
|
||||
Err(err) => return Some(Err(err)),
|
||||
}
|
||||
let dir = match proxmox_sys::fs::read_subdir(entry.parent_fd(), name) {
|
||||
Ok(dir) => dir,
|
||||
Err(err) => return Some(Err(err.into())),
|
||||
};
|
||||
self.id_state = Some(ListGroupsType {
|
||||
dir,
|
||||
store: Arc::clone(&self.store),
|
||||
ns: self.ns.clone(),
|
||||
ty: group_type,
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -182,6 +182,7 @@ pub mod manifest;
|
||||
pub mod paperkey;
|
||||
pub mod prune;
|
||||
pub mod read_chunk;
|
||||
pub mod s3;
|
||||
pub mod store_progress;
|
||||
pub mod task_tracking;
|
||||
|
||||
@ -203,6 +204,7 @@ pub use store_progress::StoreProgress;
|
||||
mod datastore;
|
||||
pub use datastore::{
|
||||
check_backup_owner, ensure_datastore_is_mounted, get_datastore_mount_status, DataStore,
|
||||
DatastoreBackend,
|
||||
};
|
||||
|
||||
mod hierarchy;
|
||||
@ -215,3 +217,6 @@ pub use snapshot_reader::SnapshotReader;
|
||||
|
||||
mod local_chunk_reader;
|
||||
pub use local_chunk_reader::LocalChunkReader;
|
||||
|
||||
mod local_datastore_lru_cache;
|
||||
pub use local_datastore_lru_cache::LocalDatastoreLruCache;
|
||||
|
@ -2,18 +2,22 @@ use std::future::Future;
|
||||
use std::pin::Pin;
|
||||
use std::sync::Arc;
|
||||
|
||||
use anyhow::{bail, Error};
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use http_body_util::BodyExt;
|
||||
|
||||
use pbs_api_types::CryptMode;
|
||||
use pbs_tools::crypt_config::CryptConfig;
|
||||
use proxmox_s3_client::S3Client;
|
||||
|
||||
use crate::data_blob::DataBlob;
|
||||
use crate::datastore::DatastoreBackend;
|
||||
use crate::read_chunk::{AsyncReadChunk, ReadChunk};
|
||||
use crate::DataStore;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct LocalChunkReader {
|
||||
store: Arc<DataStore>,
|
||||
backend: DatastoreBackend,
|
||||
crypt_config: Option<Arc<CryptConfig>>,
|
||||
crypt_mode: CryptMode,
|
||||
}
|
||||
@ -23,12 +27,14 @@ impl LocalChunkReader {
|
||||
store: Arc<DataStore>,
|
||||
crypt_config: Option<Arc<CryptConfig>>,
|
||||
crypt_mode: CryptMode,
|
||||
) -> Self {
|
||||
Self {
|
||||
) -> Result<Self, Error> {
|
||||
let backend = store.backend()?;
|
||||
Ok(Self {
|
||||
store,
|
||||
backend,
|
||||
crypt_config,
|
||||
crypt_mode,
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
fn ensure_crypt_mode(&self, chunk_mode: CryptMode) -> Result<(), Error> {
|
||||
@ -47,10 +53,35 @@ impl LocalChunkReader {
|
||||
}
|
||||
}
|
||||
|
||||
async fn fetch(s3_client: Arc<S3Client>, digest: &[u8; 32]) -> Result<DataBlob, Error> {
|
||||
let object_key = crate::s3::object_key_from_digest(digest)?;
|
||||
if let Some(response) = s3_client.get_object(object_key).await? {
|
||||
let bytes = response.content.collect().await?.to_bytes();
|
||||
DataBlob::from_raw(bytes.to_vec())
|
||||
} else {
|
||||
bail!("no object with digest {}", hex::encode(digest));
|
||||
}
|
||||
}
|
||||
|
||||
impl ReadChunk for LocalChunkReader {
|
||||
fn read_raw_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
|
||||
let chunk = self.store.load_chunk(digest)?;
|
||||
let chunk = match &self.backend {
|
||||
DatastoreBackend::Filesystem => self.store.load_chunk(digest)?,
|
||||
DatastoreBackend::S3(s3_client) => match self.store.cache() {
|
||||
None => proxmox_async::runtime::block_on(fetch(Arc::clone(s3_client), digest))?,
|
||||
Some(cache) => {
|
||||
let mut cacher = self
|
||||
.store
|
||||
.cacher()?
|
||||
.ok_or(format_err!("no cacher for datastore"))?;
|
||||
proxmox_async::runtime::block_on(cache.access(digest, &mut cacher))?.ok_or(
|
||||
format_err!("unable to access chunk with digest {}", hex::encode(digest)),
|
||||
)?
|
||||
}
|
||||
},
|
||||
};
|
||||
self.ensure_crypt_mode(chunk.crypt_mode()?)?;
|
||||
|
||||
Ok(chunk)
|
||||
}
|
||||
|
||||
@ -69,11 +100,26 @@ impl AsyncReadChunk for LocalChunkReader {
|
||||
digest: &'a [u8; 32],
|
||||
) -> Pin<Box<dyn Future<Output = Result<DataBlob, Error>> + Send + 'a>> {
|
||||
Box::pin(async move {
|
||||
let (path, _) = self.store.chunk_path(digest);
|
||||
|
||||
let raw_data = tokio::fs::read(&path).await?;
|
||||
|
||||
let chunk = DataBlob::load_from_reader(&mut &raw_data[..])?;
|
||||
let chunk = match &self.backend {
|
||||
DatastoreBackend::Filesystem => {
|
||||
let (path, _) = self.store.chunk_path(digest);
|
||||
let raw_data = tokio::fs::read(&path).await?;
|
||||
DataBlob::load_from_reader(&mut &raw_data[..])?
|
||||
}
|
||||
DatastoreBackend::S3(s3_client) => match self.store.cache() {
|
||||
None => fetch(Arc::clone(s3_client), digest).await?,
|
||||
Some(cache) => {
|
||||
let mut cacher = self
|
||||
.store
|
||||
.cacher()?
|
||||
.ok_or(format_err!("no cacher for datastore"))?;
|
||||
cache.access(digest, &mut cacher).await?.ok_or(format_err!(
|
||||
"unable to access chunk with digest {}",
|
||||
hex::encode(digest)
|
||||
))?
|
||||
}
|
||||
},
|
||||
};
|
||||
self.ensure_crypt_mode(chunk.crypt_mode()?)?;
|
||||
|
||||
Ok(chunk)
|
||||
|
pbs-datastore/src/local_datastore_lru_cache.rs (new file, 180 lines)
@ -0,0 +1,180 @@
//! Use a local datastore as cache for operations on a datastore attached via
//! a network layer (e.g. via the S3 backend).

use std::future::Future;
use std::sync::Arc;

use anyhow::{bail, Error};
use http_body_util::BodyExt;

use pbs_tools::async_lru_cache::{AsyncCacher, AsyncLruCache};
use proxmox_s3_client::S3Client;

use crate::ChunkStore;
use crate::DataBlob;

#[derive(Clone)]
/// Cacher to fetch chunks from the S3 object store and insert them in the local cache store.
pub struct S3Cacher {
    client: Arc<S3Client>,
    store: Arc<ChunkStore>,
}

impl AsyncCacher<[u8; 32], ()> for S3Cacher {
    fn fetch(
        &self,
        key: [u8; 32],
    ) -> Box<dyn Future<Output = Result<Option<()>, Error>> + Send + 'static> {
        let client = Arc::clone(&self.client);
        let store = Arc::clone(&self.store);
        Box::new(async move {
            let object_key = crate::s3::object_key_from_digest(&key)?;
            match client.get_object(object_key).await? {
                None => bail!("could not fetch object with key {}", hex::encode(key)),
                Some(response) => {
                    let bytes = response.content.collect().await?.to_bytes();
                    let chunk = DataBlob::from_raw(bytes.to_vec())?;
                    store.insert_chunk(&chunk, &key)?;
                    Ok(Some(()))
                }
            }
        })
    }
}

impl S3Cacher {
    pub fn new(client: Arc<S3Client>, store: Arc<ChunkStore>) -> Self {
        Self { client, store }
    }
}

/// LRU cache using local datastore for caching chunks
///
/// Uses a LRU cache, but without storing the values in-memory but rather
/// on the filesystem
pub struct LocalDatastoreLruCache {
    cache: AsyncLruCache<[u8; 32], ()>,
    store: Arc<ChunkStore>,
}

impl LocalDatastoreLruCache {
    /// Create a new cache instance storing up to given capacity chunks in the local cache store.
    pub fn new(capacity: usize, store: Arc<ChunkStore>) -> Self {
        Self {
            cache: AsyncLruCache::new(capacity),
            store,
        }
    }

    /// Insert a new chunk into the local datastore cache.
    ///
    /// Fails if the chunk cannot be inserted successfully.
    pub fn insert(&self, digest: &[u8; 32], chunk: &DataBlob) -> Result<(), Error> {
        self.store.insert_chunk(chunk, digest)?;
        self.cache.insert(*digest, (), |digest| {
            let (path, _digest_str) = self.store.chunk_path(&digest);
            // Truncate to free up space but keep the inode around, since that
            // is used as marker for chunks in use by garbage collection.
            if let Err(err) = nix::unistd::truncate(&path, 0) {
                if err != nix::errno::Errno::ENOENT {
                    return Err(Error::from(err));
                }
            }
            Ok(())
        })
    }

    /// Remove a chunk from the local datastore cache.
    ///
    /// Fails if the chunk cannot be deleted successfully.
    pub fn remove(&self, digest: &[u8; 32]) -> Result<(), Error> {
        self.cache.remove(*digest);
        let (path, _digest_str) = self.store.chunk_path(digest);
        std::fs::remove_file(path).map_err(Error::from)
    }

    /// Access the locally cached chunk or fetch it from the S3 object store via the provided
    /// cacher instance.
    ///
    /// For evicted cache nodes, clear the chunk file contents but leave the empty marker file
    /// behind so garbage collection doesn't clean it while in use.
    pub async fn access(
        &self,
        digest: &[u8; 32],
        cacher: &mut S3Cacher,
    ) -> Result<Option<DataBlob>, Error> {
        if self
            .cache
            .access(*digest, cacher, |digest| {
                let (path, _digest_str) = self.store.chunk_path(&digest);
                // Truncate to free up space but keep the inode around, since that
                // is used as marker for chunks in use by garbage collection.
                if let Err(err) = nix::unistd::truncate(&path, 0) {
                    if err != nix::errno::Errno::ENOENT {
                        return Err(Error::from(err));
                    }
                }
                Ok(())
            })
            .await?
            .is_some()
        {
            let (path, _digest_str) = self.store.chunk_path(digest);
            let mut file = match std::fs::File::open(&path) {
                Ok(file) => file,
                Err(err) => {
                    // Expected chunk to be present since LRU cache has it, but it is missing
                    // locally, try to fetch again
                    if err.kind() == std::io::ErrorKind::NotFound {
                        let object_key = crate::s3::object_key_from_digest(digest)?;
                        match cacher.client.get_object(object_key).await? {
                            None => {
                                bail!("could not fetch object with key {}", hex::encode(digest))
                            }
                            Some(response) => {
                                let bytes = response.content.collect().await?.to_bytes();
                                let chunk = DataBlob::from_raw(bytes.to_vec())?;
                                self.store.insert_chunk(&chunk, digest)?;
                                std::fs::File::open(&path)?
                            }
                        }
                    } else {
                        return Err(Error::from(err));
                    }
                }
            };
            let chunk = match DataBlob::load_from_reader(&mut file) {
                Ok(chunk) => chunk,
                Err(err) => {
                    use std::io::Seek;
                    // Check if file is empty marker file, try fetching content if so
                    if file.seek(std::io::SeekFrom::End(0))? == 0 {
                        let object_key = crate::s3::object_key_from_digest(digest)?;
                        match cacher.client.get_object(object_key).await? {
                            None => {
                                bail!("could not fetch object with key {}", hex::encode(digest))
                            }
                            Some(response) => {
                                let bytes = response.content.collect().await?.to_bytes();
                                let chunk = DataBlob::from_raw(bytes.to_vec())?;
                                self.store.insert_chunk(&chunk, digest)?;
                                let mut file = std::fs::File::open(&path)?;
                                DataBlob::load_from_reader(&mut file)?
                            }
                        }
                    } else {
                        return Err(err);
                    }
                }
            };
            Ok(Some(chunk))
        } else {
            Ok(None)
        }
    }

    /// Checks if the given digest is stored in the datastore's LRU cache.
    pub fn contains(&self, digest: &[u8; 32]) -> bool {
        self.cache.contains(*digest)
    }
}
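To make the interplay of `S3Cacher` and `LocalDatastoreLruCache` concrete, here is a rough in-crate usage sketch. The helper function, the capacity value and the call site are made up for illustration; only the types and method signatures come from the file above.

use std::sync::Arc;

use anyhow::Error;
use proxmox_s3_client::S3Client;

use crate::local_datastore_lru_cache::{LocalDatastoreLruCache, S3Cacher};
use crate::{ChunkStore, DataBlob};

/// Hypothetical helper: read a chunk through the local cache, falling back to S3.
async fn read_cached_chunk(
    store: Arc<ChunkStore>,
    client: Arc<S3Client>,
    digest: [u8; 32],
) -> Result<Option<DataBlob>, Error> {
    // Capacity is illustrative; a real value would come from datastore tuning options.
    let cache = LocalDatastoreLruCache::new(1024, Arc::clone(&store));
    let mut cacher = S3Cacher::new(client, store);

    // On a miss (or an evicted, truncated marker file) this fetches the chunk from S3
    // and re-inserts it into the local chunk store.
    cache.access(&digest, &mut cacher).await
}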
@ -130,7 +130,7 @@ fn paperkey_html<W: Write>(
|
||||
writeln!(output, "</p>")?;
|
||||
|
||||
let qr_code = generate_qr_code("svg", block)?;
|
||||
let qr_code = base64::encode_config(qr_code, base64::STANDARD_NO_PAD);
|
||||
let qr_code = proxmox_base64::encode_no_pad(qr_code);
|
||||
|
||||
writeln!(output, "<center>")?;
|
||||
writeln!(output, "<img")?;
|
||||
@ -164,7 +164,7 @@ fn paperkey_html<W: Write>(
|
||||
writeln!(output, "</p>")?;
|
||||
|
||||
let qr_code = generate_qr_code("svg", lines)?;
|
||||
let qr_code = base64::encode_config(qr_code, base64::STANDARD_NO_PAD);
|
||||
let qr_code = proxmox_base64::encode_no_pad(qr_code);
|
||||
|
||||
writeln!(output, "<center>")?;
|
||||
writeln!(output, "<img")?;
|
||||
|
@ -124,13 +124,13 @@ pub fn compute_prune_info(
|
||||
|
||||
if let Some(keep_hourly) = options.keep_hourly {
|
||||
mark_selections(&mut mark, &list, keep_hourly as usize, |info| {
|
||||
strftime_local("%Y/%m/%d/%H", info.backup_dir.backup_time()).map_err(Error::from)
|
||||
strftime_local("%Y/%m/%d/%H", info.backup_dir.backup_time())
|
||||
})?;
|
||||
}
|
||||
|
||||
if let Some(keep_daily) = options.keep_daily {
|
||||
mark_selections(&mut mark, &list, keep_daily as usize, |info| {
|
||||
strftime_local("%Y/%m/%d", info.backup_dir.backup_time()).map_err(Error::from)
|
||||
strftime_local("%Y/%m/%d", info.backup_dir.backup_time())
|
||||
})?;
|
||||
}
|
||||
|
||||
@ -138,19 +138,19 @@ pub fn compute_prune_info(
|
||||
mark_selections(&mut mark, &list, keep_weekly as usize, |info| {
|
||||
// Note: Use iso-week year/week here. This year number
|
||||
// might not match the calendar year number.
|
||||
strftime_local("%G/%V", info.backup_dir.backup_time()).map_err(Error::from)
|
||||
strftime_local("%G/%V", info.backup_dir.backup_time())
|
||||
})?;
|
||||
}
|
||||
|
||||
if let Some(keep_monthly) = options.keep_monthly {
|
||||
mark_selections(&mut mark, &list, keep_monthly as usize, |info| {
|
||||
strftime_local("%Y/%m", info.backup_dir.backup_time()).map_err(Error::from)
|
||||
strftime_local("%Y/%m", info.backup_dir.backup_time())
|
||||
})?;
|
||||
}
|
||||
|
||||
if let Some(keep_yearly) = options.keep_yearly {
|
||||
mark_selections(&mut mark, &list, keep_yearly as usize, |info| {
|
||||
strftime_local("%Y", info.backup_dir.backup_time()).map_err(Error::from)
|
||||
strftime_local("%Y", info.backup_dir.backup_time())
|
||||
})?;
|
||||
}
|
||||
|
||||
|
pbs-datastore/src/s3.rs (new file, 114 lines)
@ -0,0 +1,114 @@
use std::path::{Path, PathBuf};

use anyhow::{bail, format_err, Error};

use proxmox_s3_client::S3ObjectKey;

/// Object key prefix to group regular datastore contents (not chunks)
pub const S3_CONTENT_PREFIX: &str = ".cnt";

/// Generate a relative object key with content prefix from given path and filename
pub fn object_key_from_path(path: &Path, filename: &str) -> Result<S3ObjectKey, Error> {
    // Force the use of relative paths, otherwise this would lose the content prefix
    if path.is_absolute() {
        bail!("cannot generate object key from absolute path");
    }
    if filename.contains('/') {
        bail!("invalid filename containing slashes");
    }
    let mut object_path = PathBuf::from(S3_CONTENT_PREFIX);
    object_path.push(path);
    object_path.push(filename);

    let object_key_str = object_path
        .to_str()
        .ok_or_else(|| format_err!("unexpected object key path"))?;
    S3ObjectKey::try_from(object_key_str)
}

/// Generate a relative object key with chunk prefix from given digest
pub fn object_key_from_digest(digest: &[u8; 32]) -> Result<S3ObjectKey, Error> {
    let object_key = hex::encode(digest);
    let digest_prefix = &object_key[..4];
    let object_key_string = format!(".chunks/{digest_prefix}/{object_key}");
    S3ObjectKey::try_from(object_key_string.as_str())
}

/// Generate a relative object key with chunk prefix from given digest, extended by suffix
pub fn object_key_from_digest_with_suffix(
    digest: &[u8; 32],
    suffix: &str,
) -> Result<S3ObjectKey, Error> {
    if suffix.contains('/') {
        bail!("invalid suffix containing slashes");
    }
    let object_key = hex::encode(digest);
    let digest_prefix = &object_key[..4];
    let object_key_string = format!(".chunks/{digest_prefix}/{object_key}{suffix}");
    S3ObjectKey::try_from(object_key_string.as_str())
}

#[test]
fn test_object_key_from_path() {
    let path = Path::new("vm/100/2025-07-14T14:20:02Z");
    let filename = "drive-scsci0.img.fidx";
    assert_eq!(
        object_key_from_path(path, filename).unwrap().to_string(),
        ".cnt/vm/100/2025-07-14T14:20:02Z/drive-scsci0.img.fidx",
    );
}

#[test]
fn test_object_key_from_empty_path() {
    let path = Path::new("");
    let filename = ".marker";
    assert_eq!(
        object_key_from_path(path, filename).unwrap().to_string(),
        ".cnt/.marker",
    );
}

#[test]
fn test_object_key_from_absolute_path() {
    assert!(object_key_from_path(Path::new("/"), ".marker").is_err());
}

#[test]
fn test_object_key_from_path_incorrect_filename() {
    assert!(object_key_from_path(Path::new(""), "/.marker").is_err());
}

#[test]
fn test_object_key_from_digest() {
    use hex::FromHex;
    let digest =
        <[u8; 32]>::from_hex("bb9f8df61474d25e71fa00722318cd387396ca1736605e1248821cc0de3d3af8")
            .unwrap();
    assert_eq!(
        object_key_from_digest(&digest).unwrap().to_string(),
        ".chunks/bb9f/bb9f8df61474d25e71fa00722318cd387396ca1736605e1248821cc0de3d3af8",
    );
}

#[test]
fn test_object_key_from_digest_with_suffix() {
    use hex::FromHex;
    let digest =
        <[u8; 32]>::from_hex("bb9f8df61474d25e71fa00722318cd387396ca1736605e1248821cc0de3d3af8")
            .unwrap();
    assert_eq!(
        object_key_from_digest_with_suffix(&digest, ".0.bad")
            .unwrap()
            .to_string(),
        ".chunks/bb9f/bb9f8df61474d25e71fa00722318cd387396ca1736605e1248821cc0de3d3af8.0.bad",
    );
}

#[test]
fn test_object_key_from_digest_with_invalid_suffix() {
    use hex::FromHex;
    let digest =
        <[u8; 32]>::from_hex("bb9f8df61474d25e71fa00722318cd387396ca1736605e1248821cc0de3d3af8")
            .unwrap();
    assert!(object_key_from_digest_with_suffix(&digest, "/.0.bad").is_err());
}
@ -1,12 +1,14 @@
|
||||
use std::fs::File;
|
||||
use std::os::unix::io::{AsRawFd, FromRawFd};
|
||||
use std::path::Path;
|
||||
use std::rc::Rc;
|
||||
use std::sync::Arc;
|
||||
|
||||
use anyhow::{bail, Error};
|
||||
use anyhow::{bail, Context, Error};
|
||||
use nix::dir::Dir;
|
||||
|
||||
use proxmox_sys::fs::lock_dir_noblock_shared;
|
||||
use nix::fcntl::OFlag;
|
||||
use nix::sys::stat::Mode;
|
||||
use pbs_config::BackupLockGuard;
|
||||
|
||||
use pbs_api_types::{
|
||||
print_store_and_ns, ArchiveType, BackupNamespace, Operation, CLIENT_LOG_BLOB_NAME,
|
||||
@ -27,6 +29,10 @@ pub struct SnapshotReader {
|
||||
datastore_name: String,
|
||||
file_list: Vec<String>,
|
||||
locked_dir: Dir,
|
||||
|
||||
// while this is never read, the lock needs to be kept until the
|
||||
// reader is dropped to ensure valid locking semantics
|
||||
_lock: BackupLockGuard,
|
||||
}
|
||||
|
||||
impl SnapshotReader {
|
||||
@ -47,8 +53,12 @@ impl SnapshotReader {
|
||||
bail!("snapshot {} does not exist!", snapshot.dir());
|
||||
}
|
||||
|
||||
let locked_dir =
|
||||
lock_dir_noblock_shared(&snapshot_path, "snapshot", "locked by another operation")?;
|
||||
let lock = snapshot
|
||||
.lock_shared()
|
||||
.with_context(|| format!("while trying to read snapshot '{snapshot:?}'"))?;
|
||||
|
||||
let locked_dir = Dir::open(&snapshot_path, OFlag::O_RDONLY, Mode::empty())
|
||||
.with_context(|| format!("unable to open snapshot directory {snapshot_path:?}"))?;
|
||||
|
||||
let datastore_name = datastore.name().to_string();
|
||||
let manifest = match snapshot.load_manifest() {
|
||||
@ -79,6 +89,7 @@ impl SnapshotReader {
|
||||
datastore_name,
|
||||
file_list,
|
||||
locked_dir,
|
||||
_lock: lock,
|
||||
})
|
||||
}
|
||||
|
||||
@ -100,7 +111,7 @@ impl SnapshotReader {
|
||||
/// Opens a file inside the snapshot (using openat) for reading
|
||||
pub fn open_file(&self, filename: &str) -> Result<File, Error> {
|
||||
let raw_fd = nix::fcntl::openat(
|
||||
self.locked_dir.as_raw_fd(),
|
||||
Some(self.locked_dir.as_raw_fd()),
|
||||
Path::new(filename),
|
||||
nix::fcntl::OFlag::O_RDONLY | nix::fcntl::OFlag::O_CLOEXEC,
|
||||
nix::sys::stat::Mode::empty(),
|
||||
@ -128,7 +139,7 @@ pub struct SnapshotChunkIterator<'a, F: Fn(&[u8; 32]) -> bool> {
|
||||
todo_list: Vec<String>,
|
||||
skip_fn: F,
|
||||
#[allow(clippy::type_complexity)]
|
||||
current_index: Option<(Arc<Box<dyn IndexFile + Send>>, usize, Vec<(usize, u64)>)>,
|
||||
current_index: Option<(Rc<Box<dyn IndexFile + Send>>, usize, Vec<(usize, u64)>)>,
|
||||
}
|
||||
|
||||
impl<F: Fn(&[u8; 32]) -> bool> Iterator for SnapshotChunkIterator<'_, F> {
|
||||
@ -158,7 +169,7 @@ impl<F: Fn(&[u8; 32]) -> bool> Iterator for SnapshotChunkIterator<'_, F> {
|
||||
let order =
|
||||
datastore.get_chunks_in_order(&*index, &self.skip_fn, |_| Ok(()))?;
|
||||
|
||||
self.current_index = Some((Arc::new(index), 0, order));
|
||||
self.current_index = Some((Rc::new(index), 0, order));
|
||||
} else {
|
||||
return Ok(None);
|
||||
}
|
||||
|
@ -47,7 +47,7 @@ fn open_lock_file(name: &str) -> Result<(std::fs::File, CreateOptions), Error> {
|
||||
let timeout = std::time::Duration::new(10, 0);
|
||||
|
||||
Ok((
|
||||
open_file_locked(lock_path, timeout, true, options.clone())?,
|
||||
open_file_locked(lock_path, timeout, true, options)?,
|
||||
options,
|
||||
))
|
||||
}
|
||||
|
@ -15,7 +15,6 @@
|
||||
use anyhow::{bail, Error};
|
||||
use serde_json::Value;
|
||||
|
||||
use proxmox_log::init_cli_logger;
|
||||
use proxmox_router::cli::*;
|
||||
use proxmox_router::RpcEnvironment;
|
||||
use proxmox_schema::{api, ArraySchema, IntegerSchema, Schema, StringSchema};
|
||||
@ -800,7 +799,9 @@ fn options(
|
||||
}
|
||||
|
||||
fn main() -> Result<(), Error> {
|
||||
init_cli_logger("PBS_LOG", proxmox_log::LevelFilter::INFO)?;
|
||||
proxmox_log::Logger::from_env("PBS_LOG", proxmox_log::LevelFilter::INFO)
|
||||
.stderr()
|
||||
.init()?;
|
||||
|
||||
let uid = nix::unistd::Uid::current();
|
||||
|
||||
|
@ -16,7 +16,6 @@ use std::fs::File;
|
||||
use anyhow::{bail, Error};
|
||||
use serde_json::Value;
|
||||
|
||||
use proxmox_log::init_cli_logger;
|
||||
use proxmox_router::cli::*;
|
||||
use proxmox_router::RpcEnvironment;
|
||||
use proxmox_schema::api;
|
||||
@ -388,7 +387,9 @@ fn scan(param: Value) -> Result<(), Error> {
|
||||
}
|
||||
|
||||
fn main() -> Result<(), Error> {
|
||||
init_cli_logger("PBS_LOG", proxmox_log::LevelFilter::INFO)?;
|
||||
proxmox_log::Logger::from_env("PBS_LOG", proxmox_log::LevelFilter::INFO)
|
||||
.stderr()
|
||||
.init()?;
|
||||
|
||||
let uid = nix::unistd::Uid::current();
|
||||
|
||||
|
@ -18,6 +18,7 @@ use crate::{
|
||||
};
|
||||
|
||||
const SCSI_CHANGER_DEFAULT_TIMEOUT: usize = 60 * 5; // 5 minutes
|
||||
const SCSI_CHANGER_MOVE_MEDIUM_TIMEOUT: usize = 60 * 45; // 45 minutes
|
||||
const SCSI_VOLUME_TAG_LEN: usize = 36;
|
||||
|
||||
/// Initialize element status (Inventory)
|
||||
@ -181,7 +182,7 @@ pub fn load_slot(file: &mut File, from_slot: u64, drivenum: u64) -> Result<(), E
|
||||
);
|
||||
|
||||
let mut sg_raw = SgRaw::new(file, 64)?;
|
||||
sg_raw.set_timeout(SCSI_CHANGER_DEFAULT_TIMEOUT);
|
||||
sg_raw.set_timeout(SCSI_CHANGER_MOVE_MEDIUM_TIMEOUT);
|
||||
|
||||
sg_raw
|
||||
.do_command(&cmd)
|
||||
@ -205,7 +206,7 @@ pub fn unload(file: &mut File, to_slot: u64, drivenum: u64) -> Result<(), Error>
|
||||
);
|
||||
|
||||
let mut sg_raw = SgRaw::new(file, 64)?;
|
||||
sg_raw.set_timeout(SCSI_CHANGER_DEFAULT_TIMEOUT);
|
||||
sg_raw.set_timeout(SCSI_CHANGER_MOVE_MEDIUM_TIMEOUT);
|
||||
|
||||
sg_raw
|
||||
.do_command(&cmd)
|
||||
@ -233,7 +234,7 @@ pub fn transfer_medium<F: AsRawFd>(
|
||||
);
|
||||
|
||||
let mut sg_raw = SgRaw::new(file, 64)?;
|
||||
sg_raw.set_timeout(SCSI_CHANGER_DEFAULT_TIMEOUT);
|
||||
sg_raw.set_timeout(SCSI_CHANGER_MOVE_MEDIUM_TIMEOUT);
|
||||
|
||||
sg_raw.do_command(&cmd).map_err(|err| {
|
||||
format_err!(
|
||||
|
@ -136,6 +136,8 @@ pub struct SgTape {
|
||||
file: File,
|
||||
locate_offset: Option<i64>,
|
||||
info: InquiryInfo,
|
||||
// auto-detect if we can set the encryption mode
|
||||
encryption_possible: Option<bool>,
|
||||
}
|
||||
|
||||
impl SgTape {
|
||||
@ -158,6 +160,7 @@ impl SgTape {
|
||||
file,
|
||||
info,
|
||||
locate_offset: None,
|
||||
encryption_possible: None,
|
||||
})
|
||||
}
|
||||
|
||||
@ -659,7 +662,8 @@ impl SgTape {
|
||||
pub fn wait_until_ready(&mut self, timeout: Option<u64>) -> Result<(), Error> {
|
||||
let start = SystemTime::now();
|
||||
let timeout = timeout.unwrap_or(Self::SCSI_TAPE_DEFAULT_TIMEOUT as u64);
|
||||
let max_wait = std::time::Duration::new(timeout, 0);
|
||||
let mut max_wait = std::time::Duration::new(timeout, 0);
|
||||
let mut increased_timeout = false;
|
||||
|
||||
loop {
|
||||
match self.test_unit_ready() {
|
||||
@ -667,6 +671,16 @@ impl SgTape {
|
||||
_ => {
|
||||
std::thread::sleep(std::time::Duration::new(1, 0));
|
||||
if start.elapsed()? > max_wait {
|
||||
if !increased_timeout {
|
||||
if let Ok(DeviceActivity::Calibrating) =
|
||||
read_device_activity(&mut self.file)
|
||||
{
|
||||
log::info!("Detected drive calibration, increasing timeout to 2 hours 5 minutes");
|
||||
max_wait = std::time::Duration::new(2 * 60 * 60 + 5 * 60, 0);
|
||||
increased_timeout = true;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
bail!("wait_until_ready failed - got timeout");
|
||||
}
|
||||
}
|
||||
@ -690,6 +704,14 @@ impl SgTape {
|
||||
}
|
||||
|
||||
pub fn set_encryption(&mut self, key_data: Option<([u8; 32], Uuid)>) -> Result<(), Error> {
|
||||
if self.encryption_possible == Some(false) {
|
||||
if key_data.is_some() {
|
||||
bail!("Drive does not support setting encryption mode");
|
||||
} else {
|
||||
// skip trying to set encryption if it is not supported and not requested
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
let key = if let Some((ref key, ref uuid)) = key_data {
|
||||
// derive specialized key for each media-set
|
||||
|
||||
@ -710,7 +732,24 @@ impl SgTape {
|
||||
None
|
||||
};
|
||||
|
||||
drive_set_encryption(&mut self.file, key)
|
||||
match drive_set_encryption(&mut self.file, key) {
|
||||
Ok(()) => self.encryption_possible = Some(true),
|
||||
Err(err) => {
|
||||
self.encryption_possible = Some(false);
|
||||
if key.is_some() {
|
||||
bail!("could not set encryption mode on drive: {err}");
|
||||
} else {
|
||||
log::info!("could not set encryption mode on drive: {err}, ignoring.");
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Returns if encryption is possible. Returns [`None`] if it's unknown, because
|
||||
/// no attempt was made to set the mode yet.
|
||||
pub fn encryption_possible(&self) -> Option<bool> {
|
||||
self.encryption_possible
|
||||
}
|
||||
|
||||
// Note: use alloc_page_aligned_buffer to alloc data transfer buffer
|
||||
|
@ -12,15 +12,7 @@ use crate::sgutils2::{alloc_page_aligned_buffer, SgRaw};
|
||||
///
|
||||
/// We always use mixed mode,
|
||||
pub fn drive_set_encryption<F: AsRawFd>(file: &mut F, key: Option<[u8; 32]>) -> Result<(), Error> {
|
||||
let data = match sg_spin_data_encryption_caps(file) {
|
||||
Ok(data) => data,
|
||||
Err(_) if key.is_none() => {
|
||||
// Assume device does not support HW encryption
|
||||
// We can simply ignore the clear key request
|
||||
return Ok(());
|
||||
}
|
||||
Err(err) => return Err(err),
|
||||
};
|
||||
let data = sg_spin_data_encryption_caps(file)?;
|
||||
|
||||
let algorithm_index = decode_spin_data_encryption_caps(&data)?;
|
||||
|
||||
@ -266,7 +258,7 @@ fn decode_spin_data_encryption_caps(data: &[u8]) -> Result<u8, Error> {
|
||||
|
||||
match aes_gcm_index {
|
||||
Some(index) => Ok(index),
|
||||
None => bail!("drive does not support AES-GCM encryption"),
|
||||
None => bail!("drive does not support setting AES-GCM encryption"),
|
||||
}
|
||||
})
|
||||
.map_err(|err: Error| format_err!("decode data encryption caps page failed - {}", err))
|
||||
|
@ -512,7 +512,7 @@ impl<'a, F: AsRawFd> SgRaw<'a, F> {
|
||||
SCSI_PT_DO_TIMEOUT => return Err(format_err!("do_scsi_pt failed - timeout").into()),
|
||||
code if code < 0 => {
|
||||
let errno = unsafe { get_scsi_pt_os_err(ptvp.as_ptr()) };
|
||||
let err = nix::errno::Errno::from_i32(errno);
|
||||
let err = nix::errno::Errno::from_raw(errno);
|
||||
return Err(format_err!("do_scsi_pt failed with err {}", err).into());
|
||||
}
|
||||
unknown => {
|
||||
@ -594,7 +594,7 @@ impl<'a, F: AsRawFd> SgRaw<'a, F> {
|
||||
}
|
||||
SCSI_PT_RESULT_OS_ERR => {
|
||||
let errno = unsafe { get_scsi_pt_os_err(ptvp.as_ptr()) };
|
||||
let err = nix::errno::Errno::from_i32(errno);
|
||||
let err = nix::errno::Errno::from_raw(errno);
|
||||
Err(format_err!("scsi command failed with err {}", err).into())
|
||||
}
|
||||
unknown => {
|
||||
|
@ -42,7 +42,16 @@ impl<K: std::cmp::Eq + std::hash::Hash + Copy, V: Clone + Send + 'static> AsyncL
|
||||
/// Access an item either via the cache or by calling cacher.fetch. A return value of Ok(None)
|
||||
/// means the item requested has no representation, Err(_) means a call to fetch() failed,
|
||||
/// regardless of whether it was initiated by this call or a previous one.
|
||||
pub async fn access(&self, key: K, cacher: &dyn AsyncCacher<K, V>) -> Result<Option<V>, Error> {
|
||||
/// Calls the removed callback on the evicted item, if any.
|
||||
pub async fn access<F>(
|
||||
&self,
|
||||
key: K,
|
||||
cacher: &dyn AsyncCacher<K, V>,
|
||||
removed: F,
|
||||
) -> Result<Option<V>, Error>
|
||||
where
|
||||
F: Fn(K) -> Result<(), Error>,
|
||||
{
|
||||
let (owner, result_fut) = {
|
||||
// check if already requested
|
||||
let mut maps = self.maps.lock().unwrap();
|
||||
@ -71,13 +80,36 @@ impl<K: std::cmp::Eq + std::hash::Hash + Copy, V: Clone + Send + 'static> AsyncL
|
||||
// this call was the one initiating the request, put into LRU and remove from map
|
||||
let mut maps = self.maps.lock().unwrap();
|
||||
if let Ok(Some(ref value)) = result {
|
||||
maps.0.insert(key, value.clone());
|
||||
maps.0.insert(key, value.clone(), removed)?;
|
||||
}
|
||||
maps.1.remove(&key);
|
||||
}
|
||||
|
||||
result
|
||||
}
|
||||
|
||||
/// Insert an item as the most recently used one into the cache, calling the removed callback
|
||||
/// on the evicted cache item, if any.
|
||||
pub fn insert<F>(&self, key: K, value: V, removed: F) -> Result<(), Error>
|
||||
where
|
||||
F: Fn(K) -> Result<(), Error>,
|
||||
{
|
||||
let mut maps = self.maps.lock().unwrap();
|
||||
maps.0.insert(key, value.clone(), removed)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Check if the item exists and if so, mark it as the most recently used one.
|
||||
pub fn contains(&self, key: K) -> bool {
|
||||
let mut maps = self.maps.lock().unwrap();
|
||||
maps.0.get_mut(key).is_some()
|
||||
}
|
||||
|
||||
/// Remove the item from the cache.
|
||||
pub fn remove(&self, key: K) {
|
||||
let mut maps = self.maps.lock().unwrap();
|
||||
maps.0.remove(key);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
@ -106,15 +138,15 @@ mod test {
|
||||
let cache: AsyncLruCache<i32, String> = AsyncLruCache::new(2);
|
||||
|
||||
assert_eq!(
|
||||
cache.access(10, &cacher).await.unwrap(),
|
||||
cache.access(10, &cacher, |_| Ok(())).await.unwrap(),
|
||||
Some("x10".to_string())
|
||||
);
|
||||
assert_eq!(
|
||||
cache.access(20, &cacher).await.unwrap(),
|
||||
cache.access(20, &cacher, |_| Ok(())).await.unwrap(),
|
||||
Some("x20".to_string())
|
||||
);
|
||||
assert_eq!(
|
||||
cache.access(30, &cacher).await.unwrap(),
|
||||
cache.access(30, &cacher, |_| Ok(())).await.unwrap(),
|
||||
Some("x30".to_string())
|
||||
);
|
||||
|
||||
@ -123,14 +155,14 @@ mod test {
|
||||
tokio::spawn(async move {
|
||||
let cacher = TestAsyncCacher { prefix: "y" };
|
||||
assert_eq!(
|
||||
c.access(40, &cacher).await.unwrap(),
|
||||
c.access(40, &cacher, |_| Ok(())).await.unwrap(),
|
||||
Some("y40".to_string())
|
||||
);
|
||||
});
|
||||
}
|
||||
|
||||
assert_eq!(
|
||||
cache.access(20, &cacher).await.unwrap(),
|
||||
cache.access(20, &cacher, |_| Ok(())).await.unwrap(),
|
||||
Some("x20".to_string())
|
||||
);
|
||||
});
|
||||
|
@ -60,10 +60,10 @@ impl<K, V> CacheNode<K, V> {
|
||||
/// assert_eq!(cache.get_mut(1), None);
|
||||
/// assert_eq!(cache.len(), 0);
|
||||
///
|
||||
/// cache.insert(1, 1);
|
||||
/// cache.insert(2, 2);
|
||||
/// cache.insert(3, 3);
|
||||
/// cache.insert(4, 4);
|
||||
/// cache.insert(1, 1, |_| Ok(()));
|
||||
/// cache.insert(2, 2, |_| Ok(()));
|
||||
/// cache.insert(3, 3, |_| Ok(()));
|
||||
/// cache.insert(4, 4, |_| Ok(()));
|
||||
/// assert_eq!(cache.len(), 3);
|
||||
///
|
||||
/// assert_eq!(cache.get_mut(1), None);
|
||||
@ -77,9 +77,9 @@ impl<K, V> CacheNode<K, V> {
|
||||
/// assert_eq!(cache.len(), 0);
|
||||
/// assert_eq!(cache.get_mut(2), None);
|
||||
/// // access will fill in missing cache entry by fetching from LruCacher
|
||||
/// assert_eq!(cache.access(2, &mut LruCacher {}).unwrap(), Some(&mut 2));
|
||||
/// assert_eq!(cache.access(2, &mut LruCacher {}, |_| Ok(())).unwrap(), Some(&mut 2));
|
||||
///
|
||||
/// cache.insert(1, 1);
|
||||
/// cache.insert(1, 1, |_| Ok(()));
|
||||
/// assert_eq!(cache.get_mut(1), Some(&mut 1));
|
||||
///
|
||||
/// cache.clear();
|
||||
@ -121,6 +121,8 @@ impl<K, V> LruCache<K, V> {
|
||||
|
||||
impl<K: std::cmp::Eq + std::hash::Hash + Copy, V> LruCache<K, V> {
|
||||
/// Create LRU cache instance which holds up to `capacity` nodes at once.
|
||||
///
|
||||
/// Forces a minimum `capacity` of 1 in case of the given value being 0.
|
||||
pub fn new(capacity: usize) -> Self {
|
||||
let capacity = capacity.max(1);
|
||||
Self {
|
||||
@ -133,7 +135,10 @@ impl<K: std::cmp::Eq + std::hash::Hash + Copy, V> LruCache<K, V> {
|
||||
|
||||
/// Insert or update an entry identified by `key` with the given `value`.
|
||||
/// This entry is placed as the most recently used node at the head.
|
||||
pub fn insert(&mut self, key: K, value: V) {
|
||||
pub fn insert<F>(&mut self, key: K, value: V, removed: F) -> Result<bool, anyhow::Error>
|
||||
where
|
||||
F: Fn(K) -> Result<(), anyhow::Error>,
|
||||
{
|
||||
match self.map.entry(key) {
|
||||
Entry::Occupied(mut o) => {
|
||||
// Node present, update value
|
||||
@ -142,6 +147,7 @@ impl<K: std::cmp::Eq + std::hash::Hash + Copy, V> LruCache<K, V> {
|
||||
let mut node = unsafe { Box::from_raw(node_ptr) };
|
||||
node.value = value;
|
||||
let _node_ptr = Box::into_raw(node);
|
||||
Ok(true)
|
||||
}
|
||||
Entry::Vacant(v) => {
|
||||
// Node not present, insert a new one
|
||||
@ -157,8 +163,11 @@ impl<K: std::cmp::Eq + std::hash::Hash + Copy, V> LruCache<K, V> {
|
||||
// avoid borrow conflict. This means there are temporarily
|
||||
// self.capacity + 1 cache nodes.
|
||||
if self.map.len() > self.capacity {
|
||||
self.pop_tail();
|
||||
if let Some(removed_node) = self.pop_tail() {
|
||||
removed(removed_node)?;
|
||||
}
|
||||
}
|
||||
Ok(false)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -172,11 +181,12 @@ impl<K: std::cmp::Eq + std::hash::Hash + Copy, V> LruCache<K, V> {
|
||||
}
|
||||
|
||||
/// Remove the least recently used node from the cache.
|
||||
fn pop_tail(&mut self) {
|
||||
fn pop_tail(&mut self) -> Option<K> {
|
||||
if let Some(old_tail) = self.list.pop_tail() {
|
||||
// Remove HashMap entry for old tail
|
||||
self.map.remove(&old_tail.key);
|
||||
return self.map.remove(&old_tail.key).map(|_| old_tail.key);
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
/// Get a mutable reference to the value identified by `key`.
|
||||
@ -204,11 +214,15 @@ impl<K: std::cmp::Eq + std::hash::Hash + Copy, V> LruCache<K, V> {
|
||||
/// value.
|
||||
/// If fetch returns a value, it is inserted as the most recently used entry
|
||||
/// in the cache.
|
||||
pub fn access<'a>(
|
||||
pub fn access<'a, F>(
|
||||
&'a mut self,
|
||||
key: K,
|
||||
cacher: &mut dyn Cacher<K, V>,
|
||||
) -> Result<Option<&'a mut V>, anyhow::Error> {
|
||||
removed: F,
|
||||
) -> Result<Option<&'a mut V>, anyhow::Error>
|
||||
where
|
||||
F: Fn(K) -> Result<(), anyhow::Error>,
|
||||
{
|
||||
match self.map.entry(key) {
|
||||
Entry::Occupied(mut o) => {
|
||||
// Cache hit, bring node to front of list
|
||||
@ -232,7 +246,9 @@ impl<K: std::cmp::Eq + std::hash::Hash + Copy, V> LruCache<K, V> {
|
||||
// avoid borrow conflict. This means there are temporarily
|
||||
// self.capacity + 1 cache nodes.
|
||||
if self.map.len() > self.capacity {
|
||||
self.pop_tail();
|
||||
if let Some(removed_node) = self.pop_tail() {
|
||||
removed(removed_node)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -6,4 +6,4 @@ edition.workspace = true
|
||||
|
||||
[dependencies]
|
||||
anyhow.workspace = true
|
||||
nix.workspace = true
|
||||
nix = { workspace = true, features = [ "feature" ] }
|
||||
|
@ -24,6 +24,7 @@ pxar.workspace = true
|
||||
|
||||
proxmox-async.workspace = true
|
||||
proxmox-human-byte.workspace = true
|
||||
proxmox-http = { workspace = true, features = [ "body" ] }
|
||||
proxmox-log.workspace = true
|
||||
proxmox-io.workspace = true
|
||||
proxmox-router = { workspace = true, features = [ "cli" ] }
|
||||
|
@ -16,7 +16,7 @@ use proxmox_schema::{api, ApiType, ReturnType};
|
||||
|
||||
use pbs_api_types::{BackupNamespace, BackupType};
|
||||
use pbs_client::tools::key_source::get_encryption_key_password;
|
||||
use pbs_client::{BackupRepository, BackupWriter};
|
||||
use pbs_client::{BackupRepository, BackupWriter, BackupWriterOptions};
|
||||
use pbs_datastore::data_blob::{DataBlob, DataChunkBuilder};
|
||||
use pbs_key_config::{load_and_decrypt_key, KeyDerivationConfig};
|
||||
use pbs_tools::crypt_config::CryptConfig;
|
||||
@ -103,26 +103,33 @@ static BENCHMARK_RESULT_2020_TOP: BenchmarkResult = BenchmarkResult {
|
||||
};
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
properties: {
|
||||
repository: {
|
||||
schema: REPO_URL_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
keyfile: {
|
||||
schema: KEYFILE_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
"output-format": {
|
||||
schema: OUTPUT_FORMAT,
|
||||
optional: true,
|
||||
},
|
||||
}
|
||||
}
|
||||
input: {
|
||||
properties: {
|
||||
repository: {
|
||||
schema: REPO_URL_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
keyfile: {
|
||||
schema: KEYFILE_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
"no-cache": {
|
||||
type: Boolean,
|
||||
description: "Bypass local datastore cache for network storages.",
|
||||
optional: true,
|
||||
default: false,
|
||||
},
|
||||
"output-format": {
|
||||
schema: OUTPUT_FORMAT,
|
||||
optional: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
)]
|
||||
/// Run benchmark tests
|
||||
pub async fn benchmark(
|
||||
param: Value,
|
||||
no_cache: bool,
|
||||
_info: &ApiMethod,
|
||||
_rpcenv: &mut dyn RpcEnvironment,
|
||||
) -> Result<(), Error> {
|
||||
@ -145,7 +152,7 @@ pub async fn benchmark(
|
||||
|
||||
// do repo tests first, because this may prompt for a password
|
||||
if let Some(repo) = repo {
|
||||
test_upload_speed(&mut benchmark_result, repo, crypt_config.clone()).await?;
|
||||
test_upload_speed(&mut benchmark_result, repo, crypt_config.clone(), no_cache).await?;
|
||||
}
|
||||
|
||||
test_crypt_speed(&mut benchmark_result)?;
|
||||
@ -221,6 +228,7 @@ async fn test_upload_speed(
|
||||
benchmark_result: &mut BenchmarkResult,
|
||||
repo: BackupRepository,
|
||||
crypt_config: Option<Arc<CryptConfig>>,
|
||||
no_cache: bool,
|
||||
) -> Result<(), Error> {
|
||||
let backup_time = proxmox_time::epoch_i64();
|
||||
|
||||
@ -230,12 +238,15 @@ async fn test_upload_speed(
|
||||
log::debug!("Connecting to backup server");
|
||||
let client = BackupWriter::start(
|
||||
&client,
|
||||
crypt_config.clone(),
|
||||
repo.store(),
|
||||
&BackupNamespace::root(),
|
||||
&(BackupType::Host, "benchmark".to_string(), backup_time).into(),
|
||||
false,
|
||||
true,
|
||||
BackupWriterOptions {
|
||||
datastore: repo.store(),
|
||||
ns: &BackupNamespace::root(),
|
||||
backup: &(BackupType::Host, "benchmark".to_string(), backup_time).into(),
|
||||
crypt_config: crypt_config.clone(),
|
||||
debug: false,
|
||||
benchmark: true,
|
||||
no_cache,
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
|
||||
|
@ -16,7 +16,6 @@ use xdg::BaseDirectories;
|
||||
use pathpatterns::{MatchEntry, MatchType, PatternFlag};
|
||||
use proxmox_async::blocking::TokioWriterAdapter;
|
||||
use proxmox_io::StdChannelWriter;
|
||||
use proxmox_log::init_cli_logger;
|
||||
use proxmox_router::{cli::*, ApiMethod, RpcEnvironment};
|
||||
use proxmox_schema::api;
|
||||
use proxmox_sys::fs::{file_get_json, image_size, replace_file, CreateOptions};
|
||||
@ -47,8 +46,8 @@ use pbs_client::tools::{
|
||||
use pbs_client::{
|
||||
delete_ticket_info, parse_backup_specification, view_task_result, BackupDetectionMode,
|
||||
BackupReader, BackupRepository, BackupSpecificationType, BackupStats, BackupWriter,
|
||||
ChunkStream, FixedChunkStream, HttpClient, InjectionData, PxarBackupStream, RemoteChunkReader,
|
||||
UploadOptions, BACKUP_SOURCE_SCHEMA,
|
||||
BackupWriterOptions, ChunkStream, FixedChunkStream, HttpClient, InjectionData,
|
||||
PxarBackupStream, RemoteChunkReader, UploadOptions, BACKUP_SOURCE_SCHEMA,
|
||||
};
|
||||
use pbs_datastore::catalog::{BackupCatalogWriter, CatalogReader, CatalogWriter};
|
||||
use pbs_datastore::chunk_store::verify_chunk_size;
|
||||
@ -193,13 +192,16 @@ pub async fn dir_or_last_from_group(
|
||||
}
|
||||
}
|
||||
|
||||
type Catalog = CatalogWriter<TokioWriterAdapter<StdChannelWriter<Error>>>;
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
async fn backup_directory<P: AsRef<Path>>(
|
||||
client: &BackupWriter,
|
||||
dir_path: P,
|
||||
archive_name: &BackupArchiveName,
|
||||
payload_target: Option<&BackupArchiveName>,
|
||||
chunk_size: Option<usize>,
|
||||
catalog: Option<Arc<Mutex<CatalogWriter<TokioWriterAdapter<StdChannelWriter<Error>>>>>>,
|
||||
catalog: Option<Arc<Mutex<Catalog>>>,
|
||||
pxar_create_options: pbs_client::pxar::PxarCreateOptions,
|
||||
upload_options: UploadOptions,
|
||||
) -> Result<(BackupStats, Option<BackupStats>), Error> {
|
||||
@ -624,117 +626,131 @@ fn spawn_catalog_upload(
|
||||
}
|
||||
|
||||
#[api(
|
||||
input: {
|
||||
properties: {
|
||||
backupspec: {
|
||||
type: Array,
|
||||
description: "List of backup source specifications ([<label.ext>:<path>] ...)",
|
||||
items: {
|
||||
schema: BACKUP_SOURCE_SCHEMA,
|
||||
}
|
||||
},
|
||||
repository: {
|
||||
schema: REPO_URL_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
"include-dev": {
|
||||
description: "Include mountpoints with same st_dev number (see ``man fstat``) as specified files.",
|
||||
optional: true,
|
||||
items: {
|
||||
type: String,
|
||||
description: "Path to file.",
|
||||
}
|
||||
},
|
||||
"all-file-systems": {
|
||||
type: Boolean,
|
||||
description: "Include all mounted subdirectories.",
|
||||
optional: true,
|
||||
default: false,
|
||||
},
|
||||
keyfile: {
|
||||
schema: KEYFILE_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
"keyfd": {
|
||||
schema: KEYFD_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
"master-pubkey-file": {
|
||||
schema: MASTER_PUBKEY_FILE_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
"master-pubkey-fd": {
|
||||
schema: MASTER_PUBKEY_FD_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
"crypt-mode": {
|
||||
type: CryptMode,
|
||||
optional: true,
|
||||
},
|
||||
"skip-lost-and-found": {
|
||||
type: Boolean,
|
||||
description: "Skip lost+found directory.",
|
||||
optional: true,
|
||||
default: false,
|
||||
},
|
||||
"ns": {
|
||||
schema: BACKUP_NAMESPACE_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
"backup-type": {
|
||||
schema: BACKUP_TYPE_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
"backup-id": {
|
||||
schema: BACKUP_ID_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
"backup-time": {
|
||||
schema: BACKUP_TIME_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
"chunk-size": {
|
||||
schema: CHUNK_SIZE_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
limit: {
|
||||
type: ClientRateLimitConfig,
|
||||
flatten: true,
|
||||
},
|
||||
"change-detection-mode": {
|
||||
type: BackupDetectionMode,
|
||||
optional: true,
|
||||
},
|
||||
"exclude": {
|
||||
type: Array,
|
||||
description: "List of paths or patterns for matching files to exclude.",
|
||||
optional: true,
|
||||
items: {
|
||||
type: String,
|
||||
description: "Path or match pattern.",
|
||||
input: {
|
||||
properties: {
|
||||
backupspec: {
|
||||
type: Array,
|
||||
description:
|
||||
"List of backup source specifications:\
|
||||
\n\n[<archive-name>.<type>:<source-path>] ...\n\n\
|
||||
The 'archive-name' must only contain alphanumerics, hyphens and underscores \
|
||||
while the 'type' must be either 'pxar', 'img', 'conf' or 'log'.",
|
||||
items: {
|
||||
schema: BACKUP_SOURCE_SCHEMA,
|
||||
}
|
||||
},
|
||||
"entries-max": {
|
||||
type: Integer,
|
||||
description: "Max number of entries to hold in memory.",
|
||||
optional: true,
|
||||
default: pbs_client::pxar::ENCODER_MAX_ENTRIES as isize,
|
||||
},
|
||||
"dry-run": {
|
||||
type: Boolean,
|
||||
description: "Just show what backup would do, but do not upload anything.",
|
||||
optional: true,
|
||||
default: false,
|
||||
},
|
||||
"skip-e2big-xattr": {
|
||||
type: Boolean,
|
||||
description: "Ignore the E2BIG error when retrieving xattrs. This includes the file, but discards the metadata.",
|
||||
optional: true,
|
||||
default: false,
|
||||
},
|
||||
}
|
||||
}
|
||||
)]
|
||||
},
|
||||
repository: {
|
||||
schema: REPO_URL_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
"include-dev": {
|
||||
description:
|
||||
"Include mountpoints with same st_dev number (see ``man fstat``) as specified \
|
||||
files.",
|
||||
optional: true,
|
||||
items: {
|
||||
type: String,
|
||||
description: "Path to file.",
|
||||
}
|
||||
},
|
||||
"all-file-systems": {
|
||||
type: Boolean,
|
||||
description: "Include all mounted subdirectories.",
|
||||
optional: true,
|
||||
default: false,
|
||||
},
|
||||
keyfile: {
|
||||
schema: KEYFILE_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
"keyfd": {
|
||||
schema: KEYFD_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
"master-pubkey-file": {
|
||||
schema: MASTER_PUBKEY_FILE_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
"master-pubkey-fd": {
|
||||
schema: MASTER_PUBKEY_FD_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
"crypt-mode": {
|
||||
type: CryptMode,
|
||||
optional: true,
|
||||
},
|
||||
"skip-lost-and-found": {
|
||||
type: Boolean,
|
||||
description: "Skip lost+found directory.",
|
||||
optional: true,
|
||||
default: false,
|
||||
},
|
||||
"ns": {
|
||||
schema: BACKUP_NAMESPACE_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
"backup-type": {
|
||||
schema: BACKUP_TYPE_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
"backup-id": {
|
||||
schema: BACKUP_ID_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
"backup-time": {
|
||||
schema: BACKUP_TIME_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
"chunk-size": {
|
||||
schema: CHUNK_SIZE_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
limit: {
|
||||
type: ClientRateLimitConfig,
|
||||
flatten: true,
|
||||
},
|
||||
"change-detection-mode": {
|
||||
type: BackupDetectionMode,
|
||||
optional: true,
|
||||
},
|
||||
"exclude": {
|
||||
type: Array,
|
||||
description: "List of paths or patterns for matching files to exclude.",
|
||||
optional: true,
|
||||
items: {
|
||||
type: String,
|
||||
description: "Path or match pattern.",
|
||||
}
|
||||
},
|
||||
"entries-max": {
|
||||
type: Integer,
|
||||
description: "Max number of entries to hold in memory.",
|
||||
optional: true,
|
||||
default: pbs_client::pxar::ENCODER_MAX_ENTRIES as isize,
|
||||
},
|
||||
"dry-run": {
|
||||
type: Boolean,
|
||||
description: "Just show what backup would do, but do not upload anything.",
|
||||
optional: true,
|
||||
default: false,
|
||||
},
|
||||
"skip-e2big-xattr": {
|
||||
type: Boolean,
|
||||
description:
|
||||
"Ignore the E2BIG error when retrieving xattrs. This includes the file, but \
|
||||
discards the metadata.",
|
||||
optional: true,
|
||||
default: false,
|
||||
},
|
||||
"no-cache": {
|
||||
type: Boolean,
|
||||
description: "Bypass local datastore cache for network storages.",
|
||||
optional: true,
|
||||
default: false,
|
||||
},
|
||||
}
|
||||
}
|
||||
)]
|
||||
/// Create (host) backup.
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
async fn create_backup(
|
||||
@ -744,6 +760,7 @@ async fn create_backup(
|
||||
change_detection_mode: Option<BackupDetectionMode>,
|
||||
dry_run: bool,
|
||||
skip_e2big_xattr: bool,
|
||||
no_cache: bool,
|
||||
limit: ClientRateLimitConfig,
|
||||
_info: &ApiMethod,
|
||||
_rpcenv: &mut dyn RpcEnvironment,
|
||||
@@ -817,40 +834,36 @@ async fn create_backup(
    let mut target_set = HashSet::new();

    for backupspec in backupspec_list {
        let spec = parse_backup_specification(backupspec.as_str().unwrap())?;
        let filename = &spec.config_string;
        let target = &spec.archive_name;
        let pbs_client::BackupSpecification {
            archive_name: target,
            config_string: filename,
            spec_type,
        } = parse_backup_specification(backupspec.as_str().unwrap())?;

        if target_set.contains(target) {
        if target_set.contains(&target) {
            bail!("got target twice: '{}'", target);
        }
        target_set.insert(target.to_string());
        target_set.insert(target.clone());

        use std::os::unix::fs::FileTypeExt;

        let metadata = std::fs::metadata(filename)
        let metadata = std::fs::metadata(&filename)
            .map_err(|err| format_err!("unable to access '{}' - {}", filename, err))?;
        let file_type = metadata.file_type();

        match spec.spec_type {
        match spec_type {
            BackupSpecificationType::PXAR => {
                if !file_type.is_dir() {
                    bail!("got unexpected file type (expected directory)");
                }
                upload_list.push((
                    BackupSpecificationType::PXAR,
                    filename.to_owned(),
                    target.to_owned(),
                    "didx",
                    0,
                ));
                upload_list.push((BackupSpecificationType::PXAR, filename, target, "didx", 0));
            }
            BackupSpecificationType::IMAGE => {
                if !(file_type.is_file() || file_type.is_block_device()) {
                    bail!("got unexpected file type (expected file or block device)");
                }

                let size = image_size(&PathBuf::from(filename))?;
                let size = image_size(&PathBuf::from(&filename))?;

                if size == 0 {
                    bail!("got zero-sized file '{}'", filename);
@@ -858,8 +871,8 @@ async fn create_backup(

                upload_list.push((
                    BackupSpecificationType::IMAGE,
                    filename.to_owned(),
                    target.to_owned(),
                    filename,
                    target,
                    "fidx",
                    size,
                ));
@@ -870,8 +883,8 @@ async fn create_backup(
                }
                upload_list.push((
                    BackupSpecificationType::CONFIG,
                    filename.to_owned(),
                    target.to_owned(),
                    filename,
                    target,
                    "blob",
                    metadata.len(),
                ));
@@ -882,8 +895,8 @@ async fn create_backup(
                }
                upload_list.push((
                    BackupSpecificationType::LOGFILE,
                    filename.to_owned(),
                    target.to_owned(),
                    filename,
                    target,
                    "blob",
                    metadata.len(),
                ));
@@ -948,12 +961,15 @@ async fn create_backup(

    let client = BackupWriter::start(
        &http_client,
        crypt_config.clone(),
        repo.store(),
        &backup_ns,
        &snapshot,
        true,
        false,
        BackupWriterOptions {
            datastore: repo.store(),
            ns: &backup_ns,
            backup: &snapshot,
            crypt_config: crypt_config.clone(),
            debug: true,
            benchmark: false,
            no_cache,
        },
    )
    .await?;
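
Note (not part of the patch): a hedged sketch of how a caller might use the options-struct form of `BackupWriter::start` introduced in the hunk above. The field names are copied from the diff; the module path of `BackupWriterOptions`, the exact parameter types, and the return type are assumptions that have not been verified against this branch.

```rust
use anyhow::Error;
use pbs_api_types::{BackupDir, BackupNamespace};
use pbs_client::{BackupWriter, BackupWriterOptions, HttpClient};

// Hedged sketch only: assumes BackupWriterOptions is exported next to
// BackupWriter and that `start` still returns Result<Arc<BackupWriter>, Error>.
async fn start_writer(
    http_client: &HttpClient,
    datastore: &str,
    ns: &BackupNamespace,
    snapshot: &BackupDir,
    no_cache: bool,
) -> Result<std::sync::Arc<BackupWriter>, Error> {
    BackupWriter::start(
        http_client,
        BackupWriterOptions {
            datastore,
            ns,
            backup: snapshot,
            crypt_config: None, // unencrypted backup for brevity
            debug: false,
            benchmark: false,
            no_cache,
        },
    )
    .await
}
```

Grouping the former positional arguments into one struct makes the new `no_cache` flag explicit at the call site instead of extending an already long parameter list.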

@@ -1565,7 +1581,10 @@ async fn restore(
    let (manifest, backup_index_data) = client.download_manifest().await?;

    if archive_name == *ENCRYPTED_KEY_BLOB_NAME && crypt_config.is_none() {
        log::info!("Restoring encrypted key blob without original key - skipping manifest fingerprint check!")
        log::info!(
            "Restoring encrypted key blob without original key - skipping manifest fingerprint \
            check!"
        );
    } else {
        if manifest.signature.is_some() {
            if let Some(key) = &crypto.enc_key {
@@ -1960,7 +1979,10 @@ impl ReadAt for BufferedDynamicReadAt {

fn main() {
    pbs_tools::setup_libc_malloc_opts();
    init_cli_logger("PBS_LOG", proxmox_log::LevelFilter::INFO).expect("failed to initiate logger");
    proxmox_log::Logger::from_env("PBS_LOG", proxmox_log::LevelFilter::INFO)
        .stderr()
        .init()
        .expect("failed to initiate logger");

    let backup_cmd_def = CliCommand::new(&API_METHOD_CREATE_BACKUP)
        .arg_param(&["backupspec"])

@@ -262,7 +262,7 @@ async fn mount_do(param: Value, pipe: Option<OwnedFd>) -> Result<Value, Error> {
        }
        // Signal the parent process that we are done with the setup and it can
        // terminate.
        nix::unistd::write(pipe.as_raw_fd(), &[1u8])?;
        nix::unistd::write(&pipe, &[1u8])?;
        let _: OwnedFd = pipe;
    }

@@ -271,7 +271,7 @@ async fn upload_log(param: Value) -> Result<Value, Error> {
    );

    let args = snapshot_args(&backup_ns, &snapshot)?;
    let body = hyper::Body::from(raw_data);
    let body = proxmox_http::Body::from(raw_data);

    client
        .upload("application/octet-stream", body, &path, Some(args))

@@ -6,7 +6,6 @@ edition.workspace = true

[dependencies]
anyhow.workspace = true
base64.workspace = true
futures.workspace = true
libc.workspace = true
log.workspace = true
@@ -19,6 +18,7 @@ tokio-util.workspace = true
pxar.workspace = true

proxmox-async.workspace = true
proxmox-base64.workspace = true
proxmox-compression.workspace = true
proxmox-lang.workspace=true
proxmox-log.workspace=true

@@ -247,7 +247,7 @@ impl BlockRestoreDriver for QemuBlockDriver {
            path.insert(0, b'/');
        }
        handle_extra_guest_memory_needs(cid, &path).await;
        let path = base64::encode(img_file.bytes().chain(path).collect::<Vec<u8>>());
        let path = proxmox_base64::encode(img_file.bytes().chain(path).collect::<Vec<u8>>());
        let mut result = client
            .get("api2/json/list", Some(json!({ "path": path })))
            .await?;
@@ -270,7 +270,7 @@ impl BlockRestoreDriver for QemuBlockDriver {
            path.insert(0, b'/');
        }
        handle_extra_guest_memory_needs(cid, &path).await;
        let path = base64::encode(img_file.bytes().chain(path).collect::<Vec<u8>>());
        let path = proxmox_base64::encode(img_file.bytes().chain(path).collect::<Vec<u8>>());
        let (mut tx, rx) = tokio::io::duplex(1024 * 4096);
        let mut data = json!({ "path": path, "zstd": zstd });
        if let Some(format) = format {
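
Note (not part of the patch): a minimal round-trip sketch of the `proxmox_base64` helpers that replace the old `base64` crate calls in these hunks. It assumes `encode` returns a `String` and `decode` returns a `Result<Vec<u8>, _>` whose error converts into `anyhow::Error`, matching how the call sites above use them.

```rust
fn main() -> Result<(), anyhow::Error> {
    // Encode an arbitrary byte path, as the restore driver does above.
    let encoded = proxmox_base64::encode(b"/etc/hostname".to_vec());
    // Decode it back, as the list/extract handlers do with the "path" parameter.
    let decoded = proxmox_base64::decode(&encoded)?;
    assert_eq!(decoded, b"/etc/hostname".to_vec());
    Ok(())
}
```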

@@ -10,7 +10,6 @@ use serde_json::{json, Value};
use tokio::io::AsyncWriteExt;

use proxmox_compression::zstd::ZstdEncoder;
use proxmox_log::init_cli_logger;
use proxmox_router::cli::{
    complete_file_name, default_table_format_options, format_and_print_result_full,
    get_output_format, run_cli_command, CliCommand, CliCommandMap, CliEnvironment, ColumnConfig,
@@ -57,7 +56,7 @@ enum ExtractPath {

fn parse_path(path: String, base64: bool) -> Result<ExtractPath, Error> {
    let mut bytes = if base64 {
        base64::decode(&path)
        proxmox_base64::decode(&path)
            .map_err(|err| format_err!("Failed base64-decoding path '{path}' - {err}"))?
    } else {
        path.into_bytes()
@@ -629,7 +628,11 @@ fn main() {
        true => proxmox_log::LevelFilter::DEBUG,
        false => proxmox_log::LevelFilter::INFO,
    };
    init_cli_logger("PBS_LOG", loglevel).expect("failed to initiate logger");

    proxmox_log::Logger::from_env("PBS_LOG", loglevel)
        .stderr()
        .init()
        .expect("failed to initiate logger");

    let list_cmd_def = CliCommand::new(&API_METHOD_LIST)
        .arg_param(&["snapshot", "path"])

@@ -316,7 +316,6 @@ pub async fn start_vm(
    } else {
        // add more RAM if many drives are given
        match id {
            f if f < 10 => 192,
            f if f < 20 => 256,
            _ => 384,
        }

@@ -8,13 +8,13 @@ rust-version.workspace = true

[dependencies]
anyhow.workspace = true
base64.workspace = true
env_logger.workspace = true
futures.workspace = true
hyper.workspace = true
hyper-util = { workspace = true, features = [ "service" ] }
libc.workspace = true
log.workspace = true
nix.workspace = true
nix = { workspace = true, features = [ "mount", "reboot" ] }
regex.workspace = true
serde_json.workspace = true
tokio = { workspace = true, features = ["macros", "parking_lot", "sync"] }
@@ -25,7 +25,9 @@ pathpatterns.workspace = true
pxar.workspace = true

proxmox-async.workspace = true
proxmox-base64.workspace = true
proxmox-compression.workspace = true
proxmox-http.workspace = true
proxmox-rest-server.workspace = true
proxmox-router = { workspace = true, features = [ "cli", "server" ] }
proxmox-schema = { workspace = true, features = [ "api-macro" ] }

@@ -1,14 +1,15 @@
//! Daemon binary to run inside a micro-VM for secure single file restore of disk images
use std::fs::File;
use std::io::prelude::*;
use std::os::unix::{
    io::{FromRawFd, RawFd},
    net,
};
use std::os::fd::{AsRawFd, FromRawFd, OwnedFd};
use std::os::unix::net;
use std::path::Path;
use std::sync::{Arc, LazyLock, Mutex};

use anyhow::{bail, format_err, Error};
use futures::StreamExt;
use hyper_util::rt::TokioIo;
use hyper_util::service::TowerToHyperService;
use log::{error, info};
use tokio::sync::mpsc;
use tokio_stream::wrappers::ReceiverStream;
@@ -114,14 +115,23 @@ async fn run() -> Result<(), Error> {

    let vsock_fd = get_vsock_fd()?;
    let connections = accept_vsock_connections(vsock_fd);
    let receiver_stream = ReceiverStream::new(connections);
    let acceptor = hyper::server::accept::from_stream(receiver_stream);
    let mut receiver_stream = ReceiverStream::new(connections);

    let hyper_future = async move {
        hyper::Server::builder(acceptor)
            .serve(rest_server)
            .await
            .map_err(|err| format_err!("hyper finished with error: {}", err))
        while let Some(conn) = receiver_stream.next().await {
            let conn = conn?;

            let api_service = TowerToHyperService::new(rest_server.api_service(&conn)?);

            let conn = hyper::server::conn::http1::Builder::new()
                .serve_connection(TokioIo::new(conn), api_service);

            tokio::spawn(async move {
                conn.await
                    .map_err(|err| format_err!("hyper finished with error: {}", err))
            });
        }
        Ok(())
    };

    tokio::try_join!(init_future, hyper_future)?;
@@ -130,7 +140,7 @@ async fn run() -> Result<(), Error> {
}

fn accept_vsock_connections(
    vsock_fd: RawFd,
    vsock_fd: OwnedFd,
) -> mpsc::Receiver<Result<tokio::net::UnixStream, Error>> {
    use nix::sys::socket::*;
    let (sender, receiver) = mpsc::channel(MAX_PENDING);
@@ -139,7 +149,7 @@ fn accept_vsock_connections(
    loop {
        let stream: Result<tokio::net::UnixStream, Error> = tokio::task::block_in_place(|| {
            // we need to accept manually, as UnixListener aborts if socket type != AF_UNIX ...
            let client_fd = accept(vsock_fd)?;
            let client_fd = accept(vsock_fd.as_raw_fd())?;
            let stream = unsafe { net::UnixStream::from_raw_fd(client_fd) };
            stream.set_nonblocking(true)?;
            tokio::net::UnixStream::from_std(stream).map_err(|err| err.into())
@@ -161,7 +171,7 @@ fn accept_vsock_connections(
    receiver
}

fn get_vsock_fd() -> Result<RawFd, Error> {
fn get_vsock_fd() -> Result<OwnedFd, Error> {
    use nix::sys::socket::*;
    let sock_fd = socket(
        AddressFamily::Vsock,
@@ -170,7 +180,11 @@ fn get_vsock_fd() -> Result<RawFd, Error> {
        None,
    )?;
    let sock_addr = VsockAddr::new(libc::VMADDR_CID_ANY, DEFAULT_VSOCK_PORT as u32);
    bind(sock_fd, &sock_addr)?;
    listen(sock_fd, MAX_PENDING)?;
    bind(sock_fd.as_raw_fd(), &sock_addr)?;
    listen(
        &sock_fd,
        nix::sys::socket::Backlog::new(MAX_PENDING as i32)
            .expect("MAX_PENDING is invalid as a listening backlog"),
    )?;
    Ok(sock_fd)
}

@@ -6,14 +6,16 @@ use std::path::{Path, PathBuf};

use anyhow::{bail, Error};
use futures::FutureExt;
use hyper::body::Incoming;
use hyper::http::request::Parts;
use hyper::{header, Body, Response, StatusCode};
use hyper::{header, Response, StatusCode};
use log::error;
use serde_json::Value;
use tokio::sync::Semaphore;

use pathpatterns::{MatchEntry, MatchPattern, MatchType, Pattern};
use proxmox_compression::{tar::tar_directory, zip::zip_directory, zstd::ZstdEncoder};
use proxmox_http::Body;
use proxmox_router::{
    list_subdirs_api_method, ApiHandler, ApiMethod, ApiResponseFuture, Permission, Router,
    RpcEnvironment, SubdirMap,
@@ -143,7 +145,7 @@ fn list(

    let mut res = Vec::new();

    let param_path = base64::decode(path)?;
    let param_path = proxmox_base64::decode(path)?;
    let mut path = param_path.clone();
    if let Some(b'/') = path.last() {
        path.pop();
@@ -264,7 +266,7 @@ pub const API_METHOD_EXTRACT: ApiMethod = ApiMethod::new(

fn extract(
    _parts: Parts,
    _req_body: Body,
    _req_body: Incoming,
    param: Value,
    _info: &ApiMethod,
    _rpcenv: Box<dyn RpcEnvironment>,
@@ -280,7 +282,7 @@ fn extract(
    };

    let path = required_string_param(&param, "path")?;
    let mut path = base64::decode(path)?;
    let mut path = proxmox_base64::decode(path)?;
    if let Some(b'/') = path.last() {
        path.pop();
    }

@@ -7,8 +7,9 @@ use std::sync::Arc;

use anyhow::{bail, format_err, Error};
use hyper::http::HeaderMap;
use hyper::{Body, Method, Response, StatusCode};
use hyper::{Method, Response, StatusCode};

use proxmox_http::Body;
use proxmox_router::UserInformation;

use proxmox_rest_server::AuthError;
@@ -39,17 +40,15 @@ pub fn read_ticket() -> Result<Arc<str>, Error> {
    Ok(ticket.into())
}

pub fn check_auth<'a>(
    ticket: Arc<str>,
    headers: &'a HeaderMap,
    _method: &'a Method,
) -> Pin<
type AuthFn<'a> = Pin<
    Box<
        dyn Future<Output = Result<(String, Box<dyn UserInformation + Sync + Send>), AuthError>>
            + Send
            + 'a,
    >,
> {
>;

pub fn check_auth<'a>(ticket: Arc<str>, headers: &'a HeaderMap, _method: &'a Method) -> AuthFn<'a> {
    Box::pin(async move {
        match headers.get(hyper::header::AUTHORIZATION) {
            Some(header) if header.to_str().unwrap_or("") == &*ticket => {
@@ -71,7 +70,7 @@ pub fn get_index() -> Pin<Box<dyn Future<Output = hyper::http::Response<Body>> +
    Response::builder()
        .status(StatusCode::OK)
        .header(hyper::header::CONTENT_TYPE, "text/html")
        .body(index.into())
        .body(index.to_owned().into())
        .unwrap()
    })
}

@@ -17,7 +17,7 @@ use pbs_api_types::BLOCKDEVICE_NAME_REGEX;

const_regex! {
    VIRTIO_PART_REGEX = r"^vd[a-z]+(\d+)$";
    ZPOOL_POOL_NAME_REGEX = r"^ {3}pool: (.*)$";
    ZPOOL_POOL_NAME_REGEX = r"^ {2,3}pool: (.*)$";
    ZPOOL_IMPORT_DISK_REGEX = r"^\t {2,4}(vd[a-z]+(?:\d+)?)\s+ONLINE$";
}
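
Note (not part of the patch): a small illustration of the widened `ZPOOL_POOL_NAME_REGEX` above, showing that the `{2,3}` quantifier now accepts a `pool:` line indented with either two or three spaces. The sample lines are invented for the example and it assumes the stock `regex` crate.

```rust
fn main() {
    let re = regex::Regex::new(r"^ {2,3}pool: (.*)$").unwrap();
    // Both indentation variants yield the pool name in capture group 1.
    for line in ["  pool: rpool", "   pool: rpool"] {
        let caps = re.captures(line).expect("pool line should match");
        assert_eq!(&caps[1], "rpool");
    }
}
```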

@@ -22,7 +22,7 @@ use pbs_client::pxar::{
use pxar::EntryKind;

use proxmox_human_byte::HumanByte;
use proxmox_log::{debug, enabled, error, init_cli_logger, Level};
use proxmox_log::{debug, enabled, error, Level};
use proxmox_router::cli::*;
use proxmox_schema::api;

@@ -574,7 +574,10 @@ fn dump_archive(archive: String, payload_input: Option<String>) -> Result<(), Er
}

fn main() {
    init_cli_logger("PXAR_LOG", proxmox_log::LevelFilter::INFO).expect("failed to initiate logger");
    proxmox_log::Logger::from_env("PXAR_LOG", proxmox_log::LevelFilter::INFO)
        .stderr()
        .init()
        .expect("failed to initiate logger");

    let cmd_def = CliCommandMap::new()
        .insert(
Some files were not shown because too many files have changed in this diff.