Compare commits
596 Commits
Cargo.toml (84 lines changed)

@@ -1,5 +1,5 @@
 [workspace.package]
-version = "3.2.7"
+version = "3.4.1"
 authors = [
     "Dietmar Maurer <dietmar@proxmox.com>",
     "Dominik Csapak <d.csapak@proxmox.com>",
@@ -13,6 +13,7 @@ authors = [
 edition = "2021"
 license = "AGPL-3"
 repository = "https://git.proxmox.com/?p=proxmox-backup.git"
+rust-version = "1.81"
 
 [package]
 name = "proxmox-backup"
@@ -28,7 +29,6 @@ exclude = [ "build", "debian", "tests/catar_data/test_symlink/symlink1"]
 
 [workspace]
 members = [
-    "pbs-api-types",
     "pbs-buildcfg",
     "pbs-client",
     "pbs-config",
@@ -53,43 +53,51 @@ path = "src/lib.rs"
 
 [workspace.dependencies]
 # proxmox workspace
-proxmox-apt = "0.10.5"
+proxmox-apt = { version = "0.11", features = [ "cache" ] }
+proxmox-apt-api-types = "1.0.1"
 proxmox-async = "0.4"
 proxmox-auth-api = "0.4"
 proxmox-borrow = "1"
 proxmox-compression = "0.2"
+proxmox-config-digest = "0.1.0"
+proxmox-daemon = "0.1.0"
 proxmox-fuse = "0.1.3"
-proxmox-http = { version = "0.9.0", features = [ "client", "http-helpers", "websocket" ] } # see below
+proxmox-http = { version = "0.9.5", features = [ "client", "http-helpers", "websocket" ] } # see below
 proxmox-human-byte = "0.1"
 proxmox-io = "1.0.1" # tools and client use "tokio" feature
 proxmox-lang = "1.1"
+proxmox-log = "0.2.6"
 proxmox-ldap = "0.2.1"
 proxmox-metrics = "0.3.1"
-proxmox-notify = "0.4"
+proxmox-notify = "0.5.1"
 proxmox-openid = "0.10.0"
-proxmox-rest-server = { version = "0.5.1", features = [ "templates" ] }
+proxmox-rest-server = { version = "0.8.9", features = [ "templates" ] }
 # some use "cli", some use "cli" and "server", pbs-config uses nothing
-proxmox-router = { version = "2.0.0", default-features = false }
+proxmox-router = { version = "3.0.0", default-features = false }
-proxmox-rrd = { version = "0.2" }
+proxmox-rrd = "0.4"
+proxmox-rrd-api-types = "1.0.2"
 # everything but pbs-config and pbs-client use "api-macro"
-proxmox-schema = "3"
+proxmox-schema = "4"
 proxmox-section-config = "2"
 proxmox-serde = "0.1.1"
+proxmox-shared-cache = "0.1"
 proxmox-shared-memory = "0.3.0"
 proxmox-sortable-macro = "0.1.2"
-proxmox-subscription = { version = "0.4.2", features = [ "api-types" ] }
+proxmox-subscription = { version = "0.5.0", features = [ "api-types" ] }
-proxmox-sys = "0.5.7"
+proxmox-sys = "0.6.7"
-proxmox-tfa = { version = "4.0.4", features = [ "api", "api-types" ] }
+proxmox-systemd = "0.1"
+proxmox-tfa = { version = "5", features = [ "api", "api-types" ] }
 proxmox-time = "2"
-proxmox-uuid = "1"
+proxmox-uuid = { version = "1", features = [ "serde" ] }
+proxmox-worker-task = "0.1"
+pbs-api-types = "0.2.2"
 
 # other proxmox crates
 pathpatterns = "0.3"
-proxmox-acme = "0.5"
+proxmox-acme = "0.5.3"
-pxar = "0.12"
+pxar = "0.12.1"
 
 # PBS workspace
-pbs-api-types = { path = "pbs-api-types" }
 pbs-buildcfg = { path = "pbs-buildcfg" }
 pbs-client = { path = "pbs-client" }
 pbs-config = { path = "pbs-config" }
@@ -112,16 +120,15 @@ crc32fast = "1"
 const_format = "0.2"
 crossbeam-channel = "0.5"
 endian_trait = { version = "0.6", features = ["arrays"] }
-env_logger = "0.10"
+env_logger = "0.11"
 flate2 = "1.0"
 foreign-types = "0.3"
 futures = "0.3"
-h2 = { version = "0.3", features = [ "stream" ] }
+h2 = { version = "0.4", features = [ "legacy", "stream" ] }
 handlebars = "3.0"
 hex = "0.4.3"
-http = "0.2"
+hickory-resolver = { version = "0.24.1", default-features = false, features = [ "system-config", "tokio-runtime" ] }
-hyper = { version = "0.14", features = [ "full" ] }
+hyper = { version = "0.14", features = [ "backports", "deprecated", "full" ] }
-lazy_static = "1.4"
 libc = "0.2"
 log = "0.4.17"
 nix = "0.26.1"
@@ -135,7 +142,6 @@ regex = "1.5.5"
 rustyline = "9"
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
-serde_plain = "1"
 siphasher = "0.3"
 syslog = "6"
 tar = "0.4"
@@ -145,33 +151,29 @@ tokio = "1.6"
 tokio-openssl = "0.6.1"
 tokio-stream = "0.1.0"
 tokio-util = { version = "0.7", features = [ "io" ] }
+tracing = "0.1"
 tower-service = "0.3.0"
 udev = "0.4"
 url = "2.1"
 walkdir = "2"
 xdg = "2.2"
 zstd = { version = "0.12", features = [ "bindgen" ] }
+zstd-safe = "6.0"
 
 [dependencies]
 anyhow.workspace = true
 async-trait.workspace = true
-apt-pkg-native.workspace = true
 base64.workspace = true
-bitflags.workspace = true
 bytes.workspace = true
 cidr.workspace = true
 const_format.workspace = true
 crc32fast.workspace = true
 crossbeam-channel.workspace = true
 endian_trait.workspace = true
-flate2.workspace = true
 futures.workspace = true
 h2.workspace = true
-handlebars.workspace = true
 hex.workspace = true
-http.workspace = true
 hyper.workspace = true
-lazy_static.workspace = true
 libc.workspace = true
 log.workspace = true
 nix.workspace = true
@@ -184,7 +186,6 @@ regex.workspace = true
 rustyline.workspace = true
 serde.workspace = true
 serde_json.workspace = true
-siphasher.workspace = true
 syslog.workspace = true
 termcolor.workspace = true
 thiserror.workspace = true
@@ -192,24 +193,27 @@ tokio = { workspace = true, features = [ "fs", "io-util", "io-std", "macros", "n
 tokio-openssl.workspace = true
 tokio-stream.workspace = true
 tokio-util = { workspace = true, features = [ "codec" ] }
-tower-service.workspace = true
+tracing.workspace = true
 udev.workspace = true
 url.workspace = true
 walkdir.workspace = true
-xdg.workspace = true
 zstd.workspace = true
 
 #valgrind_request = { git = "https://github.com/edef1c/libvalgrind_request", version = "1.1.0", optional = true }
 
 # proxmox workspace
 proxmox-apt.workspace = true
+proxmox-apt-api-types.workspace = true
 proxmox-async.workspace = true
 proxmox-auth-api = { workspace = true, features = [ "api", "pam-authenticator" ] }
 proxmox-compression.workspace = true
+proxmox-config-digest.workspace = true
+proxmox-daemon.workspace = true
 proxmox-http = { workspace = true, features = [ "client-trait", "proxmox-async", "rate-limited-stream" ] } # pbs-client doesn't use these
 proxmox-human-byte.workspace = true
 proxmox-io.workspace = true
 proxmox-lang.workspace = true
+proxmox-log.workspace = true
 proxmox-ldap.workspace = true
 proxmox-metrics.workspace = true
 proxmox-notify = { workspace = true, features = [ "pbs-context" ] }
@@ -219,21 +223,23 @@ proxmox-router = { workspace = true, features = [ "cli", "server"] }
 proxmox-schema = { workspace = true, features = [ "api-macro" ] }
 proxmox-section-config.workspace = true
 proxmox-serde = { workspace = true, features = [ "serde_json" ] }
+proxmox-shared-cache.workspace = true
 proxmox-shared-memory.workspace = true
 proxmox-sortable-macro.workspace = true
 proxmox-subscription.workspace = true
 proxmox-sys = { workspace = true, features = [ "timer" ] }
+proxmox-systemd.workspace = true
 proxmox-tfa.workspace = true
 proxmox-time.workspace = true
 proxmox-uuid.workspace = true
+proxmox-worker-task.workspace = true
+pbs-api-types.workspace = true
 
 # in their respective repo
-pathpatterns.workspace = true
 proxmox-acme.workspace = true
 pxar.workspace = true
 
 # proxmox-backup workspace/internal crates
-pbs-api-types.workspace = true
 pbs-buildcfg.workspace = true
 pbs-client.workspace = true
 pbs-config.workspace = true
@@ -242,21 +248,27 @@ pbs-key-config.workspace = true
 pbs-tape.workspace = true
 pbs-tools.workspace = true
 proxmox-rrd.workspace = true
+proxmox-rrd-api-types.workspace = true
 
 # Local path overrides
 # NOTE: You must run `cargo update` after changing this for it to take effect!
 [patch.crates-io]
+#pbs-api-types = { path = "../proxmox/pbs-api-types" }
+#proxmox-acme = { path = "../proxmox/proxmox-acme" }
 #proxmox-apt = { path = "../proxmox/proxmox-apt" }
+#proxmox-apt-api-types = { path = "../proxmox/proxmox-apt-api-types" }
 #proxmox-async = { path = "../proxmox/proxmox-async" }
 #proxmox-auth-api = { path = "../proxmox/proxmox-auth-api" }
 #proxmox-borrow = { path = "../proxmox/proxmox-borrow" }
 #proxmox-compression = { path = "../proxmox/proxmox-compression" }
+#proxmox-config-digest = { path = "../proxmox/proxmox-config-digest" }
+#proxmox-daemon = { path = "../proxmox/proxmox-daemon" }
 #proxmox-fuse = { path = "../proxmox-fuse" }
 #proxmox-http = { path = "../proxmox/proxmox-http" }
 #proxmox-human-byte = { path = "../proxmox/proxmox-human-byte" }
 #proxmox-io = { path = "../proxmox/proxmox-io" }
 #proxmox-lang = { path = "../proxmox/proxmox-lang" }
+#proxmox-log = { path = "../proxmox/proxmox-log" }
 #proxmox-ldap = { path = "../proxmox/proxmox-ldap" }
 #proxmox-metrics = { path = "../proxmox/proxmox-metrics" }
 #proxmox-notify = { path = "../proxmox/proxmox-notify" }
@@ -264,6 +276,7 @@ proxmox-rrd.workspace = true
 #proxmox-rest-server = { path = "../proxmox/proxmox-rest-server" }
 #proxmox-router = { path = "../proxmox/proxmox-router" }
 #proxmox-rrd = { path = "../proxmox/proxmox-rrd" }
+#proxmox-rrd-api-types = { path = "../proxmox/proxmox-rrd-api-types" }
 #proxmox-schema = { path = "../proxmox/proxmox-schema" }
 #proxmox-section-config = { path = "../proxmox/proxmox-section-config" }
 #proxmox-serde = { path = "../proxmox/proxmox-serde" }
@@ -271,11 +284,12 @@ proxmox-rrd.workspace = true
 #proxmox-sortable-macro = { path = "../proxmox/proxmox-sortable-macro" }
 #proxmox-subscription = { path = "../proxmox/proxmox-subscription" }
 #proxmox-sys = { path = "../proxmox/proxmox-sys" }
+#proxmox-systemd = { path = "../proxmox/proxmox-systemd" }
 #proxmox-tfa = { path = "../proxmox/proxmox-tfa" }
 #proxmox-time = { path = "../proxmox/proxmox-time" }
 #proxmox-uuid = { path = "../proxmox/proxmox-uuid" }
+#proxmox-worker-task = { path = "../proxmox/proxmox-worker-task" }
 
-#proxmox-acme = { path = "../proxmox/proxmox-acme" }
 #pathpatterns = {path = "../pathpatterns" }
 #pxar = { path = "../pxar" }
Makefile (42 lines changed)

@@ -1,8 +1,10 @@
 include /usr/share/dpkg/default.mk
+include /usr/share/rustc/architecture.mk
 include defines.mk
 
 PACKAGE := proxmox-backup
 ARCH := $(DEB_BUILD_ARCH)
+export DEB_HOST_RUST_TYPE
 
 SUBDIRS := etc www docs templates
 
@@ -36,13 +38,20 @@ SUBCRATES != cargo metadata --no-deps --format-version=1 \
 	| grep "$$PWD/" \
 	| sed -e "s!.*$$PWD/!!g" -e 's/\#.*$$//g' -e 's/)$$//g'
 
+STATIC_TARGET_DIR := target/static-build
+
 ifeq ($(BUILD_MODE), release)
-CARGO_BUILD_ARGS += --release
+CARGO_BUILD_ARGS += --release --target $(DEB_HOST_RUST_TYPE)
 COMPILEDIR := target/$(DEB_HOST_RUST_TYPE)/release
+STATIC_COMPILEDIR := $(STATIC_TARGET_DIR)/$(DEB_HOST_RUST_TYPE)/release
 else
+CARGO_BUILD_ARGS += --target $(DEB_HOST_RUST_TYPE)
 COMPILEDIR := target/$(DEB_HOST_RUST_TYPE)/debug
+STATIC_COMPILEDIR := $(STATIC_TARGET_DIR)/$(DEB_HOST_RUST_TYPE)/debug
 endif
 
+STATIC_RUSTC_FLAGS := -C target-feature=+crt-static -L $(STATIC_COMPILEDIR)/deps-stubs/
+
 ifeq ($(valgrind), yes)
 CARGO_BUILD_ARGS += --features valgrind
 endif
@@ -52,6 +61,9 @@ CARGO ?= cargo
 COMPILED_BINS := \
 	$(addprefix $(COMPILEDIR)/,$(USR_BIN) $(USR_SBIN) $(SERVICE_BIN) $(RESTORE_BIN))
 
+STATIC_BINS := \
+	$(addprefix $(STATIC_COMPILEDIR)/,proxmox-backup-client-static pxar-static)
+
 export DEB_VERSION DEB_VERSION_UPSTREAM
 
 SERVER_DEB=$(PACKAGE)-server_$(DEB_VERSION)_$(ARCH).deb
@@ -60,10 +72,12 @@ CLIENT_DEB=$(PACKAGE)-client_$(DEB_VERSION)_$(ARCH).deb
 CLIENT_DBG_DEB=$(PACKAGE)-client-dbgsym_$(DEB_VERSION)_$(ARCH).deb
 RESTORE_DEB=proxmox-backup-file-restore_$(DEB_VERSION)_$(ARCH).deb
 RESTORE_DBG_DEB=proxmox-backup-file-restore-dbgsym_$(DEB_VERSION)_$(ARCH).deb
+STATIC_CLIENT_DEB=$(PACKAGE)-client-static_$(DEB_VERSION)_$(ARCH).deb
+STATIC_CLIENT_DBG_DEB=$(PACKAGE)-client-static-dbgsym_$(DEB_VERSION)_$(ARCH).deb
 DOC_DEB=$(PACKAGE)-docs_$(DEB_VERSION)_all.deb
 
 DEBS=$(SERVER_DEB) $(SERVER_DBG_DEB) $(CLIENT_DEB) $(CLIENT_DBG_DEB) \
-	$(RESTORE_DEB) $(RESTORE_DBG_DEB)
+	$(RESTORE_DEB) $(RESTORE_DBG_DEB) $(STATIC_CLIENT_DEB) $(STATIC_CLIENT_DBG_DEB)
 
 DSC = rust-$(PACKAGE)_$(DEB_VERSION).dsc
 
@@ -71,7 +85,7 @@ DESTDIR=
 
 tests ?= --workspace
 
-all: $(SUBDIRS)
+all: proxmox-backup-client-static $(SUBDIRS)
 
 .PHONY: $(SUBDIRS)
 $(SUBDIRS):
@@ -141,7 +155,7 @@ clean: clean-deb
 	$(foreach i,$(SUBDIRS), \
 	    $(MAKE) -C $(i) clean ;)
 	$(CARGO) clean
-	rm -f .do-cargo-build
+	rm -f .do-cargo-build .do-static-cargo-build
 
 # allows one to avoid running cargo clean when one just wants to tidy up after a package build
 clean-deb:
@@ -190,12 +204,25 @@ $(COMPILED_BINS) $(COMPILEDIR)/dump-catalog-shell-cli $(COMPILEDIR)/docgen: .do-
 	    --bin sg-tape-cmd
 	touch "$@"
 
+.PHONY: proxmox-backup-client-static
+proxmox-backup-client-static:
+	rm -f .do-static-cargo-build
+	$(MAKE) $(STATIC_BINS)
+
+$(STATIC_BINS): .do-static-cargo-build
+.do-static-cargo-build:
+	mkdir -p $(STATIC_COMPILEDIR)/deps-stubs/ && \
+	    echo '!<arch>' > $(STATIC_COMPILEDIR)/deps-stubs/libsystemd.a # workaround for to greedy linkage and proxmox-systemd
+	$(CARGO) rustc $(CARGO_BUILD_ARGS) --package pxar-bin --bin pxar \
+	    --target-dir $(STATIC_TARGET_DIR) -- $(STATIC_RUSTC_FLAGS)
+	$(CARGO) rustc $(CARGO_BUILD_ARGS) --package proxmox-backup-client --bin proxmox-backup-client \
+	    --target-dir $(STATIC_TARGET_DIR) -- $(STATIC_RUSTC_FLAGS)
+
 .PHONY: lint
 lint:
 	cargo clippy -- -A clippy::all -D clippy::correctness
 
-install: $(COMPILED_BINS)
+install: $(COMPILED_BINS) $(STATIC_BINS)
 	install -dm755 $(DESTDIR)$(BINDIR)
 	install -dm755 $(DESTDIR)$(ZSH_COMPL_DEST)
 	$(foreach i,$(USR_BIN), \
@@ -214,16 +241,19 @@ install: $(COMPILED_BINS)
 	install -m4755 -o root -g root $(COMPILEDIR)/sg-tape-cmd $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/sg-tape-cmd
 	$(foreach i,$(SERVICE_BIN), \
 	    install -m755 $(COMPILEDIR)/$(i) $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/ ;)
+	install -m755 $(STATIC_COMPILEDIR)/proxmox-backup-client $(DESTDIR)$(BINDIR)/proxmox-backup-client-static
+	install -m755 $(STATIC_COMPILEDIR)/pxar $(DESTDIR)$(BINDIR)/pxar-static
 	$(MAKE) -C www install
 	$(MAKE) -C docs install
 	$(MAKE) -C templates install
 
 .PHONY: upload
 upload: UPLOAD_DIST ?= $(DEB_DISTRIBUTION)
-upload: $(SERVER_DEB) $(CLIENT_DEB) $(RESTORE_DEB) $(DOC_DEB)
+upload: $(SERVER_DEB) $(CLIENT_DEB) $(RESTORE_DEB) $(DOC_DEB) $(STATIC_CLIENT_DEB)
 	# check if working directory is clean
 	git diff --exit-code --stat && git diff --exit-code --stat --staged
 	tar cf - $(SERVER_DEB) $(SERVER_DBG_DEB) $(DOC_DEB) $(CLIENT_DEB) $(CLIENT_DBG_DEB) \
 	    | ssh -X repoman@repo.proxmox.com upload --product pbs --dist $(UPLOAD_DIST)
 	tar cf - $(CLIENT_DEB) $(CLIENT_DBG_DEB) | ssh -X repoman@repo.proxmox.com upload --product "pve,pmg,pbs-client" --dist $(UPLOAD_DIST)
+	tar cf - $(STATIC_CLIENT_DEB) $(STATIC_CLIENT_DBG_DEB) | ssh -X repoman@repo.proxmox.com upload --product "pbs-client" --dist $(UPLOAD_DIST)
 	tar cf - $(RESTORE_DEB) $(RESTORE_DBG_DEB) | ssh -X repoman@repo.proxmox.com upload --product "pve" --dist $(UPLOAD_DIST)
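With the targets added above, the statically linked client can also be built on its own; a usage sketch (not part of the patch itself, just following the new rules):

  make proxmox-backup-client-static

Per the new variables, the binaries land in target/static-build/$(DEB_HOST_RUST_TYPE)/release (or .../debug), and `make install` places them as proxmox-backup-client-static and pxar-static in $(BINDIR).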
@@ -5,8 +5,11 @@ Build & Release Notes
 ``rustup`` Toolchain
 ====================
 
-We normally want to build with the ``rustc`` Debian package. To do that
-you can set the following ``rustup`` configuration:
+We normally want to build with the ``rustc`` Debian package (see below). If you
+still want to use ``rustup`` for other reasons (e.g. to easily switch between
+the official stable, beta, and nightly compilers), you should set the following
+``rustup`` configuration to use the Debian-provided ``rustc`` compiler
+by default:
 
   # rustup toolchain link system /usr
   # rustup default system
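As an optional sanity check (not part of the notes above), the active toolchain can be verified afterwards with:

  # rustup show
  # rustup which rustc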
583
debian/changelog
vendored
@ -1,3 +1,586 @@
|
|||||||
|
rust-proxmox-backup (3.4.1-1) bookworm; urgency=medium
|
||||||
|
|
||||||
|
* ui: token view: fix typo in 'lose' and rephrase token regenerate dialog
|
||||||
|
message for more clarity.
|
||||||
|
|
||||||
|
* restrict consent-banner text length to 64 KiB.
|
||||||
|
|
||||||
|
* docs: describe the intend for the statically linked pbs client.
|
||||||
|
|
||||||
|
* api: backup: include previous snapshot name in log message.
|
||||||
|
|
||||||
|
* garbage collection: account for created/deleted index files concurrently
|
||||||
|
to GC to avoid potentially confusing log messages.
|
||||||
|
|
||||||
|
* garbage collection: fix rare race in chunk marking phase for setups doing
|
||||||
|
high frequent backups in quick succession while immediately pruning to a
|
||||||
|
single backup snapshot being left over after each such backup.
|
||||||
|
|
||||||
|
* tape: wait for calibration of LTO-9 tapes in general, not just in the
|
||||||
|
initial tape format procedure.
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Wed, 16 Apr 2025 14:45:37 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (3.4.0-1) bookworm; urgency=medium
|
||||||
|
|
||||||
|
* fix #4788: build statically linked version of the proxmox-backup-client
|
||||||
|
package.
|
||||||
|
|
||||||
|
* ui: sync job: change the rate limit direction based on sync direction.
|
||||||
|
|
||||||
|
* docs: mention how to set the push sync jobs rate limit
|
||||||
|
|
||||||
|
* ui: set error mask: ensure that message is html-encoded to avoid visual
|
||||||
|
glitches.
|
||||||
|
|
||||||
|
* api server: increase maximal request body size fro 64 kiB to 512 kiB,
|
||||||
|
similar to a recent change for our perl based projects.
|
||||||
|
|
||||||
|
* notifications: include Content-Length header for broader compatibility in
|
||||||
|
the webhook and gotify targets.
|
||||||
|
|
||||||
|
* notifications: allow overriding notification templates.
|
||||||
|
|
||||||
|
* docs: notifications: add section about how to use custom templates
|
||||||
|
|
||||||
|
* sync: print whole error chain per group on failure for more context.
|
||||||
|
|
||||||
|
* ui: options-view: fix typo in empty-text for GC tuning option.
|
||||||
|
|
||||||
|
* memory info: use the "MemAvailable" field from '/proc/meminfo' to compute
|
||||||
|
used memory to fix overestimation of that metric and to better align with
|
||||||
|
what modern versions of tools like `free` do and to future proof against
|
||||||
|
changes in how the kernel accounts memory usage for.
|
||||||
|
|
||||||
|
* add "MemAvailable" field to ProcFsMemInfo to promote its usage over the
|
||||||
|
existing "MemFree" field, which is almost never the right choice. This new
|
||||||
|
field will be provided for external metric server.
|
||||||
|
|
||||||
|
* docs: mention different name resolution for statically linked binary.
|
||||||
|
|
||||||
|
* docs: add basic info for how to install the statically linked client.
|
||||||
|
|
||||||
|
* docs: mention new verify-only and encrypted-only flags for sync jobs.
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Wed, 09 Apr 2025 17:41:38 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (3.3.7-1) bookworm; urgency=medium
|
||||||
|
|
||||||
|
* fix #5982: garbage collection: add a check to ensure that the underlying
|
||||||
|
file system supports and honors file access time (atime) updates.
|
||||||
|
The check is performed once on datastore creation and on start of every
|
||||||
|
garbage collection (GC) task, just to be sure. It can be disabled in the
|
||||||
|
datastore tuning options.
|
||||||
|
|
||||||
|
* garbage collection: support setting a custom access time cutoff,
|
||||||
|
overriding the default of one day and five minutes.
|
||||||
|
|
||||||
|
* ui: expose flag for GC access time support safety check and the access
|
||||||
|
time cutoff override in datastore tuning options.
|
||||||
|
|
||||||
|
* docs: describe rationale for new GC access time update check setting and
|
||||||
|
the access time cutoff check in tuning options.
|
||||||
|
|
||||||
|
* access control: add support to mark a specific authentication realm as
|
||||||
|
default selected realm for the login user interface.
|
||||||
|
|
||||||
|
* fix #4382: api: access control: remove permissions of token on deletion.
|
||||||
|
|
||||||
|
* fix #3887: api: access control: allow users to regenerate the secret of an
|
||||||
|
API token without changing any existing ACLs.
|
||||||
|
|
||||||
|
* fix #6072: sync jobs: support flags to limit sync to only encrypted and/or
|
||||||
|
verified snapshots.
|
||||||
|
|
||||||
|
* ui: datastore tuning options: expose overriding GC cache capacity so that
|
||||||
|
admins can either restrict the peak memory usage during GC or allow GC to
|
||||||
|
use more memory to reduce file system IO even for huge (multiple TiB)
|
||||||
|
referenced data in backup groups.
|
||||||
|
|
||||||
|
* ui: datastore tuning options: increase width and rework labels to provide
|
||||||
|
a tiny bit more context about what these options are.
|
||||||
|
|
||||||
|
* ui: sync job: increase edit window width to 720px to make it less cramped.
|
||||||
|
|
||||||
|
* ui: sync job: small field label casing consistency fixes.
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Sat, 05 Apr 2025 17:54:31 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (3.3.6-1) bookworm; urgency=medium
|
||||||
|
|
||||||
|
* datastore: ignore group locking errors when removing snapshots, they
|
||||||
|
normally happen only due to old-locking, and the underlying snapshot is
|
||||||
|
deleted in any case at this point, so it's no help to confuse the user.
|
||||||
|
|
||||||
|
* api: datastore: add error message on failed removal due to old locking and
|
||||||
|
tell any admin what they can do to switch to the new locking.
|
||||||
|
|
||||||
|
* ui: only add delete parameter on token edit, not when creating tokens.
|
||||||
|
|
||||||
|
* pbs-client: allow reading fingerprint from system credential.
|
||||||
|
|
||||||
|
* docs: client: add section about system credentials integration.
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Thu, 03 Apr 2025 17:57:02 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (3.3.5-1) bookworm; urgency=medium
|
||||||
|
|
||||||
|
* api: config: use guard for unmounting on failed datastore creation
|
||||||
|
|
||||||
|
* client: align description for backup specification to docs, using
|
||||||
|
`archive-name` and `type` over `label` and `ext`.
|
||||||
|
|
||||||
|
* client: read credentials from CREDENTIALS_DIRECTORY environment variable
|
||||||
|
following the "System and Service Credentials" specification. This allows
|
||||||
|
users to use native systemd capabilities for credential management if the
|
||||||
|
proxmox-backup-client is used in systemd units or, e.g., through a wrapper
|
||||||
|
like systemd-run.
|
||||||
|
|
||||||
|
* fix #3935: datastore/api/backup: move datastore locking to '/run' to avoid
|
||||||
|
that lock-files can block deleting backup groups or snapshots on the
|
||||||
|
datastore and to decouple locking from the underlying datastore
|
||||||
|
file-system.
|
||||||
|
|
||||||
|
* api: fix race when changing the owner of a backup-group.
|
||||||
|
|
||||||
|
* fix #3336: datastore: remove group if the last snapshot is removed to
|
||||||
|
avoid confusing situations where the group directory still exists and
|
||||||
|
blocks re-creating a group with another owner even though the empty group
|
||||||
|
was not visible in the web UI.
|
||||||
|
|
||||||
|
* notifications: clean-up and add dedicated types for all templates as to
|
||||||
|
allow declaring that interface stable in preparation for allowing
|
||||||
|
overriding them in the future (not included in this release).
|
||||||
|
|
||||||
|
* tape: introduce a tape backup job worker-thread option for restores.
|
||||||
|
Depending on the underlying storage using more threads can dramatically
|
||||||
|
improve the restore speed. Especially fast storage with low penalty for
|
||||||
|
random access, like flash-storage (SSDs) can profit from using more
|
||||||
|
worker threads. But on file systems backed by spinning disks (HDDs) the
|
||||||
|
performance can even degrade with more threads. This is why for now the
|
||||||
|
default is left at a single thread and the admin needs to tune this for
|
||||||
|
their storage.
|
||||||
|
|
||||||
|
* garbage collection: generate index file list via datastore iterators in a
|
||||||
|
structured manner.
|
||||||
|
|
||||||
|
* fix #5331: garbage collection: avoid multiple chunk atime updates by
|
||||||
|
keeping track of the recently marked chunks in phase 1 of garbage to avoid
|
||||||
|
multiple atime updates via relatively expensive utimensat (touch) calls.
|
||||||
|
Use a LRU cache with size 32 MiB for tracking already processed chunks,
|
||||||
|
this fully covers backup groups referencing up to 4 TiB of actual chunks
|
||||||
|
and even bigger ones can still benefit from the cache. On some real-world
|
||||||
|
benchmarks of a datastore with 1.5 million chunks, and original data
|
||||||
|
usage of 120 TiB and a referenced data usage of 2.7 TiB (high
|
||||||
|
deduplication count due to long-term history) we measured 21.1 times less
|
||||||
|
file updates (31.6 million) and a 6.1 times reduction in total GC runtime
|
||||||
|
(155.4 s to 22.8 s) on a ZFS RAID 10 system consisting of spinning HDDs
|
||||||
|
and a special device mirror backed by datacenter SSDs.
|
||||||
|
|
||||||
|
* logging helper: use new builder initializer – not functional change
|
||||||
|
intended.
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Wed, 02 Apr 2025 19:42:38 +0200
|
||||||
|
|
||||||
|
rust-proxmox-backup (3.3.4-1) bookworm; urgency=medium
|
||||||
|
|
||||||
|
* fix #6185: client/docs: explicitly mention archive name restrictions
|
||||||
|
|
||||||
|
* docs: using-the-installer: adapt to raised root password length requirement
|
||||||
|
|
||||||
|
* disks: wipe: replace dd with write_all_at for zeroing disk
|
||||||
|
|
||||||
|
* fix #5946: disks: wipe: ensure GPT header backup is wiped
|
||||||
|
|
||||||
|
* docs: fix hash collision probability comparison
|
||||||
|
|
||||||
|
-- Proxmox Support Team <support@proxmox.com> Thu, 13 Mar 2025 13:04:05 +0100
|
||||||
|
|
||||||
|
rust-proxmox-backup (3.3.3-1) bookworm; urgency=medium
|
||||||
|
|
||||||
|
* api: datastore list: move checking if a datastore is mounted after we
|
||||||
|
ensured that the user may actually access it. While this had no effect
|
||||||
|
security wise, it could significantly increase the cost of this API
|
||||||
|
endpoint in big setups with many datastores and many tenants that each
|
||||||
|
have only access to one, or a small set, of datastores.
|
||||||
|
|
||||||
|
* Revert "fix #5710: api: backup: stat known chunks on backup finish" due to
|
||||||
|
a big performance impact relative to what this is protectign against. We
|
||||||
|
will work out a more efficient fix for this issue in the future.
|
||||||
|
|
||||||
|
* prune simulator: show backup entries that are kept also in the flat list
|
||||||
|
of backups, not just in the calendar view.
|
||||||
|
|
||||||
|
* docs: improve the description for the garbage collection's cut-off time
|
||||||
|
|
||||||
|
* pxar extract: correctly honor the overwrite flag
|
||||||
|
|
||||||
|
* api: datastore: add missing log context for prune to avoid a case where
|
||||||
|
the worker state being unknown after it finished.
|
||||||
|
|
||||||
|
* docs: add synopsis and basic docs for prune job configuration
|
||||||
|
|
||||||
|
* backup verification: handle manifest update errors as non-fatal to avoid
|
||||||
|
that the job fails, as we want to continue with verificating the rest to
|
||||||
|
ensure we uncover as much potential problems as possible.
|
||||||
|
|
||||||
|
* fix #4408: docs: add 'disaster recovery' section for tapes
|
||||||
|
|
||||||
|
* fix #6069: prune simulator: correctly handle schedules that mix both, a
|
||||||
|
range and a step size at once.

  * client: pxar: fix a race condition where the backup upload stream can miss
    an error from the create archive function, because the error state is only
    set after the backup stream was already polled. This avoids an edge case
    where a file-based backup was incorrectly marked as having succeeded while
    there was an error.

 -- Proxmox Support Team <support@proxmox.com>  Tue, 11 Feb 2025 20:24:27 +0100

rust-proxmox-backup (3.3.2-2) bookworm; urgency=medium

  * file-restore: fix regression with the new blockdev method used to pass
    disks of a backup to the isolated virtual machine.

 -- Proxmox Support Team <support@proxmox.com>  Tue, 10 Dec 2024 12:14:47 +0100

rust-proxmox-backup (3.3.2-1) bookworm; urgency=medium

  * pbs-client: remove `log` dependency and migrate to our common,
    `tracing`-based, logging infrastructure. No semantic change intended.

  * file restore: switch to more modern blockdev option for drives in QEMU
    wrapper for the restore VM.

  * pxar: client: fix missing file size check for metadata comparison

 -- Proxmox Support Team <support@proxmox.com>  Mon, 09 Dec 2024 10:37:32 +0100

rust-proxmox-backup (3.3.1-1) bookworm; urgency=medium

  * tree-wide: add missing O_CLOEXEC flags to `openat` calls to avoid passing
    any open FD to new child processes, which can have undesired side-effects
    like keeping a lock open longer than it should.

  * cargo: update proxmox dependency of rest-server and sys crates to include
    some fixes for open FDs and a fix for the active task worker tracking, as
    on failing to update the index file the daemon did not finish the worker,
    causing a reference count issue where an old daemon could keep running
    forever.

  * ui: check that store is set before trying to select anything in the
    garbage collection (GC) job view.

 -- Proxmox Support Team <support@proxmox.com>  Tue, 03 Dec 2024 18:11:04 +0100

rust-proxmox-backup (3.3.0-2) bookworm; urgency=medium

  * tree-wide: fix various typos.

  * ui: fix remove vanished tooltip to be valid for both sync directions.

  * ui: mask unmounted datastores in datastore overview.

  * server: push: fix supported api version check for minor version bump.

 -- Proxmox Support Team <support@proxmox.com>  Thu, 28 Nov 2024 13:03:03 +0100

rust-proxmox-backup (3.3.0-1) bookworm; urgency=medium

  * GC: add safety-check for nested datastore

  * ui: make some more strings translatable

  * docs: make sphinx ignore the environment cache to avoid missing synopsis
    in some HTML output, like for example the "Command Syntax" appendix.

  * docs: add note on why FAT is not supported as a backing file system for
    datastores

  * api: disks: directory: fail if mount unit already exists for a new file
    system

  * filter partitions without a proper UUID in the partition selector

  * ui: version info: replace wrong hyphen separator with dot

 -- Proxmox Support Team <support@proxmox.com>  Wed, 27 Nov 2024 20:38:41 +0100

rust-proxmox-backup (3.2.14-1) bookworm; urgency=medium

  * pull-sync: do not interpret older missing snapshots as needs-resync

  * api: directory: use relative path when creating removable datastore

  * ui: prune keep input: actually clear value on clear trigger click

  * ui: datastore edit: fix empty-text for path field

  * sync: push: pass full error context when returning error to job

  * api: mount removable datastore: only log an informational message if the
    correct device is already mounted.

  * api: sync: restrict edit permissions for the new push sync jobs to avoid
    that a user is able to create or edit sync jobs in push direction, but not
    able to see them.

  * api: create datastore: fix checks to avoid that any datastore can contain
    another one, to better handle the case of the new removable datastores.

 -- Proxmox Support Team <support@proxmox.com>  Wed, 27 Nov 2024 14:42:56 +0100

rust-proxmox-backup (3.2.13-1) bookworm; urgency=medium

  * update pxar dependency to fix selective extraction with the newly
    supported match patterns.

  * reuse-datastore: avoid creating another prune job

  * api: notification: add API routes for webhook targets

  * management cli: add CLI for webhook targets

  * ui: utils: enable webhook edit window

  * ui: utils: add task description for mounting/unmounting

  * ui: add onlineHelp for consent-banner option

  * docs: client: fix example commands for client usage

  * docs: explain some further caveats of the change detection modes

  * ui: use same label for removable datastore created from disk

  * api: maintenance: allow setting of maintenance mode if 'unmounting'

  * docs: add more information for removable datastores

  * ui: sync jobs: revert to single list for pull/push jobs, improve
    distinction between push and pull jobs through other means.

  * ui: sync jobs: change default sorting to 'store' -> 'direction' -> 'id'

  * ui: sync jobs: add search filter-box

  * config: sync: use same config section type `sync` for push and pull. Note
    that this breaks existing configurations and needs manual clean-up. As
    these package versions never made it beyond the test repository this is
    acceptable: we give no guarantees for test package versions, and keeping
    the old style would not be ideal from a maintenance perspective either.

  * api: removable datastores: require Sys.Modify permission on /system/disks

  * ui: allow resetting unmounting maintenance

  * datastore: re-phrase error message when datastore is unavailable

  * client: backup writer: fix regression in progress output

 -- Proxmox Support Team <support@proxmox.com>  Tue, 26 Nov 2024 17:05:23 +0100

rust-proxmox-backup (3.2.12-1) bookworm; urgency=medium

  * fix #5853: client: pxar: exclude stale files on metadata/link read

  * docs: fix wrong product name in certificate docs

  * docs: explain the working principle of the change detection modes

  * allow datastore creation in a directory containing a lost+found directory

  * fix #5801: manager: switch datastore update command to a real API call to
    avoid early cancellation of the task.

  * server: push: consistently use remote over target for error messages and
    various smaller improvements to related log messages.

  * push: move log messages for removed snapshot/group

  * fix #5710: api: backup: stat known chunks on backup finish to ensure any
    problem/corruption is caught earlier.

  * pxar: extract: make invalid ACLs non-fatal and only log them; there is
    nothing to gain by failing the restore completely.

  * server: push: log encountered empty backup groups during sync

  * fix #3786: ui, api, cli: add resync-corrupt option to sync jobs

  * docs: add security implications of prune and change detection mode

  * fix #2996: client: backup restore: add support to pass match patterns for
    a selective restore

  * docs: add installation media preparation and installation wizard guides

  * api: enforce minimum character limit of 8 on new passwords to follow
    recent NIST recommendations.

  * ui, api: support configuring a consent banner that is shown before login
    to allow complying with some (government) policy frameworks.

  * ui, api: add initial support for removable datastores, providing better
    integration for datastores located on a non-permanently attached medium.

 -- Proxmox Support Team <support@proxmox.com>  Mon, 25 Nov 2024 22:52:11 +0100

rust-proxmox-backup (3.2.11-1) bookworm; urgency=medium

  * fix #3044: server: implement push support for sync operations

  * push sync related refactors

 -- Proxmox Support Team <support@proxmox.com>  Thu, 21 Nov 2024 12:03:50 +0100

rust-proxmox-backup (3.2.10-1) bookworm; urgency=medium

  * api: disk list: do not fail but just log error on gathering smart data

  * cargo: require proxmox-log 0.2.6 to reduce spamming the logs with the
    whole worker task contents

 -- Proxmox Support Team <support@proxmox.com>  Tue, 19 Nov 2024 22:36:14 +0100

rust-proxmox-backup (3.2.9-1) bookworm; urgency=medium

  * client: catalog: fallback to metadata archives for dumping the catalog

  * client: catalog shell: make the catalog optional and use the pxar accessor
    for navigation if the catalog is not provided, as is the case, for
    example, for split pxar archives.

  * client: catalog shell: drop payload offset in `stat` output, as this is an
    internal value that only helps when debugging specific development issues.

  * sync: fix premature return in snapshot-skip filter logic to avoid that the
    first snapshot newer than the last synced one gets unconditionally
    included.

  * fix #5861: ui: remove minimum required username length in dialog for
    changing the owner of a backup group, as PBS has supported usernames
    shorter than 4 characters for a while now.

  * fix #5439: allow one to reuse an existing datastore on datastore creation

  * ui: disallow creating a datastore in the file system root; this is almost
    never what users want, and they can still use the CLI for such an edge
    case.

  * fix #5233: api: tape: add explicit required permissions for the move tape,
    update tape and destroy tape endpoints, requiring Tape.Modify and
    Tape.Write on the `/tape` ACL object path, respectively. This avoids
    requiring the use of the root account for basic tape management.

  * client: catalog shell: make the root element its own parent to avoid
    navigating below the archive root, which makes no sense and just causes
    odd glitches.

  * api: disk management: avoid retrieving lsblk result twice when listing
    disks; while not overly expensive, it certainly does not help performance
    either.

  * api: disk management: parallelize retrieving the output from smartctl
    checks.

  * fix #5600: pbs2to3: make check more flexible to allow one to run arbitrary
    newer '-pve' kernels after upgrade

  * client: pxar: perform match pattern check for exclusion only once

  * client: pxar: add debug output for exclude pattern matches to more
    conveniently debug possible issues.

  * fix #5868: rest-server: handshake detection: avoid infinite loop on
    connection abort

 -- Proxmox Support Team <support@proxmox.com>  Thu, 14 Nov 2024 16:10:10 +0100

rust-proxmox-backup (3.2.8-1) bookworm; urgency=medium

  * switch various log statements in worker tasks to the newer, more flexible
    proxmox log crate. With this change, errors from task logs are now also
    logged to the system log, increasing their visibility.

  * datastore api: list snapshots: avoid calculating the protected attribute
    twice per snapshot; this reduces the amount of file metadata requests.

  * avoid re-calculating the backup snapshot path's date time component when
    getting the full path, reducing calls to the relatively slow strftime
    function from libc.

  * fix #3699: client: prefer the XDG cache directory for temporary files with
    a fallback to using /tmp, as before.

  * sync job: improve log message for when syncing the root namespace.

  * client: increase read buffer from 8 KiB to 4 MiB for raw image based
    backups. This reduces the time spent polling between the reader, chunker
    and uploader async tasks and thus can improve backup speed significantly,
    especially on setups with fast network and storage.

  * client benchmark: avoid unnecessary allocation in the AES benchmark that
    caused artificial overhead. The AES benchmark results should now be more
    in line with the hardware capability and what the PBS client could already
    do. On our test system we saw an increase by a factor of 2.3 on this
    specific benchmark.

  * docs: add external metrics server page

  * tfa: webauthn: serialize OriginUrl following RFC 6454

  * factor out apt and apt-repository handling into a new library crate for
    re-use in other projects. There should be no functional change.

  * fix various typos all over the place found using the Rust based `typos`
    tool.

  * datastore: data blob compression: increase compression throughput by
    switching away from a higher level zstd method to a lower level one, which
    allows us to control the target buffer size directly and thus avoid some
    allocation and syscall overhead. We saw the compression bandwidth increase
    by a factor of 1.19 in our tests, where both the source data and the
    target datastore were located in a memory backed tmpfs.

  * daily-update: ensure notification system context is initialized.

  * backup reader: derive whether debug messages should be printed from the
    global log level. This avoids printing some debug messages by default,
    e.g., the "protocol upgrade done" message from sync jobs.

  * ui: user view: disable 'Unlock TFA' button by default to improve UX if no
    user is selected.

  * manager cli: ensure the worker task finishes when triggering a reload of
    the system network.

  * fix #5622: backup client: properly handle rate and burst parameters.
    Previously, passing any non-integer value, like `1mb`, was ignored.
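    As an illustrative sketch only (option names assumed from this entry;
    check `proxmox-backup-client backup --help` for the exact parameters and
    accepted size suffixes), a rate-limited backup could look like:

      # proxmox-backup-client backup root.pxar:/ --rate 100M --burst 500M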

  * tape: read element status: ignore responses where the library specifies
    that it will return a volume tag but then does not include that field in
    the actual response. As both the primary and the alternative volume tag
    are not required by PBS, this specific error can simply be downgraded to
    a warning.

  * pxar: dump archive: print entries to stdout instead of stderr

  * sync jobs: various clean-ups and refactoring that should not result in any
    semantic change.

  * metric collection: put metrics in a cache with a 30 minute lifetime.

  * api: add /status/metrics API to allow pull-based metric servers to gather
    data directly.
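    For illustration (assuming the usual `/api2/json` prefix, the default
    port 8007 and an API token with sufficient privileges; host and token
    below are placeholders), a pull-based collector could query:

      # curl -ks -H 'Authorization: PBSAPIToken=monitoring@pbs!metrics:SECRET' \
          https://pbs.example.com:8007/api2/json/status/metrics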

  * partial fix #5560: client: periodically show backup progress

  * docs: add proxmox-backup.node.cfg man page

  * docs: sync: explicitly mention `removed-vanish` flag

 -- Proxmox Support Team <support@proxmox.com>  Fri, 18 Oct 2024 19:05:41 +0200

rust-proxmox-backup (3.2.7-1) bookworm; urgency=medium

  * docs: drop blanket statement recommending against remote storage

debian/control
@@ -15,7 +15,6 @@ Build-Depends: bash-completion,
 libacl1-dev,
 libfuse3-dev,
 librust-anyhow-1+default-dev,
-librust-apt-pkg-native-0.3+default-dev (>= 0.3.2-~~),
 librust-async-trait-0.1+default-dev (>= 0.1.56-~~),
 librust-base64-0.13+default-dev,
 librust-bitflags-2+default-dev (>= 2.4-~~),
@@ -26,19 +25,18 @@ Build-Depends: bash-completion,
 librust-crossbeam-channel-0.5+default-dev,
 librust-endian-trait-0.6+arrays-dev,
 librust-endian-trait-0.6+default-dev,
-librust-env-logger-0.10+default-dev,
+librust-env-logger-0.11+default-dev,
-librust-flate2-1+default-dev,
 librust-foreign-types-0.3+default-dev,
 librust-futures-0.3+default-dev,
-librust-h2-0.3+default-dev,
-librust-h2-0.3+stream-dev,
-librust-handlebars-3+default-dev,
+librust-h2-0.4+default-dev,
+librust-h2-0.4+legacy-dev,
+librust-h2-0.4+stream-dev,
 librust-hex-0.4+default-dev (>= 0.4.3-~~),
 librust-hex-0.4+serde-dev (>= 0.4.3-~~),
-librust-http-0.2+default-dev,
+librust-hyper-0.14+backports-dev,
 librust-hyper-0.14+default-dev,
+librust-hyper-0.14+deprecated-dev,
 librust-hyper-0.14+full-dev,
-librust-lazy-static-1+default-dev (>= 1.4-~~),
 librust-libc-0.2+default-dev,
 librust-log-0.4+default-dev (>= 0.4.17-~~),
 librust-nix-0.26+default-dev (>= 0.26.1-~~),
@@ -47,69 +45,76 @@ Build-Depends: bash-completion,
 librust-once-cell-1+default-dev (>= 1.3.1-~~),
 librust-openssl-0.10+default-dev (>= 0.10.40-~~),
 librust-pathpatterns-0.3+default-dev,
+librust-pbs-api-types-0.2+default-dev (>= 0.2.2),
 librust-percent-encoding-2+default-dev (>= 2.1-~~),
 librust-pin-project-lite-0.2+default-dev,
-librust-proxmox-acme-0.5+default-dev,
+librust-proxmox-acme-0.5+default-dev (>= 0.5.3-~~),
-librust-proxmox-apt-0.10+default-dev (>= 0.10.5-~~),
+librust-proxmox-apt-0.11+cache-dev,
+librust-proxmox-apt-0.11+default-dev,
+librust-proxmox-apt-api-types-1+default-dev (>= 1.0.1-~~),
 librust-proxmox-async-0.4+default-dev,
 librust-proxmox-auth-api-0.4+api-dev,
-librust-proxmox-auth-api-0.4+api-types-dev,
 librust-proxmox-auth-api-0.4+default-dev,
 librust-proxmox-auth-api-0.4+pam-authenticator-dev,
 librust-proxmox-borrow-1+default-dev,
 librust-proxmox-compression-0.2+default-dev,
+librust-proxmox-config-digest-0.1+default-dev,
+librust-proxmox-daemon-0.1+default-dev,
 librust-proxmox-fuse-0.1+default-dev (>= 0.1.3-~~),
-librust-proxmox-http-0.9+client-dev,
+librust-proxmox-http-0.9+client-dev (>= 0.9.5-~~),
-librust-proxmox-http-0.9+client-trait-dev,
+librust-proxmox-http-0.9+client-trait-dev (>= 0.9.5-~~),
-librust-proxmox-http-0.9+default-dev,
+librust-proxmox-http-0.9+default-dev (>= 0.9.5-~~),
-librust-proxmox-http-0.9+http-helpers-dev,
+librust-proxmox-http-0.9+http-helpers-dev (>= 0.9.5-~~),
-librust-proxmox-http-0.9+proxmox-async-dev,
+librust-proxmox-http-0.9+proxmox-async-dev (>= 0.9.5-~~),
-librust-proxmox-http-0.9+rate-limited-stream-dev,
+librust-proxmox-http-0.9+rate-limited-stream-dev (>= 0.9.5-~~),
-librust-proxmox-http-0.9+rate-limiter-dev,
+librust-proxmox-http-0.9+rate-limiter-dev (>= 0.9.5-~~),
-librust-proxmox-http-0.9+websocket-dev,
+librust-proxmox-http-0.9+websocket-dev (>= 0.9.5-~~),
 librust-proxmox-human-byte-0.1+default-dev,
 librust-proxmox-io-1+default-dev (>= 1.0.1-~~),
 librust-proxmox-io-1+tokio-dev (>= 1.0.1-~~),
 librust-proxmox-lang-1+default-dev (>= 1.1-~~),
 librust-proxmox-ldap-0.2+default-dev (>= 0.2.1-~~),
+librust-proxmox-log-0.2+default-dev (>= 0.2.6-~~),
 librust-proxmox-metrics-0.3+default-dev (>= 0.3.1-~~),
-librust-proxmox-notify-0.4+default-dev,
+librust-proxmox-notify-0.5+default-dev (>= 0.5.1-~~),
-librust-proxmox-notify-0.4+pbs-context-dev,
+librust-proxmox-notify-0.5+pbs-context-dev (>= 0.5.1-~~),
 librust-proxmox-openid-0.10+default-dev,
-librust-proxmox-rest-server-0.5+default-dev (>= 0.5.1-~~),
+librust-proxmox-rest-server-0.8+default-dev (>= 0.8.9-~~),
-librust-proxmox-rest-server-0.5+rate-limited-stream-dev (>= 0.5.1-~~),
+librust-proxmox-rest-server-0.8+rate-limited-stream-dev (>= 0.8.9-~~),
-librust-proxmox-rest-server-0.5+templates-dev (>= 0.5.1-~~),
+librust-proxmox-rest-server-0.8+templates-dev (>= 0.8.9-~~),
-librust-proxmox-router-2+cli-dev,
+librust-proxmox-router-3+cli-dev,
-librust-proxmox-router-2+server-dev,
+librust-proxmox-router-3+server-dev,
-librust-proxmox-rrd-0.2+default-dev,
+librust-proxmox-rrd-0.4+default-dev,
+librust-proxmox-rrd-api-types-1+default-dev (>= 1.0.2-~~),
-librust-proxmox-schema-3+api-macro-dev,
-librust-proxmox-schema-3+default-dev,
+librust-proxmox-schema-4+api-macro-dev,
+librust-proxmox-schema-4+default-dev,
 librust-proxmox-section-config-2+default-dev,
 librust-proxmox-serde-0.1+default-dev (>= 0.1.1-~~),
 librust-proxmox-serde-0.1+serde-json-dev (>= 0.1.1-~~),
+librust-proxmox-shared-cache-0.1+default-dev,
 librust-proxmox-shared-memory-0.3+default-dev,
 librust-proxmox-sortable-macro-0.1+default-dev (>= 0.1.2-~~),
-librust-proxmox-subscription-0.4+api-types-dev (>= 0.4.2-~~),
+librust-proxmox-subscription-0.5+api-types-dev,
-librust-proxmox-subscription-0.4+default-dev (>= 0.4.2-~~),
+librust-proxmox-subscription-0.5+default-dev,
-librust-proxmox-sys-0.5+acl-dev (>= 0.5.7-~~),
+librust-proxmox-sys-0.6+acl-dev (>= 0.6.5-~~),
-librust-proxmox-sys-0.5+crypt-dev (>= 0.5.7-~~),
+librust-proxmox-sys-0.6+crypt-dev (>= 0.6.5-~~),
-librust-proxmox-sys-0.5+default-dev (>= 0.5.7-~~),
+librust-proxmox-sys-0.6+default-dev (>= 0.6.7-~~),
-librust-proxmox-sys-0.5+logrotate-dev (>= 0.5.7-~~),
+librust-proxmox-sys-0.6+logrotate-dev (>= 0.6.5-~~),
-librust-proxmox-sys-0.5+timer-dev (>= 0.5.7-~~),
+librust-proxmox-sys-0.6+timer-dev (>= 0.6.5-~~),
+librust-proxmox-systemd-0.1+default-dev,
-librust-proxmox-tfa-4+api-dev (>= 4.0.4-~~),
-librust-proxmox-tfa-4+api-types-dev (>= 4.0.4-~~),
-librust-proxmox-tfa-4+default-dev (>= 4.0.4-~~),
+librust-proxmox-tfa-5+api-dev,
+librust-proxmox-tfa-5+api-types-dev,
+librust-proxmox-tfa-5+default-dev,
 librust-proxmox-time-2+default-dev,
 librust-proxmox-uuid-1+default-dev,
 librust-proxmox-uuid-1+serde-dev,
+librust-proxmox-worker-task-0.1+default-dev,
-librust-pxar-0.12+default-dev,
+librust-pxar-0.12+default-dev (>= 0.12.1-~~),
 librust-regex-1+default-dev (>= 1.5.5-~~),
 librust-rustyline-9+default-dev,
 librust-serde-1+default-dev,
 librust-serde-1+derive-dev,
 librust-serde-json-1+default-dev,
-librust-serde-plain-1+default-dev,
-librust-siphasher-0.3+default-dev,
 librust-syslog-6+default-dev,
 librust-tar-0.4+default-dev,
 librust-termcolor-1+default-dev (>= 1.1.2-~~),
@@ -133,12 +138,14 @@ Build-Depends: bash-completion,
 librust-tokio-util-0.7+default-dev,
 librust-tokio-util-0.7+io-dev,
 librust-tower-service-0.3+default-dev,
+librust-tracing-0.1+default-dev,
 librust-udev-0.4+default-dev,
 librust-url-2+default-dev (>= 2.1-~~),
 librust-walkdir-2+default-dev,
 librust-xdg-2+default-dev (>= 2.2-~~),
 librust-zstd-0.12+bindgen-dev,
 librust-zstd-0.12+default-dev,
+librust-zstd-safe-6+default-dev,
 libsgutils2-dev,
 libstd-rust-dev,
 libsystemd-dev (>= 246-~~),
@@ -177,7 +184,7 @@ Depends: fonts-font-awesome,
 postfix | mail-transport-agent,
 proxmox-backup-docs,
 proxmox-mini-journalreader,
-proxmox-widget-toolkit (>= 4.1.4),
+proxmox-widget-toolkit (>= 4.3.3),
 pve-xtermjs (>= 4.7.0-1),
 sg3-utils,
 smartmontools,
@@ -198,6 +205,14 @@ Description: Proxmox Backup Client tools
 This package contains the Proxmox Backup client, which provides a
 simple command line tool to create and restore backups.

+Package: proxmox-backup-client-static
+Architecture: any
+Depends: qrencode, ${misc:Depends},
+Conflicts: proxmox-backup-client,
+Description: Proxmox Backup Client tools (statically linked)
+ This package contains the Proxmox Backup client, which provides a
+ simple command line tool to create and restore backups.
+
 Package: proxmox-backup-docs
 Build-Profiles: <!nodoc>
 Section: doc

debian/copyright
@@ -1,4 +1,4 @@
-Copyright (C) 2019 - 2024 Proxmox Server Solutions GmbH
+Copyright (C) 2019 - 2025 Proxmox Server Solutions GmbH

 This software is written by Proxmox Server Solutions GmbH <support@proxmox.com>

debian/postinst
@@ -20,15 +20,7 @@ case "$1" in
     # modeled after dh_systemd_start output
     systemctl --system daemon-reload >/dev/null || true
     if [ -n "$2" ]; then
-        if dpkg --compare-versions "$2" 'lt' '1.0.7-1'; then
-            # there was an issue with reloading and systemd being confused in older daemon versions
-            # so restart instead of reload if upgrading from there, see commit 0ec79339f7aebf9
-            # FIXME: remove with PBS 2.1
-            echo "Upgrading from older proxmox-backup-server: restart (not reload) daemons"
-            _dh_action=try-restart
-        else
-            _dh_action=try-reload-or-restart
-        fi
+        _dh_action=try-reload-or-restart
     else
         _dh_action=start
     fi
@@ -80,6 +72,11 @@ EOF
         update_sync_job "$prev_job"
       fi
     fi
+
+    if dpkg --compare-versions "$2" 'lt' '3.3.5~'; then
+        # ensure old locking is used by the daemon until a reboot happened
+        touch "/run/proxmox-backup/old-locking"
+    fi
   fi
   ;;

debian/proxmox-backup-client-static.bash-completion (new file)
@@ -0,0 +1,2 @@
+debian/proxmox-backup-client.bc proxmox-backup-client
+debian/pxar.bc pxar

debian/proxmox-backup-client-static.install (new file)
@@ -0,0 +1,4 @@
+usr/share/man/man1/proxmox-backup-client.1
+usr/share/man/man1/pxar.1
+usr/share/zsh/vendor-completions/_proxmox-backup-client
+usr/share/zsh/vendor-completions/_pxar

debian/proxmox-backup-file-restore.postinst
@@ -9,7 +9,7 @@ update_initramfs() {
     CACHE_PATH_DBG="/var/cache/proxmox-backup/file-restore-initramfs-debug.img"

     # cleanup first, in case proxmox-file-restore was uninstalled since we do
-    # not want an unuseable image lying around
+    # not want an unusable image lying around
     rm -f "$CACHE_PATH"

     if [ ! -f "$INST_PATH/initramfs.img" ]; then

debian/proxmox-backup-server.install
@@ -4,6 +4,7 @@ etc/proxmox-backup-daily-update.service /lib/systemd/system/
 etc/proxmox-backup-daily-update.timer /lib/systemd/system/
 etc/proxmox-backup-proxy.service /lib/systemd/system/
 etc/proxmox-backup.service /lib/systemd/system/
+etc/removable-device-attach@.service /lib/systemd/system/
 usr/bin/pmt
 usr/bin/pmtx
 usr/bin/proxmox-tape
@@ -30,34 +31,31 @@ usr/share/man/man5/acl.cfg.5
 usr/share/man/man5/datastore.cfg.5
 usr/share/man/man5/domains.cfg.5
 usr/share/man/man5/media-pool.cfg.5
-usr/share/man/man5/notifications.cfg.5
 usr/share/man/man5/notifications-priv.cfg.5
+usr/share/man/man5/notifications.cfg.5
+usr/share/man/man5/proxmox-backup.node.cfg.5
+usr/share/man/man5/prune.cfg.5
 usr/share/man/man5/remote.cfg.5
 usr/share/man/man5/sync.cfg.5
 usr/share/man/man5/tape-job.cfg.5
 usr/share/man/man5/tape.cfg.5
 usr/share/man/man5/user.cfg.5
 usr/share/man/man5/verification.cfg.5
-usr/share/zsh/vendor-completions/_pmt
-usr/share/zsh/vendor-completions/_pmtx
-usr/share/zsh/vendor-completions/_proxmox-backup-debug
-usr/share/zsh/vendor-completions/_proxmox-backup-manager
-usr/share/zsh/vendor-completions/_proxmox-tape
 usr/share/proxmox-backup/templates/default/acme-err-body.txt.hbs
 usr/share/proxmox-backup/templates/default/acme-err-subject.txt.hbs
 usr/share/proxmox-backup/templates/default/gc-err-body.txt.hbs
-usr/share/proxmox-backup/templates/default/gc-ok-body.txt.hbs
 usr/share/proxmox-backup/templates/default/gc-err-subject.txt.hbs
+usr/share/proxmox-backup/templates/default/gc-ok-body.txt.hbs
 usr/share/proxmox-backup/templates/default/gc-ok-subject.txt.hbs
 usr/share/proxmox-backup/templates/default/package-updates-body.txt.hbs
 usr/share/proxmox-backup/templates/default/package-updates-subject.txt.hbs
 usr/share/proxmox-backup/templates/default/prune-err-body.txt.hbs
-usr/share/proxmox-backup/templates/default/prune-ok-body.txt.hbs
 usr/share/proxmox-backup/templates/default/prune-err-subject.txt.hbs
+usr/share/proxmox-backup/templates/default/prune-ok-body.txt.hbs
 usr/share/proxmox-backup/templates/default/prune-ok-subject.txt.hbs
 usr/share/proxmox-backup/templates/default/sync-err-body.txt.hbs
-usr/share/proxmox-backup/templates/default/sync-ok-body.txt.hbs
 usr/share/proxmox-backup/templates/default/sync-err-subject.txt.hbs
+usr/share/proxmox-backup/templates/default/sync-ok-body.txt.hbs
 usr/share/proxmox-backup/templates/default/sync-ok-subject.txt.hbs
 usr/share/proxmox-backup/templates/default/tape-backup-err-body.txt.hbs
 usr/share/proxmox-backup/templates/default/tape-backup-err-subject.txt.hbs
@@ -66,9 +64,13 @@ usr/share/proxmox-backup/templates/default/tape-backup-ok-subject.txt.hbs
 usr/share/proxmox-backup/templates/default/tape-load-body.txt.hbs
 usr/share/proxmox-backup/templates/default/tape-load-subject.txt.hbs
 usr/share/proxmox-backup/templates/default/test-body.txt.hbs
-usr/share/proxmox-backup/templates/default/test-body.html.hbs
 usr/share/proxmox-backup/templates/default/test-subject.txt.hbs
 usr/share/proxmox-backup/templates/default/verify-err-body.txt.hbs
-usr/share/proxmox-backup/templates/default/verify-ok-body.txt.hbs
 usr/share/proxmox-backup/templates/default/verify-err-subject.txt.hbs
+usr/share/proxmox-backup/templates/default/verify-ok-body.txt.hbs
 usr/share/proxmox-backup/templates/default/verify-ok-subject.txt.hbs
+usr/share/zsh/vendor-completions/_pmt
+usr/share/zsh/vendor-completions/_pmtx
+usr/share/zsh/vendor-completions/_proxmox-backup-debug
+usr/share/zsh/vendor-completions/_proxmox-backup-manager
+usr/share/zsh/vendor-completions/_proxmox-tape

debian/proxmox-backup-server.udev
@@ -16,3 +16,6 @@ SUBSYSTEM=="scsi_generic", SUBSYSTEMS=="scsi", ATTRS{type}=="1", ENV{ID_SCSI_SER
 SYMLINK+="tape/by-id/scsi-$env{ID_SCSI_SERIAL}-sg"

 LABEL="persistent_storage_tape_end"
+
+# triggers the mounting of a removable device
+ACTION=="add", SUBSYSTEM=="block", ENV{ID_FS_UUID}!="", TAG+="systemd", ENV{SYSTEMD_WANTS}="removable-device-attach@$env{ID_FS_UUID}"

debian/rules
@@ -28,6 +28,11 @@ override_dh_auto_configure:
 	@perl -ne 'if (/^version\s*=\s*"(\d+(?:\.\d+)+)"/) { my $$v_cargo = $$1; my $$v_deb = "$(DEB_VERSION_UPSTREAM)"; \
 	    die "ERROR: d/changelog <-> Cargo.toml version mismatch: $$v_cargo != $$v_deb\n" if $$v_cargo ne $$v_deb; exit(0); }' Cargo.toml
 	$(CARGO) prepare-debian $(CURDIR)/debian/cargo_registry --link-from-system
+	# `cargo build` and `cargo install` have different config precedence, symlink
+	# the wrapper config into a place where `build` picks it up as well..
+	# https://doc.rust-lang.org/cargo/commands/cargo-install.html#configuration-discovery
+	mkdir -p .cargo
+	ln -s $(CARGO_HOME)/config.toml $(CURDIR)/.cargo/config.toml
 	dh_auto_configure

 override_dh_auto_build:
@@ -42,6 +47,9 @@ override_dh_auto_install:
 	dh_auto_install -- \
 	    PROXY_USER=backup \
 	    LIBDIR=/usr/lib/$(DEB_HOST_MULTIARCH)
+	mkdir -p debian/proxmox-backup-client-static/usr/bin
+	mv debian/tmp/usr/bin/proxmox-backup-client-static debian/proxmox-backup-client-static/usr/bin/proxmox-backup-client
+	mv debian/tmp/usr/bin/pxar-static debian/proxmox-backup-client-static/usr/bin/pxar

 override_dh_installsystemd:
 	dh_installsystemd -pproxmox-backup-server proxmox-backup-daily-update.timer

@@ -1,59 +1,65 @@
 include ../defines.mk

 GENERATED_SYNOPSIS := \
-	proxmox-tape/synopsis.rst \
-	proxmox-backup-client/synopsis.rst \
-	proxmox-backup-client/catalog-shell-synopsis.rst \
-	proxmox-backup-manager/synopsis.rst \
-	proxmox-backup-debug/synopsis.rst \
-	proxmox-file-restore/synopsis.rst \
-	pxar/synopsis.rst \
-	pmtx/synopsis.rst \
-	pmt/synopsis.rst \
-	config/media-pool/config.rst \
-	config/notifications/config.rst \
-	config/notifications-priv/config.rst \
-	config/tape/config.rst \
-	config/tape-job/config.rst \
-	config/user/config.rst \
-	config/remote/config.rst \
-	config/sync/config.rst \
-	config/verification/config.rst \
 	config/acl/roles.rst \
 	config/datastore/config.rst \
-	config/domains/config.rst
+	config/domains/config.rst \
+	config/media-pool/config.rst \
+	config/notifications-priv/config.rst \
+	config/notifications/config.rst \
+	config/remote/config.rst \
+	config/sync/config.rst \
+	config/tape-job/config.rst \
+	config/tape/config.rst \
+	config/user/config.rst \
+	config/verification/config.rst \
+	config/prune/config.rst \
+	pmt/synopsis.rst \
+	pmtx/synopsis.rst \
+	proxmox-backup-client/catalog-shell-synopsis.rst \
+	proxmox-backup-client/synopsis.rst \
+	proxmox-backup-debug/synopsis.rst \
+	proxmox-backup-manager/synopsis.rst \
+	proxmox-file-restore/synopsis.rst \
+	proxmox-tape/synopsis.rst \
+	pxar/synopsis.rst \

 MAN1_PAGES := \
-	pxar.1 \
-	pmtx.1 \
-	pmt.1 \
-	proxmox-tape.1 \
-	proxmox-backup-proxy.1 \
-	proxmox-backup-client.1 \
-	proxmox-backup-manager.1 \
-	proxmox-file-restore.1 \
-	proxmox-backup-debug.1 \
 	pbs2to3.1 \
+	pmt.1 \
+	pmtx.1 \
+	proxmox-backup-client.1 \
+	proxmox-backup-debug.1 \
+	proxmox-backup-manager.1 \
+	proxmox-backup-proxy.1 \
+	proxmox-file-restore.1 \
+	proxmox-tape.1 \
+	pxar.1 \

+# FIXME: prefix all man pages that are not directly relating to an existing executable with
+# `proxmox-backup.`, like the newer added proxmox-backup.node.cfg but add backwards compatible
+# symlinks, e.g. with a "5pbs" man page "suffix section".
 MAN5_PAGES := \
-	media-pool.cfg.5 \
-	tape.cfg.5 \
-	tape-job.cfg.5 \
 	acl.cfg.5 \
-	user.cfg.5 \
-	remote.cfg.5 \
-	sync.cfg.5 \
-	verification.cfg.5 \
 	datastore.cfg.5 \
 	domains.cfg.5 \
-	notifications.cfg.5 \
+	media-pool.cfg.5 \
+	proxmox-backup.node.cfg.5 \
 	notifications-priv.cfg.5 \
+	notifications.cfg.5 \
+	remote.cfg.5 \
+	sync.cfg.5 \
+	tape-job.cfg.5 \
+	tape.cfg.5 \
+	user.cfg.5 \
+	verification.cfg.5 \
+	prune.cfg.5 \

 PRUNE_SIMULATOR_FILES := \
 	prune-simulator/index.html \
-	prune-simulator/documentation.html \
 	prune-simulator/clear-trigger.png \
-	prune-simulator/prune-simulator.js
+	prune-simulator/documentation.html \
+	prune-simulator/prune-simulator.js \

 PRUNE_SIMULATOR_JS_SOURCE := \
 	/usr/share/javascript/proxmox-widget-toolkit-dev/Toolkit.js \
@@ -85,7 +91,7 @@ API_VIEWER_FILES := \
 	/usr/share/javascript/proxmox-widget-toolkit-dev/APIViewer.js \

 # Sphinx documentation setup
-SPHINXOPTS =
+SPHINXOPTS = -E
 SPHINXBUILD = sphinx-build
 BUILDDIR = output

@@ -1,3 +1,5 @@
+.. _client_usage:
+
 Backup Client Usage
 ===================

@@ -44,6 +46,24 @@ user\@pbs!token@host:store ``user@pbs!token`` host:8007 store
 [ff80::51]:1234:mydatastore ``root@pam`` [ff80::51]:1234 mydatastore
 ================================ ================== ================== ===========

+
+.. _statically_linked_client:
+
+Statically Linked Backup Client
+-------------------------------
+
+A statically linked version of the Proxmox Backup client is available for Linux
+based systems where the regular client is not available. Please note that it is
+recommended to use the regular client when possible, as the statically linked
+client is not a full replacement. For example, name resolution will not be
+performed via the mechanisms provided by libc, but uses a resolver written
+purely in the Rust programming language. Therefore, features and modules
+provided by Name Service Switch cannot be used.
+
+The statically linked client is available via the ``pbs-client`` repository as
+described in the :ref:`installation <install_pbc>` section.
+
+.. _environment-variables:
+
 Environment Variables
 ---------------------

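As a hedged usage note for the statically linked client added in the hunk above
(the package name is taken from the debian/control change in this changeset and
it conflicts with the regular proxmox-backup-client package), installation from
a configured pbs-client repository could look like:

  # apt update && apt install proxmox-backup-client-static
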
@ -89,6 +109,43 @@ Environment Variables
|
|||||||
you can add arbitrary comments after the first newline.
|
you can add arbitrary comments after the first newline.
|
||||||
|
|
||||||
|
|
||||||
|
System and Service Credentials
|
||||||
|
------------------------------
|
||||||
|
|
||||||
|
Some of the :ref:`environment variables <environment-variables>` above can be
|
||||||
|
set using `system and service credentials <https://systemd.io/CREDENTIALS/>`_
|
||||||
|
instead.
|
||||||
|
|
||||||
|
============================ ==============================================
|
||||||
|
Environment Variable Credential Name Equivalent
|
||||||
|
============================ ==============================================
|
||||||
|
``PBS_REPOSITORY`` ``proxmox-backup-client.repository``
|
||||||
|
``PBS_PASSWORD`` ``proxmox-backup-client.password``
|
||||||
|
``PBS_ENCRYPTION_PASSWORD`` ``proxmox-backup-client.encryption-password``
|
||||||
|
``PBS_FINGERPRINT`` ``proxmox-backup-client.fingerprint``
|
||||||
|
============================ ==============================================
|
||||||
|
|
||||||
|
For example, the repository password can be stored in an encrypted file as
|
||||||
|
follows:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# systemd-ask-password -n | systemd-creds encrypt --name=proxmox-backup-client.password - my-api-token.cred
|
||||||
|
|
||||||
|
The credential can then be reused inside of unit files or in a transient scope
|
||||||
|
unit as follows:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# systemd-run --pipe --wait \
|
||||||
|
--property=LoadCredentialEncrypted=proxmox-backup-client.password:/full/path/to/my-api-token.cred \
|
||||||
|
--property=SetCredential=proxmox-backup-client.repository:'my_default_repository' \
|
||||||
|
proxmox-backup-client ...
|
||||||
|
|
||||||
|
Additionally, system credentials (e.g. passed down from the hypervisor to a
|
||||||
|
virtual machine via SMBIOS type 11) can be loaded on a service via
|
||||||
|
`LoadCredential=` as described in the manual page ``systemd.exec(5)``.
|
||||||
|
|
||||||
Output Format
|
Output Format
|
||||||
-------------
|
-------------
|
||||||
|
|
||||||
@ -169,6 +226,7 @@ the client. The format is:
|
|||||||
|
|
||||||
<archive-name>.<type>:<source-path>
|
<archive-name>.<type>:<source-path>
|
||||||
|
|
||||||
|
The ``archive-name`` must contain alphanumerics, hyphens and underscores only.
|
||||||
Common types are ``.pxar`` for file archives and ``.img`` for block
|
Common types are ``.pxar`` for file archives and ``.img`` for block
|
||||||
device images. To create a backup of a block device, run the following command:
|
device images. To create a backup of a block device, run the following command:
|
||||||
|
|
||||||
@ -272,13 +330,13 @@ parameter. For example:
|
|||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
# proxmox-backup-client backup.pxar:./linux --exclude /usr
|
# proxmox-backup-client backup archive-name.pxar:./linux --exclude /usr
|
||||||
|
|
||||||
Multiple paths can be excluded like this:
|
Multiple paths can be excluded like this:
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
# proxmox-backup-client backup.pxar:./linux --exclude=/usr --exclude=/rust
|
# proxmox-backup-client backup archive-name.pxar:./linux --exclude=/usr --exclude=/rust
|
||||||
|
|
||||||
.. _client_change_detection_mode:
|
.. _client_change_detection_mode:
|
||||||
|
|
||||||
@ -295,30 +353,22 @@ therefore deduplicated). If the backed up files are largely unchanged,
|
|||||||
re-reading and then detecting the corresponding chunks don't need to be uploaded
|
re-reading and then detecting the corresponding chunks don't need to be uploaded
|
||||||
after all is time consuming and undesired.
|
after all is time consuming and undesired.
|
||||||
|
|
||||||
The backup client's `change-detection-mode` can be switched from default to
|
The backup client's ``change-detection-mode`` can be switched from default to
|
||||||
`metadata` based detection to reduce limitations as described above, instructing
|
``metadata`` based detection to reduce limitations as described above,
|
||||||
the client to avoid re-reading files with unchanged metadata whenever possible.
|
instructing the client to avoid re-reading files with unchanged metadata
|
||||||
|
whenever possible.
|
||||||
When using this mode, instead of the regular pxar archive, the backup snapshot
|
When using this mode, instead of the regular pxar archive, the backup snapshot
|
||||||
is stored into two separate files: the `mpxar` containing the archive's metadata
|
is stored into two separate files: the ``mpxar`` containing the archive's
|
||||||
and the `ppxar` containing a concatenation of the file contents. This splitting
|
metadata and the ``ppxar`` containing a concatenation of the file contents. This
|
||||||
allows for efficient metadata lookups.
|
splitting allows for efficient metadata lookups. When creating the backup
|
||||||
|
archives, the current file metadata is compared to the one looked up in the
|
||||||
|
previous ``mpxar`` archive. The operational details are explained more in depth
|
||||||
|
in the :ref:`technical documentation <change-detection-mode-metadata>`.
|
||||||
|
|
||||||
Using the `change-detection-mode` set to `data` allows to create the same split
|
Using the ``change-detection-mode`` set to ``data`` allows to create the same
|
||||||
archive as when using the `metadata` mode, but without using a previous
|
split archive as when using the ``metadata`` mode, but without using a previous
|
||||||
reference and therefore reencoding all file payloads.
|
reference and therefore reencoding all file payloads. For details of this mode
|
||||||
When creating the backup archives, the current file metadata is compared to the
|
please see the :ref:`technical documentation <change-detection-mode-data>`.
|
||||||
one looked up in the previous `mpxar` archive.
|
|
||||||
The metadata comparison includes file size, file type, ownership and permission
|
|
||||||
information, as well as acls and attributes and most importantly the file's
|
|
||||||
mtime, for details see the
|
|
||||||
:ref:`pxar metadata archive format <pxar-meta-format>`.
|
|
||||||
|
|
||||||
If unchanged, the entry is cached for possible re-use of content chunks without
|
|
||||||
re-reading, by indexing the already present chunks containing the contents from
|
|
||||||
the previous backup snapshot. Since the file might only partially re-use chunks
|
|
||||||
(thereby introducing wasted space in the form of padding), the decision whether
|
|
||||||
to re-use or re-encode the currently cached entries is postponed to when enough
|
|
||||||
information is available, comparing the possible padding to a threshold value.
|
|
||||||
|
|
||||||
.. _client_change_detection_mode_table:
|
.. _client_change_detection_mode_table:
|
||||||
|
|
||||||
@ -337,7 +387,7 @@ mode:
|
|||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
# proxmox-backup-client backup.pxar:./linux --change-detection-mode=metadata
|
# proxmox-backup-client backup archive-name.pxar:./linux --change-detection-mode=metadata
|
||||||
|
|
||||||
.. _client_encryption:
|
.. _client_encryption:
|
||||||
|
|
||||||
@ -478,6 +528,8 @@ version of your master key. The following command sends the output of the
|
|||||||
proxmox-backup-client key paperkey --output-format text > qrkey.txt
|
proxmox-backup-client key paperkey --output-format text > qrkey.txt
|
||||||
|
|
||||||
|
|
||||||
|
.. _client_restoring_data:
|
||||||
|
|
||||||
Restoring Data
|
Restoring Data
|
||||||
--------------
|
--------------
|
||||||
|
|
||||||
@ -789,29 +841,25 @@ Garbage Collection
|
|||||||
------------------
|
------------------
|
||||||
|
|
||||||
The ``prune`` command removes only the backup index files, not the data
|
The ``prune`` command removes only the backup index files, not the data
|
||||||
from the datastore. This task is left to the garbage collection
|
from the datastore. Deletion of unused backup data from the datastore is done by
|
||||||
command. It is recommended to carry out garbage collection on a regular basis.
|
:ref:`garbage collection<_maintenance_gc>`. It is therefore recommended to
|
||||||
|
schedule garbage collection tasks on a regular basis. The working principle of
|
||||||
|
garbage collection is described in more details in the related :ref:`background
|
||||||
|
section <gc_background>`.
|
||||||
|
|
||||||
The garbage collection works in two phases. In the first phase, all
|
To start garbage collection from the client side, run the following command:
|
||||||
data blocks that are still in use are marked. In the second phase,
|
|
||||||
unused data blocks are removed.
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-client garbage-collect
|
||||||
|
|
||||||
.. note:: This command needs to read all existing backup index files
|
.. note:: This command needs to read all existing backup index files
|
||||||
and touches the complete chunk-store. This can take a long time
|
and touches the complete chunk-store. This can take a long time
|
||||||
depending on the number of chunks and the speed of the underlying
|
depending on the number of chunks and the speed of the underlying
|
||||||
disks.
|
disks.
|
||||||
|
|
||||||
.. note:: The garbage collection will only remove chunks that haven't been used
|
The progress of the garbage collection will be displayed as shown in the example
|
||||||
for at least one day (exactly 24h 5m). This grace period is necessary because
|
below:
|
||||||
chunks in use are marked by touching the chunk which updates the ``atime``
|
|
||||||
(access time) property. Filesystems are mounted with the ``relatime`` option
|
|
||||||
by default. This results in a better performance by only updating the
|
|
||||||
``atime`` property if the last access has been at least 24 hours ago. The
|
|
||||||
downside is that touching a chunk within these 24 hours will not always
|
|
||||||
update its ``atime`` property.
|
|
||||||
|
|
||||||
Chunks in the grace period will be logged at the end of the garbage
|
|
||||||
collection task as *Pending removals*.
|
|
||||||
|
|
||||||
.. code-block:: console
|
.. code-block:: console
|
||||||
|
|
||||||
|
@ -44,10 +44,8 @@ web-interface/API or using the ``proxmox-backup-manager`` CLI tool.
|
|||||||
Upload Custom Certificate
|
Upload Custom Certificate
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
If you already have a certificate which you want to use for a Proxmox
|
If you already have a certificate which you want to use for a `Proxmox Backup`_
|
||||||
Mail Gateway host, you can simply upload that certificate over the web
|
host, you can simply upload that certificate over the web interface.
|
||||||
interface.
|
|
||||||
|
|
||||||
|
|
||||||
.. image:: images/screenshots/pbs-gui-certs-upload-custom.png
|
.. image:: images/screenshots/pbs-gui-certs-upload-custom.png
|
||||||
:target: _images/pbs-gui-certs-upload-custom.png
|
:target: _images/pbs-gui-certs-upload-custom.png
|
||||||
|
@ -71,7 +71,7 @@ master_doc = 'index'
|
|||||||
|
|
||||||
# General information about the project.
|
# General information about the project.
|
||||||
project = 'Proxmox Backup'
|
project = 'Proxmox Backup'
|
||||||
copyright = '2019-2023, Proxmox Server Solutions GmbH'
|
copyright = '2019-2025, Proxmox Server Solutions GmbH'
|
||||||
author = 'Proxmox Support Team'
|
author = 'Proxmox Support Team'
|
||||||
|
|
||||||
# The version info for the project you're documenting acts as a replacement for
|
# The version info for the project you're documenting acts as a replacement for
|
||||||
@ -108,12 +108,14 @@ man_pages = [
    ('config/datastore/man5', 'datastore.cfg', 'Datastore Configuration', [author], 5),
    ('config/domains/man5', 'domains.cfg', 'Realm Configuration', [author], 5),
    ('config/media-pool/man5', 'media-pool.cfg', 'Media Pool Configuration', [author], 5),
    ('config/node/man5', 'proxmox-backup.node.cfg', 'Proxmox Backup Server - Node Configuration', [author], 5),
    ('config/remote/man5', 'remote.cfg', 'Remote Server Configuration', [author], 5),
    ('config/sync/man5', 'sync.cfg', 'Synchronization Job Configuration', [author], 5),
    ('config/tape-job/man5', 'tape-job.cfg', 'Tape Job Configuration', [author], 5),
    ('config/tape/man5', 'tape.cfg', 'Tape Drive and Changer Configuration', [author], 5),
    ('config/user/man5', 'user.cfg', 'User Configuration', [author], 5),
    ('config/verification/man5', 'verification.cfg', 'Verification Job Configuration', [author], 5),
    ('config/prune/man5', 'prune.cfg', 'Prune Job Configuration', [author], 5),
    ('config/notifications/man5', 'notifications.cfg', 'Notification target/matcher configuration', [author], 5),
    ('config/notifications-priv/man5', 'notifications-priv.cfg', 'Notification target secrets', [author], 5),
]
docs/config/node/format.rst (new file, 49 lines)
@ -0,0 +1,49 @@

The file contains these options:

:acme: The ACME account to use on this node.

:acmedomain0: ACME domain.

:acmedomain1: ACME domain.

:acmedomain2: ACME domain.

:acmedomain3: ACME domain.

:acmedomain4: ACME domain.

:http-proxy: Set proxy for apt and subscription checks.

:email-from: Fallback email from which notifications will be sent.

:ciphers-tls-1.3: List of TLS ciphers for TLS 1.3 that will be used by the proxy. Colon-separated and in descending priority (https://docs.openssl.org/master/man1/openssl-ciphers/). (Proxy has to be restarted for changes to take effect.)

:ciphers-tls-1.2: List of TLS ciphers for TLS <= 1.2 that will be used by the proxy. Colon-separated and in descending priority (https://docs.openssl.org/master/man1/openssl-ciphers/). (Proxy has to be restarted for changes to take effect.)

:default-lang: Default language used in the GUI.

:description: Node description.

:task-log-max-days: Maximum days to keep task logs.

For example:

::

  acme: local
  acmedomain0: first.domain.com
  acmedomain1: second.domain.com
  acmedomain2: third.domain.com
  acmedomain3: fourth.domain.com
  acmedomain4: fifth.domain.com
  http-proxy: internal.proxy.com
  email-from: proxmox@mail.com
  ciphers-tls-1.3: TLS_AES_128_GCM_SHA256:TLS_AES_128_CCM_8_SHA256:TLS_CHACHA20_POLY1305_SHA256
  ciphers-tls-1.2: RSA_WITH_AES_128_CCM:DHE_RSA_WITH_AES_128_CCM
  default-lang: en
  description: Primary PBS instance
  task-log-max-days: 30


You can use the ``proxmox-backup-manager node`` command to manipulate
this file.
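As a sketch of what that could look like, assuming the usual ``show``/``update`` subcommands with options named after the configuration keys above (check ``proxmox-backup-manager node help`` on your system for the authoritative syntax):

.. code-block:: console

   # proxmox-backup-manager node show
   # proxmox-backup-manager node update --default-lang en --task-log-max-days 30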
docs/config/node/man5.rst (new file, 18 lines)
@ -0,0 +1,18 @@

:orphan:

========
node.cfg
========

Description
===========

The file /etc/proxmox-backup/node.cfg is a configuration file for Proxmox
Backup Server. It contains the general configuration regarding this node.

Options
=======

.. include:: format.rst

.. include:: ../../pbs-copyright.rst
@ -8,7 +8,7 @@ Description
===========

The file /etc/proxmox-backup/notifications-priv.cfg is a configuration file
for Proxmox Backup Server. It contains the configuration for the
notification system.

File Format
@ -8,7 +8,7 @@ Description
===========

The file /etc/proxmox-backup/notifications.cfg is a configuration file
for Proxmox Backup Server. It contains the configuration for the
notification system.

File Format
docs/config/prune/format.rst (new file, 14 lines)
@ -0,0 +1,14 @@

Each entry starts with the header ``prune: <name>``, followed by the job
configuration options.

::

  prune: prune-store2
        schedule mon..fri 10:30
        store my-datastore

  prune: ...


You can use the ``proxmox-backup-manager prune-job`` command to manipulate this
file.
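For illustration, a job equivalent to the example above could be created from the command line roughly like this; the option names are assumed to mirror the configuration keys, and the ``keep-last`` retention option is added purely as an example:

.. code-block:: console

   # proxmox-backup-manager prune-job create prune-store2 \
       --store my-datastore --schedule 'mon..fri 10:30' --keep-last 3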
docs/config/prune/man5.rst (new file, 23 lines)
@ -0,0 +1,23 @@

:orphan:

=========
prune.cfg
=========

Description
===========

The file /etc/proxmox-backup/prune.cfg is a configuration file for Proxmox
Backup Server. It contains the prune job configuration.

File Format
===========

.. include:: format.rst

Options
=======

.. include:: config.rst

.. include:: ../../pbs-copyright.rst
@ -7,8 +7,8 @@ verification.cfg
Description
===========

The file /etc/proxmox-backup/verification.cfg is a configuration file for
Proxmox Backup Server. It contains the verification job configuration.

File Format
===========
@ -67,6 +67,14 @@ Options

.. include:: config/media-pool/config.rst


``node.cfg``
~~~~~~~~~~~~

Options
^^^^^^^

.. include:: config/node/format.rst


.. _notifications.cfg:

``notifications.cfg``
@ -100,6 +108,21 @@ Options
.. include:: config/notifications-priv/config.rst


``prune.cfg``
~~~~~~~~~~~~~

File Format
^^^^^^^^^^^

.. include:: config/prune/format.rst


Options
^^^^^^^

.. include:: config/prune/config.rst


``tape.cfg``
~~~~~~~~~~~~
docs/external-metric-server.rst (new file, 55 lines)
@ -0,0 +1,55 @@

External Metric Server
----------------------

Proxmox Backup Server periodically sends various metrics about your host's memory,
network and disk activity to configured external metric servers.

Currently supported are:

* InfluxDB (HTTP) (see https://docs.influxdata.com/influxdb/v2/ )
* InfluxDB (UDP) (see https://docs.influxdata.com/influxdb/v1/ )

The external metric server definitions are saved in
'/etc/proxmox-backup/metricserver.cfg', and can be edited through the web
interface.

.. note::

   Using HTTP is recommended as UDP support has been dropped in InfluxDB v2.

InfluxDB (HTTP) plugin configuration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The plugin can be configured to use the HTTP(S) API of InfluxDB 2.x.
InfluxDB 1.8.x does contain a forward-compatible API endpoint for this v2 API.

Since InfluxDB's v2 API is only available with authentication, you have
to generate a token that can write into the correct bucket and set it.

In the v2 compatible API of 1.8.x, you can use 'user:password' as token
(if required), and can omit the 'organization' since that has no meaning in InfluxDB 1.x.

You can also set the maximum batch size (default 25000000 bytes) with the
'max-body-size' setting (this corresponds to the InfluxDB setting with the
same name).

InfluxDB (UDP) plugin configuration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Proxmox Backup Server can also send data via UDP. This requires the InfluxDB
server to be configured correctly. The MTU can also be configured here if
necessary.

Here is an example configuration for InfluxDB (on your InfluxDB server):

.. code-block:: console

   [[udp]]
     enabled = true
     bind-address = "0.0.0.0:8089"
     database = "proxmox"
     batch-size = 1000
     batch-timeout = "1s"

With this configuration, the InfluxDB server listens on all IP addresses on
port 8089, and writes the data in the *proxmox* database.
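To check that datapoints actually arrive, you can query the configured database on the InfluxDB 1.x server, for example with the stock ``influx`` shell (shown purely as an illustration; the database name matches the example above):

.. code-block:: console

   $ influx -database proxmox -execute 'SHOW MEASUREMENTS'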
docs/gui.rst
@ -40,6 +40,16 @@ Proxmox Backup Server supports various languages and authentication back ends

.. note:: For convenience, you can save the username on the client side, by
   selecting the "Save User name" checkbox at the bottom of the window.


.. _consent_banner:

Consent Banner
^^^^^^^^^^^^^^

A custom consent banner that has to be accepted before login can be configured
in **Configuration -> Other -> General -> Consent Text**. If there is no
content, the consent banner will not be displayed. The text will be stored as a
base64 string in the ``/etc/proxmox-backup/node.cfg`` config file.

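The base64 encoding itself is nothing special; if you want to prepare or inspect such a value manually, the standard ``base64`` utility is enough (purely illustrative, the web interface handles the encoding for you):

.. code-block:: console

   # echo -n "Authorized use only." | base64
   # echo -n "Authorized use only." | base64 | base64 -d
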
GUI Overview
------------
BIN docs/images/screenshots/pbs-installer-grub-menu.png (new file, 65 KiB)
BIN docs/images/screenshots/pbs-installer-location.png (new file, 143 KiB)
BIN docs/images/screenshots/pbs-installer-network.png (new file, 153 KiB)
BIN docs/images/screenshots/pbs-installer-password.png (new file, 141 KiB)
BIN docs/images/screenshots/pbs-installer-progress.png (new file, 162 KiB)
BIN docs/images/screenshots/pbs-installer-select-disk.png (new file, 164 KiB)
BIN docs/images/screenshots/pbs-installer-summary.png (new file, 139 KiB)
BIN docs/images/screenshots/pbs-tui-installer.png (new file, 4.6 KiB)
docs/installation-media.rst (new file, 157 lines)
@ -0,0 +1,157 @@

.. _installation_medium:

Installation Medium
-------------------

Proxmox Backup Server can be installed via
:ref:`different methods <install_pbs>`. The recommended method is the
usage of an installation medium, to simply boot the interactive
installer.

Prepare Installation Medium
~~~~~~~~~~~~~~~~~~~~~~~~~~~

Download the installer ISO image from |DOWNLOADS|.

The Proxmox Backup Server installation medium is a hybrid ISO image.
It works in two ways:

- An ISO image file ready to burn to a DVD.

- A raw sector (IMG) image file ready to copy to a USB flash drive (USB stick).

Using a USB flash drive to install Proxmox Backup Server is the
recommended way since it is the faster and more frequently available
option these days.

Prepare a USB Flash Drive as Installation Medium
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The flash drive needs to have at least 2 GB of storage space.

.. note::

   Do not use *UNetbootin*. It does not work with the Proxmox Backup
   Server installation image.

.. important::

   Existing data on the USB flash drive will be overwritten. Therefore,
   make sure that it does not contain any data you still need, and
   unmount it again before proceeding.

Instructions for GNU/Linux
~~~~~~~~~~~~~~~~~~~~~~~~~~

On Unix-like operating systems use the ``dd`` command to copy the ISO
image to the USB flash drive. First find the correct device name of the
USB flash drive (see below). Then run the ``dd`` command. Depending on
your environment, you will need to have root privileges to execute
``dd``.

.. code-block:: console

   # dd bs=1M conv=fdatasync if=./proxmox-backup-server_*.iso of=/dev/XYZ

.. note::

   Be sure to replace ``/dev/XYZ`` with the correct device name and adapt
   the input filename (*if*) path.

.. caution::

   Be very careful, and do not overwrite the wrong disk!

Find the Correct USB Device Name
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

There are two ways to find out the name of the USB flash drive. The
first one is to compare the last lines of the ``dmesg`` command output
before and after plugging in the flash drive. The second way is to
compare the output of the ``lsblk`` command. Open a terminal and run:

.. code-block:: console

   # lsblk

Then plug in your USB flash drive and run the command again:

.. code-block:: console

   # lsblk

A new device will appear. This is the one you want to use. To be extra
safe, check that the reported size matches your USB flash drive.
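If the full ``lsblk`` output is hard to compare, limiting it to a few columns can make the newly plugged-in drive easier to spot; this is just one possible invocation:

.. code-block:: console

   # lsblk -o NAME,SIZE,MODEL,TRAN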
Instructions for macOS
~~~~~~~~~~~~~~~~~~~~~~

Open the terminal (query *Terminal* in Spotlight).

Convert the ``.iso`` file to ``.dmg`` format using the convert option of
``hdiutil``, for example:

.. code-block:: console

   # hdiutil convert proxmox-backup-server_*.iso -format UDRW -o proxmox-backup-server_*.dmg

.. note::

   macOS tends to automatically add ``.dmg`` to the output file name.

To get the current list of devices run the command:

.. code-block:: console

   # diskutil list

Now insert the USB flash drive and run this command again to determine
which device node has been assigned to it (e.g., ``/dev/diskX``).

.. code-block:: console

   # diskutil list
   # diskutil unmountDisk /dev/diskX

.. note::

   Replace *X* with the disk number from the last command.

.. code-block:: console

   # sudo dd if=proxmox-backup-server_*.dmg bs=1M of=/dev/rdiskX

.. note::

   *rdiskX*, instead of *diskX*, in the last command is intended. It
   will increase the write speed.

Instructions for Windows
~~~~~~~~~~~~~~~~~~~~~~~~

Using Etcher
^^^^^^^^^^^^

Etcher works out of the box. Download Etcher from https://etcher.io. It
will guide you through the process of selecting the ISO and your USB
flash drive.

Using Rufus
^^^^^^^^^^^

Rufus is a more lightweight alternative, but you need to use the **DD
mode** to make it work. Download Rufus from https://rufus.ie/. Either
install it or use the portable version. Select the destination drive
and the downloaded Proxmox ISO file.

.. important::

   Once you click *Start*, you have to click *No* on the dialog asking to
   download a different version of Grub. In the next dialog select **DD mode**.

Use the Installation Medium
~~~~~~~~~~~~~~~~~~~~~~~~~~~

Insert the created USB flash drive (or DVD) into your server. Continue
by reading the :ref:`installer <using_the_installer>` chapter, which
also describes possible boot issues.
@ -7,7 +7,9 @@ Debian_ from the provided package repository.

.. include:: system-requirements.rst

.. include:: installation-media.rst

.. _install_pbs:

Server Installation
-------------------
@ -18,44 +20,37 @@ for various management tasks such as disk management.
.. note:: You always need a backup server. It is not possible to use
   Proxmox Backup without the server part.

Using our provided disk image (ISO file) is the recommended
installation method, as it includes a convenient installer, a complete
Debian system as well as all necessary packages for the Proxmox Backup
Server.

Once you have created an :ref:`installation_medium`, the booted
:ref:`installer <using_the_installer>` will guide you through the
setup process. It will help you to partition your disks, apply basic
settings such as the language, time zone and network configuration,
and finally install all required packages within minutes.

As an alternative to the interactive installer, advanced users may
wish to install Proxmox Backup Server
:ref:`unattended <install_pbs_unattended>`.

With sufficient Debian knowledge, you can also install Proxmox Backup
Server :ref:`on top of Debian <install_pbs_on_debian>` yourself.

While not recommended, Proxmox Backup Server could also be installed
:ref:`on Proxmox VE <install_pbs_on_pve>`.

.. include:: using-the-installer.rst

.. _install_pbs_unattended:

Install `Proxmox Backup`_ Server Unattended
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
It is possible to install Proxmox Backup Server automatically in an
unattended manner. This enables you to fully automate the setup process on
bare-metal. Once the installation is complete and the host has booted up,
automation tools like Ansible can be used to further configure the installation.

The necessary options for the installer must be provided in an answer file.
This file allows the use of filter rules to determine which disks and network
@ -66,6 +61,7 @@ installation ISO. For more details and information on the unattended
installation see `our wiki
<https://pve.proxmox.com/wiki/Automated_Installation>`_.

.. _install_pbs_on_debian:

Install `Proxmox Backup`_ Server on Debian
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@ -103,6 +99,8 @@ support, and a set of common and useful packages.
your web browser, using HTTPS on port 8007. For example at
``https://<ip-or-dns-name>:8007``

.. _install_pbs_on_pve:

Install Proxmox Backup Server on `Proxmox VE`_
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@ -123,6 +121,8 @@ After configuring the
your web browser, using HTTPS on port 8007. For example at
``https://<ip-or-dns-name>:8007``

.. _install_pbc:

Client Installation
-------------------
@ -138,7 +138,26 @@ you need to run:
   # apt update
   # apt install proxmox-backup-client

Install Statically Linked Proxmox Backup Client
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Proxmox provides a statically linked build of the Proxmox backup client that
should run on any modern x86-64 Linux system.

It is currently available as a Debian package. After configuring the
:ref:`package_repositories_client_only_apt`, you need to run:

.. code-block:: console

   # apt update
   # apt install proxmox-backup-client-static

This package conflicts with the `proxmox-backup-client` package, as both
provide the client as an executable in the `/usr/bin/proxmox-backup-client`
path.

You can copy this executable to other, e.g. non-Debian based Linux systems.
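For example, to use the statically linked client on a machine without APT, you could copy the installed executable over SSH; the target host name and path below are placeholders of your choice:

.. code-block:: console

   # scp /usr/bin/proxmox-backup-client root@other-host:/usr/local/bin/proxmox-backup-client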
For details on using the Proxmox Backup Client, see :ref:`client_usage`.

.. include:: package-repositories.rst
@ -264,6 +264,7 @@ systems with more than 256 GiB of total memory, where simply setting

   # update-initramfs -u

.. _zfs_swap:

Swap on ZFS
^^^^^^^^^^^
@ -108,7 +108,7 @@ Ext.define('PageCalibration', {
        xtype: 'numberfield',
        value: 'a4',
        name: 's_x',
        fieldLabel: 'Measured Start Offset Sx (mm)',
        allowBlank: false,
        labelWidth: 200,
    },
@ -116,7 +116,7 @@ Ext.define('PageCalibration', {
        xtype: 'numberfield',
        value: 'a4',
        name: 'd_x',
        fieldLabel: 'Measured Length Dx (mm)',
        allowBlank: false,
        labelWidth: 200,
    },
@ -124,7 +124,7 @@ Ext.define('PageCalibration', {
        xtype: 'numberfield',
        value: 'a4',
        name: 's_y',
        fieldLabel: 'Measured Start Offset Sy (mm)',
        allowBlank: false,
        labelWidth: 200,
    },
@ -132,7 +132,7 @@ Ext.define('PageCalibration', {
        xtype: 'numberfield',
        value: 'a4',
        name: 'd_y',
        fieldLabel: 'Measured Length Dy (mm)',
        allowBlank: false,
        labelWidth: 200,
    },
@ -6,8 +6,34 @@ Maintenance Tasks
Pruning
-------

Prune lets you specify which backup snapshots you want to keep, removing others.
When pruning a snapshot, only the snapshot metadata (manifest, indices, blobs,
log and notes) is removed. The chunks containing the actual backup data,
previously referenced by the pruned snapshot, have to be removed by a garbage
collection run.

.. Caution:: Take into consideration that sensitive information stored in a
   given data chunk will outlive pruned snapshots and remain present in the
   datastore as long as referenced by at least one backup snapshot. Further,
   *even* if no snapshot references a given chunk, it will remain present until
   removed by the garbage collection.

   Moreover, file-level backups created using the change detection mode
   ``metadata`` can reference backup chunks containing files which have vanished
   since the previous backup. These files might still be accessible by reading
   the chunks' raw data (client or server side).

   To remove chunks containing sensitive data, prune any snapshot made while the
   data was part of the backup input and run a garbage collection. Further, if
   using file-based backups with change detection mode ``metadata``,
   additionally prune all snapshots created since the sensitive data was no
   longer part of the backup input and run a garbage collection.

   The no longer referenced chunks will then be marked for deletion on the next
   garbage collection run and removed by a subsequent run after the grace
   period.

The following retention options are available for pruning:

``keep-last <N>``
  Keep the last ``<N>`` backup snapshots.
@ -171,6 +197,8 @@ It's recommended to setup a schedule to ensure that unused space is cleaned up
periodically. For most setups a weekly schedule provides a good interval to
start.

.. _gc_background:

GC Background
^^^^^^^^^^^^^
@ -196,17 +224,31 @@ datastore or interfering with other backups.
The garbage collection (GC) process is performed per datastore and is split
into two phases:

- Phase one (Mark):

  All index files are read, and the access time (``atime``) of the referenced
  chunk files is updated.

- Phase two (Sweep):

  The task iterates over all chunks and checks their file access time against a
  cutoff time. The cutoff time is given by either the oldest backup writer
  instance, if present, or 24 hours and 5 minutes before the start of the
  garbage collection.

Garbage collection considers chunk files with access time older than the
cutoff time to be neither referenced by any backup snapshot's index, nor part
of any currently running backup job. Therefore, these chunks can safely be
deleted.

Chunks within the grace period will not be deleted and are logged at the end of
the garbage collection task as *Pending removals*.

.. note:: The grace period for backup chunk removal is not arbitrary, but stems
   from the fact that filesystems are typically mounted with the ``relatime``
   option by default. This results in better performance by only updating the
   ``atime`` property if a file has been modified since the last access or the
   last access has been at least 24 hours ago.

Manually Starting GC
^^^^^^^^^^^^^^^^^^^^
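For example, garbage collection can be triggered and monitored per datastore from the command line; the invocation below is a sketch, assuming the ``garbage-collection`` subcommands of ``proxmox-backup-manager`` and a datastore named ``store1``:

.. code-block:: console

   # proxmox-backup-manager garbage-collection start store1
   # proxmox-backup-manager garbage-collection status store1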
@ -69,6 +69,13 @@ sync-job`` command. The configuration information for sync jobs is stored at
in the GUI, or use the ``create`` subcommand. After creating a sync job, you can
either start it manually from the GUI or provide it with a schedule (see
:ref:`calendar-event-scheduling`) to run regularly.

Backup snapshots, groups and namespaces which are no longer available on the
**Remote** datastore can be removed from the local datastore as well by setting
the ``remove-vanished`` option for the sync job.

Setting the ``verified-only`` or ``encrypted-only`` flags allows limiting the
sync job to backup snapshots which have been verified or encrypted,
respectively. This is particularly of interest when sending backups to a less
trusted remote backup server.

.. code-block:: console
@ -132,6 +139,12 @@ For mixing include and exclude filter, following rules apply:

.. note:: The ``protected`` flag of remote backup snapshots will not be synced.

Enabling the advanced option 'resync-corrupt' will re-sync all snapshots that have
failed to verify during the last :ref:`maintenance_verification`. Hence, a verification
job needs to be run before a sync job with 'resync-corrupt' can be carried out. Be aware
that a 'resync-corrupt' job needs to check the manifests of all snapshots in a datastore
and might take much longer than regular sync jobs.

Namespace Support
^^^^^^^^^^^^^^^^^
@ -218,9 +231,52 @@ Bandwidth Limit

Syncing a datastore to an archive can produce a lot of traffic and impact other
users of the network. In order to avoid network or storage congestion, you can
limit the bandwidth of a sync job in pull direction by setting the ``rate-in``
option either in the web interface or using the ``proxmox-backup-manager``
command-line tool:

.. code-block:: console

   # proxmox-backup-manager sync-job update ID --rate-in 20MiB

For sync jobs in push direction use the ``rate-out`` option instead.
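For example, the analogous limit for a push job could be set like this (same placeholder job ID as above; ``rate-out`` simply mirrors ``rate-in``):

.. code-block:: console

   # proxmox-backup-manager sync-job update ID --rate-out 20MiB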

Sync Direction Push
^^^^^^^^^^^^^^^^^^^

Sync jobs can be configured for pull or push direction. Sync jobs in push
direction are not identical in behaviour because of the limited access to the
target datastore via the remote server's API. Most notably, pushed content will
always be owned by the user configured in the remote configuration, independent
of the local user configured in the sync job. The latter is used exclusively for
permission and scope checks on the pushing side.

.. note:: It is strongly advised to create a dedicated remote configuration for
   each individual sync job in push direction, using a dedicated user on the
   remote. Otherwise, sync jobs pushing to the same target might remove each
   other's snapshots and/or groups, if the remove-vanished flag is set, or skip
   snapshots if the backup time is not incremental.
   This is because the backup groups on the target are owned by the user
   given in the remote configuration.

The following permissions are required for a sync job in push direction:

#. ``Remote.Audit`` on ``/remote/{remote}`` and ``Remote.DatastoreBackup`` on
   ``/remote/{remote}/{remote-store}/{remote-ns}`` path or subnamespace.
#. At least ``Datastore.Read`` and ``Datastore.Audit`` on the local source
   datastore namespace (``/datastore/{store}/{ns}``) or ``Datastore.Backup`` if
   owner of the sync job.
#. ``Remote.DatastorePrune`` on ``/remote/{remote}/{remote-store}/{remote-ns}``
   path to remove vanished snapshots and groups. Make sure to use a dedicated
   remote for each sync job in push direction as noted above.
#. ``Remote.DatastoreModify`` on ``/remote/{remote}/{remote-store}/{remote-ns}``
   path to remove vanished namespaces. A remote user with limited access should
   be used on the remote backup server instance. Consider the implications as
   noted below.

.. note:: ``Remote.DatastoreModify`` will allow removing whole namespaces on the
   remote target datastore, independent of ownership. Make sure the user
   configured in remote.cfg has limited permissions on the remote side.

.. note:: Sync jobs in push direction require namespace support on the remote
   Proxmox Backup Server instance (minimum version 2.2).
@ -7,26 +7,25 @@ Overview
--------

* Proxmox Backup Server emits :ref:`notification_events` in case of noteworthy
  events in the system. These events are handled by the notification system. A
  notification event has metadata, for example a timestamp, a severity level, a
  type and other metadata fields.
* :ref:`notification_matchers` route a notification event to one or more
  notification targets. A matcher can have match rules to selectively route
  based on the metadata of a notification event.
* :ref:`notification_targets` are a destination to which a notification event
  is routed by a matcher. There are multiple types of target, mail-based
  (Sendmail and SMTP) and Gotify.

Datastores and tape backup jobs have a configurable :ref:`notification_mode`.
It allows you to choose between the notification system and a legacy mode for
sending notification emails. The legacy mode is equivalent to the way
notifications were handled before Proxmox Backup Server 3.2.

The notification system can be configured in the GUI under *Configuration →
Notifications*. The configuration is stored in :ref:`notifications.cfg` and
:ref:`notifications_priv.cfg` - the latter contains sensitive configuration
options such as passwords or authentication tokens for notification targets and
can only be read by ``root``.

.. _notification_targets:
@ -41,22 +40,23 @@ Proxmox Backup Server offers multiple types of notification targets.
Sendmail
^^^^^^^^
The sendmail binary is a program commonly found on Unix-like operating systems
that handles the sending of email messages. It is a command-line utility that
allows users and applications to send emails directly from the command line or
from within scripts.

The sendmail notification target uses the ``sendmail`` binary to send emails to
a list of configured users or email addresses. If a user is selected as a
recipient, the email address configured in the user's settings will be used. For
the ``root@pam`` user, this is the email address entered during installation. A
user's email address can be configured in ``Configuration → Access Control →
User Management``. If a user has no associated email address, no email will be
sent.

.. NOTE:: In standard Proxmox Backup Server installations, the ``sendmail``
   binary is provided by Postfix. It may be necessary to configure Postfix so
   that it can deliver mails correctly - for example by setting an external
   mail relay (smart host). In case of failed delivery, check the system logs
   for messages logged by the Postfix daemon.

See :ref:`notifications.cfg` for all configuration options.
@ -64,13 +64,13 @@ See :ref:`notifications.cfg` for all configuration options.

SMTP
^^^^
SMTP notification targets can send emails directly to an SMTP mail relay. This
target does not use the system's MTA to deliver emails. Similar to sendmail
targets, if a user is selected as a recipient, the user's configured email
address will be used.

.. NOTE:: Unlike sendmail targets, SMTP targets do not have any queuing/retry
   mechanism in case of a failed mail delivery.

See :ref:`notifications.cfg` for all configuration options.
@ -78,32 +78,139 @@ See :ref:`notifications.cfg` for all configuration options.

Gotify
^^^^^^
`Gotify <http://gotify.net>`_ is an open-source self-hosted notification server
that allows you to send push notifications to various devices and applications.
It provides a simple API and web interface, making it easy to integrate with
different platforms and services.

.. NOTE:: Gotify targets will respect the HTTP proxy settings from
   Configuration → Other → HTTP proxy

See :ref:`notifications.cfg` for all configuration options.
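To check that your Gotify server and application token work at all before wiring them into a target, you can push a test message with ``curl`` against Gotify's message endpoint (independent of Proxmox Backup Server; the URL and token below are placeholders):

.. code-block:: console

   # curl -X POST "https://gotify.example.com/message" \
       -H "X-Gotify-Key: <application-token>" \
       -F "title=Test" -F "message=Hello from the backup host" -F "priority=5"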
.. _notification_targets_webhook:

Webhook
^^^^^^^
Webhook notification targets perform HTTP requests to a configurable URL.

The following configuration options are available:

* ``url``: The URL to which to perform the HTTP requests. Supports templating
  to inject message contents, metadata and secrets.
* ``method``: HTTP Method to use (POST/PUT/GET)
* ``header``: Array of HTTP headers that should be set for the request.
  Supports templating to inject message contents, metadata and secrets.
* ``body``: HTTP body that should be sent. Supports templating to inject
  message contents, metadata and secrets.
* ``secret``: Array of secret key-value pairs. These will be stored in a
  protected configuration file only readable by root. Secrets can be
  accessed in body/header/URL templates via the ``secrets`` namespace.
* ``comment``: Comment for this target.

For configuration options that support templating, the `Handlebars
<https://handlebarsjs.com>`_ syntax can be used to access the following
properties:

* ``{{ title }}``: The rendered notification title
* ``{{ message }}``: The rendered notification body
* ``{{ severity }}``: The severity of the notification (``info``, ``notice``,
  ``warning``, ``error``, ``unknown``)
* ``{{ timestamp }}``: The notification's timestamp as a UNIX epoch (in
  seconds).
* ``{{ fields.<name> }}``: Sub-namespace for any metadata fields of the
  notification. For instance, ``fields.type`` contains the notification
  type - for all available fields refer to :ref:`notification_events`.
* ``{{ secrets.<name> }}``: Sub-namespace for secrets. For instance, a secret
  named ``token`` is accessible via ``secrets.token``.

For convenience, the following helpers are available:

* ``{{ url-encode <value/property> }}``: URL-encode a property/literal.
* ``{{ escape <value/property> }}``: Escape any control characters that cannot
  be safely represented as a JSON string.
* ``{{ json <value/property> }}``: Render a value as JSON. This can be useful
  to pass a whole sub-namespace (e.g. ``fields``) as a part of a JSON payload
  (e.g. ``{{ json fields }}``).
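Putting the properties and helpers together, a generic JSON body for a webhook target could look like the sketch below; whether a receiving endpoint accepts this structure depends entirely on that endpoint::

   {
     "title": "{{ title }}",
     "message": "{{ escape message }}",
     "severity": "{{ severity }}",
     "timestamp": {{ timestamp }},
     "metadata": {{ json fields }}
   }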

.. NOTE:: Webhook targets will respect the HTTP proxy settings from
   Configuration → Other → HTTP proxy

Example - ntfy.sh
"""""""""""""""""

* Method: ``POST``
* URL: ``https://ntfy.sh/{{ secrets.channel }}``
* Headers:

  * ``Markdown``: ``Yes``

* Body::

    ```
    {{ message }}
    ```

* Secrets:

  * ``channel``: ``<your ntfy.sh channel>``

Example - Discord
"""""""""""""""""

* Method: ``POST``
* URL: ``https://discord.com/api/webhooks/{{ secrets.token }}``
* Headers:

  * ``Content-Type``: ``application/json``

* Body::

    {
      "content": "``` {{ escape message }}```"
    }

* Secrets:

  * ``token``: ``<token>``

Example - Slack
"""""""""""""""

* Method: ``POST``
* URL: ``https://hooks.slack.com/services/{{ secrets.token }}``
* Headers:

  * ``Content-Type``: ``application/json``

* Body::

    {
      "text": "``` {{escape message}}```",
      "type": "mrkdwn"
    }

* Secrets:

  * ``token``: ``<token>``

.. _notification_matchers:

Notification Matchers
---------------------

Notification matchers route notifications to notification targets based on
their matching rules. These rules can match certain properties of a
notification, such as the timestamp (``match-calendar``), the severity of the
notification (``match-severity``) or metadata fields (``match-field``). If a
notification is matched by a matcher, all targets configured for the matcher
will receive the notification.

An arbitrary number of matchers can be created, each with their own
matching rules and targets to notify. Every target is notified at most once for
every notification, even if the target is used in multiple matchers.

A matcher without rules matches any notification; the configured targets will
always be notified.

See :ref:`notifications.cfg` for all configuration options.
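As an illustration only, a minimal matcher entry in the configuration file that routes every notification to a single target might look roughly like this; the entry and field names are a sketch and the exact layout on your system may differ, so treat :ref:`notifications.cfg` as the authoritative reference::

   matcher: default-matcher
       mode all
       target mail-to-root
       comment Send all notifications to mail-to-root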
@ -120,20 +227,24 @@ Examples:

Field Matching Rules
^^^^^^^^^^^^^^^^^^^^
Notifications have a selection of metadata fields that can be matched. When
using ``exact`` as a matching mode, a ``,`` can be used as a separator. The
matching rule then matches if the metadata field has **any** of the specified
values.

Examples:

* ``match-field exact:type=gc`` Only match notifications for garbage collection
  jobs
* ``match-field exact:type=prune,verify`` Match prune job and verification job
  notifications.
* ``match-field regex:datastore=^backup-.*$`` Match any datastore starting with
  ``backup``.

If a notification does not have the matched field, the rule will **not** match.
For instance, a ``match-field regex:datastore=.*`` directive will match any
notification that has a ``datastore`` metadata field, but will not match if the
field does not exist.

Severity Matching Rules
^^^^^^^^^^^^^^^^^^^^^^^
@ -152,9 +263,9 @@ The following severities are in use:
Notification Events
-------------------

The following table contains a list of all notification events in Proxmox
Backup Server, their type, severity and additional metadata fields. ``type`` as
well as any other metadata field may be used in ``match-field`` match rules.

================================ ==================== ========== ==============================================================
Event                            ``type``             Severity   Metadata fields (in addition to ``type``)
@ -174,8 +285,8 @@ Verification job failure ``verification`` ``error`` ``datastore``,
Verification job success         ``verification``     ``info``   ``datastore``, ``hostname``, ``job-id``
================================ ==================== ========== ==============================================================

The following table contains a description of all used metadata fields. All of
these can be used in ``match-field`` match rules.

==================== ===================================
Metadata field       Description
@ -192,45 +303,45 @@ Metadata field Description

System Mail Forwarding
----------------------

Certain local system daemons, such as ``smartd``, send notification emails to
the local ``root`` user. Proxmox Backup Server will feed these mails into the
notification system as a notification of type ``system-mail`` and with severity
``unknown``.

When the email is forwarded to a sendmail target, the mail's content and
headers are forwarded as-is. For all other targets, the system tries to extract
both a subject line and the main text body from the email content. In instances
where emails solely consist of HTML content, they will be transformed into
plain text format during this process.

Permissions
-----------

In order to modify/view the configuration for notification targets, the
``Sys.Modify/Sys.Audit`` permissions are required for the
``/system/notifications`` ACL node.

.. _notification_mode:

Notification Mode
-----------------

Datastores and tape backup/restore job configuration have a
``notification-mode`` option which can have one of two values:

* ``legacy-sendmail``: Send notification emails via the system's ``sendmail``
  command. The notification system will be bypassed and any configured
  targets/matchers will be ignored. This mode is equivalent to the notification
  behavior of versions before Proxmox Backup Server 3.2.

* ``notification-system``: Use the new, flexible notification system.

If the ``notification-mode`` option is not set, Proxmox Backup Server will
default to ``legacy-sendmail``.

Starting with Proxmox Backup Server 3.2, a datastore created in the UI will
automatically opt in to the new notification system. If the datastore is
created via the API or the ``proxmox-backup-manager`` CLI, the
``notification-mode`` option has to be set explicitly to
``notification-system`` if the notification system shall be used.
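
For example, an existing datastore can be opted into the new notification
system from the command line. This is a hedged sketch: ``store1`` is a
placeholder datastore name, and it assumes the CLI exposes the option under the
same name as the configuration key described above.

.. code-block:: console

  # proxmox-backup-manager datastore update store1 --notification-mode notification-system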

The ``legacy-sendmail`` mode might be removed in a later release of
Proxmox Backup Server.
@ -239,12 +350,12 @@ Settings for ``legacy-sendmail`` notification mode
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

If ``notification-mode`` is set to ``legacy-sendmail``, Proxmox Backup Server
will send notification emails via the system's ``sendmail`` command to the
email address configured for the user set in the ``notify-user`` option
(falling back to ``root@pam`` if not set).
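
For example, to have these legacy notifications sent to a dedicated user
instead of ``root@pam``, the datastore's ``notify-user`` option can be changed.
This is a hedged sketch: ``admin@pbs`` and ``store1`` are placeholders, and the
CLI option name is assumed to mirror the configuration key described above.

.. code-block:: console

  # proxmox-backup-manager datastore update store1 --notify-user admin@pbs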

For datastores, you can also change the level of notifications received per
task type via the ``notify`` option.

* Always: send a notification for any scheduled task, independent of the
  outcome
@ -255,3 +366,23 @@ type via the ``notify`` option.

The ``notify-user`` and ``notify`` options are ignored if ``notification-mode``
is set to ``notification-system``.


Overriding Notification Templates
---------------------------------

Proxmox Backup Server uses Handlebars templates to render notifications. The
original templates provided by Proxmox Backup Server are stored in
``/usr/share/proxmox-backup/templates/default/``.

Notification templates can be overridden by providing a custom template file in
the override directory at
``/etc/proxmox-backup/notification-templates/default/``. When rendering a
notification of a given type, Proxmox Backup Server will first attempt to load
a template from the override directory. If this one does not exist or fails to
render, the original template will be used.

The template files follow the naming convention of
``<type>-<body|subject>.txt.hbs``. For instance, the file
``gc-err-body.txt.hbs`` contains the template for rendering notifications for
garbage collection errors, while ``package-updates-subject.txt.hbs`` is used to
render the subject line of notifications for available package updates.
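
To customize a template, a practical approach is to copy the shipped default
into the override directory and edit the copy. A minimal sketch, using only the
paths and file names mentioned above (whether the override directory already
exists on a default installation is an assumption; ``mkdir -p`` is harmless
either way):

.. code-block:: console

  # mkdir -p /etc/proxmox-backup/notification-templates/default
  # cp /usr/share/proxmox-backup/templates/default/gc-err-body.txt.hbs \
      /etc/proxmox-backup/notification-templates/default/
  # $EDITOR /etc/proxmox-backup/notification-templates/default/gc-err-body.txt.hbs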
@ -149,7 +149,7 @@ Currently there's only a client-repository for APT based systems.

.. _package_repositories_client_only_apt:

APT-based Proxmox Backup Client Repository
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

For modern Linux distributions using `apt` as package manager, as all Debian
and Ubuntu derivatives do, you may be able to use the APT-based repository.
@ -126,7 +126,8 @@ Ext.onReady(function() {

	    if (data.mark !== 'keep') {
		return `<div style="text-decoration: line-through;">${text}</div>`;
	    }
	    let pruneList = this.up('prunesimulatorPruneList');
	    if (pruneList.useColors) {
		let bgColor = COLORS[data.keepName];
		let textColor = TEXT_COLORS[data.keepName];
		return `<div style="background-color: ${bgColor};color: ${textColor};">${text}</div>`;

@ -353,12 +354,17 @@ Ext.onReady(function() {

	specValues.forEach(function(value) {
	    if (value.includes('..')) {
		let [start, end] = value.split('..');
		let step = 1;
		if (end.includes('/')) {
		    [end, step] = end.split('/');
		    step = assertValid(step);
		}
		start = assertValid(start);
		end = assertValid(end);
		if (start > end) {
		    throw "interval start is bigger then interval end '" + start + " > " + end + "'";
		}
		for (let i = start; i <= end; i += step) {
		    matches[i] = 1;
		}
	    } else if (value.includes('/')) {
@ -165,6 +165,74 @@ following command creates a new datastore called ``store1`` on

  # proxmox-backup-manager datastore create store1 /backup/disk1/store1


Removable Datastores
^^^^^^^^^^^^^^^^^^^^

Removable datastores have a ``backing-device`` associated with them; they can
be mounted and unmounted. Other than that, they behave the same way a normal
datastore would.

They can be created on already correctly formatted partitions, which should be
either ``ext4`` or ``xfs``, as with normal datastores, but most modern file
systems supported by the Proxmox Linux kernel should work.

.. note:: FAT-based file systems do not support the POSIX file ownership
   concept and have relatively low limits on the number of files per directory.
   Therefore, creating a datastore is not supported on FAT file systems.
   Because some external drives are preformatted with such a FAT-based file
   system, you may need to reformat the drive before you can use it as a
   backing-device for a removable datastore.

It is also possible to create them on completely unused disks through
"Administration" > "Disks / Storage" > "Directory". Using this method, the disk
will be partitioned and formatted automatically for the datastore.
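
Creating a removable datastore from the command line is possible as well. The
following is a hedged sketch only: it assumes that ``proxmox-backup-manager
datastore create`` accepts a ``--backing-device`` option identifying the
partition (here by filesystem UUID); the datastore name, relative path and UUID
are placeholders.

.. code-block:: console

  # proxmox-backup-manager datastore create removable-store1 backup-dir \
      --backing-device 'c8c26067-01e4-4b22-aa8c-4d3b0f45c465'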
|
Devices with only one datastore on them will be mounted automatically. Unmounting has
|
||||||
|
to be done through the UI by clicking "Unmount" on the summary page or using the CLI.
|
||||||
|
If unmounting fails, the reason is logged in the unmount task log, and the
|
||||||
|
datastore will stay in maintenance mode ``unmounting``, which prevents any IO
|
||||||
|
operations. In such cases, the maintenance mode has to be reset manually using:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-manager datastore update --maintenance-mode offline
|
||||||
|
|
||||||
|
to prevent any IO, or to clear it use:
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-manager datastore update --delete maintenance-mode
|
||||||
|
|
||||||
|
|
||||||
|
A single device can house multiple datastores, they only limitation is that they are not
|
||||||
|
allowed to be nested.
|
||||||
|
|
||||||
|
Removable datastores are created on the the device with the given relative path that is specified
|
||||||
|
on creation. In order to use a datastore on multiple PBS instances, it has to be created on one,
|
||||||
|
and added with ``Reuse existing datastore`` checked on the others. The path you set on creation
|
||||||
|
is how multiple datastores on a single device are identified. So when adding on a new PBS instance,
|
||||||
|
it has to match what was set on creation.
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-manager datastore unmount store1
|
||||||
|
|
||||||
|
both will wait for any running tasks to finish and unmount the device.
|
||||||
|
|
||||||
|
All removable datastores are mounted under /mnt/datastore/<name>, and the specified path
|
||||||
|
refers to the path on the device.
|
||||||
|
|
||||||
|
All datastores present on a device can be listed using ``proxmox-backup-debug``.
|
||||||
|
|
||||||
|
.. code-block:: console
|
||||||
|
|
||||||
|
# proxmox-backup-debug inspect device /dev/...
|
||||||
|
|
||||||
|
|
||||||
|
Verify, Prune and Garbage Collection jobs are skipped if the removable
|
||||||
|
datastore is not mounted when they are scheduled. Sync jobs start, but fail
|
||||||
|
with an error saying the datastore was not mounted. The reason is that syncs
|
||||||
|
not happening as scheduled should at least be noticeable.
|
||||||
|
|
||||||
Managing Datastores
|
Managing Datastores
|
||||||
^^^^^^^^^^^^^^^^^^^
|
^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
@ -367,9 +435,28 @@ There are some tuning related options for the datastore that are more advanced:

  This can be set with:

  .. code-block:: console

    # proxmox-backup-manager datastore update <storename> --tuning 'sync-level=filesystem'

* ``gc-atime-safety-check``: Datastore GC atime update safety check:
  You can explicitly `enable` or `disable` the atime update safety check
  performed on datastore creation and garbage collection. This checks if atime
  updates are handled as expected by garbage collection and therefore avoids
  the risk of data loss caused by unexpected filesystem behavior. It is
  recommended to set this to enabled, which is also the default value.

* ``gc-atime-cutoff``: Datastore GC atime cutoff for chunk cleanup:
  This allows setting the cutoff for which a chunk is still considered in-use
  during phase 2 of garbage collection (given no older writers). If the
  ``atime`` of the chunk is outside the range, it will be removed.

* ``gc-cache-capacity``: Datastore GC least recently used cache capacity:
  This allows controlling the cache capacity used to keep track of chunks for
  which the access time has already been updated during phase 1 of garbage
  collection. This avoids multiple updates and increases GC runtime
  performance. Higher values can reduce GC runtime at the cost of increased
  memory usage; setting the value to 0 disables caching.

If you want to set multiple tuning options simultaneously, you can separate them
with a comma, like this:
@ -30,6 +30,8 @@ please refer to the standard Debian documentation.

.. include:: certificate-management.rst

.. include:: external-metric-server.rst

.. include:: services.rst

.. include:: command-line-tools.rst
@ -6,6 +6,8 @@ production. To further decrease the impact of a failed host, you can set up

periodic, efficient, incremental :ref:`datastore synchronization <syncjobs>`
from other Proxmox Backup Server instances.

.. _minimum_system_requirements:

Minimum Server Requirements, for Evaluation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@ -61,6 +61,7 @@ In general, LTO tapes offer the following advantages:

Note that `Proxmox Backup Server` already stores compressed data, so using the
tape compression feature has no advantage.

.. _tape-supported-hardware:

Supported Hardware
------------------

@ -969,6 +970,8 @@ You can restore from a tape even without an existing catalog, but only the

whole media set. If you do this, the catalog will be automatically created.


.. _tape_key_management:

Encryption Key Management
~~~~~~~~~~~~~~~~~~~~~~~~~
@ -1180,3 +1183,159 @@ In combination with fitting prune settings and tape backup schedules, this

achieves long-term storage of some backups, while keeping the recent
backups on smaller media sets that expire roughly every 4 weeks (that is, three
plus the current week).


Disaster Recovery
-----------------

.. _Command-line Tools: command-line-tools.html

In case of major disasters, important data, or even whole servers might be
destroyed or at least damaged up to the point where everything - sometimes
including the backup server - has to be restored from a backup. For such cases,
the following step-by-step guide will help you to set up the Proxmox Backup
Server and restore everything from tape backups.

The following guide will explain the necessary steps using both the web GUI and
the command line tools. For an overview of the command line tools, see
`Command-line Tools`_.


Setting Up a Datastore
~~~~~~~~~~~~~~~~~~~~~~

.. _proxmox-backup-manager: proxmox-backup-manager/man1.html

.. _Installation: installation.html

After you set up a new Proxmox Backup Server, as outlined in the `Installation`_
chapter, first set up a datastore so a tape can be restored to it:

#. Go to **Administration -> Storage / Disks** and make sure that the disk that
   will be used as a datastore shows up.

#. Under the **Directory** or **ZFS** tabs, you can either choose to create a
   directory or create a ZFS ``zpool``, respectively. Here you can also directly
   add the newly created directory or ZFS ``zpool`` as a datastore.

Alternatively, the `proxmox-backup-manager`_ can be used to perform the same
tasks. For more information, check the :ref:`datastore_intro` documentation.


Setting Up the Tape Drive
~~~~~~~~~~~~~~~~~~~~~~~~~

#. Make sure you have a properly working tape drive and/or changer matching the
   medium you want to restore from.

#. Connect the tape changer(s) and the tape drive(s) to the backup server. These
   should be detected automatically by Linux. You can get a list of available
   drives using:

   .. code-block:: console

     # proxmox-tape drive scan
     ┌────────────────────────────────┬────────┬─────────────┬────────┐
     │ path                           │ vendor │ model       │ serial │
     ╞════════════════════════════════╪════════╪═════════════╪════════╡
     │ /dev/tape/by-id/scsi-12345-sg  │ IBM    │ ULT3580-TD4 │ 12345  │
     └────────────────────────────────┴────────┴─────────────┴────────┘

   You can get a list of available changers with:

   .. code-block:: console

     # proxmox-tape changer scan
     ┌─────────────────────────────┬─────────┬──────────────┬────────┐
     │ path                        │ vendor  │ model        │ serial │
     ╞═════════════════════════════╪═════════╪══════════════╪════════╡
     │ /dev/tape/by-id/scsi-CC2C52 │ Quantum │ Superloader3 │ CC2C52 │
     └─────────────────────────────┴─────────┴──────────────┴────────┘

   For more information, please read the chapters
   on :ref:`tape_changer_config` and :ref:`tape_drive_config`.

#. If you have a tape changer, go to the web interface of the Proxmox Backup
   Server, go to **Tape Backup -> Changers** and add it. For examples using the
   command line, read the chapter on :ref:`tape_changer_config`. If the changer
   has been detected correctly by Linux, the changer should show up in the list.

#. In the web interface, go to **Tape Backup -> Drives** and add the tape drive
   that will be used to read the tapes. For examples using the command line,
   read the chapter on :ref:`tape_drive_config`. If the tape drive has been
   detected correctly by Linux, the drive should show up in the list. If the
   drive also has a tape changer, make sure to select the changer as well and
   assign it the correct drive number.


Restoring Data From the Tape
~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. _proxmox-tape: proxmox-tape/man1.html

.. _proxmox-backup-client: proxmox-backup-client/man1.html

.. _Restore: https://pve.proxmox.com/pve-docs/chapter-vzdump.html#vzdump_restore

The following guide will explain the steps necessary to restore data from a
tape, which can be done over either the web GUI or the command line. For details
on the command line, read the documentation on the `proxmox-tape`_ tool.

To restore data from tapes, do the following:

#. Insert the first tape (as displayed on the label) into the tape drive or, if
   a tape changer is available, use the tape changer to insert the tape into the
   right drive. The web GUI can also be used to load or transfer tapes between
   tape drives by selecting the changer.

#. If the backup has been encrypted, the encryption keys need to be restored as
   well. In the **Encryption Keys** tab, press **Restore Key**. For more
   details or examples that use the command line, read the
   :ref:`tape_key_management` chapter.

#. The procedure for restoring data is slightly different depending on whether
   you are using a standalone tape drive or a changer:

   * For changers, the procedure is simple:

     #. Insert all tapes from the media set you want to restore from.

     #. Click on the changer in the web GUI, click **Inventory**, make sure
        **Restore Catalog** is selected and press OK.

   * For standalone drives, the procedure would be:

     #. Insert the first tape of the media set.

     #. Click **Catalog**.

     #. Eject the tape, then repeat the steps for the remaining tapes of the
        media set.

#. Go back to **Tape Backup**. In the **Content** tab, press **Restore** and
   select the desired media set. Choose the snapshot you want to restore, press
   **Next**, select the drive and target datastore and press **Restore**.

#. By going to the datastore where the data has been restored, under the
   **Content** tab you should be able to see the restored snapshots. In order to
   access the backups from another machine, you will need to configure the
   access to the backup server. Go to **Configuration -> Access Control** and
   either create a new user, or a new API token (API tokens allow easy
   revocation if the token is compromised). Under **Permissions**, add the
   desired permissions, e.g. **DatastoreBackup** (a command-line sketch of this
   step follows after this list).

#. You can now perform virtual machine, container or file restores. You now have
   the following options:

   * If you want to restore files on Linux distributions that are not based on
     Proxmox products or you prefer using a command line tool, you can use the
     `proxmox-backup-client`_, as explained in the
     :ref:`client_restoring_data` chapter. Use the newly created API token to
     be able to access the data. You can then restore individual files or
     mount an archive to your system.

   * If you want to restore virtual machines or containers on a Proxmox VE
     server, add the datastore of the backup server as storage and go to
     **Backups**. Here you can restore VMs and containers, including their
     configuration. For more information on restoring backups in Proxmox VE,
     visit the `Restore`_ chapter of the Proxmox VE documentation.
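
The access-control step above can also be performed on the command line. This
is a hedged sketch only: the user, token and datastore names are placeholders,
and it assumes the ``user generate-token`` and ``acl update`` subcommands of
``proxmox-backup-manager`` behave as described in the user management chapter.

.. code-block:: console

  # proxmox-backup-manager user create restore@pbs
  # proxmox-backup-manager user generate-token restore@pbs tape-restore
  # proxmox-backup-manager acl update /datastore/store1 DatastoreBackup \
      --auth-id 'restore@pbs!tape-restore'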
@ -56,8 +56,8 @@ The chunks of a datastore are found in

 <datastore-root>/.chunks/

This chunk directory is further subdivided into directories grouping chunks by
their checksum's 2-byte prefix (given as 4 hexadecimal digits), so a chunk with
the checksum

a342e8151cbf439ce65f3df696b54c67a114982cc0aa751f2852c2f7acc19a8b
|
|||||||
encrypt chunks that are actually getting uploaded. Chunks that exist already in
|
encrypt chunks that are actually getting uploaded. Chunks that exist already in
|
||||||
the previous backup, do not need to be encrypted and uploaded.
|
the previous backup, do not need to be encrypted and uploaded.
|
||||||
|
|
||||||
|
Change Detection Mode for File-Based Backups
|
||||||
|
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
||||||
|
The change detection mode controls how to detect and act for files which did not
|
||||||
|
change in-between subsequent backup runs as well as the archive file format used
|
||||||
|
to encode the directory entries.
|
||||||
|
|
||||||
|
There are 3 modes available, the current default ``legacy`` mode, as well as the
|
||||||
|
``data`` and ``metadata`` mode. While the ``legacy`` mode encodes all contents
|
||||||
|
in a single ``pxar`` archive, the latter two modes split data and metadata into
|
||||||
|
``ppxar`` and ``mpxar`` archives. This is done to allow for fast comparison of
|
||||||
|
metadata with the previous snapshot, used by the ``metadata`` mode to detect
|
||||||
|
reusable files. The ``data`` mode refrains from reusing unchanged files by
|
||||||
|
rechunking the file unconditionally. This mode therefore assures that no file
|
||||||
|
changes are missed even if the metadata are unchanged.
|
||||||
|
|
||||||
|
.. NOTE:: ``pxar`` and ``mpxar``/``ppxar`` file formats are different and cannot
|
||||||
|
be deduplicated as efficiently if a datastore stores archive snapshots of
|
||||||
|
both types.
|
||||||
|
|
||||||
|
As the change detection modes are client side changes, they are backwards
|
||||||
|
compatible with older versions of Proxmox Backup Server. Exploring the backup
|
||||||
|
contents for the new archive format via the web interface requires however a
|
||||||
|
Proxmox Backup Server with version 3.2.5 or higher. Upgrading to the latest
|
||||||
|
version is recommended for full feature compatibility.
|
||||||
|
|
||||||
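
The mode is selected per archive on the client side when running a backup. As a
hedged sketch (the repository and archive names are placeholders, and the
``--change-detection-mode`` option is assumed to accept the three mode names
used above):

.. code-block:: console

  # proxmox-backup-client backup root.pxar:/ \
      --repository backup-user@pbs@pbs.example.com:store1 \
      --change-detection-mode metadata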

.. _change-detection-mode-legacy:

Legacy Mode
+++++++++++

Backup snapshots of filesystems are created by recursively scanning the
directory entries. All entries to be included in the snapshot are read and
serialized by encoding them using the ``pxar``
:ref:`archive format <pxar-format>`. The resulting stream is chunked into
:ref:`dynamically sized chunks <dynamically-sized-chunks>` and uploaded to the
Proxmox Backup Server, deduplicating chunks based on their content digest for
space efficient storage.
File contents are read and chunked unconditionally; no check is performed to
detect unchanged files.

.. _change-detection-mode-data:

Data Mode
+++++++++

As in ``legacy`` mode, file contents are read and chunked unconditionally; no
check is performed to detect unchanged files.

However, in contrast to ``legacy`` mode, which stores entries' metadata and data
in a single self-contained ``pxar`` archive, the ``data`` mode encodes metadata
and file contents into two separate streams. The resulting backup snapshots
therefore contain split archives: an archive in ``mpxar``
:ref:`format <pxar-meta-format>` containing the entries' metadata, and an
archive in ``ppxar`` :ref:`format <ppxar-format>`, containing the actual file
contents, separated by payload headers for consistency checks. The metadata
archive stores a reference offset to the corresponding payload archive entry so
the file contents can be accessed. Both of these archives are chunked and
uploaded by the Proxmox backup client, resulting in separated indices and
independent chunks.

The ``mpxar`` archive can be used to efficiently fetch the associated metadata
for archive entries without the overhead of payload data stored within the same
chunks. This is used, for example, for entry lookups to list the archive
contents or to navigate the mounted filesystem via the FUSE implementation. No
dedicated catalog is therefore created for archives encoded using this mode.

By not comparing metadata to the previous backup snapshot, no files will be
considered reusable by this mode, in contrast to the ``metadata`` mode. The
latter can reuse files whose contents have changed but whose file size and
mtime did not change, for example because the mtime was restored after changing
the file contents.

.. _change-detection-mode-metadata:

Metadata Mode
+++++++++++++

The ``metadata`` mode detects files whose file metadata did not change
in-between subsequent backup runs. The metadata comparison includes file size,
file type, ownership and permission information, as well as acls and attributes,
and most importantly the file's mtime; for details see the
:ref:`pxar metadata archive format <pxar-meta-format>`. The files' ctime and
inode number are not stored and not used for comparison, since some tools (e.g.
``vzdump``) might sync the contents of the filesystem to a temporary location
before actually performing the backup via the Proxmox backup client. For these
cases, ctime and inode number will always change.

This mode will avoid reading and rechunking the file contents whenever possible
by reusing the file content chunks of unchanged files from the previous backup
snapshot.

To compare the metadata, the previous snapshot's ``mpxar`` metadata archive is
downloaded at the start of the backup run and used as a reference. Further, the
index of the payload archive ``ppxar`` is fetched and used to look up the file
content chunks' digests, which will be used to reindex pre-existing chunks
without the need to reread and rechunk the file contents.

During backup, the metadata and payload archives are encoded in the same manner
as for the ``data`` mode, but for the ``metadata`` mode each entry is
additionally looked up in the metadata reference archive for comparison first.
If the file did not change as compared to the reference, the file is considered
unchanged and the Proxmox backup client enters a look-ahead caching mode. In
this mode, the client will keep reading and comparing the following entries in
the filesystem as long as they are reusable. Further, it keeps track of the
payload archive offset range these file contents are stored in. The additional
look-ahead caching is needed, as file boundaries are not required to be aligned
with chunk boundaries; therefore, reused chunks can contain possibly wasted
chunk content (also called padding) if reused unconditionally.

The look-ahead cache will greedily cache all unchanged entries up to the point
where either the cache size limit is reached, a file entry with changed
metadata is encountered, or the range of payload chunks considered for reuse is
not continuous. An example for the latter is a file which disappeared in-between
subsequent backup runs, leaving a hole in the range. At this point, the caching
mode is disabled and the client calculates the wasted padding size which would
be introduced by reusing the payload chunks for all the unchanged files cached
up to this point. If the padding is acceptable (below a preset limit of 10% of
the actually reused chunk content), the files are reused by encoding them in the
metadata archive using updated offset references to the contents and reindexing
the pre-existing chunks in the new ``ppxar`` archive. If, however, the padding
exceeds the limit, all cached entries are reencoded, not reusing any of the
pre-existing data. The metadata as cached will be encoded in the metadata
archive, no matter if cached file contents are to be reused or reencoded.

This combination of look-ahead caching and reuse of pre-existing payload archive
chunks for files with unchanged contents therefore speeds up the backup
process by avoiding rereading and rechunking file contents whenever possible.

To reduce padding and increase chunk reusability, during creation of the
archives in ``data`` mode and ``metadata`` mode the pxar encoder signals
encountered file boundaries as suggested chunk boundaries to the sliding window
chunker. The chunker then decides, based on its internal state, whether the
suggested boundary is accepted or disregarded.

Caveats and Limitations
-----------------------
|
|||||||
|
|
||||||
For context, in a lottery game of guessing 6 numbers out of 45, the chance to
|
For context, in a lottery game of guessing 6 numbers out of 45, the chance to
|
||||||
correctly guess all 6 numbers is only :math:`1.2277 * 10^{-7}`. This means the
|
correctly guess all 6 numbers is only :math:`1.2277 * 10^{-7}`. This means the
|
||||||
chance of a collision is about the same as winning 13 such lottery games *in a
|
chance of a collision is lower than winning 8 such lottery games *in a row*:
|
||||||
row*.
|
:math:`(1.2277 * 10^{-7})^{8} = 5.1623 * 10^{-56}`.
|
||||||
|
|
||||||
In conclusion, it is extremely unlikely that such a collision would occur by
|
In conclusion, it is extremely unlikely that such a collision would occur by
|
||||||
accident in a normal datastore.
|
accident in a normal datastore.
|
||||||
@ -183,6 +319,9 @@ read all files again for every backup, otherwise it would not be possible to

generate a consistent, independent pxar archive where the original chunks can be
reused. Note that in spite of this, only new or changed chunks will be uploaded.

In order to avoid these limitations, the Change Detection Mode ``metadata`` was
introduced.

Verification of Encrypted Chunks
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@ -16,8 +16,8 @@ User Configuration

choose the realm when you add a new user. Possible realms are:

:pam: Linux PAM standard authentication. Use this if you want to
   authenticate as a Linux system user. The user needs to already exist on
   the host system.

:pbs: Proxmox Backup Server realm. This type stores hashed passwords in
   ``/etc/proxmox-backup/shadow.json``.
@ -599,6 +599,32 @@ list view in the web UI, or using the command line:

Authentication Realms
---------------------

.. _user_realms_pam:

Linux PAM
~~~~~~~~~

Linux PAM is a framework for system-wide user authentication. These users are
created on the host system with commands such as ``adduser``.

If PAM users exist on the host system, corresponding entries can be added to
Proxmox Backup Server, to allow these users to log in via their system username
and password.
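
For example, a host user can be created and then registered as a ``@pam`` user
in Proxmox Backup Server. This is a hedged sketch: the username is a
placeholder, and the ``user create`` invocation is assumed from the user
management CLI.

.. code-block:: console

  # adduser backupadmin
  # proxmox-backup-manager user create backupadmin@pam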

.. _user_realms_pbs:

Proxmox Backup authentication server
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

This is a Unix-like password store, which stores hashed passwords in
``/etc/proxmox-backup/shadow.json``. Passwords are hashed using the SHA-256
hashing algorithm.

This is the most convenient realm for small-scale (or even mid-scale)
installations, where users do not need access to anything outside of Proxmox
Backup Server. In this case, users are fully managed by Proxmox Backup Server
and are able to change their own passwords via the GUI.

.. _user_realms_ldap:

LDAP
@ -663,7 +689,7 @@ address must be specified. Most options from :ref:`user_realms_ldap` apply to

Active Directory as well, most importantly the bind credentials ``bind-dn``
and ``password``. This is typically required by default for Microsoft Active
Directory. The ``bind-dn`` can be specified either in AD-specific
``user@company.net`` syntax or the common LDAP-DN syntax.

The authentication domain name must only be specified if anonymous bind is
requested. If bind credentials are given, the domain name is automatically

docs/using-the-installer.rst (new file, 346 lines)
@ -0,0 +1,346 @@

.. _using_the_installer:

Install `Proxmox Backup`_ Server using the Installer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Download the ISO from |DOWNLOADS|.
It includes the following:

* The Proxmox Backup Server installer, which partitions the local
  disk(s) with ext4, xfs or ZFS, and installs the operating system

* Complete operating system (Debian Linux, 64-bit)

* Proxmox Linux kernel with ZFS support

* Complete toolset to administer backups and all necessary resources

* Web-based management interface

.. note:: Any existing data on the selected drives will be overwritten
   during the installation process. The installer does not add boot
   menu entries for other operating systems.

Please insert the :ref:`installation_medium` (for example, USB flash
drive or DVD) and boot from it.

.. note:: You may need to go into your server's firmware settings to
   enable booting from your installation medium (for example, USB) and
   set the desired boot order. When booting an installer prior to
   `Proxmox Backup`_ Server version 3.1, Secure Boot needs to be
   disabled.

.. image:: images/screenshots/pbs-installer-grub-menu.png
  :target: _images/pbs-installer-grub-menu.png
  :align: right
  :alt: Proxmox Backup Server Installer GRUB Menu

After choosing the correct entry (for example, *Boot from USB*) the
Proxmox Backup Server menu will be displayed, and one of the following
options can be selected:

**Install Proxmox Backup Server (Graphical)**

Starts the normal installation.

TIP: It's possible to use the installation wizard with a keyboard only. Buttons
can be clicked by pressing the ``ALT`` key combined with the underlined character
from the respective button. For example, ``ALT + N`` to press a ``Next`` button.

**Install Proxmox Backup Server (Console)**

Starts the terminal-mode installation wizard. It provides the same overall
installation experience as the graphical installer, but has generally better
compatibility with very old and very new hardware.

**Install Proxmox Backup Server (Terminal UI, Serial Console)**

Starts the terminal-mode installation wizard, additionally setting up the Linux
kernel to use the (first) serial port of the machine for in- and output. This
can be used if the machine is completely headless and only has a serial console
available.

.. image:: images/screenshots/pbs-tui-installer.png
  :target: _images/pbs-tui-installer.png
  :align: right
  :alt: Proxmox Backup Server Terminal UI Installer

Both modes use the same code base for the actual installation process to
benefit from more than a decade of bug fixes and ensure feature parity.

TIP: The *Console* or *Terminal UI* option can be used in case the graphical
installer does not work correctly, due to e.g. driver issues. See also
:ref:`nomodeset_kernel_param`.

**Advanced Options: Install Proxmox Backup Server (Debug Mode)**

Starts the installation in debug mode. A console will be opened at several
installation steps. This helps to debug the situation if something goes wrong.
To exit a debug console, press ``CTRL-D``. This option can be used to boot a
live system with all basic tools available. You can use it, for example, to
repair a degraded ZFS *rpool* or fix the :ref:`chapter-systembooting` for an
existing Proxmox Backup Server setup.

**Advanced Options: Install Proxmox Backup Server (Terminal UI, Debug Mode)**

Same as the graphical debug mode, but preparing the system to run the
terminal-based installer instead.

**Advanced Options: Install Proxmox Backup Server (Serial Console Debug Mode)**

Same as the terminal-based debug mode, but additionally sets up the Linux kernel
to use the (first) serial port of the machine for in- and output.

**Advanced Options: Rescue Boot**

With this option you can boot an existing installation. It searches all attached
hard disks. If it finds an existing installation, it boots directly into that
disk using the Linux kernel from the ISO. This can be useful if there are
problems with the bootloader (GRUB/``systemd-boot``) or the BIOS/UEFI is unable
to read the boot block from the disk.

**Advanced Options: Test Memory (memtest86+)**

Runs *memtest86+*. This is useful to check if the memory is functional and free
of errors. Secure Boot must be turned off in the UEFI firmware setup utility to
run this option.

You normally select *Install Proxmox Backup Server (Graphical)* to start the
installation.

The first step is to read our EULA (End User License Agreement). Following this,
you can select the target hard disk(s) for the installation.

.. caution:: By default, the whole server is used and all existing data is
   removed. Make sure there is no important data on the server before proceeding
   with the installation.

The *Options* button lets you select the target file system, which defaults to
``ext4``. The installer uses LVM if you select ``ext4`` or ``xfs`` as a file
system, and offers additional options to restrict LVM space (see :ref:`below
<advanced_lvm_options>`).

.. image:: images/screenshots/pbs-installer-select-disk.png
  :target: _images/pbs-installer-select-disk.png
  :align: right
  :alt: Proxmox Backup Server Installer - Harddisk Selection Dialog

Proxmox Backup Server can also be installed on ZFS. As ZFS offers several
software RAID levels, this is an option for systems that don't have a hardware
RAID controller. The target disks must be selected in the *Options* dialog. More
ZFS specific settings can be changed under :ref:`Advanced Options
<advanced_zfs_options>`.

.. warning:: ZFS on top of any hardware RAID is not supported and can result in
   data loss.

.. image:: images/screenshots/pbs-installer-location.png
  :target: _images/pbs-installer-location.png
  :align: right
  :alt: Proxmox Backup Server Installer - Location and timezone configuration

The next page asks for basic configuration options like your location, time
zone, and keyboard layout. The location is used to select a nearby download
server, in order to increase the speed of updates. The installer is usually able
to auto-detect these settings, so you only need to change them in rare
situations when auto-detection fails, or when you want to use a keyboard layout
not commonly used in your country.

.. image:: images/screenshots/pbs-installer-password.png
  :target: _images/pbs-installer-password.png
  :align: left
  :alt: Proxmox Backup Server Installer - Password and email configuration

Next, the password of the superuser (``root``) and an email address need to be
specified. The password must consist of at least 8 characters. It's highly
recommended to use a stronger password. Some guidelines are:

- Use a minimum password length of at least 12 characters.

- Include lowercase and uppercase alphabetic characters, numbers, and symbols.

- Avoid character repetition, keyboard patterns, common dictionary words,
  letter or number sequences, usernames, relative or pet names, romantic links
  (current or past), and biographical information (for example ID numbers,
  ancestors' names or dates).

The email address is used to send notifications to the system administrator.
For example:

- Information about available package updates.

- Error messages from periodic *cron* jobs.

.. image:: images/screenshots/pbs-installer-network.png
  :target: _images/pbs-installer-network.png
  :align: right
  :alt: Proxmox Backup Server Installer - Network configuration

All those notification mails will be sent to the specified email address.

The last step is the network configuration. Network interfaces that are *UP*
show a filled circle in front of their name in the drop down menu. Please note
that during installation you can either specify an IPv4 or IPv6 address, but not
both. To configure a dual stack node, add additional IP addresses after the
installation.

.. image:: images/screenshots/pbs-installer-progress.png
  :target: _images/pbs-installer-progress.png
  :align: left
  :alt: Proxmox Backup Server Installer - Installation progress

The next step shows a summary of the previously selected options. Please
re-check every setting and use the *Previous* button if a setting needs to be
changed.

After clicking *Install*, the installer will begin to format the disks and copy
packages to the target disk(s). Please wait until this step has finished; then
remove the installation medium and restart your system.

.. image:: images/screenshots/pbs-installer-summary.png
  :target: _images/pbs-installer-summary.png
  :align: right
  :alt: Proxmox Backup Server Installer - Installation summary

Copying the packages usually takes several minutes, mostly depending on the
speed of the installation medium and the target disk performance.

When copying and setting up the packages has finished, you can reboot the
server. This will be done automatically after a few seconds by default.

Installation Failure
^^^^^^^^^^^^^^^^^^^^

If the installation failed, check out specific errors on the second TTY
(``CTRL + ALT + F2``) and ensure that the system meets the
:ref:`minimum requirements <minimum_system_requirements>`.

If the installation is still not working, look at the :ref:`how to get help
chapter <get_help>`.

Accessing the Management Interface Post-Installation
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. image:: images/screenshots/pbs-gui-login-window.png
  :target: _images/pbs-gui-login-window.png
  :align: right
  :alt: Proxmox Backup Server - Management interface login dialog

After a successful installation and reboot of the system you can use the Proxmox
Backup Server web interface for further configuration.

- Point your browser to the IP address given during the installation and port
  8007, for example: https://pbs.yourdomain.tld:8007

- Log in using the ``root`` (realm *Linux PAM standard authentication*) username
  and the password chosen during installation.

- Upload your subscription key to gain access to the Enterprise repository.
  Otherwise, you will need to set up one of the public, less tested package
  repositories to get updates for security fixes, bug fixes, and new features.

- Check the IP configuration and hostname.

- Check the timezone.
.. _advanced_lvm_options:

Advanced LVM Configuration Options
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

The installer creates a Volume Group (VG) called ``pbs``, and additional Logical
Volumes (LVs) called ``root`` and ``swap``, if ``ext4`` or ``xfs`` is used as
the filesystem. To control the size of these volumes, use:

- *hdsize*

  Defines the total hard disk size to be used. This way you can reserve free
  space on the hard disk for further partitioning.

- *swapsize*

  Defines the size of the ``swap`` volume. The default is the size of the
  installed memory, minimum 4 GB and maximum 8 GB. The resulting value cannot
  be greater than ``hdsize/8``.

  If set to ``0``, no ``swap`` volume will be created.

- *minfree*

  Defines the amount of free space that should be left in the LVM volume group
  ``pbs``. With more than 128GB storage available, the default is 16GB,
  otherwise ``hdsize/8`` will be used.

.. _advanced_zfs_options:

Advanced ZFS Configuration Options
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

The installer creates the ZFS pool ``rpool``, if ZFS is used. No swap space is
created but you can reserve some unpartitioned space on the install disks for
swap. You can also create a swap zvol after the installation, although this can
lead to problems (see :ref:`ZFS swap notes <zfs_swap>`).

- *ashift*

  Defines the *ashift* value for the created pool. The *ashift* needs to be
  set at least to the sector-size of the underlying disks (2 to the power of
  *ashift* is the sector-size), or any disk which might be put in the pool
  (for example the replacement of a defective disk).

- *compress*

  Defines whether compression is enabled for ``rpool``.

- *checksum*

  Defines which checksumming algorithm should be used for ``rpool``.

- *copies*

  Defines the *copies* parameter for ``rpool``. Check the ``zfs(8)`` manpage
  for the semantics, and why this does not replace redundancy on disk-level.

- *hdsize*

  Defines the total hard disk size to be used. This is useful to save free
  space on the hard disk(s) for further partitioning (for example, to create a
  swap partition). *hdsize* is only honored for bootable disks, that is only
  the first disk or mirror for RAID0, RAID1 or RAID10, and all disks in
  RAID-Z[123].

ZFS Performance Tips
^^^^^^^^^^^^^^^^^^^^

ZFS works best with a lot of memory. If you intend to use ZFS make sure to have
enough RAM available for it. A good calculation is 4GB plus 1GB RAM for each TB
of raw disk space.

ZFS can use a dedicated drive as write cache, called the ZFS Intent Log (ZIL).
Use a fast drive (SSD) for it. It can be added after installation with the
following command:

.. code-block:: console

  # zpool add <pool-name> log </dev/path_to_fast_ssd>

.. _nomodeset_kernel_param:

Adding the ``nomodeset`` Kernel Parameter
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Problems may arise on very old or very new hardware due to graphics drivers. If
the installation hangs during boot, you can try adding the ``nomodeset``
parameter. This prevents the Linux kernel from loading any graphics drivers and
forces it to continue using the BIOS/UEFI-provided framebuffer.

On the Proxmox Backup Server bootloader menu, navigate to *Install Proxmox
Backup Server (Console)* and press ``e`` to edit the entry. Using the arrow
keys, navigate to the line starting with ``linux``, move the cursor to the end
of that line and add the parameter ``nomodeset``, separated by a space from the
pre-existing last parameter.

Then press ``Ctrl-X`` or ``F10`` to boot the configuration.
@@ -2,6 +2,7 @@ include ../defines.mk
 
 UNITS := \
 	proxmox-backup-daily-update.timer \
+	removable-device-attach@.service
 
 DYNAMIC_UNITS := \
 	proxmox-backup-banner.service \
etc/removable-device-attach@.service (new file, 8 lines)
@@ -0,0 +1,8 @@
+[Unit]
+Description=Try to mount the removable device of a datastore with uuid '%i'.
+After=proxmox-backup-proxy.service
+Requires=proxmox-backup-proxy.service
+
+[Service]
+Type=simple
+ExecStart=/usr/sbin/proxmox-backup-manager datastore uuid-mount %i
@@ -10,7 +10,7 @@ use tokio::net::TcpStream;
 // Simple H2 client to test H2 download speed using h2server.rs
 
 struct Process {
-    body: h2::RecvStream,
+    body: h2::legacy::RecvStream,
     trailers: bool,
     bytes: usize,
 }
@@ -50,11 +50,11 @@ impl Future for Process {
 }
 
 fn send_request(
-    mut client: h2::client::SendRequest<bytes::Bytes>,
+    mut client: h2::legacy::client::SendRequest<bytes::Bytes>,
 ) -> impl Future<Output = Result<usize, Error>> {
     println!("sending request");
 
-    let request = http::Request::builder()
+    let request = hyper::http::Request::builder()
         .uri("http://localhost/")
         .body(())
         .unwrap();
@@ -78,7 +78,7 @@ async fn run() -> Result<(), Error> {
     let conn = TcpStream::connect(std::net::SocketAddr::from(([127, 0, 0, 1], 8008))).await?;
     conn.set_nodelay(true).unwrap();
 
-    let (client, h2) = h2::client::Builder::new()
+    let (client, h2) = h2::legacy::client::Builder::new()
         .initial_connection_window_size(1024 * 1024 * 1024)
         .initial_window_size(1024 * 1024 * 1024)
         .max_frame_size(4 * 1024 * 1024)
@@ -10,7 +10,7 @@ use tokio::net::TcpStream;
 // Simple H2 client to test H2 download speed using h2s-server.rs
 
 struct Process {
-    body: h2::RecvStream,
+    body: h2::legacy::RecvStream,
     trailers: bool,
     bytes: usize,
 }
@@ -50,11 +50,11 @@ impl Future for Process {
 }
 
 fn send_request(
-    mut client: h2::client::SendRequest<bytes::Bytes>,
+    mut client: h2::legacy::client::SendRequest<bytes::Bytes>,
 ) -> impl Future<Output = Result<usize, Error>> {
     println!("sending request");
 
-    let request = http::Request::builder()
+    let request = hyper::http::Request::builder()
         .uri("http://localhost/")
         .body(())
         .unwrap();
@@ -94,7 +94,7 @@ async fn run() -> Result<(), Error> {
         .await
         .map_err(|err| format_err!("connect failed - {}", err))?;
 
-    let (client, h2) = h2::client::Builder::new()
+    let (client, h2) = h2::legacy::client::Builder::new()
         .initial_connection_window_size(1024 * 1024 * 1024)
         .initial_window_size(1024 * 1024 * 1024)
         .max_frame_size(4 * 1024 * 1024)
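In both client examples the request and status types are now reached through hyper's re-export of the ``http`` crate, presumably so the examples no longer need ``http`` as a direct dependency. A minimal sketch of that pattern in isolation (``build_probe_request`` is an illustrative name, not part of the examples above):

.. code-block:: rust

   // Build a plain GET request via hyper's re-export of the http crate types,
   // mirroring the `http::` -> `hyper::http::` change in the hunks above.
   fn build_probe_request() -> hyper::http::Request<()> {
       hyper::http::Request::builder()
           .uri("http://localhost/")
           .body(())
           .unwrap()
   }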
@@ -8,6 +8,19 @@ use tokio::net::{TcpListener, TcpStream};
 
 use pbs_buildcfg::configdir;
 
+#[derive(Clone, Copy)]
+struct H2SExecutor;
+
+impl<Fut> hyper::rt::Executor<Fut> for H2SExecutor
+where
+    Fut: Future + Send + 'static,
+    Fut::Output: Send,
+{
+    fn execute(&self, fut: Fut) {
+        tokio::spawn(fut);
+    }
+}
+
 fn main() -> Result<(), Error> {
     proxmox_async::runtime::main(run())
 }
@@ -50,12 +63,11 @@ async fn handle_connection(socket: TcpStream, acceptor: Arc<SslAcceptor>) -> Res
 
     stream.as_mut().accept().await?;
 
-    let mut http = hyper::server::conn::Http::new();
-    http.http2_only(true);
+    let mut http = hyper::server::conn::http2::Builder::new(H2SExecutor);
     // increase window size: todo - find optiomal size
     let max_window_size = (1 << 31) - 2;
-    http.http2_initial_stream_window_size(max_window_size);
-    http.http2_initial_connection_window_size(max_window_size);
+    http.initial_stream_window_size(max_window_size);
+    http.initial_connection_window_size(max_window_size);
 
     let service = hyper::service::service_fn(|_req: Request<Body>| {
         println!("Got request");
@@ -63,8 +75,11 @@ async fn handle_connection(socket: TcpStream, acceptor: Arc<SslAcceptor>) -> Res
         let body = Body::from(buffer);
 
         let response = Response::builder()
-            .status(http::StatusCode::OK)
-            .header(http::header::CONTENT_TYPE, "application/octet-stream")
+            .status(hyper::http::StatusCode::OK)
+            .header(
+                hyper::http::header::CONTENT_TYPE,
+                "application/octet-stream",
+            )
             .body(body)
             .unwrap();
         future::ok::<_, Error>(response)
@@ -1,9 +1,24 @@
+use std::future::Future;
+
 use anyhow::Error;
 use futures::*;
 use hyper::{Body, Request, Response};
 
 use tokio::net::{TcpListener, TcpStream};
 
+#[derive(Clone, Copy)]
+struct H2Executor;
+
+impl<Fut> hyper::rt::Executor<Fut> for H2Executor
+where
+    Fut: Future + Send + 'static,
+    Fut::Output: Send,
+{
+    fn execute(&self, fut: Fut) {
+        tokio::spawn(fut);
+    }
+}
+
 fn main() -> Result<(), Error> {
     proxmox_async::runtime::main(run())
 }
@@ -26,12 +41,11 @@ async fn run() -> Result<(), Error> {
 async fn handle_connection(socket: TcpStream) -> Result<(), Error> {
     socket.set_nodelay(true).unwrap();
 
-    let mut http = hyper::server::conn::Http::new();
-    http.http2_only(true);
+    let mut http = hyper::server::conn::http2::Builder::new(H2Executor);
     // increase window size: todo - find optiomal size
     let max_window_size = (1 << 31) - 2;
-    http.http2_initial_stream_window_size(max_window_size);
-    http.http2_initial_connection_window_size(max_window_size);
+    http.initial_stream_window_size(max_window_size);
+    http.initial_connection_window_size(max_window_size);
 
     let service = hyper::service::service_fn(|_req: Request<Body>| {
         println!("Got request");
@@ -39,8 +53,11 @@ async fn handle_connection(socket: TcpStream) -> Result<(), Error> {
         let body = Body::from(buffer);
 
         let response = Response::builder()
-            .status(http::StatusCode::OK)
-            .header(http::header::CONTENT_TYPE, "application/octet-stream")
+            .status(hyper::http::StatusCode::OK)
+            .header(
+                hyper::http::header::CONTENT_TYPE,
+                "application/octet-stream",
+            )
             .body(body)
             .unwrap();
         future::ok::<_, Error>(response)
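Both server examples now have to hand the ``http2`` connection builder an executor, and the hunks above implement it identically. Condensed into a standalone sketch (``SpawnOnTokio`` is an illustrative stand-in for the ``H2Executor``/``H2SExecutor`` structs above):

.. code-block:: rust

   use std::future::Future;

   // hyper's http2 server builder does not spawn its internal tasks itself;
   // it delegates that to whatever implements hyper::rt::Executor.
   #[derive(Clone, Copy)]
   struct SpawnOnTokio;

   impl<Fut> hyper::rt::Executor<Fut> for SpawnOnTokio
   where
       Fut: Future + Send + 'static,
       Fut::Output: Send,
   {
       fn execute(&self, fut: Fut) {
           // Spawn onto the tokio runtime driving the example.
           tokio::spawn(fut);
       }
   }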
@@ -1,9 +1,10 @@
+use std::str::FromStr;
+
 use anyhow::Error;
 use futures::*;
 
-extern crate proxmox_backup;
-
 use pbs_client::ChunkStream;
+use proxmox_human_byte::HumanByte;
 
 // Test Chunker with real data read from a file.
 //
@@ -21,9 +22,19 @@ fn main() {
 async fn run() -> Result<(), Error> {
     let file = tokio::fs::File::open("random-test.dat").await?;
 
-    let stream = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
-        .map_ok(|bytes| bytes.to_vec())
-        .map_err(Error::from);
+    let mut args = std::env::args();
+    args.next();
+
+    let buffer_size = args.next().unwrap_or("8k".to_string());
+    let buffer_size = HumanByte::from_str(&buffer_size)?;
+    println!("Using buffer size {buffer_size}");
+
+    let stream = tokio_util::codec::FramedRead::with_capacity(
+        file,
+        tokio_util::codec::BytesCodec::new(),
+        buffer_size.as_u64() as usize,
+    )
+    .map_err(Error::from);
 
     //let chunk_stream = FixedChunkStream::new(stream, 4*1024*1024);
     let mut chunk_stream = ChunkStream::new(stream, None, None, None);
@@ -40,7 +51,7 @@ async fn run() -> Result<(), Error> {
         repeat += 1;
         stream_len += chunk.len();
 
-        println!("Got chunk {}", chunk.len());
+        //println!("Got chunk {}", chunk.len());
     }
 
     let speed =
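The chunker benchmark above now takes its read-buffer size from the command line instead of using the codec default. A minimal sketch of the same pattern in isolation, assuming the ``proxmox-human-byte`` and ``tokio-util`` crates already used by the example (``open_chunk_source`` is an illustrative name):

.. code-block:: rust

   use std::str::FromStr;

   use anyhow::Error;
   use proxmox_human_byte::HumanByte;
   use tokio_util::codec::{BytesCodec, FramedRead};

   // Open `path` and return a byte stream that reads in chunks of at most
   // `size` bytes, where `size` is a human-readable value such as "8k" or "4M".
   async fn open_chunk_source(
       path: &str,
       size: &str,
   ) -> Result<FramedRead<tokio::fs::File, BytesCodec>, Error> {
       let buffer_size = HumanByte::from_str(size)?;
       let file = tokio::fs::File::open(path).await?;
       Ok(FramedRead::with_capacity(
           file,
           BytesCodec::new(),
           buffer_size.as_u64() as usize,
       ))
   }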
@@ -1,24 +0,0 @@
-[package]
-name = "pbs-api-types"
-version = "0.1.0"
-authors.workspace = true
-edition.workspace = true
-description = "general API type helpers for PBS"
-
-[dependencies]
-anyhow.workspace = true
-const_format.workspace = true
-hex.workspace = true
-lazy_static.workspace = true
-percent-encoding.workspace = true
-regex.workspace = true
-serde.workspace = true
-serde_plain.workspace = true
-
-proxmox-auth-api = { workspace = true, features = [ "api-types" ] }
-proxmox-human-byte.workspace = true
-proxmox-lang.workspace=true
-proxmox-schema = { workspace = true, features = [ "api-macro" ] }
-proxmox-serde.workspace = true
-proxmox-time.workspace = true
-proxmox-uuid = { workspace = true, features = [ "serde" ] }
@ -1,294 +0,0 @@
|
|||||||
use std::str::FromStr;
|
|
||||||
|
|
||||||
use const_format::concatcp;
|
|
||||||
use serde::de::{value, IntoDeserializer};
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
|
|
||||||
use proxmox_lang::constnamedbitmap;
|
|
||||||
use proxmox_schema::{
|
|
||||||
api, const_regex, ApiStringFormat, BooleanSchema, EnumEntry, Schema, StringSchema,
|
|
||||||
};
|
|
||||||
|
|
||||||
use crate::PROXMOX_SAFE_ID_REGEX_STR;
|
|
||||||
|
|
||||||
const_regex! {
|
|
||||||
pub ACL_PATH_REGEX = concatcp!(r"^(?:/|", r"(?:/", PROXMOX_SAFE_ID_REGEX_STR, ")+", r")$");
|
|
||||||
}
|
|
||||||
|
|
||||||
// define Privilege bitfield
|
|
||||||
|
|
||||||
constnamedbitmap! {
|
|
||||||
/// Contains a list of privilege name to privilege value mappings.
|
|
||||||
///
|
|
||||||
/// The names are used when displaying/persisting privileges anywhere, the values are used to
|
|
||||||
/// allow easy matching of privileges as bitflags.
|
|
||||||
PRIVILEGES: u64 => {
|
|
||||||
/// Sys.Audit allows knowing about the system and its status
|
|
||||||
PRIV_SYS_AUDIT("Sys.Audit");
|
|
||||||
/// Sys.Modify allows modifying system-level configuration
|
|
||||||
PRIV_SYS_MODIFY("Sys.Modify");
|
|
||||||
/// Sys.Modify allows to poweroff/reboot/.. the system
|
|
||||||
PRIV_SYS_POWER_MANAGEMENT("Sys.PowerManagement");
|
|
||||||
|
|
||||||
/// Datastore.Audit allows knowing about a datastore,
|
|
||||||
/// including reading the configuration entry and listing its contents
|
|
||||||
PRIV_DATASTORE_AUDIT("Datastore.Audit");
|
|
||||||
/// Datastore.Allocate allows creating or deleting datastores
|
|
||||||
PRIV_DATASTORE_ALLOCATE("Datastore.Allocate");
|
|
||||||
/// Datastore.Modify allows modifying a datastore and its contents
|
|
||||||
PRIV_DATASTORE_MODIFY("Datastore.Modify");
|
|
||||||
/// Datastore.Read allows reading arbitrary backup contents
|
|
||||||
PRIV_DATASTORE_READ("Datastore.Read");
|
|
||||||
/// Allows verifying a datastore
|
|
||||||
PRIV_DATASTORE_VERIFY("Datastore.Verify");
|
|
||||||
|
|
||||||
/// Datastore.Backup allows Datastore.Read|Verify and creating new snapshots,
|
|
||||||
/// but also requires backup ownership
|
|
||||||
PRIV_DATASTORE_BACKUP("Datastore.Backup");
|
|
||||||
/// Datastore.Prune allows deleting snapshots,
|
|
||||||
/// but also requires backup ownership
|
|
||||||
PRIV_DATASTORE_PRUNE("Datastore.Prune");
|
|
||||||
|
|
||||||
/// Permissions.Modify allows modifying ACLs
|
|
||||||
PRIV_PERMISSIONS_MODIFY("Permissions.Modify");
|
|
||||||
|
|
||||||
/// Remote.Audit allows reading remote.cfg and sync.cfg entries
|
|
||||||
PRIV_REMOTE_AUDIT("Remote.Audit");
|
|
||||||
/// Remote.Modify allows modifying remote.cfg
|
|
||||||
PRIV_REMOTE_MODIFY("Remote.Modify");
|
|
||||||
/// Remote.Read allows reading data from a configured `Remote`
|
|
||||||
PRIV_REMOTE_READ("Remote.Read");
|
|
||||||
|
|
||||||
/// Sys.Console allows access to the system's console
|
|
||||||
PRIV_SYS_CONSOLE("Sys.Console");
|
|
||||||
|
|
||||||
/// Tape.Audit allows reading tape backup configuration and status
|
|
||||||
PRIV_TAPE_AUDIT("Tape.Audit");
|
|
||||||
/// Tape.Modify allows modifying tape backup configuration
|
|
||||||
PRIV_TAPE_MODIFY("Tape.Modify");
|
|
||||||
/// Tape.Write allows writing tape media
|
|
||||||
PRIV_TAPE_WRITE("Tape.Write");
|
|
||||||
/// Tape.Read allows reading tape backup configuration and media contents
|
|
||||||
PRIV_TAPE_READ("Tape.Read");
|
|
||||||
|
|
||||||
/// Realm.Allocate allows viewing, creating, modifying and deleting realms
|
|
||||||
PRIV_REALM_ALLOCATE("Realm.Allocate");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn privs_to_priv_names(privs: u64) -> Vec<&'static str> {
|
|
||||||
PRIVILEGES
|
|
||||||
.iter()
|
|
||||||
.fold(Vec::new(), |mut priv_names, (name, value)| {
|
|
||||||
if value & privs != 0 {
|
|
||||||
priv_names.push(name);
|
|
||||||
}
|
|
||||||
priv_names
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Admin always has all privileges. It can do everything except a few actions
|
|
||||||
/// which are limited to the 'root@pam` superuser
|
|
||||||
pub const ROLE_ADMIN: u64 = u64::MAX;
|
|
||||||
|
|
||||||
/// NoAccess can be used to remove privileges from specific (sub-)paths
|
|
||||||
pub const ROLE_NO_ACCESS: u64 = 0;
|
|
||||||
|
|
||||||
#[rustfmt::skip]
|
|
||||||
#[allow(clippy::identity_op)]
|
|
||||||
/// Audit can view configuration and status information, but not modify it.
|
|
||||||
pub const ROLE_AUDIT: u64 = 0
|
|
||||||
| PRIV_SYS_AUDIT
|
|
||||||
| PRIV_DATASTORE_AUDIT;
|
|
||||||
|
|
||||||
#[rustfmt::skip]
|
|
||||||
#[allow(clippy::identity_op)]
|
|
||||||
/// Datastore.Admin can do anything on the datastore.
|
|
||||||
pub const ROLE_DATASTORE_ADMIN: u64 = 0
|
|
||||||
| PRIV_DATASTORE_AUDIT
|
|
||||||
| PRIV_DATASTORE_MODIFY
|
|
||||||
| PRIV_DATASTORE_READ
|
|
||||||
| PRIV_DATASTORE_VERIFY
|
|
||||||
| PRIV_DATASTORE_BACKUP
|
|
||||||
| PRIV_DATASTORE_PRUNE;
|
|
||||||
|
|
||||||
#[rustfmt::skip]
|
|
||||||
#[allow(clippy::identity_op)]
|
|
||||||
/// Datastore.Reader can read/verify datastore content and do restore
|
|
||||||
pub const ROLE_DATASTORE_READER: u64 = 0
|
|
||||||
| PRIV_DATASTORE_AUDIT
|
|
||||||
| PRIV_DATASTORE_VERIFY
|
|
||||||
| PRIV_DATASTORE_READ;
|
|
||||||
|
|
||||||
#[rustfmt::skip]
|
|
||||||
#[allow(clippy::identity_op)]
|
|
||||||
/// Datastore.Backup can do backup and restore, but no prune.
|
|
||||||
pub const ROLE_DATASTORE_BACKUP: u64 = 0
|
|
||||||
| PRIV_DATASTORE_BACKUP;
|
|
||||||
|
|
||||||
#[rustfmt::skip]
|
|
||||||
#[allow(clippy::identity_op)]
|
|
||||||
/// Datastore.PowerUser can do backup, restore, and prune.
|
|
||||||
pub const ROLE_DATASTORE_POWERUSER: u64 = 0
|
|
||||||
| PRIV_DATASTORE_PRUNE
|
|
||||||
| PRIV_DATASTORE_BACKUP;
|
|
||||||
|
|
||||||
#[rustfmt::skip]
|
|
||||||
#[allow(clippy::identity_op)]
|
|
||||||
/// Datastore.Audit can audit the datastore.
|
|
||||||
pub const ROLE_DATASTORE_AUDIT: u64 = 0
|
|
||||||
| PRIV_DATASTORE_AUDIT;
|
|
||||||
|
|
||||||
#[rustfmt::skip]
|
|
||||||
#[allow(clippy::identity_op)]
|
|
||||||
/// Remote.Audit can audit the remote
|
|
||||||
pub const ROLE_REMOTE_AUDIT: u64 = 0
|
|
||||||
| PRIV_REMOTE_AUDIT;
|
|
||||||
|
|
||||||
#[rustfmt::skip]
|
|
||||||
#[allow(clippy::identity_op)]
|
|
||||||
/// Remote.Admin can do anything on the remote.
|
|
||||||
pub const ROLE_REMOTE_ADMIN: u64 = 0
|
|
||||||
| PRIV_REMOTE_AUDIT
|
|
||||||
| PRIV_REMOTE_MODIFY
|
|
||||||
| PRIV_REMOTE_READ;
|
|
||||||
|
|
||||||
#[rustfmt::skip]
|
|
||||||
#[allow(clippy::identity_op)]
|
|
||||||
/// Remote.SyncOperator can do read and prune on the remote.
|
|
||||||
pub const ROLE_REMOTE_SYNC_OPERATOR: u64 = 0
|
|
||||||
| PRIV_REMOTE_AUDIT
|
|
||||||
| PRIV_REMOTE_READ;
|
|
||||||
|
|
||||||
#[rustfmt::skip]
|
|
||||||
#[allow(clippy::identity_op)]
|
|
||||||
/// Tape.Audit can audit the tape backup configuration and media content
|
|
||||||
pub const ROLE_TAPE_AUDIT: u64 = 0
|
|
||||||
| PRIV_TAPE_AUDIT;
|
|
||||||
|
|
||||||
#[rustfmt::skip]
|
|
||||||
#[allow(clippy::identity_op)]
|
|
||||||
/// Tape.Admin can do anything on the tape backup
|
|
||||||
pub const ROLE_TAPE_ADMIN: u64 = 0
|
|
||||||
| PRIV_TAPE_AUDIT
|
|
||||||
| PRIV_TAPE_MODIFY
|
|
||||||
| PRIV_TAPE_READ
|
|
||||||
| PRIV_TAPE_WRITE;
|
|
||||||
|
|
||||||
#[rustfmt::skip]
|
|
||||||
#[allow(clippy::identity_op)]
|
|
||||||
/// Tape.Operator can do tape backup and restore (but no configuration changes)
|
|
||||||
pub const ROLE_TAPE_OPERATOR: u64 = 0
|
|
||||||
| PRIV_TAPE_AUDIT
|
|
||||||
| PRIV_TAPE_READ
|
|
||||||
| PRIV_TAPE_WRITE;
|
|
||||||
|
|
||||||
#[rustfmt::skip]
|
|
||||||
#[allow(clippy::identity_op)]
|
|
||||||
/// Tape.Reader can do read and inspect tape content
|
|
||||||
pub const ROLE_TAPE_READER: u64 = 0
|
|
||||||
| PRIV_TAPE_AUDIT
|
|
||||||
| PRIV_TAPE_READ;
|
|
||||||
|
|
||||||
/// NoAccess can be used to remove privileges from specific (sub-)paths
|
|
||||||
pub const ROLE_NAME_NO_ACCESS: &str = "NoAccess";
|
|
||||||
|
|
||||||
#[api(
|
|
||||||
type_text: "<role>",
|
|
||||||
)]
|
|
||||||
#[repr(u64)]
|
|
||||||
#[derive(Serialize, Deserialize)]
|
|
||||||
/// Enum representing roles via their [PRIVILEGES] combination.
|
|
||||||
///
|
|
||||||
/// Since privileges are implemented as bitflags, each unique combination of privileges maps to a
|
|
||||||
/// single, unique `u64` value that is used in this enum definition.
|
|
||||||
pub enum Role {
|
|
||||||
/// Administrator
|
|
||||||
Admin = ROLE_ADMIN,
|
|
||||||
/// Auditor
|
|
||||||
Audit = ROLE_AUDIT,
|
|
||||||
/// Disable Access
|
|
||||||
NoAccess = ROLE_NO_ACCESS,
|
|
||||||
/// Datastore Administrator
|
|
||||||
DatastoreAdmin = ROLE_DATASTORE_ADMIN,
|
|
||||||
/// Datastore Reader (inspect datastore content and do restores)
|
|
||||||
DatastoreReader = ROLE_DATASTORE_READER,
|
|
||||||
/// Datastore Backup (backup and restore owned backups)
|
|
||||||
DatastoreBackup = ROLE_DATASTORE_BACKUP,
|
|
||||||
/// Datastore PowerUser (backup, restore and prune owned backup)
|
|
||||||
DatastorePowerUser = ROLE_DATASTORE_POWERUSER,
|
|
||||||
/// Datastore Auditor
|
|
||||||
DatastoreAudit = ROLE_DATASTORE_AUDIT,
|
|
||||||
/// Remote Auditor
|
|
||||||
RemoteAudit = ROLE_REMOTE_AUDIT,
|
|
||||||
/// Remote Administrator
|
|
||||||
RemoteAdmin = ROLE_REMOTE_ADMIN,
|
|
||||||
/// Syncronisation Opertator
|
|
||||||
RemoteSyncOperator = ROLE_REMOTE_SYNC_OPERATOR,
|
|
||||||
/// Tape Auditor
|
|
||||||
TapeAudit = ROLE_TAPE_AUDIT,
|
|
||||||
/// Tape Administrator
|
|
||||||
TapeAdmin = ROLE_TAPE_ADMIN,
|
|
||||||
/// Tape Operator
|
|
||||||
TapeOperator = ROLE_TAPE_OPERATOR,
|
|
||||||
/// Tape Reader
|
|
||||||
TapeReader = ROLE_TAPE_READER,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl FromStr for Role {
|
|
||||||
type Err = value::Error;
|
|
||||||
|
|
||||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
|
||||||
Self::deserialize(s.into_deserializer())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub const ACL_PATH_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&ACL_PATH_REGEX);
|
|
||||||
|
|
||||||
pub const ACL_PATH_SCHEMA: Schema = StringSchema::new("Access control path.")
|
|
||||||
.format(&ACL_PATH_FORMAT)
|
|
||||||
.min_length(1)
|
|
||||||
.max_length(128)
|
|
||||||
.schema();
|
|
||||||
|
|
||||||
pub const ACL_PROPAGATE_SCHEMA: Schema =
|
|
||||||
BooleanSchema::new("Allow to propagate (inherit) permissions.")
|
|
||||||
.default(true)
|
|
||||||
.schema();
|
|
||||||
|
|
||||||
pub const ACL_UGID_TYPE_SCHEMA: Schema = StringSchema::new("Type of 'ugid' property.")
|
|
||||||
.format(&ApiStringFormat::Enum(&[
|
|
||||||
EnumEntry::new("user", "User"),
|
|
||||||
EnumEntry::new("group", "Group"),
|
|
||||||
]))
|
|
||||||
.schema();
|
|
||||||
|
|
||||||
#[api(
|
|
||||||
properties: {
|
|
||||||
propagate: {
|
|
||||||
schema: ACL_PROPAGATE_SCHEMA,
|
|
||||||
},
|
|
||||||
path: {
|
|
||||||
schema: ACL_PATH_SCHEMA,
|
|
||||||
},
|
|
||||||
ugid_type: {
|
|
||||||
schema: ACL_UGID_TYPE_SCHEMA,
|
|
||||||
},
|
|
||||||
ugid: {
|
|
||||||
type: String,
|
|
||||||
description: "User or Group ID.",
|
|
||||||
},
|
|
||||||
roleid: {
|
|
||||||
type: Role,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
)]
|
|
||||||
#[derive(Serialize, Deserialize, Clone, PartialEq)]
|
|
||||||
/// ACL list entry.
|
|
||||||
pub struct AclListItem {
|
|
||||||
pub path: String,
|
|
||||||
pub ugid: String,
|
|
||||||
pub ugid_type: String,
|
|
||||||
pub propagate: bool,
|
|
||||||
pub roleid: String,
|
|
||||||
}
|
|
@ -1,98 +0,0 @@
|
|||||||
use serde::{Deserialize, Serialize};
|
|
||||||
|
|
||||||
use proxmox_schema::{api, Updater};
|
|
||||||
|
|
||||||
use super::{
|
|
||||||
LdapMode, LDAP_DOMAIN_SCHEMA, REALM_ID_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA,
|
|
||||||
SYNC_ATTRIBUTES_SCHEMA, SYNC_DEFAULTS_STRING_SCHEMA, USER_CLASSES_SCHEMA,
|
|
||||||
};
|
|
||||||
|
|
||||||
#[api(
|
|
||||||
properties: {
|
|
||||||
"realm": {
|
|
||||||
schema: REALM_ID_SCHEMA,
|
|
||||||
},
|
|
||||||
"comment": {
|
|
||||||
optional: true,
|
|
||||||
schema: SINGLE_LINE_COMMENT_SCHEMA,
|
|
||||||
},
|
|
||||||
"verify": {
|
|
||||||
optional: true,
|
|
||||||
default: false,
|
|
||||||
},
|
|
||||||
"sync-defaults-options": {
|
|
||||||
schema: SYNC_DEFAULTS_STRING_SCHEMA,
|
|
||||||
optional: true,
|
|
||||||
},
|
|
||||||
"sync-attributes": {
|
|
||||||
schema: SYNC_ATTRIBUTES_SCHEMA,
|
|
||||||
optional: true,
|
|
||||||
},
|
|
||||||
"user-classes" : {
|
|
||||||
optional: true,
|
|
||||||
schema: USER_CLASSES_SCHEMA,
|
|
||||||
},
|
|
||||||
"base-dn" : {
|
|
||||||
schema: LDAP_DOMAIN_SCHEMA,
|
|
||||||
optional: true,
|
|
||||||
},
|
|
||||||
"bind-dn" : {
|
|
||||||
schema: LDAP_DOMAIN_SCHEMA,
|
|
||||||
optional: true,
|
|
||||||
}
|
|
||||||
},
|
|
||||||
)]
|
|
||||||
#[derive(Serialize, Deserialize, Updater, Clone)]
|
|
||||||
#[serde(rename_all = "kebab-case")]
|
|
||||||
/// AD realm configuration properties.
|
|
||||||
pub struct AdRealmConfig {
|
|
||||||
#[updater(skip)]
|
|
||||||
pub realm: String,
|
|
||||||
/// AD server address
|
|
||||||
pub server1: String,
|
|
||||||
/// Fallback AD server address
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
pub server2: Option<String>,
|
|
||||||
/// AD server Port
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
pub port: Option<u16>,
|
|
||||||
/// Base domain name. Users are searched under this domain using a `subtree search`.
|
|
||||||
/// Expected to be set only internally to `defaultNamingContext` of the AD server, but can be
|
|
||||||
/// overridden if the need arises.
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
pub base_dn: Option<String>,
|
|
||||||
/// Comment
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
pub comment: Option<String>,
|
|
||||||
/// Connection security
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
pub mode: Option<LdapMode>,
|
|
||||||
/// Verify server certificate
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
pub verify: Option<bool>,
|
|
||||||
/// CA certificate to use for the server. The path can point to
|
|
||||||
/// either a file, or a directory. If it points to a file,
|
|
||||||
/// the PEM-formatted X.509 certificate stored at the path
|
|
||||||
/// will be added as a trusted certificate.
|
|
||||||
/// If the path points to a directory,
|
|
||||||
/// the directory replaces the system's default certificate
|
|
||||||
/// store at `/etc/ssl/certs` - Every file in the directory
|
|
||||||
/// will be loaded as a trusted certificate.
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
pub capath: Option<String>,
|
|
||||||
/// Bind domain to use for looking up users
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
pub bind_dn: Option<String>,
|
|
||||||
/// Custom LDAP search filter for user sync
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
pub filter: Option<String>,
|
|
||||||
/// Default options for AD sync
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
pub sync_defaults_options: Option<String>,
|
|
||||||
/// List of LDAP attributes to sync from AD to user config
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
pub sync_attributes: Option<String>,
|
|
||||||
/// User ``objectClass`` classes to sync
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
pub user_classes: Option<String>,
|
|
||||||
}
|
|
@ -1,95 +0,0 @@
|
|||||||
use std::fmt::{self, Display};
|
|
||||||
|
|
||||||
use anyhow::Error;
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
|
|
||||||
use proxmox_schema::api;
|
|
||||||
|
|
||||||
#[api(default: "encrypt")]
|
|
||||||
#[derive(Copy, Clone, Debug, Eq, PartialEq, Deserialize, Serialize)]
|
|
||||||
#[serde(rename_all = "kebab-case")]
|
|
||||||
/// Defines whether data is encrypted (using an AEAD cipher), only signed, or neither.
|
|
||||||
pub enum CryptMode {
|
|
||||||
/// Don't encrypt.
|
|
||||||
None,
|
|
||||||
/// Encrypt.
|
|
||||||
Encrypt,
|
|
||||||
/// Only sign.
|
|
||||||
SignOnly,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Eq, PartialEq, Hash, Clone, Deserialize, Serialize)]
|
|
||||||
#[serde(transparent)]
|
|
||||||
/// 32-byte fingerprint, usually calculated with SHA256.
|
|
||||||
pub struct Fingerprint {
|
|
||||||
#[serde(with = "bytes_as_fingerprint")]
|
|
||||||
bytes: [u8; 32],
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Fingerprint {
|
|
||||||
pub fn new(bytes: [u8; 32]) -> Self {
|
|
||||||
Self { bytes }
|
|
||||||
}
|
|
||||||
pub fn bytes(&self) -> &[u8; 32] {
|
|
||||||
&self.bytes
|
|
||||||
}
|
|
||||||
pub fn signature(&self) -> String {
|
|
||||||
as_fingerprint(&self.bytes)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Display as short key ID
|
|
||||||
impl Display for Fingerprint {
|
|
||||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
|
||||||
write!(f, "{}", as_fingerprint(&self.bytes[0..8]))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl std::str::FromStr for Fingerprint {
|
|
||||||
type Err = Error;
|
|
||||||
|
|
||||||
fn from_str(s: &str) -> Result<Self, Error> {
|
|
||||||
let mut tmp = s.to_string();
|
|
||||||
tmp.retain(|c| c != ':');
|
|
||||||
let mut bytes = [0u8; 32];
|
|
||||||
hex::decode_to_slice(&tmp, &mut bytes)?;
|
|
||||||
Ok(Fingerprint::new(bytes))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn as_fingerprint(bytes: &[u8]) -> String {
|
|
||||||
hex::encode(bytes)
|
|
||||||
.as_bytes()
|
|
||||||
.chunks(2)
|
|
||||||
.map(|v| unsafe { std::str::from_utf8_unchecked(v) }) // it's a hex string
|
|
||||||
.collect::<Vec<&str>>()
|
|
||||||
.join(":")
|
|
||||||
}
|
|
||||||
|
|
||||||
pub mod bytes_as_fingerprint {
|
|
||||||
use std::mem::MaybeUninit;
|
|
||||||
|
|
||||||
use serde::{Deserialize, Deserializer, Serializer};
|
|
||||||
|
|
||||||
pub fn serialize<S>(bytes: &[u8; 32], serializer: S) -> Result<S::Ok, S::Error>
|
|
||||||
where
|
|
||||||
S: Serializer,
|
|
||||||
{
|
|
||||||
let s = super::as_fingerprint(bytes);
|
|
||||||
serializer.serialize_str(&s)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn deserialize<'de, D>(deserializer: D) -> Result<[u8; 32], D::Error>
|
|
||||||
where
|
|
||||||
D: Deserializer<'de>,
|
|
||||||
{
|
|
||||||
// TODO: more efficiently implement with a Visitor implementing visit_str using split() and
|
|
||||||
// hex::decode by-byte
|
|
||||||
let mut s = String::deserialize(deserializer)?;
|
|
||||||
s.retain(|c| c != ':');
|
|
||||||
let mut out = MaybeUninit::<[u8; 32]>::uninit();
|
|
||||||
hex::decode_to_slice(s.as_bytes(), unsafe { &mut (*out.as_mut_ptr())[..] })
|
|
||||||
.map_err(serde::de::Error::custom)?;
|
|
||||||
Ok(unsafe { out.assume_init() })
|
|
||||||
}
|
|
||||||
}
|
|
@@ -1,30 +0,0 @@
-use serde::{Deserialize, Serialize};
-
-use proxmox_schema::api;
-
-#[api]
-#[derive(Serialize, Deserialize)]
-#[serde(rename_all = "kebab-case")]
-/// General status information about a running VM file-restore daemon
-pub struct RestoreDaemonStatus {
-    /// VM uptime in seconds
-    pub uptime: i64,
-    /// time left until auto-shutdown, keep in mind that this is useless when 'keep-timeout' is
-    /// not set, as then the status call will have reset the timer before returning the value
-    pub timeout: i64,
-}
-
-#[api]
-#[derive(Serialize, Deserialize, PartialEq, Eq)]
-#[serde(rename_all = "kebab-case")]
-/// The desired format of the result.
-pub enum FileRestoreFormat {
-    /// Plain file (only works for single files)
-    Plain,
-    /// PXAR archive
-    Pxar,
-    /// ZIP archive
-    Zip,
-    /// TAR archive
-    Tar,
-}
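As a side note on the ``rename_all = "kebab-case"`` attribute used throughout these removed types: it determines the on-the-wire spelling of the variants. A minimal sketch of the effect, assuming ``serde_plain`` from the crate's dependency list above (``Format`` is an illustrative stand-in for ``FileRestoreFormat``):

.. code-block:: rust

   use serde::{Deserialize, Serialize};

   #[derive(Serialize, Deserialize, PartialEq, Eq, Debug)]
   #[serde(rename_all = "kebab-case")]
   enum Format {
       Plain,
       Pxar,
       Zip,
       Tar,
   }

   fn main() -> Result<(), serde_plain::Error> {
       // Variants serialize to their kebab-case names, e.g. Pxar -> "pxar".
       assert_eq!(serde_plain::to_string(&Format::Pxar)?, "pxar");
       assert_eq!(serde_plain::from_str::<Format>("zip")?, Format::Zip);
       Ok(())
   }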
@ -1,799 +0,0 @@
|
|||||||
use std::str::FromStr;
|
|
||||||
|
|
||||||
use anyhow::bail;
|
|
||||||
use const_format::concatcp;
|
|
||||||
use regex::Regex;
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
|
|
||||||
use proxmox_schema::*;
|
|
||||||
|
|
||||||
use crate::{
|
|
||||||
Authid, BackupNamespace, BackupType, NotificationMode, RateLimitConfig, Userid,
|
|
||||||
BACKUP_GROUP_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_NS_RE, DATASTORE_SCHEMA,
|
|
||||||
DRIVE_NAME_SCHEMA, MEDIA_POOL_NAME_SCHEMA, NS_MAX_DEPTH_REDUCED_SCHEMA, PROXMOX_SAFE_ID_FORMAT,
|
|
||||||
PROXMOX_SAFE_ID_REGEX_STR, REMOTE_ID_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA,
|
|
||||||
};
|
|
||||||
|
|
||||||
const_regex! {
|
|
||||||
|
|
||||||
/// Regex for verification jobs 'DATASTORE:ACTUAL_JOB_ID'
|
|
||||||
pub VERIFICATION_JOB_WORKER_ID_REGEX = concatcp!(r"^(", PROXMOX_SAFE_ID_REGEX_STR, r"):");
|
|
||||||
/// Regex for sync jobs '(REMOTE|\-):REMOTE_DATASTORE:LOCAL_DATASTORE:(?:LOCAL_NS_ANCHOR:)ACTUAL_JOB_ID'
|
|
||||||
pub SYNC_JOB_WORKER_ID_REGEX = concatcp!(r"^(", PROXMOX_SAFE_ID_REGEX_STR, r"|\-):(", PROXMOX_SAFE_ID_REGEX_STR, r"):(", PROXMOX_SAFE_ID_REGEX_STR, r")(?::(", BACKUP_NS_RE, r"))?:");
|
|
||||||
}
|
|
||||||
|
|
||||||
pub const JOB_ID_SCHEMA: Schema = StringSchema::new("Job ID.")
|
|
||||||
.format(&PROXMOX_SAFE_ID_FORMAT)
|
|
||||||
.min_length(3)
|
|
||||||
.max_length(32)
|
|
||||||
.schema();
|
|
||||||
|
|
||||||
pub const SYNC_SCHEDULE_SCHEMA: Schema = StringSchema::new("Run sync job at specified schedule.")
|
|
||||||
.format(&ApiStringFormat::VerifyFn(
|
|
||||||
proxmox_time::verify_calendar_event,
|
|
||||||
))
|
|
||||||
.type_text("<calendar-event>")
|
|
||||||
.schema();
|
|
||||||
|
|
||||||
pub const GC_SCHEDULE_SCHEMA: Schema =
|
|
||||||
StringSchema::new("Run garbage collection job at specified schedule.")
|
|
||||||
.format(&ApiStringFormat::VerifyFn(
|
|
||||||
proxmox_time::verify_calendar_event,
|
|
||||||
))
|
|
||||||
.type_text("<calendar-event>")
|
|
||||||
.schema();
|
|
||||||
|
|
||||||
pub const PRUNE_SCHEDULE_SCHEMA: Schema = StringSchema::new("Run prune job at specified schedule.")
|
|
||||||
.format(&ApiStringFormat::VerifyFn(
|
|
||||||
proxmox_time::verify_calendar_event,
|
|
||||||
))
|
|
||||||
.type_text("<calendar-event>")
|
|
||||||
.schema();
|
|
||||||
|
|
||||||
pub const VERIFICATION_SCHEDULE_SCHEMA: Schema =
|
|
||||||
StringSchema::new("Run verify job at specified schedule.")
|
|
||||||
.format(&ApiStringFormat::VerifyFn(
|
|
||||||
proxmox_time::verify_calendar_event,
|
|
||||||
))
|
|
||||||
.type_text("<calendar-event>")
|
|
||||||
.schema();
|
|
||||||
|
|
||||||
pub const REMOVE_VANISHED_BACKUPS_SCHEMA: Schema = BooleanSchema::new(
|
|
||||||
"Delete vanished backups. This remove the local copy if the remote backup was deleted.",
|
|
||||||
)
|
|
||||||
.default(false)
|
|
||||||
.schema();
|
|
||||||
|
|
||||||
#[api(
|
|
||||||
properties: {
|
|
||||||
"next-run": {
|
|
||||||
description: "Estimated time of the next run (UNIX epoch).",
|
|
||||||
optional: true,
|
|
||||||
type: Integer,
|
|
||||||
},
|
|
||||||
"last-run-state": {
|
|
||||||
description: "Result of the last run.",
|
|
||||||
optional: true,
|
|
||||||
type: String,
|
|
||||||
},
|
|
||||||
"last-run-upid": {
|
|
||||||
description: "Task UPID of the last run.",
|
|
||||||
optional: true,
|
|
||||||
type: String,
|
|
||||||
},
|
|
||||||
"last-run-endtime": {
|
|
||||||
description: "Endtime of the last run.",
|
|
||||||
optional: true,
|
|
||||||
type: Integer,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
)]
|
|
||||||
#[derive(Serialize, Deserialize, Default, Clone, PartialEq)]
|
|
||||||
#[serde(rename_all = "kebab-case")]
|
|
||||||
/// Job Scheduling Status
|
|
||||||
pub struct JobScheduleStatus {
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
pub next_run: Option<i64>,
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
pub last_run_state: Option<String>,
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
pub last_run_upid: Option<String>,
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
pub last_run_endtime: Option<i64>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[api()]
|
|
||||||
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
|
||||||
#[serde(rename_all = "lowercase")]
|
|
||||||
/// When do we send notifications
|
|
||||||
pub enum Notify {
|
|
||||||
/// Never send notification
|
|
||||||
Never,
|
|
||||||
/// Send notifications for failed and successful jobs
|
|
||||||
Always,
|
|
||||||
/// Send notifications for failed jobs only
|
|
||||||
Error,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[api(
|
|
||||||
properties: {
|
|
||||||
gc: {
|
|
||||||
type: Notify,
|
|
||||||
optional: true,
|
|
||||||
},
|
|
||||||
verify: {
|
|
||||||
type: Notify,
|
|
||||||
optional: true,
|
|
||||||
},
|
|
||||||
sync: {
|
|
||||||
type: Notify,
|
|
||||||
optional: true,
|
|
||||||
},
|
|
||||||
prune: {
|
|
||||||
type: Notify,
|
|
||||||
optional: true,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
)]
|
|
||||||
#[derive(Debug, Serialize, Deserialize)]
|
|
||||||
/// Datastore notify settings
|
|
||||||
pub struct DatastoreNotify {
|
|
||||||
/// Garbage collection settings
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
pub gc: Option<Notify>,
|
|
||||||
/// Verify job setting
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
pub verify: Option<Notify>,
|
|
||||||
/// Sync job setting
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
pub sync: Option<Notify>,
|
|
||||||
/// Prune job setting
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
pub prune: Option<Notify>,
|
|
||||||
}
|
|
||||||
|
|
||||||
pub const DATASTORE_NOTIFY_STRING_SCHEMA: Schema = StringSchema::new(
|
|
||||||
"Datastore notification setting, enum can be one of 'always', 'never', or 'error'.",
|
|
||||||
)
|
|
||||||
.format(&ApiStringFormat::PropertyString(
|
|
||||||
&DatastoreNotify::API_SCHEMA,
|
|
||||||
))
|
|
||||||
.schema();
|
|
||||||
|
|
||||||
pub const IGNORE_VERIFIED_BACKUPS_SCHEMA: Schema = BooleanSchema::new(
|
|
||||||
"Do not verify backups that are already verified if their verification is not outdated.",
|
|
||||||
)
|
|
||||||
.default(true)
|
|
||||||
.schema();
|
|
||||||
|
|
||||||
pub const VERIFICATION_OUTDATED_AFTER_SCHEMA: Schema =
|
|
||||||
IntegerSchema::new("Days after that a verification becomes outdated. (0 is deprecated)'")
|
|
||||||
.minimum(0)
|
|
||||||
.schema();
|
|
||||||
|
|
||||||
#[api(
|
|
||||||
properties: {
|
|
||||||
id: {
|
|
||||||
schema: JOB_ID_SCHEMA,
|
|
||||||
},
|
|
||||||
store: {
|
|
||||||
schema: DATASTORE_SCHEMA,
|
|
||||||
},
|
|
||||||
"ignore-verified": {
|
|
||||||
optional: true,
|
|
||||||
schema: IGNORE_VERIFIED_BACKUPS_SCHEMA,
|
|
||||||
},
|
|
||||||
"outdated-after": {
|
|
||||||
optional: true,
|
|
||||||
schema: VERIFICATION_OUTDATED_AFTER_SCHEMA,
|
|
||||||
},
|
|
||||||
comment: {
|
|
||||||
optional: true,
|
|
||||||
schema: SINGLE_LINE_COMMENT_SCHEMA,
|
|
||||||
},
|
|
||||||
schedule: {
|
|
||||||
optional: true,
|
|
||||||
schema: VERIFICATION_SCHEDULE_SCHEMA,
|
|
||||||
},
|
|
||||||
ns: {
|
|
||||||
optional: true,
|
|
||||||
schema: BACKUP_NAMESPACE_SCHEMA,
|
|
||||||
},
|
|
||||||
"max-depth": {
|
|
||||||
optional: true,
|
|
||||||
schema: crate::NS_MAX_DEPTH_SCHEMA,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
)]
|
|
||||||
#[derive(Serialize, Deserialize, Updater, Clone, PartialEq)]
|
|
||||||
#[serde(rename_all = "kebab-case")]
|
|
||||||
/// Verification Job
|
|
||||||
pub struct VerificationJobConfig {
|
|
||||||
/// unique ID to address this job
|
|
||||||
#[updater(skip)]
|
|
||||||
pub id: String,
|
|
||||||
/// the datastore ID this verification job affects
|
|
||||||
pub store: String,
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
/// if not set to false, check the age of the last snapshot verification to filter
|
|
||||||
/// out recent ones, depending on 'outdated_after' configuration.
|
|
||||||
pub ignore_verified: Option<bool>,
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
/// Reverify snapshots after X days, never if 0. Ignored if 'ignore_verified' is false.
|
|
||||||
pub outdated_after: Option<i64>,
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
pub comment: Option<String>,
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
/// when to schedule this job in calendar event notation
|
|
||||||
pub schedule: Option<String>,
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none", default)]
|
|
||||||
/// on which backup namespace to run the verification recursively
|
|
||||||
pub ns: Option<BackupNamespace>,
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none", default)]
|
|
||||||
/// how deep the verify should go from the `ns` level downwards. Passing 0 verifies only the
|
|
||||||
/// snapshots on the same level as the passed `ns`, or the datastore root if none.
|
|
||||||
pub max_depth: Option<usize>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl VerificationJobConfig {
|
|
||||||
pub fn acl_path(&self) -> Vec<&str> {
|
|
||||||
match self.ns.as_ref() {
|
|
||||||
Some(ns) => ns.acl_path(&self.store),
|
|
||||||
None => vec!["datastore", &self.store],
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[api(
|
|
||||||
properties: {
|
|
||||||
config: {
|
|
||||||
type: VerificationJobConfig,
|
|
||||||
},
|
|
||||||
status: {
|
|
||||||
type: JobScheduleStatus,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
)]
|
|
||||||
#[derive(Serialize, Deserialize, Clone, PartialEq)]
|
|
||||||
#[serde(rename_all = "kebab-case")]
|
|
||||||
/// Status of Verification Job
|
|
||||||
pub struct VerificationJobStatus {
|
|
||||||
#[serde(flatten)]
|
|
||||||
pub config: VerificationJobConfig,
|
|
||||||
#[serde(flatten)]
|
|
||||||
pub status: JobScheduleStatus,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[api(
|
|
||||||
properties: {
|
|
||||||
store: {
|
|
||||||
schema: DATASTORE_SCHEMA,
|
|
||||||
},
|
|
||||||
pool: {
|
|
||||||
schema: MEDIA_POOL_NAME_SCHEMA,
|
|
||||||
},
|
|
||||||
drive: {
|
|
||||||
schema: DRIVE_NAME_SCHEMA,
|
|
||||||
},
|
|
||||||
"eject-media": {
|
|
||||||
description: "Eject media upon job completion.",
|
|
||||||
type: bool,
|
|
||||||
optional: true,
|
|
||||||
},
|
|
||||||
"export-media-set": {
|
|
||||||
description: "Export media set upon job completion.",
|
|
||||||
type: bool,
|
|
||||||
optional: true,
|
|
||||||
},
|
|
||||||
"latest-only": {
|
|
||||||
description: "Backup latest snapshots only.",
|
|
||||||
type: bool,
|
|
||||||
optional: true,
|
|
||||||
},
|
|
||||||
"notify-user": {
|
|
||||||
optional: true,
|
|
||||||
type: Userid,
|
|
||||||
},
|
|
||||||
"group-filter": {
|
|
||||||
schema: GROUP_FILTER_LIST_SCHEMA,
|
|
||||||
optional: true,
|
|
||||||
},
|
|
||||||
ns: {
|
|
||||||
type: BackupNamespace,
|
|
||||||
optional: true,
|
|
||||||
},
|
|
||||||
"max-depth": {
|
|
||||||
schema: crate::NS_MAX_DEPTH_SCHEMA,
|
|
||||||
optional: true,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
)]
|
|
||||||
#[derive(Serialize, Deserialize, Clone, Updater, PartialEq)]
|
|
||||||
#[serde(rename_all = "kebab-case")]
|
|
||||||
/// Tape Backup Job Setup
|
|
||||||
pub struct TapeBackupJobSetup {
|
|
||||||
pub store: String,
|
|
||||||
pub pool: String,
|
|
||||||
pub drive: String,
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
pub eject_media: Option<bool>,
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
pub export_media_set: Option<bool>,
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
pub latest_only: Option<bool>,
|
|
||||||
/// Send job email notification to this user
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
pub notify_user: Option<Userid>,
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
pub notification_mode: Option<NotificationMode>,
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
pub group_filter: Option<Vec<GroupFilter>>,
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none", default)]
|
|
||||||
pub ns: Option<BackupNamespace>,
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none", default)]
|
|
||||||
pub max_depth: Option<usize>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[api(
|
|
||||||
properties: {
|
|
||||||
id: {
|
|
||||||
schema: JOB_ID_SCHEMA,
|
|
||||||
},
|
|
||||||
setup: {
|
|
||||||
type: TapeBackupJobSetup,
|
|
||||||
},
|
|
||||||
comment: {
|
|
||||||
optional: true,
|
|
||||||
schema: SINGLE_LINE_COMMENT_SCHEMA,
|
|
||||||
},
|
|
||||||
schedule: {
|
|
||||||
optional: true,
|
|
||||||
schema: SYNC_SCHEDULE_SCHEMA,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
)]
|
|
||||||
#[derive(Serialize, Deserialize, Clone, Updater, PartialEq)]
|
|
||||||
#[serde(rename_all = "kebab-case")]
|
|
||||||
/// Tape Backup Job
|
|
||||||
pub struct TapeBackupJobConfig {
|
|
||||||
#[updater(skip)]
|
|
||||||
pub id: String,
|
|
||||||
#[serde(flatten)]
|
|
||||||
pub setup: TapeBackupJobSetup,
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
pub comment: Option<String>,
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
pub schedule: Option<String>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[api(
|
|
||||||
properties: {
|
|
||||||
config: {
|
|
||||||
type: TapeBackupJobConfig,
|
|
||||||
},
|
|
||||||
status: {
|
|
||||||
type: JobScheduleStatus,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
)]
|
|
||||||
#[derive(Serialize, Deserialize, Clone, PartialEq)]
|
|
||||||
#[serde(rename_all = "kebab-case")]
|
|
||||||
/// Status of Tape Backup Job
|
|
||||||
pub struct TapeBackupJobStatus {
|
|
||||||
#[serde(flatten)]
|
|
||||||
pub config: TapeBackupJobConfig,
|
|
||||||
#[serde(flatten)]
|
|
||||||
pub status: JobScheduleStatus,
|
|
||||||
/// Next tape used (best guess)
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
pub next_media_label: Option<String>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone, Debug)]
|
|
||||||
/// Filter for matching `BackupGroup`s, for use with `BackupGroup::filter`.
|
|
||||||
pub enum FilterType {
|
|
||||||
/// BackupGroup type - either `vm`, `ct`, or `host`.
|
|
||||||
BackupType(BackupType),
|
|
||||||
/// Full identifier of BackupGroup, including type
|
|
||||||
Group(String),
|
|
||||||
/// A regular expression matched against the full identifier of the BackupGroup
|
|
||||||
Regex(Regex),
|
|
||||||
}
|
|
||||||
|
|
||||||
impl PartialEq for FilterType {
|
|
||||||
fn eq(&self, other: &Self) -> bool {
|
|
||||||
match (self, other) {
|
|
||||||
(Self::BackupType(a), Self::BackupType(b)) => a == b,
|
|
||||||
(Self::Group(a), Self::Group(b)) => a == b,
|
|
||||||
(Self::Regex(a), Self::Regex(b)) => a.as_str() == b.as_str(),
|
|
||||||
_ => false,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl std::str::FromStr for FilterType {
|
|
||||||
type Err = anyhow::Error;
|
|
||||||
|
|
||||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
|
||||||
Ok(match s.split_once(':') {
|
|
||||||
Some(("group", value)) => BACKUP_GROUP_SCHEMA.parse_simple_value(value).map(|_| FilterType::Group(value.to_string()))?,
|
|
||||||
Some(("type", value)) => FilterType::BackupType(value.parse()?),
|
|
||||||
Some(("regex", value)) => FilterType::Regex(Regex::new(value)?),
|
|
||||||
Some((ty, _value)) => bail!("expected 'group', 'type' or 'regex' prefix, got '{}'", ty),
|
|
||||||
None => bail!("input doesn't match expected format '<group:GROUP||type:<vm|ct|host>|regex:REGEX>'"),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// used for serializing below, caution!
|
|
||||||
impl std::fmt::Display for FilterType {
|
|
||||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
|
||||||
match self {
|
|
||||||
FilterType::BackupType(backup_type) => write!(f, "type:{}", backup_type),
|
|
||||||
FilterType::Group(backup_group) => write!(f, "group:{}", backup_group),
|
|
||||||
FilterType::Regex(regex) => write!(f, "regex:{}", regex.as_str()),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone, Debug)]
|
|
||||||
pub struct GroupFilter {
|
|
||||||
pub is_exclude: bool,
|
|
||||||
pub filter_type: FilterType,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl PartialEq for GroupFilter {
|
|
||||||
fn eq(&self, other: &Self) -> bool {
|
|
||||||
self.filter_type == other.filter_type && self.is_exclude == other.is_exclude
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Eq for GroupFilter {}
|
|
||||||
|
|
||||||
impl std::str::FromStr for GroupFilter {
|
|
||||||
type Err = anyhow::Error;
|
|
||||||
|
|
||||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
|
||||||
let (is_exclude, type_str) = match s.split_once(':') {
|
|
||||||
Some(("include", value)) => (false, value),
|
|
||||||
Some(("exclude", value)) => (true, value),
|
|
||||||
_ => (false, s),
|
|
||||||
};
|
|
||||||
|
|
||||||
Ok(GroupFilter {
|
|
||||||
is_exclude,
|
|
||||||
filter_type: type_str.parse()?,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// used for serializing below, caution!
|
|
||||||
impl std::fmt::Display for GroupFilter {
|
|
||||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
|
||||||
if self.is_exclude {
|
|
||||||
f.write_str("exclude:")?;
|
|
||||||
}
|
|
||||||
std::fmt::Display::fmt(&self.filter_type, f)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
proxmox_serde::forward_deserialize_to_from_str!(GroupFilter);
|
|
||||||
proxmox_serde::forward_serialize_to_display!(GroupFilter);
|
|
||||||
|
|
||||||
fn verify_group_filter(input: &str) -> Result<(), anyhow::Error> {
|
|
||||||
GroupFilter::from_str(input).map(|_| ())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub const GROUP_FILTER_SCHEMA: Schema = StringSchema::new(
|
|
||||||
"Group filter based on group identifier ('group:GROUP'), group type ('type:<vm|ct|host>'), or regex ('regex:RE'). Can be inverted by prepending 'exclude:'.")
|
|
||||||
.format(&ApiStringFormat::VerifyFn(verify_group_filter))
|
|
||||||
.type_text("[<exclude:|include:>]<type:<vm|ct|host>|group:GROUP|regex:RE>")
|
|
||||||
.schema();
|
|
||||||
|
|
||||||
pub const GROUP_FILTER_LIST_SCHEMA: Schema =
|
|
||||||
ArraySchema::new("List of group filters.", &GROUP_FILTER_SCHEMA).schema();
|
|
||||||
|
|
||||||
pub const TRANSFER_LAST_SCHEMA: Schema =
|
|
||||||
IntegerSchema::new("Limit transfer to last N snapshots (per group), skipping others")
|
|
||||||
.minimum(1)
|
|
||||||
.schema();
|
|
||||||
|
|
||||||
#[api(
|
|
||||||
properties: {
|
|
||||||
id: {
|
|
||||||
schema: JOB_ID_SCHEMA,
|
|
||||||
},
|
|
||||||
store: {
|
|
||||||
schema: DATASTORE_SCHEMA,
|
|
||||||
},
|
|
||||||
ns: {
|
|
||||||
type: BackupNamespace,
|
|
||||||
optional: true,
|
|
||||||
},
|
|
||||||
"owner": {
|
|
||||||
type: Authid,
|
|
||||||
optional: true,
|
|
||||||
},
|
|
||||||
remote: {
|
|
||||||
schema: REMOTE_ID_SCHEMA,
|
|
||||||
optional: true,
|
|
||||||
},
|
|
||||||
"remote-store": {
|
|
||||||
schema: DATASTORE_SCHEMA,
|
|
||||||
},
|
|
||||||
"remote-ns": {
|
|
||||||
type: BackupNamespace,
|
|
||||||
optional: true,
|
|
||||||
},
|
|
||||||
"remove-vanished": {
|
|
||||||
schema: REMOVE_VANISHED_BACKUPS_SCHEMA,
|
|
||||||
optional: true,
|
|
||||||
},
|
|
||||||
"max-depth": {
|
|
||||||
schema: NS_MAX_DEPTH_REDUCED_SCHEMA,
|
|
||||||
optional: true,
|
|
||||||
},
|
|
||||||
comment: {
|
|
||||||
optional: true,
|
|
||||||
schema: SINGLE_LINE_COMMENT_SCHEMA,
|
|
||||||
},
|
|
||||||
limit: {
|
|
||||||
type: RateLimitConfig,
|
|
||||||
},
|
|
||||||
schedule: {
|
|
||||||
optional: true,
|
|
||||||
schema: SYNC_SCHEDULE_SCHEMA,
|
|
||||||
},
|
|
||||||
"group-filter": {
|
|
||||||
schema: GROUP_FILTER_LIST_SCHEMA,
|
|
||||||
optional: true,
|
|
||||||
},
|
|
||||||
"transfer-last": {
|
|
||||||
schema: TRANSFER_LAST_SCHEMA,
|
|
||||||
optional: true,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
)]
|
|
||||||
#[derive(Serialize, Deserialize, Clone, Updater, PartialEq)]
|
|
||||||
#[serde(rename_all = "kebab-case")]
|
|
||||||
/// Sync Job
|
|
||||||
pub struct SyncJobConfig {
|
|
||||||
#[updater(skip)]
|
|
||||||
pub id: String,
|
|
||||||
pub store: String,
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
pub ns: Option<BackupNamespace>,
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
pub owner: Option<Authid>,
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
/// None implies local sync.
|
|
||||||
pub remote: Option<String>,
|
|
||||||
pub remote_store: String,
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
pub remote_ns: Option<BackupNamespace>,
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
pub remove_vanished: Option<bool>,
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
pub max_depth: Option<usize>,
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
pub comment: Option<String>,
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
pub schedule: Option<String>,
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
pub group_filter: Option<Vec<GroupFilter>>,
|
|
||||||
#[serde(flatten)]
|
|
||||||
pub limit: RateLimitConfig,
|
|
||||||
#[serde(skip_serializing_if = "Option::is_none")]
|
|
||||||
pub transfer_last: Option<usize>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl SyncJobConfig {
|
|
||||||
pub fn acl_path(&self) -> Vec<&str> {
|
|
||||||
match self.ns.as_ref() {
|
|
||||||
Some(ns) => ns.acl_path(&self.store),
|
|
||||||
None => vec!["datastore", &self.store],
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[api(
    properties: {
        config: {
            type: SyncJobConfig,
        },
        status: {
            type: JobScheduleStatus,
        },
    },
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Status of Sync Job
pub struct SyncJobStatus {
    #[serde(flatten)]
    pub config: SyncJobConfig,
    #[serde(flatten)]
    pub status: JobScheduleStatus,
}

/// These options are sometimes used by the API without `ns`/`max-depth`, specifically in the
/// call that prunes a single group, where `max-depth` makes no sense.
#[api(
    properties: {
        "keep-last": {
            schema: crate::PRUNE_SCHEMA_KEEP_LAST,
            optional: true,
        },
        "keep-hourly": {
            schema: crate::PRUNE_SCHEMA_KEEP_HOURLY,
            optional: true,
        },
        "keep-daily": {
            schema: crate::PRUNE_SCHEMA_KEEP_DAILY,
            optional: true,
        },
        "keep-weekly": {
            schema: crate::PRUNE_SCHEMA_KEEP_WEEKLY,
            optional: true,
        },
        "keep-monthly": {
            schema: crate::PRUNE_SCHEMA_KEEP_MONTHLY,
            optional: true,
        },
        "keep-yearly": {
            schema: crate::PRUNE_SCHEMA_KEEP_YEARLY,
            optional: true,
        },
    }
)]
#[derive(Serialize, Deserialize, Default, Updater, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Common pruning options
pub struct KeepOptions {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub keep_last: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub keep_hourly: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub keep_daily: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub keep_weekly: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub keep_monthly: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub keep_yearly: Option<u64>,
}

impl KeepOptions {
    pub fn keeps_something(&self) -> bool {
        self.keep_last.unwrap_or(0)
            + self.keep_hourly.unwrap_or(0)
            + self.keep_daily.unwrap_or(0)
            + self.keep_weekly.unwrap_or(0)
            + self.keep_monthly.unwrap_or(0)
            + self.keep_yearly.unwrap_or(0)
            > 0
    }
}

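// Example (added for illustration, not part of the original source): `keeps_something`
// sums all configured keep counts, so a default `KeepOptions` keeps nothing and any single
// non-zero count is enough. A minimal sketch using only the items defined above:
#[cfg(test)]
mod keep_options_example {
    use super::*;

    #[test]
    fn default_keeps_nothing() {
        // All fields default to `None`, which counts as 0.
        assert!(!KeepOptions::default().keeps_something());

        // A single non-zero option is enough for the prune job to keep something.
        let opts = KeepOptions {
            keep_daily: Some(7),
            ..KeepOptions::default()
        };
        assert!(opts.keeps_something());
    }
}
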
#[api(
    properties: {
        keep: {
            type: KeepOptions,
        },
        ns: {
            type: BackupNamespace,
            optional: true,
        },
        "max-depth": {
            schema: NS_MAX_DEPTH_REDUCED_SCHEMA,
            optional: true,
        },
    }
)]
#[derive(Serialize, Deserialize, Default, Updater, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Common pruning options
pub struct PruneJobOptions {
    #[serde(flatten)]
    pub keep: KeepOptions,

    /// The (optional) recursion depth
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_depth: Option<usize>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub ns: Option<BackupNamespace>,
}

impl PruneJobOptions {
    pub fn keeps_something(&self) -> bool {
        self.keep.keeps_something()
    }

    pub fn acl_path<'a>(&'a self, store: &'a str) -> Vec<&'a str> {
        match &self.ns {
            Some(ns) => ns.acl_path(store),
            None => vec!["datastore", store],
        }
    }
}

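// Example (added for illustration, not part of the original source): without a namespace
// the prune job's ACL check happens on the datastore itself; with a namespace it is
// delegated to `BackupNamespace::acl_path`. The datastore name "tank" is made up.
#[cfg(test)]
mod prune_job_options_example {
    use super::*;

    #[test]
    fn acl_path_defaults_to_datastore_root() {
        let opts = PruneJobOptions::default();
        assert_eq!(opts.acl_path("tank"), vec!["datastore", "tank"]);
    }
}
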
#[api(
    properties: {
        disable: {
            type: Boolean,
            optional: true,
            default: false,
        },
        id: {
            schema: JOB_ID_SCHEMA,
        },
        store: {
            schema: DATASTORE_SCHEMA,
        },
        schedule: {
            schema: PRUNE_SCHEDULE_SCHEMA,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
        options: {
            type: PruneJobOptions,
        },
    },
)]
#[derive(Deserialize, Serialize, Updater, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Prune configuration.
pub struct PruneJobConfig {
    /// unique ID to address this job
    #[updater(skip)]
    pub id: String,

    pub store: String,

    /// Disable this job.
    #[serde(default, skip_serializing_if = "is_false")]
    #[updater(serde(skip_serializing_if = "Option::is_none"))]
    pub disable: bool,

    pub schedule: String,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,

    #[serde(flatten)]
    pub options: PruneJobOptions,
}

impl PruneJobConfig {
    pub fn acl_path(&self) -> Vec<&str> {
        self.options.acl_path(&self.store)
    }
}

fn is_false(b: &bool) -> bool {
    !b
}

#[api(
    properties: {
        config: {
            type: PruneJobConfig,
        },
        status: {
            type: JobScheduleStatus,
        },
    },
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Status of prune job
pub struct PruneJobStatus {
    #[serde(flatten)]
    pub config: PruneJobConfig,
    #[serde(flatten)]
    pub status: JobScheduleStatus,
}
@ -1,55 +0,0 @@
use serde::{Deserialize, Serialize};

use proxmox_schema::api;

use crate::CERT_FINGERPRINT_SHA256_SCHEMA;

#[api(default: "scrypt")]
#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
#[serde(rename_all = "lowercase")]
/// Key derivation function for password protected encryption keys.
pub enum Kdf {
    /// Do not encrypt the key.
    None,
    /// Encrypt the key with a password using SCrypt.
    Scrypt,
    /// Encrypt the key with a password using PBKDF2.
    PBKDF2,
}

impl Default for Kdf {
    #[inline]
    fn default() -> Self {
        Kdf::Scrypt
    }
}

#[api(
    properties: {
        kdf: {
            type: Kdf,
        },
        fingerprint: {
            schema: CERT_FINGERPRINT_SHA256_SCHEMA,
            optional: true,
        },
    },
)]
#[derive(Deserialize, Serialize)]
/// Encryption Key Information
pub struct KeyInfo {
    /// Path to key (if stored in a file)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub path: Option<String>,
    pub kdf: Kdf,
    /// Key creation time
    pub created: i64,
    /// Key modification time
    pub modified: i64,
    /// Key fingerprint
    #[serde(skip_serializing_if = "Option::is_none")]
    pub fingerprint: Option<String>,
    /// Password hint
    #[serde(skip_serializing_if = "Option::is_none")]
    pub hint: Option<String>,
}
@ -1,208 +0,0 @@
use serde::{Deserialize, Serialize};

use proxmox_schema::{api, ApiStringFormat, ApiType, ArraySchema, Schema, StringSchema, Updater};

use super::{REALM_ID_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA};

#[api()]
#[derive(Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
/// LDAP connection type
pub enum LdapMode {
    /// Plaintext LDAP connection
    #[serde(rename = "ldap")]
    #[default]
    Ldap,
    /// Secure STARTTLS connection
    #[serde(rename = "ldap+starttls")]
    StartTls,
    /// Secure LDAPS connection
    #[serde(rename = "ldaps")]
    Ldaps,
}

#[api(
    properties: {
        "realm": {
            schema: REALM_ID_SCHEMA,
        },
        "comment": {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
        "verify": {
            optional: true,
            default: false,
        },
        "sync-defaults-options": {
            schema: SYNC_DEFAULTS_STRING_SCHEMA,
            optional: true,
        },
        "sync-attributes": {
            schema: SYNC_ATTRIBUTES_SCHEMA,
            optional: true,
        },
        "user-classes" : {
            optional: true,
            schema: USER_CLASSES_SCHEMA,
        },
        "base-dn" : {
            schema: LDAP_DOMAIN_SCHEMA,
        },
        "bind-dn" : {
            schema: LDAP_DOMAIN_SCHEMA,
            optional: true,
        }
    },
)]
#[derive(Serialize, Deserialize, Updater, Clone)]
#[serde(rename_all = "kebab-case")]
/// LDAP configuration properties.
pub struct LdapRealmConfig {
    #[updater(skip)]
    pub realm: String,
    /// LDAP server address
    pub server1: String,
    /// Fallback LDAP server address
    #[serde(skip_serializing_if = "Option::is_none")]
    pub server2: Option<String>,
    /// Port
    #[serde(skip_serializing_if = "Option::is_none")]
    pub port: Option<u16>,
    /// Base domain name. Users are searched under this domain using a `subtree search`.
    pub base_dn: String,
    /// Username attribute. Used to map a ``userid`` to an LDAP ``dn``.
    pub user_attr: String,
    /// Comment
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
    /// Connection security
    #[serde(skip_serializing_if = "Option::is_none")]
    pub mode: Option<LdapMode>,
    /// Verify server certificate
    #[serde(skip_serializing_if = "Option::is_none")]
    pub verify: Option<bool>,
    /// CA certificate to use for the server. The path can point to
    /// either a file, or a directory. If it points to a file,
    /// the PEM-formatted X.509 certificate stored at the path
    /// will be added as a trusted certificate.
    /// If the path points to a directory,
    /// the directory replaces the system's default certificate
    /// store at `/etc/ssl/certs`: every file in the directory
    /// will be loaded as a trusted certificate.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub capath: Option<String>,
    /// Bind domain to use for looking up users
    #[serde(skip_serializing_if = "Option::is_none")]
    pub bind_dn: Option<String>,
    /// Custom LDAP search filter for user sync
    #[serde(skip_serializing_if = "Option::is_none")]
    pub filter: Option<String>,
    /// Default options for LDAP sync
    #[serde(skip_serializing_if = "Option::is_none")]
    pub sync_defaults_options: Option<String>,
    /// List of attributes to sync from LDAP to user config
    #[serde(skip_serializing_if = "Option::is_none")]
    pub sync_attributes: Option<String>,
    /// User ``objectClass`` classes to sync
    #[serde(skip_serializing_if = "Option::is_none")]
    pub user_classes: Option<String>,
}

#[api(
    properties: {
        "remove-vanished": {
            optional: true,
            schema: REMOVE_VANISHED_SCHEMA,
        },
    },
)]
#[derive(Serialize, Deserialize, Updater, Default, Debug)]
#[serde(rename_all = "kebab-case")]
/// Default options for LDAP synchronization runs
pub struct SyncDefaultsOptions {
    /// How to handle vanished properties/users
    pub remove_vanished: Option<String>,
    /// Enable new users after sync
    pub enable_new: Option<bool>,
}

#[api()]
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
#[serde(rename_all = "kebab-case")]
/// remove-vanished options
pub enum RemoveVanished {
    /// Delete ACLs for vanished users
    Acl,
    /// Remove vanished users
    Entry,
    /// Remove vanished properties from users (e.g. email)
    Properties,
}

pub const LDAP_DOMAIN_SCHEMA: Schema = StringSchema::new("LDAP Domain").schema();

pub const SYNC_DEFAULTS_STRING_SCHEMA: Schema = StringSchema::new("sync defaults options")
    .format(&ApiStringFormat::PropertyString(
        &SyncDefaultsOptions::API_SCHEMA,
    ))
    .schema();

const REMOVE_VANISHED_DESCRIPTION: &str =
    "A semicolon-separated list of things to remove when they or the user \
    vanishes during user synchronization. The following values are possible: ``entry`` removes the \
    user when not returned from the sync; ``properties`` removes any \
    properties on the existing user that do not appear in the source. \
    ``acl`` removes ACLs when the user is not returned from the sync.";

pub const REMOVE_VANISHED_SCHEMA: Schema = StringSchema::new(REMOVE_VANISHED_DESCRIPTION)
    .format(&ApiStringFormat::PropertyString(&REMOVE_VANISHED_ARRAY))
    .schema();

pub const REMOVE_VANISHED_ARRAY: Schema = ArraySchema::new(
    "Array of remove-vanished options",
    &RemoveVanished::API_SCHEMA,
)
.min_length(1)
.schema();

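// Illustrative note (not part of the original source): `remove-vanished` is stored as a
// property string built from the `RemoveVanished` array schema above, i.e. a
// semicolon-separated list of the kebab-case variant names. A realm configuration could
// therefore carry, for example:
//
//     remove-vanished acl;entry;properties
//
// which requests that ACLs, the user entry itself, and extra user properties are all
// cleaned up for users that vanish from the LDAP directory.
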
#[api()]
#[derive(Serialize, Deserialize, Updater, Default, Debug)]
#[serde(rename_all = "kebab-case")]
/// Determine which LDAP attributes should be synced to which user attributes
pub struct SyncAttributes {
    /// Name of the LDAP attribute containing the user's email address
    pub email: Option<String>,
    /// Name of the LDAP attribute containing the user's first name
    pub firstname: Option<String>,
    /// Name of the LDAP attribute containing the user's last name
    pub lastname: Option<String>,
}

const SYNC_ATTRIBUTES_TEXT: &str = "Comma-separated list of key=value pairs for specifying \
    which LDAP attributes map to which PBS user field. For example, \
    to map the LDAP attribute ``mail`` to PBS's ``email``, write \
    ``email=mail``.";

pub const SYNC_ATTRIBUTES_SCHEMA: Schema = StringSchema::new(SYNC_ATTRIBUTES_TEXT)
    .format(&ApiStringFormat::PropertyString(
        &SyncAttributes::API_SCHEMA,
    ))
    .schema();

pub const USER_CLASSES_ARRAY: Schema = ArraySchema::new(
    "Array of user classes",
    &StringSchema::new("user class").schema(),
)
.min_length(1)
.schema();

const USER_CLASSES_TEXT: &str = "Comma-separated list of allowed objectClass values for \
    user synchronization. For instance, if ``user-classes`` is set to ``person,user``, \
    then user synchronization will consider all LDAP entities \
    where ``objectClass: person`` `or` ``objectClass: user`` is set.";

pub const USER_CLASSES_SCHEMA: Schema = StringSchema::new(USER_CLASSES_TEXT)
    .format(&ApiStringFormat::PropertyString(&USER_CLASSES_ARRAY))
    .default("inetorgperson,posixaccount,person,user")
    .schema();
@ -1,417 +0,0 @@
//! Basic API types used by most of the PBS code.

use const_format::concatcp;
use serde::{Deserialize, Serialize};

pub mod percent_encoding;

use proxmox_schema::{
    api, const_regex, ApiStringFormat, ApiType, ArraySchema, ReturnType, Schema, StringSchema,
};
use proxmox_time::parse_daily_duration;

use proxmox_auth_api::types::{APITOKEN_ID_REGEX_STR, USER_ID_REGEX_STR};

pub use proxmox_schema::api_types::SAFE_ID_FORMAT as PROXMOX_SAFE_ID_FORMAT;
pub use proxmox_schema::api_types::SAFE_ID_REGEX as PROXMOX_SAFE_ID_REGEX;
pub use proxmox_schema::api_types::SAFE_ID_REGEX_STR as PROXMOX_SAFE_ID_REGEX_STR;
pub use proxmox_schema::api_types::{
    BLOCKDEVICE_DISK_AND_PARTITION_NAME_REGEX, BLOCKDEVICE_NAME_REGEX,
};
pub use proxmox_schema::api_types::{DNS_ALIAS_REGEX, DNS_NAME_OR_IP_REGEX, DNS_NAME_REGEX};
pub use proxmox_schema::api_types::{FINGERPRINT_SHA256_REGEX, SHA256_HEX_REGEX};
pub use proxmox_schema::api_types::{
    GENERIC_URI_REGEX, HOSTNAME_REGEX, HOST_PORT_REGEX, HTTP_URL_REGEX,
};
pub use proxmox_schema::api_types::{MULTI_LINE_COMMENT_REGEX, SINGLE_LINE_COMMENT_REGEX};
pub use proxmox_schema::api_types::{PASSWORD_REGEX, SYSTEMD_DATETIME_REGEX, UUID_REGEX};

pub use proxmox_schema::api_types::{CIDR_FORMAT, CIDR_REGEX};
pub use proxmox_schema::api_types::{CIDR_V4_FORMAT, CIDR_V4_REGEX};
pub use proxmox_schema::api_types::{CIDR_V6_FORMAT, CIDR_V6_REGEX};
pub use proxmox_schema::api_types::{IPRE_STR, IP_FORMAT, IP_REGEX};
pub use proxmox_schema::api_types::{IPV4RE_STR, IP_V4_FORMAT, IP_V4_REGEX};
pub use proxmox_schema::api_types::{IPV6RE_STR, IP_V6_FORMAT, IP_V6_REGEX};

pub use proxmox_schema::api_types::COMMENT_SCHEMA as SINGLE_LINE_COMMENT_SCHEMA;
pub use proxmox_schema::api_types::HOSTNAME_SCHEMA;
pub use proxmox_schema::api_types::HOST_PORT_SCHEMA;
pub use proxmox_schema::api_types::HTTP_URL_SCHEMA;
pub use proxmox_schema::api_types::MULTI_LINE_COMMENT_SCHEMA;
pub use proxmox_schema::api_types::NODE_SCHEMA;
pub use proxmox_schema::api_types::SINGLE_LINE_COMMENT_FORMAT;
pub use proxmox_schema::api_types::{
    BLOCKDEVICE_DISK_AND_PARTITION_NAME_SCHEMA, BLOCKDEVICE_NAME_SCHEMA,
};
pub use proxmox_schema::api_types::{CERT_FINGERPRINT_SHA256_SCHEMA, FINGERPRINT_SHA256_FORMAT};
pub use proxmox_schema::api_types::{DISK_ARRAY_SCHEMA, DISK_LIST_SCHEMA};
pub use proxmox_schema::api_types::{DNS_ALIAS_FORMAT, DNS_NAME_FORMAT, DNS_NAME_OR_IP_SCHEMA};
pub use proxmox_schema::api_types::{PASSWORD_FORMAT, PASSWORD_SCHEMA};
pub use proxmox_schema::api_types::{SERVICE_ID_SCHEMA, UUID_FORMAT};
pub use proxmox_schema::api_types::{SYSTEMD_DATETIME_FORMAT, TIME_ZONE_SCHEMA};

use proxmox_schema::api_types::{DNS_NAME_STR, IPRE_BRACKET_STR};

#[rustfmt::skip]
pub const BACKUP_ID_RE: &str = r"[A-Za-z0-9_][A-Za-z0-9._\-]*";

#[rustfmt::skip]
pub const BACKUP_TYPE_RE: &str = r"(?:host|vm|ct)";

#[rustfmt::skip]
pub const BACKUP_TIME_RE: &str = r"[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z";

#[rustfmt::skip]
pub const BACKUP_NS_RE: &str =
    concatcp!("(?:",
        "(?:", PROXMOX_SAFE_ID_REGEX_STR, r"/){0,7}", PROXMOX_SAFE_ID_REGEX_STR,
    ")?");

#[rustfmt::skip]
pub const BACKUP_NS_PATH_RE: &str =
    concatcp!(r"(?:ns/", PROXMOX_SAFE_ID_REGEX_STR, r"/){0,7}ns/", PROXMOX_SAFE_ID_REGEX_STR, r"/");

#[rustfmt::skip]
pub const SNAPSHOT_PATH_REGEX_STR: &str =
    concatcp!(
        r"(", BACKUP_TYPE_RE, ")/(", BACKUP_ID_RE, ")/(", BACKUP_TIME_RE, r")",
    );

#[rustfmt::skip]
pub const GROUP_OR_SNAPSHOT_PATH_REGEX_STR: &str =
    concatcp!(
        r"(", BACKUP_TYPE_RE, ")/(", BACKUP_ID_RE, ")(?:/(", BACKUP_TIME_RE, r"))?",
    );

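// Example (added for illustration, not part of the original source): the regex fragments
// above compose into full snapshot or group paths. Assuming the `regex` crate is available
// (as it already is for `const_regex!`), an anchored snapshot path looks like this:
#[cfg(test)]
mod backup_path_regex_example {
    use super::*;

    #[test]
    fn snapshot_path_matches() {
        let re = regex::Regex::new(&format!("^{}$", SNAPSHOT_PATH_REGEX_STR)).unwrap();
        // "<type>/<id>/<time>" with the three capture groups defined above.
        assert!(re.is_match("vm/100/2023-11-01T12:00:00Z"));
        assert!(!re.is_match("vm/100")); // a group path is not a snapshot path
    }
}
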
mod acl;
pub use acl::*;

mod datastore;
pub use datastore::*;

mod jobs;
pub use jobs::*;

mod key_derivation;
pub use key_derivation::{Kdf, KeyInfo};

mod maintenance;
pub use maintenance::*;

mod network;
pub use network::*;

mod node;
pub use node::*;

pub use proxmox_auth_api::types as userid;
pub use proxmox_auth_api::types::{Authid, Userid};
pub use proxmox_auth_api::types::{Realm, RealmRef};
pub use proxmox_auth_api::types::{Tokenname, TokennameRef};
pub use proxmox_auth_api::types::{Username, UsernameRef};
pub use proxmox_auth_api::types::{
    PROXMOX_GROUP_ID_SCHEMA, PROXMOX_TOKEN_ID_SCHEMA, PROXMOX_TOKEN_NAME_SCHEMA,
};

#[macro_use]
mod user;
pub use user::*;

pub use proxmox_schema::upid::*;

mod crypto;
pub use crypto::{bytes_as_fingerprint, CryptMode, Fingerprint};

pub mod file_restore;

mod openid;
pub use openid::*;

mod ldap;
pub use ldap::*;

mod ad;
pub use ad::*;

mod remote;
pub use remote::*;

mod tape;
pub use tape::*;

mod traffic_control;
pub use traffic_control::*;

mod zfs;
pub use zfs::*;

mod metrics;
pub use metrics::*;

const_regex! {
    // just a rough check - dummy acceptor is used before persisting
    pub OPENSSL_CIPHERS_REGEX = r"^[0-9A-Za-z_:, +!\-@=.]+$";

    pub BACKUP_REPO_URL_REGEX = concatcp!(
        r"^^(?:(?:(",
        USER_ID_REGEX_STR, "|", APITOKEN_ID_REGEX_STR,
        ")@)?(",
        DNS_NAME_STR, "|", IPRE_BRACKET_STR,
        "):)?(?:([0-9]{1,5}):)?(", PROXMOX_SAFE_ID_REGEX_STR, r")$"
    );

    pub SUBSCRIPTION_KEY_REGEX = concat!(r"^pbs(?:[cbsp])-[0-9a-f]{10}$");
}

pub const PVE_CONFIG_DIGEST_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SHA256_HEX_REGEX);

pub const SUBSCRIPTION_KEY_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&SUBSCRIPTION_KEY_REGEX);

pub const OPENSSL_CIPHERS_TLS_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&OPENSSL_CIPHERS_REGEX);

pub const DAILY_DURATION_FORMAT: ApiStringFormat =
    ApiStringFormat::VerifyFn(|s| parse_daily_duration(s).map(drop));

pub const SEARCH_DOMAIN_SCHEMA: Schema =
    StringSchema::new("Search domain for host-name lookup.").schema();

pub const FIRST_DNS_SERVER_SCHEMA: Schema = StringSchema::new("First name server IP address.")
    .format(&IP_FORMAT)
    .schema();

pub const SECOND_DNS_SERVER_SCHEMA: Schema = StringSchema::new("Second name server IP address.")
    .format(&IP_FORMAT)
    .schema();

pub const THIRD_DNS_SERVER_SCHEMA: Schema = StringSchema::new("Third name server IP address.")
    .format(&IP_FORMAT)
    .schema();

pub const OPENSSL_CIPHERS_TLS_1_2_SCHEMA: Schema =
    StringSchema::new("OpenSSL cipher list used by the proxy for TLS <= 1.2")
        .format(&OPENSSL_CIPHERS_TLS_FORMAT)
        .schema();

pub const OPENSSL_CIPHERS_TLS_1_3_SCHEMA: Schema =
    StringSchema::new("OpenSSL ciphersuites list used by the proxy for TLS 1.3")
        .format(&OPENSSL_CIPHERS_TLS_FORMAT)
        .schema();

pub const PBS_PASSWORD_SCHEMA: Schema = StringSchema::new("User Password.")
    .format(&PASSWORD_FORMAT)
    .min_length(5)
    .max_length(64)
    .schema();

pub const REALM_ID_SCHEMA: Schema = StringSchema::new("Realm name.")
    .format(&PROXMOX_SAFE_ID_FORMAT)
    .min_length(2)
    .max_length(32)
    .schema();

pub const SUBSCRIPTION_KEY_SCHEMA: Schema =
    StringSchema::new("Proxmox Backup Server subscription key.")
        .format(&SUBSCRIPTION_KEY_FORMAT)
        .min_length(15)
        .max_length(16)
        .schema();

pub const PROXMOX_CONFIG_DIGEST_SCHEMA: Schema = StringSchema::new(
    "Prevent changes if current configuration file has different \
    SHA256 digest. This can be used to prevent concurrent \
    modifications.",
)
.format(&PVE_CONFIG_DIGEST_FORMAT)
.schema();

/// API schema format definition for repository URLs
pub const BACKUP_REPO_URL: ApiStringFormat = ApiStringFormat::Pattern(&BACKUP_REPO_URL_REGEX);

// Complex type definitions

#[api()]
#[derive(Default, Serialize, Deserialize)]
/// Storage space usage information.
pub struct StorageStatus {
    /// Total space (bytes).
    pub total: u64,
    /// Used space (bytes).
    pub used: u64,
    /// Available space (bytes).
    pub avail: u64,
}

pub const PASSWORD_HINT_SCHEMA: Schema = StringSchema::new("Password hint.")
    .format(&SINGLE_LINE_COMMENT_FORMAT)
    .min_length(1)
    .max_length(64)
    .schema();

#[api()]
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "PascalCase")]
/// Describes a package for which an update is available.
pub struct APTUpdateInfo {
    /// Package name
    pub package: String,
    /// Package title
    pub title: String,
    /// Package architecture
    pub arch: String,
    /// Human readable package description
    pub description: String,
    /// New version to be updated to
    pub version: String,
    /// Old version currently installed
    pub old_version: String,
    /// Package origin
    pub origin: String,
    /// Package priority in human-readable form
    pub priority: String,
    /// Package section
    pub section: String,
    /// Custom extra field for additional package information
    #[serde(skip_serializing_if = "Option::is_none")]
    pub extra_info: Option<String>,
}

#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// Node Power command type.
pub enum NodePowerCommand {
    /// Restart the server
    Reboot,
    /// Shutdown the server
    Shutdown,
}

#[api()]
#[derive(Eq, PartialEq, Debug, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum TaskStateType {
    /// Ok
    OK,
    /// Warning
    Warning,
    /// Error
    Error,
    /// Unknown
    Unknown,
}

#[api(
    properties: {
        upid: { schema: UPID::API_SCHEMA },
    },
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
/// Task properties.
pub struct TaskListItem {
    pub upid: String,
    /// The node name where the task is running on.
    pub node: String,
    /// The Unix PID
    pub pid: i64,
    /// The task start time (Epoch)
    pub pstart: u64,
    /// The task start time (Epoch)
    pub starttime: i64,
    /// Worker type (arbitrary ASCII string)
    pub worker_type: String,
    /// Worker ID (arbitrary ASCII string)
    pub worker_id: Option<String>,
    /// The authenticated entity who started the task
    pub user: String,
    /// The task end time (Epoch)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub endtime: Option<i64>,
    /// Task end status
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<String>,
}

pub const NODE_TASKS_LIST_TASKS_RETURN_TYPE: ReturnType = ReturnType {
    optional: false,
    schema: &ArraySchema::new("A list of tasks.", &TaskListItem::API_SCHEMA).schema(),
};

#[api()]
#[derive(Copy, Clone, Serialize, Deserialize)]
#[serde(rename_all = "UPPERCASE")]
/// RRD consolidation mode
pub enum RRDMode {
    /// Maximum
    Max,
    /// Average
    Average,
}

#[api()]
#[derive(Copy, Clone, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// RRD time frame
pub enum RRDTimeFrame {
    /// Hour
    Hour,
    /// Day
    Day,
    /// Week
    Week,
    /// Month
    Month,
    /// Year
    Year,
    /// Decade (10 years)
    Decade,
}

#[api]
#[derive(Deserialize, Serialize, Copy, Clone, PartialEq, Eq)]
#[serde(rename_all = "lowercase")]
/// type of the realm
pub enum RealmType {
    /// The PAM realm
    Pam,
    /// The PBS realm
    Pbs,
    /// An OpenID Connect realm
    OpenId,
    /// An LDAP realm
    Ldap,
    /// An Active Directory (AD) realm
    Ad,
}

serde_plain::derive_display_from_serialize!(RealmType);
serde_plain::derive_fromstr_from_deserialize!(RealmType);

#[api(
    properties: {
        realm: {
            schema: REALM_ID_SCHEMA,
        },
        "type": {
            type: RealmType,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
    },
)]
#[derive(Deserialize, Serialize, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Basic Information about a realm
pub struct BasicRealmInfo {
    pub realm: String,
    #[serde(rename = "type")]
    pub ty: RealmType,
    /// True if it is the default realm
    #[serde(skip_serializing_if = "Option::is_none")]
    pub default: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
}
@ -1,106 +0,0 @@
use anyhow::{bail, Error};
use serde::{Deserialize, Serialize};
use std::borrow::Cow;

use proxmox_schema::{api, const_regex, ApiStringFormat, Schema, StringSchema};

const_regex! {
    pub MAINTENANCE_MESSAGE_REGEX = r"^[[:^cntrl:]]*$";
}

pub const MAINTENANCE_MESSAGE_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&MAINTENANCE_MESSAGE_REGEX);

pub const MAINTENANCE_MESSAGE_SCHEMA: Schema =
    StringSchema::new("Message describing the reason for the maintenance.")
        .format(&MAINTENANCE_MESSAGE_FORMAT)
        .max_length(64)
        .schema();

#[derive(Clone, Copy, Debug, Eq, PartialEq)]
/// Operation requirements, used when checking for maintenance mode.
pub enum Operation {
    /// for any read operation like backup restore or RRD metric collection
    Read,
    /// for any write/delete operation, like backup create or GC
    Write,
    /// for any purely logical operation on the in-memory state of the datastore, e.g., to check if
    /// some mutex could be locked (e.g., GC already running?)
    ///
    /// NOTE: one must *not* do any IO operations when only holding this Op state
    Lookup,
    // GarbageCollect or Delete?
}

#[api]
#[derive(Copy, Clone, Deserialize, Serialize, PartialEq, Eq)]
#[serde(rename_all = "kebab-case")]
/// Maintenance type.
pub enum MaintenanceType {
    // TODO:
    //  - Add "unmounting" once we got pluggable datastores
    //  - Add "GarbageCollection" or "DeleteOnly" as type and track GC (or all deletes) as separate
    //    operation, so that one can enable a mode where nothing new can be added but stuff can be
    //    cleaned
    /// Only read operations are allowed on the datastore.
    ReadOnly,
    /// Neither read nor write operations are allowed on the datastore.
    Offline,
    /// The datastore is being deleted.
    Delete,
}
serde_plain::derive_display_from_serialize!(MaintenanceType);
serde_plain::derive_fromstr_from_deserialize!(MaintenanceType);

#[api(
    properties: {
        type: {
            type: MaintenanceType,
        },
        message: {
            optional: true,
            schema: MAINTENANCE_MESSAGE_SCHEMA,
        }
    },
    default_key: "type",
)]
#[derive(Deserialize, Serialize)]
/// Maintenance mode
pub struct MaintenanceMode {
    /// Type of maintenance ("read-only" or "offline").
    #[serde(rename = "type")]
    pub ty: MaintenanceType,

    /// Reason for maintenance.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub message: Option<String>,
}

impl MaintenanceMode {
    /// Used for deciding whether the datastore is cleared from the internal cache after the last
    /// task finishes, so all open files are closed.
    pub fn is_offline(&self) -> bool {
        self.ty == MaintenanceType::Offline
    }

    pub fn check(&self, operation: Option<Operation>) -> Result<(), Error> {
        if self.ty == MaintenanceType::Delete {
            bail!("datastore is being deleted");
        }

        let message = percent_encoding::percent_decode_str(self.message.as_deref().unwrap_or(""))
            .decode_utf8()
            .unwrap_or(Cow::Borrowed(""));

        if let Some(Operation::Lookup) = operation {
            return Ok(());
        } else if self.ty == MaintenanceType::Offline {
            bail!("offline maintenance mode: {}", message);
        } else if self.ty == MaintenanceType::ReadOnly {
            if let Some(Operation::Write) = operation {
                bail!("read-only maintenance mode: {}", message);
            }
        }
        Ok(())
    }
}
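// Example (added for illustration, not part of the original source): `check` only rejects
// operations that the configured maintenance type forbids; `Lookup` is always allowed.
// The message text below is made-up sample data.
#[cfg(test)]
mod maintenance_mode_example {
    use super::*;

    #[test]
    fn read_only_blocks_writes_only() {
        let mode = MaintenanceMode {
            ty: MaintenanceType::ReadOnly,
            message: Some("upgrading disks".to_string()),
        };
        assert!(mode.check(Some(Operation::Read)).is_ok());
        assert!(mode.check(Some(Operation::Lookup)).is_ok());
        assert!(mode.check(Some(Operation::Write)).is_err());
        assert!(!mode.is_offline());
    }
}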
@ -1,189 +0,0 @@
use serde::{Deserialize, Serialize};

use crate::{
    HOST_PORT_SCHEMA, HTTP_URL_SCHEMA, PROXMOX_SAFE_ID_FORMAT, SINGLE_LINE_COMMENT_SCHEMA,
};
use proxmox_schema::{api, Schema, StringSchema, Updater};

pub const METRIC_SERVER_ID_SCHEMA: Schema = StringSchema::new("Metrics Server ID.")
    .format(&PROXMOX_SAFE_ID_FORMAT)
    .min_length(3)
    .max_length(32)
    .schema();

pub const INFLUXDB_BUCKET_SCHEMA: Schema = StringSchema::new("InfluxDB Bucket.")
    .min_length(3)
    .max_length(32)
    .default("proxmox")
    .schema();

pub const INFLUXDB_ORGANIZATION_SCHEMA: Schema = StringSchema::new("InfluxDB Organization.")
    .min_length(3)
    .max_length(32)
    .default("proxmox")
    .schema();

fn return_true() -> bool {
    true
}

fn is_true(b: &bool) -> bool {
    *b
}

#[api(
    properties: {
        name: {
            schema: METRIC_SERVER_ID_SCHEMA,
        },
        enable: {
            type: bool,
            optional: true,
            default: true,
        },
        host: {
            schema: HOST_PORT_SCHEMA,
        },
        mtu: {
            type: u16,
            optional: true,
            default: 1500,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
    },
)]
#[derive(Serialize, Deserialize, Updater)]
#[serde(rename_all = "kebab-case")]
/// InfluxDB Server (UDP)
pub struct InfluxDbUdp {
    #[updater(skip)]
    pub name: String,
    #[serde(default = "return_true", skip_serializing_if = "is_true")]
    #[updater(serde(skip_serializing_if = "Option::is_none"))]
    /// Enables or disables the metrics server
    pub enable: bool,
    /// the host + port
    pub host: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    /// The MTU
    pub mtu: Option<u16>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
}

#[api(
    properties: {
        name: {
            schema: METRIC_SERVER_ID_SCHEMA,
        },
        enable: {
            type: bool,
            optional: true,
            default: true,
        },
        url: {
            schema: HTTP_URL_SCHEMA,
        },
        token: {
            type: String,
            optional: true,
        },
        bucket: {
            schema: INFLUXDB_BUCKET_SCHEMA,
            optional: true,
        },
        organization: {
            schema: INFLUXDB_ORGANIZATION_SCHEMA,
            optional: true,
        },
        "max-body-size": {
            type: usize,
            optional: true,
            default: 25_000_000,
        },
        "verify-tls": {
            type: bool,
            optional: true,
            default: true,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
    },
)]
#[derive(Serialize, Deserialize, Updater)]
#[serde(rename_all = "kebab-case")]
/// InfluxDB Server (HTTP(s))
pub struct InfluxDbHttp {
    #[updater(skip)]
    pub name: String,
    #[serde(default = "return_true", skip_serializing_if = "is_true")]
    #[updater(serde(skip_serializing_if = "Option::is_none"))]
    /// Enables or disables the metrics server
    pub enable: bool,
    /// The base url of the influxdb server
    pub url: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    /// The (optional) API token
    pub token: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    /// Named location where time series data is stored
    pub bucket: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    /// Workspace for a group of users
    pub organization: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    /// The (optional) maximum body size
    pub max_body_size: Option<usize>,
    #[serde(skip_serializing_if = "Option::is_none")]
    /// If true, the certificate will be validated.
    pub verify_tls: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
}

#[api]
#[derive(Copy, Clone, Deserialize, Serialize, PartialEq, Eq, PartialOrd, Ord)]
/// Type of the metric server
pub enum MetricServerType {
    /// InfluxDB HTTP
    #[serde(rename = "influxdb-http")]
    InfluxDbHttp,
    /// InfluxDB UDP
    #[serde(rename = "influxdb-udp")]
    InfluxDbUdp,
}

#[api(
    properties: {
        name: {
            schema: METRIC_SERVER_ID_SCHEMA,
        },
        "type": {
            type: MetricServerType,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
    },
)]
#[derive(Clone, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]
#[serde(rename_all = "kebab-case")]
/// Basic information about a metric server that's available for all types
pub struct MetricServerInfo {
    pub name: String,
    #[serde(rename = "type")]
    pub ty: MetricServerType,
    /// Enables or disables the metrics server
    #[serde(skip_serializing_if = "Option::is_none")]
    pub enable: Option<bool>,
    /// The target server
    pub server: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
}
@ -1,345 +0,0 @@
use std::fmt;

use serde::{Deserialize, Serialize};

use proxmox_schema::*;

use crate::{
    CIDR_FORMAT, CIDR_V4_FORMAT, CIDR_V6_FORMAT, IP_FORMAT, IP_V4_FORMAT, IP_V6_FORMAT,
    PROXMOX_SAFE_ID_REGEX,
};

pub const NETWORK_INTERFACE_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX);

pub const IP_V4_SCHEMA: Schema = StringSchema::new("IPv4 address.")
    .format(&IP_V4_FORMAT)
    .max_length(15)
    .schema();

pub const IP_V6_SCHEMA: Schema = StringSchema::new("IPv6 address.")
    .format(&IP_V6_FORMAT)
    .max_length(39)
    .schema();

pub const IP_SCHEMA: Schema = StringSchema::new("IP (IPv4 or IPv6) address.")
    .format(&IP_FORMAT)
    .max_length(39)
    .schema();

pub const CIDR_V4_SCHEMA: Schema = StringSchema::new("IPv4 address with netmask (CIDR notation).")
    .format(&CIDR_V4_FORMAT)
    .max_length(18)
    .schema();

pub const CIDR_V6_SCHEMA: Schema = StringSchema::new("IPv6 address with netmask (CIDR notation).")
    .format(&CIDR_V6_FORMAT)
    .max_length(43)
    .schema();

pub const CIDR_SCHEMA: Schema =
    StringSchema::new("IP address (IPv4 or IPv6) with netmask (CIDR notation).")
        .format(&CIDR_FORMAT)
        .max_length(43)
        .schema();

#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// Interface configuration method
pub enum NetworkConfigMethod {
    /// Configuration is done manually using other tools
    Manual,
    /// Define interfaces with statically allocated addresses.
    Static,
    /// Obtain an address via DHCP
    DHCP,
    /// Define the loopback interface.
    Loopback,
}

#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
#[repr(u8)]
/// Linux Bond Mode
pub enum LinuxBondMode {
    /// Round-robin policy
    BalanceRr = 0,
    /// Active-backup policy
    ActiveBackup = 1,
    /// XOR policy
    BalanceXor = 2,
    /// Broadcast policy
    Broadcast = 3,
    /// IEEE 802.3ad Dynamic link aggregation
    #[serde(rename = "802.3ad")]
    Ieee802_3ad = 4,
    /// Adaptive transmit load balancing
    BalanceTlb = 5,
    /// Adaptive load balancing
    BalanceAlb = 6,
}

impl fmt::Display for LinuxBondMode {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str(match self {
            LinuxBondMode::BalanceRr => "balance-rr",
            LinuxBondMode::ActiveBackup => "active-backup",
            LinuxBondMode::BalanceXor => "balance-xor",
            LinuxBondMode::Broadcast => "broadcast",
            LinuxBondMode::Ieee802_3ad => "802.3ad",
            LinuxBondMode::BalanceTlb => "balance-tlb",
            LinuxBondMode::BalanceAlb => "balance-alb",
        })
    }
}

#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
#[repr(u8)]
/// Bond Transmit Hash Policy for LACP (802.3ad)
pub enum BondXmitHashPolicy {
    /// Layer 2
    Layer2 = 0,
    /// Layer 2+3
    #[serde(rename = "layer2+3")]
    Layer2_3 = 1,
    /// Layer 3+4
    #[serde(rename = "layer3+4")]
    Layer3_4 = 2,
}

impl fmt::Display for BondXmitHashPolicy {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str(match self {
            BondXmitHashPolicy::Layer2 => "layer2",
            BondXmitHashPolicy::Layer2_3 => "layer2+3",
            BondXmitHashPolicy::Layer3_4 => "layer3+4",
        })
    }
}

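// Example (added for illustration, not part of the original source): the Display impls
// above render the kernel-facing names rather than the serde/kebab-case identifiers, which
// matters for the "802.3ad" and "layer2+3"/"layer3+4" special cases.
#[cfg(test)]
mod bond_display_example {
    use super::*;

    #[test]
    fn display_uses_kernel_names() {
        assert_eq!(LinuxBondMode::Ieee802_3ad.to_string(), "802.3ad");
        assert_eq!(LinuxBondMode::ActiveBackup.to_string(), "active-backup");
        assert_eq!(BondXmitHashPolicy::Layer3_4.to_string(), "layer3+4");
    }
}
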
#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// Network interface type
pub enum NetworkInterfaceType {
    /// Loopback
    Loopback,
    /// Physical Ethernet device
    Eth,
    /// Linux Bridge
    Bridge,
    /// Linux Bond
    Bond,
    /// Linux VLAN (eth.10)
    Vlan,
    /// Interface Alias (eth:1)
    Alias,
    /// Unknown interface type
    Unknown,
}

pub const NETWORK_INTERFACE_NAME_SCHEMA: Schema = StringSchema::new("Network interface name.")
    .format(&NETWORK_INTERFACE_FORMAT)
    .min_length(1)
    .max_length(15) // libc::IFNAMSIZ-1
    .schema();

pub const NETWORK_INTERFACE_ARRAY_SCHEMA: Schema =
    ArraySchema::new("Network interface list.", &NETWORK_INTERFACE_NAME_SCHEMA).schema();

pub const NETWORK_INTERFACE_LIST_SCHEMA: Schema =
    StringSchema::new("A list of network devices, comma separated.")
        .format(&ApiStringFormat::PropertyString(
            &NETWORK_INTERFACE_ARRAY_SCHEMA,
        ))
        .schema();

#[api(
    properties: {
        name: {
            schema: NETWORK_INTERFACE_NAME_SCHEMA,
        },
        "type": {
            type: NetworkInterfaceType,
        },
        method: {
            type: NetworkConfigMethod,
            optional: true,
        },
        method6: {
            type: NetworkConfigMethod,
            optional: true,
        },
        cidr: {
            schema: CIDR_V4_SCHEMA,
            optional: true,
        },
        cidr6: {
            schema: CIDR_V6_SCHEMA,
            optional: true,
        },
        gateway: {
            schema: IP_V4_SCHEMA,
            optional: true,
        },
        gateway6: {
            schema: IP_V6_SCHEMA,
            optional: true,
        },
        options: {
            description: "Option list (inet)",
            type: Array,
            items: {
                description: "Optional attribute line.",
                type: String,
            },
        },
        options6: {
            description: "Option list (inet6)",
            type: Array,
            items: {
                description: "Optional attribute line.",
                type: String,
            },
        },
        comments: {
            description: "Comments (inet, may span multiple lines)",
            type: String,
            optional: true,
        },
        comments6: {
            description: "Comments (inet6, may span multiple lines)",
            type: String,
            optional: true,
        },
        bridge_ports: {
            schema: NETWORK_INTERFACE_ARRAY_SCHEMA,
            optional: true,
        },
        slaves: {
            schema: NETWORK_INTERFACE_ARRAY_SCHEMA,
            optional: true,
        },
        "vlan-id": {
            description: "VLAN ID.",
            type: u16,
            optional: true,
        },
        "vlan-raw-device": {
            schema: NETWORK_INTERFACE_NAME_SCHEMA,
            optional: true,
        },
        bond_mode: {
            type: LinuxBondMode,
            optional: true,
        },
        "bond-primary": {
            schema: NETWORK_INTERFACE_NAME_SCHEMA,
            optional: true,
        },
        bond_xmit_hash_policy: {
            type: BondXmitHashPolicy,
            optional: true,
        },
    }
)]
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
/// Network Interface configuration
pub struct Interface {
    /// Autostart interface
    #[serde(rename = "autostart")]
    pub autostart: bool,
    /// Interface is active (UP)
    pub active: bool,
    /// Interface name
    pub name: String,
    /// Interface type
    #[serde(rename = "type")]
    pub interface_type: NetworkInterfaceType,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub method: Option<NetworkConfigMethod>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub method6: Option<NetworkConfigMethod>,
    #[serde(skip_serializing_if = "Option::is_none")]
    /// IPv4 address with netmask
    pub cidr: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    /// IPv4 gateway
    pub gateway: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    /// IPv6 address with netmask
    pub cidr6: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    /// IPv6 gateway
    pub gateway6: Option<String>,

    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub options: Vec<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub options6: Vec<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub comments: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comments6: Option<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    /// Maximum Transmission Unit
    pub mtu: Option<u64>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub bridge_ports: Option<Vec<String>>,
    /// Enable bridge vlan support.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub bridge_vlan_aware: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(rename = "vlan-id")]
    pub vlan_id: Option<u16>,
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(rename = "vlan-raw-device")]
    pub vlan_raw_device: Option<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub slaves: Option<Vec<String>>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub bond_mode: Option<LinuxBondMode>,
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(rename = "bond-primary")]
    pub bond_primary: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub bond_xmit_hash_policy: Option<BondXmitHashPolicy>,
}

impl Interface {
    pub fn new(name: String) -> Self {
        Self {
            name,
            interface_type: NetworkInterfaceType::Unknown,
            autostart: false,
            active: false,
            method: None,
            method6: None,
            cidr: None,
            gateway: None,
            cidr6: None,
            gateway6: None,
            options: Vec::new(),
            options6: Vec::new(),
            comments: None,
            comments6: None,
            mtu: None,
            bridge_ports: None,
            bridge_vlan_aware: None,
            vlan_id: None,
            vlan_raw_device: None,
            slaves: None,
            bond_mode: None,
            bond_primary: None,
            bond_xmit_hash_policy: None,
        }
    }
}
@ -1,162 +0,0 @@
use std::ffi::OsStr;

use proxmox_schema::*;
use serde::{Deserialize, Serialize};

use crate::StorageStatus;

#[api]
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "kebab-case")]
/// Node memory usage counters
pub struct NodeMemoryCounters {
    /// Total memory
    pub total: u64,
    /// Used memory
    pub used: u64,
    /// Free memory
    pub free: u64,
}

#[api]
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "kebab-case")]
/// Node swap usage counters
pub struct NodeSwapCounters {
    /// Total swap
    pub total: u64,
    /// Used swap
    pub used: u64,
    /// Free swap
    pub free: u64,
}

#[api]
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "kebab-case")]
/// Contains general node information such as the fingerprint
pub struct NodeInformation {
    /// The SSL Fingerprint
    pub fingerprint: String,
}

#[api]
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "lowercase")]
/// The current kernel version (output of `uname`)
pub struct KernelVersionInformation {
    /// The systemname/nodename
    pub sysname: String,
    /// The kernel release number
    pub release: String,
    /// The kernel version
    pub version: String,
    /// The machine architecture
    pub machine: String,
}

impl KernelVersionInformation {
    pub fn from_uname_parts(
        sysname: &OsStr,
        release: &OsStr,
        version: &OsStr,
        machine: &OsStr,
    ) -> Self {
        KernelVersionInformation {
            sysname: sysname.to_str().map(String::from).unwrap_or_default(),
            release: release.to_str().map(String::from).unwrap_or_default(),
            version: version.to_str().map(String::from).unwrap_or_default(),
            machine: machine.to_str().map(String::from).unwrap_or_default(),
        }
    }

    pub fn get_legacy(&self) -> String {
        format!("{} {} {}", self.sysname, self.release, self.version)
    }
}

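// Example (added for illustration, not part of the original source): `from_uname_parts`
// lossily converts the raw `uname` fields, and `get_legacy` reproduces the old
// space-separated string format. The values below are made-up sample data.
#[cfg(test)]
mod kernel_version_example {
    use super::*;
    use std::ffi::OsStr;

    #[test]
    fn legacy_string_concatenates_uname_fields() {
        let info = KernelVersionInformation::from_uname_parts(
            OsStr::new("Linux"),
            OsStr::new("6.8.12-1-pve"),
            OsStr::new("#1 SMP PREEMPT_DYNAMIC"),
            OsStr::new("x86_64"),
        );
        assert_eq!(info.machine, "x86_64");
        assert_eq!(
            info.get_legacy(),
            "Linux 6.8.12-1-pve #1 SMP PREEMPT_DYNAMIC"
        );
    }
}
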
#[api]
|
|
||||||
#[derive(Serialize, Deserialize, Copy, Clone)]
|
|
||||||
#[serde(rename_all = "kebab-case")]
|
|
||||||
/// The possible BootModes
|
|
||||||
pub enum BootMode {
|
|
||||||
/// The BootMode is EFI/UEFI
|
|
||||||
Efi,
|
|
||||||
/// The BootMode is Legacy BIOS
|
|
||||||
LegacyBios,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[api]
|
|
||||||
#[derive(Serialize, Deserialize, Clone)]
|
|
||||||
#[serde(rename_all = "lowercase")]
|
|
||||||
/// Holds the Bootmodes
|
|
||||||
pub struct BootModeInformation {
|
|
||||||
/// The BootMode, either Efi or Bios
|
|
||||||
pub mode: BootMode,
|
|
||||||
/// SecureBoot status
|
|
||||||
pub secureboot: bool,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[api]
|
|
||||||
#[derive(Serialize, Deserialize, Default)]
|
|
||||||
#[serde(rename_all = "kebab-case")]
|
|
||||||
/// Information about the CPU
|
|
||||||
pub struct NodeCpuInformation {
|
|
||||||
/// The CPU model
|
|
||||||
pub model: String,
|
|
||||||
/// The number of CPU sockets
|
|
||||||
pub sockets: usize,
|
|
||||||
/// The number of CPU cores (incl. threads)
|
|
||||||
pub cpus: usize,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[api(
|
|
||||||
properties: {
|
|
||||||
memory: {
|
|
||||||
type: NodeMemoryCounters,
|
|
||||||
},
|
|
||||||
root: {
|
|
||||||
type: StorageStatus,
|
|
||||||
},
|
|
||||||
swap: {
|
|
||||||
type: NodeSwapCounters,
|
|
||||||
},
|
|
||||||
loadavg: {
|
|
||||||
type: Array,
|
|
||||||
items: {
|
|
||||||
type: Number,
|
|
||||||
description: "the load",
|
|
||||||
}
|
|
||||||
},
|
|
||||||
cpuinfo: {
|
|
||||||
type: NodeCpuInformation,
|
|
||||||
},
|
|
||||||
info: {
|
|
||||||
type: NodeInformation,
|
|
||||||
}
|
|
||||||
},
|
|
||||||
)]
|
|
||||||
#[derive(Serialize, Deserialize)]
|
|
||||||
#[serde(rename_all = "kebab-case")]
|
|
||||||
/// The Node status
|
|
||||||
pub struct NodeStatus {
|
|
||||||
pub memory: NodeMemoryCounters,
|
|
||||||
pub root: StorageStatus,
|
|
||||||
pub swap: NodeSwapCounters,
|
|
||||||
/// The current uptime of the server.
|
|
||||||
pub uptime: u64,
|
|
||||||
/// Load for 1, 5 and 15 minutes.
|
|
||||||
pub loadavg: [f64; 3],
|
|
||||||
/// The current kernel version (NEW struct type).
|
|
||||||
pub current_kernel: KernelVersionInformation,
|
|
||||||
/// The current kernel version (LEGACY string type).
|
|
||||||
pub kversion: String,
|
|
||||||
/// Total CPU usage since last query.
|
|
||||||
pub cpu: f64,
|
|
||||||
/// Total IO wait since last query.
|
|
||||||
pub wait: f64,
|
|
||||||
pub cpuinfo: NodeCpuInformation,
|
|
||||||
pub info: NodeInformation,
|
|
||||||
/// Current boot mode
|
|
||||||
pub boot_info: BootModeInformation,
|
|
||||||
}
|
|
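// Hedged example (not part of the diff): a minimal sketch of how the
// KernelVersionInformation helpers above could be exercised. The uname
// values below are hard-coded stand-ins for the fields normally obtained
// from uname(2); the exact strings are assumptions for illustration only.
#[cfg(test)]
mod kernel_version_example {
    use std::ffi::OsStr;

    use super::KernelVersionInformation;

    #[test]
    fn legacy_string_matches_uname_fields() {
        let info = KernelVersionInformation::from_uname_parts(
            OsStr::new("Linux"),
            OsStr::new("6.8.12-1-pve"),
            OsStr::new("#1 SMP PREEMPT_DYNAMIC"),
            OsStr::new("x86_64"),
        );
        // get_legacy() collapses the struct back into the old single-string format
        // kept in NodeStatus::kversion for backward compatibility.
        assert_eq!(info.get_legacy(), "Linux 6.8.12-1-pve #1 SMP PREEMPT_DYNAMIC");
    }
}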
@ -1,120 +0,0 @@
use serde::{Deserialize, Serialize};

use proxmox_schema::{api, ApiStringFormat, ArraySchema, Schema, StringSchema, Updater};

use super::{
    GENERIC_URI_REGEX, PROXMOX_SAFE_ID_FORMAT, PROXMOX_SAFE_ID_REGEX, REALM_ID_SCHEMA,
    SINGLE_LINE_COMMENT_SCHEMA,
};

pub const OPENID_SCOPE_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX);

pub const OPENID_SCOPE_SCHEMA: Schema = StringSchema::new("OpenID Scope Name.")
    .format(&OPENID_SCOPE_FORMAT)
    .schema();

pub const OPENID_SCOPE_ARRAY_SCHEMA: Schema =
    ArraySchema::new("Array of OpenId Scopes.", &OPENID_SCOPE_SCHEMA).schema();

pub const OPENID_SCOPE_LIST_FORMAT: ApiStringFormat =
    ApiStringFormat::PropertyString(&OPENID_SCOPE_ARRAY_SCHEMA);

pub const OPENID_DEFAILT_SCOPE_LIST: &str = "email profile";
pub const OPENID_SCOPE_LIST_SCHEMA: Schema = StringSchema::new("OpenID Scope List")
    .format(&OPENID_SCOPE_LIST_FORMAT)
    .default(OPENID_DEFAILT_SCOPE_LIST)
    .schema();

pub const OPENID_ACR_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&GENERIC_URI_REGEX);

pub const OPENID_ACR_SCHEMA: Schema =
    StringSchema::new("OpenID Authentication Context Class Reference.")
        .format(&OPENID_ACR_FORMAT)
        .schema();

pub const OPENID_ACR_ARRAY_SCHEMA: Schema =
    ArraySchema::new("Array of OpenId ACRs.", &OPENID_ACR_SCHEMA).schema();

pub const OPENID_ACR_LIST_FORMAT: ApiStringFormat =
    ApiStringFormat::PropertyString(&OPENID_ACR_ARRAY_SCHEMA);

pub const OPENID_ACR_LIST_SCHEMA: Schema = StringSchema::new("OpenID ACR List")
    .format(&OPENID_ACR_LIST_FORMAT)
    .schema();

pub const OPENID_USERNAME_CLAIM_SCHEMA: Schema = StringSchema::new(
    "Use the value of this attribute/claim as unique user name. It \
    is up to the identity provider to guarantee the uniqueness. The \
    OpenID specification only guarantees that Subject ('sub') is \
    unique. Also make sure that the user is not allowed to change that \
    attribute by himself!",
)
.max_length(64)
.min_length(1)
.format(&PROXMOX_SAFE_ID_FORMAT)
.schema();

#[api(
    properties: {
        realm: {
            schema: REALM_ID_SCHEMA,
        },
        "client-key": {
            optional: true,
        },
        "scopes": {
            schema: OPENID_SCOPE_LIST_SCHEMA,
            optional: true,
        },
        "acr-values": {
            schema: OPENID_ACR_LIST_SCHEMA,
            optional: true,
        },
        prompt: {
            description: "OpenID Prompt",
            type: String,
            format: &PROXMOX_SAFE_ID_FORMAT,
            optional: true,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
        autocreate: {
            optional: true,
            default: false,
        },
        "username-claim": {
            schema: OPENID_USERNAME_CLAIM_SCHEMA,
            optional: true,
        },
    },
)]
#[derive(Serialize, Deserialize, Updater)]
#[serde(rename_all = "kebab-case")]
/// OpenID configuration properties.
pub struct OpenIdRealmConfig {
    #[updater(skip)]
    pub realm: String,
    /// OpenID Issuer Url
    pub issuer_url: String,
    /// OpenID Client ID
    pub client_id: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub scopes: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub acr_values: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub prompt: Option<String>,
    /// OpenID Client Key
    #[serde(skip_serializing_if = "Option::is_none")]
    pub client_key: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
    /// Automatically create users if they do not exist.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub autocreate: Option<bool>,
    #[updater(skip)]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub username_claim: Option<String>,
}
@ -1,22 +0,0 @@
use percent_encoding::{utf8_percent_encode, AsciiSet};

/// This used to be: `SIMPLE_ENCODE_SET` plus space, `"`, `#`, `<`, `>`, backtick, `?`, `{`, `}`
pub const DEFAULT_ENCODE_SET: &AsciiSet = &percent_encoding::CONTROLS // 0..1f and 7e
    // The SIMPLE_ENCODE_SET adds space and anything >= 0x7e (7e itself is already included above)
    .add(0x20)
    .add(0x7f)
    // the DEFAULT_ENCODE_SET added:
    .add(b' ')
    .add(b'"')
    .add(b'#')
    .add(b'<')
    .add(b'>')
    .add(b'`')
    .add(b'?')
    .add(b'{')
    .add(b'}');

/// Percent-encode a URL component
pub fn percent_encode_component(comp: &str) -> String {
    utf8_percent_encode(comp, percent_encoding::NON_ALPHANUMERIC).to_string()
}
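// Hedged usage sketch (not part of the diff): percent_encode_component() above
// uses the NON_ALPHANUMERIC set, so everything that is not an ASCII letter or
// digit, including spaces and path separators, gets escaped.
#[cfg(test)]
mod percent_encode_example {
    use super::percent_encode_component;

    #[test]
    fn encodes_reserved_characters() {
        assert_eq!(percent_encode_component("a/b c"), "a%2Fb%20c");
    }
}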
@ -1,106 +0,0 @@
use serde::{Deserialize, Serialize};

use super::*;
use proxmox_schema::*;

pub const REMOTE_PASSWORD_SCHEMA: Schema =
    StringSchema::new("Password or auth token for remote host.")
        .format(&PASSWORD_FORMAT)
        .min_length(1)
        .max_length(1024)
        .schema();

pub const REMOTE_PASSWORD_BASE64_SCHEMA: Schema =
    StringSchema::new("Password or auth token for remote host (stored as base64 string).")
        .format(&PASSWORD_FORMAT)
        .min_length(1)
        .max_length(1024)
        .schema();

pub const REMOTE_ID_SCHEMA: Schema = StringSchema::new("Remote ID.")
    .format(&PROXMOX_SAFE_ID_FORMAT)
    .min_length(3)
    .max_length(32)
    .schema();

#[api(
    properties: {
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
        host: {
            schema: DNS_NAME_OR_IP_SCHEMA,
        },
        port: {
            optional: true,
            description: "The (optional) port",
            type: u16,
        },
        "auth-id": {
            type: Authid,
        },
        fingerprint: {
            optional: true,
            schema: CERT_FINGERPRINT_SHA256_SCHEMA,
        },
    },
)]
#[derive(Serialize, Deserialize, Updater, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Remote configuration properties.
pub struct RemoteConfig {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
    pub host: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub port: Option<u16>,
    pub auth_id: Authid,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub fingerprint: Option<String>,
}

#[api(
    properties: {
        name: {
            schema: REMOTE_ID_SCHEMA,
        },
        config: {
            type: RemoteConfig,
        },
        password: {
            schema: REMOTE_PASSWORD_BASE64_SCHEMA,
        },
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Remote properties.
pub struct Remote {
    pub name: String,
    // Note: The stored password is base64 encoded
    #[serde(default, skip_serializing_if = "String::is_empty")]
    #[serde(with = "proxmox_serde::string_as_base64")]
    pub password: String,
    #[serde(flatten)]
    pub config: RemoteConfig,
}

#[api(
    properties: {
        name: {
            schema: REMOTE_ID_SCHEMA,
        },
        config: {
            type: RemoteConfig,
        },
    },
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Remote properties, without the stored password.
pub struct RemoteWithoutPassword {
    pub name: String,
    #[serde(flatten)]
    pub config: RemoteConfig,
}
@ -1,134 +0,0 @@
//! Types for tape changer API

use serde::{Deserialize, Serialize};

use proxmox_schema::{
    api, ApiStringFormat, ArraySchema, IntegerSchema, Schema, StringSchema, Updater,
};

use crate::{OptionalDeviceIdentification, PROXMOX_SAFE_ID_FORMAT};

pub const CHANGER_NAME_SCHEMA: Schema = StringSchema::new("Tape Changer Identifier.")
    .format(&PROXMOX_SAFE_ID_FORMAT)
    .min_length(3)
    .max_length(32)
    .schema();

pub const SCSI_CHANGER_PATH_SCHEMA: Schema =
    StringSchema::new("Path to Linux generic SCSI device (e.g. '/dev/sg4')").schema();

pub const MEDIA_LABEL_SCHEMA: Schema = StringSchema::new("Media Label/Barcode.")
    .format(&PROXMOX_SAFE_ID_FORMAT)
    .min_length(2)
    .max_length(32)
    .schema();

pub const SLOT_ARRAY_SCHEMA: Schema = ArraySchema::new(
    "Slot list.",
    &IntegerSchema::new("Slot number").minimum(1).schema(),
)
.schema();

pub const EXPORT_SLOT_LIST_SCHEMA: Schema = StringSchema::new(
    "\
A list of slot numbers, comma separated. Those slots are reserved for
Import/Export, i.e. any media in those slots are considered to be
'offline'.
",
)
.format(&ApiStringFormat::PropertyString(&SLOT_ARRAY_SCHEMA))
.schema();

#[api(
    properties: {
        name: {
            schema: CHANGER_NAME_SCHEMA,
        },
        path: {
            schema: SCSI_CHANGER_PATH_SCHEMA,
        },
        "export-slots": {
            schema: EXPORT_SLOT_LIST_SCHEMA,
            optional: true,
        },
        "eject-before-unload": {
            optional: true,
            default: false,
        }
    },
)]
#[derive(Serialize, Deserialize, Updater)]
#[serde(rename_all = "kebab-case")]
/// SCSI tape changer
pub struct ScsiTapeChanger {
    #[updater(skip)]
    pub name: String,
    pub path: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub export_slots: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    /// if set to true, tapes are ejected manually before unloading
    pub eject_before_unload: Option<bool>,
}

#[api(
    properties: {
        config: {
            type: ScsiTapeChanger,
        },
        info: {
            type: OptionalDeviceIdentification,
        },
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Changer config with optional device identification attributes
pub struct ChangerListEntry {
    #[serde(flatten)]
    pub config: ScsiTapeChanger,
    #[serde(flatten)]
    pub info: OptionalDeviceIdentification,
}

#[api()]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Mtx Entry Kind
pub enum MtxEntryKind {
    /// Drive
    Drive,
    /// Slot
    Slot,
    /// Import/Export Slot
    ImportExport,
}

#[api(
    properties: {
        "entry-kind": {
            type: MtxEntryKind,
        },
        "label-text": {
            schema: MEDIA_LABEL_SCHEMA,
            optional: true,
        },
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Mtx Status Entry
pub struct MtxStatusEntry {
    pub entry_kind: MtxEntryKind,
    /// The ID of the slot or drive
    pub entry_id: u64,
    /// The media label (volume tag) if the slot/drive is full
    #[serde(skip_serializing_if = "Option::is_none")]
    pub label_text: Option<String>,
    /// The slot the drive was loaded from
    #[serde(skip_serializing_if = "Option::is_none")]
    pub loaded_slot: Option<u64>,
    /// The current state of the drive
    #[serde(skip_serializing_if = "Option::is_none")]
    pub state: Option<String>,
}
@ -1,55 +0,0 @@
use ::serde::{Deserialize, Serialize};

use proxmox_schema::api;

#[api()]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Optional Device Identification Attributes
pub struct OptionalDeviceIdentification {
    /// Vendor (autodetected)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub vendor: Option<String>,
    /// Model (autodetected)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub model: Option<String>,
    /// Serial number (autodetected)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub serial: Option<String>,
}

#[api()]
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Kind of device
pub enum DeviceKind {
    /// Tape changer (Autoloader, Robot)
    Changer,
    /// Normal SCSI tape device
    Tape,
}

#[api(
    properties: {
        kind: {
            type: DeviceKind,
        },
    },
)]
#[derive(Debug, Serialize, Deserialize)]
/// Tape device information
pub struct TapeDeviceInfo {
    pub kind: DeviceKind,
    /// Path to the linux device node
    pub path: String,
    /// Serial number (autodetected)
    pub serial: String,
    /// Vendor (autodetected)
    pub vendor: String,
    /// Model (autodetected)
    pub model: String,
    /// Device major number
    pub major: u32,
    /// Device minor number
    pub minor: u32,
}
@ -1,349 +0,0 @@
//! Types for tape drive API

use anyhow::{bail, Error};
use serde::{Deserialize, Serialize};

use proxmox_schema::{api, IntegerSchema, Schema, StringSchema, Updater};

use crate::{OptionalDeviceIdentification, CHANGER_NAME_SCHEMA, PROXMOX_SAFE_ID_FORMAT};

pub const DRIVE_NAME_SCHEMA: Schema = StringSchema::new("Drive Identifier.")
    .format(&PROXMOX_SAFE_ID_FORMAT)
    .min_length(3)
    .max_length(32)
    .schema();

pub const LTO_DRIVE_PATH_SCHEMA: Schema =
    StringSchema::new("The path to a LTO SCSI-generic tape device (i.e. '/dev/sg0')").schema();

pub const CHANGER_DRIVENUM_SCHEMA: Schema =
    IntegerSchema::new("Associated changer drive number (requires option changer)")
        .minimum(0)
        .maximum(255)
        .default(0)
        .schema();

#[api(
    properties: {
        name: {
            schema: DRIVE_NAME_SCHEMA,
        }
    }
)]
#[derive(Serialize, Deserialize)]
/// Simulate tape drives (only for test and debug)
#[serde(rename_all = "kebab-case")]
pub struct VirtualTapeDrive {
    pub name: String,
    /// Path to directory
    pub path: String,
    /// Virtual tape size
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_size: Option<usize>,
}

#[api(
    properties: {
        name: {
            schema: DRIVE_NAME_SCHEMA,
        },
        path: {
            schema: LTO_DRIVE_PATH_SCHEMA,
        },
        changer: {
            schema: CHANGER_NAME_SCHEMA,
            optional: true,
        },
        "changer-drivenum": {
            schema: CHANGER_DRIVENUM_SCHEMA,
            optional: true,
        },
    }
)]
#[derive(Serialize, Deserialize, Updater, Clone)]
#[serde(rename_all = "kebab-case")]
/// Lto SCSI tape driver
pub struct LtoTapeDrive {
    #[updater(skip)]
    pub name: String,
    pub path: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub changer: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub changer_drivenum: Option<u64>,
}

#[api(
    properties: {
        config: {
            type: LtoTapeDrive,
        },
        info: {
            type: OptionalDeviceIdentification,
        },
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Drive list entry
pub struct DriveListEntry {
    #[serde(flatten)]
    pub config: LtoTapeDrive,
    #[serde(flatten)]
    pub info: OptionalDeviceIdentification,
    /// the state of the drive if locked
    #[serde(skip_serializing_if = "Option::is_none")]
    pub state: Option<String>,
    /// Current device activity
    #[serde(skip_serializing_if = "Option::is_none")]
    pub activity: Option<DeviceActivity>,
}

#[api()]
#[derive(Serialize, Deserialize)]
/// Medium auxiliary memory attributes (MAM)
pub struct MamAttribute {
    /// Attribute id
    pub id: u16,
    /// Attribute name
    pub name: String,
    /// Attribute value
    pub value: String,
}

#[api()]
#[derive(Serialize, Deserialize, Copy, Clone, Debug, PartialOrd, PartialEq)]
/// Tape density
pub enum TapeDensity {
    /// Unknown (no media loaded)
    Unknown,
    /// LTO1
    LTO1,
    /// LTO2
    LTO2,
    /// LTO3
    LTO3,
    /// LTO4
    LTO4,
    /// LTO5
    LTO5,
    /// LTO6
    LTO6,
    /// LTO7
    LTO7,
    /// LTO7M8
    LTO7M8,
    /// LTO8
    LTO8,
    /// LTO9
    LTO9,
}

impl TryFrom<u8> for TapeDensity {
    type Error = Error;

    fn try_from(value: u8) -> Result<Self, Self::Error> {
        let density = match value {
            0x00 => TapeDensity::Unknown,
            0x40 => TapeDensity::LTO1,
            0x42 => TapeDensity::LTO2,
            0x44 => TapeDensity::LTO3,
            0x46 => TapeDensity::LTO4,
            0x58 => TapeDensity::LTO5,
            0x5a => TapeDensity::LTO6,
            0x5c => TapeDensity::LTO7,
            0x5d => TapeDensity::LTO7M8,
            0x5e => TapeDensity::LTO8,
            0x60 => TapeDensity::LTO9,
            _ => bail!("unknown tape density code 0x{:02x}", value),
        };
        Ok(density)
    }
}

#[api(
    properties: {
        density: {
            type: TapeDensity,
            optional: true,
        },
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Drive/Media status for Lto SCSI drives.
///
/// Media related data is optional - only set if there is a medium
/// loaded.
pub struct LtoDriveAndMediaStatus {
    /// Vendor
    pub vendor: String,
    /// Product
    pub product: String,
    /// Revision
    pub revision: String,
    /// Block size (0 is variable size)
    pub blocksize: u32,
    /// Compression enabled
    pub compression: bool,
    /// Drive buffer mode
    pub buffer_mode: u8,
    /// Tape density
    pub density: TapeDensity,
    /// Media is write protected
    #[serde(skip_serializing_if = "Option::is_none")]
    pub write_protect: Option<bool>,
    /// Tape Alert Flags
    #[serde(skip_serializing_if = "Option::is_none")]
    pub alert_flags: Option<String>,
    /// Current file number
    #[serde(skip_serializing_if = "Option::is_none")]
    pub file_number: Option<u64>,
    /// Current block number
    #[serde(skip_serializing_if = "Option::is_none")]
    pub block_number: Option<u64>,
    /// Medium Manufacture Date (epoch)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub manufactured: Option<i64>,
    /// Total Bytes Read in Medium Life
    #[serde(skip_serializing_if = "Option::is_none")]
    pub bytes_read: Option<u64>,
    /// Total Bytes Written in Medium Life
    #[serde(skip_serializing_if = "Option::is_none")]
    pub bytes_written: Option<u64>,
    /// Number of mounts for the current volume (i.e., Thread Count)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub volume_mounts: Option<u64>,
    /// Count of the total number of times the medium has passed over
    /// the head.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub medium_passes: Option<u64>,
    /// Estimated tape wearout factor (assuming max. 16000 end-to-end passes)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub medium_wearout: Option<f64>,
    /// Current device activity
    #[serde(skip_serializing_if = "Option::is_none")]
    pub drive_activity: Option<DeviceActivity>,
}

#[api()]
/// Volume statistics from SCSI log page 17h
#[derive(Default, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct Lp17VolumeStatistics {
    /// Volume mounts (thread count)
    pub volume_mounts: u64,
    /// Total data sets written
    pub volume_datasets_written: u64,
    /// Write retries
    pub volume_recovered_write_data_errors: u64,
    /// Total unrecovered write errors
    pub volume_unrecovered_write_data_errors: u64,
    /// Total suspended writes
    pub volume_write_servo_errors: u64,
    /// Total fatal suspended writes
    pub volume_unrecovered_write_servo_errors: u64,
    /// Total datasets read
    pub volume_datasets_read: u64,
    /// Total read retries
    pub volume_recovered_read_errors: u64,
    /// Total unrecovered read errors
    pub volume_unrecovered_read_errors: u64,
    /// Last mount unrecovered write errors
    pub last_mount_unrecovered_write_errors: u64,
    /// Last mount unrecovered read errors
    pub last_mount_unrecovered_read_errors: u64,
    /// Last mount bytes written
    pub last_mount_bytes_written: u64,
    /// Last mount bytes read
    pub last_mount_bytes_read: u64,
    /// Lifetime bytes written
    pub lifetime_bytes_written: u64,
    /// Lifetime bytes read
    pub lifetime_bytes_read: u64,
    /// Last load write compression ratio
    pub last_load_write_compression_ratio: u64,
    /// Last load read compression ratio
    pub last_load_read_compression_ratio: u64,
    /// Medium mount time
    pub medium_mount_time: u64,
    /// Medium ready time
    pub medium_ready_time: u64,
    /// Total native capacity
    pub total_native_capacity: u64,
    /// Total used native capacity
    pub total_used_native_capacity: u64,
    /// Write protect
    pub write_protect: bool,
    /// Volume is WORM
    pub worm: bool,
    /// Beginning of medium passes
    pub beginning_of_medium_passes: u64,
    /// Middle of medium passes
    pub middle_of_tape_passes: u64,
    /// Volume serial number
    pub serial: String,
}

/// The DT Device Activity from DT Device Status LP page
#[api]
#[derive(Copy, Clone, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub enum DeviceActivity {
    /// No activity
    NoActivity,
    /// Cleaning
    Cleaning,
    /// Loading
    Loading,
    /// Unloading
    Unloading,
    /// Other unspecified activity
    Other,
    /// Reading
    Reading,
    /// Writing
    Writing,
    /// Locating
    Locating,
    /// Rewinding
    Rewinding,
    /// Erasing
    Erasing,
    /// Formatting
    Formatting,
    /// Calibrating
    Calibrating,
    /// Other (DT)
    OtherDT,
    /// Updating microcode
    MicrocodeUpdate,
    /// Reading encrypted data
    ReadingEncrypted,
    /// Writing encrypted data
    WritingEncrypted,
}

impl TryFrom<u8> for DeviceActivity {
    type Error = Error;

    fn try_from(value: u8) -> Result<Self, Self::Error> {
        Ok(match value {
            0x00 => DeviceActivity::NoActivity,
            0x01 => DeviceActivity::Cleaning,
            0x02 => DeviceActivity::Loading,
            0x03 => DeviceActivity::Unloading,
            0x04 => DeviceActivity::Other,
            0x05 => DeviceActivity::Reading,
            0x06 => DeviceActivity::Writing,
            0x07 => DeviceActivity::Locating,
            0x08 => DeviceActivity::Rewinding,
            0x09 => DeviceActivity::Erasing,
            0x0A => DeviceActivity::Formatting,
            0x0B => DeviceActivity::Calibrating,
            0x0C => DeviceActivity::OtherDT,
            0x0D => DeviceActivity::MicrocodeUpdate,
            0x0E => DeviceActivity::ReadingEncrypted,
            0x0F => DeviceActivity::WritingEncrypted,
            other => bail!("invalid DT device activity value: {:x}", other),
        })
    }
}
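// Hedged example (not part of the diff): the TryFrom<u8> impl above maps the
// SCSI density codes reported by the drive to TapeDensity values and rejects
// codes it does not know about.
#[cfg(test)]
mod tape_density_example {
    use super::TapeDensity;

    #[test]
    fn density_codes_map_to_generations() {
        assert_eq!(TapeDensity::try_from(0x5e).unwrap(), TapeDensity::LTO8);
        // An unassigned code must produce an error rather than a bogus density.
        assert!(TapeDensity::try_from(0x99).is_err());
    }
}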
@ -1,179 +0,0 @@
use ::serde::{Deserialize, Serialize};

use proxmox_schema::*;
use proxmox_uuid::Uuid;

use crate::{MediaLocation, MediaStatus, UUID_FORMAT};

pub const MEDIA_SET_UUID_SCHEMA: Schema = StringSchema::new(
    "MediaSet Uuid (We use the all-zero Uuid to reserve an empty media for a specific pool).",
)
.format(&UUID_FORMAT)
.schema();

pub const MEDIA_UUID_SCHEMA: Schema = StringSchema::new("Media Uuid.")
    .format(&UUID_FORMAT)
    .schema();

#[api(
    properties: {
        "media-set-uuid": {
            schema: MEDIA_SET_UUID_SCHEMA,
        },
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Media Set list entry
pub struct MediaSetListEntry {
    /// Media set name
    pub media_set_name: String,
    pub media_set_uuid: Uuid,
    /// MediaSet creation time stamp
    pub media_set_ctime: i64,
    /// Media Pool
    pub pool: String,
}

#[api(
    properties: {
        location: {
            type: MediaLocation,
        },
        status: {
            type: MediaStatus,
        },
        uuid: {
            schema: MEDIA_UUID_SCHEMA,
        },
        "media-set-uuid": {
            schema: MEDIA_SET_UUID_SCHEMA,
            optional: true,
        },
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Media list entry
pub struct MediaListEntry {
    /// Media label text (or Barcode)
    pub label_text: String,
    pub uuid: Uuid,
    /// Creation time stamp
    pub ctime: i64,
    pub location: MediaLocation,
    pub status: MediaStatus,
    /// Expired flag
    pub expired: bool,
    /// Catalog status OK
    pub catalog: bool,
    /// Media set name
    #[serde(skip_serializing_if = "Option::is_none")]
    pub media_set_name: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub media_set_uuid: Option<Uuid>,
    /// Media set seq_nr
    #[serde(skip_serializing_if = "Option::is_none")]
    pub seq_nr: Option<u64>,
    /// MediaSet creation time stamp
    #[serde(skip_serializing_if = "Option::is_none")]
    pub media_set_ctime: Option<i64>,
    /// Media Pool
    #[serde(skip_serializing_if = "Option::is_none")]
    pub pool: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    /// Bytes currently used
    pub bytes_used: Option<u64>,
}

#[api(
    properties: {
        uuid: {
            schema: MEDIA_UUID_SCHEMA,
        },
        "media-set-uuid": {
            schema: MEDIA_SET_UUID_SCHEMA,
            optional: true,
        },
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Media label info
pub struct MediaIdFlat {
    /// Unique ID
    pub uuid: Uuid,
    /// Media label text (or Barcode)
    pub label_text: String,
    /// Creation time stamp
    pub ctime: i64,
    // All MediaSet properties are optional here
    /// MediaSet Pool
    #[serde(skip_serializing_if = "Option::is_none")]
    pub pool: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub media_set_uuid: Option<Uuid>,
    /// MediaSet media sequence number
    #[serde(skip_serializing_if = "Option::is_none")]
    pub seq_nr: Option<u64>,
    /// MediaSet Creation time stamp
    #[serde(skip_serializing_if = "Option::is_none")]
    pub media_set_ctime: Option<i64>,
    /// Encryption key fingerprint
    #[serde(skip_serializing_if = "Option::is_none")]
    pub encryption_key_fingerprint: Option<String>,
}

#[api(
    properties: {
        uuid: {
            schema: MEDIA_UUID_SCHEMA,
            optional: true,
        },
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Label with optional Uuid
pub struct LabelUuidMap {
    /// Changer label text (or Barcode)
    pub label_text: String,
    /// Associated Uuid (if any)
    pub uuid: Option<Uuid>,
}

#[api(
    properties: {
        uuid: {
            schema: MEDIA_UUID_SCHEMA,
        },
        "media-set-uuid": {
            schema: MEDIA_SET_UUID_SCHEMA,
        },
    },
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Media content list entry
pub struct MediaContentEntry {
    /// Media label text (or Barcode)
    pub label_text: String,
    /// Media Uuid
    pub uuid: Uuid,
    /// Media set name
    pub media_set_name: String,
    /// Media set uuid
    pub media_set_uuid: Uuid,
    /// MediaSet Creation time stamp
    pub media_set_ctime: i64,
    /// Media set seq_nr
    pub seq_nr: u64,
    /// Media Pool
    pub pool: String,
    /// Datastore Name
    pub store: String,
    /// Backup snapshot
    pub snapshot: String,
    /// Snapshot creation time (epoch)
    pub backup_time: i64,
}
@ -1,80 +0,0 @@
use anyhow::{bail, Error};

use proxmox_schema::{ApiStringFormat, Schema, StringSchema};

use crate::{CHANGER_NAME_SCHEMA, PROXMOX_SAFE_ID_FORMAT};

pub const VAULT_NAME_SCHEMA: Schema = StringSchema::new("Vault name.")
    .format(&PROXMOX_SAFE_ID_FORMAT)
    .min_length(3)
    .max_length(32)
    .schema();

#[derive(Debug, PartialEq, Eq, Clone)]
/// Media location
pub enum MediaLocation {
    /// Ready for use (inside tape library)
    Online(String),
    /// Locally available, but needs to be mounted (insert into tape
    /// drive)
    Offline,
    /// Media is inside a Vault
    Vault(String),
}

proxmox_serde::forward_deserialize_to_from_str!(MediaLocation);
proxmox_serde::forward_serialize_to_display!(MediaLocation);

impl proxmox_schema::ApiType for MediaLocation {
    const API_SCHEMA: Schema = StringSchema::new(
        "Media location (e.g. 'offline', 'online-<changer_name>', 'vault-<vault_name>')",
    )
    .format(&ApiStringFormat::VerifyFn(|text| {
        let location: MediaLocation = text.parse()?;
        match location {
            MediaLocation::Online(ref changer) => {
                CHANGER_NAME_SCHEMA.parse_simple_value(changer)?;
            }
            MediaLocation::Vault(ref vault) => {
                VAULT_NAME_SCHEMA.parse_simple_value(vault)?;
            }
            MediaLocation::Offline => { /* OK */ }
        }
        Ok(())
    }))
    .schema();
}

impl std::fmt::Display for MediaLocation {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            MediaLocation::Offline => {
                write!(f, "offline")
            }
            MediaLocation::Online(changer) => {
                write!(f, "online-{}", changer)
            }
            MediaLocation::Vault(vault) => {
                write!(f, "vault-{}", vault)
            }
        }
    }
}

impl std::str::FromStr for MediaLocation {
    type Err = Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        if s == "offline" {
            return Ok(MediaLocation::Offline);
        }
        if let Some(changer) = s.strip_prefix("online-") {
            return Ok(MediaLocation::Online(changer.to_string()));
        }
        if let Some(vault) = s.strip_prefix("vault-") {
            return Ok(MediaLocation::Vault(vault.to_string()));
        }

        bail!("MediaLocation parse error");
    }
}
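// Hedged example (not part of the diff): the FromStr/Display pair above defines
// the wire format used by the API ('offline', 'online-<changer>',
// 'vault-<vault>'); parsing and formatting round-trip through that format.
#[cfg(test)]
mod media_location_example {
    use super::MediaLocation;

    #[test]
    fn parse_and_display_round_trip() {
        let loc: MediaLocation = "online-changer0".parse().unwrap();
        assert_eq!(loc, MediaLocation::Online("changer0".to_string()));
        assert_eq!(loc.to_string(), "online-changer0");
        // Unknown prefixes are rejected instead of silently mapped to Offline.
        assert!("badprefix-x".parse::<MediaLocation>().is_err());
    }
}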
@ -1,161 +0,0 @@
//! Types for tape media pool API
//!
//! Note: Both MediaSetPolicy and RetentionPolicy are complex enums,
//! so we cannot use them directly for the API. Instead, we represent
//! them as String.

use std::str::FromStr;

use anyhow::Error;
use serde::{Deserialize, Serialize};

use proxmox_schema::{api, ApiStringFormat, Schema, StringSchema, Updater};

use proxmox_time::{CalendarEvent, TimeSpan};

use crate::{
    PROXMOX_SAFE_ID_FORMAT, SINGLE_LINE_COMMENT_FORMAT, SINGLE_LINE_COMMENT_SCHEMA,
    TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
};

pub const MEDIA_POOL_NAME_SCHEMA: Schema = StringSchema::new("Media pool name.")
    .format(&PROXMOX_SAFE_ID_FORMAT)
    .min_length(2)
    .max_length(32)
    .schema();

pub const MEDIA_SET_NAMING_TEMPLATE_SCHEMA: Schema = StringSchema::new(
    "Media set naming template (may contain strftime() time format specifications).",
)
.format(&SINGLE_LINE_COMMENT_FORMAT)
.min_length(2)
.max_length(64)
.schema();

pub const MEDIA_SET_ALLOCATION_POLICY_FORMAT: ApiStringFormat = ApiStringFormat::VerifyFn(|s| {
    MediaSetPolicy::from_str(s)?;
    Ok(())
});

pub const MEDIA_SET_ALLOCATION_POLICY_SCHEMA: Schema =
    StringSchema::new("Media set allocation policy ('continue', 'always', or a calendar event).")
        .format(&MEDIA_SET_ALLOCATION_POLICY_FORMAT)
        .schema();

/// Media set allocation policy
pub enum MediaSetPolicy {
    /// Try to use the current media set
    ContinueCurrent,
    /// Each backup job creates a new media set
    AlwaysCreate,
    /// Create a new set when the specified CalendarEvent triggers
    CreateAt(CalendarEvent),
}

impl std::str::FromStr for MediaSetPolicy {
    type Err = Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        if s == "continue" {
            return Ok(MediaSetPolicy::ContinueCurrent);
        }
        if s == "always" {
            return Ok(MediaSetPolicy::AlwaysCreate);
        }

        let event = s.parse()?;

        Ok(MediaSetPolicy::CreateAt(event))
    }
}

pub const MEDIA_RETENTION_POLICY_FORMAT: ApiStringFormat = ApiStringFormat::VerifyFn(|s| {
    RetentionPolicy::from_str(s)?;
    Ok(())
});

pub const MEDIA_RETENTION_POLICY_SCHEMA: Schema =
    StringSchema::new("Media retention policy ('overwrite', 'keep', or time span).")
        .format(&MEDIA_RETENTION_POLICY_FORMAT)
        .schema();

/// Media retention policy
pub enum RetentionPolicy {
    /// Always overwrite media
    OverwriteAlways,
    /// Protect data for the timespan specified
    ProtectFor(TimeSpan),
    /// Never overwrite data
    KeepForever,
}

impl std::str::FromStr for RetentionPolicy {
    type Err = Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        if s == "overwrite" {
            return Ok(RetentionPolicy::OverwriteAlways);
        }
        if s == "keep" {
            return Ok(RetentionPolicy::KeepForever);
        }

        let time_span = s.parse()?;

        Ok(RetentionPolicy::ProtectFor(time_span))
    }
}

#[api(
    properties: {
        name: {
            schema: MEDIA_POOL_NAME_SCHEMA,
        },
        allocation: {
            schema: MEDIA_SET_ALLOCATION_POLICY_SCHEMA,
            optional: true,
        },
        retention: {
            schema: MEDIA_RETENTION_POLICY_SCHEMA,
            optional: true,
        },
        template: {
            schema: MEDIA_SET_NAMING_TEMPLATE_SCHEMA,
            optional: true,
        },
        encrypt: {
            schema: TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
            optional: true,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
    },
)]
#[derive(Serialize, Deserialize, Updater)]
/// Media pool configuration
pub struct MediaPoolConfig {
    /// The pool name
    #[updater(skip)]
    pub name: String,
    /// Media Set allocation policy
    #[serde(skip_serializing_if = "Option::is_none")]
    pub allocation: Option<String>,
    /// Media retention policy
    #[serde(skip_serializing_if = "Option::is_none")]
    pub retention: Option<String>,
    /// Media set naming template (default "%c")
    ///
    /// The template is UTF8 text, and can include strftime time
    /// format specifications.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub template: Option<String>,
    /// Encryption key fingerprint
    ///
    /// If set, encrypt all data using the specified key.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub encrypt: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
}
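// Hedged example (not part of the diff): the policy strings stored in
// MediaPoolConfig are parsed with the FromStr impls above; any value that is
// not one of the fixed keywords falls through to the calendar-event or
// time-span parser.
#[cfg(test)]
mod media_pool_policy_example {
    use super::{MediaSetPolicy, RetentionPolicy};

    #[test]
    fn keyword_policies_parse() {
        assert!(matches!(
            "always".parse::<MediaSetPolicy>().unwrap(),
            MediaSetPolicy::AlwaysCreate
        ));
        assert!(matches!(
            "keep".parse::<RetentionPolicy>().unwrap(),
            RetentionPolicy::KeepForever
        ));
    }
}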
@ -1,21 +0,0 @@
use serde::{Deserialize, Serialize};

use proxmox_schema::api;

#[api()]
#[derive(Debug, PartialEq, Eq, Copy, Clone, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// Media status
pub enum MediaStatus {
    /// Media is ready to be written
    Writable,
    /// Media is full (contains data)
    Full,
    /// Media is marked as unknown, needs rescan
    Unknown,
    /// Media is marked as damaged
    Damaged,
    /// Media is marked as retired
    Retired,
}
@ -1,92 +0,0 @@
//! Types for tape backup API

mod device;
pub use device::*;

mod changer;
pub use changer::*;

mod drive;
pub use drive::*;

mod media_pool;
pub use media_pool::*;

mod media_status;
pub use media_status::*;

mod media_location;
pub use media_location::*;

mod media;
pub use media::*;

use const_format::concatcp;
use serde::{Deserialize, Serialize};

use proxmox_schema::{api, const_regex, ApiStringFormat, Schema, StringSchema};
use proxmox_uuid::Uuid;

use crate::{
    BackupType, BACKUP_ID_SCHEMA, BACKUP_NS_PATH_RE, FINGERPRINT_SHA256_FORMAT,
    PROXMOX_SAFE_ID_REGEX_STR, SNAPSHOT_PATH_REGEX_STR,
};

const_regex! {
    pub TAPE_RESTORE_SNAPSHOT_REGEX = concatcp!(r"^", PROXMOX_SAFE_ID_REGEX_STR, r":(?:", BACKUP_NS_PATH_RE,")?", SNAPSHOT_PATH_REGEX_STR, r"$");
}

pub const TAPE_RESTORE_SNAPSHOT_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&TAPE_RESTORE_SNAPSHOT_REGEX);

pub const TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA: Schema =
    StringSchema::new("Tape encryption key fingerprint (sha256).")
        .format(&FINGERPRINT_SHA256_FORMAT)
        .schema();

pub const TAPE_RESTORE_SNAPSHOT_SCHEMA: Schema =
    StringSchema::new("A snapshot in the format: 'store:[ns/namespace/...]type/id/time'")
        .format(&TAPE_RESTORE_SNAPSHOT_FORMAT)
        .type_text("store:[ns/namespace/...]type/id/time")
        .schema();

#[api(
    properties: {
        pool: {
            schema: MEDIA_POOL_NAME_SCHEMA,
            optional: true,
        },
        "label-text": {
            schema: MEDIA_LABEL_SCHEMA,
            optional: true,
        },
        "media": {
            schema: MEDIA_UUID_SCHEMA,
            optional: true,
        },
        "media-set": {
            schema: MEDIA_SET_UUID_SCHEMA,
            optional: true,
        },
        "backup-type": {
            type: BackupType,
            optional: true,
        },
        "backup-id": {
            schema: BACKUP_ID_SCHEMA,
            optional: true,
        },
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Content list filter parameters
pub struct MediaContentListFilter {
    pub pool: Option<String>,
    pub label_text: Option<String>,
    pub media: Option<Uuid>,
    pub media_set: Option<Uuid>,
    pub backup_type: Option<BackupType>,
    pub backup_id: Option<String>,
}
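// Hedged example (not part of the diff): a sketch of how TAPE_RESTORE_SNAPSHOT_SCHEMA
// above could be exercised via Schema::parse_simple_value, as done elsewhere in this
// crate. The concrete snapshot literal ('store1:vm/100/...') is an assumption about
// what the composed regex accepts, not taken from the source.
#[cfg(test)]
mod tape_restore_snapshot_example {
    use super::TAPE_RESTORE_SNAPSHOT_SCHEMA;

    #[test]
    fn snapshot_paths_validate() {
        assert!(TAPE_RESTORE_SNAPSHOT_SCHEMA
            .parse_simple_value("store1:vm/100/2024-01-01T00:00:00Z")
            .is_ok());
        assert!(TAPE_RESTORE_SNAPSHOT_SCHEMA
            .parse_simple_value("not a snapshot")
            .is_err());
    }
}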
@ -1,141 +0,0 @@
use serde::{Deserialize, Serialize};

use proxmox_human_byte::HumanByte;
use proxmox_schema::{api, IntegerSchema, Schema, StringSchema, Updater};

use crate::{
    CIDR_SCHEMA, DAILY_DURATION_FORMAT, PROXMOX_SAFE_ID_FORMAT, SINGLE_LINE_COMMENT_SCHEMA,
};

pub const TRAFFIC_CONTROL_TIMEFRAME_SCHEMA: Schema =
    StringSchema::new("Timeframe to specify when the rule is active.")
        .format(&DAILY_DURATION_FORMAT)
        .schema();

pub const TRAFFIC_CONTROL_ID_SCHEMA: Schema = StringSchema::new("Rule ID.")
    .format(&PROXMOX_SAFE_ID_FORMAT)
    .min_length(3)
    .max_length(32)
    .schema();

pub const TRAFFIC_CONTROL_RATE_SCHEMA: Schema =
    IntegerSchema::new("Rate limit (for Token bucket filter) in bytes/second.")
        .minimum(100_000)
        .schema();

pub const TRAFFIC_CONTROL_BURST_SCHEMA: Schema =
    IntegerSchema::new("Size of the token bucket (for Token bucket filter) in bytes.")
        .minimum(1000)
        .schema();

#[api(
    properties: {
        "rate-in": {
            type: HumanByte,
            optional: true,
        },
        "burst-in": {
            type: HumanByte,
            optional: true,
        },
        "rate-out": {
            type: HumanByte,
            optional: true,
        },
        "burst-out": {
            type: HumanByte,
            optional: true,
        },
    },
)]
#[derive(Serialize, Deserialize, Default, Clone, Updater, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Rate Limit Configuration
pub struct RateLimitConfig {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub rate_in: Option<HumanByte>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub burst_in: Option<HumanByte>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub rate_out: Option<HumanByte>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub burst_out: Option<HumanByte>,
}

impl RateLimitConfig {
    pub fn with_same_inout(rate: Option<HumanByte>, burst: Option<HumanByte>) -> Self {
        Self {
            rate_in: rate,
            burst_in: burst,
            rate_out: rate,
            burst_out: burst,
        }
    }
}

#[api(
    properties: {
        name: {
            schema: TRAFFIC_CONTROL_ID_SCHEMA,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
        limit: {
            type: RateLimitConfig,
        },
        network: {
            type: Array,
            items: {
                schema: CIDR_SCHEMA,
            },
        },
        timeframe: {
            type: Array,
            items: {
                schema: TRAFFIC_CONTROL_TIMEFRAME_SCHEMA,
            },
            optional: true,
        },
    },
)]
#[derive(Clone, Serialize, Deserialize, PartialEq, Updater)]
#[serde(rename_all = "kebab-case")]
/// Traffic control rule
pub struct TrafficControlRule {
    #[updater(skip)]
    pub name: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
    /// Rule applies to source IPs within these networks
    pub network: Vec<String>,
    #[serde(flatten)]
    pub limit: RateLimitConfig,
    // fixme: expose this?
    // /// Bandwidth is shared across all connections
    // #[serde(skip_serializing_if="Option::is_none")]
    // pub shared: Option<bool>,
    /// Enable the rule at specific times
    #[serde(skip_serializing_if = "Option::is_none")]
    pub timeframe: Option<Vec<String>>,
}

#[api(
    properties: {
        config: {
            type: TrafficControlRule,
        },
    },
)]
#[derive(Clone, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Traffic control rule config with current rates
pub struct TrafficControlCurrentRate {
    #[serde(flatten)]
    pub config: TrafficControlRule,
    /// Current ingress rate in bytes/second
    pub cur_rate_in: u64,
    /// Current egress rate in bytes/second
    pub cur_rate_out: u64,
}
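// Hedged example (not part of the diff): with_same_inout() above mirrors a single
// rate/burst pair into both traffic directions; passing None leaves a direction
// unlimited. HumanByte construction is deliberately avoided here to keep the
// sketch independent of that crate's constructor API.
#[cfg(test)]
mod rate_limit_example {
    use super::RateLimitConfig;

    #[test]
    fn unlimited_config_has_no_directions_set() {
        let limit = RateLimitConfig::with_same_inout(None, None);
        assert!(limit.rate_in.is_none() && limit.rate_out.is_none());
        assert!(limit.burst_in.is_none() && limit.burst_out.is_none());
    }
}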
@ -1,226 +0,0 @@
|
|||||||
use serde::{Deserialize, Serialize};
|
|
||||||
|
|
||||||
use proxmox_schema::{api, BooleanSchema, IntegerSchema, Schema, StringSchema, Updater};
|
|
||||||
|
|
||||||
use super::userid::{Authid, Userid, PROXMOX_TOKEN_ID_SCHEMA};
|
|
||||||
use super::{SINGLE_LINE_COMMENT_FORMAT, SINGLE_LINE_COMMENT_SCHEMA};
|
|
||||||
|
|
||||||
pub const ENABLE_USER_SCHEMA: Schema = BooleanSchema::new(
|
|
||||||
"Enable the account (default). You can set this to '0' to disable the account.",
|
|
||||||
)
|
|
||||||
.default(true)
|
|
||||||
.schema();
|
|
||||||
|
|
||||||
pub const EXPIRE_USER_SCHEMA: Schema = IntegerSchema::new(
|
|
||||||
"Account expiration date (seconds since epoch). '0' means no expiration date.",
|
|
||||||
)
|
|
||||||
.default(0)
|
|
||||||
.minimum(0)
|
|
||||||
.schema();
|
|
||||||
|
|
||||||
pub const FIRST_NAME_SCHEMA: Schema = StringSchema::new("First name.")
|
|
||||||
.format(&SINGLE_LINE_COMMENT_FORMAT)
|
|
||||||
.min_length(2)
|
|
||||||
.max_length(64)
|
|
    .schema();

pub const LAST_NAME_SCHEMA: Schema = StringSchema::new("Last name.")
    .format(&SINGLE_LINE_COMMENT_FORMAT)
    .min_length(2)
    .max_length(64)
    .schema();

pub const EMAIL_SCHEMA: Schema = StringSchema::new("E-Mail Address.")
    .format(&SINGLE_LINE_COMMENT_FORMAT)
    .min_length(2)
    .max_length(64)
    .schema();

#[api(
    properties: {
        userid: {
            type: Userid,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
        enable: {
            optional: true,
            schema: ENABLE_USER_SCHEMA,
        },
        expire: {
            optional: true,
            schema: EXPIRE_USER_SCHEMA,
        },
        firstname: {
            optional: true,
            schema: FIRST_NAME_SCHEMA,
        },
        lastname: {
            schema: LAST_NAME_SCHEMA,
            optional: true,
        },
        email: {
            schema: EMAIL_SCHEMA,
            optional: true,
        },
        tokens: {
            type: Array,
            optional: true,
            description: "List of user's API tokens.",
            items: {
                type: ApiToken
            },
        },
        "totp-locked": {
            type: bool,
            optional: true,
            default: false,
            description: "True if the user is currently locked out of TOTP factors",
        },
        "tfa-locked-until": {
            optional: true,
            description: "Contains a timestamp until when a user is locked out of 2nd factors",
        },
    }
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// User properties with added list of ApiTokens
pub struct UserWithTokens {
    pub userid: Userid,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub enable: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub expire: Option<i64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub firstname: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub lastname: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub email: Option<String>,
    #[serde(skip_serializing_if = "Vec::is_empty", default)]
    pub tokens: Vec<ApiToken>,
    #[serde(skip_serializing_if = "bool_is_false", default)]
    pub totp_locked: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tfa_locked_until: Option<i64>,
}

fn bool_is_false(b: &bool) -> bool {
    !b
}

#[api(
    properties: {
        tokenid: {
            schema: PROXMOX_TOKEN_ID_SCHEMA,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
        enable: {
            optional: true,
            schema: ENABLE_USER_SCHEMA,
        },
        expire: {
            optional: true,
            schema: EXPIRE_USER_SCHEMA,
        },
    }
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
/// ApiToken properties.
pub struct ApiToken {
    pub tokenid: Authid,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub enable: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub expire: Option<i64>,
}

impl ApiToken {
    pub fn is_active(&self) -> bool {
        if !self.enable.unwrap_or(true) {
            return false;
        }
        if let Some(expire) = self.expire {
            let now = proxmox_time::epoch_i64();
            if expire > 0 && expire <= now {
                return false;
            }
        }
        true
    }
}

#[api(
    properties: {
        userid: {
            type: Userid,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
        enable: {
            optional: true,
            schema: ENABLE_USER_SCHEMA,
        },
        expire: {
            optional: true,
            schema: EXPIRE_USER_SCHEMA,
        },
        firstname: {
            optional: true,
            schema: FIRST_NAME_SCHEMA,
        },
        lastname: {
            schema: LAST_NAME_SCHEMA,
            optional: true,
        },
        email: {
            schema: EMAIL_SCHEMA,
            optional: true,
        },
    }
)]
#[derive(Serialize, Deserialize, Updater, PartialEq, Eq)]
/// User properties.
pub struct User {
    #[updater(skip)]
    pub userid: Userid,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub enable: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub expire: Option<i64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub firstname: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub lastname: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub email: Option<String>,
}

impl User {
    pub fn is_active(&self) -> bool {
        if !self.enable.unwrap_or(true) {
            return false;
        }
        if let Some(expire) = self.expire {
            let now = proxmox_time::epoch_i64();
            if expire > 0 && expire <= now {
                return false;
            }
        }
        true
    }
}
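Editor's illustration, not part of the changeset: it exercises the `is_active()` rules defined above. It assumes `Authid` implements `FromStr` for the usual `user@realm!tokenname` form used by Proxmox Backup Server; everything else comes straight from the struct and method shown here.

```rust
// Hedged sketch only: an enabled token whose `expire` epoch lies in the past is
// inactive; `expire == 0` (or `None`) means "never expires".
let token = ApiToken {
    tokenid: "root@pam!build".parse().unwrap(), // assumed FromStr impl on Authid
    comment: None,
    enable: Some(true),
    expire: Some(1), // epoch 1 is long in the past
};
assert!(!token.is_active()); // enabled, but expired

let never_expires = ApiToken { expire: Some(0), ..token };
assert!(never_expires.is_active()); // expire == 0 counts as "no expiry"
```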
(deleted file)
@@ -1,78 +0,0 @@
-use serde::{Deserialize, Serialize};
-
-use proxmox_schema::*;
-
-const_regex! {
-    pub ZPOOL_NAME_REGEX = r"^[a-zA-Z][a-z0-9A-Z\-_.:]+$";
-}
-
-pub const ZFS_ASHIFT_SCHEMA: Schema = IntegerSchema::new("Pool sector size exponent.")
-    .minimum(9)
-    .maximum(16)
-    .default(12)
-    .schema();
-
-pub const ZPOOL_NAME_SCHEMA: Schema = StringSchema::new("ZFS Pool Name")
-    .format(&ApiStringFormat::Pattern(&ZPOOL_NAME_REGEX))
-    .schema();
-
-#[api(default: "On")]
-#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
-#[serde(rename_all = "lowercase")]
-/// The ZFS compression algorithm to use.
-pub enum ZfsCompressionType {
-    /// Gnu Zip
-    Gzip,
-    /// LZ4
-    Lz4,
-    /// LZJB
-    Lzjb,
-    /// ZLE
-    Zle,
-    /// ZStd
-    ZStd,
-    /// Enable compression using the default algorithm.
-    On,
-    /// Disable compression.
-    Off,
-}
-
-#[api()]
-#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
-#[serde(rename_all = "lowercase")]
-/// The ZFS RAID level to use.
-pub enum ZfsRaidLevel {
-    /// Single Disk
-    Single,
-    /// Mirror
-    Mirror,
-    /// Raid10
-    Raid10,
-    /// RaidZ
-    RaidZ,
-    /// RaidZ2
-    RaidZ2,
-    /// RaidZ3
-    RaidZ3,
-}
-
-#[api()]
-#[derive(Debug, Serialize, Deserialize)]
-#[serde(rename_all = "kebab-case")]
-/// zpool list item
-pub struct ZpoolListItem {
-    /// zpool name
-    pub name: String,
-    /// Health
-    pub health: String,
-    /// Total size
-    pub size: u64,
-    /// Used size
-    pub alloc: u64,
-    /// Free space
-    pub free: u64,
-    /// ZFS fragnentation level
-    pub frag: u64,
-    /// ZFS deduplication ratio
-    pub dedup: f64,
-}
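Editor's note on the enum removed above: its wire format came from `#[serde(rename_all = "lowercase")]`, so variants serialize as plain lowercase strings. A minimal sketch, assuming `serde_json` is available in the calling context:

```rust
// Sketch only: shows the serialized form implied by #[serde(rename_all = "lowercase")].
assert_eq!(serde_json::to_string(&ZfsCompressionType::Lz4).unwrap(), r#""lz4""#);
assert_eq!(serde_json::to_string(&ZfsCompressionType::ZStd).unwrap(), r#""zstd""#);
assert_eq!(serde_json::to_string(&ZfsCompressionType::On).unwrap(), r#""on""#);
```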
(deleted file)
@@ -1,76 +0,0 @@
-use pbs_api_types::{BackupGroup, BackupType, GroupFilter};
-use std::str::FromStr;
-
-#[test]
-fn test_no_filters() {
-    let group_filters = vec![];
-
-    let do_backup = [
-        "vm/101", "vm/102", "vm/103", "vm/104", "vm/105", "vm/106", "vm/107", "vm/108", "vm/109",
-    ];
-
-    for id in do_backup {
-        assert!(BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters));
-    }
-}
-
-#[test]
-fn test_include_filters() {
-    let group_filters = vec![GroupFilter::from_str("regex:.*10[2-8]").unwrap()];
-
-    let do_backup = [
-        "vm/102", "vm/103", "vm/104", "vm/105", "vm/106", "vm/107", "vm/108",
-    ];
-
-    let dont_backup = ["vm/101", "vm/109"];
-
-    for id in do_backup {
-        assert!(BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters));
-    }
-
-    for id in dont_backup {
-        assert!(!BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters));
-    }
-}
-
-#[test]
-fn test_exclude_filters() {
-    let group_filters = [
-        GroupFilter::from_str("exclude:regex:.*10[1-3]").unwrap(),
-        GroupFilter::from_str("exclude:regex:.*10[5-7]").unwrap(),
-    ];
-
-    let do_backup = ["vm/104", "vm/108", "vm/109"];
-
-    let dont_backup = ["vm/101", "vm/102", "vm/103", "vm/105", "vm/106", "vm/107"];
-
-    for id in do_backup {
-        assert!(BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters));
-    }
-    for id in dont_backup {
-        assert!(!BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters));
-    }
-}
-
-#[test]
-fn test_include_and_exclude_filters() {
-    let group_filters = [
-        GroupFilter::from_str("exclude:regex:.*10[1-3]").unwrap(),
-        GroupFilter::from_str("regex:.*10[2-8]").unwrap(),
-        GroupFilter::from_str("exclude:regex:.*10[5-7]").unwrap(),
-    ];
-
-    let do_backup = ["vm/104", "vm/108"];
-
-    let dont_backup = [
-        "vm/101", "vm/102", "vm/103", "vm/105", "vm/106", "vm/107", "vm/109",
-    ];
-
-    for id in do_backup {
-        assert!(BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters));
-    }
-
-    for id in dont_backup {
-        assert!(!BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters));
-    }
-}
@@ -98,6 +98,8 @@ pub const PROXMOX_BACKUP_KERNEL_FN: &str =
 
 pub const PROXMOX_BACKUP_SUBSCRIPTION_FN: &str = configdir!("/subscription");
 
+pub const APT_PKG_STATE_FN: &str = concat!(PROXMOX_BACKUP_STATE_DIR_M!(), "/pkg-state.json");
+
 /// Prepend configuration directory to a file name
 ///
 /// This is a simply way to get the full path for configuration files.
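Editor's illustration of the added constant: `concat!` joins the state-directory macro and the file name at compile time. The expansion of `PROXMOX_BACKUP_STATE_DIR_M!()` is not visible in this hunk, so the path below is an assumption.

```rust
// Assumes PROXMOX_BACKUP_STATE_DIR_M!() expands to "/var/lib/proxmox-backup";
// that value is not shown in this hunk.
assert_eq!(APT_PKG_STATE_FN, "/var/lib/proxmox-backup/pkg-state.json");
```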
@@ -12,11 +12,8 @@ bytes.workspace = true
 futures.workspace = true
 h2.workspace = true
 hex.workspace = true
-http.workspace = true
 hyper.workspace = true
-lazy_static.workspace = true
 libc.workspace = true
-log.workspace = true
 nix.workspace = true
 openssl.workspace = true
 percent-encoding.workspace = true
@@ -30,6 +27,7 @@ tokio = { workspace = true, features = [ "fs", "signal" ] }
 tokio-stream.workspace = true
 tower-service.workspace = true
 xdg.workspace = true
+hickory-resolver.workspace = true
 
 pathpatterns.workspace = true
 
@@ -39,6 +37,7 @@ proxmox-compression.workspace = true
 proxmox-http = { workspace = true, features = [ "rate-limiter" ] }
 proxmox-human-byte.workspace = true
 proxmox-io = { workspace = true, features = [ "tokio" ] }
+proxmox-log = { workspace = true }
 proxmox-router = { workspace = true, features = [ "cli", "server" ] }
 proxmox-schema.workspace = true
 proxmox-sys.workspace = true
@@ -47,6 +46,5 @@ proxmox-time.workspace = true
 pxar.workspace = true
 
 pbs-api-types.workspace = true
-pbs-buildcfg.workspace = true
 pbs-datastore.workspace = true
 pbs-tools.workspace = true
@@ -1,19 +1,17 @@
 use anyhow::{format_err, Error};
 use std::fs::File;
 use std::io::{Seek, SeekFrom, Write};
-use std::os::unix::fs::OpenOptionsExt;
 use std::sync::Arc;
 
 use futures::future::AbortHandle;
 use serde_json::{json, Value};
 
-use pbs_api_types::{BackupDir, BackupNamespace};
+use pbs_api_types::{BackupArchiveName, BackupDir, BackupNamespace, MANIFEST_BLOB_NAME};
 use pbs_datastore::data_blob::DataBlob;
 use pbs_datastore::data_blob_reader::DataBlobReader;
 use pbs_datastore::dynamic_index::DynamicIndexReader;
 use pbs_datastore::fixed_index::FixedIndexReader;
 use pbs_datastore::index::IndexFile;
-use pbs_datastore::manifest::MANIFEST_BLOB_NAME;
 use pbs_datastore::{BackupManifest, PROXMOX_BACKUP_READER_PROTOCOL_ID_V1};
 use pbs_tools::crypt_config::CryptConfig;
 use pbs_tools::sha::sha256;
@@ -128,7 +126,8 @@ impl BackupReader {
     /// The manifest signature is verified if we have a crypt_config.
     pub async fn download_manifest(&self) -> Result<(BackupManifest, Vec<u8>), Error> {
         let mut raw_data = Vec::with_capacity(64 * 1024);
-        self.download(MANIFEST_BLOB_NAME, &mut raw_data).await?;
+        self.download(MANIFEST_BLOB_NAME.as_ref(), &mut raw_data)
+            .await?;
         let blob = DataBlob::load_from_reader(&mut &raw_data[..])?;
         // no expected digest available
         let data = blob.decode(None, None)?;
@@ -141,20 +140,16 @@ impl BackupReader {
 
     /// Download a .blob file
     ///
-    /// This creates a temporary file in /tmp (using O_TMPFILE). The data is verified using
-    /// the provided manifest.
+    /// This creates a temporary file (See [`crate::tools::create_tmp_file`] for
+    /// details). The data is verified using the provided manifest.
     pub async fn download_blob(
         &self,
         manifest: &BackupManifest,
-        name: &str,
+        name: &BackupArchiveName,
     ) -> Result<DataBlobReader<'_, File>, Error> {
-        let mut tmpfile = std::fs::OpenOptions::new()
-            .write(true)
-            .read(true)
-            .custom_flags(libc::O_TMPFILE)
-            .open("/tmp")?;
+        let mut tmpfile = crate::tools::create_tmp_file()?;
 
-        self.download(name, &mut tmpfile).await?;
+        self.download(name.as_ref(), &mut tmpfile).await?;
 
         tmpfile.seek(SeekFrom::Start(0))?;
         let (csum, size) = sha256(&mut tmpfile)?;
@@ -167,20 +162,16 @@ impl BackupReader {
 
     /// Download dynamic index file
     ///
-    /// This creates a temporary file in /tmp (using O_TMPFILE). The index is verified using
-    /// the provided manifest.
+    /// This creates a temporary file (See [`crate::tools::create_tmp_file`] for
+    /// details). The index is verified using the provided manifest.
    pub async fn download_dynamic_index(
         &self,
         manifest: &BackupManifest,
-        name: &str,
+        name: &BackupArchiveName,
     ) -> Result<DynamicIndexReader, Error> {
-        let mut tmpfile = std::fs::OpenOptions::new()
-            .write(true)
-            .read(true)
-            .custom_flags(libc::O_TMPFILE)
-            .open("/tmp")?;
+        let mut tmpfile = crate::tools::create_tmp_file()?;
 
-        self.download(name, &mut tmpfile).await?;
+        self.download(name.as_ref(), &mut tmpfile).await?;
 
         let index = DynamicIndexReader::new(tmpfile)
             .map_err(|err| format_err!("unable to read dynamic index '{}' - {}", name, err))?;
@@ -194,20 +185,16 @@ impl BackupReader {
 
     /// Download fixed index file
    ///
-    /// This creates a temporary file in /tmp (using O_TMPFILE). The index is verified using
-    /// the provided manifest.
+    /// This creates a temporary file (See [`crate::tools::create_tmp_file`] for
+    /// details). The index is verified using the provided manifest.
     pub async fn download_fixed_index(
         &self,
         manifest: &BackupManifest,
-        name: &str,
+        name: &BackupArchiveName,
     ) -> Result<FixedIndexReader, Error> {
-        let mut tmpfile = std::fs::OpenOptions::new()
-            .write(true)
-            .read(true)
-            .custom_flags(libc::O_TMPFILE)
-            .open("/tmp")?;
+        let mut tmpfile = crate::tools::create_tmp_file()?;
 
-        self.download(name, &mut tmpfile).await?;
+        self.download(name.as_ref(), &mut tmpfile).await?;
 
         let index = FixedIndexReader::new(tmpfile)
             .map_err(|err| format_err!("unable to read fixed index '{}' - {}", name, err))?;

@@ -7,10 +7,13 @@ const_regex! {
     BACKUPSPEC_REGEX = r"^([a-zA-Z0-9_-]+\.(pxar|img|conf|log)):(.+)$";
 }
 
-pub const BACKUP_SOURCE_SCHEMA: Schema =
-    StringSchema::new("Backup source specification ([<label>:<path>]).")
-        .format(&ApiStringFormat::Pattern(&BACKUPSPEC_REGEX))
-        .schema();
+pub const BACKUP_SOURCE_SCHEMA: Schema = StringSchema::new(
+    "Backup source specification ([<archive-name>.<type>:<source-path>]), the \
+    'archive-name' must contain alphanumerics, hyphens and underscores only. \
+    The 'type' must be either 'pxar', 'img', 'conf' or 'log'.",
+)
+.format(&ApiStringFormat::Pattern(&BACKUPSPEC_REGEX))
+.schema();
 
 pub enum BackupSpecificationType {
     PXAR,
@@ -35,7 +38,7 @@ pub fn parse_backup_specification(value: &str) -> Result<BackupSpecification, Er
             "img" => BackupSpecificationType::IMAGE,
             "conf" => BackupSpecificationType::CONFIG,
             "log" => BackupSpecificationType::LOGFILE,
-            _ => bail!("unknown backup source type '{}'", extension),
+            _ => bail!("unknown backup source type '{extension}'"),
         };
         return Ok(BackupSpecification {
             archive_name,
@@ -44,7 +47,7 @@ pub fn parse_backup_specification(value: &str) -> Result<BackupSpecification, Er
         });
     }
 
-    bail!("unable to parse backup source specification '{}'", value);
+    bail!("unable to parse backup source specification '{value}'");
 }
 
 #[api]
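Editor's illustration of the format documented by the reworded schema above, `<archive-name>.<type>:<source-path>`. Only behaviour visible in these hunks is asserted; the full field set of `BackupSpecification` is not shown here, so no field values are checked.

```rust
// Sketch only: accepted and rejected inputs per BACKUPSPEC_REGEX above.
assert!(parse_backup_specification("root.pxar:/").is_ok());
assert!(parse_backup_specification("drive-scsi0.img:/dev/sda").is_ok());
assert!(parse_backup_specification("root.tar:/x").is_err()); // 'tar' is not an allowed type
assert!(parse_backup_specification("root.pxar").is_err()); // missing ":<source-path>"
```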
pbs-client/src/backup_stats.rs (new file, 119 lines)
@@ -0,0 +1,119 @@
+//! Implements counters to generate statistics for log outputs during uploads with backup writer
+
+use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
+use std::sync::Arc;
+use std::time::Duration;
+
+use crate::pxar::create::ReusableDynamicEntry;
+
+/// Basic backup run statistics and archive checksum
+pub struct BackupStats {
+    pub size: u64,
+    pub csum: [u8; 32],
+    pub duration: Duration,
+    pub chunk_count: u64,
+}
+
+/// Extended backup run statistics and archive checksum
+pub(crate) struct UploadStats {
+    pub(crate) chunk_count: usize,
+    pub(crate) chunk_reused: usize,
+    pub(crate) chunk_injected: usize,
+    pub(crate) size: usize,
+    pub(crate) size_reused: usize,
+    pub(crate) size_injected: usize,
+    pub(crate) size_compressed: usize,
+    pub(crate) duration: Duration,
+    pub(crate) csum: [u8; 32],
+}
+
+impl UploadStats {
+    /// Convert the upload stats to the more concise [`BackupStats`]
+    #[inline(always)]
+    pub(crate) fn to_backup_stats(&self) -> BackupStats {
+        BackupStats {
+            chunk_count: self.chunk_count as u64,
+            size: self.size as u64,
+            duration: self.duration,
+            csum: self.csum,
+        }
+    }
+}
+
+/// Atomic counters for accounting upload stream progress information
+#[derive(Clone)]
+pub(crate) struct UploadCounters {
+    injected_chunk_count: Arc<AtomicUsize>,
+    known_chunk_count: Arc<AtomicUsize>,
+    total_chunk_count: Arc<AtomicUsize>,
+    compressed_stream_len: Arc<AtomicU64>,
+    injected_stream_len: Arc<AtomicUsize>,
+    reused_stream_len: Arc<AtomicUsize>,
+    total_stream_len: Arc<AtomicUsize>,
+}
+
+impl UploadCounters {
+    /// Create and zero init new upload counters
+    pub(crate) fn new() -> Self {
+        Self {
+            total_chunk_count: Arc::new(AtomicUsize::new(0)),
+            injected_chunk_count: Arc::new(AtomicUsize::new(0)),
+            known_chunk_count: Arc::new(AtomicUsize::new(0)),
+            compressed_stream_len: Arc::new(AtomicU64::new(0)),
+            injected_stream_len: Arc::new(AtomicUsize::new(0)),
+            reused_stream_len: Arc::new(AtomicUsize::new(0)),
+            total_stream_len: Arc::new(AtomicUsize::new(0)),
+        }
+    }
+
+    #[inline(always)]
+    pub(crate) fn add_known_chunk(&mut self, chunk_len: usize) -> usize {
+        self.known_chunk_count.fetch_add(1, Ordering::SeqCst);
+        self.total_chunk_count.fetch_add(1, Ordering::SeqCst);
+        self.reused_stream_len
+            .fetch_add(chunk_len, Ordering::SeqCst);
+        self.total_stream_len.fetch_add(chunk_len, Ordering::SeqCst)
+    }
+
+    #[inline(always)]
+    pub(crate) fn add_new_chunk(&mut self, chunk_len: usize, chunk_raw_size: u64) -> usize {
+        self.total_chunk_count.fetch_add(1, Ordering::SeqCst);
+        self.compressed_stream_len
+            .fetch_add(chunk_raw_size, Ordering::SeqCst);
+        self.total_stream_len.fetch_add(chunk_len, Ordering::SeqCst)
+    }
+
+    #[inline(always)]
+    pub(crate) fn add_injected_chunk(&mut self, chunk: &ReusableDynamicEntry) -> usize {
+        self.total_chunk_count.fetch_add(1, Ordering::SeqCst);
+        self.injected_chunk_count.fetch_add(1, Ordering::SeqCst);
+
+        self.reused_stream_len
+            .fetch_add(chunk.size() as usize, Ordering::SeqCst);
+        self.injected_stream_len
+            .fetch_add(chunk.size() as usize, Ordering::SeqCst);
+        self.total_stream_len
+            .fetch_add(chunk.size() as usize, Ordering::SeqCst)
+    }
+
+    #[inline(always)]
+    pub(crate) fn total_stream_len(&self) -> usize {
+        self.total_stream_len.load(Ordering::SeqCst)
+    }
+
+    /// Convert the counters to [`UploadStats`], including given archive checksum and runtime.
+    #[inline(always)]
+    pub(crate) fn to_upload_stats(&self, csum: [u8; 32], duration: Duration) -> UploadStats {
+        UploadStats {
+            chunk_count: self.total_chunk_count.load(Ordering::SeqCst),
+            chunk_reused: self.known_chunk_count.load(Ordering::SeqCst),
+            chunk_injected: self.injected_chunk_count.load(Ordering::SeqCst),
+            size: self.total_stream_len.load(Ordering::SeqCst),
+            size_reused: self.reused_stream_len.load(Ordering::SeqCst),
+            size_injected: self.injected_stream_len.load(Ordering::SeqCst),
+            size_compressed: self.compressed_stream_len.load(Ordering::SeqCst) as usize,
+            duration,
+            csum,
+        }
+    }
+}
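Editor's illustration of the new counters module, using only the API defined in the file above (all of it crate-internal): account one new and one deduplicated chunk, then fold the counters into the public `BackupStats`. The zeroed checksum and the fixed duration are placeholders.

```rust
// Sketch only, valid within pbs-client since the types above are pub(crate).
let mut counters = UploadCounters::new();
counters.add_new_chunk(4096, 1024); // 4 KiB of payload, 1 KiB after compression
counters.add_known_chunk(4096);     // chunk already present on the server, only re-referenced
assert_eq!(counters.total_stream_len(), 8192);

let stats = counters
    .to_upload_stats([0u8; 32], std::time::Duration::from_secs(1))
    .to_backup_stats();
assert_eq!(stats.chunk_count, 2);
assert_eq!(stats.size, 8192);
```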
@ -1,28 +1,34 @@
|
|||||||
use std::collections::HashSet;
|
use std::collections::HashSet;
|
||||||
use std::future::Future;
|
use std::future::Future;
|
||||||
use std::os::unix::fs::OpenOptionsExt;
|
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||||
use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
|
|
||||||
use std::sync::{Arc, Mutex};
|
use std::sync::{Arc, Mutex};
|
||||||
|
use std::time::Instant;
|
||||||
|
|
||||||
use anyhow::{bail, format_err, Error};
|
use anyhow::{bail, format_err, Error};
|
||||||
use futures::future::{self, AbortHandle, Either, FutureExt, TryFutureExt};
|
use futures::future::{self, AbortHandle, Either, FutureExt, TryFutureExt};
|
||||||
use futures::stream::{Stream, StreamExt, TryStreamExt};
|
use futures::stream::{Stream, StreamExt, TryStreamExt};
|
||||||
|
use openssl::sha::Sha256;
|
||||||
use serde_json::{json, Value};
|
use serde_json::{json, Value};
|
||||||
use tokio::io::AsyncReadExt;
|
use tokio::io::AsyncReadExt;
|
||||||
use tokio::sync::{mpsc, oneshot};
|
use tokio::sync::{mpsc, oneshot};
|
||||||
use tokio_stream::wrappers::ReceiverStream;
|
use tokio_stream::wrappers::ReceiverStream;
|
||||||
|
|
||||||
use pbs_api_types::{BackupDir, BackupNamespace};
|
use pbs_api_types::{
|
||||||
|
ArchiveType, BackupArchiveName, BackupDir, BackupNamespace, CATALOG_NAME, MANIFEST_BLOB_NAME,
|
||||||
|
};
|
||||||
use pbs_datastore::data_blob::{ChunkInfo, DataBlob, DataChunkBuilder};
|
use pbs_datastore::data_blob::{ChunkInfo, DataBlob, DataChunkBuilder};
|
||||||
use pbs_datastore::dynamic_index::DynamicIndexReader;
|
use pbs_datastore::dynamic_index::DynamicIndexReader;
|
||||||
use pbs_datastore::fixed_index::FixedIndexReader;
|
use pbs_datastore::fixed_index::FixedIndexReader;
|
||||||
use pbs_datastore::index::IndexFile;
|
use pbs_datastore::index::IndexFile;
|
||||||
use pbs_datastore::manifest::{ArchiveType, BackupManifest, MANIFEST_BLOB_NAME};
|
use pbs_datastore::manifest::BackupManifest;
|
||||||
use pbs_datastore::{CATALOG_NAME, PROXMOX_BACKUP_PROTOCOL_ID_V1};
|
use pbs_datastore::PROXMOX_BACKUP_PROTOCOL_ID_V1;
|
||||||
use pbs_tools::crypt_config::CryptConfig;
|
use pbs_tools::crypt_config::CryptConfig;
|
||||||
|
|
||||||
use proxmox_human_byte::HumanByte;
|
use proxmox_human_byte::HumanByte;
|
||||||
|
use proxmox_log::{debug, enabled, info, trace, warn, Level};
|
||||||
|
use proxmox_time::TimeSpan;
|
||||||
|
|
||||||
|
use super::backup_stats::{BackupStats, UploadCounters, UploadStats};
|
||||||
use super::inject_reused_chunks::{InjectChunks, InjectReusedChunks, InjectedChunksInfo};
|
use super::inject_reused_chunks::{InjectChunks, InjectReusedChunks, InjectedChunksInfo};
|
||||||
use super::merge_known_chunks::{MergeKnownChunks, MergedChunkInfo};
|
use super::merge_known_chunks::{MergeKnownChunks, MergedChunkInfo};
|
||||||
|
|
||||||
@ -40,11 +46,6 @@ impl Drop for BackupWriter {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub struct BackupStats {
|
|
||||||
pub size: u64,
|
|
||||||
pub csum: [u8; 32],
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Options for uploading blobs/streams to the server
|
/// Options for uploading blobs/streams to the server
|
||||||
#[derive(Default, Clone)]
|
#[derive(Default, Clone)]
|
||||||
pub struct UploadOptions {
|
pub struct UploadOptions {
|
||||||
@ -54,19 +55,12 @@ pub struct UploadOptions {
|
|||||||
pub fixed_size: Option<u64>,
|
pub fixed_size: Option<u64>,
|
||||||
}
|
}
|
||||||
|
|
||||||
struct UploadStats {
|
struct ChunkUploadResponse {
|
||||||
chunk_count: usize,
|
future: h2::legacy::client::ResponseFuture,
|
||||||
chunk_reused: usize,
|
|
||||||
chunk_injected: usize,
|
|
||||||
size: usize,
|
size: usize,
|
||||||
size_reused: usize,
|
|
||||||
size_injected: usize,
|
|
||||||
size_compressed: usize,
|
|
||||||
duration: std::time::Duration,
|
|
||||||
csum: [u8; 32],
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type UploadQueueSender = mpsc::Sender<(MergedChunkInfo, Option<h2::client::ResponseFuture>)>;
|
type UploadQueueSender = mpsc::Sender<(MergedChunkInfo, Option<ChunkUploadResponse>)>;
|
||||||
type UploadResultReceiver = oneshot::Receiver<Result<(), Error>>;
|
type UploadResultReceiver = oneshot::Receiver<Result<(), Error>>;
|
||||||
|
|
||||||
impl BackupWriter {
|
impl BackupWriter {
|
||||||
@ -149,7 +143,7 @@ impl BackupWriter {
|
|||||||
param: Option<Value>,
|
param: Option<Value>,
|
||||||
content_type: &str,
|
content_type: &str,
|
||||||
data: Vec<u8>,
|
data: Vec<u8>,
|
||||||
) -> Result<h2::client::ResponseFuture, Error> {
|
) -> Result<h2::legacy::client::ResponseFuture, Error> {
|
||||||
let request =
|
let request =
|
||||||
H2Client::request_builder("localhost", method, path, param, Some(content_type))
|
H2Client::request_builder("localhost", method, path, param, Some(content_type))
|
||||||
.unwrap();
|
.unwrap();
|
||||||
@ -189,6 +183,7 @@ impl BackupWriter {
|
|||||||
mut reader: R,
|
mut reader: R,
|
||||||
file_name: &str,
|
file_name: &str,
|
||||||
) -> Result<BackupStats, Error> {
|
) -> Result<BackupStats, Error> {
|
||||||
|
let start_time = Instant::now();
|
||||||
let mut raw_data = Vec::new();
|
let mut raw_data = Vec::new();
|
||||||
// fixme: avoid loading into memory
|
// fixme: avoid loading into memory
|
||||||
reader.read_to_end(&mut raw_data)?;
|
reader.read_to_end(&mut raw_data)?;
|
||||||
@ -206,7 +201,12 @@ impl BackupWriter {
|
|||||||
raw_data,
|
raw_data,
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
Ok(BackupStats { size, csum })
|
Ok(BackupStats {
|
||||||
|
size,
|
||||||
|
csum,
|
||||||
|
duration: start_time.elapsed(),
|
||||||
|
chunk_count: 0,
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn upload_blob_from_data(
|
pub async fn upload_blob_from_data(
|
||||||
@ -215,6 +215,7 @@ impl BackupWriter {
|
|||||||
file_name: &str,
|
file_name: &str,
|
||||||
options: UploadOptions,
|
options: UploadOptions,
|
||||||
) -> Result<BackupStats, Error> {
|
) -> Result<BackupStats, Error> {
|
||||||
|
let start_time = Instant::now();
|
||||||
let blob = match (options.encrypt, &self.crypt_config) {
|
let blob = match (options.encrypt, &self.crypt_config) {
|
||||||
(false, _) => DataBlob::encode(&data, None, options.compress)?,
|
(false, _) => DataBlob::encode(&data, None, options.compress)?,
|
||||||
(true, None) => bail!("requested encryption without a crypt config"),
|
(true, None) => bail!("requested encryption without a crypt config"),
|
||||||
@ -238,7 +239,12 @@ impl BackupWriter {
|
|||||||
raw_data,
|
raw_data,
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
Ok(BackupStats { size, csum })
|
Ok(BackupStats {
|
||||||
|
size,
|
||||||
|
csum,
|
||||||
|
duration: start_time.elapsed(),
|
||||||
|
chunk_count: 0,
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn upload_blob_from_file<P: AsRef<std::path::Path>>(
|
pub async fn upload_blob_from_file<P: AsRef<std::path::Path>>(
|
||||||
@ -263,9 +269,102 @@ impl BackupWriter {
|
|||||||
.await
|
.await
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Upload chunks and index
|
||||||
|
pub async fn upload_index_chunk_info(
|
||||||
|
&self,
|
||||||
|
archive_name: &BackupArchiveName,
|
||||||
|
stream: impl Stream<Item = Result<MergedChunkInfo, Error>>,
|
||||||
|
options: UploadOptions,
|
||||||
|
) -> Result<BackupStats, Error> {
|
||||||
|
let mut param = json!({ "archive-name": archive_name });
|
||||||
|
let prefix = if let Some(size) = options.fixed_size {
|
||||||
|
param["size"] = size.into();
|
||||||
|
"fixed"
|
||||||
|
} else {
|
||||||
|
"dynamic"
|
||||||
|
};
|
||||||
|
|
||||||
|
if options.encrypt && self.crypt_config.is_none() {
|
||||||
|
bail!("requested encryption without a crypt config");
|
||||||
|
}
|
||||||
|
|
||||||
|
let wid = self
|
||||||
|
.h2
|
||||||
|
.post(&format!("{prefix}_index"), Some(param))
|
||||||
|
.await?
|
||||||
|
.as_u64()
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let mut counters = UploadCounters::new();
|
||||||
|
let counters_readonly = counters.clone();
|
||||||
|
|
||||||
|
let is_fixed_chunk_size = prefix == "fixed";
|
||||||
|
|
||||||
|
let index_csum = Arc::new(Mutex::new(Some(Sha256::new())));
|
||||||
|
let index_csum_2 = index_csum.clone();
|
||||||
|
|
||||||
|
let stream = stream
|
||||||
|
.and_then(move |mut merged_chunk_info| {
|
||||||
|
match merged_chunk_info {
|
||||||
|
MergedChunkInfo::New(ref chunk_info) => {
|
||||||
|
let chunk_len = chunk_info.chunk_len;
|
||||||
|
let offset =
|
||||||
|
counters.add_new_chunk(chunk_len as usize, chunk_info.chunk.raw_size());
|
||||||
|
let end_offset = offset as u64 + chunk_len;
|
||||||
|
let mut guard = index_csum.lock().unwrap();
|
||||||
|
let csum = guard.as_mut().unwrap();
|
||||||
|
if !is_fixed_chunk_size {
|
||||||
|
csum.update(&end_offset.to_le_bytes());
|
||||||
|
}
|
||||||
|
csum.update(&chunk_info.digest);
|
||||||
|
}
|
||||||
|
MergedChunkInfo::Known(ref mut known_chunk_list) => {
|
||||||
|
for (chunk_len, digest) in known_chunk_list {
|
||||||
|
let offset = counters.add_known_chunk(*chunk_len as usize);
|
||||||
|
let end_offset = offset as u64 + *chunk_len;
|
||||||
|
let mut guard = index_csum.lock().unwrap();
|
||||||
|
let csum = guard.as_mut().unwrap();
|
||||||
|
if !is_fixed_chunk_size {
|
||||||
|
csum.update(&end_offset.to_le_bytes());
|
||||||
|
}
|
||||||
|
csum.update(digest);
|
||||||
|
// Replace size with offset, expected by further stream
|
||||||
|
*chunk_len = offset as u64;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
future::ok(merged_chunk_info)
|
||||||
|
})
|
||||||
|
.merge_known_chunks();
|
||||||
|
|
||||||
|
let upload_stats = Self::upload_merged_chunk_stream(
|
||||||
|
self.h2.clone(),
|
||||||
|
wid,
|
||||||
|
archive_name,
|
||||||
|
prefix,
|
||||||
|
stream,
|
||||||
|
index_csum_2,
|
||||||
|
counters_readonly,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
let param = json!({
|
||||||
|
"wid": wid ,
|
||||||
|
"chunk-count": upload_stats.chunk_count,
|
||||||
|
"size": upload_stats.size,
|
||||||
|
"csum": hex::encode(upload_stats.csum),
|
||||||
|
});
|
||||||
|
let _value = self
|
||||||
|
.h2
|
||||||
|
.post(&format!("{prefix}_close"), Some(param))
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
Ok(upload_stats.to_backup_stats())
|
||||||
|
}
|
||||||
|
|
||||||
pub async fn upload_stream(
|
pub async fn upload_stream(
|
||||||
&self,
|
&self,
|
||||||
archive_name: &str,
|
archive_name: &BackupArchiveName,
|
||||||
stream: impl Stream<Item = Result<bytes::BytesMut, Error>>,
|
stream: impl Stream<Item = Result<bytes::BytesMut, Error>>,
|
||||||
options: UploadOptions,
|
options: UploadOptions,
|
||||||
injections: Option<std::sync::mpsc::Receiver<InjectChunks>>,
|
injections: Option<std::sync::mpsc::Receiver<InjectChunks>>,
|
||||||
@ -291,13 +390,13 @@ impl BackupWriter {
|
|||||||
if !manifest
|
if !manifest
|
||||||
.files()
|
.files()
|
||||||
.iter()
|
.iter()
|
||||||
.any(|file| file.filename == archive_name)
|
.any(|file| file.filename == archive_name.as_ref())
|
||||||
{
|
{
|
||||||
log::info!("Previous manifest does not contain an archive called '{archive_name}', skipping download..");
|
info!("Previous manifest does not contain an archive called '{archive_name}', skipping download..");
|
||||||
} else {
|
} else {
|
||||||
// try, but ignore errors
|
// try, but ignore errors
|
||||||
match ArchiveType::from_path(archive_name) {
|
match archive_name.archive_type() {
|
||||||
Ok(ArchiveType::FixedIndex) => {
|
ArchiveType::FixedIndex => {
|
||||||
if let Err(err) = self
|
if let Err(err) = self
|
||||||
.download_previous_fixed_index(
|
.download_previous_fixed_index(
|
||||||
archive_name,
|
archive_name,
|
||||||
@ -306,10 +405,10 @@ impl BackupWriter {
|
|||||||
)
|
)
|
||||||
.await
|
.await
|
||||||
{
|
{
|
||||||
log::warn!("Error downloading .fidx from previous manifest: {}", err);
|
warn!("Error downloading .fidx from previous manifest: {}", err);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
Ok(ArchiveType::DynamicIndex) => {
|
ArchiveType::DynamicIndex => {
|
||||||
if let Err(err) = self
|
if let Err(err) = self
|
||||||
.download_previous_dynamic_index(
|
.download_previous_dynamic_index(
|
||||||
archive_name,
|
archive_name,
|
||||||
@ -318,7 +417,7 @@ impl BackupWriter {
|
|||||||
)
|
)
|
||||||
.await
|
.await
|
||||||
{
|
{
|
||||||
log::warn!("Error downloading .didx from previous manifest: {}", err);
|
warn!("Error downloading .didx from previous manifest: {}", err);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
_ => { /* do nothing */ }
|
_ => { /* do nothing */ }
|
||||||
@ -346,61 +445,58 @@ impl BackupWriter {
|
|||||||
},
|
},
|
||||||
options.compress,
|
options.compress,
|
||||||
injections,
|
injections,
|
||||||
|
archive_name,
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
let size_dirty = upload_stats.size - upload_stats.size_reused;
|
let size_dirty = upload_stats.size - upload_stats.size_reused;
|
||||||
let size: HumanByte = upload_stats.size.into();
|
let size: HumanByte = upload_stats.size.into();
|
||||||
let archive = if log::log_enabled!(log::Level::Debug) {
|
let archive = if enabled!(Level::DEBUG) {
|
||||||
archive_name
|
archive_name.to_string()
|
||||||
} else {
|
} else {
|
||||||
pbs_tools::format::strip_server_file_extension(archive_name)
|
archive_name.without_type_extension()
|
||||||
};
|
};
|
||||||
|
|
||||||
if upload_stats.chunk_injected > 0 {
|
if upload_stats.chunk_injected > 0 {
|
||||||
log::info!(
|
info!(
|
||||||
"{archive}: reused {} from previous snapshot for unchanged files ({} chunks)",
|
"{archive}: reused {} from previous snapshot for unchanged files ({} chunks)",
|
||||||
HumanByte::from(upload_stats.size_injected),
|
HumanByte::from(upload_stats.size_injected),
|
||||||
upload_stats.chunk_injected,
|
upload_stats.chunk_injected,
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
if archive_name != CATALOG_NAME {
|
if *archive_name != *CATALOG_NAME {
|
||||||
let speed: HumanByte =
|
let speed: HumanByte =
|
||||||
((size_dirty * 1_000_000) / (upload_stats.duration.as_micros() as usize)).into();
|
((size_dirty * 1_000_000) / (upload_stats.duration.as_micros() as usize)).into();
|
||||||
let size_dirty: HumanByte = size_dirty.into();
|
let size_dirty: HumanByte = size_dirty.into();
|
||||||
let size_compressed: HumanByte = upload_stats.size_compressed.into();
|
let size_compressed: HumanByte = upload_stats.size_compressed.into();
|
||||||
log::info!(
|
info!(
|
||||||
"{archive}: had to backup {size_dirty} of {size} (compressed {size_compressed}) in {:.2} s (average {speed}/s)",
|
"{archive}: had to backup {size_dirty} of {size} (compressed {size_compressed}) in {:.2} s (average {speed}/s)",
|
||||||
upload_stats.duration.as_secs_f64()
|
upload_stats.duration.as_secs_f64()
|
||||||
);
|
);
|
||||||
} else {
|
} else {
|
||||||
log::info!("Uploaded backup catalog ({})", size);
|
info!("Uploaded backup catalog ({})", size);
|
||||||
}
|
}
|
||||||
|
|
||||||
if upload_stats.size_reused > 0 && upload_stats.size > 1024 * 1024 {
|
if upload_stats.size_reused > 0 && upload_stats.size > 1024 * 1024 {
|
||||||
let reused_percent = upload_stats.size_reused as f64 * 100. / upload_stats.size as f64;
|
let reused_percent = upload_stats.size_reused as f64 * 100. / upload_stats.size as f64;
|
||||||
let reused: HumanByte = upload_stats.size_reused.into();
|
let reused: HumanByte = upload_stats.size_reused.into();
|
||||||
log::info!(
|
info!(
|
||||||
"{}: backup was done incrementally, reused {} ({:.1}%)",
|
"{}: backup was done incrementally, reused {} ({:.1}%)",
|
||||||
archive,
|
archive, reused, reused_percent
|
||||||
reused,
|
|
||||||
reused_percent
|
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
if log::log_enabled!(log::Level::Debug) && upload_stats.chunk_count > 0 {
|
if enabled!(Level::DEBUG) && upload_stats.chunk_count > 0 {
|
||||||
log::debug!(
|
debug!(
|
||||||
"{}: Reused {} from {} chunks.",
|
"{}: Reused {} from {} chunks.",
|
||||||
archive,
|
archive, upload_stats.chunk_reused, upload_stats.chunk_count
|
||||||
upload_stats.chunk_reused,
|
|
||||||
upload_stats.chunk_count
|
|
||||||
);
|
);
|
||||||
log::debug!(
|
debug!(
|
||||||
"{}: Average chunk size was {}.",
|
"{}: Average chunk size was {}.",
|
||||||
archive,
|
archive,
|
||||||
HumanByte::from(upload_stats.size / upload_stats.chunk_count)
|
HumanByte::from(upload_stats.size / upload_stats.chunk_count)
|
||||||
);
|
);
|
||||||
log::debug!(
|
debug!(
|
||||||
"{}: Average time per request: {} microseconds.",
|
"{}: Average time per request: {} microseconds.",
|
||||||
archive,
|
archive,
|
||||||
(upload_stats.duration.as_micros()) / (upload_stats.chunk_count as u128)
|
(upload_stats.duration.as_micros()) / (upload_stats.chunk_count as u128)
|
||||||
@ -414,14 +510,11 @@ impl BackupWriter {
|
|||||||
"csum": hex::encode(upload_stats.csum),
|
"csum": hex::encode(upload_stats.csum),
|
||||||
});
|
});
|
||||||
let _value = self.h2.post(&close_path, Some(param)).await?;
|
let _value = self.h2.post(&close_path, Some(param)).await?;
|
||||||
Ok(BackupStats {
|
Ok(upload_stats.to_backup_stats())
|
||||||
size: upload_stats.size as u64,
|
|
||||||
csum: upload_stats.csum,
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fn response_queue() -> (
|
fn response_queue() -> (
|
||||||
mpsc::Sender<h2::client::ResponseFuture>,
|
mpsc::Sender<h2::legacy::client::ResponseFuture>,
|
||||||
oneshot::Receiver<Result<(), Error>>,
|
oneshot::Receiver<Result<(), Error>>,
|
||||||
) {
|
) {
|
||||||
let (verify_queue_tx, verify_queue_rx) = mpsc::channel(100);
|
let (verify_queue_tx, verify_queue_rx) = mpsc::channel(100);
|
||||||
@ -444,11 +537,11 @@ impl BackupWriter {
|
|||||||
tokio::spawn(
|
tokio::spawn(
|
||||||
ReceiverStream::new(verify_queue_rx)
|
ReceiverStream::new(verify_queue_rx)
|
||||||
.map(Ok::<_, Error>)
|
.map(Ok::<_, Error>)
|
||||||
.try_for_each(move |response: h2::client::ResponseFuture| {
|
.try_for_each(move |response: h2::legacy::client::ResponseFuture| {
|
||||||
response
|
response
|
||||||
.map_err(Error::from)
|
.map_err(Error::from)
|
||||||
.and_then(H2Client::h2api_response)
|
.and_then(H2Client::h2api_response)
|
||||||
.map_ok(move |result| log::debug!("RESPONSE: {:?}", result))
|
.map_ok(move |result| debug!("RESPONSE: {:?}", result))
|
||||||
.map_err(|err| format_err!("pipelined request failed: {}", err))
|
.map_err(|err| format_err!("pipelined request failed: {}", err))
|
||||||
})
|
})
|
||||||
.map(|result| {
|
.map(|result| {
|
||||||
@ -463,6 +556,7 @@ impl BackupWriter {
|
|||||||
h2: H2Client,
|
h2: H2Client,
|
||||||
wid: u64,
|
wid: u64,
|
||||||
path: String,
|
path: String,
|
||||||
|
uploaded: Arc<AtomicUsize>,
|
||||||
) -> (UploadQueueSender, UploadResultReceiver) {
|
) -> (UploadQueueSender, UploadResultReceiver) {
|
||||||
let (verify_queue_tx, verify_queue_rx) = mpsc::channel(64);
|
let (verify_queue_tx, verify_queue_rx) = mpsc::channel(64);
|
||||||
let (verify_result_tx, verify_result_rx) = oneshot::channel();
|
let (verify_result_tx, verify_result_rx) = oneshot::channel();
|
||||||
@ -471,15 +565,21 @@ impl BackupWriter {
|
|||||||
tokio::spawn(
|
tokio::spawn(
|
||||||
ReceiverStream::new(verify_queue_rx)
|
ReceiverStream::new(verify_queue_rx)
|
||||||
.map(Ok::<_, Error>)
|
.map(Ok::<_, Error>)
|
||||||
.and_then(move |(merged_chunk_info, response): (MergedChunkInfo, Option<h2::client::ResponseFuture>)| {
|
.and_then(move |(merged_chunk_info, response): (MergedChunkInfo, Option<ChunkUploadResponse>)| {
|
||||||
match (response, merged_chunk_info) {
|
match (response, merged_chunk_info) {
|
||||||
(Some(response), MergedChunkInfo::Known(list)) => {
|
(Some(response), MergedChunkInfo::Known(list)) => {
|
||||||
Either::Left(
|
Either::Left(
|
||||||
response
|
response
|
||||||
|
.future
|
||||||
.map_err(Error::from)
|
.map_err(Error::from)
|
||||||
.and_then(H2Client::h2api_response)
|
.and_then(H2Client::h2api_response)
|
||||||
.and_then(move |_result| {
|
.and_then({
|
||||||
future::ok(MergedChunkInfo::Known(list))
|
let uploaded = uploaded.clone();
|
||||||
|
move |_result| {
|
||||||
|
// account for uploaded bytes for progress output
|
||||||
|
uploaded.fetch_add(response.size, Ordering::SeqCst);
|
||||||
|
future::ok(MergedChunkInfo::Known(list))
|
||||||
|
}
|
||||||
})
|
})
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
@ -499,7 +599,7 @@ impl BackupWriter {
|
|||||||
digest_list.push(hex::encode(digest));
|
digest_list.push(hex::encode(digest));
|
||||||
offset_list.push(offset);
|
offset_list.push(offset);
|
||||||
}
|
}
|
||||||
log::debug!("append chunks list len ({})", digest_list.len());
|
debug!("append chunks list len ({})", digest_list.len());
|
||||||
let param = json!({ "wid": wid, "digest-list": digest_list, "offset-list": offset_list });
|
let param = json!({ "wid": wid, "digest-list": digest_list, "offset-list": offset_list });
|
||||||
let request = H2Client::request_builder("localhost", "PUT", &path, None, Some("application/json")).unwrap();
|
let request = H2Client::request_builder("localhost", "PUT", &path, None, Some("application/json")).unwrap();
|
||||||
let param_data = bytes::Bytes::from(param.to_string().into_bytes());
|
let param_data = bytes::Bytes::from(param.to_string().into_bytes());
|
||||||
@ -527,15 +627,11 @@ impl BackupWriter {
|
|||||||
|
|
||||||
pub async fn download_previous_fixed_index(
|
pub async fn download_previous_fixed_index(
|
||||||
&self,
|
&self,
|
||||||
archive_name: &str,
|
archive_name: &BackupArchiveName,
|
||||||
manifest: &BackupManifest,
|
manifest: &BackupManifest,
|
||||||
known_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
|
known_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
|
||||||
) -> Result<FixedIndexReader, Error> {
|
) -> Result<FixedIndexReader, Error> {
|
||||||
let mut tmpfile = std::fs::OpenOptions::new()
|
let mut tmpfile = crate::tools::create_tmp_file()?;
|
||||||
.write(true)
|
|
||||||
.read(true)
|
|
||||||
.custom_flags(libc::O_TMPFILE)
|
|
||||||
.open("/tmp")?;
|
|
||||||
|
|
||||||
let param = json!({ "archive-name": archive_name });
|
let param = json!({ "archive-name": archive_name });
|
||||||
self.h2
|
self.h2
|
||||||
@ -555,7 +651,7 @@ impl BackupWriter {
|
|||||||
known_chunks.insert(*index.index_digest(i).unwrap());
|
known_chunks.insert(*index.index_digest(i).unwrap());
|
||||||
}
|
}
|
||||||
|
|
||||||
log::debug!(
|
debug!(
|
||||||
"{}: known chunks list length is {}",
|
"{}: known chunks list length is {}",
|
||||||
archive_name,
|
archive_name,
|
||||||
index.index_count()
|
index.index_count()
|
||||||
@ -566,15 +662,11 @@ impl BackupWriter {
|
|||||||
|
|
||||||
pub async fn download_previous_dynamic_index(
|
pub async fn download_previous_dynamic_index(
|
||||||
&self,
|
&self,
|
||||||
archive_name: &str,
|
archive_name: &BackupArchiveName,
|
||||||
manifest: &BackupManifest,
|
manifest: &BackupManifest,
|
||||||
known_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
|
known_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
|
||||||
) -> Result<DynamicIndexReader, Error> {
|
) -> Result<DynamicIndexReader, Error> {
|
||||||
let mut tmpfile = std::fs::OpenOptions::new()
|
let mut tmpfile = crate::tools::create_tmp_file()?;
|
||||||
.write(true)
|
|
||||||
.read(true)
|
|
||||||
.custom_flags(libc::O_TMPFILE)
|
|
||||||
.open("/tmp")?;
|
|
||||||
|
|
||||||
let param = json!({ "archive-name": archive_name });
|
let param = json!({ "archive-name": archive_name });
|
||||||
self.h2
|
self.h2
|
||||||
@ -593,7 +685,7 @@ impl BackupWriter {
|
|||||||
known_chunks.insert(*index.index_digest(i).unwrap());
|
known_chunks.insert(*index.index_digest(i).unwrap());
|
||||||
}
|
}
|
||||||
|
|
||||||
log::debug!(
|
debug!(
|
||||||
"{}: known chunks list length is {}",
|
"{}: known chunks list length is {}",
|
||||||
archive_name,
|
archive_name,
|
||||||
index.index_count()
|
index.index_count()
|
||||||
@ -617,7 +709,7 @@ impl BackupWriter {
|
|||||||
pub async fn download_previous_manifest(&self) -> Result<BackupManifest, Error> {
|
pub async fn download_previous_manifest(&self) -> Result<BackupManifest, Error> {
|
||||||
let mut raw_data = Vec::with_capacity(64 * 1024);
|
let mut raw_data = Vec::with_capacity(64 * 1024);
|
||||||
|
|
||||||
let param = json!({ "archive-name": MANIFEST_BLOB_NAME });
|
let param = json!({ "archive-name": MANIFEST_BLOB_NAME.to_string() });
|
||||||
self.h2
|
self.h2
|
||||||
.download("previous", Some(param), &mut raw_data)
|
.download("previous", Some(param), &mut raw_data)
|
||||||
.await?;
|
.await?;
|
||||||
@ -645,52 +737,26 @@ impl BackupWriter {
|
|||||||
crypt_config: Option<Arc<CryptConfig>>,
|
crypt_config: Option<Arc<CryptConfig>>,
|
||||||
compress: bool,
|
compress: bool,
|
||||||
injections: Option<std::sync::mpsc::Receiver<InjectChunks>>,
|
injections: Option<std::sync::mpsc::Receiver<InjectChunks>>,
|
||||||
|
archive: &BackupArchiveName,
|
||||||
) -> impl Future<Output = Result<UploadStats, Error>> {
|
) -> impl Future<Output = Result<UploadStats, Error>> {
|
||||||
let total_chunks = Arc::new(AtomicUsize::new(0));
|
let mut counters = UploadCounters::new();
|
||||||
let total_chunks2 = total_chunks.clone();
|
let counters_readonly = counters.clone();
|
||||||
let known_chunk_count = Arc::new(AtomicUsize::new(0));
|
|
||||||
let known_chunk_count2 = known_chunk_count.clone();
|
|
||||||
let injected_chunk_count = Arc::new(AtomicUsize::new(0));
|
|
||||||
let injected_chunk_count2 = injected_chunk_count.clone();
|
|
||||||
|
|
||||||
let stream_len = Arc::new(AtomicUsize::new(0));
|
|
||||||
let stream_len2 = stream_len.clone();
|
|
||||||
let compressed_stream_len = Arc::new(AtomicU64::new(0));
|
|
||||||
let compressed_stream_len2 = compressed_stream_len.clone();
|
|
||||||
let reused_len = Arc::new(AtomicUsize::new(0));
|
|
||||||
let reused_len2 = reused_len.clone();
|
|
||||||
let injected_len = Arc::new(AtomicUsize::new(0));
|
|
||||||
let injected_len2 = injected_len.clone();
|
|
||||||
|
|
||||||
let append_chunk_path = format!("{}_index", prefix);
|
|
||||||
let upload_chunk_path = format!("{}_chunk", prefix);
|
|
||||||
let is_fixed_chunk_size = prefix == "fixed";
|
let is_fixed_chunk_size = prefix == "fixed";
|
||||||
|
|
||||||
let (upload_queue, upload_result) =
|
|
||||||
Self::append_chunk_queue(h2.clone(), wid, append_chunk_path);
|
|
||||||
|
|
||||||
let start_time = std::time::Instant::now();
|
|
||||||
|
|
||||||
let index_csum = Arc::new(Mutex::new(Some(openssl::sha::Sha256::new())));
|
let index_csum = Arc::new(Mutex::new(Some(openssl::sha::Sha256::new())));
|
||||||
let index_csum_2 = index_csum.clone();
|
let index_csum_2 = index_csum.clone();
|
||||||
|
|
||||||
stream
|
let stream = stream
|
||||||
.inject_reused_chunks(injections, stream_len.clone())
|
.inject_reused_chunks(injections, counters.clone())
|
||||||
.and_then(move |chunk_info| match chunk_info {
|
.and_then(move |chunk_info| match chunk_info {
|
||||||
InjectedChunksInfo::Known(chunks) => {
|
InjectedChunksInfo::Known(chunks) => {
|
||||||
// account for injected chunks
|
// account for injected chunks
|
||||||
let count = chunks.len();
|
|
||||||
total_chunks.fetch_add(count, Ordering::SeqCst);
|
|
||||||
injected_chunk_count.fetch_add(count, Ordering::SeqCst);
|
|
||||||
|
|
||||||
let mut known = Vec::new();
|
let mut known = Vec::new();
|
||||||
let mut guard = index_csum.lock().unwrap();
|
let mut guard = index_csum.lock().unwrap();
|
||||||
let csum = guard.as_mut().unwrap();
|
let csum = guard.as_mut().unwrap();
|
||||||
for chunk in chunks {
|
for chunk in chunks {
|
||||||
let offset =
|
let offset = counters.add_injected_chunk(&chunk) as u64;
|
||||||
stream_len.fetch_add(chunk.size() as usize, Ordering::SeqCst) as u64;
|
|
||||||
reused_len.fetch_add(chunk.size() as usize, Ordering::SeqCst);
|
|
||||||
injected_len.fetch_add(chunk.size() as usize, Ordering::SeqCst);
|
|
||||||
let digest = chunk.digest();
|
let digest = chunk.digest();
|
||||||
known.push((offset, digest));
|
known.push((offset, digest));
|
||||||
let end_offset = offset + chunk.size();
|
let end_offset = offset + chunk.size();
|
||||||
@ -703,9 +769,6 @@ impl BackupWriter {
|
|||||||
// account for not injected chunks (new and known)
|
// account for not injected chunks (new and known)
|
||||||
let chunk_len = data.len();
|
let chunk_len = data.len();
|
||||||
|
|
||||||
total_chunks.fetch_add(1, Ordering::SeqCst);
|
|
||||||
let offset = stream_len.fetch_add(chunk_len, Ordering::SeqCst) as u64;
|
|
||||||
|
|
||||||
let mut chunk_builder = DataChunkBuilder::new(data.as_ref()).compress(compress);
|
let mut chunk_builder = DataChunkBuilder::new(data.as_ref()).compress(compress);
|
||||||
|
|
||||||
if let Some(ref crypt_config) = crypt_config {
|
if let Some(ref crypt_config) = crypt_config {
|
||||||
@ -713,7 +776,29 @@ impl BackupWriter {
|
|||||||
}
|
}
|
||||||
|
|
||||||
let mut known_chunks = known_chunks.lock().unwrap();
|
let mut known_chunks = known_chunks.lock().unwrap();
|
||||||
let digest = chunk_builder.digest();
|
let digest = *chunk_builder.digest();
|
||||||
|
let (offset, res) = if known_chunks.contains(&digest) {
|
||||||
|
let offset = counters.add_known_chunk(chunk_len) as u64;
|
||||||
|
(offset, MergedChunkInfo::Known(vec![(offset, digest)]))
|
||||||
|
} else {
|
||||||
|
match chunk_builder.build() {
|
||||||
|
Ok((chunk, digest)) => {
|
||||||
|
let offset =
|
||||||
|
counters.add_new_chunk(chunk_len, chunk.raw_size()) as u64;
|
||||||
|
known_chunks.insert(digest);
|
||||||
|
(
|
||||||
|
offset,
|
||||||
|
MergedChunkInfo::New(ChunkInfo {
|
||||||
|
chunk,
|
||||||
|
digest,
|
||||||
|
chunk_len: chunk_len as u64,
|
||||||
|
offset,
|
||||||
|
}),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
Err(err) => return future::err(err),
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
let mut guard = index_csum.lock().unwrap();
|
let mut guard = index_csum.lock().unwrap();
|
||||||
let csum = guard.as_mut().unwrap();
|
let csum = guard.as_mut().unwrap();
|
||||||
@@ -723,29 +808,63 @@ impl BackupWriter {
                 if !is_fixed_chunk_size {
                     csum.update(&chunk_end.to_le_bytes());
                 }
-                csum.update(digest);
+                csum.update(&digest);
 
-                let chunk_is_known = known_chunks.contains(digest);
-                if chunk_is_known {
-                    known_chunk_count.fetch_add(1, Ordering::SeqCst);
-                    reused_len.fetch_add(chunk_len, Ordering::SeqCst);
-                    future::ok(MergedChunkInfo::Known(vec![(offset, *digest)]))
-                } else {
-                    let compressed_stream_len2 = compressed_stream_len.clone();
-                    known_chunks.insert(*digest);
-                    future::ready(chunk_builder.build().map(move |(chunk, digest)| {
-                        compressed_stream_len2.fetch_add(chunk.raw_size(), Ordering::SeqCst);
-                        MergedChunkInfo::New(ChunkInfo {
-                            chunk,
-                            digest,
-                            chunk_len: chunk_len as u64,
-                            offset,
-                        })
-                    }))
-                }
+                future::ok(res)
             })
-            .merge_known_chunks()
+            .merge_known_chunks();
 
+        Self::upload_merged_chunk_stream(
+            h2,
+            wid,
+            archive,
+            prefix,
+            stream,
+            index_csum_2,
+            counters_readonly,
+        )
+    }
+
+    fn upload_merged_chunk_stream(
+        h2: H2Client,
+        wid: u64,
+        archive: &BackupArchiveName,
+        prefix: &str,
+        stream: impl Stream<Item = Result<MergedChunkInfo, Error>>,
+        index_csum: Arc<Mutex<Option<Sha256>>>,
+        counters: UploadCounters,
+    ) -> impl Future<Output = Result<UploadStats, Error>> {
+        let append_chunk_path = format!("{prefix}_index");
+        let upload_chunk_path = format!("{prefix}_chunk");
+
+        let start_time = std::time::Instant::now();
+        let uploaded_len = Arc::new(AtomicUsize::new(0));
+
+        let (upload_queue, upload_result) =
+            Self::append_chunk_queue(h2.clone(), wid, append_chunk_path, uploaded_len.clone());
+
+        let progress_handle = if archive.ends_with(".img.fidx")
+            || archive.ends_with(".pxar.didx")
+            || archive.ends_with(".ppxar.didx")
+        {
+            let counters = counters.clone();
+            Some(tokio::spawn(async move {
+                loop {
+                    tokio::time::sleep(tokio::time::Duration::from_secs(60)).await;
+
+                    let size = HumanByte::from(counters.total_stream_len());
+                    let size_uploaded = HumanByte::from(uploaded_len.load(Ordering::SeqCst));
+                    let elapsed = TimeSpan::from(start_time.elapsed());
+
+                    info!("processed {size} in {elapsed}, uploaded {size_uploaded}");
+                }
+            }))
+        } else {
+            None
+        };
+
+        stream
             .try_for_each(move |merged_chunk_info| {
                 let upload_queue = upload_queue.clone();
 
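The hunk above spawns a background task that reports upload progress once a minute and keeps its `JoinHandle` so it can be aborted once the stream completes. The following is a minimal, self-contained sketch of that general pattern only; the identifiers (`spawn_progress_logger`, the plain `println!`) are made up for illustration and are not the pbs-client code.

```rust
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::time::Duration;

// Spawn a task that periodically reports how much data was uploaded so far.
// The caller keeps the returned handle and aborts it when the upload is done.
fn spawn_progress_logger(uploaded: Arc<AtomicUsize>) -> tokio::task::JoinHandle<()> {
    tokio::spawn(async move {
        loop {
            tokio::time::sleep(Duration::from_secs(60)).await;
            println!("uploaded {} bytes so far", uploaded.load(Ordering::SeqCst));
        }
    })
}

// Usage sketch:
// let handle = spawn_progress_logger(uploaded_len.clone());
// ... drive the upload stream to completion ...
// handle.abort(); // the loop never exits on its own, so it must be aborted
```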
@@ -754,7 +873,7 @@ impl BackupWriter {
                         let digest = chunk_info.digest;
                         let digest_str = hex::encode(digest);
 
-                        log::trace!(
+                        trace!(
                             "upload new chunk {} ({} bytes, offset {})",
                             digest_str,
                             chunk_info.chunk_len,
@@ -785,7 +904,13 @@ impl BackupWriter {
                         Either::Left(h2.send_request(request, upload_data).and_then(
                             move |response| async move {
                                 upload_queue
-                                    .send((new_info, Some(response)))
+                                    .send((
+                                        new_info,
+                                        Some(ChunkUploadResponse {
+                                            future: response,
+                                            size: chunk_info.chunk_len as usize,
+                                        }),
+                                    ))
                                     .await
                                     .map_err(|err| {
                                         format_err!("failed to send to upload queue: {}", err)
@@ -803,29 +928,14 @@ impl BackupWriter {
                     })
             })
             .then(move |result| async move { upload_result.await?.and(result) }.boxed())
             .and_then(move |_| {
-                let duration = start_time.elapsed();
-                let chunk_count = total_chunks2.load(Ordering::SeqCst);
-                let chunk_reused = known_chunk_count2.load(Ordering::SeqCst);
-                let chunk_injected = injected_chunk_count2.load(Ordering::SeqCst);
-                let size = stream_len2.load(Ordering::SeqCst);
-                let size_reused = reused_len2.load(Ordering::SeqCst);
-                let size_injected = injected_len2.load(Ordering::SeqCst);
-                let size_compressed = compressed_stream_len2.load(Ordering::SeqCst) as usize;
-
-                let mut guard = index_csum_2.lock().unwrap();
+                let mut guard = index_csum.lock().unwrap();
                 let csum = guard.take().unwrap().finish();
 
-                futures::future::ok(UploadStats {
-                    chunk_count,
-                    chunk_reused,
-                    chunk_injected,
-                    size,
-                    size_reused,
-                    size_injected,
-                    size_compressed,
-                    duration,
-                    csum,
-                })
+                if let Some(handle) = progress_handle {
+                    handle.abort();
+                }
+
+                futures::future::ok(counters.to_upload_stats(csum, start_time.elapsed()))
             })
     }
 
@@ -854,7 +964,7 @@ impl BackupWriter {
                 break;
             }
 
-            log::debug!("send test data ({} bytes)", data.len());
+            debug!("send test data ({} bytes)", data.len());
             let request =
                 H2Client::request_builder("localhost", "POST", "speedtest", None, None).unwrap();
             let request_future = self
@@ -869,13 +979,13 @@ impl BackupWriter {
 
         let _ = upload_result.await?;
 
-        log::info!(
+        info!(
             "Uploaded {} chunks in {} seconds.",
             repeat,
             start_time.elapsed().as_secs()
         );
         let speed = ((item_len * (repeat as usize)) as f64) / start_time.elapsed().as_secs_f64();
-        log::info!(
+        info!(
             "Time per request: {} microseconds.",
             (start_time.elapsed().as_micros()) / (repeat as u128)
         );
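The writer hunks above replace a bundle of individual atomic counters (`total_chunks2`, `reused_len2`, `compressed_stream_len2`, ...) with a single shared counters handle (`counters.add_known_chunk(...)`, `counters.total_stream_len()`, `counters.to_upload_stats(...)`). The real `UploadCounters`/`UploadStats` types live in the new `backup_stats` module, which is not shown in this excerpt; the sketch below is only an assumed illustration of how such an aggregate can hand back the running stream offset while updating its statistics.

```rust
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;

// Hypothetical counter aggregate; field and method names mirror the calls in
// the diff but the actual pbs-client implementation will differ in detail.
#[derive(Clone, Default)]
struct Counters {
    total_len: Arc<AtomicUsize>,
    reused_len: Arc<AtomicUsize>,
    compressed_len: Arc<AtomicUsize>,
}

impl Counters {
    // Record a chunk that is already known on the server; the previous total
    // stream length is returned, which is exactly the chunk's offset.
    fn add_known_chunk(&self, chunk_len: usize) -> usize {
        self.reused_len.fetch_add(chunk_len, Ordering::SeqCst);
        self.total_len.fetch_add(chunk_len, Ordering::SeqCst)
    }

    // Record a freshly built chunk, additionally tracking its compressed size.
    fn add_new_chunk(&self, chunk_len: usize, compressed_size: u64) -> usize {
        self.compressed_len
            .fetch_add(compressed_size as usize, Ordering::SeqCst);
        self.total_len.fetch_add(chunk_len, Ordering::SeqCst)
    }

    fn total_stream_len(&self) -> usize {
        self.total_len.load(Ordering::SeqCst)
    }
}
```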
@@ -14,6 +14,7 @@ use nix::fcntl::OFlag;
 use nix::sys::stat::Mode;
 
 use pathpatterns::{MatchEntry, MatchList, MatchPattern, MatchType, PatternFlag};
+use pbs_api_types::PathPattern;
 use proxmox_router::cli::{self, CliCommand, CliCommandMap, CliHelper, CommandLineInterface};
 use proxmox_schema::api;
 use proxmox_sys::fs::{create_path, CreateOptions};
@@ -21,7 +22,8 @@ use pxar::accessor::ReadAt;
 use pxar::{EntryKind, Metadata};
 
 use pbs_datastore::catalog::{self, DirEntryAttribute};
-use proxmox_async::runtime::block_in_place;
+use proxmox_async::runtime::{block_in_place, block_on};
+use proxmox_log::error;
 
 use crate::pxar::Flags;
 
@@ -105,7 +107,7 @@ fn complete_path(complete_me: &str, _map: &HashMap<String, String>) -> Vec<Strin
     match shell.complete_path(complete_me) {
         Ok(list) => list,
         Err(err) => {
-            log::error!("error during completion: {}", err);
+            error!("error during completion: {}", err);
             Vec::new()
         }
     }
@@ -240,8 +242,7 @@ async fn list_selected_command(patterns: bool) -> Result<(), Error> {
     input: {
         properties: {
             pattern: {
-                type: String,
-                description: "Match pattern for matching files in the catalog."
+                type: PathPattern,
             },
             select: {
                 type: bool,
@@ -282,9 +283,8 @@ async fn restore_selected_command(target: String) -> Result<(), Error> {
                 description: "target path for restore on local filesystem."
             },
             pattern: {
-                type: String,
+                type: PathPattern,
                 optional: true,
-                description: "match pattern to limit files for restore."
             }
         }
     }
@@ -304,7 +304,6 @@ async fn restore_command(target: String, pattern: Option<String>) -> Result<(),
 /// The `Path` type's component iterator does not tell us anything about trailing slashes or
 /// trailing `Component::CurDir` entries. Since we only support regular paths we'll roll our own
 /// here:
-
 pub struct Shell {
     /// Readline instance handling input and callbacks
     rl: rustyline::Editor<CliHelper>,
@@ -312,8 +311,9 @@ pub struct Shell {
     /// Interactive prompt.
     prompt: String,
 
-    /// Catalog reader instance to navigate
-    catalog: CatalogReader,
+    /// Optional catalog reader instance to navigate, if not present the Accessor is used for
+    /// navigation
+    catalog: Option<CatalogReader>,
 
     /// List of selected paths for restore
     selected: HashMap<OsString, MatchEntry>,
@@ -347,7 +347,7 @@ impl PathStackEntry {
 impl Shell {
     /// Create a new shell for the given catalog and pxar archive.
     pub async fn new(
-        mut catalog: CatalogReader,
+        mut catalog: Option<CatalogReader>,
         archive_name: &str,
         archive: Accessor,
     ) -> Result<Self, Error> {
@@ -355,11 +355,31 @@ impl Shell {
         let mut rl = rustyline::Editor::<CliHelper>::new();
         rl.set_helper(Some(cli_helper));
 
-        let catalog_root = catalog.root()?;
-        let archive_root = catalog
-            .lookup(&catalog_root, archive_name.as_bytes())?
-            .ok_or_else(|| format_err!("archive not found in catalog"))?;
-        let position = vec![PathStackEntry::new(archive_root)];
+        let mut position = Vec::new();
+        if let Some(catalog) = catalog.as_mut() {
+            let catalog_root = catalog.root()?;
+            let archive_root = catalog
+                .lookup(&catalog_root, archive_name.as_bytes())?
+                .ok_or_else(|| format_err!("archive not found in catalog"))?;
+            position.push(PathStackEntry::new(archive_root));
+        } else {
+            let root = archive.open_root().await?;
+            let root_entry = root.lookup_self().await?;
+            if let EntryKind::Directory = root_entry.kind() {
+                let entry_attr = DirEntryAttribute::Directory {
+                    start: root_entry.entry_range_info().entry_range.start,
+                };
+                position.push(PathStackEntry {
+                    catalog: catalog::DirEntry {
+                        name: archive_name.into(),
+                        attr: entry_attr,
+                    },
+                    pxar: Some(root_entry),
+                });
+            } else {
+                bail!("unexpected root entry type");
+            }
+        }
 
         let mut this = Self {
             rl,
@@ -398,7 +418,7 @@ impl Shell {
             let args = match cli::shellword_split(&line) {
                 Ok(args) => args,
                 Err(err) => {
-                    log::error!("Error: {}", err);
+                    error!("Error: {}", err);
                     continue;
                 }
             };
@@ -450,7 +470,7 @@ impl Shell {
 
     async fn resolve_symlink(
         stack: &mut Vec<PathStackEntry>,
-        catalog: &mut CatalogReader,
+        catalog: &mut Option<CatalogReader>,
         accessor: &Accessor,
         follow_symlinks: &mut Option<usize>,
     ) -> Result<(), Error> {
@@ -468,7 +488,7 @@ impl Shell {
         };
 
         let new_stack =
-            Self::lookup(stack, &mut *catalog, accessor, Some(path), follow_symlinks).await?;
+            Self::lookup(stack, catalog, accessor, Some(path), follow_symlinks).await?;
 
         *stack = new_stack;
 
@@ -484,7 +504,7 @@ impl Shell {
     /// out.
     async fn step(
         stack: &mut Vec<PathStackEntry>,
-        catalog: &mut CatalogReader,
+        catalog: &mut Option<CatalogReader>,
         accessor: &Accessor,
         component: std::path::Component<'_>,
         follow_symlinks: &mut Option<usize>,
@@ -503,9 +523,27 @@ impl Shell {
                 if stack.last().unwrap().catalog.is_symlink() {
                     Self::resolve_symlink(stack, catalog, accessor, follow_symlinks).await?;
                 }
-                match catalog.lookup(&stack.last().unwrap().catalog, entry.as_bytes())? {
-                    Some(dir) => stack.push(PathStackEntry::new(dir)),
-                    None => bail!("no such file or directory: {:?}", entry),
+                if let Some(catalog) = catalog {
+                    match catalog.lookup(&stack.last().unwrap().catalog, entry.as_bytes())? {
+                        Some(dir) => stack.push(PathStackEntry::new(dir)),
+                        None => bail!("no such file or directory: {entry:?}"),
+                    }
+                } else {
+                    let pxar_entry = parent_pxar_entry(stack)?;
+                    let parent_dir = pxar_entry.enter_directory().await?;
+                    match parent_dir.lookup(entry).await? {
+                        Some(entry) => {
+                            let entry_attr = DirEntryAttribute::try_from(&entry)?;
+                            stack.push(PathStackEntry {
+                                catalog: catalog::DirEntry {
+                                    name: entry.entry().file_name().as_bytes().into(),
+                                    attr: entry_attr,
+                                },
+                                pxar: Some(entry),
+                            })
+                        }
+                        None => bail!("no such file or directory: {entry:?}"),
+                    }
                 }
             }
         }
@@ -515,7 +553,7 @@ impl Shell {
 
     fn step_nofollow(
         stack: &mut Vec<PathStackEntry>,
-        catalog: &mut CatalogReader,
+        catalog: &mut Option<CatalogReader>,
         component: std::path::Component<'_>,
     ) -> Result<(), Error> {
         use std::path::Component;
@@ -531,11 +569,27 @@ impl Shell {
             Component::Normal(entry) => {
                 if stack.last().unwrap().catalog.is_symlink() {
                     bail!("target is a symlink");
-                } else {
+                } else if let Some(catalog) = catalog.as_mut() {
                     match catalog.lookup(&stack.last().unwrap().catalog, entry.as_bytes())? {
                         Some(dir) => stack.push(PathStackEntry::new(dir)),
                         None => bail!("no such file or directory: {:?}", entry),
                     }
+                } else {
+                    let pxar_entry = parent_pxar_entry(stack)?;
+                    let parent_dir = block_on(pxar_entry.enter_directory())?;
+                    match block_on(parent_dir.lookup(entry))? {
+                        Some(entry) => {
+                            let entry_attr = DirEntryAttribute::try_from(&entry)?;
+                            stack.push(PathStackEntry {
+                                catalog: catalog::DirEntry {
+                                    name: entry.entry().file_name().as_bytes().into(),
+                                    attr: entry_attr,
+                                },
+                                pxar: Some(entry),
+                            })
+                        }
+                        None => bail!("no such file or directory: {entry:?}"),
+                    }
                 }
             }
         }
@@ -545,7 +599,7 @@ impl Shell {
     /// The pxar accessor is required to resolve symbolic links
     async fn walk_catalog(
         stack: &mut Vec<PathStackEntry>,
-        catalog: &mut CatalogReader,
+        catalog: &mut Option<CatalogReader>,
         accessor: &Accessor,
         path: &Path,
         follow_symlinks: &mut Option<usize>,
@@ -559,7 +613,7 @@ impl Shell {
     /// Non-async version cannot follow symlinks.
     fn walk_catalog_nofollow(
         stack: &mut Vec<PathStackEntry>,
-        catalog: &mut CatalogReader,
+        catalog: &mut Option<CatalogReader>,
         path: &Path,
     ) -> Result<(), Error> {
         for c in path.components() {
@@ -612,12 +666,34 @@ impl Shell {
                     tmp_stack = self.position.clone();
                 }
                 Self::walk_catalog_nofollow(&mut tmp_stack, &mut self.catalog, &path)?;
-                (&tmp_stack.last().unwrap().catalog, base, part)
+                (&tmp_stack.last().unwrap(), base, part)
             }
-            None => (&self.position.last().unwrap().catalog, "", input),
+            None => (&self.position.last().unwrap(), "", input),
         };
 
-        let entries = self.catalog.read_dir(parent)?;
+        let entries = if let Some(catalog) = self.catalog.as_mut() {
+            catalog.read_dir(&parent.catalog)?
+        } else {
+            let dir = if let Some(entry) = parent.pxar.as_ref() {
+                block_on(entry.enter_directory())?
+            } else {
+                bail!("missing pxar entry for parent");
+            };
+            let mut out = Vec::new();
+            let entries = block_on(crate::pxar::tools::pxar_metadata_read_dir(dir))?;
+            for entry in entries {
+                let mut name = base.to_string();
+                let file_name = entry.file_name().as_bytes();
+                if file_name.starts_with(part.as_bytes()) {
+                    name.push_str(std::str::from_utf8(file_name)?);
+                    if entry.is_dir() {
+                        name.push('/');
+                    }
+                    out.push(name);
+                }
+            }
+            return Ok(out);
+        };
 
         let mut out = Vec::new();
         for entry in entries {
@@ -637,7 +713,7 @@ impl Shell {
     // Break async recursion here: lookup -> walk_catalog -> step -> lookup
     fn lookup<'future, 's, 'c, 'a, 'p, 'y>(
         stack: &'s [PathStackEntry],
-        catalog: &'c mut CatalogReader,
+        catalog: &'c mut Option<CatalogReader>,
         accessor: &'a Accessor,
         path: Option<&'p Path>,
         follow_symlinks: &'y mut Option<usize>,
@@ -678,7 +754,23 @@ impl Shell {
 
         let last = stack.last().unwrap();
         if last.catalog.is_directory() {
-            let items = self.catalog.read_dir(&stack.last().unwrap().catalog)?;
+            let items = if let Some(catalog) = self.catalog.as_mut() {
+                catalog.read_dir(&stack.last().unwrap().catalog)?
+            } else {
+                let dir = if let Some(entry) = last.pxar.as_ref() {
+                    entry.enter_directory().await?
+                } else {
+                    bail!("missing pxar entry for parent");
+                };
+
+                let mut out = std::io::stdout();
+                let items = crate::pxar::tools::pxar_metadata_read_dir(dir).await?;
+                for item in items {
+                    out.write_all(item.file_name().as_bytes())?;
+                    out.write_all(b"\n")?;
+                }
+                return Ok(());
+            };
             let mut out = std::io::stdout();
             // FIXME: columnize
             for item in items {
@@ -705,7 +797,7 @@ impl Shell {
 
         let file = Self::walk_pxar_archive(&self.accessor, &mut stack).await?;
         std::io::stdout()
-            .write_all(crate::pxar::format_multi_line_entry(file.entry()).as_bytes())?;
+            .write_all(crate::pxar::tools::format_multi_line_entry(file.entry()).as_bytes())?;
         Ok(())
     }
 
@@ -720,6 +812,14 @@ impl Shell {
             &mut None,
         )
         .await?;
 
+        if new_position.is_empty() {
+            // Avoid moving below archive root into catalog root, thereby treating
+            // the archive root as its own parent directory.
+            self.position.truncate(1);
+            return Ok(());
+        }
+
         if !new_position.last().unwrap().catalog.is_directory() {
             bail!("not a directory");
         }
@@ -820,17 +920,36 @@ impl Shell {
     async fn list_matching_files(&mut self) -> Result<(), Error> {
         let matches = self.build_match_list();
 
-        self.catalog.find(
-            &self.position[0].catalog,
-            &mut Vec::new(),
-            &matches,
-            &mut |path: &[u8]| -> Result<(), Error> {
-                let mut out = std::io::stdout();
-                out.write_all(path)?;
-                out.write_all(b"\n")?;
-                Ok(())
-            },
-        )?;
+        if let Some(catalog) = self.catalog.as_mut() {
+            catalog.find(
+                &self.position[0].catalog,
+                &mut Vec::new(),
+                &matches,
+                &mut |path: &[u8]| -> Result<(), Error> {
+                    let mut out = std::io::stdout();
+                    out.write_all(path)?;
+                    out.write_all(b"\n")?;
+                    Ok(())
+                },
+            )?;
+        } else {
+            let parent_dir = if let Some(pxar_entry) = self.position[0].pxar.as_ref() {
+                pxar_entry.enter_directory().await?
+            } else {
+                bail!("missing pxar entry for archive root");
+            };
+            crate::pxar::tools::pxar_metadata_catalog_find(
+                parent_dir,
+                &matches,
+                &|path: &[u8]| -> Result<(), Error> {
+                    let mut out = std::io::stdout();
+                    out.write_all(path)?;
+                    out.write_all(b"\n")?;
+                    Ok(())
+                },
+            )
+            .await?;
+        }
 
         Ok(())
     }
@@ -841,18 +960,37 @@ impl Shell {
             MatchEntry::parse_pattern(pattern, PatternFlag::PATH_NAME, MatchType::Include)?;
 
         let mut found_some = false;
-        self.catalog.find(
-            &self.position[0].catalog,
-            &mut Vec::new(),
-            &[&pattern_entry],
-            &mut |path: &[u8]| -> Result<(), Error> {
-                found_some = true;
-                let mut out = std::io::stdout();
-                out.write_all(path)?;
-                out.write_all(b"\n")?;
-                Ok(())
-            },
-        )?;
+        if let Some(catalog) = self.catalog.as_mut() {
+            catalog.find(
+                &self.position[0].catalog,
+                &mut Vec::new(),
+                &[&pattern_entry],
+                &mut |path: &[u8]| -> Result<(), Error> {
+                    found_some = true;
+                    let mut out = std::io::stdout();
+                    out.write_all(path)?;
+                    out.write_all(b"\n")?;
+                    Ok(())
+                },
+            )?;
+        } else {
+            let parent_dir = if let Some(pxar_entry) = self.position[0].pxar.as_ref() {
+                pxar_entry.enter_directory().await?
+            } else {
+                bail!("missing pxar entry for archive root");
+            };
+            crate::pxar::tools::pxar_metadata_catalog_find(
+                parent_dir,
+                &[&pattern_entry],
+                &|path: &[u8]| -> Result<(), Error> {
+                    let mut out = std::io::stdout();
+                    out.write_all(path)?;
+                    out.write_all(b"\n")?;
+                    Ok(())
+                },
+            )
+            .await?;
+        }
 
         if found_some && select {
             self.selected.insert(pattern_os, pattern_entry);
@@ -945,6 +1083,18 @@ impl Shell {
     }
 }
 
+fn parent_pxar_entry(dir_stack: &[PathStackEntry]) -> Result<&FileEntry, Error> {
+    if let Some(parent) = dir_stack.last().as_ref() {
+        if let Some(entry) = parent.pxar.as_ref() {
+            Ok(entry)
+        } else {
+            bail!("missing pxar entry for parent");
+        }
+    } else {
+        bail!("missing parent entry on stack");
+    }
+}
+
 struct ExtractorState<'a> {
     path: Vec<u8>,
     path_len: usize,
@@ -960,22 +1110,38 @@ struct ExtractorState<'a> {
 
     extractor: crate::pxar::extract::Extractor,
 
-    catalog: &'a mut CatalogReader,
+    catalog: &'a mut Option<CatalogReader>,
     match_list: &'a [MatchEntry],
     accessor: &'a Accessor,
 }
 
 impl<'a> ExtractorState<'a> {
     pub fn new(
-        catalog: &'a mut CatalogReader,
+        catalog: &'a mut Option<CatalogReader>,
         dir_stack: Vec<PathStackEntry>,
         extractor: crate::pxar::extract::Extractor,
         match_list: &'a [MatchEntry],
         accessor: &'a Accessor,
     ) -> Result<Self, Error> {
-        let read_dir = catalog
-            .read_dir(&dir_stack.last().unwrap().catalog)?
-            .into_iter();
+        let read_dir = if let Some(catalog) = catalog.as_mut() {
+            catalog
+                .read_dir(&dir_stack.last().unwrap().catalog)?
+                .into_iter()
+        } else {
+            let pxar_entry = parent_pxar_entry(&dir_stack)?;
+            let dir = block_on(pxar_entry.enter_directory())?;
+            let entries = block_on(crate::pxar::tools::pxar_metadata_read_dir(dir))?;
+
+            let mut catalog_entries = Vec::with_capacity(entries.len());
+            for entry in entries {
+                let entry_attr = DirEntryAttribute::try_from(&entry).unwrap();
+                catalog_entries.push(catalog::DirEntry {
+                    name: entry.entry().file_name().as_bytes().into(),
+                    attr: entry_attr,
+                });
+            }
+            catalog_entries.into_iter()
+        };
         Ok(Self {
             path: Vec::new(),
             path_len: 0,
@@ -1053,11 +1219,29 @@ impl<'a> ExtractorState<'a> {
         entry: catalog::DirEntry,
         match_result: Option<MatchType>,
     ) -> Result<(), Error> {
+        let entry_iter = if let Some(catalog) = self.catalog.as_mut() {
+            catalog.read_dir(&entry)?.into_iter()
+        } else {
+            self.dir_stack.push(PathStackEntry::new(entry.clone()));
+            let dir = Shell::walk_pxar_archive(self.accessor, &mut self.dir_stack).await?;
+            self.dir_stack.pop();
+            let dir = dir.enter_directory().await?;
+            let entries = block_on(crate::pxar::tools::pxar_metadata_read_dir(dir))?;
+            entries
+                .into_iter()
+                .map(|entry| {
+                    let entry_attr = DirEntryAttribute::try_from(&entry).unwrap();
+                    catalog::DirEntry {
+                        name: entry.entry().file_name().as_bytes().into(),
+                        attr: entry_attr,
+                    }
+                })
+                .collect::<Vec<catalog::DirEntry>>()
+                .into_iter()
+        };
         // enter a new directory:
-        self.read_dir_stack.push(mem::replace(
-            &mut self.read_dir,
-            self.catalog.read_dir(&entry)?.into_iter(),
-        ));
+        self.read_dir_stack
+            .push(mem::replace(&mut self.read_dir, entry_iter));
         self.matches_stack.push(self.matches);
         self.dir_stack.push(PathStackEntry::new(entry));
         self.path_len_stack.push(self.path_len);
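The catalog-shell hunks above repeatedly dispatch on whether a separate catalog file is available: lookups go through `CatalogReader` when it is, and fall back to the split pxar metadata archive when it is not. The following standalone sketch shows only that dispatch shape with made-up stand-in types (`CatalogIndex`, `ArchiveDir`); it is not the pbs-client API.

```rust
// Simplified stand-ins for CatalogReader and the pxar accessor directory.
struct CatalogIndex;
struct ArchiveDir;

impl CatalogIndex {
    fn lookup(&mut self, name: &str) -> Option<String> {
        Some(name.to_string())
    }
}

impl ArchiveDir {
    async fn lookup(&self, name: &str) -> Option<String> {
        Some(name.to_string())
    }
}

// Prefer the fast catalog lookup when present, otherwise resolve the entry
// from the (metadata) archive itself.
async fn lookup_entry(
    catalog: &mut Option<CatalogIndex>,
    archive: &ArchiveDir,
    name: &str,
) -> Option<String> {
    match catalog.as_mut() {
        Some(catalog) => catalog.lookup(name),
        None => archive.lookup(name).await,
    }
}
```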
@@ -4,11 +4,13 @@ use std::time::Duration;
 
 use anyhow::{bail, format_err, Error};
 use futures::*;
-use http::header::HeaderValue;
-use http::Uri;
-use http::{Request, Response};
+#[cfg(not(target_feature = "crt-static"))]
+use hyper::client::connect::dns::GaiResolver;
+
 use hyper::client::{Client, HttpConnector};
-use hyper::Body;
+use hyper::http::header::HeaderValue;
+use hyper::http::Uri;
+use hyper::http::{Request, Response};
+use hyper::{body::HttpBody, Body};
 use openssl::{
     ssl::{SslConnector, SslMethod},
     x509::X509StoreContextRef,
@@ -25,6 +27,7 @@ use proxmox_async::broadcast_future::BroadcastFuture;
 use proxmox_http::client::HttpsConnector;
 use proxmox_http::uri::{build_authority, json_object_to_query};
 use proxmox_http::{ProxyConfig, RateLimiter};
+use proxmox_log::{error, info, warn};
 
 use pbs_api_types::percent_encoding::DEFAULT_ENCODE_SET;
 use pbs_api_types::{Authid, RateLimitConfig, Userid};
@@ -32,6 +35,74 @@ use pbs_api_types::{Authid, RateLimitConfig, Userid};
 use super::pipe_to_stream::PipeToSendStream;
 use super::PROXMOX_BACKUP_TCP_KEEPALIVE_TIME;
 
+#[cfg(not(target_feature = "crt-static"))]
+type DnsResolver = GaiResolver;
+
+#[cfg(target_feature = "crt-static")]
+type DnsResolver = resolver::HickoryDnsResolver;
+
+#[cfg(target_feature = "crt-static")]
+mod resolver {
+    use std::net::SocketAddr;
+    use std::pin::Pin;
+    use std::sync::Arc;
+    use std::task::{Context, Poll};
+
+    use futures::Future;
+    use hickory_resolver::error::ResolveError;
+    use hickory_resolver::lookup_ip::LookupIpIntoIter;
+    use hickory_resolver::TokioAsyncResolver;
+    use hyper::client::connect::dns::Name;
+    use tower_service::Service;
+
+    pub(crate) struct SocketAddrIter {
+        inner: LookupIpIntoIter,
+    }
+
+    impl Iterator for SocketAddrIter {
+        type Item = SocketAddr;
+
+        fn next(&mut self) -> Option<Self::Item> {
+            self.inner.next().map(|ip_addr| SocketAddr::new(ip_addr, 0))
+        }
+    }
+
+    #[derive(Clone)]
+    pub(crate) struct HickoryDnsResolver {
+        inner: Arc<TokioAsyncResolver>,
+    }
+
+    impl HickoryDnsResolver {
+        pub(crate) fn new() -> Self {
+            Self {
+                inner: Arc::new(TokioAsyncResolver::tokio_from_system_conf().unwrap()),
+            }
+        }
+    }
+
+    impl Service<Name> for HickoryDnsResolver {
+        type Response = SocketAddrIter;
+        type Error = ResolveError;
+        type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send>>;
+
+        fn poll_ready(&mut self, _ctx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+            Poll::Ready(Ok(()))
+        }
+
+        fn call(&mut self, name: Name) -> Self::Future {
+            let inner = self.inner.clone();
+            Box::pin(async move {
+                inner
+                    .lookup_ip(name.as_str())
+                    .await
+                    .map(|r| SocketAddrIter {
+                        inner: r.into_iter(),
+                    })
+            })
+        }
+    }
+}
+
 /// Timeout used for several HTTP operations that are expected to finish quickly but may block in
 /// certain error conditions. Keep it generous, to avoid false-positive under high load.
 const HTTP_TIMEOUT: Duration = Duration::from_secs(2 * 60);
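The hunk above plugs the statically linked build's custom resolver into hyper's connector via `HttpConnector::new_with_resolver`; any type implementing `tower_service::Service<Name>` whose response iterates `SocketAddr`s can be used there. A minimal usage sketch with hyper 0.14's stock `GaiResolver` (which is also the default) looks like this; a `HickoryDnsResolver`-style type would be passed in the same way.

```rust
use hyper::client::connect::dns::GaiResolver;
use hyper::client::HttpConnector;

// Build a TCP connector with an explicitly supplied DNS resolver.
fn build_connector() -> HttpConnector<GaiResolver> {
    let mut connector = HttpConnector::new_with_resolver(GaiResolver::new());
    connector.set_nodelay(true); // helps HTTP/2 throughput
    connector.enforce_http(false); // allow https URIs; TLS is layered on top
    connector
}
```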
@@ -133,7 +204,7 @@ impl Default for HttpClientOptions {
 
 /// HTTP(S) API client
 pub struct HttpClient {
-    client: Client<HttpsConnector>,
+    client: Client<HttpsConnector<DnsResolver>>,
     server: String,
     port: u16,
     fingerprint: Arc<Mutex<Option<String>>>,
@@ -348,14 +419,14 @@ impl HttpClient {
                         if let Err(err) =
                             store_fingerprint(prefix.as_ref().unwrap(), &server, &fingerprint)
                         {
-                            log::error!("{}", err);
+                            error!("{}", err);
                         }
                     }
                     *verified_fingerprint.lock().unwrap() = Some(fingerprint);
                     true
                 }
                 Err(err) => {
-                    log::error!("certificate validation failed - {}", err);
+                    error!("certificate validation failed - {}", err);
                     false
                 }
             },
@@ -364,7 +435,8 @@ impl HttpClient {
             ssl_connector_builder.set_verify(openssl::ssl::SslVerifyMode::NONE);
         }
 
-        let mut httpc = HttpConnector::new();
+        let resolver = DnsResolver::new();
+        let mut httpc = HttpConnector::new_with_resolver(resolver);
         httpc.set_nodelay(true); // important for h2 download performance!
         httpc.enforce_http(false); // we want https...
 
@@ -393,7 +465,7 @@ impl HttpClient {
 
         let proxy_config = ProxyConfig::from_proxy_env()?;
         if let Some(config) = proxy_config {
-            log::info!("Using proxy connection: {}:{}", config.host, config.port);
+            info!("Using proxy connection: {}:{}", config.host, config.port);
             https.set_proxy(config);
         }
 
@@ -461,14 +533,14 @@ impl HttpClient {
                                 &auth.token,
                             ) {
                                 if std::io::stdout().is_terminal() {
-                                    log::error!("storing login ticket failed: {}", err);
+                                    error!("storing login ticket failed: {}", err);
                                 }
                             }
                         }
                         *auth2.write().unwrap() = auth;
                     }
                     Err(err) => {
-                        log::error!("re-authentication failed: {}", err);
+                        error!("re-authentication failed: {}", err);
                     }
                 }
             }
@@ -498,7 +570,7 @@ impl HttpClient {
                     &auth.token,
                 ) {
                     if std::io::stdout().is_terminal() {
-                        log::error!("storing login ticket failed: {}", err);
+                        error!("storing login ticket failed: {}", err);
                     }
                 }
             }
@@ -525,7 +597,9 @@ impl HttpClient {
             _options: options,
         })
     }
+}
 
+impl HttpClient {
     /// Login
     ///
     /// Login is done on demand, so this is only required if you need
@@ -600,14 +674,14 @@ impl HttpClient {
             if expected_fingerprint == fp_string {
                 return Ok(Some(fp_string));
             } else {
-                log::warn!("WARNING: certificate fingerprint does not match expected fingerprint!");
-                log::warn!("expected: {}", expected_fingerprint);
+                warn!("WARNING: certificate fingerprint does not match expected fingerprint!");
+                warn!("expected: {}", expected_fingerprint);
             }
         }
 
         // If we're on a TTY, query the user
         if interactive && std::io::stdin().is_terminal() {
-            log::info!("fingerprint: {}", fp_string);
+            info!("fingerprint: {}", fp_string);
             loop {
                 eprint!("Are you sure you want to continue connecting? (y/n): ");
                 let _ = std::io::stdout().flush();
@@ -705,8 +779,7 @@ impl HttpClient {
                 .map(|_| Err(format_err!("unknown error")))
                 .await?
         } else {
-            resp.into_body()
-                .map_err(Error::from)
+            futures::TryStreamExt::map_err(resp.into_body(), Error::from)
                 .try_fold(output, move |acc, chunk| async move {
                     acc.write_all(&chunk)?;
                     Ok::<_, Error>(acc)
@@ -781,7 +854,7 @@ impl HttpClient {
             .map_err(|_| format_err!("http upgrade request timed out"))??;
         let status = resp.status();
 
-        if status != http::StatusCode::SWITCHING_PROTOCOLS {
+        if status != hyper::http::StatusCode::SWITCHING_PROTOCOLS {
             Self::api_response(resp).await?;
             bail!("unknown error");
         }
@@ -790,14 +863,14 @@ impl HttpClient {
 
         let max_window_size = (1 << 31) - 2;
 
-        let (h2, connection) = h2::client::Builder::new()
+        let (h2, connection) = h2::legacy::client::Builder::new()
            .initial_connection_window_size(max_window_size)
            .initial_window_size(max_window_size)
            .max_frame_size(4 * 1024 * 1024)
            .handshake(upgraded)
            .await?;
 
-        let connection = connection.map_err(|_| log::error!("HTTP/2.0 connection failed"));
+        let connection = connection.map_err(|_| error!("HTTP/2.0 connection failed"));
 
         let (connection, abort) = futures::future::abortable(connection);
         // A cancellable future returns an Option which is None when cancelled and
@@ -814,7 +887,7 @@ impl HttpClient {
     }
 
     async fn credentials(
-        client: Client<HttpsConnector>,
+        client: Client<HttpsConnector<DnsResolver>>,
         server: String,
         port: u16,
         username: Userid,
@@ -843,7 +916,7 @@ impl HttpClient {
 
     async fn api_response(response: Response<Body>) -> Result<Value, Error> {
         let status = response.status();
-        let data = hyper::body::to_bytes(response.into_body()).await?;
+        let data = HttpBody::collect(response.into_body()).await?.to_bytes();
 
         let text = String::from_utf8(data.to_vec()).unwrap();
         if status.is_success() {
@@ -859,7 +932,7 @@ impl HttpClient {
     }
 
     async fn api_request(
-        client: Client<HttpsConnector>,
+        client: Client<HttpsConnector<DnsResolver>>,
         req: Request<Body>,
     ) -> Result<Value, Error> {
         Self::api_response(
@@ -935,11 +1008,11 @@ impl Drop for HttpClient {
 
 #[derive(Clone)]
 pub struct H2Client {
-    h2: h2::client::SendRequest<bytes::Bytes>,
+    h2: h2::legacy::client::SendRequest<bytes::Bytes>,
 }
 
 impl H2Client {
-    pub fn new(h2: h2::client::SendRequest<bytes::Bytes>) -> Self {
+    pub fn new(h2: h2::legacy::client::SendRequest<bytes::Bytes>) -> Self {
         Self { h2 }
     }
 
@@ -1019,7 +1092,7 @@ impl H2Client {
         &self,
         request: Request<()>,
         data: Option<bytes::Bytes>,
-    ) -> impl Future<Output = Result<h2::client::ResponseFuture, Error>> {
+    ) -> impl Future<Output = Result<h2::legacy::client::ResponseFuture, Error>> {
         self.h2
             .clone()
             .ready()
@@ -1036,7 +1109,9 @@ impl H2Client {
         })
     }
 
-    pub async fn h2api_response(response: Response<h2::RecvStream>) -> Result<Value, Error> {
+    pub async fn h2api_response(
+        response: Response<h2::legacy::RecvStream>,
+    ) -> Result<Value, Error> {
         let status = response.status();
 
         let (_head, mut body) = response.into_parts();
@@ -1,13 +1,13 @@
 use std::cmp;
 use std::pin::Pin;
-use std::sync::atomic::{AtomicUsize, Ordering};
-use std::sync::{mpsc, Arc};
+use std::sync::mpsc;
 use std::task::{Context, Poll};
 
 use anyhow::{anyhow, Error};
 use futures::{ready, Stream};
 use pin_project_lite::pin_project;
 
+use crate::backup_stats::UploadCounters;
 use crate::pxar::create::ReusableDynamicEntry;
 
 pin_project! {
@@ -16,7 +16,7 @@ pin_project! {
         input: S,
         next_injection: Option<InjectChunks>,
         injections: Option<mpsc::Receiver<InjectChunks>>,
-        stream_len: Arc<AtomicUsize>,
+        counters: UploadCounters,
     }
 }
 
@@ -42,7 +42,7 @@ pub trait InjectReusedChunks: Sized {
     fn inject_reused_chunks(
         self,
         injections: Option<mpsc::Receiver<InjectChunks>>,
-        stream_len: Arc<AtomicUsize>,
+        counters: UploadCounters,
     ) -> InjectReusedChunksQueue<Self>;
 }
 
@@ -53,13 +53,13 @@ where
     fn inject_reused_chunks(
         self,
         injections: Option<mpsc::Receiver<InjectChunks>>,
-        stream_len: Arc<AtomicUsize>,
+        counters: UploadCounters,
     ) -> InjectReusedChunksQueue<Self> {
         InjectReusedChunksQueue {
             input: self,
             next_injection: None,
             injections,
-            stream_len,
+            counters,
         }
     }
 }
@@ -85,7 +85,7 @@ where
 
         if let Some(inject) = this.next_injection.take() {
            // got reusable dynamic entries to inject
-            let offset = this.stream_len.load(Ordering::SeqCst) as u64;
+            let offset = this.counters.total_stream_len() as u64;
 
            match inject.boundary.cmp(&offset) {
                // inject now
@@ -9,6 +9,7 @@ pub mod tools;
 
 mod inject_reused_chunks;
 mod merge_known_chunks;
+pub use merge_known_chunks::MergedChunkInfo;
 pub mod pipe_to_stream;
 
 mod http_client;
@@ -41,4 +42,7 @@ pub use backup_specification::*;
 mod chunk_stream;
 pub use chunk_stream::{ChunkStream, FixedChunkStream, InjectionData};
 
+mod backup_stats;
+pub use backup_stats::BackupStats;
+
 pub const PROXMOX_BACKUP_TCP_KEEPALIVE_TIME: u32 = 120;
@@ -8,7 +8,7 @@ use std::task::{Context, Poll};
 use anyhow::{format_err, Error};
 use bytes::Bytes;
 use futures::{ready, Future};
-use h2::SendStream;
+use h2::legacy::SendStream;
 
 pub struct PipeToSendStream {
     body_tx: SendStream<Bytes>,
@@ -27,6 +27,7 @@ use pxar::{EntryKind, Metadata, PxarVariant};
 
 use proxmox_human_byte::HumanByte;
 use proxmox_io::vec;
+use proxmox_log::{debug, error, info, warn};
 use proxmox_sys::fs::{self, acl, xattr};
 
 use pbs_datastore::catalog::BackupCatalogWriter;
@@ -62,7 +63,7 @@ pub struct PxarCreateOptions {
 
 pub type MetadataArchiveReader = Arc<dyn ReadAt + Send + Sync + 'static>;
 
-/// Statefull information of previous backups snapshots for partial backups
+/// Stateful information of previous backups snapshots for partial backups
 pub struct PxarPrevRef {
     /// Reference accessor for metadata comparison
     pub accessor: Accessor<MetadataArchiveReader>,
@@ -72,7 +73,7 @@ pub struct PxarPrevRef {
     pub archive_name: String,
 }
 
-fn detect_fs_type(fd: RawFd) -> Result<i64, Error> {
+fn detect_fs_type(fd: RawFd) -> Result<i64, Errno> {
     let mut fs_stat = std::mem::MaybeUninit::uninit();
     let res = unsafe { libc::fstatfs(fd, fs_stat.as_mut_ptr()) };
     Errno::result(res)?;
@@ -315,25 +316,25 @@ where
     encoder.close().await?;
 
     if metadata_mode {
-        log::info!("Change detection summary:");
-        log::info!(
+        info!("Change detection summary:");
+        info!(
             " - {} total files ({} hardlinks)",
             archiver.reuse_stats.files_reused_count
                 + archiver.reuse_stats.files_reencoded_count
                 + archiver.reuse_stats.files_hardlink_count,
             archiver.reuse_stats.files_hardlink_count,
         );
-        log::info!(
+        info!(
             " - {} unchanged, reusable files with {} data",
             archiver.reuse_stats.files_reused_count,
             HumanByte::from(archiver.reuse_stats.total_reused_payload_size),
         );
-        log::info!(
+        info!(
             " - {} changed or non-reusable files with {} data",
             archiver.reuse_stats.files_reencoded_count,
             HumanByte::from(archiver.reuse_stats.total_reencoded_size),
         );
-        log::info!(
+        info!(
             " - {} padding in {} partially reused chunks",
             HumanByte::from(
                 archiver.reuse_stats.total_injected_size
@@ -422,6 +423,7 @@ impl Archiver {
         previous_metadata_accessor: &Option<Directory<MetadataArchiveReader>>,
         file_name: &Path,
         metadata: &Metadata,
+        file_size: u64,
     ) -> Result<Option<Range<u64>>, Error> {
         if let Some(previous_metadata_accessor) = previous_metadata_accessor {
             if let Some(file_entry) = previous_metadata_accessor.lookup(file_name).await? {
@@ -432,20 +434,23 @@ impl Archiver {
                     ..
                 } = file_entry.entry().kind()
                 {
+                    if file_size != *size {
+                        return Ok(None);
+                    }
                     let range =
                         *offset..*offset + size + size_of::<pxar::format::Header>() as u64;
-                    log::debug!(
+                    debug!(
                         "reusable: {file_name:?} at range {range:?} has unchanged metadata."
                     );
                     return Ok(Some(range));
                 }
-                log::debug!("reencode: {file_name:?} not a regular file.");
+                debug!("re-encode: {file_name:?} not a regular file.");
                 return Ok(None);
             }
-            log::debug!("reencode: {file_name:?} metadata did not match.");
+            debug!("re-encode: {file_name:?} metadata did not match.");
             return Ok(None);
         }
-        log::debug!("reencode: {file_name:?} not found in previous archive.");
+        debug!("re-encode: {file_name:?} not found in previous archive.");
     }
 
     Ok(None)
@@ -476,12 +481,16 @@ impl Archiver {
             Ok(fd) => Ok(Some(fd)),
             Err(Errno::ENOENT) => {
                 if existed {
-                    self.report_vanished_file()?;
+                    self.report_vanished_file();
                 }
                 Ok(None)
             }
             Err(Errno::EACCES) => {
-                log::warn!("failed to open file: {:?}: access denied", file_name);
+                warn!("failed to open file: {:?}: access denied", file_name);
+                Ok(None)
+            }
+            Err(Errno::ESTALE) => {
+                self.report_stale_file_handle(None);
                 Ok(None)
             }
             Err(Errno::EPERM) if !noatime.is_empty() => {
@@ -511,10 +520,9 @@ impl Archiver {
             let line = match line {
                 Ok(line) => line,
                 Err(err) => {
-                    log::warn!(
+                    warn!(
                         "ignoring .pxarexclude after read error in {:?}: {}",
-                        self.path,
-                        err,
+                        self.path, err,
                     );
                     self.patterns.truncate(old_pattern_count);
                     return Ok(());
@@ -554,7 +562,7 @@ impl Archiver {
                 }
             }
             Err(err) => {
-                log::error!("bad pattern in {:?}: {}", self.path, err);
+                error!("bad pattern in {:?}: {}", self.path, err);
             }
         }
     }
@@ -635,18 +643,36 @@ impl Archiver {
             });
 
             match match_result {
-                Ok(Some(MatchType::Exclude)) => continue,
+                Ok(Some(MatchType::Exclude)) => {
+                    debug!("matched by exclude pattern '{full_path:?}'");
+                    continue;
+                }
                 Ok(_) => (),
                 Err(err) if err.not_found() => continue,
+                Err(Errno::ESTALE) => {
+                    self.report_stale_file_handle(Some(&full_path));
+                    continue;
+                }
                 Err(err) => {
                     return Err(err).with_context(|| format!("stat failed on {full_path:?}"))
                 }
             }
 
-            let stat = stat_results
-                .map(Ok)
-                .unwrap_or_else(get_file_mode)
-                .with_context(|| format!("stat failed on {full_path:?}"))?;
+            let stat = match stat_results {
+                Some(mode) => mode,
+                None => match get_file_mode() {
+                    Ok(mode) => mode,
+                    Err(Errno::ESTALE) => {
+                        self.report_stale_file_handle(Some(&full_path));
+                        continue;
+                    }
+                    Err(err) => {
+                        return Err(
+                            Error::from(err).context(format!("stat failed on {full_path:?}"))
+                        )
+                    }
+                },
+            };
 
             self.entry_counter += 1;
             if self.entry_counter > self.entry_limit {
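The archiver hunks above add dedicated handling for `Errno::ESTALE` so that a stale (typically NFS) file handle is reported and the entry skipped instead of aborting the whole backup run. A minimal, self-contained illustration of that pattern using nix's `lstat` follows; the function name and the plain `eprintln!` are assumptions made for the sketch, not pbs-client code.

```rust
use nix::errno::Errno;
use nix::sys::stat::{lstat, FileStat};

// Stat a path, treating a stale file handle as "skip this entry" rather
// than as a fatal error.
fn stat_or_skip(path: &str) -> Result<Option<FileStat>, Errno> {
    match lstat(path) {
        Ok(stat) => Ok(Some(stat)),
        Err(Errno::ESTALE) => {
            eprintln!("warning: stale file handle encountered while reading: {path:?}");
            Ok(None) // caller continues with the next directory entry
        }
        Err(other) => Err(other),
    }
}
```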
@@ -668,25 +694,27 @@ impl Archiver {
         Ok(file_list)
     }

-    fn report_vanished_file(&mut self) -> Result<(), Error> {
-        log::warn!("warning: file vanished while reading: {:?}", self.path);
-        Ok(())
+    fn report_stale_file_handle(&self, path: Option<&PathBuf>) {
+        let path = path.unwrap_or(&self.path);
+        warn!("warning: stale file handle encountered while reading: {path:?}");
     }

-    fn report_file_shrunk_while_reading(&mut self) -> Result<(), Error> {
-        log::warn!(
+    fn report_vanished_file(&self) {
+        warn!("warning: file vanished while reading: {:?}", self.path);
+    }
+
+    fn report_file_shrunk_while_reading(&self) {
+        warn!(
             "warning: file size shrunk while reading: {:?}, file will be padded with zeros!",
             self.path,
         );
-        Ok(())
     }

-    fn report_file_grew_while_reading(&mut self) -> Result<(), Error> {
-        log::warn!(
+    fn report_file_grew_while_reading(&self) {
+        warn!(
             "warning: file size increased while reading: {:?}, file will be truncated!",
             self.path,
         );
-        Ok(())
     }

     async fn add_entry<T: SeqWrite + Send>(
@@ -716,23 +744,23 @@ impl Archiver {
             None => return Ok(()),
         };

-        let match_path = PathBuf::from("/").join(self.path.clone());
-        if self
-            .patterns
-            .matches(match_path.as_os_str().as_bytes(), stat.st_mode)?
-            == Some(MatchType::Exclude)
-        {
-            return Ok(());
-        }
-
-        let metadata = get_metadata(
+        let metadata = match get_metadata(
             fd.as_raw_fd(),
             stat,
             self.flags(),
             self.fs_magic,
             &mut self.fs_feature_flags,
             self.skip_e2big_xattr,
-        )?;
+        ) {
+            Ok(metadata) => metadata,
+            Err(err) => {
+                if let Some(Errno::ESTALE) = err.downcast_ref::<Errno>() {
+                    self.report_stale_file_handle(None);
+                    return Ok(());
+                }
+                return Err(err);
+            }
+        };

         if self.previous_payload_index.is_none() {
             return self
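Note: get_metadata returns an anyhow::Error, so the hunk above recovers the underlying nix errno with downcast_ref before deciding to skip the entry. A minimal sketch of that check, assuming the anyhow and nix crates (the helper name is_stale_file_handle is made up and not part of the patch):

use anyhow::Error;
use nix::errno::Errno;

// True when an anyhow error chain bottoms out in a stale-file-handle errno.
fn is_stale_file_handle(err: &Error) -> bool {
    matches!(err.downcast_ref::<Errno>(), Some(Errno::ESTALE))
}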
@@ -742,7 +770,7 @@ impl Archiver {

         // Avoid having to many open file handles in cached entries
         if self.cache.is_full() {
-            log::debug!("Max cache size reached, reuse cached entries");
+            debug!("Max cache size reached, reuse cached entries");
             self.flush_cached_reusing_if_below_threshold(encoder, true)
                 .await?;
         }
@@ -774,12 +802,13 @@ impl Archiver {
         }

         let file_name: &Path = OsStr::from_bytes(c_file_name.to_bytes()).as_ref();
+        let file_size = stat.st_size as u64;
         if let Some(payload_range) = self
-            .is_reusable_entry(previous_metadata, file_name, &metadata)
+            .is_reusable_entry(previous_metadata, file_name, &metadata, file_size)
             .await?
         {
             if !self.cache.try_extend_range(payload_range.clone()) {
-                log::debug!("Cache range has hole, new range: {payload_range:?}");
+                debug!("Cache range has hole, new range: {payload_range:?}");
                 self.flush_cached_reusing_if_below_threshold(encoder, true)
                     .await?;
                 // range has to be set after flushing of cached entries, which resets the range
|
|||||||
// actual chunks, which needs to be added before encoding the payload reference
|
// actual chunks, which needs to be added before encoding the payload reference
|
||||||
let offset =
|
let offset =
|
||||||
PayloadOffset::default().add(payload_range.start - self.cache.range().start);
|
PayloadOffset::default().add(payload_range.start - self.cache.range().start);
|
||||||
log::debug!("Offset relative to range start: {offset:?}");
|
debug!("Offset relative to range start: {offset:?}");
|
||||||
|
|
||||||
self.cache.insert(
|
self.cache.insert(
|
||||||
fd,
|
fd,
|
||||||
@@ -842,6 +871,7 @@ impl Archiver {
             .await
     }

+    #[allow(clippy::too_many_arguments)]
     async fn add_entry_to_archive<T: SeqWrite + Send>(
         &mut self,
         encoder: &mut Encoder<'_, T>,
@@ -994,7 +1024,7 @@ impl Archiver {
         // do not reuse chunks if introduced padding higher than threshold
         // opt for re-encoding in that case
         if ratio > CHUNK_PADDING_THRESHOLD {
-            log::debug!(
+            debug!(
                 "Padding ratio: {ratio} > {CHUNK_PADDING_THRESHOLD}, padding: {}, total {}, chunks: {}",
                 HumanByte::from(padding),
                 HumanByte::from(total_size),
@@ -1003,7 +1033,7 @@ impl Archiver {
             self.cache.update_last_chunk(prev_last_chunk);
             self.encode_entries_to_archive(encoder, None).await?;
         } else {
-            log::debug!(
+            debug!(
                 "Padding ratio: {ratio} < {CHUNK_PADDING_THRESHOLD}, padding: {}, total {}, chunks: {}",
                 HumanByte::from(padding),
                 HumanByte::from(total_size),
@@ -1039,7 +1069,7 @@ impl Archiver {
     }

     // Take ownership of cached entries and encode them to the archive
-    // Encode with reused payload chunks when base offset is some, reencode otherwise
+    // Encode with reused payload chunks when base offset is some, re-encode otherwise
     async fn encode_entries_to_archive<T: SeqWrite + Send>(
         &mut self,
         encoder: &mut Encoder<'_, T>,
@@ -1054,7 +1084,7 @@ impl Archiver {
         let (entries, start_path) = self.cache.take_and_reset();
         let old_path = self.path.clone();
         self.path = start_path;
-        log::debug!(
+        debug!(
             "Got {} cache entries to encode: reuse is {}",
             entries.len(),
             base_offset.is_some()
@@ -1123,7 +1153,7 @@ impl Archiver {
             let mut size = PayloadOffset::default();

             for chunk in chunks.iter() {
-                log::debug!(
+                debug!(
                     "Injecting chunk with {} padding (chunk size {})",
                     HumanByte::from(chunk.padding),
                     HumanByte::from(chunk.size()),
@@ -1151,7 +1181,7 @@ impl Archiver {
             };

             injection_boundary = injection_boundary.add(size.raw());
-            log::debug!("Advance payload position by: {size:?}");
+            debug!("Advance payload position by: {size:?}");
             encoder.advance(size)?;
         }

@@ -1169,20 +1199,20 @@ impl Archiver {
     ) -> Result<(), Error> {
         let dir_name = OsStr::from_bytes(c_dir_name.to_bytes());

-        if !self.cache.caching_enabled() {
-            if let Some(ref catalog) = self.catalog {
-                catalog.lock().unwrap().start_directory(c_dir_name)?;
-            }
-            encoder.create_directory(dir_name, metadata).await?;
-        }
-
         let old_fs_magic = self.fs_magic;
         let old_fs_feature_flags = self.fs_feature_flags;
         let old_st_dev = self.current_st_dev;

         let mut skip_contents = false;
         if old_st_dev != stat.st_dev {
-            self.fs_magic = detect_fs_type(dir.as_raw_fd())?;
+            match detect_fs_type(dir.as_raw_fd()) {
+                Ok(fs_magic) => self.fs_magic = fs_magic,
+                Err(Errno::ESTALE) => {
+                    self.report_stale_file_handle(None);
+                    return Ok(());
+                }
+                Err(err) => return Err(err.into()),
+            }
             self.fs_feature_flags = Flags::from_magic(self.fs_magic);
             self.current_st_dev = stat.st_dev;

@@ -1193,8 +1223,15 @@ impl Archiver {
             }
         }

+        if !self.cache.caching_enabled() {
+            if let Some(ref catalog) = self.catalog {
+                catalog.lock().unwrap().start_directory(c_dir_name)?;
+            }
+            encoder.create_directory(dir_name, metadata).await?;
+        }
+
         let result = if skip_contents {
-            log::info!("skipping mount point: {:?}", self.path);
+            info!("skipping mount point: {:?}", self.path);
             Ok(())
         } else {
             let mut dir_accessor = None;
@@ -1245,14 +1282,14 @@ impl Archiver {
                 Err(err) => bail!(err),
             };
             if got as u64 > remaining {
-                self.report_file_grew_while_reading()?;
+                self.report_file_grew_while_reading();
                 got = remaining as usize;
             }
             out.write_all(&self.file_copy_buffer[..got]).await?;
             remaining -= got as u64;
         }
         if remaining > 0 {
-            self.report_file_shrunk_while_reading()?;
+            self.report_file_shrunk_while_reading();
             let to_zero = remaining.min(self.file_copy_buffer.len() as u64) as usize;
             vec::clear(&mut self.file_copy_buffer[..to_zero]);
             while remaining != 0 {
@@ -1272,7 +1309,14 @@ impl Archiver {
         file_name: &Path,
         metadata: &Metadata,
     ) -> Result<(), Error> {
-        let dest = nix::fcntl::readlinkat(fd.as_raw_fd(), &b""[..])?;
+        let dest = match nix::fcntl::readlinkat(fd.as_raw_fd(), &b""[..]) {
+            Ok(dest) => dest,
+            Err(Errno::ESTALE) => {
+                self.report_stale_file_handle(None);
+                return Ok(());
+            }
+            Err(err) => return Err(err.into()),
+        };
         encoder.add_symlink(metadata, file_name, dest).await?;
         Ok(())
     }
@@ -57,7 +57,7 @@ impl PxarDir {
         let dir = Dir::openat(
             parent,
             self.file_name.as_os_str(),
-            OFlag::O_DIRECTORY,
+            OFlag::O_DIRECTORY | OFlag::O_CLOEXEC,
             Mode::empty(),
         )?;

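Note: the hunk above adds O_CLOEXEC to the directory open, so the descriptor is closed automatically on exec and cannot leak into spawned child processes. A minimal sketch of the same flag combination, assuming the nix crate (the helper name open_dir_cloexec is made up and not part of the patch):

use nix::dir::Dir;
use nix::fcntl::OFlag;
use nix::sys::stat::Mode;

// Open a directory handle that child processes will not inherit across exec.
fn open_dir_cloexec(path: &str) -> nix::Result<Dir> {
    Dir::open(path, OFlag::O_DIRECTORY | OFlag::O_CLOEXEC, Mode::empty())
}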
|