Mirror of https://git.proxmox.com/git/proxmox-backup (synced 2025-05-03 08:30:17 +00:00)

Compare commits (128 commits)
Commits in this comparison (abbreviated SHA-1 hashes):

58fb448be5, 07a21616c2, cb9814e331, 31dbaf69ab, af5ff86a26, 5fc281cd89, 6c6257b94e, c644f7bc85,
4a022e1a3f, 9247d57fdf, 427c687e35, f9532a3a84, d400673641, cdc710a736, 36ef1b01f7, f91d5912f1,
c08c934c02, 9dfd0657eb, d39f1a4b57, 83e7b9de88, 601a84ae74, 152dc37057, e98e962904, f117dabcf0,
6d193b9a1e, d25ec96c21, 839b7d8c89, f7f61002ee, 266becd156, 37a85cf616, 8a056670ea, a7a28c4d95,
254169f622, 33024ffd43, dfc0278248, 8e50c75fca, 98abd76579, bd95fd5797, bccff939fa, a3815aff82,
d1fd12d82d, 5e778d983a, 4c0583b14e, dc914094c9, 6c774660a7, 6df6d3094c, f1a711c830, 3f1e103904,
f9270de9ef, 40ccd1ac9e, ab5b64fadf, 713fa6ee55, f41a233a8e, 6f9c16d5d4, d93d7a8299, 17f183c40b,
d977da6411, 960149b51e, 074d957169, 8529e79983, 5b0c6a80e5, 029654a61d, a738d2bcc9, 234de23a50,
bf708e8cd7, 3ba907c888, b5ba40095d, daa9d0a9d5, c6a87e340c, bb8e7e2b48, b18eab64a9, 8f6874391f,
b48427720a, 2084fd39c4, d4a2730b1b, b0cd9e84f5, 912c8c4027, 263651912e, 4b26fb2bd7, 70e1ad0efb,
d49a27ede8, f09f2e0d9e, d728c2e836, 7fbe029ceb, 907ba4dd61, 7e15e6039b, 03143eee0a, 74361da855,
c9bd214555, 0b016e1efe, 8d9dc69945, 3fdf8769f4, 320ea1cdb7, 13b15bce11, ed8205e535, 32b5716fa4,
d1c96f69ee, 8210a32613, f2115b04c1, 1599b424cd, 1b9e3cfd18, 940d34b42a, 33d2444eca, 7a3cbd7230,
b60912c65d, 23be00a42c, 04e50855b3, 52e5d52cbd, 27dd73777f, e2c1866b13, 27ba2c0318, b510184e72,
79e9eddf4b, 24a6d4fd82, b693f5d471, 3362a6e049, 7c45cf8c7a, d99c481596, f74978572b, bb408fd151,
54763b39c7, f1dd1e3557, f314078a8d, 7085d270d4, 6565199af4, 168ed37026, 2c9f3a63d5, eba172a492
**Cargo.toml** (22 lines changed)
```diff
@@ -1,5 +1,5 @@
 [workspace.package]
-version = "3.3.4"
+version = "3.4.1"
 authors = [
     "Dietmar Maurer <dietmar@proxmox.com>",
     "Dominik Csapak <d.csapak@proxmox.com>",
@@ -13,7 +13,7 @@ authors = [
 edition = "2021"
 license = "AGPL-3"
 repository = "https://git.proxmox.com/?p=proxmox-backup.git"
-rust-version = "1.80"
+rust-version = "1.81"
 
 [package]
 name = "proxmox-backup"
@@ -62,7 +62,7 @@ proxmox-compression = "0.2"
 proxmox-config-digest = "0.1.0"
 proxmox-daemon = "0.1.0"
 proxmox-fuse = "0.1.3"
-proxmox-http = { version = "0.9.0", features = [ "client", "http-helpers", "websocket" ] } # see below
+proxmox-http = { version = "0.9.5", features = [ "client", "http-helpers", "websocket" ] } # see below
 proxmox-human-byte = "0.1"
 proxmox-io = "1.0.1" # tools and client use "tokio" feature
 proxmox-lang = "1.1"
@@ -71,7 +71,7 @@ proxmox-ldap = "0.2.1"
 proxmox-metrics = "0.3.1"
 proxmox-notify = "0.5.1"
 proxmox-openid = "0.10.0"
-proxmox-rest-server = { version = "0.8.5", features = [ "templates" ] }
+proxmox-rest-server = { version = "0.8.9", features = [ "templates" ] }
 # some use "cli", some use "cli" and "server", pbs-config uses nothing
 proxmox-router = { version = "3.0.0", default-features = false }
 proxmox-rrd = "0.4"
@@ -84,13 +84,13 @@ proxmox-shared-cache = "0.1"
 proxmox-shared-memory = "0.3.0"
 proxmox-sortable-macro = "0.1.2"
 proxmox-subscription = { version = "0.5.0", features = [ "api-types" ] }
-proxmox-sys = "0.6.5"
+proxmox-sys = "0.6.7"
 proxmox-systemd = "0.1"
 proxmox-tfa = { version = "5", features = [ "api", "api-types" ] }
 proxmox-time = "2"
-proxmox-uuid = "1"
+proxmox-uuid = { version = "1", features = [ "serde" ] }
 proxmox-worker-task = "0.1"
-pbs-api-types = "0.2.0"
+pbs-api-types = "0.2.2"
 
 # other proxmox crates
 pathpatterns = "0.3"
@@ -120,14 +120,15 @@ crc32fast = "1"
 const_format = "0.2"
 crossbeam-channel = "0.5"
 endian_trait = { version = "0.6", features = ["arrays"] }
-env_logger = "0.10"
+env_logger = "0.11"
 flate2 = "1.0"
 foreign-types = "0.3"
 futures = "0.3"
-h2 = { version = "0.4", features = [ "stream" ] }
+h2 = { version = "0.4", features = [ "legacy", "stream" ] }
 handlebars = "3.0"
 hex = "0.4.3"
-hyper = { version = "0.14", features = [ "full" ] }
+hickory-resolver = { version = "0.24.1", default-features = false, features = [ "system-config", "tokio-runtime" ] }
+hyper = { version = "0.14", features = [ "backports", "deprecated", "full" ] }
 libc = "0.2"
 log = "0.4.17"
 nix = "0.26.1"
@@ -141,7 +142,6 @@ regex = "1.5.5"
 rustyline = "9"
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
-serde_plain = "1"
 siphasher = "0.3"
 syslog = "6"
 tar = "0.4"
```
**Makefile** (42 lines changed)
```diff
@@ -1,8 +1,10 @@
 include /usr/share/dpkg/default.mk
+include /usr/share/rustc/architecture.mk
 include defines.mk
 
 PACKAGE := proxmox-backup
 ARCH := $(DEB_BUILD_ARCH)
+export DEB_HOST_RUST_TYPE
 
 SUBDIRS := etc www docs templates
 
@@ -36,13 +38,20 @@ SUBCRATES != cargo metadata --no-deps --format-version=1 \
     | grep "$$PWD/" \
     | sed -e "s!.*$$PWD/!!g" -e 's/\#.*$$//g' -e 's/)$$//g'
 
+STATIC_TARGET_DIR := target/static-build
+
 ifeq ($(BUILD_MODE), release)
-CARGO_BUILD_ARGS += --release
+CARGO_BUILD_ARGS += --release --target $(DEB_HOST_RUST_TYPE)
 COMPILEDIR := target/$(DEB_HOST_RUST_TYPE)/release
+STATIC_COMPILEDIR := $(STATIC_TARGET_DIR)/$(DEB_HOST_RUST_TYPE)/release
 else
+CARGO_BUILD_ARGS += --target $(DEB_HOST_RUST_TYPE)
 COMPILEDIR := target/$(DEB_HOST_RUST_TYPE)/debug
+STATIC_COMPILEDIR := $(STATIC_TARGET_DIR)/$(DEB_HOST_RUST_TYPE)/debug
 endif
 
+STATIC_RUSTC_FLAGS := -C target-feature=+crt-static -L $(STATIC_COMPILEDIR)/deps-stubs/
+
 ifeq ($(valgrind), yes)
 CARGO_BUILD_ARGS += --features valgrind
 endif
@@ -52,6 +61,9 @@ CARGO ?= cargo
 COMPILED_BINS := \
     $(addprefix $(COMPILEDIR)/,$(USR_BIN) $(USR_SBIN) $(SERVICE_BIN) $(RESTORE_BIN))
 
+STATIC_BINS := \
+    $(addprefix $(STATIC_COMPILEDIR)/,proxmox-backup-client-static pxar-static)
+
 export DEB_VERSION DEB_VERSION_UPSTREAM
 
 SERVER_DEB=$(PACKAGE)-server_$(DEB_VERSION)_$(ARCH).deb
@@ -60,10 +72,12 @@ CLIENT_DEB=$(PACKAGE)-client_$(DEB_VERSION)_$(ARCH).deb
 CLIENT_DBG_DEB=$(PACKAGE)-client-dbgsym_$(DEB_VERSION)_$(ARCH).deb
 RESTORE_DEB=proxmox-backup-file-restore_$(DEB_VERSION)_$(ARCH).deb
 RESTORE_DBG_DEB=proxmox-backup-file-restore-dbgsym_$(DEB_VERSION)_$(ARCH).deb
+STATIC_CLIENT_DEB=$(PACKAGE)-client-static_$(DEB_VERSION)_$(ARCH).deb
+STATIC_CLIENT_DBG_DEB=$(PACKAGE)-client-static-dbgsym_$(DEB_VERSION)_$(ARCH).deb
 DOC_DEB=$(PACKAGE)-docs_$(DEB_VERSION)_all.deb
 
 DEBS=$(SERVER_DEB) $(SERVER_DBG_DEB) $(CLIENT_DEB) $(CLIENT_DBG_DEB) \
-    $(RESTORE_DEB) $(RESTORE_DBG_DEB)
+    $(RESTORE_DEB) $(RESTORE_DBG_DEB) $(STATIC_CLIENT_DEB) $(STATIC_CLIENT_DBG_DEB)
 
 DSC = rust-$(PACKAGE)_$(DEB_VERSION).dsc
 
@@ -71,7 +85,7 @@ DESTDIR=
 
 tests ?= --workspace
 
-all: $(SUBDIRS)
+all: proxmox-backup-client-static $(SUBDIRS)
 
 .PHONY: $(SUBDIRS)
 $(SUBDIRS):
@@ -141,7 +155,7 @@ clean: clean-deb
     $(foreach i,$(SUBDIRS), \
         $(MAKE) -C $(i) clean ;)
     $(CARGO) clean
-    rm -f .do-cargo-build
+    rm -f .do-cargo-build .do-static-cargo-build
 
 # allows one to avoid running cargo clean when one just wants to tidy up after a package build
 clean-deb:
@@ -190,12 +204,25 @@ $(COMPILED_BINS) $(COMPILEDIR)/dump-catalog-shell-cli $(COMPILEDIR)/docgen: .do-
         --bin sg-tape-cmd
     touch "$@"
 
+.PHONY: proxmox-backup-client-static
+proxmox-backup-client-static:
+    rm -f .do-static-cargo-build
+    $(MAKE) $(STATIC_BINS)
+
+$(STATIC_BINS): .do-static-cargo-build
+.do-static-cargo-build:
+    mkdir -p $(STATIC_COMPILEDIR)/deps-stubs/ && \
+      echo '!<arch>' > $(STATIC_COMPILEDIR)/deps-stubs/libsystemd.a # workaround for to greedy linkage and proxmox-systemd
+    $(CARGO) rustc $(CARGO_BUILD_ARGS) --package pxar-bin --bin pxar \
+        --target-dir $(STATIC_TARGET_DIR) -- $(STATIC_RUSTC_FLAGS)
+    $(CARGO) rustc $(CARGO_BUILD_ARGS) --package proxmox-backup-client --bin proxmox-backup-client \
+        --target-dir $(STATIC_TARGET_DIR) -- $(STATIC_RUSTC_FLAGS)
+
 .PHONY: lint
 lint:
     cargo clippy -- -A clippy::all -D clippy::correctness
 
-install: $(COMPILED_BINS)
+install: $(COMPILED_BINS) $(STATIC_BINS)
     install -dm755 $(DESTDIR)$(BINDIR)
     install -dm755 $(DESTDIR)$(ZSH_COMPL_DEST)
     $(foreach i,$(USR_BIN), \
@@ -214,16 +241,19 @@ install: $(COMPILED_BINS)
     install -m4755 -o root -g root $(COMPILEDIR)/sg-tape-cmd $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/sg-tape-cmd
     $(foreach i,$(SERVICE_BIN), \
         install -m755 $(COMPILEDIR)/$(i) $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/ ;)
+    install -m755 $(STATIC_COMPILEDIR)/proxmox-backup-client $(DESTDIR)$(BINDIR)/proxmox-backup-client-static
+    install -m755 $(STATIC_COMPILEDIR)/pxar $(DESTDIR)$(BINDIR)/pxar-static
     $(MAKE) -C www install
     $(MAKE) -C docs install
     $(MAKE) -C templates install
 
 .PHONY: upload
 upload: UPLOAD_DIST ?= $(DEB_DISTRIBUTION)
-upload: $(SERVER_DEB) $(CLIENT_DEB) $(RESTORE_DEB) $(DOC_DEB)
+upload: $(SERVER_DEB) $(CLIENT_DEB) $(RESTORE_DEB) $(DOC_DEB) $(STATIC_CLIENT_DEB)
     # check if working directory is clean
     git diff --exit-code --stat && git diff --exit-code --stat --staged
     tar cf - $(SERVER_DEB) $(SERVER_DBG_DEB) $(DOC_DEB) $(CLIENT_DEB) $(CLIENT_DBG_DEB) \
      | ssh -X repoman@repo.proxmox.com upload --product pbs --dist $(UPLOAD_DIST)
    tar cf - $(CLIENT_DEB) $(CLIENT_DBG_DEB) | ssh -X repoman@repo.proxmox.com upload --product "pve,pmg,pbs-client" --dist $(UPLOAD_DIST)
+    tar cf - $(STATIC_CLIENT_DEB) $(STATIC_CLIENT_DBG_DEB) | ssh -X repoman@repo.proxmox.com upload --product "pbs-client" --dist $(UPLOAD_DIST)
     tar cf - $(RESTORE_DEB) $(RESTORE_DBG_DEB) | ssh -X repoman@repo.proxmox.com upload --product "pve" --dist $(UPLOAD_DIST)
```
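The new `proxmox-backup-client-static` target builds the client and `pxar` with `-C target-feature=+crt-static`, producing fully static binaries. A quick way to sanity-check such a build on the build host is sketched below; this is only an illustration, and the exact path depends on `DEB_HOST_RUST_TYPE` (shown here for x86_64 as an assumption):

```console
# file target/static-build/x86_64-unknown-linux-gnu/release/proxmox-backup-client
...: ELF 64-bit LSB executable, x86-64, ... statically linked ...
# ldd target/static-build/x86_64-unknown-linux-gnu/release/proxmox-backup-client
        not a dynamic executable
```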
**debian/changelog** (vendored, 184 lines changed)
```diff
@@ -1,3 +1,187 @@
+rust-proxmox-backup (3.4.1-1) bookworm; urgency=medium
+
+  * ui: token view: fix typo in 'lose' and rephrase token regenerate dialog
+    message for more clarity.
+
+  * restrict consent-banner text length to 64 KiB.
+
+  * docs: describe the intend for the statically linked pbs client.
+
+  * api: backup: include previous snapshot name in log message.
+
+  * garbage collection: account for created/deleted index files concurrently
+    to GC to avoid potentially confusing log messages.
+
+  * garbage collection: fix rare race in chunk marking phase for setups doing
+    high frequent backups in quick succession while immediately pruning to a
+    single backup snapshot being left over after each such backup.
+
+  * tape: wait for calibration of LTO-9 tapes in general, not just in the
+    initial tape format procedure.
+
+ -- Proxmox Support Team <support@proxmox.com>  Wed, 16 Apr 2025 14:45:37 +0200
+
+rust-proxmox-backup (3.4.0-1) bookworm; urgency=medium
+
+  * fix #4788: build statically linked version of the proxmox-backup-client
+    package.
+
+  * ui: sync job: change the rate limit direction based on sync direction.
+
+  * docs: mention how to set the push sync jobs rate limit
+
+  * ui: set error mask: ensure that message is html-encoded to avoid visual
+    glitches.
+
+  * api server: increase maximal request body size fro 64 kiB to 512 kiB,
+    similar to a recent change for our perl based projects.
+
+  * notifications: include Content-Length header for broader compatibility in
+    the webhook and gotify targets.
+
+  * notifications: allow overriding notification templates.
+
+  * docs: notifications: add section about how to use custom templates
+
+  * sync: print whole error chain per group on failure for more context.
+
+  * ui: options-view: fix typo in empty-text for GC tuning option.
+
+  * memory info: use the "MemAvailable" field from '/proc/meminfo' to compute
+    used memory to fix overestimation of that metric and to better align with
+    what modern versions of tools like `free` do and to future proof against
+    changes in how the kernel accounts memory usage for.
+
+  * add "MemAvailable" field to ProcFsMemInfo to promote its usage over the
+    existing "MemFree" field, which is almost never the right choice. This new
+    field will be provided for external metric server.
+
+  * docs: mention different name resolution for statically linked binary.
+
+  * docs: add basic info for how to install the statically linked client.
+
+  * docs: mention new verify-only and encrypted-only flags for sync jobs.
+
+ -- Proxmox Support Team <support@proxmox.com>  Wed, 09 Apr 2025 17:41:38 +0200
+
+rust-proxmox-backup (3.3.7-1) bookworm; urgency=medium
+
+  * fix #5982: garbage collection: add a check to ensure that the underlying
+    file system supports and honors file access time (atime) updates.
+    The check is performed once on datastore creation and on start of every
+    garbage collection (GC) task, just to be sure. It can be disabled in the
+    datastore tuning options.
+
+  * garbage collection: support setting a custom access time cutoff,
+    overriding the default of one day and five minutes.
+
+  * ui: expose flag for GC access time support safety check and the access
+    time cutoff override in datastore tuning options.
+
+  * docs: describe rationale for new GC access time update check setting and
+    the access time cutoff check in tuning options.
+
+  * access control: add support to mark a specific authentication realm as
+    default selected realm for the login user interface.
+
+  * fix #4382: api: access control: remove permissions of token on deletion.
+
+  * fix #3887: api: access control: allow users to regenerate the secret of an
+    API token without changing any existing ACLs.
+
+  * fix #6072: sync jobs: support flags to limit sync to only encrypted and/or
+    verified snapshots.
+
+  * ui: datastore tuning options: expose overriding GC cache capacity so that
+    admins can either restrict the peak memory usage during GC or allow GC to
+    use more memory to reduce file system IO even for huge (multiple TiB)
+    referenced data in backup groups.
+
+  * ui: datastore tuning options: increase width and rework labels to provide
+    a tiny bit more context about what these options are.
+
+  * ui: sync job: increase edit window width to 720px to make it less cramped.
+
+  * ui: sync job: small field label casing consistency fixes.
+
+ -- Proxmox Support Team <support@proxmox.com>  Sat, 05 Apr 2025 17:54:31 +0200
+
+rust-proxmox-backup (3.3.6-1) bookworm; urgency=medium
+
+  * datastore: ignore group locking errors when removing snapshots, they
+    normally happen only due to old-locking, and the underlying snapshot is
+    deleted in any case at this point, so it's no help to confuse the user.
+
+  * api: datastore: add error message on failed removal due to old locking and
+    tell any admin what they can do to switch to the new locking.
+
+  * ui: only add delete parameter on token edit, not when creating tokens.
+
+  * pbs-client: allow reading fingerprint from system credential.
+
+  * docs: client: add section about system credentials integration.
+
+ -- Proxmox Support Team <support@proxmox.com>  Thu, 03 Apr 2025 17:57:02 +0200
+
+rust-proxmox-backup (3.3.5-1) bookworm; urgency=medium
+
+  * api: config: use guard for unmounting on failed datastore creation
+
+  * client: align description for backup specification to docs, using
+    `archive-name` and `type` over `label` and `ext`.
+
+  * client: read credentials from CREDENTIALS_DIRECTORY environment variable
+    following the "System and Service Credentials" specification. This allows
+    users to use native systemd capabilities for credential management if the
+    proxmox-backup-client is used in systemd units or, e.g., through a wrapper
+    like systemd-run.
+
+  * fix #3935: datastore/api/backup: move datastore locking to '/run' to avoid
+    that lock-files can block deleting backup groups or snapshots on the
+    datastore and to decouple locking from the underlying datastore
+    file-system.
+
+  * api: fix race when changing the owner of a backup-group.
+
+  * fix #3336: datastore: remove group if the last snapshot is removed to
+    avoid confusing situations where the group directory still exists and
+    blocks re-creating a group with another owner even though the empty group
+    was not visible in the web UI.
+
+  * notifications: clean-up and add dedicated types for all templates as to
+    allow declaring that interface stable in preparation for allowing
+    overriding them in the future (not included in this release).
+
+  * tape: introduce a tape backup job worker-thread option for restores.
+    Depending on the underlying storage using more threads can dramatically
+    improve the restore speed. Especially fast storage with low penalty for
+    random access, like flash-storage (SSDs) can profit from using more
+    worker threads. But on file systems backed by spinning disks (HDDs) the
+    performance can even degrade with more threads. This is why for now the
+    default is left at a single thread and the admin needs to tune this for
+    their storage.
+
+  * garbage collection: generate index file list via datastore iterators in a
+    structured manner.
+
+  * fix #5331: garbage collection: avoid multiple chunk atime updates by
+    keeping track of the recently marked chunks in phase 1 of garbage to avoid
+    multiple atime updates via relatively expensive utimensat (touch) calls.
+    Use a LRU cache with size 32 MiB for tracking already processed chunks,
+    this fully covers backup groups referencing up to 4 TiB of actual chunks
+    and even bigger ones can still benefit from the cache. On some real-world
+    benchmarks of a datastore with 1.5 million chunks, and original data
+    usage of 120 TiB and a referenced data usage of 2.7 TiB (high
+    deduplication count due to long-term history) we measured 21.1 times less
+    file updates (31.6 million) and a 6.1 times reduction in total GC runtime
+    (155.4 s to 22.8 s) on a ZFS RAID 10 system consisting of spinning HDDs
+    and a special device mirror backed by datacenter SSDs.
+
+  * logging helper: use new builder initializer – not functional change
+    intended.
+
+ -- Proxmox Support Team <support@proxmox.com>  Wed, 02 Apr 2025 19:42:38 +0200
+
 rust-proxmox-backup (3.3.4-1) bookworm; urgency=medium
 
   * fix #6185: client/docs: explicitly mention archive name restrictions
```
**debian/control** (vendored, 42 lines changed)
```diff
@@ -25,15 +25,17 @@ Build-Depends: bash-completion,
 librust-crossbeam-channel-0.5+default-dev,
 librust-endian-trait-0.6+arrays-dev,
 librust-endian-trait-0.6+default-dev,
-librust-env-logger-0.10+default-dev,
+librust-env-logger-0.11+default-dev,
 librust-foreign-types-0.3+default-dev,
 librust-futures-0.3+default-dev,
 librust-h2-0.4+default-dev,
+librust-h2-0.4+legacy-dev,
 librust-h2-0.4+stream-dev,
 librust-hex-0.4+default-dev (>= 0.4.3-~~),
 librust-hex-0.4+serde-dev (>= 0.4.3-~~),
-librust-http-0.2+default-dev,
+librust-hyper-0.14+backports-dev,
 librust-hyper-0.14+default-dev,
+librust-hyper-0.14+deprecated-dev,
 librust-hyper-0.14+full-dev,
 librust-libc-0.2+default-dev,
 librust-log-0.4+default-dev (>= 0.4.17-~~),
@@ -43,7 +45,7 @@ Build-Depends: bash-completion,
 librust-once-cell-1+default-dev (>= 1.3.1-~~),
 librust-openssl-0.10+default-dev (>= 0.10.40-~~),
 librust-pathpatterns-0.3+default-dev,
-librust-pbs-api-types-0.2+default-dev,
+librust-pbs-api-types-0.2+default-dev (>= 0.2.2),
 librust-percent-encoding-2+default-dev (>= 2.1-~~),
 librust-pin-project-lite-0.2+default-dev,
 librust-proxmox-acme-0.5+default-dev (>= 0.5.3-~~),
@@ -52,7 +54,6 @@ Build-Depends: bash-completion,
 librust-proxmox-apt-api-types-1+default-dev (>= 1.0.1-~~),
 librust-proxmox-async-0.4+default-dev,
 librust-proxmox-auth-api-0.4+api-dev,
-librust-proxmox-auth-api-0.4+api-types-dev,
 librust-proxmox-auth-api-0.4+default-dev,
 librust-proxmox-auth-api-0.4+pam-authenticator-dev,
 librust-proxmox-borrow-1+default-dev,
@@ -60,14 +61,14 @@ Build-Depends: bash-completion,
 librust-proxmox-config-digest-0.1+default-dev,
 librust-proxmox-daemon-0.1+default-dev,
 librust-proxmox-fuse-0.1+default-dev (>= 0.1.3-~~),
-librust-proxmox-http-0.9+client-dev,
-librust-proxmox-http-0.9+client-trait-dev,
-librust-proxmox-http-0.9+default-dev,
-librust-proxmox-http-0.9+http-helpers-dev,
-librust-proxmox-http-0.9+proxmox-async-dev,
-librust-proxmox-http-0.9+rate-limited-stream-dev,
-librust-proxmox-http-0.9+rate-limiter-dev,
-librust-proxmox-http-0.9+websocket-dev,
+librust-proxmox-http-0.9+client-dev (>= 0.9.5-~~),
+librust-proxmox-http-0.9+client-trait-dev (>= 0.9.5-~~),
+librust-proxmox-http-0.9+default-dev (>= 0.9.5-~~),
+librust-proxmox-http-0.9+http-helpers-dev (>= 0.9.5-~~),
+librust-proxmox-http-0.9+proxmox-async-dev (>= 0.9.5-~~),
+librust-proxmox-http-0.9+rate-limited-stream-dev (>= 0.9.5-~~),
+librust-proxmox-http-0.9+rate-limiter-dev (>= 0.9.5-~~),
+librust-proxmox-http-0.9+websocket-dev (>= 0.9.5-~~),
 librust-proxmox-human-byte-0.1+default-dev,
 librust-proxmox-io-1+default-dev (>= 1.0.1-~~),
 librust-proxmox-io-1+tokio-dev (>= 1.0.1-~~),
@@ -78,9 +79,9 @@ Build-Depends: bash-completion,
 librust-proxmox-notify-0.5+default-dev (>= 0.5.1-~~),
 librust-proxmox-notify-0.5+pbs-context-dev (>= 0.5.1-~~),
 librust-proxmox-openid-0.10+default-dev,
-librust-proxmox-rest-server-0.8+default-dev (>= 0.8.5-~~),
-librust-proxmox-rest-server-0.8+rate-limited-stream-dev (>= 0.8.5-~~),
-librust-proxmox-rest-server-0.8+templates-dev (>= 0.8.5-~~),
+librust-proxmox-rest-server-0.8+default-dev (>= 0.8.9-~~),
+librust-proxmox-rest-server-0.8+rate-limited-stream-dev (>= 0.8.9-~~),
+librust-proxmox-rest-server-0.8+templates-dev (>= 0.8.9-~~),
 librust-proxmox-router-3+cli-dev,
 librust-proxmox-router-3+server-dev,
 librust-proxmox-rrd-0.4+default-dev,
@@ -97,7 +98,7 @@ Build-Depends: bash-completion,
 librust-proxmox-subscription-0.5+default-dev,
 librust-proxmox-sys-0.6+acl-dev (>= 0.6.5-~~),
 librust-proxmox-sys-0.6+crypt-dev (>= 0.6.5-~~),
-librust-proxmox-sys-0.6+default-dev (>= 0.6.5-~~),
+librust-proxmox-sys-0.6+default-dev (>= 0.6.7-~~),
 librust-proxmox-sys-0.6+logrotate-dev (>= 0.6.5-~~),
 librust-proxmox-sys-0.6+timer-dev (>= 0.6.5-~~),
 librust-proxmox-systemd-0.1+default-dev,
@@ -114,7 +115,6 @@ Build-Depends: bash-completion,
 librust-serde-1+default-dev,
 librust-serde-1+derive-dev,
 librust-serde-json-1+default-dev,
-librust-serde-plain-1+default-dev,
 librust-syslog-6+default-dev,
 librust-tar-0.4+default-dev,
 librust-termcolor-1+default-dev (>= 1.1.2-~~),
@@ -205,6 +205,14 @@ Description: Proxmox Backup Client tools
  This package contains the Proxmox Backup client, which provides a
  simple command line tool to create and restore backups.
 
+Package: proxmox-backup-client-static
+Architecture: any
+Depends: qrencode, ${misc:Depends},
+Conflicts: proxmox-backup-client,
+Description: Proxmox Backup Client tools (statically linked)
+ This package contains the Proxmox Backup client, which provides a
+ simple command line tool to create and restore backups.
+
 Package: proxmox-backup-docs
 Build-Profiles: <!nodoc>
 Section: doc
```
**debian/postinst** (vendored, 15 lines changed)
```diff
@@ -20,15 +20,7 @@ case "$1" in
         # modeled after dh_systemd_start output
         systemctl --system daemon-reload >/dev/null || true
         if [ -n "$2" ]; then
-            if dpkg --compare-versions "$2" 'lt' '1.0.7-1'; then
-                # there was an issue with reloading and systemd being confused in older daemon versions
-                # so restart instead of reload if upgrading from there, see commit 0ec79339f7aebf9
-                # FIXME: remove with PBS 2.1
-                echo "Upgrading from older proxmox-backup-server: restart (not reload) daemons"
-                _dh_action=try-restart
-            else
-                _dh_action=try-reload-or-restart
-            fi
+            _dh_action=try-reload-or-restart
         else
             _dh_action=start
         fi
@@ -80,6 +72,11 @@ EOF
                 update_sync_job "$prev_job"
             fi
         fi
+
+        if dpkg --compare-versions "$2" 'lt' '3.3.5~'; then
+            # ensure old locking is used by the daemon until a reboot happened
+            touch "/run/proxmox-backup/old-locking"
+        fi
     fi
     ;;
 
```
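The new conditional relies on dpkg's version comparison to detect upgrades from releases older than 3.3.5 (which still use the old locking scheme). A small sketch of how that check behaves; the version strings below are examples only:

```console
# dpkg --compare-versions "3.3.4-1" 'lt' '3.3.5~' && echo "upgrade from pre-3.3.5, keep old locking"
upgrade from pre-3.3.5, keep old locking
# dpkg --compare-versions "3.4.1-1" 'lt' '3.3.5~' || echo "new locking can be used"
new locking can be used
```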
**debian/proxmox-backup-client-static.bash-completion** (vendored, new file, 2 lines)
```diff
@@ -0,0 +1,2 @@
+debian/proxmox-backup-client.bc proxmox-backup-client
+debian/pxar.bc pxar
```
**debian/proxmox-backup-client-static.install** (vendored, new file, 4 lines)
```diff
@@ -0,0 +1,4 @@
+usr/share/man/man1/proxmox-backup-client.1
+usr/share/man/man1/pxar.1
+usr/share/zsh/vendor-completions/_proxmox-backup-client
+usr/share/zsh/vendor-completions/_pxar
```
**debian/proxmox-backup-server.install** (vendored, 3 lines changed)
```diff
@@ -34,13 +34,13 @@ usr/share/man/man5/media-pool.cfg.5
 usr/share/man/man5/notifications-priv.cfg.5
 usr/share/man/man5/notifications.cfg.5
 usr/share/man/man5/proxmox-backup.node.cfg.5
+usr/share/man/man5/prune.cfg.5
 usr/share/man/man5/remote.cfg.5
 usr/share/man/man5/sync.cfg.5
 usr/share/man/man5/tape-job.cfg.5
 usr/share/man/man5/tape.cfg.5
 usr/share/man/man5/user.cfg.5
 usr/share/man/man5/verification.cfg.5
-usr/share/man/man5/prune.cfg.5
 usr/share/proxmox-backup/templates/default/acme-err-body.txt.hbs
 usr/share/proxmox-backup/templates/default/acme-err-subject.txt.hbs
 usr/share/proxmox-backup/templates/default/gc-err-body.txt.hbs
@@ -63,7 +63,6 @@ usr/share/proxmox-backup/templates/default/tape-backup-ok-body.txt.hbs
 usr/share/proxmox-backup/templates/default/tape-backup-ok-subject.txt.hbs
 usr/share/proxmox-backup/templates/default/tape-load-body.txt.hbs
 usr/share/proxmox-backup/templates/default/tape-load-subject.txt.hbs
-usr/share/proxmox-backup/templates/default/test-body.html.hbs
 usr/share/proxmox-backup/templates/default/test-body.txt.hbs
 usr/share/proxmox-backup/templates/default/test-subject.txt.hbs
 usr/share/proxmox-backup/templates/default/verify-err-body.txt.hbs
```
**debian/rules** (vendored, 3 lines changed)
```diff
@@ -47,6 +47,9 @@ override_dh_auto_install:
     dh_auto_install -- \
         PROXY_USER=backup \
         LIBDIR=/usr/lib/$(DEB_HOST_MULTIARCH)
+    mkdir -p debian/proxmox-backup-client-static/usr/bin
+    mv debian/tmp/usr/bin/proxmox-backup-client-static debian/proxmox-backup-client-static/usr/bin/proxmox-backup-client
+    mv debian/tmp/usr/bin/pxar-static debian/proxmox-backup-client-static/usr/bin/pxar
 
 override_dh_installsystemd:
     dh_installsystemd -pproxmox-backup-server proxmox-backup-daily-update.timer
```
**docs: Backup Client Usage**

```diff
@@ -1,3 +1,5 @@
+.. _client_usage:
+
 Backup Client Usage
 ===================
 
@@ -44,6 +46,24 @@ user\@pbs!token@host:store ``user@pbs!token`` host:8007 store
 [ff80::51]:1234:mydatastore ``root@pam`` [ff80::51]:1234 mydatastore
 ================================ ================== ================== ===========
 
+.. _statically_linked_client:
+
+Statically Linked Backup Client
+-------------------------------
+
+A statically linked version of the Proxmox Backup client is available for Linux
+based systems where the regular client is not available. Please note that it is
+recommended to use the regular client when possible, as the statically linked
+client is not a full replacement. For example, name resolution will not be
+performed via the mechanisms provided by libc, but uses a resolver written
+purely in the Rust programming language. Therefore, features and modules
+provided by Name Service Switch cannot be used.
+
+The statically linked client is available via the ``pbs-client`` repository as
+described in the :ref:`installation <install_pbc>` section.
+
+.. _environment-variables:
+
 Environment Variables
 ---------------------
 
@@ -89,6 +109,43 @@ Environment Variables
   you can add arbitrary comments after the first newline.
 
+
+System and Service Credentials
+------------------------------
+
+Some of the :ref:`environment variables <environment-variables>` above can be
+set using `system and service credentials <https://systemd.io/CREDENTIALS/>`_
+instead.
+
+============================ ==============================================
+Environment Variable         Credential Name Equivalent
+============================ ==============================================
+``PBS_REPOSITORY``           ``proxmox-backup-client.repository``
+``PBS_PASSWORD``             ``proxmox-backup-client.password``
+``PBS_ENCRYPTION_PASSWORD``  ``proxmox-backup-client.encryption-password``
+``PBS_FINGERPRINT``          ``proxmox-backup-client.fingerprint``
+============================ ==============================================
+
+For example, the repository password can be stored in an encrypted file as
+follows:
+
+.. code-block:: console
+
+  # systemd-ask-password -n | systemd-creds encrypt --name=proxmox-backup-client.password - my-api-token.cred
+
+The credential can then be reused inside of unit files or in a transient scope
+unit as follows:
+
+.. code-block:: console
+
+  # systemd-run --pipe --wait \
+    --property=LoadCredentialEncrypted=proxmox-backup-client.password:/full/path/to/my-api-token.cred \
+    --property=SetCredential=proxmox-backup-client.repository:'my_default_repository' \
+    proxmox-backup-client ...
+
+Additionally, system credentials (e.g. passed down from the hypervisor to a
+virtual machine via SMBIOS type 11) can be loaded on a service via
+`LoadCredential=` as described in the manual page ``systemd.exec(5)``.
+
 Output Format
 -------------
 
```
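The ``proxmox-backup-client.fingerprint`` credential shown in the table above follows the same pattern as the other credentials; a hedged sketch of passing repository and fingerprint as plain-text credentials to a transient unit, where the repository string and the fingerprint value are placeholders:

```console
# systemd-run --pipe --wait \
    --property=SetCredential=proxmox-backup-client.repository:'user@pbs@server:store' \
    --property=SetCredential=proxmox-backup-client.fingerprint:'64:d3:ff:...' \
    proxmox-backup-client ...
```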
**docs: Client Installation**

```diff
@@ -138,7 +138,26 @@ you need to run:
   # apt update
   # apt install proxmox-backup-client
 
-.. note:: The client-only repository should be usable by most recent Debian and
-   Ubuntu derivatives.
+Install Statically Linked Proxmox Backup Client
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Proxmox provides a statically linked build of the Proxmox backup client that
+should run on any modern x86-64 Linux system.
+
+It is currently available as a Debian package. After configuring the
+:ref:`package_repositories_client_only_apt`, you need to run:
+
+.. code-block:: console
+
+  # apt update
+  # apt install proxmox-backup-client-static
+
+This package conflicts with the `proxmox-backup-client` package, as both
+provide the client as an executable in the `/usr/bin/proxmox-backup-client`
+path.
+
+You can copy this executable to other, e.g. non-Debian based Linux systems.
+
+For details on using the Proxmox Backup Client, see :ref:`client_usage`.
+
 .. include:: package-repositories.rst
```
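Since the documentation states the executable can simply be copied to non-Debian systems, one practical route is to extract it from the package without installing it; a sketch, where the package file name and the target host are placeholders:

```console
# dpkg-deb -x proxmox-backup-client-static_3.4.1-1_amd64.deb /tmp/pbs-client-static
# scp /tmp/pbs-client-static/usr/bin/proxmox-backup-client root@other-host:/usr/local/bin/
```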
**docs: Sync Jobs**

```diff
@@ -72,6 +72,10 @@ either start it manually from the GUI or provide it with a schedule (see
 Backup snapshots, groups and namespaces which are no longer available on the
 **Remote** datastore can be removed from the local datastore as well by setting
 the ``remove-vanished`` option for the sync job.
+Setting the ``verified-only`` or ``encrypted-only`` flags allows to limit the
+sync jobs to backup snapshots which have been verified or encrypted,
+respectively. This is particularly of interest when sending backups to a less
+trusted remote backup server.
 
 .. code-block:: console
 
@@ -227,13 +231,16 @@ Bandwidth Limit
 
 Syncing a datastore to an archive can produce a lot of traffic and impact other
 users of the network. In order to avoid network or storage congestion, you can
-limit the bandwidth of the sync job by setting the ``rate-in`` option either in
-the web interface or using the ``proxmox-backup-manager`` command-line tool:
+limit the bandwidth of a sync job in pull direction by setting the ``rate-in``
+option either in the web interface or using the ``proxmox-backup-manager``
+command-line tool:
 
 .. code-block:: console
 
   # proxmox-backup-manager sync-job update ID --rate-in 20MiB
 
+For sync jobs in push direction use the ``rate-out`` option instead.
+
 Sync Direction Push
 ^^^^^^^^^^^^^^^^^^^
 
```
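Following the same pattern as the ``rate-in`` example above, limiting a push sync job would look like this (a sketch; the job ID and limit value are placeholders):

```console
# proxmox-backup-manager sync-job update ID --rate-out 20MiB
```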
@ -7,26 +7,25 @@ Overview
|
|||||||
--------
|
--------
|
||||||
|
|
||||||
* Proxmox Backup Server emits :ref:`notification_events` in case of noteworthy
|
* Proxmox Backup Server emits :ref:`notification_events` in case of noteworthy
|
||||||
events in the system. These events are handled by the notification system.
|
events in the system. These events are handled by the notification system. A
|
||||||
A notification event has metadata, for example a timestamp, a severity level,
|
notification event has metadata, for example a timestamp, a severity level, a
|
||||||
a type and other metadata fields.
|
type and other metadata fields.
|
||||||
* :ref:`notification_matchers` route a notification event to one or more notification
|
* :ref:`notification_matchers` route a notification event to one or more
|
||||||
targets. A matcher can have match rules to selectively route based on the metadata
|
notification targets. A matcher can have match rules to selectively route
|
||||||
of a notification event.
|
based on the metadata of a notification event.
|
||||||
* :ref:`notification_targets` are a destination to which a notification event
|
* :ref:`notification_targets` are a destination to which a notification event
|
||||||
is routed to by a matcher. There are multiple types of target, mail-based
|
is routed to by a matcher. There are multiple types of target, mail-based
|
||||||
(Sendmail and SMTP) and Gotify.
|
(Sendmail and SMTP) and Gotify.
|
||||||
|
|
||||||
Datastores and tape backup jobs have a configurable :ref:`notification_mode`.
|
Datastores and tape backup jobs have a configurable :ref:`notification_mode`.
|
||||||
It allows you to choose between the notification system and a legacy mode
|
It allows you to choose between the notification system and a legacy mode for
|
||||||
for sending notification emails. The legacy mode is equivalent to the
|
sending notification emails. The legacy mode is equivalent to the way
|
||||||
way notifications were handled before Proxmox Backup Server 3.2.
|
notifications were handled before Proxmox Backup Server 3.2.
|
||||||
|
|
||||||
The notification system can be configured in the GUI under
|
The notification system can be configured in the GUI under *Configuration →
|
||||||
*Configuration → Notifications*. The configuration is stored in
|
Notifications*. The configuration is stored in :ref:`notifications.cfg` and
|
||||||
:ref:`notifications.cfg` and :ref:`notifications_priv.cfg` -
|
:ref:`notifications_priv.cfg` - the latter contains sensitive configuration
|
||||||
the latter contains sensitive configuration options such as
|
options such as passwords or authentication tokens for notification targets and
|
||||||
passwords or authentication tokens for notification targets and
|
|
||||||
can only be read by ``root``.
|
can only be read by ``root``.
|
||||||
|
|
||||||
.. _notification_targets:
|
.. _notification_targets:
|
||||||
@ -41,22 +40,23 @@ Proxmox Backup Server offers multiple types of notification targets.
|
|||||||
Sendmail
|
Sendmail
|
||||||
^^^^^^^^
|
^^^^^^^^
|
||||||
The sendmail binary is a program commonly found on Unix-like operating systems
|
The sendmail binary is a program commonly found on Unix-like operating systems
|
||||||
that handles the sending of email messages.
|
that handles the sending of email messages. It is a command-line utility that
|
||||||
It is a command-line utility that allows users and applications to send emails
|
allows users and applications to send emails directly from the command line or
|
||||||
directly from the command line or from within scripts.
|
from within scripts.
|
||||||
|
|
||||||
The sendmail notification target uses the ``sendmail`` binary to send emails to a
|
The sendmail notification target uses the ``sendmail`` binary to send emails to
|
||||||
list of configured users or email addresses. If a user is selected as a recipient,
|
a list of configured users or email addresses. If a user is selected as a
|
||||||
the email address configured in user's settings will be used.
|
recipient, the email address configured in user's settings will be used. For
|
||||||
For the ``root@pam`` user, this is the email address entered during installation.
|
the ``root@pam`` user, this is the email address entered during installation. A
|
||||||
A user's email address can be configured in ``Configuration -> Access Control -> User Management``.
|
user's email address can be configured in ``Configuration → Access Control →
|
||||||
If a user has no associated email address, no email will be sent.
|
User Management``. If a user has no associated email address, no email will be
|
||||||
|
sent.
|
||||||
|
|
||||||
.. NOTE:: In standard Proxmox Backup Server installations, the ``sendmail`` binary is provided by
|
.. NOTE:: In standard Proxmox Backup Server installations, the ``sendmail``
|
||||||
Postfix. It may be necessary to configure Postfix so that it can deliver
|
binary is provided by Postfix. It may be necessary to configure Postfix so
|
||||||
mails correctly - for example by setting an external mail relay (smart host).
|
that it can deliver mails correctly - for example by setting an external
|
||||||
In case of failed delivery, check the system logs for messages logged by
|
mail relay (smart host). In case of failed delivery, check the system logs
|
||||||
the Postfix daemon.
|
for messages logged by the Postfix daemon.
|
||||||
|
|
||||||
See :ref:`notifications.cfg` for all configuration options.
|
See :ref:`notifications.cfg` for all configuration options.
|
||||||
|
|
||||||
@ -64,13 +64,13 @@ See :ref:`notifications.cfg` for all configuration options.
|
|||||||
|
|
||||||
SMTP
|
SMTP
|
||||||
^^^^
|
^^^^
|
||||||
SMTP notification targets can send emails directly to an SMTP mail relay.
|
SMTP notification targets can send emails directly to an SMTP mail relay. This
|
||||||
This target does not use the system's MTA to deliver emails.
|
target does not use the system's MTA to deliver emails. Similar to sendmail
|
||||||
Similar to sendmail targets, if a user is selected as a recipient, the user's configured
|
targets, if a user is selected as a recipient, the user's configured email
|
||||||
email address will be used.
|
address will be used.
|
||||||
|
|
||||||
.. NOTE:: Unlike sendmail targets, SMTP targets do not have any queuing/retry mechanism
|
.. NOTE:: Unlike sendmail targets, SMTP targets do not have any queuing/retry
|
||||||
in case of a failed mail delivery.
|
mechanism in case of a failed mail delivery.
|
||||||
|
|
||||||
See :ref:`notifications.cfg` for all configuration options.
|
See :ref:`notifications.cfg` for all configuration options.
|
||||||
|
|
||||||
@ -78,10 +78,10 @@ See :ref:`notifications.cfg` for all configuration options.
|
|||||||
|
|
||||||
Gotify
|
Gotify
|
||||||
^^^^^^
|
^^^^^^
|
||||||
`Gotify <http://gotify.net>`_ is an open-source self-hosted notification server that
|
`Gotify <http://gotify.net>`_ is an open-source self-hosted notification server
|
||||||
allows you to send push notifications to various devices and
|
that allows you to send push notifications to various devices and applications.
|
||||||
applications. It provides a simple API and web interface, making it easy to
|
It provides a simple API and web interface, making it easy to integrate with
|
||||||
integrate with different platforms and services.
|
different platforms and services.
|
||||||
|
|
||||||
.. NOTE:: Gotify targets will respect the HTTP proxy settings from
|
.. NOTE:: Gotify targets will respect the HTTP proxy settings from
|
||||||
Configuration → Other → HTTP proxy
|
Configuration → Other → HTTP proxy
|
||||||
@@ -95,27 +95,28 @@ Webhook notification targets perform HTTP requests to a configurable URL.

The following configuration options are available:

* ``url``: The URL to which to perform the HTTP requests. Supports templating
  to inject message contents, metadata and secrets.
* ``method``: HTTP Method to use (POST/PUT/GET)
* ``header``: Array of HTTP headers that should be set for the request.
  Supports templating to inject message contents, metadata and secrets.
* ``body``: HTTP body that should be sent. Supports templating to inject
  message contents, metadata and secrets.
* ``secret``: Array of secret key-value pairs. These will be stored in a
  protected configuration file only readable by root. Secrets can be
  accessed in body/header/URL templates via the ``secrets`` namespace.
* ``comment``: Comment for this target.

For configuration options that support templating, the `Handlebars
<https://handlebarsjs.com>`_ syntax can be used to access the following
properties:

* ``{{ title }}``: The rendered notification title
* ``{{ message }}``: The rendered notification body
* ``{{ severity }}``: The severity of the notification (``info``, ``notice``,
  ``warning``, ``error``, ``unknown``)
* ``{{ timestamp }}``: The notification's timestamp as a UNIX epoch (in
  seconds).
* ``{{ fields.<name> }}``: Sub-namespace for any metadata fields of the
  notification. For instance, ``fields.type`` contains the notification
  type - for all available fields refer to :ref:`notification_events`.
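
As an illustration of how these properties can be combined, a hypothetical
``body`` template posting a JSON payload could look like the following (only
the Handlebars properties are taken from the list above; the payload field
names are made up):

.. code-block:: json

   {
       "summary": "{{ severity }}: {{ title }}",
       "text": "{{ message }}",
       "datastore": "{{ fields.datastore }}"
   }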

@@ -197,20 +198,19 @@ Example - Slack

Notification Matchers
---------------------

Notification matchers route notifications to notification targets based on
their matching rules. These rules can match certain properties of a
notification, such as the timestamp (``match-calendar``), the severity of the
notification (``match-severity``) or metadata fields (``match-field``). If a
notification is matched by a matcher, all targets configured for the matcher
will receive the notification.

An arbitrary number of matchers can be created, each with their own
matching rules and targets to notify. Every target is notified at most once for
every notification, even if the target is used in multiple matchers.

A matcher without rules matches any notification; the configured targets will
always be notified.

See :ref:`notifications.cfg` for all configuration options.

@@ -227,20 +227,24 @@ Examples:

Field Matching Rules
^^^^^^^^^^^^^^^^^^^^
Notifications have a selection of metadata fields that can be matched. When
using ``exact`` as a matching mode, a ``,`` can be used as a separator. The
matching rule then matches if the metadata field has **any** of the specified
values.

Examples:

* ``match-field exact:type=gc`` Only match notifications for garbage collection
  jobs
* ``match-field exact:type=prune,verify`` Match prune job and verification job
  notifications.
* ``match-field regex:datastore=^backup-.*$`` Match any datastore starting with
  ``backup``.

If a notification does not have the matched field, the rule will **not** match.
For instance, a ``match-field regex:datastore=.*`` directive will match any
notification that has a ``datastore`` metadata field, but will not match if the
field does not exist.
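
For illustration, a matcher using such rules could look like the following
entry in :ref:`notifications.cfg` (a sketch; the matcher and target names are
invented, the layout follows the usual section-config format)::

   matcher: backup-job-errors
       match-field exact:type=prune,verify
       target admin-smtp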

Severity Matching Rules
^^^^^^^^^^^^^^^^^^^^^^^

@@ -259,9 +263,9 @@ The following severities are in use:

Notification Events
-------------------

The following table contains a list of all notification events in Proxmox
Backup server, their type, severity and additional metadata fields. ``type`` as
well as any other metadata field may be used in ``match-field`` match rules.

================================ ==================== ========== ==============================================================
Event                            ``type``             Severity   Metadata fields (in addition to ``type``)
@@ -281,8 +285,8 @@ Verification job failure ``verification`` ``error`` ``datastore``,
Verification job success         ``verification``     ``info``   ``datastore``, ``hostname``, ``job-id``
================================ ==================== ========== ==============================================================

The following table contains a description of all used metadata fields. All of
these can be used in ``match-field`` match rules.

==================== ===================================
Metadata field       Description
@@ -299,45 +303,45 @@ Metadata field Description

System Mail Forwarding
----------------------
Certain local system daemons, such as ``smartd``, send notification emails to
the local ``root`` user. Proxmox Backup Server will feed these mails into the
notification system as a notification of type ``system-mail`` and with severity
``unknown``.

When the email is forwarded to a sendmail target, the mail's content and
headers are forwarded as-is. For all other targets, the system tries to extract
both a subject line and the main text body from the email content. In instances
where emails solely consist of HTML content, they will be transformed into
plain text format during this process.

Permissions
-----------
In order to modify/view the configuration for notification targets, the
``Sys.Modify/Sys.Audit`` permissions are required for the
``/system/notifications`` ACL node.
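
For example, the necessary privileges can be granted by assigning a role that
contains them on this ACL node; a sketch using the CLI (role and user ID are
only examples):

.. code-block:: console

   # proxmox-backup-manager acl update /system/notifications Admin --auth-id alice@pbs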

.. _notification_mode:

Notification Mode
-----------------
Datastores and tape backup/restore job configuration have a
``notification-mode`` option which can have one of two values:

* ``legacy-sendmail``: Send notification emails via the system's ``sendmail``
  command. The notification system will be bypassed and any configured
  targets/matchers will be ignored. This mode is equivalent to the notification
  behavior for versions before Proxmox Backup Server 3.2.

* ``notification-system``: Use the new, flexible notification system.

If the ``notification-mode`` option is not set, Proxmox Backup Server will
default to ``legacy-sendmail``.

Starting with Proxmox Backup Server 3.2, a datastore created in the UI will
automatically opt in to the new notification system. If the datastore is
created via the API or the ``proxmox-backup-manager`` CLI, the
``notification-mode`` option has to be set explicitly to
``notification-system`` if the notification system shall be used.
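
A sketch of how opting an existing datastore into the notification system could
look on the command line (the datastore name is chosen for illustration):

.. code-block:: console

   # proxmox-backup-manager datastore update store1 --notification-mode notification-system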

The ``legacy-sendmail`` mode might be removed in a later release of
Proxmox Backup Server.

@@ -346,12 +350,12 @@ Settings for ``legacy-sendmail`` notification mode
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

If ``notification-mode`` is set to ``legacy-sendmail``, Proxmox Backup Server
will send notification emails via the system's ``sendmail`` command to the
email address configured for the user set in the ``notify-user`` option
(falling back to ``root@pam`` if not set).

For datastores, you can also change the level of notifications received per
task type via the ``notify`` option.

* Always: send a notification for any scheduled task, independent of the
  outcome
@@ -362,3 +366,23 @@ type via the ``notify`` option.

The ``notify-user`` and ``notify`` options are ignored if ``notification-mode``
is set to ``notification-system``.

Overriding Notification Templates
---------------------------------

Proxmox Backup Server uses Handlebars templates to render notifications. The
original templates provided by Proxmox Backup Server are stored in
``/usr/share/proxmox-backup/templates/default/``.

Notification templates can be overridden by providing a custom template file in
the override directory at
``/etc/proxmox-backup/notification-templates/default/``. When rendering a
notification of a given type, Proxmox Backup Server will first attempt to load
a template from the override directory. If this one does not exist or fails to
render, the original template will be used.

The template files follow the naming convention of
``<type>-<body|subject>.txt.hbs``. For instance, the file
``gc-err-body.txt.hbs`` contains the template for rendering notifications for
garbage collection errors, while ``package-updates-subject.txt.hbs`` is used to
render the subject line of notifications for available package updates.
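
For example, to customize the body of garbage collection error notifications,
one could copy the shipped template into the override directory and then edit
the copy (a sketch using the paths and naming convention described above):

.. code-block:: console

   # mkdir -p /etc/proxmox-backup/notification-templates/default
   # cp /usr/share/proxmox-backup/templates/default/gc-err-body.txt.hbs \
        /etc/proxmox-backup/notification-templates/default/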

@@ -435,9 +435,28 @@ There are some tuning related options for the datastore that are more advanced:

  This can be set with:

  .. code-block:: console

    # proxmox-backup-manager datastore update <storename> --tuning 'sync-level=filesystem'

* ``gc-atime-safety-check``: Datastore GC atime update safety check:
  You can explicitly `enable` or `disable` the atime update safety check
  performed on datastore creation and garbage collection. This checks if atime
  updates are handled as expected by garbage collection and therefore avoids
  the risk of data loss by unexpected filesystem behavior. It is recommended to
  set this to enabled, which is also the default value.

* ``gc-atime-cutoff``: Datastore GC atime cutoff for chunk cleanup:
  This allows setting the cutoff for which a chunk is still considered in-use
  during phase 2 of garbage collection (given no older writers). If the
  ``atime`` of the chunk is outside the range, it will be removed.

* ``gc-cache-capacity``: Datastore GC least recently used cache capacity:
  Allows controlling the cache capacity used to keep track of chunks for which
  the access time has already been updated during phase 1 of garbage
  collection. This avoids multiple updates and increases GC runtime
  performance. Higher values can reduce GC runtime at the cost of increased
  memory usage; setting the value to 0 disables caching.
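
As with the other tuning options, these can be adjusted via the CLI; for
instance (a sketch, the value shown is only illustrative):

.. code-block:: console

   # proxmox-backup-manager datastore update <storename> --tuning 'gc-atime-safety-check=true'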

If you want to set multiple tuning options simultaneously, you can separate them
with a comma, like this:

@@ -16,8 +16,8 @@ User Configuration
choose the realm when you add a new user. Possible realms are:

:pam: Linux PAM standard authentication. Use this if you want to
   authenticate as a Linux system user. The user needs to already exist on
   the host system.

:pbs: Proxmox Backup Server realm. This type stores hashed passwords in
   ``/etc/proxmox-backup/shadow.json``.
@@ -599,6 +599,32 @@ list view in the web UI, or using the command line:
Authentication Realms
---------------------

.. _user_realms_pam:

Linux PAM
~~~~~~~~~

Linux PAM is a framework for system-wide user authentication. These users are
created on the host system with commands such as ``adduser``.

If PAM users exist on the host system, corresponding entries can be added to
Proxmox Backup Server, to allow these users to log in via their system username
and password.
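
A minimal sketch of this workflow (the user name is only an example):

.. code-block:: console

   # adduser backupadmin
   # proxmox-backup-manager user create backupadmin@pam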

.. _user_realms_pbs:

Proxmox Backup authentication server
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

This is a Unix-like password store, which stores hashed passwords in
``/etc/proxmox-backup/shadow.json``. Passwords are hashed using the SHA-256
hashing algorithm.

This is the most convenient realm for small-scale (or even mid-scale)
installations, where users do not need access to anything outside of Proxmox
Backup Server. In this case, users are fully managed by Proxmox Backup Server
and are able to change their own passwords via the GUI.
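
Creating such a user could, for instance, look like this (the user ID is an
example; the password can afterwards be set or changed via the GUI):

.. code-block:: console

   # proxmox-backup-manager user create jdoe@pbs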

.. _user_realms_ldap:

LDAP
@@ -10,7 +10,7 @@ use tokio::net::TcpStream;
// Simple H2 client to test H2 download speed using h2server.rs

struct Process {
    body: h2::legacy::RecvStream,
    trailers: bool,
    bytes: usize,
}
@@ -50,7 +50,7 @@ impl Future for Process {
}

fn send_request(
    mut client: h2::legacy::client::SendRequest<bytes::Bytes>,
) -> impl Future<Output = Result<usize, Error>> {
    println!("sending request");

@@ -78,7 +78,7 @@ async fn run() -> Result<(), Error> {
    let conn = TcpStream::connect(std::net::SocketAddr::from(([127, 0, 0, 1], 8008))).await?;
    conn.set_nodelay(true).unwrap();

    let (client, h2) = h2::legacy::client::Builder::new()
        .initial_connection_window_size(1024 * 1024 * 1024)
        .initial_window_size(1024 * 1024 * 1024)
        .max_frame_size(4 * 1024 * 1024)
@@ -10,7 +10,7 @@ use tokio::net::TcpStream;
// Simple H2 client to test H2 download speed using h2s-server.rs

struct Process {
    body: h2::legacy::RecvStream,
    trailers: bool,
    bytes: usize,
}
@@ -50,7 +50,7 @@ impl Future for Process {
}

fn send_request(
    mut client: h2::legacy::client::SendRequest<bytes::Bytes>,
) -> impl Future<Output = Result<usize, Error>> {
    println!("sending request");

@@ -94,7 +94,7 @@ async fn run() -> Result<(), Error> {
        .await
        .map_err(|err| format_err!("connect failed - {}", err))?;

    let (client, h2) = h2::legacy::client::Builder::new()
        .initial_connection_window_size(1024 * 1024 * 1024)
        .initial_window_size(1024 * 1024 * 1024)
        .max_frame_size(4 * 1024 * 1024)
@@ -8,6 +8,19 @@ use tokio::net::{TcpListener, TcpStream};

use pbs_buildcfg::configdir;

#[derive(Clone, Copy)]
struct H2SExecutor;

impl<Fut> hyper::rt::Executor<Fut> for H2SExecutor
where
    Fut: Future + Send + 'static,
    Fut::Output: Send,
{
    fn execute(&self, fut: Fut) {
        tokio::spawn(fut);
    }
}

fn main() -> Result<(), Error> {
    proxmox_async::runtime::main(run())
}
@@ -50,12 +63,11 @@ async fn handle_connection(socket: TcpStream, acceptor: Arc<SslAcceptor>) -> Res

    stream.as_mut().accept().await?;

    let mut http = hyper::server::conn::http2::Builder::new(H2SExecutor);
    // increase window size: todo - find optimal size
    let max_window_size = (1 << 31) - 2;
    http.initial_stream_window_size(max_window_size);
    http.initial_connection_window_size(max_window_size);

    let service = hyper::service::service_fn(|_req: Request<Body>| {
        println!("Got request");
@@ -1,9 +1,24 @@
use std::future::Future;

use anyhow::Error;
use futures::*;
use hyper::{Body, Request, Response};

use tokio::net::{TcpListener, TcpStream};

#[derive(Clone, Copy)]
struct H2Executor;

impl<Fut> hyper::rt::Executor<Fut> for H2Executor
where
    Fut: Future + Send + 'static,
    Fut::Output: Send,
{
    fn execute(&self, fut: Fut) {
        tokio::spawn(fut);
    }
}

fn main() -> Result<(), Error> {
    proxmox_async::runtime::main(run())
}
@@ -26,12 +41,11 @@ async fn run() -> Result<(), Error> {
async fn handle_connection(socket: TcpStream) -> Result<(), Error> {
    socket.set_nodelay(true).unwrap();

    let mut http = hyper::server::conn::http2::Builder::new(H2Executor);
    // increase window size: todo - find optimal size
    let max_window_size = (1 << 31) - 2;
    http.initial_stream_window_size(max_window_size);
    http.initial_connection_window_size(max_window_size);

    let service = hyper::service::service_fn(|_req: Request<Body>| {
        println!("Got request");
@@ -27,6 +27,7 @@ tokio = { workspace = true, features = [ "fs", "signal" ] }
tokio-stream.workspace = true
tower-service.workspace = true
xdg.workspace = true
hickory-resolver.workspace = true

pathpatterns.workspace = true
@@ -8,8 +8,9 @@ const_regex! {
}

pub const BACKUP_SOURCE_SCHEMA: Schema = StringSchema::new(
    "Backup source specification ([<archive-name>.<type>:<source-path>]), the \
    'archive-name' must contain alphanumerics, hyphens and underscores only. \
    The 'type' must be either 'pxar', 'img', 'conf' or 'log'.",
)
.format(&ApiStringFormat::Pattern(&BACKUPSPEC_REGEX))
.schema();
@@ -56,7 +56,7 @@ pub struct UploadOptions {
}

struct ChunkUploadResponse {
    future: h2::legacy::client::ResponseFuture,
    size: usize,
}

@@ -143,7 +143,7 @@ impl BackupWriter {
        param: Option<Value>,
        content_type: &str,
        data: Vec<u8>,
    ) -> Result<h2::legacy::client::ResponseFuture, Error> {
        let request =
            H2Client::request_builder("localhost", method, path, param, Some(content_type))
                .unwrap();
@@ -514,7 +514,7 @@ impl BackupWriter {
    }

    fn response_queue() -> (
        mpsc::Sender<h2::legacy::client::ResponseFuture>,
        oneshot::Receiver<Result<(), Error>>,
    ) {
        let (verify_queue_tx, verify_queue_rx) = mpsc::channel(100);
@@ -537,7 +537,7 @@ impl BackupWriter {
        tokio::spawn(
            ReceiverStream::new(verify_queue_rx)
                .map(Ok::<_, Error>)
                .try_for_each(move |response: h2::legacy::client::ResponseFuture| {
                    response
                        .map_err(Error::from)
                        .and_then(H2Client::h2api_response)
@@ -4,11 +4,13 @@ use std::time::Duration;

use anyhow::{bail, format_err, Error};
use futures::*;
#[cfg(not(target_feature = "crt-static"))]
use hyper::client::connect::dns::GaiResolver;
use hyper::client::{Client, HttpConnector};
use hyper::http::header::HeaderValue;
use hyper::http::Uri;
use hyper::http::{Request, Response};
use hyper::{body::HttpBody, Body};
use openssl::{
    ssl::{SslConnector, SslMethod},
    x509::X509StoreContextRef,
@@ -33,6 +35,74 @@ use pbs_api_types::{Authid, RateLimitConfig, Userid};
use super::pipe_to_stream::PipeToSendStream;
use super::PROXMOX_BACKUP_TCP_KEEPALIVE_TIME;

#[cfg(not(target_feature = "crt-static"))]
type DnsResolver = GaiResolver;

#[cfg(target_feature = "crt-static")]
type DnsResolver = resolver::HickoryDnsResolver;

#[cfg(target_feature = "crt-static")]
mod resolver {
    use std::net::SocketAddr;
    use std::pin::Pin;
    use std::sync::Arc;
    use std::task::{Context, Poll};

    use futures::Future;
    use hickory_resolver::error::ResolveError;
    use hickory_resolver::lookup_ip::LookupIpIntoIter;
    use hickory_resolver::TokioAsyncResolver;
    use hyper::client::connect::dns::Name;
    use tower_service::Service;

    pub(crate) struct SocketAddrIter {
        inner: LookupIpIntoIter,
    }

    impl Iterator for SocketAddrIter {
        type Item = SocketAddr;

        fn next(&mut self) -> Option<Self::Item> {
            self.inner.next().map(|ip_addr| SocketAddr::new(ip_addr, 0))
        }
    }

    #[derive(Clone)]
    pub(crate) struct HickoryDnsResolver {
        inner: Arc<TokioAsyncResolver>,
    }

    impl HickoryDnsResolver {
        pub(crate) fn new() -> Self {
            Self {
                inner: Arc::new(TokioAsyncResolver::tokio_from_system_conf().unwrap()),
            }
        }
    }

    impl Service<Name> for HickoryDnsResolver {
        type Response = SocketAddrIter;
        type Error = ResolveError;
        type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send>>;

        fn poll_ready(&mut self, _ctx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
            Poll::Ready(Ok(()))
        }

        fn call(&mut self, name: Name) -> Self::Future {
            let inner = self.inner.clone();
            Box::pin(async move {
                inner
                    .lookup_ip(name.as_str())
                    .await
                    .map(|r| SocketAddrIter {
                        inner: r.into_iter(),
                    })
            })
        }
    }
}

/// Timeout used for several HTTP operations that are expected to finish quickly but may block in
/// certain error conditions. Keep it generous, to avoid false-positive under high load.
const HTTP_TIMEOUT: Duration = Duration::from_secs(2 * 60);
@@ -134,7 +204,7 @@ impl Default for HttpClientOptions {

/// HTTP(S) API client
pub struct HttpClient {
    client: Client<HttpsConnector<DnsResolver>>,
    server: String,
    port: u16,
    fingerprint: Arc<Mutex<Option<String>>>,
@@ -365,7 +435,8 @@ impl HttpClient {
            ssl_connector_builder.set_verify(openssl::ssl::SslVerifyMode::NONE);
        }

        let resolver = DnsResolver::new();
        let mut httpc = HttpConnector::new_with_resolver(resolver);
        httpc.set_nodelay(true); // important for h2 download performance!
        httpc.enforce_http(false); // we want https...
@@ -526,7 +597,9 @@ impl HttpClient {
            _options: options,
        })
    }
}

impl HttpClient {
    /// Login
    ///
    /// Login is done on demand, so this is only required if you need
@@ -706,8 +779,7 @@ impl HttpClient {
                .map(|_| Err(format_err!("unknown error")))
                .await?
        } else {
            futures::TryStreamExt::map_err(resp.into_body(), Error::from)
                .try_fold(output, move |acc, chunk| async move {
                    acc.write_all(&chunk)?;
                    Ok::<_, Error>(acc)
|
|||||||
|
|
||||||
let max_window_size = (1 << 31) - 2;
|
let max_window_size = (1 << 31) - 2;
|
||||||
|
|
||||||
let (h2, connection) = h2::client::Builder::new()
|
let (h2, connection) = h2::legacy::client::Builder::new()
|
||||||
.initial_connection_window_size(max_window_size)
|
.initial_connection_window_size(max_window_size)
|
||||||
.initial_window_size(max_window_size)
|
.initial_window_size(max_window_size)
|
||||||
.max_frame_size(4 * 1024 * 1024)
|
.max_frame_size(4 * 1024 * 1024)
|
||||||
@ -815,7 +887,7 @@ impl HttpClient {
|
|||||||
}
|
}
|
||||||
|
|
||||||
async fn credentials(
|
async fn credentials(
|
||||||
client: Client<HttpsConnector>,
|
client: Client<HttpsConnector<DnsResolver>>,
|
||||||
server: String,
|
server: String,
|
||||||
port: u16,
|
port: u16,
|
||||||
username: Userid,
|
username: Userid,
|
||||||
@ -844,7 +916,7 @@ impl HttpClient {
|
|||||||
|
|
||||||
async fn api_response(response: Response<Body>) -> Result<Value, Error> {
|
async fn api_response(response: Response<Body>) -> Result<Value, Error> {
|
||||||
let status = response.status();
|
let status = response.status();
|
||||||
let data = hyper::body::to_bytes(response.into_body()).await?;
|
let data = HttpBody::collect(response.into_body()).await?.to_bytes();
|
||||||
|
|
||||||
let text = String::from_utf8(data.to_vec()).unwrap();
|
let text = String::from_utf8(data.to_vec()).unwrap();
|
||||||
if status.is_success() {
|
if status.is_success() {
|
||||||
@ -860,7 +932,7 @@ impl HttpClient {
|
|||||||
}
|
}
|
||||||
|
|
||||||
async fn api_request(
|
async fn api_request(
|
||||||
client: Client<HttpsConnector>,
|
client: Client<HttpsConnector<DnsResolver>>,
|
||||||
req: Request<Body>,
|
req: Request<Body>,
|
||||||
) -> Result<Value, Error> {
|
) -> Result<Value, Error> {
|
||||||
Self::api_response(
|
Self::api_response(
|
||||||
@@ -936,11 +1008,11 @@ impl Drop for HttpClient {

#[derive(Clone)]
pub struct H2Client {
    h2: h2::legacy::client::SendRequest<bytes::Bytes>,
}

impl H2Client {
    pub fn new(h2: h2::legacy::client::SendRequest<bytes::Bytes>) -> Self {
        Self { h2 }
    }

@@ -1020,7 +1092,7 @@ impl H2Client {
        &self,
        request: Request<()>,
        data: Option<bytes::Bytes>,
    ) -> impl Future<Output = Result<h2::legacy::client::ResponseFuture, Error>> {
        self.h2
            .clone()
            .ready()
@@ -1037,7 +1109,9 @@ impl H2Client {
        })
    }

    pub async fn h2api_response(
        response: Response<h2::legacy::RecvStream>,
    ) -> Result<Value, Error> {
        let status = response.status();

        let (_head, mut body) = response.into_parts();
@@ -8,7 +8,7 @@ use std::task::{Context, Poll};
use anyhow::{format_err, Error};
use bytes::Bytes;
use futures::{ready, Future};
use h2::legacy::SendStream;

pub struct PipeToSendStream {
    body_tx: SendStream<Bytes>,
@@ -345,8 +345,8 @@ pub(crate) unsafe fn set_test_default_master_pubkey(value: Result<Option<Vec<u8>
pub fn get_encryption_key_password() -> Result<Vec<u8>, Error> {
    // fixme: implement other input methods

    if let Some(password) = super::get_encryption_password()? {
        return Ok(password.into_bytes());
    }

    // If we're on a TTY, query the user for a password
@@ -28,6 +28,21 @@ pub mod key_source;

const ENV_VAR_PBS_FINGERPRINT: &str = "PBS_FINGERPRINT";
const ENV_VAR_PBS_PASSWORD: &str = "PBS_PASSWORD";
const ENV_VAR_PBS_ENCRYPTION_PASSWORD: &str = "PBS_ENCRYPTION_PASSWORD";
const ENV_VAR_PBS_REPOSITORY: &str = "PBS_REPOSITORY";

/// Directory with system [credential]s. See systemd-creds(1).
///
/// [credential]: https://systemd.io/CREDENTIALS/
const ENV_VAR_CREDENTIALS_DIRECTORY: &str = "CREDENTIALS_DIRECTORY";
/// Credential name of the encryption password.
const CRED_PBS_ENCRYPTION_PASSWORD: &str = "proxmox-backup-client.encryption-password";
/// Credential name of the password.
const CRED_PBS_PASSWORD: &str = "proxmox-backup-client.password";
/// Credential name of the repository.
const CRED_PBS_REPOSITORY: &str = "proxmox-backup-client.repository";
/// Credential name of the fingerprint.
const CRED_PBS_FINGERPRINT: &str = "proxmox-backup-client.fingerprint";

pub const REPO_URL_SCHEMA: Schema = StringSchema::new("Repository URL.")
    .format(&BACKUP_REPO_URL)
@@ -40,6 +55,30 @@ pub const CHUNK_SIZE_SCHEMA: Schema = IntegerSchema::new("Chunk size in KB. Must
    .default(4096)
    .schema();

/// Retrieves a secret stored in a [credential] provided by systemd.
///
/// Returns `Ok(None)` if the credential does not exist.
///
/// [credential]: https://systemd.io/CREDENTIALS/
fn get_credential(cred_name: &str) -> std::io::Result<Option<Vec<u8>>> {
    let Some(creds_dir) = std::env::var_os(ENV_VAR_CREDENTIALS_DIRECTORY) else {
        return Ok(None);
    };
    let path = std::path::Path::new(&creds_dir).join(cred_name);

    proxmox_log::debug!("attempting to use credential {cred_name} from {creds_dir:?}");
    // We read the whole contents without a BufRead. As per systemd-creds(1):
    // Credentials are limited-size binary or textual objects.
    match std::fs::read(&path) {
        Ok(bytes) => Ok(Some(bytes)),
        Err(err) if err.kind() == std::io::ErrorKind::NotFound => {
            proxmox_log::debug!("no {cred_name} credential found in {creds_dir:?}");
            Ok(None)
        }
        Err(err) => Err(err),
    }
}

/// Helper to read a secret through an environment variable (ENV).
///
/// Tries the following variable names in order and returns the value
@@ -51,7 +90,7 @@ pub const CHUNK_SIZE_SCHEMA: Schema = IntegerSchema::new("Chunk size in KB. Must
/// BASE_NAME_CMD => read the secret from specified command first line of output on stdout
///
/// Only return the first line of data (without CRLF).
fn get_secret_from_env(base_name: &str) -> Result<Option<String>, Error> {
    let firstline = |data: String| -> String {
        match data.lines().next() {
            Some(line) => line.to_string(),
@@ -118,8 +157,80 @@ pub fn get_secret_from_env(base_name: &str) -> Result<Option<String>, Error> {
    Ok(None)
}

/// Gets a secret or value from the environment.
///
/// Checks for an environment variable named `env_variable`, and if missing, it
/// checks for a system [credential] named `credential_name`. Assumes the secret
/// is UTF-8 encoded.
///
/// [credential]: https://systemd.io/CREDENTIALS/
fn get_secret_impl(env_variable: &str, credential_name: &str) -> Result<Option<String>, Error> {
    if let Some(password) = get_secret_from_env(env_variable)? {
        Ok(Some(password))
    } else if let Some(password) = get_credential(credential_name)? {
        String::from_utf8(password)
            .map(Option::Some)
            .map_err(|_err| format_err!("credential {credential_name} is not utf8 encoded"))
    } else {
        Ok(None)
    }
}

/// Gets the backup server's password.
///
/// Looks for a password in the `PBS_PASSWORD` environment variable, if there
/// isn't one it reads the `proxmox-backup-client.password` [credential].
///
/// Returns `Ok(None)` if neither the environment variable nor the credential is
/// present.
///
/// [credential]: https://systemd.io/CREDENTIALS/
pub fn get_password() -> Result<Option<String>, Error> {
    get_secret_impl(ENV_VAR_PBS_PASSWORD, CRED_PBS_PASSWORD)
}

/// Gets an encryption password.
///
/// Looks for a password in the `PBS_ENCRYPTION_PASSWORD` environment variable,
/// if there isn't one it reads the `proxmox-backup-client.encryption-password`
/// [credential].
///
/// Returns `Ok(None)` if neither the environment variable nor the credential is
/// present.
///
/// [credential]: https://systemd.io/CREDENTIALS/
pub fn get_encryption_password() -> Result<Option<String>, Error> {
    get_secret_impl(
        ENV_VAR_PBS_ENCRYPTION_PASSWORD,
        CRED_PBS_ENCRYPTION_PASSWORD,
    )
}

pub fn get_default_repository() -> Option<String> {
    get_secret_impl(ENV_VAR_PBS_REPOSITORY, CRED_PBS_REPOSITORY)
        .inspect_err(|err| {
            proxmox_log::error!("could not read default repository: {err:#}");
        })
        .unwrap_or_default()
}

/// Gets the repository fingerprint.
///
/// Looks for the fingerprint in the `PBS_FINGERPRINT` environment variable, if
/// there isn't one it reads the `proxmox-backup-client.fingerprint`
/// [credential].
///
/// Returns `None` if neither the environment variable nor the credential is
/// present.
///
/// [credential]: https://systemd.io/CREDENTIALS/
pub fn get_fingerprint() -> Option<String> {
    get_secret_impl(ENV_VAR_PBS_FINGERPRINT, CRED_PBS_FINGERPRINT)
        .inspect_err(|err| {
            proxmox_log::error!("could not read fingerprint: {err:#}");
        })
        .unwrap_or_default()
}
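
For illustration, one way to hand such a credential to a one-off invocation of
``proxmox-backup-client`` is via ``systemd-run``, which makes systemd populate
``$CREDENTIALS_DIRECTORY`` for the spawned command (a sketch; the repository
string and password are placeholders and the exact systemd options depend on
the local setup):

.. code-block:: console

   # systemd-run --pipe --wait \
       --property=SetCredential=proxmox-backup-client.password:mysecret \
       proxmox-backup-client list --repository backup@pbs@server:store1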

pub fn remove_repository_from_value(param: &mut Value) -> Result<BackupRepository, Error> {
@@ -179,9 +290,9 @@ fn connect_do(
    auth_id: &Authid,
    rate_limit: RateLimitConfig,
) -> Result<HttpClient, Error> {
    let fingerprint = get_fingerprint();

    let password = get_password()?;
    let options = HttpClientOptions::new_interactive(password, fingerprint).rate_limit(rate_limit);

    HttpClient::new(server, port, auth_id, options)
@@ -189,8 +300,8 @@ fn connect_do(

/// like get, but simply ignore errors and return Null instead
pub async fn try_get(repo: &BackupRepository, url: &str) -> Value {
    let fingerprint = get_fingerprint();
    let password = get_password().unwrap_or(None);

    // ticket cache, but no questions asked
    let options = HttpClientOptions::new_interactive(password, fingerprint).interactive(false);
@@ -7,7 +7,7 @@ use hyper::client::connect::{Connected, Connection};
use hyper::client::Client;
use hyper::http::Uri;
use hyper::http::{Request, Response};
use hyper::{body::HttpBody, Body};
use pin_project_lite::pin_project;
use serde_json::Value;
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt, ReadBuf};
@@ -179,8 +179,7 @@ impl VsockClient {
        if !status.is_success() {
            Self::api_response(resp).await.map(|_| ())?
        } else {
            futures::TryStreamExt::map_err(resp.into_body(), Error::from)
                .try_fold(output, move |acc, chunk| async move {
                    acc.write_all(&chunk).await?;
                    Ok::<_, Error>(acc)
@@ -192,7 +191,7 @@ impl VsockClient {

    async fn api_response(response: Response<Body>) -> Result<Value, Error> {
        let status = response.status();
        let data = HttpBody::collect(response.into_body()).await?.to_bytes();

        let text = String::from_utf8(data.to_vec()).unwrap();
        if status.is_success() {
|
|||||||
proxmox-shared-memory.workspace = true
|
proxmox-shared-memory.workspace = true
|
||||||
proxmox-sys = { workspace = true, features = [ "acl", "crypt", "timer" ] }
|
proxmox-sys = { workspace = true, features = [ "acl", "crypt", "timer" ] }
|
||||||
proxmox-time.workspace = true
|
proxmox-time.workspace = true
|
||||||
|
proxmox-uuid.workspace = true
|
||||||
|
|
||||||
pbs-api-types.workspace = true
|
pbs-api-types.workspace = true
|
||||||
pbs-buildcfg.workspace = true
|
pbs-buildcfg.workspace = true
|
||||||
|
@ -101,7 +101,7 @@ impl ConfigVersionCache {
|
|||||||
let file_path = Path::new(FILE_PATH);
|
let file_path = Path::new(FILE_PATH);
|
||||||
let dir_path = file_path.parent().unwrap();
|
let dir_path = file_path.parent().unwrap();
|
||||||
|
|
||||||
create_path(dir_path, Some(dir_opts.clone()), Some(dir_opts))?;
|
create_path(dir_path, Some(dir_opts), Some(dir_opts))?;
|
||||||
|
|
||||||
let file_opts = CreateOptions::new()
|
let file_opts = CreateOptions::new()
|
||||||
.perm(Mode::from_bits_truncate(0o660))
|
.perm(Mode::from_bits_truncate(0o660))
|
||||||
|
@ -8,17 +8,34 @@ use proxmox_schema::{ApiType, ObjectSchema};
|
|||||||
use proxmox_section_config::{SectionConfig, SectionConfigData, SectionConfigPlugin};
|
use proxmox_section_config::{SectionConfig, SectionConfigData, SectionConfigPlugin};
|
||||||
|
|
||||||
use crate::{open_backup_lockfile, replace_backup_config, BackupLockGuard};
|
use crate::{open_backup_lockfile, replace_backup_config, BackupLockGuard};
|
||||||
use pbs_api_types::{AdRealmConfig, LdapRealmConfig, OpenIdRealmConfig, REALM_ID_SCHEMA};
|
use pbs_api_types::{
|
||||||
|
AdRealmConfig, LdapRealmConfig, OpenIdRealmConfig, PamRealmConfig, PbsRealmConfig,
|
||||||
|
REALM_ID_SCHEMA,
|
||||||
|
};
|
||||||
|
|
||||||
pub static CONFIG: LazyLock<SectionConfig> = LazyLock::new(init);
|
pub static CONFIG: LazyLock<SectionConfig> = LazyLock::new(init);
|
||||||
|
|
||||||
fn init() -> SectionConfig {
|
fn init() -> SectionConfig {
|
||||||
|
const PAM_SCHEMA: &ObjectSchema = PamRealmConfig::API_SCHEMA.unwrap_object_schema();
|
||||||
|
const PBS_SCHEMA: &ObjectSchema = PbsRealmConfig::API_SCHEMA.unwrap_object_schema();
|
||||||
const AD_SCHEMA: &ObjectSchema = AdRealmConfig::API_SCHEMA.unwrap_object_schema();
|
const AD_SCHEMA: &ObjectSchema = AdRealmConfig::API_SCHEMA.unwrap_object_schema();
|
||||||
const LDAP_SCHEMA: &ObjectSchema = LdapRealmConfig::API_SCHEMA.unwrap_object_schema();
|
const LDAP_SCHEMA: &ObjectSchema = LdapRealmConfig::API_SCHEMA.unwrap_object_schema();
|
||||||
const OPENID_SCHEMA: &ObjectSchema = OpenIdRealmConfig::API_SCHEMA.unwrap_object_schema();
|
const OPENID_SCHEMA: &ObjectSchema = OpenIdRealmConfig::API_SCHEMA.unwrap_object_schema();
|
||||||
|
|
||||||
let mut config = SectionConfig::new(&REALM_ID_SCHEMA);
|
let mut config = SectionConfig::new(&REALM_ID_SCHEMA);
|
||||||
|
|
||||||
|
config.register_plugin(SectionConfigPlugin::new(
|
||||||
|
"pam".to_owned(),
|
||||||
|
Some("realm".to_owned()),
|
||||||
|
PAM_SCHEMA,
|
||||||
|
));
|
||||||
|
|
||||||
|
config.register_plugin(SectionConfigPlugin::new(
|
||||||
|
"pbs".to_owned(),
|
||||||
|
Some("realm".to_owned()),
|
||||||
|
PBS_SCHEMA,
|
||||||
|
));
|
||||||
|
|
||||||
let plugin = SectionConfigPlugin::new(
|
let plugin = SectionConfigPlugin::new(
|
||||||
"openid".to_string(),
|
"openid".to_string(),
|
||||||
Some(String::from("realm")),
|
Some(String::from("realm")),
|
||||||
@ -61,9 +78,24 @@ pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
|
|||||||
replace_backup_config(DOMAINS_CFG_FILENAME, raw.as_bytes())
|
replace_backup_config(DOMAINS_CFG_FILENAME, raw.as_bytes())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Unsets the default login realm for users by deleting the `default` property
|
||||||
|
/// from the respective realm.
|
||||||
|
///
|
||||||
|
/// This only updates the configuration as given in `config`, making it
|
||||||
|
/// permanent is left to the caller.
|
||||||
|
pub fn unset_default_realm(config: &mut SectionConfigData) -> Result<(), Error> {
|
||||||
|
for (_, data) in &mut config.sections.values_mut() {
|
||||||
|
if let Some(obj) = data.as_object_mut() {
|
||||||
|
obj.remove("default");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
/// Check if a realm with the given name exists
|
/// Check if a realm with the given name exists
|
||||||
pub fn exists(domains: &SectionConfigData, realm: &str) -> bool {
|
pub fn exists(domains: &SectionConfigData, realm: &str) -> bool {
|
||||||
realm == "pbs" || realm == "pam" || domains.sections.contains_key(realm)
|
domains.sections.contains_key(realm)
|
||||||
}
|
}
|
||||||
|
|
||||||
// shell completion helper
|
// shell completion helper
|
||||||
|
@ -6,10 +6,10 @@
|
|||||||
//!
|
//!
|
||||||
//! Drive type [`VirtualTapeDrive`] is only useful for debugging.
|
//! Drive type [`VirtualTapeDrive`] is only useful for debugging.
|
||||||
//!
|
//!
|
||||||
//! [LtoTapeDrive]: crate::api2::types::LtoTapeDrive
|
//! [LtoTapeDrive]: pbs_api_types::LtoTapeDrive
|
||||||
//! [VirtualTapeDrive]: crate::api2::types::VirtualTapeDrive
|
//! [VirtualTapeDrive]: pbs_api_types::VirtualTapeDrive
|
||||||
//! [ScsiTapeChanger]: crate::api2::types::ScsiTapeChanger
|
//! [ScsiTapeChanger]: pbs_api_types::ScsiTapeChanger
|
||||||
//! [SectionConfig]: proxmox::api::section_config::SectionConfig
|
//! [SectionConfig]: proxmox_section_config::SectionConfig
|
||||||
|
|
||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
use std::sync::LazyLock;
|
use std::sync::LazyLock;
|
||||||
|
@ -22,6 +22,8 @@ pub use config_version_cache::ConfigVersionCache;
|
|||||||
|
|
||||||
use anyhow::{format_err, Error};
|
use anyhow::{format_err, Error};
|
||||||
use nix::unistd::{Gid, Group, Uid, User};
|
use nix::unistd::{Gid, Group, Uid, User};
|
||||||
|
use proxmox_sys::fs::DirLockGuard;
|
||||||
|
use std::os::unix::prelude::AsRawFd;
|
||||||
|
|
||||||
pub use pbs_buildcfg::{BACKUP_GROUP_NAME, BACKUP_USER_NAME};
|
pub use pbs_buildcfg::{BACKUP_GROUP_NAME, BACKUP_USER_NAME};
|
||||||
|
|
||||||
@@ -46,13 +48,34 @@ pub fn backup_group() -> Result<nix::unistd::Group, Error> {
 }

 pub struct BackupLockGuard {
-    _file: Option<std::fs::File>,
+    file: Option<std::fs::File>,
+    // TODO: Remove `_legacy_dir` with PBS 5
+    _legacy_dir: Option<DirLockGuard>,
+}
+
+impl AsRawFd for BackupLockGuard {
+    fn as_raw_fd(&self) -> i32 {
+        self.file.as_ref().map_or(-1, |f| f.as_raw_fd())
+    }
+}

+// TODO: Remove with PBS 5
+impl From<DirLockGuard> for BackupLockGuard {
+    fn from(value: DirLockGuard) -> Self {
+        Self {
+            file: None,
+            _legacy_dir: Some(value),
+        }
+    }
 }

 #[doc(hidden)]
 /// Note: do not use for production code, this is only intended for tests
 pub unsafe fn create_mocked_lock() -> BackupLockGuard {
-    BackupLockGuard { _file: None }
+    BackupLockGuard {
+        file: None,
+        _legacy_dir: None,
+    }
 }

 /// Open or create a lock file owned by user "backup" and lock it.
@@ -76,7 +99,10 @@ pub fn open_backup_lockfile<P: AsRef<std::path::Path>>(
     let timeout = timeout.unwrap_or(std::time::Duration::new(10, 0));

     let file = proxmox_sys::fs::open_file_locked(&path, timeout, exclusive, options)?;
-    Ok(BackupLockGuard { _file: Some(file) })
+    Ok(BackupLockGuard {
+        file: Some(file),
+        _legacy_dir: None,
+    })
 }

 /// Atomically write data to file owned by "root:backup" with permission "0640"
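With the guard now carrying either a lock file or a legacy directory lock, `as_raw_fd()` returns `-1` for guards that have no file descriptor (mocked or legacy ones). A small sketch of how a caller could tell the two apart, based only on the impls shown above:

```rust
use std::os::unix::io::{AsRawFd, RawFd};

// Returns the lock file descriptor, or None for mocked / legacy directory guards.
fn lock_fd(guard: &BackupLockGuard) -> Option<RawFd> {
    match guard.as_raw_fd() {
        -1 => None,      // mocked or DirLockGuard-backed guard
        fd => Some(fd),  // file-based lock created via open_backup_lockfile()
    }
}
```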
@@ -3,7 +3,7 @@
 //! This configuration module is based on [`SectionConfig`], and
 //! provides a type safe interface to store [`MediaPoolConfig`],
 //!
-//! [MediaPoolConfig]: crate::api2::types::MediaPoolConfig
+//! [MediaPoolConfig]: pbs_api_types::MediaPoolConfig
 //! [SectionConfig]: proxmox_section_config::SectionConfig

 use std::collections::HashMap;
@@ -61,8 +61,16 @@ pub fn verify_secret(tokenid: &Authid, secret: &str) -> Result<(), Error> {
     }
 }

+/// Generates a new secret for the given tokenid / API token, sets it then returns it.
+/// The secret is stored as salted hash.
+pub fn generate_and_set_secret(tokenid: &Authid) -> Result<String, Error> {
+    let secret = format!("{:x}", proxmox_uuid::Uuid::generate());
+    set_secret(tokenid, &secret)?;
+    Ok(secret)
+}
+
 /// Adds a new entry for the given tokenid / API token secret. The secret is stored as salted hash.
-pub fn set_secret(tokenid: &Authid, secret: &str) -> Result<(), Error> {
+fn set_secret(tokenid: &Authid, secret: &str) -> Result<(), Error> {
     if !tokenid.is_token() {
         bail!("not an API token ID");
     }
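Since `set_secret` becomes private here, callers that previously generated a token secret themselves are expected to go through `generate_and_set_secret`, which creates, persists (as a salted hash) and returns the plain secret in one step. A hedged caller sketch; the module path `pbs_config::token_shadow` is an assumption, the function itself is taken from the hunk above:

```rust
use anyhow::Error;
use pbs_api_types::Authid;

fn regenerate_token_secret(tokenid: &Authid) -> Result<String, Error> {
    // The plain secret is only available at this point; the config itself
    // keeps just the salted hash, so hand it to the user right away.
    let secret = pbs_config::token_shadow::generate_and_set_secret(tokenid)?;
    Ok(secret)
}
```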
@@ -35,6 +35,7 @@ proxmox-lang.workspace=true
 proxmox-schema = { workspace = true, features = [ "api-macro" ] }
 proxmox-serde = { workspace = true, features = [ "serde_json" ] }
 proxmox-sys.workspace = true
+proxmox-systemd.workspace = true
 proxmox-time.workspace = true
 proxmox-uuid.workspace = true
 proxmox-worker-task.workspace = true
@@ -1,11 +1,15 @@
 use std::fmt;
-use std::os::unix::io::RawFd;
+use std::os::unix::io::{AsRawFd, RawFd};
+use std::os::unix::prelude::OsStrExt;
+use std::path::Path;
 use std::path::PathBuf;
-use std::sync::Arc;
+use std::sync::{Arc, LazyLock};
+use std::time::Duration;

-use anyhow::{bail, format_err, Error};
+use anyhow::{bail, format_err, Context, Error};

-use proxmox_sys::fs::{lock_dir_noblock, replace_file, CreateOptions};
+use proxmox_sys::fs::{lock_dir_noblock, lock_dir_noblock_shared, replace_file, CreateOptions};
+use proxmox_systemd::escape_unit;

 use pbs_api_types::{
     Authid, BackupGroupDeleteStats, BackupNamespace, BackupType, GroupFilter, VerifyState,
@@ -16,6 +20,18 @@ use pbs_config::{open_backup_lockfile, BackupLockGuard};
 use crate::manifest::{BackupManifest, MANIFEST_LOCK_NAME};
 use crate::{DataBlob, DataStore};

+pub const DATASTORE_LOCKS_DIR: &str = "/run/proxmox-backup/locks";
+
+// TODO: Remove with PBS 5
+// Note: The `expect()` call here will only happen if we can neither confirm nor deny the existence
+// of the file. this should only happen if a user messes with the `/run/proxmox-backup` directory.
+// if that happens, a lot more should fail as we rely on the existence of the directory throughout
+// the code. so just panic with a reasonable message.
+pub(crate) static OLD_LOCKING: LazyLock<bool> = LazyLock::new(|| {
+    std::fs::exists("/run/proxmox-backup/old-locking")
+        .expect("cannot read `/run/proxmox-backup`, please check permissions")
+});
+
 /// BackupGroup is a directory containing a list of BackupDir
 #[derive(Clone)]
 pub struct BackupGroup {
@@ -199,9 +215,10 @@ impl BackupGroup {
     /// Returns `BackupGroupDeleteStats`, containing the number of deleted snapshots
     /// and number of protected snaphsots, which therefore were not removed.
     pub fn destroy(&self) -> Result<BackupGroupDeleteStats, Error> {
+        let _guard = self
+            .lock()
+            .with_context(|| format!("while destroying group '{self:?}'"))?;
         let path = self.full_group_path();
-        let _guard =
-            proxmox_sys::fs::lock_dir_noblock(&path, "backup group", "possible running backup")?;

         log::info!("removing backup group {:?}", path);
         let mut delete_stats = BackupGroupDeleteStats::default();
@@ -215,16 +232,34 @@ impl BackupGroup {
             delete_stats.increment_removed_snapshots();
         }

-        if delete_stats.all_removed() {
-            std::fs::remove_dir_all(&path).map_err(|err| {
-                format_err!("removing group directory {:?} failed - {}", path, err)
-            })?;
+        // Note: make sure the old locking mechanism isn't used as `remove_dir_all` is not safe in
+        // that case
+        if delete_stats.all_removed() && !*OLD_LOCKING {
+            self.remove_group_dir()?;
             delete_stats.increment_removed_groups();
         }

         Ok(delete_stats)
     }

+    /// Helper function, assumes that no more snapshots are present in the group.
+    fn remove_group_dir(&self) -> Result<(), Error> {
+        let owner_path = self.store.owner_path(&self.ns, &self.group);
+
+        std::fs::remove_file(&owner_path).map_err(|err| {
+            format_err!("removing the owner file '{owner_path:?}' failed - {err}")
+        })?;
+
+        let path = self.full_group_path();
+
+        std::fs::remove_dir(&path)
+            .map_err(|err| format_err!("removing group directory {path:?} failed - {err}"))?;
+
+        let _ = std::fs::remove_file(self.lock_path());
+
+        Ok(())
+    }
+
     /// Returns the backup owner.
     ///
     /// The backup owner is the entity who first created the backup group.
@@ -237,6 +272,36 @@ impl BackupGroup {
         self.store
             .set_owner(&self.ns, self.as_ref(), auth_id, force)
     }

+    /// Returns a file name for locking a group.
+    ///
+    /// The lock file will be located in:
+    /// `${DATASTORE_LOCKS_DIR}/${datastore name}/${lock_file_path_helper(rpath)}`
+    /// where `rpath` is the relative path of the group.
+    fn lock_path(&self) -> PathBuf {
+        let path = Path::new(DATASTORE_LOCKS_DIR).join(self.store.name());
+
+        let rpath = Path::new(self.group.ty.as_str()).join(&self.group.id);
+
+        path.join(lock_file_path_helper(&self.ns, rpath))
+    }
+
+    /// Locks a group exclusively.
+    pub fn lock(&self) -> Result<BackupLockGuard, Error> {
+        if *OLD_LOCKING {
+            lock_dir_noblock(
+                &self.full_group_path(),
+                "backup group",
+                "possible runing backup, group is in use",
+            )
+            .map(BackupLockGuard::from)
+        } else {
+            lock_helper(self.store.name(), &self.lock_path(), |p| {
+                open_backup_lockfile(p, Some(Duration::from_secs(0)), true)
+                    .with_context(|| format!("unable to acquire backup group lock {p:?}"))
+            })
+        }
+    }
 }

 impl AsRef<pbs_api_types::BackupNamespace> for BackupGroup {
@@ -421,36 +486,101 @@ impl BackupDir {
     /// Returns the filename to lock a manifest
     ///
     /// Also creates the basedir. The lockfile is located in
-    /// '/run/proxmox-backup/locks/{datastore}/[ns/{ns}/]+{type}/{id}/{timestamp}.index.json.lck'
-    fn manifest_lock_path(&self) -> Result<PathBuf, Error> {
-        let mut path = PathBuf::from(&format!("/run/proxmox-backup/locks/{}", self.store.name()));
-        path.push(self.relative_path());
-
-        std::fs::create_dir_all(&path)?;
-        let ts = self.backup_time_string();
-        path.push(format!("{ts}{MANIFEST_LOCK_NAME}"));
-
-        Ok(path)
+    /// `${DATASTORE_LOCKS_DIR}/${datastore name}/${lock_file_path_helper(rpath)}.index.json.lck`
+    /// where rpath is the relative path of the snapshot.
+    fn manifest_lock_path(&self) -> PathBuf {
+        let path = Path::new(DATASTORE_LOCKS_DIR).join(self.store.name());
+
+        let rpath = Path::new(self.dir.group.ty.as_str())
+            .join(&self.dir.group.id)
+            .join(&self.backup_time_string)
+            .join(MANIFEST_LOCK_NAME);
+
+        path.join(lock_file_path_helper(&self.ns, rpath))
     }

     /// Locks the manifest of a snapshot, for example, to update or delete it.
     pub(crate) fn lock_manifest(&self) -> Result<BackupLockGuard, Error> {
-        let path = self.manifest_lock_path()?;
-
-        // actions locking the manifest should be relatively short, only wait a few seconds
-        open_backup_lockfile(&path, Some(std::time::Duration::from_secs(5)), true)
-            .map_err(|err| format_err!("unable to acquire manifest lock {:?} - {}", &path, err))
+        let path = if *OLD_LOCKING {
+            // old manifest lock path
+            let path = Path::new(DATASTORE_LOCKS_DIR)
+                .join(self.store.name())
+                .join(self.relative_path());
+
+            std::fs::create_dir_all(&path)?;
+
+            path.join(format!("{}{MANIFEST_LOCK_NAME}", self.backup_time_string()))
+        } else {
+            self.manifest_lock_path()
+        };
+
+        lock_helper(self.store.name(), &path, |p| {
+            // update_manifest should never take a long time, so if
+            // someone else has the lock we can simply block a bit
+            // and should get it soon
+            open_backup_lockfile(p, Some(Duration::from_secs(5)), true)
+                .with_context(|| format_err!("unable to acquire manifest lock {p:?}"))
+        })
+    }
+
+    /// Returns a file name for locking a snapshot.
+    ///
+    /// The lock file will be located in:
+    /// `${DATASTORE_LOCKS_DIR}/${datastore name}/${lock_file_path_helper(rpath)}`
+    /// where `rpath` is the relative path of the snapshot.
+    fn lock_path(&self) -> PathBuf {
+        let path = Path::new(DATASTORE_LOCKS_DIR).join(self.store.name());
+
+        let rpath = Path::new(self.dir.group.ty.as_str())
+            .join(&self.dir.group.id)
+            .join(&self.backup_time_string);
+
+        path.join(lock_file_path_helper(&self.ns, rpath))
+    }
+
+    /// Locks a snapshot exclusively.
+    pub fn lock(&self) -> Result<BackupLockGuard, Error> {
+        if *OLD_LOCKING {
+            lock_dir_noblock(
+                &self.full_path(),
+                "snapshot",
+                "backup is running or snapshot is in use",
+            )
+            .map(BackupLockGuard::from)
+        } else {
+            lock_helper(self.store.name(), &self.lock_path(), |p| {
+                open_backup_lockfile(p, Some(Duration::from_secs(0)), true)
+                    .with_context(|| format!("unable to acquire snapshot lock {p:?}"))
+            })
+        }
+    }
+
+    /// Acquires a shared lock on a snapshot.
+    pub fn lock_shared(&self) -> Result<BackupLockGuard, Error> {
+        if *OLD_LOCKING {
+            lock_dir_noblock_shared(
+                &self.full_path(),
+                "snapshot",
+                "backup is running or snapshot is in use, could not acquire shared lock",
+            )
+            .map(BackupLockGuard::from)
+        } else {
+            lock_helper(self.store.name(), &self.lock_path(), |p| {
+                open_backup_lockfile(p, Some(Duration::from_secs(0)), false)
+                    .with_context(|| format!("unable to acquire shared snapshot lock {p:?}"))
+            })
+        }
     }

     /// Destroy the whole snapshot, bails if it's protected
     ///
     /// Setting `force` to true skips locking and thus ignores if the backup is currently in use.
     pub fn destroy(&self, force: bool) -> Result<(), Error> {
-        let full_path = self.full_path();
-
         let (_guard, _manifest_guard);
         if !force {
-            _guard = lock_dir_noblock(&full_path, "snapshot", "possibly running or in use")?;
+            _guard = self
+                .lock()
+                .with_context(|| format!("while destroying snapshot '{self:?}'"))?;
             _manifest_guard = self.lock_manifest()?;
         }

@@ -458,14 +588,37 @@ impl BackupDir {
             bail!("cannot remove protected snapshot"); // use special error type?
         }

+        let full_path = self.full_path();
         log::info!("removing backup snapshot {:?}", full_path);
         std::fs::remove_dir_all(&full_path).map_err(|err| {
             format_err!("removing backup snapshot {:?} failed - {}", full_path, err,)
         })?;

-        // the manifest doesn't exist anymore, no need to keep the lock (already done by guard?)
-        if let Ok(path) = self.manifest_lock_path() {
-            let _ = std::fs::remove_file(path); // ignore errors
+        // remove no longer needed lock files
+        let _ = std::fs::remove_file(self.manifest_lock_path()); // ignore errors
+        let _ = std::fs::remove_file(self.lock_path()); // ignore errors
+
+        let group = BackupGroup::from(self);
+        let guard = group.lock().with_context(|| {
+            format!("while checking if group '{group:?}' is empty during snapshot destruction")
+        });
+
+        // Only remove the group if all of the following is true:
+        //
+        // - we can lock it: if we can't lock the group, it is still in use (either by another
+        //   backup process or a parent caller (who needs to take care that empty groups are
+        //   removed themselves).
+        // - it is now empty: if the group isn't empty, removing it will fail (to avoid removing
+        //   backups that might still be used).
+        // - the new locking mechanism is used: if the old mechanism is used, a group removal here
+        //   could lead to a race condition.
+        //
+        // Do not error out, as we have already removed the snapshot, there is nothing a user could
+        // do to rectify the situation.
+        if guard.is_ok() && group.list_backups()?.is_empty() && !*OLD_LOCKING {
+            group.remove_group_dir()?;
+        } else if let Err(err) = guard {
+            log::debug!("{err:#}");
         }

         Ok(())
@@ -661,3 +814,75 @@ fn list_backup_files<P: ?Sized + nix::NixPath>(

     Ok(files)
 }
+
+/// Creates a path to a lock file depending on the relative path of an object (snapshot, group,
+/// manifest) in a datastore. First all namespaces will be concatenated with a colon (ns-folder).
+/// Then the actual file name will depend on the length of the relative path without namespaces. If
+/// it is shorter than 255 characters in its unit encoded form, than the unit encoded form will be
+/// used directly. If not, the file name will consist of the first 80 character, the last 80
+/// characters and the hash of the unit encoded relative path without namespaces. It will also be
+/// placed into a "hashed" subfolder in the namespace folder.
+///
+/// Examples:
+///
+/// - vm-100
+/// - vm-100-2022\x2d05\x2d02T08\x3a11\x3a33Z
+/// - ns1:ns2:ns3:ns4:ns5:ns6:ns7/vm-100-2022\x2d05\x2d02T08\x3a11\x3a33Z
+///
+/// A "hashed" lock file would look like this:
+/// - ns1:ns2:ns3/hashed/$first_eighty...$last_eighty-$hash
+fn lock_file_path_helper(ns: &BackupNamespace, path: PathBuf) -> PathBuf {
+    let to_return = PathBuf::from(
+        ns.components()
+            .map(String::from)
+            .reduce(|acc, n| format!("{acc}:{n}"))
+            .unwrap_or_default(),
+    );
+
+    let path_bytes = path.as_os_str().as_bytes();
+
+    let enc = escape_unit(path_bytes, true);
+
+    if enc.len() < 255 {
+        return to_return.join(enc);
+    }
+
+    let to_return = to_return.join("hashed");
+
+    let first_eigthy = &enc[..80];
+    let last_eighty = &enc[enc.len() - 80..];
+    let hash = hex::encode(openssl::sha::sha256(path_bytes));
+
+    to_return.join(format!("{first_eigthy}...{last_eighty}-{hash}"))
+}
+
+/// Helps implement the double stat'ing procedure. It avoids certain race conditions upon lock
+/// deletion.
+///
+/// It also creates the base directory for lock files.
+fn lock_helper<F>(
+    store_name: &str,
+    path: &std::path::Path,
+    lock_fn: F,
+) -> Result<BackupLockGuard, Error>
+where
+    F: Fn(&std::path::Path) -> Result<BackupLockGuard, Error>,
+{
+    let mut lock_dir = Path::new(DATASTORE_LOCKS_DIR).join(store_name);
+
+    if let Some(parent) = path.parent() {
+        lock_dir = lock_dir.join(parent);
+    };
+
+    std::fs::create_dir_all(&lock_dir)?;
+
+    let lock = lock_fn(path)?;
+
+    let inode = nix::sys::stat::fstat(lock.as_raw_fd())?.st_ino;
+
+    if nix::sys::stat::stat(path).map_or(true, |st| inode != st.st_ino) {
+        bail!("could not acquire lock, another thread modified the lock file");
+    }
+
+    Ok(lock)
+}
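The `lock_helper` above relies on a double-stat check: after the lock has been taken on the open file, the inode of the descriptor is compared with the inode currently behind the path, so a lock file that was deleted and recreated in between is rejected. A standalone illustration of that idea using only std (the function name and error text are placeholders, not part of the diff):

```rust
use std::os::unix::fs::MetadataExt;
use std::path::Path;

use anyhow::bail;

fn verify_lock_still_valid(path: &Path, locked_file: &std::fs::File) -> Result<(), anyhow::Error> {
    // fstat() on the descriptor we actually hold the lock on ...
    let fd_inode = locked_file.metadata()?.ino();

    // ... compared with stat() on the path; a mismatch (or a vanished file)
    // means someone removed/recreated the lock file while we were acquiring it.
    match std::fs::metadata(path) {
        Ok(st) if st.ino() == fd_inode => Ok(()),
        _ => bail!("lock file {path:?} was replaced during locking, must retry"),
    }
}
```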
@@ -1,9 +1,11 @@
+use std::os::unix::fs::MetadataExt;
 use std::os::unix::io::AsRawFd;
 use std::path::{Path, PathBuf};
 use std::sync::{Arc, Mutex};
+use std::time::Duration;

-use anyhow::{bail, format_err, Error};
-use tracing::info;
+use anyhow::{bail, format_err, Context, Error};
+use tracing::{info, warn};

 use pbs_api_types::{DatastoreFSyncLevel, GarbageCollectionStatus};
 use proxmox_io::ReadExt;
@@ -13,6 +15,7 @@ use proxmox_sys::process_locker::{
 };
 use proxmox_worker_task::WorkerTaskContext;

+use crate::data_blob::DataChunkBuilder;
 use crate::file_formats::{
     COMPRESSED_BLOB_MAGIC_1_0, ENCRYPTED_BLOB_MAGIC_1_0, UNCOMPRESSED_BLOB_MAGIC_1_0,
 };
@@ -109,7 +112,7 @@ impl ChunkStore {

         let default_options = CreateOptions::new();

-        match create_path(&base, Some(default_options), Some(options.clone())) {
+        match create_path(&base, Some(default_options), Some(options)) {
             Err(err) => bail!("unable to create chunk store '{name}' at {base:?} - {err}"),
             Ok(res) => {
                 if !res {
@@ -118,13 +121,13 @@ impl ChunkStore {
             }
         }

-        if let Err(err) = create_dir(&chunk_dir, options.clone()) {
+        if let Err(err) = create_dir(&chunk_dir, options) {
             bail!("unable to create chunk store '{name}' subdir {chunk_dir:?} - {err}");
         }

         // create lock file with correct owner/group
         let lockfile_path = Self::lockfile_path(&base);
-        proxmox_sys::fs::replace_file(lockfile_path, b"", options.clone(), false)?;
+        proxmox_sys::fs::replace_file(lockfile_path, b"", options, false)?;

         // create 64*1024 subdirs
         let mut last_percentage = 0;
@@ -132,7 +135,7 @@ impl ChunkStore {
         for i in 0..64 * 1024 {
             let mut l1path = chunk_dir.clone();
             l1path.push(format!("{:04x}", i));
-            if let Err(err) = create_dir(&l1path, options.clone()) {
+            if let Err(err) = create_dir(&l1path, options) {
                 bail!(
                     "unable to create chunk store '{}' subdir {:?} - {}",
                     name,
@@ -177,7 +180,7 @@ impl ChunkStore {
     /// Note that this must be used with care, as it's dangerous to create two instances on the
     /// same base path, as closing the underlying ProcessLocker drops all locks from this process
     /// on the lockfile (even if separate FDs)
-    pub(crate) fn open<P: Into<PathBuf>>(
+    pub fn open<P: Into<PathBuf>>(
         name: &str,
         base: P,
         sync_level: DatastoreFSyncLevel,
@@ -353,7 +356,7 @@ impl ChunkStore {
     pub fn sweep_unused_chunks(
         &self,
         oldest_writer: i64,
-        phase1_start_time: i64,
+        min_atime: i64,
         status: &mut GarbageCollectionStatus,
         worker: &dyn WorkerTaskContext,
     ) -> Result<(), Error> {
@@ -363,14 +366,6 @@ impl ChunkStore {
         use nix::sys::stat::fstatat;
         use nix::unistd::{unlinkat, UnlinkatFlags};

-        let mut min_atime = phase1_start_time - 3600 * 24; // at least 24h (see mount option relatime)
-
-        if oldest_writer < min_atime {
-            min_atime = oldest_writer;
-        }
-
-        min_atime -= 300; // add 5 mins gap for safety
-
         let mut last_percentage = 0;
         let mut chunk_count = 0;

@@ -442,6 +437,69 @@ impl ChunkStore {
         Ok(())
     }

+    /// Check if atime updates are honored by the filesystem backing the chunk store.
+    ///
+    /// Checks if the atime is always updated by utimensat taking into consideration the Linux
+    /// kernel timestamp granularity.
+    /// If `retry_on_file_changed` is set to true, the check is performed again on the changed file
+    /// if a file change while testing is detected by differences in bith time or inode number.
+    /// Uses a 4 MiB fixed size, compressed but unencrypted chunk to test. The chunk is inserted in
+    /// the chunk store if not yet present.
+    /// Returns with error if the check could not be performed.
+    pub fn check_fs_atime_updates(&self, retry_on_file_changed: bool) -> Result<(), Error> {
+        let (zero_chunk, digest) = DataChunkBuilder::build_zero_chunk(None, 4096 * 1024, true)?;
+        let (pre_existing, _) = self.insert_chunk(&zero_chunk, &digest)?;
+        let (path, _digest) = self.chunk_path(&digest);
+
+        // Take into account timestamp update granularity in the kernel
+        // Blocking the thread is fine here since this runs in a worker.
+        std::thread::sleep(Duration::from_secs(1));
+
+        let metadata_before = std::fs::metadata(&path).context(format!(
+            "failed to get metadata for {path:?} before atime update"
+        ))?;
+
+        // Second atime update if chunk pre-existed, insert_chunk already updates pre-existing ones
+        self.cond_touch_path(&path, true)?;
+
+        let metadata_now = std::fs::metadata(&path).context(format!(
+            "failed to get metadata for {path:?} after atime update"
+        ))?;
+
+        // Check for the unlikely case that the file changed in-between the
+        // two metadata calls, try to check once again on changed file
+        if metadata_before.ino() != metadata_now.ino() {
+            if retry_on_file_changed {
+                return self.check_fs_atime_updates(false);
+            }
+            bail!("chunk {path:?} changed twice during access time safety check, cannot proceed.");
+        }
+
+        if metadata_before.accessed()? >= metadata_now.accessed()? {
+            let chunk_info_str = if pre_existing {
+                "pre-existing"
+            } else {
+                "newly inserted"
+            };
+            warn!("Chunk metadata was not correctly updated during access time safety check:");
+            info!(
+                "Timestamps before update: accessed {:?}, modified {:?}, created {:?}",
+                metadata_before.accessed().ok(),
+                metadata_before.modified().ok(),
+                metadata_before.created().ok(),
+            );
+            info!(
+                "Timestamps after update: accessed {:?}, modified {:?}, created {:?}",
+                metadata_now.accessed().ok(),
+                metadata_now.modified().ok(),
+                metadata_now.created().ok(),
+            );
+            bail!("access time safety check using {chunk_info_str} chunk failed, aborting GC!");
+        }
+
+        Ok(())
+    }
+
     pub fn insert_chunk(&self, chunk: &DataBlob, digest: &[u8; 32]) -> Result<(bool, u64), Error> {
         // unwrap: only `None` in unit tests
         assert!(self.locker.is_some());
@@ -503,10 +561,16 @@ impl ChunkStore {
             .parent()
             .ok_or_else(|| format_err!("unable to get chunk dir"))?;

+        let mut create_options = CreateOptions::new();
+        if nix::unistd::Uid::effective().is_root() {
+            let uid = pbs_config::backup_user()?.uid;
+            let gid = pbs_config::backup_group()?.gid;
+            create_options = create_options.owner(uid).group(gid);
+        }
         proxmox_sys::fs::replace_file(
             &chunk_path,
             raw_data,
-            CreateOptions::new(),
+            create_options,
             self.sync_level == DatastoreFSyncLevel::File,
         )
        .map_err(|err| {
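`check_fs_atime_updates` boils down to: record the chunk's atime, wait past the kernel's timestamp granularity, force an atime update and require that the recorded value moved forward. A rough std-only approximation of that probe; the real code above touches an inserted zero chunk via utimensat, here `set_times` and a caller-supplied path stand in for that:

```rust
use std::fs::{File, FileTimes};
use std::path::Path;
use std::time::{Duration, SystemTime};

fn atime_updates_visible(path: &Path) -> std::io::Result<bool> {
    let before = std::fs::metadata(path)?.accessed()?;

    // account for one-second timestamp granularity
    std::thread::sleep(Duration::from_secs(1));

    // explicitly bump the access time, similar in effect to utimensat
    let file = File::options().write(true).open(path)?;
    file.set_times(FileTimes::new().set_accessed(SystemTime::now()))?;

    let after = std::fs::metadata(path)?.accessed()?;
    Ok(after > before)
}
```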
@@ -4,9 +4,11 @@ use std::os::unix::ffi::OsStrExt;
 use std::os::unix::io::AsRawFd;
 use std::path::{Path, PathBuf};
 use std::sync::{Arc, LazyLock, Mutex};
+use std::time::Duration;

-use anyhow::{bail, format_err, Error};
+use anyhow::{bail, format_err, Context, Error};
 use nix::unistd::{unlinkat, UnlinkatFlags};
+use pbs_tools::lru_cache::LruCache;
 use tracing::{info, warn};

 use proxmox_human_byte::HumanByte;
@@ -14,9 +16,9 @@ use proxmox_schema::ApiType;

 use proxmox_sys::error::SysError;
 use proxmox_sys::fs::{file_read_optional_string, replace_file, CreateOptions};
-use proxmox_sys::fs::{lock_dir_noblock, DirLockGuard};
 use proxmox_sys::linux::procfs::MountInfo;
 use proxmox_sys::process_locker::ProcessLockSharedGuard;
+use proxmox_time::TimeSpan;
 use proxmox_worker_task::WorkerTaskContext;

 use pbs_api_types::{
@@ -24,8 +26,9 @@ use pbs_api_types::{
     DataStoreConfig, DatastoreFSyncLevel, DatastoreTuning, GarbageCollectionStatus,
     MaintenanceMode, MaintenanceType, Operation, UPID,
 };
+use pbs_config::BackupLockGuard;

-use crate::backup_info::{BackupDir, BackupGroup};
+use crate::backup_info::{BackupDir, BackupGroup, BackupInfo, OLD_LOCKING};
 use crate::chunk_store::ChunkStore;
 use crate::dynamic_index::{DynamicIndexReader, DynamicIndexWriter};
 use crate::fixed_index::{FixedIndexReader, FixedIndexWriter};
@@ -706,7 +709,11 @@ impl DataStore {
     }

     /// Return the path of the 'owner' file.
-    fn owner_path(&self, ns: &BackupNamespace, group: &pbs_api_types::BackupGroup) -> PathBuf {
+    pub(super) fn owner_path(
+        &self,
+        ns: &BackupNamespace,
+        group: &pbs_api_types::BackupGroup,
+    ) -> PathBuf {
         self.group_path(ns, group).join("owner")
     }

@@ -774,41 +781,35 @@ impl DataStore {
     ///
     /// This also acquires an exclusive lock on the directory and returns the lock guard.
     pub fn create_locked_backup_group(
-        &self,
+        self: &Arc<Self>,
         ns: &BackupNamespace,
         backup_group: &pbs_api_types::BackupGroup,
         auth_id: &Authid,
-    ) -> Result<(Authid, DirLockGuard), Error> {
-        // create intermediate path first:
-        let mut full_path = self.base_path();
-        for ns in ns.components() {
-            full_path.push("ns");
-            full_path.push(ns);
-        }
-        full_path.push(backup_group.ty.as_str());
-        std::fs::create_dir_all(&full_path)?;
-
-        full_path.push(&backup_group.id);
-
-        // create the last component now
+    ) -> Result<(Authid, BackupLockGuard), Error> {
+        let backup_group = self.backup_group(ns.clone(), backup_group.clone());
+
+        // create intermediate path first
+        let full_path = backup_group.full_group_path();
+
+        std::fs::create_dir_all(full_path.parent().ok_or_else(|| {
+            format_err!("could not construct parent path for group {backup_group:?}")
+        })?)?;
+
+        // now create the group, this allows us to check whether it existed before
         match std::fs::create_dir(&full_path) {
             Ok(_) => {
-                let guard = lock_dir_noblock(
-                    &full_path,
-                    "backup group",
-                    "another backup is already running",
-                )?;
-                self.set_owner(ns, backup_group, auth_id, false)?;
-                let owner = self.get_owner(ns, backup_group)?; // just to be sure
+                let guard = backup_group.lock().with_context(|| {
+                    format!("while creating new locked backup group '{backup_group:?}'")
+                })?;
+                self.set_owner(ns, backup_group.group(), auth_id, false)?;
+                let owner = self.get_owner(ns, backup_group.group())?; // just to be sure
                 Ok((owner, guard))
             }
             Err(ref err) if err.kind() == io::ErrorKind::AlreadyExists => {
-                let guard = lock_dir_noblock(
-                    &full_path,
-                    "backup group",
-                    "another backup is already running",
-                )?;
-                let owner = self.get_owner(ns, backup_group)?; // just to be sure
+                let guard = backup_group.lock().with_context(|| {
+                    format!("while creating locked backup group '{backup_group:?}'")
+                })?;
+                let owner = self.get_owner(ns, backup_group.group())?; // just to be sure
                 Ok((owner, guard))
             }
             Err(err) => bail!("unable to create backup group {:?} - {}", full_path, err),
@@ -819,29 +820,25 @@ impl DataStore {
     ///
     /// The BackupGroup directory needs to exist.
     pub fn create_locked_backup_dir(
-        &self,
+        self: &Arc<Self>,
         ns: &BackupNamespace,
         backup_dir: &pbs_api_types::BackupDir,
-    ) -> Result<(PathBuf, bool, DirLockGuard), Error> {
-        let full_path = self.snapshot_path(ns, backup_dir);
-        let relative_path = full_path.strip_prefix(self.base_path()).map_err(|err| {
-            format_err!(
-                "failed to produce correct path for backup {backup_dir} in namespace {ns}: {err}"
-            )
-        })?;
-
-        let lock = || {
-            lock_dir_noblock(
-                &full_path,
-                "snapshot",
-                "internal error - tried creating snapshot that's already in use",
-            )
-        };
-
-        match std::fs::create_dir(&full_path) {
-            Ok(_) => Ok((relative_path.to_owned(), true, lock()?)),
+    ) -> Result<(PathBuf, bool, BackupLockGuard), Error> {
+        let backup_dir = self.backup_dir(ns.clone(), backup_dir.clone())?;
+        let relative_path = backup_dir.relative_path();
+
+        match std::fs::create_dir(backup_dir.full_path()) {
+            Ok(_) => {
+                let guard = backup_dir.lock().with_context(|| {
+                    format!("while creating new locked snapshot '{backup_dir:?}'")
+                })?;
+                Ok((relative_path, true, guard))
+            }
             Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => {
-                Ok((relative_path.to_owned(), false, lock()?))
+                let guard = backup_dir
+                    .lock()
+                    .with_context(|| format!("while creating locked snapshot '{backup_dir:?}'"))?;
+                Ok((relative_path, false, guard))
             }
             Err(e) => Err(e.into()),
         }
@@ -970,10 +967,15 @@ impl DataStore {
         ListGroups::new(Arc::clone(self), ns)?.collect()
     }

-    pub fn list_images(&self) -> Result<Vec<PathBuf>, Error> {
+    /// Lookup all index files to be found in the datastore without taking any logical iteration
+    /// into account.
+    /// The filesystem is walked recursevly to detect index files based on their archive type based
+    /// on the filename. This however excludes the chunks folder, hidden files and does not follow
+    /// symlinks.
+    fn list_index_files(&self) -> Result<HashSet<PathBuf>, Error> {
         let base = self.base_path();

-        let mut list = vec![];
+        let mut list = HashSet::new();

         use walkdir::WalkDir;

@@ -1021,7 +1023,7 @@ impl DataStore {
                 if archive_type == ArchiveType::FixedIndex
                     || archive_type == ArchiveType::DynamicIndex
                 {
-                    list.push(path);
+                    list.insert(path);
                 }
             }
         }
@@ -1029,11 +1031,51 @@ impl DataStore {
         Ok(list)
     }

-    // mark chunks used by ``index`` as used
-    fn index_mark_used_chunks<I: IndexFile>(
+    // Similar to open index, but return with Ok(None) if index file vanished.
+    fn open_index_reader(
         &self,
-        index: I,
+        absolute_path: &Path,
+    ) -> Result<Option<Box<dyn IndexFile>>, Error> {
+        let archive_type = match ArchiveType::from_path(absolute_path) {
+            // ignore archives with unknown archive type
+            Ok(ArchiveType::Blob) | Err(_) => bail!("unexpected archive type"),
+            Ok(archive_type) => archive_type,
+        };
+
+        if absolute_path.is_relative() {
+            bail!("expected absolute path, got '{absolute_path:?}'");
+        }
+
+        let file = match std::fs::File::open(absolute_path) {
+            Ok(file) => file,
+            // ignore vanished files
+            Err(err) if err.kind() == io::ErrorKind::NotFound => return Ok(None),
+            Err(err) => {
+                return Err(Error::from(err).context(format!("can't open file '{absolute_path:?}'")))
+            }
+        };
+
+        match archive_type {
+            ArchiveType::FixedIndex => {
+                let reader = FixedIndexReader::new(file)
+                    .with_context(|| format!("can't open fixed index '{absolute_path:?}'"))?;
+                Ok(Some(Box::new(reader)))
+            }
+            ArchiveType::DynamicIndex => {
+                let reader = DynamicIndexReader::new(file)
+                    .with_context(|| format!("can't open dynamic index '{absolute_path:?}'"))?;
+                Ok(Some(Box::new(reader)))
+            }
+            ArchiveType::Blob => bail!("unexpected archive type blob"),
+        }
+    }
+
+    // mark chunks used by ``index`` as used
+    fn index_mark_used_chunks(
+        &self,
+        index: Box<dyn IndexFile>,
         file_name: &Path, // only used for error reporting
+        chunk_lru_cache: &mut LruCache<[u8; 32], ()>,
         status: &mut GarbageCollectionStatus,
         worker: &dyn WorkerTaskContext,
     ) -> Result<(), Error> {
@@ -1044,6 +1086,12 @@ impl DataStore {
             worker.check_abort()?;
             worker.fail_on_shutdown()?;
             let digest = index.index_digest(pos).unwrap();

+            // Avoid multiple expensive atime updates by utimensat
+            if chunk_lru_cache.insert(*digest, ()) {
+                continue;
+            }
+
             if !self.inner.chunk_store.cond_touch_chunk(digest, false)? {
                 let hex = hex::encode(digest);
                 warn!(
@@ -1069,61 +1117,135 @@ impl DataStore {
         &self,
         status: &mut GarbageCollectionStatus,
         worker: &dyn WorkerTaskContext,
+        cache_capacity: usize,
     ) -> Result<(), Error> {
-        let image_list = self.list_images()?;
-        let image_count = image_list.len();
+        // Iterate twice over the datastore to fetch index files, even if this comes with an
+        // additional runtime cost:
+        // - First iteration to find all index files, no matter if they are in a location expected
+        //   by the datastore's hierarchy
+        // - Iterate using the datastore's helpers, so the namespaces, groups and snapshots are
+        //   looked up given the expected hierarchy and iterator logic
+        //
+        // By this it is assured that all index files are used, even if they would not have been
+        // seen by the regular logic and the user is informed by the garbage collection run about
+        // the detected index files not following the iterators logic.
+
+        let mut unprocessed_index_list = self.list_index_files()?;
+        let mut index_count = unprocessed_index_list.len();
+
+        let mut chunk_lru_cache = LruCache::new(cache_capacity);
+        let mut processed_index_files = 0;
         let mut last_percentage: usize = 0;

-        let mut strange_paths_count: u64 = 0;
-
-        for (i, img) in image_list.into_iter().enumerate() {
-            worker.check_abort()?;
-            worker.fail_on_shutdown()?;
-
-            if let Some(backup_dir_path) = img.parent() {
-                let backup_dir_path = backup_dir_path.strip_prefix(self.base_path())?;
-                if let Some(backup_dir_str) = backup_dir_path.to_str() {
-                    if pbs_api_types::parse_ns_and_snapshot(backup_dir_str).is_err() {
-                        strange_paths_count += 1;
-                    }
-                }
-            }
-
-            match std::fs::File::open(&img) {
-                Ok(file) => {
-                    if let Ok(archive_type) = ArchiveType::from_path(&img) {
-                        if archive_type == ArchiveType::FixedIndex {
-                            let index = FixedIndexReader::new(file).map_err(|e| {
-                                format_err!("can't read index '{}' - {}", img.to_string_lossy(), e)
-                            })?;
-                            self.index_mark_used_chunks(index, &img, status, worker)?;
-                        } else if archive_type == ArchiveType::DynamicIndex {
-                            let index = DynamicIndexReader::new(file).map_err(|e| {
-                                format_err!("can't read index '{}' - {}", img.to_string_lossy(), e)
-                            })?;
-                            self.index_mark_used_chunks(index, &img, status, worker)?;
-                        }
-                    }
-                }
-                Err(err) if err.kind() == io::ErrorKind::NotFound => (), // ignore vanished files
-                Err(err) => bail!("can't open index {} - {}", img.to_string_lossy(), err),
-            }
-
-            let percentage = (i + 1) * 100 / image_count;
-            if percentage > last_percentage {
-                info!(
-                    "marked {percentage}% ({} of {image_count} index files)",
-                    i + 1,
-                );
-                last_percentage = percentage;
-            }
-        }
+        let arc_self = Arc::new(self.clone());
+        for namespace in arc_self
+            .recursive_iter_backup_ns(BackupNamespace::root())
+            .context("creating namespace iterator failed")?
+        {
+            let namespace = namespace.context("iterating namespaces failed")?;
+            for group in arc_self.iter_backup_groups(namespace)? {
+                let group = group.context("iterating backup groups failed")?;
+
+                // Avoid race between listing/marking of snapshots by GC and pruning the last
+                // snapshot in the group, following a new snapshot creation. Otherwise known chunks
+                // might only be referenced by the new snapshot, so it must be read as well.
+                let mut retry_counter = 0;
+                'retry: loop {
+                    let _lock = match retry_counter {
+                        0..=9 => None,
+                        10 => Some(
+                            group
+                                .lock()
+                                .context("exhausted retries and failed to lock group")?,
+                        ),
+                        _ => bail!("exhausted retries and unexpected counter overrun"),
+                    };
+
+                    let mut snapshots = match group.list_backups() {
+                        Ok(snapshots) => snapshots,
+                        Err(err) => {
+                            if group.exists() {
+                                return Err(err).context("listing snapshots failed")?;
+                            }
+                            break 'retry;
+                        }
+                    };
+
+                    // Always start iteration with the last snapshot of the group to reduce race
+                    // window with concurrent backup+prune previous last snapshot. Allows to retry
+                    // without the need to keep track of already processed index files for the
+                    // current group.
+                    BackupInfo::sort_list(&mut snapshots, true);
+                    for (count, snapshot) in snapshots.into_iter().rev().enumerate() {
+                        for file in snapshot.files {
+                            worker.check_abort()?;
+                            worker.fail_on_shutdown()?;
+
+                            match ArchiveType::from_path(&file) {
+                                Ok(ArchiveType::FixedIndex) | Ok(ArchiveType::DynamicIndex) => (),
+                                Ok(ArchiveType::Blob) | Err(_) => continue,
+                            }
+
+                            let mut path = snapshot.backup_dir.full_path();
+                            path.push(file);
+
+                            let index = match self.open_index_reader(&path)? {
+                                Some(index) => index,
+                                None => {
+                                    unprocessed_index_list.remove(&path);
+                                    if count == 0 {
+                                        retry_counter += 1;
+                                        continue 'retry;
+                                    }
+                                    continue;
+                                }
+                            };
+
+                            self.index_mark_used_chunks(
+                                index,
+                                &path,
+                                &mut chunk_lru_cache,
+                                status,
+                                worker,
+                            )?;
+
+                            if !unprocessed_index_list.remove(&path) {
+                                info!("Encountered new index file '{path:?}', increment total index file count");
+                                index_count += 1;
+                            }
+
+                            let percentage = (processed_index_files + 1) * 100 / index_count;
+                            if percentage > last_percentage {
+                                info!(
+                                    "marked {percentage}% ({} of {index_count} index files)",
+                                    processed_index_files + 1,
+                                );
+                                last_percentage = percentage;
+                            }
+                            processed_index_files += 1;
+                        }
+                    }
+
+                    break;
+                }
+            }
+        }

+        let mut strange_paths_count = unprocessed_index_list.len();
+        for path in unprocessed_index_list {
+            let index = match self.open_index_reader(&path)? {
+                Some(index) => index,
+                None => {
+                    // do not count vanished (pruned) backup snapshots as strange paths.
+                    strange_paths_count -= 1;
+                    continue;
+                }
+            };
+            self.index_mark_used_chunks(index, &path, &mut chunk_lru_cache, status, worker)?;
+            warn!("Marked chunks for unexpected index file at '{path:?}'");
+        }
         if strange_paths_count > 0 {
-            info!(
-                "found (and marked) {strange_paths_count} index files outside of expected directory scheme"
-            );
+            warn!("Found {strange_paths_count} index files outside of expected directory scheme");
         }

         Ok(())
@@ -1170,15 +1292,62 @@ impl DataStore {
             upid: Some(upid.to_string()),
             ..Default::default()
         };
+        let tuning: DatastoreTuning = serde_json::from_value(
+            DatastoreTuning::API_SCHEMA
+                .parse_property_string(gc_store_config.tuning.as_deref().unwrap_or(""))?,
+        )?;
+
+        if tuning.gc_atime_safety_check.unwrap_or(true) {
+            self.inner
+                .chunk_store
+                .check_fs_atime_updates(true)
+                .context("atime safety check failed")?;
+            info!("Access time update check successful, proceeding with GC.");
+        } else {
+            info!("Access time update check disabled by datastore tuning options.");
+        };
+
+        // Fallback to default 24h 5m if not set
+        let cutoff = tuning
+            .gc_atime_cutoff
+            .map(|cutoff| cutoff * 60)
+            .unwrap_or(3600 * 24 + 300);
+
+        let mut min_atime = phase1_start_time - cutoff as i64;
+        info!(
+            "Using access time cutoff {}, minimum access time is {}",
+            TimeSpan::from(Duration::from_secs(cutoff as u64)),
+            proxmox_time::epoch_to_rfc3339_utc(min_atime)?,
+        );
+        if oldest_writer < min_atime {
+            min_atime = oldest_writer - 300; // account for 5 min safety gap
+            info!(
+                "Oldest backup writer started at {}, extending minimum access time to {}",
+                TimeSpan::from(Duration::from_secs(oldest_writer as u64)),
+                proxmox_time::epoch_to_rfc3339_utc(min_atime)?,
+            );
+        }
+
+        let tuning: DatastoreTuning = serde_json::from_value(
+            DatastoreTuning::API_SCHEMA
+                .parse_property_string(gc_store_config.tuning.as_deref().unwrap_or(""))?,
+        )?;
+        let gc_cache_capacity = if let Some(capacity) = tuning.gc_cache_capacity {
+            info!("Using chunk digest cache capacity of {capacity}.");
+            capacity
+        } else {
+            1024 * 1024
+        };

         info!("Start GC phase1 (mark used chunks)");

-        self.mark_used_chunks(&mut gc_status, worker)?;
+        self.mark_used_chunks(&mut gc_status, worker, gc_cache_capacity)
+            .context("marking used chunks failed")?;

         info!("Start GC phase2 (sweep unused chunks)");
         self.inner.chunk_store.sweep_unused_chunks(
             oldest_writer,
-            phase1_start_time,
+            min_atime,
             &mut gc_status,
             worker,
         )?;
@@ -1305,7 +1474,9 @@ impl DataStore {
             bail!("snapshot {} does not exist!", backup_dir.dir());
         }

-        let _guard = lock_dir_noblock(&full_path, "snapshot", "possibly running or in use")?;
+        let _guard = backup_dir.lock().with_context(|| {
+            format!("while updating the protection status of snapshot '{backup_dir:?}'")
+        })?;

         let protected_path = backup_dir.protected_file();
         if protection {
@@ -1562,4 +1733,8 @@ impl DataStore {

         Ok(())
     }
+
+    pub fn old_locking(&self) -> bool {
+        *OLD_LOCKING
+    }
 }
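The cutoff arithmetic introduced above, isolated for clarity: `gc-atime-cutoff` is a tuning value in minutes, the default window is 24h plus a 5 minute safety gap, and a still-running writer can only widen the protected window. A sketch with the same numbers as the hunk (the function name is illustrative only):

```rust
/// Returns the minimum atime below which a chunk may be swept in GC phase 2.
fn compute_min_atime(phase1_start: i64, oldest_writer: i64, atime_cutoff_minutes: Option<u64>) -> i64 {
    // default: 24h + 5m = 86_700 seconds
    let cutoff = atime_cutoff_minutes.map(|m| m * 60).unwrap_or(3600 * 24 + 300);

    let mut min_atime = phase1_start - cutoff as i64;
    if oldest_writer < min_atime {
        // keep chunks a still-running writer may reference, plus a 5 min gap
        min_atime = oldest_writer - 300;
    }
    min_atime
}
```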
@@ -1,13 +1,14 @@
 use std::fs::File;
 use std::os::unix::io::{AsRawFd, FromRawFd};
 use std::path::Path;
-use std::sync::Arc;
 use std::rc::Rc;
+use std::sync::Arc;

-use anyhow::{bail, Error};
+use anyhow::{bail, Context, Error};
 use nix::dir::Dir;
-use proxmox_sys::fs::lock_dir_noblock_shared;
+use nix::fcntl::OFlag;
+use nix::sys::stat::Mode;
+use pbs_config::BackupLockGuard;

 use pbs_api_types::{
     print_store_and_ns, ArchiveType, BackupNamespace, Operation, CLIENT_LOG_BLOB_NAME,
@@ -28,6 +29,10 @@ pub struct SnapshotReader {
     datastore_name: String,
     file_list: Vec<String>,
     locked_dir: Dir,
+
+    // while this is never read, the lock needs to be kept until the
+    // reader is dropped to ensure valid locking semantics
+    _lock: BackupLockGuard,
 }

 impl SnapshotReader {
@@ -48,8 +53,12 @@ impl SnapshotReader {
             bail!("snapshot {} does not exist!", snapshot.dir());
         }

-        let locked_dir =
-            lock_dir_noblock_shared(&snapshot_path, "snapshot", "locked by another operation")?;
+        let lock = snapshot
+            .lock_shared()
+            .with_context(|| format!("while trying to read snapshot '{snapshot:?}'"))?;
+
+        let locked_dir = Dir::open(&snapshot_path, OFlag::O_RDONLY, Mode::empty())
+            .with_context(|| format!("unable to open snapshot directory {snapshot_path:?}"))?;

         let datastore_name = datastore.name().to_string();
         let manifest = match snapshot.load_manifest() {
@@ -80,6 +89,7 @@ impl SnapshotReader {
             datastore_name,
             file_list,
             locked_dir,
+            _lock: lock,
         })
     }

|
@ -47,7 +47,7 @@ fn open_lock_file(name: &str) -> Result<(std::fs::File, CreateOptions), Error> {
|
|||||||
let timeout = std::time::Duration::new(10, 0);
|
let timeout = std::time::Duration::new(10, 0);
|
||||||
|
|
||||||
Ok((
|
Ok((
|
||||||
open_file_locked(lock_path, timeout, true, options.clone())?,
|
open_file_locked(lock_path, timeout, true, options)?,
|
||||||
options,
|
options,
|
||||||
))
|
))
|
||||||
}
|
}
|
||||||
|
@ -15,7 +15,6 @@
|
|||||||
use anyhow::{bail, Error};
|
use anyhow::{bail, Error};
|
||||||
use serde_json::Value;
|
use serde_json::Value;
|
||||||
|
|
||||||
use proxmox_log::init_cli_logger;
|
|
||||||
use proxmox_router::cli::*;
|
use proxmox_router::cli::*;
|
||||||
use proxmox_router::RpcEnvironment;
|
use proxmox_router::RpcEnvironment;
|
||||||
use proxmox_schema::{api, ArraySchema, IntegerSchema, Schema, StringSchema};
|
use proxmox_schema::{api, ArraySchema, IntegerSchema, Schema, StringSchema};
|
||||||
@ -800,7 +799,9 @@ fn options(
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn main() -> Result<(), Error> {
|
fn main() -> Result<(), Error> {
|
||||||
init_cli_logger("PBS_LOG", proxmox_log::LevelFilter::INFO)?;
|
proxmox_log::Logger::from_env("PBS_LOG", proxmox_log::LevelFilter::INFO)
|
||||||
|
.stderr()
|
||||||
|
.init()?;
|
||||||
|
|
||||||
let uid = nix::unistd::Uid::current();
|
let uid = nix::unistd::Uid::current();
|
||||||
|
|
||||||
|
@ -16,7 +16,6 @@ use std::fs::File;
|
|||||||
use anyhow::{bail, Error};
|
use anyhow::{bail, Error};
|
||||||
use serde_json::Value;
|
use serde_json::Value;
|
||||||
|
|
||||||
use proxmox_log::init_cli_logger;
|
|
||||||
use proxmox_router::cli::*;
|
use proxmox_router::cli::*;
|
||||||
use proxmox_router::RpcEnvironment;
|
use proxmox_router::RpcEnvironment;
|
||||||
use proxmox_schema::api;
|
use proxmox_schema::api;
|
||||||
@ -388,7 +387,9 @@ fn scan(param: Value) -> Result<(), Error> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn main() -> Result<(), Error> {
|
fn main() -> Result<(), Error> {
|
||||||
init_cli_logger("PBS_LOG", proxmox_log::LevelFilter::INFO)?;
|
proxmox_log::Logger::from_env("PBS_LOG", proxmox_log::LevelFilter::INFO)
|
||||||
|
.stderr()
|
||||||
|
.init()?;
|
||||||
|
|
||||||
let uid = nix::unistd::Uid::current();
|
let uid = nix::unistd::Uid::current();
|
||||||
|
|
||||||
|
@ -659,7 +659,8 @@ impl SgTape {
|
|||||||
pub fn wait_until_ready(&mut self, timeout: Option<u64>) -> Result<(), Error> {
|
pub fn wait_until_ready(&mut self, timeout: Option<u64>) -> Result<(), Error> {
|
||||||
let start = SystemTime::now();
|
let start = SystemTime::now();
|
||||||
let timeout = timeout.unwrap_or(Self::SCSI_TAPE_DEFAULT_TIMEOUT as u64);
|
let timeout = timeout.unwrap_or(Self::SCSI_TAPE_DEFAULT_TIMEOUT as u64);
|
||||||
let max_wait = std::time::Duration::new(timeout, 0);
|
let mut max_wait = std::time::Duration::new(timeout, 0);
|
||||||
|
let mut increased_timeout = false;
|
||||||
|
|
||||||
loop {
|
loop {
|
||||||
match self.test_unit_ready() {
|
match self.test_unit_ready() {
|
||||||
@ -667,6 +668,16 @@ impl SgTape {
|
|||||||
_ => {
|
_ => {
|
||||||
std::thread::sleep(std::time::Duration::new(1, 0));
|
std::thread::sleep(std::time::Duration::new(1, 0));
|
||||||
if start.elapsed()? > max_wait {
|
if start.elapsed()? > max_wait {
|
||||||
|
if !increased_timeout {
|
||||||
|
if let Ok(DeviceActivity::Calibrating) =
|
||||||
|
read_device_activity(&mut self.file)
|
||||||
|
{
|
||||||
|
log::info!("Detected drive calibration, increasing timeout to 2 hours 5 minutes");
|
||||||
|
max_wait = std::time::Duration::new(2 * 60 * 60 + 5 * 60, 0);
|
||||||
|
increased_timeout = true;
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
}
|
||||||
bail!("wait_until_ready failed - got timeout");
|
bail!("wait_until_ready failed - got timeout");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -133,7 +133,7 @@ impl<K: std::cmp::Eq + std::hash::Hash + Copy, V> LruCache<K, V> {
|
|||||||
|
|
||||||
/// Insert or update an entry identified by `key` with the given `value`.
|
/// Insert or update an entry identified by `key` with the given `value`.
|
||||||
/// This entry is placed as the most recently used node at the head.
|
/// This entry is placed as the most recently used node at the head.
|
||||||
pub fn insert(&mut self, key: K, value: V) {
|
pub fn insert(&mut self, key: K, value: V) -> bool {
|
||||||
match self.map.entry(key) {
|
match self.map.entry(key) {
|
||||||
Entry::Occupied(mut o) => {
|
Entry::Occupied(mut o) => {
|
||||||
// Node present, update value
|
// Node present, update value
|
||||||
@ -142,6 +142,7 @@ impl<K: std::cmp::Eq + std::hash::Hash + Copy, V> LruCache<K, V> {
|
|||||||
let mut node = unsafe { Box::from_raw(node_ptr) };
|
let mut node = unsafe { Box::from_raw(node_ptr) };
|
||||||
node.value = value;
|
node.value = value;
|
||||||
let _node_ptr = Box::into_raw(node);
|
let _node_ptr = Box::into_raw(node);
|
||||||
|
true
|
||||||
}
|
}
|
||||||
Entry::Vacant(v) => {
|
Entry::Vacant(v) => {
|
||||||
// Node not present, insert a new one
|
// Node not present, insert a new one
|
||||||
@ -159,6 +160,7 @@ impl<K: std::cmp::Eq + std::hash::Hash + Copy, V> LruCache<K, V> {
|
|||||||
if self.map.len() > self.capacity {
|
if self.map.len() > self.capacity {
|
||||||
self.pop_tail();
|
self.pop_tail();
|
||||||
}
|
}
|
||||||
|
false
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
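Note: with the change above, LruCache::insert reports whether the key was already present (true when an existing node is updated, false for a fresh insertion). A minimal caller-side sketch of how that return value could be used, assuming the cache is constructed with LruCache::new(capacity) as elsewhere in pbs-tools; the digest type and capacity are illustrative only:

use pbs_tools::lru_cache::LruCache;

/// Counts how many of the given digests were already cached.
/// Relies only on the bool now returned by insert():
/// true = key was present (value updated), false = newly inserted.
fn count_already_cached(digests: &[[u8; 32]]) -> usize {
    // Capacity is arbitrary for this sketch; LruCache::new is assumed.
    let mut cache: LruCache<[u8; 32], ()> = LruCache::new(1024);
    digests
        .iter()
        .filter(|digest| cache.insert(**digest, ()))
        .count()
}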
@@ -16,7 +16,6 @@ use xdg::BaseDirectories;
use pathpatterns::{MatchEntry, MatchType, PatternFlag};
use proxmox_async::blocking::TokioWriterAdapter;
use proxmox_io::StdChannelWriter;
-use proxmox_log::init_cli_logger;
use proxmox_router::{cli::*, ApiMethod, RpcEnvironment};
use proxmox_schema::api;
use proxmox_sys::fs::{file_get_json, image_size, replace_file, CreateOptions};

@@ -632,9 +631,10 @@ fn spawn_catalog_upload(
backupspec: {
type: Array,
description:
-"List of backup source specifications ([<label.ext>:<path>] ...), the \
-specifications 'label' must contain alphanumerics, hyphens and underscores \
-only.",
+"List of backup source specifications:\
+\n\n[<archive-name>.<type>:<source-path>] ...\n\n\
+The 'archive-name' must only contain alphanumerics, hyphens and underscores \
+while the 'type' must be either 'pxar', 'img', 'conf' or 'log'.",
items: {
schema: BACKUP_SOURCE_SCHEMA,
}

@@ -827,40 +827,36 @@ async fn create_backup(
let mut target_set = HashSet::new();

for backupspec in backupspec_list {
-let spec = parse_backup_specification(backupspec.as_str().unwrap())?;
-let filename = &spec.config_string;
-let target = &spec.archive_name;
+let pbs_client::BackupSpecification {
+archive_name: target,
+config_string: filename,
+spec_type,
+} = parse_backup_specification(backupspec.as_str().unwrap())?;

-if target_set.contains(target) {
+if target_set.contains(&target) {
bail!("got target twice: '{}'", target);
}
-target_set.insert(target.to_string());
+target_set.insert(target.clone());

use std::os::unix::fs::FileTypeExt;

-let metadata = std::fs::metadata(filename)
+let metadata = std::fs::metadata(&filename)
.map_err(|err| format_err!("unable to access '{}' - {}", filename, err))?;
let file_type = metadata.file_type();

-match spec.spec_type {
+match spec_type {
BackupSpecificationType::PXAR => {
if !file_type.is_dir() {
bail!("got unexpected file type (expected directory)");
}
-upload_list.push((
-BackupSpecificationType::PXAR,
-filename.to_owned(),
-target.to_owned(),
-"didx",
-0,
-));
+upload_list.push((BackupSpecificationType::PXAR, filename, target, "didx", 0));
}
BackupSpecificationType::IMAGE => {
if !(file_type.is_file() || file_type.is_block_device()) {
bail!("got unexpected file type (expected file or block device)");
}

-let size = image_size(&PathBuf::from(filename))?;
+let size = image_size(&PathBuf::from(&filename))?;

if size == 0 {
bail!("got zero-sized file '{}'", filename);

@@ -868,8 +864,8 @@ async fn create_backup(

upload_list.push((
BackupSpecificationType::IMAGE,
-filename.to_owned(),
-target.to_owned(),
+filename,
+target,
"fidx",
size,
));

@@ -880,8 +876,8 @@ async fn create_backup(
}
upload_list.push((
BackupSpecificationType::CONFIG,
-filename.to_owned(),
-target.to_owned(),
+filename,
+target,
"blob",
metadata.len(),
));

@@ -892,8 +888,8 @@ async fn create_backup(
}
upload_list.push((
BackupSpecificationType::LOGFILE,
-filename.to_owned(),
-target.to_owned(),
+filename,
+target,
"blob",
metadata.len(),
));

@@ -1973,7 +1969,10 @@ impl ReadAt for BufferedDynamicReadAt {

fn main() {
pbs_tools::setup_libc_malloc_opts();
-init_cli_logger("PBS_LOG", proxmox_log::LevelFilter::INFO).expect("failed to initiate logger");
+proxmox_log::Logger::from_env("PBS_LOG", proxmox_log::LevelFilter::INFO)
+.stderr()
+.init()
+.expect("failed to initiate logger");

let backup_cmd_def = CliCommand::new(&API_METHOD_CREATE_BACKUP)
.arg_param(&["backupspec"])

@@ -10,7 +10,6 @@ use serde_json::{json, Value};
use tokio::io::AsyncWriteExt;

use proxmox_compression::zstd::ZstdEncoder;
-use proxmox_log::init_cli_logger;
use proxmox_router::cli::{
complete_file_name, default_table_format_options, format_and_print_result_full,
get_output_format, run_cli_command, CliCommand, CliCommandMap, CliEnvironment, ColumnConfig,

@@ -629,7 +628,11 @@ fn main() {
true => proxmox_log::LevelFilter::DEBUG,
false => proxmox_log::LevelFilter::INFO,
};
-init_cli_logger("PBS_LOG", loglevel).expect("failed to initiate logger");
+
+proxmox_log::Logger::from_env("PBS_LOG", loglevel)
+.stderr()
+.init()
+.expect("failed to initiate logger");

let list_cmd_def = CliCommand::new(&API_METHOD_LIST)
.arg_param(&["snapshot", "path"])

@@ -22,7 +22,7 @@ use pbs_client::pxar::{
use pxar::EntryKind;

use proxmox_human_byte::HumanByte;
-use proxmox_log::{debug, enabled, error, init_cli_logger, Level};
+use proxmox_log::{debug, enabled, error, Level};
use proxmox_router::cli::*;
use proxmox_schema::api;

@@ -574,7 +574,10 @@ fn dump_archive(archive: String, payload_input: Option<String>) -> Result<(), Er
}

fn main() {
-init_cli_logger("PXAR_LOG", proxmox_log::LevelFilter::INFO).expect("failed to initiate logger");
+proxmox_log::Logger::from_env("PXAR_LOG", proxmox_log::LevelFilter::INFO)
+.stderr()
+.init()
+.expect("failed to initiate logger");

let cmd_def = CliCommandMap::new()
.insert(

@@ -6,7 +6,7 @@ use std::os::unix::fs::OpenOptionsExt;

use anyhow::{bail, format_err};
use bytes::Bytes;
-use hyper::{Body, Request};
+use hyper::{body::HttpBody, Body, Request};
use nix::sys::stat::Mode;
use serde::{Deserialize, Serialize};

@@ -508,9 +508,11 @@ impl AcmeClient {
let (parts, body) = response.into_parts();

let status = parts.status.as_u16();
-let body = hyper::body::to_bytes(body)
+let body = body
+.collect()
.await
-.map_err(|err| Error::Custom(format!("failed to retrieve response body: {}", err)))?;
+.map_err(|err| Error::Custom(format!("failed to retrieve response body: {}", err)))?
+.to_bytes();

let got_nonce = if let Some(new_nonce) = parts.headers.get(proxmox_acme::REPLAY_NONCE) {
let new_nonce = new_nonce.to_str().map_err(|err| {

@@ -29,19 +29,6 @@ use crate::server::jobstate::Job;
/// Authentication domain/realm index.
fn list_domains(rpcenv: &mut dyn RpcEnvironment) -> Result<Vec<BasicRealmInfo>, Error> {
let mut list = Vec::new();

-list.push(serde_json::from_value(json!({
-"realm": "pam",
-"type": "pam",
-"comment": "Linux PAM standard authentication",
-"default": Some(true),
-}))?);
-list.push(serde_json::from_value(json!({
-"realm": "pbs",
-"type": "pbs",
-"comment": "Proxmox Backup authentication server",
-}))?);

let (config, digest) = pbs_config::domains::config()?;

for (_, (section_type, v)) in config.sections.iter() {

@@ -8,16 +8,16 @@ use std::collections::HashMap;

use proxmox_router::{ApiMethod, Permission, Router, RpcEnvironment, SubdirMap};
use proxmox_schema::api;
+use proxmox_section_config::SectionConfigData;
use proxmox_tfa::api::TfaConfig;

use pbs_api_types::{
ApiToken, Authid, Tokenname, User, UserUpdater, UserWithTokens, Userid, ENABLE_USER_SCHEMA,
EXPIRE_USER_SCHEMA, PASSWORD_FORMAT, PBS_PASSWORD_SCHEMA, PRIV_PERMISSIONS_MODIFY,
-PRIV_SYS_AUDIT, PROXMOX_CONFIG_DIGEST_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA,
+PRIV_SYS_AUDIT, PROXMOX_CONFIG_DIGEST_SCHEMA, REGENERATE_TOKEN_SCHEMA,
+SINGLE_LINE_COMMENT_SCHEMA,
};
-use pbs_config::token_shadow;
+use pbs_config::{acl::AclTree, token_shadow, CachedUserInfo};

-use pbs_config::CachedUserInfo;

fn new_user_with_tokens(user: User, tfa: &TfaConfig) -> UserWithTokens {
UserWithTokens {

@@ -354,6 +354,7 @@ pub async fn update_user(
pub fn delete_user(userid: Userid, digest: Option<String>) -> Result<(), Error> {
let _lock = pbs_config::user::lock_config()?;
let _tfa_lock = crate::config::tfa::write_lock()?;
+let _acl_lock = pbs_config::acl::lock_config()?;

let (mut config, expected_digest) = pbs_config::user::config()?;

@@ -381,6 +382,22 @@ pub fn delete_user(userid: Userid, digest: Option<String>) -> Result<(), Error>
eprintln!("error updating TFA config after deleting user {userid:?} {err}",);
}

+let user_tokens: Vec<ApiToken> = config
+.convert_to_typed_array::<ApiToken>("token")?
+.into_iter()
+.filter(|token| token.tokenid.user().eq(&userid))
+.collect();
+
+let (mut acl_tree, _digest) = pbs_config::acl::config()?;
+for token in user_tokens {
+if let Some(name) = token.tokenid.tokenname() {
+do_delete_token(name.to_owned(), &userid, &mut config, &mut acl_tree)?;
+}
+}
+
+pbs_config::user::save_config(&config)?;
+pbs_config::acl::save_config(&acl_tree)?;
+
Ok(())
}

@@ -495,8 +512,7 @@ pub fn generate_token(
);
}

-let secret = format!("{:x}", proxmox_uuid::Uuid::generate());
-token_shadow::set_secret(&tokenid, &secret)?;
+let secret = token_shadow::generate_and_set_secret(&tokenid)?;

let token = ApiToken {
tokenid,

@@ -515,6 +531,15 @@ pub fn generate_token(
}))
}

+#[api()]
+#[derive(Serialize, Deserialize)]
+#[serde(rename_all = "kebab-case")]
+/// The set of properties that can be deleted from a token.
+pub enum DeletableTokenProperty {
+/// Delete the comment property.
+Comment,
+}
+
#[api(
protected: true,
input: {

@@ -537,11 +562,33 @@ pub fn generate_token(
schema: EXPIRE_USER_SCHEMA,
optional: true,
},
+regenerate: {
+schema: REGENERATE_TOKEN_SCHEMA,
+optional: true,
+},
+delete: {
+description: "List of properties to delete.",
+type: Array,
+optional: true,
+items: {
+type: DeletableTokenProperty,
+}
+},
digest: {
optional: true,
schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
},
},
+},
+returns: {
+description: "Regenerated secret, if regenerate is set.",
+properties: {
+secret: {
+type: String,
+optional: true,
+description: "The new API token secret",
+},
+},
},
access: {
permission: &Permission::Or(&[

@@ -557,8 +604,10 @@ pub fn update_token(
comment: Option<String>,
enable: Option<bool>,
expire: Option<i64>,
+regenerate: Option<bool>,
+delete: Option<Vec<DeletableTokenProperty>>,
digest: Option<String>,
-) -> Result<(), Error> {
+) -> Result<Value, Error> {
let _lock = pbs_config::user::lock_config()?;

let (mut config, expected_digest) = pbs_config::user::config()?;

@@ -573,6 +622,14 @@ pub fn update_token(

let mut data: ApiToken = config.lookup("token", &tokenid_string)?;

+if let Some(delete) = delete {
+for delete_prop in delete {
+match delete_prop {
+DeletableTokenProperty::Comment => data.comment = None,
+}
+}
+}
+
if let Some(comment) = comment {
let comment = comment.trim().to_string();
if comment.is_empty() {

@@ -590,11 +647,21 @@ pub fn update_token(
data.expire = if expire > 0 { Some(expire) } else { None };
}

+let new_secret = if regenerate.unwrap_or_default() {
+Some(token_shadow::generate_and_set_secret(&tokenid)?)
+} else {
+None
+};
+
config.set_data(&tokenid_string, "token", &data)?;

pbs_config::user::save_config(&config)?;

-Ok(())
+if let Some(secret) = new_secret {
+Ok(json!({"secret": secret}))
+} else {
+Ok(Value::Null)
+}
}

#[api(
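Note: because update_token now returns a serde_json::Value that only carries a "secret" member when regeneration was requested, callers have to treat that field as optional. A small, hypothetical client-side sketch; the response values below are hand-built stand-ins for what the API call would return:

use serde_json::{json, Value};

/// Extracts the regenerated secret from an update-token response, if any.
fn new_secret(response: &Value) -> Option<&str> {
    response.get("secret").and_then(Value::as_str)
}

fn main() {
    // Stand-ins for the two possible shapes of the response.
    let regenerated = json!({ "secret": "example-secret" });
    let unchanged = Value::Null;
    assert_eq!(new_secret(&regenerated), Some("example-secret"));
    assert_eq!(new_secret(&unchanged), None);
}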
@@ -626,29 +693,41 @@ pub fn delete_token(
token_name: Tokenname,
digest: Option<String>,
) -> Result<(), Error> {
-let _lock = pbs_config::user::lock_config()?;
+let _acl_lock = pbs_config::acl::lock_config()?;
+let _user_lock = pbs_config::user::lock_config()?;

-let (mut config, expected_digest) = pbs_config::user::config()?;
+let (mut user_config, expected_digest) = pbs_config::user::config()?;

if let Some(ref digest) = digest {
let digest = <[u8; 32]>::from_hex(digest)?;
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}

+let (mut acl_config, _digest) = pbs_config::acl::config()?;
+do_delete_token(token_name, &userid, &mut user_config, &mut acl_config)?;
+
+pbs_config::user::save_config(&user_config)?;
+pbs_config::acl::save_config(&acl_config)?;
+Ok(())
+}
+
+fn do_delete_token(
+token_name: Tokenname,
+userid: &Userid,
+user_config: &mut SectionConfigData,
+acl_config: &mut AclTree,
+) -> Result<(), Error> {
let tokenid = Authid::from((userid.clone(), Some(token_name.clone())));
let tokenid_string = tokenid.to_string();
-if config.sections.remove(&tokenid_string).is_none() {
+if user_config.sections.remove(&tokenid_string).is_none() {
bail!(
"token '{}' of user '{}' does not exist.",
token_name.as_str(),
userid
);
}

token_shadow::delete_secret(&tokenid)?;
+acl_config.delete_authid(&tokenid);
-pbs_config::user::save_config(&config)?;

Ok(())
}

@@ -7,7 +7,7 @@ use std::os::unix::ffi::OsStrExt;
use std::path::{Path, PathBuf};
use std::sync::Arc;

-use anyhow::{bail, format_err, Error};
+use anyhow::{bail, format_err, Context, Error};
use futures::*;
use hyper::http::request::Parts;
use hyper::{header, Body, Response, StatusCode};

@@ -313,13 +313,23 @@ pub async fn delete_group(
)?;

let delete_stats = datastore.remove_backup_group(&ns, &group)?;
-if !delete_stats.all_removed() {
-if error_on_protected {
-bail!("group only partially deleted due to protected snapshots");
-} else {
-warn!("group only partially deleted due to protected snapshots");
-}
+
+let error_msg = if datastore.old_locking() {
+"could not remove empty groups directories due to old locking mechanism.\n\
+If you are an admin, please reboot PBS or ensure no old backup job is running anymore, \
+then remove the file '/run/proxmox-backup/old-locking', and reload all PBS daemons"
+} else if !delete_stats.all_removed() {
+"group only partially deleted due to protected snapshots"
+} else {
+return Ok(delete_stats);
+};
+
+if error_on_protected {
+bail!(error_msg);
+} else {
+warn!(error_msg);
}

Ok(delete_stats)
})
.await?

@@ -1218,11 +1228,7 @@ pub fn start_garbage_collection(
let upid_str =
crate::server::do_garbage_collection_job(job, datastore, &auth_id, None, to_stdout)
.map_err(|err| {
-format_err!(
-"unable to start garbage collection job on datastore {} - {}",
-store,
-err
-)
+format_err!("unable to start garbage collection job on datastore {store} - {err:#}")
})?;

Ok(json!(upid_str))

@@ -2347,10 +2353,9 @@ pub async fn set_backup_owner(
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;

let backup_group = datastore.backup_group(ns, backup_group);
+let owner = backup_group.get_owner()?;

if owner_check_required {
-let owner = backup_group.get_owner()?;

let allowed = match (owner.is_token(), new_owner.is_token()) {
(true, true) => {
// API token to API token, owned by same user

@@ -2397,6 +2402,14 @@ pub async fn set_backup_owner(
);
}

+let _guard = backup_group
+.lock()
+.with_context(|| format!("while setting the owner of group '{backup_group:?}'"))?;
+
+if owner != backup_group.get_owner()? {
+bail!("{owner} does not own this group anymore");
+}
+
backup_group.set_owner(&new_owner, true)?;

Ok(())

@@ -2416,20 +2429,12 @@ fn setup_mounted_device(datastore: &DataStoreConfig, tmp_mount_path: &str) -> Re
.owner(backup_user.uid)
.group(backup_user.gid);

-proxmox_sys::fs::create_path(
-&mount_point,
-Some(default_options.clone()),
-Some(options.clone()),
-)
-.map_err(|e| format_err!("creating mountpoint '{mount_point}' failed: {e}"))?;
+proxmox_sys::fs::create_path(&mount_point, Some(default_options), Some(options))
+.map_err(|e| format_err!("creating mountpoint '{mount_point}' failed: {e}"))?;

// can't be created before it is mounted, so we have to do it here
-proxmox_sys::fs::create_path(
-&full_store_path,
-Some(default_options.clone()),
-Some(options.clone()),
-)
-.map_err(|e| format_err!("creating datastore path '{full_store_path}' failed: {e}"))?;
+proxmox_sys::fs::create_path(&full_store_path, Some(default_options), Some(options))
+.map_err(|e| format_err!("creating datastore path '{full_store_path}' failed: {e}"))?;

info!(
"bind mount '{}'({}) to '{}'",

@@ -2468,8 +2473,8 @@ pub fn do_mount_device(datastore: DataStoreConfig) -> Result<(), Error> {
let default_options = proxmox_sys::fs::CreateOptions::new();
proxmox_sys::fs::create_path(
&tmp_mount_path,
-Some(default_options.clone()),
-Some(default_options.clone()),
+Some(default_options),
+Some(default_options),
)?;

info!("temporarily mounting '{uuid}' to '{}'", tmp_mount_path);

@@ -167,7 +167,14 @@ pub fn delete_namespace(
let (removed_all, stats) = datastore.remove_namespace_recursive(&ns, delete_groups)?;
if !removed_all {
let err_msg = if delete_groups {
-"group only partially deleted due to protected snapshots"
+if datastore.old_locking() {
+"could not remove empty group directoriess due to old locking mechanism.\n\
+If you are an admin, please reboot PBS or ensure no old backup job is running \
+anymore, then remove the file '/run/proxmox-backup/old-locking', and reload all \
+PBS daemons"
+} else {
+"group only partially deleted due to protected snapshots"
+}
} else {
"only partially deleted due to existing groups but `delete-groups` not true"
};

@@ -1,5 +1,6 @@
-use anyhow::{bail, format_err, Error};
-use nix::dir::Dir;
+use anyhow::{bail, format_err, Context, Error};
+use pbs_config::BackupLockGuard;

use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use tracing::info;

@@ -8,7 +9,7 @@ use ::serde::Serialize;
use serde_json::{json, Value};

use proxmox_router::{RpcEnvironment, RpcEnvironmentType};
-use proxmox_sys::fs::{lock_dir_noblock_shared, replace_file, CreateOptions};
+use proxmox_sys::fs::{replace_file, CreateOptions};

use pbs_api_types::Authid;
use pbs_datastore::backup_info::{BackupDir, BackupInfo};

@@ -635,7 +636,7 @@ impl BackupEnvironment {
/// If verify-new is set on the datastore, this will run a new verify task
/// for the backup. If not, this will return and also drop the passed lock
/// immediately.
-pub fn verify_after_complete(&self, excl_snap_lock: Dir) -> Result<(), Error> {
+pub fn verify_after_complete(&self, excl_snap_lock: BackupLockGuard) -> Result<(), Error> {
self.ensure_finished()?;

if !self.datastore.verify_new() {

@@ -645,12 +646,12 @@ impl BackupEnvironment {

// Downgrade to shared lock, the backup itself is finished
drop(excl_snap_lock);
-let snap_lock = lock_dir_noblock_shared(
-&self.backup_dir.full_path(),
-"snapshot",
-"snapshot is already locked by another operation",
-)?;
+let snap_lock = self.backup_dir.lock_shared().with_context(|| {
+format!(
+"while trying to verify snapshot '{:?}' after completion",
+self.backup_dir
+)
+})?;
let worker_id = format!(
"{}:{}/{}/{:08X}",
self.datastore.name(),

@@ -1,6 +1,6 @@
//! Backup protocol (HTTP2 upgrade)

-use anyhow::{bail, format_err, Error};
+use anyhow::{bail, format_err, Context, Error};
use futures::*;
use hex::FromHex;
use hyper::header::{HeaderValue, CONNECTION, UPGRADE};

@@ -17,7 +17,6 @@ use proxmox_router::{
};
use proxmox_schema::*;
use proxmox_sortable_macro::sortable;
-use proxmox_sys::fs::lock_dir_noblock_shared;

use pbs_api_types::{
ArchiveType, Authid, BackupNamespace, BackupType, Operation, VerifyState,

@@ -186,12 +185,10 @@ fn upgrade_to_backup_protocol(
}

// lock last snapshot to prevent forgetting/pruning it during backup
-let full_path = last.backup_dir.full_path();
-Some(lock_dir_noblock_shared(
-&full_path,
-"snapshot",
-"base snapshot is already locked by another operation",
-)?)
+let guard = last.backup_dir
+.lock_shared()
+.with_context(|| format!("while locking last snapshot during backup '{last:?}'"))?;
+Some(guard)
} else {
None
};

@@ -239,14 +236,12 @@ fn upgrade_to_backup_protocol(
.and_then(move |conn| {
env2.debug("protocol upgrade done");

-let mut http = hyper::server::conn::Http::new()
-.with_executor(ExecInheritLogContext);
-http.http2_only(true);
+let mut http = hyper::server::conn::http2::Builder::new(ExecInheritLogContext);
// increase window size: todo - find optiomal size
let window_size = 32 * 1024 * 1024; // max = (1 << 31) - 2
-http.http2_initial_stream_window_size(window_size);
-http.http2_initial_connection_window_size(window_size);
-http.http2_max_frame_size(4 * 1024 * 1024);
+http.initial_stream_window_size(window_size);
+http.initial_connection_window_size(window_size);
+http.max_frame_size(4 * 1024 * 1024);

let env3 = env2.clone();
http.serve_connection(conn, service).map(move |result| {

@@ -858,8 +853,8 @@ fn download_previous(
};
if let Some(index) = index {
env.log(format!(
-"register chunks in '{}' from previous backup.",
-archive_name
+"register chunks in '{archive_name}' from previous backup '{}'.",
+last_backup.backup_dir.dir(),
));

for pos in 0..index.index_count() {

@@ -870,7 +865,10 @@ fn download_previous(
}
}

-env.log(format!("download '{}' from previous backup.", archive_name));
+env.log(format!(
+"download '{archive_name}' from previous backup '{}'.",
+last_backup.backup_dir.dir(),
+));
crate::api2::helpers::create_download_response(path).await
}
.boxed()

@@ -91,6 +91,10 @@ pub async fn create_ad_realm(
auth_helpers::store_ldap_bind_password(&config.realm, &password, &domain_config_lock)?;
}

+if let Some(true) = config.default {
+domains::unset_default_realm(&mut domains)?;
+}
+
domains.set_data(&config.realm, "ad", &config)?;

domains::save_config(&domains)?;

@@ -136,6 +140,8 @@ pub enum DeletableProperty {
Port,
/// Comment
Comment,
+/// Is default realm
+Default,
/// Verify server certificate
Verify,
/// Mode (ldap, ldap+starttls or ldaps),

@@ -217,6 +223,9 @@ pub async fn update_ad_realm(
DeletableProperty::Comment => {
config.comment = None;
}
+DeletableProperty::Default => {
+config.default = None;
+}
DeletableProperty::Port => {
config.port = None;
}

@@ -273,6 +282,13 @@ pub async fn update_ad_realm(
}
}

+if let Some(true) = update.default {
+domains::unset_default_realm(&mut domains)?;
+config.default = Some(true);
+} else {
+config.default = None;
+}
+
if let Some(mode) = update.mode {
config.mode = Some(mode);
}

@@ -81,6 +81,10 @@ pub fn create_ldap_realm(config: LdapRealmConfig, password: Option<String>) -> R
auth_helpers::store_ldap_bind_password(&config.realm, &password, &domain_config_lock)?;
}

+if let Some(true) = config.default {
+domains::unset_default_realm(&mut domains)?;
+}
+
domains.set_data(&config.realm, "ldap", &config)?;

domains::save_config(&domains)?;

@@ -171,6 +175,8 @@ pub enum DeletableProperty {
Port,
/// Comment
Comment,
+/// Is default realm
+Default,
/// Verify server certificate
Verify,
/// Mode (ldap, ldap+starttls or ldaps),

@@ -252,6 +258,9 @@ pub fn update_ldap_realm(
DeletableProperty::Comment => {
config.comment = None;
}
+DeletableProperty::Default => {
+config.default = None;
+}
DeletableProperty::Port => {
config.port = None;
}

@@ -312,6 +321,13 @@ pub fn update_ldap_realm(
}
}

+if let Some(true) = update.default {
+domains::unset_default_realm(&mut domains)?;
+config.default = Some(true);
+} else {
+config.default = None;
+}
+
if let Some(mode) = update.mode {
config.mode = Some(mode);
}

@@ -5,10 +5,14 @@ use proxmox_sortable_macro::sortable;
pub mod ad;
pub mod ldap;
pub mod openid;
+pub mod pam;
+pub mod pbs;
pub mod tfa;

#[sortable]
const SUBDIRS: SubdirMap = &sorted!([
+("pam", &pam::ROUTER),
+("pbs", &pbs::ROUTER),
("ad", &ad::ROUTER),
("ldap", &ldap::ROUTER),
("openid", &openid::ROUTER),

@@ -65,6 +65,10 @@ pub fn create_openid_realm(config: OpenIdRealmConfig) -> Result<(), Error> {
param_bail!("realm", "realm '{}' already exists.", config.realm);
}

+if let Some(true) = config.default {
+domains::unset_default_realm(&mut domains)?;
+}
+
domains.set_data(&config.realm, "openid", &config)?;

domains::save_config(&domains)?;

@@ -149,6 +153,8 @@ pub enum DeletableProperty {
ClientKey,
/// Delete the comment property.
Comment,
+/// Delete the default property.
+Default,
/// Delete the autocreate property
Autocreate,
/// Delete the scopes property

@@ -217,6 +223,9 @@ pub fn update_openid_realm(
DeletableProperty::Comment => {
config.comment = None;
}
+DeletableProperty::Default => {
+config.default = None;
+}
DeletableProperty::Autocreate => {
config.autocreate = None;
}

@@ -242,6 +251,13 @@ pub fn update_openid_realm(
}
}

+if let Some(true) = update.default {
+domains::unset_default_realm(&mut domains)?;
+config.default = Some(true);
+} else {
+config.default = None;
+}
+
if let Some(issuer_url) = update.issuer_url {
config.issuer_url = issuer_url;
}

src/api2/config/access/pam.rs (new file, 130 lines)
@@ -0,0 +1,130 @@
+use ::serde::{Deserialize, Serialize};
+use anyhow::Error;
+use hex::FromHex;
+
+use proxmox_router::{Permission, Router, RpcEnvironment};
+use proxmox_schema::api;
+
+use pbs_api_types::{
+PamRealmConfig, PamRealmConfigUpdater, PRIV_REALM_ALLOCATE, PRIV_SYS_AUDIT,
+PROXMOX_CONFIG_DIGEST_SCHEMA,
+};
+
+use pbs_config::domains;
+
+#[api(
+returns: {
+type: PamRealmConfig,
+},
+access: {
+permission: &Permission::Privilege(&["access", "domains"], PRIV_SYS_AUDIT, false),
+},
+)]
+/// Read the PAM realm configuration
+pub fn read_pam_realm(rpcenv: &mut dyn RpcEnvironment) -> Result<PamRealmConfig, Error> {
+let (domains, digest) = domains::config()?;
+
+let config = domains.lookup("pam", "pam")?;
+
+rpcenv["digest"] = hex::encode(digest).into();
+
+Ok(config)
+}
+
+#[api]
+#[derive(Serialize, Deserialize)]
+#[serde(rename_all = "kebab-case")]
+/// Deletable property name
+pub enum DeletableProperty {
+/// Delete the comment property.
+Comment,
+/// Delete the default property.
+Default,
+}
+
+#[api(
+protected: true,
+input: {
+properties: {
+update: {
+type: PamRealmConfigUpdater,
+flatten: true,
+},
+delete: {
+description: "List of properties to delete.",
+type: Array,
+optional: true,
+items: {
+type: DeletableProperty,
+}
+},
+digest: {
+optional: true,
+schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
+},
+},
+},
+returns: {
+type: PamRealmConfig,
+},
+access: {
+permission: &Permission::Privilege(&["access", "domains"], PRIV_REALM_ALLOCATE, false),
+},
+)]
+/// Update the PAM realm configuration
+pub fn update_pam_realm(
+update: PamRealmConfigUpdater,
+delete: Option<Vec<DeletableProperty>>,
+digest: Option<String>,
+_rpcenv: &mut dyn RpcEnvironment,
+) -> Result<(), Error> {
+let _lock = domains::lock_config()?;
+
+let (mut domains, expected_digest) = domains::config()?;
+
+if let Some(ref digest) = digest {
+let digest = <[u8; 32]>::from_hex(digest)?;
+crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
+}
+
+let mut config: PamRealmConfig = domains.lookup("pam", "pam")?;
+
+if let Some(delete) = delete {
+for delete_prop in delete {
+match delete_prop {
+DeletableProperty::Comment => {
+config.comment = None;
+}
+DeletableProperty::Default => {
+config.default = None;
+}
+}
+}
+}
+
+if let Some(comment) = update.comment {
+let comment = comment.trim().to_string();
+if comment.is_empty() {
+config.comment = None;
+} else {
+config.comment = Some(comment);
+}
+}
+
+if let Some(true) = update.default {
+pbs_config::domains::unset_default_realm(&mut domains)?;
+config.default = Some(true);
+} else {
+config.default = None;
+}
+
+domains.set_data("pam", "pam", &config)?;
+
+domains::save_config(&domains)?;
+
+Ok(())
+}
+
+pub const ROUTER: Router = Router::new()
+.get(&API_METHOD_READ_PAM_REALM)
+.put(&API_METHOD_UPDATE_PAM_REALM);

src/api2/config/access/pbs.rs (new file, 130 lines)
@@ -0,0 +1,130 @@
+use ::serde::{Deserialize, Serialize};
+use anyhow::Error;
+use hex::FromHex;
+
+use proxmox_router::{Permission, Router, RpcEnvironment};
+use proxmox_schema::api;
+
+use pbs_api_types::{
+PbsRealmConfig, PbsRealmConfigUpdater, PRIV_REALM_ALLOCATE, PRIV_SYS_AUDIT,
+PROXMOX_CONFIG_DIGEST_SCHEMA,
+};
+
+use pbs_config::domains;
+
+#[api(
+returns: {
+type: PbsRealmConfig,
+},
+access: {
+permission: &Permission::Privilege(&["access", "domains"], PRIV_SYS_AUDIT, false),
+},
+)]
+/// Read the Proxmox Backup authentication server realm configuration
+pub fn read_pbs_realm(rpcenv: &mut dyn RpcEnvironment) -> Result<PbsRealmConfig, Error> {
+let (domains, digest) = domains::config()?;
+
+let config = domains.lookup("pbs", "pbs")?;
+
+rpcenv["digest"] = hex::encode(digest).into();
+
+Ok(config)
+}
+
+#[api]
+#[derive(Serialize, Deserialize)]
+#[serde(rename_all = "kebab-case")]
+/// Deletable property name
+pub enum DeletableProperty {
+/// Delete the comment property.
+Comment,
+/// Delete the default property.
+Default,
+}
+
+#[api(
+protected: true,
+input: {
+properties: {
+update: {
+type: PbsRealmConfigUpdater,
+flatten: true,
+},
+delete: {
+description: "List of properties to delete.",
+type: Array,
+optional: true,
+items: {
+type: DeletableProperty,
+}
+},
+digest: {
+optional: true,
+schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
+},
+},
+},
+returns: {
+type: PbsRealmConfig,
+},
+access: {
+permission: &Permission::Privilege(&["access", "domains"], PRIV_REALM_ALLOCATE, false),
+},
+)]
+/// Update the Proxmox Backup authentication server realm configuration
+pub fn update_pbs_realm(
+update: PbsRealmConfigUpdater,
+delete: Option<Vec<DeletableProperty>>,
+digest: Option<String>,
+_rpcenv: &mut dyn RpcEnvironment,
+) -> Result<(), Error> {
+let _lock = domains::lock_config()?;
+
+let (mut domains, expected_digest) = domains::config()?;
+
+if let Some(ref digest) = digest {
+let digest = <[u8; 32]>::from_hex(digest)?;
+crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
+}
+
+let mut config: PbsRealmConfig = domains.lookup("pbs", "pbs")?;
+
+if let Some(delete) = delete {
+for delete_prop in delete {
+match delete_prop {
+DeletableProperty::Comment => {
+config.comment = None;
+}
+DeletableProperty::Default => {
+config.default = None;
+}
+}
+}
+}
+
+if let Some(comment) = update.comment {
+let comment = comment.trim().to_string();
+if comment.is_empty() {
+config.comment = None;
+} else {
+config.comment = Some(comment);
+}
+}
+
+if let Some(true) = update.default {
+pbs_config::domains::unset_default_realm(&mut domains)?;
+config.default = Some(true);
+} else {
+config.default = None;
+}
+
+domains.set_data("pbs", "pbs", &config)?;
+
+domains::save_config(&domains)?;
+
+Ok(())
+}
+
+pub const ROUTER: Router = Router::new()
+.get(&API_METHOD_READ_PBS_REALM)
+.put(&API_METHOD_UPDATE_PBS_REALM);

@@ -1,10 +1,10 @@
use std::path::{Path, PathBuf};

use ::serde::{Deserialize, Serialize};
-use anyhow::{bail, format_err, Error};
+use anyhow::{bail, Context, Error};
use hex::FromHex;
use serde_json::Value;
-use tracing::warn;
+use tracing::{info, warn};

use proxmox_router::{http_bail, Permission, Router, RpcEnvironment, RpcEnvironmentType};
use proxmox_schema::{api, param_bail, ApiType};

@@ -70,6 +70,30 @@ pub fn list_datastores(
Ok(list.into_iter().filter(filter_by_privs).collect())
}

+struct UnmountGuard {
+path: Option<PathBuf>,
+}
+
+impl UnmountGuard {
+fn new(path: Option<PathBuf>) -> Self {
+UnmountGuard { path }
+}
+
+fn disable(mut self) {
+self.path = None;
+}
+}
+
+impl Drop for UnmountGuard {
+fn drop(&mut self) {
+if let Some(path) = &self.path {
+if let Err(e) = unmount_by_mountpoint(path) {
+warn!("could not unmount device: {e}");
+}
+}
+}
+}
+
pub(crate) fn do_create_datastore(
_lock: BackupLockGuard,
mut config: SectionConfigData,
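Note: the UnmountGuard added above is a small RAII helper, cleanup runs in Drop unless disable() is called once the operation has succeeded. A stripped-down, generic sketch of the same pattern, independent of the datastore code; the closure and names are illustrative only:

/// Runs the stored cleanup on drop unless disable() was called first.
struct CleanupGuard<F: FnMut()> {
    cleanup: Option<F>,
}

impl<F: FnMut()> CleanupGuard<F> {
    fn new(cleanup: F) -> Self {
        Self { cleanup: Some(cleanup) }
    }

    /// Call once the fallible work succeeded; the cleanup is then skipped.
    fn disable(mut self) {
        self.cleanup = None;
    }
}

impl<F: FnMut()> Drop for CleanupGuard<F> {
    fn drop(&mut self) {
        if let Some(cleanup) = self.cleanup.as_mut() {
            cleanup();
        }
    }
}

fn main() {
    let guard = CleanupGuard::new(|| println!("rolling back, e.g. unmounting"));
    let succeeded = false; // pretend the work failed
    if succeeded {
        guard.disable();
    }
    // On the failure path the guard is dropped here and the cleanup runs.
}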
@@ -87,59 +111,66 @@ pub(crate) fn do_create_datastore(
param_bail!("path", err);
}

-let need_unmount = datastore.backing_device.is_some();
-if need_unmount {
-do_mount_device(datastore.clone())?;
-};

let tuning: DatastoreTuning = serde_json::from_value(
DatastoreTuning::API_SCHEMA
.parse_property_string(datastore.tuning.as_deref().unwrap_or(""))?,
)?;

-let res = if reuse_datastore {
-ChunkStore::verify_chunkstore(&path)
+let unmount_guard = if datastore.backing_device.is_some() {
+do_mount_device(datastore.clone())?;
+UnmountGuard::new(Some(path.clone()))
+} else {
+UnmountGuard::new(None)
+};
+
+let chunk_store = if reuse_datastore {
+ChunkStore::verify_chunkstore(&path).and_then(|_| {
+// Must be the only instance accessing and locking the chunk store,
+// dropping will close all other locks from this process on the lockfile as well.
+ChunkStore::open(
+&datastore.name,
+&path,
+tuning.sync_level.unwrap_or_default(),
+)
+})?
} else {
-let mut is_empty = true;
if let Ok(dir) = std::fs::read_dir(&path) {
for file in dir {
let name = file?.file_name();
let name = name.to_str();
if !name.is_some_and(|name| name.starts_with('.') || name == "lost+found") {
-is_empty = false;
-break;
+bail!("datastore path not empty");
}
}
}
-if is_empty {
-let backup_user = pbs_config::backup_user()?;
-ChunkStore::create(
-&datastore.name,
-path.clone(),
-backup_user.uid,
-backup_user.gid,
-tuning.sync_level.unwrap_or_default(),
-)
-.map(|_| ())
-} else {
-Err(format_err!("datastore path not empty"))
-}
+let backup_user = pbs_config::backup_user()?;
+ChunkStore::create(
+&datastore.name,
+path.clone(),
+backup_user.uid,
+backup_user.gid,
+tuning.sync_level.unwrap_or_default(),
+)?
};

-if res.is_err() {
-if need_unmount {
-if let Err(e) = unmount_by_mountpoint(&path) {
-warn!("could not unmount device: {e}");
-}
-}
-return res;
+if tuning.gc_atime_safety_check.unwrap_or(true) {
+chunk_store
+.check_fs_atime_updates(true)
+.context("access time safety check failed")?;
+info!("Access time update check successful.");
+} else {
+info!("Access time update check skipped.");
}

config.set_data(&datastore.name, "datastore", &datastore)?;

pbs_config::datastore::save_config(&config)?;

-jobstate::create_state_file("garbage_collection", &datastore.name)
+jobstate::create_state_file("garbage_collection", &datastore.name)?;
+
+unmount_guard.disable();
+
+Ok(())
}

#[api(
|
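The UnmountGuard introduced above is the usual Rust drop-guard idiom: the cleanup runs automatically on every early-return and error path of do_create_datastore, and the success path disarms the guard via disable() just before returning Ok(()). Below is a minimal, self-contained sketch of the same pattern, using a temporary directory instead of a mounted device; all names here are hypothetical stand-ins, not PBS APIs.

use std::fs;
use std::path::PathBuf;

// Removes the directory again unless the guard is disarmed.
struct CleanupGuard {
    path: Option<PathBuf>,
}

impl Drop for CleanupGuard {
    fn drop(&mut self) {
        if let Some(path) = &self.path {
            // Best-effort cleanup; runs on every early return after the guard was created.
            let _ = fs::remove_dir_all(path);
        }
    }
}

fn create_with_cleanup(path: PathBuf) -> std::io::Result<()> {
    fs::create_dir_all(&path)?;
    let mut guard = CleanupGuard { path: Some(path) };

    // ... fallible setup work; any `?` from here on triggers the guard's Drop ...

    guard.path = None; // success: keep the directory (same role as UnmountGuard::disable)
    Ok(())
}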
@@ -335,6 +335,10 @@ pub enum DeletableProperty {
     MaxDepth,
     /// Delete the transfer_last property,
     TransferLast,
+    /// Delete the encrypted_only property,
+    EncryptedOnly,
+    /// Delete the verified_only property,
+    VerifiedOnly,
     /// Delete the sync_direction property,
     SyncDirection,
 }
@@ -448,6 +452,12 @@ pub fn update_sync_job(
                 DeletableProperty::TransferLast => {
                     data.transfer_last = None;
                 }
+                DeletableProperty::EncryptedOnly => {
+                    data.encrypted_only = None;
+                }
+                DeletableProperty::VerifiedOnly => {
+                    data.verified_only = None;
+                }
                 DeletableProperty::SyncDirection => {
                     data.sync_direction = None;
                 }
@@ -491,6 +501,12 @@ pub fn update_sync_job(
     if let Some(resync_corrupt) = update.resync_corrupt {
         data.resync_corrupt = Some(resync_corrupt);
     }
+    if let Some(encrypted_only) = update.encrypted_only {
+        data.encrypted_only = Some(encrypted_only);
+    }
+    if let Some(verified_only) = update.verified_only {
+        data.verified_only = Some(verified_only);
+    }
     if let Some(sync_direction) = update.sync_direction {
         data.sync_direction = Some(sync_direction);
     }
@@ -665,6 +681,8 @@ acl:1:/remote/remote1/remotestore1:write@pbs:RemoteSyncOperator
         schedule: None,
         limit: pbs_api_types::RateLimitConfig::default(), // no limit
         transfer_last: None,
+        encrypted_only: None,
+        verified_only: None,
         sync_direction: None, // use default
     };

@@ -140,6 +140,8 @@ pub enum DeletableProperty {
     MaxDepth,
     /// Delete the 'ns' property
     Ns,
+    /// Delete the 'worker-threads' property
+    WorkerThreads,
 }

 #[api(
@@ -222,6 +224,9 @@ pub fn update_tape_backup_job(
                 DeletableProperty::Ns => {
                     data.setup.ns = None;
                 }
+                DeletableProperty::WorkerThreads => {
+                    data.setup.worker_threads = None;
+                }
             }
         }
     }
@@ -260,6 +265,9 @@ pub fn update_tape_backup_job(
     if update.setup.max_depth.is_some() {
         data.setup.max_depth = update.setup.max_depth;
     }
+    if update.setup.worker_threads.is_some() {
+        data.setup.worker_threads = update.setup.worker_threads;
+    }

     let schedule_changed = data.schedule != update.schedule;
     if update.schedule.is_some() {
@@ -10,7 +10,8 @@ use pbs_api_types::{
     Authid, BackupNamespace, GroupFilter, RateLimitConfig, SyncJobConfig, DATASTORE_SCHEMA,
     GROUP_FILTER_LIST_SCHEMA, NS_MAX_DEPTH_REDUCED_SCHEMA, PRIV_DATASTORE_BACKUP,
     PRIV_DATASTORE_PRUNE, PRIV_REMOTE_READ, REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA,
-    RESYNC_CORRUPT_SCHEMA, TRANSFER_LAST_SCHEMA,
+    RESYNC_CORRUPT_SCHEMA, SYNC_ENCRYPTED_ONLY_SCHEMA, SYNC_VERIFIED_ONLY_SCHEMA,
+    TRANSFER_LAST_SCHEMA,
 };
 use pbs_config::CachedUserInfo;
 use proxmox_rest_server::WorkerTask;
@@ -87,6 +88,8 @@ impl TryFrom<&SyncJobConfig> for PullParameters {
             sync_job.group_filter.clone(),
             sync_job.limit.clone(),
             sync_job.transfer_last,
+            sync_job.encrypted_only,
+            sync_job.verified_only,
             sync_job.resync_corrupt,
         )
     }
@@ -133,6 +136,14 @@ impl TryFrom<&SyncJobConfig> for PullParameters {
                 schema: TRANSFER_LAST_SCHEMA,
                 optional: true,
             },
+            "encrypted-only": {
+                schema: SYNC_ENCRYPTED_ONLY_SCHEMA,
+                optional: true,
+            },
+            "verified-only": {
+                schema: SYNC_VERIFIED_ONLY_SCHEMA,
+                optional: true,
+            },
             "resync-corrupt": {
                 schema: RESYNC_CORRUPT_SCHEMA,
                 optional: true,
@@ -161,6 +172,8 @@ async fn pull(
     group_filter: Option<Vec<GroupFilter>>,
     limit: RateLimitConfig,
     transfer_last: Option<usize>,
+    encrypted_only: Option<bool>,
+    verified_only: Option<bool>,
     resync_corrupt: Option<bool>,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<String, Error> {
@@ -199,6 +212,8 @@ async fn pull(
         group_filter,
         limit,
         transfer_last,
+        encrypted_only,
+        verified_only,
         resync_corrupt,
     )?;

@@ -5,7 +5,8 @@ use pbs_api_types::{
     Authid, BackupNamespace, GroupFilter, RateLimitConfig, DATASTORE_SCHEMA,
     GROUP_FILTER_LIST_SCHEMA, NS_MAX_DEPTH_REDUCED_SCHEMA, PRIV_DATASTORE_BACKUP,
     PRIV_DATASTORE_READ, PRIV_REMOTE_DATASTORE_BACKUP, PRIV_REMOTE_DATASTORE_PRUNE,
-    REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA, TRANSFER_LAST_SCHEMA,
+    REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA, SYNC_ENCRYPTED_ONLY_SCHEMA,
+    SYNC_VERIFIED_ONLY_SCHEMA, TRANSFER_LAST_SCHEMA,
 };
 use proxmox_rest_server::WorkerTask;
 use proxmox_router::{Permission, Router, RpcEnvironment};
@@ -91,6 +92,14 @@ fn check_push_privs(
             schema: GROUP_FILTER_LIST_SCHEMA,
             optional: true,
         },
+        "encrypted-only": {
+            schema: SYNC_ENCRYPTED_ONLY_SCHEMA,
+            optional: true,
+        },
+        "verified-only": {
+            schema: SYNC_VERIFIED_ONLY_SCHEMA,
+            optional: true,
+        },
         limit: {
             type: RateLimitConfig,
             flatten: true,
@@ -120,6 +129,8 @@ async fn push(
     remove_vanished: Option<bool>,
     max_depth: Option<usize>,
     group_filter: Option<Vec<GroupFilter>>,
+    encrypted_only: Option<bool>,
+    verified_only: Option<bool>,
     limit: RateLimitConfig,
     transfer_last: Option<usize>,
     rpcenv: &mut dyn RpcEnvironment,
@@ -149,6 +160,8 @@ async fn push(
         remove_vanished,
         max_depth,
         group_filter,
+        encrypted_only,
+        verified_only,
         limit,
         transfer_last,
     )
@@ -1,6 +1,6 @@
 //! Backup reader/restore protocol (HTTP2 upgrade)

-use anyhow::{bail, format_err, Error};
+use anyhow::{bail, format_err, Context, Error};
 use futures::*;
 use hex::FromHex;
 use hyper::header::{self, HeaderValue, CONNECTION, UPGRADE};
@@ -16,7 +16,6 @@ use proxmox_router::{
 };
 use proxmox_schema::{BooleanSchema, ObjectSchema};
 use proxmox_sortable_macro::sortable;
-use proxmox_sys::fs::lock_dir_noblock_shared;

 use pbs_api_types::{
     ArchiveType, Authid, Operation, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA,
@@ -129,11 +128,9 @@ fn upgrade_to_backup_reader_protocol(
             bail!("snapshot {} does not exist.", backup_dir.dir());
         }

-        let _guard = lock_dir_noblock_shared(
-            &backup_dir.full_path(),
-            "snapshot",
-            "locked by another operation",
-        )?;
+        let _guard = backup_dir
+            .lock_shared()
+            .with_context(|| format!("while reading snapshot '{backup_dir:?}'"))?;

         let path = datastore.base_path();

@@ -183,14 +180,12 @@ fn upgrade_to_backup_reader_protocol(
             let conn = hyper::upgrade::on(Request::from_parts(parts, req_body)).await?;
             env2.debug("protocol upgrade done");

-            let mut http =
-                hyper::server::conn::Http::new().with_executor(ExecInheritLogContext);
-            http.http2_only(true);
+            let mut http = hyper::server::conn::http2::Builder::new(ExecInheritLogContext);
             // increase window size: todo - find optiomal size
             let window_size = 32 * 1024 * 1024; // max = (1 << 31) - 2
-            http.http2_initial_stream_window_size(window_size);
-            http.http2_initial_connection_window_size(window_size);
-            http.http2_max_frame_size(4 * 1024 * 1024);
+            http.initial_stream_window_size(window_size);
+            http.initial_connection_window_size(window_size);
+            http.max_frame_size(4 * 1024 * 1024);

             http.serve_connection(conn, service)
                 .map_err(Error::from)
@@ -387,6 +387,10 @@ fn backup_worker(
         ns_magic,
     )?;

+    if let Some(threads) = setup.worker_threads {
+        pool_writer.set_read_thread_count(threads as usize);
+    }
+
     let mut group_list = Vec::new();
     let namespaces = datastore.recursive_iter_backup_ns_ok(root_namespace, setup.max_depth)?;
     for ns in namespaces {
@@ -1,13 +1,12 @@
+use pbs_config::BackupLockGuard;
 use std::collections::HashSet;
 use std::sync::atomic::{AtomicUsize, Ordering};
 use std::sync::{Arc, Mutex};
 use std::time::Instant;

 use anyhow::{bail, Error};
-use nix::dir::Dir;
 use tracing::{error, info, warn};

-use proxmox_sys::fs::lock_dir_noblock_shared;
 use proxmox_worker_task::WorkerTaskContext;

 use pbs_api_types::{
@@ -307,11 +306,8 @@ pub fn verify_backup_dir(
         return Ok(true);
     }

-    let snap_lock = lock_dir_noblock_shared(
-        &backup_dir.full_path(),
-        "snapshot",
-        "locked by another operation",
-    );
+    let snap_lock = backup_dir.lock_shared();
+
     match snap_lock {
         Ok(snap_lock) => {
             verify_backup_dir_with_lock(verify_worker, backup_dir, upid, filter, snap_lock)
@@ -334,7 +330,7 @@ pub fn verify_backup_dir_with_lock(
     backup_dir: &BackupDir,
     upid: UPID,
     filter: Option<&dyn Fn(&BackupManifest) -> bool>,
-    _snap_lock: Dir,
+    _snap_lock: BackupLockGuard,
 ) -> Result<bool, Error> {
     let datastore_name = verify_worker.datastore.name();
     let backup_dir_name = backup_dir.dir();
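The verification path above now takes a typed BackupLockGuard from backup_dir.lock_shared() instead of a raw nix Dir handle, and verify_backup_dir_with_lock accepts the guard by value, so the snapshot stays locked for as long as the verification runs. A rough sketch of that ownership flow, with placeholder types rather than the real PBS ones:

// Placeholder for pbs_config::BackupLockGuard; the lock is released when it is dropped.
struct SharedLockGuard;

fn lock_shared() -> Result<SharedLockGuard, String> {
    // The real implementation locks the snapshot and fails if another operation holds it.
    Ok(SharedLockGuard)
}

// Taking the guard by value ties the lock's lifetime to the verification itself.
fn verify_with_lock(_snap_lock: SharedLockGuard) -> bool {
    // ... verify all archives; the lock is released when `_snap_lock` is dropped ...
    true
}

fn main() {
    match lock_shared() {
        Ok(guard) => {
            let _ok = verify_with_lock(guard);
        }
        Err(err) => eprintln!("skipping snapshot: {err}"),
    }
}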
@@ -8,7 +8,6 @@ use hyper::{Body, StatusCode};
 use tracing::level_filters::LevelFilter;

 use proxmox_lang::try_block;
-use proxmox_log::init_logger;
 use proxmox_rest_server::{ApiConfig, RestServer};
 use proxmox_router::RpcEnvironmentType;
 use proxmox_sys::fs::CreateOptions;
@@ -41,11 +40,15 @@ fn get_index() -> Pin<Box<dyn Future<Output = Response<Body>> + Send>> {
 }

 async fn run() -> Result<(), Error> {
-    init_logger("PBS_LOG", LevelFilter::INFO)?;
+    proxmox_log::Logger::from_env("PBS_LOG", LevelFilter::INFO)
+        .journald_on_no_workertask()
+        .tasklog_pbs()
+        .init()?;

     config::create_configdir()?;

     config::update_self_signed_cert(false)?;
+    config::update_default_realms()?;

     proxmox_backup::server::create_run_dir()?;
     proxmox_backup::server::create_state_dir()?;
@@ -86,21 +89,21 @@ async fn run() -> Result<(), Error> {
         .default_api2_handler(&proxmox_backup::api2::ROUTER)
         .enable_access_log(
             pbs_buildcfg::API_ACCESS_LOG_FN,
-            Some(dir_opts.clone()),
-            Some(file_opts.clone()),
+            Some(dir_opts),
+            Some(file_opts),
             &mut command_sock,
         )?
         .enable_auth_log(
             pbs_buildcfg::API_AUTH_LOG_FN,
-            Some(dir_opts.clone()),
-            Some(file_opts.clone()),
+            Some(dir_opts),
+            Some(file_opts),
             &mut command_sock,
         )?;

     let rest_server = RestServer::new(config);
     proxmox_rest_server::init_worker_tasks(
         pbs_buildcfg::PROXMOX_BACKUP_LOG_DIR_M!().into(),
-        file_opts.clone(),
+        file_opts,
     )?;

     // http server future:
@@ -1,4 +1,3 @@
-use proxmox_log::init_cli_logger;
 use proxmox_router::{
     cli::{run_cli_command, CliCommandMap, CliEnvironment},
     RpcEnvironment,
@@ -8,7 +7,10 @@ mod proxmox_backup_debug;
 use proxmox_backup_debug::*;

 fn main() {
-    init_cli_logger("PBS_LOG", proxmox_log::LevelFilter::INFO).expect("failed to initiate logger");
+    proxmox_log::Logger::from_env("PBS_LOG", proxmox_log::LevelFilter::INFO)
+        .stderr()
+        .init()
+        .expect("failed to initiate logger");

     let cmd_def = CliCommandMap::new()
         .insert("inspect", inspect::inspect_commands())
@@ -3,7 +3,6 @@ use std::io::{self, Write};
 use std::str::FromStr;

 use anyhow::{format_err, Error};
-use proxmox_log::init_cli_logger;
 use serde_json::{json, Value};

 use proxmox_router::{cli::*, RpcEnvironment};
@@ -14,8 +13,9 @@ use pbs_api_types::percent_encoding::percent_encode_component;
 use pbs_api_types::{
     BackupNamespace, GroupFilter, RateLimitConfig, SyncDirection, SyncJobConfig, DATASTORE_SCHEMA,
     GROUP_FILTER_LIST_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, NS_MAX_DEPTH_SCHEMA,
-    REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA, RESYNC_CORRUPT_SCHEMA, TRANSFER_LAST_SCHEMA,
-    UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA,
+    REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA, RESYNC_CORRUPT_SCHEMA,
+    SYNC_ENCRYPTED_ONLY_SCHEMA, SYNC_VERIFIED_ONLY_SCHEMA, TRANSFER_LAST_SCHEMA, UPID_SCHEMA,
+    VERIFICATION_OUTDATED_AFTER_SCHEMA,
 };
 use pbs_client::{display_task_log, view_task_result};
 use pbs_config::sync;
|
|||||||
limit: RateLimitConfig,
|
limit: RateLimitConfig,
|
||||||
transfer_last: Option<usize>,
|
transfer_last: Option<usize>,
|
||||||
resync_corrupt: Option<bool>,
|
resync_corrupt: Option<bool>,
|
||||||
|
encrypted_only: Option<bool>,
|
||||||
|
verified_only: Option<bool>,
|
||||||
param: Value,
|
param: Value,
|
||||||
sync_direction: SyncDirection,
|
sync_direction: SyncDirection,
|
||||||
) -> Result<Value, Error> {
|
) -> Result<Value, Error> {
|
||||||
@ -348,6 +350,14 @@ async fn sync_datastore(
|
|||||||
args["resync-corrupt"] = Value::from(resync);
|
args["resync-corrupt"] = Value::from(resync);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if let Some(encrypted_only) = encrypted_only {
|
||||||
|
args["encrypted-only"] = Value::from(encrypted_only);
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(verified_only) = verified_only {
|
||||||
|
args["verified-only"] = Value::from(verified_only);
|
||||||
|
}
|
||||||
|
|
||||||
let mut limit_json = json!(limit);
|
let mut limit_json = json!(limit);
|
||||||
let limit_map = limit_json
|
let limit_map = limit_json
|
||||||
.as_object_mut()
|
.as_object_mut()
|
||||||
@ -414,6 +424,14 @@ async fn sync_datastore(
|
|||||||
schema: RESYNC_CORRUPT_SCHEMA,
|
schema: RESYNC_CORRUPT_SCHEMA,
|
||||||
optional: true,
|
optional: true,
|
||||||
},
|
},
|
||||||
|
"encrypted-only": {
|
||||||
|
schema: SYNC_ENCRYPTED_ONLY_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
"verified-only": {
|
||||||
|
schema: SYNC_VERIFIED_ONLY_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
)]
|
)]
|
||||||
@ -431,6 +449,8 @@ async fn pull_datastore(
|
|||||||
limit: RateLimitConfig,
|
limit: RateLimitConfig,
|
||||||
transfer_last: Option<usize>,
|
transfer_last: Option<usize>,
|
||||||
resync_corrupt: Option<bool>,
|
resync_corrupt: Option<bool>,
|
||||||
|
encrypted_only: Option<bool>,
|
||||||
|
verified_only: Option<bool>,
|
||||||
param: Value,
|
param: Value,
|
||||||
) -> Result<Value, Error> {
|
) -> Result<Value, Error> {
|
||||||
sync_datastore(
|
sync_datastore(
|
||||||
@ -445,6 +465,8 @@ async fn pull_datastore(
|
|||||||
limit,
|
limit,
|
||||||
transfer_last,
|
transfer_last,
|
||||||
resync_corrupt,
|
resync_corrupt,
|
||||||
|
encrypted_only,
|
||||||
|
verified_only,
|
||||||
param,
|
param,
|
||||||
SyncDirection::Pull,
|
SyncDirection::Pull,
|
||||||
)
|
)
|
||||||
@ -495,6 +517,14 @@ async fn pull_datastore(
|
|||||||
schema: TRANSFER_LAST_SCHEMA,
|
schema: TRANSFER_LAST_SCHEMA,
|
||||||
optional: true,
|
optional: true,
|
||||||
},
|
},
|
||||||
|
"encrypted-only": {
|
||||||
|
schema: SYNC_ENCRYPTED_ONLY_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
|
"verified-only": {
|
||||||
|
schema: SYNC_VERIFIED_ONLY_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
)]
|
)]
|
||||||
@ -511,6 +541,8 @@ async fn push_datastore(
|
|||||||
group_filter: Option<Vec<GroupFilter>>,
|
group_filter: Option<Vec<GroupFilter>>,
|
||||||
limit: RateLimitConfig,
|
limit: RateLimitConfig,
|
||||||
transfer_last: Option<usize>,
|
transfer_last: Option<usize>,
|
||||||
|
encrypted_only: Option<bool>,
|
||||||
|
verified_only: Option<bool>,
|
||||||
param: Value,
|
param: Value,
|
||||||
) -> Result<Value, Error> {
|
) -> Result<Value, Error> {
|
||||||
sync_datastore(
|
sync_datastore(
|
||||||
@ -525,6 +557,8 @@ async fn push_datastore(
|
|||||||
limit,
|
limit,
|
||||||
transfer_last,
|
transfer_last,
|
||||||
None,
|
None,
|
||||||
|
encrypted_only,
|
||||||
|
verified_only,
|
||||||
param,
|
param,
|
||||||
SyncDirection::Push,
|
SyncDirection::Push,
|
||||||
)
|
)
|
||||||
@ -618,7 +652,12 @@ async fn get_versions(verbose: bool, param: Value) -> Result<Value, Error> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
async fn run() -> Result<(), Error> {
|
async fn run() -> Result<(), Error> {
|
||||||
init_cli_logger("PBS_LOG", proxmox_log::LevelFilter::INFO)?;
|
// We need to use the tasklog logger here as well, because the proxmox-backup-manager can and
|
||||||
|
// will directly execute workertasks.
|
||||||
|
proxmox_log::Logger::from_env("PBS_LOG", proxmox_log::LevelFilter::INFO)
|
||||||
|
.stderr_on_no_workertask()
|
||||||
|
.tasklog_pbs()
|
||||||
|
.init()?;
|
||||||
proxmox_backup::server::notifications::init()?;
|
proxmox_backup::server::notifications::init()?;
|
||||||
|
|
||||||
let cmd_def = CliCommandMap::new()
|
let cmd_def = CliCommandMap::new()
|
||||||
|
@ -16,7 +16,6 @@ use openssl::ssl::SslAcceptor;
|
|||||||
use serde_json::{json, Value};
|
use serde_json::{json, Value};
|
||||||
|
|
||||||
use proxmox_lang::try_block;
|
use proxmox_lang::try_block;
|
||||||
use proxmox_log::init_logger;
|
|
||||||
use proxmox_router::{RpcEnvironment, RpcEnvironmentType};
|
use proxmox_router::{RpcEnvironment, RpcEnvironmentType};
|
||||||
use proxmox_sys::fs::CreateOptions;
|
use proxmox_sys::fs::CreateOptions;
|
||||||
use proxmox_sys::logrotate::LogRotate;
|
use proxmox_sys::logrotate::LogRotate;
|
||||||
@ -179,7 +178,10 @@ async fn get_index_future(env: RestEnvironment, parts: Parts) -> Response<Body>
|
|||||||
}
|
}
|
||||||
|
|
||||||
async fn run() -> Result<(), Error> {
|
async fn run() -> Result<(), Error> {
|
||||||
init_logger("PBS_LOG", LevelFilter::INFO)?;
|
proxmox_log::Logger::from_env("PBS_LOG", LevelFilter::INFO)
|
||||||
|
.journald_on_no_workertask()
|
||||||
|
.tasklog_pbs()
|
||||||
|
.init()?;
|
||||||
|
|
||||||
proxmox_backup::auth_helpers::setup_auth_context(false);
|
proxmox_backup::auth_helpers::setup_auth_context(false);
|
||||||
proxmox_backup::server::notifications::init()?;
|
proxmox_backup::server::notifications::init()?;
|
||||||
@ -221,14 +223,14 @@ async fn run() -> Result<(), Error> {
|
|||||||
config = config
|
config = config
|
||||||
.enable_access_log(
|
.enable_access_log(
|
||||||
pbs_buildcfg::API_ACCESS_LOG_FN,
|
pbs_buildcfg::API_ACCESS_LOG_FN,
|
||||||
Some(dir_opts.clone()),
|
Some(dir_opts),
|
||||||
Some(file_opts.clone()),
|
Some(file_opts),
|
||||||
&mut command_sock,
|
&mut command_sock,
|
||||||
)?
|
)?
|
||||||
.enable_auth_log(
|
.enable_auth_log(
|
||||||
pbs_buildcfg::API_AUTH_LOG_FN,
|
pbs_buildcfg::API_AUTH_LOG_FN,
|
||||||
Some(dir_opts.clone()),
|
Some(dir_opts),
|
||||||
Some(file_opts.clone()),
|
Some(file_opts),
|
||||||
&mut command_sock,
|
&mut command_sock,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
@ -236,7 +238,7 @@ async fn run() -> Result<(), Error> {
|
|||||||
let redirector = Redirector::new();
|
let redirector = Redirector::new();
|
||||||
proxmox_rest_server::init_worker_tasks(
|
proxmox_rest_server::init_worker_tasks(
|
||||||
pbs_buildcfg::PROXMOX_BACKUP_LOG_DIR_M!().into(),
|
pbs_buildcfg::PROXMOX_BACKUP_LOG_DIR_M!().into(),
|
||||||
file_opts.clone(),
|
file_opts,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
//openssl req -x509 -newkey rsa:4096 -keyout /etc/proxmox-backup/proxy.key -out /etc/proxmox-backup/proxy.pem -nodes
|
//openssl req -x509 -newkey rsa:4096 -keyout /etc/proxmox-backup/proxy.key -out /etc/proxmox-backup/proxy.pem -nodes
|
||||||
@ -541,7 +543,7 @@ async fn schedule_datastore_garbage_collection() {
|
|||||||
Some(event_str),
|
Some(event_str),
|
||||||
false,
|
false,
|
||||||
) {
|
) {
|
||||||
eprintln!("unable to start garbage collection job on datastore {store} - {err}");
|
eprintln!("unable to start garbage collection job on datastore {store} - {err:#}");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -750,7 +752,7 @@ async fn schedule_task_log_rotate() {
|
|||||||
true,
|
true,
|
||||||
Some(max_files),
|
Some(max_files),
|
||||||
max_days,
|
max_days,
|
||||||
Some(options.clone()),
|
Some(options),
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
if has_rotated {
|
if has_rotated {
|
||||||
@ -766,7 +768,7 @@ async fn schedule_task_log_rotate() {
|
|||||||
pbs_buildcfg::API_ACCESS_LOG_FN,
|
pbs_buildcfg::API_ACCESS_LOG_FN,
|
||||||
true,
|
true,
|
||||||
Some(max_files),
|
Some(max_files),
|
||||||
Some(options.clone()),
|
Some(options),
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
if logrotate.rotate(max_size)? {
|
if logrotate.rotate(max_size)? {
|
||||||
|
@ -95,7 +95,7 @@ async fn run(rpcenv: &mut dyn RpcEnvironment) -> Result<(), Error> {
|
|||||||
.group(backup_user.gid);
|
.group(backup_user.gid);
|
||||||
proxmox_rest_server::init_worker_tasks(
|
proxmox_rest_server::init_worker_tasks(
|
||||||
pbs_buildcfg::PROXMOX_BACKUP_LOG_DIR_M!().into(),
|
pbs_buildcfg::PROXMOX_BACKUP_LOG_DIR_M!().into(),
|
||||||
file_opts.clone(),
|
file_opts,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
let mut command_sock = proxmox_daemon::command_socket::CommandSocket::new(backup_user.gid);
|
let mut command_sock = proxmox_daemon::command_socket::CommandSocket::new(backup_user.gid);
|
||||||
@ -110,13 +110,12 @@ async fn run(rpcenv: &mut dyn RpcEnvironment) -> Result<(), Error> {
|
|||||||
fn main() {
|
fn main() {
|
||||||
proxmox_backup::tools::setup_safe_path_env();
|
proxmox_backup::tools::setup_safe_path_env();
|
||||||
|
|
||||||
if let Err(err) = syslog::init(
|
// We need to use the tasklog layer here because we call a workertask.
|
||||||
syslog::Facility::LOG_DAEMON,
|
proxmox_log::Logger::from_env("PBS_LOG", proxmox_log::LevelFilter::INFO)
|
||||||
log::LevelFilter::Info,
|
.journald_on_no_workertask()
|
||||||
Some("proxmox-daily-update"),
|
.tasklog_pbs()
|
||||||
) {
|
.init()
|
||||||
eprintln!("unable to initialize syslog - {err}");
|
.expect("unable to initialize logger");
|
||||||
}
|
|
||||||
|
|
||||||
let mut rpcenv = CliEnvironment::new();
|
let mut rpcenv = CliEnvironment::new();
|
||||||
rpcenv.set_auth_id(Some(String::from("root@pam")));
|
rpcenv.set_auth_id(Some(String::from("root@pam")));
|
||||||
|
@ -5,7 +5,6 @@ use serde_json::{json, Value};
|
|||||||
|
|
||||||
use proxmox_human_byte::HumanByte;
|
use proxmox_human_byte::HumanByte;
|
||||||
use proxmox_io::ReadExt;
|
use proxmox_io::ReadExt;
|
||||||
use proxmox_log::init_cli_logger;
|
|
||||||
use proxmox_router::cli::*;
|
use proxmox_router::cli::*;
|
||||||
use proxmox_router::RpcEnvironment;
|
use proxmox_router::RpcEnvironment;
|
||||||
use proxmox_schema::api;
|
use proxmox_schema::api;
|
||||||
@ -998,7 +997,10 @@ async fn catalog_media(mut param: Value) -> Result<(), Error> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn main() {
|
fn main() {
|
||||||
init_cli_logger("PBS_LOG", proxmox_log::LevelFilter::INFO).expect("failed to initiate logger");
|
proxmox_log::Logger::from_env("PBS_LOG", proxmox_log::LevelFilter::INFO)
|
||||||
|
.stderr()
|
||||||
|
.init()
|
||||||
|
.expect("failed to initiate logger");
|
||||||
|
|
||||||
let cmd_def = CliCommandMap::new()
|
let cmd_def = CliCommandMap::new()
|
||||||
.insert(
|
.insert(
|
||||||
|
@ -409,8 +409,8 @@ fn inspect_device(device: String, param: Value) -> Result<(), Error> {
|
|||||||
let default_options = proxmox_sys::fs::CreateOptions::new();
|
let default_options = proxmox_sys::fs::CreateOptions::new();
|
||||||
proxmox_sys::fs::create_path(
|
proxmox_sys::fs::create_path(
|
||||||
&tmp_mount_path,
|
&tmp_mount_path,
|
||||||
Some(default_options.clone()),
|
Some(default_options),
|
||||||
Some(default_options.clone()),
|
Some(default_options),
|
||||||
)?;
|
)?;
|
||||||
let mut mount_cmd = std::process::Command::new("mount");
|
let mut mount_cmd = std::process::Command::new("mount");
|
||||||
mount_cmd.arg(device.clone());
|
mount_cmd.arg(device.clone());
|
||||||
|
@ -10,7 +10,6 @@ use pbs_tape::sg_tape::SgTape;
|
|||||||
use proxmox_backup::tape::encryption_keys::load_key;
|
use proxmox_backup::tape::encryption_keys::load_key;
|
||||||
use serde_json::Value;
|
use serde_json::Value;
|
||||||
|
|
||||||
use proxmox_log::init_cli_logger;
|
|
||||||
use proxmox_router::{cli::*, RpcEnvironment};
|
use proxmox_router::{cli::*, RpcEnvironment};
|
||||||
use proxmox_schema::api;
|
use proxmox_schema::api;
|
||||||
use proxmox_uuid::Uuid;
|
use proxmox_uuid::Uuid;
|
||||||
@ -125,7 +124,9 @@ fn set_encryption(
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn main() -> Result<(), Error> {
|
fn main() -> Result<(), Error> {
|
||||||
init_cli_logger("PBS_LOG", proxmox_log::LevelFilter::INFO)?;
|
proxmox_log::Logger::from_env("PBS_LOG", proxmox_log::LevelFilter::INFO)
|
||||||
|
.stderr()
|
||||||
|
.init()?;
|
||||||
|
|
||||||
// check if we are user root or backup
|
// check if we are user root or backup
|
||||||
let backup_uid = pbs_config::backup_user()?.uid;
|
let backup_uid = pbs_config::backup_user()?.uid;
|
||||||
|
@@ -12,6 +12,7 @@ use std::path::Path;

 use proxmox_lang::try_block;

+use pbs_api_types::{PamRealmConfig, PbsRealmConfig};
 use pbs_buildcfg::{self, configdir};

 pub mod acme;
@@ -194,3 +195,27 @@ pub(crate) fn set_proxy_certificate(cert_pem: &[u8], key_pem: &[u8]) -> Result<(

     Ok(())
 }
+
+pub fn update_default_realms() -> Result<(), Error> {
+    let _lock = pbs_config::domains::lock_config()?;
+    let (mut domains, _) = pbs_config::domains::config()?;
+
+    if !pbs_config::domains::exists(&domains, "pam") {
+        domains.set_data(
+            "pam",
+            "pam",
+            PamRealmConfig {
+                // Setting it as default here is safe, because if we perform this
+                // migration, the user had not had any chance to set a custom default anyway.
+                default: Some(true),
+                ..Default::default()
+            },
+        )?;
+    }
+
+    if !pbs_config::domains::exists(&domains, "pbs") {
+        domains.set_data("pbs", "pbs", PbsRealmConfig::default())?;
+    }
+
+    pbs_config::domains::save_config(&domains)
+}
@ -174,6 +174,11 @@ pub enum Translation {
|
|||||||
"description" : {
|
"description" : {
|
||||||
optional: true,
|
optional: true,
|
||||||
schema: MULTI_LINE_COMMENT_SCHEMA,
|
schema: MULTI_LINE_COMMENT_SCHEMA,
|
||||||
|
},
|
||||||
|
"consent-text" : {
|
||||||
|
optional: true,
|
||||||
|
type: String,
|
||||||
|
max_length: 64 * 1024,
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
)]
|
)]
|
||||||
|
@@ -199,14 +199,15 @@ impl proxmox_tfa::api::OpenUserChallengeData for UserAccess {
     fn open(&self, userid: &str) -> Result<Box<dyn UserChallengeAccess>, Error> {
         crate::server::create_run_dir()?;
         let options = CreateOptions::new().perm(Mode::from_bits_truncate(0o0600));
-        proxmox_sys::fs::create_path(CHALLENGE_DATA_PATH, Some(options.clone()), Some(options))
-            .map_err(|err| {
+        proxmox_sys::fs::create_path(CHALLENGE_DATA_PATH, Some(options), Some(options)).map_err(
+            |err| {
                 format_err!(
                     "failed to crate challenge data dir {:?}: {}",
                     CHALLENGE_DATA_PATH,
                     err
                 )
-            })?;
+            },
+        )?;

         let path = challenge_data_path_str(userid);

@@ -87,7 +87,7 @@ pub fn create_jobstate_dir() -> Result<(), Error> {
         .owner(backup_user.uid)
         .group(backup_user.gid);

-    create_path(JOB_STATE_BASEDIR, Some(opts.clone()), Some(opts))
+    create_path(JOB_STATE_BASEDIR, Some(opts), Some(opts))
         .map_err(|err: Error| format_err!("unable to create job state dir - {err}"))?;

     Ok(())
@@ -73,9 +73,9 @@ pub fn get_all_metrics(start_time: i64) -> Result<Vec<MetricDataPoint>, Error> {

     let mut points = Vec::new();

-    for gen in cached_datapoints {
-        if gen.timestamp > start_time {
-            points.extend(gen.datapoints);
+    for generation in cached_datapoints {
+        if generation.timestamp > start_time {
+            points.extend(generation.datapoints);
         }
     }

@ -5,7 +5,6 @@ use std::time::{Duration, Instant};
|
|||||||
use anyhow::Error;
|
use anyhow::Error;
|
||||||
use const_format::concatcp;
|
use const_format::concatcp;
|
||||||
use nix::unistd::Uid;
|
use nix::unistd::Uid;
|
||||||
use serde_json::json;
|
|
||||||
|
|
||||||
use proxmox_notify::context::pbs::PBS_CONTEXT;
|
use proxmox_notify::context::pbs::PBS_CONTEXT;
|
||||||
use proxmox_schema::ApiType;
|
use proxmox_schema::ApiType;
|
||||||
@ -21,6 +20,15 @@ use proxmox_notify::{Endpoint, Notification, Severity};
|
|||||||
|
|
||||||
const SPOOL_DIR: &str = concatcp!(pbs_buildcfg::PROXMOX_BACKUP_STATE_DIR, "/notifications");
|
const SPOOL_DIR: &str = concatcp!(pbs_buildcfg::PROXMOX_BACKUP_STATE_DIR, "/notifications");
|
||||||
|
|
||||||
|
mod template_data;
|
||||||
|
|
||||||
|
use template_data::{
|
||||||
|
AcmeErrTemplateData, CommonData, GcErrTemplateData, GcOkTemplateData,
|
||||||
|
PackageUpdatesTemplateData, PruneErrTemplateData, PruneOkTemplateData, SyncErrTemplateData,
|
||||||
|
SyncOkTemplateData, TapeBackupErrTemplateData, TapeBackupOkTemplateData, TapeLoadTemplateData,
|
||||||
|
VerifyErrTemplateData, VerifyOkTemplateData,
|
||||||
|
};
|
||||||
|
|
||||||
/// Initialize the notification system by setting context in proxmox_notify
|
/// Initialize the notification system by setting context in proxmox_notify
|
||||||
pub fn init() -> Result<(), Error> {
|
pub fn init() -> Result<(), Error> {
|
||||||
proxmox_notify::context::set_context(&PBS_CONTEXT);
|
proxmox_notify::context::set_context(&PBS_CONTEXT);
|
||||||
@ -146,38 +154,32 @@ pub fn send_gc_status(
|
|||||||
status: &GarbageCollectionStatus,
|
status: &GarbageCollectionStatus,
|
||||||
result: &Result<(), Error>,
|
result: &Result<(), Error>,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
let (fqdn, port) = get_server_url();
|
|
||||||
let mut data = json!({
|
|
||||||
"datastore": datastore,
|
|
||||||
"fqdn": fqdn,
|
|
||||||
"port": port,
|
|
||||||
});
|
|
||||||
|
|
||||||
let (severity, template) = match result {
|
|
||||||
Ok(()) => {
|
|
||||||
let deduplication_factor = if status.disk_bytes > 0 {
|
|
||||||
(status.index_data_bytes as f64) / (status.disk_bytes as f64)
|
|
||||||
} else {
|
|
||||||
1.0
|
|
||||||
};
|
|
||||||
|
|
||||||
data["status"] = json!(status);
|
|
||||||
data["deduplication-factor"] = format!("{:.2}", deduplication_factor).into();
|
|
||||||
|
|
||||||
(Severity::Info, "gc-ok")
|
|
||||||
}
|
|
||||||
Err(err) => {
|
|
||||||
data["error"] = err.to_string().into();
|
|
||||||
(Severity::Error, "gc-err")
|
|
||||||
}
|
|
||||||
};
|
|
||||||
let metadata = HashMap::from([
|
let metadata = HashMap::from([
|
||||||
("datastore".into(), datastore.into()),
|
("datastore".into(), datastore.into()),
|
||||||
("hostname".into(), proxmox_sys::nodename().into()),
|
("hostname".into(), proxmox_sys::nodename().into()),
|
||||||
("type".into(), "gc".into()),
|
("type".into(), "gc".into()),
|
||||||
]);
|
]);
|
||||||
|
|
||||||
let notification = Notification::from_template(severity, template, data, metadata);
|
let notification = match result {
|
||||||
|
Ok(()) => {
|
||||||
|
let template_data = GcOkTemplateData::new(datastore.to_string(), status);
|
||||||
|
Notification::from_template(
|
||||||
|
Severity::Info,
|
||||||
|
"gc-ok",
|
||||||
|
serde_json::to_value(template_data)?,
|
||||||
|
metadata,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
Err(err) => {
|
||||||
|
let template_data = GcErrTemplateData::new(datastore.to_string(), format!("{err:#}"));
|
||||||
|
Notification::from_template(
|
||||||
|
Severity::Error,
|
||||||
|
"gc-err",
|
||||||
|
serde_json::to_value(template_data)?,
|
||||||
|
metadata,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
let (email, notify, mode) = lookup_datastore_notify_settings(datastore);
|
let (email, notify, mode) = lookup_datastore_notify_settings(datastore);
|
||||||
match mode {
|
match mode {
|
||||||
@ -204,25 +206,6 @@ pub fn send_verify_status(
|
|||||||
job: VerificationJobConfig,
|
job: VerificationJobConfig,
|
||||||
result: &Result<Vec<String>, Error>,
|
result: &Result<Vec<String>, Error>,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
let (fqdn, port) = get_server_url();
|
|
||||||
let mut data = json!({
|
|
||||||
"job": job,
|
|
||||||
"fqdn": fqdn,
|
|
||||||
"port": port,
|
|
||||||
});
|
|
||||||
|
|
||||||
let (template, severity) = match result {
|
|
||||||
Ok(errors) if errors.is_empty() => ("verify-ok", Severity::Info),
|
|
||||||
Ok(errors) => {
|
|
||||||
data["errors"] = json!(errors);
|
|
||||||
("verify-err", Severity::Error)
|
|
||||||
}
|
|
||||||
Err(_) => {
|
|
||||||
// aborted job - do not send any notification
|
|
||||||
return Ok(());
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
let metadata = HashMap::from([
|
let metadata = HashMap::from([
|
||||||
("job-id".into(), job.id.clone()),
|
("job-id".into(), job.id.clone()),
|
||||||
("datastore".into(), job.store.clone()),
|
("datastore".into(), job.store.clone()),
|
||||||
@ -230,7 +213,39 @@ pub fn send_verify_status(
|
|||||||
("type".into(), "verify".into()),
|
("type".into(), "verify".into()),
|
||||||
]);
|
]);
|
||||||
|
|
||||||
let notification = Notification::from_template(severity, template, data, metadata);
|
let notification = match result {
|
||||||
|
Err(_) => {
|
||||||
|
// aborted job - do not send any notification
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
Ok(errors) if errors.is_empty() => {
|
||||||
|
let template_data = VerifyOkTemplateData {
|
||||||
|
common: CommonData::new(),
|
||||||
|
datastore: job.store.clone(),
|
||||||
|
job_id: job.id.clone(),
|
||||||
|
};
|
||||||
|
Notification::from_template(
|
||||||
|
Severity::Info,
|
||||||
|
"verify-ok",
|
||||||
|
serde_json::to_value(template_data)?,
|
||||||
|
metadata,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
Ok(errors) => {
|
||||||
|
let template_data = VerifyErrTemplateData {
|
||||||
|
common: CommonData::new(),
|
||||||
|
datastore: job.store.clone(),
|
||||||
|
job_id: job.id.clone(),
|
||||||
|
failed_snapshot_list: errors.clone(),
|
||||||
|
};
|
||||||
|
Notification::from_template(
|
||||||
|
Severity::Error,
|
||||||
|
"verify-err",
|
||||||
|
serde_json::to_value(template_data)?,
|
||||||
|
metadata,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
let (email, notify, mode) = lookup_datastore_notify_settings(&job.store);
|
let (email, notify, mode) = lookup_datastore_notify_settings(&job.store);
|
||||||
match mode {
|
match mode {
|
||||||
@ -258,22 +273,6 @@ pub fn send_prune_status(
|
|||||||
jobname: &str,
|
jobname: &str,
|
||||||
result: &Result<(), Error>,
|
result: &Result<(), Error>,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
let (fqdn, port) = get_server_url();
|
|
||||||
let mut data = json!({
|
|
||||||
"jobname": jobname,
|
|
||||||
"store": store,
|
|
||||||
"fqdn": fqdn,
|
|
||||||
"port": port,
|
|
||||||
});
|
|
||||||
|
|
||||||
let (template, severity) = match result {
|
|
||||||
Ok(()) => ("prune-ok", Severity::Info),
|
|
||||||
Err(err) => {
|
|
||||||
data["error"] = err.to_string().into();
|
|
||||||
("prune-err", Severity::Error)
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
let metadata = HashMap::from([
|
let metadata = HashMap::from([
|
||||||
("job-id".into(), jobname.to_string()),
|
("job-id".into(), jobname.to_string()),
|
||||||
("datastore".into(), store.into()),
|
("datastore".into(), store.into()),
|
||||||
@ -281,7 +280,37 @@ pub fn send_prune_status(
|
|||||||
("type".into(), "prune".into()),
|
("type".into(), "prune".into()),
|
||||||
]);
|
]);
|
||||||
|
|
||||||
let notification = Notification::from_template(severity, template, data, metadata);
|
let notification = match result {
|
||||||
|
Ok(()) => {
|
||||||
|
let template_data = PruneOkTemplateData {
|
||||||
|
common: CommonData::new(),
|
||||||
|
datastore: store.to_string(),
|
||||||
|
job_id: jobname.to_string(),
|
||||||
|
};
|
||||||
|
|
||||||
|
Notification::from_template(
|
||||||
|
Severity::Info,
|
||||||
|
"prune-ok",
|
||||||
|
serde_json::to_value(template_data)?,
|
||||||
|
metadata,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
Err(err) => {
|
||||||
|
let template_data = PruneErrTemplateData {
|
||||||
|
common: CommonData::new(),
|
||||||
|
datastore: store.to_string(),
|
||||||
|
job_id: jobname.to_string(),
|
||||||
|
error: format!("{err:#}"),
|
||||||
|
};
|
||||||
|
|
||||||
|
Notification::from_template(
|
||||||
|
Severity::Error,
|
||||||
|
"prune-err",
|
||||||
|
serde_json::to_value(template_data)?,
|
||||||
|
metadata,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
let (email, notify, mode) = lookup_datastore_notify_settings(store);
|
let (email, notify, mode) = lookup_datastore_notify_settings(store);
|
||||||
match mode {
|
match mode {
|
||||||
@ -305,21 +334,6 @@ pub fn send_prune_status(
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub fn send_sync_status(job: &SyncJobConfig, result: &Result<(), Error>) -> Result<(), Error> {
|
pub fn send_sync_status(job: &SyncJobConfig, result: &Result<(), Error>) -> Result<(), Error> {
|
||||||
let (fqdn, port) = get_server_url();
|
|
||||||
let mut data = json!({
|
|
||||||
"job": job,
|
|
||||||
"fqdn": fqdn,
|
|
||||||
"port": port,
|
|
||||||
});
|
|
||||||
|
|
||||||
let (template, severity) = match result {
|
|
||||||
Ok(()) => ("sync-ok", Severity::Info),
|
|
||||||
Err(err) => {
|
|
||||||
data["error"] = err.to_string().into();
|
|
||||||
("sync-err", Severity::Error)
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
let metadata = HashMap::from([
|
let metadata = HashMap::from([
|
||||||
("job-id".into(), job.id.clone()),
|
("job-id".into(), job.id.clone()),
|
||||||
("datastore".into(), job.store.clone()),
|
("datastore".into(), job.store.clone()),
|
||||||
@ -327,7 +341,39 @@ pub fn send_sync_status(job: &SyncJobConfig, result: &Result<(), Error>) -> Resu
|
|||||||
("type".into(), "sync".into()),
|
("type".into(), "sync".into()),
|
||||||
]);
|
]);
|
||||||
|
|
||||||
let notification = Notification::from_template(severity, template, data, metadata);
|
let notification = match result {
|
||||||
|
Ok(()) => {
|
||||||
|
let template_data = SyncOkTemplateData {
|
||||||
|
common: CommonData::new(),
|
||||||
|
datastore: job.store.clone(),
|
||||||
|
job_id: job.id.clone(),
|
||||||
|
remote: job.remote.clone(),
|
||||||
|
remote_datastore: job.remote_store.clone(),
|
||||||
|
};
|
||||||
|
Notification::from_template(
|
||||||
|
Severity::Info,
|
||||||
|
"sync-ok",
|
||||||
|
serde_json::to_value(template_data)?,
|
||||||
|
metadata,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
Err(err) => {
|
||||||
|
let template_data = SyncErrTemplateData {
|
||||||
|
common: CommonData::new(),
|
||||||
|
datastore: job.store.clone(),
|
||||||
|
job_id: job.id.clone(),
|
||||||
|
remote: job.remote.clone(),
|
||||||
|
remote_datastore: job.remote_store.clone(),
|
||||||
|
error: format!("{err:#}"),
|
||||||
|
};
|
||||||
|
Notification::from_template(
|
||||||
|
Severity::Error,
|
||||||
|
"sync-err",
|
||||||
|
serde_json::to_value(template_data)?,
|
||||||
|
metadata,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
let (email, notify, mode) = lookup_datastore_notify_settings(&job.store);
|
let (email, notify, mode) = lookup_datastore_notify_settings(&job.store);
|
||||||
match mode {
|
match mode {
|
||||||
@ -356,26 +402,6 @@ pub fn send_tape_backup_status(
|
|||||||
result: &Result<(), Error>,
|
result: &Result<(), Error>,
|
||||||
summary: TapeBackupJobSummary,
|
summary: TapeBackupJobSummary,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
let (fqdn, port) = get_server_url();
|
|
||||||
let duration: proxmox_time::TimeSpan = summary.duration.into();
|
|
||||||
let mut data = json!({
|
|
||||||
"job": job,
|
|
||||||
"fqdn": fqdn,
|
|
||||||
"port": port,
|
|
||||||
"id": id,
|
|
||||||
"snapshot-list": summary.snapshot_list,
|
|
||||||
"used-tapes": summary.used_tapes,
|
|
||||||
"job-duration": duration.to_string(),
|
|
||||||
});
|
|
||||||
|
|
||||||
let (template, severity) = match result {
|
|
||||||
Ok(()) => ("tape-backup-ok", Severity::Info),
|
|
||||||
Err(err) => {
|
|
||||||
data["error"] = err.to_string().into();
|
|
||||||
("tape-backup-err", Severity::Error)
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
let mut metadata = HashMap::from([
|
let mut metadata = HashMap::from([
|
||||||
("datastore".into(), job.store.clone()),
|
("datastore".into(), job.store.clone()),
|
||||||
("media-pool".into(), job.pool.clone()),
|
("media-pool".into(), job.pool.clone()),
|
||||||
@ -387,7 +413,49 @@ pub fn send_tape_backup_status(
|
|||||||
metadata.insert("job-id".into(), id.into());
|
metadata.insert("job-id".into(), id.into());
|
||||||
}
|
}
|
||||||
|
|
||||||
let notification = Notification::from_template(severity, template, data, metadata);
|
let duration = summary.duration.as_secs();
|
||||||
|
|
||||||
|
let notification = match result {
|
||||||
|
Ok(()) => {
|
||||||
|
let template_data = TapeBackupOkTemplateData {
|
||||||
|
common: CommonData::new(),
|
||||||
|
datastore: job.store.clone(),
|
||||||
|
job_id: id.map(|id| id.into()),
|
||||||
|
job_duration: duration,
|
||||||
|
tape_pool: job.pool.clone(),
|
||||||
|
tape_drive: job.drive.clone(),
|
||||||
|
used_tapes_list: summary.used_tapes.unwrap_or_default(),
|
||||||
|
snapshot_list: summary.snapshot_list,
|
||||||
|
};
|
||||||
|
|
||||||
|
Notification::from_template(
|
||||||
|
Severity::Info,
|
||||||
|
"tape-backup-ok",
|
||||||
|
serde_json::to_value(template_data)?,
|
||||||
|
metadata,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
Err(err) => {
|
||||||
|
let template_data = TapeBackupErrTemplateData {
|
||||||
|
common: CommonData::new(),
|
||||||
|
datastore: job.store.clone(),
|
||||||
|
job_id: id.map(|id| id.into()),
|
||||||
|
job_duration: duration,
|
||||||
|
tape_pool: job.pool.clone(),
|
||||||
|
tape_drive: job.drive.clone(),
|
||||||
|
used_tapes_list: summary.used_tapes.unwrap_or_default(),
|
||||||
|
snapshot_list: summary.snapshot_list,
|
||||||
|
error: format!("{err:#}"),
|
||||||
|
};
|
||||||
|
|
||||||
|
Notification::from_template(
|
||||||
|
Severity::Error,
|
||||||
|
"tape-backup-err",
|
||||||
|
serde_json::to_value(template_data)?,
|
||||||
|
metadata,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
let mode = TapeNotificationMode::from(job);
|
let mode = TapeNotificationMode::from(job);
|
||||||
|
|
||||||
@ -415,21 +483,28 @@ pub fn send_load_media_notification(
|
|||||||
label_text: &str,
|
label_text: &str,
|
||||||
reason: Option<String>,
|
reason: Option<String>,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
let device_type = if changer { "changer" } else { "drive" };
|
|
||||||
|
|
||||||
let data = json!({
|
|
||||||
"device-type": device_type,
|
|
||||||
"device": device,
|
|
||||||
"label-text": label_text,
|
|
||||||
"reason": reason,
|
|
||||||
"is-changer": changer,
|
|
||||||
});
|
|
||||||
|
|
||||||
let metadata = HashMap::from([
|
let metadata = HashMap::from([
|
||||||
("hostname".into(), proxmox_sys::nodename().into()),
|
("hostname".into(), proxmox_sys::nodename().into()),
|
||||||
("type".into(), "tape-load".into()),
|
("type".into(), "tape-load".into()),
|
||||||
]);
|
]);
|
||||||
let notification = Notification::from_template(Severity::Notice, "tape-load", data, metadata);
|
|
||||||
|
let device_type = if changer { "changer" } else { "drive" };
|
||||||
|
|
||||||
|
let template_data = TapeLoadTemplateData {
|
||||||
|
common: CommonData::new(),
|
||||||
|
load_reason: reason,
|
||||||
|
tape_drive: device.into(),
|
||||||
|
drive_type: device_type.into(),
|
||||||
|
drive_is_changer: changer,
|
||||||
|
tape_label: label_text.into(),
|
||||||
|
};
|
||||||
|
|
||||||
|
let notification = Notification::from_template(
|
||||||
|
Severity::Notice,
|
||||||
|
"tape-load",
|
||||||
|
serde_json::to_value(template_data)?,
|
||||||
|
metadata,
|
||||||
|
);
|
||||||
|
|
||||||
match mode {
|
match mode {
|
||||||
TapeNotificationMode::LegacySendmail { notify_user } => {
|
TapeNotificationMode::LegacySendmail { notify_user } => {
|
||||||
@@ -447,42 +522,22 @@ pub fn send_load_media_notification(
     Ok(())
 }
 
-fn get_server_url() -> (String, usize) {
-    // user will surely request that they can change this
-
-    let nodename = proxmox_sys::nodename();
-    let mut fqdn = nodename.to_owned();
-
-    if let Ok(resolv_conf) = crate::api2::node::dns::read_etc_resolv_conf() {
-        if let Some(search) = resolv_conf["search"].as_str() {
-            fqdn.push('.');
-            fqdn.push_str(search);
-        }
-    }
-
-    let port = 8007;
-
-    (fqdn, port)
-}
-
 pub fn send_updates_available(updates: &[&APTUpdateInfo]) -> Result<(), Error> {
-    let (fqdn, port) = get_server_url();
     let hostname = proxmox_sys::nodename().to_string();
 
-    let data = json!({
-        "fqdn": fqdn,
-        "hostname": &hostname,
-        "port": port,
-        "updates": updates,
-    });
-
     let metadata = HashMap::from([
         ("hostname".into(), hostname),
         ("type".into(), "package-updates".into()),
     ]);
 
-    let notification =
-        Notification::from_template(Severity::Info, "package-updates", data, metadata);
+    let template_data = PackageUpdatesTemplateData::new(updates);
+
+    let notification = Notification::from_template(
+        Severity::Info,
+        "package-updates",
+        serde_json::to_value(template_data)?,
+        metadata,
+    );
 
     send_notification(notification)?;
     Ok(())
@@ -491,24 +546,26 @@ pub fn send_updates_available(updates: &[&APTUpdateInfo]) -> Result<(), Error> {
 /// send email on certificate renewal failure.
 pub fn send_certificate_renewal_mail(result: &Result<(), Error>) -> Result<(), Error> {
     let error: String = match result {
-        Err(e) => e.to_string(),
+        Err(e) => format!("{e:#}"),
         _ => return Ok(()),
     };
 
-    let (fqdn, port) = get_server_url();
-
-    let data = json!({
-        "fqdn": fqdn,
-        "port": port,
-        "error": error,
-    });
-
     let metadata = HashMap::from([
         ("hostname".into(), proxmox_sys::nodename().into()),
         ("type".into(), "acme".into()),
     ]);
 
-    let notification = Notification::from_template(Severity::Info, "acme-err", data, metadata);
+    let template_data = AcmeErrTemplateData {
+        common: CommonData::new(),
+        error,
+    };
+
+    let notification = Notification::from_template(
+        Severity::Info,
+        "acme-err",
+        serde_json::to_value(template_data)?,
+        metadata,
+    );
 
     send_notification(notification)?;
     Ok(())
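
Editorial note on the pattern above (a sketch, not part of the commit): the refactor replaces ad-hoc json! maps with typed template-data structs that are serialized via serde_json::to_value before being handed to Notification::from_template. The snippet below uses made-up struct and field names and assumes serde (with the derive feature) and serde_json as dependencies; it only shows how #[serde(rename_all = "kebab-case")] together with #[serde(flatten)] produces the flat, kebab-case keys that the reworked handlebars templates reference, for example {{base-url}} and {{error}}.

use serde::Serialize;

#[derive(Serialize)]
#[serde(rename_all = "kebab-case")]
struct Common {
    hostname: String,
    base_url: String,
}

#[derive(Serialize)]
#[serde(rename_all = "kebab-case")]
struct AcmeErr {
    // flatten lifts the common fields to the top level of the serialized value
    #[serde(flatten)]
    common: Common,
    error: String,
}

fn main() -> Result<(), serde_json::Error> {
    let value = serde_json::to_value(AcmeErr {
        common: Common {
            hostname: "pbs1".into(),
            base_url: "https://pbs1.example.com:8007".into(),
        },
        error: "certificate order failed".into(),
    })?;

    // Flattening plus kebab-case renaming yields the flat keys the templates use.
    assert_eq!(value["hostname"], "pbs1");
    assert_eq!(value["base-url"], "https://pbs1.example.com:8007");
    assert_eq!(value["error"], "certificate order failed");
    Ok(())
}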
src/server/notifications/template_data.rs (new file, 344 lines)
@@ -0,0 +1,344 @@
use pbs_api_types::{APTUpdateInfo, GarbageCollectionStatus};
use serde::Serialize;

// NOTE: For some of these types, the `XyzOkTemplateData` and `XyzErrTemplateData`
// types are almost identical except for the `error` member.
// While at first glance it might make sense
// to consolidate the two and make `error` an `Option`, I would argue
// that it is actually quite nice to have a single, distinct type for
// each template. This makes it 100% clear which params are accessible
// for every single template, at the cost of some boilerplate code.

/// Template data which should be available in *all* notifications.
/// The fields of this struct will be flattened into the individual
/// *TemplateData structs.
#[derive(Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct CommonData {
    /// The hostname of the PBS host.
    pub hostname: String,
    /// The FQDN of the PBS host.
    pub fqdn: String,
    /// The base URL for building links to the web interface.
    pub base_url: String,
}

impl CommonData {
    pub fn new() -> CommonData {
        let nodename = proxmox_sys::nodename();
        let mut fqdn = nodename.to_owned();

        if let Ok(resolv_conf) = crate::api2::node::dns::read_etc_resolv_conf() {
            if let Some(search) = resolv_conf["search"].as_str() {
                fqdn.push('.');
                fqdn.push_str(search);
            }
        }

        // TODO: Some users might want to be able to override this.
        let base_url = format!("https://{fqdn}:8007");

        CommonData {
            hostname: nodename.into(),
            fqdn,
            base_url,
        }
    }
}

/// Template data for the gc-ok template.
#[derive(Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct GcOkTemplateData {
    /// Common properties.
    #[serde(flatten)]
    pub common: CommonData,
    /// The datastore.
    pub datastore: String,
    /// The task's UPID.
    pub upid: Option<String>,
    /// Number of processed index files.
    pub index_file_count: usize,
    /// Sum of bytes referred by index files.
    pub index_data_bytes: u64,
    /// Bytes used on disk.
    pub disk_bytes: u64,
    /// Chunks used on disk.
    pub disk_chunks: usize,
    /// Sum of removed bytes.
    pub removed_bytes: u64,
    /// Number of removed chunks.
    pub removed_chunks: usize,
    /// Sum of pending bytes (pending removal - kept for safety).
    pub pending_bytes: u64,
    /// Number of pending chunks (pending removal - kept for safety).
    pub pending_chunks: usize,
    /// Number of chunks marked as .bad by verify that have been removed by GC.
    pub removed_bad: usize,
    /// Number of chunks still marked as .bad after garbage collection.
    pub still_bad: usize,
    /// Factor of deduplication.
    pub deduplication_factor: String,
}

impl GcOkTemplateData {
    /// Create a new instance.
    pub fn new(datastore: String, status: &GarbageCollectionStatus) -> Self {
        let deduplication_factor = if status.disk_bytes > 0 {
            (status.index_data_bytes as f64) / (status.disk_bytes as f64)
        } else {
            1.0
        };
        let deduplication_factor = format!("{:.2}", deduplication_factor);

        Self {
            common: CommonData::new(),
            datastore,
            upid: status.upid.clone(),
            index_file_count: status.index_file_count,
            index_data_bytes: status.index_data_bytes,
            disk_bytes: status.disk_bytes,
            disk_chunks: status.disk_chunks,
            removed_bytes: status.removed_bytes,
            removed_chunks: status.removed_chunks,
            pending_bytes: status.pending_bytes,
            pending_chunks: status.pending_chunks,
            removed_bad: status.removed_bad,
            still_bad: status.still_bad,
            deduplication_factor,
        }
    }
}

/// Template data for the gc-err template.
#[derive(Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct GcErrTemplateData {
    /// Common properties.
    #[serde(flatten)]
    pub common: CommonData,
    /// The datastore.
    pub datastore: String,
    /// The error that occurred during the GC job.
    pub error: String,
}

impl GcErrTemplateData {
    /// Create a new instance.
    pub fn new(datastore: String, error: String) -> Self {
        Self {
            common: CommonData::new(),
            datastore,
            error,
        }
    }
}

/// Template data for the acme-err template.
#[derive(Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct AcmeErrTemplateData {
    /// Common properties.
    #[serde(flatten)]
    pub common: CommonData,
    /// The error that occurred when trying to request the certificate.
    pub error: String,
}

#[derive(Serialize)]
#[serde(rename_all = "kebab-case")]
/// A single package which can be upgraded.
pub struct UpgradablePackage {
    /// The name of the package.
    package_name: String,
    /// The new version which can be installed.
    available_version: String,
    /// The currently installed version.
    installed_version: String,
}

/// Template data for the package-updates template.
#[derive(Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct PackageUpdatesTemplateData {
    /// Common properties.
    #[serde(flatten)]
    pub common: CommonData,
    pub available_updates: Vec<UpgradablePackage>,
}

impl PackageUpdatesTemplateData {
    /// Create a new instance.
    pub fn new(updates: &[&APTUpdateInfo]) -> Self {
        Self {
            common: CommonData::new(),
            available_updates: updates
                .iter()
                .map(|info| UpgradablePackage {
                    package_name: info.package.clone(),
                    available_version: info.version.clone(),
                    installed_version: info.old_version.clone(),
                })
                .collect(),
        }
    }
}

/// Template data for the prune-ok template.
#[derive(Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct PruneOkTemplateData {
    /// Common properties.
    #[serde(flatten)]
    pub common: CommonData,
    /// The datastore.
    pub datastore: String,
    /// The ID of the job.
    pub job_id: String,
}

/// Template data for the prune-err template.
#[derive(Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct PruneErrTemplateData {
    /// Common properties.
    #[serde(flatten)]
    pub common: CommonData,
    /// The datastore.
    pub datastore: String,
    /// The ID of the job.
    pub job_id: String,
    /// The error that occurred during the prune job.
    pub error: String,
}

/// Template data for the sync-ok template.
#[derive(Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct SyncOkTemplateData {
    /// Common properties.
    #[serde(flatten)]
    pub common: CommonData,
    /// The datastore.
    pub datastore: String,
    /// The ID of the job.
    pub job_id: String,
    /// The remote.
    pub remote: Option<String>,
    /// The remote datastore we synced to/from.
    pub remote_datastore: String,
}

/// Template data for the sync-err template.
#[derive(Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct SyncErrTemplateData {
    /// Common properties.
    #[serde(flatten)]
    pub common: CommonData,
    /// The datastore.
    pub datastore: String,
    /// The ID of the job.
    pub job_id: String,
    /// The remote.
    pub remote: Option<String>,
    /// The remote datastore we synced to/from.
    pub remote_datastore: String,
    /// The error that occurred during the sync job.
    pub error: String,
}

/// Template data for the tape-backup-ok template.
#[derive(Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct TapeBackupOkTemplateData {
    /// Common properties.
    #[serde(flatten)]
    pub common: CommonData,
    /// The datastore that was backed up.
    pub datastore: String,
    /// The optional job id for this tape backup job.
    pub job_id: Option<String>,
    /// The total duration of the backup job in seconds.
    pub job_duration: u64,
    /// The tape pool.
    pub tape_pool: String,
    /// The name of the tape drive.
    pub tape_drive: String,
    /// The list of tapes which were used in this backup job.
    pub used_tapes_list: Vec<String>,
    /// The list of snapshots which were backed up.
    pub snapshot_list: Vec<String>,
}

/// Template data for the tape-backup-err template.
#[derive(Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct TapeBackupErrTemplateData {
    /// Common properties.
    #[serde(flatten)]
    pub common: CommonData,
    /// The datastore that was backed up.
    pub datastore: String,
    /// The optional job id for this tape backup job.
    pub job_id: Option<String>,
    /// The total duration of the backup job in seconds.
    pub job_duration: u64,
    /// The tape pool.
    pub tape_pool: String,
    /// The name of the tape drive.
    pub tape_drive: String,
    /// The list of tapes which were used in this backup job.
    pub used_tapes_list: Vec<String>,
    /// The list of snapshots which were backed up.
    pub snapshot_list: Vec<String>,
    /// The error that happened during the backup job.
    pub error: String,
}

/// Template data for the tape-load template.
#[derive(Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct TapeLoadTemplateData {
    /// Common properties.
    #[serde(flatten)]
    pub common: CommonData,
    /// The reason why the tape must be loaded.
    pub load_reason: Option<String>,
    /// The tape drive.
    pub tape_drive: String,
    /// The type of the drive (changer/drive).
    pub drive_type: String,
    /// The drive is a tape changer.
    pub drive_is_changer: bool,
    /// The label of the tape.
    pub tape_label: String,
}

/// Template data for the verify-ok template.
#[derive(Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct VerifyOkTemplateData {
    /// Common properties.
    #[serde(flatten)]
    pub common: CommonData,
    /// The datastore.
    pub datastore: String,
    /// The ID of the job.
    pub job_id: String,
}

/// Template data for the verify-err template.
#[derive(Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct VerifyErrTemplateData {
    /// Common properties.
    #[serde(flatten)]
    pub common: CommonData,
    /// The datastore.
    pub datastore: String,
    /// The ID of the job.
    pub job_id: String,
    /// The list of snapshots that failed to verify.
    pub failed_snapshot_list: Vec<String>,
}
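
A small worked example of the deduplication factor computed in GcOkTemplateData::new above (an editorial sketch with made-up numbers; the ratio is unit independent): referenced index data divided by on-disk chunk data, formatted to two decimal places, with a fallback of 1.00 for an empty store.

fn dedup_factor(index_data_bytes: u64, disk_bytes: u64) -> String {
    let factor = if disk_bytes > 0 {
        index_data_bytes as f64 / disk_bytes as f64
    } else {
        1.0
    };
    format!("{factor:.2}")
}

fn main() {
    assert_eq!(dedup_factor(200, 80), "2.50"); // 200 units referenced, stored in 80
    assert_eq!(dedup_factor(0, 0), "1.00");    // empty store falls back to 1.00
}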
@@ -28,8 +28,8 @@ use pbs_datastore::{check_backup_owner, DataStore, StoreProgress};
 use pbs_tools::sha::sha256;
 
 use super::sync::{
-    check_namespace_depth_limit, LocalSource, RemoteSource, RemovedVanishedStats, SkipInfo,
-    SkipReason, SyncSource, SyncSourceReader, SyncStats,
+    check_namespace_depth_limit, ignore_not_verified_or_encrypted, LocalSource, RemoteSource,
+    RemovedVanishedStats, SkipInfo, SkipReason, SyncSource, SyncSourceReader, SyncStats,
 };
 use crate::backup::{check_ns_modification_privs, check_ns_privs};
 use crate::tools::parallel_handler::ParallelHandler;
@@ -55,6 +55,10 @@ pub(crate) struct PullParameters {
     group_filter: Vec<GroupFilter>,
     /// How many snapshots should be transferred at most (taking the newest N snapshots)
     transfer_last: Option<usize>,
+    /// Only sync encrypted backup snapshots
+    encrypted_only: bool,
+    /// Only sync verified backup snapshots
+    verified_only: bool,
     /// Whether to re-sync corrupted snapshots
     resync_corrupt: bool,
 }
@@ -74,6 +78,8 @@ impl PullParameters {
         group_filter: Option<Vec<GroupFilter>>,
         limit: RateLimitConfig,
         transfer_last: Option<usize>,
+        encrypted_only: Option<bool>,
+        verified_only: Option<bool>,
         resync_corrupt: Option<bool>,
     ) -> Result<Self, Error> {
         if let Some(max_depth) = max_depth {
@@ -82,6 +88,8 @@ impl PullParameters {
         };
         let remove_vanished = remove_vanished.unwrap_or(false);
         let resync_corrupt = resync_corrupt.unwrap_or(false);
+        let encrypted_only = encrypted_only.unwrap_or(false);
+        let verified_only = verified_only.unwrap_or(false);
 
         let source: Arc<dyn SyncSource> = if let Some(remote) = remote {
             let (remote_config, _digest) = pbs_config::remote::config()?;
@@ -120,6 +128,8 @@ impl PullParameters {
             max_depth,
             group_filter,
             transfer_last,
+            encrypted_only,
+            verified_only,
             resync_corrupt,
         })
     }
@@ -334,11 +344,21 @@ async fn pull_single_archive<'a>(
 /// -- if not, pull it from the remote
 /// - Download log if not already existing
 async fn pull_snapshot<'a>(
+    params: &PullParameters,
     reader: Arc<dyn SyncSourceReader + 'a>,
     snapshot: &'a pbs_datastore::BackupDir,
     downloaded_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
     corrupt: bool,
+    is_new: bool,
 ) -> Result<SyncStats, Error> {
+    if is_new {
+        info!("sync snapshot {}", snapshot.dir());
+    } else if corrupt {
+        info!("re-sync snapshot {} due to corruption", snapshot.dir());
+    } else {
+        info!("re-sync snapshot {}", snapshot.dir());
+    }
+
     let mut sync_stats = SyncStats::default();
     let mut manifest_name = snapshot.full_path();
     manifest_name.push(MANIFEST_BLOB_NAME.as_ref());
@@ -383,6 +403,22 @@ async fn pull_snapshot<'a>(
 
     let manifest = BackupManifest::try_from(tmp_manifest_blob)?;
 
+    if ignore_not_verified_or_encrypted(
+        &manifest,
+        snapshot.dir(),
+        params.verified_only,
+        params.encrypted_only,
+    ) {
+        if is_new {
+            let path = snapshot.full_path();
+            // safe to remove as locked by caller
+            std::fs::remove_dir_all(&path).map_err(|err| {
+                format_err!("removing temporary backup snapshot {path:?} failed - {err}")
+            })?;
+        }
+        return Ok(sync_stats);
+    }
+
     for item in manifest.files() {
         let mut path = snapshot.full_path();
         path.push(&item.filename);
@@ -447,6 +483,7 @@ async fn pull_snapshot<'a>(
 /// The `reader` is configured to read from the source backup directory, while the
 /// `snapshot` is pointing to the local datastore and target namespace.
 async fn pull_snapshot_from<'a>(
+    params: &PullParameters,
     reader: Arc<dyn SyncSourceReader + 'a>,
     snapshot: &'a pbs_datastore::BackupDir,
     downloaded_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
@@ -456,11 +493,11 @@ async fn pull_snapshot_from<'a>(
         .datastore()
         .create_locked_backup_dir(snapshot.backup_ns(), snapshot.as_ref())?;
 
-    let sync_stats = if is_new {
-        info!("sync snapshot {}", snapshot.dir());
+    let result = pull_snapshot(params, reader, snapshot, downloaded_chunks, corrupt, is_new).await;
 
-        // this snapshot is new, so it can never be corrupt
-        match pull_snapshot(reader, snapshot, downloaded_chunks, false).await {
+    // Cleanup directory on error if snapshot was not present before
+    if is_new {
+        match result {
             Err(err) => {
                 if let Err(cleanup_err) = snapshot.datastore().remove_backup_dir(
                     snapshot.backup_ns(),
@@ -471,21 +508,11 @@ async fn pull_snapshot_from<'a>(
                 }
                 return Err(err);
             }
-            Ok(sync_stats) => {
-                info!("sync snapshot {} done", snapshot.dir());
-                sync_stats
-            }
+            Ok(_) => info!("sync snapshot {} done", snapshot.dir()),
         }
-    } else {
-        if corrupt {
-            info!("re-sync snapshot {} due to corruption", snapshot.dir());
-        } else {
-            info!("re-sync snapshot {}", snapshot.dir());
-        }
-        pull_snapshot(reader, snapshot, downloaded_chunks, corrupt).await?
-    };
+    }
 
-    Ok(sync_stats)
+    result
 }
 
 /// Pulls a group according to `params`.
@@ -612,8 +639,14 @@ async fn pull_group(
             .source
             .reader(source_namespace, &from_snapshot)
             .await?;
-        let result =
-            pull_snapshot_from(reader, &to_snapshot, downloaded_chunks.clone(), corrupt).await;
+        let result = pull_snapshot_from(
+            params,
+            reader,
+            &to_snapshot,
+            downloaded_chunks.clone(),
+            corrupt,
+        )
+        .await;
 
         progress.done_snapshots = pos as u64 + 1;
         info!("percentage done: {progress}");
@@ -936,7 +969,7 @@ pub(crate) async fn pull_ns(
             match pull_group(params, namespace, &group, &mut progress).await {
                 Ok(stats) => sync_stats.add(stats),
                 Err(err) => {
-                    info!("sync group {} failed - {err}", &group);
+                    info!("sync group {} failed - {err:#}", &group);
                     errors = true; // do not stop here, instead continue
                 }
             }
@@ -26,8 +26,8 @@ use pbs_datastore::read_chunk::AsyncReadChunk;
 use pbs_datastore::{DataStore, StoreProgress};
 
 use super::sync::{
-    check_namespace_depth_limit, LocalSource, RemovedVanishedStats, SkipInfo, SkipReason,
-    SyncSource, SyncStats,
+    check_namespace_depth_limit, ignore_not_verified_or_encrypted, LocalSource,
+    RemovedVanishedStats, SkipInfo, SkipReason, SyncSource, SyncStats,
 };
 use crate::api2::config::remote;
 
@@ -73,6 +73,10 @@ pub(crate) struct PushParameters {
     max_depth: Option<usize>,
     /// Filters for reducing the push scope
     group_filter: Vec<GroupFilter>,
+    /// Synchronize only encrypted backup snapshots
+    encrypted_only: bool,
+    /// Synchronize only verified backup snapshots
+    verified_only: bool,
     /// How many snapshots should be transferred at most (taking the newest N snapshots)
     transfer_last: Option<usize>,
 }
@@ -90,6 +94,8 @@ impl PushParameters {
         remove_vanished: Option<bool>,
         max_depth: Option<usize>,
         group_filter: Option<Vec<GroupFilter>>,
+        encrypted_only: Option<bool>,
+        verified_only: Option<bool>,
         limit: RateLimitConfig,
         transfer_last: Option<usize>,
     ) -> Result<Self, Error> {
@@ -98,6 +104,8 @@ impl PushParameters {
             remote_ns.check_max_depth(max_depth)?;
         };
         let remove_vanished = remove_vanished.unwrap_or(false);
+        let encrypted_only = encrypted_only.unwrap_or(false);
+        let verified_only = verified_only.unwrap_or(false);
         let store = DataStore::lookup_datastore(store, Some(Operation::Read))?;
 
         if !store.namespace_exists(&ns) {
@@ -149,6 +157,8 @@ impl PushParameters {
             remove_vanished,
             max_depth,
             group_filter,
+            encrypted_only,
+            verified_only,
             transfer_last,
         })
     }
@@ -800,6 +810,15 @@ pub(crate) async fn push_snapshot(
         }
     };
 
+    if ignore_not_verified_or_encrypted(
+        &source_manifest,
+        snapshot,
+        params.verified_only,
+        params.encrypted_only,
+    ) {
+        return Ok(stats);
+    }
+
     // Writer instance locks the snapshot on the remote side
     let backup_writer = BackupWriter::start(
         &params.target.client,
@@ -10,6 +10,7 @@ use std::time::Duration;
 use anyhow::{bail, format_err, Context, Error};
 use futures::{future::FutureExt, select};
 use hyper::http::StatusCode;
+use pbs_config::BackupLockGuard;
 use serde_json::json;
 use tracing::{info, warn};
 
@@ -19,13 +20,13 @@ use proxmox_router::HttpError;
 
 use pbs_api_types::{
     Authid, BackupDir, BackupGroup, BackupNamespace, CryptMode, GroupListItem, SnapshotListItem,
-    SyncDirection, SyncJobConfig, CLIENT_LOG_BLOB_NAME, MAX_NAMESPACE_DEPTH, PRIV_DATASTORE_BACKUP,
-    PRIV_DATASTORE_READ,
+    SyncDirection, SyncJobConfig, VerifyState, CLIENT_LOG_BLOB_NAME, MAX_NAMESPACE_DEPTH,
+    PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_READ,
 };
 use pbs_client::{BackupReader, BackupRepository, HttpClient, RemoteChunkReader};
 use pbs_datastore::data_blob::DataBlob;
 use pbs_datastore::read_chunk::AsyncReadChunk;
-use pbs_datastore::{DataStore, ListNamespacesRecursive, LocalChunkReader};
+use pbs_datastore::{BackupManifest, DataStore, ListNamespacesRecursive, LocalChunkReader};
 
 use crate::backup::ListAccessibleBackupGroups;
 use crate::server::jobstate::Job;
@@ -105,7 +106,7 @@ pub(crate) struct RemoteSourceReader {
 }
 
 pub(crate) struct LocalSourceReader {
-    pub(crate) _dir_lock: Arc<Mutex<proxmox_sys::fs::DirLockGuard>>,
+    pub(crate) _dir_lock: Arc<Mutex<BackupLockGuard>>,
     pub(crate) path: PathBuf,
     pub(crate) datastore: Arc<DataStore>,
 }
@@ -478,13 +479,11 @@ impl SyncSource for LocalSource {
         dir: &BackupDir,
     ) -> Result<Arc<dyn SyncSourceReader>, Error> {
         let dir = self.store.backup_dir(ns.clone(), dir.clone())?;
-        let dir_lock = proxmox_sys::fs::lock_dir_noblock_shared(
-            &dir.full_path(),
-            "snapshot",
-            "locked by another operation",
-        )?;
+        let guard = dir
+            .lock_shared()
+            .with_context(|| format!("while reading snapshot '{dir:?}' for a sync job"))?;
         Ok(Arc::new(LocalSourceReader {
-            _dir_lock: Arc::new(Mutex::new(dir_lock)),
+            _dir_lock: Arc::new(Mutex::new(guard)),
             path: dir.full_path(),
             datastore: dir.datastore().clone(),
         }))
@@ -672,6 +671,8 @@ pub fn do_sync_job(
                         sync_job.remove_vanished,
                         sync_job.max_depth,
                         sync_job.group_filter.clone(),
+                        sync_job.encrypted_only,
+                        sync_job.verified_only,
                         sync_job.limit.clone(),
                         sync_job.transfer_last,
                     )
@@ -731,3 +732,34 @@ pub fn do_sync_job(
 
     Ok(upid_str)
 }
+
+pub(super) fn ignore_not_verified_or_encrypted(
+    manifest: &BackupManifest,
+    snapshot: &BackupDir,
+    verified_only: bool,
+    encrypted_only: bool,
+) -> bool {
+    if verified_only {
+        match manifest.verify_state() {
+            Ok(Some(verify_state)) if verify_state.state == VerifyState::Ok => (),
+            _ => {
+                info!("Snapshot {snapshot} not verified but verified-only set, snapshot skipped");
+                return true;
+            }
+        }
+    }
+
+    if encrypted_only {
+        // Consider only encrypted if all files in the manifest are marked as encrypted
+        if !manifest
+            .files()
+            .iter()
+            .all(|file| file.chunk_crypt_mode() == CryptMode::Encrypt)
+        {
+            info!("Snapshot {snapshot} not encrypted but encrypted-only set, snapshot skipped");
+            return true;
+        }
+    }
+
+    false
+}
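
Editorial aside on the encrypted-only check in ignore_not_verified_or_encrypted above: a snapshot only qualifies if every file listed in the manifest uses chunk crypt mode Encrypt, so a single unencrypted blob disqualifies it. The stand-in types below are illustrative only (they are not the pbs-api-types definitions) and just demonstrate that all-or-nothing rule.

#[derive(PartialEq, Clone, Copy)]
enum CryptMode {
    None,
    Encrypt,
}

struct FileEntry {
    crypt_mode: CryptMode,
}

// Mirrors the `.all(|file| ...)` predicate used by the sync filter.
fn all_encrypted(files: &[FileEntry]) -> bool {
    files.iter().all(|f| f.crypt_mode == CryptMode::Encrypt)
}

fn main() {
    let mixed = [
        FileEntry { crypt_mode: CryptMode::Encrypt },
        FileEntry { crypt_mode: CryptMode::None },
    ];
    assert!(!all_encrypted(&mixed)); // one plain file disqualifies the snapshot

    let fully = [FileEntry { crypt_mode: CryptMode::Encrypt }];
    assert!(all_encrypted(&fully));
}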
@@ -56,6 +56,7 @@ pub struct PoolWriter {
     notification_mode: TapeNotificationMode,
     ns_magic: bool,
     used_tapes: HashSet<Uuid>,
+    read_threads: usize,
 }
 
 impl PoolWriter {
@@ -93,9 +94,15 @@ impl PoolWriter {
             notification_mode,
             ns_magic,
             used_tapes: HashSet::new(),
+            read_threads: 1,
         })
     }
 
+    /// Set the read threads to use when writing a backup to tape
+    pub fn set_read_thread_count(&mut self, read_threads: usize) {
+        self.read_threads = read_threads;
+    }
+
     pub fn pool(&mut self) -> &mut MediaPool {
         &mut self.pool
     }
@@ -541,7 +548,12 @@ impl PoolWriter {
         datastore: Arc<DataStore>,
         snapshot_reader: Arc<Mutex<SnapshotReader>>,
     ) -> Result<(std::thread::JoinHandle<()>, NewChunksIterator), Error> {
-        NewChunksIterator::spawn(datastore, snapshot_reader, Arc::clone(&self.catalog_set))
+        NewChunksIterator::spawn(
+            datastore,
+            snapshot_reader,
+            Arc::clone(&self.catalog_set),
+            self.read_threads,
+        )
     }
 
     pub(crate) fn catalog_version(&self) -> [u8; 8] {
@@ -6,8 +6,9 @@ use anyhow::{format_err, Error};
 use pbs_datastore::{DataBlob, DataStore, SnapshotReader};
 
 use crate::tape::CatalogSet;
+use crate::tools::parallel_handler::ParallelHandler;
 
-/// Chunk iterator which use a separate thread to read chunks
+/// Chunk iterator which uses separate threads to read chunks
 ///
 /// The iterator skips duplicate chunks and chunks already in the
 /// catalog.
@@ -24,8 +25,11 @@ impl NewChunksIterator {
         datastore: Arc<DataStore>,
         snapshot_reader: Arc<Mutex<SnapshotReader>>,
         catalog_set: Arc<Mutex<CatalogSet>>,
+        read_threads: usize,
     ) -> Result<(std::thread::JoinHandle<()>, Self), Error> {
-        let (tx, rx) = std::sync::mpsc::sync_channel(3);
+        // set the buffer size of the channel queues to twice the number of threads or 3, whichever
+        // is greater, to reduce the chance of a reader thread (producer) being blocked.
+        let (tx, rx) = std::sync::mpsc::sync_channel((read_threads * 2).max(3));
 
         let reader_thread = std::thread::spawn(move || {
             let snapshot_reader = snapshot_reader.lock().unwrap();
@@ -35,36 +39,43 @@ impl NewChunksIterator {
             let datastore_name = snapshot_reader.datastore_name().to_string();
 
             let result: Result<(), Error> = proxmox_lang::try_block!({
-                let mut chunk_iter = snapshot_reader.chunk_iterator(move |digest| {
+                let chunk_iter = snapshot_reader.chunk_iterator(move |digest| {
                     catalog_set
                         .lock()
                         .unwrap()
                         .contains_chunk(&datastore_name, digest)
                 })?;
 
-                loop {
-                    let digest = match chunk_iter.next() {
-                        None => {
-                            let _ = tx.send(Ok(None)); // ignore send error
-                            break;
-                        }
-                        Some(digest) => digest?,
-                    };
+                let reader_pool =
+                    ParallelHandler::new("tape backup chunk reader pool", read_threads, {
+                        let tx = tx.clone();
+                        move |digest| {
+                            let blob = datastore.load_chunk(&digest)?;
+
+                            tx.send(Ok(Some((digest, blob)))).map_err(|err| {
+                                format_err!("error sending result from reader thread: {err}")
+                            })?;
+
+                            Ok(())
+                        }
+                    });
+
+                for digest in chunk_iter {
+                    let digest = digest?;
 
                     if chunk_index.contains(&digest) {
                         continue;
                     }
 
-                    let blob = datastore.load_chunk(&digest)?;
-                    //println!("LOAD CHUNK {}", hex::encode(&digest));
-                    if let Err(err) = tx.send(Ok(Some((digest, blob)))) {
-                        eprintln!("could not send chunk to reader thread: {err}");
-                        break;
-                    }
+                    reader_pool.send(digest)?;
 
                     chunk_index.insert(digest);
                 }
 
+                reader_pool.complete()?;
+
+                let _ = tx.send(Ok(None)); // ignore send error
+
                 Ok(())
             });
             if let Err(err) = result {
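
Editorial sketch of the producer/consumer shape introduced above (standard library only; this is not the ParallelHandler used by the commit): several reader threads push loaded chunks into one bounded channel that a single consumer drains. Sizing the channel as (read_threads * 2).max(3), as in the hunk above, keeps a couple of chunks buffered per reader so a briefly slow consumer is less likely to stall the pool. The chunk payloads and counts below are made up.

use std::sync::mpsc::sync_channel;
use std::thread;

fn main() {
    let read_threads: usize = 4;
    // same sizing rule as in the hunk above
    let (tx, rx) = sync_channel::<(usize, Vec<u8>)>((read_threads * 2).max(3));

    let mut workers = Vec::new();
    for worker in 0..read_threads {
        let tx = tx.clone();
        workers.push(thread::spawn(move || {
            for i in 0..10usize {
                // stand-in for loading a chunk from the datastore
                let blob = vec![worker as u8; 16];
                tx.send((worker * 10 + i, blob)).expect("receiver hung up");
            }
        }));
    }
    drop(tx); // drop the original sender so the channel closes once all workers finish

    // single consumer drains the bounded channel
    let mut received = 0;
    for (_id, _blob) in rx {
        received += 1;
    }
    for worker in workers {
        worker.join().unwrap();
    }
    assert_eq!(received, read_threads * 10);
}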
@@ -80,7 +80,7 @@ impl SharedRateLimiter {
             .owner(user.uid)
             .group(user.gid);
 
-        create_path(&path, Some(dir_opts.clone()), Some(dir_opts))?;
+        create_path(&path, Some(dir_opts), Some(dir_opts))?;
 
         path.push(name);
 
@@ -24,7 +24,6 @@ NOTIFICATION_TEMPLATES= \
 	default/tape-load-body.txt.hbs \
 	default/tape-load-subject.txt.hbs \
 	default/test-body.txt.hbs \
-	default/test-body.html.hbs \
 	default/test-subject.txt.hbs \
 	default/verify-err-body.txt.hbs \
 	default/verify-ok-body.txt.hbs \
@@ -4,4 +4,4 @@ Error: {{error}}
 
 Please visit the web interface for further details:
 
-<https://{{fqdn}}:{{port}}/#pbsCertificateConfiguration>
+<{{base-url}}/#pbsCertificateConfiguration>
@@ -5,4 +5,4 @@ Garbage collection failed: {{error}}
 
 Please visit the web interface for further details:
 
-<https://{{fqdn}}:{{port}}/#pbsServerAdministration:tasks>
+<{{base-url}}/#pbsServerAdministration:tasks>
@@ -1 +1 @@
-Garbage Collect Datastore '{{ datastore }}' failed
+Garbage Collect Datastore '{{datastore}}' failed
@@ -1,17 +1,17 @@
 Datastore: {{datastore}}
-Task ID: {{status.upid}}
-Index file count: {{status.index-file-count}}
+Task ID: {{upid}}
+Index file count: {{index-file-count}}
 
-Removed garbage: {{human-bytes status.removed-bytes}}
-Removed chunks: {{status.removed-chunks}}
-Removed bad chunks: {{status.removed-bad}}
+Removed garbage: {{human-bytes removed-bytes}}
+Removed chunks: {{removed-chunks}}
+Removed bad chunks: {{removed-bad}}
 
-Leftover bad chunks: {{status.still-bad}}
-Pending removals: {{human-bytes status.pending-bytes}} (in {{status.pending-chunks}} chunks)
+Leftover bad chunks: {{still-bad}}
+Pending removals: {{human-bytes pending-bytes}} (in {{pending-chunks}} chunks)
 
-Original Data usage: {{human-bytes status.index-data-bytes}}
-On-Disk usage: {{human-bytes status.disk-bytes}} ({{relative-percentage status.disk-bytes status.index-data-bytes}})
-On-Disk chunks: {{status.disk-chunks}}
+Original Data usage: {{human-bytes index-data-bytes}}
+On-Disk usage: {{human-bytes disk-bytes}} ({{relative-percentage disk-bytes index-data-bytes}})
+On-Disk chunks: {{disk-chunks}}
 
 Deduplication Factor: {{deduplication-factor}}
 
@@ -20,4 +20,4 @@ Garbage collection successful.
 
 Please visit the web interface for further details:
 
-<https://{{fqdn}}:{{port}}/#DataStore-{{datastore}}>
+<{{base-url}}/#DataStore-{{datastore}}>
@@ -1 +1 @@
-Garbage Collect Datastore '{{ datastore }}' successful
+Garbage Collect Datastore '{{datastore}}' successful
@@ -1,8 +1,8 @@
 Proxmox Backup Server has the following updates available:
-{{#each updates }}
-{{Package}}: {{OldVersion}} -> {{Version~}}
-{{/each }}
+{{#each available-updates}}
+{{this.package-name}}: {{this.installed-version}} -> {{this.available-version~}}
+{{/each}}
 
 To upgrade visit the web interface:
 
-<https://{{fqdn}}:{{port}}/#pbsServerAdministration:updates>
+<{{base-url}}/#pbsServerAdministration:updates>
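
Editorial sketch with made-up values: the context the reworked package-updates body now iterates over. PackageUpdatesTemplateData serializes to a top-level available-updates array whose entries use kebab-case keys, which is what {{#each available-updates}} and {{this.package-name}} above refer to. This assumes serde_json as a dependency; the package name and versions are placeholders.

use serde_json::json;

fn main() {
    let context = json!({
        "hostname": "pbs1",
        "base-url": "https://pbs1.example.com:8007",
        "available-updates": [
            {
                "package-name": "example-package",
                "installed-version": "1.2.3-1",
                "available-version": "1.2.4-1"
            }
        ]
    });

    // {{#each available-updates}} walks this array; the body line renders as:
    //   example-package: 1.2.3-1 -> 1.2.4-1
    assert_eq!(
        context["available-updates"][0]["package-name"],
        "example-package"
    );
}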
@@ -1 +1 @@
-New software packages available ({{ hostname }})
+New software packages available ({{hostname}})
@@ -1,10 +1,10 @@
 
-Job ID: {{jobname}}
-Datastore: {{store}}
+Job ID: {{job-id}}
+Datastore: {{datastore}}
 
 Pruning failed: {{error}}
 
 
 Please visit the web interface for further details:
 
-<https://{{fqdn}}:{{port}}/#pbsServerAdministration:tasks>
+<{{base-url}}/#pbsServerAdministration:tasks>
@@ -1 +1 @@
-Pruning datastore '{{ store }}' failed
+Pruning datastore '{{datastore}}' failed
@@ -1,10 +1,10 @@
 
-Job ID: {{jobname}}
-Datastore: {{store}}
+Job ID: {{job-id}}
+Datastore: {{datastore}}
 
 Pruning successful.
 
 
 Please visit the web interface for further details:
 
-<https://{{fqdn}}:{{port}}/#DataStore-{{store}}>
+<{{base-url}}/#DataStore-{{datastore}}>
Some files were not shown because too many files have changed in this diff.