Compare commits

master...v3.3.4

No commits in common. "master" and "v3.3.4" have entirely different histories.

133 changed files with 922 additions and 3343 deletions


@@ -1,5 +1,5 @@
 [workspace.package]
-version = "3.4.1"
+version = "3.3.4"
 authors = [
 "Dietmar Maurer <dietmar@proxmox.com>",
 "Dominik Csapak <d.csapak@proxmox.com>",
@@ -13,7 +13,7 @@ authors = [
 edition = "2021"
 license = "AGPL-3"
 repository = "https://git.proxmox.com/?p=proxmox-backup.git"
-rust-version = "1.81"
+rust-version = "1.80"
 [package]
 name = "proxmox-backup"
@@ -62,7 +62,7 @@ proxmox-compression = "0.2"
 proxmox-config-digest = "0.1.0"
 proxmox-daemon = "0.1.0"
 proxmox-fuse = "0.1.3"
-proxmox-http = { version = "0.9.5", features = [ "client", "http-helpers", "websocket" ] } # see below
+proxmox-http = { version = "0.9.0", features = [ "client", "http-helpers", "websocket" ] } # see below
 proxmox-human-byte = "0.1"
 proxmox-io = "1.0.1" # tools and client use "tokio" feature
 proxmox-lang = "1.1"
@@ -71,7 +71,7 @@ proxmox-ldap = "0.2.1"
 proxmox-metrics = "0.3.1"
 proxmox-notify = "0.5.1"
 proxmox-openid = "0.10.0"
-proxmox-rest-server = { version = "0.8.9", features = [ "templates" ] }
+proxmox-rest-server = { version = "0.8.5", features = [ "templates" ] }
 # some use "cli", some use "cli" and "server", pbs-config uses nothing
 proxmox-router = { version = "3.0.0", default-features = false }
 proxmox-rrd = "0.4"
@@ -84,13 +84,13 @@ proxmox-shared-cache = "0.1"
 proxmox-shared-memory = "0.3.0"
 proxmox-sortable-macro = "0.1.2"
 proxmox-subscription = { version = "0.5.0", features = [ "api-types" ] }
-proxmox-sys = "0.6.7"
+proxmox-sys = "0.6.5"
 proxmox-systemd = "0.1"
 proxmox-tfa = { version = "5", features = [ "api", "api-types" ] }
 proxmox-time = "2"
-proxmox-uuid = { version = "1", features = [ "serde" ] }
+proxmox-uuid = "1"
 proxmox-worker-task = "0.1"
-pbs-api-types = "0.2.2"
+pbs-api-types = "0.2.0"
 # other proxmox crates
 pathpatterns = "0.3"
@@ -120,15 +120,14 @@ crc32fast = "1"
 const_format = "0.2"
 crossbeam-channel = "0.5"
 endian_trait = { version = "0.6", features = ["arrays"] }
-env_logger = "0.11"
+env_logger = "0.10"
 flate2 = "1.0"
 foreign-types = "0.3"
 futures = "0.3"
-h2 = { version = "0.4", features = [ "legacy", "stream" ] }
+h2 = { version = "0.4", features = [ "stream" ] }
 handlebars = "3.0"
 hex = "0.4.3"
-hickory-resolver = { version = "0.24.1", default-features = false, features = [ "system-config", "tokio-runtime" ] }
-hyper = { version = "0.14", features = [ "backports", "deprecated", "full" ] }
+hyper = { version = "0.14", features = [ "full" ] }
 libc = "0.2"
 log = "0.4.17"
 nix = "0.26.1"
@@ -142,6 +141,7 @@ regex = "1.5.5"
 rustyline = "9"
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
+serde_plain = "1"
 siphasher = "0.3"
 syslog = "6"
 tar = "0.4"
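Since the two sides pin different versions of several proxmox crates, a quick way to see what a given checkout actually resolves is cargo's dependency tree. A minimal, illustrative invocation (run inside the workspace; the package and crate names are taken from the diff above):

    $ cargo tree --package proxmox-backup --depth 1 | grep -E 'proxmox-(http|rest-server|sys)|pbs-api-types'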


@@ -1,10 +1,8 @@
 include /usr/share/dpkg/default.mk
-include /usr/share/rustc/architecture.mk
 include defines.mk
 PACKAGE := proxmox-backup
 ARCH := $(DEB_BUILD_ARCH)
-export DEB_HOST_RUST_TYPE
 SUBDIRS := etc www docs templates
@@ -38,20 +36,13 @@ SUBCRATES != cargo metadata --no-deps --format-version=1 \
 | grep "$$PWD/" \
 | sed -e "s!.*$$PWD/!!g" -e 's/\#.*$$//g' -e 's/)$$//g'
-STATIC_TARGET_DIR := target/static-build
 ifeq ($(BUILD_MODE), release)
-CARGO_BUILD_ARGS += --release --target $(DEB_HOST_RUST_TYPE)
+CARGO_BUILD_ARGS += --release
 COMPILEDIR := target/$(DEB_HOST_RUST_TYPE)/release
-STATIC_COMPILEDIR := $(STATIC_TARGET_DIR)/$(DEB_HOST_RUST_TYPE)/release
 else
-CARGO_BUILD_ARGS += --target $(DEB_HOST_RUST_TYPE)
 COMPILEDIR := target/$(DEB_HOST_RUST_TYPE)/debug
-STATIC_COMPILEDIR := $(STATIC_TARGET_DIR)/$(DEB_HOST_RUST_TYPE)/debug
 endif
-STATIC_RUSTC_FLAGS := -C target-feature=+crt-static -L $(STATIC_COMPILEDIR)/deps-stubs/
 ifeq ($(valgrind), yes)
 CARGO_BUILD_ARGS += --features valgrind
 endif
@@ -61,9 +52,6 @@ CARGO ?= cargo
 COMPILED_BINS := \
 $(addprefix $(COMPILEDIR)/,$(USR_BIN) $(USR_SBIN) $(SERVICE_BIN) $(RESTORE_BIN))
-STATIC_BINS := \
-$(addprefix $(STATIC_COMPILEDIR)/,proxmox-backup-client-static pxar-static)
 export DEB_VERSION DEB_VERSION_UPSTREAM
 SERVER_DEB=$(PACKAGE)-server_$(DEB_VERSION)_$(ARCH).deb
@@ -72,12 +60,10 @@ CLIENT_DEB=$(PACKAGE)-client_$(DEB_VERSION)_$(ARCH).deb
 CLIENT_DBG_DEB=$(PACKAGE)-client-dbgsym_$(DEB_VERSION)_$(ARCH).deb
 RESTORE_DEB=proxmox-backup-file-restore_$(DEB_VERSION)_$(ARCH).deb
 RESTORE_DBG_DEB=proxmox-backup-file-restore-dbgsym_$(DEB_VERSION)_$(ARCH).deb
-STATIC_CLIENT_DEB=$(PACKAGE)-client-static_$(DEB_VERSION)_$(ARCH).deb
-STATIC_CLIENT_DBG_DEB=$(PACKAGE)-client-static-dbgsym_$(DEB_VERSION)_$(ARCH).deb
 DOC_DEB=$(PACKAGE)-docs_$(DEB_VERSION)_all.deb
 DEBS=$(SERVER_DEB) $(SERVER_DBG_DEB) $(CLIENT_DEB) $(CLIENT_DBG_DEB) \
-$(RESTORE_DEB) $(RESTORE_DBG_DEB) $(STATIC_CLIENT_DEB) $(STATIC_CLIENT_DBG_DEB)
+$(RESTORE_DEB) $(RESTORE_DBG_DEB)
 DSC = rust-$(PACKAGE)_$(DEB_VERSION).dsc
@@ -85,7 +71,7 @@ DESTDIR=
 tests ?= --workspace
-all: proxmox-backup-client-static $(SUBDIRS)
+all: $(SUBDIRS)
 .PHONY: $(SUBDIRS)
 $(SUBDIRS):
@@ -155,7 +141,7 @@ clean: clean-deb
 $(foreach i,$(SUBDIRS), \
 $(MAKE) -C $(i) clean ;)
 $(CARGO) clean
-rm -f .do-cargo-build .do-static-cargo-build
+rm -f .do-cargo-build
 # allows one to avoid running cargo clean when one just wants to tidy up after a package build
 clean-deb:
@@ -204,25 +190,12 @@ $(COMPILED_BINS) $(COMPILEDIR)/dump-catalog-shell-cli $(COMPILEDIR)/docgen: .do-
 --bin sg-tape-cmd
 touch "$@"
-.PHONY: proxmox-backup-client-static
-proxmox-backup-client-static:
-rm -f .do-static-cargo-build
-$(MAKE) $(STATIC_BINS)
-$(STATIC_BINS): .do-static-cargo-build
-.do-static-cargo-build:
-mkdir -p $(STATIC_COMPILEDIR)/deps-stubs/ && \
-echo '!<arch>' > $(STATIC_COMPILEDIR)/deps-stubs/libsystemd.a # workaround for to greedy linkage and proxmox-systemd
-$(CARGO) rustc $(CARGO_BUILD_ARGS) --package pxar-bin --bin pxar \
---target-dir $(STATIC_TARGET_DIR) -- $(STATIC_RUSTC_FLAGS)
-$(CARGO) rustc $(CARGO_BUILD_ARGS) --package proxmox-backup-client --bin proxmox-backup-client \
---target-dir $(STATIC_TARGET_DIR) -- $(STATIC_RUSTC_FLAGS)
 .PHONY: lint
 lint:
 cargo clippy -- -A clippy::all -D clippy::correctness
-install: $(COMPILED_BINS) $(STATIC_BINS)
+install: $(COMPILED_BINS)
 install -dm755 $(DESTDIR)$(BINDIR)
 install -dm755 $(DESTDIR)$(ZSH_COMPL_DEST)
 $(foreach i,$(USR_BIN), \
@@ -241,19 +214,16 @@ install: $(COMPILED_BINS) $(STATIC_BINS)
 install -m4755 -o root -g root $(COMPILEDIR)/sg-tape-cmd $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/sg-tape-cmd
 $(foreach i,$(SERVICE_BIN), \
 install -m755 $(COMPILEDIR)/$(i) $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/ ;)
-install -m755 $(STATIC_COMPILEDIR)/proxmox-backup-client $(DESTDIR)$(BINDIR)/proxmox-backup-client-static
-install -m755 $(STATIC_COMPILEDIR)/pxar $(DESTDIR)$(BINDIR)/pxar-static
 $(MAKE) -C www install
 $(MAKE) -C docs install
 $(MAKE) -C templates install
 .PHONY: upload
 upload: UPLOAD_DIST ?= $(DEB_DISTRIBUTION)
-upload: $(SERVER_DEB) $(CLIENT_DEB) $(RESTORE_DEB) $(DOC_DEB) $(STATIC_CLIENT_DEB)
+upload: $(SERVER_DEB) $(CLIENT_DEB) $(RESTORE_DEB) $(DOC_DEB)
 # check if working directory is clean
 git diff --exit-code --stat && git diff --exit-code --stat --staged
 tar cf - $(SERVER_DEB) $(SERVER_DBG_DEB) $(DOC_DEB) $(CLIENT_DEB) $(CLIENT_DBG_DEB) \
 | ssh -X repoman@repo.proxmox.com upload --product pbs --dist $(UPLOAD_DIST)
 tar cf - $(CLIENT_DEB) $(CLIENT_DBG_DEB) | ssh -X repoman@repo.proxmox.com upload --product "pve,pmg,pbs-client" --dist $(UPLOAD_DIST)
-tar cf - $(STATIC_CLIENT_DEB) $(STATIC_CLIENT_DBG_DEB) | ssh -X repoman@repo.proxmox.com upload --product "pbs-client" --dist $(UPLOAD_DIST)
 tar cf - $(RESTORE_DEB) $(RESTORE_DBG_DEB) | ssh -X repoman@repo.proxmox.com upload --product "pve" --dist $(UPLOAD_DIST)
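The master-side rules removed in this hunk build the static client by passing ``-C target-feature=+crt-static`` via ``STATIC_RUSTC_FLAGS`` and stubbing out ``libsystemd.a``. As a rough sanity check (not part of the Makefile itself), the result of such a build can be inspected with standard tools; the path below follows the ``STATIC_COMPILEDIR`` layout from the diff and assumes an amd64 build:

    $ file target/static-build/x86_64-unknown-linux-gnu/release/proxmox-backup-client
    $ ldd target/static-build/x86_64-unknown-linux-gnu/release/proxmox-backup-client    # typically reports "not a dynamic executable" for a static binary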

debian/changelog

@@ -1,187 +1,3 @@
-rust-proxmox-backup (3.4.1-1) bookworm; urgency=medium
-* ui: token view: fix typo in 'lose' and rephrase token regenerate dialog
-message for more clarity.
-* restrict consent-banner text length to 64 KiB.
-* docs: describe the intend for the statically linked pbs client.
-* api: backup: include previous snapshot name in log message.
-* garbage collection: account for created/deleted index files concurrently
-to GC to avoid potentially confusing log messages.
-* garbage collection: fix rare race in chunk marking phase for setups doing
-high frequent backups in quick succession while immediately pruning to a
-single backup snapshot being left over after each such backup.
-* tape: wait for calibration of LTO-9 tapes in general, not just in the
-initial tape format procedure.
- -- Proxmox Support Team <support@proxmox.com> Wed, 16 Apr 2025 14:45:37 +0200
-rust-proxmox-backup (3.4.0-1) bookworm; urgency=medium
-* fix #4788: build statically linked version of the proxmox-backup-client
-package.
-* ui: sync job: change the rate limit direction based on sync direction.
-* docs: mention how to set the push sync jobs rate limit
-* ui: set error mask: ensure that message is html-encoded to avoid visual
-glitches.
-* api server: increase maximal request body size fro 64 kiB to 512 kiB,
-similar to a recent change for our perl based projects.
-* notifications: include Content-Length header for broader compatibility in
-the webhook and gotify targets.
-* notifications: allow overriding notification templates.
-* docs: notifications: add section about how to use custom templates
-* sync: print whole error chain per group on failure for more context.
-* ui: options-view: fix typo in empty-text for GC tuning option.
-* memory info: use the "MemAvailable" field from '/proc/meminfo' to compute
-used memory to fix overestimation of that metric and to better align with
-what modern versions of tools like `free` do and to future proof against
-changes in how the kernel accounts memory usage for.
-* add "MemAvailable" field to ProcFsMemInfo to promote its usage over the
-existing "MemFree" field, which is almost never the right choice. This new
-field will be provided for external metric server.
-* docs: mention different name resolution for statically linked binary.
-* docs: add basic info for how to install the statically linked client.
-* docs: mention new verify-only and encrypted-only flags for sync jobs.
- -- Proxmox Support Team <support@proxmox.com> Wed, 09 Apr 2025 17:41:38 +0200
-rust-proxmox-backup (3.3.7-1) bookworm; urgency=medium
-* fix #5982: garbage collection: add a check to ensure that the underlying
-file system supports and honors file access time (atime) updates.
-The check is performed once on datastore creation and on start of every
-garbage collection (GC) task, just to be sure. It can be disabled in the
-datastore tuning options.
-* garbage collection: support setting a custom access time cutoff,
-overriding the default of one day and five minutes.
-* ui: expose flag for GC access time support safety check and the access
-time cutoff override in datastore tuning options.
-* docs: describe rationale for new GC access time update check setting and
-the access time cutoff check in tuning options.
-* access control: add support to mark a specific authentication realm as
-default selected realm for the login user interface.
-* fix #4382: api: access control: remove permissions of token on deletion.
-* fix #3887: api: access control: allow users to regenerate the secret of an
-API token without changing any existing ACLs.
-* fix #6072: sync jobs: support flags to limit sync to only encrypted and/or
-verified snapshots.
-* ui: datastore tuning options: expose overriding GC cache capacity so that
-admins can either restrict the peak memory usage during GC or allow GC to
-use more memory to reduce file system IO even for huge (multiple TiB)
-referenced data in backup groups.
-* ui: datastore tuning options: increase width and rework labels to provide
-a tiny bit more context about what these options are.
-* ui: sync job: increase edit window width to 720px to make it less cramped.
-* ui: sync job: small field label casing consistency fixes.
- -- Proxmox Support Team <support@proxmox.com> Sat, 05 Apr 2025 17:54:31 +0200
-rust-proxmox-backup (3.3.6-1) bookworm; urgency=medium
-* datastore: ignore group locking errors when removing snapshots, they
-normally happen only due to old-locking, and the underlying snapshot is
-deleted in any case at this point, so it's no help to confuse the user.
-* api: datastore: add error message on failed removal due to old locking and
-tell any admin what they can do to switch to the new locking.
-* ui: only add delete parameter on token edit, not when creating tokens.
-* pbs-client: allow reading fingerprint from system credential.
-* docs: client: add section about system credentials integration.
- -- Proxmox Support Team <support@proxmox.com> Thu, 03 Apr 2025 17:57:02 +0200
-rust-proxmox-backup (3.3.5-1) bookworm; urgency=medium
-* api: config: use guard for unmounting on failed datastore creation
-* client: align description for backup specification to docs, using
-`archive-name` and `type` over `label` and `ext`.
-* client: read credentials from CREDENTIALS_DIRECTORY environment variable
-following the "System and Service Credentials" specification. This allows
-users to use native systemd capabilities for credential management if the
-proxmox-backup-client is used in systemd units or, e.g., through a wrapper
-like systemd-run.
-* fix #3935: datastore/api/backup: move datastore locking to '/run' to avoid
-that lock-files can block deleting backup groups or snapshots on the
-datastore and to decouple locking from the underlying datastore
-file-system.
-* api: fix race when changing the owner of a backup-group.
-* fix #3336: datastore: remove group if the last snapshot is removed to
-avoid confusing situations where the group directory still exists and
-blocks re-creating a group with another owner even though the empty group
-was not visible in the web UI.
-* notifications: clean-up and add dedicated types for all templates as to
-allow declaring that interface stable in preparation for allowing
-overriding them in the future (not included in this release).
-* tape: introduce a tape backup job worker-thread option for restores.
-Depending on the underlying storage using more threads can dramatically
-improve the restore speed. Especially fast storage with low penalty for
-random access, like flash-storage (SSDs) can profit from using more
-worker threads. But on file systems backed by spinning disks (HDDs) the
-performance can even degrade with more threads. This is why for now the
-default is left at a single thread and the admin needs to tune this for
-their storage.
-* garbage collection: generate index file list via datastore iterators in a
-structured manner.
-* fix #5331: garbage collection: avoid multiple chunk atime updates by
-keeping track of the recently marked chunks in phase 1 of garbage to avoid
-multiple atime updates via relatively expensive utimensat (touch) calls.
-Use a LRU cache with size 32 MiB for tracking already processed chunks,
-this fully covers backup groups referencing up to 4 TiB of actual chunks
-and even bigger ones can still benefit from the cache. On some real-world
-benchmarks of a datastore with 1.5 million chunks, and original data
-usage of 120 TiB and a referenced data usage of 2.7 TiB (high
-deduplication count due to long-term history) we measured 21.1 times less
-file updates (31.6 million) and a 6.1 times reduction in total GC runtime
-(155.4 s to 22.8 s) on a ZFS RAID 10 system consisting of spinning HDDs
-and a special device mirror backed by datacenter SSDs.
-* logging helper: use new builder initializer not functional change
-intended.
- -- Proxmox Support Team <support@proxmox.com> Wed, 02 Apr 2025 19:42:38 +0200
 rust-proxmox-backup (3.3.4-1) bookworm; urgency=medium
 * fix #6185: client/docs: explicitly mention archive name restrictions

debian/control

@@ -25,17 +25,15 @@ Build-Depends: bash-completion,
 librust-crossbeam-channel-0.5+default-dev,
 librust-endian-trait-0.6+arrays-dev,
 librust-endian-trait-0.6+default-dev,
-librust-env-logger-0.11+default-dev,
+librust-env-logger-0.10+default-dev,
 librust-foreign-types-0.3+default-dev,
 librust-futures-0.3+default-dev,
 librust-h2-0.4+default-dev,
-librust-h2-0.4+legacy-dev,
 librust-h2-0.4+stream-dev,
 librust-hex-0.4+default-dev (>= 0.4.3-~~),
 librust-hex-0.4+serde-dev (>= 0.4.3-~~),
-librust-hyper-0.14+backports-dev,
+librust-http-0.2+default-dev,
 librust-hyper-0.14+default-dev,
-librust-hyper-0.14+deprecated-dev,
 librust-hyper-0.14+full-dev,
 librust-libc-0.2+default-dev,
 librust-log-0.4+default-dev (>= 0.4.17-~~),
@@ -45,7 +43,7 @@ Build-Depends: bash-completion,
 librust-once-cell-1+default-dev (>= 1.3.1-~~),
 librust-openssl-0.10+default-dev (>= 0.10.40-~~),
 librust-pathpatterns-0.3+default-dev,
-librust-pbs-api-types-0.2+default-dev (>= 0.2.2),
+librust-pbs-api-types-0.2+default-dev,
 librust-percent-encoding-2+default-dev (>= 2.1-~~),
 librust-pin-project-lite-0.2+default-dev,
 librust-proxmox-acme-0.5+default-dev (>= 0.5.3-~~),
@@ -54,6 +52,7 @@ Build-Depends: bash-completion,
 librust-proxmox-apt-api-types-1+default-dev (>= 1.0.1-~~),
 librust-proxmox-async-0.4+default-dev,
 librust-proxmox-auth-api-0.4+api-dev,
+librust-proxmox-auth-api-0.4+api-types-dev,
 librust-proxmox-auth-api-0.4+default-dev,
 librust-proxmox-auth-api-0.4+pam-authenticator-dev,
 librust-proxmox-borrow-1+default-dev,
@@ -61,14 +60,14 @@ Build-Depends: bash-completion,
 librust-proxmox-config-digest-0.1+default-dev,
 librust-proxmox-daemon-0.1+default-dev,
 librust-proxmox-fuse-0.1+default-dev (>= 0.1.3-~~),
-librust-proxmox-http-0.9+client-dev (>= 0.9.5-~~),
-librust-proxmox-http-0.9+client-trait-dev (>= 0.9.5-~~),
-librust-proxmox-http-0.9+default-dev (>= 0.9.5-~~),
-librust-proxmox-http-0.9+http-helpers-dev (>= 0.9.5-~~),
-librust-proxmox-http-0.9+proxmox-async-dev (>= 0.9.5-~~),
-librust-proxmox-http-0.9+rate-limited-stream-dev (>= 0.9.5-~~),
-librust-proxmox-http-0.9+rate-limiter-dev (>= 0.9.5-~~),
-librust-proxmox-http-0.9+websocket-dev (>= 0.9.5-~~),
+librust-proxmox-http-0.9+client-dev,
+librust-proxmox-http-0.9+client-trait-dev,
+librust-proxmox-http-0.9+default-dev,
+librust-proxmox-http-0.9+http-helpers-dev,
+librust-proxmox-http-0.9+proxmox-async-dev,
+librust-proxmox-http-0.9+rate-limited-stream-dev,
+librust-proxmox-http-0.9+rate-limiter-dev,
+librust-proxmox-http-0.9+websocket-dev,
 librust-proxmox-human-byte-0.1+default-dev,
 librust-proxmox-io-1+default-dev (>= 1.0.1-~~),
 librust-proxmox-io-1+tokio-dev (>= 1.0.1-~~),
@@ -79,9 +78,9 @@ Build-Depends: bash-completion,
 librust-proxmox-notify-0.5+default-dev (>= 0.5.1-~~),
 librust-proxmox-notify-0.5+pbs-context-dev (>= 0.5.1-~~),
 librust-proxmox-openid-0.10+default-dev,
-librust-proxmox-rest-server-0.8+default-dev (>= 0.8.9-~~),
-librust-proxmox-rest-server-0.8+rate-limited-stream-dev (>= 0.8.9-~~),
-librust-proxmox-rest-server-0.8+templates-dev (>= 0.8.9-~~),
+librust-proxmox-rest-server-0.8+default-dev (>= 0.8.5-~~),
+librust-proxmox-rest-server-0.8+rate-limited-stream-dev (>= 0.8.5-~~),
+librust-proxmox-rest-server-0.8+templates-dev (>= 0.8.5-~~),
 librust-proxmox-router-3+cli-dev,
 librust-proxmox-router-3+server-dev,
 librust-proxmox-rrd-0.4+default-dev,
@@ -98,7 +97,7 @@ Build-Depends: bash-completion,
 librust-proxmox-subscription-0.5+default-dev,
 librust-proxmox-sys-0.6+acl-dev (>= 0.6.5-~~),
 librust-proxmox-sys-0.6+crypt-dev (>= 0.6.5-~~),
-librust-proxmox-sys-0.6+default-dev (>= 0.6.7-~~),
+librust-proxmox-sys-0.6+default-dev (>= 0.6.5-~~),
 librust-proxmox-sys-0.6+logrotate-dev (>= 0.6.5-~~),
 librust-proxmox-sys-0.6+timer-dev (>= 0.6.5-~~),
 librust-proxmox-systemd-0.1+default-dev,
@@ -115,6 +114,7 @@ Build-Depends: bash-completion,
 librust-serde-1+default-dev,
 librust-serde-1+derive-dev,
 librust-serde-json-1+default-dev,
+librust-serde-plain-1+default-dev,
 librust-syslog-6+default-dev,
 librust-tar-0.4+default-dev,
 librust-termcolor-1+default-dev (>= 1.1.2-~~),
@@ -205,14 +205,6 @@ Description: Proxmox Backup Client tools
 This package contains the Proxmox Backup client, which provides a
 simple command line tool to create and restore backups.
-Package: proxmox-backup-client-static
-Architecture: any
-Depends: qrencode, ${misc:Depends},
-Conflicts: proxmox-backup-client,
-Description: Proxmox Backup Client tools (statically linked)
-This package contains the Proxmox Backup client, which provides a
-simple command line tool to create and restore backups.
 Package: proxmox-backup-docs
 Build-Profiles: <!nodoc>
 Section: doc
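With Build-Depends differing between the two sides, a quick way to check whether the local build environment satisfies a given control file is dpkg-checkbuilddeps, run from the unpacked source tree (illustrative use of a standard Debian tool, not part of this diff):

    $ dpkg-checkbuilddeps    # prints unmet build dependencies, if any, and exits non-zero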

debian/postinst

@@ -20,7 +20,15 @@ case "$1" in
 # modeled after dh_systemd_start output
 systemctl --system daemon-reload >/dev/null || true
 if [ -n "$2" ]; then
-_dh_action=try-reload-or-restart
+if dpkg --compare-versions "$2" 'lt' '1.0.7-1'; then
+# there was an issue with reloading and systemd being confused in older daemon versions
+# so restart instead of reload if upgrading from there, see commit 0ec79339f7aebf9
+# FIXME: remove with PBS 2.1
+echo "Upgrading from older proxmox-backup-server: restart (not reload) daemons"
+_dh_action=try-restart
+else
+_dh_action=try-reload-or-restart
+fi
 else
 _dh_action=start
 fi
@@ -72,11 +80,6 @@ EOF
 update_sync_job "$prev_job"
 fi
 fi
-if dpkg --compare-versions "$2" 'lt' '3.3.5~'; then
-# ensure old locking is used by the daemon until a reboot happened
-touch "/run/proxmox-backup/old-locking"
-fi
 fi
 ;;
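For context on the version guards above: dpkg --compare-versions exits with status 0 when the stated relation holds, which is why it can be used directly in an if statement in maintainer scripts. A minimal, self-contained illustration (version values are examples only):

    if dpkg --compare-versions "3.3.4-1" lt "3.3.5~"; then
        echo "upgrading from a version before 3.3.5"
    fi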


@@ -1,2 +0,0 @@
-debian/proxmox-backup-client.bc proxmox-backup-client
-debian/pxar.bc pxar


@@ -1,4 +0,0 @@
-usr/share/man/man1/proxmox-backup-client.1
-usr/share/man/man1/pxar.1
-usr/share/zsh/vendor-completions/_proxmox-backup-client
-usr/share/zsh/vendor-completions/_pxar


@@ -34,13 +34,13 @@ usr/share/man/man5/media-pool.cfg.5
 usr/share/man/man5/notifications-priv.cfg.5
 usr/share/man/man5/notifications.cfg.5
 usr/share/man/man5/proxmox-backup.node.cfg.5
-usr/share/man/man5/prune.cfg.5
 usr/share/man/man5/remote.cfg.5
 usr/share/man/man5/sync.cfg.5
 usr/share/man/man5/tape-job.cfg.5
 usr/share/man/man5/tape.cfg.5
 usr/share/man/man5/user.cfg.5
 usr/share/man/man5/verification.cfg.5
+usr/share/man/man5/prune.cfg.5
 usr/share/proxmox-backup/templates/default/acme-err-body.txt.hbs
 usr/share/proxmox-backup/templates/default/acme-err-subject.txt.hbs
 usr/share/proxmox-backup/templates/default/gc-err-body.txt.hbs
@@ -63,6 +63,7 @@ usr/share/proxmox-backup/templates/default/tape-backup-ok-body.txt.hbs
 usr/share/proxmox-backup/templates/default/tape-backup-ok-subject.txt.hbs
 usr/share/proxmox-backup/templates/default/tape-load-body.txt.hbs
 usr/share/proxmox-backup/templates/default/tape-load-subject.txt.hbs
+usr/share/proxmox-backup/templates/default/test-body.html.hbs
 usr/share/proxmox-backup/templates/default/test-body.txt.hbs
 usr/share/proxmox-backup/templates/default/test-subject.txt.hbs
 usr/share/proxmox-backup/templates/default/verify-err-body.txt.hbs

debian/rules

@@ -47,9 +47,6 @@ override_dh_auto_install:
 dh_auto_install -- \
 PROXY_USER=backup \
 LIBDIR=/usr/lib/$(DEB_HOST_MULTIARCH)
-mkdir -p debian/proxmox-backup-client-static/usr/bin
-mv debian/tmp/usr/bin/proxmox-backup-client-static debian/proxmox-backup-client-static/usr/bin/proxmox-backup-client
-mv debian/tmp/usr/bin/pxar-static debian/proxmox-backup-client-static/usr/bin/pxar
 override_dh_installsystemd:
 dh_installsystemd -pproxmox-backup-server proxmox-backup-daily-update.timer


@@ -1,5 +1,3 @@
-.. _client_usage:
 Backup Client Usage
 ===================
@@ -46,24 +44,6 @@ user\@pbs!token@host:store ``user@pbs!token`` host:8007 store
 [ff80::51]:1234:mydatastore ``root@pam`` [ff80::51]:1234 mydatastore
 ================================ ================== ================== ===========
-.. _statically_linked_client:
-Statically Linked Backup Client
--------------------------------
-A statically linked version of the Proxmox Backup client is available for Linux
-based systems where the regular client is not available. Please note that it is
-recommended to use the regular client when possible, as the statically linked
-client is not a full replacement. For example, name resolution will not be
-performed via the mechanisms provided by libc, but uses a resolver written
-purely in the Rust programming language. Therefore, features and modules
-provided by Name Service Switch cannot be used.
-The statically linked client is available via the ``pbs-client`` repository as
-described in the :ref:`installation <install_pbc>` section.
-.. _environment-variables:
 Environment Variables
 ---------------------
@@ -109,43 +89,6 @@ Environment Variables
 you can add arbitrary comments after the first newline.
-System and Service Credentials
-------------------------------
-Some of the :ref:`environment variables <environment-variables>` above can be
-set using `system and service credentials <https://systemd.io/CREDENTIALS/>`_
-instead.
-============================ ==============================================
-Environment Variable Credential Name Equivalent
-============================ ==============================================
-``PBS_REPOSITORY`` ``proxmox-backup-client.repository``
-``PBS_PASSWORD`` ``proxmox-backup-client.password``
-``PBS_ENCRYPTION_PASSWORD`` ``proxmox-backup-client.encryption-password``
-``PBS_FINGERPRINT`` ``proxmox-backup-client.fingerprint``
-============================ ==============================================
-For example, the repository password can be stored in an encrypted file as
-follows:
-.. code-block:: console
-# systemd-ask-password -n | systemd-creds encrypt --name=proxmox-backup-client.password - my-api-token.cred
-The credential can then be reused inside of unit files or in a transient scope
-unit as follows:
-.. code-block:: console
-# systemd-run --pipe --wait \
---property=LoadCredentialEncrypted=proxmox-backup-client.password:/full/path/to/my-api-token.cred \
---property=SetCredential=proxmox-backup-client.repository:'my_default_repository' \
-proxmox-backup-client ...
-Additionally, system credentials (e.g. passed down from the hypervisor to a
-virtual machine via SMBIOS type 11) can be loaded on a service via
-`LoadCredential=` as described in the manual page ``systemd.exec(5)``.
 Output Format
 -------------
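The master-side "System and Service Credentials" section above stores the repository password with ``systemd-creds encrypt``. For completeness, a credential encrypted that way can be checked by decrypting it again under the same name; this is standard systemd-creds usage and not part of the documented workflow:

    # systemd-creds decrypt --name=proxmox-backup-client.password my-api-token.cred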


@@ -138,26 +138,7 @@ you need to run:
 # apt update
 # apt install proxmox-backup-client
-Install Statically Linked Proxmox Backup Client
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Proxmox provides a statically linked build of the Proxmox backup client that
-should run on any modern x86-64 Linux system.
-It is currently available as a Debian package. After configuring the
-:ref:`package_repositories_client_only_apt`, you need to run:
-.. code-block:: console
-# apt update
-# apt install proxmox-backup-client-static
-This package conflicts with the `proxmox-backup-client` package, as both
-provide the client as an executable in the `/usr/bin/proxmox-backup-client`
-path.
-You can copy this executable to other, e.g. non-Debian based Linux systems.
-For details on using the Proxmox Backup Client, see :ref:`client_usage`.
+.. note:: The client-only repository should be usable by most recent Debian and
+Ubuntu derivatives.
 .. include:: package-repositories.rst


@@ -72,10 +72,6 @@ either start it manually from the GUI or provide it with a schedule (see
 Backup snapshots, groups and namespaces which are no longer available on the
 **Remote** datastore can be removed from the local datastore as well by setting
 the ``remove-vanished`` option for the sync job.
-Setting the ``verified-only`` or ``encrypted-only`` flags allows to limit the
-sync jobs to backup snapshots which have been verified or encrypted,
-respectively. This is particularly of interest when sending backups to a less
-trusted remote backup server.
 .. code-block:: console
@@ -231,16 +227,13 @@ Bandwidth Limit
 Syncing a datastore to an archive can produce a lot of traffic and impact other
 users of the network. In order to avoid network or storage congestion, you can
-limit the bandwidth of a sync job in pull direction by setting the ``rate-in``
-option either in the web interface or using the ``proxmox-backup-manager``
-command-line tool:
+limit the bandwidth of the sync job by setting the ``rate-in`` option either in
+the web interface or using the ``proxmox-backup-manager`` command-line tool:
 .. code-block:: console
 # proxmox-backup-manager sync-job update ID --rate-in 20MiB
-For sync jobs in push direction use the ``rate-out`` option instead.
 Sync Direction Push
 ^^^^^^^^^^^^^^^^^^^
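The master side of this hunk mentions a ``rate-out`` option for sync jobs in push direction. Mirroring the ``rate-in`` command shown above, such a limit would be set roughly like this (sketch based on the option name given in the removed text):

    # proxmox-backup-manager sync-job update ID --rate-out 20MiB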


@ -7,25 +7,26 @@ Overview
-------- --------
* Proxmox Backup Server emits :ref:`notification_events` in case of noteworthy * Proxmox Backup Server emits :ref:`notification_events` in case of noteworthy
events in the system. These events are handled by the notification system. A events in the system. These events are handled by the notification system.
notification event has metadata, for example a timestamp, a severity level, a A notification event has metadata, for example a timestamp, a severity level,
type and other metadata fields. a type and other metadata fields.
* :ref:`notification_matchers` route a notification event to one or more * :ref:`notification_matchers` route a notification event to one or more notification
notification targets. A matcher can have match rules to selectively route targets. A matcher can have match rules to selectively route based on the metadata
based on the metadata of a notification event. of a notification event.
* :ref:`notification_targets` are a destination to which a notification event * :ref:`notification_targets` are a destination to which a notification event
is routed to by a matcher. There are multiple types of target, mail-based is routed to by a matcher. There are multiple types of target, mail-based
(Sendmail and SMTP) and Gotify. (Sendmail and SMTP) and Gotify.
Datastores and tape backup jobs have a configurable :ref:`notification_mode`. Datastores and tape backup jobs have a configurable :ref:`notification_mode`.
It allows you to choose between the notification system and a legacy mode for It allows you to choose between the notification system and a legacy mode
sending notification emails. The legacy mode is equivalent to the way for sending notification emails. The legacy mode is equivalent to the
notifications were handled before Proxmox Backup Server 3.2. way notifications were handled before Proxmox Backup Server 3.2.
The notification system can be configured in the GUI under *Configuration → The notification system can be configured in the GUI under
Notifications*. The configuration is stored in :ref:`notifications.cfg` and *Configuration → Notifications*. The configuration is stored in
:ref:`notifications_priv.cfg` - the latter contains sensitive configuration :ref:`notifications.cfg` and :ref:`notifications_priv.cfg` -
options such as passwords or authentication tokens for notification targets and the latter contains sensitive configuration options such as
passwords or authentication tokens for notification targets and
can only be read by ``root``. can only be read by ``root``.
.. _notification_targets: .. _notification_targets:
@ -40,23 +41,22 @@ Proxmox Backup Server offers multiple types of notification targets.
Sendmail Sendmail
^^^^^^^^ ^^^^^^^^
The sendmail binary is a program commonly found on Unix-like operating systems The sendmail binary is a program commonly found on Unix-like operating systems
that handles the sending of email messages. It is a command-line utility that that handles the sending of email messages.
allows users and applications to send emails directly from the command line or It is a command-line utility that allows users and applications to send emails
from within scripts. directly from the command line or from within scripts.
The sendmail notification target uses the ``sendmail`` binary to send emails to The sendmail notification target uses the ``sendmail`` binary to send emails to a
a list of configured users or email addresses. If a user is selected as a list of configured users or email addresses. If a user is selected as a recipient,
recipient, the email address configured in user's settings will be used. For the email address configured in user's settings will be used.
the ``root@pam`` user, this is the email address entered during installation. A For the ``root@pam`` user, this is the email address entered during installation.
user's email address can be configured in ``Configuration → Access Control → A user's email address can be configured in ``Configuration -> Access Control -> User Management``.
User Management``. If a user has no associated email address, no email will be If a user has no associated email address, no email will be sent.
sent.
.. NOTE:: In standard Proxmox Backup Server installations, the ``sendmail`` .. NOTE:: In standard Proxmox Backup Server installations, the ``sendmail`` binary is provided by
binary is provided by Postfix. It may be necessary to configure Postfix so Postfix. It may be necessary to configure Postfix so that it can deliver
that it can deliver mails correctly - for example by setting an external mails correctly - for example by setting an external mail relay (smart host).
mail relay (smart host). In case of failed delivery, check the system logs In case of failed delivery, check the system logs for messages logged by
for messages logged by the Postfix daemon. the Postfix daemon.
See :ref:`notifications.cfg` for all configuration options. See :ref:`notifications.cfg` for all configuration options.
@ -64,13 +64,13 @@ See :ref:`notifications.cfg` for all configuration options.
SMTP SMTP
^^^^ ^^^^
SMTP notification targets can send emails directly to an SMTP mail relay. This SMTP notification targets can send emails directly to an SMTP mail relay.
target does not use the system's MTA to deliver emails. Similar to sendmail This target does not use the system's MTA to deliver emails.
targets, if a user is selected as a recipient, the user's configured email Similar to sendmail targets, if a user is selected as a recipient, the user's configured
address will be used. email address will be used.
.. NOTE:: Unlike sendmail targets, SMTP targets do not have any queuing/retry .. NOTE:: Unlike sendmail targets, SMTP targets do not have any queuing/retry mechanism
mechanism in case of a failed mail delivery. in case of a failed mail delivery.
See :ref:`notifications.cfg` for all configuration options. See :ref:`notifications.cfg` for all configuration options.
@ -78,10 +78,10 @@ See :ref:`notifications.cfg` for all configuration options.
Gotify Gotify
^^^^^^ ^^^^^^
`Gotify <http://gotify.net>`_ is an open-source self-hosted notification server `Gotify <http://gotify.net>`_ is an open-source self-hosted notification server that
that allows you to send push notifications to various devices and applications. allows you to send push notifications to various devices and
It provides a simple API and web interface, making it easy to integrate with applications. It provides a simple API and web interface, making it easy to
different platforms and services. integrate with different platforms and services.
.. NOTE:: Gotify targets will respect the HTTP proxy settings from .. NOTE:: Gotify targets will respect the HTTP proxy settings from
Configuration → Other → HTTP proxy Configuration → Other → HTTP proxy
@ -95,28 +95,27 @@ Webhook notification targets perform HTTP requests to a configurable URL.
The following configuration options are available: The following configuration options are available:
* ``url``: The URL to which to perform the HTTP requests. Supports templating * ``url``: The URL to which to perform the HTTP requests.
to inject message contents, metadata and secrets. Supports templating to inject message contents, metadata and secrets.
* ``method``: HTTP Method to use (POST/PUT/GET) * ``method``: HTTP Method to use (POST/PUT/GET)
* ``header``: Array of HTTP headers that should be set for the request. * ``header``: Array of HTTP headers that should be set for the request.
Supports templating to inject message contents, metadata and secrets. Supports templating to inject message contents, metadata and secrets.
* ``body``: HTTP body that should be sent. Supports templating to inject * ``body``: HTTP body that should be sent.
message contents, metadata and secrets. Supports templating to inject message contents, metadata and secrets.
* ``secret``: Array of secret key-value pairs. These will be stored in a * ``secret``: Array of secret key-value pairs. These will be stored in
protected configuration file only readable by root. Secrets can be a protected configuration file only readable by root. Secrets can be
accessed in body/header/URL templates via the ``secrets`` namespace. accessed in body/header/URL templates via the ``secrets`` namespace.
* ``comment``: Comment for this target. * ``comment``: Comment for this target.
For configuration options that support templating, the `Handlebars For configuration options that support templating, the
<https://handlebarsjs.com>`_ syntax can be used to access the following `Handlebars <https://handlebarsjs.com>`_ syntax can be used to
properties: access the following properties:
* ``{{ title }}``: The rendered notification title * ``{{ title }}``: The rendered notification title
* ``{{ message }}``: The rendered notification body * ``{{ message }}``: The rendered notification body
* ``{{ severity }}``: The severity of the notification (``info``, ``notice``, * ``{{ severity }}``: The severity of the notification (``info``, ``notice``,
``warning``, ``error``, ``unknown``) ``warning``, ``error``, ``unknown``)
* ``{{ timestamp }}``: The notification's timestamp as a UNIX epoch (in * ``{{ timestamp }}``: The notification's timestamp as a UNIX epoch (in seconds).
seconds).
* ``{{ fields.<name> }}``: Sub-namespace for any metadata fields of the * ``{{ fields.<name> }}``: Sub-namespace for any metadata fields of the
notification. For instance, ``fields.type`` contains the notification notification. For instance, ``fields.type`` contains the notification
type - for all available fields refer to :ref:`notification_events`. type - for all available fields refer to :ref:`notification_events`.
@ -198,19 +197,20 @@ Example - Slack
Notification Matchers Notification Matchers
--------------------- ---------------------
Notification matchers route notifications to notification targets based on Notification matchers route notifications to notification targets based
their matching rules. These rules can match certain properties of a on their matching rules. These rules can match certain properties of a
notification, such as the timestamp (``match-calendar``), the severity of the notification, such as the timestamp (``match-calendar``), the severity of
notification (``match-severity``) or metadata fields (``match-field``). If a the notification (``match-severity``) or metadata fields (``match-field``).
notification is matched by a matcher, all targets configured for the matcher If a notification is matched by a matcher, all targets configured for the
will receive the notification. matcher will receive the notification.
An arbitrary number of matchers can be created, each with with their own An arbitrary number of matchers can be created, each with with their own
matching rules and targets to notify. Every target is notified at most once for matching rules and targets to notify.
every notification, even if the target is used in multiple matchers. Every target is notified at most once for every notification, even if
the target is used in multiple matchers.
A matcher without rules matches any notification; the configured targets will A matcher without rules matches any notification; the configured targets
always be notified. will always be notified.
See :ref:`notifications.cfg` for all configuration options. See :ref:`notifications.cfg` for all configuration options.
@ -227,24 +227,20 @@ Examples:
Field Matching Rules Field Matching Rules
^^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^
Notifications have a selection of metadata fields that can be matched. When Notifications have a selection of metadata fields that can be matched.
using ``exact`` as a matching mode, a ``,`` can be used as a separator. The When using ``exact`` as a matching mode, a ``,`` can be used as a separator.
matching rule then matches if the metadata field has **any** of the specified The matching rule then matches if the metadata field has **any** of the specified
values. values.
Examples: Examples:
* ``match-field exact:type=gc`` Only match notifications for garbage collection * ``match-field exact:type=gc`` Only match notifications for garbage collection jobs
jobs * ``match-field exact:type=prune,verify`` Match prune job and verification job notifications.
* ``match-field exact:type=prune,verify`` Match prune job and verification job * ``match-field regex:datastore=^backup-.*$`` Match any datastore starting with ``backup``.
notifications.
* ``match-field regex:datastore=^backup-.*$`` Match any datastore starting with
``backup``.
If a notification does not have the matched field, the rule will **not** match. If a notification does not have the matched field, the rule will **not** match.
For instance, a ``match-field regex:datastore=.*`` directive will match any For instance, a ``match-field regex:datastore=.*`` directive will match any notification that has
notification that has a ``datastore`` metadata field, but will not match if the a ``datastore`` metadata field, but will not match if the field does not exist.
field does not exist.
Severity Matching Rules Severity Matching Rules
^^^^^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^
@ -263,9 +259,9 @@ The following severities are in use:
Notification Events Notification Events
------------------- -------------------
The following table contains a list of all notification events in Proxmox The following table contains a list of all notification events in Proxmox Backup server, their
Backup server, their type, severity and additional metadata fields. ``type`` as type, severity and additional metadata fields. ``type`` as well as any other metadata field
well as any other metadata field may be used in ``match-field`` match rules. may be used in ``match-field`` match rules.
================================ ==================== ========== ============================================================== ================================ ==================== ========== ==============================================================
Event ``type`` Severity Metadata fields (in addition to ``type``) Event ``type`` Severity Metadata fields (in addition to ``type``)
@ -285,8 +281,8 @@ Verification job failure ``verification`` ``error`` ``datastore``,
Verification job success ``verification`` ``info`` ``datastore``, ``hostname``, ``job-id`` Verification job success ``verification`` ``info`` ``datastore``, ``hostname``, ``job-id``
================================ ==================== ========== ============================================================== ================================ ==================== ========== ==============================================================
The following table contains a description of all use metadata fields. All of The following table contains a description of all use metadata fields. All of these
these can be used in ``match-field`` match rules. can be used in ``match-field`` match rules.
==================== =================================== ==================== ===================================
Metadata field Description Metadata field Description
@ -303,45 +299,45 @@ Metadata field Description
System Mail Forwarding System Mail Forwarding
---------------------- ----------------------
Certain local system daemons, such as ``smartd``, send notification emails to Certain local system daemons, such as ``smartd``, send notification emails
the local ``root`` user. Proxmox Backup Server will feed these mails into the to the local ``root`` user. Proxmox Backup Server will feed these mails
notification system as a notification of type ``system-mail`` and with severity into the notification system as a notification of type ``system-mail``
``unknown``. and with severity ``unknown``.
When the email is forwarded to a sendmail target, the mail's content and When the email is forwarded to a sendmail target, the mail's content and headers
headers are forwarded as-is. For all other targets, the system tries to extract are forwarded as-is. For all other targets,
both a subject line and the main text body from the email content. In instances the system tries to extract both a subject line and the main text body
where emails solely consist of HTML content, they will be transformed into from the email content. In instances where emails solely consist of HTML
plain text format during this process. content, they will be transformed into plain text format during this process.
Permissions Permissions
----------- -----------
In order to modify/view the configuration for notification targets, the In order to modify/view the configuration for notification targets,
``Sys.Modify/Sys.Audit`` permissions are required for the the ``Sys.Modify/Sys.Audit`` permissions are required for the
``/system/notifications`` ACL node. ``/system/notifications`` ACL node.
.. _notification_mode: .. _notification_mode:
Notification Mode Notification Mode
----------------- -----------------
Datastores and tape backup/restore job configuration have a Datastores and tape backup/restore job configuration have a ``notification-mode``
``notification-mode`` option which can have one of two values: option which can have one of two values:
* ``legacy-sendmail``: Send notification emails via the system's ``sendmail`` * ``legacy-sendmail``: Send notification emails via the system's ``sendmail`` command.
command. The notification system will be bypassed and any configured The notification system will be bypassed and any configured targets/matchers will be ignored.
targets/matchers will be ignored. This mode is equivalent to the notification This mode is equivalent to the notification behavior for version before
behavior for version before Proxmox Backup Server 3.2. Proxmox Backup Server 3.2.
* ``notification-system``: Use the new, flexible notification system. * ``notification-system``: Use the new, flexible notification system.
If the ``notification-mode`` option is not set, Proxmox Backup Server will If the ``notification-mode`` option is not set, Proxmox Backup Server will default
default to ``legacy-sendmail``. to ``legacy-sendmail``.
Starting with Proxmox Backup Server 3.2, a datastore created in the UI will Starting with Proxmox Backup Server 3.2, a datastore created in the UI will
automatically opt in to the new notification system. If the datastore is automatically opt in to the new notification system. If the datastore is created
created via the API or the ``proxmox-backup-manager`` CLI, the via the API or the ``proxmox-backup-manager`` CLI, the ``notification-mode``
``notification-mode`` option has to be set explicitly to option has to be set explicitly to ``notification-system`` if the
``notification-system`` if the notification system shall be used. notification system shall be used.
The ``legacy-sendmail`` mode might be removed in a later release of The ``legacy-sendmail`` mode might be removed in a later release of
Proxmox Backup Server. Proxmox Backup Server.
@ -350,12 +346,12 @@ Settings for ``legacy-sendmail`` notification mode
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
If ``notification-mode`` is set to ``legacy-sendmail``, Proxmox Backup Server If ``notification-mode`` is set to ``legacy-sendmail``, Proxmox Backup Server
will send notification emails via the system's ``sendmail`` command to the will send notification emails via the system's ``sendmail`` command to the email
email address configured for the user set in the ``notify-user`` option address configured for the user set in the ``notify-user`` option
(falling back to ``root@pam`` if not set). (falling back to ``root@pam`` if not set).
For datastores, you can also change the level of notifications received per For datastores, you can also change the level of notifications received per task
task type via the ``notify`` option. type via the ``notify`` option.
* Always: send a notification for any scheduled task, independent of the * Always: send a notification for any scheduled task, independent of the
outcome outcome
@ -366,23 +362,3 @@ task type via the ``notify`` option.
The ``notify-user`` and ``notify`` options are ignored if ``notification-mode`` The ``notify-user`` and ``notify`` options are ignored if ``notification-mode``
is set to ``notification-system``. is set to ``notification-system``.
Overriding Notification Templates
---------------------------------
Proxmox Backup Server uses Handlebars templates to render notifications. The
original templates provided by Proxmox Backup Server are stored in
``/usr/share/proxmox-backup/templates/default/``.
Notification templates can be overridden by providing a custom template file in
the override directory at
``/etc/proxmox-backup/notification-templates/default/``. When rendering a
notification of a given type, Proxmox Backup Server will first attempt to load
a template from the override directory. If that template does not exist or
fails to render, the original template is used.
The template files follow the naming convention of
``<type>-<body|subject>.txt.hbs``. For instance, the file
``gc-err-body.txt.hbs`` contains the template for rendering notifications for
garbage collection errors, while ``package-updates-subject.txt.hbs`` is used to
render the subject line of notifications for available package updates.
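As an illustrative sketch using the paths and file names mentioned above, an
override could start from a copy of the shipped template:

.. code-block:: console

  # mkdir -p /etc/proxmox-backup/notification-templates/default
  # cp /usr/share/proxmox-backup/templates/default/gc-err-body.txt.hbs \
       /etc/proxmox-backup/notification-templates/default/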
@ -435,28 +435,9 @@ There are some tuning related options for the datastore that are more advanced:
This can be set with:

.. code-block:: console

  # proxmox-backup-manager datastore update <storename> --tuning 'sync-level=filesystem'
* ``gc-atime-safety-check``: Datastore GC atime update safety check:
  You can explicitly `enable` or `disable` the atime update safety check
  performed on datastore creation and garbage collection. This checks whether
  atime updates are handled as expected by garbage collection and therefore
  avoids the risk of data loss caused by unexpected filesystem behavior. It is
  recommended to leave this enabled, which is also the default value.

* ``gc-atime-cutoff``: Datastore GC atime cutoff for chunk cleanup:
  This sets the cutoff for which a chunk is still considered in-use during
  phase 2 of garbage collection (given no older writers). If the ``atime`` of
  a chunk falls outside this range, the chunk will be removed.

* ``gc-cache-capacity``: Datastore GC least recently used cache capacity:
  Controls the capacity of the cache used to keep track of chunks for which
  the access time has already been updated during phase 1 of garbage
  collection. This avoids repeated updates and improves GC runtime
  performance. Higher values can reduce GC runtime at the cost of increased
  memory usage; setting the value to 0 disables caching.
If you want to set multiple tuning options simultaneously, you can separate
them with a comma, like this:
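A sketch of such a combined invocation, reusing the option names from the list
above (the value syntax for the boolean option is an assumption):

.. code-block:: console

  # proxmox-backup-manager datastore update <storename> --tuning 'sync-level=filesystem,gc-atime-safety-check=1'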
@ -16,8 +16,8 @@ User Configuration
choose the realm when you add a new user. Possible realms are:
:pam: Linux PAM standard authentication. Use this if you want to :pam: Linux PAM standard authentication. Use this if you want to
authenticate as a Linux system user. The user needs to already exist on authenticate as a Linux system user (users need to exist on the
the host system. system).
:pbs: Proxmox Backup Server realm. This type stores hashed passwords in
  ``/etc/proxmox-backup/shadow.json``.
@ -599,32 +599,6 @@ list view in the web UI, or using the command line:
Authentication Realms
---------------------
.. _user_realms_pam:
Linux PAM
~~~~~~~~~
Linux PAM is a framework for system-wide user authentication. These users are
created on the host system with commands such as ``adduser``.
If PAM users exist on the host system, corresponding entries can be added to
Proxmox Backup Server, to allow these users to log in via their system username
and password.
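A sketch of the corresponding workflow (user name chosen for illustration,
exact flags may differ between versions): create the system user first, then
add a matching entry in the ``pam`` realm.

.. code-block:: console

  # adduser jdoe
  # proxmox-backup-manager user create jdoe@pam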
.. _user_realms_pbs:
Proxmox Backup authentication server
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This is a Unix-like password store, which stores hashed passwords in
``/etc/proxmox-backup/shadow.json``. Passwords are hashed using the SHA-256
hashing algorithm.
This is the most convenient realm for small-scale (or even mid-scale)
installations, where users do not need access to anything outside of Proxmox
Backup Server. In this case, users are fully managed by Proxmox Backup Server
and are able to change their own passwords via the GUI.
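As a sketch, a user in this realm could be created on the command line (user
name chosen for illustration):

.. code-block:: console

  # proxmox-backup-manager user create jdoe@pbs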
.. _user_realms_ldap:

LDAP
@ -10,7 +10,7 @@ use tokio::net::TcpStream;
// Simple H2 client to test H2 download speed using h2server.rs // Simple H2 client to test H2 download speed using h2server.rs
struct Process { struct Process {
body: h2::legacy::RecvStream, body: h2::RecvStream,
trailers: bool, trailers: bool,
bytes: usize, bytes: usize,
} }
@ -50,7 +50,7 @@ impl Future for Process {
} }
fn send_request( fn send_request(
mut client: h2::legacy::client::SendRequest<bytes::Bytes>, mut client: h2::client::SendRequest<bytes::Bytes>,
) -> impl Future<Output = Result<usize, Error>> { ) -> impl Future<Output = Result<usize, Error>> {
println!("sending request"); println!("sending request");
@ -78,7 +78,7 @@ async fn run() -> Result<(), Error> {
let conn = TcpStream::connect(std::net::SocketAddr::from(([127, 0, 0, 1], 8008))).await?; let conn = TcpStream::connect(std::net::SocketAddr::from(([127, 0, 0, 1], 8008))).await?;
conn.set_nodelay(true).unwrap(); conn.set_nodelay(true).unwrap();
let (client, h2) = h2::legacy::client::Builder::new() let (client, h2) = h2::client::Builder::new()
.initial_connection_window_size(1024 * 1024 * 1024) .initial_connection_window_size(1024 * 1024 * 1024)
.initial_window_size(1024 * 1024 * 1024) .initial_window_size(1024 * 1024 * 1024)
.max_frame_size(4 * 1024 * 1024) .max_frame_size(4 * 1024 * 1024)
@ -10,7 +10,7 @@ use tokio::net::TcpStream;
// Simple H2 client to test H2 download speed using h2s-server.rs // Simple H2 client to test H2 download speed using h2s-server.rs
struct Process { struct Process {
body: h2::legacy::RecvStream, body: h2::RecvStream,
trailers: bool, trailers: bool,
bytes: usize, bytes: usize,
} }
@ -50,7 +50,7 @@ impl Future for Process {
} }
fn send_request( fn send_request(
mut client: h2::legacy::client::SendRequest<bytes::Bytes>, mut client: h2::client::SendRequest<bytes::Bytes>,
) -> impl Future<Output = Result<usize, Error>> { ) -> impl Future<Output = Result<usize, Error>> {
println!("sending request"); println!("sending request");
@ -94,7 +94,7 @@ async fn run() -> Result<(), Error> {
.await .await
.map_err(|err| format_err!("connect failed - {}", err))?; .map_err(|err| format_err!("connect failed - {}", err))?;
let (client, h2) = h2::legacy::client::Builder::new() let (client, h2) = h2::client::Builder::new()
.initial_connection_window_size(1024 * 1024 * 1024) .initial_connection_window_size(1024 * 1024 * 1024)
.initial_window_size(1024 * 1024 * 1024) .initial_window_size(1024 * 1024 * 1024)
.max_frame_size(4 * 1024 * 1024) .max_frame_size(4 * 1024 * 1024)
@ -8,19 +8,6 @@ use tokio::net::{TcpListener, TcpStream};
use pbs_buildcfg::configdir; use pbs_buildcfg::configdir;
#[derive(Clone, Copy)]
struct H2SExecutor;
impl<Fut> hyper::rt::Executor<Fut> for H2SExecutor
where
Fut: Future + Send + 'static,
Fut::Output: Send,
{
fn execute(&self, fut: Fut) {
tokio::spawn(fut);
}
}
fn main() -> Result<(), Error> { fn main() -> Result<(), Error> {
proxmox_async::runtime::main(run()) proxmox_async::runtime::main(run())
} }
@ -63,11 +50,12 @@ async fn handle_connection(socket: TcpStream, acceptor: Arc<SslAcceptor>) -> Res
stream.as_mut().accept().await?; stream.as_mut().accept().await?;
let mut http = hyper::server::conn::http2::Builder::new(H2SExecutor); let mut http = hyper::server::conn::Http::new();
http.http2_only(true);
// increase window size: todo - find optimal size // increase window size: todo - find optimal size
let max_window_size = (1 << 31) - 2; let max_window_size = (1 << 31) - 2;
http.initial_stream_window_size(max_window_size); http.http2_initial_stream_window_size(max_window_size);
http.initial_connection_window_size(max_window_size); http.http2_initial_connection_window_size(max_window_size);
let service = hyper::service::service_fn(|_req: Request<Body>| { let service = hyper::service::service_fn(|_req: Request<Body>| {
println!("Got request"); println!("Got request");
@ -1,24 +1,9 @@
use std::future::Future;
use anyhow::Error; use anyhow::Error;
use futures::*; use futures::*;
use hyper::{Body, Request, Response}; use hyper::{Body, Request, Response};
use tokio::net::{TcpListener, TcpStream}; use tokio::net::{TcpListener, TcpStream};
#[derive(Clone, Copy)]
struct H2Executor;
impl<Fut> hyper::rt::Executor<Fut> for H2Executor
where
Fut: Future + Send + 'static,
Fut::Output: Send,
{
fn execute(&self, fut: Fut) {
tokio::spawn(fut);
}
}
fn main() -> Result<(), Error> { fn main() -> Result<(), Error> {
proxmox_async::runtime::main(run()) proxmox_async::runtime::main(run())
} }
@ -41,11 +26,12 @@ async fn run() -> Result<(), Error> {
async fn handle_connection(socket: TcpStream) -> Result<(), Error> { async fn handle_connection(socket: TcpStream) -> Result<(), Error> {
socket.set_nodelay(true).unwrap(); socket.set_nodelay(true).unwrap();
let mut http = hyper::server::conn::http2::Builder::new(H2Executor); let mut http = hyper::server::conn::Http::new();
http.http2_only(true);
// increase window size: todo - find optimal size // increase window size: todo - find optimal size
let max_window_size = (1 << 31) - 2; let max_window_size = (1 << 31) - 2;
http.initial_stream_window_size(max_window_size); http.http2_initial_stream_window_size(max_window_size);
http.initial_connection_window_size(max_window_size); http.http2_initial_connection_window_size(max_window_size);
let service = hyper::service::service_fn(|_req: Request<Body>| { let service = hyper::service::service_fn(|_req: Request<Body>| {
println!("Got request"); println!("Got request");
@ -27,7 +27,6 @@ tokio = { workspace = true, features = [ "fs", "signal" ] }
tokio-stream.workspace = true tokio-stream.workspace = true
tower-service.workspace = true tower-service.workspace = true
xdg.workspace = true xdg.workspace = true
hickory-resolver.workspace = true
pathpatterns.workspace = true pathpatterns.workspace = true
@ -8,9 +8,8 @@ const_regex! {
} }
pub const BACKUP_SOURCE_SCHEMA: Schema = StringSchema::new( pub const BACKUP_SOURCE_SCHEMA: Schema = StringSchema::new(
"Backup source specification ([<archive-name>.<type>:<source-path>]), the \ "Backup source specification ([<label>:<path>]), the specification \
'archive-name' must contain alphanumerics, hyphens and underscores only. \ 'label' must contain alphanumerics, hyphens and underscores only.",
The 'type' must be either 'pxar', 'img', 'conf' or 'log'.",
) )
.format(&ApiStringFormat::Pattern(&BACKUPSPEC_REGEX)) .format(&ApiStringFormat::Pattern(&BACKUPSPEC_REGEX))
.schema(); .schema();
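For context, a backup source matching the newer (left-hand) specification
would be passed to the client roughly like this (archive name, path and
repository are placeholders):

.. code-block:: console

  # proxmox-backup-client backup root.pxar:/ --repository backup-server:store1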
@ -56,7 +56,7 @@ pub struct UploadOptions {
} }
struct ChunkUploadResponse { struct ChunkUploadResponse {
future: h2::legacy::client::ResponseFuture, future: h2::client::ResponseFuture,
size: usize, size: usize,
} }
@ -143,7 +143,7 @@ impl BackupWriter {
param: Option<Value>, param: Option<Value>,
content_type: &str, content_type: &str,
data: Vec<u8>, data: Vec<u8>,
) -> Result<h2::legacy::client::ResponseFuture, Error> { ) -> Result<h2::client::ResponseFuture, Error> {
let request = let request =
H2Client::request_builder("localhost", method, path, param, Some(content_type)) H2Client::request_builder("localhost", method, path, param, Some(content_type))
.unwrap(); .unwrap();
@ -514,7 +514,7 @@ impl BackupWriter {
} }
fn response_queue() -> ( fn response_queue() -> (
mpsc::Sender<h2::legacy::client::ResponseFuture>, mpsc::Sender<h2::client::ResponseFuture>,
oneshot::Receiver<Result<(), Error>>, oneshot::Receiver<Result<(), Error>>,
) { ) {
let (verify_queue_tx, verify_queue_rx) = mpsc::channel(100); let (verify_queue_tx, verify_queue_rx) = mpsc::channel(100);
@ -537,7 +537,7 @@ impl BackupWriter {
tokio::spawn( tokio::spawn(
ReceiverStream::new(verify_queue_rx) ReceiverStream::new(verify_queue_rx)
.map(Ok::<_, Error>) .map(Ok::<_, Error>)
.try_for_each(move |response: h2::legacy::client::ResponseFuture| { .try_for_each(move |response: h2::client::ResponseFuture| {
response response
.map_err(Error::from) .map_err(Error::from)
.and_then(H2Client::h2api_response) .and_then(H2Client::h2api_response)
@ -4,13 +4,11 @@ use std::time::Duration;
use anyhow::{bail, format_err, Error}; use anyhow::{bail, format_err, Error};
use futures::*; use futures::*;
#[cfg(not(target_feature = "crt-static"))]
use hyper::client::connect::dns::GaiResolver;
use hyper::client::{Client, HttpConnector}; use hyper::client::{Client, HttpConnector};
use hyper::http::header::HeaderValue; use hyper::http::header::HeaderValue;
use hyper::http::Uri; use hyper::http::Uri;
use hyper::http::{Request, Response}; use hyper::http::{Request, Response};
use hyper::{body::HttpBody, Body}; use hyper::Body;
use openssl::{ use openssl::{
ssl::{SslConnector, SslMethod}, ssl::{SslConnector, SslMethod},
x509::X509StoreContextRef, x509::X509StoreContextRef,
@ -35,74 +33,6 @@ use pbs_api_types::{Authid, RateLimitConfig, Userid};
use super::pipe_to_stream::PipeToSendStream; use super::pipe_to_stream::PipeToSendStream;
use super::PROXMOX_BACKUP_TCP_KEEPALIVE_TIME; use super::PROXMOX_BACKUP_TCP_KEEPALIVE_TIME;
#[cfg(not(target_feature = "crt-static"))]
type DnsResolver = GaiResolver;
#[cfg(target_feature = "crt-static")]
type DnsResolver = resolver::HickoryDnsResolver;
#[cfg(target_feature = "crt-static")]
mod resolver {
use std::net::SocketAddr;
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};
use futures::Future;
use hickory_resolver::error::ResolveError;
use hickory_resolver::lookup_ip::LookupIpIntoIter;
use hickory_resolver::TokioAsyncResolver;
use hyper::client::connect::dns::Name;
use tower_service::Service;
pub(crate) struct SocketAddrIter {
inner: LookupIpIntoIter,
}
impl Iterator for SocketAddrIter {
type Item = SocketAddr;
fn next(&mut self) -> Option<Self::Item> {
self.inner.next().map(|ip_addr| SocketAddr::new(ip_addr, 0))
}
}
#[derive(Clone)]
pub(crate) struct HickoryDnsResolver {
inner: Arc<TokioAsyncResolver>,
}
impl HickoryDnsResolver {
pub(crate) fn new() -> Self {
Self {
inner: Arc::new(TokioAsyncResolver::tokio_from_system_conf().unwrap()),
}
}
}
impl Service<Name> for HickoryDnsResolver {
type Response = SocketAddrIter;
type Error = ResolveError;
type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send>>;
fn poll_ready(&mut self, _ctx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
fn call(&mut self, name: Name) -> Self::Future {
let inner = self.inner.clone();
Box::pin(async move {
inner
.lookup_ip(name.as_str())
.await
.map(|r| SocketAddrIter {
inner: r.into_iter(),
})
})
}
}
}
/// Timeout used for several HTTP operations that are expected to finish quickly but may block in /// Timeout used for several HTTP operations that are expected to finish quickly but may block in
/// certain error conditions. Keep it generous, to avoid false-positive under high load. /// certain error conditions. Keep it generous, to avoid false-positive under high load.
const HTTP_TIMEOUT: Duration = Duration::from_secs(2 * 60); const HTTP_TIMEOUT: Duration = Duration::from_secs(2 * 60);
@ -204,7 +134,7 @@ impl Default for HttpClientOptions {
/// HTTP(S) API client /// HTTP(S) API client
pub struct HttpClient { pub struct HttpClient {
client: Client<HttpsConnector<DnsResolver>>, client: Client<HttpsConnector>,
server: String, server: String,
port: u16, port: u16,
fingerprint: Arc<Mutex<Option<String>>>, fingerprint: Arc<Mutex<Option<String>>>,
@ -435,8 +365,7 @@ impl HttpClient {
ssl_connector_builder.set_verify(openssl::ssl::SslVerifyMode::NONE); ssl_connector_builder.set_verify(openssl::ssl::SslVerifyMode::NONE);
} }
let resolver = DnsResolver::new(); let mut httpc = HttpConnector::new();
let mut httpc = HttpConnector::new_with_resolver(resolver);
httpc.set_nodelay(true); // important for h2 download performance! httpc.set_nodelay(true); // important for h2 download performance!
httpc.enforce_http(false); // we want https... httpc.enforce_http(false); // we want https...
@ -597,9 +526,7 @@ impl HttpClient {
_options: options, _options: options,
}) })
} }
}
impl HttpClient {
/// Login /// Login
/// ///
/// Login is done on demand, so this is only required if you need /// Login is done on demand, so this is only required if you need
@ -779,7 +706,8 @@ impl HttpClient {
.map(|_| Err(format_err!("unknown error"))) .map(|_| Err(format_err!("unknown error")))
.await? .await?
} else { } else {
futures::TryStreamExt::map_err(resp.into_body(), Error::from) resp.into_body()
.map_err(Error::from)
.try_fold(output, move |acc, chunk| async move { .try_fold(output, move |acc, chunk| async move {
acc.write_all(&chunk)?; acc.write_all(&chunk)?;
Ok::<_, Error>(acc) Ok::<_, Error>(acc)
@ -863,7 +791,7 @@ impl HttpClient {
let max_window_size = (1 << 31) - 2; let max_window_size = (1 << 31) - 2;
let (h2, connection) = h2::legacy::client::Builder::new() let (h2, connection) = h2::client::Builder::new()
.initial_connection_window_size(max_window_size) .initial_connection_window_size(max_window_size)
.initial_window_size(max_window_size) .initial_window_size(max_window_size)
.max_frame_size(4 * 1024 * 1024) .max_frame_size(4 * 1024 * 1024)
@ -887,7 +815,7 @@ impl HttpClient {
} }
async fn credentials( async fn credentials(
client: Client<HttpsConnector<DnsResolver>>, client: Client<HttpsConnector>,
server: String, server: String,
port: u16, port: u16,
username: Userid, username: Userid,
@ -916,7 +844,7 @@ impl HttpClient {
async fn api_response(response: Response<Body>) -> Result<Value, Error> { async fn api_response(response: Response<Body>) -> Result<Value, Error> {
let status = response.status(); let status = response.status();
let data = HttpBody::collect(response.into_body()).await?.to_bytes(); let data = hyper::body::to_bytes(response.into_body()).await?;
let text = String::from_utf8(data.to_vec()).unwrap(); let text = String::from_utf8(data.to_vec()).unwrap();
if status.is_success() { if status.is_success() {
@ -932,7 +860,7 @@ impl HttpClient {
} }
async fn api_request( async fn api_request(
client: Client<HttpsConnector<DnsResolver>>, client: Client<HttpsConnector>,
req: Request<Body>, req: Request<Body>,
) -> Result<Value, Error> { ) -> Result<Value, Error> {
Self::api_response( Self::api_response(
@ -1008,11 +936,11 @@ impl Drop for HttpClient {
#[derive(Clone)] #[derive(Clone)]
pub struct H2Client { pub struct H2Client {
h2: h2::legacy::client::SendRequest<bytes::Bytes>, h2: h2::client::SendRequest<bytes::Bytes>,
} }
impl H2Client { impl H2Client {
pub fn new(h2: h2::legacy::client::SendRequest<bytes::Bytes>) -> Self { pub fn new(h2: h2::client::SendRequest<bytes::Bytes>) -> Self {
Self { h2 } Self { h2 }
} }
@ -1092,7 +1020,7 @@ impl H2Client {
&self, &self,
request: Request<()>, request: Request<()>,
data: Option<bytes::Bytes>, data: Option<bytes::Bytes>,
) -> impl Future<Output = Result<h2::legacy::client::ResponseFuture, Error>> { ) -> impl Future<Output = Result<h2::client::ResponseFuture, Error>> {
self.h2 self.h2
.clone() .clone()
.ready() .ready()
@ -1109,9 +1037,7 @@ impl H2Client {
}) })
} }
pub async fn h2api_response( pub async fn h2api_response(response: Response<h2::RecvStream>) -> Result<Value, Error> {
response: Response<h2::legacy::RecvStream>,
) -> Result<Value, Error> {
let status = response.status(); let status = response.status();
let (_head, mut body) = response.into_parts(); let (_head, mut body) = response.into_parts();
@ -8,7 +8,7 @@ use std::task::{Context, Poll};
use anyhow::{format_err, Error}; use anyhow::{format_err, Error};
use bytes::Bytes; use bytes::Bytes;
use futures::{ready, Future}; use futures::{ready, Future};
use h2::legacy::SendStream; use h2::SendStream;
pub struct PipeToSendStream { pub struct PipeToSendStream {
body_tx: SendStream<Bytes>, body_tx: SendStream<Bytes>,
@ -345,8 +345,8 @@ pub(crate) unsafe fn set_test_default_master_pubkey(value: Result<Option<Vec<u8>
pub fn get_encryption_key_password() -> Result<Vec<u8>, Error> { pub fn get_encryption_key_password() -> Result<Vec<u8>, Error> {
// fixme: implement other input methods // fixme: implement other input methods
if let Some(password) = super::get_encryption_password()? { if let Some(password) = super::get_secret_from_env("PBS_ENCRYPTION_PASSWORD")? {
return Ok(password.into_bytes()); return Ok(password.as_bytes().to_vec());
} }
// If we're on a TTY, query the user for a password // If we're on a TTY, query the user for a password
@ -28,21 +28,6 @@ pub mod key_source;
const ENV_VAR_PBS_FINGERPRINT: &str = "PBS_FINGERPRINT"; const ENV_VAR_PBS_FINGERPRINT: &str = "PBS_FINGERPRINT";
const ENV_VAR_PBS_PASSWORD: &str = "PBS_PASSWORD"; const ENV_VAR_PBS_PASSWORD: &str = "PBS_PASSWORD";
const ENV_VAR_PBS_ENCRYPTION_PASSWORD: &str = "PBS_ENCRYPTION_PASSWORD";
const ENV_VAR_PBS_REPOSITORY: &str = "PBS_REPOSITORY";
/// Directory with system [credential]s. See systemd-creds(1).
///
/// [credential]: https://systemd.io/CREDENTIALS/
const ENV_VAR_CREDENTIALS_DIRECTORY: &str = "CREDENTIALS_DIRECTORY";
/// Credential name of the encryption password.
const CRED_PBS_ENCRYPTION_PASSWORD: &str = "proxmox-backup-client.encryption-password";
/// Credential name of the password.
const CRED_PBS_PASSWORD: &str = "proxmox-backup-client.password";
/// Credential name of the repository.
const CRED_PBS_REPOSITORY: &str = "proxmox-backup-client.repository";
/// Credential name of the fingerprint.
const CRED_PBS_FINGERPRINT: &str = "proxmox-backup-client.fingerprint";
pub const REPO_URL_SCHEMA: Schema = StringSchema::new("Repository URL.") pub const REPO_URL_SCHEMA: Schema = StringSchema::new("Repository URL.")
.format(&BACKUP_REPO_URL) .format(&BACKUP_REPO_URL)
@ -55,30 +40,6 @@ pub const CHUNK_SIZE_SCHEMA: Schema = IntegerSchema::new("Chunk size in KB. Must
.default(4096) .default(4096)
.schema(); .schema();
/// Retrieves a secret stored in a [credential] provided by systemd.
///
/// Returns `Ok(None)` if the credential does not exist.
///
/// [credential]: https://systemd.io/CREDENTIALS/
fn get_credential(cred_name: &str) -> std::io::Result<Option<Vec<u8>>> {
let Some(creds_dir) = std::env::var_os(ENV_VAR_CREDENTIALS_DIRECTORY) else {
return Ok(None);
};
let path = std::path::Path::new(&creds_dir).join(cred_name);
proxmox_log::debug!("attempting to use credential {cred_name} from {creds_dir:?}",);
// We read the whole contents without a BufRead. As per systemd-creds(1):
// Credentials are limited-size binary or textual objects.
match std::fs::read(&path) {
Ok(bytes) => Ok(Some(bytes)),
Err(err) if err.kind() == std::io::ErrorKind::NotFound => {
proxmox_log::debug!("no {cred_name} credential found in {creds_dir:?}");
Ok(None)
}
Err(err) => Err(err),
}
}
/// Helper to read a secret through an environment variable (ENV). /// Helper to read a secret through an environment variable (ENV).
/// ///
/// Tries the following variable names in order and returns the value /// Tries the following variable names in order and returns the value
@ -90,7 +51,7 @@ fn get_credential(cred_name: &str) -> std::io::Result<Option<Vec<u8>>> {
/// BASE_NAME_CMD => read the secret from specified command first line of output on stdout /// BASE_NAME_CMD => read the secret from specified command first line of output on stdout
/// ///
/// Only return the first line of data (without CRLF). /// Only return the first line of data (without CRLF).
fn get_secret_from_env(base_name: &str) -> Result<Option<String>, Error> { pub fn get_secret_from_env(base_name: &str) -> Result<Option<String>, Error> {
let firstline = |data: String| -> String { let firstline = |data: String| -> String {
match data.lines().next() { match data.lines().next() {
Some(line) => line.to_string(), Some(line) => line.to_string(),
@ -157,80 +118,8 @@ fn get_secret_from_env(base_name: &str) -> Result<Option<String>, Error> {
Ok(None) Ok(None)
} }
/// Gets a secret or value from the environment.
///
/// Checks for an environment variable named `env_variable`, and if missing, it
/// checks for a system [credential] named `credential_name`. Assumes the secret
/// is UTF-8 encoded.
///
/// [credential]: https://systemd.io/CREDENTIALS/
fn get_secret_impl(env_variable: &str, credential_name: &str) -> Result<Option<String>, Error> {
if let Some(password) = get_secret_from_env(env_variable)? {
Ok(Some(password))
} else if let Some(password) = get_credential(credential_name)? {
String::from_utf8(password)
.map(Option::Some)
.map_err(|_err| format_err!("credential {credential_name} is not utf8 encoded"))
} else {
Ok(None)
}
}
/// Gets the backup server's password.
///
/// Looks for a password in the `PBS_PASSWORD` environment variable; if there
/// isn't one, it reads the `proxmox-backup-client.password` [credential].
///
/// Returns `Ok(None)` if neither the environment variable nor the credential
/// is present.
///
/// [credential]: https://systemd.io/CREDENTIALS/
pub fn get_password() -> Result<Option<String>, Error> {
get_secret_impl(ENV_VAR_PBS_PASSWORD, CRED_PBS_PASSWORD)
}
/// Gets an encryption password.
///
/// Looks for a password in the `PBS_ENCRYPTION_PASSWORD` environment variable;
/// if there isn't one, it reads the `proxmox-backup-client.encryption-password`
/// [credential].
///
/// Returns `Ok(None)` if neither the environment variable nor the credential
/// is present.
///
/// [credential]: https://systemd.io/CREDENTIALS/
pub fn get_encryption_password() -> Result<Option<String>, Error> {
get_secret_impl(
ENV_VAR_PBS_ENCRYPTION_PASSWORD,
CRED_PBS_ENCRYPTION_PASSWORD,
)
}
pub fn get_default_repository() -> Option<String> { pub fn get_default_repository() -> Option<String> {
get_secret_impl(ENV_VAR_PBS_REPOSITORY, CRED_PBS_REPOSITORY) std::env::var("PBS_REPOSITORY").ok()
.inspect_err(|err| {
proxmox_log::error!("could not read default repository: {err:#}");
})
.unwrap_or_default()
}
/// Gets the repository fingerprint.
///
/// Looks for the fingerprint in the `PBS_FINGERPRINT` environment variable; if
/// there isn't one, it reads the `proxmox-backup-client.fingerprint`
/// [credential].
///
/// Returns `None` if neither the environment variable nor the credential is
/// present.
///
/// [credential]: https://systemd.io/CREDENTIALS/
pub fn get_fingerprint() -> Option<String> {
get_secret_impl(ENV_VAR_PBS_FINGERPRINT, CRED_PBS_FINGERPRINT)
.inspect_err(|err| {
proxmox_log::error!("could not read fingerprint: {err:#}");
})
.unwrap_or_default()
} }
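Taken together with the credential names defined above, a hedged sketch of
supplying the client password as a systemd credential instead of the
``PBS_PASSWORD`` environment variable could look like this (repository and
secret are placeholders, and a reasonably recent systemd is assumed):

.. code-block:: console

  # systemd-run --pipe --wait \
      -p SetCredential=proxmox-backup-client.password:'my-secret' \
      proxmox-backup-client list --repository root@pam@backup-server:store1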
pub fn remove_repository_from_value(param: &mut Value) -> Result<BackupRepository, Error> { pub fn remove_repository_from_value(param: &mut Value) -> Result<BackupRepository, Error> {
@ -290,9 +179,9 @@ fn connect_do(
auth_id: &Authid, auth_id: &Authid,
rate_limit: RateLimitConfig, rate_limit: RateLimitConfig,
) -> Result<HttpClient, Error> { ) -> Result<HttpClient, Error> {
let fingerprint = get_fingerprint(); let fingerprint = std::env::var(ENV_VAR_PBS_FINGERPRINT).ok();
let password = get_password()?; let password = get_secret_from_env(ENV_VAR_PBS_PASSWORD)?;
let options = HttpClientOptions::new_interactive(password, fingerprint).rate_limit(rate_limit); let options = HttpClientOptions::new_interactive(password, fingerprint).rate_limit(rate_limit);
HttpClient::new(server, port, auth_id, options) HttpClient::new(server, port, auth_id, options)
@ -300,8 +189,8 @@ fn connect_do(
/// like get, but simply ignore errors and return Null instead /// like get, but simply ignore errors and return Null instead
pub async fn try_get(repo: &BackupRepository, url: &str) -> Value { pub async fn try_get(repo: &BackupRepository, url: &str) -> Value {
let fingerprint = get_fingerprint(); let fingerprint = std::env::var(ENV_VAR_PBS_FINGERPRINT).ok();
let password = get_password().unwrap_or(None); let password = get_secret_from_env(ENV_VAR_PBS_PASSWORD).unwrap_or(None);
// ticket cache, but no questions asked // ticket cache, but no questions asked
let options = HttpClientOptions::new_interactive(password, fingerprint).interactive(false); let options = HttpClientOptions::new_interactive(password, fingerprint).interactive(false);
@ -7,7 +7,7 @@ use hyper::client::connect::{Connected, Connection};
use hyper::client::Client; use hyper::client::Client;
use hyper::http::Uri; use hyper::http::Uri;
use hyper::http::{Request, Response}; use hyper::http::{Request, Response};
use hyper::{body::HttpBody, Body}; use hyper::Body;
use pin_project_lite::pin_project; use pin_project_lite::pin_project;
use serde_json::Value; use serde_json::Value;
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt, ReadBuf}; use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt, ReadBuf};
@ -179,7 +179,8 @@ impl VsockClient {
if !status.is_success() { if !status.is_success() {
Self::api_response(resp).await.map(|_| ())? Self::api_response(resp).await.map(|_| ())?
} else { } else {
futures::TryStreamExt::map_err(resp.into_body(), Error::from) resp.into_body()
.map_err(Error::from)
.try_fold(output, move |acc, chunk| async move { .try_fold(output, move |acc, chunk| async move {
acc.write_all(&chunk).await?; acc.write_all(&chunk).await?;
Ok::<_, Error>(acc) Ok::<_, Error>(acc)
@ -191,7 +192,7 @@ impl VsockClient {
async fn api_response(response: Response<Body>) -> Result<Value, Error> { async fn api_response(response: Response<Body>) -> Result<Value, Error> {
let status = response.status(); let status = response.status();
let data = HttpBody::collect(response.into_body()).await?.to_bytes(); let data = hyper::body::to_bytes(response.into_body()).await?;
let text = String::from_utf8(data.to_vec()).unwrap(); let text = String::from_utf8(data.to_vec()).unwrap();
if status.is_success() { if status.is_success() {
@ -24,7 +24,6 @@ proxmox-section-config.workspace = true
proxmox-shared-memory.workspace = true proxmox-shared-memory.workspace = true
proxmox-sys = { workspace = true, features = [ "acl", "crypt", "timer" ] } proxmox-sys = { workspace = true, features = [ "acl", "crypt", "timer" ] }
proxmox-time.workspace = true proxmox-time.workspace = true
proxmox-uuid.workspace = true
pbs-api-types.workspace = true pbs-api-types.workspace = true
pbs-buildcfg.workspace = true pbs-buildcfg.workspace = true
@ -101,7 +101,7 @@ impl ConfigVersionCache {
let file_path = Path::new(FILE_PATH); let file_path = Path::new(FILE_PATH);
let dir_path = file_path.parent().unwrap(); let dir_path = file_path.parent().unwrap();
create_path(dir_path, Some(dir_opts), Some(dir_opts))?; create_path(dir_path, Some(dir_opts.clone()), Some(dir_opts))?;
let file_opts = CreateOptions::new() let file_opts = CreateOptions::new()
.perm(Mode::from_bits_truncate(0o660)) .perm(Mode::from_bits_truncate(0o660))

use proxmox_section_config::{SectionConfig, SectionConfigData, SectionConfigPlugin}; use proxmox_section_config::{SectionConfig, SectionConfigData, SectionConfigPlugin};
use crate::{open_backup_lockfile, replace_backup_config, BackupLockGuard}; use crate::{open_backup_lockfile, replace_backup_config, BackupLockGuard};
use pbs_api_types::{ use pbs_api_types::{AdRealmConfig, LdapRealmConfig, OpenIdRealmConfig, REALM_ID_SCHEMA};
AdRealmConfig, LdapRealmConfig, OpenIdRealmConfig, PamRealmConfig, PbsRealmConfig,
REALM_ID_SCHEMA,
};
pub static CONFIG: LazyLock<SectionConfig> = LazyLock::new(init); pub static CONFIG: LazyLock<SectionConfig> = LazyLock::new(init);
fn init() -> SectionConfig { fn init() -> SectionConfig {
const PAM_SCHEMA: &ObjectSchema = PamRealmConfig::API_SCHEMA.unwrap_object_schema();
const PBS_SCHEMA: &ObjectSchema = PbsRealmConfig::API_SCHEMA.unwrap_object_schema();
const AD_SCHEMA: &ObjectSchema = AdRealmConfig::API_SCHEMA.unwrap_object_schema(); const AD_SCHEMA: &ObjectSchema = AdRealmConfig::API_SCHEMA.unwrap_object_schema();
const LDAP_SCHEMA: &ObjectSchema = LdapRealmConfig::API_SCHEMA.unwrap_object_schema(); const LDAP_SCHEMA: &ObjectSchema = LdapRealmConfig::API_SCHEMA.unwrap_object_schema();
const OPENID_SCHEMA: &ObjectSchema = OpenIdRealmConfig::API_SCHEMA.unwrap_object_schema(); const OPENID_SCHEMA: &ObjectSchema = OpenIdRealmConfig::API_SCHEMA.unwrap_object_schema();
let mut config = SectionConfig::new(&REALM_ID_SCHEMA); let mut config = SectionConfig::new(&REALM_ID_SCHEMA);
config.register_plugin(SectionConfigPlugin::new(
"pam".to_owned(),
Some("realm".to_owned()),
PAM_SCHEMA,
));
config.register_plugin(SectionConfigPlugin::new(
"pbs".to_owned(),
Some("realm".to_owned()),
PBS_SCHEMA,
));
let plugin = SectionConfigPlugin::new( let plugin = SectionConfigPlugin::new(
"openid".to_string(), "openid".to_string(),
Some(String::from("realm")), Some(String::from("realm")),
@ -78,24 +61,9 @@ pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
replace_backup_config(DOMAINS_CFG_FILENAME, raw.as_bytes()) replace_backup_config(DOMAINS_CFG_FILENAME, raw.as_bytes())
} }
/// Unsets the default login realm for users by deleting the `default` property
/// from the respective realm.
///
/// This only updates the configuration as given in `config`, making it
/// permanent is left to the caller.
pub fn unset_default_realm(config: &mut SectionConfigData) -> Result<(), Error> {
for (_, data) in &mut config.sections.values_mut() {
if let Some(obj) = data.as_object_mut() {
obj.remove("default");
}
}
Ok(())
}
/// Check if a realm with the given name exists /// Check if a realm with the given name exists
pub fn exists(domains: &SectionConfigData, realm: &str) -> bool { pub fn exists(domains: &SectionConfigData, realm: &str) -> bool {
domains.sections.contains_key(realm) realm == "pbs" || realm == "pam" || domains.sections.contains_key(realm)
} }
// shell completion helper // shell completion helper
@ -6,10 +6,10 @@
//! //!
//! Drive type [`VirtualTapeDrive`] is only useful for debugging. //! Drive type [`VirtualTapeDrive`] is only useful for debugging.
//! //!
//! [LtoTapeDrive]: pbs_api_types::LtoTapeDrive //! [LtoTapeDrive]: crate::api2::types::LtoTapeDrive
//! [VirtualTapeDrive]: pbs_api_types::VirtualTapeDrive //! [VirtualTapeDrive]: crate::api2::types::VirtualTapeDrive
//! [ScsiTapeChanger]: pbs_api_types::ScsiTapeChanger //! [ScsiTapeChanger]: crate::api2::types::ScsiTapeChanger
//! [SectionConfig]: proxmox_section_config::SectionConfig //! [SectionConfig]: proxmox::api::section_config::SectionConfig
use std::collections::HashMap; use std::collections::HashMap;
use std::sync::LazyLock; use std::sync::LazyLock;
@ -22,8 +22,6 @@ pub use config_version_cache::ConfigVersionCache;
use anyhow::{format_err, Error}; use anyhow::{format_err, Error};
use nix::unistd::{Gid, Group, Uid, User}; use nix::unistd::{Gid, Group, Uid, User};
use proxmox_sys::fs::DirLockGuard;
use std::os::unix::prelude::AsRawFd;
pub use pbs_buildcfg::{BACKUP_GROUP_NAME, BACKUP_USER_NAME}; pub use pbs_buildcfg::{BACKUP_GROUP_NAME, BACKUP_USER_NAME};
@ -48,34 +46,13 @@ pub fn backup_group() -> Result<nix::unistd::Group, Error> {
} }
pub struct BackupLockGuard { pub struct BackupLockGuard {
file: Option<std::fs::File>, _file: Option<std::fs::File>,
// TODO: Remove `_legacy_dir` with PBS 5
_legacy_dir: Option<DirLockGuard>,
}
impl AsRawFd for BackupLockGuard {
fn as_raw_fd(&self) -> i32 {
self.file.as_ref().map_or(-1, |f| f.as_raw_fd())
}
}
// TODO: Remove with PBS 5
impl From<DirLockGuard> for BackupLockGuard {
fn from(value: DirLockGuard) -> Self {
Self {
file: None,
_legacy_dir: Some(value),
}
}
} }
#[doc(hidden)] #[doc(hidden)]
/// Note: do not use for production code, this is only intended for tests /// Note: do not use for production code, this is only intended for tests
pub unsafe fn create_mocked_lock() -> BackupLockGuard { pub unsafe fn create_mocked_lock() -> BackupLockGuard {
BackupLockGuard { BackupLockGuard { _file: None }
file: None,
_legacy_dir: None,
}
} }
/// Open or create a lock file owned by user "backup" and lock it. /// Open or create a lock file owned by user "backup" and lock it.
@ -99,10 +76,7 @@ pub fn open_backup_lockfile<P: AsRef<std::path::Path>>(
let timeout = timeout.unwrap_or(std::time::Duration::new(10, 0)); let timeout = timeout.unwrap_or(std::time::Duration::new(10, 0));
let file = proxmox_sys::fs::open_file_locked(&path, timeout, exclusive, options)?; let file = proxmox_sys::fs::open_file_locked(&path, timeout, exclusive, options)?;
Ok(BackupLockGuard { Ok(BackupLockGuard { _file: Some(file) })
file: Some(file),
_legacy_dir: None,
})
} }
/// Atomically write data to file owned by "root:backup" with permission "0640" /// Atomically write data to file owned by "root:backup" with permission "0640"
@ -3,7 +3,7 @@
//! This configuration module is based on [`SectionConfig`], and //! This configuration module is based on [`SectionConfig`], and
//! provides a type safe interface to store [`MediaPoolConfig`], //! provides a type safe interface to store [`MediaPoolConfig`],
//! //!
//! [MediaPoolConfig]: pbs_api_types::MediaPoolConfig //! [MediaPoolConfig]: crate::api2::types::MediaPoolConfig
//! [SectionConfig]: proxmox_section_config::SectionConfig //! [SectionConfig]: proxmox_section_config::SectionConfig
use std::collections::HashMap; use std::collections::HashMap;
@ -61,16 +61,8 @@ pub fn verify_secret(tokenid: &Authid, secret: &str) -> Result<(), Error> {
} }
} }
/// Generates a new secret for the given tokenid / API token, sets it then returns it.
/// The secret is stored as salted hash.
pub fn generate_and_set_secret(tokenid: &Authid) -> Result<String, Error> {
let secret = format!("{:x}", proxmox_uuid::Uuid::generate());
set_secret(tokenid, &secret)?;
Ok(secret)
}
/// Adds a new entry for the given tokenid / API token secret. The secret is stored as salted hash. /// Adds a new entry for the given tokenid / API token secret. The secret is stored as salted hash.
fn set_secret(tokenid: &Authid, secret: &str) -> Result<(), Error> { pub fn set_secret(tokenid: &Authid, secret: &str) -> Result<(), Error> {
if !tokenid.is_token() { if !tokenid.is_token() {
bail!("not an API token ID"); bail!("not an API token ID");
} }
@ -35,7 +35,6 @@ proxmox-lang.workspace=true
proxmox-schema = { workspace = true, features = [ "api-macro" ] } proxmox-schema = { workspace = true, features = [ "api-macro" ] }
proxmox-serde = { workspace = true, features = [ "serde_json" ] } proxmox-serde = { workspace = true, features = [ "serde_json" ] }
proxmox-sys.workspace = true proxmox-sys.workspace = true
proxmox-systemd.workspace = true
proxmox-time.workspace = true proxmox-time.workspace = true
proxmox-uuid.workspace = true proxmox-uuid.workspace = true
proxmox-worker-task.workspace = true proxmox-worker-task.workspace = true
@ -1,15 +1,11 @@
use std::fmt; use std::fmt;
use std::os::unix::io::{AsRawFd, RawFd}; use std::os::unix::io::RawFd;
use std::os::unix::prelude::OsStrExt;
use std::path::Path;
use std::path::PathBuf; use std::path::PathBuf;
use std::sync::{Arc, LazyLock}; use std::sync::Arc;
use std::time::Duration;
use anyhow::{bail, format_err, Context, Error}; use anyhow::{bail, format_err, Error};
use proxmox_sys::fs::{lock_dir_noblock, lock_dir_noblock_shared, replace_file, CreateOptions}; use proxmox_sys::fs::{lock_dir_noblock, replace_file, CreateOptions};
use proxmox_systemd::escape_unit;
use pbs_api_types::{ use pbs_api_types::{
Authid, BackupGroupDeleteStats, BackupNamespace, BackupType, GroupFilter, VerifyState, Authid, BackupGroupDeleteStats, BackupNamespace, BackupType, GroupFilter, VerifyState,
@ -20,18 +16,6 @@ use pbs_config::{open_backup_lockfile, BackupLockGuard};
use crate::manifest::{BackupManifest, MANIFEST_LOCK_NAME}; use crate::manifest::{BackupManifest, MANIFEST_LOCK_NAME};
use crate::{DataBlob, DataStore}; use crate::{DataBlob, DataStore};
pub const DATASTORE_LOCKS_DIR: &str = "/run/proxmox-backup/locks";
// TODO: Remove with PBS 5
// Note: The `expect()` call here will only happen if we can neither confirm nor deny the existence
// of the file. This should only happen if a user messes with the `/run/proxmox-backup` directory.
// If that happens, a lot more should fail, as we rely on the existence of the directory throughout
// the code. So just panic with a reasonable message.
pub(crate) static OLD_LOCKING: LazyLock<bool> = LazyLock::new(|| {
std::fs::exists("/run/proxmox-backup/old-locking")
.expect("cannot read `/run/proxmox-backup`, please check permissions")
});
/// BackupGroup is a directory containing a list of BackupDir /// BackupGroup is a directory containing a list of BackupDir
#[derive(Clone)] #[derive(Clone)]
pub struct BackupGroup { pub struct BackupGroup {
@ -215,10 +199,9 @@ impl BackupGroup {
/// Returns `BackupGroupDeleteStats`, containing the number of deleted snapshots /// Returns `BackupGroupDeleteStats`, containing the number of deleted snapshots
/// and number of protected snapshots, which therefore were not removed. /// and number of protected snapshots, which therefore were not removed.
pub fn destroy(&self) -> Result<BackupGroupDeleteStats, Error> { pub fn destroy(&self) -> Result<BackupGroupDeleteStats, Error> {
let _guard = self
.lock()
.with_context(|| format!("while destroying group '{self:?}'"))?;
let path = self.full_group_path(); let path = self.full_group_path();
let _guard =
proxmox_sys::fs::lock_dir_noblock(&path, "backup group", "possible running backup")?;
log::info!("removing backup group {:?}", path); log::info!("removing backup group {:?}", path);
let mut delete_stats = BackupGroupDeleteStats::default(); let mut delete_stats = BackupGroupDeleteStats::default();
@ -232,34 +215,16 @@ impl BackupGroup {
delete_stats.increment_removed_snapshots(); delete_stats.increment_removed_snapshots();
} }
// Note: make sure the old locking mechanism isn't used as `remove_dir_all` is not safe in if delete_stats.all_removed() {
// that case std::fs::remove_dir_all(&path).map_err(|err| {
if delete_stats.all_removed() && !*OLD_LOCKING { format_err!("removing group directory {:?} failed - {}", path, err)
self.remove_group_dir()?; })?;
delete_stats.increment_removed_groups(); delete_stats.increment_removed_groups();
} }
Ok(delete_stats) Ok(delete_stats)
} }
/// Helper function, assumes that no more snapshots are present in the group.
fn remove_group_dir(&self) -> Result<(), Error> {
let owner_path = self.store.owner_path(&self.ns, &self.group);
std::fs::remove_file(&owner_path).map_err(|err| {
format_err!("removing the owner file '{owner_path:?}' failed - {err}")
})?;
let path = self.full_group_path();
std::fs::remove_dir(&path)
.map_err(|err| format_err!("removing group directory {path:?} failed - {err}"))?;
let _ = std::fs::remove_file(self.lock_path());
Ok(())
}
/// Returns the backup owner. /// Returns the backup owner.
/// ///
/// The backup owner is the entity who first created the backup group. /// The backup owner is the entity who first created the backup group.
@ -272,36 +237,6 @@ impl BackupGroup {
self.store self.store
.set_owner(&self.ns, self.as_ref(), auth_id, force) .set_owner(&self.ns, self.as_ref(), auth_id, force)
} }
/// Returns a file name for locking a group.
///
/// The lock file will be located in:
/// `${DATASTORE_LOCKS_DIR}/${datastore name}/${lock_file_path_helper(rpath)}`
/// where `rpath` is the relative path of the group.
fn lock_path(&self) -> PathBuf {
let path = Path::new(DATASTORE_LOCKS_DIR).join(self.store.name());
let rpath = Path::new(self.group.ty.as_str()).join(&self.group.id);
path.join(lock_file_path_helper(&self.ns, rpath))
}
/// Locks a group exclusively.
pub fn lock(&self) -> Result<BackupLockGuard, Error> {
if *OLD_LOCKING {
lock_dir_noblock(
&self.full_group_path(),
"backup group",
"possible runing backup, group is in use",
)
.map(BackupLockGuard::from)
} else {
lock_helper(self.store.name(), &self.lock_path(), |p| {
open_backup_lockfile(p, Some(Duration::from_secs(0)), true)
.with_context(|| format!("unable to acquire backup group lock {p:?}"))
})
}
}
} }
impl AsRef<pbs_api_types::BackupNamespace> for BackupGroup { impl AsRef<pbs_api_types::BackupNamespace> for BackupGroup {
@ -486,101 +421,36 @@ impl BackupDir {
/// Returns the filename to lock a manifest /// Returns the filename to lock a manifest
/// ///
/// Also creates the basedir. The lockfile is located in /// Also creates the basedir. The lockfile is located in
/// `${DATASTORE_LOCKS_DIR}/${datastore name}/${lock_file_path_helper(rpath)}.index.json.lck` /// '/run/proxmox-backup/locks/{datastore}/[ns/{ns}/]+{type}/{id}/{timestamp}.index.json.lck'
/// where rpath is the relative path of the snapshot. fn manifest_lock_path(&self) -> Result<PathBuf, Error> {
fn manifest_lock_path(&self) -> PathBuf { let mut path = PathBuf::from(&format!("/run/proxmox-backup/locks/{}", self.store.name()));
let path = Path::new(DATASTORE_LOCKS_DIR).join(self.store.name()); path.push(self.relative_path());
let rpath = Path::new(self.dir.group.ty.as_str()) std::fs::create_dir_all(&path)?;
.join(&self.dir.group.id) let ts = self.backup_time_string();
.join(&self.backup_time_string) path.push(format!("{ts}{MANIFEST_LOCK_NAME}"));
.join(MANIFEST_LOCK_NAME);
path.join(lock_file_path_helper(&self.ns, rpath)) Ok(path)
} }
/// Locks the manifest of a snapshot, for example, to update or delete it. /// Locks the manifest of a snapshot, for example, to update or delete it.
pub(crate) fn lock_manifest(&self) -> Result<BackupLockGuard, Error> { pub(crate) fn lock_manifest(&self) -> Result<BackupLockGuard, Error> {
let path = if *OLD_LOCKING { let path = self.manifest_lock_path()?;
// old manifest lock path
let path = Path::new(DATASTORE_LOCKS_DIR)
.join(self.store.name())
.join(self.relative_path());
std::fs::create_dir_all(&path)?; // actions locking the manifest should be relatively short, only wait a few seconds
open_backup_lockfile(&path, Some(std::time::Duration::from_secs(5)), true)
path.join(format!("{}{MANIFEST_LOCK_NAME}", self.backup_time_string())) .map_err(|err| format_err!("unable to acquire manifest lock {:?} - {}", &path, err))
} else {
self.manifest_lock_path()
};
lock_helper(self.store.name(), &path, |p| {
// update_manifest should never take a long time, so if
// someone else has the lock we can simply block a bit
// and should get it soon
open_backup_lockfile(p, Some(Duration::from_secs(5)), true)
.with_context(|| format_err!("unable to acquire manifest lock {p:?}"))
})
}
/// Returns a file name for locking a snapshot.
///
/// The lock file will be located in:
/// `${DATASTORE_LOCKS_DIR}/${datastore name}/${lock_file_path_helper(rpath)}`
/// where `rpath` is the relative path of the snapshot.
fn lock_path(&self) -> PathBuf {
let path = Path::new(DATASTORE_LOCKS_DIR).join(self.store.name());
let rpath = Path::new(self.dir.group.ty.as_str())
.join(&self.dir.group.id)
.join(&self.backup_time_string);
path.join(lock_file_path_helper(&self.ns, rpath))
}
/// Locks a snapshot exclusively.
pub fn lock(&self) -> Result<BackupLockGuard, Error> {
if *OLD_LOCKING {
lock_dir_noblock(
&self.full_path(),
"snapshot",
"backup is running or snapshot is in use",
)
.map(BackupLockGuard::from)
} else {
lock_helper(self.store.name(), &self.lock_path(), |p| {
open_backup_lockfile(p, Some(Duration::from_secs(0)), true)
.with_context(|| format!("unable to acquire snapshot lock {p:?}"))
})
}
}
/// Acquires a shared lock on a snapshot.
pub fn lock_shared(&self) -> Result<BackupLockGuard, Error> {
if *OLD_LOCKING {
lock_dir_noblock_shared(
&self.full_path(),
"snapshot",
"backup is running or snapshot is in use, could not acquire shared lock",
)
.map(BackupLockGuard::from)
} else {
lock_helper(self.store.name(), &self.lock_path(), |p| {
open_backup_lockfile(p, Some(Duration::from_secs(0)), false)
.with_context(|| format!("unable to acquire shared snapshot lock {p:?}"))
})
}
} }
/// Destroy the whole snapshot, bails if it's protected /// Destroy the whole snapshot, bails if it's protected
/// ///
/// Setting `force` to true skips locking and thus ignores if the backup is currently in use. /// Setting `force` to true skips locking and thus ignores if the backup is currently in use.
pub fn destroy(&self, force: bool) -> Result<(), Error> { pub fn destroy(&self, force: bool) -> Result<(), Error> {
let full_path = self.full_path();
let (_guard, _manifest_guard); let (_guard, _manifest_guard);
if !force { if !force {
_guard = self _guard = lock_dir_noblock(&full_path, "snapshot", "possibly running or in use")?;
.lock()
.with_context(|| format!("while destroying snapshot '{self:?}'"))?;
_manifest_guard = self.lock_manifest()?; _manifest_guard = self.lock_manifest()?;
} }
@ -588,37 +458,14 @@ impl BackupDir {
bail!("cannot remove protected snapshot"); // use special error type? bail!("cannot remove protected snapshot"); // use special error type?
} }
let full_path = self.full_path();
log::info!("removing backup snapshot {:?}", full_path); log::info!("removing backup snapshot {:?}", full_path);
std::fs::remove_dir_all(&full_path).map_err(|err| { std::fs::remove_dir_all(&full_path).map_err(|err| {
format_err!("removing backup snapshot {:?} failed - {}", full_path, err,) format_err!("removing backup snapshot {:?} failed - {}", full_path, err,)
})?; })?;
// remove no longer needed lock files // the manifest doesn't exist anymore, no need to keep the lock (already done by guard?)
let _ = std::fs::remove_file(self.manifest_lock_path()); // ignore errors if let Ok(path) = self.manifest_lock_path() {
let _ = std::fs::remove_file(self.lock_path()); // ignore errors let _ = std::fs::remove_file(path); // ignore errors
let group = BackupGroup::from(self);
let guard = group.lock().with_context(|| {
format!("while checking if group '{group:?}' is empty during snapshot destruction")
});
// Only remove the group if all of the following is true:
//
// - we can lock it: if we can't lock the group, it is still in use (either by another
//   backup process or by a parent caller, who needs to take care that empty groups are
//   removed themselves).
// - it is now empty: if the group isn't empty, removing it will fail (to avoid removing
// backups that might still be used).
// - the new locking mechanism is used: if the old mechanism is used, a group removal here
// could lead to a race condition.
//
// Do not error out, as we have already removed the snapshot, there is nothing a user could
// do to rectify the situation.
if guard.is_ok() && group.list_backups()?.is_empty() && !*OLD_LOCKING {
group.remove_group_dir()?;
} else if let Err(err) = guard {
log::debug!("{err:#}");
} }
Ok(()) Ok(())
@ -814,75 +661,3 @@ fn list_backup_files<P: ?Sized + nix::NixPath>(
Ok(files) Ok(files)
} }
/// Creates a path to a lock file depending on the relative path of an object (snapshot, group,
/// manifest) in a datastore. First all namespaces will be concatenated with a colon (ns-folder).
/// Then the actual file name will depend on the length of the relative path without namespaces. If
/// it is shorter than 255 characters in its unit encoded form, then the unit encoded form will be
/// used directly. If not, the file name will consist of the first 80 characters, the last 80
/// characters and the hash of the unit encoded relative path without namespaces. It will also be
/// placed into a "hashed" subfolder in the namespace folder.
///
/// Examples:
///
/// - vm-100
/// - vm-100-2022\x2d05\x2d02T08\x3a11\x3a33Z
/// - ns1:ns2:ns3:ns4:ns5:ns6:ns7/vm-100-2022\x2d05\x2d02T08\x3a11\x3a33Z
///
/// A "hashed" lock file would look like this:
/// - ns1:ns2:ns3/hashed/$first_eighty...$last_eighty-$hash
fn lock_file_path_helper(ns: &BackupNamespace, path: PathBuf) -> PathBuf {
let to_return = PathBuf::from(
ns.components()
.map(String::from)
.reduce(|acc, n| format!("{acc}:{n}"))
.unwrap_or_default(),
);
let path_bytes = path.as_os_str().as_bytes();
let enc = escape_unit(path_bytes, true);
if enc.len() < 255 {
return to_return.join(enc);
}
let to_return = to_return.join("hashed");
let first_eigthy = &enc[..80];
let last_eighty = &enc[enc.len() - 80..];
let hash = hex::encode(openssl::sha::sha256(path_bytes));
to_return.join(format!("{first_eigthy}...{last_eighty}-{hash}"))
}
/// Helps implement the double stat'ing procedure. It avoids certain race conditions upon lock
/// deletion.
///
/// It also creates the base directory for lock files.
fn lock_helper<F>(
store_name: &str,
path: &std::path::Path,
lock_fn: F,
) -> Result<BackupLockGuard, Error>
where
F: Fn(&std::path::Path) -> Result<BackupLockGuard, Error>,
{
let mut lock_dir = Path::new(DATASTORE_LOCKS_DIR).join(store_name);
if let Some(parent) = path.parent() {
lock_dir = lock_dir.join(parent);
};
std::fs::create_dir_all(&lock_dir)?;
let lock = lock_fn(path)?;
let inode = nix::sys::stat::fstat(lock.as_raw_fd())?.st_ino;
if nix::sys::stat::stat(path).map_or(true, |st| inode != st.st_ino) {
bail!("could not acquire lock, another thread modified the lock file");
}
Ok(lock)
}
@ -1,11 +1,9 @@
use std::os::unix::fs::MetadataExt;
use std::os::unix::io::AsRawFd; use std::os::unix::io::AsRawFd;
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex}; use std::sync::{Arc, Mutex};
use std::time::Duration;
use anyhow::{bail, format_err, Context, Error}; use anyhow::{bail, format_err, Error};
use tracing::{info, warn}; use tracing::info;
use pbs_api_types::{DatastoreFSyncLevel, GarbageCollectionStatus}; use pbs_api_types::{DatastoreFSyncLevel, GarbageCollectionStatus};
use proxmox_io::ReadExt; use proxmox_io::ReadExt;
@ -15,7 +13,6 @@ use proxmox_sys::process_locker::{
}; };
use proxmox_worker_task::WorkerTaskContext; use proxmox_worker_task::WorkerTaskContext;
use crate::data_blob::DataChunkBuilder;
use crate::file_formats::{ use crate::file_formats::{
COMPRESSED_BLOB_MAGIC_1_0, ENCRYPTED_BLOB_MAGIC_1_0, UNCOMPRESSED_BLOB_MAGIC_1_0, COMPRESSED_BLOB_MAGIC_1_0, ENCRYPTED_BLOB_MAGIC_1_0, UNCOMPRESSED_BLOB_MAGIC_1_0,
}; };
@ -112,7 +109,7 @@ impl ChunkStore {
let default_options = CreateOptions::new(); let default_options = CreateOptions::new();
match create_path(&base, Some(default_options), Some(options)) { match create_path(&base, Some(default_options), Some(options.clone())) {
Err(err) => bail!("unable to create chunk store '{name}' at {base:?} - {err}"), Err(err) => bail!("unable to create chunk store '{name}' at {base:?} - {err}"),
Ok(res) => { Ok(res) => {
if !res { if !res {
@ -121,13 +118,13 @@ impl ChunkStore {
} }
} }
if let Err(err) = create_dir(&chunk_dir, options) { if let Err(err) = create_dir(&chunk_dir, options.clone()) {
bail!("unable to create chunk store '{name}' subdir {chunk_dir:?} - {err}"); bail!("unable to create chunk store '{name}' subdir {chunk_dir:?} - {err}");
} }
// create lock file with correct owner/group // create lock file with correct owner/group
let lockfile_path = Self::lockfile_path(&base); let lockfile_path = Self::lockfile_path(&base);
proxmox_sys::fs::replace_file(lockfile_path, b"", options, false)?; proxmox_sys::fs::replace_file(lockfile_path, b"", options.clone(), false)?;
// create 64*1024 subdirs // create 64*1024 subdirs
let mut last_percentage = 0; let mut last_percentage = 0;
@ -135,7 +132,7 @@ impl ChunkStore {
for i in 0..64 * 1024 { for i in 0..64 * 1024 {
let mut l1path = chunk_dir.clone(); let mut l1path = chunk_dir.clone();
l1path.push(format!("{:04x}", i)); l1path.push(format!("{:04x}", i));
if let Err(err) = create_dir(&l1path, options) { if let Err(err) = create_dir(&l1path, options.clone()) {
bail!( bail!(
"unable to create chunk store '{}' subdir {:?} - {}", "unable to create chunk store '{}' subdir {:?} - {}",
name, name,
@ -180,7 +177,7 @@ impl ChunkStore {
/// Note that this must be used with care, as it's dangerous to create two instances on the /// Note that this must be used with care, as it's dangerous to create two instances on the
/// same base path, as closing the underlying ProcessLocker drops all locks from this process /// same base path, as closing the underlying ProcessLocker drops all locks from this process
/// on the lockfile (even if separate FDs) /// on the lockfile (even if separate FDs)
pub fn open<P: Into<PathBuf>>( pub(crate) fn open<P: Into<PathBuf>>(
name: &str, name: &str,
base: P, base: P,
sync_level: DatastoreFSyncLevel, sync_level: DatastoreFSyncLevel,
@ -356,7 +353,7 @@ impl ChunkStore {
pub fn sweep_unused_chunks( pub fn sweep_unused_chunks(
&self, &self,
oldest_writer: i64, oldest_writer: i64,
min_atime: i64, phase1_start_time: i64,
status: &mut GarbageCollectionStatus, status: &mut GarbageCollectionStatus,
worker: &dyn WorkerTaskContext, worker: &dyn WorkerTaskContext,
) -> Result<(), Error> { ) -> Result<(), Error> {
@ -366,6 +363,14 @@ impl ChunkStore {
use nix::sys::stat::fstatat; use nix::sys::stat::fstatat;
use nix::unistd::{unlinkat, UnlinkatFlags}; use nix::unistd::{unlinkat, UnlinkatFlags};
let mut min_atime = phase1_start_time - 3600 * 24; // at least 24h (see mount option relatime)
if oldest_writer < min_atime {
min_atime = oldest_writer;
}
min_atime -= 300; // add 5 mins gap for safety
let mut last_percentage = 0; let mut last_percentage = 0;
let mut chunk_count = 0; let mut chunk_count = 0;
@ -437,69 +442,6 @@ impl ChunkStore {
Ok(()) Ok(())
} }
/// Check if atime updates are honored by the filesystem backing the chunk store.
///
/// Checks if the atime is always updated by utimensat, taking the Linux kernel's
/// timestamp granularity into consideration.
/// If `retry_on_file_changed` is set to true, the check is performed again on the changed file
/// if a file change during testing is detected by differences in birth time or inode number.
/// Uses a 4 MiB fixed size, compressed but unencrypted chunk to test. The chunk is inserted in
/// the chunk store if not yet present.
/// Returns with error if the check could not be performed.
pub fn check_fs_atime_updates(&self, retry_on_file_changed: bool) -> Result<(), Error> {
let (zero_chunk, digest) = DataChunkBuilder::build_zero_chunk(None, 4096 * 1024, true)?;
let (pre_existing, _) = self.insert_chunk(&zero_chunk, &digest)?;
let (path, _digest) = self.chunk_path(&digest);
// Take into account timestamp update granularity in the kernel
// Blocking the thread is fine here since this runs in a worker.
std::thread::sleep(Duration::from_secs(1));
let metadata_before = std::fs::metadata(&path).context(format!(
"failed to get metadata for {path:?} before atime update"
))?;
// Second atime update if chunk pre-existed, insert_chunk already updates pre-existing ones
self.cond_touch_path(&path, true)?;
let metadata_now = std::fs::metadata(&path).context(format!(
"failed to get metadata for {path:?} after atime update"
))?;
// Check for the unlikely case that the file changed in-between the
// two metadata calls, try to check once again on changed file
if metadata_before.ino() != metadata_now.ino() {
if retry_on_file_changed {
return self.check_fs_atime_updates(false);
}
bail!("chunk {path:?} changed twice during access time safety check, cannot proceed.");
}
if metadata_before.accessed()? >= metadata_now.accessed()? {
let chunk_info_str = if pre_existing {
"pre-existing"
} else {
"newly inserted"
};
warn!("Chunk metadata was not correctly updated during access time safety check:");
info!(
"Timestamps before update: accessed {:?}, modified {:?}, created {:?}",
metadata_before.accessed().ok(),
metadata_before.modified().ok(),
metadata_before.created().ok(),
);
info!(
"Timestamps after update: accessed {:?}, modified {:?}, created {:?}",
metadata_now.accessed().ok(),
metadata_now.modified().ok(),
metadata_now.created().ok(),
);
bail!("access time safety check using {chunk_info_str} chunk failed, aborting GC!");
}
Ok(())
}
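The check above relies on utimensat explicitly bumping the access time, so it is expected to pass regardless of relatime/noatime mount options and only fail on filesystems that ignore explicit atime updates. A minimal, self-contained sketch of the same probe idea, using only the standard library (the function name and throwaway probe file are assumptions, not part of this crate):

// Hedged sketch, std-only: probe whether explicit atime updates are honored,
// mirroring the check above on a throwaway file instead of a chunk.
use std::fs::{self, FileTimes, OpenOptions};
use std::time::{Duration, SystemTime};

fn atime_updates_honored(path: &std::path::Path) -> std::io::Result<bool> {
    fs::write(path, b"probe")?;
    let before = fs::metadata(path)?.accessed()?;
    // Wait past the kernel timestamp granularity, as the real check does.
    std::thread::sleep(Duration::from_secs(1));
    // Bump the atime explicitly (effectively utimensat) and compare.
    let file = OpenOptions::new().write(true).open(path)?;
    file.set_times(FileTimes::new().set_accessed(SystemTime::now()))?;
    let after = fs::metadata(path)?.accessed()?;
    Ok(after > before)
}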
pub fn insert_chunk(&self, chunk: &DataBlob, digest: &[u8; 32]) -> Result<(bool, u64), Error> { pub fn insert_chunk(&self, chunk: &DataBlob, digest: &[u8; 32]) -> Result<(bool, u64), Error> {
// unwrap: only `None` in unit tests // unwrap: only `None` in unit tests
assert!(self.locker.is_some()); assert!(self.locker.is_some());
@ -561,16 +503,10 @@ impl ChunkStore {
.parent() .parent()
.ok_or_else(|| format_err!("unable to get chunk dir"))?; .ok_or_else(|| format_err!("unable to get chunk dir"))?;
let mut create_options = CreateOptions::new();
if nix::unistd::Uid::effective().is_root() {
let uid = pbs_config::backup_user()?.uid;
let gid = pbs_config::backup_group()?.gid;
create_options = create_options.owner(uid).group(gid);
}
proxmox_sys::fs::replace_file( proxmox_sys::fs::replace_file(
&chunk_path, &chunk_path,
raw_data, raw_data,
create_options, CreateOptions::new(),
self.sync_level == DatastoreFSyncLevel::File, self.sync_level == DatastoreFSyncLevel::File,
) )
.map_err(|err| { .map_err(|err| {

View File

@ -4,11 +4,9 @@ use std::os::unix::ffi::OsStrExt;
use std::os::unix::io::AsRawFd; use std::os::unix::io::AsRawFd;
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use std::sync::{Arc, LazyLock, Mutex}; use std::sync::{Arc, LazyLock, Mutex};
use std::time::Duration;
use anyhow::{bail, format_err, Context, Error}; use anyhow::{bail, format_err, Error};
use nix::unistd::{unlinkat, UnlinkatFlags}; use nix::unistd::{unlinkat, UnlinkatFlags};
use pbs_tools::lru_cache::LruCache;
use tracing::{info, warn}; use tracing::{info, warn};
use proxmox_human_byte::HumanByte; use proxmox_human_byte::HumanByte;
@ -16,9 +14,9 @@ use proxmox_schema::ApiType;
use proxmox_sys::error::SysError; use proxmox_sys::error::SysError;
use proxmox_sys::fs::{file_read_optional_string, replace_file, CreateOptions}; use proxmox_sys::fs::{file_read_optional_string, replace_file, CreateOptions};
use proxmox_sys::fs::{lock_dir_noblock, DirLockGuard};
use proxmox_sys::linux::procfs::MountInfo; use proxmox_sys::linux::procfs::MountInfo;
use proxmox_sys::process_locker::ProcessLockSharedGuard; use proxmox_sys::process_locker::ProcessLockSharedGuard;
use proxmox_time::TimeSpan;
use proxmox_worker_task::WorkerTaskContext; use proxmox_worker_task::WorkerTaskContext;
use pbs_api_types::{ use pbs_api_types::{
@ -26,9 +24,8 @@ use pbs_api_types::{
DataStoreConfig, DatastoreFSyncLevel, DatastoreTuning, GarbageCollectionStatus, DataStoreConfig, DatastoreFSyncLevel, DatastoreTuning, GarbageCollectionStatus,
MaintenanceMode, MaintenanceType, Operation, UPID, MaintenanceMode, MaintenanceType, Operation, UPID,
}; };
use pbs_config::BackupLockGuard;
use crate::backup_info::{BackupDir, BackupGroup, BackupInfo, OLD_LOCKING}; use crate::backup_info::{BackupDir, BackupGroup};
use crate::chunk_store::ChunkStore; use crate::chunk_store::ChunkStore;
use crate::dynamic_index::{DynamicIndexReader, DynamicIndexWriter}; use crate::dynamic_index::{DynamicIndexReader, DynamicIndexWriter};
use crate::fixed_index::{FixedIndexReader, FixedIndexWriter}; use crate::fixed_index::{FixedIndexReader, FixedIndexWriter};
@ -709,11 +706,7 @@ impl DataStore {
} }
/// Return the path of the 'owner' file. /// Return the path of the 'owner' file.
pub(super) fn owner_path( fn owner_path(&self, ns: &BackupNamespace, group: &pbs_api_types::BackupGroup) -> PathBuf {
&self,
ns: &BackupNamespace,
group: &pbs_api_types::BackupGroup,
) -> PathBuf {
self.group_path(ns, group).join("owner") self.group_path(ns, group).join("owner")
} }
@ -781,35 +774,41 @@ impl DataStore {
/// ///
/// This also acquires an exclusive lock on the directory and returns the lock guard. /// This also acquires an exclusive lock on the directory and returns the lock guard.
pub fn create_locked_backup_group( pub fn create_locked_backup_group(
self: &Arc<Self>, &self,
ns: &BackupNamespace, ns: &BackupNamespace,
backup_group: &pbs_api_types::BackupGroup, backup_group: &pbs_api_types::BackupGroup,
auth_id: &Authid, auth_id: &Authid,
) -> Result<(Authid, BackupLockGuard), Error> { ) -> Result<(Authid, DirLockGuard), Error> {
let backup_group = self.backup_group(ns.clone(), backup_group.clone()); // create intermediate path first:
let mut full_path = self.base_path();
for ns in ns.components() {
full_path.push("ns");
full_path.push(ns);
}
full_path.push(backup_group.ty.as_str());
std::fs::create_dir_all(&full_path)?;
// create intermediate path first full_path.push(&backup_group.id);
let full_path = backup_group.full_group_path();
std::fs::create_dir_all(full_path.parent().ok_or_else(|| { // create the last component now
format_err!("could not construct parent path for group {backup_group:?}")
})?)?;
// now create the group, this allows us to check whether it existed before
match std::fs::create_dir(&full_path) { match std::fs::create_dir(&full_path) {
Ok(_) => { Ok(_) => {
let guard = backup_group.lock().with_context(|| { let guard = lock_dir_noblock(
format!("while creating new locked backup group '{backup_group:?}'") &full_path,
})?; "backup group",
self.set_owner(ns, backup_group.group(), auth_id, false)?; "another backup is already running",
let owner = self.get_owner(ns, backup_group.group())?; // just to be sure )?;
self.set_owner(ns, backup_group, auth_id, false)?;
let owner = self.get_owner(ns, backup_group)?; // just to be sure
Ok((owner, guard)) Ok((owner, guard))
} }
Err(ref err) if err.kind() == io::ErrorKind::AlreadyExists => { Err(ref err) if err.kind() == io::ErrorKind::AlreadyExists => {
let guard = backup_group.lock().with_context(|| { let guard = lock_dir_noblock(
format!("while creating locked backup group '{backup_group:?}'") &full_path,
})?; "backup group",
let owner = self.get_owner(ns, backup_group.group())?; // just to be sure "another backup is already running",
)?;
let owner = self.get_owner(ns, backup_group)?; // just to be sure
Ok((owner, guard)) Ok((owner, guard))
} }
Err(err) => bail!("unable to create backup group {:?} - {}", full_path, err), Err(err) => bail!("unable to create backup group {:?} - {}", full_path, err),
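A hedged usage sketch of the lock-guard based API above; the datastore name, group identifier and owner handling are illustrative, not taken from the source:

// Hedged sketch; identifiers are illustrative and error handling is elided.
let datastore = DataStore::lookup_datastore("store1", Some(Operation::Write))?;
let group: pbs_api_types::BackupGroup = "vm/100".parse()?;
let (owner, _guard) = datastore.create_locked_backup_group(
    &BackupNamespace::root(),
    &group,
    Authid::root_auth_id(),
)?;
println!("group owner: {owner}");
// The exclusive group lock is held until `_guard` is dropped.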
@ -820,25 +819,29 @@ impl DataStore {
/// ///
/// The BackupGroup directory needs to exist. /// The BackupGroup directory needs to exist.
pub fn create_locked_backup_dir( pub fn create_locked_backup_dir(
self: &Arc<Self>, &self,
ns: &BackupNamespace, ns: &BackupNamespace,
backup_dir: &pbs_api_types::BackupDir, backup_dir: &pbs_api_types::BackupDir,
) -> Result<(PathBuf, bool, BackupLockGuard), Error> { ) -> Result<(PathBuf, bool, DirLockGuard), Error> {
let backup_dir = self.backup_dir(ns.clone(), backup_dir.clone())?; let full_path = self.snapshot_path(ns, backup_dir);
let relative_path = backup_dir.relative_path(); let relative_path = full_path.strip_prefix(self.base_path()).map_err(|err| {
format_err!(
"failed to produce correct path for backup {backup_dir} in namespace {ns}: {err}"
)
})?;
match std::fs::create_dir(backup_dir.full_path()) { let lock = || {
Ok(_) => { lock_dir_noblock(
let guard = backup_dir.lock().with_context(|| { &full_path,
format!("while creating new locked snapshot '{backup_dir:?}'") "snapshot",
})?; "internal error - tried creating snapshot that's already in use",
Ok((relative_path, true, guard)) )
} };
match std::fs::create_dir(&full_path) {
Ok(_) => Ok((relative_path.to_owned(), true, lock()?)),
Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => { Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => {
let guard = backup_dir Ok((relative_path.to_owned(), false, lock()?))
.lock()
.with_context(|| format!("while creating locked snapshot '{backup_dir:?}'"))?;
Ok((relative_path, false, guard))
} }
Err(e) => Err(e.into()), Err(e) => Err(e.into()),
} }
@ -967,15 +970,10 @@ impl DataStore {
ListGroups::new(Arc::clone(self), ns)?.collect() ListGroups::new(Arc::clone(self), ns)?.collect()
} }
/// Lookup all index files to be found in the datastore without taking any logical iteration pub fn list_images(&self) -> Result<Vec<PathBuf>, Error> {
/// into account.
/// The filesystem is walked recursively to detect index files based on their archive type as
/// determined by the filename. This, however, excludes the chunks folder and hidden files, and does not follow
/// symlinks.
fn list_index_files(&self) -> Result<HashSet<PathBuf>, Error> {
let base = self.base_path(); let base = self.base_path();
let mut list = HashSet::new(); let mut list = vec![];
use walkdir::WalkDir; use walkdir::WalkDir;
@ -1023,7 +1021,7 @@ impl DataStore {
if archive_type == ArchiveType::FixedIndex if archive_type == ArchiveType::FixedIndex
|| archive_type == ArchiveType::DynamicIndex || archive_type == ArchiveType::DynamicIndex
{ {
list.insert(path); list.push(path);
} }
} }
} }
@ -1031,51 +1029,11 @@ impl DataStore {
Ok(list) Ok(list)
} }
// Similar to open index, but return with Ok(None) if index file vanished.
fn open_index_reader(
&self,
absolute_path: &Path,
) -> Result<Option<Box<dyn IndexFile>>, Error> {
let archive_type = match ArchiveType::from_path(absolute_path) {
// ignore archives with unknown archive type
Ok(ArchiveType::Blob) | Err(_) => bail!("unexpected archive type"),
Ok(archive_type) => archive_type,
};
if absolute_path.is_relative() {
bail!("expected absolute path, got '{absolute_path:?}'");
}
let file = match std::fs::File::open(absolute_path) {
Ok(file) => file,
// ignore vanished files
Err(err) if err.kind() == io::ErrorKind::NotFound => return Ok(None),
Err(err) => {
return Err(Error::from(err).context(format!("can't open file '{absolute_path:?}'")))
}
};
match archive_type {
ArchiveType::FixedIndex => {
let reader = FixedIndexReader::new(file)
.with_context(|| format!("can't open fixed index '{absolute_path:?}'"))?;
Ok(Some(Box::new(reader)))
}
ArchiveType::DynamicIndex => {
let reader = DynamicIndexReader::new(file)
.with_context(|| format!("can't open dynamic index '{absolute_path:?}'"))?;
Ok(Some(Box::new(reader)))
}
ArchiveType::Blob => bail!("unexpected archive type blob"),
}
}
// mark chunks used by ``index`` as used // mark chunks used by ``index`` as used
fn index_mark_used_chunks( fn index_mark_used_chunks<I: IndexFile>(
&self, &self,
index: Box<dyn IndexFile>, index: I,
file_name: &Path, // only used for error reporting file_name: &Path, // only used for error reporting
chunk_lru_cache: &mut LruCache<[u8; 32], ()>,
status: &mut GarbageCollectionStatus, status: &mut GarbageCollectionStatus,
worker: &dyn WorkerTaskContext, worker: &dyn WorkerTaskContext,
) -> Result<(), Error> { ) -> Result<(), Error> {
@ -1086,12 +1044,6 @@ impl DataStore {
worker.check_abort()?; worker.check_abort()?;
worker.fail_on_shutdown()?; worker.fail_on_shutdown()?;
let digest = index.index_digest(pos).unwrap(); let digest = index.index_digest(pos).unwrap();
// Avoid multiple expensive atime updates by utimensat
if chunk_lru_cache.insert(*digest, ()) {
continue;
}
if !self.inner.chunk_store.cond_touch_chunk(digest, false)? { if !self.inner.chunk_store.cond_touch_chunk(digest, false)? {
let hex = hex::encode(digest); let hex = hex::encode(digest);
warn!( warn!(
@ -1117,135 +1069,61 @@ impl DataStore {
&self, &self,
status: &mut GarbageCollectionStatus, status: &mut GarbageCollectionStatus,
worker: &dyn WorkerTaskContext, worker: &dyn WorkerTaskContext,
cache_capacity: usize,
) -> Result<(), Error> { ) -> Result<(), Error> {
// Iterate twice over the datastore to fetch index files, even if this comes with an let image_list = self.list_images()?;
// additional runtime cost: let image_count = image_list.len();
// - First iteration to find all index files, no matter if they are in a location expected
// by the datastore's hierarchy
// - Iterate using the datastore's helpers, so the namespaces, groups and snapshots are
// looked up given the expected hierarchy and iterator logic
//
// By this it is assured that all index files are used, even if they would not have been
// seen by the regular logic and the user is informed by the garbage collection run about
// the detected index files not following the iterators logic.
let mut unprocessed_index_list = self.list_index_files()?;
let mut index_count = unprocessed_index_list.len();
let mut chunk_lru_cache = LruCache::new(cache_capacity);
let mut processed_index_files = 0;
let mut last_percentage: usize = 0; let mut last_percentage: usize = 0;
let arc_self = Arc::new(self.clone()); let mut strange_paths_count: u64 = 0;
for namespace in arc_self
.recursive_iter_backup_ns(BackupNamespace::root())
.context("creating namespace iterator failed")?
{
let namespace = namespace.context("iterating namespaces failed")?;
for group in arc_self.iter_backup_groups(namespace)? {
let group = group.context("iterating backup groups failed")?;
// Avoid race between listing/marking of snapshots by GC and pruning the last for (i, img) in image_list.into_iter().enumerate() {
// snapshot in the group, following a new snapshot creation. Otherwise known chunks worker.check_abort()?;
// might only be referenced by the new snapshot, so it must be read as well. worker.fail_on_shutdown()?;
let mut retry_counter = 0;
'retry: loop {
let _lock = match retry_counter {
0..=9 => None,
10 => Some(
group
.lock()
.context("exhausted retries and failed to lock group")?,
),
_ => bail!("exhausted retries and unexpected counter overrun"),
};
let mut snapshots = match group.list_backups() { if let Some(backup_dir_path) = img.parent() {
Ok(snapshots) => snapshots, let backup_dir_path = backup_dir_path.strip_prefix(self.base_path())?;
Err(err) => { if let Some(backup_dir_str) = backup_dir_path.to_str() {
if group.exists() { if pbs_api_types::parse_ns_and_snapshot(backup_dir_str).is_err() {
return Err(err).context("listing snapshots failed")?; strange_paths_count += 1;
} }
break 'retry; }
} }
};
// Always start iteration with the last snapshot of the group to reduce race match std::fs::File::open(&img) {
// window with concurrent backup+prune previous last snapshot. Allows to retry Ok(file) => {
// without the need to keep track of already processed index files for the if let Ok(archive_type) = ArchiveType::from_path(&img) {
// current group. if archive_type == ArchiveType::FixedIndex {
BackupInfo::sort_list(&mut snapshots, true); let index = FixedIndexReader::new(file).map_err(|e| {
for (count, snapshot) in snapshots.into_iter().rev().enumerate() { format_err!("can't read index '{}' - {}", img.to_string_lossy(), e)
for file in snapshot.files { })?;
worker.check_abort()?; self.index_mark_used_chunks(index, &img, status, worker)?;
worker.fail_on_shutdown()?; } else if archive_type == ArchiveType::DynamicIndex {
let index = DynamicIndexReader::new(file).map_err(|e| {
match ArchiveType::from_path(&file) { format_err!("can't read index '{}' - {}", img.to_string_lossy(), e)
Ok(ArchiveType::FixedIndex) | Ok(ArchiveType::DynamicIndex) => (), })?;
Ok(ArchiveType::Blob) | Err(_) => continue, self.index_mark_used_chunks(index, &img, status, worker)?;
}
let mut path = snapshot.backup_dir.full_path();
path.push(file);
let index = match self.open_index_reader(&path)? {
Some(index) => index,
None => {
unprocessed_index_list.remove(&path);
if count == 0 {
retry_counter += 1;
continue 'retry;
}
continue;
}
};
self.index_mark_used_chunks(
index,
&path,
&mut chunk_lru_cache,
status,
worker,
)?;
if !unprocessed_index_list.remove(&path) {
info!("Encountered new index file '{path:?}', increment total index file count");
index_count += 1;
}
let percentage = (processed_index_files + 1) * 100 / index_count;
if percentage > last_percentage {
info!(
"marked {percentage}% ({} of {index_count} index files)",
processed_index_files + 1,
);
last_percentage = percentage;
}
processed_index_files += 1;
} }
} }
break;
} }
Err(err) if err.kind() == io::ErrorKind::NotFound => (), // ignore vanished files
Err(err) => bail!("can't open index {} - {}", img.to_string_lossy(), err),
}
let percentage = (i + 1) * 100 / image_count;
if percentage > last_percentage {
info!(
"marked {percentage}% ({} of {image_count} index files)",
i + 1,
);
last_percentage = percentage;
} }
} }
let mut strange_paths_count = unprocessed_index_list.len();
for path in unprocessed_index_list {
let index = match self.open_index_reader(&path)? {
Some(index) => index,
None => {
// do not count vanished (pruned) backup snapshots as strange paths.
strange_paths_count -= 1;
continue;
}
};
self.index_mark_used_chunks(index, &path, &mut chunk_lru_cache, status, worker)?;
warn!("Marked chunks for unexpected index file at '{path:?}'");
}
if strange_paths_count > 0 { if strange_paths_count > 0 {
warn!("Found {strange_paths_count} index files outside of expected directory scheme"); info!(
"found (and marked) {strange_paths_count} index files outside of expected directory scheme"
);
} }
Ok(()) Ok(())
@ -1292,62 +1170,15 @@ impl DataStore {
upid: Some(upid.to_string()), upid: Some(upid.to_string()),
..Default::default() ..Default::default()
}; };
let tuning: DatastoreTuning = serde_json::from_value(
DatastoreTuning::API_SCHEMA
.parse_property_string(gc_store_config.tuning.as_deref().unwrap_or(""))?,
)?;
if tuning.gc_atime_safety_check.unwrap_or(true) {
self.inner
.chunk_store
.check_fs_atime_updates(true)
.context("atime safety check failed")?;
info!("Access time update check successful, proceeding with GC.");
} else {
info!("Access time update check disabled by datastore tuning options.");
};
// Fallback to default 24h 5m if not set
let cutoff = tuning
.gc_atime_cutoff
.map(|cutoff| cutoff * 60)
.unwrap_or(3600 * 24 + 300);
let mut min_atime = phase1_start_time - cutoff as i64;
info!(
"Using access time cutoff {}, minimum access time is {}",
TimeSpan::from(Duration::from_secs(cutoff as u64)),
proxmox_time::epoch_to_rfc3339_utc(min_atime)?,
);
if oldest_writer < min_atime {
min_atime = oldest_writer - 300; // account for 5 min safety gap
info!(
"Oldest backup writer started at {}, extending minimum access time to {}",
TimeSpan::from(Duration::from_secs(oldest_writer as u64)),
proxmox_time::epoch_to_rfc3339_utc(min_atime)?,
);
}
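As a worked example of the cutoff arithmetic above (all values illustrative):

// Illustrative numbers only, mirroring the arithmetic above.
let phase1_start_time = 1_700_000_000_i64; // GC phase 1 start, epoch seconds
let cutoff = 3600 * 24 + 300; // no gc-atime-cutoff set => default 24h + 5min
let mut min_atime = phase1_start_time - cutoff as i64; // 1_699_913_300
let oldest_writer = 1_699_900_000_i64; // a writer that started before the cutoff
if oldest_writer < min_atime {
    min_atime = oldest_writer - 300; // extend by the 5 min safety gap
}
assert_eq!(min_atime, 1_699_899_700);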
let tuning: DatastoreTuning = serde_json::from_value(
DatastoreTuning::API_SCHEMA
.parse_property_string(gc_store_config.tuning.as_deref().unwrap_or(""))?,
)?;
let gc_cache_capacity = if let Some(capacity) = tuning.gc_cache_capacity {
info!("Using chunk digest cache capacity of {capacity}.");
capacity
} else {
1024 * 1024
};
info!("Start GC phase1 (mark used chunks)"); info!("Start GC phase1 (mark used chunks)");
self.mark_used_chunks(&mut gc_status, worker, gc_cache_capacity) self.mark_used_chunks(&mut gc_status, worker)?;
.context("marking used chunks failed")?;
info!("Start GC phase2 (sweep unused chunks)"); info!("Start GC phase2 (sweep unused chunks)");
self.inner.chunk_store.sweep_unused_chunks( self.inner.chunk_store.sweep_unused_chunks(
oldest_writer, oldest_writer,
min_atime, phase1_start_time,
&mut gc_status, &mut gc_status,
worker, worker,
)?; )?;
@ -1474,9 +1305,7 @@ impl DataStore {
bail!("snapshot {} does not exist!", backup_dir.dir()); bail!("snapshot {} does not exist!", backup_dir.dir());
} }
let _guard = backup_dir.lock().with_context(|| { let _guard = lock_dir_noblock(&full_path, "snapshot", "possibly running or in use")?;
format!("while updating the protection status of snapshot '{backup_dir:?}'")
})?;
let protected_path = backup_dir.protected_file(); let protected_path = backup_dir.protected_file();
if protection { if protection {
@ -1733,8 +1562,4 @@ impl DataStore {
Ok(()) Ok(())
} }
pub fn old_locking(&self) -> bool {
*OLD_LOCKING
}
} }

View File

@ -1,14 +1,13 @@
use std::fs::File; use std::fs::File;
use std::os::unix::io::{AsRawFd, FromRawFd}; use std::os::unix::io::{AsRawFd, FromRawFd};
use std::path::Path; use std::path::Path;
use std::rc::Rc;
use std::sync::Arc; use std::sync::Arc;
use std::rc::Rc;
use anyhow::{bail, Context, Error}; use anyhow::{bail, Error};
use nix::dir::Dir; use nix::dir::Dir;
use nix::fcntl::OFlag;
use nix::sys::stat::Mode; use proxmox_sys::fs::lock_dir_noblock_shared;
use pbs_config::BackupLockGuard;
use pbs_api_types::{ use pbs_api_types::{
print_store_and_ns, ArchiveType, BackupNamespace, Operation, CLIENT_LOG_BLOB_NAME, print_store_and_ns, ArchiveType, BackupNamespace, Operation, CLIENT_LOG_BLOB_NAME,
@ -29,10 +28,6 @@ pub struct SnapshotReader {
datastore_name: String, datastore_name: String,
file_list: Vec<String>, file_list: Vec<String>,
locked_dir: Dir, locked_dir: Dir,
// while this is never read, the lock needs to be kept until the
// reader is dropped to ensure valid locking semantics
_lock: BackupLockGuard,
} }
impl SnapshotReader { impl SnapshotReader {
@ -53,12 +48,8 @@ impl SnapshotReader {
bail!("snapshot {} does not exist!", snapshot.dir()); bail!("snapshot {} does not exist!", snapshot.dir());
} }
let lock = snapshot let locked_dir =
.lock_shared() lock_dir_noblock_shared(&snapshot_path, "snapshot", "locked by another operation")?;
.with_context(|| format!("while trying to read snapshot '{snapshot:?}'"))?;
let locked_dir = Dir::open(&snapshot_path, OFlag::O_RDONLY, Mode::empty())
.with_context(|| format!("unable to open snapshot directory {snapshot_path:?}"))?;
let datastore_name = datastore.name().to_string(); let datastore_name = datastore.name().to_string();
let manifest = match snapshot.load_manifest() { let manifest = match snapshot.load_manifest() {
@ -89,7 +80,6 @@ impl SnapshotReader {
datastore_name, datastore_name,
file_list, file_list,
locked_dir, locked_dir,
_lock: lock,
}) })
} }

View File

@ -47,7 +47,7 @@ fn open_lock_file(name: &str) -> Result<(std::fs::File, CreateOptions), Error> {
let timeout = std::time::Duration::new(10, 0); let timeout = std::time::Duration::new(10, 0);
Ok(( Ok((
open_file_locked(lock_path, timeout, true, options)?, open_file_locked(lock_path, timeout, true, options.clone())?,
options, options,
)) ))
} }

View File

@ -15,6 +15,7 @@
use anyhow::{bail, Error}; use anyhow::{bail, Error};
use serde_json::Value; use serde_json::Value;
use proxmox_log::init_cli_logger;
use proxmox_router::cli::*; use proxmox_router::cli::*;
use proxmox_router::RpcEnvironment; use proxmox_router::RpcEnvironment;
use proxmox_schema::{api, ArraySchema, IntegerSchema, Schema, StringSchema}; use proxmox_schema::{api, ArraySchema, IntegerSchema, Schema, StringSchema};
@ -799,9 +800,7 @@ fn options(
} }
fn main() -> Result<(), Error> { fn main() -> Result<(), Error> {
proxmox_log::Logger::from_env("PBS_LOG", proxmox_log::LevelFilter::INFO) init_cli_logger("PBS_LOG", proxmox_log::LevelFilter::INFO)?;
.stderr()
.init()?;
let uid = nix::unistd::Uid::current(); let uid = nix::unistd::Uid::current();

View File

@ -16,6 +16,7 @@ use std::fs::File;
use anyhow::{bail, Error}; use anyhow::{bail, Error};
use serde_json::Value; use serde_json::Value;
use proxmox_log::init_cli_logger;
use proxmox_router::cli::*; use proxmox_router::cli::*;
use proxmox_router::RpcEnvironment; use proxmox_router::RpcEnvironment;
use proxmox_schema::api; use proxmox_schema::api;
@ -387,9 +388,7 @@ fn scan(param: Value) -> Result<(), Error> {
} }
fn main() -> Result<(), Error> { fn main() -> Result<(), Error> {
proxmox_log::Logger::from_env("PBS_LOG", proxmox_log::LevelFilter::INFO) init_cli_logger("PBS_LOG", proxmox_log::LevelFilter::INFO)?;
.stderr()
.init()?;
let uid = nix::unistd::Uid::current(); let uid = nix::unistd::Uid::current();

View File

@ -659,8 +659,7 @@ impl SgTape {
pub fn wait_until_ready(&mut self, timeout: Option<u64>) -> Result<(), Error> { pub fn wait_until_ready(&mut self, timeout: Option<u64>) -> Result<(), Error> {
let start = SystemTime::now(); let start = SystemTime::now();
let timeout = timeout.unwrap_or(Self::SCSI_TAPE_DEFAULT_TIMEOUT as u64); let timeout = timeout.unwrap_or(Self::SCSI_TAPE_DEFAULT_TIMEOUT as u64);
let mut max_wait = std::time::Duration::new(timeout, 0); let max_wait = std::time::Duration::new(timeout, 0);
let mut increased_timeout = false;
loop { loop {
match self.test_unit_ready() { match self.test_unit_ready() {
@ -668,16 +667,6 @@ impl SgTape {
_ => { _ => {
std::thread::sleep(std::time::Duration::new(1, 0)); std::thread::sleep(std::time::Duration::new(1, 0));
if start.elapsed()? > max_wait { if start.elapsed()? > max_wait {
if !increased_timeout {
if let Ok(DeviceActivity::Calibrating) =
read_device_activity(&mut self.file)
{
log::info!("Detected drive calibration, increasing timeout to 2 hours 5 minutes");
max_wait = std::time::Duration::new(2 * 60 * 60 + 5 * 60, 0);
increased_timeout = true;
continue;
}
}
bail!("wait_until_ready failed - got timeout"); bail!("wait_until_ready failed - got timeout");
} }
} }

View File

@ -133,7 +133,7 @@ impl<K: std::cmp::Eq + std::hash::Hash + Copy, V> LruCache<K, V> {
/// Insert or update an entry identified by `key` with the given `value`. /// Insert or update an entry identified by `key` with the given `value`.
/// This entry is placed as the most recently used node at the head. /// This entry is placed as the most recently used node at the head.
pub fn insert(&mut self, key: K, value: V) -> bool { pub fn insert(&mut self, key: K, value: V) {
match self.map.entry(key) { match self.map.entry(key) {
Entry::Occupied(mut o) => { Entry::Occupied(mut o) => {
// Node present, update value // Node present, update value
@ -142,7 +142,6 @@ impl<K: std::cmp::Eq + std::hash::Hash + Copy, V> LruCache<K, V> {
let mut node = unsafe { Box::from_raw(node_ptr) }; let mut node = unsafe { Box::from_raw(node_ptr) };
node.value = value; node.value = value;
let _node_ptr = Box::into_raw(node); let _node_ptr = Box::into_raw(node);
true
} }
Entry::Vacant(v) => { Entry::Vacant(v) => {
// Node not present, insert a new one // Node not present, insert a new one
@ -160,7 +159,6 @@ impl<K: std::cmp::Eq + std::hash::Hash + Copy, V> LruCache<K, V> {
if self.map.len() > self.capacity { if self.map.len() > self.capacity {
self.pop_tail(); self.pop_tail();
} }
false
} }
} }
} }
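The boolean return value lets callers skip repeated work for keys that are already cached; a hedged sketch of that pattern, mirroring the GC digest cache above (the capacity is illustrative):

// Hedged sketch of the dedup pattern enabled by the new return value.
let mut seen: LruCache<[u8; 32], ()> = LruCache::new(1024);
for digest in [[0u8; 32], [1u8; 32], [0u8; 32]] {
    if seen.insert(digest, ()) {
        // Key was already present: skip the expensive per-chunk work.
        continue;
    }
    // First occurrence: do the expensive work (e.g. touch the chunk on disk).
}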

View File

@ -16,6 +16,7 @@ use xdg::BaseDirectories;
use pathpatterns::{MatchEntry, MatchType, PatternFlag}; use pathpatterns::{MatchEntry, MatchType, PatternFlag};
use proxmox_async::blocking::TokioWriterAdapter; use proxmox_async::blocking::TokioWriterAdapter;
use proxmox_io::StdChannelWriter; use proxmox_io::StdChannelWriter;
use proxmox_log::init_cli_logger;
use proxmox_router::{cli::*, ApiMethod, RpcEnvironment}; use proxmox_router::{cli::*, ApiMethod, RpcEnvironment};
use proxmox_schema::api; use proxmox_schema::api;
use proxmox_sys::fs::{file_get_json, image_size, replace_file, CreateOptions}; use proxmox_sys::fs::{file_get_json, image_size, replace_file, CreateOptions};
@ -631,10 +632,9 @@ fn spawn_catalog_upload(
backupspec: { backupspec: {
type: Array, type: Array,
description: description:
"List of backup source specifications:\ "List of backup source specifications ([<label.ext>:<path>] ...), the \
\n\n[<archive-name>.<type>:<source-path>] ...\n\n\ specifications 'label' must contain alphanumerics, hyphens and underscores \
The 'archive-name' must only contain alphanumerics, hyphens and underscores \ only.",
while the 'type' must be either 'pxar', 'img', 'conf' or 'log'.",
items: { items: {
schema: BACKUP_SOURCE_SCHEMA, schema: BACKUP_SOURCE_SCHEMA,
} }
@ -827,36 +827,40 @@ async fn create_backup(
let mut target_set = HashSet::new(); let mut target_set = HashSet::new();
for backupspec in backupspec_list { for backupspec in backupspec_list {
let pbs_client::BackupSpecification { let spec = parse_backup_specification(backupspec.as_str().unwrap())?;
archive_name: target, let filename = &spec.config_string;
config_string: filename, let target = &spec.archive_name;
spec_type,
} = parse_backup_specification(backupspec.as_str().unwrap())?;
if target_set.contains(&target) { if target_set.contains(target) {
bail!("got target twice: '{}'", target); bail!("got target twice: '{}'", target);
} }
target_set.insert(target.clone()); target_set.insert(target.to_string());
use std::os::unix::fs::FileTypeExt; use std::os::unix::fs::FileTypeExt;
let metadata = std::fs::metadata(&filename) let metadata = std::fs::metadata(filename)
.map_err(|err| format_err!("unable to access '{}' - {}", filename, err))?; .map_err(|err| format_err!("unable to access '{}' - {}", filename, err))?;
let file_type = metadata.file_type(); let file_type = metadata.file_type();
match spec_type { match spec.spec_type {
BackupSpecificationType::PXAR => { BackupSpecificationType::PXAR => {
if !file_type.is_dir() { if !file_type.is_dir() {
bail!("got unexpected file type (expected directory)"); bail!("got unexpected file type (expected directory)");
} }
upload_list.push((BackupSpecificationType::PXAR, filename, target, "didx", 0)); upload_list.push((
BackupSpecificationType::PXAR,
filename.to_owned(),
target.to_owned(),
"didx",
0,
));
} }
BackupSpecificationType::IMAGE => { BackupSpecificationType::IMAGE => {
if !(file_type.is_file() || file_type.is_block_device()) { if !(file_type.is_file() || file_type.is_block_device()) {
bail!("got unexpected file type (expected file or block device)"); bail!("got unexpected file type (expected file or block device)");
} }
let size = image_size(&PathBuf::from(&filename))?; let size = image_size(&PathBuf::from(filename))?;
if size == 0 { if size == 0 {
bail!("got zero-sized file '{}'", filename); bail!("got zero-sized file '{}'", filename);
@ -864,8 +868,8 @@ async fn create_backup(
upload_list.push(( upload_list.push((
BackupSpecificationType::IMAGE, BackupSpecificationType::IMAGE,
filename, filename.to_owned(),
target, target.to_owned(),
"fidx", "fidx",
size, size,
)); ));
@ -876,8 +880,8 @@ async fn create_backup(
} }
upload_list.push(( upload_list.push((
BackupSpecificationType::CONFIG, BackupSpecificationType::CONFIG,
filename, filename.to_owned(),
target, target.to_owned(),
"blob", "blob",
metadata.len(), metadata.len(),
)); ));
@ -888,8 +892,8 @@ async fn create_backup(
} }
upload_list.push(( upload_list.push((
BackupSpecificationType::LOGFILE, BackupSpecificationType::LOGFILE,
filename, filename.to_owned(),
target, target.to_owned(),
"blob", "blob",
metadata.len(), metadata.len(),
)); ));
@ -1969,10 +1973,7 @@ impl ReadAt for BufferedDynamicReadAt {
fn main() { fn main() {
pbs_tools::setup_libc_malloc_opts(); pbs_tools::setup_libc_malloc_opts();
proxmox_log::Logger::from_env("PBS_LOG", proxmox_log::LevelFilter::INFO) init_cli_logger("PBS_LOG", proxmox_log::LevelFilter::INFO).expect("failed to initiate logger");
.stderr()
.init()
.expect("failed to initiate logger");
let backup_cmd_def = CliCommand::new(&API_METHOD_CREATE_BACKUP) let backup_cmd_def = CliCommand::new(&API_METHOD_CREATE_BACKUP)
.arg_param(&["backupspec"]) .arg_param(&["backupspec"])

View File

@ -10,6 +10,7 @@ use serde_json::{json, Value};
use tokio::io::AsyncWriteExt; use tokio::io::AsyncWriteExt;
use proxmox_compression::zstd::ZstdEncoder; use proxmox_compression::zstd::ZstdEncoder;
use proxmox_log::init_cli_logger;
use proxmox_router::cli::{ use proxmox_router::cli::{
complete_file_name, default_table_format_options, format_and_print_result_full, complete_file_name, default_table_format_options, format_and_print_result_full,
get_output_format, run_cli_command, CliCommand, CliCommandMap, CliEnvironment, ColumnConfig, get_output_format, run_cli_command, CliCommand, CliCommandMap, CliEnvironment, ColumnConfig,
@ -628,11 +629,7 @@ fn main() {
true => proxmox_log::LevelFilter::DEBUG, true => proxmox_log::LevelFilter::DEBUG,
false => proxmox_log::LevelFilter::INFO, false => proxmox_log::LevelFilter::INFO,
}; };
init_cli_logger("PBS_LOG", loglevel).expect("failed to initiate logger");
proxmox_log::Logger::from_env("PBS_LOG", loglevel)
.stderr()
.init()
.expect("failed to initiate logger");
let list_cmd_def = CliCommand::new(&API_METHOD_LIST) let list_cmd_def = CliCommand::new(&API_METHOD_LIST)
.arg_param(&["snapshot", "path"]) .arg_param(&["snapshot", "path"])

View File

@ -22,7 +22,7 @@ use pbs_client::pxar::{
use pxar::EntryKind; use pxar::EntryKind;
use proxmox_human_byte::HumanByte; use proxmox_human_byte::HumanByte;
use proxmox_log::{debug, enabled, error, Level}; use proxmox_log::{debug, enabled, error, init_cli_logger, Level};
use proxmox_router::cli::*; use proxmox_router::cli::*;
use proxmox_schema::api; use proxmox_schema::api;
@ -574,10 +574,7 @@ fn dump_archive(archive: String, payload_input: Option<String>) -> Result<(), Er
} }
fn main() { fn main() {
proxmox_log::Logger::from_env("PXAR_LOG", proxmox_log::LevelFilter::INFO) init_cli_logger("PXAR_LOG", proxmox_log::LevelFilter::INFO).expect("failed to initiate logger");
.stderr()
.init()
.expect("failed to initiate logger");
let cmd_def = CliCommandMap::new() let cmd_def = CliCommandMap::new()
.insert( .insert(

View File

@ -6,7 +6,7 @@ use std::os::unix::fs::OpenOptionsExt;
use anyhow::{bail, format_err}; use anyhow::{bail, format_err};
use bytes::Bytes; use bytes::Bytes;
use hyper::{body::HttpBody, Body, Request}; use hyper::{Body, Request};
use nix::sys::stat::Mode; use nix::sys::stat::Mode;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
@ -508,11 +508,9 @@ impl AcmeClient {
let (parts, body) = response.into_parts(); let (parts, body) = response.into_parts();
let status = parts.status.as_u16(); let status = parts.status.as_u16();
let body = body let body = hyper::body::to_bytes(body)
.collect()
.await .await
.map_err(|err| Error::Custom(format!("failed to retrieve response body: {}", err)))? .map_err(|err| Error::Custom(format!("failed to retrieve response body: {}", err)))?;
.to_bytes();
let got_nonce = if let Some(new_nonce) = parts.headers.get(proxmox_acme::REPLAY_NONCE) { let got_nonce = if let Some(new_nonce) = parts.headers.get(proxmox_acme::REPLAY_NONCE) {
let new_nonce = new_nonce.to_str().map_err(|err| { let new_nonce = new_nonce.to_str().map_err(|err| {

View File

@ -29,6 +29,19 @@ use crate::server::jobstate::Job;
/// Authentication domain/realm index. /// Authentication domain/realm index.
fn list_domains(rpcenv: &mut dyn RpcEnvironment) -> Result<Vec<BasicRealmInfo>, Error> { fn list_domains(rpcenv: &mut dyn RpcEnvironment) -> Result<Vec<BasicRealmInfo>, Error> {
let mut list = Vec::new(); let mut list = Vec::new();
list.push(serde_json::from_value(json!({
"realm": "pam",
"type": "pam",
"comment": "Linux PAM standard authentication",
"default": Some(true),
}))?);
list.push(serde_json::from_value(json!({
"realm": "pbs",
"type": "pbs",
"comment": "Proxmox Backup authentication server",
}))?);
let (config, digest) = pbs_config::domains::config()?; let (config, digest) = pbs_config::domains::config()?;
for (_, (section_type, v)) in config.sections.iter() { for (_, (section_type, v)) in config.sections.iter() {

View File

@ -8,16 +8,16 @@ use std::collections::HashMap;
use proxmox_router::{ApiMethod, Permission, Router, RpcEnvironment, SubdirMap}; use proxmox_router::{ApiMethod, Permission, Router, RpcEnvironment, SubdirMap};
use proxmox_schema::api; use proxmox_schema::api;
use proxmox_section_config::SectionConfigData;
use proxmox_tfa::api::TfaConfig; use proxmox_tfa::api::TfaConfig;
use pbs_api_types::{ use pbs_api_types::{
ApiToken, Authid, Tokenname, User, UserUpdater, UserWithTokens, Userid, ENABLE_USER_SCHEMA, ApiToken, Authid, Tokenname, User, UserUpdater, UserWithTokens, Userid, ENABLE_USER_SCHEMA,
EXPIRE_USER_SCHEMA, PASSWORD_FORMAT, PBS_PASSWORD_SCHEMA, PRIV_PERMISSIONS_MODIFY, EXPIRE_USER_SCHEMA, PASSWORD_FORMAT, PBS_PASSWORD_SCHEMA, PRIV_PERMISSIONS_MODIFY,
PRIV_SYS_AUDIT, PROXMOX_CONFIG_DIGEST_SCHEMA, REGENERATE_TOKEN_SCHEMA, PRIV_SYS_AUDIT, PROXMOX_CONFIG_DIGEST_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA,
SINGLE_LINE_COMMENT_SCHEMA,
}; };
use pbs_config::{acl::AclTree, token_shadow, CachedUserInfo}; use pbs_config::token_shadow;
use pbs_config::CachedUserInfo;
fn new_user_with_tokens(user: User, tfa: &TfaConfig) -> UserWithTokens { fn new_user_with_tokens(user: User, tfa: &TfaConfig) -> UserWithTokens {
UserWithTokens { UserWithTokens {
@ -354,7 +354,6 @@ pub async fn update_user(
pub fn delete_user(userid: Userid, digest: Option<String>) -> Result<(), Error> { pub fn delete_user(userid: Userid, digest: Option<String>) -> Result<(), Error> {
let _lock = pbs_config::user::lock_config()?; let _lock = pbs_config::user::lock_config()?;
let _tfa_lock = crate::config::tfa::write_lock()?; let _tfa_lock = crate::config::tfa::write_lock()?;
let _acl_lock = pbs_config::acl::lock_config()?;
let (mut config, expected_digest) = pbs_config::user::config()?; let (mut config, expected_digest) = pbs_config::user::config()?;
@ -382,22 +381,6 @@ pub fn delete_user(userid: Userid, digest: Option<String>) -> Result<(), Error>
eprintln!("error updating TFA config after deleting user {userid:?} {err}",); eprintln!("error updating TFA config after deleting user {userid:?} {err}",);
} }
let user_tokens: Vec<ApiToken> = config
.convert_to_typed_array::<ApiToken>("token")?
.into_iter()
.filter(|token| token.tokenid.user().eq(&userid))
.collect();
let (mut acl_tree, _digest) = pbs_config::acl::config()?;
for token in user_tokens {
if let Some(name) = token.tokenid.tokenname() {
do_delete_token(name.to_owned(), &userid, &mut config, &mut acl_tree)?;
}
}
pbs_config::user::save_config(&config)?;
pbs_config::acl::save_config(&acl_tree)?;
Ok(()) Ok(())
} }
@ -512,7 +495,8 @@ pub fn generate_token(
); );
} }
let secret = token_shadow::generate_and_set_secret(&tokenid)?; let secret = format!("{:x}", proxmox_uuid::Uuid::generate());
token_shadow::set_secret(&tokenid, &secret)?;
let token = ApiToken { let token = ApiToken {
tokenid, tokenid,
@ -531,15 +515,6 @@ pub fn generate_token(
})) }))
} }
#[api()]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// The set of properties that can be deleted from a token.
pub enum DeletableTokenProperty {
/// Delete the comment property.
Comment,
}
#[api( #[api(
protected: true, protected: true,
input: { input: {
@ -562,33 +537,11 @@ pub enum DeletableTokenProperty {
schema: EXPIRE_USER_SCHEMA, schema: EXPIRE_USER_SCHEMA,
optional: true, optional: true,
}, },
regenerate: {
schema: REGENERATE_TOKEN_SCHEMA,
optional: true,
},
delete: {
description: "List of properties to delete.",
type: Array,
optional: true,
items: {
type: DeletableTokenProperty,
}
},
digest: { digest: {
optional: true, optional: true,
schema: PROXMOX_CONFIG_DIGEST_SCHEMA, schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
}, },
}, },
},
returns: {
description: "Regenerated secret, if regenerate is set.",
properties: {
secret: {
type: String,
optional: true,
description: "The new API token secret",
},
},
}, },
access: { access: {
permission: &Permission::Or(&[ permission: &Permission::Or(&[
@ -604,10 +557,8 @@ pub fn update_token(
comment: Option<String>, comment: Option<String>,
enable: Option<bool>, enable: Option<bool>,
expire: Option<i64>, expire: Option<i64>,
regenerate: Option<bool>,
delete: Option<Vec<DeletableTokenProperty>>,
digest: Option<String>, digest: Option<String>,
) -> Result<Value, Error> { ) -> Result<(), Error> {
let _lock = pbs_config::user::lock_config()?; let _lock = pbs_config::user::lock_config()?;
let (mut config, expected_digest) = pbs_config::user::config()?; let (mut config, expected_digest) = pbs_config::user::config()?;
@ -622,14 +573,6 @@ pub fn update_token(
let mut data: ApiToken = config.lookup("token", &tokenid_string)?; let mut data: ApiToken = config.lookup("token", &tokenid_string)?;
if let Some(delete) = delete {
for delete_prop in delete {
match delete_prop {
DeletableTokenProperty::Comment => data.comment = None,
}
}
}
if let Some(comment) = comment { if let Some(comment) = comment {
let comment = comment.trim().to_string(); let comment = comment.trim().to_string();
if comment.is_empty() { if comment.is_empty() {
@ -647,21 +590,11 @@ pub fn update_token(
data.expire = if expire > 0 { Some(expire) } else { None }; data.expire = if expire > 0 { Some(expire) } else { None };
} }
let new_secret = if regenerate.unwrap_or_default() {
Some(token_shadow::generate_and_set_secret(&tokenid)?)
} else {
None
};
config.set_data(&tokenid_string, "token", &data)?; config.set_data(&tokenid_string, "token", &data)?;
pbs_config::user::save_config(&config)?; pbs_config::user::save_config(&config)?;
if let Some(secret) = new_secret { Ok(())
Ok(json!({"secret": secret}))
} else {
Ok(Value::Null)
}
} }
#[api( #[api(
@ -693,41 +626,29 @@ pub fn delete_token(
token_name: Tokenname, token_name: Tokenname,
digest: Option<String>, digest: Option<String>,
) -> Result<(), Error> { ) -> Result<(), Error> {
let _acl_lock = pbs_config::acl::lock_config()?; let _lock = pbs_config::user::lock_config()?;
let _user_lock = pbs_config::user::lock_config()?;
let (mut user_config, expected_digest) = pbs_config::user::config()?; let (mut config, expected_digest) = pbs_config::user::config()?;
if let Some(ref digest) = digest { if let Some(ref digest) = digest {
let digest = <[u8; 32]>::from_hex(digest)?; let digest = <[u8; 32]>::from_hex(digest)?;
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?; crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
} }
let (mut acl_config, _digest) = pbs_config::acl::config()?;
do_delete_token(token_name, &userid, &mut user_config, &mut acl_config)?;
pbs_config::user::save_config(&user_config)?;
pbs_config::acl::save_config(&acl_config)?;
Ok(())
}
fn do_delete_token(
token_name: Tokenname,
userid: &Userid,
user_config: &mut SectionConfigData,
acl_config: &mut AclTree,
) -> Result<(), Error> {
let tokenid = Authid::from((userid.clone(), Some(token_name.clone()))); let tokenid = Authid::from((userid.clone(), Some(token_name.clone())));
let tokenid_string = tokenid.to_string(); let tokenid_string = tokenid.to_string();
if user_config.sections.remove(&tokenid_string).is_none() {
if config.sections.remove(&tokenid_string).is_none() {
bail!( bail!(
"token '{}' of user '{}' does not exist.", "token '{}' of user '{}' does not exist.",
token_name.as_str(), token_name.as_str(),
userid userid
); );
} }
token_shadow::delete_secret(&tokenid)?; token_shadow::delete_secret(&tokenid)?;
acl_config.delete_authid(&tokenid);
pbs_config::user::save_config(&config)?;
Ok(()) Ok(())
} }

View File

@ -7,7 +7,7 @@ use std::os::unix::ffi::OsStrExt;
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use std::sync::Arc; use std::sync::Arc;
use anyhow::{bail, format_err, Context, Error}; use anyhow::{bail, format_err, Error};
use futures::*; use futures::*;
use hyper::http::request::Parts; use hyper::http::request::Parts;
use hyper::{header, Body, Response, StatusCode}; use hyper::{header, Body, Response, StatusCode};
@ -313,23 +313,13 @@ pub async fn delete_group(
)?; )?;
let delete_stats = datastore.remove_backup_group(&ns, &group)?; let delete_stats = datastore.remove_backup_group(&ns, &group)?;
if !delete_stats.all_removed() {
let error_msg = if datastore.old_locking() { if error_on_protected {
"could not remove empty groups directories due to old locking mechanism.\n\ bail!("group only partially deleted due to protected snapshots");
If you are an admin, please reboot PBS or ensure no old backup job is running anymore, \ } else {
then remove the file '/run/proxmox-backup/old-locking', and reload all PBS daemons" warn!("group only partially deleted due to protected snapshots");
} else if !delete_stats.all_removed() { }
"group only partially deleted due to protected snapshots"
} else {
return Ok(delete_stats);
};
if error_on_protected {
bail!(error_msg);
} else {
warn!(error_msg);
} }
Ok(delete_stats) Ok(delete_stats)
}) })
.await? .await?
@ -1228,7 +1218,11 @@ pub fn start_garbage_collection(
let upid_str = let upid_str =
crate::server::do_garbage_collection_job(job, datastore, &auth_id, None, to_stdout) crate::server::do_garbage_collection_job(job, datastore, &auth_id, None, to_stdout)
.map_err(|err| { .map_err(|err| {
format_err!("unable to start garbage collection job on datastore {store} - {err:#}") format_err!(
"unable to start garbage collection job on datastore {} - {}",
store,
err
)
})?; })?;
Ok(json!(upid_str)) Ok(json!(upid_str))
@ -2353,9 +2347,10 @@ pub async fn set_backup_owner(
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?; let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
let backup_group = datastore.backup_group(ns, backup_group); let backup_group = datastore.backup_group(ns, backup_group);
let owner = backup_group.get_owner()?;
if owner_check_required { if owner_check_required {
let owner = backup_group.get_owner()?;
let allowed = match (owner.is_token(), new_owner.is_token()) { let allowed = match (owner.is_token(), new_owner.is_token()) {
(true, true) => { (true, true) => {
// API token to API token, owned by same user // API token to API token, owned by same user
@ -2402,14 +2397,6 @@ pub async fn set_backup_owner(
); );
} }
let _guard = backup_group
.lock()
.with_context(|| format!("while setting the owner of group '{backup_group:?}'"))?;
if owner != backup_group.get_owner()? {
bail!("{owner} does not own this group anymore");
}
backup_group.set_owner(&new_owner, true)?; backup_group.set_owner(&new_owner, true)?;
Ok(()) Ok(())
@ -2429,12 +2416,20 @@ fn setup_mounted_device(datastore: &DataStoreConfig, tmp_mount_path: &str) -> Re
.owner(backup_user.uid) .owner(backup_user.uid)
.group(backup_user.gid); .group(backup_user.gid);
proxmox_sys::fs::create_path(&mount_point, Some(default_options), Some(options)) proxmox_sys::fs::create_path(
.map_err(|e| format_err!("creating mountpoint '{mount_point}' failed: {e}"))?; &mount_point,
Some(default_options.clone()),
Some(options.clone()),
)
.map_err(|e| format_err!("creating mountpoint '{mount_point}' failed: {e}"))?;
// can't be created before it is mounted, so we have to do it here // can't be created before it is mounted, so we have to do it here
proxmox_sys::fs::create_path(&full_store_path, Some(default_options), Some(options)) proxmox_sys::fs::create_path(
.map_err(|e| format_err!("creating datastore path '{full_store_path}' failed: {e}"))?; &full_store_path,
Some(default_options.clone()),
Some(options.clone()),
)
.map_err(|e| format_err!("creating datastore path '{full_store_path}' failed: {e}"))?;
info!( info!(
"bind mount '{}'({}) to '{}'", "bind mount '{}'({}) to '{}'",
@ -2473,8 +2468,8 @@ pub fn do_mount_device(datastore: DataStoreConfig) -> Result<(), Error> {
let default_options = proxmox_sys::fs::CreateOptions::new(); let default_options = proxmox_sys::fs::CreateOptions::new();
proxmox_sys::fs::create_path( proxmox_sys::fs::create_path(
&tmp_mount_path, &tmp_mount_path,
Some(default_options), Some(default_options.clone()),
Some(default_options), Some(default_options.clone()),
)?; )?;
info!("temporarily mounting '{uuid}' to '{}'", tmp_mount_path); info!("temporarily mounting '{uuid}' to '{}'", tmp_mount_path);

View File

@ -167,14 +167,7 @@ pub fn delete_namespace(
let (removed_all, stats) = datastore.remove_namespace_recursive(&ns, delete_groups)?; let (removed_all, stats) = datastore.remove_namespace_recursive(&ns, delete_groups)?;
if !removed_all { if !removed_all {
let err_msg = if delete_groups { let err_msg = if delete_groups {
if datastore.old_locking() { "group only partially deleted due to protected snapshots"
"could not remove empty group directoriess due to old locking mechanism.\n\
If you are an admin, please reboot PBS or ensure no old backup job is running \
anymore, then remove the file '/run/proxmox-backup/old-locking', and reload all \
PBS daemons"
} else {
"group only partially deleted due to protected snapshots"
}
} else { } else {
"only partially deleted due to existing groups but `delete-groups` not true" "only partially deleted due to existing groups but `delete-groups` not true"
}; };

View File

@ -1,6 +1,5 @@
use anyhow::{bail, format_err, Context, Error}; use anyhow::{bail, format_err, Error};
use pbs_config::BackupLockGuard; use nix::dir::Dir;
use std::collections::HashMap; use std::collections::HashMap;
use std::sync::{Arc, Mutex}; use std::sync::{Arc, Mutex};
use tracing::info; use tracing::info;
@ -9,7 +8,7 @@ use ::serde::Serialize;
use serde_json::{json, Value}; use serde_json::{json, Value};
use proxmox_router::{RpcEnvironment, RpcEnvironmentType}; use proxmox_router::{RpcEnvironment, RpcEnvironmentType};
use proxmox_sys::fs::{replace_file, CreateOptions}; use proxmox_sys::fs::{lock_dir_noblock_shared, replace_file, CreateOptions};
use pbs_api_types::Authid; use pbs_api_types::Authid;
use pbs_datastore::backup_info::{BackupDir, BackupInfo}; use pbs_datastore::backup_info::{BackupDir, BackupInfo};
@ -636,7 +635,7 @@ impl BackupEnvironment {
/// If verify-new is set on the datastore, this will run a new verify task /// If verify-new is set on the datastore, this will run a new verify task
/// for the backup. If not, this will return and also drop the passed lock /// for the backup. If not, this will return and also drop the passed lock
/// immediately. /// immediately.
pub fn verify_after_complete(&self, excl_snap_lock: BackupLockGuard) -> Result<(), Error> { pub fn verify_after_complete(&self, excl_snap_lock: Dir) -> Result<(), Error> {
self.ensure_finished()?; self.ensure_finished()?;
if !self.datastore.verify_new() { if !self.datastore.verify_new() {
@ -646,12 +645,12 @@ impl BackupEnvironment {
// Downgrade to shared lock, the backup itself is finished // Downgrade to shared lock, the backup itself is finished
drop(excl_snap_lock); drop(excl_snap_lock);
let snap_lock = self.backup_dir.lock_shared().with_context(|| { let snap_lock = lock_dir_noblock_shared(
format!( &self.backup_dir.full_path(),
"while trying to verify snapshot '{:?}' after completion", "snapshot",
self.backup_dir "snapshot is already locked by another operation",
) )?;
})?;
let worker_id = format!( let worker_id = format!(
"{}:{}/{}/{:08X}", "{}:{}/{}/{:08X}",
self.datastore.name(), self.datastore.name(),

View File

@ -1,6 +1,6 @@
//! Backup protocol (HTTP2 upgrade) //! Backup protocol (HTTP2 upgrade)
use anyhow::{bail, format_err, Context, Error}; use anyhow::{bail, format_err, Error};
use futures::*; use futures::*;
use hex::FromHex; use hex::FromHex;
use hyper::header::{HeaderValue, CONNECTION, UPGRADE}; use hyper::header::{HeaderValue, CONNECTION, UPGRADE};
@ -17,6 +17,7 @@ use proxmox_router::{
}; };
use proxmox_schema::*; use proxmox_schema::*;
use proxmox_sortable_macro::sortable; use proxmox_sortable_macro::sortable;
use proxmox_sys::fs::lock_dir_noblock_shared;
use pbs_api_types::{ use pbs_api_types::{
ArchiveType, Authid, BackupNamespace, BackupType, Operation, VerifyState, ArchiveType, Authid, BackupNamespace, BackupType, Operation, VerifyState,
@ -185,10 +186,12 @@ fn upgrade_to_backup_protocol(
} }
// lock last snapshot to prevent forgetting/pruning it during backup // lock last snapshot to prevent forgetting/pruning it during backup
let guard = last.backup_dir let full_path = last.backup_dir.full_path();
.lock_shared() Some(lock_dir_noblock_shared(
.with_context(|| format!("while locking last snapshot during backup '{last:?}'"))?; &full_path,
Some(guard) "snapshot",
"base snapshot is already locked by another operation",
)?)
} else { } else {
None None
}; };
@ -236,12 +239,14 @@ fn upgrade_to_backup_protocol(
.and_then(move |conn| { .and_then(move |conn| {
env2.debug("protocol upgrade done"); env2.debug("protocol upgrade done");
let mut http = hyper::server::conn::http2::Builder::new(ExecInheritLogContext); let mut http = hyper::server::conn::Http::new()
.with_executor(ExecInheritLogContext);
http.http2_only(true);
// increase window size: todo - find optimal size // increase window size: todo - find optimal size
let window_size = 32 * 1024 * 1024; // max = (1 << 31) - 2 let window_size = 32 * 1024 * 1024; // max = (1 << 31) - 2
http.initial_stream_window_size(window_size); http.http2_initial_stream_window_size(window_size);
http.initial_connection_window_size(window_size); http.http2_initial_connection_window_size(window_size);
http.max_frame_size(4 * 1024 * 1024); http.http2_max_frame_size(4 * 1024 * 1024);
let env3 = env2.clone(); let env3 = env2.clone();
http.serve_connection(conn, service).map(move |result| { http.serve_connection(conn, service).map(move |result| {
@ -853,8 +858,8 @@ fn download_previous(
}; };
if let Some(index) = index { if let Some(index) = index {
env.log(format!( env.log(format!(
"register chunks in '{archive_name}' from previous backup '{}'.", "register chunks in '{}' from previous backup.",
last_backup.backup_dir.dir(), archive_name
)); ));
for pos in 0..index.index_count() { for pos in 0..index.index_count() {
@ -865,10 +870,7 @@ fn download_previous(
} }
} }
env.log(format!( env.log(format!("download '{}' from previous backup.", archive_name));
"download '{archive_name}' from previous backup '{}'.",
last_backup.backup_dir.dir(),
));
crate::api2::helpers::create_download_response(path).await crate::api2::helpers::create_download_response(path).await
} }
.boxed() .boxed()

View File

@ -91,10 +91,6 @@ pub async fn create_ad_realm(
auth_helpers::store_ldap_bind_password(&config.realm, &password, &domain_config_lock)?; auth_helpers::store_ldap_bind_password(&config.realm, &password, &domain_config_lock)?;
} }
if let Some(true) = config.default {
domains::unset_default_realm(&mut domains)?;
}
domains.set_data(&config.realm, "ad", &config)?; domains.set_data(&config.realm, "ad", &config)?;
domains::save_config(&domains)?; domains::save_config(&domains)?;
@ -140,8 +136,6 @@ pub enum DeletableProperty {
Port, Port,
/// Comment /// Comment
Comment, Comment,
/// Is default realm
Default,
/// Verify server certificate /// Verify server certificate
Verify, Verify,
/// Mode (ldap, ldap+starttls or ldaps), /// Mode (ldap, ldap+starttls or ldaps),
@ -223,9 +217,6 @@ pub async fn update_ad_realm(
DeletableProperty::Comment => { DeletableProperty::Comment => {
config.comment = None; config.comment = None;
} }
DeletableProperty::Default => {
config.default = None;
}
DeletableProperty::Port => { DeletableProperty::Port => {
config.port = None; config.port = None;
} }
@ -282,13 +273,6 @@ pub async fn update_ad_realm(
} }
} }
if let Some(true) = update.default {
domains::unset_default_realm(&mut domains)?;
config.default = Some(true);
} else {
config.default = None;
}
if let Some(mode) = update.mode { if let Some(mode) = update.mode {
config.mode = Some(mode); config.mode = Some(mode);
} }
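// Standalone sketch of the "at most one default realm" invariant behind the
// unset_default_realm() calls added in this section: before one realm is
// flagged as default, the flag is cleared on all others. The map and struct
// below are simplified stand-ins for the section-config types in pbs-config.
use std::collections::HashMap;

#[derive(Default)]
struct RealmConfig {
    default: Option<bool>,
}

fn set_default_realm(realms: &mut HashMap<String, RealmConfig>, name: &str) {
    // clear the flag on every realm first (this is what unset_default_realm does) ...
    for realm in realms.values_mut() {
        realm.default = None;
    }
    // ... then mark the requested realm as the new default
    realms.entry(name.to_string()).or_default().default = Some(true);
}

fn main() {
    let mut realms = HashMap::new();
    realms.insert("pam".to_string(), RealmConfig { default: Some(true) });
    set_default_realm(&mut realms, "company-ad");
    assert_eq!(realms["pam"].default, None);
    assert_eq!(realms["company-ad"].default, Some(true));
}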


@ -81,10 +81,6 @@ pub fn create_ldap_realm(config: LdapRealmConfig, password: Option<String>) -> R
auth_helpers::store_ldap_bind_password(&config.realm, &password, &domain_config_lock)?; auth_helpers::store_ldap_bind_password(&config.realm, &password, &domain_config_lock)?;
} }
if let Some(true) = config.default {
domains::unset_default_realm(&mut domains)?;
}
domains.set_data(&config.realm, "ldap", &config)?; domains.set_data(&config.realm, "ldap", &config)?;
domains::save_config(&domains)?; domains::save_config(&domains)?;
@ -175,8 +171,6 @@ pub enum DeletableProperty {
Port, Port,
/// Comment /// Comment
Comment, Comment,
/// Is default realm
Default,
/// Verify server certificate /// Verify server certificate
Verify, Verify,
/// Mode (ldap, ldap+starttls or ldaps), /// Mode (ldap, ldap+starttls or ldaps),
@ -258,9 +252,6 @@ pub fn update_ldap_realm(
DeletableProperty::Comment => { DeletableProperty::Comment => {
config.comment = None; config.comment = None;
} }
DeletableProperty::Default => {
config.default = None;
}
DeletableProperty::Port => { DeletableProperty::Port => {
config.port = None; config.port = None;
} }
@ -321,13 +312,6 @@ pub fn update_ldap_realm(
} }
} }
if let Some(true) = update.default {
domains::unset_default_realm(&mut domains)?;
config.default = Some(true);
} else {
config.default = None;
}
if let Some(mode) = update.mode { if let Some(mode) = update.mode {
config.mode = Some(mode); config.mode = Some(mode);
} }


@ -5,14 +5,10 @@ use proxmox_sortable_macro::sortable;
pub mod ad; pub mod ad;
pub mod ldap; pub mod ldap;
pub mod openid; pub mod openid;
pub mod pam;
pub mod pbs;
pub mod tfa; pub mod tfa;
#[sortable] #[sortable]
const SUBDIRS: SubdirMap = &sorted!([ const SUBDIRS: SubdirMap = &sorted!([
("pam", &pam::ROUTER),
("pbs", &pbs::ROUTER),
("ad", &ad::ROUTER), ("ad", &ad::ROUTER),
("ldap", &ldap::ROUTER), ("ldap", &ldap::ROUTER),
("openid", &openid::ROUTER), ("openid", &openid::ROUTER),


@ -65,10 +65,6 @@ pub fn create_openid_realm(config: OpenIdRealmConfig) -> Result<(), Error> {
param_bail!("realm", "realm '{}' already exists.", config.realm); param_bail!("realm", "realm '{}' already exists.", config.realm);
} }
if let Some(true) = config.default {
domains::unset_default_realm(&mut domains)?;
}
domains.set_data(&config.realm, "openid", &config)?; domains.set_data(&config.realm, "openid", &config)?;
domains::save_config(&domains)?; domains::save_config(&domains)?;
@ -153,8 +149,6 @@ pub enum DeletableProperty {
ClientKey, ClientKey,
/// Delete the comment property. /// Delete the comment property.
Comment, Comment,
/// Delete the default property.
Default,
/// Delete the autocreate property /// Delete the autocreate property
Autocreate, Autocreate,
/// Delete the scopes property /// Delete the scopes property
@ -223,9 +217,6 @@ pub fn update_openid_realm(
DeletableProperty::Comment => { DeletableProperty::Comment => {
config.comment = None; config.comment = None;
} }
DeletableProperty::Default => {
config.default = None;
}
DeletableProperty::Autocreate => { DeletableProperty::Autocreate => {
config.autocreate = None; config.autocreate = None;
} }
@ -251,13 +242,6 @@ pub fn update_openid_realm(
} }
} }
if let Some(true) = update.default {
domains::unset_default_realm(&mut domains)?;
config.default = Some(true);
} else {
config.default = None;
}
if let Some(issuer_url) = update.issuer_url { if let Some(issuer_url) = update.issuer_url {
config.issuer_url = issuer_url; config.issuer_url = issuer_url;
} }


@ -1,130 +0,0 @@
use ::serde::{Deserialize, Serialize};
use anyhow::Error;
use hex::FromHex;
use proxmox_router::{Permission, Router, RpcEnvironment};
use proxmox_schema::api;
use pbs_api_types::{
PamRealmConfig, PamRealmConfigUpdater, PRIV_REALM_ALLOCATE, PRIV_SYS_AUDIT,
PROXMOX_CONFIG_DIGEST_SCHEMA,
};
use pbs_config::domains;
#[api(
returns: {
type: PamRealmConfig,
},
access: {
permission: &Permission::Privilege(&["access", "domains"], PRIV_SYS_AUDIT, false),
},
)]
/// Read the PAM realm configuration
pub fn read_pam_realm(rpcenv: &mut dyn RpcEnvironment) -> Result<PamRealmConfig, Error> {
let (domains, digest) = domains::config()?;
let config = domains.lookup("pam", "pam")?;
rpcenv["digest"] = hex::encode(digest).into();
Ok(config)
}
#[api]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Deletable property name
pub enum DeletableProperty {
/// Delete the comment property.
Comment,
/// Delete the default property.
Default,
}
#[api(
protected: true,
input: {
properties: {
update: {
type: PamRealmConfigUpdater,
flatten: true,
},
delete: {
description: "List of properties to delete.",
type: Array,
optional: true,
items: {
type: DeletableProperty,
}
},
digest: {
optional: true,
schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
},
},
},
returns: {
type: PamRealmConfig,
},
access: {
permission: &Permission::Privilege(&["access", "domains"], PRIV_REALM_ALLOCATE, false),
},
)]
/// Update the PAM realm configuration
pub fn update_pam_realm(
update: PamRealmConfigUpdater,
delete: Option<Vec<DeletableProperty>>,
digest: Option<String>,
_rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
let _lock = domains::lock_config()?;
let (mut domains, expected_digest) = domains::config()?;
if let Some(ref digest) = digest {
let digest = <[u8; 32]>::from_hex(digest)?;
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}
let mut config: PamRealmConfig = domains.lookup("pam", "pam")?;
if let Some(delete) = delete {
for delete_prop in delete {
match delete_prop {
DeletableProperty::Comment => {
config.comment = None;
}
DeletableProperty::Default => {
config.default = None;
}
}
}
}
if let Some(comment) = update.comment {
let comment = comment.trim().to_string();
if comment.is_empty() {
config.comment = None;
} else {
config.comment = Some(comment);
}
}
if let Some(true) = update.default {
pbs_config::domains::unset_default_realm(&mut domains)?;
config.default = Some(true);
} else {
config.default = None;
}
domains.set_data("pam", "pam", &config)?;
domains::save_config(&domains)?;
Ok(())
}
pub const ROUTER: Router = Router::new()
.get(&API_METHOD_READ_PAM_REALM)
.put(&API_METHOD_UPDATE_PAM_REALM);
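// Hedged sketch of the digest check performed at the top of update_pam_realm():
// the caller passes back the digest it originally read, and the update is
// rejected if the configuration changed in the meantime. This standalone helper
// only mirrors the comparison; the real code uses
// crate::tools::detect_modified_configuration_file().
use anyhow::{bail, Error};
use hex::FromHex;

fn check_config_digest(supplied_hex: &str, current: &[u8; 32]) -> Result<(), Error> {
    let supplied = <[u8; 32]>::from_hex(supplied_hex)?;
    if &supplied != current {
        bail!("detected modified configuration - file changed by other user? Try again.");
    }
    Ok(())
}

fn main() -> Result<(), Error> {
    let current = [0u8; 32];
    // usage sketch: the digest read earlier is handed back with the update
    check_config_digest(&hex::encode(current), &current)?;
    Ok(())
}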


@ -1,130 +0,0 @@
use ::serde::{Deserialize, Serialize};
use anyhow::Error;
use hex::FromHex;
use proxmox_router::{Permission, Router, RpcEnvironment};
use proxmox_schema::api;
use pbs_api_types::{
PbsRealmConfig, PbsRealmConfigUpdater, PRIV_REALM_ALLOCATE, PRIV_SYS_AUDIT,
PROXMOX_CONFIG_DIGEST_SCHEMA,
};
use pbs_config::domains;
#[api(
returns: {
type: PbsRealmConfig,
},
access: {
permission: &Permission::Privilege(&["access", "domains"], PRIV_SYS_AUDIT, false),
},
)]
/// Read the Proxmox Backup authentication server realm configuration
pub fn read_pbs_realm(rpcenv: &mut dyn RpcEnvironment) -> Result<PbsRealmConfig, Error> {
let (domains, digest) = domains::config()?;
let config = domains.lookup("pbs", "pbs")?;
rpcenv["digest"] = hex::encode(digest).into();
Ok(config)
}
#[api]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Deletable property name
pub enum DeletableProperty {
/// Delete the comment property.
Comment,
/// Delete the default property.
Default,
}
#[api(
protected: true,
input: {
properties: {
update: {
type: PbsRealmConfigUpdater,
flatten: true,
},
delete: {
description: "List of properties to delete.",
type: Array,
optional: true,
items: {
type: DeletableProperty,
}
},
digest: {
optional: true,
schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
},
},
},
returns: {
type: PbsRealmConfig,
},
access: {
permission: &Permission::Privilege(&["access", "domains"], PRIV_REALM_ALLOCATE, false),
},
)]
/// Update the Proxmox Backup authentication server realm configuration
pub fn update_pbs_realm(
update: PbsRealmConfigUpdater,
delete: Option<Vec<DeletableProperty>>,
digest: Option<String>,
_rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
let _lock = domains::lock_config()?;
let (mut domains, expected_digest) = domains::config()?;
if let Some(ref digest) = digest {
let digest = <[u8; 32]>::from_hex(digest)?;
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}
let mut config: PbsRealmConfig = domains.lookup("pbs", "pbs")?;
if let Some(delete) = delete {
for delete_prop in delete {
match delete_prop {
DeletableProperty::Comment => {
config.comment = None;
}
DeletableProperty::Default => {
config.default = None;
}
}
}
}
if let Some(comment) = update.comment {
let comment = comment.trim().to_string();
if comment.is_empty() {
config.comment = None;
} else {
config.comment = Some(comment);
}
}
if let Some(true) = update.default {
pbs_config::domains::unset_default_realm(&mut domains)?;
config.default = Some(true);
} else {
config.default = None;
}
domains.set_data("pbs", "pbs", &config)?;
domains::save_config(&domains)?;
Ok(())
}
pub const ROUTER: Router = Router::new()
.get(&API_METHOD_READ_PBS_REALM)
.put(&API_METHOD_UPDATE_PBS_REALM);


@ -1,10 +1,10 @@
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use ::serde::{Deserialize, Serialize}; use ::serde::{Deserialize, Serialize};
use anyhow::{bail, Context, Error}; use anyhow::{bail, format_err, Error};
use hex::FromHex; use hex::FromHex;
use serde_json::Value; use serde_json::Value;
use tracing::{info, warn}; use tracing::warn;
use proxmox_router::{http_bail, Permission, Router, RpcEnvironment, RpcEnvironmentType}; use proxmox_router::{http_bail, Permission, Router, RpcEnvironment, RpcEnvironmentType};
use proxmox_schema::{api, param_bail, ApiType}; use proxmox_schema::{api, param_bail, ApiType};
@ -70,30 +70,6 @@ pub fn list_datastores(
Ok(list.into_iter().filter(filter_by_privs).collect()) Ok(list.into_iter().filter(filter_by_privs).collect())
} }
struct UnmountGuard {
path: Option<PathBuf>,
}
impl UnmountGuard {
fn new(path: Option<PathBuf>) -> Self {
UnmountGuard { path }
}
fn disable(mut self) {
self.path = None;
}
}
impl Drop for UnmountGuard {
fn drop(&mut self) {
if let Some(path) = &self.path {
if let Err(e) = unmount_by_mountpoint(path) {
warn!("could not unmount device: {e}");
}
}
}
}
pub(crate) fn do_create_datastore( pub(crate) fn do_create_datastore(
_lock: BackupLockGuard, _lock: BackupLockGuard,
mut config: SectionConfigData, mut config: SectionConfigData,
@ -111,66 +87,59 @@ pub(crate) fn do_create_datastore(
param_bail!("path", err); param_bail!("path", err);
} }
let need_unmount = datastore.backing_device.is_some();
if need_unmount {
do_mount_device(datastore.clone())?;
};
let tuning: DatastoreTuning = serde_json::from_value( let tuning: DatastoreTuning = serde_json::from_value(
DatastoreTuning::API_SCHEMA DatastoreTuning::API_SCHEMA
.parse_property_string(datastore.tuning.as_deref().unwrap_or(""))?, .parse_property_string(datastore.tuning.as_deref().unwrap_or(""))?,
)?; )?;
let unmount_guard = if datastore.backing_device.is_some() { let res = if reuse_datastore {
do_mount_device(datastore.clone())?; ChunkStore::verify_chunkstore(&path)
UnmountGuard::new(Some(path.clone()))
} else {
UnmountGuard::new(None)
};
let chunk_store = if reuse_datastore {
ChunkStore::verify_chunkstore(&path).and_then(|_| {
// Must be the only instance accessing and locking the chunk store,
// dropping will close all other locks from this process on the lockfile as well.
ChunkStore::open(
&datastore.name,
&path,
tuning.sync_level.unwrap_or_default(),
)
})?
} else { } else {
let mut is_empty = true;
if let Ok(dir) = std::fs::read_dir(&path) { if let Ok(dir) = std::fs::read_dir(&path) {
for file in dir { for file in dir {
let name = file?.file_name(); let name = file?.file_name();
let name = name.to_str(); let name = name.to_str();
if !name.is_some_and(|name| name.starts_with('.') || name == "lost+found") { if !name.is_some_and(|name| name.starts_with('.') || name == "lost+found") {
bail!("datastore path not empty"); is_empty = false;
break;
} }
} }
} }
let backup_user = pbs_config::backup_user()?; if is_empty {
ChunkStore::create( let backup_user = pbs_config::backup_user()?;
&datastore.name, ChunkStore::create(
path.clone(), &datastore.name,
backup_user.uid, path.clone(),
backup_user.gid, backup_user.uid,
tuning.sync_level.unwrap_or_default(), backup_user.gid,
)? tuning.sync_level.unwrap_or_default(),
)
.map(|_| ())
} else {
Err(format_err!("datastore path not empty"))
}
}; };
if tuning.gc_atime_safety_check.unwrap_or(true) { if res.is_err() {
chunk_store if need_unmount {
.check_fs_atime_updates(true) if let Err(e) = unmount_by_mountpoint(&path) {
.context("access time safety check failed")?; warn!("could not unmount device: {e}");
info!("Access time update check successful."); }
} else { }
info!("Access time update check skipped."); return res;
} }
config.set_data(&datastore.name, "datastore", &datastore)?; config.set_data(&datastore.name, "datastore", &datastore)?;
pbs_config::datastore::save_config(&config)?; pbs_config::datastore::save_config(&config)?;
jobstate::create_state_file("garbage_collection", &datastore.name)?; jobstate::create_state_file("garbage_collection", &datastore.name)
unmount_guard.disable();
Ok(())
} }
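// Sketch of the drop-guard pattern that the UnmountGuard above introduces,
// reduced to a standalone example: the cleanup action runs on every early `?`
// return, and disable() turns it into a no-op once datastore creation
// succeeded. The unmount() helper below is a placeholder for
// unmount_by_mountpoint().
use std::path::{Path, PathBuf};

struct CleanupGuard {
    path: Option<PathBuf>,
}

impl CleanupGuard {
    fn new(path: Option<PathBuf>) -> Self {
        CleanupGuard { path }
    }
    /// Call once the operation succeeded so Drop does nothing.
    fn disable(mut self) {
        self.path = None;
    }
}

impl Drop for CleanupGuard {
    fn drop(&mut self) {
        if let Some(path) = &self.path {
            if let Err(err) = unmount(path) {
                eprintln!("could not unmount device at {path:?}: {err}");
            }
        }
    }
}

fn unmount(_path: &Path) -> Result<(), std::io::Error> {
    // placeholder: the real helper calls umount(2) / the `umount` binary
    Ok(())
}

fn create_datastore(path: PathBuf, mounted: bool) -> Result<(), std::io::Error> {
    let guard = CleanupGuard::new(mounted.then(|| path.clone()));
    // ... chunk store creation, atime check, config update; any `?` in here
    // unmounts the device again via the guard ...
    guard.disable(); // success: keep the datastore mounted
    Ok(())
}

fn main() {
    create_datastore(PathBuf::from("/mnt/datastore/ds1"), true).unwrap();
}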
#[api( #[api(


@ -335,10 +335,6 @@ pub enum DeletableProperty {
MaxDepth, MaxDepth,
/// Delete the transfer_last property, /// Delete the transfer_last property,
TransferLast, TransferLast,
/// Delete the encrypted_only property,
EncryptedOnly,
/// Delete the verified_only property,
VerifiedOnly,
/// Delete the sync_direction property, /// Delete the sync_direction property,
SyncDirection, SyncDirection,
} }
@ -452,12 +448,6 @@ pub fn update_sync_job(
DeletableProperty::TransferLast => { DeletableProperty::TransferLast => {
data.transfer_last = None; data.transfer_last = None;
} }
DeletableProperty::EncryptedOnly => {
data.encrypted_only = None;
}
DeletableProperty::VerifiedOnly => {
data.verified_only = None;
}
DeletableProperty::SyncDirection => { DeletableProperty::SyncDirection => {
data.sync_direction = None; data.sync_direction = None;
} }
@ -501,12 +491,6 @@ pub fn update_sync_job(
if let Some(resync_corrupt) = update.resync_corrupt { if let Some(resync_corrupt) = update.resync_corrupt {
data.resync_corrupt = Some(resync_corrupt); data.resync_corrupt = Some(resync_corrupt);
} }
if let Some(encrypted_only) = update.encrypted_only {
data.encrypted_only = Some(encrypted_only);
}
if let Some(verified_only) = update.verified_only {
data.verified_only = Some(verified_only);
}
if let Some(sync_direction) = update.sync_direction { if let Some(sync_direction) = update.sync_direction {
data.sync_direction = Some(sync_direction); data.sync_direction = Some(sync_direction);
} }
@ -681,8 +665,6 @@ acl:1:/remote/remote1/remotestore1:write@pbs:RemoteSyncOperator
schedule: None, schedule: None,
limit: pbs_api_types::RateLimitConfig::default(), // no limit limit: pbs_api_types::RateLimitConfig::default(), // no limit
transfer_last: None, transfer_last: None,
encrypted_only: None,
verified_only: None,
sync_direction: None, // use default sync_direction: None, // use default
}; };


@ -140,8 +140,6 @@ pub enum DeletableProperty {
MaxDepth, MaxDepth,
/// Delete the 'ns' property /// Delete the 'ns' property
Ns, Ns,
/// Delete the 'worker-threads' property
WorkerThreads,
} }
#[api( #[api(
@ -224,9 +222,6 @@ pub fn update_tape_backup_job(
DeletableProperty::Ns => { DeletableProperty::Ns => {
data.setup.ns = None; data.setup.ns = None;
} }
DeletableProperty::WorkerThreads => {
data.setup.worker_threads = None;
}
} }
} }
} }
@ -265,9 +260,6 @@ pub fn update_tape_backup_job(
if update.setup.max_depth.is_some() { if update.setup.max_depth.is_some() {
data.setup.max_depth = update.setup.max_depth; data.setup.max_depth = update.setup.max_depth;
} }
if update.setup.worker_threads.is_some() {
data.setup.worker_threads = update.setup.worker_threads;
}
let schedule_changed = data.schedule != update.schedule; let schedule_changed = data.schedule != update.schedule;
if update.schedule.is_some() { if update.schedule.is_some() {


@ -10,8 +10,7 @@ use pbs_api_types::{
Authid, BackupNamespace, GroupFilter, RateLimitConfig, SyncJobConfig, DATASTORE_SCHEMA, Authid, BackupNamespace, GroupFilter, RateLimitConfig, SyncJobConfig, DATASTORE_SCHEMA,
GROUP_FILTER_LIST_SCHEMA, NS_MAX_DEPTH_REDUCED_SCHEMA, PRIV_DATASTORE_BACKUP, GROUP_FILTER_LIST_SCHEMA, NS_MAX_DEPTH_REDUCED_SCHEMA, PRIV_DATASTORE_BACKUP,
PRIV_DATASTORE_PRUNE, PRIV_REMOTE_READ, REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA, PRIV_DATASTORE_PRUNE, PRIV_REMOTE_READ, REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA,
RESYNC_CORRUPT_SCHEMA, SYNC_ENCRYPTED_ONLY_SCHEMA, SYNC_VERIFIED_ONLY_SCHEMA, RESYNC_CORRUPT_SCHEMA, TRANSFER_LAST_SCHEMA,
TRANSFER_LAST_SCHEMA,
}; };
use pbs_config::CachedUserInfo; use pbs_config::CachedUserInfo;
use proxmox_rest_server::WorkerTask; use proxmox_rest_server::WorkerTask;
@ -88,8 +87,6 @@ impl TryFrom<&SyncJobConfig> for PullParameters {
sync_job.group_filter.clone(), sync_job.group_filter.clone(),
sync_job.limit.clone(), sync_job.limit.clone(),
sync_job.transfer_last, sync_job.transfer_last,
sync_job.encrypted_only,
sync_job.verified_only,
sync_job.resync_corrupt, sync_job.resync_corrupt,
) )
} }
@ -136,14 +133,6 @@ impl TryFrom<&SyncJobConfig> for PullParameters {
schema: TRANSFER_LAST_SCHEMA, schema: TRANSFER_LAST_SCHEMA,
optional: true, optional: true,
}, },
"encrypted-only": {
schema: SYNC_ENCRYPTED_ONLY_SCHEMA,
optional: true,
},
"verified-only": {
schema: SYNC_VERIFIED_ONLY_SCHEMA,
optional: true,
},
"resync-corrupt": { "resync-corrupt": {
schema: RESYNC_CORRUPT_SCHEMA, schema: RESYNC_CORRUPT_SCHEMA,
optional: true, optional: true,
@ -172,8 +161,6 @@ async fn pull(
group_filter: Option<Vec<GroupFilter>>, group_filter: Option<Vec<GroupFilter>>,
limit: RateLimitConfig, limit: RateLimitConfig,
transfer_last: Option<usize>, transfer_last: Option<usize>,
encrypted_only: Option<bool>,
verified_only: Option<bool>,
resync_corrupt: Option<bool>, resync_corrupt: Option<bool>,
rpcenv: &mut dyn RpcEnvironment, rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> { ) -> Result<String, Error> {
@ -212,8 +199,6 @@ async fn pull(
group_filter, group_filter,
limit, limit,
transfer_last, transfer_last,
encrypted_only,
verified_only,
resync_corrupt, resync_corrupt,
)?; )?;


@ -5,8 +5,7 @@ use pbs_api_types::{
Authid, BackupNamespace, GroupFilter, RateLimitConfig, DATASTORE_SCHEMA, Authid, BackupNamespace, GroupFilter, RateLimitConfig, DATASTORE_SCHEMA,
GROUP_FILTER_LIST_SCHEMA, NS_MAX_DEPTH_REDUCED_SCHEMA, PRIV_DATASTORE_BACKUP, GROUP_FILTER_LIST_SCHEMA, NS_MAX_DEPTH_REDUCED_SCHEMA, PRIV_DATASTORE_BACKUP,
PRIV_DATASTORE_READ, PRIV_REMOTE_DATASTORE_BACKUP, PRIV_REMOTE_DATASTORE_PRUNE, PRIV_DATASTORE_READ, PRIV_REMOTE_DATASTORE_BACKUP, PRIV_REMOTE_DATASTORE_PRUNE,
REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA, SYNC_ENCRYPTED_ONLY_SCHEMA, REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA, TRANSFER_LAST_SCHEMA,
SYNC_VERIFIED_ONLY_SCHEMA, TRANSFER_LAST_SCHEMA,
}; };
use proxmox_rest_server::WorkerTask; use proxmox_rest_server::WorkerTask;
use proxmox_router::{Permission, Router, RpcEnvironment}; use proxmox_router::{Permission, Router, RpcEnvironment};
@ -92,14 +91,6 @@ fn check_push_privs(
schema: GROUP_FILTER_LIST_SCHEMA, schema: GROUP_FILTER_LIST_SCHEMA,
optional: true, optional: true,
}, },
"encrypted-only": {
schema: SYNC_ENCRYPTED_ONLY_SCHEMA,
optional: true,
},
"verified-only": {
schema: SYNC_VERIFIED_ONLY_SCHEMA,
optional: true,
},
limit: { limit: {
type: RateLimitConfig, type: RateLimitConfig,
flatten: true, flatten: true,
@ -129,8 +120,6 @@ async fn push(
remove_vanished: Option<bool>, remove_vanished: Option<bool>,
max_depth: Option<usize>, max_depth: Option<usize>,
group_filter: Option<Vec<GroupFilter>>, group_filter: Option<Vec<GroupFilter>>,
encrypted_only: Option<bool>,
verified_only: Option<bool>,
limit: RateLimitConfig, limit: RateLimitConfig,
transfer_last: Option<usize>, transfer_last: Option<usize>,
rpcenv: &mut dyn RpcEnvironment, rpcenv: &mut dyn RpcEnvironment,
@ -160,8 +149,6 @@ async fn push(
remove_vanished, remove_vanished,
max_depth, max_depth,
group_filter, group_filter,
encrypted_only,
verified_only,
limit, limit,
transfer_last, transfer_last,
) )


@ -1,6 +1,6 @@
//! Backup reader/restore protocol (HTTP2 upgrade) //! Backup reader/restore protocol (HTTP2 upgrade)
use anyhow::{bail, format_err, Context, Error}; use anyhow::{bail, format_err, Error};
use futures::*; use futures::*;
use hex::FromHex; use hex::FromHex;
use hyper::header::{self, HeaderValue, CONNECTION, UPGRADE}; use hyper::header::{self, HeaderValue, CONNECTION, UPGRADE};
@ -16,6 +16,7 @@ use proxmox_router::{
}; };
use proxmox_schema::{BooleanSchema, ObjectSchema}; use proxmox_schema::{BooleanSchema, ObjectSchema};
use proxmox_sortable_macro::sortable; use proxmox_sortable_macro::sortable;
use proxmox_sys::fs::lock_dir_noblock_shared;
use pbs_api_types::{ use pbs_api_types::{
ArchiveType, Authid, Operation, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, ArchiveType, Authid, Operation, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA,
@ -128,9 +129,11 @@ fn upgrade_to_backup_reader_protocol(
bail!("snapshot {} does not exist.", backup_dir.dir()); bail!("snapshot {} does not exist.", backup_dir.dir());
} }
let _guard = backup_dir let _guard = lock_dir_noblock_shared(
.lock_shared() &backup_dir.full_path(),
.with_context(|| format!("while reading snapshot '{backup_dir:?}'"))?; "snapshot",
"locked by another operation",
)?;
let path = datastore.base_path(); let path = datastore.base_path();
@ -180,12 +183,14 @@ fn upgrade_to_backup_reader_protocol(
let conn = hyper::upgrade::on(Request::from_parts(parts, req_body)).await?; let conn = hyper::upgrade::on(Request::from_parts(parts, req_body)).await?;
env2.debug("protocol upgrade done"); env2.debug("protocol upgrade done");
let mut http = hyper::server::conn::http2::Builder::new(ExecInheritLogContext); let mut http =
hyper::server::conn::Http::new().with_executor(ExecInheritLogContext);
http.http2_only(true);
// increase window size: todo - find optimal size // increase window size: todo - find optimal size
let window_size = 32 * 1024 * 1024; // max = (1 << 31) - 2 let window_size = 32 * 1024 * 1024; // max = (1 << 31) - 2
http.initial_stream_window_size(window_size); http.http2_initial_stream_window_size(window_size);
http.initial_connection_window_size(window_size); http.http2_initial_connection_window_size(window_size);
http.max_frame_size(4 * 1024 * 1024); http.http2_max_frame_size(4 * 1024 * 1024);
http.serve_connection(conn, service) http.serve_connection(conn, service)
.map_err(Error::from) .map_err(Error::from)


@ -387,10 +387,6 @@ fn backup_worker(
ns_magic, ns_magic,
)?; )?;
if let Some(threads) = setup.worker_threads {
pool_writer.set_read_thread_count(threads as usize);
}
let mut group_list = Vec::new(); let mut group_list = Vec::new();
let namespaces = datastore.recursive_iter_backup_ns_ok(root_namespace, setup.max_depth)?; let namespaces = datastore.recursive_iter_backup_ns_ok(root_namespace, setup.max_depth)?;
for ns in namespaces { for ns in namespaces {


@ -1,12 +1,13 @@
use pbs_config::BackupLockGuard;
use std::collections::HashSet; use std::collections::HashSet;
use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, Mutex}; use std::sync::{Arc, Mutex};
use std::time::Instant; use std::time::Instant;
use anyhow::{bail, Error}; use anyhow::{bail, Error};
use nix::dir::Dir;
use tracing::{error, info, warn}; use tracing::{error, info, warn};
use proxmox_sys::fs::lock_dir_noblock_shared;
use proxmox_worker_task::WorkerTaskContext; use proxmox_worker_task::WorkerTaskContext;
use pbs_api_types::{ use pbs_api_types::{
@ -306,8 +307,11 @@ pub fn verify_backup_dir(
return Ok(true); return Ok(true);
} }
let snap_lock = backup_dir.lock_shared(); let snap_lock = lock_dir_noblock_shared(
&backup_dir.full_path(),
"snapshot",
"locked by another operation",
);
match snap_lock { match snap_lock {
Ok(snap_lock) => { Ok(snap_lock) => {
verify_backup_dir_with_lock(verify_worker, backup_dir, upid, filter, snap_lock) verify_backup_dir_with_lock(verify_worker, backup_dir, upid, filter, snap_lock)
@ -330,7 +334,7 @@ pub fn verify_backup_dir_with_lock(
backup_dir: &BackupDir, backup_dir: &BackupDir,
upid: UPID, upid: UPID,
filter: Option<&dyn Fn(&BackupManifest) -> bool>, filter: Option<&dyn Fn(&BackupManifest) -> bool>,
_snap_lock: BackupLockGuard, _snap_lock: Dir,
) -> Result<bool, Error> { ) -> Result<bool, Error> {
let datastore_name = verify_worker.datastore.name(); let datastore_name = verify_worker.datastore.name();
let backup_dir_name = backup_dir.dir(); let backup_dir_name = backup_dir.dir();
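// The _snap_lock parameter change above keeps the same idea with a different
// guard type: the caller must hand over a live lock guard, so the verify
// helper cannot be invoked on an unlocked snapshot, and the lock is released
// only when the guard is dropped. A reduced sketch (SnapshotLockGuard stands
// in for pbs_config::BackupLockGuard):
struct SnapshotLockGuard;

fn lock_snapshot_shared(snapshot: &str) -> Result<SnapshotLockGuard, String> {
    // placeholder for backup_dir.lock_shared()
    println!("locking {snapshot} (shared)");
    Ok(SnapshotLockGuard)
}

fn verify_with_lock(snapshot: &str, _snap_lock: SnapshotLockGuard) -> bool {
    // the guard is dropped (and the lock released) when this function returns
    println!("verifying {snapshot}");
    true
}

fn main() {
    match lock_snapshot_shared("vm/100/2024-01-01T00:00:00Z") {
        Ok(guard) => {
            verify_with_lock("vm/100/2024-01-01T00:00:00Z", guard);
        }
        Err(err) => eprintln!("SKIPPED - {err}"),
    }
}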


@ -8,6 +8,7 @@ use hyper::{Body, StatusCode};
use tracing::level_filters::LevelFilter; use tracing::level_filters::LevelFilter;
use proxmox_lang::try_block; use proxmox_lang::try_block;
use proxmox_log::init_logger;
use proxmox_rest_server::{ApiConfig, RestServer}; use proxmox_rest_server::{ApiConfig, RestServer};
use proxmox_router::RpcEnvironmentType; use proxmox_router::RpcEnvironmentType;
use proxmox_sys::fs::CreateOptions; use proxmox_sys::fs::CreateOptions;
@ -40,15 +41,11 @@ fn get_index() -> Pin<Box<dyn Future<Output = Response<Body>> + Send>> {
} }
async fn run() -> Result<(), Error> { async fn run() -> Result<(), Error> {
proxmox_log::Logger::from_env("PBS_LOG", LevelFilter::INFO) init_logger("PBS_LOG", LevelFilter::INFO)?;
.journald_on_no_workertask()
.tasklog_pbs()
.init()?;
config::create_configdir()?; config::create_configdir()?;
config::update_self_signed_cert(false)?; config::update_self_signed_cert(false)?;
config::update_default_realms()?;
proxmox_backup::server::create_run_dir()?; proxmox_backup::server::create_run_dir()?;
proxmox_backup::server::create_state_dir()?; proxmox_backup::server::create_state_dir()?;
@ -89,21 +86,21 @@ async fn run() -> Result<(), Error> {
.default_api2_handler(&proxmox_backup::api2::ROUTER) .default_api2_handler(&proxmox_backup::api2::ROUTER)
.enable_access_log( .enable_access_log(
pbs_buildcfg::API_ACCESS_LOG_FN, pbs_buildcfg::API_ACCESS_LOG_FN,
Some(dir_opts), Some(dir_opts.clone()),
Some(file_opts), Some(file_opts.clone()),
&mut command_sock, &mut command_sock,
)? )?
.enable_auth_log( .enable_auth_log(
pbs_buildcfg::API_AUTH_LOG_FN, pbs_buildcfg::API_AUTH_LOG_FN,
Some(dir_opts), Some(dir_opts.clone()),
Some(file_opts), Some(file_opts.clone()),
&mut command_sock, &mut command_sock,
)?; )?;
let rest_server = RestServer::new(config); let rest_server = RestServer::new(config);
proxmox_rest_server::init_worker_tasks( proxmox_rest_server::init_worker_tasks(
pbs_buildcfg::PROXMOX_BACKUP_LOG_DIR_M!().into(), pbs_buildcfg::PROXMOX_BACKUP_LOG_DIR_M!().into(),
file_opts, file_opts.clone(),
)?; )?;
// http server future: // http server future:
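// The dropped .clone() calls in this hunk work because CreateOptions is
// apparently a Copy type in the newer proxmox-sys, so it can be passed to both
// log setups without explicit cloning. A reduced illustration with a stand-in
// struct (the real type lives in proxmox_sys::fs):
#[derive(Clone, Copy, Default)]
struct CreateOptions {
    perm: Option<u32>,
    owner: Option<u32>,
    group: Option<u32>,
}

fn create_path(_path: &str, dir_opts: Option<CreateOptions>, file_opts: Option<CreateOptions>) {
    let _ = (dir_opts, file_opts);
}

fn main() {
    let opts = CreateOptions::default();
    // no .clone(): Copy types are duplicated implicitly on each use
    create_path("/var/log/proxmox-backup/api/access.log", Some(opts), Some(opts));
    create_path("/var/log/proxmox-backup/api/auth.log", Some(opts), Some(opts));
}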


@ -1,3 +1,4 @@
use proxmox_log::init_cli_logger;
use proxmox_router::{ use proxmox_router::{
cli::{run_cli_command, CliCommandMap, CliEnvironment}, cli::{run_cli_command, CliCommandMap, CliEnvironment},
RpcEnvironment, RpcEnvironment,
@ -7,10 +8,7 @@ mod proxmox_backup_debug;
use proxmox_backup_debug::*; use proxmox_backup_debug::*;
fn main() { fn main() {
proxmox_log::Logger::from_env("PBS_LOG", proxmox_log::LevelFilter::INFO) init_cli_logger("PBS_LOG", proxmox_log::LevelFilter::INFO).expect("failed to initiate logger");
.stderr()
.init()
.expect("failed to initiate logger");
let cmd_def = CliCommandMap::new() let cmd_def = CliCommandMap::new()
.insert("inspect", inspect::inspect_commands()) .insert("inspect", inspect::inspect_commands())


@ -3,6 +3,7 @@ use std::io::{self, Write};
use std::str::FromStr; use std::str::FromStr;
use anyhow::{format_err, Error}; use anyhow::{format_err, Error};
use proxmox_log::init_cli_logger;
use serde_json::{json, Value}; use serde_json::{json, Value};
use proxmox_router::{cli::*, RpcEnvironment}; use proxmox_router::{cli::*, RpcEnvironment};
@ -13,9 +14,8 @@ use pbs_api_types::percent_encoding::percent_encode_component;
use pbs_api_types::{ use pbs_api_types::{
BackupNamespace, GroupFilter, RateLimitConfig, SyncDirection, SyncJobConfig, DATASTORE_SCHEMA, BackupNamespace, GroupFilter, RateLimitConfig, SyncDirection, SyncJobConfig, DATASTORE_SCHEMA,
GROUP_FILTER_LIST_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, NS_MAX_DEPTH_SCHEMA, GROUP_FILTER_LIST_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, NS_MAX_DEPTH_SCHEMA,
REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA, RESYNC_CORRUPT_SCHEMA, REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA, RESYNC_CORRUPT_SCHEMA, TRANSFER_LAST_SCHEMA,
SYNC_ENCRYPTED_ONLY_SCHEMA, SYNC_VERIFIED_ONLY_SCHEMA, TRANSFER_LAST_SCHEMA, UPID_SCHEMA, UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA,
VERIFICATION_OUTDATED_AFTER_SCHEMA,
}; };
use pbs_client::{display_task_log, view_task_result}; use pbs_client::{display_task_log, view_task_result};
use pbs_config::sync; use pbs_config::sync;
@ -308,8 +308,6 @@ async fn sync_datastore(
limit: RateLimitConfig, limit: RateLimitConfig,
transfer_last: Option<usize>, transfer_last: Option<usize>,
resync_corrupt: Option<bool>, resync_corrupt: Option<bool>,
encrypted_only: Option<bool>,
verified_only: Option<bool>,
param: Value, param: Value,
sync_direction: SyncDirection, sync_direction: SyncDirection,
) -> Result<Value, Error> { ) -> Result<Value, Error> {
@ -350,14 +348,6 @@ async fn sync_datastore(
args["resync-corrupt"] = Value::from(resync); args["resync-corrupt"] = Value::from(resync);
} }
if let Some(encrypted_only) = encrypted_only {
args["encrypted-only"] = Value::from(encrypted_only);
}
if let Some(verified_only) = verified_only {
args["verified-only"] = Value::from(verified_only);
}
let mut limit_json = json!(limit); let mut limit_json = json!(limit);
let limit_map = limit_json let limit_map = limit_json
.as_object_mut() .as_object_mut()
@ -424,14 +414,6 @@ async fn sync_datastore(
schema: RESYNC_CORRUPT_SCHEMA, schema: RESYNC_CORRUPT_SCHEMA,
optional: true, optional: true,
}, },
"encrypted-only": {
schema: SYNC_ENCRYPTED_ONLY_SCHEMA,
optional: true,
},
"verified-only": {
schema: SYNC_VERIFIED_ONLY_SCHEMA,
optional: true,
},
} }
} }
)] )]
@ -449,8 +431,6 @@ async fn pull_datastore(
limit: RateLimitConfig, limit: RateLimitConfig,
transfer_last: Option<usize>, transfer_last: Option<usize>,
resync_corrupt: Option<bool>, resync_corrupt: Option<bool>,
encrypted_only: Option<bool>,
verified_only: Option<bool>,
param: Value, param: Value,
) -> Result<Value, Error> { ) -> Result<Value, Error> {
sync_datastore( sync_datastore(
@ -465,8 +445,6 @@ async fn pull_datastore(
limit, limit,
transfer_last, transfer_last,
resync_corrupt, resync_corrupt,
encrypted_only,
verified_only,
param, param,
SyncDirection::Pull, SyncDirection::Pull,
) )
@ -517,14 +495,6 @@ async fn pull_datastore(
schema: TRANSFER_LAST_SCHEMA, schema: TRANSFER_LAST_SCHEMA,
optional: true, optional: true,
}, },
"encrypted-only": {
schema: SYNC_ENCRYPTED_ONLY_SCHEMA,
optional: true,
},
"verified-only": {
schema: SYNC_VERIFIED_ONLY_SCHEMA,
optional: true,
},
} }
} }
)] )]
@ -541,8 +511,6 @@ async fn push_datastore(
group_filter: Option<Vec<GroupFilter>>, group_filter: Option<Vec<GroupFilter>>,
limit: RateLimitConfig, limit: RateLimitConfig,
transfer_last: Option<usize>, transfer_last: Option<usize>,
encrypted_only: Option<bool>,
verified_only: Option<bool>,
param: Value, param: Value,
) -> Result<Value, Error> { ) -> Result<Value, Error> {
sync_datastore( sync_datastore(
@ -557,8 +525,6 @@ async fn push_datastore(
limit, limit,
transfer_last, transfer_last,
None, None,
encrypted_only,
verified_only,
param, param,
SyncDirection::Push, SyncDirection::Push,
) )
@ -652,12 +618,7 @@ async fn get_versions(verbose: bool, param: Value) -> Result<Value, Error> {
} }
async fn run() -> Result<(), Error> { async fn run() -> Result<(), Error> {
// We need to use the tasklog logger here as well, because the proxmox-backup-manager can and init_cli_logger("PBS_LOG", proxmox_log::LevelFilter::INFO)?;
// will directly execute workertasks.
proxmox_log::Logger::from_env("PBS_LOG", proxmox_log::LevelFilter::INFO)
.stderr_on_no_workertask()
.tasklog_pbs()
.init()?;
proxmox_backup::server::notifications::init()?; proxmox_backup::server::notifications::init()?;
let cmd_def = CliCommandMap::new() let cmd_def = CliCommandMap::new()
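// Reduced sketch of how the optional sync flags from this file end up in the
// API request body: only flags the user actually passed are inserted, so the
// server-side defaults apply otherwise. Parameter names follow the schemas in
// this diff; the function itself is illustrative, not the real CLI code.
use serde_json::{json, Value};

fn build_sync_args(
    remote_store: &str,
    encrypted_only: Option<bool>,
    verified_only: Option<bool>,
) -> Value {
    let mut args = json!({ "remote-store": remote_store });
    if let Some(encrypted_only) = encrypted_only {
        args["encrypted-only"] = Value::from(encrypted_only);
    }
    if let Some(verified_only) = verified_only {
        args["verified-only"] = Value::from(verified_only);
    }
    args
}

fn main() {
    let args = build_sync_args("store2", Some(true), None);
    println!("{args}");
}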


@ -16,6 +16,7 @@ use openssl::ssl::SslAcceptor;
use serde_json::{json, Value}; use serde_json::{json, Value};
use proxmox_lang::try_block; use proxmox_lang::try_block;
use proxmox_log::init_logger;
use proxmox_router::{RpcEnvironment, RpcEnvironmentType}; use proxmox_router::{RpcEnvironment, RpcEnvironmentType};
use proxmox_sys::fs::CreateOptions; use proxmox_sys::fs::CreateOptions;
use proxmox_sys::logrotate::LogRotate; use proxmox_sys::logrotate::LogRotate;
@ -178,10 +179,7 @@ async fn get_index_future(env: RestEnvironment, parts: Parts) -> Response<Body>
} }
async fn run() -> Result<(), Error> { async fn run() -> Result<(), Error> {
proxmox_log::Logger::from_env("PBS_LOG", LevelFilter::INFO) init_logger("PBS_LOG", LevelFilter::INFO)?;
.journald_on_no_workertask()
.tasklog_pbs()
.init()?;
proxmox_backup::auth_helpers::setup_auth_context(false); proxmox_backup::auth_helpers::setup_auth_context(false);
proxmox_backup::server::notifications::init()?; proxmox_backup::server::notifications::init()?;
@ -223,14 +221,14 @@ async fn run() -> Result<(), Error> {
config = config config = config
.enable_access_log( .enable_access_log(
pbs_buildcfg::API_ACCESS_LOG_FN, pbs_buildcfg::API_ACCESS_LOG_FN,
Some(dir_opts), Some(dir_opts.clone()),
Some(file_opts), Some(file_opts.clone()),
&mut command_sock, &mut command_sock,
)? )?
.enable_auth_log( .enable_auth_log(
pbs_buildcfg::API_AUTH_LOG_FN, pbs_buildcfg::API_AUTH_LOG_FN,
Some(dir_opts), Some(dir_opts.clone()),
Some(file_opts), Some(file_opts.clone()),
&mut command_sock, &mut command_sock,
)?; )?;
@ -238,7 +236,7 @@ async fn run() -> Result<(), Error> {
let redirector = Redirector::new(); let redirector = Redirector::new();
proxmox_rest_server::init_worker_tasks( proxmox_rest_server::init_worker_tasks(
pbs_buildcfg::PROXMOX_BACKUP_LOG_DIR_M!().into(), pbs_buildcfg::PROXMOX_BACKUP_LOG_DIR_M!().into(),
file_opts, file_opts.clone(),
)?; )?;
//openssl req -x509 -newkey rsa:4096 -keyout /etc/proxmox-backup/proxy.key -out /etc/proxmox-backup/proxy.pem -nodes //openssl req -x509 -newkey rsa:4096 -keyout /etc/proxmox-backup/proxy.key -out /etc/proxmox-backup/proxy.pem -nodes
@ -543,7 +541,7 @@ async fn schedule_datastore_garbage_collection() {
Some(event_str), Some(event_str),
false, false,
) { ) {
eprintln!("unable to start garbage collection job on datastore {store} - {err:#}"); eprintln!("unable to start garbage collection job on datastore {store} - {err}");
} }
} }
} }
@ -752,7 +750,7 @@ async fn schedule_task_log_rotate() {
true, true,
Some(max_files), Some(max_files),
max_days, max_days,
Some(options), Some(options.clone()),
)?; )?;
if has_rotated { if has_rotated {
@ -768,7 +766,7 @@ async fn schedule_task_log_rotate() {
pbs_buildcfg::API_ACCESS_LOG_FN, pbs_buildcfg::API_ACCESS_LOG_FN,
true, true,
Some(max_files), Some(max_files),
Some(options), Some(options.clone()),
)?; )?;
if logrotate.rotate(max_size)? { if logrotate.rotate(max_size)? {
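// The {err} -> {err:#} change in the garbage-collection log line above matters
// for anyhow errors: the alternate form prints the whole context chain rather
// than only the outermost message. A small demonstration (paths and messages
// are made up):
use anyhow::{Context, Result};

fn start_gc() -> Result<String> {
    std::fs::read_to_string("/no/such/lockfile")
        .context("unable to start garbage collection job on datastore store1")
}

fn main() {
    if let Err(err) = start_gc() {
        eprintln!("{err}");   // prints only the outer context message
        eprintln!("{err:#}"); // appends ": No such file or directory (os error 2)"
    }
}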


@ -95,7 +95,7 @@ async fn run(rpcenv: &mut dyn RpcEnvironment) -> Result<(), Error> {
.group(backup_user.gid); .group(backup_user.gid);
proxmox_rest_server::init_worker_tasks( proxmox_rest_server::init_worker_tasks(
pbs_buildcfg::PROXMOX_BACKUP_LOG_DIR_M!().into(), pbs_buildcfg::PROXMOX_BACKUP_LOG_DIR_M!().into(),
file_opts, file_opts.clone(),
)?; )?;
let mut command_sock = proxmox_daemon::command_socket::CommandSocket::new(backup_user.gid); let mut command_sock = proxmox_daemon::command_socket::CommandSocket::new(backup_user.gid);
@ -110,12 +110,13 @@ async fn run(rpcenv: &mut dyn RpcEnvironment) -> Result<(), Error> {
fn main() { fn main() {
proxmox_backup::tools::setup_safe_path_env(); proxmox_backup::tools::setup_safe_path_env();
// We need to use the tasklog layer here because we call a workertask. if let Err(err) = syslog::init(
proxmox_log::Logger::from_env("PBS_LOG", proxmox_log::LevelFilter::INFO) syslog::Facility::LOG_DAEMON,
.journald_on_no_workertask() log::LevelFilter::Info,
.tasklog_pbs() Some("proxmox-daily-update"),
.init() ) {
.expect("unable to initialize logger"); eprintln!("unable to initialize syslog - {err}");
}
let mut rpcenv = CliEnvironment::new(); let mut rpcenv = CliEnvironment::new();
rpcenv.set_auth_id(Some(String::from("root@pam"))); rpcenv.set_auth_id(Some(String::from("root@pam")));


@ -5,6 +5,7 @@ use serde_json::{json, Value};
use proxmox_human_byte::HumanByte; use proxmox_human_byte::HumanByte;
use proxmox_io::ReadExt; use proxmox_io::ReadExt;
use proxmox_log::init_cli_logger;
use proxmox_router::cli::*; use proxmox_router::cli::*;
use proxmox_router::RpcEnvironment; use proxmox_router::RpcEnvironment;
use proxmox_schema::api; use proxmox_schema::api;
@ -997,10 +998,7 @@ async fn catalog_media(mut param: Value) -> Result<(), Error> {
} }
fn main() { fn main() {
proxmox_log::Logger::from_env("PBS_LOG", proxmox_log::LevelFilter::INFO) init_cli_logger("PBS_LOG", proxmox_log::LevelFilter::INFO).expect("failed to initiate logger");
.stderr()
.init()
.expect("failed to initiate logger");
let cmd_def = CliCommandMap::new() let cmd_def = CliCommandMap::new()
.insert( .insert(


@ -409,8 +409,8 @@ fn inspect_device(device: String, param: Value) -> Result<(), Error> {
let default_options = proxmox_sys::fs::CreateOptions::new(); let default_options = proxmox_sys::fs::CreateOptions::new();
proxmox_sys::fs::create_path( proxmox_sys::fs::create_path(
&tmp_mount_path, &tmp_mount_path,
Some(default_options), Some(default_options.clone()),
Some(default_options), Some(default_options.clone()),
)?; )?;
let mut mount_cmd = std::process::Command::new("mount"); let mut mount_cmd = std::process::Command::new("mount");
mount_cmd.arg(device.clone()); mount_cmd.arg(device.clone());


@ -10,6 +10,7 @@ use pbs_tape::sg_tape::SgTape;
use proxmox_backup::tape::encryption_keys::load_key; use proxmox_backup::tape::encryption_keys::load_key;
use serde_json::Value; use serde_json::Value;
use proxmox_log::init_cli_logger;
use proxmox_router::{cli::*, RpcEnvironment}; use proxmox_router::{cli::*, RpcEnvironment};
use proxmox_schema::api; use proxmox_schema::api;
use proxmox_uuid::Uuid; use proxmox_uuid::Uuid;
@ -124,9 +125,7 @@ fn set_encryption(
} }
fn main() -> Result<(), Error> { fn main() -> Result<(), Error> {
proxmox_log::Logger::from_env("PBS_LOG", proxmox_log::LevelFilter::INFO) init_cli_logger("PBS_LOG", proxmox_log::LevelFilter::INFO)?;
.stderr()
.init()?;
// check if we are user root or backup // check if we are user root or backup
let backup_uid = pbs_config::backup_user()?.uid; let backup_uid = pbs_config::backup_user()?.uid;


@ -12,7 +12,6 @@ use std::path::Path;
use proxmox_lang::try_block; use proxmox_lang::try_block;
use pbs_api_types::{PamRealmConfig, PbsRealmConfig};
use pbs_buildcfg::{self, configdir}; use pbs_buildcfg::{self, configdir};
pub mod acme; pub mod acme;
@ -195,27 +194,3 @@ pub(crate) fn set_proxy_certificate(cert_pem: &[u8], key_pem: &[u8]) -> Result<(
Ok(()) Ok(())
} }
pub fn update_default_realms() -> Result<(), Error> {
let _lock = pbs_config::domains::lock_config()?;
let (mut domains, _) = pbs_config::domains::config()?;
if !pbs_config::domains::exists(&domains, "pam") {
domains.set_data(
"pam",
"pam",
PamRealmConfig {
// Setting it as default here is safe, because if we perform this
// migration, the user has not had any chance to set a custom default anyway.
default: Some(true),
..Default::default()
},
)?;
}
if !pbs_config::domains::exists(&domains, "pbs") {
domains.set_data("pbs", "pbs", PbsRealmConfig::default())?;
}
pbs_config::domains::save_config(&domains)
}


@ -174,11 +174,6 @@ pub enum Translation {
"description" : { "description" : {
optional: true, optional: true,
schema: MULTI_LINE_COMMENT_SCHEMA, schema: MULTI_LINE_COMMENT_SCHEMA,
},
"consent-text" : {
optional: true,
type: String,
max_length: 64 * 1024,
} }
}, },
)] )]

View File

@ -199,15 +199,14 @@ impl proxmox_tfa::api::OpenUserChallengeData for UserAccess {
fn open(&self, userid: &str) -> Result<Box<dyn UserChallengeAccess>, Error> { fn open(&self, userid: &str) -> Result<Box<dyn UserChallengeAccess>, Error> {
crate::server::create_run_dir()?; crate::server::create_run_dir()?;
let options = CreateOptions::new().perm(Mode::from_bits_truncate(0o0600)); let options = CreateOptions::new().perm(Mode::from_bits_truncate(0o0600));
proxmox_sys::fs::create_path(CHALLENGE_DATA_PATH, Some(options), Some(options)).map_err( proxmox_sys::fs::create_path(CHALLENGE_DATA_PATH, Some(options.clone()), Some(options))
|err| { .map_err(|err| {
format_err!( format_err!(
"failed to crate challenge data dir {:?}: {}", "failed to crate challenge data dir {:?}: {}",
CHALLENGE_DATA_PATH, CHALLENGE_DATA_PATH,
err err
) )
}, })?;
)?;
let path = challenge_data_path_str(userid); let path = challenge_data_path_str(userid);


@ -87,7 +87,7 @@ pub fn create_jobstate_dir() -> Result<(), Error> {
.owner(backup_user.uid) .owner(backup_user.uid)
.group(backup_user.gid); .group(backup_user.gid);
create_path(JOB_STATE_BASEDIR, Some(opts), Some(opts)) create_path(JOB_STATE_BASEDIR, Some(opts.clone()), Some(opts))
.map_err(|err: Error| format_err!("unable to create job state dir - {err}"))?; .map_err(|err: Error| format_err!("unable to create job state dir - {err}"))?;
Ok(()) Ok(())


@ -73,9 +73,9 @@ pub fn get_all_metrics(start_time: i64) -> Result<Vec<MetricDataPoint>, Error> {
let mut points = Vec::new(); let mut points = Vec::new();
for generation in cached_datapoints { for gen in cached_datapoints {
if generation.timestamp > start_time { if gen.timestamp > start_time {
points.extend(generation.datapoints); points.extend(gen.datapoints);
} }
} }
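// The rename above is presumably needed because `gen` is reserved as a keyword
// in the Rust 2024 edition, so it can no longer be used as a plain binding.
// Either rename the binding (as done here) or use a raw identifier:
fn main() {
    let generations = vec![10_i64, 20, 30];
    // fine on all editions:
    for generation in &generations {
        println!("{generation}");
    }
    // on edition 2024, `for gen in ...` would need the raw form `r#gen`
    for r#gen in &generations {
        println!("{}", r#gen);
    }
}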


@ -5,6 +5,7 @@ use std::time::{Duration, Instant};
use anyhow::Error; use anyhow::Error;
use const_format::concatcp; use const_format::concatcp;
use nix::unistd::Uid; use nix::unistd::Uid;
use serde_json::json;
use proxmox_notify::context::pbs::PBS_CONTEXT; use proxmox_notify::context::pbs::PBS_CONTEXT;
use proxmox_schema::ApiType; use proxmox_schema::ApiType;
@ -20,15 +21,6 @@ use proxmox_notify::{Endpoint, Notification, Severity};
const SPOOL_DIR: &str = concatcp!(pbs_buildcfg::PROXMOX_BACKUP_STATE_DIR, "/notifications"); const SPOOL_DIR: &str = concatcp!(pbs_buildcfg::PROXMOX_BACKUP_STATE_DIR, "/notifications");
mod template_data;
use template_data::{
AcmeErrTemplateData, CommonData, GcErrTemplateData, GcOkTemplateData,
PackageUpdatesTemplateData, PruneErrTemplateData, PruneOkTemplateData, SyncErrTemplateData,
SyncOkTemplateData, TapeBackupErrTemplateData, TapeBackupOkTemplateData, TapeLoadTemplateData,
VerifyErrTemplateData, VerifyOkTemplateData,
};
/// Initialize the notification system by setting context in proxmox_notify /// Initialize the notification system by setting context in proxmox_notify
pub fn init() -> Result<(), Error> { pub fn init() -> Result<(), Error> {
proxmox_notify::context::set_context(&PBS_CONTEXT); proxmox_notify::context::set_context(&PBS_CONTEXT);
@ -154,32 +146,38 @@ pub fn send_gc_status(
status: &GarbageCollectionStatus, status: &GarbageCollectionStatus,
result: &Result<(), Error>, result: &Result<(), Error>,
) -> Result<(), Error> { ) -> Result<(), Error> {
let (fqdn, port) = get_server_url();
let mut data = json!({
"datastore": datastore,
"fqdn": fqdn,
"port": port,
});
let (severity, template) = match result {
Ok(()) => {
let deduplication_factor = if status.disk_bytes > 0 {
(status.index_data_bytes as f64) / (status.disk_bytes as f64)
} else {
1.0
};
data["status"] = json!(status);
data["deduplication-factor"] = format!("{:.2}", deduplication_factor).into();
(Severity::Info, "gc-ok")
}
Err(err) => {
data["error"] = err.to_string().into();
(Severity::Error, "gc-err")
}
};
let metadata = HashMap::from([ let metadata = HashMap::from([
("datastore".into(), datastore.into()), ("datastore".into(), datastore.into()),
("hostname".into(), proxmox_sys::nodename().into()), ("hostname".into(), proxmox_sys::nodename().into()),
("type".into(), "gc".into()), ("type".into(), "gc".into()),
]); ]);
let notification = match result { let notification = Notification::from_template(severity, template, data, metadata);
Ok(()) => {
let template_data = GcOkTemplateData::new(datastore.to_string(), status);
Notification::from_template(
Severity::Info,
"gc-ok",
serde_json::to_value(template_data)?,
metadata,
)
}
Err(err) => {
let template_data = GcErrTemplateData::new(datastore.to_string(), format!("{err:#}"));
Notification::from_template(
Severity::Error,
"gc-err",
serde_json::to_value(template_data)?,
metadata,
)
}
};
let (email, notify, mode) = lookup_datastore_notify_settings(datastore); let (email, notify, mode) = lookup_datastore_notify_settings(datastore);
match mode { match mode {
@ -206,6 +204,25 @@ pub fn send_verify_status(
job: VerificationJobConfig, job: VerificationJobConfig,
result: &Result<Vec<String>, Error>, result: &Result<Vec<String>, Error>,
) -> Result<(), Error> { ) -> Result<(), Error> {
let (fqdn, port) = get_server_url();
let mut data = json!({
"job": job,
"fqdn": fqdn,
"port": port,
});
let (template, severity) = match result {
Ok(errors) if errors.is_empty() => ("verify-ok", Severity::Info),
Ok(errors) => {
data["errors"] = json!(errors);
("verify-err", Severity::Error)
}
Err(_) => {
// aborted job - do not send any notification
return Ok(());
}
};
let metadata = HashMap::from([ let metadata = HashMap::from([
("job-id".into(), job.id.clone()), ("job-id".into(), job.id.clone()),
("datastore".into(), job.store.clone()), ("datastore".into(), job.store.clone()),
@ -213,39 +230,7 @@ pub fn send_verify_status(
("type".into(), "verify".into()), ("type".into(), "verify".into()),
]); ]);
let notification = match result { let notification = Notification::from_template(severity, template, data, metadata);
Err(_) => {
// aborted job - do not send any notification
return Ok(());
}
Ok(errors) if errors.is_empty() => {
let template_data = VerifyOkTemplateData {
common: CommonData::new(),
datastore: job.store.clone(),
job_id: job.id.clone(),
};
Notification::from_template(
Severity::Info,
"verify-ok",
serde_json::to_value(template_data)?,
metadata,
)
}
Ok(errors) => {
let template_data = VerifyErrTemplateData {
common: CommonData::new(),
datastore: job.store.clone(),
job_id: job.id.clone(),
failed_snapshot_list: errors.clone(),
};
Notification::from_template(
Severity::Error,
"verify-err",
serde_json::to_value(template_data)?,
metadata,
)
}
};
let (email, notify, mode) = lookup_datastore_notify_settings(&job.store); let (email, notify, mode) = lookup_datastore_notify_settings(&job.store);
match mode { match mode {
@ -273,6 +258,22 @@ pub fn send_prune_status(
jobname: &str, jobname: &str,
result: &Result<(), Error>, result: &Result<(), Error>,
) -> Result<(), Error> { ) -> Result<(), Error> {
let (fqdn, port) = get_server_url();
let mut data = json!({
"jobname": jobname,
"store": store,
"fqdn": fqdn,
"port": port,
});
let (template, severity) = match result {
Ok(()) => ("prune-ok", Severity::Info),
Err(err) => {
data["error"] = err.to_string().into();
("prune-err", Severity::Error)
}
};
let metadata = HashMap::from([ let metadata = HashMap::from([
("job-id".into(), jobname.to_string()), ("job-id".into(), jobname.to_string()),
("datastore".into(), store.into()), ("datastore".into(), store.into()),
@ -280,37 +281,7 @@ pub fn send_prune_status(
("type".into(), "prune".into()), ("type".into(), "prune".into()),
]); ]);
let notification = match result { let notification = Notification::from_template(severity, template, data, metadata);
Ok(()) => {
let template_data = PruneOkTemplateData {
common: CommonData::new(),
datastore: store.to_string(),
job_id: jobname.to_string(),
};
Notification::from_template(
Severity::Info,
"prune-ok",
serde_json::to_value(template_data)?,
metadata,
)
}
Err(err) => {
let template_data = PruneErrTemplateData {
common: CommonData::new(),
datastore: store.to_string(),
job_id: jobname.to_string(),
error: format!("{err:#}"),
};
Notification::from_template(
Severity::Error,
"prune-err",
serde_json::to_value(template_data)?,
metadata,
)
}
};
let (email, notify, mode) = lookup_datastore_notify_settings(store); let (email, notify, mode) = lookup_datastore_notify_settings(store);
match mode { match mode {
@ -334,6 +305,21 @@ pub fn send_prune_status(
} }
pub fn send_sync_status(job: &SyncJobConfig, result: &Result<(), Error>) -> Result<(), Error> { pub fn send_sync_status(job: &SyncJobConfig, result: &Result<(), Error>) -> Result<(), Error> {
let (fqdn, port) = get_server_url();
let mut data = json!({
"job": job,
"fqdn": fqdn,
"port": port,
});
let (template, severity) = match result {
Ok(()) => ("sync-ok", Severity::Info),
Err(err) => {
data["error"] = err.to_string().into();
("sync-err", Severity::Error)
}
};
let metadata = HashMap::from([ let metadata = HashMap::from([
("job-id".into(), job.id.clone()), ("job-id".into(), job.id.clone()),
("datastore".into(), job.store.clone()), ("datastore".into(), job.store.clone()),
@ -341,39 +327,7 @@ pub fn send_sync_status(job: &SyncJobConfig, result: &Result<(), Error>) -> Resu
("type".into(), "sync".into()), ("type".into(), "sync".into()),
]); ]);
let notification = match result { let notification = Notification::from_template(severity, template, data, metadata);
Ok(()) => {
let template_data = SyncOkTemplateData {
common: CommonData::new(),
datastore: job.store.clone(),
job_id: job.id.clone(),
remote: job.remote.clone(),
remote_datastore: job.remote_store.clone(),
};
Notification::from_template(
Severity::Info,
"sync-ok",
serde_json::to_value(template_data)?,
metadata,
)
}
Err(err) => {
let template_data = SyncErrTemplateData {
common: CommonData::new(),
datastore: job.store.clone(),
job_id: job.id.clone(),
remote: job.remote.clone(),
remote_datastore: job.remote_store.clone(),
error: format!("{err:#}"),
};
Notification::from_template(
Severity::Error,
"sync-err",
serde_json::to_value(template_data)?,
metadata,
)
}
};
let (email, notify, mode) = lookup_datastore_notify_settings(&job.store); let (email, notify, mode) = lookup_datastore_notify_settings(&job.store);
match mode { match mode {
@ -402,6 +356,26 @@ pub fn send_tape_backup_status(
result: &Result<(), Error>, result: &Result<(), Error>,
summary: TapeBackupJobSummary, summary: TapeBackupJobSummary,
) -> Result<(), Error> { ) -> Result<(), Error> {
let (fqdn, port) = get_server_url();
let duration: proxmox_time::TimeSpan = summary.duration.into();
let mut data = json!({
"job": job,
"fqdn": fqdn,
"port": port,
"id": id,
"snapshot-list": summary.snapshot_list,
"used-tapes": summary.used_tapes,
"job-duration": duration.to_string(),
});
let (template, severity) = match result {
Ok(()) => ("tape-backup-ok", Severity::Info),
Err(err) => {
data["error"] = err.to_string().into();
("tape-backup-err", Severity::Error)
}
};
let mut metadata = HashMap::from([ let mut metadata = HashMap::from([
("datastore".into(), job.store.clone()), ("datastore".into(), job.store.clone()),
("media-pool".into(), job.pool.clone()), ("media-pool".into(), job.pool.clone()),
@ -413,49 +387,7 @@ pub fn send_tape_backup_status(
metadata.insert("job-id".into(), id.into()); metadata.insert("job-id".into(), id.into());
} }
let duration = summary.duration.as_secs(); let notification = Notification::from_template(severity, template, data, metadata);
let notification = match result {
Ok(()) => {
let template_data = TapeBackupOkTemplateData {
common: CommonData::new(),
datastore: job.store.clone(),
job_id: id.map(|id| id.into()),
job_duration: duration,
tape_pool: job.pool.clone(),
tape_drive: job.drive.clone(),
used_tapes_list: summary.used_tapes.unwrap_or_default(),
snapshot_list: summary.snapshot_list,
};
Notification::from_template(
Severity::Info,
"tape-backup-ok",
serde_json::to_value(template_data)?,
metadata,
)
}
Err(err) => {
let template_data = TapeBackupErrTemplateData {
common: CommonData::new(),
datastore: job.store.clone(),
job_id: id.map(|id| id.into()),
job_duration: duration,
tape_pool: job.pool.clone(),
tape_drive: job.drive.clone(),
used_tapes_list: summary.used_tapes.unwrap_or_default(),
snapshot_list: summary.snapshot_list,
error: format!("{err:#}"),
};
Notification::from_template(
Severity::Error,
"tape-backup-err",
serde_json::to_value(template_data)?,
metadata,
)
}
};
let mode = TapeNotificationMode::from(job); let mode = TapeNotificationMode::from(job);
@ -483,28 +415,21 @@ pub fn send_load_media_notification(
label_text: &str, label_text: &str,
reason: Option<String>, reason: Option<String>,
) -> Result<(), Error> { ) -> Result<(), Error> {
let device_type = if changer { "changer" } else { "drive" };
let data = json!({
"device-type": device_type,
"device": device,
"label-text": label_text,
"reason": reason,
"is-changer": changer,
});
let metadata = HashMap::from([ let metadata = HashMap::from([
("hostname".into(), proxmox_sys::nodename().into()), ("hostname".into(), proxmox_sys::nodename().into()),
("type".into(), "tape-load".into()), ("type".into(), "tape-load".into()),
]); ]);
let notification = Notification::from_template(Severity::Notice, "tape-load", data, metadata);
let device_type = if changer { "changer" } else { "drive" };
let template_data = TapeLoadTemplateData {
common: CommonData::new(),
load_reason: reason,
tape_drive: device.into(),
drive_type: device_type.into(),
drive_is_changer: changer,
tape_label: label_text.into(),
};
let notification = Notification::from_template(
Severity::Notice,
"tape-load",
serde_json::to_value(template_data)?,
metadata,
);
match mode { match mode {
TapeNotificationMode::LegacySendmail { notify_user } => { TapeNotificationMode::LegacySendmail { notify_user } => {
@ -522,22 +447,42 @@ pub fn send_load_media_notification(
Ok(()) Ok(())
} }
fn get_server_url() -> (String, usize) {
// user will surely request that they can change this
let nodename = proxmox_sys::nodename();
let mut fqdn = nodename.to_owned();
if let Ok(resolv_conf) = crate::api2::node::dns::read_etc_resolv_conf() {
if let Some(search) = resolv_conf["search"].as_str() {
fqdn.push('.');
fqdn.push_str(search);
}
}
let port = 8007;
(fqdn, port)
}
pub fn send_updates_available(updates: &[&APTUpdateInfo]) -> Result<(), Error> { pub fn send_updates_available(updates: &[&APTUpdateInfo]) -> Result<(), Error> {
let (fqdn, port) = get_server_url();
let hostname = proxmox_sys::nodename().to_string(); let hostname = proxmox_sys::nodename().to_string();
let data = json!({
"fqdn": fqdn,
"hostname": &hostname,
"port": port,
"updates": updates,
});
let metadata = HashMap::from([ let metadata = HashMap::from([
("hostname".into(), hostname), ("hostname".into(), hostname),
("type".into(), "package-updates".into()), ("type".into(), "package-updates".into()),
]); ]);
let template_data = PackageUpdatesTemplateData::new(updates); let notification =
Notification::from_template(Severity::Info, "package-updates", data, metadata);
let notification = Notification::from_template(
Severity::Info,
"package-updates",
serde_json::to_value(template_data)?,
metadata,
);
send_notification(notification)?; send_notification(notification)?;
Ok(()) Ok(())
@ -546,26 +491,24 @@ pub fn send_updates_available(updates: &[&APTUpdateInfo]) -> Result<(), Error> {
/// send email on certificate renewal failure. /// send email on certificate renewal failure.
pub fn send_certificate_renewal_mail(result: &Result<(), Error>) -> Result<(), Error> { pub fn send_certificate_renewal_mail(result: &Result<(), Error>) -> Result<(), Error> {
let error: String = match result { let error: String = match result {
Err(e) => format!("{e:#}"), Err(e) => e.to_string(),
_ => return Ok(()), _ => return Ok(()),
}; };
let (fqdn, port) = get_server_url();
let data = json!({
"fqdn": fqdn,
"port": port,
"error": error,
});
let metadata = HashMap::from([ let metadata = HashMap::from([
("hostname".into(), proxmox_sys::nodename().into()), ("hostname".into(), proxmox_sys::nodename().into()),
("type".into(), "acme".into()), ("type".into(), "acme".into()),
]); ]);
let template_data = AcmeErrTemplateData { let notification = Notification::from_template(Severity::Info, "acme-err", data, metadata);
common: CommonData::new(),
error,
};
let notification = Notification::from_template(
Severity::Info,
"acme-err",
serde_json::to_value(template_data)?,
metadata,
);
send_notification(notification)?; send_notification(notification)?;
Ok(()) Ok(())
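// Reduced sketch of the typed template-data approach that replaces the ad-hoc
// json!() payloads in this file: shared fields are #[serde(flatten)]ed into
// every payload and serde_json::to_value() produces the data handed to
// Notification::from_template(). The structs here are trimmed stand-ins for
// the ones in template_data.rs below.
use serde::Serialize;

#[derive(Serialize)]
#[serde(rename_all = "kebab-case")]
struct CommonData {
    hostname: String,
    fqdn: String,
    base_url: String,
}

#[derive(Serialize)]
#[serde(rename_all = "kebab-case")]
struct PruneOkTemplateData {
    #[serde(flatten)]
    common: CommonData,
    datastore: String,
    job_id: String,
}

fn main() -> Result<(), serde_json::Error> {
    let data = PruneOkTemplateData {
        common: CommonData {
            hostname: "pbs1".into(),
            fqdn: "pbs1.example.com".into(),
            base_url: "https://pbs1.example.com:8007".into(),
        },
        datastore: "store1".into(),
        job_id: "daily-prune".into(),
    };
    // this value is what would be passed as the notification's template data
    println!("{}", serde_json::to_value(&data)?);
    Ok(())
}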


@ -1,344 +0,0 @@
use pbs_api_types::{APTUpdateInfo, GarbageCollectionStatus};
use serde::Serialize;
// NOTE: For some of these types, the `XyzOkTemplateData` and `XyzErrTemplateData`
// types are almost identical except for the `error` member.
// While at first glance it might make sense
// to consolidate the two and make `error` an `Option`, I would argue
// that it is actually quite nice to have a single, distinct type for
// each template. This makes it 100% clear which params are accessible
// for every single template, at the cost of some boilerplate code.
/// Template data which should be available in *all* notifications.
/// The fields of this struct will be flattened into the individual
/// *TemplateData structs.
#[derive(Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct CommonData {
/// The hostname of the PBS host.
pub hostname: String,
/// The FQDN of the PBS host.
pub fqdn: String,
/// The base URL for building links to the web interface.
pub base_url: String,
}
impl CommonData {
pub fn new() -> CommonData {
let nodename = proxmox_sys::nodename();
let mut fqdn = nodename.to_owned();
if let Ok(resolv_conf) = crate::api2::node::dns::read_etc_resolv_conf() {
if let Some(search) = resolv_conf["search"].as_str() {
fqdn.push('.');
fqdn.push_str(search);
}
}
// TODO: Some users might want to be able to override this.
let base_url = format!("https://{fqdn}:8007");
CommonData {
hostname: nodename.into(),
fqdn,
base_url,
}
}
}
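Because every *TemplateData struct embeds CommonData via #[serde(flatten)] and uses rename_all = "kebab-case", the rendered template sees hostname, fqdn and base-url as top-level keys next to the template-specific fields. A minimal, self-contained sketch of that serialization (the struct and values are illustrative, not the real notification code):

use serde::Serialize;
use serde_json::json;

#[derive(Serialize)]
#[serde(rename_all = "kebab-case")]
struct Common {
    hostname: String,
    fqdn: String,
    base_url: String,
}

#[derive(Serialize)]
#[serde(rename_all = "kebab-case")]
struct AcmeErr {
    #[serde(flatten)]
    common: Common,
    error: String,
}

fn main() -> Result<(), serde_json::Error> {
    let data = AcmeErr {
        common: Common {
            hostname: "pbs1".into(),
            fqdn: "pbs1.example.com".into(),
            base_url: "https://pbs1.example.com:8007".into(),
        },
        error: "order failed".into(),
    };
    // The flattened kebab-case keys are what templates reference, e.g. {{base-url}}.
    assert_eq!(
        serde_json::to_value(&data)?,
        json!({
            "hostname": "pbs1",
            "fqdn": "pbs1.example.com",
            "base-url": "https://pbs1.example.com:8007",
            "error": "order failed",
        })
    );
    Ok(())
}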
/// Template data for the gc-ok template.
#[derive(Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct GcOkTemplateData {
/// Common properties.
#[serde(flatten)]
pub common: CommonData,
/// The datastore.
pub datastore: String,
/// The task's UPID.
pub upid: Option<String>,
/// Number of processed index files.
pub index_file_count: usize,
/// Sum of bytes referred by index files.
pub index_data_bytes: u64,
/// Bytes used on disk.
pub disk_bytes: u64,
/// Chunks used on disk.
pub disk_chunks: usize,
/// Sum of removed bytes.
pub removed_bytes: u64,
/// Number of removed chunks.
pub removed_chunks: usize,
/// Sum of pending bytes (pending removal - kept for safety).
pub pending_bytes: u64,
/// Number of pending chunks (pending removal - kept for safety).
pub pending_chunks: usize,
/// Number of chunks marked as .bad by verify that have been removed by GC.
pub removed_bad: usize,
/// Number of chunks still marked as .bad after garbage collection.
pub still_bad: usize,
/// Factor of deduplication.
pub deduplication_factor: String,
}
impl GcOkTemplateData {
/// Create a new instance.
pub fn new(datastore: String, status: &GarbageCollectionStatus) -> Self {
let deduplication_factor = if status.disk_bytes > 0 {
(status.index_data_bytes as f64) / (status.disk_bytes as f64)
} else {
1.0
};
let deduplication_factor = format!("{:.2}", deduplication_factor);
Self {
common: CommonData::new(),
datastore,
upid: status.upid.clone(),
index_file_count: status.index_file_count,
index_data_bytes: status.index_data_bytes,
disk_bytes: status.disk_bytes,
disk_chunks: status.disk_chunks,
removed_bytes: status.removed_bytes,
removed_chunks: status.removed_chunks,
pending_bytes: status.pending_bytes,
pending_chunks: status.pending_chunks,
removed_bad: status.removed_bad,
still_bad: status.still_bad,
deduplication_factor,
}
}
}
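The deduplication factor is simply the bytes referenced by indexes divided by the bytes actually stored on disk, falling back to 1.0 for an empty datastore and formatted with two decimals. A small standalone restatement of the computation above, with a worked example:

fn dedup_factor(index_data_bytes: u64, disk_bytes: u64) -> String {
    let factor = if disk_bytes > 0 {
        index_data_bytes as f64 / disk_bytes as f64
    } else {
        1.0
    };
    format!("{factor:.2}")
}

fn main() {
    // 100 GiB referenced by index files, 40 GiB of unique chunks on disk.
    assert_eq!(dedup_factor(100 * 1024u64.pow(3), 40 * 1024u64.pow(3)), "2.50");
    // Empty datastore: avoid dividing by zero.
    assert_eq!(dedup_factor(0, 0), "1.00");
}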
/// Template data for the gc-err template.
#[derive(Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct GcErrTemplateData {
/// Common properties.
#[serde(flatten)]
pub common: CommonData,
/// The datastore.
pub datastore: String,
/// The error that occurred during the GC job.
pub error: String,
}
impl GcErrTemplateData {
/// Create a new instance.
pub fn new(datastore: String, error: String) -> Self {
Self {
common: CommonData::new(),
datastore,
error,
}
}
}
/// Template data for the acme-err template.
#[derive(Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct AcmeErrTemplateData {
/// Common properties.
#[serde(flatten)]
pub common: CommonData,
/// The error that occurred when trying to request the certificate.
pub error: String,
}
#[derive(Serialize)]
#[serde(rename_all = "kebab-case")]
/// A single package which can be upgraded.
pub struct UpgradablePackage {
/// The name of the package.
package_name: String,
/// The new version which can be installed.
available_version: String,
/// The currently installed version.
installed_version: String,
}
/// Template data for the package-updates template.
#[derive(Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct PackageUpdatesTemplateData {
/// Common properties.
#[serde(flatten)]
pub common: CommonData,
pub available_updates: Vec<UpgradablePackage>,
}
impl PackageUpdatesTemplateData {
/// Create a new instance.
pub fn new(updates: &[&APTUpdateInfo]) -> Self {
Self {
common: CommonData::new(),
available_updates: updates
.iter()
.map(|info| UpgradablePackage {
package_name: info.package.clone(),
available_version: info.version.clone(),
installed_version: info.old_version.clone(),
})
.collect(),
}
}
}
/// Template data for the prune-ok template.
#[derive(Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct PruneOkTemplateData {
/// Common properties.
#[serde(flatten)]
pub common: CommonData,
/// The datastore.
pub datastore: String,
/// The ID of the job.
pub job_id: String,
}
/// Template data for the prune-err template.
#[derive(Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct PruneErrTemplateData {
/// Common properties.
#[serde(flatten)]
pub common: CommonData,
/// The datastore.
pub datastore: String,
/// The ID of the job.
pub job_id: String,
/// The error that occurred during the prune job.
pub error: String,
}
/// Template data for the sync-ok template.
#[derive(Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct SyncOkTemplateData {
/// Common properties.
#[serde(flatten)]
pub common: CommonData,
/// The datastore.
pub datastore: String,
/// The ID of the job.
pub job_id: String,
/// The remote.
pub remote: Option<String>,
/// The remote datastore we synced to/from.
pub remote_datastore: String,
}
/// Template data for the sync-err template.
#[derive(Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct SyncErrTemplateData {
/// Common properties.
#[serde(flatten)]
pub common: CommonData,
/// The datastore.
pub datastore: String,
/// The ID of the job.
pub job_id: String,
/// The remote.
pub remote: Option<String>,
/// The remote datastore we synced to/from.
pub remote_datastore: String,
/// The error that occurred during the sync job.
pub error: String,
}
/// Template data for the tape-backup-ok template.
#[derive(Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct TapeBackupOkTemplateData {
/// Common properties.
#[serde(flatten)]
pub common: CommonData,
/// The datastore that was backed up.
pub datastore: String,
/// The optional job id for this tape backup job.
pub job_id: Option<String>,
/// The total duration of the backup job in seconds.
pub job_duration: u64,
/// The tape pool.
pub tape_pool: String,
/// The name of the tape drive.
pub tape_drive: String,
/// The list of tapes which were used in this backup job.
pub used_tapes_list: Vec<String>,
/// The list of snapshots which were backed up.
pub snapshot_list: Vec<String>,
}
/// Template data for the tape-backup-err template.
#[derive(Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct TapeBackupErrTemplateData {
/// Common properties.
#[serde(flatten)]
pub common: CommonData,
/// The datastore that was backed up.
pub datastore: String,
/// The optional job id for this tape backup job.
pub job_id: Option<String>,
/// The total duration of the backup job in seconds.
pub job_duration: u64,
/// The tape pool.
pub tape_pool: String,
/// The name of the tape drive.
pub tape_drive: String,
/// The list of tapes which were used in this backup job.
pub used_tapes_list: Vec<String>,
/// The list of snapshots which were backed up.
pub snapshot_list: Vec<String>,
/// The error that happened during the backup job.
pub error: String,
}
/// Template data for the tape-load template.
#[derive(Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct TapeLoadTemplateData {
/// Common properties.
#[serde(flatten)]
pub common: CommonData,
/// The reason why the tape must be loaded.
pub load_reason: Option<String>,
/// The tape drive.
pub tape_drive: String,
/// The type of the drive (changer/drive).
pub drive_type: String,
/// The drive is a tape changer.
pub drive_is_changer: bool,
/// The label of the tape.
pub tape_label: String,
}
/// Template data for the verify-ok template.
#[derive(Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct VerifyOkTemplateData {
/// Common properties.
#[serde(flatten)]
pub common: CommonData,
/// The datastore.
pub datastore: String,
/// The ID of the job.
pub job_id: String,
}
/// Template data for the verify-err template.
#[derive(Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct VerifyErrTemplateData {
/// Common properties.
#[serde(flatten)]
pub common: CommonData,
/// The datastore.
pub datastore: String,
/// The ID of the job.
pub job_id: String,
/// The list of snapshots that failed to verify.
pub failed_snapshot_list: Vec<String>,
}

View File

@ -28,8 +28,8 @@ use pbs_datastore::{check_backup_owner, DataStore, StoreProgress};
use pbs_tools::sha::sha256; use pbs_tools::sha::sha256;
use super::sync::{ use super::sync::{
check_namespace_depth_limit, ignore_not_verified_or_encrypted, LocalSource, RemoteSource, check_namespace_depth_limit, LocalSource, RemoteSource, RemovedVanishedStats, SkipInfo,
RemovedVanishedStats, SkipInfo, SkipReason, SyncSource, SyncSourceReader, SyncStats, SkipReason, SyncSource, SyncSourceReader, SyncStats,
}; };
use crate::backup::{check_ns_modification_privs, check_ns_privs}; use crate::backup::{check_ns_modification_privs, check_ns_privs};
use crate::tools::parallel_handler::ParallelHandler; use crate::tools::parallel_handler::ParallelHandler;
@ -55,10 +55,6 @@ pub(crate) struct PullParameters {
group_filter: Vec<GroupFilter>, group_filter: Vec<GroupFilter>,
/// How many snapshots should be transferred at most (taking the newest N snapshots) /// How many snapshots should be transferred at most (taking the newest N snapshots)
transfer_last: Option<usize>, transfer_last: Option<usize>,
/// Only sync encrypted backup snapshots
encrypted_only: bool,
/// Only sync verified backup snapshots
verified_only: bool,
/// Whether to re-sync corrupted snapshots /// Whether to re-sync corrupted snapshots
resync_corrupt: bool, resync_corrupt: bool,
} }
@ -78,8 +74,6 @@ impl PullParameters {
group_filter: Option<Vec<GroupFilter>>, group_filter: Option<Vec<GroupFilter>>,
limit: RateLimitConfig, limit: RateLimitConfig,
transfer_last: Option<usize>, transfer_last: Option<usize>,
encrypted_only: Option<bool>,
verified_only: Option<bool>,
resync_corrupt: Option<bool>, resync_corrupt: Option<bool>,
) -> Result<Self, Error> { ) -> Result<Self, Error> {
if let Some(max_depth) = max_depth { if let Some(max_depth) = max_depth {
@ -88,8 +82,6 @@ impl PullParameters {
}; };
let remove_vanished = remove_vanished.unwrap_or(false); let remove_vanished = remove_vanished.unwrap_or(false);
let resync_corrupt = resync_corrupt.unwrap_or(false); let resync_corrupt = resync_corrupt.unwrap_or(false);
let encrypted_only = encrypted_only.unwrap_or(false);
let verified_only = verified_only.unwrap_or(false);
let source: Arc<dyn SyncSource> = if let Some(remote) = remote { let source: Arc<dyn SyncSource> = if let Some(remote) = remote {
let (remote_config, _digest) = pbs_config::remote::config()?; let (remote_config, _digest) = pbs_config::remote::config()?;
@ -128,8 +120,6 @@ impl PullParameters {
max_depth, max_depth,
group_filter, group_filter,
transfer_last, transfer_last,
encrypted_only,
verified_only,
resync_corrupt, resync_corrupt,
}) })
} }
@ -344,21 +334,11 @@ async fn pull_single_archive<'a>(
/// -- if not, pull it from the remote /// -- if not, pull it from the remote
/// - Download log if not already existing /// - Download log if not already existing
async fn pull_snapshot<'a>( async fn pull_snapshot<'a>(
params: &PullParameters,
reader: Arc<dyn SyncSourceReader + 'a>, reader: Arc<dyn SyncSourceReader + 'a>,
snapshot: &'a pbs_datastore::BackupDir, snapshot: &'a pbs_datastore::BackupDir,
downloaded_chunks: Arc<Mutex<HashSet<[u8; 32]>>>, downloaded_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
corrupt: bool, corrupt: bool,
is_new: bool,
) -> Result<SyncStats, Error> { ) -> Result<SyncStats, Error> {
if is_new {
info!("sync snapshot {}", snapshot.dir());
} else if corrupt {
info!("re-sync snapshot {} due to corruption", snapshot.dir());
} else {
info!("re-sync snapshot {}", snapshot.dir());
}
let mut sync_stats = SyncStats::default(); let mut sync_stats = SyncStats::default();
let mut manifest_name = snapshot.full_path(); let mut manifest_name = snapshot.full_path();
manifest_name.push(MANIFEST_BLOB_NAME.as_ref()); manifest_name.push(MANIFEST_BLOB_NAME.as_ref());
@ -403,22 +383,6 @@ async fn pull_snapshot<'a>(
let manifest = BackupManifest::try_from(tmp_manifest_blob)?; let manifest = BackupManifest::try_from(tmp_manifest_blob)?;
if ignore_not_verified_or_encrypted(
&manifest,
snapshot.dir(),
params.verified_only,
params.encrypted_only,
) {
if is_new {
let path = snapshot.full_path();
// safe to remove as locked by caller
std::fs::remove_dir_all(&path).map_err(|err| {
format_err!("removing temporary backup snapshot {path:?} failed - {err}")
})?;
}
return Ok(sync_stats);
}
for item in manifest.files() { for item in manifest.files() {
let mut path = snapshot.full_path(); let mut path = snapshot.full_path();
path.push(&item.filename); path.push(&item.filename);
@ -483,7 +447,6 @@ async fn pull_snapshot<'a>(
/// The `reader` is configured to read from the source backup directory, while the /// The `reader` is configured to read from the source backup directory, while the
/// `snapshot` is pointing to the local datastore and target namespace. /// `snapshot` is pointing to the local datastore and target namespace.
async fn pull_snapshot_from<'a>( async fn pull_snapshot_from<'a>(
params: &PullParameters,
reader: Arc<dyn SyncSourceReader + 'a>, reader: Arc<dyn SyncSourceReader + 'a>,
snapshot: &'a pbs_datastore::BackupDir, snapshot: &'a pbs_datastore::BackupDir,
downloaded_chunks: Arc<Mutex<HashSet<[u8; 32]>>>, downloaded_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
@ -493,11 +456,11 @@ async fn pull_snapshot_from<'a>(
.datastore() .datastore()
.create_locked_backup_dir(snapshot.backup_ns(), snapshot.as_ref())?; .create_locked_backup_dir(snapshot.backup_ns(), snapshot.as_ref())?;
let result = pull_snapshot(params, reader, snapshot, downloaded_chunks, corrupt, is_new).await; let sync_stats = if is_new {
info!("sync snapshot {}", snapshot.dir());
if is_new { // this snapshot is new, so it can never be corrupt
// Cleanup directory on error if snapshot was not present before match pull_snapshot(reader, snapshot, downloaded_chunks, false).await {
match result {
Err(err) => { Err(err) => {
if let Err(cleanup_err) = snapshot.datastore().remove_backup_dir( if let Err(cleanup_err) = snapshot.datastore().remove_backup_dir(
snapshot.backup_ns(), snapshot.backup_ns(),
@ -508,11 +471,21 @@ async fn pull_snapshot_from<'a>(
} }
return Err(err); return Err(err);
} }
Ok(_) => info!("sync snapshot {} done", snapshot.dir()), Ok(sync_stats) => {
info!("sync snapshot {} done", snapshot.dir());
sync_stats
}
} }
} } else {
if corrupt {
info!("re-sync snapshot {} due to corruption", snapshot.dir());
} else {
info!("re-sync snapshot {}", snapshot.dir());
}
pull_snapshot(reader, snapshot, downloaded_chunks, corrupt).await?
};
result Ok(sync_stats)
} }
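To make the flattened hunks above easier to follow: on the master side the log line moves into pull_snapshot (which states whether this is a new sync, a plain re-sync, or a re-sync due to corruption), while pull_snapshot_from only keeps the cleanup of the freshly created directory when pulling a new snapshot fails. A rough sketch of that control flow, with placeholder types instead of the real reader, snapshot and stats types:

struct Stats;

fn pull_snapshot(name: &str, is_new: bool, corrupt: bool) -> Result<Stats, String> {
    if is_new {
        println!("sync snapshot {name}");
    } else if corrupt {
        println!("re-sync snapshot {name} due to corruption");
    } else {
        println!("re-sync snapshot {name}");
    }
    // ... fetch manifest, chunks and client log here ...
    Ok(Stats)
}

fn pull_snapshot_from(name: &str, is_new: bool, corrupt: bool) -> Result<Stats, String> {
    let result = pull_snapshot(name, is_new, corrupt);
    if is_new {
        match &result {
            // Only a snapshot directory created by this run is removed again on failure.
            Err(_) => println!("cleanup unsuccessful synced snapshot {name}"),
            Ok(_) => println!("sync snapshot {name} done"),
        }
    }
    result
}

fn main() {
    let _ = pull_snapshot_from("vm/100/2025-01-01T00:00:00Z", true, false);
}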
/// Pulls a group according to `params`. /// Pulls a group according to `params`.
@ -639,14 +612,8 @@ async fn pull_group(
.source .source
.reader(source_namespace, &from_snapshot) .reader(source_namespace, &from_snapshot)
.await?; .await?;
let result = pull_snapshot_from( let result =
params, pull_snapshot_from(reader, &to_snapshot, downloaded_chunks.clone(), corrupt).await;
reader,
&to_snapshot,
downloaded_chunks.clone(),
corrupt,
)
.await;
progress.done_snapshots = pos as u64 + 1; progress.done_snapshots = pos as u64 + 1;
info!("percentage done: {progress}"); info!("percentage done: {progress}");
@ -969,7 +936,7 @@ pub(crate) async fn pull_ns(
match pull_group(params, namespace, &group, &mut progress).await { match pull_group(params, namespace, &group, &mut progress).await {
Ok(stats) => sync_stats.add(stats), Ok(stats) => sync_stats.add(stats),
Err(err) => { Err(err) => {
info!("sync group {} failed - {err:#}", &group); info!("sync group {} failed - {err}", &group);
errors = true; // do not stop here, instead continue errors = true; // do not stop here, instead continue
} }
} }

View File

@ -26,8 +26,8 @@ use pbs_datastore::read_chunk::AsyncReadChunk;
use pbs_datastore::{DataStore, StoreProgress}; use pbs_datastore::{DataStore, StoreProgress};
use super::sync::{ use super::sync::{
check_namespace_depth_limit, ignore_not_verified_or_encrypted, LocalSource, check_namespace_depth_limit, LocalSource, RemovedVanishedStats, SkipInfo, SkipReason,
RemovedVanishedStats, SkipInfo, SkipReason, SyncSource, SyncStats, SyncSource, SyncStats,
}; };
use crate::api2::config::remote; use crate::api2::config::remote;
@ -73,10 +73,6 @@ pub(crate) struct PushParameters {
max_depth: Option<usize>, max_depth: Option<usize>,
/// Filters for reducing the push scope /// Filters for reducing the push scope
group_filter: Vec<GroupFilter>, group_filter: Vec<GroupFilter>,
/// Synchronize only encrypted backup snapshots
encrypted_only: bool,
/// Synchronize only verified backup snapshots
verified_only: bool,
/// How many snapshots should be transferred at most (taking the newest N snapshots) /// How many snapshots should be transferred at most (taking the newest N snapshots)
transfer_last: Option<usize>, transfer_last: Option<usize>,
} }
@ -94,8 +90,6 @@ impl PushParameters {
remove_vanished: Option<bool>, remove_vanished: Option<bool>,
max_depth: Option<usize>, max_depth: Option<usize>,
group_filter: Option<Vec<GroupFilter>>, group_filter: Option<Vec<GroupFilter>>,
encrypted_only: Option<bool>,
verified_only: Option<bool>,
limit: RateLimitConfig, limit: RateLimitConfig,
transfer_last: Option<usize>, transfer_last: Option<usize>,
) -> Result<Self, Error> { ) -> Result<Self, Error> {
@ -104,8 +98,6 @@ impl PushParameters {
remote_ns.check_max_depth(max_depth)?; remote_ns.check_max_depth(max_depth)?;
}; };
let remove_vanished = remove_vanished.unwrap_or(false); let remove_vanished = remove_vanished.unwrap_or(false);
let encrypted_only = encrypted_only.unwrap_or(false);
let verified_only = verified_only.unwrap_or(false);
let store = DataStore::lookup_datastore(store, Some(Operation::Read))?; let store = DataStore::lookup_datastore(store, Some(Operation::Read))?;
if !store.namespace_exists(&ns) { if !store.namespace_exists(&ns) {
@ -157,8 +149,6 @@ impl PushParameters {
remove_vanished, remove_vanished,
max_depth, max_depth,
group_filter, group_filter,
encrypted_only,
verified_only,
transfer_last, transfer_last,
}) })
} }
@ -810,15 +800,6 @@ pub(crate) async fn push_snapshot(
} }
}; };
if ignore_not_verified_or_encrypted(
&source_manifest,
snapshot,
params.verified_only,
params.encrypted_only,
) {
return Ok(stats);
}
// Writer instance locks the snapshot on the remote side // Writer instance locks the snapshot on the remote side
let backup_writer = BackupWriter::start( let backup_writer = BackupWriter::start(
&params.target.client, &params.target.client,

View File

@ -10,7 +10,6 @@ use std::time::Duration;
use anyhow::{bail, format_err, Context, Error}; use anyhow::{bail, format_err, Context, Error};
use futures::{future::FutureExt, select}; use futures::{future::FutureExt, select};
use hyper::http::StatusCode; use hyper::http::StatusCode;
use pbs_config::BackupLockGuard;
use serde_json::json; use serde_json::json;
use tracing::{info, warn}; use tracing::{info, warn};
@ -20,13 +19,13 @@ use proxmox_router::HttpError;
use pbs_api_types::{ use pbs_api_types::{
Authid, BackupDir, BackupGroup, BackupNamespace, CryptMode, GroupListItem, SnapshotListItem, Authid, BackupDir, BackupGroup, BackupNamespace, CryptMode, GroupListItem, SnapshotListItem,
SyncDirection, SyncJobConfig, VerifyState, CLIENT_LOG_BLOB_NAME, MAX_NAMESPACE_DEPTH, SyncDirection, SyncJobConfig, CLIENT_LOG_BLOB_NAME, MAX_NAMESPACE_DEPTH, PRIV_DATASTORE_BACKUP,
PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_READ, PRIV_DATASTORE_READ,
}; };
use pbs_client::{BackupReader, BackupRepository, HttpClient, RemoteChunkReader}; use pbs_client::{BackupReader, BackupRepository, HttpClient, RemoteChunkReader};
use pbs_datastore::data_blob::DataBlob; use pbs_datastore::data_blob::DataBlob;
use pbs_datastore::read_chunk::AsyncReadChunk; use pbs_datastore::read_chunk::AsyncReadChunk;
use pbs_datastore::{BackupManifest, DataStore, ListNamespacesRecursive, LocalChunkReader}; use pbs_datastore::{DataStore, ListNamespacesRecursive, LocalChunkReader};
use crate::backup::ListAccessibleBackupGroups; use crate::backup::ListAccessibleBackupGroups;
use crate::server::jobstate::Job; use crate::server::jobstate::Job;
@ -106,7 +105,7 @@ pub(crate) struct RemoteSourceReader {
} }
pub(crate) struct LocalSourceReader { pub(crate) struct LocalSourceReader {
pub(crate) _dir_lock: Arc<Mutex<BackupLockGuard>>, pub(crate) _dir_lock: Arc<Mutex<proxmox_sys::fs::DirLockGuard>>,
pub(crate) path: PathBuf, pub(crate) path: PathBuf,
pub(crate) datastore: Arc<DataStore>, pub(crate) datastore: Arc<DataStore>,
} }
@ -479,11 +478,13 @@ impl SyncSource for LocalSource {
dir: &BackupDir, dir: &BackupDir,
) -> Result<Arc<dyn SyncSourceReader>, Error> { ) -> Result<Arc<dyn SyncSourceReader>, Error> {
let dir = self.store.backup_dir(ns.clone(), dir.clone())?; let dir = self.store.backup_dir(ns.clone(), dir.clone())?;
let guard = dir let dir_lock = proxmox_sys::fs::lock_dir_noblock_shared(
.lock_shared() &dir.full_path(),
.with_context(|| format!("while reading snapshot '{dir:?}' for a sync job"))?; "snapshot",
"locked by another operation",
)?;
Ok(Arc::new(LocalSourceReader { Ok(Arc::new(LocalSourceReader {
_dir_lock: Arc::new(Mutex::new(guard)), _dir_lock: Arc::new(Mutex::new(dir_lock)),
path: dir.full_path(), path: dir.full_path(),
datastore: dir.datastore().clone(), datastore: dir.datastore().clone(),
})) }))
@ -671,8 +672,6 @@ pub fn do_sync_job(
sync_job.remove_vanished, sync_job.remove_vanished,
sync_job.max_depth, sync_job.max_depth,
sync_job.group_filter.clone(), sync_job.group_filter.clone(),
sync_job.encrypted_only,
sync_job.verified_only,
sync_job.limit.clone(), sync_job.limit.clone(),
sync_job.transfer_last, sync_job.transfer_last,
) )
@ -732,34 +731,3 @@ pub fn do_sync_job(
Ok(upid_str) Ok(upid_str)
} }
pub(super) fn ignore_not_verified_or_encrypted(
manifest: &BackupManifest,
snapshot: &BackupDir,
verified_only: bool,
encrypted_only: bool,
) -> bool {
if verified_only {
match manifest.verify_state() {
Ok(Some(verify_state)) if verify_state.state == VerifyState::Ok => (),
_ => {
info!("Snapshot {snapshot} not verified but verified-only set, snapshot skipped");
return true;
}
}
}
if encrypted_only {
// Consider only encrypted if all files in the manifest are marked as encrypted
if !manifest
.files()
.iter()
.all(|file| file.chunk_crypt_mode() == CryptMode::Encrypt)
{
info!("Snapshot {snapshot} not encrypted but encrypted-only set, snapshot skipped");
return true;
}
}
false
}

View File

@ -56,7 +56,6 @@ pub struct PoolWriter {
notification_mode: TapeNotificationMode, notification_mode: TapeNotificationMode,
ns_magic: bool, ns_magic: bool,
used_tapes: HashSet<Uuid>, used_tapes: HashSet<Uuid>,
read_threads: usize,
} }
impl PoolWriter { impl PoolWriter {
@ -94,15 +93,9 @@ impl PoolWriter {
notification_mode, notification_mode,
ns_magic, ns_magic,
used_tapes: HashSet::new(), used_tapes: HashSet::new(),
read_threads: 1,
}) })
} }
/// Set the read threads to use when writing a backup to tape
pub fn set_read_thread_count(&mut self, read_threads: usize) {
self.read_threads = read_threads;
}
pub fn pool(&mut self) -> &mut MediaPool { pub fn pool(&mut self) -> &mut MediaPool {
&mut self.pool &mut self.pool
} }
@ -548,12 +541,7 @@ impl PoolWriter {
datastore: Arc<DataStore>, datastore: Arc<DataStore>,
snapshot_reader: Arc<Mutex<SnapshotReader>>, snapshot_reader: Arc<Mutex<SnapshotReader>>,
) -> Result<(std::thread::JoinHandle<()>, NewChunksIterator), Error> { ) -> Result<(std::thread::JoinHandle<()>, NewChunksIterator), Error> {
NewChunksIterator::spawn( NewChunksIterator::spawn(datastore, snapshot_reader, Arc::clone(&self.catalog_set))
datastore,
snapshot_reader,
Arc::clone(&self.catalog_set),
self.read_threads,
)
} }
pub(crate) fn catalog_version(&self) -> [u8; 8] { pub(crate) fn catalog_version(&self) -> [u8; 8] {

View File

@ -6,9 +6,8 @@ use anyhow::{format_err, Error};
use pbs_datastore::{DataBlob, DataStore, SnapshotReader}; use pbs_datastore::{DataBlob, DataStore, SnapshotReader};
use crate::tape::CatalogSet; use crate::tape::CatalogSet;
use crate::tools::parallel_handler::ParallelHandler;
/// Chunk iterator which uses separate threads to read chunks /// Chunk iterator which uses a separate thread to read chunks
/// ///
/// The iterator skips duplicate chunks and chunks already in the /// The iterator skips duplicate chunks and chunks already in the
/// catalog. /// catalog.
@ -25,11 +24,8 @@ impl NewChunksIterator {
datastore: Arc<DataStore>, datastore: Arc<DataStore>,
snapshot_reader: Arc<Mutex<SnapshotReader>>, snapshot_reader: Arc<Mutex<SnapshotReader>>,
catalog_set: Arc<Mutex<CatalogSet>>, catalog_set: Arc<Mutex<CatalogSet>>,
read_threads: usize,
) -> Result<(std::thread::JoinHandle<()>, Self), Error> { ) -> Result<(std::thread::JoinHandle<()>, Self), Error> {
// set the buffer size of the channel queues to twice the number of threads or 3, whichever let (tx, rx) = std::sync::mpsc::sync_channel(3);
// is greater, to reduce the chance of a reader thread (producer) being blocked.
let (tx, rx) = std::sync::mpsc::sync_channel((read_threads * 2).max(3));
let reader_thread = std::thread::spawn(move || { let reader_thread = std::thread::spawn(move || {
let snapshot_reader = snapshot_reader.lock().unwrap(); let snapshot_reader = snapshot_reader.lock().unwrap();
@ -39,43 +35,36 @@ impl NewChunksIterator {
let datastore_name = snapshot_reader.datastore_name().to_string(); let datastore_name = snapshot_reader.datastore_name().to_string();
let result: Result<(), Error> = proxmox_lang::try_block!({ let result: Result<(), Error> = proxmox_lang::try_block!({
let chunk_iter = snapshot_reader.chunk_iterator(move |digest| { let mut chunk_iter = snapshot_reader.chunk_iterator(move |digest| {
catalog_set catalog_set
.lock() .lock()
.unwrap() .unwrap()
.contains_chunk(&datastore_name, digest) .contains_chunk(&datastore_name, digest)
})?; })?;
let reader_pool = loop {
ParallelHandler::new("tape backup chunk reader pool", read_threads, { let digest = match chunk_iter.next() {
let tx = tx.clone(); None => {
move |digest| { let _ = tx.send(Ok(None)); // ignore send error
let blob = datastore.load_chunk(&digest)?; break;
tx.send(Ok(Some((digest, blob)))).map_err(|err| {
format_err!("error sending result from reader thread: {err}")
})?;
Ok(())
} }
}); Some(digest) => digest?,
};
for digest in chunk_iter {
let digest = digest?;
if chunk_index.contains(&digest) { if chunk_index.contains(&digest) {
continue; continue;
} }
reader_pool.send(digest)?; let blob = datastore.load_chunk(&digest)?;
//println!("LOAD CHUNK {}", hex::encode(&digest));
if let Err(err) = tx.send(Ok(Some((digest, blob)))) {
eprintln!("could not send chunk to reader thread: {err}");
break;
}
chunk_index.insert(digest); chunk_index.insert(digest);
} }
reader_pool.complete()?;
let _ = tx.send(Ok(None)); // ignore send error
Ok(()) Ok(())
}); });
if let Err(err) = result { if let Err(err) = result {
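The master-side change replaces the single reader loop with a small producer pool: chunk digests are fanned out to read_threads workers (the diff uses the crate's ParallelHandler for this) and the bounded result channel is sized to (read_threads * 2).max(3) so the consumer rarely blocks a producer. A standalone sketch of the same fan-out pattern using crossbeam-channel (already a workspace dependency), with a trivial placeholder in place of datastore.load_chunk:

use crossbeam_channel::{bounded, unbounded};
use std::thread;

fn main() {
    let read_threads: usize = 4;
    // Result queue: twice the worker count, but at least 3 slots, mirroring the sizing above.
    let (result_tx, result_rx) = bounded::<(u64, Vec<u8>)>((read_threads * 2).max(3));
    // Work queue: chunk digests waiting to be read.
    let (work_tx, work_rx) = unbounded::<u64>();

    let workers: Vec<_> = (0..read_threads)
        .map(|_| {
            let work_rx = work_rx.clone();
            let result_tx = result_tx.clone();
            thread::spawn(move || {
                // Each digest is handled by exactly one worker.
                for digest in work_rx.iter() {
                    let blob = vec![0u8; 8]; // stands in for datastore.load_chunk(&digest)
                    if result_tx.send((digest, blob)).is_err() {
                        break; // consumer hung up
                    }
                }
            })
        })
        .collect();
    drop(result_tx);

    for digest in 0u64..100 {
        work_tx.send(digest).unwrap();
    }
    drop(work_tx); // no more digests -> workers exit after draining the queue

    assert_eq!(result_rx.iter().count(), 100);
    for worker in workers {
        worker.join().unwrap();
    }
}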

View File

@ -80,7 +80,7 @@ impl SharedRateLimiter {
.owner(user.uid) .owner(user.uid)
.group(user.gid); .group(user.gid);
create_path(&path, Some(dir_opts), Some(dir_opts))?; create_path(&path, Some(dir_opts.clone()), Some(dir_opts))?;
path.push(name); path.push(name);

View File

@ -24,6 +24,7 @@ NOTIFICATION_TEMPLATES= \
default/tape-load-body.txt.hbs \ default/tape-load-body.txt.hbs \
default/tape-load-subject.txt.hbs \ default/tape-load-subject.txt.hbs \
default/test-body.txt.hbs \ default/test-body.txt.hbs \
default/test-body.html.hbs \
default/test-subject.txt.hbs \ default/test-subject.txt.hbs \
default/verify-err-body.txt.hbs \ default/verify-err-body.txt.hbs \
default/verify-ok-body.txt.hbs \ default/verify-ok-body.txt.hbs \

View File

@ -4,4 +4,4 @@ Error: {{error}}
Please visit the web interface for further details: Please visit the web interface for further details:
<{{base-url}}/#pbsCertificateConfiguration> <https://{{fqdn}}:{{port}}/#pbsCertificateConfiguration>
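These bodies are rendered with the handlebars crate (listed in the workspace dependencies), so after the change the link only needs a single base-url value instead of separate fqdn/port fields. A rough sketch of rendering the new body directly, assuming handlebars accepts the hyphenated keys the shipped templates use; the values are illustrative:

use handlebars::Handlebars;
use serde_json::json;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let reg = Handlebars::new();
    let body = "Error: {{error}}\n\n\
                Please visit the web interface for further details:\n\n\
                <{{base-url}}/#pbsCertificateConfiguration>\n";
    let rendered = reg.render_template(
        body,
        &json!({
            "error": "order failed",
            "base-url": "https://pbs1.example.com:8007",
        }),
    )?;
    println!("{rendered}");
    Ok(())
}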

View File

@ -5,4 +5,4 @@ Garbage collection failed: {{error}}
Please visit the web interface for further details: Please visit the web interface for further details:
<{{base-url}}/#pbsServerAdministration:tasks> <https://{{fqdn}}:{{port}}/#pbsServerAdministration:tasks>

View File

@ -1 +1 @@
Garbage Collect Datastore '{{datastore}}' failed Garbage Collect Datastore '{{ datastore }}' failed

View File

@ -1,17 +1,17 @@
Datastore: {{datastore}} Datastore: {{datastore}}
Task ID: {{upid}} Task ID: {{status.upid}}
Index file count: {{index-file-count}} Index file count: {{status.index-file-count}}
Removed garbage: {{human-bytes removed-bytes}} Removed garbage: {{human-bytes status.removed-bytes}}
Removed chunks: {{removed-chunks}} Removed chunks: {{status.removed-chunks}}
Removed bad chunks: {{removed-bad}} Removed bad chunks: {{status.removed-bad}}
Leftover bad chunks: {{still-bad}} Leftover bad chunks: {{status.still-bad}}
Pending removals: {{human-bytes pending-bytes}} (in {{pending-chunks}} chunks) Pending removals: {{human-bytes status.pending-bytes}} (in {{status.pending-chunks}} chunks)
Original Data usage: {{human-bytes index-data-bytes}} Original Data usage: {{human-bytes status.index-data-bytes}}
On-Disk usage: {{human-bytes disk-bytes}} ({{relative-percentage disk-bytes index-data-bytes}}) On-Disk usage: {{human-bytes status.disk-bytes}} ({{relative-percentage status.disk-bytes status.index-data-bytes}})
On-Disk chunks: {{disk-chunks}} On-Disk chunks: {{status.disk-chunks}}
Deduplication Factor: {{deduplication-factor}} Deduplication Factor: {{deduplication-factor}}
@ -20,4 +20,4 @@ Garbage collection successful.
Please visit the web interface for further details: Please visit the web interface for further details:
<{{base-url}}/#DataStore-{{datastore}}> <https://{{fqdn}}:{{port}}/#DataStore-{{datastore}}>

View File

@ -1 +1 @@
Garbage Collect Datastore '{{datastore}}' successful Garbage Collect Datastore '{{ datastore }}' successful

View File

@ -1,8 +1,8 @@
Proxmox Backup Server has the following updates available: Proxmox Backup Server has the following updates available:
{{#each available-updates}} {{#each updates }}
{{this.package-name}}: {{this.installed-version}} -> {{this.available-version~}} {{Package}}: {{OldVersion}} -> {{Version~}}
{{/each}} {{/each }}
To upgrade visit the web interface: To upgrade visit the web interface:
<{{base-url}}/#pbsServerAdministration:updates> <https://{{fqdn}}:{{port}}/#pbsServerAdministration:updates>

View File

@ -1 +1 @@
New software packages available ({{hostname}}) New software packages available ({{ hostname }})

View File

@ -1,10 +1,10 @@
Job ID: {{job-id}} Job ID: {{jobname}}
Datastore: {{datastore}} Datastore: {{store}}
Pruning failed: {{error}} Pruning failed: {{error}}
Please visit the web interface for further details: Please visit the web interface for further details:
<{{base-url}}/#pbsServerAdministration:tasks> <https://{{fqdn}}:{{port}}/#pbsServerAdministration:tasks>

View File

@ -1 +1 @@
Pruning datastore '{{datastore}}' failed Pruning datastore '{{ store }}' failed

View File

@ -1,10 +1,10 @@
Job ID: {{job-id}} Job ID: {{jobname}}
Datastore: {{datastore}} Datastore: {{store}}
Pruning successful. Pruning successful.
Please visit the web interface for further details: Please visit the web interface for further details:
<{{base-url}}/#DataStore-{{datastore}}> <https://{{fqdn}}:{{port}}/#DataStore-{{store}}>

Some files were not shown because too many files have changed in this diff.