Compare commits

master..v3.3.1

No commits in common. "master" and "v3.3.1" have entirely different histories.

207 changed files with 8397 additions and 4150 deletions


@ -1,5 +1,5 @@
[workspace.package]
version = "3.4.1"
version = "3.3.1"
authors = [
"Dietmar Maurer <dietmar@proxmox.com>",
"Dominik Csapak <d.csapak@proxmox.com>",
@ -13,7 +13,7 @@ authors = [
edition = "2021"
license = "AGPL-3"
repository = "https://git.proxmox.com/?p=proxmox-backup.git"
rust-version = "1.81"
rust-version = "1.80"
[package]
name = "proxmox-backup"
@ -29,6 +29,7 @@ exclude = [ "build", "debian", "tests/catar_data/test_symlink/symlink1"]
[workspace]
members = [
"pbs-api-types",
"pbs-buildcfg",
"pbs-client",
"pbs-config",
@ -62,7 +63,7 @@ proxmox-compression = "0.2"
proxmox-config-digest = "0.1.0"
proxmox-daemon = "0.1.0"
proxmox-fuse = "0.1.3"
proxmox-http = { version = "0.9.5", features = [ "client", "http-helpers", "websocket" ] } # see below
proxmox-http = { version = "0.9.0", features = [ "client", "http-helpers", "websocket" ] } # see below
proxmox-human-byte = "0.1"
proxmox-io = "1.0.1" # tools and client use "tokio" feature
proxmox-lang = "1.1"
@ -71,26 +72,25 @@ proxmox-ldap = "0.2.1"
proxmox-metrics = "0.3.1"
proxmox-notify = "0.5.1"
proxmox-openid = "0.10.0"
proxmox-rest-server = { version = "0.8.9", features = [ "templates" ] }
proxmox-rest-server = { version = "0.8.5", features = [ "templates" ] }
# some use "cli", some use "cli" and "server", pbs-config uses nothing
proxmox-router = { version = "3.0.0", default-features = false }
proxmox-rrd = "0.4"
proxmox-rrd-api-types = "1.0.2"
# everything but pbs-config and pbs-client use "api-macro"
proxmox-schema = "4"
proxmox-schema = "3"
proxmox-section-config = "2"
proxmox-serde = "0.1.1"
proxmox-shared-cache = "0.1"
proxmox-shared-memory = "0.3.0"
proxmox-sortable-macro = "0.1.2"
proxmox-subscription = { version = "0.5.0", features = [ "api-types" ] }
proxmox-sys = "0.6.7"
proxmox-sys = "0.6.5"
proxmox-systemd = "0.1"
proxmox-tfa = { version = "5", features = [ "api", "api-types" ] }
proxmox-time = "2"
proxmox-uuid = { version = "1", features = [ "serde" ] }
proxmox-uuid = "1"
proxmox-worker-task = "0.1"
pbs-api-types = "0.2.2"
# other proxmox crates
pathpatterns = "0.3"
@ -98,6 +98,7 @@ proxmox-acme = "0.5.3"
pxar = "0.12.1"
# PBS workspace
pbs-api-types = { path = "pbs-api-types" }
pbs-buildcfg = { path = "pbs-buildcfg" }
pbs-client = { path = "pbs-client" }
pbs-config = { path = "pbs-config" }
@ -120,15 +121,15 @@ crc32fast = "1"
const_format = "0.2"
crossbeam-channel = "0.5"
endian_trait = { version = "0.6", features = ["arrays"] }
env_logger = "0.11"
env_logger = "0.10"
flate2 = "1.0"
foreign-types = "0.3"
futures = "0.3"
h2 = { version = "0.4", features = [ "legacy", "stream" ] }
h2 = { version = "0.4", features = [ "stream" ] }
handlebars = "3.0"
hex = "0.4.3"
hickory-resolver = { version = "0.24.1", default-features = false, features = [ "system-config", "tokio-runtime" ] }
hyper = { version = "0.14", features = [ "backports", "deprecated", "full" ] }
http = "0.2"
hyper = { version = "0.14", features = [ "full" ] }
libc = "0.2"
log = "0.4.17"
nix = "0.26.1"
@ -142,6 +143,7 @@ regex = "1.5.5"
rustyline = "9"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
serde_plain = "1"
siphasher = "0.3"
syslog = "6"
tar = "0.4"
@ -173,6 +175,7 @@ endian_trait.workspace = true
futures.workspace = true
h2.workspace = true
hex.workspace = true
http.workspace = true
hyper.workspace = true
libc.workspace = true
log.workspace = true
@ -233,13 +236,13 @@ proxmox-tfa.workspace = true
proxmox-time.workspace = true
proxmox-uuid.workspace = true
proxmox-worker-task.workspace = true
pbs-api-types.workspace = true
# in their respective repo
proxmox-acme.workspace = true
pxar.workspace = true
# proxmox-backup workspace/internal crates
pbs-api-types.workspace = true
pbs-buildcfg.workspace = true
pbs-client.workspace = true
pbs-config.workspace = true
@ -253,8 +256,7 @@ proxmox-rrd-api-types.workspace = true
# Local path overrides
# NOTE: You must run `cargo update` after changing this for it to take effect!
[patch.crates-io]
#pbs-api-types = { path = "../proxmox/pbs-api-types" }
#proxmox-acme = { path = "../proxmox/proxmox-acme" }
#proxmox-apt = { path = "../proxmox/proxmox-apt" }
#proxmox-apt-api-types = { path = "../proxmox/proxmox-apt-api-types" }
#proxmox-async = { path = "../proxmox/proxmox-async" }
@ -290,6 +292,7 @@ proxmox-rrd-api-types.workspace = true
#proxmox-uuid = { path = "../proxmox/proxmox-uuid" }
#proxmox-worker-task = { path = "../proxmox/proxmox-worker-task" }
#proxmox-acme = { path = "../proxmox/proxmox-acme" }
#pathpatterns = {path = "../pathpatterns" }
#pxar = { path = "../pxar" }


@ -1,10 +1,8 @@
include /usr/share/dpkg/default.mk
include /usr/share/rustc/architecture.mk
include defines.mk
PACKAGE := proxmox-backup
ARCH := $(DEB_BUILD_ARCH)
export DEB_HOST_RUST_TYPE
SUBDIRS := etc www docs templates
@ -38,20 +36,13 @@ SUBCRATES != cargo metadata --no-deps --format-version=1 \
| grep "$$PWD/" \
| sed -e "s!.*$$PWD/!!g" -e 's/\#.*$$//g' -e 's/)$$//g'
STATIC_TARGET_DIR := target/static-build
ifeq ($(BUILD_MODE), release)
CARGO_BUILD_ARGS += --release --target $(DEB_HOST_RUST_TYPE)
CARGO_BUILD_ARGS += --release
COMPILEDIR := target/$(DEB_HOST_RUST_TYPE)/release
STATIC_COMPILEDIR := $(STATIC_TARGET_DIR)/$(DEB_HOST_RUST_TYPE)/release
else
CARGO_BUILD_ARGS += --target $(DEB_HOST_RUST_TYPE)
COMPILEDIR := target/$(DEB_HOST_RUST_TYPE)/debug
STATIC_COMPILEDIR := $(STATIC_TARGET_DIR)/$(DEB_HOST_RUST_TYPE)/debug
endif
STATIC_RUSTC_FLAGS := -C target-feature=+crt-static -L $(STATIC_COMPILEDIR)/deps-stubs/
ifeq ($(valgrind), yes)
CARGO_BUILD_ARGS += --features valgrind
endif
@ -61,9 +52,6 @@ CARGO ?= cargo
COMPILED_BINS := \
$(addprefix $(COMPILEDIR)/,$(USR_BIN) $(USR_SBIN) $(SERVICE_BIN) $(RESTORE_BIN))
STATIC_BINS := \
$(addprefix $(STATIC_COMPILEDIR)/,proxmox-backup-client-static pxar-static)
export DEB_VERSION DEB_VERSION_UPSTREAM
SERVER_DEB=$(PACKAGE)-server_$(DEB_VERSION)_$(ARCH).deb
@ -72,12 +60,10 @@ CLIENT_DEB=$(PACKAGE)-client_$(DEB_VERSION)_$(ARCH).deb
CLIENT_DBG_DEB=$(PACKAGE)-client-dbgsym_$(DEB_VERSION)_$(ARCH).deb
RESTORE_DEB=proxmox-backup-file-restore_$(DEB_VERSION)_$(ARCH).deb
RESTORE_DBG_DEB=proxmox-backup-file-restore-dbgsym_$(DEB_VERSION)_$(ARCH).deb
STATIC_CLIENT_DEB=$(PACKAGE)-client-static_$(DEB_VERSION)_$(ARCH).deb
STATIC_CLIENT_DBG_DEB=$(PACKAGE)-client-static-dbgsym_$(DEB_VERSION)_$(ARCH).deb
DOC_DEB=$(PACKAGE)-docs_$(DEB_VERSION)_all.deb
DEBS=$(SERVER_DEB) $(SERVER_DBG_DEB) $(CLIENT_DEB) $(CLIENT_DBG_DEB) \
$(RESTORE_DEB) $(RESTORE_DBG_DEB) $(STATIC_CLIENT_DEB) $(STATIC_CLIENT_DBG_DEB)
$(RESTORE_DEB) $(RESTORE_DBG_DEB)
DSC = rust-$(PACKAGE)_$(DEB_VERSION).dsc
@ -85,7 +71,7 @@ DESTDIR=
tests ?= --workspace
all: proxmox-backup-client-static $(SUBDIRS)
all: $(SUBDIRS)
.PHONY: $(SUBDIRS)
$(SUBDIRS):
@ -155,7 +141,7 @@ clean: clean-deb
$(foreach i,$(SUBDIRS), \
$(MAKE) -C $(i) clean ;)
$(CARGO) clean
rm -f .do-cargo-build .do-static-cargo-build
rm -f .do-cargo-build
# allows one to avoid running cargo clean when one just wants to tidy up after a package build
clean-deb:
@ -204,25 +190,12 @@ $(COMPILED_BINS) $(COMPILEDIR)/dump-catalog-shell-cli $(COMPILEDIR)/docgen: .do-
--bin sg-tape-cmd
touch "$@"
.PHONY: proxmox-backup-client-static
proxmox-backup-client-static:
rm -f .do-static-cargo-build
$(MAKE) $(STATIC_BINS)
$(STATIC_BINS): .do-static-cargo-build
.do-static-cargo-build:
mkdir -p $(STATIC_COMPILEDIR)/deps-stubs/ && \
echo '!<arch>' > $(STATIC_COMPILEDIR)/deps-stubs/libsystemd.a # workaround for too greedy linkage and proxmox-systemd
$(CARGO) rustc $(CARGO_BUILD_ARGS) --package pxar-bin --bin pxar \
--target-dir $(STATIC_TARGET_DIR) -- $(STATIC_RUSTC_FLAGS)
$(CARGO) rustc $(CARGO_BUILD_ARGS) --package proxmox-backup-client --bin proxmox-backup-client \
--target-dir $(STATIC_TARGET_DIR) -- $(STATIC_RUSTC_FLAGS)
.PHONY: lint
lint:
cargo clippy -- -A clippy::all -D clippy::correctness
install: $(COMPILED_BINS) $(STATIC_BINS)
install: $(COMPILED_BINS)
install -dm755 $(DESTDIR)$(BINDIR)
install -dm755 $(DESTDIR)$(ZSH_COMPL_DEST)
$(foreach i,$(USR_BIN), \
@ -241,19 +214,16 @@ install: $(COMPILED_BINS) $(STATIC_BINS)
install -m4755 -o root -g root $(COMPILEDIR)/sg-tape-cmd $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/sg-tape-cmd
$(foreach i,$(SERVICE_BIN), \
install -m755 $(COMPILEDIR)/$(i) $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/ ;)
install -m755 $(STATIC_COMPILEDIR)/proxmox-backup-client $(DESTDIR)$(BINDIR)/proxmox-backup-client-static
install -m755 $(STATIC_COMPILEDIR)/pxar $(DESTDIR)$(BINDIR)/pxar-static
$(MAKE) -C www install
$(MAKE) -C docs install
$(MAKE) -C templates install
.PHONY: upload
upload: UPLOAD_DIST ?= $(DEB_DISTRIBUTION)
upload: $(SERVER_DEB) $(CLIENT_DEB) $(RESTORE_DEB) $(DOC_DEB) $(STATIC_CLIENT_DEB)
upload: $(SERVER_DEB) $(CLIENT_DEB) $(RESTORE_DEB) $(DOC_DEB)
# check if working directory is clean
git diff --exit-code --stat && git diff --exit-code --stat --staged
tar cf - $(SERVER_DEB) $(SERVER_DBG_DEB) $(DOC_DEB) $(CLIENT_DEB) $(CLIENT_DBG_DEB) \
| ssh -X repoman@repo.proxmox.com upload --product pbs --dist $(UPLOAD_DIST)
tar cf - $(CLIENT_DEB) $(CLIENT_DBG_DEB) | ssh -X repoman@repo.proxmox.com upload --product "pve,pmg,pbs-client" --dist $(UPLOAD_DIST)
tar cf - $(STATIC_CLIENT_DEB) $(STATIC_CLIENT_DBG_DEB) | ssh -X repoman@repo.proxmox.com upload --product "pbs-client" --dist $(UPLOAD_DIST)
tar cf - $(RESTORE_DEB) $(RESTORE_DBG_DEB) | ssh -X repoman@repo.proxmox.com upload --product "pve" --dist $(UPLOAD_DIST)


@ -5,11 +5,8 @@ Build & Release Notes
``rustup`` Toolchain
====================
We normally want to build with the ``rustc`` Debian package (see below). If you
still want to use ``rustup`` for other reasons (e.g. to easily switch between
the official stable, beta, and nightly compilers), you should set the following
``rustup`` configuration to use the Debian-provided ``rustc`` compiler
by default:
We normally want to build with the ``rustc`` Debian package. To do that
you can set the following ``rustup`` configuration:
# rustup toolchain link system /usr
# rustup default system
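If you want to double-check which compiler is picked up after linking the system
toolchain, something along these lines should work (the printed path and version
output are only illustrative):

# rustup which rustc
/usr/bin/rustc
# rustc --version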

debian/changelog

@ -1,261 +1,3 @@
rust-proxmox-backup (3.4.1-1) bookworm; urgency=medium
* ui: token view: fix typo in 'lose' and rephrase token regenerate dialog
message for more clarity.
* restrict consent-banner text length to 64 KiB.
* docs: describe the intent for the statically linked pbs client.
* api: backup: include previous snapshot name in log message.
* garbage collection: account for created/deleted index files concurrently
to GC to avoid potentially confusing log messages.
* garbage collection: fix rare race in chunk marking phase for setups doing
highly frequent backups in quick succession while immediately pruning to a
single backup snapshot being left over after each such backup.
* tape: wait for calibration of LTO-9 tapes in general, not just in the
initial tape format procedure.
-- Proxmox Support Team <support@proxmox.com> Wed, 16 Apr 2025 14:45:37 +0200
rust-proxmox-backup (3.4.0-1) bookworm; urgency=medium
* fix #4788: build statically linked version of the proxmox-backup-client
package.
* ui: sync job: change the rate limit direction based on sync direction.
* docs: mention how to set the push sync jobs rate limit
* ui: set error mask: ensure that message is html-encoded to avoid visual
glitches.
* api server: increase maximal request body size from 64 KiB to 512 KiB,
similar to a recent change for our Perl-based projects.
* notifications: include Content-Length header for broader compatibility in
the webhook and gotify targets.
* notifications: allow overriding notification templates.
* docs: notifications: add section about how to use custom templates
* sync: print whole error chain per group on failure for more context.
* ui: options-view: fix typo in empty-text for GC tuning option.
* memory info: use the "MemAvailable" field from '/proc/meminfo' to compute
used memory to fix overestimation of that metric, to better align with
what modern versions of tools like `free` do, and to future-proof against
changes in how the kernel accounts for memory usage.
* add "MemAvailable" field to ProcFsMemInfo to promote its usage over the
existing "MemFree" field, which is almost never the right choice. This new
field will be provided to external metric servers.
* docs: mention different name resolution for statically linked binary.
* docs: add basic info for how to install the statically linked client.
* docs: mention new verify-only and encrypted-only flags for sync jobs.
-- Proxmox Support Team <support@proxmox.com> Wed, 09 Apr 2025 17:41:38 +0200
rust-proxmox-backup (3.3.7-1) bookworm; urgency=medium
* fix #5982: garbage collection: add a check to ensure that the underlying
file system supports and honors file access time (atime) updates.
The check is performed once on datastore creation and on start of every
garbage collection (GC) task, just to be sure. It can be disabled in the
datastore tuning options.
* garbage collection: support setting a custom access time cutoff,
overriding the default of one day and five minutes.
* ui: expose flag for GC access time support safety check and the access
time cutoff override in datastore tuning options.
* docs: describe rationale for new GC access time update check setting and
the access time cutoff check in tuning options.
* access control: add support to mark a specific authentication realm as
default selected realm for the login user interface.
* fix #4382: api: access control: remove permissions of token on deletion.
* fix #3887: api: access control: allow users to regenerate the secret of an
API token without changing any existing ACLs.
* fix #6072: sync jobs: support flags to limit sync to only encrypted and/or
verified snapshots.
* ui: datastore tuning options: expose overriding GC cache capacity so that
admins can either restrict the peak memory usage during GC or allow GC to
use more memory to reduce file system IO even for huge (multiple TiB)
referenced data in backup groups.
* ui: datastore tuning options: increase width and rework labels to provide
a tiny bit more context about what these options are.
* ui: sync job: increase edit window width to 720px to make it less cramped.
* ui: sync job: small field label casing consistency fixes.
-- Proxmox Support Team <support@proxmox.com> Sat, 05 Apr 2025 17:54:31 +0200
rust-proxmox-backup (3.3.6-1) bookworm; urgency=medium
* datastore: ignore group locking errors when removing snapshots; they
normally happen only due to old locking, and the underlying snapshot is
deleted in any case at this point, so confusing the user is of no help.
* api: datastore: add error message on failed removal due to old locking and
tell any admin what they can do to switch to the new locking.
* ui: only add delete parameter on token edit, not when creating tokens.
* pbs-client: allow reading fingerprint from system credential.
* docs: client: add section about system credentials integration.
-- Proxmox Support Team <support@proxmox.com> Thu, 03 Apr 2025 17:57:02 +0200
rust-proxmox-backup (3.3.5-1) bookworm; urgency=medium
* api: config: use guard for unmounting on failed datastore creation
* client: align description for backup specification to docs, using
`archive-name` and `type` over `label` and `ext`.
* client: read credentials from CREDENTIALS_DIRECTORY environment variable
following the "System and Service Credentials" specification. This allows
users to use native systemd capabilities for credential management if the
proxmox-backup-client is used in systemd units or, e.g., through a wrapper
like systemd-run.
* fix #3935: datastore/api/backup: move datastore locking to '/run' to avoid
that lock-files can block deleting backup groups or snapshots on the
datastore and to decouple locking from the underlying datastore
file-system.
* api: fix race when changing the owner of a backup-group.
* fix #3336: datastore: remove group if the last snapshot is removed to
avoid confusing situations where the group directory still exists and
blocks re-creating a group with another owner even though the empty group
was not visible in the web UI.
* notifications: clean up and add dedicated types for all templates to
allow declaring that interface stable, in preparation for allowing
overriding them in the future (not included in this release).
* tape: introduce a tape backup job worker-thread option for restores.
Depending on the underlying storage using more threads can dramatically
improve the restore speed. Especially fast storage with low penalty for
random access, like flash-storage (SSDs) can profit from using more
worker threads. But on file systems backed by spinning disks (HDDs) the
performance can even degrade with more threads. This is why for now the
default is left at a single thread and the admin needs to tune this for
their storage.
* garbage collection: generate index file list via datastore iterators in a
structured manner.
* fix #5331: garbage collection: avoid multiple chunk atime updates by
keeping track of the recently marked chunks in phase 1 of garbage
collection, avoiding repeated atime updates via relatively expensive
utimensat (touch) calls. Use an LRU cache with size 32 MiB for tracking
already processed chunks; this fully covers backup groups referencing up
to 4 TiB of actual chunks, and even bigger ones can still benefit from
the cache. On some real-world benchmarks of a datastore with 1.5 million
chunks, an original data usage of 120 TiB and a referenced data usage of
2.7 TiB (high deduplication count due to long-term history), we measured
21.1 times fewer file updates (31.6 million) and a 6.1 times reduction in
total GC runtime (155.4 s to 22.8 s) on a ZFS RAID 10 system consisting
of spinning HDDs and a special device mirror backed by datacenter SSDs.
* logging helper: use new builder initializer, no functional change
intended.
-- Proxmox Support Team <support@proxmox.com> Wed, 02 Apr 2025 19:42:38 +0200
rust-proxmox-backup (3.3.4-1) bookworm; urgency=medium
* fix #6185: client/docs: explicitly mention archive name restrictions
* docs: using-the-installer: adapt to raised root password length requirement
* disks: wipe: replace dd with write_all_at for zeroing disk
* fix #5946: disks: wipe: ensure GPT header backup is wiped
* docs: fix hash collision probability comparison
-- Proxmox Support Team <support@proxmox.com> Thu, 13 Mar 2025 13:04:05 +0100
rust-proxmox-backup (3.3.3-1) bookworm; urgency=medium
* api: datastore list: move checking if a datastore is mounted after we
ensured that the user may actually access it. While this had no effect
security wise, it could significantly increase the cost of this API
endpoint in big setups with many datastores and many tenants that each
have only access to one, or a small set, of datastores.
* Revert "fix #5710: api: backup: stat known chunks on backup finish" due to
a big performance impact relative to what this is protectign against. We
will work out a more efficient fix for this issue in the future.
* prune simulator: show backup entries that are kept also in the flat list
of backups, not just in the calendar view.
* docs: improve the description for the garbage collection's cut-off time
* pxar extract: correctly honor the overwrite flag
* api: datastore: add missing log context for prune to avoid a case where
the worker state is unknown after it finished.
* docs: add synopsis and basic docs for prune job configuration
* backup verification: handle manifest update errors as non-fatal to avoid
failing the job, as we want to continue verifying the rest to ensure we
uncover as many potential problems as possible.
* fix #4408: docs: add 'disaster recovery' section for tapes
* fix #6069: prune simulator: correctly handle schedules that mix both a
range and a step size at once.
* client: pxar: fix a race condition where the backup upload stream can miss
an error from the create archive function, because the error state is only
set after the backup stream was already polled. This avoids an edge case
where a file-based backup was incorrectly marked as having succeeded while
there was an error.
-- Proxmox Support Team <support@proxmox.com> Tue, 11 Feb 2025 20:24:27 +0100
rust-proxmox-backup (3.3.2-2) bookworm; urgency=medium
* file-restore: fix regression with the new blockdev method used to pass
disks of a backup to the isolated virtual machine.
-- Proxmox Support Team <support@proxmox.com> Tue, 10 Dec 2024 12:14:47 +0100
rust-proxmox-backup (3.3.2-1) bookworm; urgency=medium
* pbs-client: remove `log` dependency and migrate to our common,
`tracing`-based, logging infrastructure. No semantic change intended.
* file restore: switch to more modern blockdev option for drives in QEMU
wrapper for the restore VM.
* pxar: client: fix missing file size check for metadata comparison
-- Proxmox Support Team <support@proxmox.com> Mon, 09 Dec 2024 10:37:32 +0100
rust-proxmox-backup (3.3.1-1) bookworm; urgency=medium
* tree-wide: add missing O_CLOEXEC flags to `openat` calls to avoid passing

debian/control

@ -25,17 +25,15 @@ Build-Depends: bash-completion,
librust-crossbeam-channel-0.5+default-dev,
librust-endian-trait-0.6+arrays-dev,
librust-endian-trait-0.6+default-dev,
librust-env-logger-0.11+default-dev,
librust-env-logger-0.10+default-dev,
librust-foreign-types-0.3+default-dev,
librust-futures-0.3+default-dev,
librust-h2-0.4+default-dev,
librust-h2-0.4+legacy-dev,
librust-h2-0.4+stream-dev,
librust-hex-0.4+default-dev (>= 0.4.3-~~),
librust-hex-0.4+serde-dev (>= 0.4.3-~~),
librust-hyper-0.14+backports-dev,
librust-http-0.2+default-dev,
librust-hyper-0.14+default-dev,
librust-hyper-0.14+deprecated-dev,
librust-hyper-0.14+full-dev,
librust-libc-0.2+default-dev,
librust-log-0.4+default-dev (>= 0.4.17-~~),
@ -45,7 +43,6 @@ Build-Depends: bash-completion,
librust-once-cell-1+default-dev (>= 1.3.1-~~),
librust-openssl-0.10+default-dev (>= 0.10.40-~~),
librust-pathpatterns-0.3+default-dev,
librust-pbs-api-types-0.2+default-dev (>= 0.2.2),
librust-percent-encoding-2+default-dev (>= 2.1-~~),
librust-pin-project-lite-0.2+default-dev,
librust-proxmox-acme-0.5+default-dev (>= 0.5.3-~~),
@ -54,6 +51,7 @@ Build-Depends: bash-completion,
librust-proxmox-apt-api-types-1+default-dev (>= 1.0.1-~~),
librust-proxmox-async-0.4+default-dev,
librust-proxmox-auth-api-0.4+api-dev,
librust-proxmox-auth-api-0.4+api-types-dev,
librust-proxmox-auth-api-0.4+default-dev,
librust-proxmox-auth-api-0.4+pam-authenticator-dev,
librust-proxmox-borrow-1+default-dev,
@ -61,14 +59,14 @@ Build-Depends: bash-completion,
librust-proxmox-config-digest-0.1+default-dev,
librust-proxmox-daemon-0.1+default-dev,
librust-proxmox-fuse-0.1+default-dev (>= 0.1.3-~~),
librust-proxmox-http-0.9+client-dev (>= 0.9.5-~~),
librust-proxmox-http-0.9+client-trait-dev (>= 0.9.5-~~),
librust-proxmox-http-0.9+default-dev (>= 0.9.5-~~),
librust-proxmox-http-0.9+http-helpers-dev (>= 0.9.5-~~),
librust-proxmox-http-0.9+proxmox-async-dev (>= 0.9.5-~~),
librust-proxmox-http-0.9+rate-limited-stream-dev (>= 0.9.5-~~),
librust-proxmox-http-0.9+rate-limiter-dev (>= 0.9.5-~~),
librust-proxmox-http-0.9+websocket-dev (>= 0.9.5-~~),
librust-proxmox-http-0.9+client-dev,
librust-proxmox-http-0.9+client-trait-dev,
librust-proxmox-http-0.9+default-dev,
librust-proxmox-http-0.9+http-helpers-dev,
librust-proxmox-http-0.9+proxmox-async-dev,
librust-proxmox-http-0.9+rate-limited-stream-dev,
librust-proxmox-http-0.9+rate-limiter-dev,
librust-proxmox-http-0.9+websocket-dev,
librust-proxmox-human-byte-0.1+default-dev,
librust-proxmox-io-1+default-dev (>= 1.0.1-~~),
librust-proxmox-io-1+tokio-dev (>= 1.0.1-~~),
@ -76,18 +74,18 @@ Build-Depends: bash-completion,
librust-proxmox-ldap-0.2+default-dev (>= 0.2.1-~~),
librust-proxmox-log-0.2+default-dev (>= 0.2.6-~~),
librust-proxmox-metrics-0.3+default-dev (>= 0.3.1-~~),
librust-proxmox-notify-0.5+default-dev (>= 0.5.1-~~),
librust-proxmox-notify-0.5+pbs-context-dev (>= 0.5.1-~~),
librust-proxmox-notify-0.5+default-dev (>= 0.5.1~),
librust-proxmox-notify-0.5+pbs-context-dev (>= 0.5.1~),
librust-proxmox-openid-0.10+default-dev,
librust-proxmox-rest-server-0.8+default-dev (>= 0.8.9-~~),
librust-proxmox-rest-server-0.8+rate-limited-stream-dev (>= 0.8.9-~~),
librust-proxmox-rest-server-0.8+templates-dev (>= 0.8.9-~~),
librust-proxmox-rest-server-0.8+default-dev (>= 0.8.5-~~),
librust-proxmox-rest-server-0.8+rate-limited-stream-dev (>= 0.8.5-~~),
librust-proxmox-rest-server-0.8+templates-dev (>= 0.8.5-~~),
librust-proxmox-router-3+cli-dev,
librust-proxmox-router-3+server-dev,
librust-proxmox-rrd-0.4+default-dev,
librust-proxmox-rrd-api-types-1+default-dev (>= 1.0.2-~~),
librust-proxmox-schema-4+api-macro-dev,
librust-proxmox-schema-4+default-dev,
librust-proxmox-schema-3+api-macro-dev,
librust-proxmox-schema-3+default-dev,
librust-proxmox-section-config-2+default-dev,
librust-proxmox-serde-0.1+default-dev (>= 0.1.1-~~),
librust-proxmox-serde-0.1+serde-json-dev (>= 0.1.1-~~),
@ -96,11 +94,11 @@ Build-Depends: bash-completion,
librust-proxmox-sortable-macro-0.1+default-dev (>= 0.1.2-~~),
librust-proxmox-subscription-0.5+api-types-dev,
librust-proxmox-subscription-0.5+default-dev,
librust-proxmox-sys-0.6+acl-dev (>= 0.6.5-~~),
librust-proxmox-sys-0.6+crypt-dev (>= 0.6.5-~~),
librust-proxmox-sys-0.6+default-dev (>= 0.6.7-~~),
librust-proxmox-sys-0.6+logrotate-dev (>= 0.6.5-~~),
librust-proxmox-sys-0.6+timer-dev (>= 0.6.5-~~),
librust-proxmox-sys-0.6+acl-dev,
librust-proxmox-sys-0.6+crypt-dev,
librust-proxmox-sys-0.6+default-dev (>= 0.6.5~~),
librust-proxmox-sys-0.6+logrotate-dev,
librust-proxmox-sys-0.6+timer-dev,
librust-proxmox-systemd-0.1+default-dev,
librust-proxmox-tfa-5+api-dev,
librust-proxmox-tfa-5+api-types-dev,
@ -115,6 +113,7 @@ Build-Depends: bash-completion,
librust-serde-1+default-dev,
librust-serde-1+derive-dev,
librust-serde-json-1+default-dev,
librust-serde-plain-1+default-dev,
librust-syslog-6+default-dev,
librust-tar-0.4+default-dev,
librust-termcolor-1+default-dev (>= 1.1.2-~~),
@ -205,14 +204,6 @@ Description: Proxmox Backup Client tools
This package contains the Proxmox Backup client, which provides a
simple command line tool to create and restore backups.
Package: proxmox-backup-client-static
Architecture: any
Depends: qrencode, ${misc:Depends},
Conflicts: proxmox-backup-client,
Description: Proxmox Backup Client tools (statically linked)
This package contains the Proxmox Backup client, which provides a
simple command line tool to create and restore backups.
Package: proxmox-backup-docs
Build-Profiles: <!nodoc>
Section: doc

debian/copyright

@ -1,4 +1,4 @@
Copyright (C) 2019 - 2025 Proxmox Server Solutions GmbH
Copyright (C) 2019 - 2024 Proxmox Server Solutions GmbH
This software is written by Proxmox Server Solutions GmbH <support@proxmox.com>

debian/postinst

@ -20,7 +20,15 @@ case "$1" in
# modeled after dh_systemd_start output
systemctl --system daemon-reload >/dev/null || true
if [ -n "$2" ]; then
if dpkg --compare-versions "$2" 'lt' '1.0.7-1'; then
# there was an issue with reloading and systemd being confused in older daemon versions
# so restart instead of reload if upgrading from there, see commit 0ec79339f7aebf9
# FIXME: remove with PBS 2.1
echo "Upgrading from older proxmox-backup-server: restart (not reload) daemons"
_dh_action=try-restart
else
_dh_action=try-reload-or-restart
fi
else
_dh_action=start
fi
@ -72,11 +80,6 @@ EOF
update_sync_job "$prev_job"
fi
fi
if dpkg --compare-versions "$2" 'lt' '3.3.5~'; then
# ensure old locking is used by the daemon until a reboot happened
touch "/run/proxmox-backup/old-locking"
fi
fi
;;


@ -1,2 +0,0 @@
debian/proxmox-backup-client.bc proxmox-backup-client
debian/pxar.bc pxar


@ -1,4 +0,0 @@
usr/share/man/man1/proxmox-backup-client.1
usr/share/man/man1/pxar.1
usr/share/zsh/vendor-completions/_proxmox-backup-client
usr/share/zsh/vendor-completions/_pxar


@ -34,7 +34,6 @@ usr/share/man/man5/media-pool.cfg.5
usr/share/man/man5/notifications-priv.cfg.5
usr/share/man/man5/notifications.cfg.5
usr/share/man/man5/proxmox-backup.node.cfg.5
usr/share/man/man5/prune.cfg.5
usr/share/man/man5/remote.cfg.5
usr/share/man/man5/sync.cfg.5
usr/share/man/man5/tape-job.cfg.5
@ -63,6 +62,7 @@ usr/share/proxmox-backup/templates/default/tape-backup-ok-body.txt.hbs
usr/share/proxmox-backup/templates/default/tape-backup-ok-subject.txt.hbs
usr/share/proxmox-backup/templates/default/tape-load-body.txt.hbs
usr/share/proxmox-backup/templates/default/tape-load-subject.txt.hbs
usr/share/proxmox-backup/templates/default/test-body.html.hbs
usr/share/proxmox-backup/templates/default/test-body.txt.hbs
usr/share/proxmox-backup/templates/default/test-subject.txt.hbs
usr/share/proxmox-backup/templates/default/verify-err-body.txt.hbs

debian/rules

@ -47,9 +47,6 @@ override_dh_auto_install:
dh_auto_install -- \
PROXY_USER=backup \
LIBDIR=/usr/lib/$(DEB_HOST_MULTIARCH)
mkdir -p debian/proxmox-backup-client-static/usr/bin
mv debian/tmp/usr/bin/proxmox-backup-client-static debian/proxmox-backup-client-static/usr/bin/proxmox-backup-client
mv debian/tmp/usr/bin/pxar-static debian/proxmox-backup-client-static/usr/bin/pxar
override_dh_installsystemd:
dh_installsystemd -pproxmox-backup-server proxmox-backup-daily-update.timer


@ -13,7 +13,6 @@ GENERATED_SYNOPSIS := \
config/tape/config.rst \
config/user/config.rst \
config/verification/config.rst \
config/prune/config.rst \
pmt/synopsis.rst \
pmtx/synopsis.rst \
proxmox-backup-client/catalog-shell-synopsis.rst \
@ -53,7 +52,6 @@ MAN5_PAGES := \
tape.cfg.5 \
user.cfg.5 \
verification.cfg.5 \
prune.cfg.5 \
PRUNE_SIMULATOR_FILES := \
prune-simulator/index.html \


@ -1,5 +1,3 @@
.. _client_usage:
Backup Client Usage
===================
@ -46,24 +44,6 @@ user\@pbs!token@host:store ``user@pbs!token`` host:8007 store
[ff80::51]:1234:mydatastore ``root@pam`` [ff80::51]:1234 mydatastore
================================ ================== ================== ===========
.. _statically_linked_client:
Statically Linked Backup Client
-------------------------------
A statically linked version of the Proxmox Backup client is available for Linux
based systems where the regular client is not available. Please note that it is
recommended to use the regular client when possible, as the statically linked
client is not a full replacement. For example, name resolution will not be
performed via the mechanisms provided by libc, but is done by a resolver
written purely in the Rust programming language. Therefore, features and
modules provided by the Name Service Switch cannot be used.
The statically linked client is available via the ``pbs-client`` repository as
described in the :ref:`installation <install_pbc>` section.
.. _environment-variables:
Environment Variables
---------------------
@ -109,43 +89,6 @@ Environment Variables
you can add arbitrary comments after the first newline.
System and Service Credentials
------------------------------
Some of the :ref:`environment variables <environment-variables>` above can be
set using `system and service credentials <https://systemd.io/CREDENTIALS/>`_
instead.
============================ ==============================================
Environment Variable Credential Name Equivalent
============================ ==============================================
``PBS_REPOSITORY`` ``proxmox-backup-client.repository``
``PBS_PASSWORD`` ``proxmox-backup-client.password``
``PBS_ENCRYPTION_PASSWORD`` ``proxmox-backup-client.encryption-password``
``PBS_FINGERPRINT`` ``proxmox-backup-client.fingerprint``
============================ ==============================================
For example, the repository password can be stored in an encrypted file as
follows:
.. code-block:: console
# systemd-ask-password -n | systemd-creds encrypt --name=proxmox-backup-client.password - my-api-token.cred
The credential can then be reused inside of unit files or in a transient scope
unit as follows:
.. code-block:: console
# systemd-run --pipe --wait \
--property=LoadCredentialEncrypted=proxmox-backup-client.password:/full/path/to/my-api-token.cred \
--property=SetCredential=proxmox-backup-client.repository:'my_default_repository' \
proxmox-backup-client ...
Additionally, system credentials (e.g. passed down from the hypervisor to a
virtual machine via SMBIOS type 11) can be loaded on a service via
`LoadCredential=` as described in the manual page ``systemd.exec(5)``.
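As a sketch of the ``LoadCredential=`` variant mentioned above (the unit name
``my-backup.service`` and the credential path under ``/etc/credstore`` are
hypothetical), a drop-in could look like this:

.. code-block:: console

    # mkdir -p /etc/systemd/system/my-backup.service.d
    # cat > /etc/systemd/system/my-backup.service.d/credentials.conf <<'EOF'
    [Service]
    LoadCredential=proxmox-backup-client.repository:/etc/credstore/proxmox-backup-client.repository
    EOF
    # systemctl daemon-reload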
Output Format
-------------
@ -226,7 +169,6 @@ the client. The format is:
<archive-name>.<type>:<source-path>
The ``archive-name`` must contain alphanumerics, hyphens and underscores only.
Common types are ``.pxar`` for file archives and ``.img`` for block
device images. To create a backup of a block device, run the following command:
@ -528,8 +470,6 @@ version of your master key. The following command sends the output of the
proxmox-backup-client key paperkey --output-format text > qrkey.txt
.. _client_restoring_data:
Restoring Data
--------------


@ -71,7 +71,7 @@ master_doc = 'index'
# General information about the project.
project = 'Proxmox Backup'
copyright = '2019-2025, Proxmox Server Solutions GmbH'
copyright = '2019-2024, Proxmox Server Solutions GmbH'
author = 'Proxmox Support Team'
# The version info for the project you're documenting acts as a replacement for
@ -115,7 +115,6 @@ man_pages = [
('config/tape/man5', 'tape.cfg', 'Tape Drive and Changer Configuration', [author], 5),
('config/user/man5', 'user.cfg', 'User Configuration', [author], 5),
('config/verification/man5', 'verification.cfg', 'Verification Job Configuration', [author], 5),
('config/prune/man5', 'prune.cfg', 'Prune Job Configuration', [author], 5),
('config/notifications/man5', 'notifications.cfg', 'Notification target/matcher configuration', [author], 5),
('config/notifications-priv/man5', 'notifications-priv.cfg', 'Notification target secrets', [author], 5),
]


@ -1,14 +0,0 @@
Each entry starts with the header ``prune: <name>``, followed by the job
configuration options.
::
prune: prune-store2
schedule mon..fri 10:30
store my-datastore
prune: ...
You can use the ``proxmox-backup-manager prune-job`` command to manipulate this
file.
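As a hedged illustration (the exact subcommands and options should be checked
against the command's built-in help), listing and adjusting the job from the
example above could look like this:

.. code-block:: console

    # proxmox-backup-manager prune-job list
    # proxmox-backup-manager prune-job update prune-store2 --schedule 'mon..fri 10:30'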


@ -1,23 +0,0 @@
:orphan:
=========
prune.cfg
=========
Description
===========
The file /etc/proxmox-backup/prune.cfg is a configuration file for Proxmox
Backup Server. It contains the prune job configuration.
File Format
===========
.. include:: format.rst
Options
=======
.. include:: config.rst
.. include:: ../../pbs-copyright.rst


@ -7,8 +7,8 @@ verification.cfg
Description
===========
The file /etc/proxmox-backup/verification.cfg is a configuration file for
Proxmox Backup Server. It contains the verification job configuration.
The file /etc/proxmox-backup/sync.cfg is a configuration file for Proxmox
Backup Server. It contains the verification job configuration.
File Format
===========


@ -108,21 +108,6 @@ Options
.. include:: config/notifications-priv/config.rst
``prune.cfg``
~~~~~~~~~~~~~
File Format
^^^^^^^^^^^
.. include:: config/prune/format.rst
Options
^^^^^^^
.. include:: config/prune/config.rst
``tape.cfg``
~~~~~~~~~~~~


@ -138,26 +138,7 @@ you need to run:
# apt update
# apt install proxmox-backup-client
Install Statically Linked Proxmox Backup Client
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Proxmox provides a statically linked build of the Proxmox backup client that
should run on any modern x86-64 Linux system.
It is currently available as a Debian package. After configuring the
:ref:`package_repositories_client_only_apt`, you need to run:
.. code-block:: console
# apt update
# apt install proxmox-backup-client-static
This package conflicts with the `proxmox-backup-client` package, as both
provide the client as an executable in the `/usr/bin/proxmox-backup-client`
path.
You can copy this executable to other, e.g. non-Debian based Linux systems.
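For example, copying the binary to another host could look roughly like this
(host name and target path are placeholders; the ``version`` call is only a
quick smoke test):

.. code-block:: console

    # scp /usr/bin/proxmox-backup-client root@other-host:/usr/local/bin/proxmox-backup-client
    # ssh root@other-host /usr/local/bin/proxmox-backup-client version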
For details on using the Proxmox Backup Client, see :ref:`client_usage`.
.. note:: The client-only repository should be usable by most recent Debian and
Ubuntu derivatives.
.. include:: package-repositories.rst


@ -233,8 +233,8 @@ into two phases:
The task iterates over all chunks and checks their file access time against a
cutoff time. The cutoff time is given by either the oldest backup writer
instance, if present, or 24 hours and 5 minutes before the start of the
garbage collection.
instance, if present, or 24 hours and 5 minutes after the start of garbage
collection.
Garbage collection considers chunk files with access time older than the
cutoff time to be neither referenced by any backup snapshot's index, nor part


@ -72,10 +72,6 @@ either start it manually from the GUI or provide it with a schedule (see
Backup snapshots, groups and namespaces which are no longer available on the
**Remote** datastore can be removed from the local datastore as well by setting
the ``remove-vanished`` option for the sync job.
Setting the ``verified-only`` or ``encrypted-only`` flags allows you to limit
sync jobs to backup snapshots which have been verified or encrypted,
respectively. This is of particular interest when sending backups to a less
trusted remote backup server.
.. code-block:: console
@ -231,16 +227,13 @@ Bandwidth Limit
Syncing a datastore to an archive can produce a lot of traffic and impact other
users of the network. In order to avoid network or storage congestion, you can
limit the bandwidth of a sync job in pull direction by setting the ``rate-in``
option either in the web interface or using the ``proxmox-backup-manager``
command-line tool:
limit the bandwidth of the sync job by setting the ``rate-in`` option either in
the web interface or using the ``proxmox-backup-manager`` command-line tool:
.. code-block:: console
# proxmox-backup-manager sync-job update ID --rate-in 20MiB
For sync jobs in push direction use the ``rate-out`` option instead.
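For instance, mirroring the pull example above (the job ID is a placeholder):

.. code-block:: console

    # proxmox-backup-manager sync-job update ID --rate-out 20MiB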
Sync Direction Push
^^^^^^^^^^^^^^^^^^^


@ -7,25 +7,26 @@ Overview
--------
* Proxmox Backup Server emits :ref:`notification_events` in case of noteworthy
events in the system. These events are handled by the notification system. A
notification event has metadata, for example a timestamp, a severity level, a
type and other metadata fields.
* :ref:`notification_matchers` route a notification event to one or more
notification targets. A matcher can have match rules to selectively route
based on the metadata of a notification event.
events in the system. These events are handled by the notification system.
A notification event has metadata, for example a timestamp, a severity level,
a type and other metadata fields.
* :ref:`notification_matchers` route a notification event to one or more notification
targets. A matcher can have match rules to selectively route based on the metadata
of a notification event.
* :ref:`notification_targets` are a destination to which a notification event
is routed by a matcher. There are multiple types of target, mail-based
(Sendmail and SMTP) and Gotify.
Datastores and tape backup jobs have a configurable :ref:`notification_mode`.
It allows you to choose between the notification system and a legacy mode for
sending notification emails. The legacy mode is equivalent to the way
notifications were handled before Proxmox Backup Server 3.2.
It allows you to choose between the notification system and a legacy mode
for sending notification emails. The legacy mode is equivalent to the
way notifications were handled before Proxmox Backup Server 3.2.
The notification system can be configured in the GUI under *Configuration →
Notifications*. The configuration is stored in :ref:`notifications.cfg` and
:ref:`notifications_priv.cfg` - the latter contains sensitive configuration
options such as passwords or authentication tokens for notification targets and
The notification system can be configured in the GUI under
*Configuration → Notifications*. The configuration is stored in
:ref:`notifications.cfg` and :ref:`notifications_priv.cfg` -
the latter contains sensitive configuration options such as
passwords or authentication tokens for notification targets and
can only be read by ``root``.
.. _notification_targets:
@ -40,23 +41,22 @@ Proxmox Backup Server offers multiple types of notification targets.
Sendmail
^^^^^^^^
The sendmail binary is a program commonly found on Unix-like operating systems
that handles the sending of email messages. It is a command-line utility that
allows users and applications to send emails directly from the command line or
from within scripts.
that handles the sending of email messages.
It is a command-line utility that allows users and applications to send emails
directly from the command line or from within scripts.
The sendmail notification target uses the ``sendmail`` binary to send emails to
a list of configured users or email addresses. If a user is selected as a
recipient, the email address configured in user's settings will be used. For
the ``root@pam`` user, this is the email address entered during installation. A
user's email address can be configured in ``Configuration → Access Control →
User Management``. If a user has no associated email address, no email will be
sent.
The sendmail notification target uses the ``sendmail`` binary to send emails to a
list of configured users or email addresses. If a user is selected as a recipient,
the email address configured in user's settings will be used.
For the ``root@pam`` user, this is the email address entered during installation.
A user's email address can be configured in ``Configuration -> Access Control -> User Management``.
If a user has no associated email address, no email will be sent.
.. NOTE:: In standard Proxmox Backup Server installations, the ``sendmail``
binary is provided by Postfix. It may be necessary to configure Postfix so
that it can deliver mails correctly - for example by setting an external
mail relay (smart host). In case of failed delivery, check the system logs
for messages logged by the Postfix daemon.
.. NOTE:: In standard Proxmox Backup Server installations, the ``sendmail`` binary is provided by
Postfix. It may be necessary to configure Postfix so that it can deliver
mails correctly - for example by setting an external mail relay (smart host).
In case of failed delivery, check the system logs for messages logged by
the Postfix daemon.
See :ref:`notifications.cfg` for all configuration options.
@ -64,13 +64,13 @@ See :ref:`notifications.cfg` for all configuration options.
SMTP
^^^^
SMTP notification targets can send emails directly to an SMTP mail relay. This
target does not use the system's MTA to deliver emails. Similar to sendmail
targets, if a user is selected as a recipient, the user's configured email
address will be used.
SMTP notification targets can send emails directly to an SMTP mail relay.
This target does not use the system's MTA to deliver emails.
Similar to sendmail targets, if a user is selected as a recipient, the user's configured
email address will be used.
.. NOTE:: Unlike sendmail targets, SMTP targets do not have any queuing/retry
mechanism in case of a failed mail delivery.
.. NOTE:: Unlike sendmail targets, SMTP targets do not have any queuing/retry mechanism
in case of a failed mail delivery.
See :ref:`notifications.cfg` for all configuration options.
@ -78,13 +78,10 @@ See :ref:`notifications.cfg` for all configuration options.
Gotify
^^^^^^
`Gotify <http://gotify.net>`_ is an open-source self-hosted notification server
that allows you to send push notifications to various devices and applications.
It provides a simple API and web interface, making it easy to integrate with
different platforms and services.
.. NOTE:: Gotify targets will respect the HTTP proxy settings from
Configuration → Other → HTTP proxy
`Gotify <http://gotify.net>`_ is an open-source self-hosted notification server that
allows you to send push notifications to various devices and
applications. It provides a simple API and web interface, making it easy to
integrate with different platforms and services.
See :ref:`notifications.cfg` for all configuration options.
@ -95,28 +92,27 @@ Webhook notification targets perform HTTP requests to a configurable URL.
The following configuration options are available:
* ``url``: The URL to which to perform the HTTP requests. Supports templating
to inject message contents, metadata and secrets.
* ``url``: The URL to which to perform the HTTP requests.
Supports templating to inject message contents, metadata and secrets.
* ``method``: HTTP Method to use (POST/PUT/GET)
* ``header``: Array of HTTP headers that should be set for the request.
Supports templating to inject message contents, metadata and secrets.
* ``body``: HTTP body that should be sent. Supports templating to inject
message contents, metadata and secrets.
* ``secret``: Array of secret key-value pairs. These will be stored in a
protected configuration file only readable by root. Secrets can be
* ``body``: HTTP body that should be sent.
Supports templating to inject message contents, metadata and secrets.
* ``secret``: Array of secret key-value pairs. These will be stored in
a protected configuration file only readable by root. Secrets can be
accessed in body/header/URL templates via the ``secrets`` namespace.
* ``comment``: Comment for this target.
For configuration options that support templating, the `Handlebars
<https://handlebarsjs.com>`_ syntax can be used to access the following
properties:
For configuration options that support templating, the
`Handlebars <https://handlebarsjs.com>`_ syntax can be used to
access the following properties:
* ``{{ title }}``: The rendered notification title
* ``{{ message }}``: The rendered notification body
* ``{{ severity }}``: The severity of the notification (``info``, ``notice``,
``warning``, ``error``, ``unknown``)
* ``{{ timestamp }}``: The notification's timestamp as a UNIX epoch (in
seconds).
* ``{{ timestamp }}``: The notification's timestamp as a UNIX epoch (in seconds).
* ``{{ fields.<name> }}``: Sub-namespace for any metadata fields of the
notification. For instance, ``fields.type`` contains the notification
type - for all available fields refer to :ref:`notification_events`.
@ -132,10 +128,6 @@ For convenience, the following helpers are available:
to pass a whole sub-namespace (e.g. ``fields``) as a part of a JSON payload
(e.g. ``{{ json fields }}``).
.. NOTE:: Webhook targets will respect the HTTP proxy settings from
Configuration → Other → HTTP proxy
Example - ntfy.sh
"""""""""""""""""
@ -198,19 +190,20 @@ Example - Slack
Notification Matchers
---------------------
Notification matchers route notifications to notification targets based on
their matching rules. These rules can match certain properties of a
notification, such as the timestamp (``match-calendar``), the severity of the
notification (``match-severity``) or metadata fields (``match-field``). If a
notification is matched by a matcher, all targets configured for the matcher
will receive the notification.
Notification matchers route notifications to notification targets based
on their matching rules. These rules can match certain properties of a
notification, such as the timestamp (``match-calendar``), the severity of
the notification (``match-severity``) or metadata fields (``match-field``).
If a notification is matched by a matcher, all targets configured for the
matcher will receive the notification.
An arbitrary number of matchers can be created, each with their own
matching rules and targets to notify. Every target is notified at most once for
every notification, even if the target is used in multiple matchers.
matching rules and targets to notify.
Every target is notified at most once for every notification, even if
the target is used in multiple matchers.
A matcher without rules matches any notification; the configured targets will
always be notified.
A matcher without rules matches any notification; the configured targets
will always be notified.
See :ref:`notifications.cfg` for all configuration options.
@ -227,24 +220,20 @@ Examples:
Field Matching Rules
^^^^^^^^^^^^^^^^^^^^
Notifications have a selection of metadata fields that can be matched. When
using ``exact`` as a matching mode, a ``,`` can be used as a separator. The
matching rule then matches if the metadata field has **any** of the specified
Notifications have a selection of metadata fields that can be matched.
When using ``exact`` as a matching mode, a ``,`` can be used as a separator.
The matching rule then matches if the metadata field has **any** of the specified
values.
Examples:
* ``match-field exact:type=gc`` Only match notifications for garbage collection
jobs
* ``match-field exact:type=prune,verify`` Match prune job and verification job
notifications.
* ``match-field regex:datastore=^backup-.*$`` Match any datastore starting with
``backup``.
* ``match-field exact:type=gc`` Only match notifications for garbage collection jobs
* ``match-field exact:type=prune,verify`` Match prune job and verification job notifications.
* ``match-field regex:datastore=^backup-.*$`` Match any datastore starting with ``backup``.
If a notification does not have the matched field, the rule will **not** match.
For instance, a ``match-field regex:datastore=.*`` directive will match any
notification that has a ``datastore`` metadata field, but will not match if the
field does not exist.
For instance, a ``match-field regex:datastore=.*`` directive will match any notification that has
a ``datastore`` metadata field, but will not match if the field does not exist.
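Putting a few of these rules together, a matcher entry in ``notifications.cfg``
might look roughly like the following sketch; the matcher and target names are
made up, and the exact keys should be verified against :ref:`notifications.cfg`:

::

    matcher: maintenance-failures
        match-severity error
        match-field exact:type=gc,prune,verify
        target admin-mail
        comment Route failed maintenance jobs to the admins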
Severity Matching Rules
^^^^^^^^^^^^^^^^^^^^^^^
@ -263,9 +252,9 @@ The following severities are in use:
Notification Events
-------------------
The following table contains a list of all notification events in Proxmox
Backup server, their type, severity and additional metadata fields. ``type`` as
well as any other metadata field may be used in ``match-field`` match rules.
The following table contains a list of all notification events in Proxmox Backup server, their
type, severity and additional metadata fields. ``type`` as well as any other metadata field
may be used in ``match-field`` match rules.
================================ ==================== ========== ==============================================================
Event ``type`` Severity Metadata fields (in addition to ``type``)
@ -285,8 +274,8 @@ Verification job failure ``verification`` ``error`` ``datastore``,
Verification job success ``verification`` ``info`` ``datastore``, ``hostname``, ``job-id``
================================ ==================== ========== ==============================================================
The following table contains a description of all used metadata fields. All of
these can be used in ``match-field`` match rules.
The following table contains a description of all used metadata fields. All of these
can be used in ``match-field`` match rules.
==================== ===================================
Metadata field Description
@ -303,45 +292,45 @@ Metadata field Description
System Mail Forwarding
----------------------
Certain local system daemons, such as ``smartd``, send notification emails to
the local ``root`` user. Proxmox Backup Server will feed these mails into the
notification system as a notification of type ``system-mail`` and with severity
``unknown``.
Certain local system daemons, such as ``smartd``, send notification emails
to the local ``root`` user. Proxmox Backup Server will feed these mails
into the notification system as a notification of type ``system-mail``
and with severity ``unknown``.
When the email is forwarded to a sendmail target, the mail's content and
headers are forwarded as-is. For all other targets, the system tries to extract
both a subject line and the main text body from the email content. In instances
where emails solely consist of HTML content, they will be transformed into
plain text format during this process.
When the email is forwarded to a sendmail target, the mail's content and headers
are forwarded as-is. For all other targets,
the system tries to extract both a subject line and the main text body
from the email content. In instances where emails solely consist of HTML
content, they will be transformed into plain text format during this process.
Permissions
-----------
In order to modify/view the configuration for notification targets, the
``Sys.Modify/Sys.Audit`` permissions are required for the
In order to modify/view the configuration for notification targets,
the ``Sys.Modify/Sys.Audit`` permissions are required for the
``/system/notifications`` ACL node.
.. _notification_mode:
Notification Mode
-----------------
Datastores and tape backup/restore job configuration have a
``notification-mode`` option which can have one of two values:
Datastores and tape backup/restore job configuration have a ``notification-mode``
option which can have one of two values:
* ``legacy-sendmail``: Send notification emails via the system's ``sendmail``
command. The notification system will be bypassed and any configured
targets/matchers will be ignored. This mode is equivalent to the notification
behavior for versions before Proxmox Backup Server 3.2.
* ``legacy-sendmail``: Send notification emails via the system's ``sendmail`` command.
The notification system will be bypassed and any configured targets/matchers will be ignored.
This mode is equivalent to the notification behavior for versions before
Proxmox Backup Server 3.2.
* ``notification-system``: Use the new, flexible notification system.
If the ``notification-mode`` option is not set, Proxmox Backup Server will
default to ``legacy-sendmail``.
If the ``notification-mode`` option is not set, Proxmox Backup Server will default
to ``legacy-sendmail``.
Starting with Proxmox Backup Server 3.2, a datastore created in the UI will
automatically opt in to the new notification system. If the datastore is
created via the API or the ``proxmox-backup-manager`` CLI, the
``notification-mode`` option has to be set explicitly to
``notification-system`` if the notification system shall be used.
automatically opt in to the new notification system. If the datastore is created
via the API or the ``proxmox-backup-manager`` CLI, the ``notification-mode``
option has to be set explicitly to ``notification-system`` if the
notification system shall be used.
The ``legacy-sendmail`` mode might be removed in a later release of
Proxmox Backup Server.
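A hedged example of explicitly opting a datastore into the new system from the
command line (the datastore name is a placeholder; check
``proxmox-backup-manager datastore update --help`` for the exact option name):

.. code-block:: console

    # proxmox-backup-manager datastore update <storename> --notification-mode notification-system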
@ -350,12 +339,12 @@ Settings for ``legacy-sendmail`` notification mode
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
If ``notification-mode`` is set to ``legacy-sendmail``, Proxmox Backup Server
will send notification emails via the system's ``sendmail`` command to the
email address configured for the user set in the ``notify-user`` option
will send notification emails via the system's ``sendmail`` command to the email
address configured for the user set in the ``notify-user`` option
(falling back to ``root@pam`` if not set).
For datastores, you can also change the level of notifications received per
task type via the ``notify`` option.
For datastores, you can also change the level of notifications received per task
type via the ``notify`` option.
* Always: send a notification for any scheduled task, independent of the
outcome
@ -366,23 +355,3 @@ task type via the ``notify`` option.
The ``notify-user`` and ``notify`` options are ignored if ``notification-mode``
is set to ``notification-system``.
Overriding Notification Templates
---------------------------------
Proxmox Backup Server uses Handlebars templates to render notifications. The
original templates provided by Proxmox Backup Server are stored in
``/usr/share/proxmox-backup/templates/default/``.
Notification templates can be overridden by providing a custom template file in
the override directory at
``/etc/proxmox-backup/notification-templates/default/``. When rendering a
notification of a given type, Proxmox Backup Server will first attempt to load
a template from the override directory. If this one does not exist or fails to
render, the original template will be used.
The template files follow the naming convention of
``<type>-<body|subject>.txt.hbs``. For instance, the file
``gc-err-body.txt.hbs`` contains the template for rendering notifications for
garbage collection errors, while ``package-updates-subject.txt.hbs`` is used to
render the subject line of notifications for available package updates.
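For example, to override the garbage collection error notification body, the
shipped template can be copied into the override directory and edited there:

.. code-block:: console

    # mkdir -p /etc/proxmox-backup/notification-templates/default
    # cp /usr/share/proxmox-backup/templates/default/gc-err-body.txt.hbs \
        /etc/proxmox-backup/notification-templates/default/
    # editor /etc/proxmox-backup/notification-templates/default/gc-err-body.txt.hbs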


@ -126,8 +126,7 @@ Ext.onReady(function() {
if (data.mark !== 'keep') {
return `<div style="text-decoration: line-through;">${text}</div>`;
}
let pruneList = this.up('prunesimulatorPruneList');
if (pruneList.useColors) {
if (me.useColors) {
let bgColor = COLORS[data.keepName];
let textColor = TEXT_COLORS[data.keepName];
return `<div style="background-color: ${bgColor};color: ${textColor};">${text}</div>`;
@ -354,17 +353,12 @@ Ext.onReady(function() {
specValues.forEach(function(value) {
if (value.includes('..')) {
let [start, end] = value.split('..');
let step = 1;
if (end.includes('/')) {
[end, step] = end.split('/');
step = assertValid(step);
}
start = assertValid(start);
end = assertValid(end);
if (start > end) {
throw "interval start is bigger than interval end '" + start + " > " + end + "'";
}
for (let i = start; i <= end; i += step) {
for (let i = start; i <= end; i++) {
matches[i] = 1;
}
} else if (value.includes('/')) {

@ -209,7 +209,7 @@ allowed to be nested.
Removable datastores are created on the device with the given relative path that is specified
on creation. In order to use a datastore on multiple PBS instances, it has to be created on one,
and added with ``Reuse existing datastore`` checked on the others. The path you set on creation
is how multiple datastores on a single device are identified. So when adding on a new PBS instance,
it has to match what was set on creation.
.. code-block:: console
@ -231,7 +231,7 @@ All datastores present on a device can be listed using ``proxmox-backup-debug``.
Verify, Prune and Garbage Collection jobs are skipped if the removable
datastore is not mounted when they are scheduled. Sync jobs start, but fail
with an error saying the datastore was not mounted. The reason is that syncs
not happening as scheduled should at least be noticeable.
Managing Datastores
^^^^^^^^^^^^^^^^^^^
@ -439,25 +439,6 @@ There are some tuning related options for the datastore that are more advanced:
# proxmox-backup-manager datastore update <storename> --tuning 'sync-level=filesystem'
* ``gc-atime-safety-check``: Datastore GC atime update safety check:
You can explicitly `enable` or `disable` the atime update safety check
performed on datastore creation and garbage collection. This checks if atime
updates are handled as expected by garbage collection and therefore avoids the
risk of data loss by unexpected filesystem behavior. It is recommended to set
this to enabled, which is also the default value.
* ``gc-atime-cutoff``: Datastore GC atime cutoff for chunk cleanup:
This allows setting the cutoff for which a chunk is still considered in-use
during phase 2 of garbage collection (given no older writers). If the
``atime`` of the chunk is outside the range, it will be removed.
* ``gc-cache-capacity``: Datastore GC least recently used cache capacity:
Allows controlling the cache capacity used to keep track of chunks for which
the access time has already been updated during phase 1 of garbage collection.
This avoids multiple updates and increases GC runtime performance. Higher
values can reduce GC runtime at the cost of increased memory usage; setting the
value to 0 disables caching.
If you want to set multiple tuning options simultaneously, you can separate them
with a comma, like this:
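(A sketch; the concrete option values below are only illustrative.)

.. code-block:: console

  # proxmox-backup-manager datastore update <storename> --tuning 'sync-level=filesystem,gc-atime-safety-check=1'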

@ -61,7 +61,6 @@ In general, LTO tapes offer the following advantages:
Note that `Proxmox Backup Server` already stores compressed data, so using the
tape compression feature has no advantage.
.. _tape-supported-hardware:
Supported Hardware
------------------
@ -970,8 +969,6 @@ You can restore from a tape even without an existing catalog, but only the
whole media set. If you do this, the catalog will be automatically created.
.. _tape_key_management:
Encryption Key Management
~~~~~~~~~~~~~~~~~~~~~~~~~
@ -1183,159 +1180,3 @@ In combination with fitting prune settings and tape backup schedules, this
achieves long-term storage of some backups, while keeping the recent
backups on smaller media sets that expire roughly every 4 weeks (that is, three
plus the current week).
Disaster Recovery
-----------------
.. _Command-line Tools: command-line-tools.html
In case of major disasters, important data, or even whole servers might be
destroyed or at least damaged up to the point where everything - sometimes
including the backup server - has to be restored from a backup. For such cases,
the following step-by-step guide will help you to set up the Proxmox Backup
Server and restore everything from tape backups.
The following guide will explain the necessary steps using both the web GUI and
the command line tools. For an overview of the command line tools, see
`Command-line Tools`_.
Setting Up a Datastore
~~~~~~~~~~~~~~~~~~~~~~
.. _proxmox-backup-manager: proxmox-backup-manager/man1.html
.. _Installation: installation.html
After you set up a new Proxmox Backup Server, as outlined in the `Installation`_
chapter, first set up a datastore so a tape can be restored to it:
#. Go to **Administration -> Storage / Disks** and make sure that the disk that
will be used as a datastore shows up.
#. Under the **Directory** or **ZFS** tabs, you can either choose to create a
directory or create a ZFS ``zpool``, respectively. Here you can also directly
add the newly created directory or ZFS ``zpool`` as a datastore.
Alternatively, the `proxmox-backup-manager`_ can be used to perform the same
tasks. For more information, check the :ref:`datastore_intro` documentation.
Setting Up the Tape Drive
~~~~~~~~~~~~~~~~~~~~~~~~~
#. Make sure you have a properly working tape drive and/or changer matching the
medium you want to restore from.
#. Connect the tape changer(s) and the tape drive(s) to the backup server. These
should be detected automatically by Linux. You can get a list of available
drives using:
.. code-block:: console
# proxmox-tape drive scan
┌────────────────────────────────┬────────┬─────────────┬────────┐
│ path │ vendor │ model │ serial │
╞════════════════════════════════╪════════╪═════════════╪════════╡
│ /dev/tape/by-id/scsi-12345-sg │ IBM │ ULT3580-TD4 │ 12345 │
└────────────────────────────────┴────────┴─────────────┴────────┘
You can get a list of available changers with:
.. code-block:: console
# proxmox-tape changer scan
┌─────────────────────────────┬─────────┬──────────────┬────────┐
│ path │ vendor │ model │ serial │
╞═════════════════════════════╪═════════╪══════════════╪════════╡
│ /dev/tape/by-id/scsi-CC2C52 │ Quantum │ Superloader3 │ CC2C52 │
└─────────────────────────────┴─────────┴──────────────┴────────┘
For more information, please read the chapters
on :ref:`tape_changer_config` and :ref:`tape_drive_config`.
#. If you have a tape changer, go to the web interface of the Proxmox Backup
Server, go to **Tape Backup -> Changers** and add it. For examples using the
command line, read the chapter on :ref:`tape_changer_config`. If the changer
has been detected correctly by Linux, the changer should show up in the list.
#. In the web interface, go to **Tape Backup -> Drives** and add the tape drive
that will be used to read the tapes. For examples using the command line,
read the chapter on :ref:`tape_drive_config`. If the tape drive has been
detected correctly by Linux, the drive should show up in the list. If the
drive also has a tape changer, make sure to select the changer as well and
assign it the correct drive number.
Restoring Data From the Tape
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. _proxmox-tape: proxmox-tape/man1.html
.. _proxmox-backup-client: proxmox-backup-client/man1.html
.. _Restore: https://pve.proxmox.com/pve-docs/chapter-vzdump.html#vzdump_restore
The following guide will explain the steps necessary to restore data from a
tape, which can be done over either the web GUI or the command line. For details
on the command line, read the documentation on the `proxmox-tape`_ tool.
To restore data from tapes, do the following:
#. Insert the first tape (as displayed on the label) into the tape drive or, if
a tape changer is available, use the tape changer to insert the tape into the
right drive. The web GUI can also be used to load or transfer tapes between
tape drives by selecting the changer.
#. If the backup has been encrypted, the encryption keys need to be restored as
well. In the **Encryption Keys** tab, press **Restore Key**. For more
details or examples that use the command line, read the
:ref:`tape_key_management` chapter.
#. The procedure for restoring data is slightly different depending on whether
you are using a standalone tape drive or a changer:
* For changers, the procedure is simple:
#. Insert all tapes from the media set you want to restore from.
#. Click on the changer in the web GUI, click **Inventory**, make sure
**Restore Catalog** is selected and press OK.
* For standalone drives, the procedure would be:
#. Insert the first tape of the media set.
#. Click **Catalog**.
#. Eject the tape, then repeat the steps for the remaining tapes of the
media set.
#. Go back to **Tape Backup**. In the **Content** tab, press **Restore** and
select the desired media set. Choose the snapshot you want to restore, press
**Next**, select the drive and target datastore and press **Restore**.
#. By going to the datastore where the data has been restored, under the
**Content** tab you should be able to see the restored snapshots. In order to
access the backups from another machine, you will need to configure the
access to the backup server. Go to **Configuration -> Access Control** and
either create a new user, or a new API token (API tokens allow easy
revocation if the token is compromised). Under **Permissions**, add the
desired permissions, e.g. **DatastoreBackup**.
#. You can now perform virtual machine, container or file restores. You now have
the following options:
* If you want to restore files on Linux distributions that are not based on
Proxmox products or you prefer using a command line tool, you can use the
`proxmox-backup-client`_, as explained in the
:ref:`client_restoring_data` chapter. Use the newly created API token to
be able to access the data. You can then restore individual files or
mount an archive to your system.
* If you want to restore virtual machines or containers on a Proxmox VE
server, add the datastore of the backup server as storage and go to
**Backups**. Here you can restore VMs and containers, including their
configuration. For more information on restoring backups in Proxmox VE,
visit the `Restore`_ chapter of the Proxmox VE documentation.

@ -147,7 +147,7 @@ in a single ``pxar`` archive, the latter two modes split data and metadata into
``ppxar`` and ``mpxar`` archives. This is done to allow for fast comparison of
metadata with the previous snapshot, used by the ``metadata`` mode to detect
reusable files. The ``data`` mode refrains from reusing unchanged files by
rechunking the file unconditionally. This mode therefore ensures that no file
changes are missed even if the metadata are unchanged.
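For instance, a host backup using metadata-based change detection could be
created like this (a sketch; ``--change-detection-mode`` also accepts ``data``
and ``legacy``):

.. code-block:: console

  # proxmox-backup-client backup root.pxar:/ --change-detection-mode metadata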
.. NOTE:: ``pxar`` and ``mpxar``/``ppxar`` file formats are different and cannot
@ -298,8 +298,8 @@ will see that the probability of a collision in that scenario is:
For context, in a lottery game of guessing 6 numbers out of 45, the chance to
correctly guess all 6 numbers is only :math:`1.2277 * 10^{-7}`. This means the
chance of a collision is lower than winning 8 such lottery games *in a row*:
:math:`(1.2277 * 10^{-7})^{8} = 5.1623 * 10^{-56}`.
chance of a collision is about the same as winning 13 such lottery games *in a
row*.
In conclusion, it is extremely unlikely that such a collision would occur by
accident in a normal datastore.

@ -16,8 +16,8 @@ User Configuration
choose the realm when you add a new user. Possible realms are:
:pam: Linux PAM standard authentication. Use this if you want to
authenticate as a Linux system user. The user needs to already exist on
the host system.
authenticate as a Linux system user (users need to exist on the
system).
:pbs: Proxmox Backup Server realm. This type stores hashed passwords in
``/etc/proxmox-backup/shadow.json``.
@ -599,32 +599,6 @@ list view in the web UI, or using the command line:
Authentication Realms
---------------------
.. _user_realms_pam:
Linux PAM
~~~~~~~~~
Linux PAM is a framework for system-wide user authentication. These users are
created on the host system with commands such as ``adduser``.
If PAM users exist on the host system, corresponding entries can be added to
Proxmox Backup Server, to allow these users to log in via their system username
and password.
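For example, if a system user ``backup-admin`` already exists on the host, a
matching entry could be created like this (a sketch; the user name is only a
placeholder):

.. code-block:: console

  # proxmox-backup-manager user create backup-admin@pam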
.. _user_realms_pbs:
Proxmox Backup authentication server
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This is a Unix-like password store, which stores hashed passwords in
``/etc/proxmox-backup/shadow.json``. Passwords are hashed using the SHA-256
hashing algorithm.
This is the most convenient realm for small-scale (or even mid-scale)
installations, where users do not need access to anything outside of Proxmox
Backup Server. In this case, users are fully managed by Proxmox Backup Server
and are able to change their own passwords via the GUI.
.. _user_realms_ldap:
LDAP

@ -152,7 +152,7 @@ not commonly used in your country.
:alt: Proxmox Backup Server Installer - Password and email configuration
Next the password of the superuser (``root``) and an email address needs to be
specified. The password must consist of at least 8 characters. It's highly
specified. The password must consist of at least 5 characters. It's highly
recommended to use a stronger password. Some guidelines are:
|

@ -10,7 +10,7 @@ use tokio::net::TcpStream;
// Simple H2 client to test H2 download speed using h2server.rs
struct Process {
body: h2::legacy::RecvStream,
body: h2::RecvStream,
trailers: bool,
bytes: usize,
}
@ -50,11 +50,11 @@ impl Future for Process {
}
fn send_request(
mut client: h2::legacy::client::SendRequest<bytes::Bytes>,
mut client: h2::client::SendRequest<bytes::Bytes>,
) -> impl Future<Output = Result<usize, Error>> {
println!("sending request");
let request = hyper::http::Request::builder()
let request = http::Request::builder()
.uri("http://localhost/")
.body(())
.unwrap();
@ -78,7 +78,7 @@ async fn run() -> Result<(), Error> {
let conn = TcpStream::connect(std::net::SocketAddr::from(([127, 0, 0, 1], 8008))).await?;
conn.set_nodelay(true).unwrap();
let (client, h2) = h2::legacy::client::Builder::new()
let (client, h2) = h2::client::Builder::new()
.initial_connection_window_size(1024 * 1024 * 1024)
.initial_window_size(1024 * 1024 * 1024)
.max_frame_size(4 * 1024 * 1024)

@ -10,7 +10,7 @@ use tokio::net::TcpStream;
// Simple H2 client to test H2 download speed using h2s-server.rs
struct Process {
body: h2::legacy::RecvStream,
body: h2::RecvStream,
trailers: bool,
bytes: usize,
}
@ -50,11 +50,11 @@ impl Future for Process {
}
fn send_request(
mut client: h2::legacy::client::SendRequest<bytes::Bytes>,
mut client: h2::client::SendRequest<bytes::Bytes>,
) -> impl Future<Output = Result<usize, Error>> {
println!("sending request");
let request = hyper::http::Request::builder()
let request = http::Request::builder()
.uri("http://localhost/")
.body(())
.unwrap();
@ -94,7 +94,7 @@ async fn run() -> Result<(), Error> {
.await
.map_err(|err| format_err!("connect failed - {}", err))?;
let (client, h2) = h2::legacy::client::Builder::new()
let (client, h2) = h2::client::Builder::new()
.initial_connection_window_size(1024 * 1024 * 1024)
.initial_window_size(1024 * 1024 * 1024)
.max_frame_size(4 * 1024 * 1024)

@ -8,19 +8,6 @@ use tokio::net::{TcpListener, TcpStream};
use pbs_buildcfg::configdir;
#[derive(Clone, Copy)]
struct H2SExecutor;
impl<Fut> hyper::rt::Executor<Fut> for H2SExecutor
where
Fut: Future + Send + 'static,
Fut::Output: Send,
{
fn execute(&self, fut: Fut) {
tokio::spawn(fut);
}
}
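// Note (illustrative): hyper's http2 server `Builder` needs an executor to
// spawn per-connection tasks; this wrapper simply forwards every future to
// `tokio::spawn`.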
fn main() -> Result<(), Error> {
proxmox_async::runtime::main(run())
}
@ -63,11 +50,12 @@ async fn handle_connection(socket: TcpStream, acceptor: Arc<SslAcceptor>) -> Res
stream.as_mut().accept().await?;
let mut http = hyper::server::conn::http2::Builder::new(H2SExecutor);
let mut http = hyper::server::conn::Http::new();
http.http2_only(true);
// increase window size: todo - find optimal size
let max_window_size = (1 << 31) - 2;
http.initial_stream_window_size(max_window_size);
http.initial_connection_window_size(max_window_size);
http.http2_initial_stream_window_size(max_window_size);
http.http2_initial_connection_window_size(max_window_size);
let service = hyper::service::service_fn(|_req: Request<Body>| {
println!("Got request");
@ -75,11 +63,8 @@ async fn handle_connection(socket: TcpStream, acceptor: Arc<SslAcceptor>) -> Res
let body = Body::from(buffer);
let response = Response::builder()
.status(hyper::http::StatusCode::OK)
.header(
hyper::http::header::CONTENT_TYPE,
"application/octet-stream",
)
.status(http::StatusCode::OK)
.header(http::header::CONTENT_TYPE, "application/octet-stream")
.body(body)
.unwrap();
future::ok::<_, Error>(response)

@ -1,24 +1,9 @@
use std::future::Future;
use anyhow::Error;
use futures::*;
use hyper::{Body, Request, Response};
use tokio::net::{TcpListener, TcpStream};
#[derive(Clone, Copy)]
struct H2Executor;
impl<Fut> hyper::rt::Executor<Fut> for H2Executor
where
Fut: Future + Send + 'static,
Fut::Output: Send,
{
fn execute(&self, fut: Fut) {
tokio::spawn(fut);
}
}
fn main() -> Result<(), Error> {
proxmox_async::runtime::main(run())
}
@ -41,11 +26,12 @@ async fn run() -> Result<(), Error> {
async fn handle_connection(socket: TcpStream) -> Result<(), Error> {
socket.set_nodelay(true).unwrap();
let mut http = hyper::server::conn::http2::Builder::new(H2Executor);
let mut http = hyper::server::conn::Http::new();
http.http2_only(true);
// increase window size: todo - find optimal size
let max_window_size = (1 << 31) - 2;
http.initial_stream_window_size(max_window_size);
http.initial_connection_window_size(max_window_size);
http.http2_initial_stream_window_size(max_window_size);
http.http2_initial_connection_window_size(max_window_size);
let service = hyper::service::service_fn(|_req: Request<Body>| {
println!("Got request");
@ -53,11 +39,8 @@ async fn handle_connection(socket: TcpStream) -> Result<(), Error> {
let body = Body::from(buffer);
let response = Response::builder()
.status(hyper::http::StatusCode::OK)
.header(
hyper::http::header::CONTENT_TYPE,
"application/octet-stream",
)
.status(http::StatusCode::OK)
.header(http::header::CONTENT_TYPE, "application/octet-stream")
.body(body)
.unwrap();
future::ok::<_, Error>(response)

pbs-api-types/Cargo.toml Normal file
@ -0,0 +1,24 @@
[package]
name = "pbs-api-types"
version = "0.1.0"
authors.workspace = true
edition.workspace = true
description = "general API type helpers for PBS"
[dependencies]
anyhow.workspace = true
const_format.workspace = true
hex.workspace = true
percent-encoding.workspace = true
regex.workspace = true
serde.workspace = true
serde_plain.workspace = true
proxmox-auth-api = { workspace = true, features = [ "api-types" ] }
proxmox-apt-api-types.workspace = true
proxmox-human-byte.workspace = true
proxmox-lang.workspace=true
proxmox-schema = { workspace = true, features = [ "api-macro" ] }
proxmox-serde.workspace = true
proxmox-time.workspace = true
proxmox-uuid = { workspace = true, features = [ "serde" ] }

pbs-api-types/src/acl.rs Normal file
@ -0,0 +1,332 @@
use std::str::FromStr;
use const_format::concatcp;
use serde::de::{value, IntoDeserializer};
use serde::{Deserialize, Serialize};
use proxmox_lang::constnamedbitmap;
use proxmox_schema::{
api, const_regex, ApiStringFormat, BooleanSchema, EnumEntry, Schema, StringSchema,
};
use crate::PROXMOX_SAFE_ID_REGEX_STR;
const_regex! {
pub ACL_PATH_REGEX = concatcp!(r"^(?:/|", r"(?:/", PROXMOX_SAFE_ID_REGEX_STR, ")+", r")$");
}
// define Privilege bitfield
constnamedbitmap! {
/// Contains a list of privilege name to privilege value mappings.
///
/// The names are used when displaying/persisting privileges anywhere, the values are used to
/// allow easy matching of privileges as bitflags.
PRIVILEGES: u64 => {
/// Sys.Audit allows knowing about the system and its status
PRIV_SYS_AUDIT("Sys.Audit");
/// Sys.Modify allows modifying system-level configuration
PRIV_SYS_MODIFY("Sys.Modify");
/// Sys.PowerManagement allows powering off/rebooting/... the system
PRIV_SYS_POWER_MANAGEMENT("Sys.PowerManagement");
/// Datastore.Audit allows knowing about a datastore,
/// including reading the configuration entry and listing its contents
PRIV_DATASTORE_AUDIT("Datastore.Audit");
/// Datastore.Allocate allows creating or deleting datastores
PRIV_DATASTORE_ALLOCATE("Datastore.Allocate");
/// Datastore.Modify allows modifying a datastore and its contents
PRIV_DATASTORE_MODIFY("Datastore.Modify");
/// Datastore.Read allows reading arbitrary backup contents
PRIV_DATASTORE_READ("Datastore.Read");
/// Allows verifying a datastore
PRIV_DATASTORE_VERIFY("Datastore.Verify");
/// Datastore.Backup allows Datastore.Read|Verify and creating new snapshots,
/// but also requires backup ownership
PRIV_DATASTORE_BACKUP("Datastore.Backup");
/// Datastore.Prune allows deleting snapshots,
/// but also requires backup ownership
PRIV_DATASTORE_PRUNE("Datastore.Prune");
/// Permissions.Modify allows modifying ACLs
PRIV_PERMISSIONS_MODIFY("Permissions.Modify");
/// Remote.Audit allows reading remote.cfg and sync.cfg entries
PRIV_REMOTE_AUDIT("Remote.Audit");
/// Remote.Modify allows modifying remote.cfg
PRIV_REMOTE_MODIFY("Remote.Modify");
/// Remote.Read allows reading data from a configured `Remote`
PRIV_REMOTE_READ("Remote.Read");
/// Remote.DatastoreBackup allows creating new snapshots on remote datastores
PRIV_REMOTE_DATASTORE_BACKUP("Remote.DatastoreBackup");
/// Remote.DatastoreModify allows to modify remote datastores
PRIV_REMOTE_DATASTORE_MODIFY("Remote.DatastoreModify");
/// Remote.DatastorePrune allows deleting snapshots on remote datastores
PRIV_REMOTE_DATASTORE_PRUNE("Remote.DatastorePrune");
/// Sys.Console allows access to the system's console
PRIV_SYS_CONSOLE("Sys.Console");
/// Tape.Audit allows reading tape backup configuration and status
PRIV_TAPE_AUDIT("Tape.Audit");
/// Tape.Modify allows modifying tape backup configuration
PRIV_TAPE_MODIFY("Tape.Modify");
/// Tape.Write allows writing tape media
PRIV_TAPE_WRITE("Tape.Write");
/// Tape.Read allows reading tape backup configuration and media contents
PRIV_TAPE_READ("Tape.Read");
/// Realm.Allocate allows viewing, creating, modifying and deleting realms
PRIV_REALM_ALLOCATE("Realm.Allocate");
}
}
pub fn privs_to_priv_names(privs: u64) -> Vec<&'static str> {
PRIVILEGES
.iter()
.fold(Vec::new(), |mut priv_names, (name, value)| {
if value & privs != 0 {
priv_names.push(name);
}
priv_names
})
}
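// Illustrative example: for a role defined below, this helper recovers the
// privilege names, e.g. `privs_to_priv_names(ROLE_DATASTORE_READER)` yields
// `["Datastore.Audit", "Datastore.Read", "Datastore.Verify"]` (in `PRIVILEGES`
// declaration order).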
/// Admin always has all privileges. It can do everything except a few actions
/// which are limited to the `root@pam` superuser
pub const ROLE_ADMIN: u64 = u64::MAX;
/// NoAccess can be used to remove privileges from specific (sub-)paths
pub const ROLE_NO_ACCESS: u64 = 0;
#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Audit can view configuration and status information, but not modify it.
pub const ROLE_AUDIT: u64 = 0
| PRIV_SYS_AUDIT
| PRIV_DATASTORE_AUDIT;
#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Datastore.Admin can do anything on the datastore.
pub const ROLE_DATASTORE_ADMIN: u64 = 0
| PRIV_DATASTORE_AUDIT
| PRIV_DATASTORE_MODIFY
| PRIV_DATASTORE_READ
| PRIV_DATASTORE_VERIFY
| PRIV_DATASTORE_BACKUP
| PRIV_DATASTORE_PRUNE;
#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Datastore.Reader can read/verify datastore content and do restore
pub const ROLE_DATASTORE_READER: u64 = 0
| PRIV_DATASTORE_AUDIT
| PRIV_DATASTORE_VERIFY
| PRIV_DATASTORE_READ;
#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Datastore.Backup can do backup and restore, but no prune.
pub const ROLE_DATASTORE_BACKUP: u64 = 0
| PRIV_DATASTORE_BACKUP;
#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Datastore.PowerUser can do backup, restore, and prune.
pub const ROLE_DATASTORE_POWERUSER: u64 = 0
| PRIV_DATASTORE_PRUNE
| PRIV_DATASTORE_BACKUP;
#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Datastore.Audit can audit the datastore.
pub const ROLE_DATASTORE_AUDIT: u64 = 0
| PRIV_DATASTORE_AUDIT;
#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Remote.Audit can audit the remote
pub const ROLE_REMOTE_AUDIT: u64 = 0
| PRIV_REMOTE_AUDIT;
#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Remote.Admin can do anything on the remote.
pub const ROLE_REMOTE_ADMIN: u64 = 0
| PRIV_REMOTE_AUDIT
| PRIV_REMOTE_MODIFY
| PRIV_REMOTE_READ;
#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Remote.SyncOperator can do read and prune on the remote.
pub const ROLE_REMOTE_SYNC_OPERATOR: u64 = 0
| PRIV_REMOTE_AUDIT
| PRIV_REMOTE_READ;
#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Remote.SyncPushOperator can read and push snapshots to the remote.
pub const ROLE_REMOTE_SYNC_PUSH_OPERATOR: u64 = 0
| PRIV_REMOTE_AUDIT
| PRIV_REMOTE_DATASTORE_BACKUP;
#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Remote.DatastorePowerUser can read and push snapshots to the remote, and prune owned snapshots
/// and groups but not create or remove namespaces.
pub const ROLE_REMOTE_DATASTORE_POWERUSER: u64 = 0
| PRIV_REMOTE_AUDIT
| PRIV_REMOTE_DATASTORE_BACKUP
| PRIV_REMOTE_DATASTORE_PRUNE;
#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Remote.DatastoreAdmin can read and push snapshots to the remote, prune owned snapshots
/// and groups, as well as create or remove namespaces.
pub const ROLE_REMOTE_DATASTORE_ADMIN: u64 = 0
| PRIV_REMOTE_AUDIT
| PRIV_REMOTE_DATASTORE_BACKUP
| PRIV_REMOTE_DATASTORE_MODIFY
| PRIV_REMOTE_DATASTORE_PRUNE;
#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Tape.Audit can audit the tape backup configuration and media content
pub const ROLE_TAPE_AUDIT: u64 = 0
| PRIV_TAPE_AUDIT;
#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Tape.Admin can do anything on the tape backup
pub const ROLE_TAPE_ADMIN: u64 = 0
| PRIV_TAPE_AUDIT
| PRIV_TAPE_MODIFY
| PRIV_TAPE_READ
| PRIV_TAPE_WRITE;
#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Tape.Operator can do tape backup and restore (but no configuration changes)
pub const ROLE_TAPE_OPERATOR: u64 = 0
| PRIV_TAPE_AUDIT
| PRIV_TAPE_READ
| PRIV_TAPE_WRITE;
#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Tape.Reader can read and inspect tape content
pub const ROLE_TAPE_READER: u64 = 0
| PRIV_TAPE_AUDIT
| PRIV_TAPE_READ;
/// NoAccess can be used to remove privileges from specific (sub-)paths
pub const ROLE_NAME_NO_ACCESS: &str = "NoAccess";
#[api(
type_text: "<role>",
)]
#[repr(u64)]
#[derive(Serialize, Deserialize)]
/// Enum representing roles via their [PRIVILEGES] combination.
///
/// Since privileges are implemented as bitflags, each unique combination of privileges maps to a
/// single, unique `u64` value that is used in this enum definition.
pub enum Role {
/// Administrator
Admin = ROLE_ADMIN,
/// Auditor
Audit = ROLE_AUDIT,
/// Disable Access
NoAccess = ROLE_NO_ACCESS,
/// Datastore Administrator
DatastoreAdmin = ROLE_DATASTORE_ADMIN,
/// Datastore Reader (inspect datastore content and do restores)
DatastoreReader = ROLE_DATASTORE_READER,
/// Datastore Backup (backup and restore owned backups)
DatastoreBackup = ROLE_DATASTORE_BACKUP,
/// Datastore PowerUser (backup, restore and prune owned backup)
DatastorePowerUser = ROLE_DATASTORE_POWERUSER,
/// Datastore Auditor
DatastoreAudit = ROLE_DATASTORE_AUDIT,
/// Remote Auditor
RemoteAudit = ROLE_REMOTE_AUDIT,
/// Remote Administrator
RemoteAdmin = ROLE_REMOTE_ADMIN,
/// Synchronization Operator
RemoteSyncOperator = ROLE_REMOTE_SYNC_OPERATOR,
/// Synchronisation Operator (push direction)
RemoteSyncPushOperator = ROLE_REMOTE_SYNC_PUSH_OPERATOR,
/// Remote Datastore Prune
RemoteDatastorePowerUser = ROLE_REMOTE_DATASTORE_POWERUSER,
/// Remote Datastore Admin
RemoteDatastoreAdmin = ROLE_REMOTE_DATASTORE_ADMIN,
/// Tape Auditor
TapeAudit = ROLE_TAPE_AUDIT,
/// Tape Administrator
TapeAdmin = ROLE_TAPE_ADMIN,
/// Tape Operator
TapeOperator = ROLE_TAPE_OPERATOR,
/// Tape Reader
TapeReader = ROLE_TAPE_READER,
}
impl FromStr for Role {
type Err = value::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
Self::deserialize(s.into_deserializer())
}
}
pub const ACL_PATH_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&ACL_PATH_REGEX);
pub const ACL_PATH_SCHEMA: Schema = StringSchema::new("Access control path.")
.format(&ACL_PATH_FORMAT)
.min_length(1)
.max_length(128)
.schema();
pub const ACL_PROPAGATE_SCHEMA: Schema =
BooleanSchema::new("Allow to propagate (inherit) permissions.")
.default(true)
.schema();
pub const ACL_UGID_TYPE_SCHEMA: Schema = StringSchema::new("Type of 'ugid' property.")
.format(&ApiStringFormat::Enum(&[
EnumEntry::new("user", "User"),
EnumEntry::new("group", "Group"),
]))
.schema();
#[api(
properties: {
propagate: {
schema: ACL_PROPAGATE_SCHEMA,
},
path: {
schema: ACL_PATH_SCHEMA,
},
ugid_type: {
schema: ACL_UGID_TYPE_SCHEMA,
},
ugid: {
type: String,
description: "User or Group ID.",
},
roleid: {
type: Role,
}
}
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
/// ACL list entry.
pub struct AclListItem {
pub path: String,
pub ugid: String,
pub ugid_type: String,
pub propagate: bool,
pub roleid: String,
}
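// Illustrative entry (names and path are only an example): granting the
// `DatastoreBackup` role to user `john@pbs` on `/datastore/store1` with
// propagation would be returned as path: "/datastore/store1", ugid: "john@pbs",
// ugid_type: "user", propagate: true, roleid: "DatastoreBackup".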

pbs-api-types/src/ad.rs Normal file
@ -0,0 +1,98 @@
use serde::{Deserialize, Serialize};
use proxmox_schema::{api, Updater};
use super::{
LdapMode, LDAP_DOMAIN_SCHEMA, REALM_ID_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA,
SYNC_ATTRIBUTES_SCHEMA, SYNC_DEFAULTS_STRING_SCHEMA, USER_CLASSES_SCHEMA,
};
#[api(
properties: {
"realm": {
schema: REALM_ID_SCHEMA,
},
"comment": {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
"verify": {
optional: true,
default: false,
},
"sync-defaults-options": {
schema: SYNC_DEFAULTS_STRING_SCHEMA,
optional: true,
},
"sync-attributes": {
schema: SYNC_ATTRIBUTES_SCHEMA,
optional: true,
},
"user-classes" : {
optional: true,
schema: USER_CLASSES_SCHEMA,
},
"base-dn" : {
schema: LDAP_DOMAIN_SCHEMA,
optional: true,
},
"bind-dn" : {
schema: LDAP_DOMAIN_SCHEMA,
optional: true,
}
},
)]
#[derive(Serialize, Deserialize, Updater, Clone)]
#[serde(rename_all = "kebab-case")]
/// AD realm configuration properties.
pub struct AdRealmConfig {
#[updater(skip)]
pub realm: String,
/// AD server address
pub server1: String,
/// Fallback AD server address
#[serde(skip_serializing_if = "Option::is_none")]
pub server2: Option<String>,
/// AD server Port
#[serde(skip_serializing_if = "Option::is_none")]
pub port: Option<u16>,
/// Base domain name. Users are searched under this domain using a `subtree search`.
/// Expected to be set only internally to `defaultNamingContext` of the AD server, but can be
/// overridden if the need arises.
#[serde(skip_serializing_if = "Option::is_none")]
pub base_dn: Option<String>,
/// Comment
#[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>,
/// Connection security
#[serde(skip_serializing_if = "Option::is_none")]
pub mode: Option<LdapMode>,
/// Verify server certificate
#[serde(skip_serializing_if = "Option::is_none")]
pub verify: Option<bool>,
/// CA certificate to use for the server. The path can point to
/// either a file, or a directory. If it points to a file,
/// the PEM-formatted X.509 certificate stored at the path
/// will be added as a trusted certificate.
/// If the path points to a directory,
/// the directory replaces the system's default certificate
/// store at `/etc/ssl/certs` - Every file in the directory
/// will be loaded as a trusted certificate.
#[serde(skip_serializing_if = "Option::is_none")]
pub capath: Option<String>,
/// Bind domain to use for looking up users
#[serde(skip_serializing_if = "Option::is_none")]
pub bind_dn: Option<String>,
/// Custom LDAP search filter for user sync
#[serde(skip_serializing_if = "Option::is_none")]
pub filter: Option<String>,
/// Default options for AD sync
#[serde(skip_serializing_if = "Option::is_none")]
pub sync_defaults_options: Option<String>,
/// List of LDAP attributes to sync from AD to user config
#[serde(skip_serializing_if = "Option::is_none")]
pub sync_attributes: Option<String>,
/// User ``objectClass`` classes to sync
#[serde(skip_serializing_if = "Option::is_none")]
pub user_classes: Option<String>,
}

@ -0,0 +1,95 @@
use std::fmt::{self, Display};
use anyhow::Error;
use serde::{Deserialize, Serialize};
use proxmox_schema::api;
#[api(default: "encrypt")]
#[derive(Copy, Clone, Debug, Eq, PartialEq, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
/// Defines whether data is encrypted (using an AEAD cipher), only signed, or neither.
pub enum CryptMode {
/// Don't encrypt.
None,
/// Encrypt.
Encrypt,
/// Only sign.
SignOnly,
}
#[derive(Debug, Eq, PartialEq, Hash, Clone, Deserialize, Serialize)]
#[serde(transparent)]
/// 32-byte fingerprint, usually calculated with SHA256.
pub struct Fingerprint {
#[serde(with = "bytes_as_fingerprint")]
bytes: [u8; 32],
}
impl Fingerprint {
pub fn new(bytes: [u8; 32]) -> Self {
Self { bytes }
}
pub fn bytes(&self) -> &[u8; 32] {
&self.bytes
}
pub fn signature(&self) -> String {
as_fingerprint(&self.bytes)
}
}
/// Display as short key ID
impl Display for Fingerprint {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", as_fingerprint(&self.bytes[0..8]))
}
}
impl std::str::FromStr for Fingerprint {
type Err = Error;
fn from_str(s: &str) -> Result<Self, Error> {
let mut tmp = s.to_string();
tmp.retain(|c| c != ':');
let mut bytes = [0u8; 32];
hex::decode_to_slice(&tmp, &mut bytes)?;
Ok(Fingerprint::new(bytes))
}
}
fn as_fingerprint(bytes: &[u8]) -> String {
hex::encode(bytes)
.as_bytes()
.chunks(2)
.map(|v| unsafe { std::str::from_utf8_unchecked(v) }) // it's a hex string
.collect::<Vec<&str>>()
.join(":")
}
pub mod bytes_as_fingerprint {
use std::mem::MaybeUninit;
use serde::{Deserialize, Deserializer, Serializer};
pub fn serialize<S>(bytes: &[u8; 32], serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let s = super::as_fingerprint(bytes);
serializer.serialize_str(&s)
}
pub fn deserialize<'de, D>(deserializer: D) -> Result<[u8; 32], D::Error>
where
D: Deserializer<'de>,
{
// TODO: more efficiently implement with a Visitor implementing visit_str using split() and
// hex::decode by-byte
let mut s = String::deserialize(deserializer)?;
s.retain(|c| c != ':');
let mut out = MaybeUninit::<[u8; 32]>::uninit();
hex::decode_to_slice(s.as_bytes(), unsafe { &mut (*out.as_mut_ptr())[..] })
.map_err(serde::de::Error::custom)?;
Ok(unsafe { out.assume_init() })
}
}
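// Illustrative round trip: `as_fingerprint` renders the 32 bytes as 64 hex
// digits grouped into colon-separated pairs ("aa:bb:..."); `Fingerprint::from_str`
// accepts both that form and plain hex, and `Display` prints only the first
// 8 bytes as a short key ID.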

File diff suppressed because it is too large
@ -0,0 +1,30 @@
use serde::{Deserialize, Serialize};
use proxmox_schema::api;
#[api]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// General status information about a running VM file-restore daemon
pub struct RestoreDaemonStatus {
/// VM uptime in seconds
pub uptime: i64,
/// time left until auto-shutdown, keep in mind that this is useless when 'keep-timeout' is
/// not set, as then the status call will have reset the timer before returning the value
pub timeout: i64,
}
#[api]
#[derive(Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "kebab-case")]
/// The desired format of the result.
pub enum FileRestoreFormat {
/// Plain file (only works for single files)
Plain,
/// PXAR archive
Pxar,
/// ZIP archive
Zip,
/// TAR archive
Tar,
}

pbs-api-types/src/jobs.rs Normal file
@ -0,0 +1,844 @@
use std::str::FromStr;
use anyhow::bail;
use const_format::concatcp;
use regex::Regex;
use serde::{Deserialize, Serialize};
use proxmox_schema::*;
use crate::{
Authid, BackupNamespace, BackupType, NotificationMode, RateLimitConfig, Userid,
BACKUP_GROUP_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_NS_RE, DATASTORE_SCHEMA,
DRIVE_NAME_SCHEMA, MEDIA_POOL_NAME_SCHEMA, NS_MAX_DEPTH_REDUCED_SCHEMA, PROXMOX_SAFE_ID_FORMAT,
PROXMOX_SAFE_ID_REGEX_STR, REMOTE_ID_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA,
};
const_regex! {
/// Regex for verification jobs 'DATASTORE:ACTUAL_JOB_ID'
pub VERIFICATION_JOB_WORKER_ID_REGEX = concatcp!(r"^(", PROXMOX_SAFE_ID_REGEX_STR, r"):");
/// Regex for sync jobs '(REMOTE|\-):REMOTE_DATASTORE:LOCAL_DATASTORE:(?:LOCAL_NS_ANCHOR:)ACTUAL_JOB_ID'
pub SYNC_JOB_WORKER_ID_REGEX = concatcp!(r"^(", PROXMOX_SAFE_ID_REGEX_STR, r"|\-):(", PROXMOX_SAFE_ID_REGEX_STR, r"):(", PROXMOX_SAFE_ID_REGEX_STR, r")(?::(", BACKUP_NS_RE, r"))?:");
}
pub const JOB_ID_SCHEMA: Schema = StringSchema::new("Job ID.")
.format(&PROXMOX_SAFE_ID_FORMAT)
.min_length(3)
.max_length(32)
.schema();
pub const SYNC_SCHEDULE_SCHEMA: Schema = StringSchema::new("Run sync job at specified schedule.")
.format(&ApiStringFormat::VerifyFn(
proxmox_time::verify_calendar_event,
))
.type_text("<calendar-event>")
.schema();
pub const GC_SCHEDULE_SCHEMA: Schema =
StringSchema::new("Run garbage collection job at specified schedule.")
.format(&ApiStringFormat::VerifyFn(
proxmox_time::verify_calendar_event,
))
.type_text("<calendar-event>")
.schema();
pub const PRUNE_SCHEDULE_SCHEMA: Schema = StringSchema::new("Run prune job at specified schedule.")
.format(&ApiStringFormat::VerifyFn(
proxmox_time::verify_calendar_event,
))
.type_text("<calendar-event>")
.schema();
pub const VERIFICATION_SCHEDULE_SCHEMA: Schema =
StringSchema::new("Run verify job at specified schedule.")
.format(&ApiStringFormat::VerifyFn(
proxmox_time::verify_calendar_event,
))
.type_text("<calendar-event>")
.schema();
pub const REMOVE_VANISHED_BACKUPS_SCHEMA: Schema = BooleanSchema::new(
"Delete vanished backups. This removes the local copy if the remote backup was deleted.",
)
.default(false)
.schema();
#[api(
properties: {
"next-run": {
description: "Estimated time of the next run (UNIX epoch).",
optional: true,
type: Integer,
},
"last-run-state": {
description: "Result of the last run.",
optional: true,
type: String,
},
"last-run-upid": {
description: "Task UPID of the last run.",
optional: true,
type: String,
},
"last-run-endtime": {
description: "Endtime of the last run.",
optional: true,
type: Integer,
},
}
)]
#[derive(Serialize, Deserialize, Default, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Job Scheduling Status
pub struct JobScheduleStatus {
#[serde(skip_serializing_if = "Option::is_none")]
pub next_run: Option<i64>,
#[serde(skip_serializing_if = "Option::is_none")]
pub last_run_state: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub last_run_upid: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub last_run_endtime: Option<i64>,
}
#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// When do we send notifications
pub enum Notify {
/// Never send notification
Never,
/// Send notifications for failed and successful jobs
Always,
/// Send notifications for failed jobs only
Error,
}
#[api(
properties: {
gc: {
type: Notify,
optional: true,
},
verify: {
type: Notify,
optional: true,
},
sync: {
type: Notify,
optional: true,
},
prune: {
type: Notify,
optional: true,
},
},
)]
#[derive(Debug, Serialize, Deserialize)]
/// Datastore notify settings
pub struct DatastoreNotify {
/// Garbage collection settings
#[serde(skip_serializing_if = "Option::is_none")]
pub gc: Option<Notify>,
/// Verify job setting
#[serde(skip_serializing_if = "Option::is_none")]
pub verify: Option<Notify>,
/// Sync job setting
#[serde(skip_serializing_if = "Option::is_none")]
pub sync: Option<Notify>,
/// Prune job setting
#[serde(skip_serializing_if = "Option::is_none")]
pub prune: Option<Notify>,
}
pub const DATASTORE_NOTIFY_STRING_SCHEMA: Schema = StringSchema::new(
"Datastore notification setting, enum can be one of 'always', 'never', or 'error'.",
)
.format(&ApiStringFormat::PropertyString(
&DatastoreNotify::API_SCHEMA,
))
.schema();
pub const IGNORE_VERIFIED_BACKUPS_SCHEMA: Schema = BooleanSchema::new(
"Do not verify backups that are already verified if their verification is not outdated.",
)
.default(true)
.schema();
pub const VERIFICATION_OUTDATED_AFTER_SCHEMA: Schema =
IntegerSchema::new("Days after which a verification becomes outdated (0 is deprecated).")
.minimum(0)
.schema();
#[api(
properties: {
id: {
schema: JOB_ID_SCHEMA,
},
store: {
schema: DATASTORE_SCHEMA,
},
"ignore-verified": {
optional: true,
schema: IGNORE_VERIFIED_BACKUPS_SCHEMA,
},
"outdated-after": {
optional: true,
schema: VERIFICATION_OUTDATED_AFTER_SCHEMA,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
schedule: {
optional: true,
schema: VERIFICATION_SCHEDULE_SCHEMA,
},
ns: {
optional: true,
schema: BACKUP_NAMESPACE_SCHEMA,
},
"max-depth": {
optional: true,
schema: crate::NS_MAX_DEPTH_SCHEMA,
},
}
)]
#[derive(Serialize, Deserialize, Updater, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Verification Job
pub struct VerificationJobConfig {
/// unique ID to address this job
#[updater(skip)]
pub id: String,
/// the datastore ID this verification job affects
pub store: String,
#[serde(skip_serializing_if = "Option::is_none")]
/// if not set to false, check the age of the last snapshot verification to filter
/// out recent ones, depending on 'outdated_after' configuration.
pub ignore_verified: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")]
/// Reverify snapshots after X days, never if 0. Ignored if 'ignore_verified' is false.
pub outdated_after: Option<i64>,
#[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
/// when to schedule this job in calendar event notation
pub schedule: Option<String>,
#[serde(skip_serializing_if = "Option::is_none", default)]
/// on which backup namespace to run the verification recursively
pub ns: Option<BackupNamespace>,
#[serde(skip_serializing_if = "Option::is_none", default)]
/// how deep the verify should go from the `ns` level downwards. Passing 0 verifies only the
/// snapshots on the same level as the passed `ns`, or the datastore root if none.
pub max_depth: Option<usize>,
}
impl VerificationJobConfig {
pub fn acl_path(&self) -> Vec<&str> {
match self.ns.as_ref() {
Some(ns) => ns.acl_path(&self.store),
None => vec!["datastore", &self.store],
}
}
}
#[api(
properties: {
config: {
type: VerificationJobConfig,
},
status: {
type: JobScheduleStatus,
},
},
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Status of Verification Job
pub struct VerificationJobStatus {
#[serde(flatten)]
pub config: VerificationJobConfig,
#[serde(flatten)]
pub status: JobScheduleStatus,
}
#[api(
properties: {
store: {
schema: DATASTORE_SCHEMA,
},
pool: {
schema: MEDIA_POOL_NAME_SCHEMA,
},
drive: {
schema: DRIVE_NAME_SCHEMA,
},
"eject-media": {
description: "Eject media upon job completion.",
type: bool,
optional: true,
},
"export-media-set": {
description: "Export media set upon job completion.",
type: bool,
optional: true,
},
"latest-only": {
description: "Backup latest snapshots only.",
type: bool,
optional: true,
},
"notify-user": {
optional: true,
type: Userid,
},
"group-filter": {
schema: GROUP_FILTER_LIST_SCHEMA,
optional: true,
},
ns: {
type: BackupNamespace,
optional: true,
},
"max-depth": {
schema: crate::NS_MAX_DEPTH_SCHEMA,
optional: true,
},
}
)]
#[derive(Serialize, Deserialize, Clone, Updater, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Tape Backup Job Setup
pub struct TapeBackupJobSetup {
pub store: String,
pub pool: String,
pub drive: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub eject_media: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")]
pub export_media_set: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")]
pub latest_only: Option<bool>,
/// Send job email notification to this user
#[serde(skip_serializing_if = "Option::is_none")]
pub notify_user: Option<Userid>,
#[serde(skip_serializing_if = "Option::is_none")]
pub notification_mode: Option<NotificationMode>,
#[serde(skip_serializing_if = "Option::is_none")]
pub group_filter: Option<Vec<GroupFilter>>,
#[serde(skip_serializing_if = "Option::is_none", default)]
pub ns: Option<BackupNamespace>,
#[serde(skip_serializing_if = "Option::is_none", default)]
pub max_depth: Option<usize>,
}
#[api(
properties: {
id: {
schema: JOB_ID_SCHEMA,
},
setup: {
type: TapeBackupJobSetup,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
schedule: {
optional: true,
schema: SYNC_SCHEDULE_SCHEMA,
},
}
)]
#[derive(Serialize, Deserialize, Clone, Updater, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Tape Backup Job
pub struct TapeBackupJobConfig {
#[updater(skip)]
pub id: String,
#[serde(flatten)]
pub setup: TapeBackupJobSetup,
#[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub schedule: Option<String>,
}
#[api(
properties: {
config: {
type: TapeBackupJobConfig,
},
status: {
type: JobScheduleStatus,
},
},
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Status of Tape Backup Job
pub struct TapeBackupJobStatus {
#[serde(flatten)]
pub config: TapeBackupJobConfig,
#[serde(flatten)]
pub status: JobScheduleStatus,
/// Next tape used (best guess)
#[serde(skip_serializing_if = "Option::is_none")]
pub next_media_label: Option<String>,
}
#[derive(Clone, Debug)]
/// Filter for matching `BackupGroup`s, for use with `BackupGroup::filter`.
pub enum FilterType {
/// BackupGroup type - either `vm`, `ct`, or `host`.
BackupType(BackupType),
/// Full identifier of BackupGroup, including type
Group(String),
/// A regular expression matched against the full identifier of the BackupGroup
Regex(Regex),
}
impl PartialEq for FilterType {
fn eq(&self, other: &Self) -> bool {
match (self, other) {
(Self::BackupType(a), Self::BackupType(b)) => a == b,
(Self::Group(a), Self::Group(b)) => a == b,
(Self::Regex(a), Self::Regex(b)) => a.as_str() == b.as_str(),
_ => false,
}
}
}
impl std::str::FromStr for FilterType {
type Err = anyhow::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
Ok(match s.split_once(':') {
Some(("group", value)) => BACKUP_GROUP_SCHEMA.parse_simple_value(value).map(|_| FilterType::Group(value.to_string()))?,
Some(("type", value)) => FilterType::BackupType(value.parse()?),
Some(("regex", value)) => FilterType::Regex(Regex::new(value)?),
Some((ty, _value)) => bail!("expected 'group', 'type' or 'regex' prefix, got '{}'", ty),
None => bail!("input doesn't match expected format '<group:GROUP||type:<vm|ct|host>|regex:REGEX>'"),
})
}
}
// used for serializing below, caution!
impl std::fmt::Display for FilterType {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
FilterType::BackupType(backup_type) => write!(f, "type:{}", backup_type),
FilterType::Group(backup_group) => write!(f, "group:{}", backup_group),
FilterType::Regex(regex) => write!(f, "regex:{}", regex.as_str()),
}
}
}
#[derive(Clone, Debug)]
pub struct GroupFilter {
pub is_exclude: bool,
pub filter_type: FilterType,
}
impl PartialEq for GroupFilter {
fn eq(&self, other: &Self) -> bool {
self.filter_type == other.filter_type && self.is_exclude == other.is_exclude
}
}
impl Eq for GroupFilter {}
impl std::str::FromStr for GroupFilter {
type Err = anyhow::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let (is_exclude, type_str) = match s.split_once(':') {
Some(("include", value)) => (false, value),
Some(("exclude", value)) => (true, value),
_ => (false, s),
};
Ok(GroupFilter {
is_exclude,
filter_type: type_str.parse()?,
})
}
}
// used for serializing below, caution!
impl std::fmt::Display for GroupFilter {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
if self.is_exclude {
f.write_str("exclude:")?;
}
std::fmt::Display::fmt(&self.filter_type, f)
}
}
proxmox_serde::forward_deserialize_to_from_str!(GroupFilter);
proxmox_serde::forward_serialize_to_display!(GroupFilter);
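// Illustrative example: `"exclude:type:vm".parse::<GroupFilter>()` yields a
// filter with `is_exclude: true` and `FilterType::BackupType(BackupType::Vm)`;
// formatting it via `Display` round-trips to the same string, which the serde
// forwarding above relies on.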
fn verify_group_filter(input: &str) -> Result<(), anyhow::Error> {
GroupFilter::from_str(input).map(|_| ())
}
pub const GROUP_FILTER_SCHEMA: Schema = StringSchema::new(
"Group filter based on group identifier ('group:GROUP'), group type ('type:<vm|ct|host>'), or regex ('regex:RE'). Can be inverted by prepending 'exclude:'.")
.format(&ApiStringFormat::VerifyFn(verify_group_filter))
.type_text("[<exclude:|include:>]<type:<vm|ct|host>|group:GROUP|regex:RE>")
.schema();
pub const GROUP_FILTER_LIST_SCHEMA: Schema =
ArraySchema::new("List of group filters.", &GROUP_FILTER_SCHEMA).schema();
pub const TRANSFER_LAST_SCHEMA: Schema =
IntegerSchema::new("Limit transfer to last N snapshots (per group), skipping others")
.minimum(1)
.schema();
#[api()]
#[derive(Copy, Clone, Debug, Default, Eq, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Direction of the sync job, push or pull
pub enum SyncDirection {
/// Sync direction pull
#[default]
Pull,
/// Sync direction push
Push,
}
impl std::fmt::Display for SyncDirection {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
SyncDirection::Pull => f.write_str("pull"),
SyncDirection::Push => f.write_str("push"),
}
}
}
pub const RESYNC_CORRUPT_SCHEMA: Schema =
BooleanSchema::new("If the verification failed for a local snapshot, try to pull it again.")
.schema();
#[api(
properties: {
id: {
schema: JOB_ID_SCHEMA,
},
store: {
schema: DATASTORE_SCHEMA,
},
ns: {
type: BackupNamespace,
optional: true,
},
"owner": {
type: Authid,
optional: true,
},
remote: {
schema: REMOTE_ID_SCHEMA,
optional: true,
},
"remote-store": {
schema: DATASTORE_SCHEMA,
},
"remote-ns": {
type: BackupNamespace,
optional: true,
},
"remove-vanished": {
schema: REMOVE_VANISHED_BACKUPS_SCHEMA,
optional: true,
},
"max-depth": {
schema: NS_MAX_DEPTH_REDUCED_SCHEMA,
optional: true,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
limit: {
type: RateLimitConfig,
},
schedule: {
optional: true,
schema: SYNC_SCHEDULE_SCHEMA,
},
"group-filter": {
schema: GROUP_FILTER_LIST_SCHEMA,
optional: true,
},
"transfer-last": {
schema: TRANSFER_LAST_SCHEMA,
optional: true,
},
"resync-corrupt": {
schema: RESYNC_CORRUPT_SCHEMA,
optional: true,
},
"sync-direction": {
type: SyncDirection,
optional: true,
},
}
)]
#[derive(Serialize, Deserialize, Clone, Updater, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Sync Job
pub struct SyncJobConfig {
#[updater(skip)]
pub id: String,
pub store: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub ns: Option<BackupNamespace>,
#[serde(skip_serializing_if = "Option::is_none")]
pub owner: Option<Authid>,
#[serde(skip_serializing_if = "Option::is_none")]
/// None implies local sync.
pub remote: Option<String>,
pub remote_store: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub remote_ns: Option<BackupNamespace>,
#[serde(skip_serializing_if = "Option::is_none")]
pub remove_vanished: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")]
pub max_depth: Option<usize>,
#[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub schedule: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub group_filter: Option<Vec<GroupFilter>>,
#[serde(flatten)]
pub limit: RateLimitConfig,
#[serde(skip_serializing_if = "Option::is_none")]
pub transfer_last: Option<usize>,
#[serde(skip_serializing_if = "Option::is_none")]
pub resync_corrupt: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")]
pub sync_direction: Option<SyncDirection>,
}
impl SyncJobConfig {
pub fn acl_path(&self) -> Vec<&str> {
match self.ns.as_ref() {
Some(ns) => ns.acl_path(&self.store),
None => vec!["datastore", &self.store],
}
}
pub fn remote_acl_path(&self) -> Option<Vec<&str>> {
let remote = self.remote.as_ref()?;
match &self.remote_ns {
Some(remote_ns) => Some(remote_ns.remote_acl_path(remote, &self.remote_store)),
None => Some(vec!["remote", remote, &self.remote_store]),
}
}
}
#[api(
properties: {
config: {
type: SyncJobConfig,
},
status: {
type: JobScheduleStatus,
},
},
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Status of Sync Job
pub struct SyncJobStatus {
#[serde(flatten)]
pub config: SyncJobConfig,
#[serde(flatten)]
pub status: JobScheduleStatus,
}
/// These are used separately without `ns`/`max-depth` sometimes in the API, specifically in the API
/// call to prune a specific group, where `max-depth` makes no sense.
#[api(
properties: {
"keep-last": {
schema: crate::PRUNE_SCHEMA_KEEP_LAST,
optional: true,
},
"keep-hourly": {
schema: crate::PRUNE_SCHEMA_KEEP_HOURLY,
optional: true,
},
"keep-daily": {
schema: crate::PRUNE_SCHEMA_KEEP_DAILY,
optional: true,
},
"keep-weekly": {
schema: crate::PRUNE_SCHEMA_KEEP_WEEKLY,
optional: true,
},
"keep-monthly": {
schema: crate::PRUNE_SCHEMA_KEEP_MONTHLY,
optional: true,
},
"keep-yearly": {
schema: crate::PRUNE_SCHEMA_KEEP_YEARLY,
optional: true,
},
}
)]
#[derive(Serialize, Deserialize, Default, Updater, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Common pruning options
pub struct KeepOptions {
#[serde(skip_serializing_if = "Option::is_none")]
pub keep_last: Option<u64>,
#[serde(skip_serializing_if = "Option::is_none")]
pub keep_hourly: Option<u64>,
#[serde(skip_serializing_if = "Option::is_none")]
pub keep_daily: Option<u64>,
#[serde(skip_serializing_if = "Option::is_none")]
pub keep_weekly: Option<u64>,
#[serde(skip_serializing_if = "Option::is_none")]
pub keep_monthly: Option<u64>,
#[serde(skip_serializing_if = "Option::is_none")]
pub keep_yearly: Option<u64>,
}
impl KeepOptions {
pub fn keeps_something(&self) -> bool {
self.keep_last.unwrap_or(0)
+ self.keep_hourly.unwrap_or(0)
+ self.keep_daily.unwrap_or(0)
+ self.keep_weekly.unwrap_or(0)
+ self.keep_monthly.unwrap_or(0)
+ self.keep_yearly.unwrap_or(0)
> 0
}
}
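// Illustrative check: with only `keep_last: Some(3)` set, `keeps_something()`
// returns true; with every field `None` (or zero) it returns false, which
// callers can use to reject prune settings that would retain nothing.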
#[api(
properties: {
keep: {
type: KeepOptions,
},
ns: {
type: BackupNamespace,
optional: true,
},
"max-depth": {
schema: NS_MAX_DEPTH_REDUCED_SCHEMA,
optional: true,
},
}
)]
#[derive(Serialize, Deserialize, Default, Updater, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Common pruning options
pub struct PruneJobOptions {
#[serde(flatten)]
pub keep: KeepOptions,
/// The (optional) recursion depth
#[serde(skip_serializing_if = "Option::is_none")]
pub max_depth: Option<usize>,
#[serde(skip_serializing_if = "Option::is_none")]
pub ns: Option<BackupNamespace>,
}
impl PruneJobOptions {
pub fn keeps_something(&self) -> bool {
self.keep.keeps_something()
}
pub fn acl_path<'a>(&'a self, store: &'a str) -> Vec<&'a str> {
match &self.ns {
Some(ns) => ns.acl_path(store),
None => vec!["datastore", store],
}
}
}
#[api(
properties: {
disable: {
type: Boolean,
optional: true,
default: false,
},
id: {
schema: JOB_ID_SCHEMA,
},
store: {
schema: DATASTORE_SCHEMA,
},
schedule: {
schema: PRUNE_SCHEDULE_SCHEMA,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
options: {
type: PruneJobOptions,
},
},
)]
#[derive(Deserialize, Serialize, Updater, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Prune configuration.
pub struct PruneJobConfig {
/// unique ID to address this job
#[updater(skip)]
pub id: String,
pub store: String,
/// Disable this job.
#[serde(default, skip_serializing_if = "is_false")]
#[updater(serde(skip_serializing_if = "Option::is_none"))]
pub disable: bool,
pub schedule: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>,
#[serde(flatten)]
pub options: PruneJobOptions,
}
impl PruneJobConfig {
pub fn acl_path(&self) -> Vec<&str> {
self.options.acl_path(&self.store)
}
}
fn is_false(b: &bool) -> bool {
!b
}
#[api(
properties: {
config: {
type: PruneJobConfig,
},
status: {
type: JobScheduleStatus,
},
},
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Status of prune job
pub struct PruneJobStatus {
#[serde(flatten)]
pub config: PruneJobConfig,
#[serde(flatten)]
pub status: JobScheduleStatus,
}

@ -0,0 +1,55 @@
use serde::{Deserialize, Serialize};
use proxmox_schema::api;
use crate::CERT_FINGERPRINT_SHA256_SCHEMA;
#[api(default: "scrypt")]
#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
#[serde(rename_all = "lowercase")]
/// Key derivation function for password protected encryption keys.
pub enum Kdf {
/// Do not encrypt the key.
None,
/// Encrypt the key with a password using SCrypt.
Scrypt,
/// Encrypt the key with a password using PBKDF2.
PBKDF2,
}
impl Default for Kdf {
#[inline]
fn default() -> Self {
Kdf::Scrypt
}
}
#[api(
properties: {
kdf: {
type: Kdf,
},
fingerprint: {
schema: CERT_FINGERPRINT_SHA256_SCHEMA,
optional: true,
},
},
)]
#[derive(Deserialize, Serialize)]
/// Encryption Key Information
pub struct KeyInfo {
/// Path to key (if stored in a file)
#[serde(skip_serializing_if = "Option::is_none")]
pub path: Option<String>,
pub kdf: Kdf,
/// Key creation time
pub created: i64,
/// Key modification time
pub modified: i64,
/// Key fingerprint
#[serde(skip_serializing_if = "Option::is_none")]
pub fingerprint: Option<String>,
/// Password hint
#[serde(skip_serializing_if = "Option::is_none")]
pub hint: Option<String>,
}

208
pbs-api-types/src/ldap.rs Normal file
View File

@ -0,0 +1,208 @@
use serde::{Deserialize, Serialize};
use proxmox_schema::{api, ApiStringFormat, ApiType, ArraySchema, Schema, StringSchema, Updater};
use super::{REALM_ID_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA};
#[api()]
#[derive(Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
/// LDAP connection type
pub enum LdapMode {
/// Plaintext LDAP connection
#[serde(rename = "ldap")]
#[default]
Ldap,
/// Secure STARTTLS connection
#[serde(rename = "ldap+starttls")]
StartTls,
/// Secure LDAPS connection
#[serde(rename = "ldaps")]
Ldaps,
}
#[api(
properties: {
"realm": {
schema: REALM_ID_SCHEMA,
},
"comment": {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
"verify": {
optional: true,
default: false,
},
"sync-defaults-options": {
schema: SYNC_DEFAULTS_STRING_SCHEMA,
optional: true,
},
"sync-attributes": {
schema: SYNC_ATTRIBUTES_SCHEMA,
optional: true,
},
"user-classes" : {
optional: true,
schema: USER_CLASSES_SCHEMA,
},
"base-dn" : {
schema: LDAP_DOMAIN_SCHEMA,
},
"bind-dn" : {
schema: LDAP_DOMAIN_SCHEMA,
optional: true,
}
},
)]
#[derive(Serialize, Deserialize, Updater, Clone)]
#[serde(rename_all = "kebab-case")]
/// LDAP configuration properties.
pub struct LdapRealmConfig {
#[updater(skip)]
pub realm: String,
/// LDAP server address
pub server1: String,
/// Fallback LDAP server address
#[serde(skip_serializing_if = "Option::is_none")]
pub server2: Option<String>,
/// Port
#[serde(skip_serializing_if = "Option::is_none")]
pub port: Option<u16>,
/// Base domain name. Users are searched under this domain using a `subtree search`.
pub base_dn: String,
/// Username attribute. Used to map a ``userid`` to an LDAP ``dn``.
pub user_attr: String,
/// Comment
#[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>,
/// Connection security
#[serde(skip_serializing_if = "Option::is_none")]
pub mode: Option<LdapMode>,
/// Verify server certificate
#[serde(skip_serializing_if = "Option::is_none")]
pub verify: Option<bool>,
/// CA certificate to use for the server. The path can point to
/// either a file, or a directory. If it points to a file,
/// the PEM-formatted X.509 certificate stored at the path
/// will be added as a trusted certificate.
/// If the path points to a directory,
/// the directory replaces the system's default certificate
/// store at `/etc/ssl/certs`: every file in the directory
/// will be loaded as a trusted certificate.
#[serde(skip_serializing_if = "Option::is_none")]
pub capath: Option<String>,
/// Bind domain to use for looking up users
#[serde(skip_serializing_if = "Option::is_none")]
pub bind_dn: Option<String>,
/// Custom LDAP search filter for user sync
#[serde(skip_serializing_if = "Option::is_none")]
pub filter: Option<String>,
/// Default options for LDAP sync
#[serde(skip_serializing_if = "Option::is_none")]
pub sync_defaults_options: Option<String>,
/// List of attributes to sync from LDAP to user config
#[serde(skip_serializing_if = "Option::is_none")]
pub sync_attributes: Option<String>,
/// User ``objectClass`` classes to sync
#[serde(skip_serializing_if = "Option::is_none")]
pub user_classes: Option<String>,
}
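// Illustrative sketch (not from the upstream source): a minimal realm entry mapped
// onto this struct via the kebab-case field names. The realm, server and DN values
// are made up; assumes `serde_json` is available as a dev-dependency.
#[cfg(test)]
#[test]
fn ldap_realm_config_example() {
    let config: LdapRealmConfig = serde_json::from_value(serde_json::json!({
        "realm": "company-ldap",
        "server1": "ldap.example.com",
        "base-dn": "dc=example,dc=com",
        "user-attr": "uid",
    }))
    .unwrap();
    assert_eq!(config.base_dn, "dc=example,dc=com");
    // All optional connection settings stay unset.
    assert!(config.mode.is_none() && config.port.is_none());
}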
#[api(
properties: {
"remove-vanished": {
optional: true,
schema: REMOVE_VANISHED_SCHEMA,
},
},
)]
#[derive(Serialize, Deserialize, Updater, Default, Debug)]
#[serde(rename_all = "kebab-case")]
/// Default options for LDAP synchronization runs
pub struct SyncDefaultsOptions {
/// How to handle vanished properties/users
pub remove_vanished: Option<String>,
/// Enable new users after sync
pub enable_new: Option<bool>,
}
#[api()]
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
#[serde(rename_all = "kebab-case")]
/// remove-vanished options
pub enum RemoveVanished {
/// Delete ACLs for vanished users
Acl,
/// Remove vanished users
Entry,
/// Remove vanished properties from users (e.g. email)
Properties,
}
pub const LDAP_DOMAIN_SCHEMA: Schema = StringSchema::new("LDAP Domain").schema();
pub const SYNC_DEFAULTS_STRING_SCHEMA: Schema = StringSchema::new("sync defaults options")
.format(&ApiStringFormat::PropertyString(
&SyncDefaultsOptions::API_SCHEMA,
))
.schema();
const REMOVE_VANISHED_DESCRIPTION: &str =
"A semicolon-separated list of things to remove when they or the user \
vanishes during user synchronization. The following values are possible: ``entry`` removes the \
user when not returned from the sync; ``properties`` removes any \
properties on the existing user that do not appear in the source. \
``acl`` removes ACLs when the user is not returned from the sync.";
pub const REMOVE_VANISHED_SCHEMA: Schema = StringSchema::new(REMOVE_VANISHED_DESCRIPTION)
.format(&ApiStringFormat::PropertyString(&REMOVE_VANISHED_ARRAY))
.schema();
pub const REMOVE_VANISHED_ARRAY: Schema = ArraySchema::new(
"Array of remove-vanished options",
&RemoveVanished::API_SCHEMA,
)
.min_length(1)
.schema();
#[api()]
#[derive(Serialize, Deserialize, Updater, Default, Debug)]
#[serde(rename_all = "kebab-case")]
/// Determine which LDAP attributes should be synced to which user attributes
pub struct SyncAttributes {
/// Name of the LDAP attribute containing the user's email address
pub email: Option<String>,
/// Name of the LDAP attribute containing the user's first name
pub firstname: Option<String>,
/// Name of the LDAP attribute containing the user's last name
pub lastname: Option<String>,
}
const SYNC_ATTRIBUTES_TEXT: &str = "Comma-separated list of key=value pairs for specifying \
which LDAP attributes map to which PBS user field. For example, \
to map the LDAP attribute ``mail`` to PBS's ``email``, write \
``email=mail``.";
pub const SYNC_ATTRIBUTES_SCHEMA: Schema = StringSchema::new(SYNC_ATTRIBUTES_TEXT)
.format(&ApiStringFormat::PropertyString(
&SyncAttributes::API_SCHEMA,
))
.schema();
pub const USER_CLASSES_ARRAY: Schema = ArraySchema::new(
"Array of user classes",
&StringSchema::new("user class").schema(),
)
.min_length(1)
.schema();
const USER_CLASSES_TEXT: &str = "Comma-separated list of allowed objectClass values for \
user synchronization. For instance, if ``user-classes`` is set to ``person,user``, \
then user synchronization will consider all LDAP entities \
that have either ``objectClass: person`` or ``objectClass: user``.";
pub const USER_CLASSES_SCHEMA: Schema = StringSchema::new(USER_CLASSES_TEXT)
.format(&ApiStringFormat::PropertyString(&USER_CLASSES_ARRAY))
.default("inetorgperson,posixaccount,person,user")
.schema();
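// Illustrative sketch (not from the upstream source): the `sync-attributes` option
// is stored as a property string such as "email=mail,firstname=givenName,lastname=sn";
// expanded to a map it deserializes into `SyncAttributes`. The LDAP attribute names
// are made up; assumes `serde_json` is available as a dev-dependency.
#[cfg(test)]
#[test]
fn sync_attributes_example() {
    let attrs: SyncAttributes = serde_json::from_value(serde_json::json!({
        "email": "mail",
        "firstname": "givenName",
        "lastname": "sn",
    }))
    .unwrap();
    assert_eq!(attrs.email.as_deref(), Some("mail"));
}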

373
pbs-api-types/src/lib.rs Normal file
View File

@ -0,0 +1,373 @@
//! Basic API types used by most of the PBS code.
use const_format::concatcp;
use serde::{Deserialize, Serialize};
pub mod percent_encoding;
use proxmox_schema::{
api, const_regex, ApiStringFormat, ApiType, ArraySchema, ReturnType, Schema, StringSchema,
};
use proxmox_time::parse_daily_duration;
use proxmox_auth_api::types::{APITOKEN_ID_REGEX_STR, USER_ID_REGEX_STR};
pub use proxmox_schema::api_types::SAFE_ID_FORMAT as PROXMOX_SAFE_ID_FORMAT;
pub use proxmox_schema::api_types::SAFE_ID_REGEX as PROXMOX_SAFE_ID_REGEX;
pub use proxmox_schema::api_types::SAFE_ID_REGEX_STR as PROXMOX_SAFE_ID_REGEX_STR;
pub use proxmox_schema::api_types::{
BLOCKDEVICE_DISK_AND_PARTITION_NAME_REGEX, BLOCKDEVICE_NAME_REGEX,
};
pub use proxmox_schema::api_types::{DNS_ALIAS_REGEX, DNS_NAME_OR_IP_REGEX, DNS_NAME_REGEX};
pub use proxmox_schema::api_types::{FINGERPRINT_SHA256_REGEX, SHA256_HEX_REGEX};
pub use proxmox_schema::api_types::{
GENERIC_URI_REGEX, HOSTNAME_REGEX, HOST_PORT_REGEX, HTTP_URL_REGEX,
};
pub use proxmox_schema::api_types::{MULTI_LINE_COMMENT_REGEX, SINGLE_LINE_COMMENT_REGEX};
pub use proxmox_schema::api_types::{PASSWORD_REGEX, SYSTEMD_DATETIME_REGEX, UUID_REGEX};
pub use proxmox_schema::api_types::{CIDR_FORMAT, CIDR_REGEX};
pub use proxmox_schema::api_types::{CIDR_V4_FORMAT, CIDR_V4_REGEX};
pub use proxmox_schema::api_types::{CIDR_V6_FORMAT, CIDR_V6_REGEX};
pub use proxmox_schema::api_types::{IPRE_STR, IP_FORMAT, IP_REGEX};
pub use proxmox_schema::api_types::{IPV4RE_STR, IP_V4_FORMAT, IP_V4_REGEX};
pub use proxmox_schema::api_types::{IPV6RE_STR, IP_V6_FORMAT, IP_V6_REGEX};
pub use proxmox_schema::api_types::COMMENT_SCHEMA as SINGLE_LINE_COMMENT_SCHEMA;
pub use proxmox_schema::api_types::HOSTNAME_SCHEMA;
pub use proxmox_schema::api_types::HOST_PORT_SCHEMA;
pub use proxmox_schema::api_types::HTTP_URL_SCHEMA;
pub use proxmox_schema::api_types::MULTI_LINE_COMMENT_SCHEMA;
pub use proxmox_schema::api_types::NODE_SCHEMA;
pub use proxmox_schema::api_types::SINGLE_LINE_COMMENT_FORMAT;
pub use proxmox_schema::api_types::{
BLOCKDEVICE_DISK_AND_PARTITION_NAME_SCHEMA, BLOCKDEVICE_NAME_SCHEMA,
};
pub use proxmox_schema::api_types::{CERT_FINGERPRINT_SHA256_SCHEMA, FINGERPRINT_SHA256_FORMAT};
pub use proxmox_schema::api_types::{DISK_ARRAY_SCHEMA, DISK_LIST_SCHEMA};
pub use proxmox_schema::api_types::{DNS_ALIAS_FORMAT, DNS_NAME_FORMAT, DNS_NAME_OR_IP_SCHEMA};
pub use proxmox_schema::api_types::{PASSWORD_FORMAT, PASSWORD_SCHEMA};
pub use proxmox_schema::api_types::{SERVICE_ID_SCHEMA, UUID_FORMAT};
pub use proxmox_schema::api_types::{SYSTEMD_DATETIME_FORMAT, TIME_ZONE_SCHEMA};
use proxmox_schema::api_types::{DNS_NAME_STR, IPRE_BRACKET_STR};
// re-export APT API types
pub use proxmox_apt_api_types::{
APTChangeRepositoryOptions, APTGetChangelogOptions, APTRepositoriesResult, APTRepositoryFile,
APTRepositoryFileError, APTRepositoryHandle, APTRepositoryInfo, APTStandardRepository,
APTUpdateInfo, APTUpdateOptions,
};
#[rustfmt::skip]
pub const BACKUP_ID_RE: &str = r"[A-Za-z0-9_][A-Za-z0-9._\-]*";
#[rustfmt::skip]
pub const BACKUP_TYPE_RE: &str = r"(?:host|vm|ct)";
#[rustfmt::skip]
pub const BACKUP_TIME_RE: &str = r"[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z";
#[rustfmt::skip]
pub const BACKUP_NS_RE: &str =
concatcp!("(?:",
"(?:", PROXMOX_SAFE_ID_REGEX_STR, r"/){0,7}", PROXMOX_SAFE_ID_REGEX_STR,
")?");
#[rustfmt::skip]
pub const BACKUP_NS_PATH_RE: &str =
concatcp!(r"(?:ns/", PROXMOX_SAFE_ID_REGEX_STR, r"/){0,7}ns/", PROXMOX_SAFE_ID_REGEX_STR, r"/");
#[rustfmt::skip]
pub const SNAPSHOT_PATH_REGEX_STR: &str =
concatcp!(
r"(", BACKUP_TYPE_RE, ")/(", BACKUP_ID_RE, ")/(", BACKUP_TIME_RE, r")",
);
#[rustfmt::skip]
pub const GROUP_OR_SNAPSHOT_PATH_REGEX_STR: &str =
concatcp!(
r"(", BACKUP_TYPE_RE, ")/(", BACKUP_ID_RE, ")(?:/(", BACKUP_TIME_RE, r"))?",
);
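// Illustrative sketch (not from the upstream source): anchoring
// SNAPSHOT_PATH_REGEX_STR and matching a typical snapshot path shows how the three
// capture groups split into backup type, ID and time. Assumes the `regex` crate is
// available (it already backs the `const_regex!` patterns below); the path is made up.
#[cfg(test)]
#[test]
fn snapshot_path_regex_example() {
    let re = regex::Regex::new(&format!("^{SNAPSHOT_PATH_REGEX_STR}$")).unwrap();
    let caps = re.captures("vm/100/2023-11-21T07:45:00Z").unwrap();
    assert_eq!(&caps[1], "vm");
    assert_eq!(&caps[2], "100");
    assert_eq!(&caps[3], "2023-11-21T07:45:00Z");
}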
mod acl;
pub use acl::*;
mod datastore;
pub use datastore::*;
mod jobs;
pub use jobs::*;
mod key_derivation;
pub use key_derivation::{Kdf, KeyInfo};
mod maintenance;
pub use maintenance::*;
mod network;
pub use network::*;
mod node;
pub use node::*;
pub use proxmox_auth_api::types as userid;
pub use proxmox_auth_api::types::{Authid, Userid};
pub use proxmox_auth_api::types::{Realm, RealmRef};
pub use proxmox_auth_api::types::{Tokenname, TokennameRef};
pub use proxmox_auth_api::types::{Username, UsernameRef};
pub use proxmox_auth_api::types::{
PROXMOX_GROUP_ID_SCHEMA, PROXMOX_TOKEN_ID_SCHEMA, PROXMOX_TOKEN_NAME_SCHEMA,
};
#[macro_use]
mod user;
pub use user::*;
pub use proxmox_schema::upid::*;
mod crypto;
pub use crypto::{bytes_as_fingerprint, CryptMode, Fingerprint};
pub mod file_restore;
mod openid;
pub use openid::*;
mod ldap;
pub use ldap::*;
mod ad;
pub use ad::*;
mod remote;
pub use remote::*;
mod pathpatterns;
pub use pathpatterns::*;
mod tape;
pub use tape::*;
mod traffic_control;
pub use traffic_control::*;
mod zfs;
pub use zfs::*;
mod metrics;
pub use metrics::*;
mod version;
pub use version::*;
const_regex! {
// just a rough check - dummy acceptor is used before persisting
pub OPENSSL_CIPHERS_REGEX = r"^[0-9A-Za-z_:, +!\-@=.]+$";
pub BACKUP_REPO_URL_REGEX = concatcp!(
r"^^(?:(?:(",
USER_ID_REGEX_STR, "|", APITOKEN_ID_REGEX_STR,
")@)?(",
DNS_NAME_STR, "|", IPRE_BRACKET_STR,
"):)?(?:([0-9]{1,5}):)?(", PROXMOX_SAFE_ID_REGEX_STR, r")$"
);
pub SUBSCRIPTION_KEY_REGEX = concat!(r"^pbs(?:[cbsp])-[0-9a-f]{10}$");
}
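// Illustrative sketch (not from the upstream source): a repository URL is an
// optional "user@realm@host:" (plus optional "port:") prefix followed by a
// datastore name. Assumes the `const_regex!` patterns deref to `regex::Regex`,
// as they are used elsewhere in the code base; the URLs are made up.
#[cfg(test)]
#[test]
fn backup_repo_url_regex_example() {
    assert!(BACKUP_REPO_URL_REGEX.is_match("root@pam@backup.example.org:8007:store1"));
    assert!(BACKUP_REPO_URL_REGEX.is_match("store1")); // datastore name alone is valid
    assert!(!BACKUP_REPO_URL_REGEX.is_match("not a repository url"));
}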
pub const PVE_CONFIG_DIGEST_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SHA256_HEX_REGEX);
pub const SUBSCRIPTION_KEY_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&SUBSCRIPTION_KEY_REGEX);
pub const OPENSSL_CIPHERS_TLS_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&OPENSSL_CIPHERS_REGEX);
pub const DAILY_DURATION_FORMAT: ApiStringFormat =
ApiStringFormat::VerifyFn(|s| parse_daily_duration(s).map(drop));
pub const SEARCH_DOMAIN_SCHEMA: Schema =
StringSchema::new("Search domain for host-name lookup.").schema();
pub const FIRST_DNS_SERVER_SCHEMA: Schema = StringSchema::new("First name server IP address.")
.format(&IP_FORMAT)
.schema();
pub const SECOND_DNS_SERVER_SCHEMA: Schema = StringSchema::new("Second name server IP address.")
.format(&IP_FORMAT)
.schema();
pub const THIRD_DNS_SERVER_SCHEMA: Schema = StringSchema::new("Third name server IP address.")
.format(&IP_FORMAT)
.schema();
pub const OPENSSL_CIPHERS_TLS_1_2_SCHEMA: Schema =
StringSchema::new("OpenSSL cipher list used by the proxy for TLS <= 1.2")
.format(&OPENSSL_CIPHERS_TLS_FORMAT)
.schema();
pub const OPENSSL_CIPHERS_TLS_1_3_SCHEMA: Schema =
StringSchema::new("OpenSSL ciphersuites list used by the proxy for TLS 1.3")
.format(&OPENSSL_CIPHERS_TLS_FORMAT)
.schema();
pub const PBS_PASSWORD_SCHEMA: Schema = StringSchema::new("User Password.")
.format(&PASSWORD_FORMAT)
.min_length(8)
.max_length(64)
.schema();
pub const REALM_ID_SCHEMA: Schema = StringSchema::new("Realm name.")
.format(&PROXMOX_SAFE_ID_FORMAT)
.min_length(2)
.max_length(32)
.schema();
pub const SUBSCRIPTION_KEY_SCHEMA: Schema =
StringSchema::new("Proxmox Backup Server subscription key.")
.format(&SUBSCRIPTION_KEY_FORMAT)
.min_length(15)
.max_length(16)
.schema();
pub const PROXMOX_CONFIG_DIGEST_SCHEMA: Schema = StringSchema::new(
"Prevent changes if current configuration file has different \
SHA256 digest. This can be used to prevent concurrent \
modifications.",
)
.format(&PVE_CONFIG_DIGEST_FORMAT)
.schema();
/// API schema format definition for repository URLs
pub const BACKUP_REPO_URL: ApiStringFormat = ApiStringFormat::Pattern(&BACKUP_REPO_URL_REGEX);
// Complex type definitions
#[api()]
#[derive(Default, Serialize, Deserialize)]
/// Storage space usage information.
pub struct StorageStatus {
/// Total space (bytes).
pub total: u64,
/// Used space (bytes).
pub used: u64,
/// Available space (bytes).
pub avail: u64,
}
pub const PASSWORD_HINT_SCHEMA: Schema = StringSchema::new("Password hint.")
.format(&SINGLE_LINE_COMMENT_FORMAT)
.min_length(1)
.max_length(64)
.schema();
#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// Node Power command type.
pub enum NodePowerCommand {
/// Restart the server
Reboot,
/// Shutdown the server
Shutdown,
}
#[api()]
#[derive(Eq, PartialEq, Debug, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// The state (result) of a finished worker task.
pub enum TaskStateType {
/// Ok
OK,
/// Warning
Warning,
/// Error
Error,
/// Unknown
Unknown,
}
#[api(
properties: {
upid: { schema: UPID::API_SCHEMA },
},
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
/// Task properties.
pub struct TaskListItem {
pub upid: String,
/// The node name on which the task is running.
pub node: String,
/// The Unix PID
pub pid: i64,
/// The task's process start time (as encoded in the UPID)
pub pstart: u64,
/// The task start time (Epoch)
pub starttime: i64,
/// Worker type (arbitrary ASCII string)
pub worker_type: String,
/// Worker ID (arbitrary ASCII string)
pub worker_id: Option<String>,
/// The authenticated entity who started the task
pub user: String,
/// The task end time (Epoch)
#[serde(skip_serializing_if = "Option::is_none")]
pub endtime: Option<i64>,
/// Task end status
#[serde(skip_serializing_if = "Option::is_none")]
pub status: Option<String>,
}
pub const NODE_TASKS_LIST_TASKS_RETURN_TYPE: ReturnType = ReturnType {
optional: false,
schema: &ArraySchema::new("A list of tasks.", &TaskListItem::API_SCHEMA).schema(),
};
#[api]
#[derive(Deserialize, Serialize, Copy, Clone, PartialEq, Eq)]
#[serde(rename_all = "lowercase")]
/// type of the realm
pub enum RealmType {
/// The PAM realm
Pam,
/// The PBS realm
Pbs,
/// An OpenID Connect realm
OpenId,
/// An LDAP realm
Ldap,
/// An Active Directory (AD) realm
Ad,
}
serde_plain::derive_display_from_serialize!(RealmType);
serde_plain::derive_fromstr_from_deserialize!(RealmType);
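// Illustrative sketch (not from the upstream source): the serde_plain derives above
// give `RealmType` matching `Display`/`FromStr` implementations based on the
// lowercase serde names.
#[cfg(test)]
#[test]
fn realm_type_string_conversion_example() {
    assert_eq!(RealmType::Ldap.to_string(), "ldap");
    let parsed: RealmType = "openid".parse().unwrap();
    assert_eq!(parsed, RealmType::OpenId);
}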
#[api(
properties: {
realm: {
schema: REALM_ID_SCHEMA,
},
"type": {
type: RealmType,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
},
)]
#[derive(Deserialize, Serialize, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Basic Information about a realm
pub struct BasicRealmInfo {
pub realm: String,
#[serde(rename = "type")]
pub ty: RealmType,
/// True if it is the default realm
#[serde(skip_serializing_if = "Option::is_none")]
pub default: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>,
}

View File

@ -0,0 +1,110 @@
use anyhow::{bail, Error};
use serde::{Deserialize, Serialize};
use std::borrow::Cow;
use proxmox_schema::{api, const_regex, ApiStringFormat, Schema, StringSchema};
const_regex! {
pub MAINTENANCE_MESSAGE_REGEX = r"^[[:^cntrl:]]*$";
}
pub const MAINTENANCE_MESSAGE_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&MAINTENANCE_MESSAGE_REGEX);
pub const MAINTENANCE_MESSAGE_SCHEMA: Schema =
StringSchema::new("Message describing the reason for the maintenance.")
.format(&MAINTENANCE_MESSAGE_FORMAT)
.max_length(64)
.schema();
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
/// Operation requirements, used when checking for maintenance mode.
pub enum Operation {
/// for any read operation like backup restore or RRD metric collection
Read,
/// for any write/delete operation, like backup create or GC
Write,
/// for any purely logical operation on the in-memory state of the datastore, e.g., to check if
/// some mutex could be locked (e.g., GC already running?)
///
/// NOTE: one must *not* do any IO operations when only holding this Op state
Lookup,
// GarbageCollect or Delete?
}
#[api]
#[derive(Copy, Clone, Deserialize, Serialize, PartialEq, Eq)]
#[serde(rename_all = "kebab-case")]
/// Maintenance type.
pub enum MaintenanceType {
// TODO:
// - Add "GarbageCollection" or "DeleteOnly" as type and track GC (or all deletes) as separate
// operation, so that one can enable a mode where nothing new can be added but stuff can be
// cleaned
/// Only read operations are allowed on the datastore.
ReadOnly,
/// Neither read nor write operations are allowed on the datastore.
Offline,
/// The datastore is being deleted.
Delete,
/// The (removable) datastore is being unmounted.
Unmount,
}
serde_plain::derive_display_from_serialize!(MaintenanceType);
serde_plain::derive_fromstr_from_deserialize!(MaintenanceType);
#[api(
properties: {
type: {
type: MaintenanceType,
},
message: {
optional: true,
schema: MAINTENANCE_MESSAGE_SCHEMA,
}
},
default_key: "type",
)]
#[derive(Deserialize, Serialize)]
/// Maintenance mode
pub struct MaintenanceMode {
/// Type of maintenance mode.
#[serde(rename = "type")]
pub ty: MaintenanceType,
/// Reason for maintenance.
#[serde(skip_serializing_if = "Option::is_none")]
pub message: Option<String>,
}
impl MaintenanceMode {
/// Used for deciding whether the datastore is cleared from the internal cache
pub fn clear_from_cache(&self) -> bool {
self.ty == MaintenanceType::Offline
|| self.ty == MaintenanceType::Delete
|| self.ty == MaintenanceType::Unmount
}
pub fn check(&self, operation: Option<Operation>) -> Result<(), Error> {
if self.ty == MaintenanceType::Delete {
bail!("datastore is being deleted");
}
let message = percent_encoding::percent_decode_str(self.message.as_deref().unwrap_or(""))
.decode_utf8()
.unwrap_or(Cow::Borrowed(""));
if let Some(Operation::Lookup) = operation {
return Ok(());
} else if self.ty == MaintenanceType::Unmount {
bail!("datastore is being unmounted");
} else if self.ty == MaintenanceType::Offline {
bail!("offline maintenance mode: {}", message);
} else if self.ty == MaintenanceType::ReadOnly {
if let Some(Operation::Write) = operation {
bail!("read-only maintenance mode: {}", message);
}
}
Ok(())
}
}
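// Illustrative sketch (not from the upstream source): how `check()` gates
// operations. Read-only mode still allows reads and lookups but rejects writes,
// while the `Delete` type rejects everything. The message is made up.
#[cfg(test)]
#[test]
fn maintenance_mode_check_example() {
    let read_only = MaintenanceMode {
        ty: MaintenanceType::ReadOnly,
        message: Some("storage%20migration".to_string()),
    };
    assert!(read_only.check(Some(Operation::Read)).is_ok());
    assert!(read_only.check(Some(Operation::Lookup)).is_ok());
    assert!(read_only.check(Some(Operation::Write)).is_err());

    let delete = MaintenanceMode { ty: MaintenanceType::Delete, message: None };
    assert!(delete.check(Some(Operation::Lookup)).is_err());
}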

View File

@ -0,0 +1,255 @@
use serde::{Deserialize, Serialize};
use crate::{
HOST_PORT_SCHEMA, HTTP_URL_SCHEMA, PROXMOX_SAFE_ID_FORMAT, SINGLE_LINE_COMMENT_SCHEMA,
};
use proxmox_schema::{api, Schema, StringSchema, Updater};
pub const METRIC_SERVER_ID_SCHEMA: Schema = StringSchema::new("Metrics Server ID.")
.format(&PROXMOX_SAFE_ID_FORMAT)
.min_length(3)
.max_length(32)
.schema();
pub const INFLUXDB_BUCKET_SCHEMA: Schema = StringSchema::new("InfluxDB Bucket.")
.min_length(3)
.max_length(32)
.default("proxmox")
.schema();
pub const INFLUXDB_ORGANIZATION_SCHEMA: Schema = StringSchema::new("InfluxDB Organization.")
.min_length(3)
.max_length(32)
.default("proxmox")
.schema();
fn return_true() -> bool {
true
}
fn is_true(b: &bool) -> bool {
*b
}
#[api(
properties: {
name: {
schema: METRIC_SERVER_ID_SCHEMA,
},
enable: {
type: bool,
optional: true,
default: true,
},
host: {
schema: HOST_PORT_SCHEMA,
},
mtu: {
type: u16,
optional: true,
default: 1500,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
},
)]
#[derive(Serialize, Deserialize, Updater)]
#[serde(rename_all = "kebab-case")]
/// InfluxDB Server (UDP)
pub struct InfluxDbUdp {
#[updater(skip)]
pub name: String,
#[serde(default = "return_true", skip_serializing_if = "is_true")]
#[updater(serde(skip_serializing_if = "Option::is_none"))]
/// Enables or disables the metrics server
pub enable: bool,
/// the host + port
pub host: String,
#[serde(skip_serializing_if = "Option::is_none")]
/// The MTU
pub mtu: Option<u16>,
#[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>,
}
#[api(
properties: {
name: {
schema: METRIC_SERVER_ID_SCHEMA,
},
enable: {
type: bool,
optional: true,
default: true,
},
url: {
schema: HTTP_URL_SCHEMA,
},
token: {
type: String,
optional: true,
},
bucket: {
schema: INFLUXDB_BUCKET_SCHEMA,
optional: true,
},
organization: {
schema: INFLUXDB_ORGANIZATION_SCHEMA,
optional: true,
},
"max-body-size": {
type: usize,
optional: true,
default: 25_000_000,
},
"verify-tls": {
type: bool,
optional: true,
default: true,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
},
)]
#[derive(Serialize, Deserialize, Updater)]
#[serde(rename_all = "kebab-case")]
/// InfluxDB Server (HTTP(s))
pub struct InfluxDbHttp {
#[updater(skip)]
pub name: String,
#[serde(default = "return_true", skip_serializing_if = "is_true")]
#[updater(serde(skip_serializing_if = "Option::is_none"))]
/// Enables or disables the metrics server
pub enable: bool,
/// The base url of the influxdb server
pub url: String,
#[serde(skip_serializing_if = "Option::is_none")]
/// The (optional) API token
pub token: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
/// Named location where time series data is stored
pub bucket: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
/// Workspace for a group of users
pub organization: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
/// The (optional) maximum body size
pub max_body_size: Option<usize>,
#[serde(skip_serializing_if = "Option::is_none")]
/// If true, the certificate will be validated.
pub verify_tls: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>,
}
#[api]
#[derive(Copy, Clone, Deserialize, Serialize, PartialEq, Eq, PartialOrd, Ord)]
/// Type of the metric server
pub enum MetricServerType {
/// InfluxDB HTTP
#[serde(rename = "influxdb-http")]
InfluxDbHttp,
/// InfluxDB UDP
#[serde(rename = "influxdb-udp")]
InfluxDbUdp,
}
#[api(
properties: {
name: {
schema: METRIC_SERVER_ID_SCHEMA,
},
"type": {
type: MetricServerType,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
},
)]
#[derive(Clone, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]
#[serde(rename_all = "kebab-case")]
/// Basic information about a metric server, common to all metric server types
pub struct MetricServerInfo {
pub name: String,
#[serde(rename = "type")]
pub ty: MetricServerType,
/// Enables or disables the metrics server
#[serde(skip_serializing_if = "Option::is_none")]
pub enable: Option<bool>,
/// The target server
pub server: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>,
}
#[derive(Clone, Debug, Deserialize, Serialize)]
#[api(
properties: {
data: {
type: Array,
items: {
type: MetricDataPoint,
}
}
}
)]
/// Return type for the metric API endpoint
pub struct Metrics {
/// List of metric data points, sorted by timestamp
pub data: Vec<MetricDataPoint>,
}
#[api(
properties: {
id: {
type: String,
},
metric: {
type: String,
},
timestamp: {
type: Integer,
},
},
)]
/// Metric data point
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct MetricDataPoint {
/// Unique identifier for this metric object, for instance `node/<nodename>`
/// or `qemu/<vmid>`.
pub id: String,
/// Name of the metric.
pub metric: String,
/// Time at which this metric was observed
pub timestamp: i64,
#[serde(rename = "type")]
pub ty: MetricDataType,
/// Metric value.
pub value: f64,
}
#[api]
/// Type of the metric.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Deserialize, Serialize)]
#[serde(rename_all = "lowercase")]
pub enum MetricDataType {
/// gauge.
Gauge,
/// counter.
Counter,
/// derive.
Derive,
}
serde_plain::derive_display_from_serialize!(MetricDataType);
serde_plain::derive_fromstr_from_deserialize!(MetricDataType);
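// Illustrative sketch (not from the upstream source): the JSON shape of a single
// data point as produced by these types. The id, metric name and values are made
// up; assumes `serde_json` is available as a dev-dependency.
#[cfg(test)]
#[test]
fn metric_data_point_example() {
    let point = MetricDataPoint {
        id: "node/pbs1".to_string(),
        metric: "cpu_avg".to_string(),
        timestamp: 1_700_000_000,
        ty: MetricDataType::Gauge,
        value: 0.25,
    };
    assert_eq!(
        serde_json::to_value(&point).unwrap(),
        serde_json::json!({
            "id": "node/pbs1",
            "metric": "cpu_avg",
            "timestamp": 1_700_000_000,
            "type": "gauge",
            "value": 0.25,
        })
    );
}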

View File

@ -0,0 +1,345 @@
use std::fmt;
use serde::{Deserialize, Serialize};
use proxmox_schema::*;
use crate::{
CIDR_FORMAT, CIDR_V4_FORMAT, CIDR_V6_FORMAT, IP_FORMAT, IP_V4_FORMAT, IP_V6_FORMAT,
PROXMOX_SAFE_ID_REGEX,
};
pub const NETWORK_INTERFACE_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX);
pub const IP_V4_SCHEMA: Schema = StringSchema::new("IPv4 address.")
.format(&IP_V4_FORMAT)
.max_length(15)
.schema();
pub const IP_V6_SCHEMA: Schema = StringSchema::new("IPv6 address.")
.format(&IP_V6_FORMAT)
.max_length(39)
.schema();
pub const IP_SCHEMA: Schema = StringSchema::new("IP (IPv4 or IPv6) address.")
.format(&IP_FORMAT)
.max_length(39)
.schema();
pub const CIDR_V4_SCHEMA: Schema = StringSchema::new("IPv4 address with netmask (CIDR notation).")
.format(&CIDR_V4_FORMAT)
.max_length(18)
.schema();
pub const CIDR_V6_SCHEMA: Schema = StringSchema::new("IPv6 address with netmask (CIDR notation).")
.format(&CIDR_V6_FORMAT)
.max_length(43)
.schema();
pub const CIDR_SCHEMA: Schema =
StringSchema::new("IP address (IPv4 or IPv6) with netmask (CIDR notation).")
.format(&CIDR_FORMAT)
.max_length(43)
.schema();
#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// Interface configuration method
pub enum NetworkConfigMethod {
/// Configuration is done manually using other tools
Manual,
/// Define interfaces with statically allocated addresses.
Static,
/// Obtain an address via DHCP
DHCP,
/// Define the loopback interface.
Loopback,
}
#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
#[repr(u8)]
/// Linux Bond Mode
pub enum LinuxBondMode {
/// Round-robin policy
BalanceRr = 0,
/// Active-backup policy
ActiveBackup = 1,
/// XOR policy
BalanceXor = 2,
/// Broadcast policy
Broadcast = 3,
/// IEEE 802.3ad Dynamic link aggregation
#[serde(rename = "802.3ad")]
Ieee802_3ad = 4,
/// Adaptive transmit load balancing
BalanceTlb = 5,
/// Adaptive load balancing
BalanceAlb = 6,
}
impl fmt::Display for LinuxBondMode {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str(match self {
LinuxBondMode::BalanceRr => "balance-rr",
LinuxBondMode::ActiveBackup => "active-backup",
LinuxBondMode::BalanceXor => "balance-xor",
LinuxBondMode::Broadcast => "broadcast",
LinuxBondMode::Ieee802_3ad => "802.3ad",
LinuxBondMode::BalanceTlb => "balance-tlb",
LinuxBondMode::BalanceAlb => "balance-alb",
})
}
}
#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
#[repr(u8)]
/// Bond Transmit Hash Policy for LACP (802.3ad)
pub enum BondXmitHashPolicy {
/// Layer 2
Layer2 = 0,
/// Layer 2+3
#[serde(rename = "layer2+3")]
Layer2_3 = 1,
/// Layer 3+4
#[serde(rename = "layer3+4")]
Layer3_4 = 2,
}
impl fmt::Display for BondXmitHashPolicy {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str(match self {
BondXmitHashPolicy::Layer2 => "layer2",
BondXmitHashPolicy::Layer2_3 => "layer2+3",
BondXmitHashPolicy::Layer3_4 => "layer3+4",
})
}
}
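// Illustrative sketch (not from the upstream source): the Display implementations
// above deliberately match the serde names, including the special-cased "802.3ad"
// and "layer2+3" spellings.
#[cfg(test)]
#[test]
fn bond_mode_name_example() {
    assert_eq!(LinuxBondMode::Ieee802_3ad.to_string(), "802.3ad");
    assert_eq!(LinuxBondMode::ActiveBackup.to_string(), "active-backup");
    assert_eq!(BondXmitHashPolicy::Layer2_3.to_string(), "layer2+3");
}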
#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// Network interface type
pub enum NetworkInterfaceType {
/// Loopback
Loopback,
/// Physical Ethernet device
Eth,
/// Linux Bridge
Bridge,
/// Linux Bond
Bond,
/// Linux VLAN (eth.10)
Vlan,
/// Interface Alias (eth:1)
Alias,
/// Unknown interface type
Unknown,
}
pub const NETWORK_INTERFACE_NAME_SCHEMA: Schema = StringSchema::new("Network interface name.")
.format(&NETWORK_INTERFACE_FORMAT)
.min_length(1)
.max_length(15) // libc::IFNAMSIZ-1
.schema();
pub const NETWORK_INTERFACE_ARRAY_SCHEMA: Schema =
ArraySchema::new("Network interface list.", &NETWORK_INTERFACE_NAME_SCHEMA).schema();
pub const NETWORK_INTERFACE_LIST_SCHEMA: Schema =
StringSchema::new("A list of network devices, comma separated.")
.format(&ApiStringFormat::PropertyString(
&NETWORK_INTERFACE_ARRAY_SCHEMA,
))
.schema();
#[api(
properties: {
name: {
schema: NETWORK_INTERFACE_NAME_SCHEMA,
},
"type": {
type: NetworkInterfaceType,
},
method: {
type: NetworkConfigMethod,
optional: true,
},
method6: {
type: NetworkConfigMethod,
optional: true,
},
cidr: {
schema: CIDR_V4_SCHEMA,
optional: true,
},
cidr6: {
schema: CIDR_V6_SCHEMA,
optional: true,
},
gateway: {
schema: IP_V4_SCHEMA,
optional: true,
},
gateway6: {
schema: IP_V6_SCHEMA,
optional: true,
},
options: {
description: "Option list (inet)",
type: Array,
items: {
description: "Optional attribute line.",
type: String,
},
},
options6: {
description: "Option list (inet6)",
type: Array,
items: {
description: "Optional attribute line.",
type: String,
},
},
comments: {
description: "Comments (inet, may span multiple lines)",
type: String,
optional: true,
},
comments6: {
description: "Comments (inet6, may span multiple lines)",
type: String,
optional: true,
},
bridge_ports: {
schema: NETWORK_INTERFACE_ARRAY_SCHEMA,
optional: true,
},
slaves: {
schema: NETWORK_INTERFACE_ARRAY_SCHEMA,
optional: true,
},
"vlan-id": {
description: "VLAN ID.",
type: u16,
optional: true,
},
"vlan-raw-device": {
schema: NETWORK_INTERFACE_NAME_SCHEMA,
optional: true,
},
bond_mode: {
type: LinuxBondMode,
optional: true,
},
"bond-primary": {
schema: NETWORK_INTERFACE_NAME_SCHEMA,
optional: true,
},
bond_xmit_hash_policy: {
type: BondXmitHashPolicy,
optional: true,
},
}
)]
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
/// Network Interface configuration
pub struct Interface {
/// Autostart interface
#[serde(rename = "autostart")]
pub autostart: bool,
/// Interface is active (UP)
pub active: bool,
/// Interface name
pub name: String,
/// Interface type
#[serde(rename = "type")]
pub interface_type: NetworkInterfaceType,
#[serde(skip_serializing_if = "Option::is_none")]
pub method: Option<NetworkConfigMethod>,
#[serde(skip_serializing_if = "Option::is_none")]
pub method6: Option<NetworkConfigMethod>,
#[serde(skip_serializing_if = "Option::is_none")]
/// IPv4 address with netmask
pub cidr: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
/// IPv4 gateway
pub gateway: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
/// IPv6 address with netmask
pub cidr6: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
/// IPv6 gateway
pub gateway6: Option<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub options: Vec<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub options6: Vec<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub comments: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub comments6: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
/// Maximum Transmission Unit
pub mtu: Option<u64>,
#[serde(skip_serializing_if = "Option::is_none")]
pub bridge_ports: Option<Vec<String>>,
/// Enable bridge vlan support.
#[serde(skip_serializing_if = "Option::is_none")]
pub bridge_vlan_aware: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(rename = "vlan-id")]
pub vlan_id: Option<u16>,
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(rename = "vlan-raw-device")]
pub vlan_raw_device: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub slaves: Option<Vec<String>>,
#[serde(skip_serializing_if = "Option::is_none")]
pub bond_mode: Option<LinuxBondMode>,
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(rename = "bond-primary")]
pub bond_primary: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub bond_xmit_hash_policy: Option<BondXmitHashPolicy>,
}
impl Interface {
pub fn new(name: String) -> Self {
Self {
name,
interface_type: NetworkInterfaceType::Unknown,
autostart: false,
active: false,
method: None,
method6: None,
cidr: None,
gateway: None,
cidr6: None,
gateway6: None,
options: Vec::new(),
options6: Vec::new(),
comments: None,
comments6: None,
mtu: None,
bridge_ports: None,
bridge_vlan_aware: None,
vlan_id: None,
vlan_raw_device: None,
slaves: None,
bond_mode: None,
bond_primary: None,
bond_xmit_hash_policy: None,
}
}
}
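// Illustrative sketch (not from the upstream source): building a statically
// configured bridge on top of `Interface::new()`. The interface name, address and
// port values are made up.
#[cfg(test)]
#[test]
fn interface_builder_example() {
    let mut iface = Interface::new("vmbr0".to_string());
    iface.interface_type = NetworkInterfaceType::Bridge;
    iface.autostart = true;
    iface.method = Some(NetworkConfigMethod::Static);
    iface.cidr = Some("192.0.2.10/24".to_string());
    iface.gateway = Some("192.0.2.1".to_string());
    iface.bridge_ports = Some(vec!["eth0".to_string()]);

    assert_eq!(iface.name, "vmbr0");
    assert!(iface.options.is_empty());
}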

162
pbs-api-types/src/node.rs Normal file
View File

@ -0,0 +1,162 @@
use std::ffi::OsStr;
use proxmox_schema::*;
use serde::{Deserialize, Serialize};
use crate::StorageStatus;
#[api]
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "kebab-case")]
/// Node memory usage counters
pub struct NodeMemoryCounters {
/// Total memory
pub total: u64,
/// Used memory
pub used: u64,
/// Free memory
pub free: u64,
}
#[api]
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "kebab-case")]
/// Node swap usage counters
pub struct NodeSwapCounters {
/// Total swap
pub total: u64,
/// Used swap
pub used: u64,
/// Free swap
pub free: u64,
}
#[api]
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "kebab-case")]
/// Contains general node information such as the fingerprint
pub struct NodeInformation {
/// The SSL Fingerprint
pub fingerprint: String,
}
#[api]
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "lowercase")]
/// The current kernel version (output of `uname`)
pub struct KernelVersionInformation {
/// The systemname/nodename
pub sysname: String,
/// The kernel release number
pub release: String,
/// The kernel version
pub version: String,
/// The machine architecture
pub machine: String,
}
impl KernelVersionInformation {
pub fn from_uname_parts(
sysname: &OsStr,
release: &OsStr,
version: &OsStr,
machine: &OsStr,
) -> Self {
KernelVersionInformation {
sysname: sysname.to_str().map(String::from).unwrap_or_default(),
release: release.to_str().map(String::from).unwrap_or_default(),
version: version.to_str().map(String::from).unwrap_or_default(),
machine: machine.to_str().map(String::from).unwrap_or_default(),
}
}
pub fn get_legacy(&self) -> String {
format!("{} {} {}", self.sysname, self.release, self.version)
}
}
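// Illustrative sketch (not from the upstream source): the values below mimic
// `uname` output (they are made up); `get_legacy()` reproduces the old
// single-string kernel version format.
#[cfg(test)]
#[test]
fn kernel_version_information_example() {
    let info = KernelVersionInformation::from_uname_parts(
        OsStr::new("Linux"),
        OsStr::new("6.8.12-pve"),
        OsStr::new("#1 SMP PREEMPT_DYNAMIC"),
        OsStr::new("x86_64"),
    );
    assert_eq!(info.machine, "x86_64");
    assert_eq!(info.get_legacy(), "Linux 6.8.12-pve #1 SMP PREEMPT_DYNAMIC");
}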
#[api]
#[derive(Serialize, Deserialize, Copy, Clone)]
#[serde(rename_all = "kebab-case")]
/// The possible BootModes
pub enum BootMode {
/// The BootMode is EFI/UEFI
Efi,
/// The BootMode is Legacy BIOS
LegacyBios,
}
#[api]
#[derive(Serialize, Deserialize, Clone)]
#[serde(rename_all = "lowercase")]
/// Holds the boot mode information
pub struct BootModeInformation {
/// The BootMode, either Efi or Bios
pub mode: BootMode,
/// SecureBoot status
pub secureboot: bool,
}
#[api]
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "kebab-case")]
/// Information about the CPU
pub struct NodeCpuInformation {
/// The CPU model
pub model: String,
/// The number of CPU sockets
pub sockets: usize,
/// The number of CPU cores (incl. threads)
pub cpus: usize,
}
#[api(
properties: {
memory: {
type: NodeMemoryCounters,
},
root: {
type: StorageStatus,
},
swap: {
type: NodeSwapCounters,
},
loadavg: {
type: Array,
items: {
type: Number,
description: "the load",
}
},
cpuinfo: {
type: NodeCpuInformation,
},
info: {
type: NodeInformation,
}
},
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// The Node status
pub struct NodeStatus {
pub memory: NodeMemoryCounters,
pub root: StorageStatus,
pub swap: NodeSwapCounters,
/// The current uptime of the server.
pub uptime: u64,
/// Load for 1, 5 and 15 minutes.
pub loadavg: [f64; 3],
/// The current kernel version (NEW struct type).
pub current_kernel: KernelVersionInformation,
/// The current kernel version (LEGACY string type).
pub kversion: String,
/// Total CPU usage since last query.
pub cpu: f64,
/// Total IO wait since last query.
pub wait: f64,
pub cpuinfo: NodeCpuInformation,
pub info: NodeInformation,
/// Current boot mode
pub boot_info: BootModeInformation,
}

120
pbs-api-types/src/openid.rs Normal file
View File

@ -0,0 +1,120 @@
use serde::{Deserialize, Serialize};
use proxmox_schema::{api, ApiStringFormat, ArraySchema, Schema, StringSchema, Updater};
use super::{
GENERIC_URI_REGEX, PROXMOX_SAFE_ID_FORMAT, PROXMOX_SAFE_ID_REGEX, REALM_ID_SCHEMA,
SINGLE_LINE_COMMENT_SCHEMA,
};
pub const OPENID_SCOPE_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX);
pub const OPENID_SCOPE_SCHEMA: Schema = StringSchema::new("OpenID Scope Name.")
.format(&OPENID_SCOPE_FORMAT)
.schema();
pub const OPENID_SCOPE_ARRAY_SCHEMA: Schema =
ArraySchema::new("Array of OpenId Scopes.", &OPENID_SCOPE_SCHEMA).schema();
pub const OPENID_SCOPE_LIST_FORMAT: ApiStringFormat =
ApiStringFormat::PropertyString(&OPENID_SCOPE_ARRAY_SCHEMA);
pub const OPENID_DEFAILT_SCOPE_LIST: &str = "email profile";
pub const OPENID_SCOPE_LIST_SCHEMA: Schema = StringSchema::new("OpenID Scope List")
.format(&OPENID_SCOPE_LIST_FORMAT)
.default(OPENID_DEFAILT_SCOPE_LIST)
.schema();
pub const OPENID_ACR_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&GENERIC_URI_REGEX);
pub const OPENID_ACR_SCHEMA: Schema =
StringSchema::new("OpenID Authentication Context Class Reference.")
.format(&OPENID_ACR_FORMAT)
.schema();
pub const OPENID_ACR_ARRAY_SCHEMA: Schema =
ArraySchema::new("Array of OpenId ACRs.", &OPENID_ACR_SCHEMA).schema();
pub const OPENID_ACR_LIST_FORMAT: ApiStringFormat =
ApiStringFormat::PropertyString(&OPENID_ACR_ARRAY_SCHEMA);
pub const OPENID_ACR_LIST_SCHEMA: Schema = StringSchema::new("OpenID ACR List")
.format(&OPENID_ACR_LIST_FORMAT)
.schema();
pub const OPENID_USERNAME_CLAIM_SCHEMA: Schema = StringSchema::new(
"Use the value of this attribute/claim as unique user name. It \
is up to the identity provider to guarantee the uniqueness. The \
OpenID specification only guarantees that Subject ('sub') is \
unique. Also make sure that the user is not allowed to change that \
attribute themselves!",
)
.max_length(64)
.min_length(1)
.format(&PROXMOX_SAFE_ID_FORMAT)
.schema();
#[api(
properties: {
realm: {
schema: REALM_ID_SCHEMA,
},
"client-key": {
optional: true,
},
"scopes": {
schema: OPENID_SCOPE_LIST_SCHEMA,
optional: true,
},
"acr-values": {
schema: OPENID_ACR_LIST_SCHEMA,
optional: true,
},
prompt: {
description: "OpenID Prompt",
type: String,
format: &PROXMOX_SAFE_ID_FORMAT,
optional: true,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
autocreate: {
optional: true,
default: false,
},
"username-claim": {
schema: OPENID_USERNAME_CLAIM_SCHEMA,
optional: true,
},
},
)]
#[derive(Serialize, Deserialize, Updater)]
#[serde(rename_all = "kebab-case")]
/// OpenID configuration properties.
pub struct OpenIdRealmConfig {
#[updater(skip)]
pub realm: String,
/// OpenID Issuer Url
pub issuer_url: String,
/// OpenID Client ID
pub client_id: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub scopes: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub acr_values: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub prompt: Option<String>,
/// OpenID Client Key
#[serde(skip_serializing_if = "Option::is_none")]
pub client_key: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>,
/// Automatically create users if they do not exist.
#[serde(skip_serializing_if = "Option::is_none")]
pub autocreate: Option<bool>,
#[updater(skip)]
#[serde(skip_serializing_if = "Option::is_none")]
pub username_claim: Option<String>,
}

View File

@ -0,0 +1,30 @@
use proxmox_schema::{const_regex, ApiStringFormat, ApiType, Schema, StringSchema};
use serde::{Deserialize, Serialize};
const_regex! {
pub PATH_PATTERN_REGEX = concat!(r"^.+[^\\]$");
}
pub const PATH_PATTERN_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&PATH_PATTERN_REGEX);
pub const PATH_PATTERN_SCHEMA: Schema =
StringSchema::new("Path or match pattern for matching filenames.")
.format(&PATH_PATTERN_FORMAT)
.schema();
#[derive(Default, Deserialize, Serialize)]
/// Path or path pattern for filename matching
pub struct PathPattern {
pattern: String,
}
impl ApiType for PathPattern {
const API_SCHEMA: Schema = PATH_PATTERN_SCHEMA;
}
impl AsRef<[u8]> for PathPattern {
fn as_ref(&self) -> &[u8] {
self.pattern.as_bytes()
}
}

View File

@ -0,0 +1,22 @@
use percent_encoding::{utf8_percent_encode, AsciiSet};
/// This used to be: `SIMPLE_ENCODE_SET` plus space, `"`, `#`, `<`, `>`, backtick, `?`, `{`, `}`
pub const DEFAULT_ENCODE_SET: &AsciiSet = &percent_encoding::CONTROLS // 0..1f and 7e
// The SIMPLE_ENCODE_SET adds space and anything >= 0x7e (7e itself is already included above)
.add(0x20)
.add(0x7f)
// the DEFAULT_ENCODE_SET added:
.add(b' ')
.add(b'"')
.add(b'#')
.add(b'<')
.add(b'>')
.add(b'`')
.add(b'?')
.add(b'{')
.add(b'}');
/// percent encode a url component
pub fn percent_encode_component(comp: &str) -> String {
utf8_percent_encode(comp, percent_encoding::NON_ALPHANUMERIC).to_string()
}
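// Illustrative sketch (not from the upstream source): because the helper uses
// NON_ALPHANUMERIC, every separator is escaped, which keeps e.g. namespace paths
// safe inside a single URL component. The input strings are made up.
#[cfg(test)]
#[test]
fn percent_encode_component_example() {
    assert_eq!(percent_encode_component("ns/dev env"), "ns%2Fdev%20env");
    assert_eq!(percent_encode_component("plain123"), "plain123");
}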

106
pbs-api-types/src/remote.rs Normal file
View File

@ -0,0 +1,106 @@
use serde::{Deserialize, Serialize};
use super::*;
use proxmox_schema::*;
pub const REMOTE_PASSWORD_SCHEMA: Schema =
StringSchema::new("Password or auth token for remote host.")
.format(&PASSWORD_FORMAT)
.min_length(1)
.max_length(1024)
.schema();
pub const REMOTE_PASSWORD_BASE64_SCHEMA: Schema =
StringSchema::new("Password or auth token for remote host (stored as base64 string).")
.format(&PASSWORD_FORMAT)
.min_length(1)
.max_length(1024)
.schema();
pub const REMOTE_ID_SCHEMA: Schema = StringSchema::new("Remote ID.")
.format(&PROXMOX_SAFE_ID_FORMAT)
.min_length(3)
.max_length(32)
.schema();
#[api(
properties: {
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
host: {
schema: DNS_NAME_OR_IP_SCHEMA,
},
port: {
optional: true,
description: "The (optional) port",
type: u16,
},
"auth-id": {
type: Authid,
},
fingerprint: {
optional: true,
schema: CERT_FINGERPRINT_SHA256_SCHEMA,
},
},
)]
#[derive(Serialize, Deserialize, Updater, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Remote configuration properties.
pub struct RemoteConfig {
#[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>,
pub host: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub port: Option<u16>,
pub auth_id: Authid,
#[serde(skip_serializing_if = "Option::is_none")]
pub fingerprint: Option<String>,
}
#[api(
properties: {
name: {
schema: REMOTE_ID_SCHEMA,
},
config: {
type: RemoteConfig,
},
password: {
schema: REMOTE_PASSWORD_BASE64_SCHEMA,
},
},
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Remote properties.
pub struct Remote {
pub name: String,
// Note: The stored password is base64 encoded
#[serde(default, skip_serializing_if = "String::is_empty")]
#[serde(with = "proxmox_serde::string_as_base64")]
pub password: String,
#[serde(flatten)]
pub config: RemoteConfig,
}
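// Illustrative sketch (not from the upstream source): the password ends up base64
// encoded in the serialized form via `proxmox_serde::string_as_base64`. Assumes
// `Authid` parses from its usual `user@realm` form and that `serde_json` is
// available; the remote name, host and auth id are made up.
#[cfg(test)]
#[test]
fn remote_password_encoding_example() {
    let remote = Remote {
        name: "offsite".to_string(),
        password: "secret".to_string(),
        config: RemoteConfig {
            comment: None,
            host: "backup.example.org".to_string(),
            port: None,
            auth_id: "sync@pbs".parse().unwrap(),
            fingerprint: None,
        },
    };
    let value = serde_json::to_value(&remote).unwrap();
    assert_eq!(value["password"], "c2VjcmV0"); // base64("secret")
}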
#[api(
properties: {
name: {
schema: REMOTE_ID_SCHEMA,
},
config: {
type: RemoteConfig,
},
},
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Remote properties.
pub struct RemoteWithoutPassword {
pub name: String,
#[serde(flatten)]
pub config: RemoteConfig,
}

View File

@ -0,0 +1,134 @@
//! Types for tape changer API
use serde::{Deserialize, Serialize};
use proxmox_schema::{
api, ApiStringFormat, ArraySchema, IntegerSchema, Schema, StringSchema, Updater,
};
use crate::{OptionalDeviceIdentification, PROXMOX_SAFE_ID_FORMAT};
pub const CHANGER_NAME_SCHEMA: Schema = StringSchema::new("Tape Changer Identifier.")
.format(&PROXMOX_SAFE_ID_FORMAT)
.min_length(3)
.max_length(32)
.schema();
pub const SCSI_CHANGER_PATH_SCHEMA: Schema =
StringSchema::new("Path to Linux generic SCSI device (e.g. '/dev/sg4')").schema();
pub const MEDIA_LABEL_SCHEMA: Schema = StringSchema::new("Media Label/Barcode.")
.format(&PROXMOX_SAFE_ID_FORMAT)
.min_length(2)
.max_length(32)
.schema();
pub const SLOT_ARRAY_SCHEMA: Schema = ArraySchema::new(
"Slot list.",
&IntegerSchema::new("Slot number").minimum(1).schema(),
)
.schema();
pub const EXPORT_SLOT_LIST_SCHEMA: Schema = StringSchema::new(
"\
A list of slot numbers, comma separated. Those slots are reserved for
Import/Export, i.e. any media in those slots are considered to be
'offline'.
",
)
.format(&ApiStringFormat::PropertyString(&SLOT_ARRAY_SCHEMA))
.schema();
#[api(
properties: {
name: {
schema: CHANGER_NAME_SCHEMA,
},
path: {
schema: SCSI_CHANGER_PATH_SCHEMA,
},
"export-slots": {
schema: EXPORT_SLOT_LIST_SCHEMA,
optional: true,
},
"eject-before-unload": {
optional: true,
default: false,
}
},
)]
#[derive(Serialize, Deserialize, Updater)]
#[serde(rename_all = "kebab-case")]
/// SCSI tape changer
pub struct ScsiTapeChanger {
#[updater(skip)]
pub name: String,
pub path: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub export_slots: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
/// if set to true, tapes are ejected manually before unloading
pub eject_before_unload: Option<bool>,
}
#[api(
properties: {
config: {
type: ScsiTapeChanger,
},
info: {
type: OptionalDeviceIdentification,
},
},
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Changer config with optional device identification attributes
pub struct ChangerListEntry {
#[serde(flatten)]
pub config: ScsiTapeChanger,
#[serde(flatten)]
pub info: OptionalDeviceIdentification,
}
#[api()]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Mtx Entry Kind
pub enum MtxEntryKind {
/// Drive
Drive,
/// Slot
Slot,
/// Import/Export Slot
ImportExport,
}
#[api(
properties: {
"entry-kind": {
type: MtxEntryKind,
},
"label-text": {
schema: MEDIA_LABEL_SCHEMA,
optional: true,
},
},
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Mtx Status Entry
pub struct MtxStatusEntry {
pub entry_kind: MtxEntryKind,
/// The ID of the slot or drive
pub entry_id: u64,
/// The media label (volume tag) if the slot/drive is full
#[serde(skip_serializing_if = "Option::is_none")]
pub label_text: Option<String>,
/// The slot the drive was loaded from
#[serde(skip_serializing_if = "Option::is_none")]
pub loaded_slot: Option<u64>,
/// The current state of the drive
#[serde(skip_serializing_if = "Option::is_none")]
pub state: Option<String>,
}

View File

@ -0,0 +1,55 @@
use ::serde::{Deserialize, Serialize};
use proxmox_schema::api;
#[api()]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Optional Device Identification Attributes
pub struct OptionalDeviceIdentification {
/// Vendor (autodetected)
#[serde(skip_serializing_if = "Option::is_none")]
pub vendor: Option<String>,
/// Model (autodetected)
#[serde(skip_serializing_if = "Option::is_none")]
pub model: Option<String>,
/// Serial number (autodetected)
#[serde(skip_serializing_if = "Option::is_none")]
pub serial: Option<String>,
}
#[api()]
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Kind of device
pub enum DeviceKind {
/// Tape changer (Autoloader, Robot)
Changer,
/// Normal SCSI tape device
Tape,
}
#[api(
properties: {
kind: {
type: DeviceKind,
},
},
)]
#[derive(Debug, Serialize, Deserialize)]
/// Tape device information
pub struct TapeDeviceInfo {
pub kind: DeviceKind,
/// Path to the linux device node
pub path: String,
/// Serial number (autodetected)
pub serial: String,
/// Vendor (autodetected)
pub vendor: String,
/// Model (autodetected)
pub model: String,
/// Device major number
pub major: u32,
/// Device minor number
pub minor: u32,
}

View File

@ -0,0 +1,350 @@
//! Types for tape drive API
use anyhow::{bail, Error};
use serde::{Deserialize, Serialize};
use proxmox_schema::{api, IntegerSchema, Schema, StringSchema, Updater};
use crate::{OptionalDeviceIdentification, CHANGER_NAME_SCHEMA, PROXMOX_SAFE_ID_FORMAT};
pub const DRIVE_NAME_SCHEMA: Schema = StringSchema::new("Drive Identifier.")
.format(&PROXMOX_SAFE_ID_FORMAT)
.min_length(3)
.max_length(32)
.schema();
pub const LTO_DRIVE_PATH_SCHEMA: Schema =
StringSchema::new("The path to a LTO SCSI-generic tape device (i.e. '/dev/sg0')").schema();
pub const CHANGER_DRIVENUM_SCHEMA: Schema =
IntegerSchema::new("Associated changer drive number (requires option changer)")
.minimum(0)
.maximum(255)
.default(0)
.schema();
#[api(
properties: {
name: {
schema: DRIVE_NAME_SCHEMA,
}
}
)]
#[derive(Serialize, Deserialize)]
/// Simulate tape drives (only for test and debug)
#[serde(rename_all = "kebab-case")]
pub struct VirtualTapeDrive {
pub name: String,
/// Path to directory
pub path: String,
/// Virtual tape size
#[serde(skip_serializing_if = "Option::is_none")]
pub max_size: Option<usize>,
}
#[api(
properties: {
name: {
schema: DRIVE_NAME_SCHEMA,
},
path: {
schema: LTO_DRIVE_PATH_SCHEMA,
},
changer: {
schema: CHANGER_NAME_SCHEMA,
optional: true,
},
"changer-drivenum": {
schema: CHANGER_DRIVENUM_SCHEMA,
optional: true,
},
}
)]
#[derive(Serialize, Deserialize, Updater, Clone)]
#[serde(rename_all = "kebab-case")]
/// LTO SCSI tape drive
pub struct LtoTapeDrive {
#[updater(skip)]
pub name: String,
pub path: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub changer: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub changer_drivenum: Option<u64>,
}
#[api(
properties: {
config: {
type: LtoTapeDrive,
},
info: {
type: OptionalDeviceIdentification,
},
},
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Drive list entry
pub struct DriveListEntry {
#[serde(flatten)]
pub config: LtoTapeDrive,
#[serde(flatten)]
pub info: OptionalDeviceIdentification,
/// the state of the drive if locked
#[serde(skip_serializing_if = "Option::is_none")]
pub state: Option<String>,
/// Current device activity
#[serde(skip_serializing_if = "Option::is_none")]
pub activity: Option<DeviceActivity>,
}
#[api()]
#[derive(Serialize, Deserialize)]
/// Medium auxiliary memory attributes (MAM)
pub struct MamAttribute {
/// Attribute id
pub id: u16,
/// Attribute name
pub name: String,
/// Attribute value
pub value: String,
}
#[api()]
#[derive(Serialize, Deserialize, Copy, Clone, Debug, PartialOrd, PartialEq)]
/// The density of a tape medium, derived from the LTO version.
pub enum TapeDensity {
/// Unknown (no media loaded)
Unknown,
/// LTO1
LTO1,
/// LTO2
LTO2,
/// LTO3
LTO3,
/// LTO4
LTO4,
/// LTO5
LTO5,
/// LTO6
LTO6,
/// LTO7
LTO7,
/// LTO7M8
LTO7M8,
/// LTO8
LTO8,
/// LTO9
LTO9,
}
impl TryFrom<u8> for TapeDensity {
type Error = Error;
fn try_from(value: u8) -> Result<Self, Self::Error> {
let density = match value {
0x00 => TapeDensity::Unknown,
0x40 => TapeDensity::LTO1,
0x42 => TapeDensity::LTO2,
0x44 => TapeDensity::LTO3,
0x46 => TapeDensity::LTO4,
0x58 => TapeDensity::LTO5,
0x5a => TapeDensity::LTO6,
0x5c => TapeDensity::LTO7,
0x5d => TapeDensity::LTO7M8,
0x5e => TapeDensity::LTO8,
0x60 => TapeDensity::LTO9,
_ => bail!("unknown tape density code 0x{:02x}", value),
};
Ok(density)
}
}
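// Illustrative sketch (not from the upstream source): mapping a raw density code
// reported by the drive; codes outside the table are rejected.
#[cfg(test)]
#[test]
fn tape_density_code_example() {
    assert_eq!(TapeDensity::try_from(0x58u8).unwrap(), TapeDensity::LTO5);
    assert!(TapeDensity::try_from(0x99u8).is_err());
}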
#[api(
properties: {
density: {
type: TapeDensity,
optional: true,
},
},
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Drive/Media status for Lto SCSI drives.
///
/// Media related data is optional - only set if there is a medium
/// loaded.
pub struct LtoDriveAndMediaStatus {
/// Vendor
pub vendor: String,
/// Product
pub product: String,
/// Revision
pub revision: String,
/// Block size (0 is variable size)
pub blocksize: u32,
/// Compression enabled
pub compression: bool,
/// Drive buffer mode
pub buffer_mode: u8,
/// Tape density
pub density: TapeDensity,
/// Media is write protected
#[serde(skip_serializing_if = "Option::is_none")]
pub write_protect: Option<bool>,
/// Tape Alert Flags
#[serde(skip_serializing_if = "Option::is_none")]
pub alert_flags: Option<String>,
/// Current file number
#[serde(skip_serializing_if = "Option::is_none")]
pub file_number: Option<u64>,
/// Current block number
#[serde(skip_serializing_if = "Option::is_none")]
pub block_number: Option<u64>,
/// Medium Manufacture Date (epoch)
#[serde(skip_serializing_if = "Option::is_none")]
pub manufactured: Option<i64>,
/// Total Bytes Read in Medium Life
#[serde(skip_serializing_if = "Option::is_none")]
pub bytes_read: Option<u64>,
/// Total Bytes Written in Medium Life
#[serde(skip_serializing_if = "Option::is_none")]
pub bytes_written: Option<u64>,
/// Number of mounts for the current volume (i.e., Thread Count)
#[serde(skip_serializing_if = "Option::is_none")]
pub volume_mounts: Option<u64>,
/// Count of the total number of times the medium has passed over
/// the head.
#[serde(skip_serializing_if = "Option::is_none")]
pub medium_passes: Option<u64>,
/// Estimated tape wearout factor (assuming max. 16000 end-to-end passes)
#[serde(skip_serializing_if = "Option::is_none")]
pub medium_wearout: Option<f64>,
/// Current device activity
#[serde(skip_serializing_if = "Option::is_none")]
pub drive_activity: Option<DeviceActivity>,
}
#[api()]
/// Volume statistics from SCSI log page 17h
#[derive(Default, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct Lp17VolumeStatistics {
/// Volume mounts (thread count)
pub volume_mounts: u64,
/// Total data sets written
pub volume_datasets_written: u64,
/// Write retries
pub volume_recovered_write_data_errors: u64,
/// Total unrecovered write errors
pub volume_unrecovered_write_data_errors: u64,
/// Total suspended writes
pub volume_write_servo_errors: u64,
/// Total fatal suspended writes
pub volume_unrecovered_write_servo_errors: u64,
/// Total datasets read
pub volume_datasets_read: u64,
/// Total read retries
pub volume_recovered_read_errors: u64,
/// Total unrecovered read errors
pub volume_unrecovered_read_errors: u64,
/// Last mount unrecovered write errors
pub last_mount_unrecovered_write_errors: u64,
/// Last mount unrecovered read errors
pub last_mount_unrecovered_read_errors: u64,
/// Last mount bytes written
pub last_mount_bytes_written: u64,
/// Last mount bytes read
pub last_mount_bytes_read: u64,
/// Lifetime bytes written
pub lifetime_bytes_written: u64,
/// Lifetime bytes read
pub lifetime_bytes_read: u64,
/// Last load write compression ratio
pub last_load_write_compression_ratio: u64,
/// Last load read compression ratio
pub last_load_read_compression_ratio: u64,
/// Medium mount time
pub medium_mount_time: u64,
/// Medium ready time
pub medium_ready_time: u64,
/// Total native capacity
pub total_native_capacity: u64,
/// Total used native capacity
pub total_used_native_capacity: u64,
/// Write protect
pub write_protect: bool,
/// Volume is WORM
pub worm: bool,
/// Beginning of medium passes
pub beginning_of_medium_passes: u64,
/// Middle of medium passes
pub middle_of_tape_passes: u64,
/// Volume serial number
pub serial: String,
}
/// The DT Device Activity from DT Device Status LP page
#[api]
#[derive(Copy, Clone, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub enum DeviceActivity {
/// No activity
NoActivity,
/// Cleaning
Cleaning,
/// Loading
Loading,
/// Unloading
Unloading,
/// Other unspecified activity
Other,
/// Reading
Reading,
/// Writing
Writing,
/// Locating
Locating,
/// Rewinding
Rewinding,
/// Erasing
Erasing,
/// Formatting
Formatting,
/// Calibrating
Calibrating,
/// Other (DT)
OtherDT,
/// Updating microcode
MicrocodeUpdate,
/// Reading encrypted data
ReadingEncrypted,
/// Writing encrypted data
WritingEncrypted,
}
impl TryFrom<u8> for DeviceActivity {
type Error = Error;
fn try_from(value: u8) -> Result<Self, Self::Error> {
Ok(match value {
0x00 => DeviceActivity::NoActivity,
0x01 => DeviceActivity::Cleaning,
0x02 => DeviceActivity::Loading,
0x03 => DeviceActivity::Unloading,
0x04 => DeviceActivity::Other,
0x05 => DeviceActivity::Reading,
0x06 => DeviceActivity::Writing,
0x07 => DeviceActivity::Locating,
0x08 => DeviceActivity::Rewinding,
0x09 => DeviceActivity::Erasing,
0x0A => DeviceActivity::Formatting,
0x0B => DeviceActivity::Calibrating,
0x0C => DeviceActivity::OtherDT,
0x0D => DeviceActivity::MicrocodeUpdate,
0x0E => DeviceActivity::ReadingEncrypted,
0x0F => DeviceActivity::WritingEncrypted,
other => bail!("invalid DT device activity value: {:x}", other),
})
}
}
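// A minimal usage sketch for the conversion above (illustrative only; it
// assumes DeviceActivity is re-exported at the pbs_api_types crate root like
// the other tape types in this diff):
use pbs_api_types::DeviceActivity;

fn main() {
    // 0x05 is the code for "Reading" per the match arms above.
    let activity = DeviceActivity::try_from(0x05).expect("known activity code");
    assert!(matches!(activity, DeviceActivity::Reading));
    // Codes outside 0x00..=0x0F are rejected with an error.
    assert!(DeviceActivity::try_from(0xFF).is_err());
}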

View File

@ -0,0 +1,179 @@
use ::serde::{Deserialize, Serialize};
use proxmox_schema::*;
use proxmox_uuid::Uuid;
use crate::{MediaLocation, MediaStatus, UUID_FORMAT};
pub const MEDIA_SET_UUID_SCHEMA: Schema = StringSchema::new(
"MediaSet Uuid (We use the all-zero Uuid to reseve an empty media for a specific pool).",
)
.format(&UUID_FORMAT)
.schema();
pub const MEDIA_UUID_SCHEMA: Schema = StringSchema::new("Media Uuid.")
.format(&UUID_FORMAT)
.schema();
#[api(
properties: {
"media-set-uuid": {
schema: MEDIA_SET_UUID_SCHEMA,
},
},
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Media Set list entry
pub struct MediaSetListEntry {
/// Media set name
pub media_set_name: String,
pub media_set_uuid: Uuid,
/// MediaSet creation time stamp
pub media_set_ctime: i64,
/// Media Pool
pub pool: String,
}
#[api(
properties: {
location: {
type: MediaLocation,
},
status: {
type: MediaStatus,
},
uuid: {
schema: MEDIA_UUID_SCHEMA,
},
"media-set-uuid": {
schema: MEDIA_SET_UUID_SCHEMA,
optional: true,
},
},
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Media list entry
pub struct MediaListEntry {
/// Media label text (or Barcode)
pub label_text: String,
pub uuid: Uuid,
/// Creation time stamp
pub ctime: i64,
pub location: MediaLocation,
pub status: MediaStatus,
/// Expired flag
pub expired: bool,
/// Catalog status OK
pub catalog: bool,
/// Media set name
#[serde(skip_serializing_if = "Option::is_none")]
pub media_set_name: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub media_set_uuid: Option<Uuid>,
/// Media set seq_nr
#[serde(skip_serializing_if = "Option::is_none")]
pub seq_nr: Option<u64>,
/// MediaSet creation time stamp
#[serde(skip_serializing_if = "Option::is_none")]
pub media_set_ctime: Option<i64>,
/// Media Pool
#[serde(skip_serializing_if = "Option::is_none")]
pub pool: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
/// Bytes currently used
pub bytes_used: Option<u64>,
}
#[api(
properties: {
uuid: {
schema: MEDIA_UUID_SCHEMA,
},
"media-set-uuid": {
schema: MEDIA_SET_UUID_SCHEMA,
optional: true,
},
},
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Media label info
pub struct MediaIdFlat {
/// Unique ID
pub uuid: Uuid,
/// Media label text (or Barcode)
pub label_text: String,
/// Creation time stamp
pub ctime: i64,
// All MediaSet properties are optional here
/// MediaSet Pool
#[serde(skip_serializing_if = "Option::is_none")]
pub pool: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub media_set_uuid: Option<Uuid>,
/// MediaSet media sequence number
#[serde(skip_serializing_if = "Option::is_none")]
pub seq_nr: Option<u64>,
/// MediaSet Creation time stamp
#[serde(skip_serializing_if = "Option::is_none")]
pub media_set_ctime: Option<i64>,
/// Encryption key fingerprint
#[serde(skip_serializing_if = "Option::is_none")]
pub encryption_key_fingerprint: Option<String>,
}
#[api(
properties: {
uuid: {
schema: MEDIA_UUID_SCHEMA,
optional: true,
},
},
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Label with optional Uuid
pub struct LabelUuidMap {
/// Changer label text (or Barcode)
pub label_text: String,
/// Associated Uuid (if any)
pub uuid: Option<Uuid>,
}
#[api(
properties: {
uuid: {
schema: MEDIA_UUID_SCHEMA,
},
"media-set-uuid": {
schema: MEDIA_SET_UUID_SCHEMA,
},
},
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Media content list entry
pub struct MediaContentEntry {
/// Media label text (or Barcode)
pub label_text: String,
/// Media Uuid
pub uuid: Uuid,
/// Media set name
pub media_set_name: String,
/// Media set uuid
pub media_set_uuid: Uuid,
/// MediaSet Creation time stamp
pub media_set_ctime: i64,
/// Media set seq_nr
pub seq_nr: u64,
/// Media Pool
pub pool: String,
/// Datastore Name
pub store: String,
/// Backup snapshot
pub snapshot: String,
/// Snapshot creation time (epoch)
pub backup_time: i64,
}

View File

@ -0,0 +1,80 @@
use anyhow::{bail, Error};
use proxmox_schema::{ApiStringFormat, Schema, StringSchema};
use crate::{CHANGER_NAME_SCHEMA, PROXMOX_SAFE_ID_FORMAT};
pub const VAULT_NAME_SCHEMA: Schema = StringSchema::new("Vault name.")
.format(&PROXMOX_SAFE_ID_FORMAT)
.min_length(3)
.max_length(32)
.schema();
#[derive(Debug, PartialEq, Eq, Clone)]
/// Media location
pub enum MediaLocation {
/// Ready for use (inside tape library)
Online(String),
/// Locally available, but needs to be mounted (insert into tape
/// drive)
Offline,
/// Media is inside a Vault
Vault(String),
}
proxmox_serde::forward_deserialize_to_from_str!(MediaLocation);
proxmox_serde::forward_serialize_to_display!(MediaLocation);
impl proxmox_schema::ApiType for MediaLocation {
const API_SCHEMA: Schema = StringSchema::new(
"Media location (e.g. 'offline', 'online-<changer_name>', 'vault-<vault_name>')",
)
.format(&ApiStringFormat::VerifyFn(|text| {
let location: MediaLocation = text.parse()?;
match location {
MediaLocation::Online(ref changer) => {
CHANGER_NAME_SCHEMA.parse_simple_value(changer)?;
}
MediaLocation::Vault(ref vault) => {
VAULT_NAME_SCHEMA.parse_simple_value(vault)?;
}
MediaLocation::Offline => { /* OK */ }
}
Ok(())
}))
.schema();
}
impl std::fmt::Display for MediaLocation {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
MediaLocation::Offline => {
write!(f, "offline")
}
MediaLocation::Online(changer) => {
write!(f, "online-{}", changer)
}
MediaLocation::Vault(vault) => {
write!(f, "vault-{}", vault)
}
}
}
}
impl std::str::FromStr for MediaLocation {
type Err = Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
if s == "offline" {
return Ok(MediaLocation::Offline);
}
if let Some(changer) = s.strip_prefix("online-") {
return Ok(MediaLocation::Online(changer.to_string()));
}
if let Some(vault) = s.strip_prefix("vault-") {
return Ok(MediaLocation::Vault(vault.to_string()));
}
bail!("MediaLocation parse error");
}
}
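// A quick sketch of the string format accepted by the schema above
// (illustrative only; assumes MediaLocation is re-exported from pbs_api_types
// and anyhow is available as a dependency):
use pbs_api_types::MediaLocation;

fn main() -> Result<(), anyhow::Error> {
    // Prefixed forms carry the changer or vault name; "offline" stands alone.
    let loc: MediaLocation = "online-changer0".parse()?;
    assert_eq!(loc, MediaLocation::Online("changer0".to_string()));
    assert_eq!("offline".parse::<MediaLocation>()?, MediaLocation::Offline);
    // Display uses the same prefixes, so the representation round-trips.
    assert_eq!(loc.to_string(), "online-changer0");
    Ok(())
}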

View File

@ -0,0 +1,161 @@
//! Types for tape media pool API
//!
//! Note: Both MediaSetPolicy and RetentionPolicy are complex enums,
//! so we cannot use them directly for the API. Instead, we represent
//! them as String.
use std::str::FromStr;
use anyhow::Error;
use serde::{Deserialize, Serialize};
use proxmox_schema::{api, ApiStringFormat, Schema, StringSchema, Updater};
use proxmox_time::{CalendarEvent, TimeSpan};
use crate::{
PROXMOX_SAFE_ID_FORMAT, SINGLE_LINE_COMMENT_FORMAT, SINGLE_LINE_COMMENT_SCHEMA,
TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
};
pub const MEDIA_POOL_NAME_SCHEMA: Schema = StringSchema::new("Media pool name.")
.format(&PROXMOX_SAFE_ID_FORMAT)
.min_length(2)
.max_length(32)
.schema();
pub const MEDIA_SET_NAMING_TEMPLATE_SCHEMA: Schema = StringSchema::new(
"Media set naming template (may contain strftime() time format specifications).",
)
.format(&SINGLE_LINE_COMMENT_FORMAT)
.min_length(2)
.max_length(64)
.schema();
pub const MEDIA_SET_ALLOCATION_POLICY_FORMAT: ApiStringFormat = ApiStringFormat::VerifyFn(|s| {
MediaSetPolicy::from_str(s)?;
Ok(())
});
pub const MEDIA_SET_ALLOCATION_POLICY_SCHEMA: Schema =
StringSchema::new("Media set allocation policy ('continue', 'always', or a calendar event).")
.format(&MEDIA_SET_ALLOCATION_POLICY_FORMAT)
.schema();
/// Media set allocation policy
pub enum MediaSetPolicy {
/// Try to use the current media set
ContinueCurrent,
/// Each backup job creates a new media set
AlwaysCreate,
/// Create a new set when the specified CalendarEvent triggers
CreateAt(CalendarEvent),
}
impl std::str::FromStr for MediaSetPolicy {
type Err = Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
if s == "continue" {
return Ok(MediaSetPolicy::ContinueCurrent);
}
if s == "always" {
return Ok(MediaSetPolicy::AlwaysCreate);
}
let event = s.parse()?;
Ok(MediaSetPolicy::CreateAt(event))
}
}
pub const MEDIA_RETENTION_POLICY_FORMAT: ApiStringFormat = ApiStringFormat::VerifyFn(|s| {
RetentionPolicy::from_str(s)?;
Ok(())
});
pub const MEDIA_RETENTION_POLICY_SCHEMA: Schema =
StringSchema::new("Media retention policy ('overwrite', 'keep', or time span).")
.format(&MEDIA_RETENTION_POLICY_FORMAT)
.schema();
/// Media retention Policy
pub enum RetentionPolicy {
/// Always overwrite media
OverwriteAlways,
/// Protect data for the timespan specified
ProtectFor(TimeSpan),
/// Never overwrite data
KeepForever,
}
impl std::str::FromStr for RetentionPolicy {
type Err = Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
if s == "overwrite" {
return Ok(RetentionPolicy::OverwriteAlways);
}
if s == "keep" {
return Ok(RetentionPolicy::KeepForever);
}
let time_span = s.parse()?;
Ok(RetentionPolicy::ProtectFor(time_span))
}
}
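// Both parsers above accept a fixed keyword and otherwise defer to the
// proxmox-time parsers; a small sketch (illustrative only, the calendar-event
// and time-span strings assume the usual proxmox_time syntax):
use pbs_api_types::{MediaSetPolicy, RetentionPolicy};

fn main() -> Result<(), anyhow::Error> {
    // Fixed keywords map to the simple variants.
    assert!(matches!("always".parse::<MediaSetPolicy>()?, MediaSetPolicy::AlwaysCreate));
    assert!(matches!("keep".parse::<RetentionPolicy>()?, RetentionPolicy::KeepForever));
    // Anything else is handed to the CalendarEvent / TimeSpan parsers.
    assert!(matches!("sat 20:00".parse::<MediaSetPolicy>()?, MediaSetPolicy::CreateAt(_)));
    assert!(matches!("14 days".parse::<RetentionPolicy>()?, RetentionPolicy::ProtectFor(_)));
    Ok(())
}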
#[api(
properties: {
name: {
schema: MEDIA_POOL_NAME_SCHEMA,
},
allocation: {
schema: MEDIA_SET_ALLOCATION_POLICY_SCHEMA,
optional: true,
},
retention: {
schema: MEDIA_RETENTION_POLICY_SCHEMA,
optional: true,
},
template: {
schema: MEDIA_SET_NAMING_TEMPLATE_SCHEMA,
optional: true,
},
encrypt: {
schema: TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
optional: true,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
},
)]
#[derive(Serialize, Deserialize, Updater)]
/// Media pool configuration
pub struct MediaPoolConfig {
/// The pool name
#[updater(skip)]
pub name: String,
/// Media Set allocation policy
#[serde(skip_serializing_if = "Option::is_none")]
pub allocation: Option<String>,
/// Media retention policy
#[serde(skip_serializing_if = "Option::is_none")]
pub retention: Option<String>,
/// Media set naming template (default "%c")
///
    /// The template is UTF-8 text, and can include strftime() time
/// format specifications.
#[serde(skip_serializing_if = "Option::is_none")]
pub template: Option<String>,
/// Encryption key fingerprint
///
/// If set, encrypt all data using the specified key.
#[serde(skip_serializing_if = "Option::is_none")]
pub encrypt: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>,
}

View File

@ -0,0 +1,21 @@
use serde::{Deserialize, Serialize};
use proxmox_schema::api;
#[api()]
#[derive(Debug, PartialEq, Eq, Copy, Clone, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// Media status
pub enum MediaStatus {
/// Media is ready to be written
Writable,
/// Media is full (contains data)
Full,
/// Media is marked as unknown, needs rescan
Unknown,
/// Media is marked as damaged
Damaged,
/// Media is marked as retired
Retired,
}

View File

@ -0,0 +1,92 @@
//! Types for tape backup API
mod device;
pub use device::*;
mod changer;
pub use changer::*;
mod drive;
pub use drive::*;
mod media_pool;
pub use media_pool::*;
mod media_status;
pub use media_status::*;
mod media_location;
pub use media_location::*;
mod media;
pub use media::*;
use const_format::concatcp;
use serde::{Deserialize, Serialize};
use proxmox_schema::{api, const_regex, ApiStringFormat, Schema, StringSchema};
use proxmox_uuid::Uuid;
use crate::{
BackupType, BACKUP_ID_SCHEMA, BACKUP_NS_PATH_RE, FINGERPRINT_SHA256_FORMAT,
PROXMOX_SAFE_ID_REGEX_STR, SNAPSHOT_PATH_REGEX_STR,
};
const_regex! {
pub TAPE_RESTORE_SNAPSHOT_REGEX = concatcp!(r"^", PROXMOX_SAFE_ID_REGEX_STR, r":(?:", BACKUP_NS_PATH_RE,")?", SNAPSHOT_PATH_REGEX_STR, r"$");
}
pub const TAPE_RESTORE_SNAPSHOT_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&TAPE_RESTORE_SNAPSHOT_REGEX);
pub const TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA: Schema =
StringSchema::new("Tape encryption key fingerprint (sha256).")
.format(&FINGERPRINT_SHA256_FORMAT)
.schema();
pub const TAPE_RESTORE_SNAPSHOT_SCHEMA: Schema =
StringSchema::new("A snapshot in the format: 'store:[ns/namespace/...]type/id/time")
.format(&TAPE_RESTORE_SNAPSHOT_FORMAT)
.type_text("store:[ns/namespace/...]type/id/time")
.schema();
#[api(
properties: {
pool: {
schema: MEDIA_POOL_NAME_SCHEMA,
optional: true,
},
"label-text": {
schema: MEDIA_LABEL_SCHEMA,
optional: true,
},
"media": {
schema: MEDIA_UUID_SCHEMA,
optional: true,
},
"media-set": {
schema: MEDIA_SET_UUID_SCHEMA,
optional: true,
},
"backup-type": {
type: BackupType,
optional: true,
},
"backup-id": {
schema: BACKUP_ID_SCHEMA,
optional: true,
},
},
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Content list filter parameters
pub struct MediaContentListFilter {
pub pool: Option<String>,
pub label_text: Option<String>,
pub media: Option<Uuid>,
pub media_set: Option<Uuid>,
pub backup_type: Option<BackupType>,
pub backup_id: Option<String>,
}

View File

@ -0,0 +1,170 @@
use serde::{Deserialize, Serialize};
use proxmox_human_byte::HumanByte;
use proxmox_schema::{api, ApiType, Schema, StringSchema, Updater};
use crate::{
CIDR_SCHEMA, DAILY_DURATION_FORMAT, PROXMOX_SAFE_ID_FORMAT, SINGLE_LINE_COMMENT_SCHEMA,
};
pub const TRAFFIC_CONTROL_TIMEFRAME_SCHEMA: Schema =
StringSchema::new("Timeframe to specify when the rule is active.")
.format(&DAILY_DURATION_FORMAT)
.schema();
pub const TRAFFIC_CONTROL_ID_SCHEMA: Schema = StringSchema::new("Rule ID.")
.format(&PROXMOX_SAFE_ID_FORMAT)
.min_length(3)
.max_length(32)
.schema();
#[api(
properties: {
"rate-in": {
type: HumanByte,
optional: true,
},
"burst-in": {
type: HumanByte,
optional: true,
},
"rate-out": {
type: HumanByte,
optional: true,
},
"burst-out": {
type: HumanByte,
optional: true,
},
},
)]
#[derive(Serialize, Deserialize, Default, Clone, Updater, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Rate Limit Configuration
pub struct RateLimitConfig {
#[serde(skip_serializing_if = "Option::is_none")]
pub rate_in: Option<HumanByte>,
#[serde(skip_serializing_if = "Option::is_none")]
pub burst_in: Option<HumanByte>,
#[serde(skip_serializing_if = "Option::is_none")]
pub rate_out: Option<HumanByte>,
#[serde(skip_serializing_if = "Option::is_none")]
pub burst_out: Option<HumanByte>,
}
impl RateLimitConfig {
pub fn with_same_inout(rate: Option<HumanByte>, burst: Option<HumanByte>) -> Self {
Self {
rate_in: rate,
burst_in: burst,
rate_out: rate,
burst_out: burst,
}
}
/// Create a [RateLimitConfig] from a [ClientRateLimitConfig]
pub fn from_client_config(limit: ClientRateLimitConfig) -> Self {
Self::with_same_inout(limit.rate, limit.burst)
}
}
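// A sketch of how a client limit ends up applied symmetrically (illustrative
// only; relies on HumanByte's From<usize> conversion used elsewhere in this
// diff and on serde_json being available):
use pbs_api_types::RateLimitConfig;
use proxmox_human_byte::HumanByte;

fn main() {
    // Same rate for ingress and egress, burst left unset.
    let limit = RateLimitConfig::with_same_inout(Some(HumanByte::from(100_000_000usize)), None);
    // Unset options are skipped on serialization (see skip_serializing_if above),
    // so only the kebab-case keys rate-in and rate-out are emitted.
    println!("{}", serde_json::to_string(&limit).unwrap());
}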
const CLIENT_RATE_LIMIT_SCHEMA: Schema = StringSchema {
description: "Rate limit (for Token bucket filter) in bytes/s with optional unit (B, KB (base 10), MB, GB, ..., KiB (base 2), MiB, Gib, ...).",
..*HumanByte::API_SCHEMA.unwrap_string_schema()
}
.schema();
const CLIENT_BURST_SCHEMA: Schema = StringSchema {
description: "Size of the token bucket (for Token bucket filter) in bytes with optional unit (B, KB (base 10), MB, GB, ..., KiB (base 2), MiB, Gib, ...).",
..*HumanByte::API_SCHEMA.unwrap_string_schema()
}
.schema();
#[api(
properties: {
rate: {
schema: CLIENT_RATE_LIMIT_SCHEMA,
optional: true,
},
burst: {
schema: CLIENT_BURST_SCHEMA,
optional: true,
},
},
)]
#[derive(Serialize, Deserialize, Default, Clone)]
#[serde(rename_all = "kebab-case")]
/// Client Rate Limit Configuration
pub struct ClientRateLimitConfig {
#[serde(skip_serializing_if = "Option::is_none")]
rate: Option<HumanByte>,
#[serde(skip_serializing_if = "Option::is_none")]
burst: Option<HumanByte>,
}
#[api(
properties: {
name: {
schema: TRAFFIC_CONTROL_ID_SCHEMA,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
limit: {
type: RateLimitConfig,
},
network: {
type: Array,
items: {
schema: CIDR_SCHEMA,
},
},
timeframe: {
type: Array,
items: {
schema: TRAFFIC_CONTROL_TIMEFRAME_SCHEMA,
},
optional: true,
},
},
)]
#[derive(Clone, Serialize, Deserialize, PartialEq, Updater)]
#[serde(rename_all = "kebab-case")]
/// Traffic control rule
pub struct TrafficControlRule {
#[updater(skip)]
pub name: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>,
    /// Rule applies to source IPs within these networks
pub network: Vec<String>,
#[serde(flatten)]
pub limit: RateLimitConfig,
// fixme: expose this?
// /// Bandwidth is shared across all connections
// #[serde(skip_serializing_if="Option::is_none")]
// pub shared: Option<bool>,
/// Enable the rule at specific times
#[serde(skip_serializing_if = "Option::is_none")]
pub timeframe: Option<Vec<String>>,
}
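// Because `limit` is #[serde(flatten)]ed, its rate/burst options appear as
// top-level keys of the rule; a sketch (illustrative only, the network and
// timeframe values are made up):
use pbs_api_types::{RateLimitConfig, TrafficControlRule};

fn main() {
    let rule = TrafficControlRule {
        name: "office".to_string(),
        comment: None,
        network: vec!["192.168.2.0/24".to_string()],
        limit: RateLimitConfig::default(),
        timeframe: Some(vec!["mon..fri 8:00-16:00".to_string()]),
    };
    // With the limit unset only name, network and timeframe are emitted;
    // populated rate-in/rate-out/burst-* keys would sit right next to them.
    println!("{}", serde_json::to_string_pretty(&rule).unwrap());
}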
#[api(
properties: {
config: {
type: TrafficControlRule,
},
},
)]
#[derive(Clone, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Traffic control rule config with current rates
pub struct TrafficControlCurrentRate {
#[serde(flatten)]
pub config: TrafficControlRule,
/// Current ingress rate in bytes/second
pub cur_rate_in: u64,
/// Current egress rate in bytes/second
pub cur_rate_out: u64,
}

226
pbs-api-types/src/user.rs Normal file
View File

@ -0,0 +1,226 @@
use serde::{Deserialize, Serialize};
use proxmox_schema::{api, BooleanSchema, IntegerSchema, Schema, StringSchema, Updater};
use super::userid::{Authid, Userid, PROXMOX_TOKEN_ID_SCHEMA};
use super::{SINGLE_LINE_COMMENT_FORMAT, SINGLE_LINE_COMMENT_SCHEMA};
pub const ENABLE_USER_SCHEMA: Schema = BooleanSchema::new(
"Enable the account (default). You can set this to '0' to disable the account.",
)
.default(true)
.schema();
pub const EXPIRE_USER_SCHEMA: Schema = IntegerSchema::new(
"Account expiration date (seconds since epoch). '0' means no expiration date.",
)
.default(0)
.minimum(0)
.schema();
pub const FIRST_NAME_SCHEMA: Schema = StringSchema::new("First name.")
.format(&SINGLE_LINE_COMMENT_FORMAT)
.min_length(2)
.max_length(64)
.schema();
pub const LAST_NAME_SCHEMA: Schema = StringSchema::new("Last name.")
.format(&SINGLE_LINE_COMMENT_FORMAT)
.min_length(2)
.max_length(64)
.schema();
pub const EMAIL_SCHEMA: Schema = StringSchema::new("E-Mail Address.")
.format(&SINGLE_LINE_COMMENT_FORMAT)
.min_length(2)
.max_length(64)
.schema();
#[api(
properties: {
userid: {
type: Userid,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
enable: {
optional: true,
schema: ENABLE_USER_SCHEMA,
},
expire: {
optional: true,
schema: EXPIRE_USER_SCHEMA,
},
firstname: {
optional: true,
schema: FIRST_NAME_SCHEMA,
},
lastname: {
schema: LAST_NAME_SCHEMA,
optional: true,
},
email: {
schema: EMAIL_SCHEMA,
optional: true,
},
tokens: {
type: Array,
optional: true,
description: "List of user's API tokens.",
items: {
type: ApiToken
},
},
"totp-locked": {
type: bool,
optional: true,
default: false,
description: "True if the user is currently locked out of TOTP factors",
},
"tfa-locked-until": {
optional: true,
description: "Contains a timestamp until when a user is locked out of 2nd factors",
},
}
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// User properties with added list of ApiTokens
pub struct UserWithTokens {
pub userid: Userid,
#[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub enable: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")]
pub expire: Option<i64>,
#[serde(skip_serializing_if = "Option::is_none")]
pub firstname: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub lastname: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub email: Option<String>,
#[serde(skip_serializing_if = "Vec::is_empty", default)]
pub tokens: Vec<ApiToken>,
#[serde(skip_serializing_if = "bool_is_false", default)]
pub totp_locked: bool,
#[serde(skip_serializing_if = "Option::is_none")]
pub tfa_locked_until: Option<i64>,
}
fn bool_is_false(b: &bool) -> bool {
!b
}
#[api(
properties: {
tokenid: {
schema: PROXMOX_TOKEN_ID_SCHEMA,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
enable: {
optional: true,
schema: ENABLE_USER_SCHEMA,
},
expire: {
optional: true,
schema: EXPIRE_USER_SCHEMA,
},
}
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
/// ApiToken properties.
pub struct ApiToken {
pub tokenid: Authid,
#[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub enable: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")]
pub expire: Option<i64>,
}
impl ApiToken {
pub fn is_active(&self) -> bool {
if !self.enable.unwrap_or(true) {
return false;
}
if let Some(expire) = self.expire {
let now = proxmox_time::epoch_i64();
if expire > 0 && expire <= now {
return false;
}
}
true
}
}
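// A token counts as active only if it is not disabled and not expired; a
// minimal sketch of that check (illustrative only, the Authid string and JSON
// are made up and serde_json is assumed to be available):
use pbs_api_types::ApiToken;

fn main() -> Result<(), anyhow::Error> {
    // Enabled, but with an expiry timestamp far in the past: not active.
    let expired: ApiToken = serde_json::from_value(serde_json::json!({
        "tokenid": "backup@pbs!automation",
        "enable": true,
        "expire": 1,
    }))?;
    assert!(!expired.is_active());
    // No expiry and not explicitly disabled: active.
    let token: ApiToken = serde_json::from_value(serde_json::json!({
        "tokenid": "backup@pbs!automation",
    }))?;
    assert!(token.is_active());
    Ok(())
}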
#[api(
properties: {
userid: {
type: Userid,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
enable: {
optional: true,
schema: ENABLE_USER_SCHEMA,
},
expire: {
optional: true,
schema: EXPIRE_USER_SCHEMA,
},
firstname: {
optional: true,
schema: FIRST_NAME_SCHEMA,
},
lastname: {
schema: LAST_NAME_SCHEMA,
optional: true,
},
email: {
schema: EMAIL_SCHEMA,
optional: true,
},
}
)]
#[derive(Serialize, Deserialize, Updater, PartialEq, Eq)]
/// User properties.
pub struct User {
#[updater(skip)]
pub userid: Userid,
#[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub enable: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")]
pub expire: Option<i64>,
#[serde(skip_serializing_if = "Option::is_none")]
pub firstname: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub lastname: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub email: Option<String>,
}
impl User {
pub fn is_active(&self) -> bool {
if !self.enable.unwrap_or(true) {
return false;
}
if let Some(expire) = self.expire {
let now = proxmox_time::epoch_i64();
if expire > 0 && expire <= now {
return false;
}
}
true
}
}

View File

@ -0,0 +1,190 @@
//! Defines the types for the api version info endpoint
use std::cmp::Ordering;
use std::convert::TryFrom;
use anyhow::{format_err, Context};
use proxmox_schema::api;
#[api(
description: "Api version information",
properties: {
"version": {
description: "Version 'major.minor'",
type: String,
},
"release": {
description: "Version release",
type: String,
},
"repoid": {
description: "Version repository id",
type: String,
},
}
)]
#[derive(serde::Deserialize, serde::Serialize)]
pub struct ApiVersionInfo {
pub version: String,
pub release: String,
pub repoid: String,
}
pub type ApiVersionMajor = u64;
pub type ApiVersionMinor = u64;
pub type ApiVersionRelease = u64;
#[derive(PartialEq, Eq)]
pub struct ApiVersion {
pub major: ApiVersionMajor,
pub minor: ApiVersionMinor,
pub release: ApiVersionRelease,
}
impl TryFrom<ApiVersionInfo> for ApiVersion {
type Error = anyhow::Error;
fn try_from(value: ApiVersionInfo) -> Result<Self, Self::Error> {
let (major, minor) = value
.version
.split_once('.')
.ok_or_else(|| format_err!("malformed API version {}", value.version))?;
let major: ApiVersionMajor = major
.parse()
.with_context(|| "failed to parse major version")?;
let minor: ApiVersionMinor = minor
.parse()
.with_context(|| "failed to parse minor version")?;
let release: ApiVersionRelease = value
.release
.parse()
.with_context(|| "failed to parse release version")?;
Ok(Self {
major,
minor,
release,
})
}
}
impl PartialOrd for ApiVersion {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
let ordering = match (
self.major.cmp(&other.major),
self.minor.cmp(&other.minor),
self.release.cmp(&other.release),
) {
(Ordering::Equal, Ordering::Equal, ordering) => ordering,
(Ordering::Equal, ordering, _) => ordering,
(ordering, _, _) => ordering,
};
Some(ordering)
}
}
impl ApiVersion {
pub fn new(major: ApiVersionMajor, minor: ApiVersionMinor, release: ApiVersionRelease) -> Self {
Self {
major,
minor,
release,
}
}
}
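// Together, the conversion and ordering above allow simple capability checks
// against the version endpoint; a sketch (illustrative only, values made up):
use pbs_api_types::{ApiVersion, ApiVersionInfo};

fn main() -> Result<(), anyhow::Error> {
    let info = ApiVersionInfo {
        version: "3.3".to_string(),
        release: "1".to_string(),
        repoid: "0123456789abcdef".to_string(),
    };
    let version = ApiVersion::try_from(info)?;
    // Ordering compares major first, then minor, then release.
    assert!(version >= ApiVersion::new(3, 2, 0));
    assert!(version < ApiVersion::new(4, 0, 0));
    Ok(())
}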
#[test]
fn same_level_version_comparison() {
let major_base = ApiVersion::new(2, 0, 0);
let major_less = ApiVersion::new(1, 0, 0);
let major_greater = ApiVersion::new(3, 0, 0);
let minor_base = ApiVersion::new(2, 2, 0);
let minor_less = ApiVersion::new(2, 1, 0);
let minor_greater = ApiVersion::new(2, 3, 0);
let release_base = ApiVersion::new(2, 2, 2);
let release_less = ApiVersion::new(2, 2, 1);
let release_greater = ApiVersion::new(2, 2, 3);
assert!(major_base == major_base);
assert!(minor_base == minor_base);
assert!(release_base == release_base);
assert!(major_base > major_less);
assert!(major_base >= major_less);
assert!(major_base != major_less);
assert!(major_base < major_greater);
assert!(major_base <= major_greater);
assert!(major_base != major_greater);
assert!(minor_base > minor_less);
assert!(minor_base >= minor_less);
assert!(minor_base != minor_less);
assert!(minor_base < minor_greater);
assert!(minor_base <= minor_greater);
assert!(minor_base != minor_greater);
assert!(release_base > release_less);
assert!(release_base >= release_less);
assert!(release_base != release_less);
assert!(release_base < release_greater);
assert!(release_base <= release_greater);
assert!(release_base != release_greater);
}
#[test]
fn mixed_level_version_comparison() {
let major_base = ApiVersion::new(2, 0, 0);
let major_less = ApiVersion::new(1, 0, 0);
let major_greater = ApiVersion::new(3, 0, 0);
let minor_base = ApiVersion::new(2, 2, 0);
let minor_less = ApiVersion::new(2, 1, 0);
let minor_greater = ApiVersion::new(2, 3, 0);
let release_base = ApiVersion::new(2, 2, 2);
let release_less = ApiVersion::new(2, 2, 1);
let release_greater = ApiVersion::new(2, 2, 3);
assert!(major_base < minor_base);
assert!(major_base < minor_less);
assert!(major_base < minor_greater);
assert!(major_base < release_base);
assert!(major_base < release_less);
assert!(major_base < release_greater);
assert!(major_less < minor_base);
assert!(major_less < minor_less);
assert!(major_less < minor_greater);
assert!(major_less < release_base);
assert!(major_less < release_less);
assert!(major_less < release_greater);
assert!(major_greater > minor_base);
assert!(major_greater > minor_less);
assert!(major_greater > minor_greater);
assert!(major_greater > release_base);
assert!(major_greater > release_less);
assert!(major_greater > release_greater);
assert!(minor_base < release_base);
assert!(minor_base < release_less);
assert!(minor_base < release_greater);
assert!(minor_greater > release_base);
assert!(minor_greater > release_less);
assert!(minor_greater > release_greater);
assert!(minor_less < release_base);
assert!(minor_less < release_less);
assert!(minor_less < release_greater);
}

78
pbs-api-types/src/zfs.rs Normal file
View File

@ -0,0 +1,78 @@
use serde::{Deserialize, Serialize};
use proxmox_schema::*;
const_regex! {
pub ZPOOL_NAME_REGEX = r"^[a-zA-Z][a-z0-9A-Z\-_.:]+$";
}
pub const ZFS_ASHIFT_SCHEMA: Schema = IntegerSchema::new("Pool sector size exponent.")
.minimum(9)
.maximum(16)
.default(12)
.schema();
pub const ZPOOL_NAME_SCHEMA: Schema = StringSchema::new("ZFS Pool Name")
.format(&ApiStringFormat::Pattern(&ZPOOL_NAME_REGEX))
.schema();
#[api(default: "On")]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// The ZFS compression algorithm to use.
pub enum ZfsCompressionType {
/// Gnu Zip
Gzip,
/// LZ4
Lz4,
/// LZJB
Lzjb,
/// ZLE
Zle,
/// ZStd
ZStd,
/// Enable compression using the default algorithm.
On,
/// Disable compression.
Off,
}
#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// The ZFS RAID level to use.
pub enum ZfsRaidLevel {
/// Single Disk
Single,
/// Mirror
Mirror,
/// Raid10
Raid10,
/// RaidZ
RaidZ,
/// RaidZ2
RaidZ2,
/// RaidZ3
RaidZ3,
}
#[api()]
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// zpool list item
pub struct ZpoolListItem {
/// zpool name
pub name: String,
/// Health
pub health: String,
/// Total size
pub size: u64,
/// Used size
pub alloc: u64,
/// Free space
pub free: u64,
    /// ZFS fragmentation level
pub frag: u64,
/// ZFS deduplication ratio
pub dedup: f64,
}

View File

@ -0,0 +1,76 @@
use pbs_api_types::{BackupGroup, BackupType, GroupFilter};
use std::str::FromStr;
#[test]
fn test_no_filters() {
let group_filters = vec![];
let do_backup = [
"vm/101", "vm/102", "vm/103", "vm/104", "vm/105", "vm/106", "vm/107", "vm/108", "vm/109",
];
for id in do_backup {
assert!(BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters));
}
}
#[test]
fn test_include_filters() {
let group_filters = vec![GroupFilter::from_str("regex:.*10[2-8]").unwrap()];
let do_backup = [
"vm/102", "vm/103", "vm/104", "vm/105", "vm/106", "vm/107", "vm/108",
];
let dont_backup = ["vm/101", "vm/109"];
for id in do_backup {
assert!(BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters));
}
for id in dont_backup {
assert!(!BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters));
}
}
#[test]
fn test_exclude_filters() {
let group_filters = [
GroupFilter::from_str("exclude:regex:.*10[1-3]").unwrap(),
GroupFilter::from_str("exclude:regex:.*10[5-7]").unwrap(),
];
let do_backup = ["vm/104", "vm/108", "vm/109"];
let dont_backup = ["vm/101", "vm/102", "vm/103", "vm/105", "vm/106", "vm/107"];
for id in do_backup {
assert!(BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters));
}
for id in dont_backup {
assert!(!BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters));
}
}
#[test]
fn test_include_and_exclude_filters() {
let group_filters = [
GroupFilter::from_str("exclude:regex:.*10[1-3]").unwrap(),
GroupFilter::from_str("regex:.*10[2-8]").unwrap(),
GroupFilter::from_str("exclude:regex:.*10[5-7]").unwrap(),
];
let do_backup = ["vm/104", "vm/108"];
let dont_backup = [
"vm/101", "vm/102", "vm/103", "vm/105", "vm/106", "vm/107", "vm/109",
];
for id in do_backup {
assert!(BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters));
}
for id in dont_backup {
assert!(!BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters));
}
}

View File

@ -12,8 +12,10 @@ bytes.workspace = true
futures.workspace = true
h2.workspace = true
hex.workspace = true
http.workspace = true
hyper.workspace = true
libc.workspace = true
log.workspace = true
nix.workspace = true
openssl.workspace = true
percent-encoding.workspace = true
@ -27,7 +29,6 @@ tokio = { workspace = true, features = [ "fs", "signal" ] }
tokio-stream.workspace = true
tower-service.workspace = true
xdg.workspace = true
hickory-resolver.workspace = true
pathpatterns.workspace = true
@ -37,7 +38,6 @@ proxmox-compression.workspace = true
proxmox-http = { workspace = true, features = [ "rate-limiter" ] }
proxmox-human-byte.workspace = true
proxmox-io = { workspace = true, features = [ "tokio" ] }
proxmox-log = { workspace = true }
proxmox-router = { workspace = true, features = [ "cli", "server" ] }
proxmox-schema.workspace = true
proxmox-sys.workspace = true

View File

@ -7,11 +7,8 @@ const_regex! {
BACKUPSPEC_REGEX = r"^([a-zA-Z0-9_-]+\.(pxar|img|conf|log)):(.+)$";
}
pub const BACKUP_SOURCE_SCHEMA: Schema = StringSchema::new(
"Backup source specification ([<archive-name>.<type>:<source-path>]), the \
'archive-name' must contain alphanumerics, hyphens and underscores only. \
The 'type' must be either 'pxar', 'img', 'conf' or 'log'.",
)
pub const BACKUP_SOURCE_SCHEMA: Schema =
StringSchema::new("Backup source specification ([<label>:<path>]).")
.format(&ApiStringFormat::Pattern(&BACKUPSPEC_REGEX))
.schema();
@ -38,7 +35,7 @@ pub fn parse_backup_specification(value: &str) -> Result<BackupSpecification, Er
"img" => BackupSpecificationType::IMAGE,
"conf" => BackupSpecificationType::CONFIG,
"log" => BackupSpecificationType::LOGFILE,
_ => bail!("unknown backup source type '{extension}'"),
_ => bail!("unknown backup source type '{}'", extension),
};
return Ok(BackupSpecification {
archive_name,
@ -47,7 +44,7 @@ pub fn parse_backup_specification(value: &str) -> Result<BackupSpecification, Er
});
}
bail!("unable to parse backup source specification '{value}'");
bail!("unable to parse backup source specification '{}'", value);
}
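// A rough sketch of the parser's behaviour (illustrative only; assumes
// parse_backup_specification is re-exported at the pbs_client crate root):
use pbs_client::parse_backup_specification;

fn main() {
    // "<archive-name>.<type>:<source-path>" with a known type parses fine.
    assert!(parse_backup_specification("root.pxar:/").is_ok());
    // Unknown archive types or a missing source path are rejected.
    assert!(parse_backup_specification("root.tar:/").is_err());
    assert!(parse_backup_specification("root.pxar").is_err());
}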
#[api]

View File

@ -25,7 +25,6 @@ use pbs_datastore::PROXMOX_BACKUP_PROTOCOL_ID_V1;
use pbs_tools::crypt_config::CryptConfig;
use proxmox_human_byte::HumanByte;
use proxmox_log::{debug, enabled, info, trace, warn, Level};
use proxmox_time::TimeSpan;
use super::backup_stats::{BackupStats, UploadCounters, UploadStats};
@ -56,7 +55,7 @@ pub struct UploadOptions {
}
struct ChunkUploadResponse {
future: h2::legacy::client::ResponseFuture,
future: h2::client::ResponseFuture,
size: usize,
}
@ -143,7 +142,7 @@ impl BackupWriter {
param: Option<Value>,
content_type: &str,
data: Vec<u8>,
) -> Result<h2::legacy::client::ResponseFuture, Error> {
) -> Result<h2::client::ResponseFuture, Error> {
let request =
H2Client::request_builder("localhost", method, path, param, Some(content_type))
.unwrap();
@ -392,7 +391,7 @@ impl BackupWriter {
.iter()
.any(|file| file.filename == archive_name.as_ref())
{
info!("Previous manifest does not contain an archive called '{archive_name}', skipping download..");
log::info!("Previous manifest does not contain an archive called '{archive_name}', skipping download..");
} else {
// try, but ignore errors
match archive_name.archive_type() {
@ -405,7 +404,7 @@ impl BackupWriter {
)
.await
{
warn!("Error downloading .fidx from previous manifest: {}", err);
log::warn!("Error downloading .fidx from previous manifest: {}", err);
}
}
ArchiveType::DynamicIndex => {
@ -417,7 +416,7 @@ impl BackupWriter {
)
.await
{
warn!("Error downloading .didx from previous manifest: {}", err);
log::warn!("Error downloading .didx from previous manifest: {}", err);
}
}
_ => { /* do nothing */ }
@ -451,14 +450,14 @@ impl BackupWriter {
let size_dirty = upload_stats.size - upload_stats.size_reused;
let size: HumanByte = upload_stats.size.into();
let archive = if enabled!(Level::DEBUG) {
let archive = if log::log_enabled!(log::Level::Debug) {
archive_name.to_string()
} else {
archive_name.without_type_extension()
};
if upload_stats.chunk_injected > 0 {
info!(
log::info!(
"{archive}: reused {} from previous snapshot for unchanged files ({} chunks)",
HumanByte::from(upload_stats.size_injected),
upload_stats.chunk_injected,
@ -470,33 +469,37 @@ impl BackupWriter {
((size_dirty * 1_000_000) / (upload_stats.duration.as_micros() as usize)).into();
let size_dirty: HumanByte = size_dirty.into();
let size_compressed: HumanByte = upload_stats.size_compressed.into();
info!(
log::info!(
"{archive}: had to backup {size_dirty} of {size} (compressed {size_compressed}) in {:.2} s (average {speed}/s)",
upload_stats.duration.as_secs_f64()
);
} else {
info!("Uploaded backup catalog ({})", size);
log::info!("Uploaded backup catalog ({})", size);
}
if upload_stats.size_reused > 0 && upload_stats.size > 1024 * 1024 {
let reused_percent = upload_stats.size_reused as f64 * 100. / upload_stats.size as f64;
let reused: HumanByte = upload_stats.size_reused.into();
info!(
log::info!(
"{}: backup was done incrementally, reused {} ({:.1}%)",
archive, reused, reused_percent
archive,
reused,
reused_percent
);
}
if enabled!(Level::DEBUG) && upload_stats.chunk_count > 0 {
debug!(
if log::log_enabled!(log::Level::Debug) && upload_stats.chunk_count > 0 {
log::debug!(
"{}: Reused {} from {} chunks.",
archive, upload_stats.chunk_reused, upload_stats.chunk_count
archive,
upload_stats.chunk_reused,
upload_stats.chunk_count
);
debug!(
log::debug!(
"{}: Average chunk size was {}.",
archive,
HumanByte::from(upload_stats.size / upload_stats.chunk_count)
);
debug!(
log::debug!(
"{}: Average time per request: {} microseconds.",
archive,
(upload_stats.duration.as_micros()) / (upload_stats.chunk_count as u128)
@ -514,7 +517,7 @@ impl BackupWriter {
}
fn response_queue() -> (
mpsc::Sender<h2::legacy::client::ResponseFuture>,
mpsc::Sender<h2::client::ResponseFuture>,
oneshot::Receiver<Result<(), Error>>,
) {
let (verify_queue_tx, verify_queue_rx) = mpsc::channel(100);
@ -537,11 +540,11 @@ impl BackupWriter {
tokio::spawn(
ReceiverStream::new(verify_queue_rx)
.map(Ok::<_, Error>)
.try_for_each(move |response: h2::legacy::client::ResponseFuture| {
.try_for_each(move |response: h2::client::ResponseFuture| {
response
.map_err(Error::from)
.and_then(H2Client::h2api_response)
.map_ok(move |result| debug!("RESPONSE: {:?}", result))
.map_ok(move |result| log::debug!("RESPONSE: {:?}", result))
.map_err(|err| format_err!("pipelined request failed: {}", err))
})
.map(|result| {
@ -599,7 +602,7 @@ impl BackupWriter {
digest_list.push(hex::encode(digest));
offset_list.push(offset);
}
debug!("append chunks list len ({})", digest_list.len());
log::debug!("append chunks list len ({})", digest_list.len());
let param = json!({ "wid": wid, "digest-list": digest_list, "offset-list": offset_list });
let request = H2Client::request_builder("localhost", "PUT", &path, None, Some("application/json")).unwrap();
let param_data = bytes::Bytes::from(param.to_string().into_bytes());
@ -651,7 +654,7 @@ impl BackupWriter {
known_chunks.insert(*index.index_digest(i).unwrap());
}
debug!(
log::debug!(
"{}: known chunks list length is {}",
archive_name,
index.index_count()
@ -685,7 +688,7 @@ impl BackupWriter {
known_chunks.insert(*index.index_digest(i).unwrap());
}
debug!(
log::debug!(
"{}: known chunks list length is {}",
archive_name,
index.index_count()
@ -857,7 +860,7 @@ impl BackupWriter {
let size_uploaded = HumanByte::from(uploaded_len.load(Ordering::SeqCst));
let elapsed = TimeSpan::from(start_time.elapsed());
info!("processed {size} in {elapsed}, uploaded {size_uploaded}");
log::info!("processed {size} in {elapsed}, uploaded {size_uploaded}");
}
}))
} else {
@ -873,7 +876,7 @@ impl BackupWriter {
let digest = chunk_info.digest;
let digest_str = hex::encode(digest);
trace!(
log::trace!(
"upload new chunk {} ({} bytes, offset {})",
digest_str,
chunk_info.chunk_len,
@ -964,7 +967,7 @@ impl BackupWriter {
break;
}
debug!("send test data ({} bytes)", data.len());
log::debug!("send test data ({} bytes)", data.len());
let request =
H2Client::request_builder("localhost", "POST", "speedtest", None, None).unwrap();
let request_future = self
@ -979,13 +982,13 @@ impl BackupWriter {
let _ = upload_result.await?;
info!(
log::info!(
"Uploaded {} chunks in {} seconds.",
repeat,
start_time.elapsed().as_secs()
);
let speed = ((item_len * (repeat as usize)) as f64) / start_time.elapsed().as_secs_f64();
info!(
log::info!(
"Time per request: {} microseconds.",
(start_time.elapsed().as_micros()) / (repeat as u128)
);

View File

@ -23,7 +23,6 @@ use pxar::{EntryKind, Metadata};
use pbs_datastore::catalog::{self, DirEntryAttribute};
use proxmox_async::runtime::{block_in_place, block_on};
use proxmox_log::error;
use crate::pxar::Flags;
@ -107,7 +106,7 @@ fn complete_path(complete_me: &str, _map: &HashMap<String, String>) -> Vec<Strin
match shell.complete_path(complete_me) {
Ok(list) => list,
Err(err) => {
error!("error during completion: {}", err);
log::error!("error during completion: {}", err);
Vec::new()
}
}
@ -418,7 +417,7 @@ impl Shell {
let args = match cli::shellword_split(&line) {
Ok(args) => args,
Err(err) => {
error!("Error: {}", err);
log::error!("Error: {}", err);
continue;
}
};

View File

@ -4,13 +4,11 @@ use std::time::Duration;
use anyhow::{bail, format_err, Error};
use futures::*;
#[cfg(not(target_feature = "crt-static"))]
use hyper::client::connect::dns::GaiResolver;
use http::header::HeaderValue;
use http::Uri;
use http::{Request, Response};
use hyper::client::{Client, HttpConnector};
use hyper::http::header::HeaderValue;
use hyper::http::Uri;
use hyper::http::{Request, Response};
use hyper::{body::HttpBody, Body};
use hyper::Body;
use openssl::{
ssl::{SslConnector, SslMethod},
x509::X509StoreContextRef,
@ -27,7 +25,6 @@ use proxmox_async::broadcast_future::BroadcastFuture;
use proxmox_http::client::HttpsConnector;
use proxmox_http::uri::{build_authority, json_object_to_query};
use proxmox_http::{ProxyConfig, RateLimiter};
use proxmox_log::{error, info, warn};
use pbs_api_types::percent_encoding::DEFAULT_ENCODE_SET;
use pbs_api_types::{Authid, RateLimitConfig, Userid};
@ -35,74 +32,6 @@ use pbs_api_types::{Authid, RateLimitConfig, Userid};
use super::pipe_to_stream::PipeToSendStream;
use super::PROXMOX_BACKUP_TCP_KEEPALIVE_TIME;
#[cfg(not(target_feature = "crt-static"))]
type DnsResolver = GaiResolver;
#[cfg(target_feature = "crt-static")]
type DnsResolver = resolver::HickoryDnsResolver;
#[cfg(target_feature = "crt-static")]
mod resolver {
use std::net::SocketAddr;
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};
use futures::Future;
use hickory_resolver::error::ResolveError;
use hickory_resolver::lookup_ip::LookupIpIntoIter;
use hickory_resolver::TokioAsyncResolver;
use hyper::client::connect::dns::Name;
use tower_service::Service;
pub(crate) struct SocketAddrIter {
inner: LookupIpIntoIter,
}
impl Iterator for SocketAddrIter {
type Item = SocketAddr;
fn next(&mut self) -> Option<Self::Item> {
self.inner.next().map(|ip_addr| SocketAddr::new(ip_addr, 0))
}
}
#[derive(Clone)]
pub(crate) struct HickoryDnsResolver {
inner: Arc<TokioAsyncResolver>,
}
impl HickoryDnsResolver {
pub(crate) fn new() -> Self {
Self {
inner: Arc::new(TokioAsyncResolver::tokio_from_system_conf().unwrap()),
}
}
}
impl Service<Name> for HickoryDnsResolver {
type Response = SocketAddrIter;
type Error = ResolveError;
type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send>>;
fn poll_ready(&mut self, _ctx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
fn call(&mut self, name: Name) -> Self::Future {
let inner = self.inner.clone();
Box::pin(async move {
inner
.lookup_ip(name.as_str())
.await
.map(|r| SocketAddrIter {
inner: r.into_iter(),
})
})
}
}
}
/// Timeout used for several HTTP operations that are expected to finish quickly but may block in
/// certain error conditions. Keep it generous, to avoid false positives under high load.
const HTTP_TIMEOUT: Duration = Duration::from_secs(2 * 60);
@ -204,7 +133,7 @@ impl Default for HttpClientOptions {
/// HTTP(S) API client
pub struct HttpClient {
client: Client<HttpsConnector<DnsResolver>>,
client: Client<HttpsConnector>,
server: String,
port: u16,
fingerprint: Arc<Mutex<Option<String>>>,
@ -419,14 +348,14 @@ impl HttpClient {
if let Err(err) =
store_fingerprint(prefix.as_ref().unwrap(), &server, &fingerprint)
{
error!("{}", err);
log::error!("{}", err);
}
}
*verified_fingerprint.lock().unwrap() = Some(fingerprint);
true
}
Err(err) => {
error!("certificate validation failed - {}", err);
log::error!("certificate validation failed - {}", err);
false
}
},
@ -435,8 +364,7 @@ impl HttpClient {
ssl_connector_builder.set_verify(openssl::ssl::SslVerifyMode::NONE);
}
let resolver = DnsResolver::new();
let mut httpc = HttpConnector::new_with_resolver(resolver);
let mut httpc = HttpConnector::new();
httpc.set_nodelay(true); // important for h2 download performance!
httpc.enforce_http(false); // we want https...
@ -465,7 +393,7 @@ impl HttpClient {
let proxy_config = ProxyConfig::from_proxy_env()?;
if let Some(config) = proxy_config {
info!("Using proxy connection: {}:{}", config.host, config.port);
log::info!("Using proxy connection: {}:{}", config.host, config.port);
https.set_proxy(config);
}
@ -533,14 +461,14 @@ impl HttpClient {
&auth.token,
) {
if std::io::stdout().is_terminal() {
error!("storing login ticket failed: {}", err);
log::error!("storing login ticket failed: {}", err);
}
}
}
*auth2.write().unwrap() = auth;
}
Err(err) => {
error!("re-authentication failed: {}", err);
log::error!("re-authentication failed: {}", err);
}
}
}
@ -570,7 +498,7 @@ impl HttpClient {
&auth.token,
) {
if std::io::stdout().is_terminal() {
error!("storing login ticket failed: {}", err);
log::error!("storing login ticket failed: {}", err);
}
}
}
@ -597,9 +525,7 @@ impl HttpClient {
_options: options,
})
}
}
impl HttpClient {
/// Login
///
/// Login is done on demand, so this is only required if you need
@ -674,14 +600,14 @@ impl HttpClient {
if expected_fingerprint == fp_string {
return Ok(Some(fp_string));
} else {
warn!("WARNING: certificate fingerprint does not match expected fingerprint!");
warn!("expected: {}", expected_fingerprint);
log::warn!("WARNING: certificate fingerprint does not match expected fingerprint!");
log::warn!("expected: {}", expected_fingerprint);
}
}
// If we're on a TTY, query the user
if interactive && std::io::stdin().is_terminal() {
info!("fingerprint: {}", fp_string);
log::info!("fingerprint: {}", fp_string);
loop {
eprint!("Are you sure you want to continue connecting? (y/n): ");
let _ = std::io::stdout().flush();
@ -779,7 +705,8 @@ impl HttpClient {
.map(|_| Err(format_err!("unknown error")))
.await?
} else {
futures::TryStreamExt::map_err(resp.into_body(), Error::from)
resp.into_body()
.map_err(Error::from)
.try_fold(output, move |acc, chunk| async move {
acc.write_all(&chunk)?;
Ok::<_, Error>(acc)
@ -854,7 +781,7 @@ impl HttpClient {
.map_err(|_| format_err!("http upgrade request timed out"))??;
let status = resp.status();
if status != hyper::http::StatusCode::SWITCHING_PROTOCOLS {
if status != http::StatusCode::SWITCHING_PROTOCOLS {
Self::api_response(resp).await?;
bail!("unknown error");
}
@ -863,14 +790,14 @@ impl HttpClient {
let max_window_size = (1 << 31) - 2;
let (h2, connection) = h2::legacy::client::Builder::new()
let (h2, connection) = h2::client::Builder::new()
.initial_connection_window_size(max_window_size)
.initial_window_size(max_window_size)
.max_frame_size(4 * 1024 * 1024)
.handshake(upgraded)
.await?;
let connection = connection.map_err(|_| error!("HTTP/2.0 connection failed"));
let connection = connection.map_err(|_| log::error!("HTTP/2.0 connection failed"));
let (connection, abort) = futures::future::abortable(connection);
// A cancellable future returns an Option which is None when cancelled and
@ -887,7 +814,7 @@ impl HttpClient {
}
async fn credentials(
client: Client<HttpsConnector<DnsResolver>>,
client: Client<HttpsConnector>,
server: String,
port: u16,
username: Userid,
@ -916,7 +843,7 @@ impl HttpClient {
async fn api_response(response: Response<Body>) -> Result<Value, Error> {
let status = response.status();
let data = HttpBody::collect(response.into_body()).await?.to_bytes();
let data = hyper::body::to_bytes(response.into_body()).await?;
let text = String::from_utf8(data.to_vec()).unwrap();
if status.is_success() {
@ -932,7 +859,7 @@ impl HttpClient {
}
async fn api_request(
client: Client<HttpsConnector<DnsResolver>>,
client: Client<HttpsConnector>,
req: Request<Body>,
) -> Result<Value, Error> {
Self::api_response(
@ -1008,11 +935,11 @@ impl Drop for HttpClient {
#[derive(Clone)]
pub struct H2Client {
h2: h2::legacy::client::SendRequest<bytes::Bytes>,
h2: h2::client::SendRequest<bytes::Bytes>,
}
impl H2Client {
pub fn new(h2: h2::legacy::client::SendRequest<bytes::Bytes>) -> Self {
pub fn new(h2: h2::client::SendRequest<bytes::Bytes>) -> Self {
Self { h2 }
}
@ -1092,7 +1019,7 @@ impl H2Client {
&self,
request: Request<()>,
data: Option<bytes::Bytes>,
) -> impl Future<Output = Result<h2::legacy::client::ResponseFuture, Error>> {
) -> impl Future<Output = Result<h2::client::ResponseFuture, Error>> {
self.h2
.clone()
.ready()
@ -1109,9 +1036,7 @@ impl H2Client {
})
}
pub async fn h2api_response(
response: Response<h2::legacy::RecvStream>,
) -> Result<Value, Error> {
pub async fn h2api_response(response: Response<h2::RecvStream>) -> Result<Value, Error> {
let status = response.status();
let (_head, mut body) = response.into_parts();

View File

@ -8,7 +8,7 @@ use std::task::{Context, Poll};
use anyhow::{format_err, Error};
use bytes::Bytes;
use futures::{ready, Future};
use h2::legacy::SendStream;
use h2::SendStream;
pub struct PipeToSendStream {
body_tx: SendStream<Bytes>,

View File

@ -27,7 +27,6 @@ use pxar::{EntryKind, Metadata, PxarVariant};
use proxmox_human_byte::HumanByte;
use proxmox_io::vec;
use proxmox_log::{debug, error, info, warn};
use proxmox_sys::fs::{self, acl, xattr};
use pbs_datastore::catalog::BackupCatalogWriter;
@ -316,25 +315,25 @@ where
encoder.close().await?;
if metadata_mode {
info!("Change detection summary:");
info!(
log::info!("Change detection summary:");
log::info!(
" - {} total files ({} hardlinks)",
archiver.reuse_stats.files_reused_count
+ archiver.reuse_stats.files_reencoded_count
+ archiver.reuse_stats.files_hardlink_count,
archiver.reuse_stats.files_hardlink_count,
);
info!(
log::info!(
" - {} unchanged, reusable files with {} data",
archiver.reuse_stats.files_reused_count,
HumanByte::from(archiver.reuse_stats.total_reused_payload_size),
);
info!(
log::info!(
" - {} changed or non-reusable files with {} data",
archiver.reuse_stats.files_reencoded_count,
HumanByte::from(archiver.reuse_stats.total_reencoded_size),
);
info!(
log::info!(
" - {} padding in {} partially reused chunks",
HumanByte::from(
archiver.reuse_stats.total_injected_size
@ -423,7 +422,6 @@ impl Archiver {
previous_metadata_accessor: &Option<Directory<MetadataArchiveReader>>,
file_name: &Path,
metadata: &Metadata,
file_size: u64,
) -> Result<Option<Range<u64>>, Error> {
if let Some(previous_metadata_accessor) = previous_metadata_accessor {
if let Some(file_entry) = previous_metadata_accessor.lookup(file_name).await? {
@ -434,23 +432,20 @@ impl Archiver {
..
} = file_entry.entry().kind()
{
if file_size != *size {
return Ok(None);
}
let range =
*offset..*offset + size + size_of::<pxar::format::Header>() as u64;
debug!(
log::debug!(
"reusable: {file_name:?} at range {range:?} has unchanged metadata."
);
return Ok(Some(range));
}
debug!("re-encode: {file_name:?} not a regular file.");
log::debug!("re-encode: {file_name:?} not a regular file.");
return Ok(None);
}
debug!("re-encode: {file_name:?} metadata did not match.");
log::debug!("re-encode: {file_name:?} metadata did not match.");
return Ok(None);
}
debug!("re-encode: {file_name:?} not found in previous archive.");
log::debug!("re-encode: {file_name:?} not found in previous archive.");
}
Ok(None)
@ -486,7 +481,7 @@ impl Archiver {
Ok(None)
}
Err(Errno::EACCES) => {
warn!("failed to open file: {:?}: access denied", file_name);
log::warn!("failed to open file: {:?}: access denied", file_name);
Ok(None)
}
Err(Errno::ESTALE) => {
@ -520,9 +515,10 @@ impl Archiver {
let line = match line {
Ok(line) => line,
Err(err) => {
warn!(
log::warn!(
"ignoring .pxarexclude after read error in {:?}: {}",
self.path, err,
self.path,
err,
);
self.patterns.truncate(old_pattern_count);
return Ok(());
@ -562,7 +558,7 @@ impl Archiver {
}
}
Err(err) => {
error!("bad pattern in {:?}: {}", self.path, err);
log::error!("bad pattern in {:?}: {}", self.path, err);
}
}
}
@ -644,7 +640,7 @@ impl Archiver {
match match_result {
Ok(Some(MatchType::Exclude)) => {
debug!("matched by exclude pattern '{full_path:?}'");
log::debug!("matched by exclude pattern '{full_path:?}'");
continue;
}
Ok(_) => (),
@ -696,22 +692,22 @@ impl Archiver {
fn report_stale_file_handle(&self, path: Option<&PathBuf>) {
let path = path.unwrap_or(&self.path);
warn!("warning: stale file handle encountered while reading: {path:?}");
log::warn!("warning: stale file handle encountered while reading: {path:?}");
}
fn report_vanished_file(&self) {
warn!("warning: file vanished while reading: {:?}", self.path);
log::warn!("warning: file vanished while reading: {:?}", self.path);
}
fn report_file_shrunk_while_reading(&self) {
warn!(
log::warn!(
"warning: file size shrunk while reading: {:?}, file will be padded with zeros!",
self.path,
);
}
fn report_file_grew_while_reading(&self) {
warn!(
log::warn!(
"warning: file size increased while reading: {:?}, file will be truncated!",
self.path,
);
@ -770,7 +766,7 @@ impl Archiver {
        // Avoid having too many open file handles in cached entries
if self.cache.is_full() {
debug!("Max cache size reached, reuse cached entries");
log::debug!("Max cache size reached, reuse cached entries");
self.flush_cached_reusing_if_below_threshold(encoder, true)
.await?;
}
@ -802,13 +798,12 @@ impl Archiver {
}
let file_name: &Path = OsStr::from_bytes(c_file_name.to_bytes()).as_ref();
let file_size = stat.st_size as u64;
if let Some(payload_range) = self
.is_reusable_entry(previous_metadata, file_name, &metadata, file_size)
.is_reusable_entry(previous_metadata, file_name, &metadata)
.await?
{
if !self.cache.try_extend_range(payload_range.clone()) {
debug!("Cache range has hole, new range: {payload_range:?}");
log::debug!("Cache range has hole, new range: {payload_range:?}");
self.flush_cached_reusing_if_below_threshold(encoder, true)
.await?;
// range has to be set after flushing of cached entries, which resets the range
@ -819,7 +814,7 @@ impl Archiver {
// actual chunks, which needs to be added before encoding the payload reference
let offset =
PayloadOffset::default().add(payload_range.start - self.cache.range().start);
debug!("Offset relative to range start: {offset:?}");
log::debug!("Offset relative to range start: {offset:?}");
self.cache.insert(
fd,
@ -871,7 +866,6 @@ impl Archiver {
.await
}
#[allow(clippy::too_many_arguments)]
async fn add_entry_to_archive<T: SeqWrite + Send>(
&mut self,
encoder: &mut Encoder<'_, T>,
@ -1024,7 +1018,7 @@ impl Archiver {
// do not reuse chunks if introduced padding higher than threshold
// opt for re-encoding in that case
if ratio > CHUNK_PADDING_THRESHOLD {
debug!(
log::debug!(
"Padding ratio: {ratio} > {CHUNK_PADDING_THRESHOLD}, padding: {}, total {}, chunks: {}",
HumanByte::from(padding),
HumanByte::from(total_size),
@ -1033,7 +1027,7 @@ impl Archiver {
self.cache.update_last_chunk(prev_last_chunk);
self.encode_entries_to_archive(encoder, None).await?;
} else {
debug!(
log::debug!(
"Padding ratio: {ratio} < {CHUNK_PADDING_THRESHOLD}, padding: {}, total {}, chunks: {}",
HumanByte::from(padding),
HumanByte::from(total_size),
@ -1084,7 +1078,7 @@ impl Archiver {
let (entries, start_path) = self.cache.take_and_reset();
let old_path = self.path.clone();
self.path = start_path;
debug!(
log::debug!(
"Got {} cache entries to encode: reuse is {}",
entries.len(),
base_offset.is_some()
@ -1153,7 +1147,7 @@ impl Archiver {
let mut size = PayloadOffset::default();
for chunk in chunks.iter() {
debug!(
log::debug!(
"Injecting chunk with {} padding (chunk size {})",
HumanByte::from(chunk.padding),
HumanByte::from(chunk.size()),
@ -1181,7 +1175,7 @@ impl Archiver {
};
injection_boundary = injection_boundary.add(size.raw());
debug!("Advance payload position by: {size:?}");
log::debug!("Advance payload position by: {size:?}");
encoder.advance(size)?;
}
@ -1231,7 +1225,7 @@ impl Archiver {
}
let result = if skip_contents {
info!("skipping mount point: {:?}", self.path);
log::info!("skipping mount point: {:?}", self.path);
Ok(())
} else {
let mut dir_accessor = None;

View File

@ -22,7 +22,6 @@ use pxar::format::Device;
use pxar::{Entry, EntryKind, Metadata};
use proxmox_io::{sparse_copy, sparse_copy_async};
use proxmox_log::{debug, error, info};
use proxmox_sys::c_result;
use proxmox_sys::fs::{create_path, CreateOptions};
@ -133,27 +132,18 @@ where
if let Some(ref path) = options.prelude_path {
if let Some(entry) = prelude {
let overwrite = options.overwrite_flags.contains(OverwriteFlags::FILE);
let mut open_options = OpenOptions::new();
open_options.write(true);
if overwrite {
open_options.create(true);
open_options.truncate(true);
} else {
open_options.create_new(true);
}
let mut prelude_file = open_options
let mut prelude_file = OpenOptions::new()
.create(true)
.write(true)
.open(path)
.with_context(|| format!("error creating prelude file '{path:?}'"))?;
if let pxar::EntryKind::Prelude(ref prelude) = entry.kind() {
prelude_file.write_all(prelude.as_ref())?;
} else {
info!("unexpected entry kind for prelude");
log::info!("unexpected entry kind for prelude");
}
} else {
info!("No prelude entry found, skip prelude restore.");
log::info!("No prelude entry found, skip prelude restore.");
}
}
@ -212,22 +202,23 @@ where
{
type Item = Result<(), Error>;
/// Performs the extraction of [`Entries`][Entry] yielded by the [`Decoder`][D].
/// Performs the extraction of [`Entries`][E] yielded by the [`Decoder`][D].
///
/// In detail, the [`ExtractorIter`] will stop if and only if one of the
/// following conditions is true:
/// * The [`Decoder`][D] is exhausted
/// * The [`Decoder`][D] failed to read from the archive and consequently
/// yielded an [`io::Error`]
/// * The [`Entry`]'s filename is invalid (contains nul bytes or a slash)
/// * The [`Entry`][E]'s filename is invalid (contains nul bytes or a slash)
///
/// Should an error occur during any point of extraction (**not** while
/// fetching the next [`Entry`]), the error may be handled by the
/// fetching the next [`Entry`][E]), the error may be handled by the
/// [`ErrorHandler`] provided by the [`PxarExtractOptions`] used to
/// initialize the iterator.
///
/// Extraction errors will have a corresponding [`PxarExtractContext`] attached.
///
/// [E]: pxar::Entry
/// [D]: pxar::decoder::Decoder
fn next(&mut self) -> Option<Self::Item> {
if self.state.end_reached {
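
The doc comment above fully determines the shape of this iterator: it yields one `Result<(), Error>` per extracted entry. A hedged driver sketch follows; constructing the iterator is not shown here and the error type is assumed to be `anyhow::Error`.

```rust
// Sketch: drain an extractor-style iterator and stop on the first unhandled error.
fn drive_extraction(
    extractor: impl Iterator<Item = Result<(), anyhow::Error>>,
) -> Result<(), anyhow::Error> {
    for step in extractor {
        // errors surface here with their PxarExtractContext attached,
        // unless the configured ErrorHandler already swallowed them
        step?;
    }
    Ok(())
}
```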
@ -811,9 +802,9 @@ fn add_metadata_to_header(header: &mut tar::Header, metadata: &Metadata) {
header.set_gid(metadata.stat.gid as u64);
}
async fn tar_add_file<W, T>(
async fn tar_add_file<'a, W, T>(
tar: &mut proxmox_compression::tar::Builder<W>,
contents: Option<Contents<'_, T>>,
contents: Option<Contents<'a, T>>,
size: u64,
metadata: &Metadata,
path: &Path,
@ -896,7 +887,7 @@ where
let metadata = realfile.entry().metadata();
let realpath = Path::new(link);
debug!("adding '{}' to tar", path.display());
log::debug!("adding '{}' to tar", path.display());
let stripped_path = match realpath.strip_prefix(prefix) {
Ok(path) => path,
@ -925,7 +916,7 @@ where
}
}
EntryKind::Symlink(link) if !link.data.is_empty() => {
debug!("adding '{}' to tar", path.display());
log::debug!("adding '{}' to tar", path.display());
let realpath = Path::new(link);
let mut header = tar::Header::new_gnu();
header.set_entry_type(tar::EntryType::Symlink);
@ -937,7 +928,7 @@ where
.context("could not send symlink entry")?;
}
EntryKind::Fifo => {
debug!("adding '{}' to tar", path.display());
log::debug!("adding '{}' to tar", path.display());
let mut header = tar::Header::new_gnu();
header.set_entry_type(tar::EntryType::Fifo);
add_metadata_to_header(&mut header, metadata);
@ -951,7 +942,7 @@ where
.context("could not send fifo entry")?;
}
EntryKind::Directory => {
debug!("adding '{}' to tar", path.display());
log::debug!("adding '{}' to tar", path.display());
// we cannot add the root path itself
if path != Path::new("/") {
let mut header = tar::Header::new_gnu();
@ -966,7 +957,7 @@ where
}
}
EntryKind::Device(device) => {
debug!("adding '{}' to tar", path.display());
log::debug!("adding '{}' to tar", path.display());
let entry_type = if metadata.stat.is_chardev() {
tar::EntryType::Char
} else {
@ -989,7 +980,7 @@ where
}
tarencoder.finish().await.map_err(|err| {
error!("error during finishing of zip: {}", err);
log::error!("error during finishing of zip: {}", err);
err
})?;
Ok(())
@ -1038,7 +1029,7 @@ where
match entry.kind() {
EntryKind::File { .. } => {
debug!("adding '{}' to zip", path.display());
log::debug!("adding '{}' to zip", path.display());
let entry = ZipEntry::new(
path,
metadata.stat.mtime.secs,
@ -1057,7 +1048,7 @@ where
.with_context(|| format!("error looking up {:?}", path))?;
let realfile = accessor.follow_hardlink(&entry).await?;
let metadata = realfile.entry().metadata();
debug!("adding '{}' to zip", path.display());
log::debug!("adding '{}' to zip", path.display());
let entry = ZipEntry::new(
path,
metadata.stat.mtime.secs,
@ -1070,7 +1061,7 @@ where
.context("could not send file entry")?;
}
EntryKind::Directory => {
debug!("adding '{}' to zip", path.display());
log::debug!("adding '{}' to zip", path.display());
let entry = ZipEntry::new(
path,
metadata.stat.mtime.secs,
@ -1160,7 +1151,7 @@ where
let mut extractor = get_extractor(destination, root.metadata().clone())?;
if let Err(err) = seq_files_extractor(&mut extractor, decoder).await {
error!("error extracting pxar archive: {}", err);
log::error!("error extracting pxar archive: {}", err);
}
Ok(())
@ -1224,7 +1215,7 @@ where
let metadata = entry.metadata();
let (file_name_os, file_name) = get_filename(entry)?;
debug!("extracting: {}", file.path().display());
log::debug!("extracting: {}", file.path().display());
match file.kind() {
EntryKind::Directory => {
@ -1276,7 +1267,7 @@ where
let (file_name_os, file_name) = get_filename(&entry)?;
if !matches!(entry.kind(), EntryKind::GoodbyeTable) {
debug!("extracting: {}", entry.path().display());
log::debug!("extracting: {}", entry.path().display());
}
if let Err(err) = async {
@ -1312,13 +1303,13 @@ where
}
.await
{
let display_string = entry.path().display().to_string();
error!(
let display = entry.path().display().to_string();
log::error!(
"error extracting {}: {}",
if matches!(entry.kind(), EntryKind::GoodbyeTable) {
"<directory>"
} else {
&display_string
&display
},
err
);

View File

@ -9,7 +9,6 @@ use nix::sys::stat::Mode;
use pxar::Metadata;
use proxmox_log::{info, warn};
use proxmox_sys::c_result;
use proxmox_sys::error::SysError;
use proxmox_sys::fs::{self, acl, xattr};
@ -222,7 +221,7 @@ fn apply_xattrs(
}
if !xattr::is_valid_xattr_name(xattr.name()) {
info!("skipping invalid xattr named {:?}", xattr.name());
log::info!("skipping invalid xattr named {:?}", xattr.name());
continue;
}
@ -283,7 +282,7 @@ fn apply_acls(
acl.add_entry_full(acl::ACL_GROUP_OBJ, None, mode)?;
if !metadata.acl.users.is_empty() || !metadata.acl.groups.is_empty() {
warn!(
log::warn!(
"Warning: {:?}: Missing GROUP_OBJ entry in ACL, resetting to value of MASK",
path_info,
);
@ -301,7 +300,7 @@ fn apply_acls(
}
if !acl.is_valid() {
warn!("Warning: {path_info:?} - ACL invalid, attempting restore anyway..");
log::warn!("Warning: {path_info:?} - ACL invalid, attempting restore anyway..");
}
acl.set_file(c_proc_path, acl::ACL_TYPE_ACCESS)?;
@ -330,7 +329,7 @@ fn apply_acls(
}
if !acl.is_valid() {
warn!("Warning: {path_info:?} - ACL invalid, attempting restore anyway..");
log::warn!("Warning: {path_info:?} - ACL invalid, attempting restore anyway..");
}
acl.set_file(c_proc_path, acl::ACL_TYPE_DEFAULT)?;

View File

@ -21,7 +21,6 @@ use pbs_datastore::dynamic_index::{BufferedDynamicReader, LocalDynamicReadAt};
use pbs_datastore::index::IndexFile;
use pbs_datastore::BackupManifest;
use pbs_tools::crypt_config::CryptConfig;
use proxmox_log::{debug, info};
use crate::{BackupReader, RemoteChunkReader};
@ -308,11 +307,11 @@ pub fn handle_root_with_optional_format_version_prelude<R: pxar::decoder::SeqRea
match first.kind() {
pxar::EntryKind::Directory => {
let version = pxar::format::FormatVersion::Version1;
debug!("pxar format version '{version:?}'");
log::debug!("pxar format version '{version:?}'");
Ok((first, None))
}
pxar::EntryKind::Version(version) => {
debug!("pxar format version '{version:?}'");
log::debug!("pxar format version '{version:?}'");
let second = decoder
.next()
.ok_or_else(|| format_err!("missing root entry"))??;
@ -406,14 +405,14 @@ pub async fn pxar_metadata_catalog_dump_dir<T: Clone + Send + Sync + ReadAt>(
if let Ok(s) = proxmox_time::strftime_local("%FT%TZ", mtime) {
mtime_string = s;
}
info!("{etype} {entry_path:?} {size} {mtime_string}");
log::info!("{etype} {entry_path:?} {size} {mtime_string}");
}
DirEntryAttribute::Directory { .. } => {
info!("{etype} {entry_path:?}");
log::info!("{etype} {entry_path:?}");
let dir = entry.enter_directory().await?;
pxar_metadata_catalog_dump_dir(dir, path_prefix).await?;
}
_ => info!("{etype} {entry_path:?}"),
_ => log::info!("{etype} {entry_path:?}"),
}
}

View File

@ -11,11 +11,9 @@ use futures::stream::Stream;
use nix::dir::Dir;
use nix::fcntl::OFlag;
use nix::sys::stat::Mode;
use tokio::sync::Notify;
use proxmox_async::blocking::TokioWriterAdapter;
use proxmox_io::StdChannelWriter;
use proxmox_log::debug;
use pbs_datastore::catalog::{BackupCatalogWriter, CatalogWriter};
@ -32,8 +30,6 @@ pub struct PxarBackupStream {
pub suggested_boundaries: Option<std::sync::mpsc::Receiver<u64>>,
handle: Option<AbortHandle>,
error: Arc<Mutex<Option<Error>>>,
finished: bool,
archiver_finished_notification: Arc<Notify>,
}
impl Drop for PxarBackupStream {
@ -83,10 +79,6 @@ impl PxarBackupStream {
let error = Arc::new(Mutex::new(None));
let error2 = Arc::clone(&error);
let stream_notifier = Arc::new(Notify::new());
let stream_notification_receiver = stream_notifier.clone();
let payload_stream_notifier = Arc::new(Notify::new());
let payload_stream_notification_receiver = payload_stream_notifier.clone();
let handler = async move {
if let Err(err) = crate::pxar::create_archive(
dir,
@ -96,7 +88,7 @@ impl PxarBackupStream {
),
crate::pxar::Flags::DEFAULT,
move |path| {
debug!("{:?}", path);
log::debug!("{:?}", path);
Ok(())
},
options,
@ -108,10 +100,6 @@ impl PxarBackupStream {
let mut error = error2.lock().unwrap();
*error = Some(err);
}
// Notify upload streams that archiver is finished (with or without error)
stream_notifier.notify_one();
payload_stream_notifier.notify_one();
};
let (handle, registration) = AbortHandle::new_pair();
@ -123,8 +111,6 @@ impl PxarBackupStream {
suggested_boundaries: None,
handle: Some(handle.clone()),
error: Arc::clone(&error),
finished: false,
archiver_finished_notification: stream_notification_receiver,
};
let backup_payload_stream = payload_rx.map(|rx| Self {
@ -132,8 +118,6 @@ impl PxarBackupStream {
suggested_boundaries: suggested_boundaries_rx,
handle: Some(handle),
error,
finished: false,
archiver_finished_notification: payload_stream_notification_receiver,
});
Ok((backup_stream, backup_payload_stream))
@ -156,31 +140,18 @@ impl Stream for PxarBackupStream {
type Item = Result<Vec<u8>, Error>;
fn poll_next(self: Pin<&mut Self>, _cx: &mut Context) -> Poll<Option<Self::Item>> {
let this = self.get_mut();
if this.finished {
// Channel has already been finished and eventual errors propagated,
// early return to avoid blocking on further archiver finished notifications
// by subsequent polls.
return Poll::Ready(None);
}
{
// limit lock scope
let mut error = this.error.lock().unwrap();
let mut error = self.error.lock().unwrap();
if let Some(err) = error.take() {
return Poll::Ready(Some(Err(err)));
}
}
match proxmox_async::runtime::block_in_place(|| this.rx.as_ref().unwrap().recv()) {
match proxmox_async::runtime::block_in_place(|| self.rx.as_ref().unwrap().recv()) {
Ok(data) => Poll::Ready(Some(data)),
Err(_) => {
// Wait for archiver to finish
proxmox_async::runtime::block_on(this.archiver_finished_notification.notified());
// Never block for archiver finished notification on subsequent calls.
// Eventual error will already have been propagated.
this.finished = true;
let mut error = this.error.lock().unwrap();
let mut error = self.error.lock().unwrap();
if let Some(err) = error.take() {
return Poll::Ready(Some(Err(err)));
}

View File

@ -8,7 +8,6 @@ use futures::*;
use serde_json::{json, Value};
use tokio::signal::unix::{signal, SignalKind};
use proxmox_log::info;
use proxmox_router::cli::format_and_print_result;
use pbs_api_types::percent_encoding::percent_encode_component;
@ -33,10 +32,10 @@ pub async fn display_task_log(
let abort_future = async move {
while signal_stream.recv().await.is_some() {
info!("got shutdown request (SIGINT)");
log::info!("got shutdown request (SIGINT)");
let prev_count = abort_count2.fetch_add(1, Ordering::SeqCst);
if prev_count >= 1 {
info!("forced exit (task still running)");
log::info!("forced exit (task still running)");
break;
}
}

View File

@ -5,7 +5,6 @@ use std::path::PathBuf;
use anyhow::{bail, format_err, Error};
use serde_json::Value;
use proxmox_log::info;
use proxmox_schema::*;
use proxmox_sys::fs::file_get_contents;
use proxmox_sys::linux::tty;
@ -231,7 +230,7 @@ fn do_crypto_parameters(param: &Value, keep_keyfd_open: bool) -> Result<CryptoPa
(None, master_pubkey) => match read_optional_default_encryption_key()? {
None => bail!("--crypt-mode without --keyfile and no default key file available"),
enc_key => {
info!("Encrypting with default encryption key!");
log::info!("Encrypting with default encryption key!");
let master_pubkey = match master_pubkey {
None => read_optional_default_master_pubkey()?,
master_pubkey => master_pubkey,
@ -345,8 +344,8 @@ pub(crate) unsafe fn set_test_default_master_pubkey(value: Result<Option<Vec<u8>
pub fn get_encryption_key_password() -> Result<Vec<u8>, Error> {
// fixme: implement other input methods
if let Some(password) = super::get_encryption_password()? {
return Ok(password.into_bytes());
if let Some(password) = super::get_secret_from_env("PBS_ENCRYPTION_PASSWORD")? {
return Ok(password.as_bytes().to_vec());
}
// If we're on a TTY, query the user for a password

View File

@ -28,21 +28,6 @@ pub mod key_source;
const ENV_VAR_PBS_FINGERPRINT: &str = "PBS_FINGERPRINT";
const ENV_VAR_PBS_PASSWORD: &str = "PBS_PASSWORD";
const ENV_VAR_PBS_ENCRYPTION_PASSWORD: &str = "PBS_ENCRYPTION_PASSWORD";
const ENV_VAR_PBS_REPOSITORY: &str = "PBS_REPOSITORY";
/// Directory with system [credential]s. See systemd-creds(1).
///
/// [credential]: https://systemd.io/CREDENTIALS/
const ENV_VAR_CREDENTIALS_DIRECTORY: &str = "CREDENTIALS_DIRECTORY";
/// Credential name of the encryption password.
const CRED_PBS_ENCRYPTION_PASSWORD: &str = "proxmox-backup-client.encryption-password";
/// Credential name of the password.
const CRED_PBS_PASSWORD: &str = "proxmox-backup-client.password";
/// Credential name of the repository.
const CRED_PBS_REPOSITORY: &str = "proxmox-backup-client.repository";
/// Credential name of the fingerprint.
const CRED_PBS_FINGERPRINT: &str = "proxmox-backup-client.fingerprint";
pub const REPO_URL_SCHEMA: Schema = StringSchema::new("Repository URL.")
.format(&BACKUP_REPO_URL)
@ -55,30 +40,6 @@ pub const CHUNK_SIZE_SCHEMA: Schema = IntegerSchema::new("Chunk size in KB. Must
.default(4096)
.schema();
/// Retrieves a secret stored in a [credential] provided by systemd.
///
/// Returns `Ok(None)` if the credential does not exist.
///
/// [credential]: https://systemd.io/CREDENTIALS/
fn get_credential(cred_name: &str) -> std::io::Result<Option<Vec<u8>>> {
let Some(creds_dir) = std::env::var_os(ENV_VAR_CREDENTIALS_DIRECTORY) else {
return Ok(None);
};
let path = std::path::Path::new(&creds_dir).join(cred_name);
proxmox_log::debug!("attempting to use credential {cred_name} from {creds_dir:?}",);
// We read the whole contents without a BufRead. As per systemd-creds(1):
// Credentials are limited-size binary or textual objects.
match std::fs::read(&path) {
Ok(bytes) => Ok(Some(bytes)),
Err(err) if err.kind() == std::io::ErrorKind::NotFound => {
proxmox_log::debug!("no {cred_name} credential found in {creds_dir:?}");
Ok(None)
}
Err(err) => Err(err),
}
}
/// Helper to read a secret through a environment variable (ENV).
///
/// Tries the following variable names in order and returns the value
@ -90,7 +51,7 @@ fn get_credential(cred_name: &str) -> std::io::Result<Option<Vec<u8>>> {
/// BASE_NAME_CMD => run the specified command and read the secret from the first line it prints to stdout
///
/// Only return the first line of data (without CRLF).
fn get_secret_from_env(base_name: &str) -> Result<Option<String>, Error> {
pub fn get_secret_from_env(base_name: &str) -> Result<Option<String>, Error> {
let firstline = |data: String| -> String {
match data.lines().next() {
Some(line) => line.to_string(),
@ -157,80 +118,8 @@ fn get_secret_from_env(base_name: &str) -> Result<Option<String>, Error> {
Ok(None)
}
/// Gets a secret or value from the environment.
///
/// Checks for an environment variable named `env_variable`, and if missing, it
/// checks for a system [credential] named `credential_name`. Assumes the secret
/// is UTF-8 encoded.
///
/// [credential]: https://systemd.io/CREDENTIALS/
fn get_secret_impl(env_variable: &str, credential_name: &str) -> Result<Option<String>, Error> {
if let Some(password) = get_secret_from_env(env_variable)? {
Ok(Some(password))
} else if let Some(password) = get_credential(credential_name)? {
String::from_utf8(password)
.map(Option::Some)
.map_err(|_err| format_err!("credential {credential_name} is not utf8 encoded"))
} else {
Ok(None)
}
}
/// Gets the backup server's password.
///
/// Looks for a password in the `PBS_PASSWORD` environment variable; if there
/// isn't one, it reads the `proxmox-backup-client.password` [credential].
///
/// Returns `Ok(None)` if neither the environment variable nor the credential is
/// present.
///
/// [credential]: https://systemd.io/CREDENTIALS/
pub fn get_password() -> Result<Option<String>, Error> {
get_secret_impl(ENV_VAR_PBS_PASSWORD, CRED_PBS_PASSWORD)
}
/// Gets an encryption password.
///
/// Looks for a password in the `PBS_ENCRYPTION_PASSWORD` environment variable;
/// if there isn't one, it reads the `proxmox-backup-client.encryption-password`
/// [credential].
///
/// Returns `Ok(None)` if neither the environment variable nor the credential is
/// present.
///
/// [credential]: https://systemd.io/CREDENTIALS/
pub fn get_encryption_password() -> Result<Option<String>, Error> {
get_secret_impl(
ENV_VAR_PBS_ENCRYPTION_PASSWORD,
CRED_PBS_ENCRYPTION_PASSWORD,
)
}
pub fn get_default_repository() -> Option<String> {
get_secret_impl(ENV_VAR_PBS_REPOSITORY, CRED_PBS_REPOSITORY)
.inspect_err(|err| {
proxmox_log::error!("could not read default repository: {err:#}");
})
.unwrap_or_default()
}
/// Gets the repository fingerprint.
///
/// Looks for the fingerprint in the `PBS_FINGERPRINT` environment variable; if
/// there isn't one, it reads the `proxmox-backup-client.fingerprint`
/// [credential].
///
/// Returns `None` if neither the environment variable nor the credential is
/// present.
///
/// [credential]: https://systemd.io/CREDENTIALS/
pub fn get_fingerprint() -> Option<String> {
get_secret_impl(ENV_VAR_PBS_FINGERPRINT, CRED_PBS_FINGERPRINT)
.inspect_err(|err| {
proxmox_log::error!("could not read fingerprint: {err:#}");
})
.unwrap_or_default()
std::env::var("PBS_REPOSITORY").ok()
}
pub fn remove_repository_from_value(param: &mut Value) -> Result<BackupRepository, Error> {
@ -290,9 +179,9 @@ fn connect_do(
auth_id: &Authid,
rate_limit: RateLimitConfig,
) -> Result<HttpClient, Error> {
let fingerprint = get_fingerprint();
let fingerprint = std::env::var(ENV_VAR_PBS_FINGERPRINT).ok();
let password = get_password()?;
let password = get_secret_from_env(ENV_VAR_PBS_PASSWORD)?;
let options = HttpClientOptions::new_interactive(password, fingerprint).rate_limit(rate_limit);
HttpClient::new(server, port, auth_id, options)
@ -300,8 +189,8 @@ fn connect_do(
/// like get, but simply ignore errors and return Null instead
pub async fn try_get(repo: &BackupRepository, url: &str) -> Value {
let fingerprint = get_fingerprint();
let password = get_password().unwrap_or(None);
let fingerprint = std::env::var(ENV_VAR_PBS_FINGERPRINT).ok();
let password = get_secret_from_env(ENV_VAR_PBS_PASSWORD).unwrap_or(None);
// ticket cache, but no questions asked
let options = HttpClientOptions::new_interactive(password, fingerprint).interactive(false);
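
The hunks above replace direct environment variable reads with the credential-aware helpers introduced earlier in this file. A hedged sketch of the resulting lookup order, using only names from this diff (the wiring itself is illustrative):

```rust
// Sketch: how the client-side connection code resolves its secrets after this change.
fn resolve_connection_secrets() -> Result<(Option<String>, Option<String>), anyhow::Error> {
    // PBS_FINGERPRINT (env) first, then the proxmox-backup-client.fingerprint
    // credential from $CREDENTIALS_DIRECTORY
    let fingerprint = get_fingerprint();
    // PBS_PASSWORD and its _FD/_FILE/_CMD variants first, then the
    // proxmox-backup-client.password credential
    let password = get_password()?;
    Ok((password, fingerprint))
}
```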

View File

@ -3,11 +3,11 @@ use std::task::{Context, Poll};
use anyhow::{bail, format_err, Error};
use futures::*;
use http::Uri;
use http::{Request, Response};
use hyper::client::connect::{Connected, Connection};
use hyper::client::Client;
use hyper::http::Uri;
use hyper::http::{Request, Response};
use hyper::{body::HttpBody, Body};
use hyper::Body;
use pin_project_lite::pin_project;
use serde_json::Value;
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt, ReadBuf};
@ -179,7 +179,8 @@ impl VsockClient {
if !status.is_success() {
Self::api_response(resp).await.map(|_| ())?
} else {
futures::TryStreamExt::map_err(resp.into_body(), Error::from)
resp.into_body()
.map_err(Error::from)
.try_fold(output, move |acc, chunk| async move {
acc.write_all(&chunk).await?;
Ok::<_, Error>(acc)
@ -191,7 +192,7 @@ impl VsockClient {
async fn api_response(response: Response<Body>) -> Result<Value, Error> {
let status = response.status();
let data = HttpBody::collect(response.into_body()).await?.to_bytes();
let data = hyper::body::to_bytes(response.into_body()).await?;
let text = String::from_utf8(data.to_vec()).unwrap();
if status.is_success() {

View File

@ -24,7 +24,6 @@ proxmox-section-config.workspace = true
proxmox-shared-memory.workspace = true
proxmox-sys = { workspace = true, features = [ "acl", "crypt", "timer" ] }
proxmox-time.workspace = true
proxmox-uuid.workspace = true
pbs-api-types.workspace = true
pbs-buildcfg.workspace = true

View File

@ -342,7 +342,10 @@ impl AclTree {
let mut node = &self.root;
for outer in path {
for comp in outer.split('/') {
node = node.children.get(comp)?;
node = match node.children.get(comp) {
Some(n) => n,
None => return None,
};
}
}
Some(node)
@ -352,7 +355,10 @@ impl AclTree {
let mut node = &mut self.root;
for outer in path {
for comp in outer.split('/') {
node = node.children.get_mut(comp)?;
node = match node.children.get_mut(comp) {
Some(n) => n,
None => return None,
};
}
}
Some(node)

View File

@ -101,7 +101,7 @@ impl ConfigVersionCache {
let file_path = Path::new(FILE_PATH);
let dir_path = file_path.parent().unwrap();
create_path(dir_path, Some(dir_opts), Some(dir_opts))?;
create_path(dir_path, Some(dir_opts.clone()), Some(dir_opts))?;
let file_opts = CreateOptions::new()
.perm(Mode::from_bits_truncate(0o660))

View File

@ -8,34 +8,17 @@ use proxmox_schema::{ApiType, ObjectSchema};
use proxmox_section_config::{SectionConfig, SectionConfigData, SectionConfigPlugin};
use crate::{open_backup_lockfile, replace_backup_config, BackupLockGuard};
use pbs_api_types::{
AdRealmConfig, LdapRealmConfig, OpenIdRealmConfig, PamRealmConfig, PbsRealmConfig,
REALM_ID_SCHEMA,
};
use pbs_api_types::{AdRealmConfig, LdapRealmConfig, OpenIdRealmConfig, REALM_ID_SCHEMA};
pub static CONFIG: LazyLock<SectionConfig> = LazyLock::new(init);
fn init() -> SectionConfig {
const PAM_SCHEMA: &ObjectSchema = PamRealmConfig::API_SCHEMA.unwrap_object_schema();
const PBS_SCHEMA: &ObjectSchema = PbsRealmConfig::API_SCHEMA.unwrap_object_schema();
const AD_SCHEMA: &ObjectSchema = AdRealmConfig::API_SCHEMA.unwrap_object_schema();
const LDAP_SCHEMA: &ObjectSchema = LdapRealmConfig::API_SCHEMA.unwrap_object_schema();
const OPENID_SCHEMA: &ObjectSchema = OpenIdRealmConfig::API_SCHEMA.unwrap_object_schema();
let mut config = SectionConfig::new(&REALM_ID_SCHEMA);
config.register_plugin(SectionConfigPlugin::new(
"pam".to_owned(),
Some("realm".to_owned()),
PAM_SCHEMA,
));
config.register_plugin(SectionConfigPlugin::new(
"pbs".to_owned(),
Some("realm".to_owned()),
PBS_SCHEMA,
));
let plugin = SectionConfigPlugin::new(
"openid".to_string(),
Some(String::from("realm")),
@ -78,24 +61,9 @@ pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
replace_backup_config(DOMAINS_CFG_FILENAME, raw.as_bytes())
}
/// Unsets the default login realm for users by deleting the `default` property
/// from the respective realm.
///
/// This only updates the configuration as given in `config`, making it
/// permanent is left to the caller.
pub fn unset_default_realm(config: &mut SectionConfigData) -> Result<(), Error> {
for (_, data) in &mut config.sections.values_mut() {
if let Some(obj) = data.as_object_mut() {
obj.remove("default");
}
}
Ok(())
}
/// Check if a realm with the given name exists
pub fn exists(domains: &SectionConfigData, realm: &str) -> bool {
domains.sections.contains_key(realm)
realm == "pbs" || realm == "pam" || domains.sections.contains_key(realm)
}
// shell completion helper

View File

@ -6,10 +6,10 @@
//!
//! Drive type [`VirtualTapeDrive`] is only useful for debugging.
//!
//! [LtoTapeDrive]: pbs_api_types::LtoTapeDrive
//! [VirtualTapeDrive]: pbs_api_types::VirtualTapeDrive
//! [ScsiTapeChanger]: pbs_api_types::ScsiTapeChanger
//! [SectionConfig]: proxmox_section_config::SectionConfig
//! [LtoTapeDrive]: crate::api2::types::LtoTapeDrive
//! [VirtualTapeDrive]: crate::api2::types::VirtualTapeDrive
//! [ScsiTapeChanger]: crate::api2::types::ScsiTapeChanger
//! [SectionConfig]: proxmox::api::section_config::SectionConfig
use std::collections::HashMap;
use std::sync::LazyLock;

View File

@ -22,8 +22,6 @@ pub use config_version_cache::ConfigVersionCache;
use anyhow::{format_err, Error};
use nix::unistd::{Gid, Group, Uid, User};
use proxmox_sys::fs::DirLockGuard;
use std::os::unix::prelude::AsRawFd;
pub use pbs_buildcfg::{BACKUP_GROUP_NAME, BACKUP_USER_NAME};
@ -48,34 +46,13 @@ pub fn backup_group() -> Result<nix::unistd::Group, Error> {
}
pub struct BackupLockGuard {
file: Option<std::fs::File>,
// TODO: Remove `_legacy_dir` with PBS 5
_legacy_dir: Option<DirLockGuard>,
}
impl AsRawFd for BackupLockGuard {
fn as_raw_fd(&self) -> i32 {
self.file.as_ref().map_or(-1, |f| f.as_raw_fd())
}
}
// TODO: Remove with PBS 5
impl From<DirLockGuard> for BackupLockGuard {
fn from(value: DirLockGuard) -> Self {
Self {
file: None,
_legacy_dir: Some(value),
}
}
_file: Option<std::fs::File>,
}
#[doc(hidden)]
/// Note: do not use for production code, this is only intended for tests
pub unsafe fn create_mocked_lock() -> BackupLockGuard {
BackupLockGuard {
file: None,
_legacy_dir: None,
}
BackupLockGuard { _file: None }
}
/// Open or create a lock file owned by user "backup" and lock it.
@ -99,10 +76,7 @@ pub fn open_backup_lockfile<P: AsRef<std::path::Path>>(
let timeout = timeout.unwrap_or(std::time::Duration::new(10, 0));
let file = proxmox_sys::fs::open_file_locked(&path, timeout, exclusive, options)?;
Ok(BackupLockGuard {
file: Some(file),
_legacy_dir: None,
})
Ok(BackupLockGuard { _file: Some(file) })
}
/// Atomically write data to file owned by "root:backup" with permission "0640"

View File

@ -3,7 +3,7 @@
//! This configuration module is based on [`SectionConfig`], and
//! provides a type safe interface to store [`MediaPoolConfig`],
//!
//! [MediaPoolConfig]: pbs_api_types::MediaPoolConfig
//! [MediaPoolConfig]: crate::api2::types::MediaPoolConfig
//! [SectionConfig]: proxmox_section_config::SectionConfig
use std::collections::HashMap;

View File

@ -61,16 +61,8 @@ pub fn verify_secret(tokenid: &Authid, secret: &str) -> Result<(), Error> {
}
}
/// Generates a new secret for the given tokenid / API token, sets it, then returns it.
/// The secret is stored as a salted hash.
pub fn generate_and_set_secret(tokenid: &Authid) -> Result<String, Error> {
let secret = format!("{:x}", proxmox_uuid::Uuid::generate());
set_secret(tokenid, &secret)?;
Ok(secret)
}
/// Adds a new entry for the given tokenid / API token secret. The secret is stored as a salted hash.
fn set_secret(tokenid: &Authid, secret: &str) -> Result<(), Error> {
pub fn set_secret(tokenid: &Authid, secret: &str) -> Result<(), Error> {
if !tokenid.is_token() {
bail!("not an API token ID");
}

View File

@ -35,7 +35,6 @@ proxmox-lang.workspace=true
proxmox-schema = { workspace = true, features = [ "api-macro" ] }
proxmox-serde = { workspace = true, features = [ "serde_json" ] }
proxmox-sys.workspace = true
proxmox-systemd.workspace = true
proxmox-time.workspace = true
proxmox-uuid.workspace = true
proxmox-worker-task.workspace = true

View File

@ -1,15 +1,11 @@
use std::fmt;
use std::os::unix::io::{AsRawFd, RawFd};
use std::os::unix::prelude::OsStrExt;
use std::path::Path;
use std::os::unix::io::RawFd;
use std::path::PathBuf;
use std::sync::{Arc, LazyLock};
use std::time::Duration;
use std::sync::Arc;
use anyhow::{bail, format_err, Context, Error};
use anyhow::{bail, format_err, Error};
use proxmox_sys::fs::{lock_dir_noblock, lock_dir_noblock_shared, replace_file, CreateOptions};
use proxmox_systemd::escape_unit;
use proxmox_sys::fs::{lock_dir_noblock, replace_file, CreateOptions};
use pbs_api_types::{
Authid, BackupGroupDeleteStats, BackupNamespace, BackupType, GroupFilter, VerifyState,
@ -20,18 +16,6 @@ use pbs_config::{open_backup_lockfile, BackupLockGuard};
use crate::manifest::{BackupManifest, MANIFEST_LOCK_NAME};
use crate::{DataBlob, DataStore};
pub const DATASTORE_LOCKS_DIR: &str = "/run/proxmox-backup/locks";
// TODO: Remove with PBS 5
// Note: The `expect()` call here will only happen if we can neither confirm nor deny the existence
// of the file. This should only happen if a user messes with the `/run/proxmox-backup` directory.
// If that happens, a lot more should fail as we rely on the existence of the directory throughout
// the code, so just panic with a reasonable message.
pub(crate) static OLD_LOCKING: LazyLock<bool> = LazyLock::new(|| {
std::fs::exists("/run/proxmox-backup/old-locking")
.expect("cannot read `/run/proxmox-backup`, please check permissions")
});
/// BackupGroup is a directory containing a list of BackupDir
#[derive(Clone)]
pub struct BackupGroup {
@ -215,10 +199,9 @@ impl BackupGroup {
/// Returns `BackupGroupDeleteStats`, containing the number of deleted snapshots
/// and the number of protected snapshots, which therefore were not removed.
pub fn destroy(&self) -> Result<BackupGroupDeleteStats, Error> {
let _guard = self
.lock()
.with_context(|| format!("while destroying group '{self:?}'"))?;
let path = self.full_group_path();
let _guard =
proxmox_sys::fs::lock_dir_noblock(&path, "backup group", "possible running backup")?;
log::info!("removing backup group {:?}", path);
let mut delete_stats = BackupGroupDeleteStats::default();
@ -232,34 +215,16 @@ impl BackupGroup {
delete_stats.increment_removed_snapshots();
}
// Note: make sure the old locking mechanism isn't used as `remove_dir_all` is not safe in
// that case
if delete_stats.all_removed() && !*OLD_LOCKING {
self.remove_group_dir()?;
if delete_stats.all_removed() {
std::fs::remove_dir_all(&path).map_err(|err| {
format_err!("removing group directory {:?} failed - {}", path, err)
})?;
delete_stats.increment_removed_groups();
}
Ok(delete_stats)
}
/// Helper function, assumes that no more snapshots are present in the group.
fn remove_group_dir(&self) -> Result<(), Error> {
let owner_path = self.store.owner_path(&self.ns, &self.group);
std::fs::remove_file(&owner_path).map_err(|err| {
format_err!("removing the owner file '{owner_path:?}' failed - {err}")
})?;
let path = self.full_group_path();
std::fs::remove_dir(&path)
.map_err(|err| format_err!("removing group directory {path:?} failed - {err}"))?;
let _ = std::fs::remove_file(self.lock_path());
Ok(())
}
/// Returns the backup owner.
///
/// The backup owner is the entity who first created the backup group.
@ -272,36 +237,6 @@ impl BackupGroup {
self.store
.set_owner(&self.ns, self.as_ref(), auth_id, force)
}
/// Returns a file name for locking a group.
///
/// The lock file will be located in:
/// `${DATASTORE_LOCKS_DIR}/${datastore name}/${lock_file_path_helper(rpath)}`
/// where `rpath` is the relative path of the group.
fn lock_path(&self) -> PathBuf {
let path = Path::new(DATASTORE_LOCKS_DIR).join(self.store.name());
let rpath = Path::new(self.group.ty.as_str()).join(&self.group.id);
path.join(lock_file_path_helper(&self.ns, rpath))
}
/// Locks a group exclusively.
pub fn lock(&self) -> Result<BackupLockGuard, Error> {
if *OLD_LOCKING {
lock_dir_noblock(
&self.full_group_path(),
"backup group",
"possible runing backup, group is in use",
)
.map(BackupLockGuard::from)
} else {
lock_helper(self.store.name(), &self.lock_path(), |p| {
open_backup_lockfile(p, Some(Duration::from_secs(0)), true)
.with_context(|| format!("unable to acquire backup group lock {p:?}"))
})
}
}
}
impl AsRef<pbs_api_types::BackupNamespace> for BackupGroup {
@ -486,101 +421,36 @@ impl BackupDir {
/// Returns the filename to lock a manifest
///
/// Also creates the basedir. The lockfile is located in
/// `${DATASTORE_LOCKS_DIR}/${datastore name}/${lock_file_path_helper(rpath)}.index.json.lck`
/// where rpath is the relative path of the snapshot.
fn manifest_lock_path(&self) -> PathBuf {
let path = Path::new(DATASTORE_LOCKS_DIR).join(self.store.name());
/// '/run/proxmox-backup/locks/{datastore}/[ns/{ns}/]+{type}/{id}/{timestamp}.index.json.lck'
fn manifest_lock_path(&self) -> Result<PathBuf, Error> {
let mut path = PathBuf::from(&format!("/run/proxmox-backup/locks/{}", self.store.name()));
path.push(self.relative_path());
let rpath = Path::new(self.dir.group.ty.as_str())
.join(&self.dir.group.id)
.join(&self.backup_time_string)
.join(MANIFEST_LOCK_NAME);
std::fs::create_dir_all(&path)?;
let ts = self.backup_time_string();
path.push(format!("{ts}{MANIFEST_LOCK_NAME}"));
path.join(lock_file_path_helper(&self.ns, rpath))
Ok(path)
}
/// Locks the manifest of a snapshot, for example, to update or delete it.
pub(crate) fn lock_manifest(&self) -> Result<BackupLockGuard, Error> {
let path = if *OLD_LOCKING {
// old manifest lock path
let path = Path::new(DATASTORE_LOCKS_DIR)
.join(self.store.name())
.join(self.relative_path());
let path = self.manifest_lock_path()?;
std::fs::create_dir_all(&path)?;
path.join(format!("{}{MANIFEST_LOCK_NAME}", self.backup_time_string()))
} else {
self.manifest_lock_path()
};
lock_helper(self.store.name(), &path, |p| {
// update_manifest should never take a long time, so if
// someone else has the lock we can simply block a bit
// and should get it soon
open_backup_lockfile(p, Some(Duration::from_secs(5)), true)
.with_context(|| format_err!("unable to acquire manifest lock {p:?}"))
})
}
/// Returns a file name for locking a snapshot.
///
/// The lock file will be located in:
/// `${DATASTORE_LOCKS_DIR}/${datastore name}/${lock_file_path_helper(rpath)}`
/// where `rpath` is the relative path of the snapshot.
fn lock_path(&self) -> PathBuf {
let path = Path::new(DATASTORE_LOCKS_DIR).join(self.store.name());
let rpath = Path::new(self.dir.group.ty.as_str())
.join(&self.dir.group.id)
.join(&self.backup_time_string);
path.join(lock_file_path_helper(&self.ns, rpath))
}
/// Locks a snapshot exclusively.
pub fn lock(&self) -> Result<BackupLockGuard, Error> {
if *OLD_LOCKING {
lock_dir_noblock(
&self.full_path(),
"snapshot",
"backup is running or snapshot is in use",
)
.map(BackupLockGuard::from)
} else {
lock_helper(self.store.name(), &self.lock_path(), |p| {
open_backup_lockfile(p, Some(Duration::from_secs(0)), true)
.with_context(|| format!("unable to acquire snapshot lock {p:?}"))
})
}
}
/// Acquires a shared lock on a snapshot.
pub fn lock_shared(&self) -> Result<BackupLockGuard, Error> {
if *OLD_LOCKING {
lock_dir_noblock_shared(
&self.full_path(),
"snapshot",
"backup is running or snapshot is in use, could not acquire shared lock",
)
.map(BackupLockGuard::from)
} else {
lock_helper(self.store.name(), &self.lock_path(), |p| {
open_backup_lockfile(p, Some(Duration::from_secs(0)), false)
.with_context(|| format!("unable to acquire shared snapshot lock {p:?}"))
})
}
// actions locking the manifest should be relatively short, only wait a few seconds
open_backup_lockfile(&path, Some(std::time::Duration::from_secs(5)), true)
.map_err(|err| format_err!("unable to acquire manifest lock {:?} - {}", &path, err))
}
/// Destroy the whole snapshot, bails if it's protected
///
/// Setting `force` to true skips locking and thus ignores if the backup is currently in use.
pub fn destroy(&self, force: bool) -> Result<(), Error> {
let full_path = self.full_path();
let (_guard, _manifest_guard);
if !force {
_guard = self
.lock()
.with_context(|| format!("while destroying snapshot '{self:?}'"))?;
_guard = lock_dir_noblock(&full_path, "snapshot", "possibly running or in use")?;
_manifest_guard = self.lock_manifest()?;
}
@ -588,37 +458,14 @@ impl BackupDir {
bail!("cannot remove protected snapshot"); // use special error type?
}
let full_path = self.full_path();
log::info!("removing backup snapshot {:?}", full_path);
std::fs::remove_dir_all(&full_path).map_err(|err| {
format_err!("removing backup snapshot {:?} failed - {}", full_path, err,)
})?;
// remove no longer needed lock files
let _ = std::fs::remove_file(self.manifest_lock_path()); // ignore errors
let _ = std::fs::remove_file(self.lock_path()); // ignore errors
let group = BackupGroup::from(self);
let guard = group.lock().with_context(|| {
format!("while checking if group '{group:?}' is empty during snapshot destruction")
});
// Only remove the group if all of the following is true:
//
// - we can lock it: if we can't lock the group, it is still in use, either by another
// backup process or by a parent caller, who then needs to take care of removing
// empty groups itself.
// - it is now empty: if the group isn't empty, removing it will fail (to avoid removing
// backups that might still be used).
// - the new locking mechanism is used: if the old mechanism is used, a group removal here
// could lead to a race condition.
//
// Do not error out, as we have already removed the snapshot, there is nothing a user could
// do to rectify the situation.
if guard.is_ok() && group.list_backups()?.is_empty() && !*OLD_LOCKING {
group.remove_group_dir()?;
} else if let Err(err) = guard {
log::debug!("{err:#}");
// the manifest doesn't exist anymore, no need to keep the lock (already done by guard?)
if let Ok(path) = self.manifest_lock_path() {
let _ = std::fs::remove_file(path); // ignore errors
}
Ok(())
@ -814,75 +661,3 @@ fn list_backup_files<P: ?Sized + nix::NixPath>(
Ok(files)
}
/// Creates a path to a lock file depending on the relative path of an object (snapshot, group,
/// manifest) in a datastore. First, all namespaces are concatenated with colons (the ns-folder).
/// Then the actual file name depends on the length of the relative path without namespaces. If
/// it is shorter than 255 characters in its unit encoded form, then the unit encoded form is
/// used directly. If not, the file name consists of the first 80 characters, the last 80
/// characters and the hash of the unit encoded relative path without namespaces. It will also be
/// placed into a "hashed" subfolder in the namespace folder.
///
/// Examples:
///
/// - vm-100
/// - vm-100-2022\x2d05\x2d02T08\x3a11\x3a33Z
/// - ns1:ns2:ns3:ns4:ns5:ns6:ns7/vm-100-2022\x2d05\x2d02T08\x3a11\x3a33Z
///
/// A "hashed" lock file would look like this:
/// - ns1:ns2:ns3/hashed/$first_eighty...$last_eighty-$hash
fn lock_file_path_helper(ns: &BackupNamespace, path: PathBuf) -> PathBuf {
let to_return = PathBuf::from(
ns.components()
.map(String::from)
.reduce(|acc, n| format!("{acc}:{n}"))
.unwrap_or_default(),
);
let path_bytes = path.as_os_str().as_bytes();
let enc = escape_unit(path_bytes, true);
if enc.len() < 255 {
return to_return.join(enc);
}
let to_return = to_return.join("hashed");
let first_eigthy = &enc[..80];
let last_eighty = &enc[enc.len() - 80..];
let hash = hex::encode(openssl::sha::sha256(path_bytes));
to_return.join(format!("{first_eigthy}...{last_eighty}-{hash}"))
}
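
A worked example of the naming scheme described above; `escape_unit` is the same helper imported in this diff, while the paths and namespace names are made up for illustration.

```rust
use proxmox_systemd::escape_unit;

// Sketch: compute the lock file name for a snapshot in namespace "ns1/ns2".
fn example_lock_name() -> String {
    let rpath: &[u8] = b"vm/100/2022-05-02T08:11:33Z"; // relative path without namespaces
    let enc = escape_unit(rpath, true);
    // `enc` is well below 255 bytes, so it is used directly, giving something like
    // /run/proxmox-backup/locks/<datastore>/ns1:ns2/vm-100-2022\x2d05\x2d02T08\x3a11\x3a33Z;
    // only paths whose escaped form reaches 255 bytes end up in the "hashed" subfolder.
    format!("ns1:ns2/{enc}")
}
```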
/// Helps implement the double stat'ing procedure. It avoids certain race conditions upon lock
/// deletion.
///
/// It also creates the base directory for lock files.
fn lock_helper<F>(
store_name: &str,
path: &std::path::Path,
lock_fn: F,
) -> Result<BackupLockGuard, Error>
where
F: Fn(&std::path::Path) -> Result<BackupLockGuard, Error>,
{
let mut lock_dir = Path::new(DATASTORE_LOCKS_DIR).join(store_name);
if let Some(parent) = path.parent() {
lock_dir = lock_dir.join(parent);
};
std::fs::create_dir_all(&lock_dir)?;
let lock = lock_fn(path)?;
let inode = nix::sys::stat::fstat(lock.as_raw_fd())?.st_ino;
if nix::sys::stat::stat(path).map_or(true, |st| inode != st.st_ino) {
bail!("could not acquire lock, another thread modified the lock file");
}
Ok(lock)
}

View File

@ -1,11 +1,9 @@
use std::os::unix::fs::MetadataExt;
use std::os::unix::io::AsRawFd;
use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex};
use std::time::Duration;
use anyhow::{bail, format_err, Context, Error};
use tracing::{info, warn};
use anyhow::{bail, format_err, Error};
use tracing::info;
use pbs_api_types::{DatastoreFSyncLevel, GarbageCollectionStatus};
use proxmox_io::ReadExt;
@ -15,7 +13,6 @@ use proxmox_sys::process_locker::{
};
use proxmox_worker_task::WorkerTaskContext;
use crate::data_blob::DataChunkBuilder;
use crate::file_formats::{
COMPRESSED_BLOB_MAGIC_1_0, ENCRYPTED_BLOB_MAGIC_1_0, UNCOMPRESSED_BLOB_MAGIC_1_0,
};
@ -112,7 +109,7 @@ impl ChunkStore {
let default_options = CreateOptions::new();
match create_path(&base, Some(default_options), Some(options)) {
match create_path(&base, Some(default_options), Some(options.clone())) {
Err(err) => bail!("unable to create chunk store '{name}' at {base:?} - {err}"),
Ok(res) => {
if !res {
@ -121,13 +118,13 @@ impl ChunkStore {
}
}
if let Err(err) = create_dir(&chunk_dir, options) {
if let Err(err) = create_dir(&chunk_dir, options.clone()) {
bail!("unable to create chunk store '{name}' subdir {chunk_dir:?} - {err}");
}
// create lock file with correct owner/group
let lockfile_path = Self::lockfile_path(&base);
proxmox_sys::fs::replace_file(lockfile_path, b"", options, false)?;
proxmox_sys::fs::replace_file(lockfile_path, b"", options.clone(), false)?;
// create 64*1024 subdirs
let mut last_percentage = 0;
@ -135,7 +132,7 @@ impl ChunkStore {
for i in 0..64 * 1024 {
let mut l1path = chunk_dir.clone();
l1path.push(format!("{:04x}", i));
if let Err(err) = create_dir(&l1path, options) {
if let Err(err) = create_dir(&l1path, options.clone()) {
bail!(
"unable to create chunk store '{}' subdir {:?} - {}",
name,
@ -180,7 +177,7 @@ impl ChunkStore {
/// Note that this must be used with care, as it's dangerous to create two instances on the
/// same base path, as closing the underlying ProcessLocker drops all locks from this process
/// on the lockfile (even if separate FDs)
pub fn open<P: Into<PathBuf>>(
pub(crate) fn open<P: Into<PathBuf>>(
name: &str,
base: P,
sync_level: DatastoreFSyncLevel,
@ -223,16 +220,19 @@ impl ChunkStore {
// unwrap: only `None` in unit tests
assert!(self.locker.is_some());
const UTIME_NOW: i64 = (1 << 30) - 1;
const UTIME_OMIT: i64 = (1 << 30) - 2;
let times: [libc::timespec; 2] = [
// access time -> update to now
libc::timespec {
tv_sec: 0,
tv_nsec: libc::UTIME_NOW,
tv_nsec: UTIME_NOW,
},
// modification time -> keep as is
libc::timespec {
tv_sec: 0,
tv_nsec: libc::UTIME_OMIT,
tv_nsec: UTIME_OMIT,
},
];
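
The timespec pair above is what eventually gets handed to utimensat(2) so that only the atime is bumped. A self-contained sketch of that pattern follows; the path handling here is illustrative, not the chunk store's actual call.

```rust
use std::ffi::CString;
use std::io;

// Sketch: bump atime to "now" and leave mtime untouched, like the pair above.
fn touch_atime(path: &str) -> io::Result<()> {
    let times = [
        libc::timespec { tv_sec: 0, tv_nsec: libc::UTIME_NOW },  // atime -> now
        libc::timespec { tv_sec: 0, tv_nsec: libc::UTIME_OMIT }, // mtime -> unchanged
    ];
    let c_path = CString::new(path).map_err(|_| io::Error::from(io::ErrorKind::InvalidInput))?;
    let rc = unsafe { libc::utimensat(libc::AT_FDCWD, c_path.as_ptr(), times.as_ptr(), 0) };
    if rc == 0 {
        Ok(())
    } else {
        Err(io::Error::last_os_error())
    }
}
```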
@ -356,7 +356,7 @@ impl ChunkStore {
pub fn sweep_unused_chunks(
&self,
oldest_writer: i64,
min_atime: i64,
phase1_start_time: i64,
status: &mut GarbageCollectionStatus,
worker: &dyn WorkerTaskContext,
) -> Result<(), Error> {
@ -366,6 +366,14 @@ impl ChunkStore {
use nix::sys::stat::fstatat;
use nix::unistd::{unlinkat, UnlinkatFlags};
let mut min_atime = phase1_start_time - 3600 * 24; // at least 24h (see mount option relatime)
if oldest_writer < min_atime {
min_atime = oldest_writer;
}
min_atime -= 300; // add 5 mins gap for safety
let mut last_percentage = 0;
let mut chunk_count = 0;
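
A self-contained restatement of the atime cutoff computed in the hunk above; this mirrors the v3.3.1 side, while the master side replaces it with a precomputed `min_atime` parameter plus an explicit atime-update check.

```rust
// Sketch: the GC cutoff for sweeping unused chunks.
fn gc_min_atime(phase1_start_time: i64, oldest_writer: i64) -> i64 {
    // at least 24h back, because relatime may defer atime updates that long
    let mut min_atime = phase1_start_time - 3600 * 24;
    // never sweep chunks that a still-running writer may have touched
    if oldest_writer < min_atime {
        min_atime = oldest_writer;
    }
    // extra 5 minute safety gap
    min_atime - 300
}
```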
@ -437,69 +445,6 @@ impl ChunkStore {
Ok(())
}
/// Check if atime updates are honored by the filesystem backing the chunk store.
///
/// Checks if the atime is always updated by utimensat, taking the Linux kernel's
/// timestamp granularity into consideration.
/// If `retry_on_file_changed` is set to true, the check is performed again on the changed file
/// if a file change during testing is detected via differences in birth time or inode number.
/// Uses a 4 MiB fixed size, compressed but unencrypted chunk to test. The chunk is inserted in
/// the chunk store if not yet present.
/// Returns with error if the check could not be performed.
pub fn check_fs_atime_updates(&self, retry_on_file_changed: bool) -> Result<(), Error> {
let (zero_chunk, digest) = DataChunkBuilder::build_zero_chunk(None, 4096 * 1024, true)?;
let (pre_existing, _) = self.insert_chunk(&zero_chunk, &digest)?;
let (path, _digest) = self.chunk_path(&digest);
// Take into account timestamp update granularity in the kernel
// Blocking the thread is fine here since this runs in a worker.
std::thread::sleep(Duration::from_secs(1));
let metadata_before = std::fs::metadata(&path).context(format!(
"failed to get metadata for {path:?} before atime update"
))?;
// Second atime update if chunk pre-existed, insert_chunk already updates pre-existing ones
self.cond_touch_path(&path, true)?;
let metadata_now = std::fs::metadata(&path).context(format!(
"failed to get metadata for {path:?} after atime update"
))?;
// Check for the unlikely case that the file changed in-between the
// two metadata calls, try to check once again on changed file
if metadata_before.ino() != metadata_now.ino() {
if retry_on_file_changed {
return self.check_fs_atime_updates(false);
}
bail!("chunk {path:?} changed twice during access time safety check, cannot proceed.");
}
if metadata_before.accessed()? >= metadata_now.accessed()? {
let chunk_info_str = if pre_existing {
"pre-existing"
} else {
"newly inserted"
};
warn!("Chunk metadata was not correctly updated during access time safety check:");
info!(
"Timestamps before update: accessed {:?}, modified {:?}, created {:?}",
metadata_before.accessed().ok(),
metadata_before.modified().ok(),
metadata_before.created().ok(),
);
info!(
"Timestamps after update: accessed {:?}, modified {:?}, created {:?}",
metadata_now.accessed().ok(),
metadata_now.modified().ok(),
metadata_now.created().ok(),
);
bail!("access time safety check using {chunk_info_str} chunk failed, aborting GC!");
}
Ok(())
}
pub fn insert_chunk(&self, chunk: &DataBlob, digest: &[u8; 32]) -> Result<(bool, u64), Error> {
// unwrap: only `None` in unit tests
assert!(self.locker.is_some());
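
A hedged usage sketch for the `check_fs_atime_updates` helper above, tied to the `gc_atime_safety_check` tuning option that appears near the end of this diff; only those two names come from the diff, the glue code and types in scope are assumed.

```rust
// Sketch: run the access time safety check before starting garbage collection.
fn gc_preflight(chunk_store: &ChunkStore, tuning: &DatastoreTuning) -> Result<(), anyhow::Error> {
    if tuning.gc_atime_safety_check.unwrap_or(true) {
        // retry once if the probe chunk changed underneath us
        chunk_store.check_fs_atime_updates(true)?;
    }
    Ok(())
}
```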
@ -561,16 +506,10 @@ impl ChunkStore {
.parent()
.ok_or_else(|| format_err!("unable to get chunk dir"))?;
let mut create_options = CreateOptions::new();
if nix::unistd::Uid::effective().is_root() {
let uid = pbs_config::backup_user()?.uid;
let gid = pbs_config::backup_group()?.gid;
create_options = create_options.owner(uid).group(gid);
}
proxmox_sys::fs::replace_file(
&chunk_path,
raw_data,
create_options,
CreateOptions::new(),
self.sync_level == DatastoreFSyncLevel::File,
)
.map_err(|err| {

View File

@ -552,7 +552,7 @@ impl<'a, 'b> DataChunkBuilder<'a, 'b> {
/// Check if the error code returned by `zstd_safe::compress`, or anything else that does FFI calls
/// into zstd code, was `70` 'Destination buffer is too small' by subtracting the error code from
/// `0` (with underflow), see `ERR_getErrorCode` in
/// <https://github.com/facebook/zstd/blob/dev/lib/common/error_private.h>
/// https://github.com/facebook/zstd/blob/dev/lib/common/error_private.h
///
/// There is a test below to ensure we catch any change in the interface or internal value.
fn zstd_error_is_target_too_small(err: usize) -> bool {

View File

@ -4,11 +4,9 @@ use std::os::unix::ffi::OsStrExt;
use std::os::unix::io::AsRawFd;
use std::path::{Path, PathBuf};
use std::sync::{Arc, LazyLock, Mutex};
use std::time::Duration;
use anyhow::{bail, format_err, Context, Error};
use anyhow::{bail, format_err, Error};
use nix::unistd::{unlinkat, UnlinkatFlags};
use pbs_tools::lru_cache::LruCache;
use tracing::{info, warn};
use proxmox_human_byte::HumanByte;
@ -16,9 +14,9 @@ use proxmox_schema::ApiType;
use proxmox_sys::error::SysError;
use proxmox_sys::fs::{file_read_optional_string, replace_file, CreateOptions};
use proxmox_sys::fs::{lock_dir_noblock, DirLockGuard};
use proxmox_sys::linux::procfs::MountInfo;
use proxmox_sys::process_locker::ProcessLockSharedGuard;
use proxmox_time::TimeSpan;
use proxmox_worker_task::WorkerTaskContext;
use pbs_api_types::{
@ -26,9 +24,8 @@ use pbs_api_types::{
DataStoreConfig, DatastoreFSyncLevel, DatastoreTuning, GarbageCollectionStatus,
MaintenanceMode, MaintenanceType, Operation, UPID,
};
use pbs_config::BackupLockGuard;
use crate::backup_info::{BackupDir, BackupGroup, BackupInfo, OLD_LOCKING};
use crate::backup_info::{BackupDir, BackupGroup};
use crate::chunk_store::ChunkStore;
use crate::dynamic_index::{DynamicIndexReader, DynamicIndexWriter};
use crate::fixed_index::{FixedIndexReader, FixedIndexWriter};
@ -59,9 +56,9 @@ pub fn check_backup_owner(owner: &Authid, auth_id: &Authid) -> Result<(), Error>
/// returning false.
///
/// Reasons it could fail other than not being mounted where expected:
/// - could not read `/proc/self/mountinfo`
/// - could not stat `/dev/disk/by-uuid/<uuid>`
/// - `/dev/disk/by-uuid/<uuid>` is not a block device
/// - could not read /proc/self/mountinfo
/// - could not stat /dev/disk/by-uuid/<uuid>
/// - /dev/disk/by-uuid/<uuid> is not a block device
///
/// Since these are very much out of our control, there is no real value in distinguishing
/// between them, so for this function they all are treated as 'device not mounted'
@ -182,9 +179,9 @@ impl Drop for DataStore {
let remove_from_cache = last_task
&& pbs_config::datastore::config()
.and_then(|(s, _)| s.lookup::<DataStoreConfig>("datastore", self.name()))
.is_ok_and(|c| {
.map_or(false, |c| {
c.get_maintenance_mode()
.is_some_and(|m| m.clear_from_cache())
.map_or(false, |m| m.clear_from_cache())
});
if remove_from_cache {
@ -290,7 +287,7 @@ impl DataStore {
let datastore: DataStoreConfig = config.lookup("datastore", name)?;
if datastore
.get_maintenance_mode()
.is_some_and(|m| m.clear_from_cache())
.map_or(false, |m| m.clear_from_cache())
{
// the datastore drop handler does the checking if tasks are running and clears the
// cache entry, so we just have to trigger it here
@ -709,11 +706,7 @@ impl DataStore {
}
/// Return the path of the 'owner' file.
pub(super) fn owner_path(
&self,
ns: &BackupNamespace,
group: &pbs_api_types::BackupGroup,
) -> PathBuf {
fn owner_path(&self, ns: &BackupNamespace, group: &pbs_api_types::BackupGroup) -> PathBuf {
self.group_path(ns, group).join("owner")
}
@ -756,9 +749,9 @@ impl DataStore {
let mut open_options = std::fs::OpenOptions::new();
open_options.write(true);
open_options.truncate(true);
if force {
open_options.truncate(true);
open_options.create(true);
} else {
open_options.create_new(true);
@ -781,35 +774,41 @@ impl DataStore {
///
/// This also acquires an exclusive lock on the directory and returns the lock guard.
pub fn create_locked_backup_group(
self: &Arc<Self>,
&self,
ns: &BackupNamespace,
backup_group: &pbs_api_types::BackupGroup,
auth_id: &Authid,
) -> Result<(Authid, BackupLockGuard), Error> {
let backup_group = self.backup_group(ns.clone(), backup_group.clone());
) -> Result<(Authid, DirLockGuard), Error> {
// create intermediate path first:
let mut full_path = self.base_path();
for ns in ns.components() {
full_path.push("ns");
full_path.push(ns);
}
full_path.push(backup_group.ty.as_str());
std::fs::create_dir_all(&full_path)?;
// create intermediate path first
let full_path = backup_group.full_group_path();
full_path.push(&backup_group.id);
std::fs::create_dir_all(full_path.parent().ok_or_else(|| {
format_err!("could not construct parent path for group {backup_group:?}")
})?)?;
// now create the group, this allows us to check whether it existed before
// create the last component now
match std::fs::create_dir(&full_path) {
Ok(_) => {
let guard = backup_group.lock().with_context(|| {
format!("while creating new locked backup group '{backup_group:?}'")
})?;
self.set_owner(ns, backup_group.group(), auth_id, false)?;
let owner = self.get_owner(ns, backup_group.group())?; // just to be sure
let guard = lock_dir_noblock(
&full_path,
"backup group",
"another backup is already running",
)?;
self.set_owner(ns, backup_group, auth_id, false)?;
let owner = self.get_owner(ns, backup_group)?; // just to be sure
Ok((owner, guard))
}
Err(ref err) if err.kind() == io::ErrorKind::AlreadyExists => {
let guard = backup_group.lock().with_context(|| {
format!("while creating locked backup group '{backup_group:?}'")
})?;
let owner = self.get_owner(ns, backup_group.group())?; // just to be sure
let guard = lock_dir_noblock(
&full_path,
"backup group",
"another backup is already running",
)?;
let owner = self.get_owner(ns, backup_group)?; // just to be sure
Ok((owner, guard))
}
Err(err) => bail!("unable to create backup group {:?} - {}", full_path, err),
@ -820,25 +819,29 @@ impl DataStore {
///
/// The BackupGroup directory needs to exist.
pub fn create_locked_backup_dir(
self: &Arc<Self>,
&self,
ns: &BackupNamespace,
backup_dir: &pbs_api_types::BackupDir,
) -> Result<(PathBuf, bool, BackupLockGuard), Error> {
let backup_dir = self.backup_dir(ns.clone(), backup_dir.clone())?;
let relative_path = backup_dir.relative_path();
match std::fs::create_dir(backup_dir.full_path()) {
Ok(_) => {
let guard = backup_dir.lock().with_context(|| {
format!("while creating new locked snapshot '{backup_dir:?}'")
) -> Result<(PathBuf, bool, DirLockGuard), Error> {
let full_path = self.snapshot_path(ns, backup_dir);
let relative_path = full_path.strip_prefix(self.base_path()).map_err(|err| {
format_err!(
"failed to produce correct path for backup {backup_dir} in namespace {ns}: {err}"
)
})?;
Ok((relative_path, true, guard))
}
let lock = || {
lock_dir_noblock(
&full_path,
"snapshot",
"internal error - tried creating snapshot that's already in use",
)
};
match std::fs::create_dir(&full_path) {
Ok(_) => Ok((relative_path.to_owned(), true, lock()?)),
Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => {
let guard = backup_dir
.lock()
.with_context(|| format!("while creating locked snapshot '{backup_dir:?}'"))?;
Ok((relative_path, false, guard))
Ok((relative_path.to_owned(), false, lock()?))
}
Err(e) => Err(e.into()),
}
@ -967,15 +970,10 @@ impl DataStore {
ListGroups::new(Arc::clone(self), ns)?.collect()
}
/// Look up all index files to be found in the datastore without taking any logical iteration
/// into account.
/// The filesystem is walked recursively to detect index files based on the archive type derived
/// from the filename. This excludes the chunks folder and hidden files, and does not follow
/// symlinks.
fn list_index_files(&self) -> Result<HashSet<PathBuf>, Error> {
pub fn list_images(&self) -> Result<Vec<PathBuf>, Error> {
let base = self.base_path();
let mut list = HashSet::new();
let mut list = vec![];
use walkdir::WalkDir;
@ -1023,7 +1021,7 @@ impl DataStore {
if archive_type == ArchiveType::FixedIndex
|| archive_type == ArchiveType::DynamicIndex
{
list.insert(path);
list.push(path);
}
}
}
@ -1031,51 +1029,11 @@ impl DataStore {
Ok(list)
}
// Similar to open index, but return with Ok(None) if index file vanished.
fn open_index_reader(
&self,
absolute_path: &Path,
) -> Result<Option<Box<dyn IndexFile>>, Error> {
let archive_type = match ArchiveType::from_path(absolute_path) {
// ignore archives with unknown archive type
Ok(ArchiveType::Blob) | Err(_) => bail!("unexpected archive type"),
Ok(archive_type) => archive_type,
};
if absolute_path.is_relative() {
bail!("expected absolute path, got '{absolute_path:?}'");
}
let file = match std::fs::File::open(absolute_path) {
Ok(file) => file,
// ignore vanished files
Err(err) if err.kind() == io::ErrorKind::NotFound => return Ok(None),
Err(err) => {
return Err(Error::from(err).context(format!("can't open file '{absolute_path:?}'")))
}
};
match archive_type {
ArchiveType::FixedIndex => {
let reader = FixedIndexReader::new(file)
.with_context(|| format!("can't open fixed index '{absolute_path:?}'"))?;
Ok(Some(Box::new(reader)))
}
ArchiveType::DynamicIndex => {
let reader = DynamicIndexReader::new(file)
.with_context(|| format!("can't open dynamic index '{absolute_path:?}'"))?;
Ok(Some(Box::new(reader)))
}
ArchiveType::Blob => bail!("unexpected archive type blob"),
}
}
// mark chunks used by ``index`` as used
fn index_mark_used_chunks(
fn index_mark_used_chunks<I: IndexFile>(
&self,
index: Box<dyn IndexFile>,
index: I,
file_name: &Path, // only used for error reporting
chunk_lru_cache: &mut LruCache<[u8; 32], ()>,
status: &mut GarbageCollectionStatus,
worker: &dyn WorkerTaskContext,
) -> Result<(), Error> {
@ -1086,12 +1044,6 @@ impl DataStore {
worker.check_abort()?;
worker.fail_on_shutdown()?;
let digest = index.index_digest(pos).unwrap();
// Avoid multiple expensive atime updates by utimensat
if chunk_lru_cache.insert(*digest, ()) {
continue;
}
if !self.inner.chunk_store.cond_touch_chunk(digest, false)? {
let hex = hex::encode(digest);
warn!(
@ -1117,135 +1069,61 @@ impl DataStore {
&self,
status: &mut GarbageCollectionStatus,
worker: &dyn WorkerTaskContext,
cache_capacity: usize,
) -> Result<(), Error> {
// Iterate twice over the datastore to fetch index files, even if this comes with an
// additional runtime cost:
// - First iteration to find all index files, no matter if they are in a location expected
// by the datastore's hierarchy
// - Iterate using the datastore's helpers, so the namespaces, groups and snapshots are
// looked up given the expected hierarchy and iterator logic
//
// This ensures that all index files are taken into account, even if they would not have
// been seen by the regular logic, and the garbage collection run informs the user about
// any detected index files not following the iterator's logic.
let image_list = self.list_images()?;
let image_count = image_list.len();
let mut unprocessed_index_list = self.list_index_files()?;
let mut index_count = unprocessed_index_list.len();
let mut chunk_lru_cache = LruCache::new(cache_capacity);
let mut processed_index_files = 0;
let mut last_percentage: usize = 0;
let arc_self = Arc::new(self.clone());
for namespace in arc_self
.recursive_iter_backup_ns(BackupNamespace::root())
.context("creating namespace iterator failed")?
{
let namespace = namespace.context("iterating namespaces failed")?;
for group in arc_self.iter_backup_groups(namespace)? {
let group = group.context("iterating backup groups failed")?;
let mut strange_paths_count: u64 = 0;
// Avoid race between listing/marking of snapshots by GC and pruning the last
// snapshot in the group, following a new snapshot creation. Otherwise known chunks
// might only be referenced by the new snapshot, so it must be read as well.
let mut retry_counter = 0;
'retry: loop {
let _lock = match retry_counter {
0..=9 => None,
10 => Some(
group
.lock()
.context("exhausted retries and failed to lock group")?,
),
_ => bail!("exhausted retries and unexpected counter overrun"),
};
let mut snapshots = match group.list_backups() {
Ok(snapshots) => snapshots,
Err(err) => {
if group.exists() {
return Err(err).context("listing snapshots failed")?;
}
break 'retry;
}
};
// Always start iteration with the last snapshot of the group to reduce the race
// window with a concurrent backup+prune of the previous last snapshot. This allows
// retrying without having to keep track of already processed index files for the
// current group.
BackupInfo::sort_list(&mut snapshots, true);
for (count, snapshot) in snapshots.into_iter().rev().enumerate() {
for file in snapshot.files {
for (i, img) in image_list.into_iter().enumerate() {
worker.check_abort()?;
worker.fail_on_shutdown()?;
match ArchiveType::from_path(&file) {
Ok(ArchiveType::FixedIndex) | Ok(ArchiveType::DynamicIndex) => (),
Ok(ArchiveType::Blob) | Err(_) => continue,
if let Some(backup_dir_path) = img.parent() {
let backup_dir_path = backup_dir_path.strip_prefix(self.base_path())?;
if let Some(backup_dir_str) = backup_dir_path.to_str() {
if pbs_api_types::parse_ns_and_snapshot(backup_dir_str).is_err() {
strange_paths_count += 1;
}
}
}
let mut path = snapshot.backup_dir.full_path();
path.push(file);
let index = match self.open_index_reader(&path)? {
Some(index) => index,
None => {
unprocessed_index_list.remove(&path);
if count == 0 {
retry_counter += 1;
continue 'retry;
match std::fs::File::open(&img) {
Ok(file) => {
if let Ok(archive_type) = ArchiveType::from_path(&img) {
if archive_type == ArchiveType::FixedIndex {
let index = FixedIndexReader::new(file).map_err(|e| {
format_err!("can't read index '{}' - {}", img.to_string_lossy(), e)
})?;
self.index_mark_used_chunks(index, &img, status, worker)?;
} else if archive_type == ArchiveType::DynamicIndex {
let index = DynamicIndexReader::new(file).map_err(|e| {
format_err!("can't read index '{}' - {}", img.to_string_lossy(), e)
})?;
self.index_mark_used_chunks(index, &img, status, worker)?;
}
continue;
}
};
self.index_mark_used_chunks(
index,
&path,
&mut chunk_lru_cache,
status,
worker,
)?;
if !unprocessed_index_list.remove(&path) {
info!("Encountered new index file '{path:?}', increment total index file count");
index_count += 1;
}
Err(err) if err.kind() == io::ErrorKind::NotFound => (), // ignore vanished files
Err(err) => bail!("can't open index {} - {}", img.to_string_lossy(), err),
}
let percentage = (processed_index_files + 1) * 100 / index_count;
let percentage = (i + 1) * 100 / image_count;
if percentage > last_percentage {
info!(
"marked {percentage}% ({} of {index_count} index files)",
processed_index_files + 1,
"marked {percentage}% ({} of {image_count} index files)",
i + 1,
);
last_percentage = percentage;
}
processed_index_files += 1;
}
}
break;
}
}
}
let mut strange_paths_count = unprocessed_index_list.len();
for path in unprocessed_index_list {
let index = match self.open_index_reader(&path)? {
Some(index) => index,
None => {
// do not count vanished (pruned) backup snapshots as strange paths.
strange_paths_count -= 1;
continue;
}
};
self.index_mark_used_chunks(index, &path, &mut chunk_lru_cache, status, worker)?;
warn!("Marked chunks for unexpected index file at '{path:?}'");
}
if strange_paths_count > 0 {
warn!("Found {strange_paths_count} index files outside of expected directory scheme");
info!(
"found (and marked) {strange_paths_count} index files outside of expected directory scheme"
);
}
Ok(())
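
A minimal standalone sketch of the retry idea used in the loop above (not the pbs-datastore API, names are made up): list a group's snapshots a few times without holding the group lock, take the lock only on the final attempt, and give up after that. `list` returning `None` models a listing that raced with a concurrent prune.

```rust
fn list_with_retries<T>(
    mut list: impl FnMut() -> Option<Vec<T>>,
    mut lock_group: impl FnMut() -> Result<(), String>,
) -> Result<Vec<T>, String> {
    let mut retry_counter = 0;
    loop {
        let _lock = match retry_counter {
            0..=9 => None,
            10 => Some(lock_group()?), // exhausted retries, serialize with writers
            _ => return Err("exhausted retries and unexpected counter overrun".into()),
        };
        match list() {
            Some(snapshots) => return Ok(snapshots),
            None => retry_counter += 1, // listing raced, try again
        }
    }
}

fn main() {
    let mut attempts = 0;
    let result = list_with_retries(
        || {
            attempts += 1;
            (attempts > 3).then(|| vec!["2024-05-03T10:00:00Z"])
        },
        || Ok(()),
    );
    assert_eq!(result.unwrap().len(), 1);
}
```
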
@ -1292,62 +1170,15 @@ impl DataStore {
upid: Some(upid.to_string()),
..Default::default()
};
let tuning: DatastoreTuning = serde_json::from_value(
DatastoreTuning::API_SCHEMA
.parse_property_string(gc_store_config.tuning.as_deref().unwrap_or(""))?,
)?;
if tuning.gc_atime_safety_check.unwrap_or(true) {
self.inner
.chunk_store
.check_fs_atime_updates(true)
.context("atime safety check failed")?;
info!("Access time update check successful, proceeding with GC.");
} else {
info!("Access time update check disabled by datastore tuning options.");
};
// Fallback to default 24h 5m if not set
let cutoff = tuning
.gc_atime_cutoff
.map(|cutoff| cutoff * 60)
.unwrap_or(3600 * 24 + 300);
let mut min_atime = phase1_start_time - cutoff as i64;
info!(
"Using access time cutoff {}, minimum access time is {}",
TimeSpan::from(Duration::from_secs(cutoff as u64)),
proxmox_time::epoch_to_rfc3339_utc(min_atime)?,
);
if oldest_writer < min_atime {
min_atime = oldest_writer - 300; // account for 5 min safety gap
info!(
"Oldest backup writer started at {}, extending minimum access time to {}",
TimeSpan::from(Duration::from_secs(oldest_writer as u64)),
proxmox_time::epoch_to_rfc3339_utc(min_atime)?,
);
}
let tuning: DatastoreTuning = serde_json::from_value(
DatastoreTuning::API_SCHEMA
.parse_property_string(gc_store_config.tuning.as_deref().unwrap_or(""))?,
)?;
let gc_cache_capacity = if let Some(capacity) = tuning.gc_cache_capacity {
info!("Using chunk digest cache capacity of {capacity}.");
capacity
} else {
1024 * 1024
};
info!("Start GC phase1 (mark used chunks)");
self.mark_used_chunks(&mut gc_status, worker, gc_cache_capacity)
.context("marking used chunks failed")?;
self.mark_used_chunks(&mut gc_status, worker)?;
info!("Start GC phase2 (sweep unused chunks)");
self.inner.chunk_store.sweep_unused_chunks(
oldest_writer,
min_atime,
phase1_start_time,
&mut gc_status,
worker,
)?;
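
A worked sketch of the access time cutoff arithmetic shown above, standalone and using the same numbers as the diff: the configured cutoff is given in minutes, the fallback is 24 hours plus a 5 minute safety gap, and an even older running backup writer pushes the minimum access time further back.

```rust
fn min_access_time(
    phase1_start_time: i64,
    oldest_writer: i64,
    gc_atime_cutoff_minutes: Option<u64>,
) -> i64 {
    let cutoff_secs = gc_atime_cutoff_minutes
        .map(|minutes| minutes * 60)
        .unwrap_or(3600 * 24 + 300); // default 24h 5m = 86700 seconds
    let mut min_atime = phase1_start_time - cutoff_secs as i64;
    if oldest_writer < min_atime {
        min_atime = oldest_writer - 300; // account for the 5 min safety gap
    }
    min_atime
}

fn main() {
    let now = 1_700_000_000;
    assert_eq!(min_access_time(now, now, None), now - 86_700);
    // a backup writer that started two days ago wins over the default cutoff
    assert_eq!(min_access_time(now, now - 2 * 86_400, None), now - 2 * 86_400 - 300);
}
```
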
@ -1474,9 +1305,7 @@ impl DataStore {
bail!("snapshot {} does not exist!", backup_dir.dir());
}
let _guard = backup_dir.lock().with_context(|| {
format!("while updating the protection status of snapshot '{backup_dir:?}'")
})?;
let _guard = lock_dir_noblock(&full_path, "snapshot", "possibly running or in use")?;
let protected_path = backup_dir.protected_file();
if protection {
@ -1733,8 +1562,4 @@ impl DataStore {
Ok(())
}
pub fn old_locking(&self) -> bool {
*OLD_LOCKING
}
}

View File

@ -417,9 +417,10 @@ impl Iterator for ListNamespacesRecursive {
if state.is_empty() {
return None; // there's a state but it's empty -> we're all done
}
// should we just unwrap on None?
let iter = state.last_mut()?;
let iter = match state.last_mut() {
Some(iter) => iter,
None => return None, // unexpected, should we just unwrap?
};
match iter.next() {
Some(Ok(ns)) => {
if state.len() < self.max_depth as usize {
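
The change above replaces an explicit `match` on `state.last_mut()` with the `?` operator, which works directly inside `Iterator::next` because it returns an `Option`. A minimal standalone sketch of that pattern over a stack of sub-iterators (made-up types, not the ListNamespacesRecursive implementation):

```rust
struct StackIter {
    state: Vec<std::vec::IntoIter<i32>>,
}

impl Iterator for StackIter {
    type Item = i32;

    fn next(&mut self) -> Option<i32> {
        loop {
            // `?` on an Option replaces `match ... { Some(v) => v, None => return None }`
            let iter = self.state.last_mut()?; // empty stack -> we're all done
            match iter.next() {
                Some(item) => return Some(item),
                None => {
                    self.state.pop(); // exhausted level, drop it and continue
                }
            }
        }
    }
}

fn main() {
    let it = StackIter {
        state: vec![vec![1, 2].into_iter(), vec![3].into_iter()],
    };
    assert_eq!(it.collect::<Vec<_>>(), vec![3, 1, 2]);
}
```
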

View File

@ -124,13 +124,13 @@ pub fn compute_prune_info(
if let Some(keep_hourly) = options.keep_hourly {
mark_selections(&mut mark, &list, keep_hourly as usize, |info| {
strftime_local("%Y/%m/%d/%H", info.backup_dir.backup_time())
strftime_local("%Y/%m/%d/%H", info.backup_dir.backup_time()).map_err(Error::from)
})?;
}
if let Some(keep_daily) = options.keep_daily {
mark_selections(&mut mark, &list, keep_daily as usize, |info| {
strftime_local("%Y/%m/%d", info.backup_dir.backup_time())
strftime_local("%Y/%m/%d", info.backup_dir.backup_time()).map_err(Error::from)
})?;
}
@ -138,19 +138,19 @@ pub fn compute_prune_info(
mark_selections(&mut mark, &list, keep_weekly as usize, |info| {
// Note: Use iso-week year/week here. This year number
// might not match the calendar year number.
strftime_local("%G/%V", info.backup_dir.backup_time())
strftime_local("%G/%V", info.backup_dir.backup_time()).map_err(Error::from)
})?;
}
if let Some(keep_monthly) = options.keep_monthly {
mark_selections(&mut mark, &list, keep_monthly as usize, |info| {
strftime_local("%Y/%m", info.backup_dir.backup_time())
strftime_local("%Y/%m", info.backup_dir.backup_time()).map_err(Error::from)
})?;
}
if let Some(keep_yearly) = options.keep_yearly {
mark_selections(&mut mark, &list, keep_yearly as usize, |info| {
strftime_local("%Y", info.backup_dir.backup_time())
strftime_local("%Y", info.backup_dir.backup_time()).map_err(Error::from)
})?;
}
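
A minimal standalone sketch of the selection idea used above (not the pbs-datastore `mark_selections` API, names are made up): keep at most `keep` entries, counting one entry per distinct time-bucket key such as "%Y/%m/%d" for daily or "%G/%V" for ISO-week based weekly pruning.

```rust
use std::collections::HashSet;

fn mark_keep<T>(
    entries: &[T],          // assumed sorted newest first
    keep: usize,
    bucket: impl Fn(&T) -> String,
    marked: &mut Vec<bool>, // parallel to `entries`, true = keep
) {
    let mut seen = HashSet::new();
    for (idx, entry) in entries.iter().enumerate() {
        if seen.len() >= keep {
            break; // enough buckets kept
        }
        if seen.insert(bucket(entry)) {
            marked[idx] = true; // newest entry of a new bucket is kept
        }
    }
}

fn main() {
    let days = ["2024/05/03", "2024/05/03", "2024/05/02", "2024/05/01"];
    let mut marked = vec![false; days.len()];
    mark_keep(&days, 2, |d| d.to_string(), &mut marked);
    assert_eq!(marked, vec![true, false, true, false]);
}
```
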

View File

@ -1,14 +1,12 @@
use std::fs::File;
use std::os::unix::io::{AsRawFd, FromRawFd};
use std::path::Path;
use std::rc::Rc;
use std::sync::Arc;
use anyhow::{bail, Context, Error};
use anyhow::{bail, Error};
use nix::dir::Dir;
use nix::fcntl::OFlag;
use nix::sys::stat::Mode;
use pbs_config::BackupLockGuard;
use proxmox_sys::fs::lock_dir_noblock_shared;
use pbs_api_types::{
print_store_and_ns, ArchiveType, BackupNamespace, Operation, CLIENT_LOG_BLOB_NAME,
@ -29,10 +27,6 @@ pub struct SnapshotReader {
datastore_name: String,
file_list: Vec<String>,
locked_dir: Dir,
// while this is never read, the lock needs to be kept until the
// reader is dropped to ensure valid locking semantics
_lock: BackupLockGuard,
}
impl SnapshotReader {
@ -53,12 +47,8 @@ impl SnapshotReader {
bail!("snapshot {} does not exist!", snapshot.dir());
}
let lock = snapshot
.lock_shared()
.with_context(|| format!("while trying to read snapshot '{snapshot:?}'"))?;
let locked_dir = Dir::open(&snapshot_path, OFlag::O_RDONLY, Mode::empty())
.with_context(|| format!("unable to open snapshot directory {snapshot_path:?}"))?;
let locked_dir =
lock_dir_noblock_shared(&snapshot_path, "snapshot", "locked by another operation")?;
let datastore_name = datastore.name().to_string();
let manifest = match snapshot.load_manifest() {
@ -89,7 +79,6 @@ impl SnapshotReader {
datastore_name,
file_list,
locked_dir,
_lock: lock,
})
}
@ -139,7 +128,7 @@ pub struct SnapshotChunkIterator<'a, F: Fn(&[u8; 32]) -> bool> {
todo_list: Vec<String>,
skip_fn: F,
#[allow(clippy::type_complexity)]
current_index: Option<(Rc<Box<dyn IndexFile + Send>>, usize, Vec<(usize, u64)>)>,
current_index: Option<(Arc<Box<dyn IndexFile + Send>>, usize, Vec<(usize, u64)>)>,
}
impl<F: Fn(&[u8; 32]) -> bool> Iterator for SnapshotChunkIterator<'_, F> {
@ -169,7 +158,7 @@ impl<F: Fn(&[u8; 32]) -> bool> Iterator for SnapshotChunkIterator<'_, F> {
let order =
datastore.get_chunks_in_order(&*index, &self.skip_fn, |_| Ok(()))?;
self.current_index = Some((Rc::new(index), 0, order));
self.current_index = Some((Arc::new(index), 0, order));
} else {
return Ok(None);
}
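
The `_lock` field removed/added above exists only to keep the lock guard alive for as long as the reader itself. A minimal standalone sketch of that idea using a std `Mutex` instead of the PBS lock helpers (made-up types for illustration):

```rust
use std::sync::{Mutex, MutexGuard};

struct Reader<'a> {
    data: &'a Vec<String>,
    // never read, but holding the guard keeps the lock until the reader is dropped
    _lock: MutexGuard<'a, ()>,
}

fn main() {
    let lock = Mutex::new(());
    let data = vec!["drive.img.fidx".to_string()];
    let reader = Reader {
        data: &data,
        _lock: lock.lock().unwrap(),
    };
    println!("{} file(s) readable while the snapshot stays locked", reader.data.len());
    // the guard is released here, when `reader` goes out of scope
}
```
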

View File

@ -47,7 +47,7 @@ fn open_lock_file(name: &str) -> Result<(std::fs::File, CreateOptions), Error> {
let timeout = std::time::Duration::new(10, 0);
Ok((
open_file_locked(lock_path, timeout, true, options)?,
open_file_locked(lock_path, timeout, true, options.clone())?,
options,
))
}

View File

@ -15,6 +15,7 @@
use anyhow::{bail, Error};
use serde_json::Value;
use proxmox_log::init_cli_logger;
use proxmox_router::cli::*;
use proxmox_router::RpcEnvironment;
use proxmox_schema::{api, ArraySchema, IntegerSchema, Schema, StringSchema};
@ -799,9 +800,7 @@ fn options(
}
fn main() -> Result<(), Error> {
proxmox_log::Logger::from_env("PBS_LOG", proxmox_log::LevelFilter::INFO)
.stderr()
.init()?;
init_cli_logger("PBS_LOG", proxmox_log::LevelFilter::INFO)?;
let uid = nix::unistd::Uid::current();
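
A minimal sketch of the same idea using the env_logger and log crates instead of proxmox_log (an assumption for illustration, not the API used above): read the log level from PBS_LOG, default to "info", and write to stderr.

```rust
fn main() {
    env_logger::Builder::from_env(env_logger::Env::default().filter_or("PBS_LOG", "info"))
        .target(env_logger::Target::Stderr)
        .init();

    log::info!("CLI logger initialized");
}
```
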

View File

@ -16,6 +16,7 @@ use std::fs::File;
use anyhow::{bail, Error};
use serde_json::Value;
use proxmox_log::init_cli_logger;
use proxmox_router::cli::*;
use proxmox_router::RpcEnvironment;
use proxmox_schema::api;
@ -387,9 +388,7 @@ fn scan(param: Value) -> Result<(), Error> {
}
fn main() -> Result<(), Error> {
proxmox_log::Logger::from_env("PBS_LOG", proxmox_log::LevelFilter::INFO)
.stderr()
.init()?;
init_cli_logger("PBS_LOG", proxmox_log::LevelFilter::INFO)?;
let uid = nix::unistd::Uid::current();

View File

@ -910,7 +910,7 @@ mod test {
if let Some(voltag) = &desc.pvoltag {
res.extend_from_slice(voltag.as_bytes());
let rem = SCSI_VOLUME_TAG_LEN - voltag.len();
let rem = SCSI_VOLUME_TAG_LEN - voltag.as_bytes().len();
if rem > 0 {
res.resize(res.len() + rem, 0);
}
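
Note that `str::len()` already returns the byte length, so `voltag.len()` and `voltag.as_bytes().len()` are equivalent here. A standalone sketch of the padding rule, with the tag width as an assumption for illustration rather than the real SCSI_VOLUME_TAG_LEN constant:

```rust
const VOLUME_TAG_LEN: usize = 36; // assumed width for illustration

fn encode_volume_tag(res: &mut Vec<u8>, voltag: &str) {
    res.extend_from_slice(voltag.as_bytes());
    let rem = VOLUME_TAG_LEN.saturating_sub(voltag.len());
    if rem > 0 {
        res.resize(res.len() + rem, 0); // pad the fixed-width field with zero bytes
    }
}

fn main() {
    let mut buf = Vec::new();
    encode_volume_tag(&mut buf, "TAPE01");
    assert_eq!(buf.len(), VOLUME_TAG_LEN);
}
```
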

View File

@ -659,8 +659,7 @@ impl SgTape {
pub fn wait_until_ready(&mut self, timeout: Option<u64>) -> Result<(), Error> {
let start = SystemTime::now();
let timeout = timeout.unwrap_or(Self::SCSI_TAPE_DEFAULT_TIMEOUT as u64);
let mut max_wait = std::time::Duration::new(timeout, 0);
let mut increased_timeout = false;
let max_wait = std::time::Duration::new(timeout, 0);
loop {
match self.test_unit_ready() {
@ -668,16 +667,6 @@ impl SgTape {
_ => {
std::thread::sleep(std::time::Duration::new(1, 0));
if start.elapsed()? > max_wait {
if !increased_timeout {
if let Ok(DeviceActivity::Calibrating) =
read_device_activity(&mut self.file)
{
log::info!("Detected drive calibration, increasing timeout to 2 hours 5 minutes");
max_wait = std::time::Duration::new(2 * 60 * 60 + 5 * 60, 0);
increased_timeout = true;
continue;
}
}
bail!("wait_until_ready failed - got timeout");
}
}
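
A minimal standalone sketch of the loop above (not the SgTape API, probes are made up): poll a readiness check once per second and give up after `timeout`, but extend the deadline once if the device reports that it is still calibrating.

```rust
use std::time::{Duration, SystemTime};

fn wait_until_ready(
    mut ready: impl FnMut() -> bool,
    mut calibrating: impl FnMut() -> bool,
    timeout: Duration,
    extended_timeout: Duration,
) -> Result<(), String> {
    let start = SystemTime::now();
    let mut max_wait = timeout;
    let mut increased_timeout = false;

    loop {
        if ready() {
            return Ok(());
        }
        std::thread::sleep(Duration::from_secs(1));
        if start.elapsed().map_err(|err| err.to_string())? > max_wait {
            if !increased_timeout && calibrating() {
                // drive calibration can take far longer than a normal load
                max_wait = extended_timeout;
                increased_timeout = true;
                continue;
            }
            return Err("wait_until_ready failed - got timeout".into());
        }
    }
}

fn main() {
    let mut polls = 0;
    let result = wait_until_ready(
        || { polls += 1; polls > 1 },              // ready on the second poll
        || false,                                  // never calibrating in this example
        Duration::from_secs(10),
        Duration::from_secs(2 * 60 * 60 + 5 * 60), // 2 hours 5 minutes
    );
    assert!(result.is_ok());
}
```
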

View File

@ -133,7 +133,7 @@ impl<K: std::cmp::Eq + std::hash::Hash + Copy, V> LruCache<K, V> {
/// Insert or update an entry identified by `key` with the given `value`.
/// This entry is placed as the most recently used node at the head.
pub fn insert(&mut self, key: K, value: V) -> bool {
pub fn insert(&mut self, key: K, value: V) {
match self.map.entry(key) {
Entry::Occupied(mut o) => {
// Node present, update value
@ -142,7 +142,6 @@ impl<K: std::cmp::Eq + std::hash::Hash + Copy, V> LruCache<K, V> {
let mut node = unsafe { Box::from_raw(node_ptr) };
node.value = value;
let _node_ptr = Box::into_raw(node);
true
}
Entry::Vacant(v) => {
// Node not present, insert a new one
@ -160,7 +159,6 @@ impl<K: std::cmp::Eq + std::hash::Hash + Copy, V> LruCache<K, V> {
if self.map.len() > self.capacity {
self.pop_tail();
}
false
}
}
}
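
A minimal standalone sketch of how the boolean return value is meant to be used (not the pbs-tools LruCache implementation): a bounded "seen" cache whose insert returns `true` on a hit, so the GC marking loop can skip chunks it touched recently. For brevity this evicts in insertion order rather than strict least-recently-used order.

```rust
use std::collections::{HashSet, VecDeque};

struct SeenCache {
    capacity: usize,
    order: VecDeque<[u8; 32]>,
    seen: HashSet<[u8; 32]>,
}

impl SeenCache {
    fn new(capacity: usize) -> Self {
        Self { capacity, order: VecDeque::new(), seen: HashSet::new() }
    }

    /// Returns `true` if `digest` was already cached, `false` if it is new.
    fn insert(&mut self, digest: [u8; 32]) -> bool {
        if self.seen.contains(&digest) {
            return true; // hit: the caller can skip the expensive atime update
        }
        self.seen.insert(digest);
        self.order.push_back(digest);
        if self.seen.len() > self.capacity {
            if let Some(oldest) = self.order.pop_front() {
                self.seen.remove(&oldest); // evict the oldest entry
            }
        }
        false
    }
}

fn main() {
    let mut cache = SeenCache::new(1024 * 1024); // matches the default GC cache capacity
    let digest = [0u8; 32];
    assert!(!cache.insert(digest)); // first time: not cached yet
    assert!(cache.insert(digest));  // second time: hit, skip the touch
}
```
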

Some files were not shown because too many files have changed in this diff.