Compare commits


No commits in common. "master" and "v3.2.2" have entirely different histories.

414 changed files with 14233 additions and 18134 deletions


@@ -3,6 +3,3 @@
directory = "/usr/share/cargo/registry"
[source.crates-io]
replace-with = "debian-packages"
[profile.release]
debug=true


@@ -1,5 +1,5 @@
[workspace.package]
version = "3.4.1"
version = "3.2.2"
authors = [
"Dietmar Maurer <dietmar@proxmox.com>",
"Dominik Csapak <d.csapak@proxmox.com>",
@@ -13,7 +13,6 @@ authors = [
edition = "2021"
license = "AGPL-3"
repository = "https://git.proxmox.com/?p=proxmox-backup.git"
rust-version = "1.81"
[package]
name = "proxmox-backup"
@@ -29,6 +28,7 @@ exclude = [ "build", "debian", "tests/catar_data/test_symlink/symlink1"]
[workspace]
members = [
"pbs-api-types",
"pbs-buildcfg",
"pbs-client",
"pbs-config",
@@ -53,51 +53,43 @@ path = "src/lib.rs"
[workspace.dependencies]
# proxmox workspace
proxmox-apt = { version = "0.11", features = [ "cache" ] }
proxmox-apt-api-types = "1.0.1"
proxmox-apt = "0.10.5"
proxmox-async = "0.4"
proxmox-auth-api = "0.4"
proxmox-auth-api = "0.3"
proxmox-borrow = "1"
proxmox-compression = "0.2"
proxmox-config-digest = "0.1.0"
proxmox-daemon = "0.1.0"
proxmox-fuse = "0.1.3"
proxmox-http = { version = "0.9.5", features = [ "client", "http-helpers", "websocket" ] } # see below
proxmox-http = { version = "0.9.0", features = [ "client", "http-helpers", "websocket" ] } # see below
proxmox-human-byte = "0.1"
proxmox-io = "1.0.1" # tools and client use "tokio" feature
proxmox-lang = "1.1"
proxmox-log = "0.2.6"
proxmox-ldap = "0.2.1"
proxmox-metrics = "0.3.1"
proxmox-notify = "0.5.1"
proxmox-metrics = "0.3"
proxmox-notify = "0.4"
proxmox-openid = "0.10.0"
proxmox-rest-server = { version = "0.8.9", features = [ "templates" ] }
proxmox-rest-server = { version = "0.5.1", features = [ "templates" ] }
# some use "cli", some use "cli" and "server", pbs-config uses nothing
proxmox-router = { version = "3.0.0", default-features = false }
proxmox-rrd = "0.4"
proxmox-rrd-api-types = "1.0.2"
proxmox-router = { version = "2.0.0", default_features = false }
proxmox-rrd = { version = "0.1" }
# everything but pbs-config and pbs-client use "api-macro"
proxmox-schema = "4"
proxmox-schema = "3"
proxmox-section-config = "2"
proxmox-serde = "0.1.1"
proxmox-shared-cache = "0.1"
proxmox-shared-memory = "0.3.0"
proxmox-sortable-macro = "0.1.2"
proxmox-subscription = { version = "0.5.0", features = [ "api-types" ] }
proxmox-sys = "0.6.7"
proxmox-systemd = "0.1"
proxmox-tfa = { version = "5", features = [ "api", "api-types" ] }
proxmox-time = "2"
proxmox-uuid = { version = "1", features = [ "serde" ] }
proxmox-worker-task = "0.1"
pbs-api-types = "0.2.2"
proxmox-subscription = { version = "0.4.2", features = [ "api-types" ] }
proxmox-sys = "0.5.3"
proxmox-tfa = { version = "4.0.4", features = [ "api", "api-types" ] }
proxmox-time = "1.1.6"
proxmox-uuid = "1"
# other proxmox crates
pathpatterns = "0.3"
proxmox-acme = "0.5.3"
pxar = "0.12.1"
proxmox-acme = "0.5"
pxar = "0.10.2"
# PBS workspace
pbs-api-types = { path = "pbs-api-types" }
pbs-buildcfg = { path = "pbs-buildcfg" }
pbs-client = { path = "pbs-client" }
pbs-config = { path = "pbs-config" }
@@ -113,22 +105,23 @@ anyhow = "1.0"
async-trait = "0.1.56"
apt-pkg-native = "0.3.2"
base64 = "0.13"
bitflags = "2.4"
bitflags = "1.2.1"
bytes = "1.0"
cidr = "0.2.1"
crc32fast = "1"
const_format = "0.2"
crossbeam-channel = "0.5"
endian_trait = { version = "0.6", features = ["arrays"] }
env_logger = "0.11"
env_logger = "0.10"
flate2 = "1.0"
foreign-types = "0.3"
futures = "0.3"
h2 = { version = "0.4", features = [ "legacy", "stream" ] }
h2 = { version = "0.3", features = [ "stream" ] }
handlebars = "3.0"
hex = "0.4.3"
hickory-resolver = { version = "0.24.1", default-features = false, features = [ "system-config", "tokio-runtime" ] }
hyper = { version = "0.14", features = [ "backports", "deprecated", "full" ] }
http = "0.2"
hyper = { version = "0.14", features = [ "full" ] }
lazy_static = "1.4"
libc = "0.2"
log = "0.4.17"
nix = "0.26.1"
@@ -142,6 +135,7 @@ regex = "1.5.5"
rustyline = "9"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
serde_plain = "1"
siphasher = "0.3"
syslog = "6"
tar = "0.4"
@@ -151,29 +145,33 @@ tokio = "1.6"
tokio-openssl = "0.6.1"
tokio-stream = "0.1.0"
tokio-util = { version = "0.7", features = [ "io" ] }
tracing = "0.1"
tower-service = "0.3.0"
udev = "0.4"
url = "2.1"
walkdir = "2"
xdg = "2.2"
zstd = { version = "0.12", features = [ "bindgen" ] }
zstd-safe = "6.0"
[dependencies]
anyhow.workspace = true
async-trait.workspace = true
apt-pkg-native.workspace = true
base64.workspace = true
bitflags.workspace = true
bytes.workspace = true
cidr.workspace = true
const_format.workspace = true
crc32fast.workspace = true
crossbeam-channel.workspace = true
endian_trait.workspace = true
flate2.workspace = true
futures.workspace = true
h2.workspace = true
handlebars.workspace = true
hex.workspace = true
http.workspace = true
hyper.workspace = true
lazy_static.workspace = true
libc.workspace = true
log.workspace = true
nix.workspace = true
@@ -186,6 +184,7 @@ regex.workspace = true
rustyline.workspace = true
serde.workspace = true
serde_json.workspace = true
siphasher.workspace = true
syslog.workspace = true
termcolor.workspace = true
thiserror.workspace = true
@@ -193,27 +192,24 @@ tokio = { workspace = true, features = [ "fs", "io-util", "io-std", "macros", "n
tokio-openssl.workspace = true
tokio-stream.workspace = true
tokio-util = { workspace = true, features = [ "codec" ] }
tracing.workspace = true
tower-service.workspace = true
udev.workspace = true
url.workspace = true
walkdir.workspace = true
xdg.workspace = true
zstd.workspace = true
#valgrind_request = { git = "https://github.com/edef1c/libvalgrind_request", version = "1.1.0", optional = true }
# proxmox workspace
proxmox-apt.workspace = true
proxmox-apt-api-types.workspace = true
proxmox-async.workspace = true
proxmox-auth-api = { workspace = true, features = [ "api", "pam-authenticator" ] }
proxmox-compression.workspace = true
proxmox-config-digest.workspace = true
proxmox-daemon.workspace = true
proxmox-http = { workspace = true, features = [ "client-trait", "proxmox-async", "rate-limited-stream" ] } # pbs-client doesn't use these
proxmox-human-byte.workspace = true
proxmox-io.workspace = true
proxmox-lang.workspace = true
proxmox-log.workspace = true
proxmox-ldap.workspace = true
proxmox-metrics.workspace = true
proxmox-notify = { workspace = true, features = [ "pbs-context" ] }
@@ -223,23 +219,21 @@ proxmox-router = { workspace = true, features = [ "cli", "server"] }
proxmox-schema = { workspace = true, features = [ "api-macro" ] }
proxmox-section-config.workspace = true
proxmox-serde = { workspace = true, features = [ "serde_json" ] }
proxmox-shared-cache.workspace = true
proxmox-shared-memory.workspace = true
proxmox-sortable-macro.workspace = true
proxmox-subscription.workspace = true
proxmox-sys = { workspace = true, features = [ "timer" ] }
proxmox-systemd.workspace = true
proxmox-tfa.workspace = true
proxmox-time.workspace = true
proxmox-uuid.workspace = true
proxmox-worker-task.workspace = true
pbs-api-types.workspace = true
# in their respective repo
pathpatterns.workspace = true
proxmox-acme.workspace = true
pxar.workspace = true
# proxmox-backup workspace/internal crates
pbs-api-types.workspace = true
pbs-buildcfg.workspace = true
pbs-client.workspace = true
pbs-config.workspace = true
@@ -248,27 +242,21 @@ pbs-key-config.workspace = true
pbs-tape.workspace = true
pbs-tools.workspace = true
proxmox-rrd.workspace = true
proxmox-rrd-api-types.workspace = true
# Local path overrides
# NOTE: You must run `cargo update` after changing this for it to take effect!
[patch.crates-io]
#pbs-api-types = { path = "../proxmox/pbs-api-types" }
#proxmox-acme = { path = "../proxmox/proxmox-acme" }
#proxmox-apt = { path = "../proxmox/proxmox-apt" }
#proxmox-apt-api-types = { path = "../proxmox/proxmox-apt-api-types" }
#proxmox-async = { path = "../proxmox/proxmox-async" }
#proxmox-auth-api = { path = "../proxmox/proxmox-auth-api" }
#proxmox-borrow = { path = "../proxmox/proxmox-borrow" }
#proxmox-compression = { path = "../proxmox/proxmox-compression" }
#proxmox-config-digest = { path = "../proxmox/proxmox-config-digest" }
#proxmox-daemon = { path = "../proxmox/proxmox-daemon" }
#proxmox-fuse = { path = "../proxmox-fuse" }
#proxmox-http = { path = "../proxmox/proxmox-http" }
#proxmox-human-byte = { path = "../proxmox/proxmox-human-byte" }
#proxmox-io = { path = "../proxmox/proxmox-io" }
#proxmox-lang = { path = "../proxmox/proxmox-lang" }
#proxmox-log = { path = "../proxmox/proxmox-log" }
#proxmox-ldap = { path = "../proxmox/proxmox-ldap" }
#proxmox-metrics = { path = "../proxmox/proxmox-metrics" }
#proxmox-notify = { path = "../proxmox/proxmox-notify" }
@@ -276,7 +264,6 @@ proxmox-rrd-api-types.workspace = true
#proxmox-rest-server = { path = "../proxmox/proxmox-rest-server" }
#proxmox-router = { path = "../proxmox/proxmox-router" }
#proxmox-rrd = { path = "../proxmox/proxmox-rrd" }
#proxmox-rrd-api-types = { path = "../proxmox/proxmox-rrd-api-types" }
#proxmox-schema = { path = "../proxmox/proxmox-schema" }
#proxmox-section-config = { path = "../proxmox/proxmox-section-config" }
#proxmox-serde = { path = "../proxmox/proxmox-serde" }
@@ -284,12 +271,11 @@ proxmox-rrd-api-types.workspace = true
#proxmox-sortable-macro = { path = "../proxmox/proxmox-sortable-macro" }
#proxmox-subscription = { path = "../proxmox/proxmox-subscription" }
#proxmox-sys = { path = "../proxmox/proxmox-sys" }
#proxmox-systemd = { path = "../proxmox/proxmox-systemd" }
#proxmox-tfa = { path = "../proxmox/proxmox-tfa" }
#proxmox-time = { path = "../proxmox/proxmox-time" }
#proxmox-uuid = { path = "../proxmox/proxmox-uuid" }
#proxmox-worker-task = { path = "../proxmox/proxmox-worker-task" }
#proxmox-acme = { path = "../proxmox/proxmox-acme" }
#pathpatterns = {path = "../pathpatterns" }
#pxar = { path = "../pxar" }


@@ -1,10 +1,8 @@
include /usr/share/dpkg/default.mk
include /usr/share/rustc/architecture.mk
include defines.mk
PACKAGE := proxmox-backup
ARCH := $(DEB_BUILD_ARCH)
export DEB_HOST_RUST_TYPE
SUBDIRS := etc www docs templates
@@ -35,23 +33,15 @@ RESTORE_BIN := \
SUBCRATES != cargo metadata --no-deps --format-version=1 \
| jq -r .workspace_members'[]' \
| grep "$$PWD/" \
| sed -e "s!.*$$PWD/!!g" -e 's/\#.*$$//g' -e 's/)$$//g'
STATIC_TARGET_DIR := target/static-build
| awk '!/^proxmox-backup[[:space:]]/ { printf "%s ", $$1 }'
ifeq ($(BUILD_MODE), release)
CARGO_BUILD_ARGS += --release --target $(DEB_HOST_RUST_TYPE)
COMPILEDIR := target/$(DEB_HOST_RUST_TYPE)/release
STATIC_COMPILEDIR := $(STATIC_TARGET_DIR)/$(DEB_HOST_RUST_TYPE)/release
CARGO_BUILD_ARGS += --release
COMPILEDIR := target/release
else
CARGO_BUILD_ARGS += --target $(DEB_HOST_RUST_TYPE)
COMPILEDIR := target/$(DEB_HOST_RUST_TYPE)/debug
STATIC_COMPILEDIR := $(STATIC_TARGET_DIR)/$(DEB_HOST_RUST_TYPE)/debug
COMPILEDIR := target/debug
endif
STATIC_RUSTC_FLAGS := -C target-feature=+crt-static -L $(STATIC_COMPILEDIR)/deps-stubs/
ifeq ($(valgrind), yes)
CARGO_BUILD_ARGS += --features valgrind
endif
@@ -61,9 +51,6 @@ CARGO ?= cargo
COMPILED_BINS := \
$(addprefix $(COMPILEDIR)/,$(USR_BIN) $(USR_SBIN) $(SERVICE_BIN) $(RESTORE_BIN))
STATIC_BINS := \
$(addprefix $(STATIC_COMPILEDIR)/,proxmox-backup-client-static pxar-static)
export DEB_VERSION DEB_VERSION_UPSTREAM
SERVER_DEB=$(PACKAGE)-server_$(DEB_VERSION)_$(ARCH).deb
@@ -72,12 +59,10 @@ CLIENT_DEB=$(PACKAGE)-client_$(DEB_VERSION)_$(ARCH).deb
CLIENT_DBG_DEB=$(PACKAGE)-client-dbgsym_$(DEB_VERSION)_$(ARCH).deb
RESTORE_DEB=proxmox-backup-file-restore_$(DEB_VERSION)_$(ARCH).deb
RESTORE_DBG_DEB=proxmox-backup-file-restore-dbgsym_$(DEB_VERSION)_$(ARCH).deb
STATIC_CLIENT_DEB=$(PACKAGE)-client-static_$(DEB_VERSION)_$(ARCH).deb
STATIC_CLIENT_DBG_DEB=$(PACKAGE)-client-static-dbgsym_$(DEB_VERSION)_$(ARCH).deb
DOC_DEB=$(PACKAGE)-docs_$(DEB_VERSION)_all.deb
DEBS=$(SERVER_DEB) $(SERVER_DBG_DEB) $(CLIENT_DEB) $(CLIENT_DBG_DEB) \
$(RESTORE_DEB) $(RESTORE_DBG_DEB) $(STATIC_CLIENT_DEB) $(STATIC_CLIENT_DBG_DEB)
$(RESTORE_DEB) $(RESTORE_DBG_DEB)
DSC = rust-$(PACKAGE)_$(DEB_VERSION).dsc
@@ -85,7 +70,7 @@ DESTDIR=
tests ?= --workspace
all: proxmox-backup-client-static $(SUBDIRS)
all: $(SUBDIRS)
.PHONY: $(SUBDIRS)
$(SUBDIRS):
@@ -123,15 +108,12 @@ proxmox-backup-docs: build
cd build; dpkg-buildpackage -b -us -uc --no-pre-clean
lintian $(DOC_DEB)
.PHONY: deb dsc deb-nodoc deb-nostrip
# copy the local target/ dir as a build-cache
.PHONY: deb dsc deb-nodoc
deb-nodoc: build
cd build; dpkg-buildpackage -b -us -uc --no-pre-clean --build-profiles=nodoc
lintian $(DEBS)
deb-nostrip: build
cd build; DEB_BUILD_OPTIONS=nostrip dpkg-buildpackage -b -us -uc
lintian $(DEBS) $(DOC_DEB)
$(DEBS): deb
deb: build
cd build; dpkg-buildpackage -b -us -uc
@@ -155,7 +137,7 @@ clean: clean-deb
$(foreach i,$(SUBDIRS), \
$(MAKE) -C $(i) clean ;)
$(CARGO) clean
rm -f .do-cargo-build .do-static-cargo-build
rm -f .do-cargo-build
# allows one to avoid running cargo clean when one just wants to tidy up after a package build
clean-deb:
@@ -194,7 +176,6 @@ $(COMPILED_BINS) $(COMPILEDIR)/dump-catalog-shell-cli $(COMPILEDIR)/docgen: .do-
--bin proxmox-restore-daemon \
--package proxmox-backup \
--bin docgen \
--bin pbs2to3 \
--bin proxmox-backup-api \
--bin proxmox-backup-manager \
--bin proxmox-backup-proxy \
@@ -204,25 +185,12 @@ $(COMPILED_BINS) $(COMPILEDIR)/dump-catalog-shell-cli $(COMPILEDIR)/docgen: .do-
--bin sg-tape-cmd
touch "$@"
.PHONY: proxmox-backup-client-static
proxmox-backup-client-static:
rm -f .do-static-cargo-build
$(MAKE) $(STATIC_BINS)
$(STATIC_BINS): .do-static-cargo-build
.do-static-cargo-build:
mkdir -p $(STATIC_COMPILEDIR)/deps-stubs/ && \
echo '!<arch>' > $(STATIC_COMPILEDIR)/deps-stubs/libsystemd.a # workaround for too greedy linkage and proxmox-systemd
$(CARGO) rustc $(CARGO_BUILD_ARGS) --package pxar-bin --bin pxar \
--target-dir $(STATIC_TARGET_DIR) -- $(STATIC_RUSTC_FLAGS)
$(CARGO) rustc $(CARGO_BUILD_ARGS) --package proxmox-backup-client --bin proxmox-backup-client \
--target-dir $(STATIC_TARGET_DIR) -- $(STATIC_RUSTC_FLAGS)
.PHONY: lint
lint:
cargo clippy -- -A clippy::all -D clippy::correctness
install: $(COMPILED_BINS) $(STATIC_BINS)
install: $(COMPILED_BINS)
install -dm755 $(DESTDIR)$(BINDIR)
install -dm755 $(DESTDIR)$(ZSH_COMPL_DEST)
$(foreach i,$(USR_BIN), \
@@ -241,19 +209,16 @@ install: $(COMPILED_BINS) $(STATIC_BINS)
install -m4755 -o root -g root $(COMPILEDIR)/sg-tape-cmd $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/sg-tape-cmd
$(foreach i,$(SERVICE_BIN), \
install -m755 $(COMPILEDIR)/$(i) $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/ ;)
install -m755 $(STATIC_COMPILEDIR)/proxmox-backup-client $(DESTDIR)$(BINDIR)/proxmox-backup-client-static
install -m755 $(STATIC_COMPILEDIR)/pxar $(DESTDIR)$(BINDIR)/pxar-static
$(MAKE) -C www install
$(MAKE) -C docs install
$(MAKE) -C templates install
.PHONY: upload
upload: UPLOAD_DIST ?= $(DEB_DISTRIBUTION)
upload: $(SERVER_DEB) $(CLIENT_DEB) $(RESTORE_DEB) $(DOC_DEB) $(STATIC_CLIENT_DEB)
upload: $(SERVER_DEB) $(CLIENT_DEB) $(RESTORE_DEB) $(DOC_DEB)
# check if working directory is clean
git diff --exit-code --stat && git diff --exit-code --stat --staged
tar cf - $(SERVER_DEB) $(SERVER_DBG_DEB) $(DOC_DEB) $(CLIENT_DEB) $(CLIENT_DBG_DEB) \
| ssh -X repoman@repo.proxmox.com upload --product pbs --dist $(UPLOAD_DIST)
tar cf - $(CLIENT_DEB) $(CLIENT_DBG_DEB) | ssh -X repoman@repo.proxmox.com upload --product "pve,pmg,pbs-client" --dist $(UPLOAD_DIST)
tar cf - $(STATIC_CLIENT_DEB) $(STATIC_CLIENT_DBG_DEB) | ssh -X repoman@repo.proxmox.com upload --product "pbs-client" --dist $(UPLOAD_DIST)
tar cf - $(RESTORE_DEB) $(RESTORE_DBG_DEB) | ssh -X repoman@repo.proxmox.com upload --product "pve" --dist $(UPLOAD_DIST)


@@ -5,11 +5,8 @@ Build & Release Notes
``rustup`` Toolchain
====================
We normally want to build with the ``rustc`` Debian package (see below). If you
still want to use ``rustup`` for other reasons (e.g. to easily switch between
the official stable, beta, and nightly compilers), you should set the following
``rustup`` configuration to use the Debian-provided ``rustc`` compiler
by default:
We normally want to build with the ``rustc`` Debian package. To do that
you can set the following ``rustup`` configuration:
# rustup toolchain link system /usr
# rustup default system
@@ -33,7 +30,7 @@ pre-release version number (e.g., "0.1.1-dev.1" instead of "0.1.0").
Local cargo config
==================
This repository ships with a ``.cargo/config.toml`` that replaces the crates.io
This repository ships with a ``.cargo/config`` that replaces the crates.io
registry with packaged crates located in ``/usr/share/cargo/registry``.
A similar config is also applied building with dh_cargo. Cargo.lock needs to be
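A minimal sketch of such a source replacement (the values for ``directory`` and
``replace-with`` appear in the ``.cargo/config.toml`` hunk above; the
``[source.debian-packages]`` section name is inferred from the ``replace-with``
value, since that hunk only starts at line 3 of the file):

  [source.debian-packages]
  directory = "/usr/share/cargo/registry"

  [source.crates-io]
  replace-with = "debian-packages"

With this in place, cargo resolves crates.io dependencies from the packaged
sources under ``/usr/share/cargo/registry`` instead of downloading them.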

debian/changelog

@@ -1,705 +1,3 @@
rust-proxmox-backup (3.4.1-1) bookworm; urgency=medium
* ui: token view: fix typo in 'lose' and rephrase token regenerate dialog
message for more clarity.
* restrict consent-banner text length to 64 KiB.
* docs: describe the intent for the statically linked pbs client.
* api: backup: include previous snapshot name in log message.
* garbage collection: account for created/deleted index files concurrently
to GC to avoid potentially confusing log messages.
* garbage collection: fix rare race in chunk marking phase for setups doing
highly frequent backups in quick succession while immediately pruning down to a
single backup snapshot left over after each such backup.
* tape: wait for calibration of LTO-9 tapes in general, not just in the
initial tape format procedure.
-- Proxmox Support Team <support@proxmox.com> Wed, 16 Apr 2025 14:45:37 +0200
rust-proxmox-backup (3.4.0-1) bookworm; urgency=medium
* fix #4788: build statically linked version of the proxmox-backup-client
package.
* ui: sync job: change the rate limit direction based on sync direction.
* docs: mention how to set the push sync jobs rate limit
* ui: set error mask: ensure that message is html-encoded to avoid visual
glitches.
* api server: increase maximal request body size from 64 kiB to 512 kiB,
similar to a recent change for our perl based projects.
* notifications: include Content-Length header for broader compatibility in
the webhook and gotify targets.
* notifications: allow overriding notification templates.
* docs: notifications: add section about how to use custom templates
* sync: print whole error chain per group on failure for more context.
* ui: options-view: fix typo in empty-text for GC tuning option.
* memory info: use the "MemAvailable" field from '/proc/meminfo' to compute
used memory to fix overestimation of that metric and to better align with
what modern versions of tools like `free` do and to future-proof against
changes in how the kernel accounts for memory usage.
* add "MemAvailable" field to ProcFsMemInfo to promote its usage over the
existing "MemFree" field, which is almost never the right choice. This new
field will be provided to external metric servers.
* docs: mention different name resolution for statically linked binary.
* docs: add basic info for how to install the statically linked client.
* docs: mention new verify-only and encrypted-only flags for sync jobs.
-- Proxmox Support Team <support@proxmox.com> Wed, 09 Apr 2025 17:41:38 +0200
rust-proxmox-backup (3.3.7-1) bookworm; urgency=medium
* fix #5982: garbage collection: add a check to ensure that the underlying
file system supports and honors file access time (atime) updates.
The check is performed once on datastore creation and on start of every
garbage collection (GC) task, just to be sure. It can be disabled in the
datastore tuning options.
* garbage collection: support setting a custom access time cutoff,
overriding the default of one day and five minutes.
* ui: expose flag for GC access time support safety check and the access
time cutoff override in datastore tuning options.
* docs: describe rationale for new GC access time update check setting and
the access time cutoff check in tuning options.
* access control: add support to mark a specific authentication realm as
default selected realm for the login user interface.
* fix #4382: api: access control: remove permissions of token on deletion.
* fix #3887: api: access control: allow users to regenerate the secret of an
API token without changing any existing ACLs.
* fix #6072: sync jobs: support flags to limit sync to only encrypted and/or
verified snapshots.
* ui: datastore tuning options: expose overriding GC cache capacity so that
admins can either restrict the peak memory usage during GC or allow GC to
use more memory to reduce file system IO even for huge (multiple TiB)
referenced data in backup groups.
* ui: datastore tuning options: increase width and rework labels to provide
a tiny bit more context about what these options are.
* ui: sync job: increase edit window width to 720px to make it less cramped.
* ui: sync job: small field label casing consistency fixes.
-- Proxmox Support Team <support@proxmox.com> Sat, 05 Apr 2025 17:54:31 +0200
rust-proxmox-backup (3.3.6-1) bookworm; urgency=medium
* datastore: ignore group locking errors when removing snapshots, they
normally happen only due to old-locking, and the underlying snapshot is
deleted in any case at this point, so it's no help to confuse the user.
* api: datastore: add error message on failed removal due to old locking and
tell any admin what they can do to switch to the new locking.
* ui: only add delete parameter on token edit, not when creating tokens.
* pbs-client: allow reading fingerprint from system credential.
* docs: client: add section about system credentials integration.
-- Proxmox Support Team <support@proxmox.com> Thu, 03 Apr 2025 17:57:02 +0200
rust-proxmox-backup (3.3.5-1) bookworm; urgency=medium
* api: config: use guard for unmounting on failed datastore creation
* client: align description for backup specification to docs, using
`archive-name` and `type` over `label` and `ext`.
* client: read credentials from CREDENTIALS_DIRECTORY environment variable
following the "System and Service Credentials" specification. This allows
users to use native systemd capabilities for credential management if the
proxmox-backup-client is used in systemd units or, e.g., through a wrapper
like systemd-run.
* fix #3935: datastore/api/backup: move datastore locking to '/run' to avoid
that lock-files can block deleting backup groups or snapshots on the
datastore and to decouple locking from the underlying datastore
file-system.
* api: fix race when changing the owner of a backup-group.
* fix #3336: datastore: remove group if the last snapshot is removed to
avoid confusing situations where the group directory still exists and
blocks re-creating a group with another owner even though the empty group
was not visible in the web UI.
* notifications: clean-up and add dedicated types for all templates so as to
allow declaring that interface stable in preparation for allowing
overriding them in the future (not included in this release).
* tape: introduce a tape backup job worker-thread option for restores.
Depending on the underlying storage using more threads can dramatically
improve the restore speed. Especially fast storage with low penalty for
random access, like flash-storage (SSDs) can profit from using more
worker threads. But on file systems backed by spinning disks (HDDs) the
performance can even degrade with more threads. This is why for now the
default is left at a single thread and the admin needs to tune this for
their storage.
* garbage collection: generate index file list via datastore iterators in a
structured manner.
* fix #5331: garbage collection: avoid multiple chunk atime updates by
keeping track of the recently marked chunks in phase 1 of garbage collection
to avoid multiple atime updates via relatively expensive utimensat (touch) calls.
Use an LRU cache with size 32 MiB for tracking already processed chunks,
this fully covers backup groups referencing up to 4 TiB of actual chunks
and even bigger ones can still benefit from the cache. On some real-world
benchmarks of a datastore with 1.5 million chunks, and original data
usage of 120 TiB and a referenced data usage of 2.7 TiB (high
deduplication count due to long-term history) we measured 21.1 times less
file updates (31.6 million) and a 6.1 times reduction in total GC runtime
(155.4 s to 22.8 s) on a ZFS RAID 10 system consisting of spinning HDDs
and a special device mirror backed by datacenter SSDs.
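(Back-of-the-envelope check, assuming 32-byte SHA-256 chunk digests and the
default 4 MiB average chunk size, neither of which is stated here:
32 MiB / 32 B = 1,048,576 cache entries, and 1,048,576 x 4 MiB = 4 TiB of
referenced data, matching the coverage claimed above; per-entry LRU
bookkeeping overhead would reduce the effective coverage somewhat.)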
* logging helper: use new builder initializer, no functional change
intended.
-- Proxmox Support Team <support@proxmox.com> Wed, 02 Apr 2025 19:42:38 +0200
rust-proxmox-backup (3.3.4-1) bookworm; urgency=medium
* fix #6185: client/docs: explicitly mention archive name restrictions
* docs: using-the-installer: adapt to raised root password length requirement
* disks: wipe: replace dd with write_all_at for zeroing disk
* fix #5946: disks: wipe: ensure GPT header backup is wiped
* docs: fix hash collision probability comparison
-- Proxmox Support Team <support@proxmox.com> Thu, 13 Mar 2025 13:04:05 +0100
rust-proxmox-backup (3.3.3-1) bookworm; urgency=medium
* api: datastore list: move checking if a datastore is mounted after we
ensured that the user may actually access it. While this had no effect
security wise, it could significantly increase the cost of this API
endpoint in big setups with many datastores and many tenants that each
have only access to one, or a small set, of datastores.
* Revert "fix #5710: api: backup: stat known chunks on backup finish" due to
a big performance impact relative to what this is protecting against. We
will work out a more efficient fix for this issue in the future.
* prune simulator: show backup entries that are kept also in the flat list
of backups, not just in the calendar view.
* docs: improve the description for the garbage collection's cut-off time
* pxar extract: correctly honor the overwrite flag
* api: datastore: add missing log context for prune to avoid a case where
the worker state was unknown after it finished.
* docs: add synopsis and basic docs for prune job configuration
* backup verification: handle manifest update errors as non-fatal to avoid
that the job fails, as we want to continue with verifying the rest to
ensure we uncover as many potential problems as possible.
* fix #4408: docs: add 'disaster recovery' section for tapes
* fix #6069: prune simulator: correctly handle schedules that mix both, a
range and a step size at once.
* client: pxar: fix a race condition where the backup upload stream can miss
an error from the create archive function, because the error state is only
set after the backup stream was already polled. This avoids an edge case
where a file-based backup was incorrectly marked as having succeeded while
there was an error.
-- Proxmox Support Team <support@proxmox.com> Tue, 11 Feb 2025 20:24:27 +0100
rust-proxmox-backup (3.3.2-2) bookworm; urgency=medium
* file-restore: fix regression with the new blockdev method used to pass
disks of a backup to the isolated virtual machine.
-- Proxmox Support Team <support@proxmox.com> Tue, 10 Dec 2024 12:14:47 +0100
rust-proxmox-backup (3.3.2-1) bookworm; urgency=medium
* pbs-client: remove `log` dependency and migrate to our common,
`tracing`-based, logging infrastructure. No semantic change intended.
* file restore: switch to more modern blockdev option for drives in QEMU
wrapper for the restore VM.
* pxar: client: fix missing file size check for metadata comparison
-- Proxmox Support Team <support@proxmox.com> Mon, 09 Dec 2024 10:37:32 +0100
rust-proxmox-backup (3.3.1-1) bookworm; urgency=medium
* tree-wide: add missing O_CLOEXEC flags to `openat` calls to avoid passing
any open FD to new child processes which can have undesired side-effects
like keeping a lock open longer than it should.
* cargo: update proxmox dependency of rest-server and sys crates to include
some fixes for open FDs and a fix for the active task worker tracking, as
on failing to update the index file the daemon did not finish the
worker, causing a reference count issue where an old daemon could keep
running forever.
* ui: check that store is set before trying to select anything in the garbage
collection (GC) job view.
-- Proxmox Support Team <support@proxmox.com> Tue, 03 Dec 2024 18:11:04 +0100
rust-proxmox-backup (3.3.0-2) bookworm; urgency=medium
* tree-wide: fix various typos.
* ui: fix remove vanished tooltip to be valid for both sync directions.
* ui: mask unmounted datastores in datastore overview.
* server: push: fix supported api version check for minor version bump.
-- Proxmox Support Team <support@proxmox.com> Thu, 28 Nov 2024 13:03:03 +0100
rust-proxmox-backup (3.3.0-1) bookworm; urgency=medium
* GC: add safety-check for nested datastore
* ui: make some more strings translatable
* docs: make sphinx ignore the environment cache to avoid missing synopsis
in some HTML output, like for example the "Command Syntax" appendix.
* docs: add note for why FAT is not supported as a backing file system for
datastores
* api: disks: directory: fail if mount unit already exists for a new file
system
* : filter partitions without proper UUID in partition selector
* ui: version info: replace wrong hyphen separator with dot
-- Proxmox Support Team <support@proxmox.com> Wed, 27 Nov 2024 20:38:41 +0100
rust-proxmox-backup (3.2.14-1) bookworm; urgency=medium
* pull-sync: do not interpret older missing snapshots as needs-resync
* api: directory: use relative path when creating removable datastore
* ui: prune keep input: actually clear value on clear trigger click
* ui: datastore edit: fix empty-text for path field
* sync: push: pass full error context when returning error to job
* api: mount removable datastore: only log an informational message if the
correct device is already mounted.
* api: sync: restrict edit permissions for the new push sync jobs to avoid
that a user is able to create or edit sync jobs in push direction, but not
able to see them.
* api: create datastore: fix checks to avoid that any datastore can contain
another one to better handle the case for the new removable datastores.
-- Proxmox Support Team <support@proxmox.com> Wed, 27 Nov 2024 14:42:56 +0100
rust-proxmox-backup (3.2.13-1) bookworm; urgency=medium
* update pxar dependency to fix selective extraction with the newly
supported match patterns.
* reuse-datastore: avoid creating another prune job
* api: notification: add API routes for webhook targets
* management cli: add CLI for webhook targets
* ui: utils: enable webhook edit window
* ui: utils: add task description for mounting/unmounting
* ui: add onlineHelp for consent-banner option
* docs: client: fix example commands for client usage
* docs: explain some further caveats of the change detection modes
* ui: use same label for removable datastore created from disk
* api: maintenance: allow setting of maintenance mode if 'unmounting'
* docs: add more information for removable datastores
* ui: sync jobs: revert to single list for pull/push jobs, improve
distinction between push and pull jobs through other means.
* ui: sync jobs: change default sorting to 'store' -> 'direction' -> 'id'
* ui: sync jobs: add search filter-box
* config: sync: use same config section type `sync` for push and pull, note
that this breaks existing configurations and needs manual clean-up. As the
package versions never made it beyond testing, this is deemed acceptable: while
it's not really ideal, we never give guarantees for testing package versions,
and the maintenance burden of keeping the old style would not be ideal either.
* api: removable datastores: require Sys.Modify permission on /system/disks
* ui: allow resetting unmounting maintenance
* datastore: re-phrase error message when datastore is unavailable
* client: backup writer: fix regression in progress output
-- Proxmox Support Team <support@proxmox.com> Tue, 26 Nov 2024 17:05:23 +0100
rust-proxmox-backup (3.2.12-1) bookworm; urgency=medium
* fix #5853: client: pxar: exclude stale files on metadata/link read
* docs: fix wrong product name in certificate docs
* docs: explain the working principle of the change detection modes
* allow datastore creation in directory with lost+found directory
* fix #5801: manager: switch datastore update command to real API call to
avoid early cancellation of the task.
* server: push: consistently use remote over target for error messages and
various smaller improvements to related log messages.
* push: move log messages for removed snapshot/group
* fix #5710: api: backup: stat known chunks on backup finish to ensure any
problem/corruption is caught earlier.
* pxar: extract: make invalid ACLs non-fatal, but only log them; there's
nothing to gain by failing the restore completely.
* server: push: log encountered empty backup groups during sync
* fix #3786: ui, api, cli: add resync-corrupt option to sync jobs
* docs: add security implications of prune and change detection mode
* fix #2996: client: backup restore: add support to pass match patterns for
a selective restore
* docs: add installation media preparation and installation wizard guides
* api: enforce minimum character limit of 8 on new passwords to follow
recent NIST recommendations.
* ui, api: support configuring a consent banner that is shown before login
to allow complying with some (government) policy frameworks.
* ui, api: add initial support for removable datastore providing better
integration for datastore located on a non-permanently attached medium.
-- Proxmox Support Team <support@proxmox.com> Mon, 25 Nov 2024 22:52:11 +0100
rust-proxmox-backup (3.2.11-1) bookworm; urgency=medium
* fix #3044: server: implement push support for sync operations
* push sync related refactors
-- Proxmox Support Team <support@proxmox.com> Thu, 21 Nov 2024 12:03:50 +0100
rust-proxmox-backup (3.2.10-1) bookworm; urgency=medium
* api: disk list: do not fail but just log error on gathering smart data
* cargo: require proxmox-log 0.2.6 to reduce spamming the logs with the
whole worker task contents
-- Proxmox Support Team <support@proxmox.com> Tue, 19 Nov 2024 22:36:14 +0100
rust-proxmox-backup (3.2.9-1) bookworm; urgency=medium
* client: catalog: fallback to metadata archives for dumping the catalog
* client: catalog shell: make the catalog optional and use the pxar accessor
for navigation if the catalog is not provided, as is the case, for
example, for split pxar archives.
* client: catalog shell: drop payload offset in `stat` output, as this is an
internal value that only helps when debugging some specific development.
* sync: fix premature return in snapshot-skip filter logic to avoid that the
first snapshot newer than the last synced one gets unconditionally
included.
* fix #5861: ui: remove minimum required username length in dialog for
changing the owner of a backup group, as PBS has supported usernames shorter
than 4 characters for a while now.
* fix #5439: allow one to reuse an existing datastore on datastore creation
* ui: disallow datastore in the file system root, this is almost never what
users want, and they can still use the CLI for such an edge case.
* fix #5233: api: tape: add explicit required permissions for the move tape,
update tape and destroy tape endpoints, requiring Tape.Modify and
Tape.Write on the `/tape` ACL object path, respectively. This avoids
requiring the use of the root account for basic tape management.
* client: catalog shell: make the root element its own parent to avoid
navigating below archive root, which makes no sense and just causes odd
glitches.
* api: disk management: avoid retrieving lsblk result twice when listing
disks; while it's not overly expensive, it certainly does not help
performance either.
* api: disk management: parallelize retrieving the output from smartctl
checks.
* fix #5600: pbs2to3: make check more flexible to allow one to run arbitrary
newer '-pve' kernels after upgrade
* client: pxar: perform match pattern check for exclusion only once
* client: pxar: add debug output for exclude pattern matches to more
conveniently debug possible issues.
* fix #5868: rest-server: handshake detection: avoid infinite loop on
connections abort
-- Proxmox Support Team <support@proxmox.com> Thu, 14 Nov 2024 16:10:10 +0100
rust-proxmox-backup (3.2.8-1) bookworm; urgency=medium
* switch various log statements in worker tasks to the newer, more flexible
proxmox log crate. With this change, errors from task logs are now also
logged to the system log, increasing their visibility.
* datastore api: list snapshots: avoid calculating protected attribute
twice per snapshot; this reduces the number of file metadata requests.
* avoid re-calculating the backup snapshot path's date time component when
getting the full path, reducing calls to the relatively slow strftime
function from libc.
* fix #3699: client: prefer the XDG cache directory for temporary files with
a fallback to using /tmp, as before.
* sync job: improve log message for when syncing the root namespace.
* client: increase read buffer from 8 KiB to 4 MiB for raw image based
backups. This reduces the time spent polling between the reader, chunker
and uploader async tasks and thus can improve backup speed significantly,
especially on setups with fast network and storage.
* client benchmark: avoid unnecessary allocation in the AES benchmark,
causing artificial overhead. The benchmark AES results should now be more
in line with the hardware capability and what the PBS client could already
do. On our test system we saw an increase by a factor of 2.3 on this
specific benchmark.
* docs: add external metrics server page
* tfa: webauthn: serialize OriginUrl following RFC6454
* factor out apt and apt-repository handling into a new library crate for
re-use in other projects. There should be no functional change.
* fix various typos all over the place found using the rust based `typos`
tool.
* datastore: data blob compression: increase compression throughput by
switching away from a higher level zstd method to a lower level one, which
allows us to control the target buffer size directly and thus avoid some
allocation and syscall overhead. We saw the compression bandwidth increase
by a factor of 1.19 in our tests where both the source data and the target
datastore were located in a memory-backed tmpfs.
* daily-update: ensure notification system context is initialized.
* backup reader: derive if debug messages should be printed from the global
log level. This avoids printing some debug messages by default, e.g., the
"protocol upgrade done" message from sync jobs.
* ui: user view: disable 'Unlock TFA' button by default to improve UX if no
user is selected.
* manager cli: ensure the worker task finishes when triggering a reload of
the system network.
* fix #5622: backup client: properly handle rate and burst parameters.
Previously, passing any non-integer value, like `1mb`, was ignored.
* tape: read element status: ignore responses where the library specifies
that it will return a volume tag but then does not include that field in
the actual response. As both the primary and the alternative volume tag
are not required by PBS, this specific error can simply be downgraded to a
warning.
* pxar: dump archive: print entries to stdout instead of stderr
* sync jobs: various clean-ups and refactoring that should not result in any
semantic change.
* metric collection: put metrics in a cache with a 30 minutes lifetime.
* api: add /status/metrics API to allow pull-based metric server to gather
data directly.
* partial fix #5560: client: periodically show backup progress
* docs: add proxmox-backup.node.cfg man page
* docs: sync: explicitly mention `removed-vanish` flag
-- Proxmox Support Team <support@proxmox.com> Fri, 18 Oct 2024 19:05:41 +0200
rust-proxmox-backup (3.2.7-1) bookworm; urgency=medium
* docs: drop blanket statement recommending against remote storage
* ui: gc job edit: fix i18n gettext usage
* pxar: improve error handling, e.g., avoiding duplicate information
* close #4763: client: add command to forget (delete) whole backup group
with all its snapshots
* close #5571: client: fix regression for `map` command
* client: mount: wait for child to return before exiting to provide better
UX for some edge paths
* fix #5304: client: set process uid/gid for `.pxarexclude-cli` to avoid
issues when trying to backup and restore the backup as non-root user.
* http client: keep renewal future running on failed re-auth to make it more
resilient against some transient errors, like the request just failing due
to network instability.
* datastore: fix problem with operations counting for the case where the
`.chunks/` directory is not available (deleted/moved)
* manager: use confirmation helper in wipe-disk command
-- Proxmox Support Team <support@proxmox.com> Wed, 03 Jul 2024 13:33:51 +0200
rust-proxmox-backup (3.2.6-1) bookworm; urgency=medium
* tape: disable Programmable Early Warning Zone (PEWZ)
* tape: handle PEWZ like regular early warning
* docs: add note for not using remote storages
* client: pxar: fix fuse mount performance for split archives
-- Proxmox Support Team <support@proxmox.com> Mon, 17 Jun 2024 10:18:13 +0200
rust-proxmox-backup (3.2.5-1) bookworm; urgency=medium
* pxar: add support for split archives
* fix #3174: pxar: enable caching and meta comparison
* docs: file formats: describe split pxar archive file layout
* docs: add section describing change detection mode
* api: datastore: add optional archive-name to file-restore
* client: backup: conditionally write catalog for file level backups
* docs: add table listing possible change detection modes
-- Proxmox Support Team <support@proxmox.com> Mon, 10 Jun 2024 13:39:54 +0200
rust-proxmox-backup (3.2.4-1) bookworm; urgency=medium
* fix: network api: permission using wrong pathname
* fix #5503: d/control: bump dependency for proxmox-widget-toolkit
* auth: add locking to `PbsAuthenticator` to avoid race conditions
-- Proxmox Support Team <support@proxmox.com> Wed, 05 Jun 2024 16:23:38 +0200
rust-proxmox-backup (3.2.3-1) bookworm; urgency=medium
* api-types: remove influxdb bucket name restrictions
* api: datastore status: delay lookup after permission check to improve
consistency of tracked read operations
* tape: improve throughput by not unnecessarily syncing/committing after
every archive written beyond the first 128 GiB
* tape: save 'bytes used' in the tape inventory and show them on the web UI
to allow users to more easily see the usage of a tape
* tape drive status: return drive activity (like cleaning, loading,
unloading, writing, ...) in the API and show them in the UI
* ui: tape drive status: avoid checking some specific status if the current
drive activity would block doing so anyway
* tape: write out basic MAM host-type attributes to media to make them more
easily identifiable as Proxmox Backup Server tape by common LTO tooling.
* api: syslog: fix the documented type of the return value
* fix #5465: restore daemon: mount NTFS with UTF-8 charset
* restore daemon: log some more errors on directory traversal
* fix #5422: ui: garbage-collection: make columns in global view sortable
* auth: move to hmac keys for csrf tokens as future-proofing
* auth: upgrade hashes on user login if a user's password is not hashed with
the latest password hashing function, for hardening purposes
* auth: use ed25519 keys when generating new auth api keys
* notifications: fix legacy sync notifications
* docs: document notification-mode and merge old notification section
* docs: notifications: rewrite overview for more clarity
* ui: datastore options: link to 'notification-mode' section
* acme: explicitly print a query when prompting for the custom directory URI
-- Proxmox Support Team <support@proxmox.com> Wed, 22 May 2024 19:31:35 +0200
rust-proxmox-backup (3.2.2-1) bookworm; urgency=medium
* ui: notifications fix empty text format for the default mail author

debian/control

@@ -15,28 +15,29 @@ Build-Depends: bash-completion,
libacl1-dev,
libfuse3-dev,
librust-anyhow-1+default-dev,
librust-apt-pkg-native-0.3+default-dev (>= 0.3.2-~~),
librust-async-trait-0.1+default-dev (>= 0.1.56-~~),
librust-base64-0.13+default-dev,
librust-bitflags-2+default-dev (>= 2.4-~~),
librust-bitflags-1+default-dev (>= 1.2.1-~~),
librust-bytes-1+default-dev,
librust-cidr-0.2+default-dev (>= 0.2.1-~~),
librust-const-format-0.2+default-dev,
librust-crc32fast-1+default-dev,
librust-crossbeam-channel-0.5+default-dev,
librust-endian-trait-0.6+arrays-dev,
librust-endian-trait-0.6+default-dev,
librust-env-logger-0.11+default-dev,
librust-env-logger-0.10+default-dev,
librust-flate2-1+default-dev,
librust-foreign-types-0.3+default-dev,
librust-futures-0.3+default-dev,
librust-h2-0.4+default-dev,
librust-h2-0.4+legacy-dev,
librust-h2-0.4+stream-dev,
librust-h2-0.3+default-dev,
librust-h2-0.3+stream-dev,
librust-handlebars-3+default-dev,
librust-hex-0.4+default-dev (>= 0.4.3-~~),
librust-hex-0.4+serde-dev (>= 0.4.3-~~),
librust-hyper-0.14+backports-dev,
librust-http-0.2+default-dev,
librust-hyper-0.14+default-dev,
librust-hyper-0.14+deprecated-dev,
librust-hyper-0.14+full-dev,
librust-lazy-static-1+default-dev (>= 1.4-~~),
librust-libc-0.2+default-dev,
librust-log-0.4+default-dev (>= 0.4.17-~~),
librust-nix-0.26+default-dev (>= 0.26.1-~~),
@@ -45,76 +46,70 @@ Build-Depends: bash-completion,
librust-once-cell-1+default-dev (>= 1.3.1-~~),
librust-openssl-0.10+default-dev (>= 0.10.40-~~),
librust-pathpatterns-0.3+default-dev,
librust-pbs-api-types-0.2+default-dev (>= 0.2.2),
librust-percent-encoding-2+default-dev (>= 2.1-~~),
librust-pin-project-lite-0.2+default-dev,
librust-proxmox-acme-0.5+default-dev (>= 0.5.3-~~),
librust-proxmox-apt-0.11+cache-dev,
librust-proxmox-apt-0.11+default-dev,
librust-proxmox-apt-api-types-1+default-dev (>= 1.0.1-~~),
librust-proxmox-acme-0.5+default-dev,
librust-proxmox-apt-0.10+default-dev (>= 0.10.5-~~),
librust-proxmox-async-0.4+default-dev,
librust-proxmox-auth-api-0.4+api-dev,
librust-proxmox-auth-api-0.4+default-dev,
librust-proxmox-auth-api-0.4+pam-authenticator-dev,
librust-proxmox-auth-api-0.3+api-dev,
librust-proxmox-auth-api-0.3+api-types-dev,
librust-proxmox-auth-api-0.3+default-dev,
librust-proxmox-auth-api-0.3+pam-authenticator-dev,
librust-proxmox-borrow-1+default-dev,
librust-proxmox-compression-0.2+default-dev,
librust-proxmox-config-digest-0.1+default-dev,
librust-proxmox-daemon-0.1+default-dev,
librust-proxmox-fuse-0.1+default-dev (>= 0.1.3-~~),
librust-proxmox-http-0.9+client-dev (>= 0.9.5-~~),
librust-proxmox-http-0.9+client-trait-dev (>= 0.9.5-~~),
librust-proxmox-http-0.9+default-dev (>= 0.9.5-~~),
librust-proxmox-http-0.9+http-helpers-dev (>= 0.9.5-~~),
librust-proxmox-http-0.9+proxmox-async-dev (>= 0.9.5-~~),
librust-proxmox-http-0.9+rate-limited-stream-dev (>= 0.9.5-~~),
librust-proxmox-http-0.9+rate-limiter-dev (>= 0.9.5-~~),
librust-proxmox-http-0.9+websocket-dev (>= 0.9.5-~~),
librust-proxmox-http-0.9+client-dev,
librust-proxmox-http-0.9+client-trait-dev,
librust-proxmox-http-0.9+default-dev,
librust-proxmox-http-0.9+http-helpers-dev,
librust-proxmox-http-0.9+proxmox-async-dev,
librust-proxmox-http-0.9+rate-limited-stream-dev,
librust-proxmox-http-0.9+rate-limiter-dev,
librust-proxmox-http-0.9+websocket-dev,
librust-proxmox-human-byte-0.1+default-dev,
librust-proxmox-io-1+default-dev (>= 1.0.1-~~),
librust-proxmox-io-1+tokio-dev (>= 1.0.1-~~),
librust-proxmox-lang-1+default-dev (>= 1.1-~~),
librust-proxmox-ldap-0.2+default-dev (>= 0.2.1-~~),
librust-proxmox-log-0.2+default-dev (>= 0.2.6-~~),
librust-proxmox-metrics-0.3+default-dev (>= 0.3.1-~~),
librust-proxmox-notify-0.5+default-dev (>= 0.5.1-~~),
librust-proxmox-notify-0.5+pbs-context-dev (>= 0.5.1-~~),
librust-proxmox-metrics-0.3+default-dev,
librust-proxmox-notify+default-dev (>= 0.4),
librust-proxmox-notify+pbs-context-dev (>= 0.4),
librust-proxmox-openid-0.10+default-dev,
librust-proxmox-rest-server-0.8+default-dev (>= 0.8.9-~~),
librust-proxmox-rest-server-0.8+rate-limited-stream-dev (>= 0.8.9-~~),
librust-proxmox-rest-server-0.8+templates-dev (>= 0.8.9-~~),
librust-proxmox-router-3+cli-dev,
librust-proxmox-router-3+server-dev,
librust-proxmox-rrd-0.4+default-dev,
librust-proxmox-rrd-api-types-1+default-dev (>= 1.0.2-~~),
librust-proxmox-schema-4+api-macro-dev,
librust-proxmox-schema-4+default-dev,
librust-proxmox-rest-server-0.5+default-dev (>= 0.5.1-~~),
librust-proxmox-rest-server-0.5+rate-limited-stream-dev (>= 0.5.1-~~),
librust-proxmox-rest-server-0.5+templates-dev (>= 0.5.1-~~),
librust-proxmox-router-2+cli-dev,
librust-proxmox-router-2+default-dev,
librust-proxmox-router-2+server-dev,
librust-proxmox-rrd-0.1+default-dev,
librust-proxmox-schema-3+api-macro-dev,
librust-proxmox-schema-3+default-dev,
librust-proxmox-section-config-2+default-dev,
librust-proxmox-serde-0.1+default-dev (>= 0.1.1-~~),
librust-proxmox-serde-0.1+serde-json-dev (>= 0.1.1-~~),
librust-proxmox-shared-cache-0.1+default-dev,
librust-proxmox-shared-memory-0.3+default-dev,
librust-proxmox-sortable-macro-0.1+default-dev (>= 0.1.2-~~),
librust-proxmox-subscription-0.5+api-types-dev,
librust-proxmox-subscription-0.5+default-dev,
librust-proxmox-sys-0.6+acl-dev (>= 0.6.5-~~),
librust-proxmox-sys-0.6+crypt-dev (>= 0.6.5-~~),
librust-proxmox-sys-0.6+default-dev (>= 0.6.7-~~),
librust-proxmox-sys-0.6+logrotate-dev (>= 0.6.5-~~),
librust-proxmox-sys-0.6+timer-dev (>= 0.6.5-~~),
librust-proxmox-systemd-0.1+default-dev,
librust-proxmox-tfa-5+api-dev,
librust-proxmox-tfa-5+api-types-dev,
librust-proxmox-tfa-5+default-dev,
librust-proxmox-time-2+default-dev,
librust-proxmox-subscription-0.4+api-types-dev (>= 0.4.2-~~),
librust-proxmox-subscription-0.4+default-dev (>= 0.4.2-~~),
librust-proxmox-sys-0.5+acl-dev (>= 0.5.3-~~),
librust-proxmox-sys-0.5+crypt-dev (>= 0.5.3-~~),
librust-proxmox-sys-0.5+default-dev (>= 0.5.3-~~),
librust-proxmox-sys-0.5+logrotate-dev (>= 0.5.3-~~),
librust-proxmox-sys-0.5+timer-dev (>= 0.5.3-~~),
librust-proxmox-tfa-4+api-dev (>= 4.0.4-~~),
librust-proxmox-tfa-4+api-types-dev (>= 4.0.4-~~),
librust-proxmox-tfa-4+default-dev (>= 4.0.4-~~),
librust-proxmox-time-1+default-dev (>= 1.1.6-~~),
librust-proxmox-uuid-1+default-dev,
librust-proxmox-uuid-1+serde-dev,
librust-proxmox-worker-task-0.1+default-dev,
librust-pxar-0.12+default-dev (>= 0.12.1-~~),
librust-pxar-0.10+default-dev (>= 0.10.2-~~),
librust-regex-1+default-dev (>= 1.5.5-~~),
librust-rustyline-9+default-dev,
librust-serde-1+default-dev,
librust-serde-1+derive-dev,
librust-serde-json-1+default-dev,
librust-serde-plain-1+default-dev,
librust-siphasher-0.3+default-dev,
librust-syslog-6+default-dev,
librust-tar-0.4+default-dev,
librust-termcolor-1+default-dev (>= 1.1.2-~~),
@@ -138,14 +133,12 @@ Build-Depends: bash-completion,
librust-tokio-util-0.7+default-dev,
librust-tokio-util-0.7+io-dev,
librust-tower-service-0.3+default-dev,
librust-tracing-0.1+default-dev,
librust-udev-0.4+default-dev,
librust-url-2+default-dev (>= 2.1-~~),
librust-walkdir-2+default-dev,
librust-xdg-2+default-dev (>= 2.2-~~),
librust-zstd-0.12+bindgen-dev,
librust-zstd-0.12+default-dev,
librust-zstd-safe-6+default-dev,
libsgutils2-dev,
libstd-rust-dev,
libsystemd-dev (>= 246-~~),
@@ -184,7 +177,7 @@ Depends: fonts-font-awesome,
postfix | mail-transport-agent,
proxmox-backup-docs,
proxmox-mini-journalreader,
proxmox-widget-toolkit (>= 4.3.3),
proxmox-widget-toolkit (>= 3.5.2),
pve-xtermjs (>= 4.7.0-1),
sg3-utils,
smartmontools,
@@ -205,14 +198,6 @@ Description: Proxmox Backup Client tools
This package contains the Proxmox Backup client, which provides a
simple command line tool to create and restore backups.
Package: proxmox-backup-client-static
Architecture: any
Depends: qrencode, ${misc:Depends},
Conflicts: proxmox-backup-client,
Description: Proxmox Backup Client tools (statically linked)
This package contains the Proxmox Backup client, which provides a
simple command line tool to create and restore backups.
Package: proxmox-backup-docs
Build-Profiles: <!nodoc>
Section: doc

debian/copyright

@@ -1,4 +1,4 @@
Copyright (C) 2019 - 2025 Proxmox Server Solutions GmbH
Copyright (C) 2019 - 2024 Proxmox Server Solutions GmbH
This software is written by Proxmox Server Solutions GmbH <support@proxmox.com>

debian/postinst

@@ -20,7 +20,15 @@ case "$1" in
# modeled after dh_systemd_start output
systemctl --system daemon-reload >/dev/null || true
if [ -n "$2" ]; then
if dpkg --compare-versions "$2" 'lt' '1.0.7-1'; then
# there was an issue with reloading and systemd being confused in older daemon versions
# so restart instead of reload if upgrading from there, see commit 0ec79339f7aebf9
# FIXME: remove with PBS 2.1
echo "Upgrading from older proxmox-backup-server: restart (not reload) daemons"
_dh_action=try-restart
else
_dh_action=try-reload-or-restart
fi
else
_dh_action=start
fi
@@ -72,11 +80,6 @@ EOF
update_sync_job "$prev_job"
fi
fi
if dpkg --compare-versions "$2" 'lt' '3.3.5~'; then
# ensure old locking is used by the daemon until a reboot happened
touch "/run/proxmox-backup/old-locking"
fi
fi
;;


@@ -1,2 +0,0 @@
debian/proxmox-backup-client.bc proxmox-backup-client
debian/pxar.bc pxar


@@ -1,4 +0,0 @@
usr/share/man/man1/proxmox-backup-client.1
usr/share/man/man1/pxar.1
usr/share/zsh/vendor-completions/_proxmox-backup-client
usr/share/zsh/vendor-completions/_pxar


@@ -9,7 +9,7 @@ update_initramfs() {
CACHE_PATH_DBG="/var/cache/proxmox-backup/file-restore-initramfs-debug.img"
# cleanup first, in case proxmox-file-restore was uninstalled since we do
# not want an unusable image lying around
# not want an unuseable image lying around
rm -f "$CACHE_PATH"
if [ ! -f "$INST_PATH/initramfs.img" ]; then


@@ -4,7 +4,6 @@ etc/proxmox-backup-daily-update.service /lib/systemd/system/
etc/proxmox-backup-daily-update.timer /lib/systemd/system/
etc/proxmox-backup-proxy.service /lib/systemd/system/
etc/proxmox-backup.service /lib/systemd/system/
etc/removable-device-attach@.service /lib/systemd/system/
usr/bin/pmt
usr/bin/pmtx
usr/bin/proxmox-tape
@@ -31,31 +30,34 @@ usr/share/man/man5/acl.cfg.5
usr/share/man/man5/datastore.cfg.5
usr/share/man/man5/domains.cfg.5
usr/share/man/man5/media-pool.cfg.5
usr/share/man/man5/notifications-priv.cfg.5
usr/share/man/man5/notifications.cfg.5
usr/share/man/man5/proxmox-backup.node.cfg.5
usr/share/man/man5/prune.cfg.5
usr/share/man/man5/notifications-priv.cfg.5
usr/share/man/man5/remote.cfg.5
usr/share/man/man5/sync.cfg.5
usr/share/man/man5/tape-job.cfg.5
usr/share/man/man5/tape.cfg.5
usr/share/man/man5/user.cfg.5
usr/share/man/man5/verification.cfg.5
usr/share/zsh/vendor-completions/_pmt
usr/share/zsh/vendor-completions/_pmtx
usr/share/zsh/vendor-completions/_proxmox-backup-debug
usr/share/zsh/vendor-completions/_proxmox-backup-manager
usr/share/zsh/vendor-completions/_proxmox-tape
usr/share/proxmox-backup/templates/default/acme-err-body.txt.hbs
usr/share/proxmox-backup/templates/default/acme-err-subject.txt.hbs
usr/share/proxmox-backup/templates/default/gc-err-body.txt.hbs
usr/share/proxmox-backup/templates/default/gc-err-subject.txt.hbs
usr/share/proxmox-backup/templates/default/gc-ok-body.txt.hbs
usr/share/proxmox-backup/templates/default/gc-err-subject.txt.hbs
usr/share/proxmox-backup/templates/default/gc-ok-subject.txt.hbs
usr/share/proxmox-backup/templates/default/package-updates-body.txt.hbs
usr/share/proxmox-backup/templates/default/package-updates-subject.txt.hbs
usr/share/proxmox-backup/templates/default/prune-err-body.txt.hbs
usr/share/proxmox-backup/templates/default/prune-err-subject.txt.hbs
usr/share/proxmox-backup/templates/default/prune-ok-body.txt.hbs
usr/share/proxmox-backup/templates/default/prune-err-subject.txt.hbs
usr/share/proxmox-backup/templates/default/prune-ok-subject.txt.hbs
usr/share/proxmox-backup/templates/default/sync-err-body.txt.hbs
usr/share/proxmox-backup/templates/default/sync-err-subject.txt.hbs
usr/share/proxmox-backup/templates/default/sync-ok-body.txt.hbs
usr/share/proxmox-backup/templates/default/sync-err-subject.txt.hbs
usr/share/proxmox-backup/templates/default/sync-ok-subject.txt.hbs
usr/share/proxmox-backup/templates/default/tape-backup-err-body.txt.hbs
usr/share/proxmox-backup/templates/default/tape-backup-err-subject.txt.hbs
@@ -64,13 +66,9 @@ usr/share/proxmox-backup/templates/default/tape-backup-ok-subject.txt.hbs
usr/share/proxmox-backup/templates/default/tape-load-body.txt.hbs
usr/share/proxmox-backup/templates/default/tape-load-subject.txt.hbs
usr/share/proxmox-backup/templates/default/test-body.txt.hbs
usr/share/proxmox-backup/templates/default/test-body.html.hbs
usr/share/proxmox-backup/templates/default/test-subject.txt.hbs
usr/share/proxmox-backup/templates/default/verify-err-body.txt.hbs
usr/share/proxmox-backup/templates/default/verify-err-subject.txt.hbs
usr/share/proxmox-backup/templates/default/verify-ok-body.txt.hbs
usr/share/proxmox-backup/templates/default/verify-err-subject.txt.hbs
usr/share/proxmox-backup/templates/default/verify-ok-subject.txt.hbs
usr/share/zsh/vendor-completions/_pmt
usr/share/zsh/vendor-completions/_pmtx
usr/share/zsh/vendor-completions/_proxmox-backup-debug
usr/share/zsh/vendor-completions/_proxmox-backup-manager
usr/share/zsh/vendor-completions/_proxmox-tape

View File

@ -16,6 +16,3 @@ SUBSYSTEM=="scsi_generic", SUBSYSTEMS=="scsi", ATTRS{type}=="1", ENV{ID_SCSI_SER
SYMLINK+="tape/by-id/scsi-$env{ID_SCSI_SERIAL}-sg"
LABEL="persistent_storage_tape_end"
# triggers the mounting of a removable device
ACTION=="add", SUBSYSTEM=="block", ENV{ID_FS_UUID}!="", TAG+="systemd", ENV{SYSTEMD_WANTS}="removable-device-attach@$env{ID_FS_UUID}"

debian/rules vendored
View File

@ -8,7 +8,7 @@ include /usr/share/rustc/architecture.mk
export BUILD_MODE=release
export CARGO=/usr/share/cargo/bin/cargo
CARGO=/usr/share/cargo/bin/cargo
export CFLAGS CXXFLAGS CPPFLAGS LDFLAGS
export DEB_HOST_RUST_TYPE DEB_HOST_GNU_TYPE
@ -28,11 +28,6 @@ override_dh_auto_configure:
@perl -ne 'if (/^version\s*=\s*"(\d+(?:\.\d+)+)"/) { my $$v_cargo = $$1; my $$v_deb = "$(DEB_VERSION_UPSTREAM)"; \
die "ERROR: d/changelog <-> Cargo.toml version mismatch: $$v_cargo != $$v_deb\n" if $$v_cargo ne $$v_deb; exit(0); }' Cargo.toml
$(CARGO) prepare-debian $(CURDIR)/debian/cargo_registry --link-from-system
# `cargo build` and `cargo install` have different config precedence, symlink
# the wrapper config into a place where `build` picks it up as well..
# https://doc.rust-lang.org/cargo/commands/cargo-install.html#configuration-discovery
mkdir -p .cargo
ln -s $(CARGO_HOME)/config.toml $(CURDIR)/.cargo/config.toml
dh_auto_configure
override_dh_auto_build:
@ -47,9 +42,6 @@ override_dh_auto_install:
dh_auto_install -- \
PROXY_USER=backup \
LIBDIR=/usr/lib/$(DEB_HOST_MULTIARCH)
mkdir -p debian/proxmox-backup-client-static/usr/bin
mv debian/tmp/usr/bin/proxmox-backup-client-static debian/proxmox-backup-client-static/usr/bin/proxmox-backup-client
mv debian/tmp/usr/bin/pxar-static debian/proxmox-backup-client-static/usr/bin/pxar
override_dh_installsystemd:
dh_installsystemd -pproxmox-backup-server proxmox-backup-daily-update.timer

View File

@ -1,65 +1,59 @@
include ../defines.mk
GENERATED_SYNOPSIS := \
config/acl/roles.rst \
config/datastore/config.rst \
config/domains/config.rst \
proxmox-tape/synopsis.rst \
proxmox-backup-client/synopsis.rst \
proxmox-backup-client/catalog-shell-synopsis.rst \
proxmox-backup-manager/synopsis.rst \
proxmox-backup-debug/synopsis.rst \
proxmox-file-restore/synopsis.rst \
pxar/synopsis.rst \
pmtx/synopsis.rst \
pmt/synopsis.rst \
config/media-pool/config.rst \
config/notifications-priv/config.rst \
config/notifications/config.rst \
config/notifications-priv/config.rst \
config/tape/config.rst \
config/tape-job/config.rst \
config/user/config.rst \
config/remote/config.rst \
config/sync/config.rst \
config/tape-job/config.rst \
config/tape/config.rst \
config/user/config.rst \
config/verification/config.rst \
config/prune/config.rst \
pmt/synopsis.rst \
pmtx/synopsis.rst \
proxmox-backup-client/catalog-shell-synopsis.rst \
proxmox-backup-client/synopsis.rst \
proxmox-backup-debug/synopsis.rst \
proxmox-backup-manager/synopsis.rst \
proxmox-file-restore/synopsis.rst \
proxmox-tape/synopsis.rst \
pxar/synopsis.rst \
config/acl/roles.rst \
config/datastore/config.rst \
config/domains/config.rst
MAN1_PAGES := \
pbs2to3.1 \
pmt.1 \
pmtx.1 \
proxmox-backup-client.1 \
proxmox-backup-debug.1 \
proxmox-backup-manager.1 \
proxmox-backup-proxy.1 \
proxmox-file-restore.1 \
proxmox-tape.1 \
pxar.1 \
pmtx.1 \
pmt.1 \
proxmox-tape.1 \
proxmox-backup-proxy.1 \
proxmox-backup-client.1 \
proxmox-backup-manager.1 \
proxmox-file-restore.1 \
proxmox-backup-debug.1 \
pbs2to3.1 \
# FIXME: prefix all man pages that are not directly relating to an existing executable with
# `proxmox-backup.`, like the newer added proxmox-backup.node.cfg but add backwards compatible
# symlinks, e.g. with a "5pbs" man page "suffix section".
MAN5_PAGES := \
acl.cfg.5 \
datastore.cfg.5 \
domains.cfg.5 \
media-pool.cfg.5 \
proxmox-backup.node.cfg.5 \
notifications-priv.cfg.5 \
notifications.cfg.5 \
tape.cfg.5 \
tape-job.cfg.5 \
acl.cfg.5 \
user.cfg.5 \
remote.cfg.5 \
sync.cfg.5 \
tape-job.cfg.5 \
tape.cfg.5 \
user.cfg.5 \
verification.cfg.5 \
prune.cfg.5 \
datastore.cfg.5 \
domains.cfg.5 \
notifications.cfg.5 \
notifications-priv.cfg.5 \
PRUNE_SIMULATOR_FILES := \
prune-simulator/index.html \
prune-simulator/clear-trigger.png \
prune-simulator/documentation.html \
prune-simulator/prune-simulator.js \
prune-simulator/clear-trigger.png \
prune-simulator/prune-simulator.js
PRUNE_SIMULATOR_JS_SOURCE := \
/usr/share/javascript/proxmox-widget-toolkit-dev/Toolkit.js \
@ -91,15 +85,15 @@ API_VIEWER_FILES := \
/usr/share/javascript/proxmox-widget-toolkit-dev/APIViewer.js \
# Sphinx documentation setup
SPHINXOPTS = -E
SPHINXOPTS =
SPHINXBUILD = sphinx-build
BUILDDIR = output
ifeq ($(BUILD_MODE), release)
COMPILEDIR := ../target/$(DEB_HOST_RUST_TYPE)/release
COMPILEDIR := ../target/release
SPHINXOPTS += -t release
else
COMPILEDIR := ../target/$(DEB_HOST_RUST_TYPE)/debug
COMPILEDIR := ../target/debug
SPHINXOPTS += -t devbuild
endif

View File

@ -1,5 +1,3 @@
.. _client_usage:
Backup Client Usage
===================
@ -46,24 +44,6 @@ user\@pbs!token@host:store ``user@pbs!token`` host:8007 store
[ff80::51]:1234:mydatastore ``root@pam`` [ff80::51]:1234 mydatastore
================================ ================== ================== ===========
.. _statically_linked_client:
Statically Linked Backup Client
-------------------------------
A statically linked version of the Proxmox Backup client is available for Linux
based systems where the regular client is not available. Please note that it is
recommended to use the regular client when possible, as the statically linked
client is not a full replacement. For example, name resolution will not be
performed via the mechanisms provided by libc, but uses a resolver written
purely in the Rust programming language. Therefore, features and modules
provided by Name Service Switch cannot be used.
The statically linked client is available via the ``pbs-client`` repository as
described in the :ref:`installation <install_pbc>` section.
.. _environment-variables:
Environment Variables
---------------------
@ -109,43 +89,6 @@ Environment Variables
you can add arbitrary comments after the first newline.
System and Service Credentials
------------------------------
Some of the :ref:`environment variables <environment-variables>` above can be
set using `system and service credentials <https://systemd.io/CREDENTIALS/>`_
instead.
============================ ==============================================
Environment Variable Credential Name Equivalent
============================ ==============================================
``PBS_REPOSITORY`` ``proxmox-backup-client.repository``
``PBS_PASSWORD`` ``proxmox-backup-client.password``
``PBS_ENCRYPTION_PASSWORD`` ``proxmox-backup-client.encryption-password``
``PBS_FINGERPRINT`` ``proxmox-backup-client.fingerprint``
============================ ==============================================
For example, the repository password can be stored in an encrypted file as
follows:
.. code-block:: console
# systemd-ask-password -n | systemd-creds encrypt --name=proxmox-backup-client.password - my-api-token.cred
The credential can then be reused inside of unit files or in a transient scope
unit as follows:
.. code-block:: console
# systemd-run --pipe --wait \
--property=LoadCredentialEncrypted=proxmox-backup-client.password:/full/path/to/my-api-token.cred \
--property=SetCredential=proxmox-backup-client.repository:'my_default_repository' \
proxmox-backup-client ...
Additionally, system credentials (e.g. passed down from the hypervisor to a
virtual machine via SMBIOS type 11) can be loaded on a service via
`LoadCredential=` as described in the manual page ``systemd.exec(5)``.
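As a rough sketch of the mechanism only (not the actual proxmox-backup-client code): systemd exposes credentials passed via ``LoadCredential=`` or ``SetCredential=`` as plain files below the directory named by the ``$CREDENTIALS_DIRECTORY`` environment variable, so a client could read them roughly like this:

.. code-block:: rust

use std::{env, fs, io, path::PathBuf};

/// Read a systemd-provided credential (e.g. "proxmox-backup-client.password").
/// systemd places credentials as plain files below $CREDENTIALS_DIRECTORY.
fn read_credential(name: &str) -> io::Result<Option<String>> {
    let Ok(dir) = env::var("CREDENTIALS_DIRECTORY") else {
        return Ok(None); // service was not started with any credentials
    };
    let path = PathBuf::from(dir).join(name);
    match fs::read_to_string(&path) {
        Ok(value) => Ok(Some(value.trim_end_matches('\n').to_string())),
        Err(err) if err.kind() == io::ErrorKind::NotFound => Ok(None),
        Err(err) => Err(err),
    }
}

fn main() -> io::Result<()> {
    // Fall back to the classic environment variable if no credential is set.
    let password = read_credential("proxmox-backup-client.password")?
        .or_else(|| env::var("PBS_PASSWORD").ok());
    println!("password configured: {}", password.is_some());
    Ok(())
}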
Output Format
-------------
@ -226,7 +169,6 @@ the client. The format is:
<archive-name>.<type>:<source-path>
The ``archive-name`` must contain alphanumerics, hyphens and underscores only.
Common types are ``.pxar`` for file archives and ``.img`` for block
device images. To create a backup of a block device, run the following command:
@ -330,64 +272,13 @@ parameter. For example:
.. code-block:: console
# proxmox-backup-client backup archive-name.pxar:./linux --exclude /usr
# proxmox-backup-client backup.pxar:./linux --exclude /usr
Multiple paths can be excluded like this:
.. code-block:: console
# proxmox-backup-client backup archive-name.pxar:./linux --exclude=/usr --exclude=/rust
.. _client_change_detection_mode:
Change Detection Mode
~~~~~~~~~~~~~~~~~~~~~
File-based backups containing a lot of data can take a long time, as the default
behavior for the Proxmox backup client is to read all data and encode it into a
pxar archive.
The encoded stream is split into variable sized chunks. For each chunk, a digest
is calculated and used to decide whether the chunk needs to be uploaded or can
be indexed without upload, as it is already available on the server (and
therefore deduplicated). If the backed up files are largely unchanged,
re-reading them only to detect that the corresponding chunks do not need to be
uploaded after all is time consuming and undesirable.
The backup client's ``change-detection-mode`` can be switched from default to
``metadata`` based detection to reduce limitations as described above,
instructing the client to avoid re-reading files with unchanged metadata
whenever possible.
When using this mode, instead of the regular pxar archive, the backup snapshot
is stored into two separate files: the ``mpxar`` containing the archive's
metadata and the ``ppxar`` containing a concatenation of the file contents. This
splitting allows for efficient metadata lookups. When creating the backup
archives, the current file metadata is compared to the one looked up in the
previous ``mpxar`` archive. The operational details are explained more in depth
in the :ref:`technical documentation <change-detection-mode-metadata>`.
Setting the ``change-detection-mode`` to ``data`` creates the same split
archive as the ``metadata`` mode, but without using a previous reference,
therefore re-encoding all file payloads. For details of this mode, please see
the :ref:`technical documentation <change-detection-mode-data>`.
.. _client_change_detection_mode_table:
============ ===================================================================
Mode Description
============ ===================================================================
``legacy`` (current default): Encode all files into a self contained pxar
archive.
``data`` Encode all files into a split data and metadata pxar archive.
``metadata`` Encode changed files, reuse unchanged from previous snapshot,
creating a split archive.
============ ===================================================================
The following shows an example for the client invocation with the `metadata`
mode:
.. code-block:: console
# proxmox-backup-client backup archive-name.pxar:./linux --change-detection-mode=metadata
# proxmox-backup-client backup.pxar:./linux --exclude=/usr --exclude=/rust
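Conceptually, the ``metadata`` mode boils down to a per-file decision: if the file's metadata matches the entry found in the previous ``mpxar`` archive, the previously uploaded payload chunks are reused; otherwise the file is re-read and re-encoded. The following is only a minimal sketch of that decision with made-up types, not the client's actual implementation:

.. code-block:: rust

/// Hypothetical, simplified file metadata; the real archive stores more fields.
#[derive(PartialEq)]
struct FileMetadata {
    mtime: i64,
    size: u64,
    mode: u32,
    uid: u32,
    gid: u32,
}

enum EncodeAction {
    /// Metadata unchanged: reuse the payload referenced by the previous .mpxar.
    ReusePayload,
    /// Metadata changed (or the file is new): read and re-encode the contents.
    ReencodePayload,
}

fn decide(current: &FileMetadata, previous: Option<&FileMetadata>) -> EncodeAction {
    match previous {
        Some(prev) if prev == current => EncodeAction::ReusePayload,
        _ => EncodeAction::ReencodePayload,
    }
}

fn main() {
    let prev = FileMetadata { mtime: 1, size: 42, mode: 0o644, uid: 0, gid: 0 };
    let cur = FileMetadata { mtime: 1, size: 42, mode: 0o644, uid: 0, gid: 0 };
    match decide(&cur, Some(&prev)) {
        EncodeAction::ReusePayload => println!("unchanged: reuse previous payload"),
        EncodeAction::ReencodePayload => println!("changed: re-read and re-encode"),
    }
}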
.. _client_encryption:
@ -528,8 +419,6 @@ version of your master key. The following command sends the output of the
proxmox-backup-client key paperkey --output-format text > qrkey.txt
.. _client_restoring_data:
Restoring Data
--------------
@ -841,25 +730,29 @@ Garbage Collection
------------------
The ``prune`` command removes only the backup index files, not the data
from the datastore. Deletion of unused backup data from the datastore is done by
:ref:`garbage collection<_maintenance_gc>`. It is therefore recommended to
schedule garbage collection tasks on a regular basis. The working principle of
garbage collection is described in more details in the related :ref:`background
section <gc_background>`.
from the datastore. This task is left to the garbage collection
command. It is recommended to carry out garbage collection on a regular basis.
To start garbage collection from the client side, run the following command:
.. code-block:: console
# proxmox-backup-client garbage-collect
The garbage collection works in two phases. In the first phase, all
data blocks that are still in use are marked. In the second phase,
unused data blocks are removed.
.. note:: This command needs to read all existing backup index files
and touches the complete chunk-store. This can take a long time
depending on the number of chunks and the speed of the underlying
disks.
The progress of the garbage collection will be displayed as shown in the example
below:
.. note:: The garbage collection will only remove chunks that haven't been used
for at least one day (exactly 24h 5m). This grace period is necessary because
chunks in use are marked by touching the chunk which updates the ``atime``
(access time) property. Filesystems are mounted with the ``relatime`` option
by default. This results in a better performance by only updating the
``atime`` property if the last access has been at least 24 hours ago. The
downside is that touching a chunk within these 24 hours will not always
update its ``atime`` property.
Chunks in the grace period will be logged at the end of the garbage
collection task as *Pending removals*.
.. code-block:: console

View File

@ -44,8 +44,10 @@ web-interface/API or using the ``proxmox-backup-manager`` CLI tool.
Upload Custom Certificate
~~~~~~~~~~~~~~~~~~~~~~~~~
If you already have a certificate which you want to use for a `Proxmox Backup`_
host, you can simply upload that certificate over the web interface.
If you already have a certificate which you want to use for a Proxmox
Mail Gateway host, you can simply upload that certificate over the web
interface.
.. image:: images/screenshots/pbs-gui-certs-upload-custom.png
:target: _images/pbs-gui-certs-upload-custom.png

View File

@ -71,7 +71,7 @@ master_doc = 'index'
# General information about the project.
project = 'Proxmox Backup'
copyright = '2019-2025, Proxmox Server Solutions GmbH'
copyright = '2019-2023, Proxmox Server Solutions GmbH'
author = 'Proxmox Support Team'
# The version info for the project you're documenting acts as a replacement for
@ -108,14 +108,12 @@ man_pages = [
('config/datastore/man5', 'datastore.cfg', 'Datastore Configuration', [author], 5),
('config/domains/man5', 'domains.cfg', 'Realm Configuration', [author], 5),
('config/media-pool/man5', 'media-pool.cfg', 'Media Pool Configuration', [author], 5),
('config/node/man5', 'proxmox-backup.node.cfg', 'Proxmox Backup Server - Node Configuration', [author], 5),
('config/remote/man5', 'remote.cfg', 'Remote Server Configuration', [author], 5),
('config/sync/man5', 'sync.cfg', 'Synchronization Job Configuration', [author], 5),
('config/tape-job/man5', 'tape-job.cfg', 'Tape Job Configuration', [author], 5),
('config/tape/man5', 'tape.cfg', 'Tape Drive and Changer Configuration', [author], 5),
('config/user/man5', 'user.cfg', 'User Configuration', [author], 5),
('config/verification/man5', 'verification.cfg', 'Verification Job Configuration', [author], 5),
('config/prune/man5', 'prune.cfg', 'Prune Job Configuration', [author], 5),
('config/notifications/man5', 'notifications.cfg', 'Notification target/matcher configuration', [author], 5),
('config/notifications-priv/man5', 'notifications-priv.cfg', 'Notification target secrets', [author], 5),
]

View File

@ -1,49 +0,0 @@
The file contains these options:
:acme: The ACME account to use on this node.
:acmedomain0: ACME domain.
:acmedomain1: ACME domain.
:acmedomain2: ACME domain.
:acmedomain3: ACME domain.
:acmedomain4: ACME domain.
:http-proxy: Set proxy for apt and subscription checks.
:email-from: Fallback email from which notifications will be sent.
:ciphers-tls-1.3: List of TLS ciphers for TLS 1.3 that will be used by the proxy. Colon-separated and in descending priority (https://docs.openssl.org/master/man1/openssl-ciphers/). (Proxy has to be restarted for changes to take effect.)
:ciphers-tls-1.2: List of TLS ciphers for TLS <= 1.2 that will be used by the proxy. Colon-separated and in descending priority (https://docs.openssl.org/master/man1/openssl-ciphers/). (Proxy has to be restarted for changes to take effect.)
:default-lang: Default language used in the GUI.
:description: Node description.
:task-log-max-days: Maximum days to keep task logs.
For example:
::
acme: local
acmedomain0: first.domain.com
acmedomain1: second.domain.com
acmedomain2: third.domain.com
acmedomain3: fourth.domain.com
acmedomain4: fifth.domain.com
http-proxy: internal.proxy.com
email-from: proxmox@mail.com
ciphers-tls-1.3: TLS_AES_128_GCM_SHA256:TLS_AES_128_CCM_8_SHA256:TLS_CHACHA20_POLY1305_SHA256
ciphers-tls-1.2: RSA_WITH_AES_128_CCM:DHE_RSA_WITH_AES_128_CCM
default-lang: en
description: Primary PBS instance
task-log-max-days: 30
You can use the ``proxmox-backup-manager node`` command to manipulate
this file.

View File

@ -1,18 +0,0 @@
:orphan:
========
node.cfg
========
Description
===========
The file /etc/proxmox-backup/node.cfg is a configuration file for Proxmox
Backup Server. It contains the general configuration regarding this node.
Options
=======
.. include:: format.rst
.. include:: ../../pbs-copyright.rst

View File

@ -8,7 +8,7 @@ Description
===========
The file /etc/proxmox-backup/notifications-priv.cfg is a configuration file
for Proxmox Backup Server. It contains the configuration for the
for Proxmox Backup Server. It contains the configration for the
notification system configuration.
File Format

View File

@ -8,7 +8,7 @@ Description
===========
The file /etc/proxmox-backup/notifications.cfg is a configuration file
for Proxmox Backup Server. It contains the configuration for the
for Proxmox Backup Server. It contains the configration for the
notification system configuration.
File Format

View File

@ -1,14 +0,0 @@
Each entry starts with the header ``prune: <name>``, followed by the job
configuration options.
::
prune: prune-store2
schedule mon..fri 10:30
store my-datastore
prune: ...
You can use the ``proxmox-backup-manager prune-job`` command to manipulate this
file.

View File

@ -1,23 +0,0 @@
:orphan:
=========
prune.cfg
=========
Description
===========
The file /etc/proxmox-backup/prune.cfg is a configuration file for Proxmox
Backup Server. It contains the prune job configuration.
File Format
===========
.. include:: format.rst
Options
=======
.. include:: config.rst
.. include:: ../../pbs-copyright.rst

View File

@ -7,8 +7,8 @@ verification.cfg
Description
===========
The file /etc/proxmox-backup/verification.cfg is a configuration file for
Proxmox Backup Server. It contains the verification job configuration.
The file /etc/proxmox-backup/sync.cfg is a configuration file for Proxmox
Backup Server. It contains the verification job configuration.
File Format
===========

View File

@ -67,14 +67,6 @@ Options
.. include:: config/media-pool/config.rst
``node.cfg``
~~~~~~~~~~~~~~~~~~
Options
^^^^^^^
.. include:: config/node/format.rst
.. _notifications.cfg:
``notifications.cfg``
@ -91,8 +83,6 @@ Options
.. include:: config/notifications/config.rst
.. _notifications_priv.cfg:
``notifications-priv.cfg``
~~~~~~~~~~~~~~~~~~~~~~~~~~
@ -108,21 +98,6 @@ Options
.. include:: config/notifications-priv/config.rst
``prune.cfg``
~~~~~~~~~~~~~
File Format
^^^^^^^^^^^
.. include:: config/prune/format.rst
Options
^^^^^^^
.. include:: config/prune/config.rst
``tape.cfg``
~~~~~~~~~~~~

View File

@ -1,55 +0,0 @@
External Metric Server
----------------------
Proxmox Backup Server periodically sends various metrics about your host's memory,
network and disk activity to configured external metric servers.
Currently supported are:
* InfluxDB (HTTP) (see https://docs.influxdata.com/influxdb/v2/ )
* InfluxDB (UDP) (see https://docs.influxdata.com/influxdb/v1/ )
The external metric server definitions are saved in
'/etc/proxmox-backup/metricserver.cfg', and can be edited through the web
interface.
.. note::
Using HTTP is recommended as UDP support has been dropped in InfluxDB v2.
InfluxDB (HTTP) plugin configuration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The plugin can be configured to use the HTTP(s) API of InfluxDB 2.x.
InfluxDB 1.8.x contains a forward-compatible API endpoint for this v2 API.
Since InfluxDB's v2 API is only available with authentication, you have
to generate a token that can write into the correct bucket and set it.
In the v2 compatible API of 1.8.x, you can use 'user:password' as token
(if required), and can omit the 'organization' since that has no meaning in InfluxDB 1.x.
You can also set the maximum batch size (default 25000000 bytes) with the
'max-body-size' setting (this corresponds to the InfluxDB setting with the
same name).
InfluxDB (UDP) plugin configuration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Proxmox Backup Server can also send data via UDP. This requires the InfluxDB
server to be configured correctly. The MTU can also be configured here if
necessary.
Here is an example configuration for InfluxDB (on your InfluxDB server):
.. code-block:: console
[[udp]]
enabled = true
bind-address = "0.0.0.0:8089"
database = "proxmox"
batch-size = 1000
batch-timeout = "1s"
With this configuration, the InfluxDB server listens on all IP addresses on
port 8089, and writes the data in the *proxmox* database.
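For illustration only (addresses and metric names are made up, and this is not the server's internal code), a single InfluxDB line-protocol datagram can be sent to such a UDP listener like this:

.. code-block:: rust

use std::net::UdpSocket;

fn main() -> std::io::Result<()> {
    // Hypothetical address of the InfluxDB UDP listener from the example above.
    let influx_addr = "192.0.2.10:8089";

    // One metric in InfluxDB line protocol: measurement, tags, fields.
    // The timestamp is omitted, so the server assigns the receive time.
    let line = "cpustat,host=pbs,object=host user=0.03,system=0.01\n";

    let socket = UdpSocket::bind("0.0.0.0:0")?; // ephemeral local port
    socket.send_to(line.as_bytes(), influx_addr)?;
    Ok(())
}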

View File

@ -8,53 +8,7 @@ Proxmox File Archive Format (``.pxar``)
.. graphviz:: pxar-format-overview.dot
.. _pxar-meta-format:
Proxmox File Archive Format - Meta (``.mpxar``)
-----------------------------------------------
Pxar metadata archive with same structure as a regular pxar archive, with the
exception of regular file payloads not being contained within the archive
itself, but rather being stored as payload references to the corresponding pxar
payload (``.ppxar``) file.
Can be used to look up all the archive entries and metadata without the size
overhead introduced by the file payloads.
.. graphviz:: meta-format-overview.dot
.. _ppxar-format:
Proxmox File Archive Format - Payload (``.ppxar``)
--------------------------------------------------
Pxar payload file storing regular file payloads to be referenced and accessed by
the corresponding pxar metadata (``.mpxar``) archive. Contains a concatenation
of regular file payloads, each prefixed by a `PAYLOAD` header. Further, the
actual referenced payload entries might be separated by padding (full/partial
payloads not referenced), introduced when reusing chunks of a previous backup
run, when chunk boundaries did not align to payload entry offsets.
All headers are stored as little-endian.
.. list-table::
:widths: auto
* - ``PAYLOAD_START_MARKER``
- header of ``[u8; 16]`` consisting of type hash and size;
marks start
* - ``PAYLOAD``
- header of ``[u8; 16]`` consisting of type hash and size;
referenced by metadata archive
* - Payload
- raw regular file payload
* - Padding
- partial/full unreferenced payloads, caused by unaligned chunk boundary
* - ...
- further concatenation of payload header, payload and padding
* - ``PAYLOAD_TAIL_MARKER``
- header of ``[u8; 16]`` consisting of type hash and size;
marks end
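A minimal sketch of how such a 16-byte little-endian header could be parsed (the concrete type-hash constants are defined by the pxar format and are not reproduced here):

.. code-block:: rust

/// Sketch of the 16-byte header described above: an 8-byte type hash
/// followed by an 8-byte size, both stored little-endian.
#[derive(Debug)]
struct PayloadHeader {
    htype: u64, // type hash (e.g. PAYLOAD, PAYLOAD_START_MARKER, ...)
    size: u64,  // size field
}

fn parse_header(raw: &[u8; 16]) -> PayloadHeader {
    let htype = u64::from_le_bytes(raw[0..8].try_into().unwrap());
    let size = u64::from_le_bytes(raw[8..16].try_into().unwrap());
    PayloadHeader { htype, size }
}

fn main() {
    let raw = [0u8; 16]; // placeholder bytes, not a real header
    println!("{:?}", parse_header(&raw));
}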
.. _data-blob-format:
Data Blob Format (``.blob``)

View File

@ -40,16 +40,6 @@ Proxmox Backup Server supports various languages and authentication back ends
.. note:: For convenience, you can save the username on the client side, by
selecting the "Save User name" checkbox at the bottom of the window.
.. _consent_banner:
Consent Banner
^^^^^^^^^^^^^^
A custom consent banner that has to be accepted before login can be configured
in **Configuration -> Other -> General -> Consent Text**. If there is no
content, the consent banner will not be displayed. The text will be stored as a
base64 string in the ``/etc/proxmox-backup/node.cfg`` config file.
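As a rough illustration of how such a value could be produced (assuming the ``base64`` crate; this is not the server's actual code path, and the shown config key name is hypothetical), the consent text is simply base64 encoded before being stored:

.. code-block:: rust

fn main() {
    // base64 = "0.13"
    let consent_text = "Authorized use only."; // example banner text
    let encoded = base64::encode(consent_text);
    println!("consent-text: {encoded}"); // hypothetical node.cfg key name
}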
GUI Overview
------------

Binary files not shown: 8 images (before sizes: 65 KiB, 143 KiB, 153 KiB, 141 KiB, 162 KiB, 164 KiB, 139 KiB, 4.6 KiB).
View File

@ -1,157 +0,0 @@
.. _installation_medium:
Installation Medium
-------------------
Proxmox Backup Server can be installed via
:ref:`different methods <install_pbs>`. The recommended method is to use an
installation medium and simply boot the interactive installer.
Prepare Installation Medium
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Download the installer ISO image from |DOWNLOADS|.
The Proxmox Backup Server installation medium is a hybrid ISO image.
It works in two ways:
- An ISO image file ready to burn to a DVD.
- A raw sector (IMG) image file ready to copy to a USB flash drive (USB stick).
Using a USB flash drive to install Proxmox Backup Server is the
recommended way since it is the faster and more frequently available
option these days.
Prepare a USB Flash Drive as Installation Medium
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The flash drive needs to have at least 2 GB of storage space.
.. note::
Do not use *UNetbootin*. It does not work with the Proxmox Backup
Server installation image.
.. important::
Existing data on the USB flash drive will be overwritten.
Therefore, make sure that it does not contain any data you still need,
and unmount it again before proceeding.
Instructions for GNU/Linux
~~~~~~~~~~~~~~~~~~~~~~~~~~
On Unix-like operating systems use the ``dd`` command to copy the ISO
image to the USB flash drive. First find the correct device name of the
USB flash drive (see below). Then run the ``dd`` command. Depending on
your environment, you will need to have root privileges to execute
``dd``.
.. code-block:: console
# dd bs=1M conv=fdatasync if=./proxmox-backup-server_*.iso of=/dev/XYZ
.. note::
Be sure to replace ``/dev/XYZ`` with the correct device name and adapt
the input filename (*if*) path.
.. caution::
Be very careful, and do not overwrite the wrong disk!
Find the Correct USB Device Name
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
There are two ways to find out the name of the USB flash drive. The
first one is to compare the last lines of the ``dmesg`` command output
before and after plugging in the flash drive. The second way is to
compare the output of the ``lsblk`` command. Open a terminal and run:
.. code-block:: console
# lsblk
Then plug in your USB flash drive and run the command again:
.. code-block:: console
# lsblk
A new device will appear. This is the one you want to use. To be on the
extra safe side check if the reported size matches your USB flash drive.
Instructions for macOS
~~~~~~~~~~~~~~~~~~~~~~
Open the terminal (query *Terminal* in Spotlight).
Convert the ``.iso`` file to ``.dmg`` format using the convert option of
``hdiutil``, for example:
.. code-block:: console
# hdiutil convert proxmox-backup-server_*.iso -format UDRW -o proxmox-backup-server_*.dmg
.. note::
macOS tends to automatically add ``.dmg`` to the output file name.
To get the current list of devices run the command:
.. code-block:: console
# diskutil list
Now insert the USB flash drive and run this command again to determine
which device node has been assigned to it. (e.g., ``/dev/diskX``).
.. code-block:: console
# diskutil list
# diskutil unmountDisk /dev/diskX
.. note::
replace *X* with the disk number from the last command.
.. code-block:: console
# sudo dd if=proxmox-backup-server_*.dmg bs=1M of=/dev/rdiskX
.. note::
*rdiskX*, instead of *diskX*, in the last command is intended. It
will increase the write speed.
Instructions for Windows
~~~~~~~~~~~~~~~~~~~~~~~~
Using Etcher
^^^^^^^^^^^^
Etcher works out of the box. Download Etcher from https://etcher.io. It
will guide you through the process of selecting the ISO and your USB
flash drive.
Using Rufus
^^^^^^^^^^^
Rufus is a more lightweight alternative, but you need to use the **DD
mode** to make it work. Download Rufus from https://rufus.ie/. Either
install it or use the portable version. Select the destination drive
and the downloaded Proxmox ISO file.
.. important::
Once you click *Start*, you have to click *No* on the dialog asking to
download a different version of Grub. In the next dialog select **DD mode**.
Use the Installation Medium
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Insert the created USB flash drive (or DVD) into your server. Continue
by reading the :ref:`installer <using_the_installer>` chapter, which
also describes possible boot issues.

View File

@ -7,9 +7,7 @@ Debian_ from the provided package repository.
.. include:: system-requirements.rst
.. include:: installation-media.rst
.. _install_pbs:
.. include:: package-repositories.rst
Server Installation
-------------------
@ -20,37 +18,44 @@ for various management tasks such as disk management.
.. note:: You always need a backup server. It is not possible to use
Proxmox Backup without the server part.
Using our provided disk image (ISO file) is the recommended
installation method, as it includes a convenient installer, a complete
Debian system as well as all necessary packages for the Proxmox Backup
Server.
The disk image (ISO file) provided by Proxmox includes a complete Debian system
as well as all necessary packages for the Proxmox Backup Server.
Once you have created an :ref:`installation_medium`, the booted
:ref:`installer <using_the_installer>` will guide you through the
setup process. It will help you to partition your disks, apply basic
settings such as the language, time zone and network configuration,
and finally install all required packages within minutes.
The installer will guide you through the setup process and allow
you to partition the local disk(s), apply basic system configuration
(for example timezone, language, network), and install all required packages.
The provided ISO will get you started in just a few minutes, and is the
recommended method for new and existing users.
As an alternative to the interactive installer, advanced users may
wish to install Proxmox Backup Server
:ref:`unattended <install_pbs_unattended>`.
Alternatively, Proxmox Backup Server can be installed on top of an
existing Debian system.
With sufficient Debian knowledge, you can also install Proxmox Backup
Server :ref:`on top of Debian <install_pbs_on_debian>` yourself.
Install `Proxmox Backup`_ Server using the Installer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
While not recommended, Proxmox Backup Server could also be installed
:ref:`on Proxmox VE <install_pbs_on_pve>`.
Download the ISO from |DOWNLOADS|.
It includes the following:
.. include:: using-the-installer.rst
* The Proxmox Backup Server installer, which partitions the local
disk(s) with ext4, xfs or ZFS, and installs the operating system
.. _install_pbs_unattended:
* Complete operating system (Debian Linux, 64-bit)
* Proxmox Linux kernel with ZFS support
* Complete tool-set to administer backups and all necessary resources
* Web based management interface
.. note:: During the installation process, the complete server
is used by default and all existing data is removed.
Install `Proxmox Backup`_ Server Unattended
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
It is possible to install Proxmox Backup Server automatically in an
unattended manner. This enables you to fully automate the setup process on
bare-metal. Once the installation is complete and the host has booted up,
automation tools like Ansible can be used to further configure the installation.
It is possible to install {pve} automatically in an unattended manner. This
enables you to fully automate the setup process on bare-metal. Once the
installation is complete and the host has booted up, automation tools like
Ansible can be used to further configure the installation.
The necessary options for the installer must be provided in an answer file.
This file allows the use of filter rules to determine which disks and network
@ -61,7 +66,6 @@ installation ISO. For more details and information on the unattended
installation see `our wiki
<https://pve.proxmox.com/wiki/Automated_Installation>`_.
.. _install_pbs_on_debian:
Install `Proxmox Backup`_ Server on Debian
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@ -99,8 +103,6 @@ support, and a set of common and useful packages.
your web browser, using HTTPS on port 8007. For example at
``https://<ip-or-dns-name>:8007``
.. _install_pbs_on_pve:
Install Proxmox Backup Server on `Proxmox VE`_
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@ -121,8 +123,6 @@ After configuring the
your web browser, using HTTPS on port 8007. For example at
``https://<ip-or-dns-name>:8007``
.. _install_pbc:
Client Installation
-------------------
@ -138,26 +138,7 @@ you need to run:
# apt update
# apt install proxmox-backup-client
Install Statically Linked Proxmox Backup Client
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Proxmox provides a statically linked build of the Proxmox backup client that
should run on any modern x86-64 Linux system.
.. note:: The client-only repository should be usable by most recent Debian and
Ubuntu derivatives.
It is currently available as a Debian package. After configuring the
:ref:`package_repositories_client_only_apt`, you need to run:
.. code-block:: console
# apt update
# apt install proxmox-backup-client-static
This package conflicts with the `proxmox-backup-client` package, as both
provide the client as an executable in the `/usr/bin/proxmox-backup-client`
path.
You can copy this executable to other, e.g. non-Debian based Linux systems.
For details on using the Proxmox Backup Client, see :ref:`client_usage`.
.. include:: package-repositories.rst

View File

@ -264,7 +264,6 @@ systems with more than 256 GiB of total memory, where simply setting
# update-initramfs -u
.. _zfs_swap:
Swap on ZFS
^^^^^^^^^^^

View File

@ -108,7 +108,7 @@ Ext.define('PageCalibration', {
xtype: 'numberfield',
value: 'a4',
name: 's_x',
fieldLabel: 'Measured Start Offset Sx (mm)',
fieldLabel: 'Meassured Start Offset Sx (mm)',
allowBlank: false,
labelWidth: 200,
},
@ -116,7 +116,7 @@ Ext.define('PageCalibration', {
xtype: 'numberfield',
value: 'a4',
name: 'd_x',
fieldLabel: 'Measured Length Dx (mm)',
fieldLabel: 'Meassured Length Dx (mm)',
allowBlank: false,
labelWidth: 200,
},
@ -124,7 +124,7 @@ Ext.define('PageCalibration', {
xtype: 'numberfield',
value: 'a4',
name: 's_y',
fieldLabel: 'Measured Start Offset Sy (mm)',
fieldLabel: 'Meassured Start Offset Sy (mm)',
allowBlank: false,
labelWidth: 200,
},
@ -132,7 +132,7 @@ Ext.define('PageCalibration', {
xtype: 'numberfield',
value: 'a4',
name: 'd_y',
fieldLabel: 'Measured Length Dy (mm)',
fieldLabel: 'Meassured Length Dy (mm)',
allowBlank: false,
labelWidth: 200,
},

View File

@ -6,34 +6,8 @@ Maintenance Tasks
Pruning
-------
Prune lets you specify which backup snapshots you want to keep, removing others.
When pruning a snapshot, only the snapshot metadata (manifest, indices, blobs,
log and notes) is removed. The chunks containing the actual backup data and
previously referenced by the pruned snapshot, have to be removed by a garbage
collection run.
.. Caution:: Take into consideration that sensitive information stored in a
given data chunk will outlive pruned snapshots and remain present in the
datastore as long as referenced by at least one backup snapshot. Further,
*even* if no snapshot references a given chunk, it will remain present until
removed by the garbage collection.
Moreover, file-level backups created using the change detection mode
``metadata`` can reference backup chunks containing files which have vanished
since the previous backup. These files might still be accessible by reading
the chunks' raw data (client or server side).
To remove chunks containing sensitive data, prune any snapshot made while the
data was part of the backup input and run a garbage collection. Further, if
using file-based backups with change detection mode ``metadata``,
additionally prune all snapshots since the sensitive data was no longer part
of the backup input and run a garbage collection.
The no longer referenced chunks will then be marked for deletion on the next
garbage collection run and removed by a subsequent run after the grace
period.
The following retention options are available for pruning:
Prune lets you specify which backup snapshots you want to keep.
The following retention options are available:
``keep-last <N>``
Keep the last ``<N>`` backup snapshots.
@ -197,8 +171,6 @@ It's recommended to setup a schedule to ensure that unused space is cleaned up
periodically. For most setups a weekly schedule provides a good interval to
start.
.. _gc_background:
GC Background
^^^^^^^^^^^^^
@ -224,31 +196,17 @@ datastore or interfering with other backups.
The garbage collection (GC) process is performed per datastore and is split
into two phases:
- Phase one (Mark):
- Phase one: Mark
All index files are read, and the access time of the referred chunk files is
updated.
All index files are read, and the access time (``atime``) of the referenced
chunk files is updated.
- Phase two (Sweep):
The task iterates over all chunks and checks their file access time against a
cutoff time. The cutoff time is given by either the oldest backup writer
instance, if present, or 24 hours and 5 minutes before the start of the
garbage collection.
Garbage collection considers chunk files with access time older than the
cutoff time to be neither referenced by any backup snapshot's index, nor part
of any currently running backup job. Therefore, these chunks can safely be
deleted.
Chunks within the grace period will not be deleted and logged at the end of
the garbage collection task as *Pending removals*.
.. note:: The grace period for backup chunk removal is not arbitrary, but stems
from the fact that filesystems are typically mounted with the ``relatime``
option by default. This results in better performance by only updating the
``atime`` property if a file has been modified since the last access or the
last access has been at least 24 hours ago.
- Phase two: Sweep
The task iterates over all chunks, checks their file access time, and if it
is older than the cutoff time (i.e., the time when GC started, plus some
headroom for safety and Linux file system behavior), the task knows that the
chunk was neither referred to in any backup index nor part of any currently
running backup that has no index to scan for. As such, the chunk can be
safely deleted.
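Expressed as a minimal sketch (not the actual server code; the real bookkeeping differs), the sweep decision per chunk looks roughly like this:

.. code-block:: rust

use std::time::{Duration, SystemTime};

/// Sketch of the sweep-phase decision described above: a chunk may be removed
/// once its atime is older than the cutoff; otherwise it is counted as a
/// pending removal.
fn chunk_action(
    atime: SystemTime,
    gc_start: SystemTime,
    oldest_writer: Option<SystemTime>,
) -> &'static str {
    let grace = Duration::from_secs(24 * 3600 + 5 * 60); // 24h 5m
    let cutoff = oldest_writer.unwrap_or(gc_start - grace);
    if atime < cutoff {
        "remove" // neither referenced by an index nor part of a running backup
    } else {
        "pending removal" // still inside the grace period
    }
}

fn main() {
    let now = SystemTime::now();
    let old_chunk = now - Duration::from_secs(3 * 24 * 3600);
    println!("{}", chunk_action(old_chunk, now, None)); // prints "remove"
}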
Manually Starting GC
^^^^^^^^^^^^^^^^^^^^
@ -319,10 +277,26 @@ the **Actions** column in the table.
Notifications
-------------
Proxmox Backup Server can send you notifications about automatically
Proxmox Backup Server can send you notification emails about automatically
scheduled verification, garbage-collection and synchronization tasks results.
Refer to the :ref:`notifications` chapter for more details.
By default, notifications are sent to the email address configured for the
`root@pam` user. You can instead set this user for each datastore.
.. image:: images/screenshots/pbs-gui-datastore-options.png
:target: _images/pbs-gui-datastore-options.png
:align: right
:alt: Datastore Options
You can also change the level of notification received per task type, the
following options are available:
* Always: send a notification for any scheduled task, independent of the
outcome
* Errors: send a notification for any scheduled task that results in an error
* Never: do not send any notification at all
.. _maintenance_mode:

View File

@ -69,13 +69,6 @@ sync-job`` command. The configuration information for sync jobs is stored at
in the GUI, or use the ``create`` subcommand. After creating a sync job, you can
either start it manually from the GUI or provide it with a schedule (see
:ref:`calendar-event-scheduling`) to run regularly.
Backup snapshots, groups and namespaces which are no longer available on the
**Remote** datastore can be removed from the local datastore as well by setting
the ``remove-vanished`` option for the sync job.
Setting the ``verified-only`` or ``encrypted-only`` flags allows limiting the
sync jobs to backup snapshots which have been verified or encrypted,
respectively. This is particularly of interest when sending backups to a less
trusted remote backup server.
.. code-block:: console
@ -139,12 +132,6 @@ For mixing include and exclude filter, following rules apply:
.. note:: The ``protected`` flag of remote backup snapshots will not be synced.
Enabling the advanced option 'resync-corrupt' will re-sync all snapshots that have
failed to verify during the last :ref:`maintenance_verification`. Hence, a verification
job needs to be run before a sync job with 'resync-corrupt' can be carried out. Be aware
that a 'resync-corrupt'-job needs to check the manifests of all snapshots in a datastore
and might take much longer than regular sync jobs.
Namespace Support
^^^^^^^^^^^^^^^^^
@ -231,52 +218,9 @@ Bandwidth Limit
Syncing a datastore to an archive can produce a lot of traffic and impact other
users of the network. In order to avoid network or storage congestion, you can
limit the bandwidth of a sync job in pull direction by setting the ``rate-in``
option either in the web interface or using the ``proxmox-backup-manager``
command-line tool:
limit the bandwidth of the sync job by setting the ``rate-in`` option either in
the web interface or using the ``proxmox-backup-manager`` command-line tool:
.. code-block:: console
# proxmox-backup-manager sync-job update ID --rate-in 20MiB
For sync jobs in push direction use the ``rate-out`` option instead.
Sync Direction Push
^^^^^^^^^^^^^^^^^^^
Sync jobs can be configured for pull or push direction. Sync jobs in push
direction do not behave identically, because of the limited access to the
target datastore via the remote server's API. Most notably, pushed content will
always be owned by the user configured in the remote configuration,
independent of the local user configured in the sync job. The latter is used
exclusively for permission and scope checks on the pushing side.
.. note:: It is strongly advised to create a dedicated remote configuration for
each individual sync job in push direction, using a dedicated user on the
remote. Otherwise, sync jobs pushing to the same target might remove each
other's snapshots and/or groups (if the remove-vanished flag is set) or skip
snapshots (if the backup time is not incremental).
This is because the backup groups on the target are owned by the user
given in the remote configuration.
The following permissions are required for a sync job in push direction:
#. ``Remote.Audit`` on ``/remote/{remote}`` and ``Remote.DatastoreBackup`` on
``/remote/{remote}/{remote-store}/{remote-ns}`` path or subnamespace.
#. At least ``Datastore.Read`` and ``Datastore.Audit`` on the local source
datastore namespace (``/datastore/{store}/{ns}``) or ``Datastore.Backup`` if
owner of the sync job.
#. ``Remote.DatastorePrune`` on ``/remote/{remote}/{remote-store}/{remote-ns}``
path to remove vanished snapshots and groups. Make sure to use a dedicated
remote for each sync job in push direction as noted above.
#. ``Remote.DatastoreModify`` on ``/remote/{remote}/{remote-store}/{remote-ns}``
path to remove vanished namespaces. A remote user with limited access should
be used on the remote backup server instance. Consider the implications as
noted below.
.. note:: ``Remote.DatastoreModify`` will allow to remove whole namespaces on the
remote target datastore, independent of ownership. Make sure the user as
configured in remote.cfg has limited permissions on the remote side.
.. note:: Sync jobs in push direction require namespace support on the remote
Proxmox Backup Server instance (minimum version 2.2).

View File

@ -1,50 +0,0 @@
digraph g {
graph [
rankdir = "LR"
fontname="Helvetica"
];
node [
fontsize = "16"
shape = "record"
];
edge [
];
"archive" [
label = "archive.mpxar"
shape = "record"
];
"rootdir" [
label = "<fv>FORMAT_VERSION\l|PRELUDE\l|<f0>ENTRY\l|\{XATTR\}\* extended attribute list\l|\{ACL_USER\}\* USER ACL entries\l|\{ACL_GROUP\}\* GROUP ACL entries\l|\[ACL_GROUP_OBJ\] the ACL_GROUP_OBJ \l|\[ACL_DEFAULT\] the various default ACL fields\l|\{ACL_DEFAULT_USER\}\* USER ACL entries\l|\{ACL_DEFAULT_GROUP\}\* GROUP ACL entries\l|\[FCAPS\] file capability in Linux disk format\l|\[QUOTA_PROJECT_ID\] the ext4/xfs quota project ID\l|{<pl> PAYLOAD_REF|SYMLINK|DEVICE|{<de> \{DirectoryEntries\}\*|GOODBYE}}"
shape = "record"
];
"entry" [
label = "<f0> size: u64 = 64\l|type: u64 = ENTRY\l|feature_flags: u64\l|mode: u64\l|flags: u64\l|uid: u64\l|gid: u64\l|mtime: u64\l"
labeljust = "l"
shape = "record"
];
"direntry" [
label = "<f0> FILENAME\l|{ENTRY\l|HARDLINK\l}"
shape = "record"
];
"payloadrefentry" [
label = "<f0> offset: u64\l|size: u64\l"
shape = "record"
];
"archive" -> "rootdir":fv
"rootdir":f0 -> "entry":f0
"rootdir":de -> "direntry":f0
"rootdir":pl -> "payloadrefentry":f0
}

View File

@ -1,34 +1,38 @@
.. _notifications:
Notifications
=============
Overview
--------
* Proxmox Backup Server emits :ref:`notification_events` in case of noteworthy
events in the system. These events are handled by the notification system. A
notification event has metadata, for example a timestamp, a severity level, a
type and other metadata fields.
* :ref:`notification_matchers` route a notification event to one or more
notification targets. A matcher can have match rules to selectively route
based on the metadata of a notification event.
* :ref:`notification_targets` are a destination to which a notification event
is routed to by a matcher. There are multiple types of target, mail-based
(Sendmail and SMTP) and Gotify.
Proxmox Backup Server will send notifications in case of noteworthy
events.
Datastores and tape backup jobs have a configurable :ref:`notification_mode`.
It allows you to choose between the notification system and a legacy mode for
sending notification emails. The legacy mode is equivalent to the way
notifications were handled before Proxmox Backup Server 3.2.
There are a number of different :ref:`Notification Events`,
each with their own set of metadata fields that can be used in
notification matchers.
The notification system can be configured in the GUI under *Configuration →
Notifications*. The configuration is stored in :ref:`notifications.cfg` and
:ref:`notifications_priv.cfg` - the latter contains sensitive configuration
options such as passwords or authentication tokens for notification targets and
A notification matcher determines *which* notifications shall be sent *where*.
A matcher has *match rules*, that can be used to
match on certain notification properties (e.g. timestamp, severity,
metadata fields).
If a matcher matches a notification, the notification will be routed
to a configured set of notification targets.
A notification target is an abstraction for a destination where a
notification should be sent to - for instance a Gotify server instance,
or a set of email addresses.
There are multiple types of notification targets, including
sendmail, which uses the system's sendmail command to send emails,
or gotify, which sends a notification to a Gotify instance.
The notification system can be configured in the GUI under
``Configuration -> Notifications``. The configuration is stored in
``/etc/proxmox-backup/notifications.cfg`` and
``/etc/proxmox-backup/notifications-priv.cfg`` -
the latter contains sensitive configuration options such as
passwords or authentication tokens for notification targets and
can only be read by ``root``.
.. _notification_targets:
Notification Targets
--------------------
@ -40,23 +44,22 @@ Proxmox Backup Server offers multiple types of notification targets.
Sendmail
^^^^^^^^
The sendmail binary is a program commonly found on Unix-like operating systems
that handles the sending of email messages. It is a command-line utility that
allows users and applications to send emails directly from the command line or
from within scripts.
that handles the sending of email messages.
It is a command-line utility that allows users and applications to send emails
directly from the command line or from within scripts.
The sendmail notification target uses the ``sendmail`` binary to send emails to
a list of configured users or email addresses. If a user is selected as a
recipient, the email address configured in user's settings will be used. For
the ``root@pam`` user, this is the email address entered during installation. A
user's email address can be configured in ``Configuration → Access Control →
User Management``. If a user has no associated email address, no email will be
sent.
The sendmail notification target uses the ``sendmail`` binary to send emails to a
list of configured users or email addresses. If a user is selected as a recipient,
the email address configured in user's settings will be used.
For the ``root@pam`` user, this is the email address entered during installation.
A user's email address can be configured in ``Configuration -> Access Control -> User Management``.
If a user has no associated email address, no email will be sent.
.. NOTE:: In standard Proxmox Backup Server installations, the ``sendmail``
binary is provided by Postfix. It may be necessary to configure Postfix so
that it can deliver mails correctly - for example by setting an external
mail relay (smart host). In case of failed delivery, check the system logs
for messages logged by the Postfix daemon.
.. NOTE:: In standard Proxmox Backup Server installations, the ``sendmail`` binary is provided by
Postfix. It may be necessary to configure Postfix so that it can deliver
mails correctly - for example by setting an external mail relay (smart host).
In case of failed delivery, check the system logs for messages logged by
the Postfix daemon.
See :ref:`notifications.cfg` for all configuration options.
@ -64,13 +67,13 @@ See :ref:`notifications.cfg` for all configuration options.
SMTP
^^^^
SMTP notification targets can send emails directly to an SMTP mail relay. This
target does not use the system's MTA to deliver emails. Similar to sendmail
targets, if a user is selected as a recipient, the user's configured email
address will be used.
SMTP notification targets can send emails directly to an SMTP mail relay.
This target does not use the system's MTA to deliver emails.
Similar to sendmail targets, if a user is selected as a recipient, the user's configured
email address will be used.
.. NOTE:: Unlike sendmail targets, SMTP targets do not have any queuing/retry
mechanism in case of a failed mail delivery.
.. NOTE:: Unlike sendmail targets, SMTP targets do not have any queuing/retry mechanism
in case of a failed mail delivery.
See :ref:`notifications.cfg` for all configuration options.
@ -78,139 +81,32 @@ See :ref:`notifications.cfg` for all configuration options.
Gotify
^^^^^^
`Gotify <http://gotify.net>`_ is an open-source self-hosted notification server
that allows you to send push notifications to various devices and applications.
It provides a simple API and web interface, making it easy to integrate with
different platforms and services.
.. NOTE:: Gotify targets will respect the HTTP proxy settings from
Configuration → Other → HTTP proxy
`Gotify <http://gotify.net>`_ is an open-source self-hosted notification server that
allows you to send push notifications to various devices and
applications. It provides a simple API and web interface, making it easy to
integrate with different platforms and services.
See :ref:`notifications.cfg` for all configuration options.
.. _notification_targets_webhook:
Webhook
^^^^^^^
Webhook notification targets perform HTTP requests to a configurable URL.
The following configuration options are available:
* ``url``: The URL to which to perform the HTTP requests. Supports templating
to inject message contents, metadata and secrets.
* ``method``: HTTP Method to use (POST/PUT/GET)
* ``header``: Array of HTTP headers that should be set for the request.
Supports templating to inject message contents, metadata and secrets.
* ``body``: HTTP body that should be sent. Supports templating to inject
message contents, metadata and secrets.
* ``secret``: Array of secret key-value pairs. These will be stored in a
protected configuration file only readable by root. Secrets can be
accessed in body/header/URL templates via the ``secrets`` namespace.
* ``comment``: Comment for this target.
For configuration options that support templating, the `Handlebars
<https://handlebarsjs.com>`_ syntax can be used to access the following
properties:
* ``{{ title }}``: The rendered notification title
* ``{{ message }}``: The rendered notification body
* ``{{ severity }}``: The severity of the notification (``info``, ``notice``,
``warning``, ``error``, ``unknown``)
* ``{{ timestamp }}``: The notification's timestamp as a UNIX epoch (in
seconds).
* ``{{ fields.<name> }}``: Sub-namespace for any metadata fields of the
notification. For instance, ``fields.type`` contains the notification
type - for all available fields refer to :ref:`notification_events`.
* ``{{ secrets.<name> }}``: Sub-namespace for secrets. For instance, a secret
named ``token`` is accessible via ``secrets.token``.
For convenience, the following helpers are available:
* ``{{ url-encode <value/property> }}``: URL-encode a property/literal.
* ``{{ escape <value/property> }}``: Escape any control characters that cannot
be safely represented as a JSON string.
* ``{{ json <value/property> }}``: Render a value as JSON. This can be useful
to pass a whole sub-namespace (e.g. ``fields``) as a part of a JSON payload
(e.g. ``{{ json fields }}``).
.. NOTE:: Webhook targets will respect the HTTP proxy settings from
Configuration → Other → HTTP proxy
Example - ntfy.sh
"""""""""""""""""
* Method: ``POST``
* URL: ``https://ntfy.sh/{{ secrets.channel }}``
* Headers:
* ``Markdown``: ``Yes``
* Body::
```
{{ message }}
```
* Secrets:
* ``channel``: ``<your ntfy.sh channel>``
Example - Discord
"""""""""""""""""
* Method: ``POST``
* URL: ``https://discord.com/api/webhooks/{{ secrets.token }}``
* Headers:
* ``Content-Type``: ``application/json``
* Body::
{
"content": "``` {{ escape message }}```"
}
* Secrets:
* ``token``: ``<token>``
Example - Slack
"""""""""""""""
* Method: ``POST``
* URL: ``https://hooks.slack.com/services/{{ secrets.token }}``
* Headers:
* ``Content-Type``: ``application/json``
* Body::
{
"text": "``` {{escape message}}```",
"type": "mrkdwn"
}
* Secrets:
* ``token``: ``<token>``
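For configuration options supporting templating, the rendering is plain Handlebars. Below is a minimal sketch using the Rust ``handlebars`` and ``serde_json`` crates; the values are made up, and the PBS-specific helpers such as ``escape``, ``url-encode`` and ``json`` are not registered in this sketch:

.. code-block:: rust

use handlebars::Handlebars;
use serde_json::json;

fn main() -> Result<(), handlebars::RenderError> {
    let hb = Handlebars::new();

    // Made-up notification data; the server fills these from the actual event.
    let data = json!({
        "title": "Garbage collection failed",
        "message": "datastore store1: ...",
        "severity": "error",
        "fields": { "type": "gc", "datastore": "store1" },
        "secrets": { "channel": "my-channel" },
    });

    // Same templating syntax as in the webhook examples above.
    let url = hb.render_template("https://ntfy.sh/{{ secrets.channel }}", &data)?;
    let body = hb.render_template("{{ severity }}: {{ title }}\n{{ message }}", &data)?;

    println!("{url}\n{body}");
    Ok(())
}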
.. _notification_matchers:
Notification Matchers
---------------------
Notification matchers route notifications to notification targets based on
their matching rules. These rules can match certain properties of a
notification, such as the timestamp (``match-calendar``), the severity of the
notification (``match-severity``) or metadata fields (``match-field``). If a
notification is matched by a matcher, all targets configured for the matcher
will receive the notification.
Notification matchers route notifications to notification targets based
on their matching rules. These rules can match certain properties of a
notification, such as the timestamp (``match-calendar``), the severity of
the notification (``match-severity``) or metadata fields (``match-field``).
If a notification is matched by a matcher, all targets configured for the
matcher will receive the notification.
An arbitrary number of matchers can be created, each with their own
matching rules and targets to notify. Every target is notified at most once for
every notification, even if the target is used in multiple matchers.
matching rules and targets to notify.
Every target is notified at most once for every notification, even if
the target is used in multiple matchers.
A matcher without rules matches any notification; the configured targets will
always be notified.
A matcher without rules matches any notification; the configured targets
will always be notified.
See :ref:`notifications.cfg` for all configuration options.
@ -227,24 +123,20 @@ Examples:
Field Matching Rules
^^^^^^^^^^^^^^^^^^^^
Notifications have a selection of metadata fields that can be matched. When
using ``exact`` as a matching mode, a ``,`` can be used as a separator. The
matching rule then matches if the metadata field has **any** of the specified
Notifications have a selection of metadata fields that can be matched.
When using ``exact`` as a matching mode, a ``,`` can be used as a separator.
The matching rule then matches if the metadata field has **any** of the specified
values.
Examples:
* ``match-field exact:type=gc`` Only match notifications for garbage collection
jobs
* ``match-field exact:type=prune,verify`` Match prune job and verification job
notifications.
* ``match-field regex:datastore=^backup-.*$`` Match any datastore starting with
``backup``.
If a notification does not have the matched field, the rule will **not** match.
For instance, a ``match-field regex:datastore=.*`` directive will match any
notification that has a ``datastore`` metadata field, but will not match if the
field does not exist.
Severity Matching Rules
^^^^^^^^^^^^^^^^^^^^^^^
@ -258,14 +150,14 @@ Examples:
The following severities are in use:
``info``, ``notice``, ``warning``, ``error``, ``unknown``.
.. _notification_events:
.. _Notification Events:
Notification Events
-------------------
The following table contains a list of all notification events in Proxmox
Backup server, their type, severity and additional metadata fields. ``type`` as
well as any other metadata field may be used in ``match-field`` match rules.
================================ ==================== ========== ==============================================================
Event ``type`` Severity Metadata fields (in addition to ``type``)
@ -285,8 +177,8 @@ Verification job failure ``verification`` ``error`` ``datastore``,
Verification job success ``verification`` ``info`` ``datastore``, ``hostname``, ``job-id``
================================ ==================== ========== ==============================================================
The following table contains a description of all used metadata fields. All of
these can be used in ``match-field`` match rules.
==================== ===================================
Metadata field Description
@ -303,86 +195,19 @@ Metadata field Description
System Mail Forwarding
----------------------
Certain local system daemons, such as ``smartd``, send notification emails to
the local ``root`` user. Proxmox Backup Server will feed these mails into the
notification system as a notification of type ``system-mail`` and with severity
``unknown``.
When the email is forwarded to a sendmail target, the mail's content and
headers are forwarded as-is. For all other targets, the system tries to extract
both a subject line and the main text body from the email content. In instances
where emails solely consist of HTML content, they will be transformed into
plain text format during this process.
Permissions
-----------
In order to modify/view the configuration for notification targets, the
``Sys.Modify/Sys.Audit`` permissions are required for the
``/system/notifications`` ACL node.
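For example, these privileges can be granted by assigning a role that contains
them on that ACL path. A sketch, assuming the built-in ``Admin`` role and a
placeholder user:

.. code-block:: console

  # proxmox-backup-manager acl update /system/notifications Admin --auth-id john@pbs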
.. _notification_mode:
Notification Mode
-----------------
Datastores and tape backup/restore job configuration have a
``notification-mode`` option which can have one of two values:
* ``legacy-sendmail``: Send notification emails via the system's ``sendmail``
command. The notification system will be bypassed and any configured
targets/matchers will be ignored. This mode is equivalent to the notification
behavior of versions before Proxmox Backup Server 3.2.
* ``notification-system``: Use the new, flexible notification system.
If the ``notification-mode`` option is not set, Proxmox Backup Server will
default to ``legacy-sendmail``.
Starting with Proxmox Backup Server 3.2, a datastore created in the UI will
automatically opt in to the new notification system. If the datastore is
created via the API or the ``proxmox-backup-manager`` CLI, the
``notification-mode`` option has to be set explicitly to
``notification-system`` if the notification system shall be used.
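For example, to opt an existing datastore in to the new system from the command
line (assuming the option is exposed as ``--notification-mode``, analogous to
the other datastore options used in this documentation):

.. code-block:: console

  # proxmox-backup-manager datastore update <storename> --notification-mode notification-system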
The ``legacy-sendmail`` mode might be removed in a later release of
Proxmox Backup Server.
Settings for ``legacy-sendmail`` notification mode
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
If ``notification-mode`` is set to ``legacy-sendmail``, Proxmox Backup Server
will send notification emails via the system's ``sendmail`` command to the
email address configured for the user set in the ``notify-user`` option
(falling back to ``root@pam`` if not set).
For datastores, you can also change the level of notifications received per
task type via the ``notify`` option.
* Always: send a notification for any scheduled task, independent of the
outcome
* Errors: send a notification for any scheduled task that results in an error
* Never: do not send any notification at all
The ``notify-user`` and ``notify`` options are ignored if ``notification-mode``
is set to ``notification-system``.
Overriding Notification Templates
---------------------------------
Proxmox Backup Server uses Handlebars templates to render notifications. The
original templates provided by Proxmox Backup Server are stored in
``/usr/share/proxmox-backup/templates/default/``.
Notification templates can be overridden by providing a custom template file in
the override directory at
``/etc/proxmox-backup/notification-templates/default/``. When rendering a
notification of a given type, Proxmox Backup Server will first attempt to load
a template from the override directory. If this one does not exist or fails to
render, the original template will be used.
The template files follow the naming convention of
``<type>-<body|subject>.txt.hbs``. For instance, the file
``gc-err-body.txt.hbs`` contains the template for rendering notifications for
garbage collection errors, while ``package-updates-subject.txt.hbs`` is used to
render the subject line of notifications for available package updates.
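For example, to start overriding the garbage collection error template, the
shipped default can be copied into the override directory and edited there
(paths as given above):

.. code-block:: console

  # mkdir -p /etc/proxmox-backup/notification-templates/default
  # cp /usr/share/proxmox-backup/templates/default/gc-err-body.txt.hbs \
       /etc/proxmox-backup/notification-templates/default/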

View File

@ -149,7 +149,7 @@ Currently there's only a client-repository for APT based systems.
.. _package_repositories_client_only_apt:
APT-based Proxmox Backup Client Repository
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
For modern Linux distributions using `apt` as package manager, like all Debian
and Ubuntu derivatives do, you may be able to use the APT-based repository.

View File

@ -126,8 +126,7 @@ Ext.onReady(function() {
if (data.mark !== 'keep') {
return `<div style="text-decoration: line-through;">${text}</div>`;
}
let pruneList = this.up('prunesimulatorPruneList');
if (pruneList.useColors) {
if (me.useColors) {
let bgColor = COLORS[data.keepName];
let textColor = TEXT_COLORS[data.keepName];
return `<div style="background-color: ${bgColor};color: ${textColor};">${text}</div>`;
@ -354,17 +353,12 @@ Ext.onReady(function() {
specValues.forEach(function(value) {
if (value.includes('..')) {
let [start, end] = value.split('..');
let step = 1;
if (end.includes('/')) {
[end, step] = end.split('/');
step = assertValid(step);
}
start = assertValid(start);
end = assertValid(end);
if (start > end) {
throw "interval start is bigger than interval end '" + start + " > " + end + "'";
}
for (let i = start; i <= end; i += step) {
for (let i = start; i <= end; i++) {
matches[i] = 1;
}
} else if (value.includes('/')) {

View File

@ -165,74 +165,6 @@ following command creates a new datastore called ``store1`` on
# proxmox-backup-manager datastore create store1 /backup/disk1/store1
Removable Datastores
^^^^^^^^^^^^^^^^^^^^
Removable datastores have a ``backing-device`` associated with them; they can
be mounted and unmounted. Other than that, they behave the same way a normal
datastore would.
They can be created on already correctly formatted partitions, which should be
either ``ext4`` or ``xfs`` as with normal datastores, but most modern file
systems supported by the Proxmox Linux kernel should work.
.. note:: FAT-based file systems do not support the POSIX file ownership
concept and have relatively low limits on the number of files per directory.
Therefore, creating a datastore is not supported on FAT file systems.
Because some external drives are preformatted with such a FAT-based file
system, you may need to reformat the drive before you can use it as a
backing-device for a removable datastore.
It is also possible to create them on completely unused disks through
"Administration" > "Disks / Storage" > "Directory". Using this method, the disk
will be partitioned and formatted automatically for the datastore.
Devices with only one datastore on them will be mounted automatically. Unmounting has
to be done through the UI by clicking "Unmount" on the summary page or using the CLI.
If unmounting fails, the reason is logged in the unmount task log, and the
datastore will stay in maintenance mode ``unmounting``, which prevents any IO
operations. In such cases, the maintenance mode has to be reset manually using:
.. code-block:: console
# proxmox-backup-manager datastore update <storename> --maintenance-mode offline
to prevent any IO, or to clear it use:
.. code-block:: console
# proxmox-backup-manager datastore update <storename> --delete maintenance-mode
A single device can house multiple datastores; the only limitation is that they
are not allowed to be nested.
Removable datastores are created on the device under the relative path that is
specified on creation. In order to use a datastore on multiple PBS instances, it
has to be created on one and added with ``Reuse existing datastore`` checked on
the others. The path set on creation identifies a datastore on the device, so
when adding it on a new PBS instance, the path has to match what was set on
creation.
.. code-block:: console
# proxmox-backup-manager datastore unmount store1
Both this command and the "Unmount" button in the UI wait for any running tasks
to finish before unmounting the device.
All removable datastores are mounted under /mnt/datastore/<name>, and the specified path
refers to the path on the device.
All datastores present on a device can be listed using ``proxmox-backup-debug``.
.. code-block:: console
# proxmox-backup-debug inspect device /dev/...
Verify, Prune and Garbage Collection jobs are skipped if the removable
datastore is not mounted when they are scheduled. Sync jobs start, but fail
with an error saying the datastore was not mounted. This is intentional: syncs
that do not happen as scheduled should at least be noticeable.
Managing Datastores
^^^^^^^^^^^^^^^^^^^
@ -382,7 +314,7 @@ Options
There are a few per-datastore options:
* :ref:`Notification mode and legacy notification settings <notification_mode>`
* :ref:`Notifications <maintenance_notification>`
* :ref:`Maintenance Mode <maintenance_mode>`
* Verification of incoming backups
@ -435,29 +367,10 @@ There are some tuning related options for the datastore that are more advanced:
This can be set with:
.. code-block:: console
# proxmox-backup-manager datastore update <storename> --tuning 'sync-level=filesystem'
* ``gc-atime-safety-check``: Datastore GC atime update safety check:
You can explicitly `enable` or `disable` the atime update safety check
performed on datastore creation and garbage collection. This checks if atime
updates are handled as expected by garbage collection and therefore avoids the
risk of data loss caused by unexpected filesystem behavior. It is recommended
to keep this enabled, which is also the default value.
* ``gc-atime-cutoff``: Datastore GC atime cutoff for chunk cleanup:
This allows setting the cutoff for which a chunk is still considered in-use
during phase 2 of garbage collection (given no older writers). If the
``atime`` of the chunk is outside the range, it will be removed.
* ``gc-cache-capacity``: Datastore GC least recently used cache capacity:
Allows controlling the cache capacity used to keep track of chunks for which
the access time has already been updated during phase 1 of garbage collection.
This avoids multiple updates and increases GC runtime performance. Higher
values can reduce GC runtime at the cost of increased memory usage; setting the
value to 0 disables caching.
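For example, caching could be disabled by setting the capacity to zero (the
store name is a placeholder, command form as shown above):

.. code-block:: console

  # proxmox-backup-manager datastore update <storename> --tuning 'gc-cache-capacity=0'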
If you want to set multiple tuning options simultaneously, you can separate them
with a comma, like this:
@ -506,7 +419,7 @@ remote-source to avoid that an attacker that took over the source can cause
deletions of backups on the target hosts.
If the source host becomes the victim of a ransomware attack, there is a good chance
that sync jobs will fail, triggering an :ref:`error notification
<Notification Events>`.
<maintenance_notification>`.
It is also possible to create :ref:`tape backups <tape_backup>` as a second
storage medium. This way, you get an additional copy of your data on a

View File

@ -30,8 +30,6 @@ please refer to the standard Debian documentation.
.. include:: certificate-management.rst
.. include:: external-metric-server.rst
.. include:: services.rst
.. include:: command-line-tools.rst

View File

@ -6,8 +6,6 @@ production. To further decrease the impact of a failed host, you can set up
periodic, efficient, incremental :ref:`datastore synchronization <syncjobs>`
from other Proxmox Backup Server instances.
.. _minimum_system_requirements:
Minimum Server Requirements, for Evaluation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@ -40,8 +38,7 @@ Recommended Server System Requirements
* Backup storage:
* Prefer fast storage that delivers high IOPS for random IO workloads; use
only enterprise SSDs for best results.
* If HDDs are used: Using a metadata cache is highly recommended, for example,
add a ZFS :ref:`special device mirror <local_zfs_special_device>`.

View File

@ -61,7 +61,6 @@ In general, LTO tapes offer the following advantages:
Note that `Proxmox Backup Server` already stores compressed data, so using the
tape compression feature has no advantage.
.. _tape-supported-hardware:
Supported Hardware
------------------
@ -970,8 +969,6 @@ You can restore from a tape even without an existing catalog, but only the
whole media set. If you do this, the catalog will be automatically created.
.. _tape_key_management:
Encryption Key Management
~~~~~~~~~~~~~~~~~~~~~~~~~
@ -1183,159 +1180,3 @@ In combination with fitting prune settings and tape backup schedules, this
achieves long-term storage of some backups, while keeping the recent
backups on smaller media sets that expire roughly every 4 weeks (that is, three
plus the current week).
Disaster Recovery
-----------------
.. _Command-line Tools: command-line-tools.html
In case of major disasters, important data, or even whole servers might be
destroyed or at least damaged up to the point where everything - sometimes
including the backup server - has to be restored from a backup. For such cases,
the following step-by-step guide will help you to set up the Proxmox Backup
Server and restore everything from tape backups.
The following guide will explain the necessary steps using both the web GUI and
the command line tools. For an overview of the command line tools, see
`Command-line Tools`_.
Setting Up a Datastore
~~~~~~~~~~~~~~~~~~~~~~
.. _proxmox-backup-manager: proxmox-backup-manager/man1.html
.. _Installation: installation.html
After you set up a new Proxmox Backup Server, as outlined in the `Installation`_
chapter, first set up a datastore so a tape can be restored to it:
#. Go to **Administration -> Storage / Disks** and make sure that the disk that
will be used as a datastore shows up.
#. Under the **Directory** or **ZFS** tabs, you can either choose to create a
directory or create a ZFS ``zpool``, respectively. Here you can also directly
add the newly created directory or ZFS ``zpool`` as a datastore.
Alternatively, the `proxmox-backup-manager`_ can be used to perform the same
tasks. For more information, check the :ref:`datastore_intro` documentation.
Setting Up the Tape Drive
~~~~~~~~~~~~~~~~~~~~~~~~~
#. Make sure you have a properly working tape drive and/or changer matching the
medium you want to restore from.
#. Connect the tape changer(s) and the tape drive(s) to the backup server. These
should be detected automatically by Linux. You can get a list of available
drives using:
.. code-block:: console
# proxmox-tape drive scan
┌────────────────────────────────┬────────┬─────────────┬────────┐
│ path │ vendor │ model │ serial │
╞════════════════════════════════╪════════╪═════════════╪════════╡
│ /dev/tape/by-id/scsi-12345-sg │ IBM │ ULT3580-TD4 │ 12345 │
└────────────────────────────────┴────────┴─────────────┴────────┘
You can get a list of available changers with:
.. code-block:: console
# proxmox-tape changer scan
┌─────────────────────────────┬─────────┬──────────────┬────────┐
│ path │ vendor │ model │ serial │
╞═════════════════════════════╪═════════╪══════════════╪════════╡
│ /dev/tape/by-id/scsi-CC2C52 │ Quantum │ Superloader3 │ CC2C52 │
└─────────────────────────────┴─────────┴──────────────┴────────┘
For more information, please read the chapters
on :ref:`tape_changer_config` and :ref:`tape_drive_config`.
#. If you have a tape changer, go to the web interface of the Proxmox Backup
Server, go to **Tape Backup -> Changers** and add it. For examples using the
command line, read the chapter on :ref:`tape_changer_config`. If the changer
has been detected correctly by Linux, the changer should show up in the list.
#. In the web interface, go to **Tape Backup -> Drives** and add the tape drive
that will be used to read the tapes. For examples using the command line,
read the chapter on :ref:`tape_drive_config`. If the tape drive has been
detected correctly by Linux, the drive should show up in the list. If the
drive also has a tape changer, make sure to select the changer as well and
assign it the correct drive number.
Restoring Data From the Tape
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. _proxmox-tape: proxmox-tape/man1.html
.. _proxmox-backup-client: proxmox-backup-client/man1.html
.. _Restore: https://pve.proxmox.com/pve-docs/chapter-vzdump.html#vzdump_restore
The following guide will explain the steps necessary to restore data from a
tape, which can be done over either the web GUI or the command line. For details
on the command line, read the documentation on the `proxmox-tape`_ tool.
To restore data from tapes, do the following:
#. Insert the first tape (as displayed on the label) into the tape drive or, if
a tape changer is available, use the tape changer to insert the tape into the
right drive. The web GUI can also be used to load or transfer tapes between
tape drives by selecting the changer.
#. If the backup has been encrypted, the encryption keys need to be restored as
well. In the **Encryption Keys** tab, press **Restore Key**. For more
details or examples that use the command line, read the
:ref:`tape_key_management` chapter.
#. The procedure for restoring data is slightly different depending on whether
you are using a standalone tape drive or a changer:
* For changers, the procedure is simple:
#. Insert all tapes from the media set you want to restore from.
#. Click on the changer in the web GUI, click **Inventory**, make sure
**Restore Catalog** is selected and press OK.
* For standalone drives, the procedure would be:
#. Insert the first tape of the media set.
#. Click **Catalog**.
#. Eject the tape, then repeat the steps for the remaining tapes of the
media set.
#. Go back to **Tape Backup**. In the **Content** tab, press **Restore** and
select the desired media set. Choose the snapshot you want to restore, press
**Next**, select the drive and target datastore and press **Restore**.
#. By going to the datastore where the data has been restored, under the
**Content** tab you should be able to see the restored snapshots. In order to
access the backups from another machine, you will need to configure the
access to the backup server. Go to **Configuration -> Access Control** and
either create a new user, or a new API token (API tokens allow easy
revocation if the token is compromised). Under **Permissions**, add the
desired permissions, e.g. **DatastoreBackup**.
#. You can now perform virtual machine, container or file restores. You now have
the following options:
* If you want to restore files on Linux distributions that are not based on
Proxmox products or you prefer using a command line tool, you can use the
`proxmox-backup-client`_, as explained in the
:ref:`client_restoring_data` chapter. Use the newly created API token to
be able to access the data. You can then restore individual files or
mount an archive to your system.
* If you want to restore virtual machines or containers on a Proxmox VE
server, add the datastore of the backup server as storage and go to
**Backups**. Here you can restore VMs and containers, including their
configuration. For more information on restoring backups in Proxmox VE,
visit the `Restore`_ chapter of the Proxmox VE documentation.

View File

@ -28,9 +28,6 @@ which are not chunked, e.g. the client log), or one or more indexes
When uploading an index, the client first has to read the source data, chunk it
and send the data as chunks with their identifying checksum to the server.
When using the :ref:`change detection mode <change_detection_mode>` ``metadata``, payload
chunks for unchanged files are reused from the previous snapshot, thereby not
reading the source data again.
If there is a previous Snapshot in the backup group, the client can first
download the chunk list of the previous Snapshot. If it detects a chunk that
@ -56,9 +53,8 @@ The chunks of a datastore are found in
<datastore-root>/.chunks/
This chunk directory is further subdivided into directories grouping chunks by
their checksum's 2-byte prefix (given as 4 hexadecimal digits), so a chunk with
the checksum
a342e8151cbf439ce65f3df696b54c67a114982cc0aa751f2852c2f7acc19a8b
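is stored in a prefix directory named after those first four hexadecimal
digits. As an illustration of the resulting layout::

  <datastore-root>/.chunks/a342/a342e8151cbf439ce65f3df696b54c67a114982cc0aa751f2852c2f7acc19a8b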
@ -134,141 +130,6 @@ This is done to speed up the client part of the backup, since it only needs to
encrypt chunks that are actually getting uploaded. Chunks that exist already in
the previous backup, do not need to be encrypted and uploaded.
Change Detection Mode for File-Based Backups
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The change detection mode controls how files which did not change in-between
subsequent backup runs are detected and handled, as well as the archive file
format used to encode the directory entries.
There are 3 modes available: the current default ``legacy`` mode, as well as the
``data`` and ``metadata`` mode. While the ``legacy`` mode encodes all contents
in a single ``pxar`` archive, the latter two modes split data and metadata into
``ppxar`` and ``mpxar`` archives. This is done to allow for fast comparison of
metadata with the previous snapshot, used by the ``metadata`` mode to detect
reusable files. The ``data`` mode refrains from reusing unchanged files by
rechunking the file unconditionally. This mode therefore assures that no file
changes are missed even if the metadata are unchanged.
.. NOTE:: ``pxar`` and ``mpxar``/``ppxar`` file formats are different and cannot
be deduplicated as efficiently if a datastore stores archive snapshots of
both types.
As the change detection modes are client-side changes, they are backwards
compatible with older versions of Proxmox Backup Server. However, exploring the
backup contents of the new archive format via the web interface requires a
Proxmox Backup Server with version 3.2.5 or higher. Upgrading to the latest
version is recommended for full feature compatibility.
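The change detection mode is selected by the client at backup time. A sketch,
assuming the client exposes it as the ``--change-detection-mode`` option:

.. code-block:: console

  # proxmox-backup-client backup root.pxar:/ --change-detection-mode metadata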
.. _change-detection-mode-legacy:
Legacy Mode
+++++++++++
Backup snapshots of filesystems are created by recursively scanning the
directory entries. All entries to be included in the snapshot are read and
serialized by encoding them using the ``pxar``
:ref:`archive format <pxar-format>`. The resulting stream is chunked into
:ref:`dynamically sized chunks <dynamically-sized-chunks>` and uploaded to the
Proxmox Backup Server, deduplicating chunks based on their content digest for
space efficient storage.
File contents are read and chunked unconditionally, no check is performed to
detect unchanged files.
.. _change-detection-mode-data:
Data Mode
+++++++++
As in ``legacy`` mode, file contents are read and chunked unconditionally; no
check is performed to detect unchanged files.
However, in contrast to ``legacy`` mode, which stores entries metadata and data
in a single self-contained ``pxar`` archive, the ``data`` mode encodes metadata
and file contents into two separate streams. The resulting backup snapshots
therefore contain split archives, an archive in ``mpxar``
:ref:`format <pxar-meta-format>` containing the entries metadata and an archive
with ``ppxar`` :ref:`format <ppxar-format>`, containing the actual file
contents, separated by payload headers for consistency checks. The metadata
archive stores a reference offset to the corresponding payload archive entry so
the file contents can be accessed. Both of these archives are chunked and
uploaded by the Proxmox backup client, resulting in separated indices and
independent chunks.
The ``mpxar`` archive can be used to efficiently fetch the associated metadata
for archive entries without the overhead of payload data stored within the same
chunks. This is used for example for entry lookups to list the archive contents
or to navigate the mounted filesystem via the FUSE implementation. No dedicated
catalog is therefore created for archives encoded using this mode.
By not comparing metadata to the previous backup snapshot, no files will be
considered reusable by this mode, in contrast to the ``metadata`` mode. The
latter can reuse files whose contents changed while file size and mtime did not,
for example because the mtime was restored after changing the file contents.
.. _change-detection-mode-metadata:
Metadata Mode
+++++++++++++
The ``metadata`` mode detects files whose file metadata did not change
in-between subsequent backup runs. The metadata comparison includes file size,
file type, ownership and permission information, as well as acls and attributes
and most importantly the file's mtime, for details see the
:ref:`pxar metadata archive format <pxar-meta-format>`. The files' ctime and
inode number are not stored and not used for comparison, since some tools (e.g.
``vzdump``) might sync the contents of the filesystem to a temporary location
before actually performing the backup via the Proxmox backup client. For these
cases, ctime and inode number will always change.
This mode will avoid reading and rechunking the file contents whenever possible
by reusing the file content chunks of unchanged files from the previous backup
snapshot.
To compare the metadata, the previous snapshot's ``mpxar`` metadata archive is
downloaded at the start of the backup run and used as a reference. Further, the
index of the payload archive ``ppxar`` is fetched and used to lookup the file
content chunk's digests, which will be used to reindex pre-existing chunks
without the need to reread and rechunk the file contents.
During backup, the metadata and payload archives are encoded in the same manner
as for the ``data`` mode, but for the ``metadata`` mode each entry is
additionally looked up in the metadata reference archive for comparison first.
If the file did not change as compared to the reference, the file is considered
as unchanged and the Proxmox backup client enters a look-ahead caching mode. In
this mode, the client will keep reading and comparing the following entries in
the filesystem as long as they are reusable. Further, it keeps track of the
payload archive offset range these file contents are stored in. The additional
look-ahead caching is needed, as file boundaries are not required to be aligned
with chunk boundaries, therefore reused chunks can contain possibly wasted chunk
content (also called padding) if reused unconditionally.
The look-ahead cache will greedily cache all unchanged entries up to the point
where either the cache size limit is reached, a file entry with changed
metadata is encountered, or the range of payload chunks considered for reuse is
not continuous. An example for the latter is a file which disappeared in-between
subsequent backup runs, leaving a hole in the range. At this point, the caching
mode is disabled and the client calculates the wasted padding size which would
be introduced by reusing the payload chunks for all the unchanged files cached
up to this point. If the padding is acceptable (below a preset limit of 10% of
the actually reused chunk content), the files are reused by encoding them in the
metadata archive using updated offset references to the contents and reindexing
the pre-existing chunks in the new ``ppxar`` archive. If, however, the padding
exceeds the limit, all cached entries are reencoded, not
reusing any of the pre-existing data. The metadata as cached will be encoded in
the metadata archive, no matter if cached file contents are to be reused or
reencoded.
This combination of look-ahead caching and reuse of pre-existing payload archive
chunks for files with unchanged contents therefore speeds up the backup
process by avoiding rereading and rechunking file contents whenever possible.
To reduce paddings and increase chunk reusability, during creation of the
archives in ``data`` mode and ``metadata`` mode the pxar encoder signals
encountered file boundaries as suggested chunk boundaries to the sliding window
chunker. The chunker then decides based on the internal state if the suggested
boundary is accepted or disregarded.
Caveats and Limitations
-----------------------
@ -298,8 +159,8 @@ will see that the probability of a collision in that scenario is:
For context, in a lottery game of guessing 6 numbers out of 45, the chance to
correctly guess all 6 numbers is only :math:`1.2277 * 10^{-7}`. This means the
chance of a collision is lower than winning 8 such lottery games *in a row*:
:math:`(1.2277 * 10^{-7})^{8} = 5.1623 * 10^{-56}`.
In conclusion, it is extremely unlikely that such a collision would occur by
accident in a normal datastore.
@ -319,9 +180,6 @@ read all files again for every backup, otherwise it would not be possible to
generate a consistent, independent pxar archive where the original chunks can be
reused. Note that in spite of this, only new or changed chunks will be uploaded.
In order to avoid these limitations, the Change Detection Mode ``metadata`` was
introduced.
Verification of Encrypted Chunks
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

View File

@ -16,8 +16,8 @@ User Configuration
choose the realm when you add a new user. Possible realms are:
:pam: Linux PAM standard authentication. Use this if you want to
authenticate as a Linux system user. The user needs to already exist on
the host system.
:pbs: Proxmox Backup Server realm. This type stores hashed passwords in
``/etc/proxmox-backup/shadow.json``.
@ -599,32 +599,6 @@ list view in the web UI, or using the command line:
Authentication Realms
---------------------
.. _user_realms_pam:
Linux PAM
~~~~~~~~~
Linux PAM is a framework for system-wide user authentication. These users are
created on the host system with commands such as ``adduser``.
If PAM users exist on the host system, corresponding entries can be added to
Proxmox Backup Server, to allow these users to log in via their system username
and password.
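For example, a pre-existing system user could be allowed to log in like this (a
sketch; the username is a placeholder):

.. code-block:: console

  # proxmox-backup-manager user create backup-admin@pam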
.. _user_realms_pbs:
Proxmox Backup authentication server
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This is a Unix-like password store, which stores hashed passwords in
``/etc/proxmox-backup/shadow.json``. Passwords are hashed using the SHA-256
hashing algorithm.
This is the most convenient realm for small-scale (or even mid-scale)
installations, where users do not need access to anything outside of Proxmox
Backup Server. In this case, users are fully managed by Proxmox Backup Server
and are able to change their own passwords via the GUI.
.. _user_realms_ldap:
LDAP
@ -689,7 +663,7 @@ address must be specified. Most options from :ref:`user_realms_ldap` apply to
Active Directory as well, most importantly the bind credentials ``bind-dn``
and ``password``. This is typically required by default for Microsoft Active
Directory. The ``bind-dn`` can be specified either in AD-specific
``user@company.net`` syntax or the common LDAP-DN syntax.
The authentication domain name must only be specified if anonymous bind is
requested. If bind credentials are given, the domain name is automatically

View File

@ -1,346 +0,0 @@
.. _using_the_installer:
Install `Proxmox Backup`_ Server using the Installer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Download the ISO from |DOWNLOADS|.
It includes the following:
* The Proxmox Backup Server installer, which partitions the local
disk(s) with ext4, xfs or ZFS, and installs the operating system
* Complete operating system (Debian Linux, 64-bit)
* Proxmox Linux kernel with ZFS support
* Complete toolset to administer backups and all necessary resources
* Web-based management interface
.. note:: Any existing data on the selected drives will be overwritten
during the installation process. The installer does not add boot
menu entries for other operating systems.
Please insert the :ref:`installation_medium` (for example, USB flash
drive or DVD) and boot from it.
.. note:: You may need to go into your server's firmware settings, to
enable booting from your installation medium (for example, USB) and
set the desired boot order. When booting an installer prior to
`Proxmox Backup`_ Server version 3.1, Secure Boot needs to be
disabled.
.. image:: images/screenshots/pbs-installer-grub-menu.png
:target: _images/pbs-installer-grub-menu.png
:align: right
:alt: Proxmox Backup Server Installer GRUB Menu
After choosing the correct entry (for example, *Boot from USB*) the
Proxmox Backup Server menu will be displayed, and one of the following
options can be selected:
**Install Proxmox Backup Server (Graphical)**
Starts the normal installation.
TIP: It's possible to use the installation wizard with a keyboard only. Buttons
can be clicked by pressing the ``ALT`` key combined with the underlined character
from the respective button. For example, ``ALT + N`` to press a ``Next`` button.
**Install Proxmox Backup Server (Console)**
Starts the terminal-mode installation wizard. It provides the same overall
installation experience as the graphical installer, but has generally better
compatibility with very old and very new hardware.
**Install Proxmox Backup Server (Terminal UI, Serial Console)**
Starts the terminal-mode installation wizard, additionally setting up the Linux
kernel to use the (first) serial port of the machine for in- and output. This
can be used if the machine is completely headless and only has a serial console
available.
.. image:: images/screenshots/pbs-tui-installer.png
:target: _images/pbs-tui-installer.png
:align: right
:alt: Proxmox Backup Server Terminal UI Installer
Both modes use the same code base for the actual installation process to
benefit from more than a decade of bug fixes and ensure feature parity.
TIP: The *Console* or *Terminal UI* option can be used in case the graphical
installer does not work correctly, due to e.g. driver issues. See also
:ref:`nomodeset_kernel_param`.
**Advanced Options: Install Proxmox Backup Server (Debug Mode)**
Starts the installation in debug mode. A console will be opened at several
installation steps. This helps to debug the situation if something goes wrong.
To exit a debug console, press ``CTRL-D``. This option can be used to boot a
live system with all basic tools available. You can use it, for example, to
repair a degraded ZFS *rpool* or fix the :ref:`chapter-systembooting` for an
existing Proxmox Backup Server setup.
**Advanced Options: Install Proxmox Backup Server (Terminal UI, Debug Mode)**
Same as the graphical debug mode, but preparing the system to run the
terminal-based installer instead.
**Advanced Options: Install Proxmox Backup Server (Serial Console Debug Mode)**
Same as the terminal-based debug mode, but additionally sets up the Linux kernel to
use the (first) serial port of the machine for in- and output.
**Advanced Options: Rescue Boot**
With this option you can boot an existing installation. It searches all attached
hard disks. If it finds an existing installation, it boots directly into that
disk using the Linux kernel from the ISO. This can be useful if there are
problems with the bootloader (GRUB/``systemd-boot``) or the BIOS/UEFI is unable
to read the boot block from the disk.
**Advanced Options: Test Memory (memtest86+)**
Runs *memtest86+*. This is useful to check if the memory is functional and free
of errors. Secure Boot must be turned off in the UEFI firmware setup utility to
run this option.
You normally select *Install Proxmox Backup Server (Graphical)* to start the
installation.
The first step is to read our EULA (End User License Agreement). Following this,
you can select the target hard disk(s) for the installation.
.. caution:: By default, the whole server is used and all existing data is
removed. Make sure there is no important data on the server before proceeding
with the installation.
The *Options* button lets you select the target file system, which defaults to
``ext4``. The installer uses LVM if you select ``ext4`` or ``xfs`` as a file
system, and offers additional options to restrict LVM space (see :ref:`below
<advanced_lvm_options>`).
.. image:: images/screenshots/pbs-installer-select-disk.png
:target: _images/pbs-installer-select-disk.png
:align: right
:alt: Proxmox Backup Server Installer - Harddisk Selection Dialog
Proxmox Backup Server can also be installed on ZFS. As ZFS offers several
software RAID levels, this is an option for systems that don't have a hardware
RAID controller. The target disks must be selected in the *Options* dialog. More
ZFS specific settings can be changed under :ref:`Advanced Options
<advanced_zfs_options>`.
.. warning:: ZFS on top of any hardware RAID is not supported and can result in
data loss.
.. image:: images/screenshots/pbs-installer-location.png
:target: _images/pbs-installer-location.png
:align: right
:alt: Proxmox Backup Server Installer - Location and timezone configuration
The next page asks for basic configuration options like your location, time
zone, and keyboard layout. The location is used to select a nearby download
server, in order to increase the speed of updates. The installer is usually able
to auto-detect these settings, so you only need to change them in rare
situations when auto-detection fails, or when you want to use a keyboard layout
not commonly used in your country.
.. image:: images/screenshots/pbs-installer-password.png
:target: _images/pbs-installer-password.png
:align: left
:alt: Proxmox Backup Server Installer - Password and email configuration
Next, the password of the superuser (``root``) and an email address need to be
specified. The password must consist of at least 8 characters. It's highly
recommended to use a stronger password. Some guidelines are:
|
|
- Use a minimum password length of at least 12 characters.
- Include lowercase and uppercase alphabetic characters, numbers, and symbols.
- Avoid character repetition, keyboard patterns, common dictionary words,
letter or number sequences, usernames, relative or pet names, romantic links
(current or past), and biographical information (for example ID numbers,
ancestors' names or dates).
The email address is used to send notifications to the system administrator.
For example:
- Information about available package updates.
- Error messages from periodic *cron* jobs.
.. image:: images/screenshots/pbs-installer-network.png
:target: _images/pbs-installer-network.png
:align: right
:alt: Proxmox Backup Server Installer - Network configuration
All those notification mails will be sent to the specified email address.
The last step is the network configuration. Network interfaces that are *UP*
show a filled circle in front of their name in the drop down menu. Please note
that during installation you can either specify an IPv4 or IPv6 address, but not
both. To configure a dual stack node, add additional IP addresses after the
installation.
.. image:: images/screenshots/pbs-installer-progress.png
:target: _images/pbs-installer-progress.png
:align: left
:alt: Proxmox Backup Server Installer - Installation progress
The next step shows a summary of the previously selected options. Please
re-check every setting and use the *Previous* button if a setting needs to be
changed.
After clicking *Install*, the installer will begin to format the disks and copy
packages to the target disk(s). Please wait until this step has finished; then
remove the installation medium and restart your system.
.. image:: images/screenshots/pbs-installer-summary.png
:target: _images/pbs-installer-summary.png
:align: right
:alt: Proxmox Backup Server Installer - Installation summary
Copying the packages usually takes several minutes, mostly depending on the
speed of the installation medium and the target disk performance.
When copying and setting up the packages has finished, you can reboot the
server. This will be done automatically after a few seconds by default.
Installation Failure
^^^^^^^^^^^^^^^^^^^^
If the installation failed, check out specific errors on the second TTY
(``CTRL + ALT + F2``) and ensure that the system meets the
:ref:`minimum requirements <minimum_system_requirements>`.
If the installation is still not working, look at the :ref:`how to get help
chapter <get_help>`.
Accessing the Management Interface Post-Installation
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. image:: images/screenshots/pbs-gui-login-window.png
:target: _images/pbs-gui-login-window.png
:align: right
:alt: Proxmox Backup Server - Management interface login dialog
After a successful installation and reboot of the system you can use the Proxmox
Backup Server web interface for further configuration.
- Point your browser to the IP address given during the installation and port
8007, for example: https://pbs.yourdomain.tld:8007
- Log in using the ``root`` (realm *Linux PAM standard authentication*) username
and the password chosen during installation.
- Upload your subscription key to gain access to the Enterprise repository.
Otherwise, you will need to set up one of the public, less tested package
repositories to get updates for security fixes, bug fixes, and new features.
- Check the IP configuration and hostname.
- Check the timezone.
.. _advanced_lvm_options:
Advanced LVM Configuration Options
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The installer creates a Volume Group (VG) called ``pbs``, and additional Logical
Volumes (LVs) called ``root`` and ``swap``, if ``ext4`` or ``xfs`` is used as
the filesystem. To control the size of these volumes use:
- *hdsize*
Defines the total hard disk size to be used. This way you can reserve free
space on the hard disk for further partitioning.
- *swapsize*
Defines the size of the ``swap`` volume. The default is the size of the
installed memory, minimum 4 GB and maximum 8 GB. The resulting value cannot
be greater than ``hdsize/8``.
If set to ``0``, no ``swap`` volume will be created.
- *minfree*
Defines the amount of free space that should be left in the LVM volume group
``pbs``. With more than 128GB storage available, the default is 16GB,
otherwise ``hdsize/8`` will be used.
.. _advanced_zfs_options:
Advanced ZFS Configuration Options
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The installer creates the ZFS pool ``rpool``, if ZFS is used. No swap space is
created but you can reserve some unpartitioned space on the install disks for
swap. You can also create a swap zvol after the installation, although this can
lead to problems (see :ref:`ZFS swap notes <zfs_swap>`).
- *ashift*
Defines the *ashift* value for the created pool. The *ashift* needs to be
set at least to the sector-size of the underlying disks (2 to the power of
*ashift* is the sector-size), or of any disk which might later be added to the
pool (for example, the replacement of a defective disk).
- *compress*
Defines whether compression is enabled for ``rpool``.
- *checksum*
Defines which checksumming algorithm should be used for ``rpool``.
- *copies*
Defines the *copies* parameter for ``rpool``. Check the ``zfs(8)`` manpage
for the semantics, and why this does not replace redundancy on disk-level.
- *hdsize*
Defines the total hard disk size to be used. This is useful to save free
space on the hard disk(s) for further partitioning (for example, to create a
swap partition). *hdsize* is only honored for bootable disks, that is only
the first disk or mirror for RAID0, RAID1 or RAID10, and all disks in
RAID-Z[123].
ZFS Performance Tips
^^^^^^^^^^^^^^^^^^^^
ZFS works best with a lot of memory. If you intend to use ZFS make sure to have
enough RAM available for it. A good calculation is 4GB plus 1GB RAM for each TB
of raw disk space.
ZFS can use a dedicated drive as write cache, called the ZFS Intent Log (ZIL).
Use a fast drive (SSD) for it. It can be added after installation with the
following command:
.. code-block:: console
# zpool add <pool-name> log </dev/path_to_fast_ssd>
.. _nomodeset_kernel_param:
Adding the ``nomodeset`` Kernel Parameter
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Problems may arise on very old or very new hardware due to graphics drivers. If
the installation hangs during boot, you can try adding the ``nomodeset``
parameter. This prevents the Linux kernel from loading any graphics drivers and
forces it to continue using the BIOS/UEFI-provided framebuffer.
On the Proxmox Backup Server bootloader menu, navigate to *Install Proxmox
Backup Server (Console)* and press ``e`` to edit the entry. Using the arrow
keys, navigate to the line starting with ``linux``, move the cursor to the end
of that line and add the parameter ``nomodeset``, separated by a space from the
pre-existing last parameter.
Then press ``Ctrl-X`` or ``F10`` to boot the configuration.

View File

@ -2,7 +2,6 @@ include ../defines.mk
UNITS := \
proxmox-backup-daily-update.timer \
removable-device-attach@.service
DYNAMIC_UNITS := \
proxmox-backup-banner.service \

View File

@ -1,8 +0,0 @@
[Unit]
Description=Try to mount the removable device of a datastore with uuid '%i'.
After=proxmox-backup-proxy.service
Requires=proxmox-backup-proxy.service
[Service]
Type=simple
ExecStart=/usr/sbin/proxmox-backup-manager datastore uuid-mount %i

View File

@ -10,7 +10,7 @@ use tokio::net::TcpStream;
// Simple H2 client to test H2 download speed using h2server.rs
struct Process {
body: h2::legacy::RecvStream,
body: h2::RecvStream,
trailers: bool,
bytes: usize,
}
@ -50,11 +50,11 @@ impl Future for Process {
}
fn send_request(
mut client: h2::legacy::client::SendRequest<bytes::Bytes>,
mut client: h2::client::SendRequest<bytes::Bytes>,
) -> impl Future<Output = Result<usize, Error>> {
println!("sending request");
let request = hyper::http::Request::builder()
let request = http::Request::builder()
.uri("http://localhost/")
.body(())
.unwrap();
@ -78,7 +78,7 @@ async fn run() -> Result<(), Error> {
let conn = TcpStream::connect(std::net::SocketAddr::from(([127, 0, 0, 1], 8008))).await?;
conn.set_nodelay(true).unwrap();
let (client, h2) = h2::legacy::client::Builder::new()
let (client, h2) = h2::client::Builder::new()
.initial_connection_window_size(1024 * 1024 * 1024)
.initial_window_size(1024 * 1024 * 1024)
.max_frame_size(4 * 1024 * 1024)

View File

@ -10,7 +10,7 @@ use tokio::net::TcpStream;
// Simple H2 client to test H2 download speed using h2s-server.rs
struct Process {
body: h2::legacy::RecvStream,
body: h2::RecvStream,
trailers: bool,
bytes: usize,
}
@ -50,11 +50,11 @@ impl Future for Process {
}
fn send_request(
mut client: h2::legacy::client::SendRequest<bytes::Bytes>,
mut client: h2::client::SendRequest<bytes::Bytes>,
) -> impl Future<Output = Result<usize, Error>> {
println!("sending request");
let request = hyper::http::Request::builder()
let request = http::Request::builder()
.uri("http://localhost/")
.body(())
.unwrap();
@ -94,7 +94,7 @@ async fn run() -> Result<(), Error> {
.await
.map_err(|err| format_err!("connect failed - {}", err))?;
let (client, h2) = h2::legacy::client::Builder::new()
let (client, h2) = h2::client::Builder::new()
.initial_connection_window_size(1024 * 1024 * 1024)
.initial_window_size(1024 * 1024 * 1024)
.max_frame_size(4 * 1024 * 1024)

View File

@ -8,19 +8,6 @@ use tokio::net::{TcpListener, TcpStream};
use pbs_buildcfg::configdir;
#[derive(Clone, Copy)]
struct H2SExecutor;
impl<Fut> hyper::rt::Executor<Fut> for H2SExecutor
where
Fut: Future + Send + 'static,
Fut::Output: Send,
{
fn execute(&self, fut: Fut) {
tokio::spawn(fut);
}
}
fn main() -> Result<(), Error> {
proxmox_async::runtime::main(run())
}
@ -63,11 +50,12 @@ async fn handle_connection(socket: TcpStream, acceptor: Arc<SslAcceptor>) -> Res
stream.as_mut().accept().await?;
let mut http = hyper::server::conn::http2::Builder::new(H2SExecutor);
let mut http = hyper::server::conn::Http::new();
http.http2_only(true);
// increase window size: todo - find optimal size
let max_window_size = (1 << 31) - 2;
http.initial_stream_window_size(max_window_size);
http.initial_connection_window_size(max_window_size);
http.http2_initial_stream_window_size(max_window_size);
http.http2_initial_connection_window_size(max_window_size);
let service = hyper::service::service_fn(|_req: Request<Body>| {
println!("Got request");
@ -75,11 +63,8 @@ async fn handle_connection(socket: TcpStream, acceptor: Arc<SslAcceptor>) -> Res
let body = Body::from(buffer);
let response = Response::builder()
.status(hyper::http::StatusCode::OK)
.header(
hyper::http::header::CONTENT_TYPE,
"application/octet-stream",
)
.status(http::StatusCode::OK)
.header(http::header::CONTENT_TYPE, "application/octet-stream")
.body(body)
.unwrap();
future::ok::<_, Error>(response)

View File

@ -1,24 +1,9 @@
use std::future::Future;
use anyhow::Error;
use futures::*;
use hyper::{Body, Request, Response};
use tokio::net::{TcpListener, TcpStream};
#[derive(Clone, Copy)]
struct H2Executor;
impl<Fut> hyper::rt::Executor<Fut> for H2Executor
where
Fut: Future + Send + 'static,
Fut::Output: Send,
{
fn execute(&self, fut: Fut) {
tokio::spawn(fut);
}
}
fn main() -> Result<(), Error> {
proxmox_async::runtime::main(run())
}
@ -41,11 +26,12 @@ async fn run() -> Result<(), Error> {
async fn handle_connection(socket: TcpStream) -> Result<(), Error> {
socket.set_nodelay(true).unwrap();
let mut http = hyper::server::conn::http2::Builder::new(H2Executor);
let mut http = hyper::server::conn::Http::new();
http.http2_only(true);
// increase window size: todo - find optimal size
let max_window_size = (1 << 31) - 2;
http.initial_stream_window_size(max_window_size);
http.initial_connection_window_size(max_window_size);
http.http2_initial_stream_window_size(max_window_size);
http.http2_initial_connection_window_size(max_window_size);
let service = hyper::service::service_fn(|_req: Request<Body>| {
println!("Got request");
@ -53,11 +39,8 @@ async fn handle_connection(socket: TcpStream) -> Result<(), Error> {
let body = Body::from(buffer);
let response = Response::builder()
.status(hyper::http::StatusCode::OK)
.header(
hyper::http::header::CONTENT_TYPE,
"application/octet-stream",
)
.status(http::StatusCode::OK)
.header(http::header::CONTENT_TYPE, "application/octet-stream")
.body(body)
.unwrap();
future::ok::<_, Error>(response)

View File

@ -1,91 +0,0 @@
use std::{
fs::File,
io::Read,
time::{Duration, SystemTime},
};
use anyhow::{format_err, Error};
use pbs_tape::TapeWrite;
use proxmox_backup::tape::drive::{LtoTapeHandle, TapeDriver};
const URANDOM_PATH: &str = "/dev/urandom";
const CHUNK_SIZE: usize = 4 * 1024 * 1024; // 4 MiB
const LOG_LIMIT: usize = 4 * 1024 * 1024 * 1024; // 4 GiB
fn write_chunks<'a>(
mut writer: Box<dyn 'a + TapeWrite>,
blob_size: usize,
max_size: usize,
max_time: Duration,
) -> Result<(), Error> {
// prepare chunks in memory
let mut blob: Vec<u8> = vec![0u8; blob_size];
let mut file = File::open(URANDOM_PATH)?;
file.read_exact(&mut blob[..])?;
let start_time = SystemTime::now();
loop {
let iteration_time = SystemTime::now();
let mut count = 0;
let mut bytes_written = 0;
let mut idx = 0;
let mut incr_count = 0;
loop {
if writer.write_all(&blob)? {
eprintln!("LEOM reached");
break;
}
// modifying chunks a bit to mitigate compression/deduplication
blob[idx] = blob[idx].wrapping_add(1);
incr_count += 1;
if incr_count >= 256 {
incr_count = 0;
idx += 1;
}
count += 1;
bytes_written += blob_size;
if bytes_written > max_size {
break;
}
}
let elapsed = iteration_time.elapsed()?.as_secs_f64();
let elapsed_total = start_time.elapsed()?;
eprintln!(
"{:.2}s: wrote {} chunks ({:.2} MB at {:.2} MB/s, average: {:.2} MB/s)",
elapsed_total.as_secs_f64(),
count,
bytes_written as f64 / 1_000_000.0,
(bytes_written as f64) / (1_000_000.0 * elapsed),
(writer.bytes_written() as f64) / (1_000_000.0 * elapsed_total.as_secs_f64()),
);
if elapsed_total > max_time {
break;
}
}
Ok(())
}
fn main() -> Result<(), Error> {
let mut args = std::env::args_os();
args.next(); // binary name
let path = args.next().expect("no path to tape device given");
let file = File::open(path).map_err(|err| format_err!("could not open tape device: {err}"))?;
let mut drive = LtoTapeHandle::new(file)
.map_err(|err| format_err!("error creating drive handle: {err}"))?;
write_chunks(
drive
.write_file()
.map_err(|err| format_err!("error starting file write: {err}"))?,
CHUNK_SIZE,
LOG_LIMIT,
Duration::new(60 * 20, 0),
)
.map_err(|err| format_err!("error writing data to tape: {err}"))?;
Ok(())
}

View File

@ -5,10 +5,10 @@ extern crate proxmox_backup;
use anyhow::Error;
use std::io::{Read, Write};
use pbs_datastore::{Chunker, ChunkerImpl};
use pbs_datastore::Chunker;
struct ChunkWriter {
chunker: ChunkerImpl,
chunker: Chunker,
last_chunk: usize,
chunk_offset: usize,
@ -23,7 +23,7 @@ struct ChunkWriter {
impl ChunkWriter {
fn new(chunk_size: usize) -> Self {
ChunkWriter {
chunker: ChunkerImpl::new(chunk_size),
chunker: Chunker::new(chunk_size),
last_chunk: 0,
chunk_offset: 0,
chunk_count: 0,
@ -69,8 +69,7 @@ impl Write for ChunkWriter {
fn write(&mut self, data: &[u8]) -> std::result::Result<usize, std::io::Error> {
let chunker = &mut self.chunker;
let ctx = pbs_datastore::chunker::Context::default();
let pos = chunker.scan(data, &ctx);
let pos = chunker.scan(data);
if pos > 0 {
self.chunk_offset += pos;

View File

@ -1,6 +1,6 @@
extern crate proxmox_backup;
use pbs_datastore::{Chunker, ChunkerImpl};
use pbs_datastore::Chunker;
fn main() {
let mut buffer = Vec::new();
@ -12,7 +12,7 @@ fn main() {
buffer.push(byte);
}
}
let mut chunker = ChunkerImpl::new(64 * 1024);
let mut chunker = Chunker::new(64 * 1024);
let count = 5;
@ -23,9 +23,8 @@ fn main() {
for _i in 0..count {
let mut pos = 0;
let mut _last = 0;
let ctx = pbs_datastore::chunker::Context::default();
while pos < buffer.len() {
let k = chunker.scan(&buffer[pos..], &ctx);
let k = chunker.scan(&buffer[pos..]);
if k == 0 {
//println!("LAST {}", pos);
break;

View File

@ -1,10 +1,9 @@
use std::str::FromStr;
use anyhow::Error;
use futures::*;
extern crate proxmox_backup;
use pbs_client::ChunkStream;
use proxmox_human_byte::HumanByte;
// Test Chunker with real data read from a file.
//
@ -22,22 +21,12 @@ fn main() {
async fn run() -> Result<(), Error> {
let file = tokio::fs::File::open("random-test.dat").await?;
let mut args = std::env::args();
args.next();
let buffer_size = args.next().unwrap_or("8k".to_string());
let buffer_size = HumanByte::from_str(&buffer_size)?;
println!("Using buffer size {buffer_size}");
let stream = tokio_util::codec::FramedRead::with_capacity(
file,
tokio_util::codec::BytesCodec::new(),
buffer_size.as_u64() as usize,
)
let stream = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
.map_ok(|bytes| bytes.to_vec())
.map_err(Error::from);
//let chunk_stream = FixedChunkStream::new(stream, 4*1024*1024);
let mut chunk_stream = ChunkStream::new(stream, None, None, None);
let mut chunk_stream = ChunkStream::new(stream, None);
let start_time = std::time::Instant::now();
@ -51,7 +40,7 @@ async fn run() -> Result<(), Error> {
repeat += 1;
stream_len += chunk.len();
//println!("Got chunk {}", chunk.len());
println!("Got chunk {}", chunk.len());
}
let speed =

24
pbs-api-types/Cargo.toml Normal file
View File

@ -0,0 +1,24 @@
[package]
name = "pbs-api-types"
version = "0.1.0"
authors.workspace = true
edition.workspace = true
description = "general API type helpers for PBS"
[dependencies]
anyhow.workspace = true
const_format.workspace = true
hex.workspace = true
lazy_static.workspace = true
percent-encoding.workspace = true
regex.workspace = true
serde.workspace = true
serde_plain.workspace = true
proxmox-auth-api = { workspace = true, features = [ "api-types" ] }
proxmox-human-byte.workspace = true
proxmox-lang.workspace=true
proxmox-schema = { workspace = true, features = [ "api-macro" ] }
proxmox-serde.workspace = true
proxmox-time.workspace = true
proxmox-uuid = { workspace = true, features = [ "serde" ] }

294
pbs-api-types/src/acl.rs Normal file
View File

@ -0,0 +1,294 @@
use std::str::FromStr;
use const_format::concatcp;
use serde::de::{value, IntoDeserializer};
use serde::{Deserialize, Serialize};
use proxmox_lang::constnamedbitmap;
use proxmox_schema::{
api, const_regex, ApiStringFormat, BooleanSchema, EnumEntry, Schema, StringSchema,
};
use crate::PROXMOX_SAFE_ID_REGEX_STR;
const_regex! {
pub ACL_PATH_REGEX = concatcp!(r"^(?:/|", r"(?:/", PROXMOX_SAFE_ID_REGEX_STR, ")+", r")$");
}
// define Privilege bitfield
constnamedbitmap! {
/// Contains a list of privilege name to privilege value mappings.
///
/// The names are used when displaying/persisting privileges anywhere, the values are used to
/// allow easy matching of privileges as bitflags.
PRIVILEGES: u64 => {
/// Sys.Audit allows knowing about the system and its status
PRIV_SYS_AUDIT("Sys.Audit");
/// Sys.Modify allows modifying system-level configuration
PRIV_SYS_MODIFY("Sys.Modify");
/// Sys.PowerManagement allows powering off/rebooting/... the system
PRIV_SYS_POWER_MANAGEMENT("Sys.PowerManagement");
/// Datastore.Audit allows knowing about a datastore,
/// including reading the configuration entry and listing its contents
PRIV_DATASTORE_AUDIT("Datastore.Audit");
/// Datastore.Allocate allows creating or deleting datastores
PRIV_DATASTORE_ALLOCATE("Datastore.Allocate");
/// Datastore.Modify allows modifying a datastore and its contents
PRIV_DATASTORE_MODIFY("Datastore.Modify");
/// Datastore.Read allows reading arbitrary backup contents
PRIV_DATASTORE_READ("Datastore.Read");
/// Allows verifying a datastore
PRIV_DATASTORE_VERIFY("Datastore.Verify");
/// Datastore.Backup allows Datastore.Read|Verify and creating new snapshots,
/// but also requires backup ownership
PRIV_DATASTORE_BACKUP("Datastore.Backup");
/// Datastore.Prune allows deleting snapshots,
/// but also requires backup ownership
PRIV_DATASTORE_PRUNE("Datastore.Prune");
/// Permissions.Modify allows modifying ACLs
PRIV_PERMISSIONS_MODIFY("Permissions.Modify");
/// Remote.Audit allows reading remote.cfg and sync.cfg entries
PRIV_REMOTE_AUDIT("Remote.Audit");
/// Remote.Modify allows modifying remote.cfg
PRIV_REMOTE_MODIFY("Remote.Modify");
/// Remote.Read allows reading data from a configured `Remote`
PRIV_REMOTE_READ("Remote.Read");
/// Sys.Console allows access to the system's console
PRIV_SYS_CONSOLE("Sys.Console");
/// Tape.Audit allows reading tape backup configuration and status
PRIV_TAPE_AUDIT("Tape.Audit");
/// Tape.Modify allows modifying tape backup configuration
PRIV_TAPE_MODIFY("Tape.Modify");
/// Tape.Write allows writing tape media
PRIV_TAPE_WRITE("Tape.Write");
/// Tape.Read allows reading tape backup configuration and media contents
PRIV_TAPE_READ("Tape.Read");
/// Realm.Allocate allows viewing, creating, modifying and deleting realms
PRIV_REALM_ALLOCATE("Realm.Allocate");
}
}
pub fn privs_to_priv_names(privs: u64) -> Vec<&'static str> {
PRIVILEGES
.iter()
.fold(Vec::new(), |mut priv_names, (name, value)| {
if value & privs != 0 {
priv_names.push(name);
}
priv_names
})
}
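// Editor's illustrative sketch (not part of the original file): a minimal test
// showing how the generated privilege constants behave as plain `u64` bitflags
// and how `privs_to_priv_names` maps a bitmask back to its display names. It
// only uses items defined above in this module.
#[cfg(test)]
mod privilege_bitflag_example {
    use super::*;

    #[test]
    fn combine_and_name_privileges() {
        // Privileges combine with bitwise OR into a single u64 value.
        let privs = PRIV_SYS_AUDIT | PRIV_DATASTORE_AUDIT;
        // Each privilege occupies a distinct bit, so membership is a bitwise AND test.
        assert_ne!(privs & PRIV_SYS_AUDIT, 0);
        assert_eq!(privs & PRIV_SYS_MODIFY, 0);
        // Converting the bitmask back yields the persisted/display names.
        let names = privs_to_priv_names(privs);
        assert!(names.contains(&"Sys.Audit"));
        assert!(names.contains(&"Datastore.Audit"));
    }
}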
/// Admin always has all privileges. It can do everything except a few actions
/// which are limited to the `root@pam` superuser.
pub const ROLE_ADMIN: u64 = u64::MAX;
/// NoAccess can be used to remove privileges from specific (sub-)paths
pub const ROLE_NO_ACCESS: u64 = 0;
#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Audit can view configuration and status information, but not modify it.
pub const ROLE_AUDIT: u64 = 0
| PRIV_SYS_AUDIT
| PRIV_DATASTORE_AUDIT;
#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Datastore.Admin can do anything on the datastore.
pub const ROLE_DATASTORE_ADMIN: u64 = 0
| PRIV_DATASTORE_AUDIT
| PRIV_DATASTORE_MODIFY
| PRIV_DATASTORE_READ
| PRIV_DATASTORE_VERIFY
| PRIV_DATASTORE_BACKUP
| PRIV_DATASTORE_PRUNE;
#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Datastore.Reader can read/verify datastore content and do restore
pub const ROLE_DATASTORE_READER: u64 = 0
| PRIV_DATASTORE_AUDIT
| PRIV_DATASTORE_VERIFY
| PRIV_DATASTORE_READ;
#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Datastore.Backup can do backup and restore, but no prune.
pub const ROLE_DATASTORE_BACKUP: u64 = 0
| PRIV_DATASTORE_BACKUP;
#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Datastore.PowerUser can do backup, restore, and prune.
pub const ROLE_DATASTORE_POWERUSER: u64 = 0
| PRIV_DATASTORE_PRUNE
| PRIV_DATASTORE_BACKUP;
#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Datastore.Audit can audit the datastore.
pub const ROLE_DATASTORE_AUDIT: u64 = 0
| PRIV_DATASTORE_AUDIT;
#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Remote.Audit can audit the remote
pub const ROLE_REMOTE_AUDIT: u64 = 0
| PRIV_REMOTE_AUDIT;
#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Remote.Admin can do anything on the remote.
pub const ROLE_REMOTE_ADMIN: u64 = 0
| PRIV_REMOTE_AUDIT
| PRIV_REMOTE_MODIFY
| PRIV_REMOTE_READ;
#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Remote.SyncOperator can do read and prune on the remote.
pub const ROLE_REMOTE_SYNC_OPERATOR: u64 = 0
| PRIV_REMOTE_AUDIT
| PRIV_REMOTE_READ;
#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Tape.Audit can audit the tape backup configuration and media content
pub const ROLE_TAPE_AUDIT: u64 = 0
| PRIV_TAPE_AUDIT;
#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Tape.Admin can do anything on the tape backup
pub const ROLE_TAPE_ADMIN: u64 = 0
| PRIV_TAPE_AUDIT
| PRIV_TAPE_MODIFY
| PRIV_TAPE_READ
| PRIV_TAPE_WRITE;
#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Tape.Operator can do tape backup and restore (but no configuration changes)
pub const ROLE_TAPE_OPERATOR: u64 = 0
| PRIV_TAPE_AUDIT
| PRIV_TAPE_READ
| PRIV_TAPE_WRITE;
#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Tape.Reader can do read and inspect tape content
pub const ROLE_TAPE_READER: u64 = 0
| PRIV_TAPE_AUDIT
| PRIV_TAPE_READ;
/// NoAccess can be used to remove privileges from specific (sub-)paths
pub const ROLE_NAME_NO_ACCESS: &str = "NoAccess";
#[api(
type_text: "<role>",
)]
#[repr(u64)]
#[derive(Serialize, Deserialize)]
/// Enum representing roles via their [PRIVILEGES] combination.
///
/// Since privileges are implemented as bitflags, each unique combination of privileges maps to a
/// single, unique `u64` value that is used in this enum definition.
pub enum Role {
/// Administrator
Admin = ROLE_ADMIN,
/// Auditor
Audit = ROLE_AUDIT,
/// Disable Access
NoAccess = ROLE_NO_ACCESS,
/// Datastore Administrator
DatastoreAdmin = ROLE_DATASTORE_ADMIN,
/// Datastore Reader (inspect datastore content and do restores)
DatastoreReader = ROLE_DATASTORE_READER,
/// Datastore Backup (backup and restore owned backups)
DatastoreBackup = ROLE_DATASTORE_BACKUP,
/// Datastore PowerUser (backup, restore and prune owned backup)
DatastorePowerUser = ROLE_DATASTORE_POWERUSER,
/// Datastore Auditor
DatastoreAudit = ROLE_DATASTORE_AUDIT,
/// Remote Auditor
RemoteAudit = ROLE_REMOTE_AUDIT,
/// Remote Administrator
RemoteAdmin = ROLE_REMOTE_ADMIN,
/// Synchronisation Operator
RemoteSyncOperator = ROLE_REMOTE_SYNC_OPERATOR,
/// Tape Auditor
TapeAudit = ROLE_TAPE_AUDIT,
/// Tape Administrator
TapeAdmin = ROLE_TAPE_ADMIN,
/// Tape Operator
TapeOperator = ROLE_TAPE_OPERATOR,
/// Tape Reader
TapeReader = ROLE_TAPE_READER,
}
impl FromStr for Role {
type Err = value::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
Self::deserialize(s.into_deserializer())
}
}
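// Editor's illustrative sketch (not part of the original file): since `Role` is
// `repr(u64)` with the ROLE_* bitmasks as discriminants, a parsed role can be
// cast directly to its privilege set. Only items defined above are used.
#[cfg(test)]
mod role_example {
    use super::*;
    use std::str::FromStr;

    #[test]
    fn parse_role_and_inspect_privileges() {
        let role = Role::from_str("Admin").expect("known role name");
        // Casting the C-like enum yields the underlying privilege bitmask.
        assert_eq!(role as u64, ROLE_ADMIN);
        // DatastoreBackup carries exactly the Datastore.Backup privilege.
        assert_eq!(Role::DatastoreBackup as u64, PRIV_DATASTORE_BACKUP);
    }
}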
pub const ACL_PATH_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&ACL_PATH_REGEX);
pub const ACL_PATH_SCHEMA: Schema = StringSchema::new("Access control path.")
.format(&ACL_PATH_FORMAT)
.min_length(1)
.max_length(128)
.schema();
pub const ACL_PROPAGATE_SCHEMA: Schema =
BooleanSchema::new("Allow to propagate (inherit) permissions.")
.default(true)
.schema();
pub const ACL_UGID_TYPE_SCHEMA: Schema = StringSchema::new("Type of 'ugid' property.")
.format(&ApiStringFormat::Enum(&[
EnumEntry::new("user", "User"),
EnumEntry::new("group", "Group"),
]))
.schema();
#[api(
properties: {
propagate: {
schema: ACL_PROPAGATE_SCHEMA,
},
path: {
schema: ACL_PATH_SCHEMA,
},
ugid_type: {
schema: ACL_UGID_TYPE_SCHEMA,
},
ugid: {
type: String,
description: "User or Group ID.",
},
roleid: {
type: Role,
}
}
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
/// ACL list entry.
pub struct AclListItem {
pub path: String,
pub ugid: String,
pub ugid_type: String,
pub propagate: bool,
pub roleid: String,
}

98
pbs-api-types/src/ad.rs Normal file
View File

@ -0,0 +1,98 @@
use serde::{Deserialize, Serialize};
use proxmox_schema::{api, Updater};
use super::{
LdapMode, LDAP_DOMAIN_SCHEMA, REALM_ID_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA,
SYNC_ATTRIBUTES_SCHEMA, SYNC_DEFAULTS_STRING_SCHEMA, USER_CLASSES_SCHEMA,
};
#[api(
properties: {
"realm": {
schema: REALM_ID_SCHEMA,
},
"comment": {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
"verify": {
optional: true,
default: false,
},
"sync-defaults-options": {
schema: SYNC_DEFAULTS_STRING_SCHEMA,
optional: true,
},
"sync-attributes": {
schema: SYNC_ATTRIBUTES_SCHEMA,
optional: true,
},
"user-classes" : {
optional: true,
schema: USER_CLASSES_SCHEMA,
},
"base-dn" : {
schema: LDAP_DOMAIN_SCHEMA,
optional: true,
},
"bind-dn" : {
schema: LDAP_DOMAIN_SCHEMA,
optional: true,
}
},
)]
#[derive(Serialize, Deserialize, Updater, Clone)]
#[serde(rename_all = "kebab-case")]
/// AD realm configuration properties.
pub struct AdRealmConfig {
#[updater(skip)]
pub realm: String,
/// AD server address
pub server1: String,
/// Fallback AD server address
#[serde(skip_serializing_if = "Option::is_none")]
pub server2: Option<String>,
/// AD server Port
#[serde(skip_serializing_if = "Option::is_none")]
pub port: Option<u16>,
/// Base domain name. Users are searched under this domain using a `subtree search`.
/// Expected to be set only internally to `defaultNamingContext` of the AD server, but can be
/// overridden if the need arises.
#[serde(skip_serializing_if = "Option::is_none")]
pub base_dn: Option<String>,
/// Comment
#[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>,
/// Connection security
#[serde(skip_serializing_if = "Option::is_none")]
pub mode: Option<LdapMode>,
/// Verify server certificate
#[serde(skip_serializing_if = "Option::is_none")]
pub verify: Option<bool>,
/// CA certificate to use for the server. The path can point to
/// either a file, or a directory. If it points to a file,
/// the PEM-formatted X.509 certificate stored at the path
/// will be added as a trusted certificate.
/// If the path points to a directory,
/// the directory replaces the system's default certificate
/// store at `/etc/ssl/certs` - Every file in the directory
/// will be loaded as a trusted certificate.
#[serde(skip_serializing_if = "Option::is_none")]
pub capath: Option<String>,
/// Bind domain to use for looking up users
#[serde(skip_serializing_if = "Option::is_none")]
pub bind_dn: Option<String>,
/// Custom LDAP search filter for user sync
#[serde(skip_serializing_if = "Option::is_none")]
pub filter: Option<String>,
/// Default options for AD sync
#[serde(skip_serializing_if = "Option::is_none")]
pub sync_defaults_options: Option<String>,
/// List of LDAP attributes to sync from AD to user config
#[serde(skip_serializing_if = "Option::is_none")]
pub sync_attributes: Option<String>,
/// User ``objectClass`` classes to sync
#[serde(skip_serializing_if = "Option::is_none")]
pub user_classes: Option<String>,
}

View File

@ -0,0 +1,95 @@
use std::fmt::{self, Display};
use anyhow::Error;
use serde::{Deserialize, Serialize};
use proxmox_schema::api;
#[api(default: "encrypt")]
#[derive(Copy, Clone, Debug, Eq, PartialEq, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
/// Defines whether data is encrypted (using an AEAD cipher), only signed, or neither.
pub enum CryptMode {
/// Don't encrypt.
None,
/// Encrypt.
Encrypt,
/// Only sign.
SignOnly,
}
#[derive(Debug, Eq, PartialEq, Hash, Clone, Deserialize, Serialize)]
#[serde(transparent)]
/// 32-byte fingerprint, usually calculated with SHA256.
pub struct Fingerprint {
#[serde(with = "bytes_as_fingerprint")]
bytes: [u8; 32],
}
impl Fingerprint {
pub fn new(bytes: [u8; 32]) -> Self {
Self { bytes }
}
pub fn bytes(&self) -> &[u8; 32] {
&self.bytes
}
pub fn signature(&self) -> String {
as_fingerprint(&self.bytes)
}
}
/// Display as short key ID
impl Display for Fingerprint {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", as_fingerprint(&self.bytes[0..8]))
}
}
impl std::str::FromStr for Fingerprint {
type Err = Error;
fn from_str(s: &str) -> Result<Self, Error> {
let mut tmp = s.to_string();
tmp.retain(|c| c != ':');
let mut bytes = [0u8; 32];
hex::decode_to_slice(&tmp, &mut bytes)?;
Ok(Fingerprint::new(bytes))
}
}
fn as_fingerprint(bytes: &[u8]) -> String {
hex::encode(bytes)
.as_bytes()
.chunks(2)
.map(|v| unsafe { std::str::from_utf8_unchecked(v) }) // it's a hex string
.collect::<Vec<&str>>()
.join(":")
}
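// Editor's illustrative sketch (not part of the original file): round-tripping
// a `Fingerprint` through its colon-separated hex form, using only the items
// defined above.
#[cfg(test)]
mod fingerprint_example {
    use super::*;
    use std::str::FromStr;

    #[test]
    fn fingerprint_roundtrip() {
        let fp = Fingerprint::new([0xab; 32]);
        // `signature()` renders all 32 bytes as colon-separated hex pairs.
        let text = fp.signature();
        assert!(text.starts_with("ab:ab:"));
        // `Display` shortens this to the first 8 bytes (the short key ID).
        assert_eq!(format!("{fp}"), "ab:ab:ab:ab:ab:ab:ab:ab");
        // Parsing strips the colons and decodes the hex back into the bytes.
        let parsed = Fingerprint::from_str(&text).expect("valid fingerprint");
        assert_eq!(parsed.bytes(), fp.bytes());
    }
}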
pub mod bytes_as_fingerprint {
use std::mem::MaybeUninit;
use serde::{Deserialize, Deserializer, Serializer};
pub fn serialize<S>(bytes: &[u8; 32], serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let s = super::as_fingerprint(bytes);
serializer.serialize_str(&s)
}
pub fn deserialize<'de, D>(deserializer: D) -> Result<[u8; 32], D::Error>
where
D: Deserializer<'de>,
{
// TODO: more efficiently implement with a Visitor implementing visit_str using split() and
// hex::decode by-byte
let mut s = String::deserialize(deserializer)?;
s.retain(|c| c != ':');
let mut out = MaybeUninit::<[u8; 32]>::uninit();
hex::decode_to_slice(s.as_bytes(), unsafe { &mut (*out.as_mut_ptr())[..] })
.map_err(serde::de::Error::custom)?;
Ok(unsafe { out.assume_init() })
}
}

File diff suppressed because it is too large

View File

@ -0,0 +1,30 @@
use serde::{Deserialize, Serialize};
use proxmox_schema::api;
#[api]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// General status information about a running VM file-restore daemon
pub struct RestoreDaemonStatus {
/// VM uptime in seconds
pub uptime: i64,
/// time left until auto-shutdown, keep in mind that this is useless when 'keep-timeout' is
/// not set, as then the status call will have reset the timer before returning the value
pub timeout: i64,
}
#[api]
#[derive(Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "kebab-case")]
/// The desired format of the result.
pub enum FileRestoreFormat {
/// Plain file (only works for single files)
Plain,
/// PXAR archive
Pxar,
/// ZIP archive
Zip,
/// TAR archive
Tar,
}

799
pbs-api-types/src/jobs.rs Normal file
View File

@ -0,0 +1,799 @@
use std::str::FromStr;
use anyhow::bail;
use const_format::concatcp;
use regex::Regex;
use serde::{Deserialize, Serialize};
use proxmox_schema::*;
use crate::{
Authid, BackupNamespace, BackupType, NotificationMode, RateLimitConfig, Userid,
BACKUP_GROUP_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_NS_RE, DATASTORE_SCHEMA,
DRIVE_NAME_SCHEMA, MEDIA_POOL_NAME_SCHEMA, NS_MAX_DEPTH_REDUCED_SCHEMA, PROXMOX_SAFE_ID_FORMAT,
PROXMOX_SAFE_ID_REGEX_STR, REMOTE_ID_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA,
};
const_regex! {
/// Regex for verification jobs 'DATASTORE:ACTUAL_JOB_ID'
pub VERIFICATION_JOB_WORKER_ID_REGEX = concatcp!(r"^(", PROXMOX_SAFE_ID_REGEX_STR, r"):");
/// Regex for sync jobs '(REMOTE|\-):REMOTE_DATASTORE:LOCAL_DATASTORE:(?:LOCAL_NS_ANCHOR:)ACTUAL_JOB_ID'
pub SYNC_JOB_WORKER_ID_REGEX = concatcp!(r"^(", PROXMOX_SAFE_ID_REGEX_STR, r"|\-):(", PROXMOX_SAFE_ID_REGEX_STR, r"):(", PROXMOX_SAFE_ID_REGEX_STR, r")(?::(", BACKUP_NS_RE, r"))?:");
}
pub const JOB_ID_SCHEMA: Schema = StringSchema::new("Job ID.")
.format(&PROXMOX_SAFE_ID_FORMAT)
.min_length(3)
.max_length(32)
.schema();
pub const SYNC_SCHEDULE_SCHEMA: Schema = StringSchema::new("Run sync job at specified schedule.")
.format(&ApiStringFormat::VerifyFn(
proxmox_time::verify_calendar_event,
))
.type_text("<calendar-event>")
.schema();
pub const GC_SCHEDULE_SCHEMA: Schema =
StringSchema::new("Run garbage collection job at specified schedule.")
.format(&ApiStringFormat::VerifyFn(
proxmox_time::verify_calendar_event,
))
.type_text("<calendar-event>")
.schema();
pub const PRUNE_SCHEDULE_SCHEMA: Schema = StringSchema::new("Run prune job at specified schedule.")
.format(&ApiStringFormat::VerifyFn(
proxmox_time::verify_calendar_event,
))
.type_text("<calendar-event>")
.schema();
pub const VERIFICATION_SCHEDULE_SCHEMA: Schema =
StringSchema::new("Run verify job at specified schedule.")
.format(&ApiStringFormat::VerifyFn(
proxmox_time::verify_calendar_event,
))
.type_text("<calendar-event>")
.schema();
pub const REMOVE_VANISHED_BACKUPS_SCHEMA: Schema = BooleanSchema::new(
"Delete vanished backups. This remove the local copy if the remote backup was deleted.",
)
.default(false)
.schema();
#[api(
properties: {
"next-run": {
description: "Estimated time of the next run (UNIX epoch).",
optional: true,
type: Integer,
},
"last-run-state": {
description: "Result of the last run.",
optional: true,
type: String,
},
"last-run-upid": {
description: "Task UPID of the last run.",
optional: true,
type: String,
},
"last-run-endtime": {
description: "Endtime of the last run.",
optional: true,
type: Integer,
},
}
)]
#[derive(Serialize, Deserialize, Default, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Job Scheduling Status
pub struct JobScheduleStatus {
#[serde(skip_serializing_if = "Option::is_none")]
pub next_run: Option<i64>,
#[serde(skip_serializing_if = "Option::is_none")]
pub last_run_state: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub last_run_upid: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub last_run_endtime: Option<i64>,
}
#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// When do we send notifications
pub enum Notify {
/// Never send notification
Never,
/// Send notifications for failed and successful jobs
Always,
/// Send notifications for failed jobs only
Error,
}
#[api(
properties: {
gc: {
type: Notify,
optional: true,
},
verify: {
type: Notify,
optional: true,
},
sync: {
type: Notify,
optional: true,
},
prune: {
type: Notify,
optional: true,
},
},
)]
#[derive(Debug, Serialize, Deserialize)]
/// Datastore notify settings
pub struct DatastoreNotify {
/// Garbage collection settings
#[serde(skip_serializing_if = "Option::is_none")]
pub gc: Option<Notify>,
/// Verify job setting
#[serde(skip_serializing_if = "Option::is_none")]
pub verify: Option<Notify>,
/// Sync job setting
#[serde(skip_serializing_if = "Option::is_none")]
pub sync: Option<Notify>,
/// Prune job setting
#[serde(skip_serializing_if = "Option::is_none")]
pub prune: Option<Notify>,
}
pub const DATASTORE_NOTIFY_STRING_SCHEMA: Schema = StringSchema::new(
"Datastore notification setting, enum can be one of 'always', 'never', or 'error'.",
)
.format(&ApiStringFormat::PropertyString(
&DatastoreNotify::API_SCHEMA,
))
.schema();
pub const IGNORE_VERIFIED_BACKUPS_SCHEMA: Schema = BooleanSchema::new(
"Do not verify backups that are already verified if their verification is not outdated.",
)
.default(true)
.schema();
pub const VERIFICATION_OUTDATED_AFTER_SCHEMA: Schema =
IntegerSchema::new("Days after that a verification becomes outdated. (0 is deprecated)'")
.minimum(0)
.schema();
#[api(
properties: {
id: {
schema: JOB_ID_SCHEMA,
},
store: {
schema: DATASTORE_SCHEMA,
},
"ignore-verified": {
optional: true,
schema: IGNORE_VERIFIED_BACKUPS_SCHEMA,
},
"outdated-after": {
optional: true,
schema: VERIFICATION_OUTDATED_AFTER_SCHEMA,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
schedule: {
optional: true,
schema: VERIFICATION_SCHEDULE_SCHEMA,
},
ns: {
optional: true,
schema: BACKUP_NAMESPACE_SCHEMA,
},
"max-depth": {
optional: true,
schema: crate::NS_MAX_DEPTH_SCHEMA,
},
}
)]
#[derive(Serialize, Deserialize, Updater, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Verification Job
pub struct VerificationJobConfig {
/// unique ID to address this job
#[updater(skip)]
pub id: String,
/// the datastore ID this verification job affects
pub store: String,
#[serde(skip_serializing_if = "Option::is_none")]
/// Unless set to false, check the age of the last snapshot verification to filter
/// out recent ones, depending on the 'outdated_after' configuration.
pub ignore_verified: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")]
/// Reverify snapshots after X days, never if 0. Ignored if 'ignore_verified' is false.
pub outdated_after: Option<i64>,
#[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
/// when to schedule this job in calendar event notation
pub schedule: Option<String>,
#[serde(skip_serializing_if = "Option::is_none", default)]
/// on which backup namespace to run the verification recursively
pub ns: Option<BackupNamespace>,
#[serde(skip_serializing_if = "Option::is_none", default)]
/// how deep the verify should go from the `ns` level downwards. Passing 0 verifies only the
/// snapshots on the same level as the passed `ns`, or the datastore root if none.
pub max_depth: Option<usize>,
}
impl VerificationJobConfig {
pub fn acl_path(&self) -> Vec<&str> {
match self.ns.as_ref() {
Some(ns) => ns.acl_path(&self.store),
None => vec!["datastore", &self.store],
}
}
}
#[api(
properties: {
config: {
type: VerificationJobConfig,
},
status: {
type: JobScheduleStatus,
},
},
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Status of Verification Job
pub struct VerificationJobStatus {
#[serde(flatten)]
pub config: VerificationJobConfig,
#[serde(flatten)]
pub status: JobScheduleStatus,
}
#[api(
properties: {
store: {
schema: DATASTORE_SCHEMA,
},
pool: {
schema: MEDIA_POOL_NAME_SCHEMA,
},
drive: {
schema: DRIVE_NAME_SCHEMA,
},
"eject-media": {
description: "Eject media upon job completion.",
type: bool,
optional: true,
},
"export-media-set": {
description: "Export media set upon job completion.",
type: bool,
optional: true,
},
"latest-only": {
description: "Backup latest snapshots only.",
type: bool,
optional: true,
},
"notify-user": {
optional: true,
type: Userid,
},
"group-filter": {
schema: GROUP_FILTER_LIST_SCHEMA,
optional: true,
},
ns: {
type: BackupNamespace,
optional: true,
},
"max-depth": {
schema: crate::NS_MAX_DEPTH_SCHEMA,
optional: true,
},
}
)]
#[derive(Serialize, Deserialize, Clone, Updater, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Tape Backup Job Setup
pub struct TapeBackupJobSetup {
pub store: String,
pub pool: String,
pub drive: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub eject_media: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")]
pub export_media_set: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")]
pub latest_only: Option<bool>,
/// Send job email notification to this user
#[serde(skip_serializing_if = "Option::is_none")]
pub notify_user: Option<Userid>,
#[serde(skip_serializing_if = "Option::is_none")]
pub notification_mode: Option<NotificationMode>,
#[serde(skip_serializing_if = "Option::is_none")]
pub group_filter: Option<Vec<GroupFilter>>,
#[serde(skip_serializing_if = "Option::is_none", default)]
pub ns: Option<BackupNamespace>,
#[serde(skip_serializing_if = "Option::is_none", default)]
pub max_depth: Option<usize>,
}
#[api(
properties: {
id: {
schema: JOB_ID_SCHEMA,
},
setup: {
type: TapeBackupJobSetup,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
schedule: {
optional: true,
schema: SYNC_SCHEDULE_SCHEMA,
},
}
)]
#[derive(Serialize, Deserialize, Clone, Updater, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Tape Backup Job
pub struct TapeBackupJobConfig {
#[updater(skip)]
pub id: String,
#[serde(flatten)]
pub setup: TapeBackupJobSetup,
#[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub schedule: Option<String>,
}
#[api(
properties: {
config: {
type: TapeBackupJobConfig,
},
status: {
type: JobScheduleStatus,
},
},
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Status of Tape Backup Job
pub struct TapeBackupJobStatus {
#[serde(flatten)]
pub config: TapeBackupJobConfig,
#[serde(flatten)]
pub status: JobScheduleStatus,
/// Next tape used (best guess)
#[serde(skip_serializing_if = "Option::is_none")]
pub next_media_label: Option<String>,
}
#[derive(Clone, Debug)]
/// Filter for matching `BackupGroup`s, for use with `BackupGroup::filter`.
pub enum FilterType {
/// BackupGroup type - either `vm`, `ct`, or `host`.
BackupType(BackupType),
/// Full identifier of BackupGroup, including type
Group(String),
/// A regular expression matched against the full identifier of the BackupGroup
Regex(Regex),
}
impl PartialEq for FilterType {
fn eq(&self, other: &Self) -> bool {
match (self, other) {
(Self::BackupType(a), Self::BackupType(b)) => a == b,
(Self::Group(a), Self::Group(b)) => a == b,
(Self::Regex(a), Self::Regex(b)) => a.as_str() == b.as_str(),
_ => false,
}
}
}
impl std::str::FromStr for FilterType {
type Err = anyhow::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
Ok(match s.split_once(':') {
Some(("group", value)) => BACKUP_GROUP_SCHEMA.parse_simple_value(value).map(|_| FilterType::Group(value.to_string()))?,
Some(("type", value)) => FilterType::BackupType(value.parse()?),
Some(("regex", value)) => FilterType::Regex(Regex::new(value)?),
Some((ty, _value)) => bail!("expected 'group', 'type' or 'regex' prefix, got '{}'", ty),
None => bail!("input doesn't match expected format '<group:GROUP||type:<vm|ct|host>|regex:REGEX>'"),
})
}
}
// used for serializing below, caution!
impl std::fmt::Display for FilterType {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
FilterType::BackupType(backup_type) => write!(f, "type:{}", backup_type),
FilterType::Group(backup_group) => write!(f, "group:{}", backup_group),
FilterType::Regex(regex) => write!(f, "regex:{}", regex.as_str()),
}
}
}
#[derive(Clone, Debug)]
pub struct GroupFilter {
pub is_exclude: bool,
pub filter_type: FilterType,
}
impl PartialEq for GroupFilter {
fn eq(&self, other: &Self) -> bool {
self.filter_type == other.filter_type && self.is_exclude == other.is_exclude
}
}
impl Eq for GroupFilter {}
impl std::str::FromStr for GroupFilter {
type Err = anyhow::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let (is_exclude, type_str) = match s.split_once(':') {
Some(("include", value)) => (false, value),
Some(("exclude", value)) => (true, value),
_ => (false, s),
};
Ok(GroupFilter {
is_exclude,
filter_type: type_str.parse()?,
})
}
}
// used for serializing below, caution!
impl std::fmt::Display for GroupFilter {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
if self.is_exclude {
f.write_str("exclude:")?;
}
std::fmt::Display::fmt(&self.filter_type, f)
}
}
proxmox_serde::forward_deserialize_to_from_str!(GroupFilter);
proxmox_serde::forward_serialize_to_display!(GroupFilter);
fn verify_group_filter(input: &str) -> Result<(), anyhow::Error> {
GroupFilter::from_str(input).map(|_| ())
}
pub const GROUP_FILTER_SCHEMA: Schema = StringSchema::new(
"Group filter based on group identifier ('group:GROUP'), group type ('type:<vm|ct|host>'), or regex ('regex:RE'). Can be inverted by prepending 'exclude:'.")
.format(&ApiStringFormat::VerifyFn(verify_group_filter))
.type_text("[<exclude:|include:>]<type:<vm|ct|host>|group:GROUP|regex:RE>")
.schema();
pub const GROUP_FILTER_LIST_SCHEMA: Schema =
ArraySchema::new("List of group filters.", &GROUP_FILTER_SCHEMA).schema();
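// Editor's illustrative sketch (not part of the original file): parsing the
// filter syntax documented by GROUP_FILTER_SCHEMA. `BackupType` comes from the
// datastore types re-exported at the crate root; everything else is defined above.
#[cfg(test)]
mod group_filter_example {
    use super::*;
    use crate::BackupType;

    #[test]
    fn parse_group_filters() {
        // A bare filter is an include filter; "type:" selects by backup type.
        let f: GroupFilter = "type:vm".parse().expect("valid filter");
        assert!(!f.is_exclude);
        assert!(matches!(f.filter_type, FilterType::BackupType(BackupType::Vm)));

        // Prepending "exclude:" inverts the filter; the remainder parses as before.
        let f: GroupFilter = "exclude:regex:^vm/10[0-9]$".parse().expect("valid filter");
        assert!(f.is_exclude);
        assert!(matches!(f.filter_type, FilterType::Regex(_)));

        // `Display` writes the same canonical form back out.
        assert_eq!(f.to_string(), "exclude:regex:^vm/10[0-9]$");
    }
}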
pub const TRANSFER_LAST_SCHEMA: Schema =
IntegerSchema::new("Limit transfer to last N snapshots (per group), skipping others")
.minimum(1)
.schema();
#[api(
properties: {
id: {
schema: JOB_ID_SCHEMA,
},
store: {
schema: DATASTORE_SCHEMA,
},
ns: {
type: BackupNamespace,
optional: true,
},
"owner": {
type: Authid,
optional: true,
},
remote: {
schema: REMOTE_ID_SCHEMA,
optional: true,
},
"remote-store": {
schema: DATASTORE_SCHEMA,
},
"remote-ns": {
type: BackupNamespace,
optional: true,
},
"remove-vanished": {
schema: REMOVE_VANISHED_BACKUPS_SCHEMA,
optional: true,
},
"max-depth": {
schema: NS_MAX_DEPTH_REDUCED_SCHEMA,
optional: true,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
limit: {
type: RateLimitConfig,
},
schedule: {
optional: true,
schema: SYNC_SCHEDULE_SCHEMA,
},
"group-filter": {
schema: GROUP_FILTER_LIST_SCHEMA,
optional: true,
},
"transfer-last": {
schema: TRANSFER_LAST_SCHEMA,
optional: true,
},
}
)]
#[derive(Serialize, Deserialize, Clone, Updater, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Sync Job
pub struct SyncJobConfig {
#[updater(skip)]
pub id: String,
pub store: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub ns: Option<BackupNamespace>,
#[serde(skip_serializing_if = "Option::is_none")]
pub owner: Option<Authid>,
#[serde(skip_serializing_if = "Option::is_none")]
/// None implies local sync.
pub remote: Option<String>,
pub remote_store: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub remote_ns: Option<BackupNamespace>,
#[serde(skip_serializing_if = "Option::is_none")]
pub remove_vanished: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")]
pub max_depth: Option<usize>,
#[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub schedule: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub group_filter: Option<Vec<GroupFilter>>,
#[serde(flatten)]
pub limit: RateLimitConfig,
#[serde(skip_serializing_if = "Option::is_none")]
pub transfer_last: Option<usize>,
}
impl SyncJobConfig {
pub fn acl_path(&self) -> Vec<&str> {
match self.ns.as_ref() {
Some(ns) => ns.acl_path(&self.store),
None => vec!["datastore", &self.store],
}
}
}
#[api(
properties: {
config: {
type: SyncJobConfig,
},
status: {
type: JobScheduleStatus,
},
},
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Status of Sync Job
pub struct SyncJobStatus {
#[serde(flatten)]
pub config: SyncJobConfig,
#[serde(flatten)]
pub status: JobScheduleStatus,
}
/// These options are sometimes used in the API without `ns`/`max-depth`, specifically in the
/// API call to prune a specific group, where `max-depth` makes no sense.
#[api(
properties: {
"keep-last": {
schema: crate::PRUNE_SCHEMA_KEEP_LAST,
optional: true,
},
"keep-hourly": {
schema: crate::PRUNE_SCHEMA_KEEP_HOURLY,
optional: true,
},
"keep-daily": {
schema: crate::PRUNE_SCHEMA_KEEP_DAILY,
optional: true,
},
"keep-weekly": {
schema: crate::PRUNE_SCHEMA_KEEP_WEEKLY,
optional: true,
},
"keep-monthly": {
schema: crate::PRUNE_SCHEMA_KEEP_MONTHLY,
optional: true,
},
"keep-yearly": {
schema: crate::PRUNE_SCHEMA_KEEP_YEARLY,
optional: true,
},
}
)]
#[derive(Serialize, Deserialize, Default, Updater, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Common pruning options
pub struct KeepOptions {
#[serde(skip_serializing_if = "Option::is_none")]
pub keep_last: Option<u64>,
#[serde(skip_serializing_if = "Option::is_none")]
pub keep_hourly: Option<u64>,
#[serde(skip_serializing_if = "Option::is_none")]
pub keep_daily: Option<u64>,
#[serde(skip_serializing_if = "Option::is_none")]
pub keep_weekly: Option<u64>,
#[serde(skip_serializing_if = "Option::is_none")]
pub keep_monthly: Option<u64>,
#[serde(skip_serializing_if = "Option::is_none")]
pub keep_yearly: Option<u64>,
}
impl KeepOptions {
pub fn keeps_something(&self) -> bool {
self.keep_last.unwrap_or(0)
+ self.keep_hourly.unwrap_or(0)
+ self.keep_daily.unwrap_or(0)
+ self.keep_weekly.unwrap_or(0)
+ self.keep_monthly.unwrap_or(0)
+ self.keep_yearly.unwrap_or(0)
> 0
}
}
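// Editor's illustrative sketch (not part of the original file): `keeps_something`
// is true as soon as any keep-* option is set to a non-zero value.
#[cfg(test)]
mod keep_options_example {
    use super::*;

    #[test]
    fn keeps_something_checks_all_options() {
        // With all options unset, nothing is kept and everything may be pruned.
        assert!(!KeepOptions::default().keeps_something());

        // Setting any single option to a value > 0 flips the result.
        let opts = KeepOptions {
            keep_last: Some(3),
            ..Default::default()
        };
        assert!(opts.keeps_something());
    }
}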
#[api(
properties: {
keep: {
type: KeepOptions,
},
ns: {
type: BackupNamespace,
optional: true,
},
"max-depth": {
schema: NS_MAX_DEPTH_REDUCED_SCHEMA,
optional: true,
},
}
)]
#[derive(Serialize, Deserialize, Default, Updater, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Common pruning options
pub struct PruneJobOptions {
#[serde(flatten)]
pub keep: KeepOptions,
/// The (optional) recursion depth
#[serde(skip_serializing_if = "Option::is_none")]
pub max_depth: Option<usize>,
#[serde(skip_serializing_if = "Option::is_none")]
pub ns: Option<BackupNamespace>,
}
impl PruneJobOptions {
pub fn keeps_something(&self) -> bool {
self.keep.keeps_something()
}
pub fn acl_path<'a>(&'a self, store: &'a str) -> Vec<&'a str> {
match &self.ns {
Some(ns) => ns.acl_path(store),
None => vec!["datastore", store],
}
}
}
#[api(
properties: {
disable: {
type: Boolean,
optional: true,
default: false,
},
id: {
schema: JOB_ID_SCHEMA,
},
store: {
schema: DATASTORE_SCHEMA,
},
schedule: {
schema: PRUNE_SCHEDULE_SCHEMA,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
options: {
type: PruneJobOptions,
},
},
)]
#[derive(Deserialize, Serialize, Updater, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Prune configuration.
pub struct PruneJobConfig {
/// unique ID to address this job
#[updater(skip)]
pub id: String,
pub store: String,
/// Disable this job.
#[serde(default, skip_serializing_if = "is_false")]
#[updater(serde(skip_serializing_if = "Option::is_none"))]
pub disable: bool,
pub schedule: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>,
#[serde(flatten)]
pub options: PruneJobOptions,
}
impl PruneJobConfig {
pub fn acl_path(&self) -> Vec<&str> {
self.options.acl_path(&self.store)
}
}
fn is_false(b: &bool) -> bool {
!b
}
#[api(
properties: {
config: {
type: PruneJobConfig,
},
status: {
type: JobScheduleStatus,
},
},
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Status of prune job
pub struct PruneJobStatus {
#[serde(flatten)]
pub config: PruneJobConfig,
#[serde(flatten)]
pub status: JobScheduleStatus,
}

View File

@ -0,0 +1,55 @@
use serde::{Deserialize, Serialize};
use proxmox_schema::api;
use crate::CERT_FINGERPRINT_SHA256_SCHEMA;
#[api(default: "scrypt")]
#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
#[serde(rename_all = "lowercase")]
/// Key derivation function for password protected encryption keys.
pub enum Kdf {
/// Do not encrypt the key.
None,
/// Encrypt the key with a password using SCrypt.
Scrypt,
/// Encrypt the key with a password using PBKDF2.
PBKDF2,
}
impl Default for Kdf {
#[inline]
fn default() -> Self {
Kdf::Scrypt
}
}
#[api(
properties: {
kdf: {
type: Kdf,
},
fingerprint: {
schema: CERT_FINGERPRINT_SHA256_SCHEMA,
optional: true,
},
},
)]
#[derive(Deserialize, Serialize)]
/// Encryption Key Information
pub struct KeyInfo {
/// Path to key (if stored in a file)
#[serde(skip_serializing_if = "Option::is_none")]
pub path: Option<String>,
pub kdf: Kdf,
/// Key creation time
pub created: i64,
/// Key modification time
pub modified: i64,
/// Key fingerprint
#[serde(skip_serializing_if = "Option::is_none")]
pub fingerprint: Option<String>,
/// Password hint
#[serde(skip_serializing_if = "Option::is_none")]
pub hint: Option<String>,
}

208
pbs-api-types/src/ldap.rs Normal file
View File

@ -0,0 +1,208 @@
use serde::{Deserialize, Serialize};
use proxmox_schema::{api, ApiStringFormat, ApiType, ArraySchema, Schema, StringSchema, Updater};
use super::{REALM_ID_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA};
#[api()]
#[derive(Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
/// LDAP connection type
pub enum LdapMode {
/// Plaintext LDAP connection
#[serde(rename = "ldap")]
#[default]
Ldap,
/// Secure STARTTLS connection
#[serde(rename = "ldap+starttls")]
StartTls,
/// Secure LDAPS connection
#[serde(rename = "ldaps")]
Ldaps,
}
#[api(
properties: {
"realm": {
schema: REALM_ID_SCHEMA,
},
"comment": {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
"verify": {
optional: true,
default: false,
},
"sync-defaults-options": {
schema: SYNC_DEFAULTS_STRING_SCHEMA,
optional: true,
},
"sync-attributes": {
schema: SYNC_ATTRIBUTES_SCHEMA,
optional: true,
},
"user-classes" : {
optional: true,
schema: USER_CLASSES_SCHEMA,
},
"base-dn" : {
schema: LDAP_DOMAIN_SCHEMA,
},
"bind-dn" : {
schema: LDAP_DOMAIN_SCHEMA,
optional: true,
}
},
)]
#[derive(Serialize, Deserialize, Updater, Clone)]
#[serde(rename_all = "kebab-case")]
/// LDAP configuration properties.
pub struct LdapRealmConfig {
#[updater(skip)]
pub realm: String,
/// LDAP server address
pub server1: String,
/// Fallback LDAP server address
#[serde(skip_serializing_if = "Option::is_none")]
pub server2: Option<String>,
/// Port
#[serde(skip_serializing_if = "Option::is_none")]
pub port: Option<u16>,
/// Base domain name. Users are searched under this domain using a `subtree search`.
pub base_dn: String,
/// Username attribute. Used to map a ``userid`` to an LDAP ``dn``.
pub user_attr: String,
/// Comment
#[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>,
/// Connection security
#[serde(skip_serializing_if = "Option::is_none")]
pub mode: Option<LdapMode>,
/// Verify server certificate
#[serde(skip_serializing_if = "Option::is_none")]
pub verify: Option<bool>,
/// CA certificate to use for the server. The path can point to
/// either a file, or a directory. If it points to a file,
/// the PEM-formatted X.509 certificate stored at the path
/// will be added as a trusted certificate.
/// If the path points to a directory,
/// the directory replaces the system's default certificate
/// store at `/etc/ssl/certs` - Every file in the directory
/// will be loaded as a trusted certificate.
#[serde(skip_serializing_if = "Option::is_none")]
pub capath: Option<String>,
/// Bind domain to use for looking up users
#[serde(skip_serializing_if = "Option::is_none")]
pub bind_dn: Option<String>,
/// Custom LDAP search filter for user sync
#[serde(skip_serializing_if = "Option::is_none")]
pub filter: Option<String>,
/// Default options for LDAP sync
#[serde(skip_serializing_if = "Option::is_none")]
pub sync_defaults_options: Option<String>,
/// List of attributes to sync from LDAP to user config
#[serde(skip_serializing_if = "Option::is_none")]
pub sync_attributes: Option<String>,
/// User ``objectClass`` classes to sync
#[serde(skip_serializing_if = "Option::is_none")]
pub user_classes: Option<String>,
}
#[api(
properties: {
"remove-vanished": {
optional: true,
schema: REMOVE_VANISHED_SCHEMA,
},
},
)]
#[derive(Serialize, Deserialize, Updater, Default, Debug)]
#[serde(rename_all = "kebab-case")]
/// Default options for LDAP synchronization runs
pub struct SyncDefaultsOptions {
/// How to handle vanished properties/users
pub remove_vanished: Option<String>,
/// Enable new users after sync
pub enable_new: Option<bool>,
}
#[api()]
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
#[serde(rename_all = "kebab-case")]
/// remove-vanished options
pub enum RemoveVanished {
/// Delete ACLs for vanished users
Acl,
/// Remove vanished users
Entry,
/// Remove vanished properties from users (e.g. email)
Properties,
}
pub const LDAP_DOMAIN_SCHEMA: Schema = StringSchema::new("LDAP Domain").schema();
pub const SYNC_DEFAULTS_STRING_SCHEMA: Schema = StringSchema::new("sync defaults options")
.format(&ApiStringFormat::PropertyString(
&SyncDefaultsOptions::API_SCHEMA,
))
.schema();
const REMOVE_VANISHED_DESCRIPTION: &str =
"A semicolon-seperated list of things to remove when they or the user \
vanishes during user synchronization. The following values are possible: ``entry`` removes the \
user when not returned from the sync; ``properties`` removes any \
properties on existing user that do not appear in the source. \
``acl`` removes ACLs when the user is not returned from the sync.";
pub const REMOVE_VANISHED_SCHEMA: Schema = StringSchema::new(REMOVE_VANISHED_DESCRIPTION)
.format(&ApiStringFormat::PropertyString(&REMOVE_VANISHED_ARRAY))
.schema();
pub const REMOVE_VANISHED_ARRAY: Schema = ArraySchema::new(
"Array of remove-vanished options",
&RemoveVanished::API_SCHEMA,
)
.min_length(1)
.schema();
#[api()]
#[derive(Serialize, Deserialize, Updater, Default, Debug)]
#[serde(rename_all = "kebab-case")]
/// Determine which LDAP attributes should be synced to which user attributes
pub struct SyncAttributes {
/// Name of the LDAP attribute containing the user's email address
pub email: Option<String>,
/// Name of the LDAP attribute containing the user's first name
pub firstname: Option<String>,
/// Name of the LDAP attribute containing the user's last name
pub lastname: Option<String>,
}
const SYNC_ATTRIBUTES_TEXT: &str = "Comma-separated list of key=value pairs for specifying \
which LDAP attributes map to which PBS user field. For example, \
to map the LDAP attribute ``mail`` to PBS's ``email``, write \
``email=mail``.";
pub const SYNC_ATTRIBUTES_SCHEMA: Schema = StringSchema::new(SYNC_ATTRIBUTES_TEXT)
.format(&ApiStringFormat::PropertyString(
&SyncAttributes::API_SCHEMA,
))
.schema();
pub const USER_CLASSES_ARRAY: Schema = ArraySchema::new(
"Array of user classes",
&StringSchema::new("user class").schema(),
)
.min_length(1)
.schema();
const USER_CLASSES_TEXT: &str = "Comma-separated list of allowed objectClass values for \
user synchronization. For instance, if ``user-classes`` is set to ``person,user``, \
then user synchronization will consider all LDAP entities \
where ``objectClass: person`` `or` ``objectClass: user``.";
pub const USER_CLASSES_SCHEMA: Schema = StringSchema::new(USER_CLASSES_TEXT)
.format(&ApiStringFormat::PropertyString(&USER_CLASSES_ARRAY))
.default("inetorgperson,posixaccount,person,user")
.schema();

417
pbs-api-types/src/lib.rs Normal file
View File

@ -0,0 +1,417 @@
//! Basic API types used by most of the PBS code.
use const_format::concatcp;
use serde::{Deserialize, Serialize};
pub mod percent_encoding;
use proxmox_schema::{
api, const_regex, ApiStringFormat, ApiType, ArraySchema, ReturnType, Schema, StringSchema,
};
use proxmox_time::parse_daily_duration;
use proxmox_auth_api::types::{APITOKEN_ID_REGEX_STR, USER_ID_REGEX_STR};
pub use proxmox_schema::api_types::SAFE_ID_FORMAT as PROXMOX_SAFE_ID_FORMAT;
pub use proxmox_schema::api_types::SAFE_ID_REGEX as PROXMOX_SAFE_ID_REGEX;
pub use proxmox_schema::api_types::SAFE_ID_REGEX_STR as PROXMOX_SAFE_ID_REGEX_STR;
pub use proxmox_schema::api_types::{
BLOCKDEVICE_DISK_AND_PARTITION_NAME_REGEX, BLOCKDEVICE_NAME_REGEX,
};
pub use proxmox_schema::api_types::{DNS_ALIAS_REGEX, DNS_NAME_OR_IP_REGEX, DNS_NAME_REGEX};
pub use proxmox_schema::api_types::{FINGERPRINT_SHA256_REGEX, SHA256_HEX_REGEX};
pub use proxmox_schema::api_types::{
GENERIC_URI_REGEX, HOSTNAME_REGEX, HOST_PORT_REGEX, HTTP_URL_REGEX,
};
pub use proxmox_schema::api_types::{MULTI_LINE_COMMENT_REGEX, SINGLE_LINE_COMMENT_REGEX};
pub use proxmox_schema::api_types::{PASSWORD_REGEX, SYSTEMD_DATETIME_REGEX, UUID_REGEX};
pub use proxmox_schema::api_types::{CIDR_FORMAT, CIDR_REGEX};
pub use proxmox_schema::api_types::{CIDR_V4_FORMAT, CIDR_V4_REGEX};
pub use proxmox_schema::api_types::{CIDR_V6_FORMAT, CIDR_V6_REGEX};
pub use proxmox_schema::api_types::{IPRE_STR, IP_FORMAT, IP_REGEX};
pub use proxmox_schema::api_types::{IPV4RE_STR, IP_V4_FORMAT, IP_V4_REGEX};
pub use proxmox_schema::api_types::{IPV6RE_STR, IP_V6_FORMAT, IP_V6_REGEX};
pub use proxmox_schema::api_types::COMMENT_SCHEMA as SINGLE_LINE_COMMENT_SCHEMA;
pub use proxmox_schema::api_types::HOSTNAME_SCHEMA;
pub use proxmox_schema::api_types::HOST_PORT_SCHEMA;
pub use proxmox_schema::api_types::HTTP_URL_SCHEMA;
pub use proxmox_schema::api_types::MULTI_LINE_COMMENT_SCHEMA;
pub use proxmox_schema::api_types::NODE_SCHEMA;
pub use proxmox_schema::api_types::SINGLE_LINE_COMMENT_FORMAT;
pub use proxmox_schema::api_types::{
BLOCKDEVICE_DISK_AND_PARTITION_NAME_SCHEMA, BLOCKDEVICE_NAME_SCHEMA,
};
pub use proxmox_schema::api_types::{CERT_FINGERPRINT_SHA256_SCHEMA, FINGERPRINT_SHA256_FORMAT};
pub use proxmox_schema::api_types::{DISK_ARRAY_SCHEMA, DISK_LIST_SCHEMA};
pub use proxmox_schema::api_types::{DNS_ALIAS_FORMAT, DNS_NAME_FORMAT, DNS_NAME_OR_IP_SCHEMA};
pub use proxmox_schema::api_types::{PASSWORD_FORMAT, PASSWORD_SCHEMA};
pub use proxmox_schema::api_types::{SERVICE_ID_SCHEMA, UUID_FORMAT};
pub use proxmox_schema::api_types::{SYSTEMD_DATETIME_FORMAT, TIME_ZONE_SCHEMA};
use proxmox_schema::api_types::{DNS_NAME_STR, IPRE_BRACKET_STR};
#[rustfmt::skip]
pub const BACKUP_ID_RE: &str = r"[A-Za-z0-9_][A-Za-z0-9._\-]*";
#[rustfmt::skip]
pub const BACKUP_TYPE_RE: &str = r"(?:host|vm|ct)";
#[rustfmt::skip]
pub const BACKUP_TIME_RE: &str = r"[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z";
#[rustfmt::skip]
pub const BACKUP_NS_RE: &str =
concatcp!("(?:",
"(?:", PROXMOX_SAFE_ID_REGEX_STR, r"/){0,7}", PROXMOX_SAFE_ID_REGEX_STR,
")?");
#[rustfmt::skip]
pub const BACKUP_NS_PATH_RE: &str =
concatcp!(r"(?:ns/", PROXMOX_SAFE_ID_REGEX_STR, r"/){0,7}ns/", PROXMOX_SAFE_ID_REGEX_STR, r"/");
#[rustfmt::skip]
pub const SNAPSHOT_PATH_REGEX_STR: &str =
concatcp!(
r"(", BACKUP_TYPE_RE, ")/(", BACKUP_ID_RE, ")/(", BACKUP_TIME_RE, r")",
);
#[rustfmt::skip]
pub const GROUP_OR_SNAPSHOT_PATH_REGEX_STR: &str =
concatcp!(
r"(", BACKUP_TYPE_RE, ")/(", BACKUP_ID_RE, ")(?:/(", BACKUP_TIME_RE, r"))?",
);
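// Editor's illustrative sketch (not part of the original file): the regex
// fragments above compose into a full snapshot path of the form
// "<type>/<id>/<RFC 3339 time>". The `regex` crate is already a dependency of
// this package.
#[cfg(test)]
mod snapshot_path_regex_example {
    use super::*;

    #[test]
    fn snapshot_path_matches() {
        let re = regex::Regex::new(&format!("^{}$", SNAPSHOT_PATH_REGEX_STR)).unwrap();
        assert!(re.is_match("vm/100/2023-01-01T00:00:00Z"));
        assert!(!re.is_match("vm/100")); // a group path alone is not a snapshot path
    }
}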
mod acl;
pub use acl::*;
mod datastore;
pub use datastore::*;
mod jobs;
pub use jobs::*;
mod key_derivation;
pub use key_derivation::{Kdf, KeyInfo};
mod maintenance;
pub use maintenance::*;
mod network;
pub use network::*;
mod node;
pub use node::*;
pub use proxmox_auth_api::types as userid;
pub use proxmox_auth_api::types::{Authid, Userid};
pub use proxmox_auth_api::types::{Realm, RealmRef};
pub use proxmox_auth_api::types::{Tokenname, TokennameRef};
pub use proxmox_auth_api::types::{Username, UsernameRef};
pub use proxmox_auth_api::types::{
PROXMOX_GROUP_ID_SCHEMA, PROXMOX_TOKEN_ID_SCHEMA, PROXMOX_TOKEN_NAME_SCHEMA,
};
#[macro_use]
mod user;
pub use user::*;
pub use proxmox_schema::upid::*;
mod crypto;
pub use crypto::{bytes_as_fingerprint, CryptMode, Fingerprint};
pub mod file_restore;
mod openid;
pub use openid::*;
mod ldap;
pub use ldap::*;
mod ad;
pub use ad::*;
mod remote;
pub use remote::*;
mod tape;
pub use tape::*;
mod traffic_control;
pub use traffic_control::*;
mod zfs;
pub use zfs::*;
mod metrics;
pub use metrics::*;
const_regex! {
// just a rough check - dummy acceptor is used before persisting
pub OPENSSL_CIPHERS_REGEX = r"^[0-9A-Za-z_:, +!\-@=.]+$";
pub BACKUP_REPO_URL_REGEX = concatcp!(
r"^^(?:(?:(",
USER_ID_REGEX_STR, "|", APITOKEN_ID_REGEX_STR,
")@)?(",
DNS_NAME_STR, "|", IPRE_BRACKET_STR,
"):)?(?:([0-9]{1,5}):)?(", PROXMOX_SAFE_ID_REGEX_STR, r")$"
);
pub SUBSCRIPTION_KEY_REGEX = concat!(r"^pbs(?:[cbsp])-[0-9a-f]{10}$");
}
pub const PVE_CONFIG_DIGEST_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SHA256_HEX_REGEX);
pub const SUBSCRIPTION_KEY_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&SUBSCRIPTION_KEY_REGEX);
pub const OPENSSL_CIPHERS_TLS_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&OPENSSL_CIPHERS_REGEX);
pub const DAILY_DURATION_FORMAT: ApiStringFormat =
ApiStringFormat::VerifyFn(|s| parse_daily_duration(s).map(drop));
pub const SEARCH_DOMAIN_SCHEMA: Schema =
StringSchema::new("Search domain for host-name lookup.").schema();
pub const FIRST_DNS_SERVER_SCHEMA: Schema = StringSchema::new("First name server IP address.")
.format(&IP_FORMAT)
.schema();
pub const SECOND_DNS_SERVER_SCHEMA: Schema = StringSchema::new("Second name server IP address.")
.format(&IP_FORMAT)
.schema();
pub const THIRD_DNS_SERVER_SCHEMA: Schema = StringSchema::new("Third name server IP address.")
.format(&IP_FORMAT)
.schema();
pub const OPENSSL_CIPHERS_TLS_1_2_SCHEMA: Schema =
StringSchema::new("OpenSSL cipher list used by the proxy for TLS <= 1.2")
.format(&OPENSSL_CIPHERS_TLS_FORMAT)
.schema();
pub const OPENSSL_CIPHERS_TLS_1_3_SCHEMA: Schema =
StringSchema::new("OpenSSL ciphersuites list used by the proxy for TLS 1.3")
.format(&OPENSSL_CIPHERS_TLS_FORMAT)
.schema();
pub const PBS_PASSWORD_SCHEMA: Schema = StringSchema::new("User Password.")
.format(&PASSWORD_FORMAT)
.min_length(5)
.max_length(64)
.schema();
pub const REALM_ID_SCHEMA: Schema = StringSchema::new("Realm name.")
.format(&PROXMOX_SAFE_ID_FORMAT)
.min_length(2)
.max_length(32)
.schema();
pub const SUBSCRIPTION_KEY_SCHEMA: Schema =
StringSchema::new("Proxmox Backup Server subscription key.")
.format(&SUBSCRIPTION_KEY_FORMAT)
.min_length(15)
.max_length(16)
.schema();
pub const PROXMOX_CONFIG_DIGEST_SCHEMA: Schema = StringSchema::new(
"Prevent changes if current configuration file has different \
SHA256 digest. This can be used to prevent concurrent \
modifications.",
)
.format(&PVE_CONFIG_DIGEST_FORMAT)
.schema();
/// API schema format definition for repository URLs
pub const BACKUP_REPO_URL: ApiStringFormat = ApiStringFormat::Pattern(&BACKUP_REPO_URL_REGEX);
// Complex type definitions
#[api()]
#[derive(Default, Serialize, Deserialize)]
/// Storage space usage information.
pub struct StorageStatus {
/// Total space (bytes).
pub total: u64,
/// Used space (bytes).
pub used: u64,
/// Available space (bytes).
pub avail: u64,
}
pub const PASSWORD_HINT_SCHEMA: Schema = StringSchema::new("Password hint.")
.format(&SINGLE_LINE_COMMENT_FORMAT)
.min_length(1)
.max_length(64)
.schema();
#[api()]
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "PascalCase")]
/// Describes a package for which an update is available.
pub struct APTUpdateInfo {
/// Package name
pub package: String,
/// Package title
pub title: String,
/// Package architecture
pub arch: String,
/// Human readable package description
pub description: String,
/// New version to be updated to
pub version: String,
/// Old version currently installed
pub old_version: String,
/// Package origin
pub origin: String,
/// Package priority in human-readable form
pub priority: String,
/// Package section
pub section: String,
/// Custom extra field for additional package information
#[serde(skip_serializing_if = "Option::is_none")]
pub extra_info: Option<String>,
}
#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// Node Power command type.
pub enum NodePowerCommand {
/// Restart the server
Reboot,
/// Shutdown the server
Shutdown,
}
#[api()]
#[derive(Eq, PartialEq, Debug, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum TaskStateType {
/// Ok
OK,
/// Warning
Warning,
/// Error
Error,
/// Unknown
Unknown,
}
#[api(
properties: {
upid: { schema: UPID::API_SCHEMA },
},
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
/// Task properties.
pub struct TaskListItem {
pub upid: String,
/// The node name where the task is running on.
pub node: String,
/// The Unix PID
pub pid: i64,
/// The task start time (Epoch)
pub pstart: u64,
/// The task start time (Epoch)
pub starttime: i64,
/// Worker type (arbitrary ASCII string)
pub worker_type: String,
/// Worker ID (arbitrary ASCII string)
pub worker_id: Option<String>,
/// The authenticated entity who started the task
pub user: String,
/// The task end time (Epoch)
#[serde(skip_serializing_if = "Option::is_none")]
pub endtime: Option<i64>,
/// Task end status
#[serde(skip_serializing_if = "Option::is_none")]
pub status: Option<String>,
}
pub const NODE_TASKS_LIST_TASKS_RETURN_TYPE: ReturnType = ReturnType {
optional: false,
schema: &ArraySchema::new("A list of tasks.", &TaskListItem::API_SCHEMA).schema(),
};
#[api()]
#[derive(Copy, Clone, Serialize, Deserialize)]
#[serde(rename_all = "UPPERCASE")]
/// RRD consolidation mode
pub enum RRDMode {
/// Maximum
Max,
/// Average
Average,
}
#[api()]
#[derive(Copy, Clone, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// RRD time frame
pub enum RRDTimeFrame {
/// Hour
Hour,
/// Day
Day,
/// Week
Week,
/// Month
Month,
/// Year
Year,
/// Decade (10 years)
Decade,
}
#[api]
#[derive(Deserialize, Serialize, Copy, Clone, PartialEq, Eq)]
#[serde(rename_all = "lowercase")]
/// type of the realm
pub enum RealmType {
/// The PAM realm
Pam,
/// The PBS realm
Pbs,
/// An OpenID Connect realm
OpenId,
/// An LDAP realm
Ldap,
/// An Active Directory (AD) realm
Ad,
}
serde_plain::derive_display_from_serialize!(RealmType);
serde_plain::derive_fromstr_from_deserialize!(RealmType);
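// Editor's illustrative sketch (not part of the original file): the serde_plain
// derives above give `RealmType` a lowercase string form for display and parsing.
#[cfg(test)]
mod realm_type_example {
    use super::*;

    #[test]
    fn realm_type_string_roundtrip() {
        // `rename_all = "lowercase"` turns `OpenId` into "openid".
        assert_eq!(RealmType::OpenId.to_string(), "openid");
        assert!(matches!("ldap".parse::<RealmType>(), Ok(RealmType::Ldap)));
    }
}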
#[api(
properties: {
realm: {
schema: REALM_ID_SCHEMA,
},
"type": {
type: RealmType,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
},
)]
#[derive(Deserialize, Serialize, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Basic Information about a realm
pub struct BasicRealmInfo {
pub realm: String,
#[serde(rename = "type")]
pub ty: RealmType,
/// True if it is the default realm
#[serde(skip_serializing_if = "Option::is_none")]
pub default: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>,
}

View File

@ -0,0 +1,106 @@
use anyhow::{bail, Error};
use serde::{Deserialize, Serialize};
use std::borrow::Cow;
use proxmox_schema::{api, const_regex, ApiStringFormat, Schema, StringSchema};
const_regex! {
pub MAINTENANCE_MESSAGE_REGEX = r"^[[:^cntrl:]]*$";
}
pub const MAINTENANCE_MESSAGE_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&MAINTENANCE_MESSAGE_REGEX);
pub const MAINTENANCE_MESSAGE_SCHEMA: Schema =
StringSchema::new("Message describing the reason for the maintenance.")
.format(&MAINTENANCE_MESSAGE_FORMAT)
.max_length(64)
.schema();
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
/// Operation requirements, used when checking for maintenance mode.
pub enum Operation {
/// for any read operation like backup restore or RRD metric collection
Read,
/// for any write/delete operation, like backup create or GC
Write,
/// for any purely logical operation on the in-memory state of the datastore, e.g., to check if
/// some mutex could be locked (e.g., GC already running?)
///
/// NOTE: one must *not* do any IO operations when only holding this Op state
Lookup,
// GarbageCollect or Delete?
}
#[api]
#[derive(Copy, Clone, Deserialize, Serialize, PartialEq, Eq)]
#[serde(rename_all = "kebab-case")]
/// Maintenance type.
pub enum MaintenanceType {
// TODO:
// - Add "unmounting" once we got pluggable datastores
// - Add "GarbageCollection" or "DeleteOnly" as type and track GC (or all deletes) as separate
// operation, so that one can enable a mode where nothing new can be added but stuff can be
// cleaned
/// Only read operations are allowed on the datastore.
ReadOnly,
/// Neither read nor write operations are allowed on the datastore.
Offline,
/// The datastore is being deleted.
Delete,
}
serde_plain::derive_display_from_serialize!(MaintenanceType);
serde_plain::derive_fromstr_from_deserialize!(MaintenanceType);
#[api(
properties: {
type: {
type: MaintenanceType,
},
message: {
optional: true,
schema: MAINTENANCE_MESSAGE_SCHEMA,
}
},
default_key: "type",
)]
#[derive(Deserialize, Serialize)]
/// Maintenance mode
pub struct MaintenanceMode {
/// Type of maintenance ("read-only", "offline" or "delete").
#[serde(rename = "type")]
pub ty: MaintenanceType,
/// Reason for maintenance.
#[serde(skip_serializing_if = "Option::is_none")]
pub message: Option<String>,
}
impl MaintenanceMode {
/// Used for deciding whether the datastore is cleared from the internal cache after the last
/// task finishes, so all open files are closed.
pub fn is_offline(&self) -> bool {
self.ty == MaintenanceType::Offline
}
pub fn check(&self, operation: Option<Operation>) -> Result<(), Error> {
if self.ty == MaintenanceType::Delete {
bail!("datastore is being deleted");
}
let message = percent_encoding::percent_decode_str(self.message.as_deref().unwrap_or(""))
.decode_utf8()
.unwrap_or(Cow::Borrowed(""));
if let Some(Operation::Lookup) = operation {
return Ok(());
} else if self.ty == MaintenanceType::Offline {
bail!("offline maintenance mode: {}", message);
} else if self.ty == MaintenanceType::ReadOnly {
if let Some(Operation::Write) = operation {
bail!("read-only maintenance mode: {}", message);
}
}
Ok(())
}
}
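The check() method above is the single gate callers use before touching a datastore: Delete always fails, Lookup always passes, Offline rejects everything else, and ReadOnly rejects only writes. A minimal usage sketch (not part of the change set; it assumes these types are re-exported from the pbs-api-types crate root):

use pbs_api_types::{MaintenanceMode, MaintenanceType, Operation};

fn main() {
    let mode = MaintenanceMode {
        ty: MaintenanceType::ReadOnly,
        // the message is stored percent-encoded and decoded for error output
        message: Some("disk%20replacement".to_string()),
    };

    assert!(mode.check(Some(Operation::Lookup)).is_ok()); // purely logical ops always pass
    assert!(mode.check(Some(Operation::Read)).is_ok());   // reads are allowed in read-only mode
    assert!(mode.check(Some(Operation::Write)).is_err()); // writes are rejected
}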

View File

@ -0,0 +1,190 @@
use serde::{Deserialize, Serialize};
use crate::{
HOST_PORT_SCHEMA, HTTP_URL_SCHEMA, PROXMOX_SAFE_ID_FORMAT, SINGLE_LINE_COMMENT_SCHEMA,
};
use proxmox_schema::{api, Schema, StringSchema, Updater};
pub const METRIC_SERVER_ID_SCHEMA: Schema = StringSchema::new("Metrics Server ID.")
.format(&PROXMOX_SAFE_ID_FORMAT)
.min_length(3)
.max_length(32)
.schema();
pub const INFLUXDB_BUCKET_SCHEMA: Schema = StringSchema::new("InfluxDB Bucket.")
.format(&PROXMOX_SAFE_ID_FORMAT)
.min_length(3)
.max_length(32)
.default("proxmox")
.schema();
pub const INFLUXDB_ORGANIZATION_SCHEMA: Schema = StringSchema::new("InfluxDB Organization.")
.format(&PROXMOX_SAFE_ID_FORMAT)
.min_length(3)
.max_length(32)
.default("proxmox")
.schema();
fn return_true() -> bool {
true
}
fn is_true(b: &bool) -> bool {
*b
}
#[api(
properties: {
name: {
schema: METRIC_SERVER_ID_SCHEMA,
},
enable: {
type: bool,
optional: true,
default: true,
},
host: {
schema: HOST_PORT_SCHEMA,
},
mtu: {
type: u16,
optional: true,
default: 1500,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
},
)]
#[derive(Serialize, Deserialize, Updater)]
#[serde(rename_all = "kebab-case")]
/// InfluxDB Server (UDP)
pub struct InfluxDbUdp {
#[updater(skip)]
pub name: String,
#[serde(default = "return_true", skip_serializing_if = "is_true")]
#[updater(serde(skip_serializing_if = "Option::is_none"))]
/// Enables or disables the metrics server
pub enable: bool,
/// The host + port
pub host: String,
#[serde(skip_serializing_if = "Option::is_none")]
/// The MTU
pub mtu: Option<u16>,
#[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>,
}
#[api(
properties: {
name: {
schema: METRIC_SERVER_ID_SCHEMA,
},
enable: {
type: bool,
optional: true,
default: true,
},
url: {
schema: HTTP_URL_SCHEMA,
},
token: {
type: String,
optional: true,
},
bucket: {
schema: INFLUXDB_BUCKET_SCHEMA,
optional: true,
},
organization: {
schema: INFLUXDB_ORGANIZATION_SCHEMA,
optional: true,
},
"max-body-size": {
type: usize,
optional: true,
default: 25_000_000,
},
"verify-tls": {
type: bool,
optional: true,
default: true,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
},
)]
#[derive(Serialize, Deserialize, Updater)]
#[serde(rename_all = "kebab-case")]
/// InfluxDB Server (HTTP(s))
pub struct InfluxDbHttp {
#[updater(skip)]
pub name: String,
#[serde(default = "return_true", skip_serializing_if = "is_true")]
#[updater(serde(skip_serializing_if = "Option::is_none"))]
/// Enables or disables the metrics server
pub enable: bool,
/// The base url of the influxdb server
pub url: String,
#[serde(skip_serializing_if = "Option::is_none")]
/// The (optional) API token
pub token: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub bucket: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub organization: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
/// The (optional) maximum body size
pub max_body_size: Option<usize>,
#[serde(skip_serializing_if = "Option::is_none")]
/// If true, the certificate will be validated.
pub verify_tls: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>,
}
#[api]
#[derive(Copy, Clone, Deserialize, Serialize, PartialEq, Eq, PartialOrd, Ord)]
/// Type of the metric server
pub enum MetricServerType {
/// InfluxDB HTTP
#[serde(rename = "influxdb-http")]
InfluxDbHttp,
/// InfluxDB UDP
#[serde(rename = "influxdb-udp")]
InfluxDbUdp,
}
#[api(
properties: {
name: {
schema: METRIC_SERVER_ID_SCHEMA,
},
"type": {
type: MetricServerType,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
},
)]
#[derive(Clone, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]
#[serde(rename_all = "kebab-case")]
/// Basic information about a metric server that's available for all types
pub struct MetricServerInfo {
pub name: String,
#[serde(rename = "type")]
pub ty: MetricServerType,
/// Enables or disables the metrics server
#[serde(skip_serializing_if = "Option::is_none")]
pub enable: Option<bool>,
/// The target server
pub server: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>,
}

View File

@ -0,0 +1,345 @@
use std::fmt;
use serde::{Deserialize, Serialize};
use proxmox_schema::*;
use crate::{
CIDR_FORMAT, CIDR_V4_FORMAT, CIDR_V6_FORMAT, IP_FORMAT, IP_V4_FORMAT, IP_V6_FORMAT,
PROXMOX_SAFE_ID_REGEX,
};
pub const NETWORK_INTERFACE_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX);
pub const IP_V4_SCHEMA: Schema = StringSchema::new("IPv4 address.")
.format(&IP_V4_FORMAT)
.max_length(15)
.schema();
pub const IP_V6_SCHEMA: Schema = StringSchema::new("IPv6 address.")
.format(&IP_V6_FORMAT)
.max_length(39)
.schema();
pub const IP_SCHEMA: Schema = StringSchema::new("IP (IPv4 or IPv6) address.")
.format(&IP_FORMAT)
.max_length(39)
.schema();
pub const CIDR_V4_SCHEMA: Schema = StringSchema::new("IPv4 address with netmask (CIDR notation).")
.format(&CIDR_V4_FORMAT)
.max_length(18)
.schema();
pub const CIDR_V6_SCHEMA: Schema = StringSchema::new("IPv6 address with netmask (CIDR notation).")
.format(&CIDR_V6_FORMAT)
.max_length(43)
.schema();
pub const CIDR_SCHEMA: Schema =
StringSchema::new("IP address (IPv4 or IPv6) with netmask (CIDR notation).")
.format(&CIDR_FORMAT)
.max_length(43)
.schema();
#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// Interface configuration method
pub enum NetworkConfigMethod {
/// Configuration is done manually using other tools
Manual,
/// Define interfaces with statically allocated addresses.
Static,
/// Obtain an address via DHCP
DHCP,
/// Define the loopback interface.
Loopback,
}
#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
#[repr(u8)]
/// Linux Bond Mode
pub enum LinuxBondMode {
/// Round-robin policy
BalanceRr = 0,
/// Active-backup policy
ActiveBackup = 1,
/// XOR policy
BalanceXor = 2,
/// Broadcast policy
Broadcast = 3,
/// IEEE 802.3ad Dynamic link aggregation
#[serde(rename = "802.3ad")]
Ieee802_3ad = 4,
/// Adaptive transmit load balancing
BalanceTlb = 5,
/// Adaptive load balancing
BalanceAlb = 6,
}
impl fmt::Display for LinuxBondMode {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str(match self {
LinuxBondMode::BalanceRr => "balance-rr",
LinuxBondMode::ActiveBackup => "active-backup",
LinuxBondMode::BalanceXor => "balance-xor",
LinuxBondMode::Broadcast => "broadcast",
LinuxBondMode::Ieee802_3ad => "802.3ad",
LinuxBondMode::BalanceTlb => "balance-tlb",
LinuxBondMode::BalanceAlb => "balance-alb",
})
}
}
#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
#[repr(u8)]
/// Bond Transmit Hash Policy for LACP (802.3ad)
pub enum BondXmitHashPolicy {
/// Layer 2
Layer2 = 0,
/// Layer 2+3
#[serde(rename = "layer2+3")]
Layer2_3 = 1,
/// Layer 3+4
#[serde(rename = "layer3+4")]
Layer3_4 = 2,
}
impl fmt::Display for BondXmitHashPolicy {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str(match self {
BondXmitHashPolicy::Layer2 => "layer2",
BondXmitHashPolicy::Layer2_3 => "layer2+3",
BondXmitHashPolicy::Layer3_4 => "layer3+4",
})
}
}
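Both bond enums keep their serde renames and their Display strings aligned, so the value written to the network config is the same string shown to the user. A short sketch of that property (not part of the change set; it assumes the enums are re-exported from the crate root and that serde_json is available):

use pbs_api_types::{BondXmitHashPolicy, LinuxBondMode};

fn main() {
    assert_eq!(LinuxBondMode::Ieee802_3ad.to_string(), "802.3ad");
    assert_eq!(BondXmitHashPolicy::Layer2_3.to_string(), "layer2+3");

    // serde produces the same spelling (kebab-case plus the explicit renames above)
    let json = serde_json::to_string(&LinuxBondMode::ActiveBackup).unwrap();
    assert_eq!(json, "\"active-backup\"");
}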
#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// Network interface type
pub enum NetworkInterfaceType {
/// Loopback
Loopback,
/// Physical Ethernet device
Eth,
/// Linux Bridge
Bridge,
/// Linux Bond
Bond,
/// Linux VLAN (eth.10)
Vlan,
/// Interface Alias (eth:1)
Alias,
/// Unknown interface type
Unknown,
}
pub const NETWORK_INTERFACE_NAME_SCHEMA: Schema = StringSchema::new("Network interface name.")
.format(&NETWORK_INTERFACE_FORMAT)
.min_length(1)
.max_length(15) // libc::IFNAMSIZ-1
.schema();
pub const NETWORK_INTERFACE_ARRAY_SCHEMA: Schema =
ArraySchema::new("Network interface list.", &NETWORK_INTERFACE_NAME_SCHEMA).schema();
pub const NETWORK_INTERFACE_LIST_SCHEMA: Schema =
StringSchema::new("A list of network devices, comma separated.")
.format(&ApiStringFormat::PropertyString(
&NETWORK_INTERFACE_ARRAY_SCHEMA,
))
.schema();
#[api(
properties: {
name: {
schema: NETWORK_INTERFACE_NAME_SCHEMA,
},
"type": {
type: NetworkInterfaceType,
},
method: {
type: NetworkConfigMethod,
optional: true,
},
method6: {
type: NetworkConfigMethod,
optional: true,
},
cidr: {
schema: CIDR_V4_SCHEMA,
optional: true,
},
cidr6: {
schema: CIDR_V6_SCHEMA,
optional: true,
},
gateway: {
schema: IP_V4_SCHEMA,
optional: true,
},
gateway6: {
schema: IP_V6_SCHEMA,
optional: true,
},
options: {
description: "Option list (inet)",
type: Array,
items: {
description: "Optional attribute line.",
type: String,
},
},
options6: {
description: "Option list (inet6)",
type: Array,
items: {
description: "Optional attribute line.",
type: String,
},
},
comments: {
description: "Comments (inet, may span multiple lines)",
type: String,
optional: true,
},
comments6: {
description: "Comments (inet6, may span multiple lines)",
type: String,
optional: true,
},
bridge_ports: {
schema: NETWORK_INTERFACE_ARRAY_SCHEMA,
optional: true,
},
slaves: {
schema: NETWORK_INTERFACE_ARRAY_SCHEMA,
optional: true,
},
"vlan-id": {
description: "VLAN ID.",
type: u16,
optional: true,
},
"vlan-raw-device": {
schema: NETWORK_INTERFACE_NAME_SCHEMA,
optional: true,
},
bond_mode: {
type: LinuxBondMode,
optional: true,
},
"bond-primary": {
schema: NETWORK_INTERFACE_NAME_SCHEMA,
optional: true,
},
bond_xmit_hash_policy: {
type: BondXmitHashPolicy,
optional: true,
},
}
)]
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
/// Network Interface configuration
pub struct Interface {
/// Autostart interface
#[serde(rename = "autostart")]
pub autostart: bool,
/// Interface is active (UP)
pub active: bool,
/// Interface name
pub name: String,
/// Interface type
#[serde(rename = "type")]
pub interface_type: NetworkInterfaceType,
#[serde(skip_serializing_if = "Option::is_none")]
pub method: Option<NetworkConfigMethod>,
#[serde(skip_serializing_if = "Option::is_none")]
pub method6: Option<NetworkConfigMethod>,
#[serde(skip_serializing_if = "Option::is_none")]
/// IPv4 address with netmask
pub cidr: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
/// IPv4 gateway
pub gateway: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
/// IPv6 address with netmask
pub cidr6: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
/// IPv6 gateway
pub gateway6: Option<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub options: Vec<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub options6: Vec<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub comments: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub comments6: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
/// Maximum Transmission Unit
pub mtu: Option<u64>,
#[serde(skip_serializing_if = "Option::is_none")]
pub bridge_ports: Option<Vec<String>>,
/// Enable bridge vlan support.
#[serde(skip_serializing_if = "Option::is_none")]
pub bridge_vlan_aware: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(rename = "vlan-id")]
pub vlan_id: Option<u16>,
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(rename = "vlan-raw-device")]
pub vlan_raw_device: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub slaves: Option<Vec<String>>,
#[serde(skip_serializing_if = "Option::is_none")]
pub bond_mode: Option<LinuxBondMode>,
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(rename = "bond-primary")]
pub bond_primary: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub bond_xmit_hash_policy: Option<BondXmitHashPolicy>,
}
impl Interface {
pub fn new(name: String) -> Self {
Self {
name,
interface_type: NetworkInterfaceType::Unknown,
autostart: false,
active: false,
method: None,
method6: None,
cidr: None,
gateway: None,
cidr6: None,
gateway6: None,
options: Vec::new(),
options6: Vec::new(),
comments: None,
comments6: None,
mtu: None,
bridge_ports: None,
bridge_vlan_aware: None,
vlan_id: None,
vlan_raw_device: None,
slaves: None,
bond_mode: None,
bond_primary: None,
bond_xmit_hash_policy: None,
}
}
}
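Interface::new() only fills in the name and leaves everything else at its None/empty default, so callers build up a configuration field by field. A hedged usage sketch (not part of the change set; it assumes the types are re-exported from the crate root):

use pbs_api_types::{Interface, NetworkConfigMethod, NetworkInterfaceType};

fn main() {
    let mut iface = Interface::new("ens18".to_string());
    iface.interface_type = NetworkInterfaceType::Eth;
    iface.autostart = true;
    iface.method = Some(NetworkConfigMethod::Static);
    iface.cidr = Some("192.0.2.10/24".to_string());
    iface.gateway = Some("192.0.2.1".to_string());

    // fields that were never touched stay at their defaults
    assert!(iface.cidr6.is_none());
    assert!(iface.options.is_empty());
}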

162
pbs-api-types/src/node.rs Normal file
View File

@ -0,0 +1,162 @@
use std::ffi::OsStr;
use proxmox_schema::*;
use serde::{Deserialize, Serialize};
use crate::StorageStatus;
#[api]
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "kebab-case")]
/// Node memory usage counters
pub struct NodeMemoryCounters {
/// Total memory
pub total: u64,
/// Used memory
pub used: u64,
/// Free memory
pub free: u64,
}
#[api]
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "kebab-case")]
/// Node swap usage counters
pub struct NodeSwapCounters {
/// Total swap
pub total: u64,
/// Used swap
pub used: u64,
/// Free swap
pub free: u64,
}
#[api]
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "kebab-case")]
/// Contains general node information such as the fingerprint
pub struct NodeInformation {
/// The SSL Fingerprint
pub fingerprint: String,
}
#[api]
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "lowercase")]
/// The current kernel version (output of `uname`)
pub struct KernelVersionInformation {
/// The system name
pub sysname: String,
/// The kernel release number
pub release: String,
/// The kernel version
pub version: String,
/// The machine architecture
pub machine: String,
}
impl KernelVersionInformation {
pub fn from_uname_parts(
sysname: &OsStr,
release: &OsStr,
version: &OsStr,
machine: &OsStr,
) -> Self {
KernelVersionInformation {
sysname: sysname.to_str().map(String::from).unwrap_or_default(),
release: release.to_str().map(String::from).unwrap_or_default(),
version: version.to_str().map(String::from).unwrap_or_default(),
machine: machine.to_str().map(String::from).unwrap_or_default(),
}
}
pub fn get_legacy(&self) -> String {
format!("{} {} {}", self.sysname, self.release, self.version)
}
}
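from_uname_parts() converts the raw OsStr fields from uname(2), silently dropping anything that is not valid UTF-8, while get_legacy() reproduces the old single-string kversion field. A small sketch (not part of the change set; the version strings are made up for illustration):

use std::ffi::OsStr;
use pbs_api_types::KernelVersionInformation;

fn main() {
    let info = KernelVersionInformation::from_uname_parts(
        OsStr::new("Linux"),
        OsStr::new("6.8.12-1-pve"),
        OsStr::new("#1 SMP PREEMPT_DYNAMIC"),
        OsStr::new("x86_64"),
    );
    assert_eq!(info.machine, "x86_64");
    // legacy form: "sysname release version" in one string
    assert_eq!(info.get_legacy(), "Linux 6.8.12-1-pve #1 SMP PREEMPT_DYNAMIC");
}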
#[api]
#[derive(Serialize, Deserialize, Copy, Clone)]
#[serde(rename_all = "kebab-case")]
/// The possible BootModes
pub enum BootMode {
/// The BootMode is EFI/UEFI
Efi,
/// The BootMode is Legacy BIOS
LegacyBios,
}
#[api]
#[derive(Serialize, Deserialize, Clone)]
#[serde(rename_all = "lowercase")]
/// Holds the boot mode information
pub struct BootModeInformation {
/// The BootMode, either Efi or Bios
pub mode: BootMode,
/// SecureBoot status
pub secureboot: bool,
}
#[api]
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "kebab-case")]
/// Information about the CPU
pub struct NodeCpuInformation {
/// The CPU model
pub model: String,
/// The number of CPU sockets
pub sockets: usize,
/// The number of CPU cores (incl. threads)
pub cpus: usize,
}
#[api(
properties: {
memory: {
type: NodeMemoryCounters,
},
root: {
type: StorageStatus,
},
swap: {
type: NodeSwapCounters,
},
loadavg: {
type: Array,
items: {
type: Number,
description: "the load",
}
},
cpuinfo: {
type: NodeCpuInformation,
},
info: {
type: NodeInformation,
}
},
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// The Node status
pub struct NodeStatus {
pub memory: NodeMemoryCounters,
pub root: StorageStatus,
pub swap: NodeSwapCounters,
/// The current uptime of the server.
pub uptime: u64,
/// Load for 1, 5 and 15 minutes.
pub loadavg: [f64; 3],
/// The current kernel version (NEW struct type).
pub current_kernel: KernelVersionInformation,
/// The current kernel version (LEGACY string type).
pub kversion: String,
/// Total CPU usage since last query.
pub cpu: f64,
/// Total IO wait since last query.
pub wait: f64,
pub cpuinfo: NodeCpuInformation,
pub info: NodeInformation,
/// Current boot mode
pub boot_info: BootModeInformation,
}

120
pbs-api-types/src/openid.rs Normal file
View File

@ -0,0 +1,120 @@
use serde::{Deserialize, Serialize};
use proxmox_schema::{api, ApiStringFormat, ArraySchema, Schema, StringSchema, Updater};
use super::{
GENERIC_URI_REGEX, PROXMOX_SAFE_ID_FORMAT, PROXMOX_SAFE_ID_REGEX, REALM_ID_SCHEMA,
SINGLE_LINE_COMMENT_SCHEMA,
};
pub const OPENID_SCOPE_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX);
pub const OPENID_SCOPE_SCHEMA: Schema = StringSchema::new("OpenID Scope Name.")
.format(&OPENID_SCOPE_FORMAT)
.schema();
pub const OPENID_SCOPE_ARRAY_SCHEMA: Schema =
ArraySchema::new("Array of OpenId Scopes.", &OPENID_SCOPE_SCHEMA).schema();
pub const OPENID_SCOPE_LIST_FORMAT: ApiStringFormat =
ApiStringFormat::PropertyString(&OPENID_SCOPE_ARRAY_SCHEMA);
pub const OPENID_DEFAILT_SCOPE_LIST: &str = "email profile";
pub const OPENID_SCOPE_LIST_SCHEMA: Schema = StringSchema::new("OpenID Scope List")
.format(&OPENID_SCOPE_LIST_FORMAT)
.default(OPENID_DEFAILT_SCOPE_LIST)
.schema();
pub const OPENID_ACR_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&GENERIC_URI_REGEX);
pub const OPENID_ACR_SCHEMA: Schema =
StringSchema::new("OpenID Authentication Context Class Reference.")
.format(&OPENID_ACR_FORMAT)
.schema();
pub const OPENID_ACR_ARRAY_SCHEMA: Schema =
ArraySchema::new("Array of OpenId ACRs.", &OPENID_ACR_SCHEMA).schema();
pub const OPENID_ACR_LIST_FORMAT: ApiStringFormat =
ApiStringFormat::PropertyString(&OPENID_ACR_ARRAY_SCHEMA);
pub const OPENID_ACR_LIST_SCHEMA: Schema = StringSchema::new("OpenID ACR List")
.format(&OPENID_ACR_LIST_FORMAT)
.schema();
pub const OPENID_USERNAME_CLAIM_SCHEMA: Schema = StringSchema::new(
"Use the value of this attribute/claim as unique user name. It \
is up to the identity provider to guarantee the uniqueness. The \
OpenID specification only guarantees that Subject ('sub') is \
unique. Also make sure that the user is not allowed to change that \
attribute by himself!",
)
.max_length(64)
.min_length(1)
.format(&PROXMOX_SAFE_ID_FORMAT)
.schema();
#[api(
properties: {
realm: {
schema: REALM_ID_SCHEMA,
},
"client-key": {
optional: true,
},
"scopes": {
schema: OPENID_SCOPE_LIST_SCHEMA,
optional: true,
},
"acr-values": {
schema: OPENID_ACR_LIST_SCHEMA,
optional: true,
},
prompt: {
description: "OpenID Prompt",
type: String,
format: &PROXMOX_SAFE_ID_FORMAT,
optional: true,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
autocreate: {
optional: true,
default: false,
},
"username-claim": {
schema: OPENID_USERNAME_CLAIM_SCHEMA,
optional: true,
},
},
)]
#[derive(Serialize, Deserialize, Updater)]
#[serde(rename_all = "kebab-case")]
/// OpenID configuration properties.
pub struct OpenIdRealmConfig {
#[updater(skip)]
pub realm: String,
/// OpenID Issuer Url
pub issuer_url: String,
/// OpenID Client ID
pub client_id: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub scopes: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub acr_values: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub prompt: Option<String>,
/// OpenID Client Key
#[serde(skip_serializing_if = "Option::is_none")]
pub client_key: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>,
/// Automatically create users if they do not exist.
#[serde(skip_serializing_if = "Option::is_none")]
pub autocreate: Option<bool>,
#[updater(skip)]
#[serde(skip_serializing_if = "Option::is_none")]
pub username_claim: Option<String>,
}

View File

@ -0,0 +1,22 @@
use percent_encoding::{utf8_percent_encode, AsciiSet};
/// This used to be: `SIMPLE_ENCODE_SET` plus space, `"`, `#`, `<`, `>`, backtick, `?`, `{`, `}`
pub const DEFAULT_ENCODE_SET: &AsciiSet = &percent_encoding::CONTROLS // 0..1f and 7e
// The SIMPLE_ENCODE_SET adds space and anything >= 0x7e (7e itself is already included above)
.add(0x20)
.add(0x7f)
// the DEFAULT_ENCODE_SET added:
.add(b' ')
.add(b'"')
.add(b'#')
.add(b'<')
.add(b'>')
.add(b'`')
.add(b'?')
.add(b'{')
.add(b'}');
/// percent encode a url component
pub fn percent_encode_component(comp: &str) -> String {
utf8_percent_encode(comp, percent_encoding::NON_ALPHANUMERIC).to_string()
}
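Note that percent_encode_component() deliberately uses NON_ALPHANUMERIC rather than the DEFAULT_ENCODE_SET defined above, so every non-alphanumeric byte in a path component gets escaped. A quick sketch (not part of the change set; it assumes the function is exported from the crate root):

use pbs_api_types::percent_encode_component;

fn main() {
    assert_eq!(percent_encode_component("vm/100"), "vm%2F100");
    assert_eq!(percent_encode_component("a b"), "a%20b");
}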

106
pbs-api-types/src/remote.rs Normal file
View File

@ -0,0 +1,106 @@
use serde::{Deserialize, Serialize};
use super::*;
use proxmox_schema::*;
pub const REMOTE_PASSWORD_SCHEMA: Schema =
StringSchema::new("Password or auth token for remote host.")
.format(&PASSWORD_FORMAT)
.min_length(1)
.max_length(1024)
.schema();
pub const REMOTE_PASSWORD_BASE64_SCHEMA: Schema =
StringSchema::new("Password or auth token for remote host (stored as base64 string).")
.format(&PASSWORD_FORMAT)
.min_length(1)
.max_length(1024)
.schema();
pub const REMOTE_ID_SCHEMA: Schema = StringSchema::new("Remote ID.")
.format(&PROXMOX_SAFE_ID_FORMAT)
.min_length(3)
.max_length(32)
.schema();
#[api(
properties: {
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
host: {
schema: DNS_NAME_OR_IP_SCHEMA,
},
port: {
optional: true,
description: "The (optional) port",
type: u16,
},
"auth-id": {
type: Authid,
},
fingerprint: {
optional: true,
schema: CERT_FINGERPRINT_SHA256_SCHEMA,
},
},
)]
#[derive(Serialize, Deserialize, Updater, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Remote configuration properties.
pub struct RemoteConfig {
#[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>,
pub host: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub port: Option<u16>,
pub auth_id: Authid,
#[serde(skip_serializing_if = "Option::is_none")]
pub fingerprint: Option<String>,
}
#[api(
properties: {
name: {
schema: REMOTE_ID_SCHEMA,
},
config: {
type: RemoteConfig,
},
password: {
schema: REMOTE_PASSWORD_BASE64_SCHEMA,
},
},
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Remote properties.
pub struct Remote {
pub name: String,
// Note: The stored password is base64 encoded
#[serde(default, skip_serializing_if = "String::is_empty")]
#[serde(with = "proxmox_serde::string_as_base64")]
pub password: String,
#[serde(flatten)]
pub config: RemoteConfig,
}
#[api(
properties: {
name: {
schema: REMOTE_ID_SCHEMA,
},
config: {
type: RemoteConfig,
},
},
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Remote properties.
pub struct RemoteWithoutPassword {
pub name: String,
#[serde(flatten)]
pub config: RemoteConfig,
}

View File

@ -0,0 +1,134 @@
//! Types for tape changer API
use serde::{Deserialize, Serialize};
use proxmox_schema::{
api, ApiStringFormat, ArraySchema, IntegerSchema, Schema, StringSchema, Updater,
};
use crate::{OptionalDeviceIdentification, PROXMOX_SAFE_ID_FORMAT};
pub const CHANGER_NAME_SCHEMA: Schema = StringSchema::new("Tape Changer Identifier.")
.format(&PROXMOX_SAFE_ID_FORMAT)
.min_length(3)
.max_length(32)
.schema();
pub const SCSI_CHANGER_PATH_SCHEMA: Schema =
StringSchema::new("Path to Linux generic SCSI device (e.g. '/dev/sg4')").schema();
pub const MEDIA_LABEL_SCHEMA: Schema = StringSchema::new("Media Label/Barcode.")
.format(&PROXMOX_SAFE_ID_FORMAT)
.min_length(2)
.max_length(32)
.schema();
pub const SLOT_ARRAY_SCHEMA: Schema = ArraySchema::new(
"Slot list.",
&IntegerSchema::new("Slot number").minimum(1).schema(),
)
.schema();
pub const EXPORT_SLOT_LIST_SCHEMA: Schema = StringSchema::new(
"\
A list of slot numbers, comma separated. Those slots are reserved for
Import/Export, i.e. any media in those slots are considered to be
'offline'.
",
)
.format(&ApiStringFormat::PropertyString(&SLOT_ARRAY_SCHEMA))
.schema();
#[api(
properties: {
name: {
schema: CHANGER_NAME_SCHEMA,
},
path: {
schema: SCSI_CHANGER_PATH_SCHEMA,
},
"export-slots": {
schema: EXPORT_SLOT_LIST_SCHEMA,
optional: true,
},
"eject-before-unload": {
optional: true,
default: false,
}
},
)]
#[derive(Serialize, Deserialize, Updater)]
#[serde(rename_all = "kebab-case")]
/// SCSI tape changer
pub struct ScsiTapeChanger {
#[updater(skip)]
pub name: String,
pub path: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub export_slots: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
/// if set to true, tapes are ejected manually before unloading
pub eject_before_unload: Option<bool>,
}
#[api(
properties: {
config: {
type: ScsiTapeChanger,
},
info: {
type: OptionalDeviceIdentification,
},
},
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Changer config with optional device identification attributes
pub struct ChangerListEntry {
#[serde(flatten)]
pub config: ScsiTapeChanger,
#[serde(flatten)]
pub info: OptionalDeviceIdentification,
}
#[api()]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Mtx Entry Kind
pub enum MtxEntryKind {
/// Drive
Drive,
/// Slot
Slot,
/// Import/Export Slot
ImportExport,
}
#[api(
properties: {
"entry-kind": {
type: MtxEntryKind,
},
"label-text": {
schema: MEDIA_LABEL_SCHEMA,
optional: true,
},
},
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Mtx Status Entry
pub struct MtxStatusEntry {
pub entry_kind: MtxEntryKind,
/// The ID of the slot or drive
pub entry_id: u64,
/// The media label (volume tag) if the slot/drive is full
#[serde(skip_serializing_if = "Option::is_none")]
pub label_text: Option<String>,
/// The slot the drive was loaded from
#[serde(skip_serializing_if = "Option::is_none")]
pub loaded_slot: Option<u64>,
/// The current state of the drive
#[serde(skip_serializing_if = "Option::is_none")]
pub state: Option<String>,
}

View File

@ -0,0 +1,55 @@
use ::serde::{Deserialize, Serialize};
use proxmox_schema::api;
#[api()]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Optional Device Identification Attributes
pub struct OptionalDeviceIdentification {
/// Vendor (autodetected)
#[serde(skip_serializing_if = "Option::is_none")]
pub vendor: Option<String>,
/// Model (autodetected)
#[serde(skip_serializing_if = "Option::is_none")]
pub model: Option<String>,
/// Serial number (autodetected)
#[serde(skip_serializing_if = "Option::is_none")]
pub serial: Option<String>,
}
#[api()]
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Kind of device
pub enum DeviceKind {
/// Tape changer (Autoloader, Robot)
Changer,
/// Normal SCSI tape device
Tape,
}
#[api(
properties: {
kind: {
type: DeviceKind,
},
},
)]
#[derive(Debug, Serialize, Deserialize)]
/// Tape device information
pub struct TapeDeviceInfo {
pub kind: DeviceKind,
/// Path to the linux device node
pub path: String,
/// Serial number (autodetected)
pub serial: String,
/// Vendor (autodetected)
pub vendor: String,
/// Model (autodetected)
pub model: String,
/// Device major number
pub major: u32,
/// Device minor number
pub minor: u32,
}

View File

@ -0,0 +1,278 @@
//! Types for tape drive API
use anyhow::{bail, Error};
use serde::{Deserialize, Serialize};
use proxmox_schema::{api, IntegerSchema, Schema, StringSchema, Updater};
use crate::{OptionalDeviceIdentification, CHANGER_NAME_SCHEMA, PROXMOX_SAFE_ID_FORMAT};
pub const DRIVE_NAME_SCHEMA: Schema = StringSchema::new("Drive Identifier.")
.format(&PROXMOX_SAFE_ID_FORMAT)
.min_length(3)
.max_length(32)
.schema();
pub const LTO_DRIVE_PATH_SCHEMA: Schema =
StringSchema::new("The path to a LTO SCSI-generic tape device (i.e. '/dev/sg0')").schema();
pub const CHANGER_DRIVENUM_SCHEMA: Schema =
IntegerSchema::new("Associated changer drive number (requires option changer)")
.minimum(0)
.maximum(255)
.default(0)
.schema();
#[api(
properties: {
name: {
schema: DRIVE_NAME_SCHEMA,
}
}
)]
#[derive(Serialize, Deserialize)]
/// Simulate tape drives (only for test and debug)
#[serde(rename_all = "kebab-case")]
pub struct VirtualTapeDrive {
pub name: String,
/// Path to directory
pub path: String,
/// Virtual tape size
#[serde(skip_serializing_if = "Option::is_none")]
pub max_size: Option<usize>,
}
#[api(
properties: {
name: {
schema: DRIVE_NAME_SCHEMA,
},
path: {
schema: LTO_DRIVE_PATH_SCHEMA,
},
changer: {
schema: CHANGER_NAME_SCHEMA,
optional: true,
},
"changer-drivenum": {
schema: CHANGER_DRIVENUM_SCHEMA,
optional: true,
},
}
)]
#[derive(Serialize, Deserialize, Updater, Clone)]
#[serde(rename_all = "kebab-case")]
/// LTO SCSI tape drive
pub struct LtoTapeDrive {
#[updater(skip)]
pub name: String,
pub path: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub changer: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub changer_drivenum: Option<u64>,
}
#[api(
properties: {
config: {
type: LtoTapeDrive,
},
info: {
type: OptionalDeviceIdentification,
},
},
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Drive list entry
pub struct DriveListEntry {
#[serde(flatten)]
pub config: LtoTapeDrive,
#[serde(flatten)]
pub info: OptionalDeviceIdentification,
/// the state of the drive if locked
#[serde(skip_serializing_if = "Option::is_none")]
pub state: Option<String>,
}
#[api()]
#[derive(Serialize, Deserialize)]
/// Medium auxiliary memory attributes (MAM)
pub struct MamAttribute {
/// Attribute id
pub id: u16,
/// Attribute name
pub name: String,
/// Attribute value
pub value: String,
}
#[api()]
#[derive(Serialize, Deserialize, Copy, Clone, Debug, PartialOrd, PartialEq)]
pub enum TapeDensity {
/// Unknown (no media loaded)
Unknown,
/// LTO1
LTO1,
/// LTO2
LTO2,
/// LTO3
LTO3,
/// LTO4
LTO4,
/// LTO5
LTO5,
/// LTO6
LTO6,
/// LTO7
LTO7,
/// LTO7M8
LTO7M8,
/// LTO8
LTO8,
/// LTO9
LTO9,
}
impl TryFrom<u8> for TapeDensity {
type Error = Error;
fn try_from(value: u8) -> Result<Self, Self::Error> {
let density = match value {
0x00 => TapeDensity::Unknown,
0x40 => TapeDensity::LTO1,
0x42 => TapeDensity::LTO2,
0x44 => TapeDensity::LTO3,
0x46 => TapeDensity::LTO4,
0x58 => TapeDensity::LTO5,
0x5a => TapeDensity::LTO6,
0x5c => TapeDensity::LTO7,
0x5d => TapeDensity::LTO7M8,
0x5e => TapeDensity::LTO8,
0x60 => TapeDensity::LTO9,
_ => bail!("unknown tape density code 0x{:02x}", value),
};
Ok(density)
}
}
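The TryFrom<u8> impl maps the SCSI density codes reported by the drive to a TapeDensity and refuses unknown codes instead of guessing. A sketch (not part of the change set):

use pbs_api_types::TapeDensity;

fn main() {
    let density = TapeDensity::try_from(0x5e_u8).unwrap();
    assert_eq!(density, TapeDensity::LTO8);

    // codes not in the table are reported as an error
    assert!(TapeDensity::try_from(0x99_u8).is_err());
}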
#[api(
properties: {
density: {
type: TapeDensity,
optional: true,
},
},
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Drive/Media status for LTO SCSI drives.
///
/// Media related data is optional - only set if there is a medium
/// loaded.
pub struct LtoDriveAndMediaStatus {
/// Vendor
pub vendor: String,
/// Product
pub product: String,
/// Revision
pub revision: String,
/// Block size (0 is variable size)
pub blocksize: u32,
/// Compression enabled
pub compression: bool,
/// Drive buffer mode
pub buffer_mode: u8,
/// Tape density
pub density: TapeDensity,
/// Media is write protected
#[serde(skip_serializing_if = "Option::is_none")]
pub write_protect: Option<bool>,
/// Tape Alert Flags
#[serde(skip_serializing_if = "Option::is_none")]
pub alert_flags: Option<String>,
/// Current file number
#[serde(skip_serializing_if = "Option::is_none")]
pub file_number: Option<u64>,
/// Current block number
#[serde(skip_serializing_if = "Option::is_none")]
pub block_number: Option<u64>,
/// Medium Manufacture Date (epoch)
#[serde(skip_serializing_if = "Option::is_none")]
pub manufactured: Option<i64>,
/// Total Bytes Read in Medium Life
#[serde(skip_serializing_if = "Option::is_none")]
pub bytes_read: Option<u64>,
/// Total Bytes Written in Medium Life
#[serde(skip_serializing_if = "Option::is_none")]
pub bytes_written: Option<u64>,
/// Number of mounts for the current volume (i.e., Thread Count)
#[serde(skip_serializing_if = "Option::is_none")]
pub volume_mounts: Option<u64>,
/// Count of the total number of times the medium has passed over
/// the head.
#[serde(skip_serializing_if = "Option::is_none")]
pub medium_passes: Option<u64>,
/// Estimated tape wearout factor (assuming max. 16000 end-to-end passes)
#[serde(skip_serializing_if = "Option::is_none")]
pub medium_wearout: Option<f64>,
}
#[api()]
/// Volume statistics from SCSI log page 17h
#[derive(Default, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct Lp17VolumeStatistics {
/// Volume mounts (thread count)
pub volume_mounts: u64,
/// Total data sets written
pub volume_datasets_written: u64,
/// Write retries
pub volume_recovered_write_data_errors: u64,
/// Total unrecovered write errors
pub volume_unrecovered_write_data_errors: u64,
/// Total suspended writes
pub volume_write_servo_errors: u64,
/// Total fatal suspended writes
pub volume_unrecovered_write_servo_errors: u64,
/// Total datasets read
pub volume_datasets_read: u64,
/// Total read retries
pub volume_recovered_read_errors: u64,
/// Total unrecovered read errors
pub volume_unrecovered_read_errors: u64,
/// Last mount unrecovered write errors
pub last_mount_unrecovered_write_errors: u64,
/// Last mount unrecovered read errors
pub last_mount_unrecovered_read_errors: u64,
/// Last mount bytes written
pub last_mount_bytes_written: u64,
/// Last mount bytes read
pub last_mount_bytes_read: u64,
/// Lifetime bytes written
pub lifetime_bytes_written: u64,
/// Lifetime bytes read
pub lifetime_bytes_read: u64,
/// Last load write compression ratio
pub last_load_write_compression_ratio: u64,
/// Last load read compression ratio
pub last_load_read_compression_ratio: u64,
/// Medium mount time
pub medium_mount_time: u64,
/// Medium ready time
pub medium_ready_time: u64,
/// Total native capacity
pub total_native_capacity: u64,
/// Total used native capacity
pub total_used_native_capacity: u64,
/// Write protect
pub write_protect: bool,
/// Volume is WORM
pub worm: bool,
/// Beginning of medium passes
pub beginning_of_medium_passes: u64,
/// Middle of medium passes
pub middle_of_tape_passes: u64,
/// Volume serial number
pub serial: String,
}

View File

@ -0,0 +1,176 @@
use ::serde::{Deserialize, Serialize};
use proxmox_schema::*;
use proxmox_uuid::Uuid;
use crate::{MediaLocation, MediaStatus, UUID_FORMAT};
pub const MEDIA_SET_UUID_SCHEMA: Schema = StringSchema::new(
"MediaSet Uuid (We use the all-zero Uuid to reseve an empty media for a specific pool).",
)
.format(&UUID_FORMAT)
.schema();
pub const MEDIA_UUID_SCHEMA: Schema = StringSchema::new("Media Uuid.")
.format(&UUID_FORMAT)
.schema();
#[api(
properties: {
"media-set-uuid": {
schema: MEDIA_SET_UUID_SCHEMA,
},
},
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Media Set list entry
pub struct MediaSetListEntry {
/// Media set name
pub media_set_name: String,
pub media_set_uuid: Uuid,
/// MediaSet creation time stamp
pub media_set_ctime: i64,
/// Media Pool
pub pool: String,
}
#[api(
properties: {
location: {
type: MediaLocation,
},
status: {
type: MediaStatus,
},
uuid: {
schema: MEDIA_UUID_SCHEMA,
},
"media-set-uuid": {
schema: MEDIA_SET_UUID_SCHEMA,
optional: true,
},
},
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Media list entry
pub struct MediaListEntry {
/// Media label text (or Barcode)
pub label_text: String,
pub uuid: Uuid,
/// Creation time stamp
pub ctime: i64,
pub location: MediaLocation,
pub status: MediaStatus,
/// Expired flag
pub expired: bool,
/// Catalog status OK
pub catalog: bool,
/// Media set name
#[serde(skip_serializing_if = "Option::is_none")]
pub media_set_name: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub media_set_uuid: Option<Uuid>,
/// Media set seq_nr
#[serde(skip_serializing_if = "Option::is_none")]
pub seq_nr: Option<u64>,
/// MediaSet creation time stamp
#[serde(skip_serializing_if = "Option::is_none")]
pub media_set_ctime: Option<i64>,
/// Media Pool
#[serde(skip_serializing_if = "Option::is_none")]
pub pool: Option<String>,
}
#[api(
properties: {
uuid: {
schema: MEDIA_UUID_SCHEMA,
},
"media-set-uuid": {
schema: MEDIA_SET_UUID_SCHEMA,
optional: true,
},
},
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Media label info
pub struct MediaIdFlat {
/// Unique ID
pub uuid: Uuid,
/// Media label text (or Barcode)
pub label_text: String,
/// Creation time stamp
pub ctime: i64,
// All MediaSet properties are optional here
/// MediaSet Pool
#[serde(skip_serializing_if = "Option::is_none")]
pub pool: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub media_set_uuid: Option<Uuid>,
/// MediaSet media sequence number
#[serde(skip_serializing_if = "Option::is_none")]
pub seq_nr: Option<u64>,
/// MediaSet Creation time stamp
#[serde(skip_serializing_if = "Option::is_none")]
pub media_set_ctime: Option<i64>,
/// Encryption key fingerprint
#[serde(skip_serializing_if = "Option::is_none")]
pub encryption_key_fingerprint: Option<String>,
}
#[api(
properties: {
uuid: {
schema: MEDIA_UUID_SCHEMA,
optional: true,
},
},
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Label with optional Uuid
pub struct LabelUuidMap {
/// Changer label text (or Barcode)
pub label_text: String,
/// Associated Uuid (if any)
pub uuid: Option<Uuid>,
}
#[api(
properties: {
uuid: {
schema: MEDIA_UUID_SCHEMA,
},
"media-set-uuid": {
schema: MEDIA_SET_UUID_SCHEMA,
},
},
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Media content list entry
pub struct MediaContentEntry {
/// Media label text (or Barcode)
pub label_text: String,
/// Media Uuid
pub uuid: Uuid,
/// Media set name
pub media_set_name: String,
/// Media set uuid
pub media_set_uuid: Uuid,
/// MediaSet Creation time stamp
pub media_set_ctime: i64,
/// Media set seq_nr
pub seq_nr: u64,
/// Media Pool
pub pool: String,
/// Datastore Name
pub store: String,
/// Backup snapshot
pub snapshot: String,
/// Snapshot creation time (epoch)
pub backup_time: i64,
}

View File

@ -0,0 +1,80 @@
use anyhow::{bail, Error};
use proxmox_schema::{ApiStringFormat, Schema, StringSchema};
use crate::{CHANGER_NAME_SCHEMA, PROXMOX_SAFE_ID_FORMAT};
pub const VAULT_NAME_SCHEMA: Schema = StringSchema::new("Vault name.")
.format(&PROXMOX_SAFE_ID_FORMAT)
.min_length(3)
.max_length(32)
.schema();
#[derive(Debug, PartialEq, Eq, Clone)]
/// Media location
pub enum MediaLocation {
/// Ready for use (inside tape library)
Online(String),
/// Locally available, but needs to be mounted (insert into tape
/// drive)
Offline,
/// Media is inside a Vault
Vault(String),
}
proxmox_serde::forward_deserialize_to_from_str!(MediaLocation);
proxmox_serde::forward_serialize_to_display!(MediaLocation);
impl proxmox_schema::ApiType for MediaLocation {
const API_SCHEMA: Schema = StringSchema::new(
"Media location (e.g. 'offline', 'online-<changer_name>', 'vault-<vault_name>')",
)
.format(&ApiStringFormat::VerifyFn(|text| {
let location: MediaLocation = text.parse()?;
match location {
MediaLocation::Online(ref changer) => {
CHANGER_NAME_SCHEMA.parse_simple_value(changer)?;
}
MediaLocation::Vault(ref vault) => {
VAULT_NAME_SCHEMA.parse_simple_value(vault)?;
}
MediaLocation::Offline => { /* OK */ }
}
Ok(())
}))
.schema();
}
impl std::fmt::Display for MediaLocation {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
MediaLocation::Offline => {
write!(f, "offline")
}
MediaLocation::Online(changer) => {
write!(f, "online-{}", changer)
}
MediaLocation::Vault(vault) => {
write!(f, "vault-{}", vault)
}
}
}
}
impl std::str::FromStr for MediaLocation {
type Err = Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
if s == "offline" {
return Ok(MediaLocation::Offline);
}
if let Some(changer) = s.strip_prefix("online-") {
return Ok(MediaLocation::Online(changer.to_string()));
}
if let Some(vault) = s.strip_prefix("vault-") {
return Ok(MediaLocation::Vault(vault.to_string()));
}
bail!("MediaLocation parse error");
}
}
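MediaLocation therefore round-trips through its string form, which is exactly what the ApiStringFormat::VerifyFn above validates. A sketch (not part of the change set; it assumes the type is re-exported from the crate root):

use pbs_api_types::MediaLocation;

fn main() {
    let loc: MediaLocation = "online-autoloader1".parse().unwrap();
    assert_eq!(loc, MediaLocation::Online("autoloader1".to_string()));
    assert_eq!(loc.to_string(), "online-autoloader1");

    // strings without a known prefix are rejected
    assert!("somewhere-else".parse::<MediaLocation>().is_err());
}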

View File

@ -0,0 +1,161 @@
//! Types for tape media pool API
//!
//! Note: Both MediaSetPolicy and RetentionPolicy are complex enums,
//! so we cannot use them directly for the API. Instead, we represent
//! them as String.
use std::str::FromStr;
use anyhow::Error;
use serde::{Deserialize, Serialize};
use proxmox_schema::{api, ApiStringFormat, Schema, StringSchema, Updater};
use proxmox_time::{CalendarEvent, TimeSpan};
use crate::{
PROXMOX_SAFE_ID_FORMAT, SINGLE_LINE_COMMENT_FORMAT, SINGLE_LINE_COMMENT_SCHEMA,
TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
};
pub const MEDIA_POOL_NAME_SCHEMA: Schema = StringSchema::new("Media pool name.")
.format(&PROXMOX_SAFE_ID_FORMAT)
.min_length(2)
.max_length(32)
.schema();
pub const MEDIA_SET_NAMING_TEMPLATE_SCHEMA: Schema = StringSchema::new(
"Media set naming template (may contain strftime() time format specifications).",
)
.format(&SINGLE_LINE_COMMENT_FORMAT)
.min_length(2)
.max_length(64)
.schema();
pub const MEDIA_SET_ALLOCATION_POLICY_FORMAT: ApiStringFormat = ApiStringFormat::VerifyFn(|s| {
MediaSetPolicy::from_str(s)?;
Ok(())
});
pub const MEDIA_SET_ALLOCATION_POLICY_SCHEMA: Schema =
StringSchema::new("Media set allocation policy ('continue', 'always', or a calendar event).")
.format(&MEDIA_SET_ALLOCATION_POLICY_FORMAT)
.schema();
/// Media set allocation policy
pub enum MediaSetPolicy {
/// Try to use the current media set
ContinueCurrent,
/// Each backup job creates a new media set
AlwaysCreate,
/// Create a new set when the specified CalendarEvent triggers
CreateAt(CalendarEvent),
}
impl std::str::FromStr for MediaSetPolicy {
type Err = Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
if s == "continue" {
return Ok(MediaSetPolicy::ContinueCurrent);
}
if s == "always" {
return Ok(MediaSetPolicy::AlwaysCreate);
}
let event = s.parse()?;
Ok(MediaSetPolicy::CreateAt(event))
}
}
pub const MEDIA_RETENTION_POLICY_FORMAT: ApiStringFormat = ApiStringFormat::VerifyFn(|s| {
RetentionPolicy::from_str(s)?;
Ok(())
});
pub const MEDIA_RETENTION_POLICY_SCHEMA: Schema =
StringSchema::new("Media retention policy ('overwrite', 'keep', or time span).")
.format(&MEDIA_RETENTION_POLICY_FORMAT)
.schema();
/// Media retention Policy
pub enum RetentionPolicy {
/// Always overwrite media
OverwriteAlways,
/// Protect data for the timespan specified
ProtectFor(TimeSpan),
/// Never overwrite data
KeepForever,
}
impl std::str::FromStr for RetentionPolicy {
type Err = Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
if s == "overwrite" {
return Ok(RetentionPolicy::OverwriteAlways);
}
if s == "keep" {
return Ok(RetentionPolicy::KeepForever);
}
let time_span = s.parse()?;
Ok(RetentionPolicy::ProtectFor(time_span))
}
}
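Both policy parsers accept a fixed keyword first and only then hand the string over to proxmox_time, so the API schema can validate either form. A sketch of the keyword paths (not part of the change set; the calendar-event and time-span forms are left to proxmox_time):

use pbs_api_types::{MediaSetPolicy, RetentionPolicy};

fn main() {
    assert!(matches!("always".parse::<MediaSetPolicy>(), Ok(MediaSetPolicy::AlwaysCreate)));
    assert!(matches!("continue".parse::<MediaSetPolicy>(), Ok(MediaSetPolicy::ContinueCurrent)));

    assert!(matches!("overwrite".parse::<RetentionPolicy>(), Ok(RetentionPolicy::OverwriteAlways)));
    assert!(matches!("keep".parse::<RetentionPolicy>(), Ok(RetentionPolicy::KeepForever)));

    // anything else is parsed as a calendar event (allocation) or a time span (retention)
}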
#[api(
properties: {
name: {
schema: MEDIA_POOL_NAME_SCHEMA,
},
allocation: {
schema: MEDIA_SET_ALLOCATION_POLICY_SCHEMA,
optional: true,
},
retention: {
schema: MEDIA_RETENTION_POLICY_SCHEMA,
optional: true,
},
template: {
schema: MEDIA_SET_NAMING_TEMPLATE_SCHEMA,
optional: true,
},
encrypt: {
schema: TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
optional: true,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
},
)]
#[derive(Serialize, Deserialize, Updater)]
/// Media pool configuration
pub struct MediaPoolConfig {
/// The pool name
#[updater(skip)]
pub name: String,
/// Media Set allocation policy
#[serde(skip_serializing_if = "Option::is_none")]
pub allocation: Option<String>,
/// Media retention policy
#[serde(skip_serializing_if = "Option::is_none")]
pub retention: Option<String>,
/// Media set naming template (default "%c")
///
/// The template is UTF8 text, and can include strftime time
/// format specifications.
#[serde(skip_serializing_if = "Option::is_none")]
pub template: Option<String>,
/// Encryption key fingerprint
///
/// If set, encrypt all data using the specified key.
#[serde(skip_serializing_if = "Option::is_none")]
pub encrypt: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>,
}

View File

@ -0,0 +1,21 @@
use serde::{Deserialize, Serialize};
use proxmox_schema::api;
#[api()]
#[derive(Debug, PartialEq, Eq, Copy, Clone, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// Media Status
pub enum MediaStatus {
/// Media is ready to be written
Writable,
/// Media is full (contains data)
Full,
/// Media is marked as unknown, needs rescan
Unknown,
/// Media is marked as damaged
Damaged,
/// Media is marked as retired
Retired,
}

View File

@ -0,0 +1,92 @@
//! Types for tape backup API
mod device;
pub use device::*;
mod changer;
pub use changer::*;
mod drive;
pub use drive::*;
mod media_pool;
pub use media_pool::*;
mod media_status;
pub use media_status::*;
mod media_location;
pub use media_location::*;
mod media;
pub use media::*;
use const_format::concatcp;
use serde::{Deserialize, Serialize};
use proxmox_schema::{api, const_regex, ApiStringFormat, Schema, StringSchema};
use proxmox_uuid::Uuid;
use crate::{
BackupType, BACKUP_ID_SCHEMA, BACKUP_NS_PATH_RE, FINGERPRINT_SHA256_FORMAT,
PROXMOX_SAFE_ID_REGEX_STR, SNAPSHOT_PATH_REGEX_STR,
};
const_regex! {
pub TAPE_RESTORE_SNAPSHOT_REGEX = concatcp!(r"^", PROXMOX_SAFE_ID_REGEX_STR, r":(?:", BACKUP_NS_PATH_RE,")?", SNAPSHOT_PATH_REGEX_STR, r"$");
}
pub const TAPE_RESTORE_SNAPSHOT_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&TAPE_RESTORE_SNAPSHOT_REGEX);
pub const TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA: Schema =
StringSchema::new("Tape encryption key fingerprint (sha256).")
.format(&FINGERPRINT_SHA256_FORMAT)
.schema();
pub const TAPE_RESTORE_SNAPSHOT_SCHEMA: Schema =
StringSchema::new("A snapshot in the format: 'store:[ns/namespace/...]type/id/time")
.format(&TAPE_RESTORE_SNAPSHOT_FORMAT)
.type_text("store:[ns/namespace/...]type/id/time")
.schema();
#[api(
properties: {
pool: {
schema: MEDIA_POOL_NAME_SCHEMA,
optional: true,
},
"label-text": {
schema: MEDIA_LABEL_SCHEMA,
optional: true,
},
"media": {
schema: MEDIA_UUID_SCHEMA,
optional: true,
},
"media-set": {
schema: MEDIA_SET_UUID_SCHEMA,
optional: true,
},
"backup-type": {
type: BackupType,
optional: true,
},
"backup-id": {
schema: BACKUP_ID_SCHEMA,
optional: true,
},
},
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Content list filter parameters
pub struct MediaContentListFilter {
pub pool: Option<String>,
pub label_text: Option<String>,
pub media: Option<Uuid>,
pub media_set: Option<Uuid>,
pub backup_type: Option<BackupType>,
pub backup_id: Option<String>,
}

View File

@ -0,0 +1,141 @@
use serde::{Deserialize, Serialize};
use proxmox_human_byte::HumanByte;
use proxmox_schema::{api, IntegerSchema, Schema, StringSchema, Updater};
use crate::{
CIDR_SCHEMA, DAILY_DURATION_FORMAT, PROXMOX_SAFE_ID_FORMAT, SINGLE_LINE_COMMENT_SCHEMA,
};
pub const TRAFFIC_CONTROL_TIMEFRAME_SCHEMA: Schema =
StringSchema::new("Timeframe to specify when the rule is active.")
.format(&DAILY_DURATION_FORMAT)
.schema();
pub const TRAFFIC_CONTROL_ID_SCHEMA: Schema = StringSchema::new("Rule ID.")
.format(&PROXMOX_SAFE_ID_FORMAT)
.min_length(3)
.max_length(32)
.schema();
pub const TRAFFIC_CONTROL_RATE_SCHEMA: Schema =
IntegerSchema::new("Rate limit (for Token bucket filter) in bytes/second.")
.minimum(100_000)
.schema();
pub const TRAFFIC_CONTROL_BURST_SCHEMA: Schema =
IntegerSchema::new("Size of the token bucket (for Token bucket filter) in bytes.")
.minimum(1000)
.schema();
#[api(
properties: {
"rate-in": {
type: HumanByte,
optional: true,
},
"burst-in": {
type: HumanByte,
optional: true,
},
"rate-out": {
type: HumanByte,
optional: true,
},
"burst-out": {
type: HumanByte,
optional: true,
},
},
)]
#[derive(Serialize, Deserialize, Default, Clone, Updater, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Rate Limit Configuration
pub struct RateLimitConfig {
#[serde(skip_serializing_if = "Option::is_none")]
pub rate_in: Option<HumanByte>,
#[serde(skip_serializing_if = "Option::is_none")]
pub burst_in: Option<HumanByte>,
#[serde(skip_serializing_if = "Option::is_none")]
pub rate_out: Option<HumanByte>,
#[serde(skip_serializing_if = "Option::is_none")]
pub burst_out: Option<HumanByte>,
}
impl RateLimitConfig {
pub fn with_same_inout(rate: Option<HumanByte>, burst: Option<HumanByte>) -> Self {
Self {
rate_in: rate,
burst_in: burst,
rate_out: rate,
burst_out: burst,
}
}
}
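with_same_inout() is a convenience for the common case where ingress and egress share one limit. A sketch (not part of the change set; it assumes HumanByte implements FromStr for strings like "100 MiB", which is how the API schema consumes such values):

use pbs_api_types::RateLimitConfig;
use proxmox_human_byte::HumanByte;

fn main() {
    let rate: HumanByte = "100 MiB".parse().unwrap();  // assumed FromStr impl
    let burst: HumanByte = "200 MiB".parse().unwrap();

    let limit = RateLimitConfig::with_same_inout(Some(rate), Some(burst));
    assert!(limit.rate_in == limit.rate_out);
    assert!(limit.burst_in == limit.burst_out);
}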
#[api(
properties: {
name: {
schema: TRAFFIC_CONTROL_ID_SCHEMA,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
limit: {
type: RateLimitConfig,
},
network: {
type: Array,
items: {
schema: CIDR_SCHEMA,
},
},
timeframe: {
type: Array,
items: {
schema: TRAFFIC_CONTROL_TIMEFRAME_SCHEMA,
},
optional: true,
},
},
)]
#[derive(Clone, Serialize, Deserialize, PartialEq, Updater)]
#[serde(rename_all = "kebab-case")]
/// Traffic control rule
pub struct TrafficControlRule {
#[updater(skip)]
pub name: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>,
/// Rule applies to source IPs within these networks
pub network: Vec<String>,
#[serde(flatten)]
pub limit: RateLimitConfig,
// fixme: expose this?
// /// Bandwidth is shared across all connections
// #[serde(skip_serializing_if="Option::is_none")]
// pub shared: Option<bool>,
/// Enable the rule at specific times
#[serde(skip_serializing_if = "Option::is_none")]
pub timeframe: Option<Vec<String>>,
}
#[api(
properties: {
config: {
type: TrafficControlRule,
},
},
)]
#[derive(Clone, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Traffic control rule config with current rates
pub struct TrafficControlCurrentRate {
#[serde(flatten)]
pub config: TrafficControlRule,
/// Current ingress rate in bytes/second
pub cur_rate_in: u64,
/// Current egress rate in bytes/second
pub cur_rate_out: u64,
}

226
pbs-api-types/src/user.rs Normal file
View File

@ -0,0 +1,226 @@
use serde::{Deserialize, Serialize};
use proxmox_schema::{api, BooleanSchema, IntegerSchema, Schema, StringSchema, Updater};
use super::userid::{Authid, Userid, PROXMOX_TOKEN_ID_SCHEMA};
use super::{SINGLE_LINE_COMMENT_FORMAT, SINGLE_LINE_COMMENT_SCHEMA};
pub const ENABLE_USER_SCHEMA: Schema = BooleanSchema::new(
"Enable the account (default). You can set this to '0' to disable the account.",
)
.default(true)
.schema();
pub const EXPIRE_USER_SCHEMA: Schema = IntegerSchema::new(
"Account expiration date (seconds since epoch). '0' means no expiration date.",
)
.default(0)
.minimum(0)
.schema();
pub const FIRST_NAME_SCHEMA: Schema = StringSchema::new("First name.")
.format(&SINGLE_LINE_COMMENT_FORMAT)
.min_length(2)
.max_length(64)
.schema();
pub const LAST_NAME_SCHEMA: Schema = StringSchema::new("Last name.")
.format(&SINGLE_LINE_COMMENT_FORMAT)
.min_length(2)
.max_length(64)
.schema();
pub const EMAIL_SCHEMA: Schema = StringSchema::new("E-Mail Address.")
.format(&SINGLE_LINE_COMMENT_FORMAT)
.min_length(2)
.max_length(64)
.schema();
#[api(
properties: {
userid: {
type: Userid,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
enable: {
optional: true,
schema: ENABLE_USER_SCHEMA,
},
expire: {
optional: true,
schema: EXPIRE_USER_SCHEMA,
},
firstname: {
optional: true,
schema: FIRST_NAME_SCHEMA,
},
lastname: {
schema: LAST_NAME_SCHEMA,
optional: true,
},
email: {
schema: EMAIL_SCHEMA,
optional: true,
},
tokens: {
type: Array,
optional: true,
description: "List of user's API tokens.",
items: {
type: ApiToken
},
},
"totp-locked": {
type: bool,
optional: true,
default: false,
description: "True if the user is currently locked out of TOTP factors",
},
"tfa-locked-until": {
optional: true,
description: "Contains a timestamp until when a user is locked out of 2nd factors",
},
}
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// User properties with added list of ApiTokens
pub struct UserWithTokens {
pub userid: Userid,
#[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub enable: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")]
pub expire: Option<i64>,
#[serde(skip_serializing_if = "Option::is_none")]
pub firstname: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub lastname: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub email: Option<String>,
#[serde(skip_serializing_if = "Vec::is_empty", default)]
pub tokens: Vec<ApiToken>,
#[serde(skip_serializing_if = "bool_is_false", default)]
pub totp_locked: bool,
#[serde(skip_serializing_if = "Option::is_none")]
pub tfa_locked_until: Option<i64>,
}
fn bool_is_false(b: &bool) -> bool {
!b
}
#[api(
properties: {
tokenid: {
schema: PROXMOX_TOKEN_ID_SCHEMA,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
enable: {
optional: true,
schema: ENABLE_USER_SCHEMA,
},
expire: {
optional: true,
schema: EXPIRE_USER_SCHEMA,
},
}
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
/// ApiToken properties.
pub struct ApiToken {
pub tokenid: Authid,
#[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub enable: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")]
pub expire: Option<i64>,
}
impl ApiToken {
pub fn is_active(&self) -> bool {
if !self.enable.unwrap_or(true) {
return false;
}
if let Some(expire) = self.expire {
let now = proxmox_time::epoch_i64();
if expire > 0 && expire <= now {
return false;
}
}
true
}
}
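is_active() treats a missing enable flag as enabled and an expire of 0 as "never expires", matching the schema defaults above. A sketch (not part of the change set; the token id is made up):

use pbs_api_types::{ApiToken, Authid};

fn main() {
    let tokenid: Authid = "root@pam!monitoring".parse().unwrap();
    let token = ApiToken {
        tokenid,
        comment: None,
        enable: None,    // unset counts as enabled
        expire: Some(0), // 0 means no expiration date
    };
    assert!(token.is_active());
}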
#[api(
properties: {
userid: {
type: Userid,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
enable: {
optional: true,
schema: ENABLE_USER_SCHEMA,
},
expire: {
optional: true,
schema: EXPIRE_USER_SCHEMA,
},
firstname: {
optional: true,
schema: FIRST_NAME_SCHEMA,
},
lastname: {
schema: LAST_NAME_SCHEMA,
optional: true,
},
email: {
schema: EMAIL_SCHEMA,
optional: true,
},
}
)]
#[derive(Serialize, Deserialize, Updater, PartialEq, Eq)]
/// User properties.
pub struct User {
#[updater(skip)]
pub userid: Userid,
#[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub enable: Option<bool>,
#[serde(skip_serializing_if = "Option::is_none")]
pub expire: Option<i64>,
#[serde(skip_serializing_if = "Option::is_none")]
pub firstname: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub lastname: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub email: Option<String>,
}
impl User {
pub fn is_active(&self) -> bool {
if !self.enable.unwrap_or(true) {
return false;
}
if let Some(expire) = self.expire {
let now = proxmox_time::epoch_i64();
if expire > 0 && expire <= now {
return false;
}
}
true
}
}

78
pbs-api-types/src/zfs.rs Normal file
View File

@ -0,0 +1,78 @@
use serde::{Deserialize, Serialize};
use proxmox_schema::*;
const_regex! {
pub ZPOOL_NAME_REGEX = r"^[a-zA-Z][a-z0-9A-Z\-_.:]+$";
}
pub const ZFS_ASHIFT_SCHEMA: Schema = IntegerSchema::new("Pool sector size exponent.")
.minimum(9)
.maximum(16)
.default(12)
.schema();
pub const ZPOOL_NAME_SCHEMA: Schema = StringSchema::new("ZFS Pool Name")
.format(&ApiStringFormat::Pattern(&ZPOOL_NAME_REGEX))
.schema();
#[api(default: "On")]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// The ZFS compression algorithm to use.
pub enum ZfsCompressionType {
/// Gnu Zip
Gzip,
/// LZ4
Lz4,
/// LZJB
Lzjb,
/// ZLE
Zle,
/// ZStd
ZStd,
/// Enable compression using the default algorithm.
On,
/// Disable compression.
Off,
}
#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// The ZFS RAID level to use.
pub enum ZfsRaidLevel {
/// Single Disk
Single,
/// Mirror
Mirror,
/// Raid10
Raid10,
/// RaidZ
RaidZ,
/// RaidZ2
RaidZ2,
/// RaidZ3
RaidZ3,
}
#[api()]
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// zpool list item
pub struct ZpoolListItem {
/// zpool name
pub name: String,
/// Health
pub health: String,
/// Total size
pub size: u64,
/// Used size
pub alloc: u64,
/// Free space
pub free: u64,
/// ZFS fragmentation level
pub frag: u64,
/// ZFS deduplication ratio
pub dedup: f64,
}

View File

@ -0,0 +1,76 @@
use pbs_api_types::{BackupGroup, BackupType, GroupFilter};
use std::str::FromStr;
#[test]
fn test_no_filters() {
let group_filters = vec![];
let do_backup = [
"vm/101", "vm/102", "vm/103", "vm/104", "vm/105", "vm/106", "vm/107", "vm/108", "vm/109",
];
for id in do_backup {
assert!(BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters));
}
}
#[test]
fn test_include_filters() {
let group_filters = vec![GroupFilter::from_str("regex:.*10[2-8]").unwrap()];
let do_backup = [
"vm/102", "vm/103", "vm/104", "vm/105", "vm/106", "vm/107", "vm/108",
];
let dont_backup = ["vm/101", "vm/109"];
for id in do_backup {
assert!(BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters));
}
for id in dont_backup {
assert!(!BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters));
}
}
#[test]
fn test_exclude_filters() {
let group_filters = [
GroupFilter::from_str("exclude:regex:.*10[1-3]").unwrap(),
GroupFilter::from_str("exclude:regex:.*10[5-7]").unwrap(),
];
let do_backup = ["vm/104", "vm/108", "vm/109"];
let dont_backup = ["vm/101", "vm/102", "vm/103", "vm/105", "vm/106", "vm/107"];
for id in do_backup {
assert!(BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters));
}
for id in dont_backup {
assert!(!BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters));
}
}
#[test]
fn test_include_and_exclude_filters() {
let group_filters = [
GroupFilter::from_str("exclude:regex:.*10[1-3]").unwrap(),
GroupFilter::from_str("regex:.*10[2-8]").unwrap(),
GroupFilter::from_str("exclude:regex:.*10[5-7]").unwrap(),
];
let do_backup = ["vm/104", "vm/108"];
let dont_backup = [
"vm/101", "vm/102", "vm/103", "vm/105", "vm/106", "vm/107", "vm/109",
];
for id in do_backup {
assert!(BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters));
}
for id in dont_backup {
assert!(!BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters));
}
}
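
Taken together, these tests pin down the group filter semantics: an exclude filter vetoes a group, and when include filters are present at least one of them must match. A hypothetical runtime use of the same syntax (values made up for illustration):

let filters = vec![
    GroupFilter::from_str("regex:.*10[2-8]").unwrap(),
    GroupFilter::from_str("exclude:regex:.*105").unwrap(),
];
// matches the include pattern and no exclude pattern
assert!(BackupGroup::new(BackupType::Vm, "vm/102").apply_filters(&filters));
// matches the include pattern, but the exclude pattern vetoes it
assert!(!BackupGroup::new(BackupType::Vm, "vm/105").apply_filters(&filters));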

View File

@ -1,11 +1,5 @@
//! Exports configuration data from the build system
pub const PROXMOX_BACKUP_CRATE_VERSION: &str = env!("CARGO_PKG_VERSION");
// TODO: clean-up, drop the RELEASE one, should not be required on its own and if it would be just
// the X.Y part, also add the Debian package revision (extracted through build.rs) in an existing
// or new constant.
pub const PROXMOX_PKG_VERSION: &str = concat!(
env!("CARGO_PKG_VERSION_MAJOR"),
".",
@ -98,8 +92,6 @@ pub const PROXMOX_BACKUP_KERNEL_FN: &str =
pub const PROXMOX_BACKUP_SUBSCRIPTION_FN: &str = configdir!("/subscription");
pub const APT_PKG_STATE_FN: &str = concat!(PROXMOX_BACKUP_STATE_DIR_M!(), "/pkg-state.json");
/// Prepend configuration directory to a file name
///
/// This is a simple way to get the full path for configuration files.
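
The `configdir!` macro used above prepends the compiled-in configuration directory to the given file name. A rough sketch of the effect, assuming the default /etc/proxmox-backup prefix of the packaged build (illustrative, not taken from this diff):

// configdir!("/user.cfg") yields a &'static str such as
// "/etc/proxmox-backup/user.cfg"
let user_cfg: &str = configdir!("/user.cfg");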

View File

@ -12,8 +12,11 @@ bytes.workspace = true
futures.workspace = true
h2.workspace = true
hex.workspace = true
http.workspace = true
hyper.workspace = true
lazy_static.workspace = true
libc.workspace = true
log.workspace = true
nix.workspace = true
openssl.workspace = true
percent-encoding.workspace = true
@ -27,7 +30,6 @@ tokio = { workspace = true, features = [ "fs", "signal" ] }
tokio-stream.workspace = true
tower-service.workspace = true
xdg.workspace = true
hickory-resolver.workspace = true
pathpatterns.workspace = true
@ -37,7 +39,7 @@ proxmox-compression.workspace = true
proxmox-http = { workspace = true, features = [ "rate-limiter" ] }
proxmox-human-byte.workspace = true
proxmox-io = { workspace = true, features = [ "tokio" ] }
proxmox-log = { workspace = true }
proxmox-lang.workspace = true
proxmox-router = { workspace = true, features = [ "cli", "server" ] }
proxmox-schema.workspace = true
proxmox-sys.workspace = true
@ -46,5 +48,6 @@ proxmox-time.workspace = true
pxar.workspace = true
pbs-api-types.workspace = true
pbs-buildcfg.workspace = true
pbs-datastore.workspace = true
pbs-tools.workspace = true

View File

@ -1,17 +1,19 @@
use anyhow::{format_err, Error};
use std::fs::File;
use std::io::{Seek, SeekFrom, Write};
use std::os::unix::fs::OpenOptionsExt;
use std::sync::Arc;
use futures::future::AbortHandle;
use serde_json::{json, Value};
use pbs_api_types::{BackupArchiveName, BackupDir, BackupNamespace, MANIFEST_BLOB_NAME};
use pbs_api_types::{BackupDir, BackupNamespace};
use pbs_datastore::data_blob::DataBlob;
use pbs_datastore::data_blob_reader::DataBlobReader;
use pbs_datastore::dynamic_index::DynamicIndexReader;
use pbs_datastore::fixed_index::FixedIndexReader;
use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::MANIFEST_BLOB_NAME;
use pbs_datastore::{BackupManifest, PROXMOX_BACKUP_READER_PROTOCOL_ID_V1};
use pbs_tools::crypt_config::CryptConfig;
use pbs_tools::sha::sha256;
@ -126,8 +128,7 @@ impl BackupReader {
/// The manifest signature is verified if we have a crypt_config.
pub async fn download_manifest(&self) -> Result<(BackupManifest, Vec<u8>), Error> {
let mut raw_data = Vec::with_capacity(64 * 1024);
self.download(MANIFEST_BLOB_NAME.as_ref(), &mut raw_data)
.await?;
self.download(MANIFEST_BLOB_NAME, &mut raw_data).await?;
let blob = DataBlob::load_from_reader(&mut &raw_data[..])?;
// no expected digest available
let data = blob.decode(None, None)?;
@ -140,16 +141,20 @@ impl BackupReader {
/// Download a .blob file
///
/// This creates a temporary file (See [`crate::tools::create_tmp_file`] for
/// details). The data is verified using the provided manifest.
/// This creates a temporary file in /tmp (using O_TMPFILE). The data is verified using
/// the provided manifest.
pub async fn download_blob(
&self,
manifest: &BackupManifest,
name: &BackupArchiveName,
name: &str,
) -> Result<DataBlobReader<'_, File>, Error> {
let mut tmpfile = crate::tools::create_tmp_file()?;
let mut tmpfile = std::fs::OpenOptions::new()
.write(true)
.read(true)
.custom_flags(libc::O_TMPFILE)
.open("/tmp")?;
self.download(name.as_ref(), &mut tmpfile).await?;
self.download(name, &mut tmpfile).await?;
tmpfile.seek(SeekFrom::Start(0))?;
let (csum, size) = sha256(&mut tmpfile)?;
@ -162,16 +167,20 @@ impl BackupReader {
/// Download dynamic index file
///
/// This creates a temporary file (See [`crate::tools::create_tmp_file`] for
/// details). The index is verified using the provided manifest.
/// This creates a temporary file in /tmp (using O_TMPFILE). The index is verified using
/// the provided manifest.
pub async fn download_dynamic_index(
&self,
manifest: &BackupManifest,
name: &BackupArchiveName,
name: &str,
) -> Result<DynamicIndexReader, Error> {
let mut tmpfile = crate::tools::create_tmp_file()?;
let mut tmpfile = std::fs::OpenOptions::new()
.write(true)
.read(true)
.custom_flags(libc::O_TMPFILE)
.open("/tmp")?;
self.download(name.as_ref(), &mut tmpfile).await?;
self.download(name, &mut tmpfile).await?;
let index = DynamicIndexReader::new(tmpfile)
.map_err(|err| format_err!("unable to read dynamic index '{}' - {}", name, err))?;
@ -185,16 +194,20 @@ impl BackupReader {
/// Download fixed index file
///
/// This creates a temporary file (See [`crate::tools::create_tmp_file`] for
/// details). The index is verified using the provided manifest.
/// This creates a temporary file in /tmp (using O_TMPFILE). The index is verified using
/// the provided manifest.
pub async fn download_fixed_index(
&self,
manifest: &BackupManifest,
name: &BackupArchiveName,
name: &str,
) -> Result<FixedIndexReader, Error> {
let mut tmpfile = crate::tools::create_tmp_file()?;
let mut tmpfile = std::fs::OpenOptions::new()
.write(true)
.read(true)
.custom_flags(libc::O_TMPFILE)
.open("/tmp")?;
self.download(name.as_ref(), &mut tmpfile).await?;
self.download(name, &mut tmpfile).await?;
let index = FixedIndexReader::new(tmpfile)
.map_err(|err| format_err!("unable to read fixed index '{}' - {}", name, err))?;

View File

@ -1,5 +1,4 @@
use anyhow::{bail, Error};
use serde::{Deserialize, Serialize};
use proxmox_schema::*;
@ -7,13 +6,10 @@ const_regex! {
BACKUPSPEC_REGEX = r"^([a-zA-Z0-9_-]+\.(pxar|img|conf|log)):(.+)$";
}
pub const BACKUP_SOURCE_SCHEMA: Schema = StringSchema::new(
"Backup source specification ([<archive-name>.<type>:<source-path>]), the \
'archive-name' must contain alphanumerics, hyphens and underscores only. \
The 'type' must be either 'pxar', 'img', 'conf' or 'log'.",
)
.format(&ApiStringFormat::Pattern(&BACKUPSPEC_REGEX))
.schema();
pub const BACKUP_SOURCE_SCHEMA: Schema =
StringSchema::new("Backup source specification ([<label>:<path>]).")
.format(&ApiStringFormat::Pattern(&BACKUPSPEC_REGEX))
.schema();
pub enum BackupSpecificationType {
PXAR,
@ -38,7 +34,7 @@ pub fn parse_backup_specification(value: &str) -> Result<BackupSpecification, Er
"img" => BackupSpecificationType::IMAGE,
"conf" => BackupSpecificationType::CONFIG,
"log" => BackupSpecificationType::LOGFILE,
_ => bail!("unknown backup source type '{extension}'"),
_ => bail!("unknown backup source type '{}'", extension),
};
return Ok(BackupSpecification {
archive_name,
@ -47,30 +43,5 @@ pub fn parse_backup_specification(value: &str) -> Result<BackupSpecification, Er
});
}
bail!("unable to parse backup source specification '{value}'");
}
#[api]
#[derive(Default, Deserialize, Serialize)]
#[serde(rename_all = "lowercase")]
/// Mode to detect file changes since last backup run
pub enum BackupDetectionMode {
/// Encode backup as self contained pxar archive
#[default]
Legacy,
/// Split backup mode, re-encode payload data
Data,
/// Compare metadata, reuse payload chunks if metadata unchanged
Metadata,
}
impl BackupDetectionMode {
/// Selected mode is data based file change detection with split meta/payload streams
pub fn is_data(&self) -> bool {
matches!(self, Self::Data)
}
/// Selected mode is metadata based file change detection
pub fn is_metadata(&self) -> bool {
matches!(self, Self::Metadata)
}
bail!("unable to parse backup source specification '{}'", value);
}
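
The `BackupDetectionMode` enum added on the master side is how the client selects between the legacy single-archive encoding and the split metadata/payload encoding. A minimal illustrative match (not part of the diff):

let mode = BackupDetectionMode::Metadata;
if mode.is_metadata() {
    // compare file metadata with the previous snapshot, reuse payload chunks if unchanged
} else if mode.is_data() {
    // split meta/payload streams, but re-encode all payload data
} else {
    // BackupDetectionMode::Legacy: one self-contained pxar archive
}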

View File

@ -1,119 +0,0 @@
//! Implements counters to generate statistics for log outputs during uploads with backup writer
use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
use std::sync::Arc;
use std::time::Duration;
use crate::pxar::create::ReusableDynamicEntry;
/// Basic backup run statistics and archive checksum
pub struct BackupStats {
pub size: u64,
pub csum: [u8; 32],
pub duration: Duration,
pub chunk_count: u64,
}
/// Extended backup run statistics and archive checksum
pub(crate) struct UploadStats {
pub(crate) chunk_count: usize,
pub(crate) chunk_reused: usize,
pub(crate) chunk_injected: usize,
pub(crate) size: usize,
pub(crate) size_reused: usize,
pub(crate) size_injected: usize,
pub(crate) size_compressed: usize,
pub(crate) duration: Duration,
pub(crate) csum: [u8; 32],
}
impl UploadStats {
/// Convert the upload stats to the more concise [`BackupStats`]
#[inline(always)]
pub(crate) fn to_backup_stats(&self) -> BackupStats {
BackupStats {
chunk_count: self.chunk_count as u64,
size: self.size as u64,
duration: self.duration,
csum: self.csum,
}
}
}
/// Atomic counters for accounting upload stream progress information
#[derive(Clone)]
pub(crate) struct UploadCounters {
injected_chunk_count: Arc<AtomicUsize>,
known_chunk_count: Arc<AtomicUsize>,
total_chunk_count: Arc<AtomicUsize>,
compressed_stream_len: Arc<AtomicU64>,
injected_stream_len: Arc<AtomicUsize>,
reused_stream_len: Arc<AtomicUsize>,
total_stream_len: Arc<AtomicUsize>,
}
impl UploadCounters {
/// Create and zero init new upload counters
pub(crate) fn new() -> Self {
Self {
total_chunk_count: Arc::new(AtomicUsize::new(0)),
injected_chunk_count: Arc::new(AtomicUsize::new(0)),
known_chunk_count: Arc::new(AtomicUsize::new(0)),
compressed_stream_len: Arc::new(AtomicU64::new(0)),
injected_stream_len: Arc::new(AtomicUsize::new(0)),
reused_stream_len: Arc::new(AtomicUsize::new(0)),
total_stream_len: Arc::new(AtomicUsize::new(0)),
}
}
#[inline(always)]
pub(crate) fn add_known_chunk(&mut self, chunk_len: usize) -> usize {
self.known_chunk_count.fetch_add(1, Ordering::SeqCst);
self.total_chunk_count.fetch_add(1, Ordering::SeqCst);
self.reused_stream_len
.fetch_add(chunk_len, Ordering::SeqCst);
self.total_stream_len.fetch_add(chunk_len, Ordering::SeqCst)
}
#[inline(always)]
pub(crate) fn add_new_chunk(&mut self, chunk_len: usize, chunk_raw_size: u64) -> usize {
self.total_chunk_count.fetch_add(1, Ordering::SeqCst);
self.compressed_stream_len
.fetch_add(chunk_raw_size, Ordering::SeqCst);
self.total_stream_len.fetch_add(chunk_len, Ordering::SeqCst)
}
#[inline(always)]
pub(crate) fn add_injected_chunk(&mut self, chunk: &ReusableDynamicEntry) -> usize {
self.total_chunk_count.fetch_add(1, Ordering::SeqCst);
self.injected_chunk_count.fetch_add(1, Ordering::SeqCst);
self.reused_stream_len
.fetch_add(chunk.size() as usize, Ordering::SeqCst);
self.injected_stream_len
.fetch_add(chunk.size() as usize, Ordering::SeqCst);
self.total_stream_len
.fetch_add(chunk.size() as usize, Ordering::SeqCst)
}
#[inline(always)]
pub(crate) fn total_stream_len(&self) -> usize {
self.total_stream_len.load(Ordering::SeqCst)
}
/// Convert the counters to [`UploadStats`], including given archive checksum and runtime.
#[inline(always)]
pub(crate) fn to_upload_stats(&self, csum: [u8; 32], duration: Duration) -> UploadStats {
UploadStats {
chunk_count: self.total_chunk_count.load(Ordering::SeqCst),
chunk_reused: self.known_chunk_count.load(Ordering::SeqCst),
chunk_injected: self.injected_chunk_count.load(Ordering::SeqCst),
size: self.total_stream_len.load(Ordering::SeqCst),
size_reused: self.reused_stream_len.load(Ordering::SeqCst),
size_injected: self.injected_stream_len.load(Ordering::SeqCst),
size_compressed: self.compressed_stream_len.load(Ordering::SeqCst) as usize,
duration,
csum,
}
}
}
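
Every field of `UploadCounters` is an `Arc`-wrapped atomic, so cloning the struct is how one task keeps a read-only view while the upload stream mutates the counts. A small sketch of that sharing (crate-internal API, shown for illustration only):

let mut counters = UploadCounters::new();
let readonly = counters.clone(); // shares the same atomics

// fetch_add returns the previous total stream length, i.e. the chunk's offset
let offset = counters.add_known_chunk(4096);
assert_eq!(readonly.total_stream_len(), offset + 4096);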

View File

@ -1,35 +1,28 @@
use std::collections::HashSet;
use std::future::Future;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::os::unix::fs::OpenOptionsExt;
use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
use std::sync::{Arc, Mutex};
use std::time::Instant;
use anyhow::{bail, format_err, Error};
use futures::future::{self, AbortHandle, Either, FutureExt, TryFutureExt};
use futures::stream::{Stream, StreamExt, TryStreamExt};
use openssl::sha::Sha256;
use serde_json::{json, Value};
use tokio::io::AsyncReadExt;
use tokio::sync::{mpsc, oneshot};
use tokio_stream::wrappers::ReceiverStream;
use pbs_api_types::{
ArchiveType, BackupArchiveName, BackupDir, BackupNamespace, CATALOG_NAME, MANIFEST_BLOB_NAME,
};
use pbs_api_types::{BackupDir, BackupNamespace};
use pbs_datastore::data_blob::{ChunkInfo, DataBlob, DataChunkBuilder};
use pbs_datastore::dynamic_index::DynamicIndexReader;
use pbs_datastore::fixed_index::FixedIndexReader;
use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::BackupManifest;
use pbs_datastore::PROXMOX_BACKUP_PROTOCOL_ID_V1;
use pbs_datastore::manifest::{ArchiveType, BackupManifest, MANIFEST_BLOB_NAME};
use pbs_datastore::{CATALOG_NAME, PROXMOX_BACKUP_PROTOCOL_ID_V1};
use pbs_tools::crypt_config::CryptConfig;
use proxmox_human_byte::HumanByte;
use proxmox_log::{debug, enabled, info, trace, warn, Level};
use proxmox_time::TimeSpan;
use super::backup_stats::{BackupStats, UploadCounters, UploadStats};
use super::inject_reused_chunks::{InjectChunks, InjectReusedChunks, InjectedChunksInfo};
use super::merge_known_chunks::{MergeKnownChunks, MergedChunkInfo};
use super::{H2Client, HttpClient};
@ -46,6 +39,11 @@ impl Drop for BackupWriter {
}
}
pub struct BackupStats {
pub size: u64,
pub csum: [u8; 32],
}
/// Options for uploading blobs/streams to the server
#[derive(Default, Clone)]
pub struct UploadOptions {
@ -55,12 +53,17 @@ pub struct UploadOptions {
pub fixed_size: Option<u64>,
}
struct ChunkUploadResponse {
future: h2::legacy::client::ResponseFuture,
struct UploadStats {
chunk_count: usize,
chunk_reused: usize,
size: usize,
size_reused: usize,
size_compressed: usize,
duration: std::time::Duration,
csum: [u8; 32],
}
type UploadQueueSender = mpsc::Sender<(MergedChunkInfo, Option<ChunkUploadResponse>)>;
type UploadQueueSender = mpsc::Sender<(MergedChunkInfo, Option<h2::client::ResponseFuture>)>;
type UploadResultReceiver = oneshot::Receiver<Result<(), Error>>;
impl BackupWriter {
@ -143,7 +146,7 @@ impl BackupWriter {
param: Option<Value>,
content_type: &str,
data: Vec<u8>,
) -> Result<h2::legacy::client::ResponseFuture, Error> {
) -> Result<h2::client::ResponseFuture, Error> {
let request =
H2Client::request_builder("localhost", method, path, param, Some(content_type))
.unwrap();
@ -183,7 +186,6 @@ impl BackupWriter {
mut reader: R,
file_name: &str,
) -> Result<BackupStats, Error> {
let start_time = Instant::now();
let mut raw_data = Vec::new();
// fixme: avoid loading into memory
reader.read_to_end(&mut raw_data)?;
@ -201,12 +203,7 @@ impl BackupWriter {
raw_data,
)
.await?;
Ok(BackupStats {
size,
csum,
duration: start_time.elapsed(),
chunk_count: 0,
})
Ok(BackupStats { size, csum })
}
pub async fn upload_blob_from_data(
@ -215,7 +212,6 @@ impl BackupWriter {
file_name: &str,
options: UploadOptions,
) -> Result<BackupStats, Error> {
let start_time = Instant::now();
let blob = match (options.encrypt, &self.crypt_config) {
(false, _) => DataBlob::encode(&data, None, options.compress)?,
(true, None) => bail!("requested encryption without a crypt config"),
@ -239,12 +235,7 @@ impl BackupWriter {
raw_data,
)
.await?;
Ok(BackupStats {
size,
csum,
duration: start_time.elapsed(),
chunk_count: 0,
})
Ok(BackupStats { size, csum })
}
pub async fn upload_blob_from_file<P: AsRef<std::path::Path>>(
@ -269,105 +260,11 @@ impl BackupWriter {
.await
}
/// Upload chunks and index
pub async fn upload_index_chunk_info(
&self,
archive_name: &BackupArchiveName,
stream: impl Stream<Item = Result<MergedChunkInfo, Error>>,
options: UploadOptions,
) -> Result<BackupStats, Error> {
let mut param = json!({ "archive-name": archive_name });
let prefix = if let Some(size) = options.fixed_size {
param["size"] = size.into();
"fixed"
} else {
"dynamic"
};
if options.encrypt && self.crypt_config.is_none() {
bail!("requested encryption without a crypt config");
}
let wid = self
.h2
.post(&format!("{prefix}_index"), Some(param))
.await?
.as_u64()
.unwrap();
let mut counters = UploadCounters::new();
let counters_readonly = counters.clone();
let is_fixed_chunk_size = prefix == "fixed";
let index_csum = Arc::new(Mutex::new(Some(Sha256::new())));
let index_csum_2 = index_csum.clone();
let stream = stream
.and_then(move |mut merged_chunk_info| {
match merged_chunk_info {
MergedChunkInfo::New(ref chunk_info) => {
let chunk_len = chunk_info.chunk_len;
let offset =
counters.add_new_chunk(chunk_len as usize, chunk_info.chunk.raw_size());
let end_offset = offset as u64 + chunk_len;
let mut guard = index_csum.lock().unwrap();
let csum = guard.as_mut().unwrap();
if !is_fixed_chunk_size {
csum.update(&end_offset.to_le_bytes());
}
csum.update(&chunk_info.digest);
}
MergedChunkInfo::Known(ref mut known_chunk_list) => {
for (chunk_len, digest) in known_chunk_list {
let offset = counters.add_known_chunk(*chunk_len as usize);
let end_offset = offset as u64 + *chunk_len;
let mut guard = index_csum.lock().unwrap();
let csum = guard.as_mut().unwrap();
if !is_fixed_chunk_size {
csum.update(&end_offset.to_le_bytes());
}
csum.update(digest);
// Replace size with offset, expected by further stream
*chunk_len = offset as u64;
}
}
}
future::ok(merged_chunk_info)
})
.merge_known_chunks();
let upload_stats = Self::upload_merged_chunk_stream(
self.h2.clone(),
wid,
archive_name,
prefix,
stream,
index_csum_2,
counters_readonly,
)
.await?;
let param = json!({
"wid": wid ,
"chunk-count": upload_stats.chunk_count,
"size": upload_stats.size,
"csum": hex::encode(upload_stats.csum),
});
let _value = self
.h2
.post(&format!("{prefix}_close"), Some(param))
.await?;
Ok(upload_stats.to_backup_stats())
}
pub async fn upload_stream(
&self,
archive_name: &BackupArchiveName,
archive_name: &str,
stream: impl Stream<Item = Result<bytes::BytesMut, Error>>,
options: UploadOptions,
injections: Option<std::sync::mpsc::Receiver<InjectChunks>>,
) -> Result<BackupStats, Error> {
let known_chunks = Arc::new(Mutex::new(HashSet::new()));
@ -390,13 +287,13 @@ impl BackupWriter {
if !manifest
.files()
.iter()
.any(|file| file.filename == archive_name.as_ref())
.any(|file| file.filename == archive_name)
{
info!("Previous manifest does not contain an archive called '{archive_name}', skipping download..");
log::info!("Previous manifest does not contain an archive called '{archive_name}', skipping download..");
} else {
// try, but ignore errors
match archive_name.archive_type() {
ArchiveType::FixedIndex => {
match ArchiveType::from_path(archive_name) {
Ok(ArchiveType::FixedIndex) => {
if let Err(err) = self
.download_previous_fixed_index(
archive_name,
@ -405,10 +302,10 @@ impl BackupWriter {
)
.await
{
warn!("Error downloading .fidx from previous manifest: {}", err);
log::warn!("Error downloading .fidx from previous manifest: {}", err);
}
}
ArchiveType::DynamicIndex => {
Ok(ArchiveType::DynamicIndex) => {
if let Err(err) = self
.download_previous_dynamic_index(
archive_name,
@ -417,7 +314,7 @@ impl BackupWriter {
)
.await
{
warn!("Error downloading .didx from previous manifest: {}", err);
log::warn!("Error downloading .didx from previous manifest: {}", err);
}
}
_ => { /* do nothing */ }
@ -444,59 +341,58 @@ impl BackupWriter {
None
},
options.compress,
injections,
archive_name,
)
.await?;
let size_dirty = upload_stats.size - upload_stats.size_reused;
let size: HumanByte = upload_stats.size.into();
let archive = if enabled!(Level::DEBUG) {
archive_name.to_string()
let archive = if log::log_enabled!(log::Level::Debug) {
archive_name
} else {
archive_name.without_type_extension()
pbs_tools::format::strip_server_file_extension(archive_name)
};
if upload_stats.chunk_injected > 0 {
info!(
"{archive}: reused {} from previous snapshot for unchanged files ({} chunks)",
HumanByte::from(upload_stats.size_injected),
upload_stats.chunk_injected,
);
}
if *archive_name != *CATALOG_NAME {
if archive_name != CATALOG_NAME {
let speed: HumanByte =
((size_dirty * 1_000_000) / (upload_stats.duration.as_micros() as usize)).into();
let size_dirty: HumanByte = size_dirty.into();
let size_compressed: HumanByte = upload_stats.size_compressed.into();
info!(
"{archive}: had to backup {size_dirty} of {size} (compressed {size_compressed}) in {:.2} s (average {speed}/s)",
log::info!(
"{}: had to backup {} of {} (compressed {}) in {:.2}s",
archive,
size_dirty,
size,
size_compressed,
upload_stats.duration.as_secs_f64()
);
log::info!("{}: average backup speed: {}/s", archive, speed);
} else {
info!("Uploaded backup catalog ({})", size);
log::info!("Uploaded backup catalog ({})", size);
}
if upload_stats.size_reused > 0 && upload_stats.size > 1024 * 1024 {
let reused_percent = upload_stats.size_reused as f64 * 100. / upload_stats.size as f64;
let reused: HumanByte = upload_stats.size_reused.into();
info!(
log::info!(
"{}: backup was done incrementally, reused {} ({:.1}%)",
archive, reused, reused_percent
archive,
reused,
reused_percent
);
}
if enabled!(Level::DEBUG) && upload_stats.chunk_count > 0 {
debug!(
if log::log_enabled!(log::Level::Debug) && upload_stats.chunk_count > 0 {
log::debug!(
"{}: Reused {} from {} chunks.",
archive, upload_stats.chunk_reused, upload_stats.chunk_count
archive,
upload_stats.chunk_reused,
upload_stats.chunk_count
);
debug!(
log::debug!(
"{}: Average chunk size was {}.",
archive,
HumanByte::from(upload_stats.size / upload_stats.chunk_count)
);
debug!(
log::debug!(
"{}: Average time per request: {} microseconds.",
archive,
(upload_stats.duration.as_micros()) / (upload_stats.chunk_count as u128)
@ -510,11 +406,14 @@ impl BackupWriter {
"csum": hex::encode(upload_stats.csum),
});
let _value = self.h2.post(&close_path, Some(param)).await?;
Ok(upload_stats.to_backup_stats())
Ok(BackupStats {
size: upload_stats.size as u64,
csum: upload_stats.csum,
})
}
fn response_queue() -> (
mpsc::Sender<h2::legacy::client::ResponseFuture>,
mpsc::Sender<h2::client::ResponseFuture>,
oneshot::Receiver<Result<(), Error>>,
) {
let (verify_queue_tx, verify_queue_rx) = mpsc::channel(100);
@ -537,11 +436,11 @@ impl BackupWriter {
tokio::spawn(
ReceiverStream::new(verify_queue_rx)
.map(Ok::<_, Error>)
.try_for_each(move |response: h2::legacy::client::ResponseFuture| {
.try_for_each(move |response: h2::client::ResponseFuture| {
response
.map_err(Error::from)
.and_then(H2Client::h2api_response)
.map_ok(move |result| debug!("RESPONSE: {:?}", result))
.map_ok(move |result| log::debug!("RESPONSE: {:?}", result))
.map_err(|err| format_err!("pipelined request failed: {}", err))
})
.map(|result| {
@ -556,7 +455,6 @@ impl BackupWriter {
h2: H2Client,
wid: u64,
path: String,
uploaded: Arc<AtomicUsize>,
) -> (UploadQueueSender, UploadResultReceiver) {
let (verify_queue_tx, verify_queue_rx) = mpsc::channel(64);
let (verify_result_tx, verify_result_rx) = oneshot::channel();
@ -565,21 +463,15 @@ impl BackupWriter {
tokio::spawn(
ReceiverStream::new(verify_queue_rx)
.map(Ok::<_, Error>)
.and_then(move |(merged_chunk_info, response): (MergedChunkInfo, Option<ChunkUploadResponse>)| {
.and_then(move |(merged_chunk_info, response): (MergedChunkInfo, Option<h2::client::ResponseFuture>)| {
match (response, merged_chunk_info) {
(Some(response), MergedChunkInfo::Known(list)) => {
Either::Left(
response
.future
.map_err(Error::from)
.and_then(H2Client::h2api_response)
.and_then({
let uploaded = uploaded.clone();
move |_result| {
// account for uploaded bytes for progress output
uploaded.fetch_add(response.size, Ordering::SeqCst);
.and_then(move |_result| {
future::ok(MergedChunkInfo::Known(list))
}
})
)
}
@ -599,7 +491,7 @@ impl BackupWriter {
digest_list.push(hex::encode(digest));
offset_list.push(offset);
}
debug!("append chunks list len ({})", digest_list.len());
log::debug!("append chunks list len ({})", digest_list.len());
let param = json!({ "wid": wid, "digest-list": digest_list, "offset-list": offset_list });
let request = H2Client::request_builder("localhost", "PUT", &path, None, Some("application/json")).unwrap();
let param_data = bytes::Bytes::from(param.to_string().into_bytes());
@ -627,11 +519,15 @@ impl BackupWriter {
pub async fn download_previous_fixed_index(
&self,
archive_name: &BackupArchiveName,
archive_name: &str,
manifest: &BackupManifest,
known_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
) -> Result<FixedIndexReader, Error> {
let mut tmpfile = crate::tools::create_tmp_file()?;
let mut tmpfile = std::fs::OpenOptions::new()
.write(true)
.read(true)
.custom_flags(libc::O_TMPFILE)
.open("/tmp")?;
let param = json!({ "archive-name": archive_name });
self.h2
@ -651,7 +547,7 @@ impl BackupWriter {
known_chunks.insert(*index.index_digest(i).unwrap());
}
debug!(
log::debug!(
"{}: known chunks list length is {}",
archive_name,
index.index_count()
@ -662,11 +558,15 @@ impl BackupWriter {
pub async fn download_previous_dynamic_index(
&self,
archive_name: &BackupArchiveName,
archive_name: &str,
manifest: &BackupManifest,
known_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
) -> Result<DynamicIndexReader, Error> {
let mut tmpfile = crate::tools::create_tmp_file()?;
let mut tmpfile = std::fs::OpenOptions::new()
.write(true)
.read(true)
.custom_flags(libc::O_TMPFILE)
.open("/tmp")?;
let param = json!({ "archive-name": archive_name });
self.h2
@ -685,7 +585,7 @@ impl BackupWriter {
known_chunks.insert(*index.index_digest(i).unwrap());
}
debug!(
log::debug!(
"{}: known chunks list length is {}",
archive_name,
index.index_count()
@ -709,7 +609,7 @@ impl BackupWriter {
pub async fn download_previous_manifest(&self) -> Result<BackupManifest, Error> {
let mut raw_data = Vec::with_capacity(64 * 1024);
let param = json!({ "archive-name": MANIFEST_BLOB_NAME.to_string() });
let param = json!({ "archive-name": MANIFEST_BLOB_NAME });
self.h2
.download("previous", Some(param), &mut raw_data)
.await?;
@ -736,39 +636,38 @@ impl BackupWriter {
known_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
crypt_config: Option<Arc<CryptConfig>>,
compress: bool,
injections: Option<std::sync::mpsc::Receiver<InjectChunks>>,
archive: &BackupArchiveName,
) -> impl Future<Output = Result<UploadStats, Error>> {
let mut counters = UploadCounters::new();
let counters_readonly = counters.clone();
let total_chunks = Arc::new(AtomicUsize::new(0));
let total_chunks2 = total_chunks.clone();
let known_chunk_count = Arc::new(AtomicUsize::new(0));
let known_chunk_count2 = known_chunk_count.clone();
let stream_len = Arc::new(AtomicUsize::new(0));
let stream_len2 = stream_len.clone();
let compressed_stream_len = Arc::new(AtomicU64::new(0));
let compressed_stream_len2 = compressed_stream_len.clone();
let reused_len = Arc::new(AtomicUsize::new(0));
let reused_len2 = reused_len.clone();
let append_chunk_path = format!("{}_index", prefix);
let upload_chunk_path = format!("{}_chunk", prefix);
let is_fixed_chunk_size = prefix == "fixed";
let (upload_queue, upload_result) =
Self::append_chunk_queue(h2.clone(), wid, append_chunk_path);
let start_time = std::time::Instant::now();
let index_csum = Arc::new(Mutex::new(Some(openssl::sha::Sha256::new())));
let index_csum_2 = index_csum.clone();
let stream = stream
.inject_reused_chunks(injections, counters.clone())
.and_then(move |chunk_info| match chunk_info {
InjectedChunksInfo::Known(chunks) => {
// account for injected chunks
let mut known = Vec::new();
let mut guard = index_csum.lock().unwrap();
let csum = guard.as_mut().unwrap();
for chunk in chunks {
let offset = counters.add_injected_chunk(&chunk) as u64;
let digest = chunk.digest();
known.push((offset, digest));
let end_offset = offset + chunk.size();
csum.update(&end_offset.to_le_bytes());
csum.update(&digest);
}
future::ok(MergedChunkInfo::Known(known))
}
InjectedChunksInfo::Raw(data) => {
// account for not injected chunks (new and known)
stream
.and_then(move |data| {
let chunk_len = data.len();
total_chunks.fetch_add(1, Ordering::SeqCst);
let offset = stream_len.fetch_add(chunk_len, Ordering::SeqCst) as u64;
let mut chunk_builder = DataChunkBuilder::new(data.as_ref()).compress(compress);
if let Some(ref crypt_config) = crypt_config {
@ -776,29 +675,7 @@ impl BackupWriter {
}
let mut known_chunks = known_chunks.lock().unwrap();
let digest = *chunk_builder.digest();
let (offset, res) = if known_chunks.contains(&digest) {
let offset = counters.add_known_chunk(chunk_len) as u64;
(offset, MergedChunkInfo::Known(vec![(offset, digest)]))
} else {
match chunk_builder.build() {
Ok((chunk, digest)) => {
let offset =
counters.add_new_chunk(chunk_len, chunk.raw_size()) as u64;
known_chunks.insert(digest);
(
offset,
MergedChunkInfo::New(ChunkInfo {
chunk,
digest,
chunk_len: chunk_len as u64,
offset,
}),
)
}
Err(err) => return future::err(err),
}
};
let digest = chunk_builder.digest();
let mut guard = index_csum.lock().unwrap();
let csum = guard.as_mut().unwrap();
@ -808,63 +685,28 @@ impl BackupWriter {
if !is_fixed_chunk_size {
csum.update(&chunk_end.to_le_bytes());
}
csum.update(&digest);
csum.update(digest);
future::ok(res)
let chunk_is_known = known_chunks.contains(digest);
if chunk_is_known {
known_chunk_count.fetch_add(1, Ordering::SeqCst);
reused_len.fetch_add(chunk_len, Ordering::SeqCst);
future::ok(MergedChunkInfo::Known(vec![(offset, *digest)]))
} else {
let compressed_stream_len2 = compressed_stream_len.clone();
known_chunks.insert(*digest);
future::ready(chunk_builder.build().map(move |(chunk, digest)| {
compressed_stream_len2.fetch_add(chunk.raw_size(), Ordering::SeqCst);
MergedChunkInfo::New(ChunkInfo {
chunk,
digest,
chunk_len: chunk_len as u64,
offset,
})
}))
}
})
.merge_known_chunks();
Self::upload_merged_chunk_stream(
h2,
wid,
archive,
prefix,
stream,
index_csum_2,
counters_readonly,
)
}
fn upload_merged_chunk_stream(
h2: H2Client,
wid: u64,
archive: &BackupArchiveName,
prefix: &str,
stream: impl Stream<Item = Result<MergedChunkInfo, Error>>,
index_csum: Arc<Mutex<Option<Sha256>>>,
counters: UploadCounters,
) -> impl Future<Output = Result<UploadStats, Error>> {
let append_chunk_path = format!("{prefix}_index");
let upload_chunk_path = format!("{prefix}_chunk");
let start_time = std::time::Instant::now();
let uploaded_len = Arc::new(AtomicUsize::new(0));
let (upload_queue, upload_result) =
Self::append_chunk_queue(h2.clone(), wid, append_chunk_path, uploaded_len.clone());
let progress_handle = if archive.ends_with(".img.fidx")
|| archive.ends_with(".pxar.didx")
|| archive.ends_with(".ppxar.didx")
{
let counters = counters.clone();
Some(tokio::spawn(async move {
loop {
tokio::time::sleep(tokio::time::Duration::from_secs(60)).await;
let size = HumanByte::from(counters.total_stream_len());
let size_uploaded = HumanByte::from(uploaded_len.load(Ordering::SeqCst));
let elapsed = TimeSpan::from(start_time.elapsed());
info!("processed {size} in {elapsed}, uploaded {size_uploaded}");
}
}))
} else {
None
};
stream
.merge_known_chunks()
.try_for_each(move |merged_chunk_info| {
let upload_queue = upload_queue.clone();
@ -873,7 +715,7 @@ impl BackupWriter {
let digest = chunk_info.digest;
let digest_str = hex::encode(digest);
trace!(
log::trace!(
"upload new chunk {} ({} bytes, offset {})",
digest_str,
chunk_info.chunk_len,
@ -904,13 +746,7 @@ impl BackupWriter {
Either::Left(h2.send_request(request, upload_data).and_then(
move |response| async move {
upload_queue
.send((
new_info,
Some(ChunkUploadResponse {
future: response,
size: chunk_info.chunk_len as usize,
}),
))
.send((new_info, Some(response)))
.await
.map_err(|err| {
format_err!("failed to send to upload queue: {}", err)
@ -928,14 +764,25 @@ impl BackupWriter {
})
.then(move |result| async move { upload_result.await?.and(result) }.boxed())
.and_then(move |_| {
let mut guard = index_csum.lock().unwrap();
let duration = start_time.elapsed();
let chunk_count = total_chunks2.load(Ordering::SeqCst);
let chunk_reused = known_chunk_count2.load(Ordering::SeqCst);
let size = stream_len2.load(Ordering::SeqCst);
let size_reused = reused_len2.load(Ordering::SeqCst);
let size_compressed = compressed_stream_len2.load(Ordering::SeqCst) as usize;
let mut guard = index_csum_2.lock().unwrap();
let csum = guard.take().unwrap().finish();
if let Some(handle) = progress_handle {
handle.abort();
}
futures::future::ok(counters.to_upload_stats(csum, start_time.elapsed()))
futures::future::ok(UploadStats {
chunk_count,
chunk_reused,
size,
size_reused,
size_compressed,
duration,
csum,
})
})
}
@ -964,7 +811,7 @@ impl BackupWriter {
break;
}
debug!("send test data ({} bytes)", data.len());
log::debug!("send test data ({} bytes)", data.len());
let request =
H2Client::request_builder("localhost", "POST", "speedtest", None, None).unwrap();
let request_future = self
@ -979,13 +826,13 @@ impl BackupWriter {
let _ = upload_result.await?;
info!(
log::info!(
"Uploaded {} chunks in {} seconds.",
repeat,
start_time.elapsed().as_secs()
);
let speed = ((item_len * (repeat as usize)) as f64) / start_time.elapsed().as_secs_f64();
info!(
log::info!(
"Time per request: {} microseconds.",
(start_time.elapsed().as_micros()) / (repeat as u128)
);
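
For reference, the figures logged by `upload_stream` above are plain ratios over the collected upload stats; sketched here with made-up numbers (not taken from the diff):

let (size, size_reused) = (1_073_741_824usize, 805_306_368usize); // 1 GiB total, 768 MiB reused
let size_dirty = size - size_reused;                              // 256 MiB actually uploaded
let micros = 4_000_000usize;                                      // 4 s upload time

let reused_percent = size_reused as f64 * 100. / size as f64;     // 75.0
let speed = HumanByte::from((size_dirty * 1_000_000) / micros);   // ~64 MiB/s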

View File

@ -14,7 +14,6 @@ use nix::fcntl::OFlag;
use nix::sys::stat::Mode;
use pathpatterns::{MatchEntry, MatchList, MatchPattern, MatchType, PatternFlag};
use pbs_api_types::PathPattern;
use proxmox_router::cli::{self, CliCommand, CliCommandMap, CliHelper, CommandLineInterface};
use proxmox_schema::api;
use proxmox_sys::fs::{create_path, CreateOptions};
@ -22,8 +21,7 @@ use pxar::accessor::ReadAt;
use pxar::{EntryKind, Metadata};
use pbs_datastore::catalog::{self, DirEntryAttribute};
use proxmox_async::runtime::{block_in_place, block_on};
use proxmox_log::error;
use proxmox_async::runtime::block_in_place;
use crate::pxar::Flags;
@ -107,7 +105,7 @@ fn complete_path(complete_me: &str, _map: &HashMap<String, String>) -> Vec<Strin
match shell.complete_path(complete_me) {
Ok(list) => list,
Err(err) => {
error!("error during completion: {}", err);
log::error!("error during completion: {}", err);
Vec::new()
}
}
@ -242,7 +240,8 @@ async fn list_selected_command(patterns: bool) -> Result<(), Error> {
input: {
properties: {
pattern: {
type: PathPattern,
type: String,
description: "Match pattern for matching files in the catalog."
},
select: {
type: bool,
@ -283,8 +282,9 @@ async fn restore_selected_command(target: String) -> Result<(), Error> {
description: "target path for restore on local filesystem."
},
pattern: {
type: PathPattern,
type: String,
optional: true,
description: "match pattern to limit files for restore."
}
}
}
@ -304,6 +304,7 @@ async fn restore_command(target: String, pattern: Option<String>) -> Result<(),
/// The `Path` type's component iterator does not tell us anything about trailing slashes or
/// trailing `Component::CurDir` entries. Since we only support regular paths we'll roll our own
/// here:
pub struct Shell {
/// Readline instance handling input and callbacks
rl: rustyline::Editor<CliHelper>,
@ -311,9 +312,8 @@ pub struct Shell {
/// Interactive prompt.
prompt: String,
/// Optional catalog reader instance to navigate, if not present the Accessor is used for
/// navigation
catalog: Option<CatalogReader>,
/// Catalog reader instance to navigate
catalog: CatalogReader,
/// List of selected paths for restore
selected: HashMap<OsString, MatchEntry>,
@ -347,7 +347,7 @@ impl PathStackEntry {
impl Shell {
/// Create a new shell for the given catalog and pxar archive.
pub async fn new(
mut catalog: Option<CatalogReader>,
mut catalog: CatalogReader,
archive_name: &str,
archive: Accessor,
) -> Result<Self, Error> {
@ -355,31 +355,11 @@ impl Shell {
let mut rl = rustyline::Editor::<CliHelper>::new();
rl.set_helper(Some(cli_helper));
let mut position = Vec::new();
if let Some(catalog) = catalog.as_mut() {
let catalog_root = catalog.root()?;
let archive_root = catalog
.lookup(&catalog_root, archive_name.as_bytes())?
.ok_or_else(|| format_err!("archive not found in catalog"))?;
position.push(PathStackEntry::new(archive_root));
} else {
let root = archive.open_root().await?;
let root_entry = root.lookup_self().await?;
if let EntryKind::Directory = root_entry.kind() {
let entry_attr = DirEntryAttribute::Directory {
start: root_entry.entry_range_info().entry_range.start,
};
position.push(PathStackEntry {
catalog: catalog::DirEntry {
name: archive_name.into(),
attr: entry_attr,
},
pxar: Some(root_entry),
});
} else {
bail!("unexpected root entry type");
}
}
let position = vec![PathStackEntry::new(archive_root)];
let mut this = Self {
rl,
@ -418,7 +398,7 @@ impl Shell {
let args = match cli::shellword_split(&line) {
Ok(args) => args,
Err(err) => {
error!("Error: {}", err);
log::error!("Error: {}", err);
continue;
}
};
@ -470,7 +450,7 @@ impl Shell {
async fn resolve_symlink(
stack: &mut Vec<PathStackEntry>,
catalog: &mut Option<CatalogReader>,
catalog: &mut CatalogReader,
accessor: &Accessor,
follow_symlinks: &mut Option<usize>,
) -> Result<(), Error> {
@ -488,7 +468,7 @@ impl Shell {
};
let new_stack =
Self::lookup(stack, catalog, accessor, Some(path), follow_symlinks).await?;
Self::lookup(stack, &mut *catalog, accessor, Some(path), follow_symlinks).await?;
*stack = new_stack;
@ -504,7 +484,7 @@ impl Shell {
/// out.
async fn step(
stack: &mut Vec<PathStackEntry>,
catalog: &mut Option<CatalogReader>,
catalog: &mut CatalogReader,
accessor: &Accessor,
component: std::path::Component<'_>,
follow_symlinks: &mut Option<usize>,
@ -523,27 +503,9 @@ impl Shell {
if stack.last().unwrap().catalog.is_symlink() {
Self::resolve_symlink(stack, catalog, accessor, follow_symlinks).await?;
}
if let Some(catalog) = catalog {
match catalog.lookup(&stack.last().unwrap().catalog, entry.as_bytes())? {
Some(dir) => stack.push(PathStackEntry::new(dir)),
None => bail!("no such file or directory: {entry:?}"),
}
} else {
let pxar_entry = parent_pxar_entry(stack)?;
let parent_dir = pxar_entry.enter_directory().await?;
match parent_dir.lookup(entry).await? {
Some(entry) => {
let entry_attr = DirEntryAttribute::try_from(&entry)?;
stack.push(PathStackEntry {
catalog: catalog::DirEntry {
name: entry.entry().file_name().as_bytes().into(),
attr: entry_attr,
},
pxar: Some(entry),
})
}
None => bail!("no such file or directory: {entry:?}"),
}
None => bail!("no such file or directory: {:?}", entry),
}
}
}
@ -553,7 +515,7 @@ impl Shell {
fn step_nofollow(
stack: &mut Vec<PathStackEntry>,
catalog: &mut Option<CatalogReader>,
catalog: &mut CatalogReader,
component: std::path::Component<'_>,
) -> Result<(), Error> {
use std::path::Component;
@ -569,27 +531,11 @@ impl Shell {
Component::Normal(entry) => {
if stack.last().unwrap().catalog.is_symlink() {
bail!("target is a symlink");
} else if let Some(catalog) = catalog.as_mut() {
} else {
match catalog.lookup(&stack.last().unwrap().catalog, entry.as_bytes())? {
Some(dir) => stack.push(PathStackEntry::new(dir)),
None => bail!("no such file or directory: {:?}", entry),
}
} else {
let pxar_entry = parent_pxar_entry(stack)?;
let parent_dir = block_on(pxar_entry.enter_directory())?;
match block_on(parent_dir.lookup(entry))? {
Some(entry) => {
let entry_attr = DirEntryAttribute::try_from(&entry)?;
stack.push(PathStackEntry {
catalog: catalog::DirEntry {
name: entry.entry().file_name().as_bytes().into(),
attr: entry_attr,
},
pxar: Some(entry),
})
}
None => bail!("no such file or directory: {entry:?}"),
}
}
}
}
@ -599,7 +545,7 @@ impl Shell {
/// The pxar accessor is required to resolve symbolic links
async fn walk_catalog(
stack: &mut Vec<PathStackEntry>,
catalog: &mut Option<CatalogReader>,
catalog: &mut CatalogReader,
accessor: &Accessor,
path: &Path,
follow_symlinks: &mut Option<usize>,
@ -613,7 +559,7 @@ impl Shell {
/// Non-async version cannot follow symlinks.
fn walk_catalog_nofollow(
stack: &mut Vec<PathStackEntry>,
catalog: &mut Option<CatalogReader>,
catalog: &mut CatalogReader,
path: &Path,
) -> Result<(), Error> {
for c in path.components() {
@ -666,34 +612,12 @@ impl Shell {
tmp_stack = self.position.clone();
}
Self::walk_catalog_nofollow(&mut tmp_stack, &mut self.catalog, &path)?;
(&tmp_stack.last().unwrap(), base, part)
(&tmp_stack.last().unwrap().catalog, base, part)
}
None => (&self.position.last().unwrap(), "", input),
None => (&self.position.last().unwrap().catalog, "", input),
};
let entries = if let Some(catalog) = self.catalog.as_mut() {
catalog.read_dir(&parent.catalog)?
} else {
let dir = if let Some(entry) = parent.pxar.as_ref() {
block_on(entry.enter_directory())?
} else {
bail!("missing pxar entry for parent");
};
let mut out = Vec::new();
let entries = block_on(crate::pxar::tools::pxar_metadata_read_dir(dir))?;
for entry in entries {
let mut name = base.to_string();
let file_name = entry.file_name().as_bytes();
if file_name.starts_with(part.as_bytes()) {
name.push_str(std::str::from_utf8(file_name)?);
if entry.is_dir() {
name.push('/');
}
out.push(name);
}
}
return Ok(out);
};
let entries = self.catalog.read_dir(parent)?;
let mut out = Vec::new();
for entry in entries {
@ -713,7 +637,7 @@ impl Shell {
// Break async recursion here: lookup -> walk_catalog -> step -> lookup
fn lookup<'future, 's, 'c, 'a, 'p, 'y>(
stack: &'s [PathStackEntry],
catalog: &'c mut Option<CatalogReader>,
catalog: &'c mut CatalogReader,
accessor: &'a Accessor,
path: Option<&'p Path>,
follow_symlinks: &'y mut Option<usize>,
@ -754,23 +678,7 @@ impl Shell {
let last = stack.last().unwrap();
if last.catalog.is_directory() {
let items = if let Some(catalog) = self.catalog.as_mut() {
catalog.read_dir(&stack.last().unwrap().catalog)?
} else {
let dir = if let Some(entry) = last.pxar.as_ref() {
entry.enter_directory().await?
} else {
bail!("missing pxar entry for parent");
};
let mut out = std::io::stdout();
let items = crate::pxar::tools::pxar_metadata_read_dir(dir).await?;
for item in items {
out.write_all(item.file_name().as_bytes())?;
out.write_all(b"\n")?;
}
return Ok(());
};
let items = self.catalog.read_dir(&stack.last().unwrap().catalog)?;
let mut out = std::io::stdout();
// FIXME: columnize
for item in items {
@ -797,7 +705,7 @@ impl Shell {
let file = Self::walk_pxar_archive(&self.accessor, &mut stack).await?;
std::io::stdout()
.write_all(crate::pxar::tools::format_multi_line_entry(file.entry()).as_bytes())?;
.write_all(crate::pxar::format_multi_line_entry(file.entry()).as_bytes())?;
Ok(())
}
@ -812,14 +720,6 @@ impl Shell {
&mut None,
)
.await?;
if new_position.is_empty() {
// Avoid moving below archive root into catalog root, thereby treating
// the archive root as its own parent directory.
self.position.truncate(1);
return Ok(());
}
if !new_position.last().unwrap().catalog.is_directory() {
bail!("not a directory");
}
@ -920,8 +820,7 @@ impl Shell {
async fn list_matching_files(&mut self) -> Result<(), Error> {
let matches = self.build_match_list();
if let Some(catalog) = self.catalog.as_mut() {
catalog.find(
self.catalog.find(
&self.position[0].catalog,
&mut Vec::new(),
&matches,
@ -932,24 +831,6 @@ impl Shell {
Ok(())
},
)?;
} else {
let parent_dir = if let Some(pxar_entry) = self.position[0].pxar.as_ref() {
pxar_entry.enter_directory().await?
} else {
bail!("missing pxar entry for archive root");
};
crate::pxar::tools::pxar_metadata_catalog_find(
parent_dir,
&matches,
&|path: &[u8]| -> Result<(), Error> {
let mut out = std::io::stdout();
out.write_all(path)?;
out.write_all(b"\n")?;
Ok(())
},
)
.await?;
}
Ok(())
}
@ -960,8 +841,7 @@ impl Shell {
MatchEntry::parse_pattern(pattern, PatternFlag::PATH_NAME, MatchType::Include)?;
let mut found_some = false;
if let Some(catalog) = self.catalog.as_mut() {
catalog.find(
self.catalog.find(
&self.position[0].catalog,
&mut Vec::new(),
&[&pattern_entry],
@ -973,24 +853,6 @@ impl Shell {
Ok(())
},
)?;
} else {
let parent_dir = if let Some(pxar_entry) = self.position[0].pxar.as_ref() {
pxar_entry.enter_directory().await?
} else {
bail!("missing pxar entry for archive root");
};
crate::pxar::tools::pxar_metadata_catalog_find(
parent_dir,
&[&pattern_entry],
&|path: &[u8]| -> Result<(), Error> {
let mut out = std::io::stdout();
out.write_all(path)?;
out.write_all(b"\n")?;
Ok(())
},
)
.await?;
}
if found_some && select {
self.selected.insert(pattern_os, pattern_entry);
@ -1083,18 +945,6 @@ impl Shell {
}
}
fn parent_pxar_entry(dir_stack: &[PathStackEntry]) -> Result<&FileEntry, Error> {
if let Some(parent) = dir_stack.last().as_ref() {
if let Some(entry) = parent.pxar.as_ref() {
Ok(entry)
} else {
bail!("missing pxar entry for parent");
}
} else {
bail!("missing parent entry on stack");
}
}
struct ExtractorState<'a> {
path: Vec<u8>,
path_len: usize,
@ -1110,38 +960,22 @@ struct ExtractorState<'a> {
extractor: crate::pxar::extract::Extractor,
catalog: &'a mut Option<CatalogReader>,
catalog: &'a mut CatalogReader,
match_list: &'a [MatchEntry],
accessor: &'a Accessor,
}
impl<'a> ExtractorState<'a> {
pub fn new(
catalog: &'a mut Option<CatalogReader>,
catalog: &'a mut CatalogReader,
dir_stack: Vec<PathStackEntry>,
extractor: crate::pxar::extract::Extractor,
match_list: &'a [MatchEntry],
accessor: &'a Accessor,
) -> Result<Self, Error> {
let read_dir = if let Some(catalog) = catalog.as_mut() {
catalog
let read_dir = catalog
.read_dir(&dir_stack.last().unwrap().catalog)?
.into_iter()
} else {
let pxar_entry = parent_pxar_entry(&dir_stack)?;
let dir = block_on(pxar_entry.enter_directory())?;
let entries = block_on(crate::pxar::tools::pxar_metadata_read_dir(dir))?;
let mut catalog_entries = Vec::with_capacity(entries.len());
for entry in entries {
let entry_attr = DirEntryAttribute::try_from(&entry).unwrap();
catalog_entries.push(catalog::DirEntry {
name: entry.entry().file_name().as_bytes().into(),
attr: entry_attr,
});
}
catalog_entries.into_iter()
};
.into_iter();
Ok(Self {
path: Vec::new(),
path_len: 0,
@ -1219,29 +1053,11 @@ impl<'a> ExtractorState<'a> {
entry: catalog::DirEntry,
match_result: Option<MatchType>,
) -> Result<(), Error> {
let entry_iter = if let Some(catalog) = self.catalog.as_mut() {
catalog.read_dir(&entry)?.into_iter()
} else {
self.dir_stack.push(PathStackEntry::new(entry.clone()));
let dir = Shell::walk_pxar_archive(self.accessor, &mut self.dir_stack).await?;
self.dir_stack.pop();
let dir = dir.enter_directory().await?;
let entries = block_on(crate::pxar::tools::pxar_metadata_read_dir(dir))?;
entries
.into_iter()
.map(|entry| {
let entry_attr = DirEntryAttribute::try_from(&entry).unwrap();
catalog::DirEntry {
name: entry.entry().file_name().as_bytes().into(),
attr: entry_attr,
}
})
.collect::<Vec<catalog::DirEntry>>()
.into_iter()
};
// enter a new directory:
self.read_dir_stack
.push(mem::replace(&mut self.read_dir, entry_iter));
self.read_dir_stack.push(mem::replace(
&mut self.read_dir,
self.catalog.read_dir(&entry)?.into_iter(),
));
self.matches_stack.push(self.matches);
self.dir_stack.push(PathStackEntry::new(entry));
self.path_len_stack.push(self.path_len);

Some files were not shown because too many files have changed in this diff Show More