Compare commits


No commits in common. "master" and "v3.1.2" have entirely different histories.

503 changed files with 16917 additions and 24607 deletions


@@ -3,6 +3,3 @@
directory = "/usr/share/cargo/registry"
[source.crates-io]
replace-with = "debian-packages"
[profile.release]
debug=true

.gitignore

@@ -14,19 +14,14 @@
/*.deb
/*.dsc
/*.tar*
/.do-cargo-build
/Cargo.lock
/docs/*/synopsis.rst
/docs/config/*/config.rst
/docs/config/acl/roles.rst
/docs/output
/docs/proxmox-backup-client/catalog-shell-synopsis.rst
# all services are generated from a .in file to set the libexec path
/etc/*.service
/etc/proxmox-backup-proxy.service
/etc/proxmox-backup.service
/proxmox-backup-server-dpkg-contents.txt
/target
/www/.lint-incremental
/www/js/
__pycache__/
build/
local.mak
target/


@@ -1,5 +1,5 @@
[workspace.package]
version = "3.4.1"
version = "3.1.2"
authors = [
"Dietmar Maurer <dietmar@proxmox.com>",
"Dominik Csapak <d.csapak@proxmox.com>",
@@ -13,7 +13,6 @@ authors = [
edition = "2021"
license = "AGPL-3"
repository = "https://git.proxmox.com/?p=proxmox-backup.git"
rust-version = "1.81"
[package]
name = "proxmox-backup"
@@ -29,6 +28,7 @@ exclude = [ "build", "debian", "tests/catar_data/test_symlink/symlink1"]
[workspace]
members = [
"pbs-api-types",
"pbs-buildcfg",
"pbs-client",
"pbs-config",
@@ -43,6 +43,7 @@ members = [
"proxmox-backup-client",
"proxmox-file-restore",
"proxmox-restore-daemon",
"proxmox-rrd",
"pxar-bin",
]
@@ -53,51 +54,41 @@ path = "src/lib.rs"
[workspace.dependencies]
# proxmox workspace
proxmox-apt = { version = "0.11", features = [ "cache" ] }
proxmox-apt-api-types = "1.0.1"
proxmox-apt = "0.10.5"
proxmox-async = "0.4"
proxmox-auth-api = "0.4"
proxmox-auth-api = "0.3"
proxmox-borrow = "1"
proxmox-compression = "0.2"
proxmox-config-digest = "0.1.0"
proxmox-daemon = "0.1.0"
proxmox-fuse = "0.1.3"
proxmox-http = { version = "0.9.5", features = [ "client", "http-helpers", "websocket" ] } # see below
proxmox-http = { version = "0.9.0", features = [ "client", "http-helpers", "websocket" ] } # see below
proxmox-human-byte = "0.1"
proxmox-io = "1.0.1" # tools and client use "tokio" feature
proxmox-lang = "1.1"
proxmox-log = "0.2.6"
proxmox-ldap = "0.2.1"
proxmox-metrics = "0.3.1"
proxmox-notify = "0.5.1"
proxmox-metrics = "0.3"
proxmox-openid = "0.10.0"
proxmox-rest-server = { version = "0.8.9", features = [ "templates" ] }
proxmox-rest-server = { version = "0.5.1", features = [ "templates" ] }
# some use "cli", some use "cli" and "server", pbs-config uses nothing
proxmox-router = { version = "3.0.0", default-features = false }
proxmox-rrd = "0.4"
proxmox-rrd-api-types = "1.0.2"
proxmox-router = { version = "2.0.0", default_features = false }
# everything but pbs-config and pbs-client use "api-macro"
proxmox-schema = "4"
proxmox-schema = "2.0.0"
proxmox-section-config = "2"
proxmox-serde = "0.1.1"
proxmox-shared-cache = "0.1"
proxmox-shared-memory = "0.3.0"
proxmox-sortable-macro = "0.1.2"
proxmox-subscription = { version = "0.5.0", features = [ "api-types" ] }
proxmox-sys = "0.6.7"
proxmox-systemd = "0.1"
proxmox-tfa = { version = "5", features = [ "api", "api-types" ] }
proxmox-time = "2"
proxmox-uuid = { version = "1", features = [ "serde" ] }
proxmox-worker-task = "0.1"
pbs-api-types = "0.2.2"
proxmox-subscription = { version = "0.4.2", features = [ "api-types" ] }
proxmox-sys = "0.5.2"
proxmox-tfa = { version = "4.0.4", features = [ "api", "api-types" ] }
proxmox-time = "1.1.2"
proxmox-uuid = "1"
# other proxmox crates
pathpatterns = "0.3"
proxmox-acme = "0.5.3"
pxar = "0.12.1"
proxmox-acme-rs = "0.4"
pxar = "0.10.2"
# PBS workspace
pbs-api-types = { path = "pbs-api-types" }
pbs-buildcfg = { path = "pbs-buildcfg" }
pbs-client = { path = "pbs-client" }
pbs-config = { path = "pbs-config" }
@@ -107,28 +98,29 @@ pbs-key-config = { path = "pbs-key-config" }
pbs-pxar-fuse = { path = "pbs-pxar-fuse" }
pbs-tape = { path = "pbs-tape" }
pbs-tools = { path = "pbs-tools" }
proxmox-rrd = { path = "proxmox-rrd" }
# regular crates
anyhow = "1.0"
async-trait = "0.1.56"
apt-pkg-native = "0.3.2"
base64 = "0.13"
bitflags = "2.4"
bitflags = "1.2.1"
bytes = "1.0"
cidr = "0.2.1"
crc32fast = "1"
const_format = "0.2"
crossbeam-channel = "0.5"
endian_trait = { version = "0.6", features = ["arrays"] }
env_logger = "0.11"
env_logger = "0.10"
flate2 = "1.0"
foreign-types = "0.3"
futures = "0.3"
h2 = { version = "0.4", features = [ "legacy", "stream" ] }
h2 = { version = "0.3", features = [ "stream" ] }
handlebars = "3.0"
hex = "0.4.3"
hickory-resolver = { version = "0.24.1", default-features = false, features = [ "system-config", "tokio-runtime" ] }
hyper = { version = "0.14", features = [ "backports", "deprecated", "full" ] }
http = "0.2"
hyper = { version = "0.14", features = [ "full" ] }
lazy_static = "1.4"
libc = "0.2"
log = "0.4.17"
nix = "0.26.1"
@@ -141,7 +133,9 @@ pin-project-lite = "0.2"
regex = "1.5.5"
rustyline = "9"
serde = { version = "1.0", features = ["derive"] }
serde_cbor = "0.11.1"
serde_json = "1.0"
serde_plain = "1"
siphasher = "0.3"
syslog = "6"
tar = "0.4"
@@ -151,29 +145,32 @@ tokio = "1.6"
tokio-openssl = "0.6.1"
tokio-stream = "0.1.0"
tokio-util = { version = "0.7", features = [ "io" ] }
tracing = "0.1"
tower-service = "0.3.0"
udev = "0.4"
url = "2.1"
walkdir = "2"
xdg = "2.2"
zstd = { version = "0.12", features = [ "bindgen" ] }
zstd-safe = "6.0"
[dependencies]
anyhow.workspace = true
async-trait.workspace = true
apt-pkg-native.workspace = true
base64.workspace = true
bitflags.workspace = true
bytes.workspace = true
cidr.workspace = true
const_format.workspace = true
crc32fast.workspace = true
crossbeam-channel.workspace = true
endian_trait.workspace = true
flate2.workspace = true
futures.workspace = true
h2.workspace = true
handlebars.workspace = true
hex.workspace = true
http.workspace = true
hyper.workspace = true
lazy_static.workspace = true
libc.workspace = true
log.workspace = true
nix.workspace = true
@@ -186,6 +183,7 @@ regex.workspace = true
rustyline.workspace = true
serde.workspace = true
serde_json.workspace = true
siphasher.workspace = true
syslog.workspace = true
termcolor.workspace = true
thiserror.workspace = true
@@ -193,53 +191,47 @@ tokio = { workspace = true, features = [ "fs", "io-util", "io-std", "macros", "n
tokio-openssl.workspace = true
tokio-stream.workspace = true
tokio-util = { workspace = true, features = [ "codec" ] }
tracing.workspace = true
tower-service.workspace = true
udev.workspace = true
url.workspace = true
walkdir.workspace = true
xdg.workspace = true
zstd.workspace = true
#valgrind_request = { git = "https://github.com/edef1c/libvalgrind_request", version = "1.1.0", optional = true }
# proxmox workspace
proxmox-apt.workspace = true
proxmox-apt-api-types.workspace = true
proxmox-async.workspace = true
proxmox-auth-api = { workspace = true, features = [ "api", "pam-authenticator" ] }
proxmox-compression.workspace = true
proxmox-config-digest.workspace = true
proxmox-daemon.workspace = true
proxmox-http = { workspace = true, features = [ "client-trait", "proxmox-async", "rate-limited-stream" ] } # pbs-client doesn't use these
proxmox-human-byte.workspace = true
proxmox-io.workspace = true
proxmox-lang.workspace = true
proxmox-log.workspace = true
proxmox-ldap.workspace = true
proxmox-metrics.workspace = true
proxmox-notify = { workspace = true, features = [ "pbs-context" ] }
proxmox-openid.workspace = true
proxmox-rest-server = { workspace = true, features = [ "rate-limited-stream" ] }
proxmox-router = { workspace = true, features = [ "cli", "server"] }
proxmox-schema = { workspace = true, features = [ "api-macro" ] }
proxmox-section-config.workspace = true
proxmox-serde = { workspace = true, features = [ "serde_json" ] }
proxmox-shared-cache.workspace = true
proxmox-shared-memory.workspace = true
proxmox-sortable-macro.workspace = true
proxmox-subscription.workspace = true
proxmox-sys = { workspace = true, features = [ "timer" ] }
proxmox-systemd.workspace = true
proxmox-tfa.workspace = true
proxmox-time.workspace = true
proxmox-uuid.workspace = true
proxmox-worker-task.workspace = true
pbs-api-types.workspace = true
# in their respective repo
proxmox-acme.workspace = true
pathpatterns.workspace = true
proxmox-acme-rs.workspace = true
pxar.workspace = true
# proxmox-backup workspace/internal crates
pbs-api-types.workspace = true
pbs-buildcfg.workspace = true
pbs-client.workspace = true
pbs-config.workspace = true
@@ -248,35 +240,26 @@ pbs-key-config.workspace = true
pbs-tape.workspace = true
pbs-tools.workspace = true
proxmox-rrd.workspace = true
proxmox-rrd-api-types.workspace = true
# Local path overrides
# NOTE: You must run `cargo update` after changing this for it to take effect!
[patch.crates-io]
#pbs-api-types = { path = "../proxmox/pbs-api-types" }
#proxmox-acme = { path = "../proxmox/proxmox-acme" }
#proxmox-apt = { path = "../proxmox/proxmox-apt" }
#proxmox-apt-api-types = { path = "../proxmox/proxmox-apt-api-types" }
#proxmox-async = { path = "../proxmox/proxmox-async" }
#proxmox-auth-api = { path = "../proxmox/proxmox-auth-api" }
#proxmox-borrow = { path = "../proxmox/proxmox-borrow" }
#proxmox-compression = { path = "../proxmox/proxmox-compression" }
#proxmox-config-digest = { path = "../proxmox/proxmox-config-digest" }
#proxmox-daemon = { path = "../proxmox/proxmox-daemon" }
#proxmox-fuse = { path = "../proxmox-fuse" }
#proxmox-http = { path = "../proxmox/proxmox-http" }
#proxmox-human-byte = { path = "../proxmox/proxmox-human-byte" }
#proxmox-io = { path = "../proxmox/proxmox-io" }
#proxmox-lang = { path = "../proxmox/proxmox-lang" }
#proxmox-log = { path = "../proxmox/proxmox-log" }
#proxmox-ldap = { path = "../proxmox/proxmox-ldap" }
#proxmox-metrics = { path = "../proxmox/proxmox-metrics" }
#proxmox-notify = { path = "../proxmox/proxmox-notify" }
#proxmox-openid = { path = "../proxmox/proxmox-openid" }
#proxmox-rest-server = { path = "../proxmox/proxmox-rest-server" }
#proxmox-router = { path = "../proxmox/proxmox-router" }
#proxmox-rrd = { path = "../proxmox/proxmox-rrd" }
#proxmox-rrd-api-types = { path = "../proxmox/proxmox-rrd-api-types" }
#proxmox-schema = { path = "../proxmox/proxmox-schema" }
#proxmox-section-config = { path = "../proxmox/proxmox-section-config" }
#proxmox-serde = { path = "../proxmox/proxmox-serde" }
@@ -284,12 +267,11 @@ proxmox-rrd-api-types.workspace = true
#proxmox-sortable-macro = { path = "../proxmox/proxmox-sortable-macro" }
#proxmox-subscription = { path = "../proxmox/proxmox-subscription" }
#proxmox-sys = { path = "../proxmox/proxmox-sys" }
#proxmox-systemd = { path = "../proxmox/proxmox-systemd" }
#proxmox-tfa = { path = "../proxmox/proxmox-tfa" }
#proxmox-time = { path = "../proxmox/proxmox-time" }
#proxmox-uuid = { path = "../proxmox/proxmox-uuid" }
#proxmox-worker-task = { path = "../proxmox/proxmox-worker-task" }
#proxmox-acme-rs = { path = "../proxmox-acme-rs" }
#pathpatterns = {path = "../pathpatterns" }
#pxar = { path = "../pxar" }


@@ -1,12 +1,10 @@
include /usr/share/dpkg/default.mk
include /usr/share/rustc/architecture.mk
include defines.mk
PACKAGE := proxmox-backup
ARCH := $(DEB_BUILD_ARCH)
export DEB_HOST_RUST_TYPE
SUBDIRS := etc www docs templates
SUBDIRS := etc www docs
# Binaries usable by users
USR_BIN := \
@@ -35,23 +33,15 @@ RESTORE_BIN := \
SUBCRATES != cargo metadata --no-deps --format-version=1 \
| jq -r .workspace_members'[]' \
| grep "$$PWD/" \
| sed -e "s!.*$$PWD/!!g" -e 's/\#.*$$//g' -e 's/)$$//g'
STATIC_TARGET_DIR := target/static-build
| awk '!/^proxmox-backup[[:space:]]/ { printf "%s ", $$1 }'
ifeq ($(BUILD_MODE), release)
CARGO_BUILD_ARGS += --release --target $(DEB_HOST_RUST_TYPE)
COMPILEDIR := target/$(DEB_HOST_RUST_TYPE)/release
STATIC_COMPILEDIR := $(STATIC_TARGET_DIR)/$(DEB_HOST_RUST_TYPE)/release
CARGO_BUILD_ARGS += --release
COMPILEDIR := target/release
else
CARGO_BUILD_ARGS += --target $(DEB_HOST_RUST_TYPE)
COMPILEDIR := target/$(DEB_HOST_RUST_TYPE)/debug
STATIC_COMPILEDIR := $(STATIC_TARGET_DIR)/$(DEB_HOST_RUST_TYPE)/debug
COMPILEDIR := target/debug
endif
STATIC_RUSTC_FLAGS := -C target-feature=+crt-static -L $(STATIC_COMPILEDIR)/deps-stubs/
ifeq ($(valgrind), yes)
CARGO_BUILD_ARGS += --features valgrind
endif
@@ -61,9 +51,6 @@ CARGO ?= cargo
COMPILED_BINS := \
$(addprefix $(COMPILEDIR)/,$(USR_BIN) $(USR_SBIN) $(SERVICE_BIN) $(RESTORE_BIN))
STATIC_BINS := \
$(addprefix $(STATIC_COMPILEDIR)/,proxmox-backup-client-static pxar-static)
export DEB_VERSION DEB_VERSION_UPSTREAM
SERVER_DEB=$(PACKAGE)-server_$(DEB_VERSION)_$(ARCH).deb
@@ -72,12 +59,10 @@ CLIENT_DEB=$(PACKAGE)-client_$(DEB_VERSION)_$(ARCH).deb
CLIENT_DBG_DEB=$(PACKAGE)-client-dbgsym_$(DEB_VERSION)_$(ARCH).deb
RESTORE_DEB=proxmox-backup-file-restore_$(DEB_VERSION)_$(ARCH).deb
RESTORE_DBG_DEB=proxmox-backup-file-restore-dbgsym_$(DEB_VERSION)_$(ARCH).deb
STATIC_CLIENT_DEB=$(PACKAGE)-client-static_$(DEB_VERSION)_$(ARCH).deb
STATIC_CLIENT_DBG_DEB=$(PACKAGE)-client-static-dbgsym_$(DEB_VERSION)_$(ARCH).deb
DOC_DEB=$(PACKAGE)-docs_$(DEB_VERSION)_all.deb
DEBS=$(SERVER_DEB) $(SERVER_DBG_DEB) $(CLIENT_DEB) $(CLIENT_DBG_DEB) \
$(RESTORE_DEB) $(RESTORE_DBG_DEB) $(STATIC_CLIENT_DEB) $(STATIC_CLIENT_DBG_DEB)
$(RESTORE_DEB) $(RESTORE_DBG_DEB)
DSC = rust-$(PACKAGE)_$(DEB_VERSION).dsc
@@ -85,7 +70,7 @@ DESTDIR=
tests ?= --workspace
all: proxmox-backup-client-static $(SUBDIRS)
all: $(SUBDIRS)
.PHONY: $(SUBDIRS)
$(SUBDIRS):
@@ -108,7 +93,7 @@ build:
cp -a debian \
Cargo.toml src \
$(SUBCRATES) \
docs etc examples tests www zsh-completions templates \
docs etc examples tests www zsh-completions \
defines.mk Makefile \
./build/
rm -f build/Cargo.lock
@@ -123,15 +108,12 @@ proxmox-backup-docs: build
cd build; dpkg-buildpackage -b -us -uc --no-pre-clean
lintian $(DOC_DEB)
.PHONY: deb dsc deb-nodoc deb-nostrip
# copy the local target/ dir as a build-cache
.PHONY: deb dsc deb-nodoc
deb-nodoc: build
cd build; dpkg-buildpackage -b -us -uc --no-pre-clean --build-profiles=nodoc
lintian $(DEBS)
deb-nostrip: build
cd build; DEB_BUILD_OPTIONS=nostrip dpkg-buildpackage -b -us -uc
lintian $(DEBS) $(DOC_DEB)
$(DEBS): deb
deb: build
cd build; dpkg-buildpackage -b -us -uc
@@ -155,7 +137,7 @@ clean: clean-deb
$(foreach i,$(SUBDIRS), \
$(MAKE) -C $(i) clean ;)
$(CARGO) clean
rm -f .do-cargo-build .do-static-cargo-build
rm -f .do-cargo-build
# allows one to avoid running cargo clean when one just wants to tidy up after a package build
clean-deb:
@@ -194,7 +176,6 @@ $(COMPILED_BINS) $(COMPILEDIR)/dump-catalog-shell-cli $(COMPILEDIR)/docgen: .do-
--bin proxmox-restore-daemon \
--package proxmox-backup \
--bin docgen \
--bin pbs2to3 \
--bin proxmox-backup-api \
--bin proxmox-backup-manager \
--bin proxmox-backup-proxy \
@@ -204,25 +185,12 @@ $(COMPILED_BINS) $(COMPILEDIR)/dump-catalog-shell-cli $(COMPILEDIR)/docgen: .do-
--bin sg-tape-cmd
touch "$@"
.PHONY: proxmox-backup-client-static
proxmox-backup-client-static:
rm -f .do-static-cargo-build
$(MAKE) $(STATIC_BINS)
$(STATIC_BINS): .do-static-cargo-build
.do-static-cargo-build:
mkdir -p $(STATIC_COMPILEDIR)/deps-stubs/ && \
echo '!<arch>' > $(STATIC_COMPILEDIR)/deps-stubs/libsystemd.a # workaround for too greedy linkage and proxmox-systemd
$(CARGO) rustc $(CARGO_BUILD_ARGS) --package pxar-bin --bin pxar \
--target-dir $(STATIC_TARGET_DIR) -- $(STATIC_RUSTC_FLAGS)
$(CARGO) rustc $(CARGO_BUILD_ARGS) --package proxmox-backup-client --bin proxmox-backup-client \
--target-dir $(STATIC_TARGET_DIR) -- $(STATIC_RUSTC_FLAGS)
.PHONY: lint
lint:
cargo clippy -- -A clippy::all -D clippy::correctness
install: $(COMPILED_BINS) $(STATIC_BINS)
install: $(COMPILED_BINS)
install -dm755 $(DESTDIR)$(BINDIR)
install -dm755 $(DESTDIR)$(ZSH_COMPL_DEST)
$(foreach i,$(USR_BIN), \
@@ -241,19 +209,15 @@ install: $(COMPILED_BINS) $(STATIC_BINS)
install -m4755 -o root -g root $(COMPILEDIR)/sg-tape-cmd $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/sg-tape-cmd
$(foreach i,$(SERVICE_BIN), \
install -m755 $(COMPILEDIR)/$(i) $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/ ;)
install -m755 $(STATIC_COMPILEDIR)/proxmox-backup-client $(DESTDIR)$(BINDIR)/proxmox-backup-client-static
install -m755 $(STATIC_COMPILEDIR)/pxar $(DESTDIR)$(BINDIR)/pxar-static
$(MAKE) -C www install
$(MAKE) -C docs install
$(MAKE) -C templates install
.PHONY: upload
upload: UPLOAD_DIST ?= $(DEB_DISTRIBUTION)
upload: $(SERVER_DEB) $(CLIENT_DEB) $(RESTORE_DEB) $(DOC_DEB) $(STATIC_CLIENT_DEB)
upload: $(SERVER_DEB) $(CLIENT_DEB) $(RESTORE_DEB) $(DOC_DEB)
# check if working directory is clean
git diff --exit-code --stat && git diff --exit-code --stat --staged
tar cf - $(SERVER_DEB) $(SERVER_DBG_DEB) $(DOC_DEB) $(CLIENT_DEB) $(CLIENT_DBG_DEB) \
| ssh -X repoman@repo.proxmox.com upload --product pbs --dist $(UPLOAD_DIST)
tar cf - $(CLIENT_DEB) $(CLIENT_DBG_DEB) | ssh -X repoman@repo.proxmox.com upload --product "pve,pmg,pbs-client" --dist $(UPLOAD_DIST)
tar cf - $(STATIC_CLIENT_DEB) $(STATIC_CLIENT_DBG_DEB) | ssh -X repoman@repo.proxmox.com upload --product "pbs-client" --dist $(UPLOAD_DIST)
tar cf - $(RESTORE_DEB) $(RESTORE_DBG_DEB) | ssh -X repoman@repo.proxmox.com upload --product "pve" --dist $(UPLOAD_DIST)


@@ -5,11 +5,8 @@ Build & Release Notes
``rustup`` Toolchain
====================
We normally want to build with the ``rustc`` Debian package (see below). If you
still want to use ``rustup`` for other reasons (e.g. to easily switch between
the official stable, beta, and nightly compilers), you should set the following
``rustup`` configuration to use the Debian-provided ``rustc`` compiler
by default:
We normally want to build with the ``rustc`` Debian package. To do that
you can set the following ``rustup`` configuration:
# rustup toolchain link system /usr
# rustup default system
@@ -33,7 +30,7 @@ pre-release version number (e.g., "0.1.1-dev.1" instead of "0.1.0").
Local cargo config
==================
This repository ships with a ``.cargo/config.toml`` that replaces the crates.io
This repository ships with a ``.cargo/config`` that replaces the crates.io
registry with packaged crates located in ``/usr/share/cargo/registry``.
A similar config is also applied building with dh_cargo. Cargo.lock needs to be

debian/changelog

@@ -1,927 +1,3 @@
rust-proxmox-backup (3.4.1-1) bookworm; urgency=medium
* ui: token view: fix typo in 'lose' and rephrase token regenerate dialog
message for more clarity.
* restrict consent-banner text length to 64 KiB.
* docs: describe the intent for the statically linked pbs client.
* api: backup: include previous snapshot name in log message.
* garbage collection: account for index files created or deleted concurrently
with GC to avoid potentially confusing log messages.
* garbage collection: fix a rare race in the chunk marking phase for setups
doing highly frequent backups in quick succession while immediately pruning
down to a single backup snapshot left over after each such backup.
* tape: wait for calibration of LTO-9 tapes in general, not just in the
initial tape format procedure.
-- Proxmox Support Team <support@proxmox.com> Wed, 16 Apr 2025 14:45:37 +0200
rust-proxmox-backup (3.4.0-1) bookworm; urgency=medium
* fix #4788: build statically linked version of the proxmox-backup-client
package.
* ui: sync job: change the rate limit direction based on sync direction.
* docs: mention how to set the push sync jobs rate limit
* ui: set error mask: ensure that message is html-encoded to avoid visual
glitches.
* api server: increase maximal request body size from 64 KiB to 512 KiB,
similar to a recent change for our Perl-based projects.
* notifications: include Content-Length header for broader compatibility in
the webhook and gotify targets.
* notifications: allow overriding notification templates.
* docs: notifications: add section about how to use custom templates
* sync: print whole error chain per group on failure for more context.
* ui: options-view: fix typo in empty-text for GC tuning option.
* memory info: use the "MemAvailable" field from '/proc/meminfo' to compute
used memory to fix overestimation of that metric and to better align with
what modern versions of tools like `free` do, and to future-proof against
changes in how the kernel accounts for memory usage.
* add "MemAvailable" field to ProcFsMemInfo to promote its usage over the
existing "MemFree" field, which is almost never the right choice. This new
field will be provided for external metric server.
* docs: mention different name resolution for statically linked binary.
* docs: add basic info for how to install the statically linked client.
* docs: mention new verify-only and encrypted-only flags for sync jobs.
-- Proxmox Support Team <support@proxmox.com> Wed, 09 Apr 2025 17:41:38 +0200
rust-proxmox-backup (3.3.7-1) bookworm; urgency=medium
* fix #5982: garbage collection: add a check to ensure that the underlying
file system supports and honors file access time (atime) updates.
The check is performed once on datastore creation and on start of every
garbage collection (GC) task, just to be sure. It can be disabled in the
datastore tuning options.
* garbage collection: support setting a custom access time cutoff,
overriding the default of one day and five minutes.
* ui: expose flag for GC access time support safety check and the access
time cutoff override in datastore tuning options.
* docs: describe rationale for new GC access time update check setting and
the access time cutoff check in tuning options.
* access control: add support to mark a specific authentication realm as
default selected realm for the login user interface.
* fix #4382: api: access control: remove permissions of token on deletion.
* fix #3887: api: access control: allow users to regenerate the secret of an
API token without changing any existing ACLs.
* fix #6072: sync jobs: support flags to limit sync to only encrypted and/or
verified snapshots.
* ui: datastore tuning options: expose overriding GC cache capacity so that
admins can either restrict the peak memory usage during GC or allow GC to
use more memory to reduce file system IO even for huge (multiple TiB)
referenced data in backup groups.
* ui: datastore tuning options: increase width and rework labels to provide
a tiny bit more context about what these options are.
* ui: sync job: increase edit window width to 720px to make it less cramped.
* ui: sync job: small field label casing consistency fixes.
-- Proxmox Support Team <support@proxmox.com> Sat, 05 Apr 2025 17:54:31 +0200
rust-proxmox-backup (3.3.6-1) bookworm; urgency=medium
* datastore: ignore group locking errors when removing snapshots; they
normally happen only due to old locking, and the underlying snapshot is
deleted in any case at this point, so there is no point in confusing the user.
* api: datastore: add error message on failed removal due to old locking and
tell any admin what they can do to switch to the new locking.
* ui: only add delete parameter on token edit, not when creating tokens.
* pbs-client: allow reading fingerprint from system credential.
* docs: client: add section about system credentials integration.
-- Proxmox Support Team <support@proxmox.com> Thu, 03 Apr 2025 17:57:02 +0200
rust-proxmox-backup (3.3.5-1) bookworm; urgency=medium
* api: config: use guard for unmounting on failed datastore creation
* client: align description for backup specification to docs, using
`archive-name` and `type` over `label` and `ext`.
* client: read credentials from CREDENTIALS_DIRECTORY environment variable
following the "System and Service Credentials" specification. This allows
users to use native systemd capabilities for credential management if the
proxmox-backup-client is used in systemd units or, e.g., through a wrapper
like systemd-run.
* fix #3935: datastore/api/backup: move datastore locking to '/run' to avoid
that lock-files can block deleting backup groups or snapshots on the
datastore and to decouple locking from the underlying datastore
file-system.
* api: fix race when changing the owner of a backup-group.
* fix #3336: datastore: remove group if the last snapshot is removed to
avoid confusing situations where the group directory still exists and
blocks re-creating a group with another owner even though the empty group
was not visible in the web UI.
* notifications: clean up and add dedicated types for all templates, to
allow declaring that interface stable in preparation for allowing
overriding them in the future (not included in this release).
* tape: introduce a tape backup job worker-thread option for restores.
Depending on the underlying storage using more threads can dramatically
improve the restore speed. Especially fast storage with low penalty for
random access, like flash-storage (SSDs) can profit from using more
worker threads. But on file systems backed by spinning disks (HDDs) the
performance can even degrade with more threads. This is why for now the
default is left at a single thread and the admin needs to tune this for
their storage.
* garbage collection: generate index file list via datastore iterators in a
structured manner.
* fix #5331: garbage collection: avoid multiple chunk atime updates by
keeping track of the recently marked chunks in phase 1 of garbage
collection, sparing repeated atime updates via relatively expensive
utimensat (touch) calls.
Use an LRU cache with a size of 32 MiB for tracking already processed
chunks; this fully covers backup groups referencing up to 4 TiB of actual
chunks, and even bigger ones can still benefit from the cache. On some
real-world benchmarks of a datastore with 1.5 million chunks, an original
data usage of 120 TiB and a referenced data usage of 2.7 TiB (high
deduplication count due to long-term history), we measured 21.1 times fewer
file updates (31.6 million) and a 6.1 times reduction in total GC runtime
(155.4 s to 22.8 s) on a ZFS RAID 10 system consisting of spinning HDDs
and a special device mirror backed by datacenter SSDs. (A simplified
sketch of the caching idea follows this changelog entry.)
* logging helper: use new builder initializer, no functional change
intended.
-- Proxmox Support Team <support@proxmox.com> Wed, 02 Apr 2025 19:42:38 +0200
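The #5331 garbage-collection entry above boils down to a bounded "recently marked" set that is consulted before each atime update. Below is a minimal, illustrative Rust sketch of that idea; the `RecentlyMarked` type, the FIFO-style eviction, and the capacity of one million digests are assumptions made for this example, not the actual proxmox-backup implementation (which uses a proper LRU cache with a 32 MiB budget).

use std::collections::{HashSet, VecDeque};

// Bounded set remembering recently marked chunk digests, so that phase 1 of
// garbage collection updates each chunk's atime (via utimensat/touch) only once.
struct RecentlyMarked {
    seen: HashSet<[u8; 32]>,
    order: VecDeque<[u8; 32]>,
    capacity: usize,
}

impl RecentlyMarked {
    fn new(capacity: usize) -> Self {
        Self { seen: HashSet::new(), order: VecDeque::new(), capacity }
    }

    // Returns true if the digest was not seen recently and the chunk therefore
    // still needs its atime updated; false means the expensive touch is skipped.
    fn needs_touch(&mut self, digest: [u8; 32]) -> bool {
        if self.seen.contains(&digest) {
            return false;
        }
        if self.order.len() == self.capacity {
            // Evict the oldest entry to keep memory bounded (FIFO stand-in for LRU).
            if let Some(old) = self.order.pop_front() {
                self.seen.remove(&old);
            }
        }
        self.order.push_back(digest);
        self.seen.insert(digest);
        true
    }
}

fn main() {
    // Roughly one million 32-byte digests stays within a few tens of MiB,
    // comparable to the 32 MiB budget mentioned in the changelog entry.
    let mut cache = RecentlyMarked::new(1_000_000);
    let digest = [0u8; 32];
    assert!(cache.needs_touch(digest));  // first reference: touch the chunk
    assert!(!cache.needs_touch(digest)); // repeated reference: skip the touch
}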
rust-proxmox-backup (3.3.4-1) bookworm; urgency=medium
* fix #6185: client/docs: explicitly mention archive name restrictions
* docs: using-the-installer: adapt to raised root password length requirement
* disks: wipe: replace dd with write_all_at for zeroing disk
* fix #5946: disks: wipe: ensure GPT header backup is wiped
* docs: fix hash collision probability comparison
-- Proxmox Support Team <support@proxmox.com> Thu, 13 Mar 2025 13:04:05 +0100
rust-proxmox-backup (3.3.3-1) bookworm; urgency=medium
* api: datastore list: move checking if a datastore is mounted after we
ensured that the user may actually access it. While this had no effect
security-wise, it could significantly increase the cost of this API
endpoint in big setups with many datastores and many tenants that each
have access to only one, or a small set, of datastores.
* Revert "fix #5710: api: backup: stat known chunks on backup finish" due to
a big performance impact relative to what this is protecting against. We
will work out a more efficient fix for this issue in the future.
* prune simulator: show backup entries that are kept also in the flat list
of backups, not just in the calendar view.
* docs: improve the description for the garbage collection's cut-off time
* pxar extract: correctly honor the overwrite flag
* api: datastore: add missing log context for prune to avoid a case where
the worker state is unknown after it finished.
* docs: add synopsis and basic docs for prune job configuration
* backup verification: handle manifest update errors as non-fatal so that
the job does not fail, as we want to continue verifying the rest to
ensure we uncover as many potential problems as possible.
* fix #4408: docs: add 'disaster recovery' section for tapes
* fix #6069: prune simulator: correctly handle schedules that mix both a
range and a step size at once.
* client: pxar: fix a race condition where the backup upload stream can miss
an error from the create archive function, because the error state is only
set after the backup stream was already polled. This avoids an edge case
where a file-based backup was incorrectly marked as having succeeded while
there was an error.
-- Proxmox Support Team <support@proxmox.com> Tue, 11 Feb 2025 20:24:27 +0100
rust-proxmox-backup (3.3.2-2) bookworm; urgency=medium
* file-restore: fix regression with the new blockdev method used to pass
disks of a backup to the isolated virtual machine.
-- Proxmox Support Team <support@proxmox.com> Tue, 10 Dec 2024 12:14:47 +0100
rust-proxmox-backup (3.3.2-1) bookworm; urgency=medium
* pbs-client: remove `log` dependency and migrate to our common,
`tracing`-based, logging infrastructure. No semantic change intended.
* file restore: switch to more modern blockdev option for drives in QEMU
wrapper for the restore VM.
* pxar: client: fix missing file size check for metadata comparison
-- Proxmox Support Team <support@proxmox.com> Mon, 09 Dec 2024 10:37:32 +0100
rust-proxmox-backup (3.3.1-1) bookworm; urgency=medium
* tree-wide: add missing O_CLOEXEC flags to `openat` calls to avoid passing
any open FD to new child processes which can have undesired side-effects
like keeping a lock open longer than it should.
* cargo: update proxmox dependency of rest-server and sys crates to include
some fixes for open FDs and a fix for the active task worker tracking, as
on failing to update the index file the daemon did not finish the
worker, causing a reference count issue where an old daemon could keep
running forever.
* ui: check that store is set before trying to select anything in the garbage
collection (GC) job view.
-- Proxmox Support Team <support@proxmox.com> Tue, 03 Dec 2024 18:11:04 +0100
rust-proxmox-backup (3.3.0-2) bookworm; urgency=medium
* tree-wide: fix various typos.
* ui: fix remove vanished tooltip to be valid for both sync directions.
* ui: mask unmounted datastores in datastore overview.
* server: push: fix supported api version check for minor version bump.
-- Proxmox Support Team <support@proxmox.com> Thu, 28 Nov 2024 13:03:03 +0100
rust-proxmox-backup (3.3.0-1) bookworm; urgency=medium
* GC: add safety-check for nested datastore
* ui: make some more strings translatable
* docs: make sphinx ignore the environment cache to avoid missing synopsis
in some HTML output, like for example the "Command Syntax" appendix.
* docs: add note on why FAT is not supported as a backing file system for
datastores
* api: disks: directory: fail if mount unit already exists for a new file
system
* : filter partitions without proper UUID in partition selector
* ui: version info: replace wrong hyphen separator with dot
-- Proxmox Support Team <support@proxmox.com> Wed, 27 Nov 2024 20:38:41 +0100
rust-proxmox-backup (3.2.14-1) bookworm; urgency=medium
* pull-sync: do not interpret older missing snapshots as needs-resync
* api: directory: use relative path when creating removable datastore
* ui: prune keep input: actually clear value on clear trigger click
* ui: datastore edit: fix empty-text for path field
* sync: push: pass full error context when returning error to job
* api: mount removable datastore: only log an informational message if the
correct device is already mounted.
* api: sync: restrict edit permissions for the new push sync jobs to avoid
a situation where a user is able to create or edit sync jobs in the push
direction, but not able to see them.
* api: create datastore: fix checks to ensure that no datastore can contain
another one, to better handle the case of the new removable datastores.
-- Proxmox Support Team <support@proxmox.com> Wed, 27 Nov 2024 14:42:56 +0100
rust-proxmox-backup (3.2.13-1) bookworm; urgency=medium
* update pxar dependency to fix selective extraction with the newly
supported match patterns.
* reuse-datastore: avoid creating another prune job
* api: notification: add API routes for webhook targets
* management cli: add CLI for webhook targets
* ui: utils: enable webhook edit window
* ui: utils: add task description for mounting/unmounting
* ui: add onlineHelp for consent-banner option
* docs: client: fix example commands for client usage
* docs: explain some further caveats of the change detection modes
* ui: use same label for removable datastore created from disk
* api: maintenance: allow setting of maintenance mode if 'unmounting'
* docs: add more information for removable datastores
* ui: sync jobs: revert to single list for pull/push jobs, improve
distinction between push and pull jobs through other means.
* ui: sync jobs: change default sorting to 'store' -> 'direction' -> 'id'
* ui: sync jobs: add search filter-box
* config: sync: use the same config section type `sync` for push and pull;
note that this breaks existing configurations and needs manual clean-up. As
the package versions never made it beyond test, this is acceptable: while
not really ideal, we never give guarantees for testing package versions,
and the maintenance burden with the old style would not be ideal either.
* api: removable datastores: require Sys.Modify permission on /system/disks
* ui: allow resetting unmounting maintenance
* datastore: re-phrase error message when datastore is unavailable
* client: backup writer: fix regression in progress output
-- Proxmox Support Team <support@proxmox.com> Tue, 26 Nov 2024 17:05:23 +0100
rust-proxmox-backup (3.2.12-1) bookworm; urgency=medium
* fix #5853: client: pxar: exclude stale files on metadata/link read
* docs: fix wrong product name in certificate docs
* docs: explain the working principle of the change detection modes
* allow datastore creation in directory with lost+found directory
* fix #5801: manager: switch datastore update command to real API call to
avoid early cancellation of the task.
* server: push: consistently use remote over target for error messages and
various smaller improvements to related log messages.
* push: move log messages for removed snapshot/group
* fix #5710: api: backup: stat known chunks on backup finish to ensure any
problem/corruption is caught earlier.
* pxar: extract: make invalid ACLs non-fatal and only log them; there's
nothing to gain by failing the restore completely.
* server: push: log encountered empty backup groups during sync
* fix #3786: ui, api, cli: add resync-corrupt option to sync jobs
* docs: add security implications of prune and change detection mode
* fix #2996: client: backup restore: add support to pass match patterns for
a selective restore
* docs: add installation media preparation and installation wizard guides
* api: enforce a minimum length of 8 characters for new passwords to follow
recent NIST recommendations.
* ui, api: support configuring a consent banner that is shown before login
to allow complying with some (government) policy frameworks.
* ui, api: add initial support for removable datastores, providing better
integration for datastores located on a non-permanently attached medium.
-- Proxmox Support Team <support@proxmox.com> Mon, 25 Nov 2024 22:52:11 +0100
rust-proxmox-backup (3.2.11-1) bookworm; urgency=medium
* fix #3044: server: implement push support for sync operations
* push sync related refactors
-- Proxmox Support Team <support@proxmox.com> Thu, 21 Nov 2024 12:03:50 +0100
rust-proxmox-backup (3.2.10-1) bookworm; urgency=medium
* api: disk list: do not fail but just log error on gathering smart data
* cargo: require proxmox-log 0.2.6 to reduce spamming the logs with the
whole worker task contents
-- Proxmox Support Team <support@proxmox.com> Tue, 19 Nov 2024 22:36:14 +0100
rust-proxmox-backup (3.2.9-1) bookworm; urgency=medium
* client: catalog: fall back to metadata archives for dumping the catalog
* client: catalog shell: make the catalog optional and use the pxar accessor
for navigation if the catalog is not provided, as is the case, for
example, for split pxar archives.
* client: catalog shell: drop payload offset in `stat` output, as this is an
internal value that only helps with debugging during development.
* sync: fix premature return in snapshot-skip filter logic to avoid that the
first snapshot newer than the last synced one gets unconditionally
included.
* fix #5861: ui: remove minimum required username length in dialog for
changing the owner of a backup group, as PBS has supported usernames
shorter than 4 characters for a while now.
* fix #5439: allow one to reuse an existing datastore on datastore creation
* ui: disallow datastores in the file system root; this is almost never what
users want, and they can still use the CLI for such an edge case.
* fix #5233: api: tape: add explicit required permissions for the move tape,
update tape and destroy tape endpoints, requiring Tape.Modify and
Tape.Write on the `/tape` ACL object path, respectively. This avoids
requiring the use of the root account for basic tape management.
* client: catalog shell: make the root element its own parent to avoid
navigating below archive root, which makes no sense and just causes odd
glitches.
* api: disk management: avoid retrieving the lsblk result twice when listing
disks; while it's not overly expensive, it certainly does not help
performance either.
* api: disk management: parallelize retrieving the output from smartctl
checks.
* fix #5600: pbs2to3: make check more flexible to allow one to run arbitrary
newer '-pve' kernels after upgrade
* client: pxar: perform match pattern check for exclusion only once
* client: pxar: add debug output for exclude pattern matches to more
conveniently debug possible issues.
* fix #5868: rest-server: handshake detection: avoid infinite loop on
connection abort
-- Proxmox Support Team <support@proxmox.com> Thu, 14 Nov 2024 16:10:10 +0100
rust-proxmox-backup (3.2.8-1) bookworm; urgency=medium
* switch various log statements in worker tasks to the newer, more flexible
proxmox log crate. With this change, errors from task logs are now also
logged to the system log, increasing their visibility.
* datastore api: list snapshots: avoid calculating the protected attribute
twice per snapshot; this reduces the amount of file metadata requests.
* avoid re-calculating the backup snapshot path's date time component when
getting the full path, reducing calls to the relatively slow strftime
function from libc.
* fix #3699: client: prefer the XDG cache directory for temporary files with
a fallback to using /tmp, as before (a minimal sketch of this lookup
follows this changelog entry).
* sync job: improve log message for when syncing the root namespace.
* client: increase read buffer from 8 KiB to 4 MiB for raw image based
backups. This reduces the time spent polling between the reader, chunker
and uploader async tasks and thus can improve backup speed significantly,
especially on setups with fast network and storage.
* client benchmark: avoid unnecessary allocation in the AES benchmark, which
caused artificial overhead. The benchmark AES results should now be more
in line with the hardware capability and what the PBS client could already
do. On our test system we saw an increase by a factor of 2.3 on this
specific benchmark.
* docs: add external metrics server page
* tfa: webauthn: serialize OriginUrl following RFC6454
* factor out apt and apt-repository handling into a new library crate for
re-use in other projects. There should be no functional change.
* fix various typos all over the place found using the rust based `typos`
tool.
* datastore: data blob compression: increase compression throughput by
switching away from a higher level zstd method to a lower level one, which
allows us to control the target buffer size directly and thus avoid some
allocation and syscall overhead. We saw the compression bandwidth increase
by a factor of 1.19 in our tests, where both the source data and the target
datastore were located in a memory-backed tmpfs.
* daily-update: ensure notification system context is initialized.
* backup reader: derive if debug messages should be printed from the global
log level. This avoids printing some debug messages by default, e.g., the
"protocol upgrade done" message from sync jobs.
* ui: user view: disable 'Unlock TFA' button by default to improve UX if no
user is selected.
* manager cli: ensure the worker task finishes when triggering a reload of
the system network.
* fix #5622: backup client: properly handle rate and burst parameters.
Previously, passing any non-integer value, like `1mb`, was ignored.
* tape: read element status: ignore responses where the library specifies
that it will return a volume tag but then does not include that field in
the actual response. As both the primary and the alternative volume tag
are not required by PBS, this specific error can simply be downgraded to a
warning.
* pxar: dump archive: print entries to stdout instead of stderr
* sync jobs: various clean-ups and refactoring that should not result in any
semantic change.
* metric collection: put metrics in a cache with a 30-minute lifetime.
* api: add /status/metrics API to allow pull-based metric servers to gather
data directly.
* partial fix #5560: client: periodically show backup progress
* docs: add proxmox-backup.node.cfg man page
* docs: sync: explicitly mention `removed-vanish` flag
-- Proxmox Support Team <support@proxmox.com> Fri, 18 Oct 2024 19:05:41 +0200
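The #3699 entry above (prefer the XDG cache directory for temporary files, falling back to /tmp) can be sketched in a few lines of Rust. This is only an illustration of the lookup order; the function name `temp_base_dir` and the `proxmox-backup` subdirectory are hypothetical and not taken from the client's actual code.

use std::env;
use std::path::PathBuf;

// Pick a base directory for temporary files: $XDG_CACHE_HOME if set,
// otherwise ~/.cache (the XDG default), otherwise fall back to /tmp.
fn temp_base_dir() -> PathBuf {
    if let Some(dir) = env::var_os("XDG_CACHE_HOME") {
        return PathBuf::from(dir);
    }
    if let Some(home) = env::var_os("HOME") {
        return PathBuf::from(home).join(".cache");
    }
    PathBuf::from("/tmp")
}

fn main() {
    // Hypothetical subdirectory name, shown only to illustrate the lookup order.
    let dir = temp_base_dir().join("proxmox-backup");
    println!("temporary files would go below {}", dir.display());
}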
rust-proxmox-backup (3.2.7-1) bookworm; urgency=medium
* docs: drop blanket statement recommending against remote storage
* ui: gc job edit: fix i18n gettext usage
* pxar: improve error handling, e.g., avoiding duplicate information
* close #4763: client: add command to forget (delete) whole backup group
with all its snapshots
* close #5571: client: fix regression for `map` command
* client: mount: wait for child to return before exiting to provide better
UX for some edge paths
* fix #5304: client: set process uid/gid for `.pxarexclude-cli` to avoid
issues when trying to backup and restore the backup as non-root user.
* http client: keep renewal future running on failed re-auth to make it more
resilient against some transient errors, like the request just failing due
to network instability.
* datastore: fix problem with operations counting for the case where the
`.chunks/` directory is not available (deleted/moved)
* manager: use confirmation helper in wipe-disk command
-- Proxmox Support Team <support@proxmox.com> Wed, 03 Jul 2024 13:33:51 +0200
rust-proxmox-backup (3.2.6-1) bookworm; urgency=medium
* tape: disable Programmable Early Warning Zone (PEWZ)
* tape: handle PEWZ like regular early warning
* docs: add note for not using remote storages
* client: pxar: fix fuse mount performance for split archives
-- Proxmox Support Team <support@proxmox.com> Mon, 17 Jun 2024 10:18:13 +0200
rust-proxmox-backup (3.2.5-1) bookworm; urgency=medium
* pxar: add support for split archives
* fix #3174: pxar: enable caching and meta comparison
* docs: file formats: describe split pxar archive file layout
* docs: add section describing change detection mode
* api: datastore: add optional archive-name to file-restore
* client: backup: conditionally write catalog for file level backups
* docs: add table listing possible change detection modes
-- Proxmox Support Team <support@proxmox.com> Mon, 10 Jun 2024 13:39:54 +0200
rust-proxmox-backup (3.2.4-1) bookworm; urgency=medium
* fix: network api: permission using wrong pathname
* fix #5503: d/control: bump dependency for proxmox-widget-toolkit
* auth: add locking to `PbsAuthenticator` to avoid race conditions
-- Proxmox Support Team <support@proxmox.com> Wed, 05 Jun 2024 16:23:38 +0200
rust-proxmox-backup (3.2.3-1) bookworm; urgency=medium
* api-types: remove influxdb bucket name restrictions
* api: datastore status: delay lookup after permission check to improve
consistency of tracked read operations
* tape: improve throughput by not unnecessarily syncing/committing after
every archive written beyond the first 128 GiB
* tape: save 'bytes used' in the tape inventory and show them on the web UI
to allow users to more easily see the usage of a tape
* tape drive status: return drive activity (like cleaning, loading,
unloading, writing, ...) in the API and show them in the UI
* ui: tape drive status: avoid checking some specific status if the current
drive activity would block doing so anyway
* tape: write out basic MAM host-type attributes to media to make them more
easily identifiable as Proxmox Backup Server tape by common LTO tooling.
* api: syslog: fix the documented type of the return value
* fix #5465: restore daemon: mount NTFS with UTF-8 charset
* restore daemon: log some more errors on directory traversal
* fix #5422: ui: garbage-collection: make columns in global view sortable
* auth: move to hmac keys for csrf tokens as future-proofing
* auth: upgrade hashes on user login if a user's password is not hashed with
the latest password hashing function, for hardening purposes
* auth: use ed25519 keys when generating new auth api keys
* notifications: fix legacy sync notifications
* docs: document notification-mode and merge old notification section
* docs: notifications: rewrite overview for more clarity
* ui: datastore options: link to 'notification-mode' section
* acme: explicitly print a query when prompting for the custom directory URI
-- Proxmox Support Team <support@proxmox.com> Wed, 22 May 2024 19:31:35 +0200
rust-proxmox-backup (3.2.2-1) bookworm; urgency=medium
* ui: notifications fix empty text format for the default mail author
* ui: tape backup: do not try to delete the namespace property if it's empty
* ui: sync job: fix error if local namespace is selected first
-- Proxmox Support Team <support@proxmox.com> Thu, 25 Apr 2024 12:06:04 +0200
rust-proxmox-backup (3.2.1-1) bookworm; urgency=medium
* implement Active Directory support:
- api: access: add routes for managing AD realms
- config: domains: add new "ad" section type for AD realms
- realm sync: add sync job for AD realms
- manager cli: add sub-command for managing AD realms
- docs: user-management: add section about AD realm support
* auth: fix requesting the TFA write lock exclusively
* installation: add section about unattended/automatic installation
* api: tape config: forbid reusing IDs between tape changer and tape drive
* api: add support for creating and updating VLAN interfaces
* ui: enable the VLAN widget that got moved over from PVE to the generic
widget-toolkit
-- Proxmox Support Team <support@proxmox.com> Wed, 24 Apr 2024 22:05:36 +0200
rust-proxmox-backup (3.2.0-1) bookworm; urgency=medium
* fix #5248: client: allow self-signed/untrusted certificate chains
* api: make prune-group a real worker task to avoid timeouts after 30s
* ui: sync view: rename column 'Max. Recursion' -> 'Max. Depth'
* api: assert that maintenance mode transitions are valid, e.g., do
not allow clearing the special "delete" maintenance mode
* fix #3217: ui: add global prune and GC job view for an overview over
all datastores
* fix #4723: manager: add new "garbage-collection list" CLI command to
list all GC jobs
* ui: garbage collection: show removed and pending data of last run in
bytes
* fix #5251: login: set autocomplete on password and user
* allow sending notifications via advanced proxmox-notify crate
* api: add endpoints for querying known notification values/fields
* api: add endpoints for gotify, smtp, and sendmail targets
* api: add endpoints for managing notification matchers
* api: add endpoints for querying/testing notification targets
* server: notifications:
- send tape notifications via notification system
- send ACME notifications via notification system
- send update notifications via notification system
- send sync notifications via notification system
- send verify notifications via notification system
- send prune notifications via notification system
- send GC notifications via notification system
* docs: add documentation for notification system
* ui: notifications: pull in UX improvements for match rules creation
* api: notification: also list datastores if user has only Backup
privs
* manager: add CLI commands for SMTP, sendmail, and gotify
endpoints
* manager: add CLI for administrating notification matchers and targets
-- Proxmox Support Team <support@proxmox.com> Tue, 23 Apr 2024 23:45:29 +0200
rust-proxmox-backup (3.1.5-1) bookworm; urgency=medium
* fix #5190: api: OIDC: accept generic URIs for the ACR value
* fix #5229: tape: remove max sequence number limit
* client: add "delete-groups" option to allow one to delete all groups in a
hierarchy, which allows removing non-empty namespaces
* fix #4975: client: add option to ignore E2BIG error flag when restoring,
to work around a kernel interface limitation where some file systems can
have a bigger extended attribute (xattr) than the kernel supports reading.
This can happen due to the file system having been
accessed by another OS version or even an entirely different OS type.
* docs: clarify prune settings slightly more
* ui: trim whitespaces when adding a subscription key
* ui: system config: fix deleting DNS entries
* fix #5217: api: send missing header when upgrading to HTTP/2 to improve
support with reverse proxies like caddy
* docs: lto barcode generator: disable add button when fields are not valid
* docs: lto barcode generator: add LTO-9 and WORM types
* ui: adapt to the API no longer accepting any additional parameters if an
API endpoint does not explicitly state that it needs that behavior:
- metrics: avoid trying to load existing config; fixes a case where a
wrong digest was sent along, failing the creation of the metrics host
entry
- avoid sending a 'delete' parameter on create for tape backup jobs, prune
settings, sync jobs, verify jobs
- user edit: don't send realm property
* server: sync job: return `PullStats` for pull related methods
* fix #5285: api: sync job: add job summary to task log
* ui: tape: transfer: increase timeout to 3 minutes
* add 'confirmation-password' parameter to user password change API/UI
* remove datastore from internal cache based on maintenance mode to avoid
that a lock file is kept open. With that lock file closed, one can use an
offline maintenance mode to cleanly unmount the mount point where such a
datastore resides.
* docs: clarify difference between zfs cache and log
* fix #5188: gui: add copy to clipboard on snapshots
* api: sync job: log stats for removed vanished entities
* docs: add examples for --exclude parameter
* ui: prune job: disallow setting blank schedule
-- Proxmox Support Team <support@proxmox.com> Mon, 25 Mar 2024 16:52:03 +0100
rust-proxmox-backup (3.1.4-1) bookworm; urgency=medium
* api: acme: skip serializing empty 'api' and 'data' option
* tape: fix regression in restoring an encryption key from medium; avoid
trying to load the key to the drive, which cannot work in this special
case.
-- Proxmox Support Team <support@proxmox.com> Thu, 01 Feb 2024 16:30:18 +0100
rust-proxmox-backup (3.1.3-1) bookworm; urgency=medium
* improve efficiency of detecting if a block device is a partition
* acme: api: add option for external account binding to account registration
endpoint
* ui: datastore summary handle non-existent 'avail' and 'used' status values
* tape: allow configuring the timeout for "wait until ready" to better cope
with the long initialization duration that happens on the first use of
LTO 9+ tapes
* tape: improve error on decode element status page
* tape: improve LTO 9 compatibility
* fix #4904: tape changer: add option to explicitly eject the tape before
unloading it
* docs: tape: replace use of 'export-media' with correct 'export-media-set'
option
* docs: tape: add LTO 9 considerations
* fix #5117: ui: node info: avoid invalid array access for certain foreign
kernels
* d/control: explicitly depend on gdisk package to ensure it's available
when installing on top of a plain Debian installation
* tape: work around buggy changer implementations when reading the element
status
* system report: include prune.cfg
* fix #4315: jobs: modify group filter so include/exclude is tracked
* ui: show if Filter includes or excludes
* datastore: add additional context for a parsing error when getting the
owner of a backup group
* api: tape: optionally accept uuid for destroying or moving a media, so
that one can uniquely identify existing tapes with duplicate labels.
* api: tape: don't allow duplicate media label-texts anymore
* ui: tape inventory: use uuid as id
* ui: tape: add button to remove a medium from the inventory, while not
touching the data
* api: custom certificate upload: make key optional and use the existing
key, if it's not specified.
* close #4819: ui: allow usernames shorter than 4 characters
* tape: rework on-drive encryption key handling and ensure this key does not
get unloaded too early
-- Proxmox Support Team <support@proxmox.com> Mon, 22 Jan 2024 15:20:45 +0100
rust-proxmox-backup (3.1.2-1) bookworm; urgency=medium
* sync: fix recent regression with recursive remote sync

debian/control

@@ -15,28 +15,29 @@ Build-Depends: bash-completion,
libacl1-dev,
libfuse3-dev,
librust-anyhow-1+default-dev,
librust-apt-pkg-native-0.3+default-dev (>= 0.3.2-~~),
librust-async-trait-0.1+default-dev (>= 0.1.56-~~),
librust-base64-0.13+default-dev,
librust-bitflags-2+default-dev (>= 2.4-~~),
librust-bitflags-1+default-dev (>= 1.2.1-~~),
librust-bytes-1+default-dev,
librust-cidr-0.2+default-dev (>= 0.2.1-~~),
librust-const-format-0.2+default-dev,
librust-crc32fast-1+default-dev,
librust-crossbeam-channel-0.5+default-dev,
librust-endian-trait-0.6+arrays-dev,
librust-endian-trait-0.6+default-dev,
librust-env-logger-0.11+default-dev,
librust-env-logger-0.10+default-dev,
librust-flate2-1+default-dev,
librust-foreign-types-0.3+default-dev,
librust-futures-0.3+default-dev,
librust-h2-0.4+default-dev,
librust-h2-0.4+legacy-dev,
librust-h2-0.4+stream-dev,
librust-h2-0.3+default-dev,
librust-h2-0.3+stream-dev,
librust-handlebars-3+default-dev,
librust-hex-0.4+default-dev (>= 0.4.3-~~),
librust-hex-0.4+serde-dev (>= 0.4.3-~~),
librust-hyper-0.14+backports-dev,
librust-http-0.2+default-dev,
librust-hyper-0.14+default-dev,
librust-hyper-0.14+deprecated-dev,
librust-hyper-0.14+full-dev,
librust-lazy-static-1+default-dev (>= 1.4-~~),
librust-libc-0.2+default-dev,
librust-log-0.4+default-dev (>= 0.4.17-~~),
librust-nix-0.26+default-dev (>= 0.26.1-~~),
@@ -45,76 +46,68 @@ Build-Depends: bash-completion,
librust-once-cell-1+default-dev (>= 1.3.1-~~),
librust-openssl-0.10+default-dev (>= 0.10.40-~~),
librust-pathpatterns-0.3+default-dev,
librust-pbs-api-types-0.2+default-dev (>= 0.2.2),
librust-percent-encoding-2+default-dev (>= 2.1-~~),
librust-pin-project-lite-0.2+default-dev,
librust-proxmox-acme-0.5+default-dev (>= 0.5.3-~~),
librust-proxmox-apt-0.11+cache-dev,
librust-proxmox-apt-0.11+default-dev,
librust-proxmox-apt-api-types-1+default-dev (>= 1.0.1-~~),
librust-proxmox-acme-rs-0.4+default-dev,
librust-proxmox-apt-0.10+default-dev (>= 0.10.5-~~),
librust-proxmox-async-0.4+default-dev,
librust-proxmox-auth-api-0.4+api-dev,
librust-proxmox-auth-api-0.4+default-dev,
librust-proxmox-auth-api-0.4+pam-authenticator-dev,
librust-proxmox-auth-api-0.3+api-dev,
librust-proxmox-auth-api-0.3+api-types-dev,
librust-proxmox-auth-api-0.3+default-dev,
librust-proxmox-auth-api-0.3+pam-authenticator-dev,
librust-proxmox-borrow-1+default-dev,
librust-proxmox-compression-0.2+default-dev,
librust-proxmox-config-digest-0.1+default-dev,
librust-proxmox-daemon-0.1+default-dev,
librust-proxmox-fuse-0.1+default-dev (>= 0.1.3-~~),
librust-proxmox-http-0.9+client-dev (>= 0.9.5-~~),
librust-proxmox-http-0.9+client-trait-dev (>= 0.9.5-~~),
librust-proxmox-http-0.9+default-dev (>= 0.9.5-~~),
librust-proxmox-http-0.9+http-helpers-dev (>= 0.9.5-~~),
librust-proxmox-http-0.9+proxmox-async-dev (>= 0.9.5-~~),
librust-proxmox-http-0.9+rate-limited-stream-dev (>= 0.9.5-~~),
librust-proxmox-http-0.9+rate-limiter-dev (>= 0.9.5-~~),
librust-proxmox-http-0.9+websocket-dev (>= 0.9.5-~~),
librust-proxmox-http-0.9+client-dev,
librust-proxmox-http-0.9+client-trait-dev,
librust-proxmox-http-0.9+default-dev,
librust-proxmox-http-0.9+http-helpers-dev,
librust-proxmox-http-0.9+proxmox-async-dev,
librust-proxmox-http-0.9+rate-limited-stream-dev,
librust-proxmox-http-0.9+rate-limiter-dev,
librust-proxmox-http-0.9+websocket-dev,
librust-proxmox-human-byte-0.1+default-dev,
librust-proxmox-io-1+default-dev (>= 1.0.1-~~),
librust-proxmox-io-1+tokio-dev (>= 1.0.1-~~),
librust-proxmox-lang-1+default-dev (>= 1.1-~~),
librust-proxmox-ldap-0.2+default-dev (>= 0.2.1-~~),
librust-proxmox-log-0.2+default-dev (>= 0.2.6-~~),
librust-proxmox-metrics-0.3+default-dev (>= 0.3.1-~~),
librust-proxmox-notify-0.5+default-dev (>= 0.5.1-~~),
librust-proxmox-notify-0.5+pbs-context-dev (>= 0.5.1-~~),
librust-proxmox-metrics-0.3+default-dev,
librust-proxmox-openid-0.10+default-dev,
librust-proxmox-rest-server-0.8+default-dev (>= 0.8.9-~~),
librust-proxmox-rest-server-0.8+rate-limited-stream-dev (>= 0.8.9-~~),
librust-proxmox-rest-server-0.8+templates-dev (>= 0.8.9-~~),
librust-proxmox-router-3+cli-dev,
librust-proxmox-router-3+server-dev,
librust-proxmox-rrd-0.4+default-dev,
librust-proxmox-rrd-api-types-1+default-dev (>= 1.0.2-~~),
librust-proxmox-schema-4+api-macro-dev,
librust-proxmox-schema-4+default-dev,
librust-proxmox-rest-server-0.5+default-dev (>= 0.5.1-~~),
librust-proxmox-rest-server-0.5+rate-limited-stream-dev (>= 0.5.1-~~),
librust-proxmox-rest-server-0.5+templates-dev (>= 0.5.1-~~),
librust-proxmox-router-2+cli-dev,
librust-proxmox-router-2+default-dev,
librust-proxmox-router-2+server-dev,
librust-proxmox-schema-2+api-macro-dev,
librust-proxmox-schema-2+default-dev,
librust-proxmox-section-config-2+default-dev,
librust-proxmox-serde-0.1+default-dev (>= 0.1.1-~~),
librust-proxmox-serde-0.1+serde-json-dev (>= 0.1.1-~~),
librust-proxmox-shared-cache-0.1+default-dev,
librust-proxmox-shared-memory-0.3+default-dev,
librust-proxmox-sortable-macro-0.1+default-dev (>= 0.1.2-~~),
librust-proxmox-subscription-0.5+api-types-dev,
librust-proxmox-subscription-0.5+default-dev,
librust-proxmox-sys-0.6+acl-dev (>= 0.6.5-~~),
librust-proxmox-sys-0.6+crypt-dev (>= 0.6.5-~~),
librust-proxmox-sys-0.6+default-dev (>= 0.6.7-~~),
librust-proxmox-sys-0.6+logrotate-dev (>= 0.6.5-~~),
librust-proxmox-sys-0.6+timer-dev (>= 0.6.5-~~),
librust-proxmox-systemd-0.1+default-dev,
librust-proxmox-tfa-5+api-dev,
librust-proxmox-tfa-5+api-types-dev,
librust-proxmox-tfa-5+default-dev,
librust-proxmox-time-2+default-dev,
librust-proxmox-subscription-0.4+api-types-dev (>= 0.4.2-~~),
librust-proxmox-subscription-0.4+default-dev (>= 0.4.2-~~),
librust-proxmox-sys-0.5+acl-dev (>= 0.5.2-~~),
librust-proxmox-sys-0.5+crypt-dev (>= 0.5.2-~~),
librust-proxmox-sys-0.5+default-dev (>= 0.5.2-~~),
librust-proxmox-sys-0.5+logrotate-dev (>= 0.5.2-~~),
librust-proxmox-sys-0.5+timer-dev (>= 0.5.2-~~),
librust-proxmox-tfa-4+api-dev (>= 4.0.4-~~),
librust-proxmox-tfa-4+api-types-dev (>= 4.0.4-~~),
librust-proxmox-tfa-4+default-dev (>= 4.0.4-~~),
librust-proxmox-time-1+default-dev (>= 1.1.2-~~),
librust-proxmox-uuid-1+default-dev,
librust-proxmox-uuid-1+serde-dev,
librust-proxmox-worker-task-0.1+default-dev,
librust-pxar-0.12+default-dev (>= 0.12.1-~~),
librust-pxar-0.10+default-dev (>= 0.10.2-~~),
librust-regex-1+default-dev (>= 1.5.5-~~),
librust-rustyline-9+default-dev,
librust-serde-1+default-dev,
librust-serde-1+derive-dev,
librust-serde-cbor-0.11+default-dev (>= 0.11.1-~~),
librust-serde-json-1+default-dev,
librust-serde-plain-1+default-dev,
librust-siphasher-0.3+default-dev,
librust-syslog-6+default-dev,
librust-tar-0.4+default-dev,
librust-termcolor-1+default-dev (>= 1.1.2-~~),
@ -138,14 +131,12 @@ Build-Depends: bash-completion,
librust-tokio-util-0.7+default-dev,
librust-tokio-util-0.7+io-dev,
librust-tower-service-0.3+default-dev,
librust-tracing-0.1+default-dev,
librust-udev-0.4+default-dev,
librust-url-2+default-dev (>= 2.1-~~),
librust-walkdir-2+default-dev,
librust-xdg-2+default-dev (>= 2.2-~~),
librust-zstd-0.12+bindgen-dev,
librust-zstd-0.12+default-dev,
librust-zstd-safe-6+default-dev,
libsgutils2-dev,
libstd-rust-dev,
libsystemd-dev (>= 246-~~),
@ -172,7 +163,6 @@ Rules-Requires-Root: binary-targets
Package: proxmox-backup-server
Architecture: any
Depends: fonts-font-awesome,
gdisk,
libjs-extjs (>= 7~),
libjs-qrcodejs (>= 1.20201119),
libproxmox-acme-plugins,
@ -184,7 +174,7 @@ Depends: fonts-font-awesome,
postfix | mail-transport-agent,
proxmox-backup-docs,
proxmox-mini-journalreader,
proxmox-widget-toolkit (>= 4.3.3),
proxmox-widget-toolkit (>= 3.5.2),
pve-xtermjs (>= 4.7.0-1),
sg3-utils,
smartmontools,
@ -205,14 +195,6 @@ Description: Proxmox Backup Client tools
This package contains the Proxmox Backup client, which provides a
simple command line tool to create and restore backups.
Package: proxmox-backup-client-static
Architecture: any
Depends: qrencode, ${misc:Depends},
Conflicts: proxmox-backup-client,
Description: Proxmox Backup Client tools (statically linked)
This package contains the Proxmox Backup client, which provides a
simple command line tool to create and restore backups.
Package: proxmox-backup-docs
Build-Profiles: <!nodoc>
Section: doc

2
debian/copyright vendored
View File

@ -1,4 +1,4 @@
Copyright (C) 2019 - 2025 Proxmox Server Solutions GmbH
Copyright (C) 2019 - 2023 Proxmox Server Solutions GmbH
This software is written by Proxmox Server Solutions GmbH <support@proxmox.com>

View File

@ -4,5 +4,4 @@ proxmox-backup-server: elevated-privileges 4755 root/root [usr/lib/x86_64-linux-
proxmox-backup-server: systemd-service-file-refers-to-unusual-wantedby-target getty.target [lib/systemd/system/proxmox-backup-banner.service]
proxmox-backup-server: uses-dpkg-database-directly [usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-api]
proxmox-backup-server: uses-dpkg-database-directly [usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-proxy]
proxmox-backup-server: uses-dpkg-database-directly [usr/sbin/pbs2to3]
proxmox-backup-server: uses-dpkg-database-directly [usr/sbin/proxmox-backup-debug]

15
debian/postinst vendored
View File

@ -20,7 +20,15 @@ case "$1" in
# modeled after dh_systemd_start output
systemctl --system daemon-reload >/dev/null || true
if [ -n "$2" ]; then
_dh_action=try-reload-or-restart
if dpkg --compare-versions "$2" 'lt' '1.0.7-1'; then
# there was an issue with reloading and systemd being confused in older daemon versions
# so restart instead of reload if upgrading from there, see commit 0ec79339f7aebf9
# FIXME: remove with PBS 2.1
echo "Upgrading from older proxmox-backup-server: restart (not reload) daemons"
_dh_action=try-restart
else
_dh_action=try-reload-or-restart
fi
else
_dh_action=start
fi
@ -72,11 +80,6 @@ EOF
update_sync_job "$prev_job"
fi
fi
if dpkg --compare-versions "$2" 'lt' '3.3.5~'; then
# ensure old locking is used by the daemon until a reboot happened
touch "/run/proxmox-backup/old-locking"
fi
fi
;;

View File

@ -1,2 +0,0 @@
debian/proxmox-backup-client.bc proxmox-backup-client
debian/pxar.bc pxar

View File

@ -1,4 +0,0 @@
usr/share/man/man1/proxmox-backup-client.1
usr/share/man/man1/pxar.1
usr/share/zsh/vendor-completions/_proxmox-backup-client
usr/share/zsh/vendor-completions/_pxar

View File

@ -9,7 +9,7 @@ update_initramfs() {
CACHE_PATH_DBG="/var/cache/proxmox-backup/file-restore-initramfs-debug.img"
# cleanup first, in case proxmox-file-restore was uninstalled since we do
# not want an unusable image lying around
# not want an unuseable image lying around
rm -f "$CACHE_PATH"
if [ ! -f "$INST_PATH/initramfs.img" ]; then

View File

@ -4,7 +4,6 @@ etc/proxmox-backup-daily-update.service /lib/systemd/system/
etc/proxmox-backup-daily-update.timer /lib/systemd/system/
etc/proxmox-backup-proxy.service /lib/systemd/system/
etc/proxmox-backup.service /lib/systemd/system/
etc/removable-device-attach@.service /lib/systemd/system/
usr/bin/pmt
usr/bin/pmtx
usr/bin/proxmox-tape
@ -31,44 +30,12 @@ usr/share/man/man5/acl.cfg.5
usr/share/man/man5/datastore.cfg.5
usr/share/man/man5/domains.cfg.5
usr/share/man/man5/media-pool.cfg.5
usr/share/man/man5/notifications-priv.cfg.5
usr/share/man/man5/notifications.cfg.5
usr/share/man/man5/proxmox-backup.node.cfg.5
usr/share/man/man5/prune.cfg.5
usr/share/man/man5/remote.cfg.5
usr/share/man/man5/sync.cfg.5
usr/share/man/man5/tape-job.cfg.5
usr/share/man/man5/tape.cfg.5
usr/share/man/man5/user.cfg.5
usr/share/man/man5/verification.cfg.5
usr/share/proxmox-backup/templates/default/acme-err-body.txt.hbs
usr/share/proxmox-backup/templates/default/acme-err-subject.txt.hbs
usr/share/proxmox-backup/templates/default/gc-err-body.txt.hbs
usr/share/proxmox-backup/templates/default/gc-err-subject.txt.hbs
usr/share/proxmox-backup/templates/default/gc-ok-body.txt.hbs
usr/share/proxmox-backup/templates/default/gc-ok-subject.txt.hbs
usr/share/proxmox-backup/templates/default/package-updates-body.txt.hbs
usr/share/proxmox-backup/templates/default/package-updates-subject.txt.hbs
usr/share/proxmox-backup/templates/default/prune-err-body.txt.hbs
usr/share/proxmox-backup/templates/default/prune-err-subject.txt.hbs
usr/share/proxmox-backup/templates/default/prune-ok-body.txt.hbs
usr/share/proxmox-backup/templates/default/prune-ok-subject.txt.hbs
usr/share/proxmox-backup/templates/default/sync-err-body.txt.hbs
usr/share/proxmox-backup/templates/default/sync-err-subject.txt.hbs
usr/share/proxmox-backup/templates/default/sync-ok-body.txt.hbs
usr/share/proxmox-backup/templates/default/sync-ok-subject.txt.hbs
usr/share/proxmox-backup/templates/default/tape-backup-err-body.txt.hbs
usr/share/proxmox-backup/templates/default/tape-backup-err-subject.txt.hbs
usr/share/proxmox-backup/templates/default/tape-backup-ok-body.txt.hbs
usr/share/proxmox-backup/templates/default/tape-backup-ok-subject.txt.hbs
usr/share/proxmox-backup/templates/default/tape-load-body.txt.hbs
usr/share/proxmox-backup/templates/default/tape-load-subject.txt.hbs
usr/share/proxmox-backup/templates/default/test-body.txt.hbs
usr/share/proxmox-backup/templates/default/test-subject.txt.hbs
usr/share/proxmox-backup/templates/default/verify-err-body.txt.hbs
usr/share/proxmox-backup/templates/default/verify-err-subject.txt.hbs
usr/share/proxmox-backup/templates/default/verify-ok-body.txt.hbs
usr/share/proxmox-backup/templates/default/verify-ok-subject.txt.hbs
usr/share/zsh/vendor-completions/_pmt
usr/share/zsh/vendor-completions/_pmtx
usr/share/zsh/vendor-completions/_proxmox-backup-debug

View File

@ -16,6 +16,3 @@ SUBSYSTEM=="scsi_generic", SUBSYSTEMS=="scsi", ATTRS{type}=="1", ENV{ID_SCSI_SER
SYMLINK+="tape/by-id/scsi-$env{ID_SCSI_SERIAL}-sg"
LABEL="persistent_storage_tape_end"
# triggers the mounting of a removable device
ACTION=="add", SUBSYSTEM=="block", ENV{ID_FS_UUID}!="", TAG+="systemd", ENV{SYSTEMD_WANTS}="removable-device-attach@$env{ID_FS_UUID}"

10
debian/rules vendored
View File

@ -8,7 +8,7 @@ include /usr/share/rustc/architecture.mk
export BUILD_MODE=release
export CARGO=/usr/share/cargo/bin/cargo
CARGO=/usr/share/cargo/bin/cargo
export CFLAGS CXXFLAGS CPPFLAGS LDFLAGS
export DEB_HOST_RUST_TYPE DEB_HOST_GNU_TYPE
@ -28,11 +28,6 @@ override_dh_auto_configure:
@perl -ne 'if (/^version\s*=\s*"(\d+(?:\.\d+)+)"/) { my $$v_cargo = $$1; my $$v_deb = "$(DEB_VERSION_UPSTREAM)"; \
die "ERROR: d/changelog <-> Cargo.toml version mismatch: $$v_cargo != $$v_deb\n" if $$v_cargo ne $$v_deb; exit(0); }' Cargo.toml
$(CARGO) prepare-debian $(CURDIR)/debian/cargo_registry --link-from-system
# `cargo build` and `cargo install` have different config precedence, symlink
# the wrapper config into a place where `build` picks it up as well..
# https://doc.rust-lang.org/cargo/commands/cargo-install.html#configuration-discovery
mkdir -p .cargo
ln -s $(CARGO_HOME)/config.toml $(CURDIR)/.cargo/config.toml
dh_auto_configure
override_dh_auto_build:
@ -47,9 +42,6 @@ override_dh_auto_install:
dh_auto_install -- \
PROXY_USER=backup \
LIBDIR=/usr/lib/$(DEB_HOST_MULTIARCH)
mkdir -p debian/proxmox-backup-client-static/usr/bin
mv debian/tmp/usr/bin/proxmox-backup-client-static debian/proxmox-backup-client-static/usr/bin/proxmox-backup-client
mv debian/tmp/usr/bin/pxar-static debian/proxmox-backup-client-static/usr/bin/pxar
override_dh_installsystemd:
dh_installsystemd -pproxmox-backup-server proxmox-backup-daily-update.timer

View File

@ -1,65 +1,55 @@
include ../defines.mk
GENERATED_SYNOPSIS := \
config/acl/roles.rst \
config/datastore/config.rst \
config/domains/config.rst \
proxmox-tape/synopsis.rst \
proxmox-backup-client/synopsis.rst \
proxmox-backup-client/catalog-shell-synopsis.rst \
proxmox-backup-manager/synopsis.rst \
proxmox-backup-debug/synopsis.rst \
proxmox-file-restore/synopsis.rst \
pxar/synopsis.rst \
pmtx/synopsis.rst \
pmt/synopsis.rst \
config/media-pool/config.rst \
config/notifications-priv/config.rst \
config/notifications/config.rst \
config/tape/config.rst \
config/tape-job/config.rst \
config/user/config.rst \
config/remote/config.rst \
config/sync/config.rst \
config/tape-job/config.rst \
config/tape/config.rst \
config/user/config.rst \
config/verification/config.rst \
config/prune/config.rst \
pmt/synopsis.rst \
pmtx/synopsis.rst \
proxmox-backup-client/catalog-shell-synopsis.rst \
proxmox-backup-client/synopsis.rst \
proxmox-backup-debug/synopsis.rst \
proxmox-backup-manager/synopsis.rst \
proxmox-file-restore/synopsis.rst \
proxmox-tape/synopsis.rst \
pxar/synopsis.rst \
config/acl/roles.rst \
config/datastore/config.rst \
config/domains/config.rst
MAN1_PAGES := \
pbs2to3.1 \
pmt.1 \
pmtx.1 \
proxmox-backup-client.1 \
proxmox-backup-debug.1 \
proxmox-backup-manager.1 \
proxmox-backup-proxy.1 \
proxmox-file-restore.1 \
proxmox-tape.1 \
pxar.1 \
pmtx.1 \
pmt.1 \
proxmox-tape.1 \
proxmox-backup-proxy.1 \
proxmox-backup-client.1 \
proxmox-backup-manager.1 \
proxmox-file-restore.1 \
proxmox-backup-debug.1 \
pbs2to3.1 \
# FIXME: prefix all man pages that are not directly relating to an existing executable with
# `proxmox-backup.`, like the newer added proxmox-backup.node.cfg but add backwards compatible
# symlinks, e.g. with a "5pbs" man page "suffix section".
MAN5_PAGES := \
acl.cfg.5 \
datastore.cfg.5 \
domains.cfg.5 \
media-pool.cfg.5 \
proxmox-backup.node.cfg.5 \
notifications-priv.cfg.5 \
notifications.cfg.5 \
tape.cfg.5 \
tape-job.cfg.5 \
acl.cfg.5 \
user.cfg.5 \
remote.cfg.5 \
sync.cfg.5 \
tape-job.cfg.5 \
tape.cfg.5 \
user.cfg.5 \
verification.cfg.5 \
prune.cfg.5 \
datastore.cfg.5 \
domains.cfg.5
PRUNE_SIMULATOR_FILES := \
prune-simulator/index.html \
prune-simulator/clear-trigger.png \
prune-simulator/documentation.html \
prune-simulator/prune-simulator.js \
prune-simulator/clear-trigger.png \
prune-simulator/prune-simulator.js
PRUNE_SIMULATOR_JS_SOURCE := \
/usr/share/javascript/proxmox-widget-toolkit-dev/Toolkit.js \
@ -91,15 +81,15 @@ API_VIEWER_FILES := \
/usr/share/javascript/proxmox-widget-toolkit-dev/APIViewer.js \
# Sphinx documentation setup
SPHINXOPTS = -E
SPHINXOPTS =
SPHINXBUILD = sphinx-build
BUILDDIR = output
ifeq ($(BUILD_MODE), release)
COMPILEDIR := ../target/$(DEB_HOST_RUST_TYPE)/release
COMPILEDIR := ../target/release
SPHINXOPTS += -t release
else
COMPILEDIR := ../target/$(DEB_HOST_RUST_TYPE)/debug
COMPILEDIR := ../target/debug
SPHINXOPTS += -t devbuild
endif
@ -148,9 +138,9 @@ lto-barcode/lto-barcode-generator.js: ${LTO_BARCODE_JS_SOURCE}
mv $@.tmp $@
.PHONY: html
html: ${GENERATED_SYNOPSIS} images/proxmox-logo.svg _static/custom.css conf.py ${PRUNE_SIMULATOR_FILES} ${LTO_BARCODE_FILES} ${API_VIEWER_SOURCES}
html: ${GENERATED_SYNOPSIS} images/proxmox-logo.svg custom.css conf.py ${PRUNE_SIMULATOR_FILES} ${LTO_BARCODE_FILES} ${API_VIEWER_SOURCES}
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
install -m 0644 _static/custom.js _static/custom.css images/proxmox-logo.svg $(BUILDDIR)/html/_static/
install -m 0644 custom.js custom.css images/proxmox-logo.svg $(BUILDDIR)/html/_static/
install -dm 0755 $(BUILDDIR)/html/prune-simulator
install -m 0644 ${PRUNE_SIMULATOR_FILES} $(BUILDDIR)/html/prune-simulator
install -dm 0755 $(BUILDDIR)/html/lto-barcode

View File

@ -1,5 +1,3 @@
.. _client_usage:
Backup Client Usage
===================
@ -46,24 +44,6 @@ user\@pbs!token@host:store ``user@pbs!token`` host:8007 store
[ff80::51]:1234:mydatastore ``root@pam`` [ff80::51]:1234 mydatastore
================================ ================== ================== ===========
.. _statically_linked_client:
Statically Linked Backup Client
-------------------------------
A statically linked version of the Proxmox Backup client is available for
Linux-based systems where the regular client is not available. Please note that
it is recommended to use the regular client when possible, as the statically
linked client is not a full replacement. For example, name resolution is not
performed via the mechanisms provided by libc, but by a resolver written purely
in the Rust programming language. Therefore, features and modules provided by
the Name Service Switch cannot be used.
The statically linked client is available via the ``pbs-client`` repository as
described in the :ref:`installation <install_pbc>` section.
.. _environment-variables:
Environment Variables
---------------------
@ -109,43 +89,6 @@ Environment Variables
you can add arbitrary comments after the first newline.
System and Service Credentials
------------------------------
Some of the :ref:`environment variables <environment-variables>` above can be
set using `system and service credentials <https://systemd.io/CREDENTIALS/>`_
instead.
============================ ==============================================
Environment Variable Credential Name Equivalent
============================ ==============================================
``PBS_REPOSITORY`` ``proxmox-backup-client.repository``
``PBS_PASSWORD`` ``proxmox-backup-client.password``
``PBS_ENCRYPTION_PASSWORD`` ``proxmox-backup-client.encryption-password``
``PBS_FINGERPRINT`` ``proxmox-backup-client.fingerprint``
============================ ==============================================
For example, the repository password can be stored in an encrypted file as
follows:
.. code-block:: console
# systemd-ask-password -n | systemd-creds encrypt --name=proxmox-backup-client.password - my-api-token.cred
The credential can then be reused inside of unit files or in a transient scope
unit as follows:
.. code-block:: console
# systemd-run --pipe --wait \
--property=LoadCredentialEncrypted=proxmox-backup-client.password:/full/path/to/my-api-token.cred \
--property=SetCredential=proxmox-backup-client.repository:'my_default_repository' \
proxmox-backup-client ...
Additionally, system credentials (e.g. passed down from the hypervisor to a
virtual machine via SMBIOS type 11) can be loaded on a service via
`LoadCredential=` as described in the manual page ``systemd.exec(5)``.
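As a sketch of how this can be wired into a persistent service (the unit name
``my-backup.service`` is a hypothetical example, not something shipped by the
packages), the encrypted credential created above could be referenced from a
drop-in added with ``systemctl edit my-backup.service``:
::
  [Service]
  LoadCredentialEncrypted=proxmox-backup-client.password:/full/path/to/my-api-token.cred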
Output Format
-------------
@ -226,7 +169,6 @@ the client. The format is:
<archive-name>.<type>:<source-path>
The ``archive-name`` must contain alphanumerics, hyphens and underscores only.
Common types are ``.pxar`` for file archives and ``.img`` for block
device images. To create a backup of a block device, run the following command:
@ -315,7 +257,7 @@ Restoring this backup will result in:
.. code-block:: console
# ls -aR restored
ls -aR restored
restored/:
. .. .pxarexclude subfolder0 subfolder1
@ -325,69 +267,6 @@ Restoring this backup will result in:
restored/subfolder1:
. .. file2
The same syntax can also be used directly in the cli with the ``--exclude``
parameter. For example:
.. code-block:: console
# proxmox-backup-client backup archive-name.pxar:./linux --exclude /usr
Multiple paths can be excluded like this:
.. code-block:: console
# proxmox-backup-client backup archive-name.pxar:./linux --exclude=/usr --exclude=/rust
.. _client_change_detection_mode:
Change Detection Mode
~~~~~~~~~~~~~~~~~~~~~
File-based backups containing a lot of data can take a long time, as the default
behavior for the Proxmox backup client is to read all data and encode it into a
pxar archive.
The encoded stream is split into variable sized chunks. For each chunk, a digest
is calculated and used to decide whether the chunk needs to be uploaded or can
be indexed without upload, as it is already available on the server (and
therefore deduplicated). If the backed-up files are largely unchanged,
re-reading them only to find out that the corresponding chunks do not need to
be uploaded after all is time consuming and undesired.
The backup client's ``change-detection-mode`` can be switched from default to
``metadata`` based detection to reduce limitations as described above,
instructing the client to avoid re-reading files with unchanged metadata
whenever possible.
When using this mode, instead of the regular pxar archive, the backup snapshot
is stored into two separate files: the ``mpxar`` containing the archive's
metadata and the ``ppxar`` containing a concatenation of the file contents. This
splitting allows for efficient metadata lookups. When creating the backup
archives, the current file metadata is compared to the one looked up in the
previous ``mpxar`` archive. The operational details are explained more in depth
in the :ref:`technical documentation <change-detection-mode-metadata>`.
Setting the ``change-detection-mode`` to ``data`` creates the same split
archive as the ``metadata`` mode, but without using a previous reference,
therefore re-encoding all file payloads. For details of this mode, please see
the :ref:`technical documentation <change-detection-mode-data>`.
.. _client_change_detection_mode_table:
============ ===================================================================
Mode Description
============ ===================================================================
``legacy`` (current default): Encode all files into a self contained pxar
archive.
``data`` Encode all files into a split data and metadata pxar archive.
``metadata`` Encode changed files, reuse unchanged from previous snapshot,
creating a split archive.
============ ===================================================================
The following shows an example for the client invocation with the `metadata`
mode:
.. code-block:: console
# proxmox-backup-client backup archive-name.pxar:./linux --change-detection-mode=metadata
.. _client_encryption:
@ -528,8 +407,6 @@ version of your master key. The following command sends the output of the
proxmox-backup-client key paperkey --output-format text > qrkey.txt
.. _client_restoring_data:
Restoring Data
--------------
@ -755,28 +632,28 @@ following retention options are available:
Keep the last ``<N>`` backup snapshots.
``--keep-hourly <N>``
Keep backups for the last ``<N>`` hours. If there is more than one backup for
a single hour, only the latest is kept. Hours without backups do not count.
Keep backups for the last ``<N>`` hours. If there is more than one
backup for a single hour, only the latest is kept.
``--keep-daily <N>``
Keep backups for the last ``<N>`` days. If there is more than one backup for a
single day, only the latest is kept. Days without backups do not count.
Keep backups for the last ``<N>`` days. If there is more than one
backup for a single day, only the latest is kept.
``--keep-weekly <N>``
Keep backups for the last ``<N>`` weeks. If there is more than one backup for
a single week, only the latest is kept. Weeks without backup do not count.
Keep backups for the last ``<N>`` weeks. If there is more than one
backup for a single week, only the latest is kept.
.. note:: Weeks start on Monday and end on Sunday. The software
uses the `ISO week date`_ system and handles weeks at
the end of the year correctly.
``--keep-monthly <N>``
Keep backups for the last ``<N>`` months. If there is more than one backup for
a single month, only the latest is kept. Months without backups do not count.
Keep backups for the last ``<N>`` months. If there is more than one
backup for a single month, only the latest is kept.
``--keep-yearly <N>``
Keep backups for the last ``<N>`` years. If there is more than one backup for
a single year, only the latest is kept. Years without backups do not count.
Keep backups for the last ``<N>`` years. If there is more than one
backup for a single year, only the latest is kept.
The retention options are processed in the order given above. Each option
only covers backups within its time period. The next option does not take care
@ -841,25 +718,29 @@ Garbage Collection
------------------
The ``prune`` command removes only the backup index files, not the data
from the datastore. Deletion of unused backup data from the datastore is done by
:ref:`garbage collection <maintenance_gc>`. It is therefore recommended to
schedule garbage collection tasks on a regular basis. The working principle of
garbage collection is described in more detail in the related :ref:`background
section <gc_background>`.
from the datastore. This task is left to the garbage collection
command. It is recommended to carry out garbage collection on a regular basis.
To start garbage collection from the client side, run the following command:
.. code-block:: console
# proxmox-backup-client garbage-collect
The garbage collection works in two phases. In the first phase, all
data blocks that are still in use are marked. In the second phase,
unused data blocks are removed.
.. note:: This command needs to read all existing backup index files
and touches the complete chunk-store. This can take a long time
depending on the number of chunks and the speed of the underlying
disks.
The progress of the garbage collection will be displayed as shown in the example
below:
.. note:: The garbage collection will only remove chunks that haven't been used
for at least one day (exactly 24h 5m). This grace period is necessary because
chunks in use are marked by touching the chunk which updates the ``atime``
(access time) property. Filesystems are mounted with the ``relatime`` option
by default. This results in better performance by only updating the
``atime`` property if the last access has been at least 24 hours ago. The
downside is that touching a chunk within these 24 hours will not always
update its ``atime`` property.
Chunks in the grace period will be logged at the end of the garbage
collection task as *Pending removals*.
.. code-block:: console

View File

@ -44,8 +44,10 @@ web-interface/API or using the ``proxmox-backup-manager`` CLI tool.
Upload Custom Certificate
~~~~~~~~~~~~~~~~~~~~~~~~~
If you already have a certificate which you want to use for a `Proxmox Backup`_
host, you can simply upload that certificate over the web interface.
If you already have a certificate which you want to use for a Proxmox
Mail Gateway host, you can simply upload that certificate over the web
interface.
.. image:: images/screenshots/pbs-gui-certs-upload-custom.png
:target: _images/pbs-gui-certs-upload-custom.png

View File

@ -71,7 +71,7 @@ master_doc = 'index'
# General information about the project.
project = 'Proxmox Backup'
copyright = '2019-2025, Proxmox Server Solutions GmbH'
copyright = '2019-2023, Proxmox Server Solutions GmbH'
author = 'Proxmox Support Team'
# The version info for the project you're documenting acts as a replacement for
@ -108,16 +108,12 @@ man_pages = [
('config/datastore/man5', 'datastore.cfg', 'Datastore Configuration', [author], 5),
('config/domains/man5', 'domains.cfg', 'Realm Configuration', [author], 5),
('config/media-pool/man5', 'media-pool.cfg', 'Media Pool Configuration', [author], 5),
('config/node/man5', 'proxmox-backup.node.cfg', 'Proxmox Backup Server - Node Configuration', [author], 5),
('config/remote/man5', 'remote.cfg', 'Remote Server Configuration', [author], 5),
('config/sync/man5', 'sync.cfg', 'Synchronization Job Configuration', [author], 5),
('config/tape-job/man5', 'tape-job.cfg', 'Tape Job Configuration', [author], 5),
('config/tape/man5', 'tape.cfg', 'Tape Drive and Changer Configuration', [author], 5),
('config/user/man5', 'user.cfg', 'User Configuration', [author], 5),
('config/verification/man5', 'verification.cfg', 'Verification Job Configuration', [author], 5),
('config/prune/man5', 'prune.cfg', 'Prune Job Configuration', [author], 5),
('config/notifications/man5', 'notifications.cfg', 'Notification target/matcher configuration', [author], 5),
('config/notifications-priv/man5', 'notifications-priv.cfg', 'Notification target secrets', [author], 5),
]
@ -269,9 +265,6 @@ html_static_path = ['_static']
html_js_files = [
'custom.js',
]
html_css_files = [
'custom.css',
]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied

View File

@ -1,5 +1,3 @@
:orphan:
=======
acl.cfg
=======

View File

@ -1,5 +1,3 @@
:orphan:
=============
datastore.cfg
=============

View File

@ -23,5 +23,5 @@ For LDAP realms, the LDAP bind password is stored in ``ldap_passwords.json``.
user-classes inetorgperson,posixaccount,person,user
You can use the ``proxmox-backup-manager openid``, ``proxmox-backup-manager
ldap`` and ``proxmox-backup-manager ad`` commands to manipulate this file.
You can use the ``proxmox-backup-manager openid`` and ``proxmox-backup-manager ldap`` commands to manipulate
this file.

View File

@ -1,5 +1,3 @@
:orphan:
===========
domains.cfg
===========

View File

@ -1,5 +1,3 @@
:orphan:
==========================
media-pool.cfg
==========================

View File

@ -1,49 +0,0 @@
The file contains these options:
:acme: The ACME account to use on this node.
:acmedomain0: ACME domain.
:acmedomain1: ACME domain.
:acmedomain2: ACME domain.
:acmedomain3: ACME domain.
:acmedomain4: ACME domain.
:http-proxy: Set proxy for apt and subscription checks.
:email-from: Fallback email from which notifications will be sent.
:ciphers-tls-1.3: List of TLS ciphers for TLS 1.3 that will be used by the proxy. Colon-separated and in descending priority (https://docs.openssl.org/master/man1/openssl-ciphers/). (Proxy has to be restarted for changes to take effect.)
:ciphers-tls-1.2: List of TLS ciphers for TLS <= 1.2 that will be used by the proxy. Colon-separated and in descending priority (https://docs.openssl.org/master/man1/openssl-ciphers/). (Proxy has to be restarted for changes to take effect.)
:default-lang: Default language used in the GUI.
:description: Node description.
:task-log-max-days: Maximum days to keep task logs.
For example:
::
acme: local
acmedomain0: first.domain.com
acmedomain1: second.domain.com
acmedomain2: third.domain.com
acmedomain3: fourth.domain.com
acmedomain4: fifth.domain.com
http-proxy: internal.proxy.com
email-from: proxmox@mail.com
ciphers-tls-1.3: TLS_AES_128_GCM_SHA256:TLS_AES_128_CCM_8_SHA256:TLS_CHACHA20_POLY1305_SHA256
ciphers-tls-1.2: RSA_WITH_AES_128_CCM:DHE_RSA_WITH_AES_128_CCM
default-lang: en
description: Primary PBS instance
task-log-max-days: 30
You can use the ``proxmox-backup-manager node`` command to manipulate
this file.
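For example, to show the current node configuration and change one of the
options listed above (the exact subcommands and flags are an assumption here;
consult ``proxmox-backup-manager node help`` on your installation):
.. code-block:: console
   # proxmox-backup-manager node show
   # proxmox-backup-manager node update --default-lang en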

View File

@ -1,18 +0,0 @@
:orphan:
========
node.cfg
========
Description
===========
The file /etc/proxmox-backup/node.cfg is a configuration file for Proxmox
Backup Server. It contains the general configuration regarding this node.
Options
=======
.. include:: format.rst
.. include:: ../../pbs-copyright.rst

View File

@ -1 +0,0 @@
This file contains protected credentials for notification targets.

View File

@ -1,24 +0,0 @@
:orphan:
======================
notifications-priv.cfg
======================
Description
===========
The file /etc/proxmox-backup/notifications-priv.cfg is a configuration file
for Proxmox Backup Server. It contains the protected credentials used by
notification targets.
File Format
===========
.. include:: format.rst
Options
=======
.. include:: config.rst
.. include:: ../../pbs-copyright.rst

View File

@ -1,2 +0,0 @@
This file contains configuration for notification targets and notification
matchers.

View File

@ -1,24 +0,0 @@
:orphan:
==================
notifications.cfg
==================
Description
===========
The file /etc/proxmox-backup/notifications.cfg is a configuration file
for Proxmox Backup Server. It contains the configuration of the notification
system (notification targets and matchers).
File Format
===========
.. include:: format.rst
Options
=======
.. include:: config.rst
.. include:: ../../pbs-copyright.rst

View File

@ -1,14 +0,0 @@
Each entry starts with the header ``prune: <name>``, followed by the job
configuration options.
::
prune: prune-store2
schedule mon..fri 10:30
store my-datastore
prune: ...
You can use the ``proxmox-backup-manager prune-job`` command to manipulate this
file.
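For example, to list the configured prune jobs and change the schedule of the
job shown above (assuming the usual ``list``/``update`` subcommand pattern;
check ``proxmox-backup-manager prune-job help`` if it differs on your version):
.. code-block:: console
   # proxmox-backup-manager prune-job list
   # proxmox-backup-manager prune-job update prune-store2 --schedule daily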

View File

@ -1,23 +0,0 @@
:orphan:
=========
prune.cfg
=========
Description
===========
The file /etc/proxmox-backup/prune.cfg is a configuration file for Proxmox
Backup Server. It contains the prune job configuration.
File Format
===========
.. include:: format.rst
Options
=======
.. include:: config.rst
.. include:: ../../pbs-copyright.rst

View File

@ -1,5 +1,3 @@
:orphan:
==========
remote.cfg
==========

View File

@ -1,5 +1,3 @@
:orphan:
========
sync.cfg
========

View File

@ -1,5 +1,3 @@
:orphan:
============
tape-job.cfg
============

View File

@ -1,5 +1,3 @@
:orphan:
========
tape.cfg
========

View File

@ -1,5 +1,3 @@
:orphan:
========
user.cfg
========

View File

@ -1,5 +1,3 @@
:orphan:
================
verification.cfg
================
@ -7,8 +5,8 @@ verification.cfg
Description
===========
The file /etc/proxmox-backup/verification.cfg is a configuration file for
Proxmox Backup Server. It contains the verification job configuration.
The file /etc/proxmox-backup/sync.cfg is a configuration file for Proxmox
Backup Server. It contains the verification job configuration.
File Format
===========

View File

@ -67,61 +67,6 @@ Options
.. include:: config/media-pool/config.rst
``node.cfg``
~~~~~~~~~~~~~~~~~~
Options
^^^^^^^
.. include:: config/node/format.rst
.. _notifications.cfg:
``notifications.cfg``
~~~~~~~~~~~~~~~~~~~~~
File Format
^^^^^^^^^^^
.. include:: config/notifications/format.rst
Options
^^^^^^^
.. include:: config/notifications/config.rst
.. _notifications_priv.cfg:
``notifications-priv.cfg``
~~~~~~~~~~~~~~~~~~~~~~~~~~
File Format
^^^^^^^^^^^
.. include:: config/notifications-priv/format.rst
Options
^^^^^^^
.. include:: config/notifications-priv/config.rst
``prune.cfg``
~~~~~~~~~~~~~
File Format
^^^^^^^^^^^
.. include:: config/prune/format.rst
Options
^^^^^^^
.. include:: config/prune/config.rst
``tape.cfg``
~~~~~~~~~~~~

View File

@ -1,55 +0,0 @@
External Metric Server
----------------------
Proxmox Backup Server periodically sends various metrics about your host's memory,
network and disk activity to configured external metric servers.
Currently supported are:
* InfluxDB (HTTP) (see https://docs.influxdata.com/influxdb/v2/ )
* InfluxDB (UDP) (see https://docs.influxdata.com/influxdb/v1/ )
The external metric server definitions are saved in
'/etc/proxmox-backup/metricserver.cfg', and can be edited through the web
interface.
.. note::
Using HTTP is recommended as UDP support has been dropped in InfluxDB v2.
InfluxDB (HTTP) plugin configuration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The plugin can be configured to use the HTTP(s) API of InfluxDB 2.x.
InfluxDB 1.8.x does contain a forwards compatible API endpoint for this v2 API.
Since InfluxDB's v2 API is only available with authentication, you have
to generate a token that can write into the correct bucket and set it.
In the v2 compatible API of 1.8.x, you can use 'user:password' as token
(if required), and can omit the 'organization' since that has no meaning in InfluxDB 1.x.
You can also set the maximum batch size (default 25000000 bytes) with the
'max-body-size' setting (this corresponds to the InfluxDB setting with the
same name).
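A matching entry in '/etc/proxmox-backup/metricserver.cfg' could look roughly
like the following sketch. The property names shown here are an assumption and
may differ between versions; configuring the server through the web interface
avoids having to know them:
::
  influxdb-http: my-influxdb
      url https://influxdb.example.com:8086
      organization my-org
      bucket proxmox
      token <your-api-token>
      max-body-size 25000000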
InfluxDB (UDP) plugin configuration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Proxmox Backup Server can also send data via UDP. This requires the InfluxDB
server to be configured correctly. The MTU can also be configured here if
necessary.
Here is an example configuration for InfluxDB (on your InfluxDB server):
.. code-block:: console
[[udp]]
enabled = true
bind-address = "0.0.0.0:8089"
database = "proxmox"
batch-size = 1000
batch-timeout = "1s"
With this configuration, the InfluxDB server listens on all IP addresses on
port 8089, and writes the data in the *proxmox* database.

View File

@ -8,53 +8,7 @@ Proxmox File Archive Format (``.pxar``)
.. graphviz:: pxar-format-overview.dot
.. _pxar-meta-format:
Proxmox File Archive Format - Meta (``.mpxar``)
-----------------------------------------------
Pxar metadata archive with the same structure as a regular pxar archive, with
the exception that regular file payloads are not contained within the archive
itself, but rather stored as payload references to the corresponding pxar
payload (``.ppxar``) file.
It can be used to look up all the archive entries and metadata without the size
overhead introduced by the file payloads.
.. graphviz:: meta-format-overview.dot
.. _ppxar-format:
Proxmox File Archive Format - Payload (``.ppxar``)
--------------------------------------------------
Pxar payload file storing regular file payloads to be referenced and accessed
by the corresponding pxar metadata (``.mpxar``) archive. It contains a
concatenation of regular file payloads, each prefixed by a `PAYLOAD` header.
Further, the actual referenced payload entries might be separated by padding
(full/partial payloads that are not referenced), introduced when reusing chunks
of a previous backup run, when chunk boundaries did not align with payload
entry offsets.
All headers are stored as little-endian.
.. list-table::
:widths: auto
* - ``PAYLOAD_START_MARKER``
- header of ``[u8; 16]`` consisting of type hash and size;
marks start
* - ``PAYLOAD``
- header of ``[u8; 16]`` consisting of type hash and size;
referenced by metadata archive
* - Payload
- raw regular file payload
* - Padding
- partial/full unreferenced payloads, caused by unaligned chunk boundary
* - ...
- further concatenation of payload header, payload and padding
* - ``PAYLOAD_TAIL_MARKER``
- header of ``[u8; 16]`` consisting of type hash and size;
marks end
.. _data-blob-format:
Data Blob Format (``.blob``)

View File

@ -40,16 +40,6 @@ Proxmox Backup Server supports various languages and authentication back ends
.. note:: For convenience, you can save the username on the client side, by
selecting the "Save User name" checkbox at the bottom of the window.
.. _consent_banner:
Consent Banner
^^^^^^^^^^^^^^
A custom consent banner that has to be accepted before login can be configured
in **Configuration -> Other -> General -> Consent Text**. If there is no
content, the consent banner will not be displayed. The text will be stored as a
base64 string in the ``/etc/proxmox-backup/node.cfg`` config file.
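If you want to prepare the text on the command line, a plain base64 encoding of
a text file produces the string that ends up in ``node.cfg`` (the file name is
just an example):
.. code-block:: console
   # base64 -w0 consent-banner.txt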
GUI Overview
------------

Binary files not shown (8 image files; sizes before: 65 KiB, 143 KiB, 153 KiB, 141 KiB, 162 KiB, 164 KiB, 139 KiB and 4.6 KiB).

View File

@ -34,7 +34,6 @@ in the section entitled "GNU Free Documentation License".
maintenance.rst
sysadmin.rst
network-management.rst
notifications.rst
technical-overview.rst
faq.rst

View File

@ -1,157 +0,0 @@
.. _installation_medium:
Installation Medium
-------------------
Proxmox Backup Server can be installed via
:ref:`different methods <install_pbs>`. The recommended method is to use an
installation medium and simply boot the interactive installer.
Prepare Installation Medium
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Download the installer ISO image from |DOWNLOADS|.
The Proxmox Backup Server installation medium is a hybrid ISO image.
It works in two ways:
- An ISO image file ready to burn to a DVD.
- A raw sector (IMG) image file ready to copy to a USB flash drive (USB stick).
Using a USB flash drive to install Proxmox Backup Server is the
recommended way since it is the faster and more frequently available
option these days.
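Before writing the image to the installation medium, it is a good idea to
verify the download, for example by comparing its SHA-256 checksum to the one
published alongside the ISO (the file name below is a placeholder for the
actual release):
.. code-block:: console
   # sha256sum proxmox-backup-server_*.iso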
Prepare a USB Flash Drive as Installation Medium
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The flash drive needs to have at least 2 GB of storage space.
.. note::
Do not use *UNetbootin*. It does not work with the Proxmox Backup
Server installation image.
.. important::
Existing data on the USB flash drive will be overwritten.
Therefore, make sure that it does not contain any data you still need,
and unmount it again before proceeding.
Instructions for GNU/Linux
~~~~~~~~~~~~~~~~~~~~~~~~~~
On Unix-like operating systems use the ``dd`` command to copy the ISO
image to the USB flash drive. First find the correct device name of the
USB flash drive (see below). Then run the ``dd`` command. Depending on
your environment, you will need to have root privileges to execute
``dd``.
.. code-block:: console
# dd bs=1M conv=fdatasync if=./proxmox-backup-server_*.iso of=/dev/XYZ
.. note::
Be sure to replace ``/dev/XYZ`` with the correct device name and adapt
the input filename (*if*) path.
.. caution::
Be very careful, and do not overwrite the wrong disk!
Find the Correct USB Device Name
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
There are two ways to find out the name of the USB flash drive. The
first one is to compare the last lines of the ``dmesg`` command output
before and after plugging in the flash drive. The second way is to
compare the output of the ``lsblk`` command. Open a terminal and run:
.. code-block:: console
# lsblk
Then plug in your USB flash drive and run the command again:
.. code-block:: console
# lsblk
A new device will appear. This is the one you want to use. To be on the
extra safe side check if the reported size matches your USB flash drive.
Instructions for macOS
~~~~~~~~~~~~~~~~~~~~~~
Open the terminal (query *Terminal* in Spotlight).
Convert the ``.iso`` file to ``.dmg`` format using the convert option of
``hdiutil``, for example:
.. code-block:: console
# hdiutil convert proxmox-backup-server_*.iso -format UDRW -o proxmox-backup-server_*.dmg
.. note::
macOS tends to automatically add ``.dmg`` to the output file name.
To get the current list of devices run the command:
.. code-block:: console
# diskutil list
Now insert the USB flash drive and run this command again to determine
which device node has been assigned to it. (e.g., ``/dev/diskX``).
.. code-block:: console
# diskutil list
# diskutil unmountDisk /dev/diskX
.. note::
replace *X* with the disk number from the last command.
.. code-block:: console
# sudo dd if=proxmox-backup-server_*.dmg bs=1M of=/dev/rdiskX
.. note::
*rdiskX*, instead of *diskX*, in the last command is intended. It
will increase the write speed.
Instructions for Windows
~~~~~~~~~~~~~~~~~~~~~~~~
Using Etcher
^^^^^^^^^^^^
Etcher works out of the box. Download Etcher from https://etcher.io. It
will guide you through the process of selecting the ISO and your USB
flash drive.
Using Rufus
^^^^^^^^^^^
Rufus is a more lightweight alternative, but you need to use the **DD
mode** to make it work. Download Rufus from https://rufus.ie/. Either
install it or use the portable version. Select the destination drive
and the downloaded Proxmox ISO file.
.. important::
Once you click *Start*, you have to click *No* on the dialog asking to
download a different version of Grub. In the next dialog select **DD mode**.
Use the Installation Medium
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Insert the created USB flash drive (or DVD) into your server. Continue
by reading the :ref:`installer <using_the_installer>` chapter, which
also describes possible boot issues.

View File

@ -7,9 +7,7 @@ Debian_ from the provided package repository.
.. include:: system-requirements.rst
.. include:: installation-media.rst
.. _install_pbs:
.. include:: package-repositories.rst
Server Installation
-------------------
@ -20,48 +18,38 @@ for various management tasks such as disk management.
.. note:: You always need a backup server. It is not possible to use
Proxmox Backup without the server part.
Using our provided disk image (ISO file) is the recommended
installation method, as it includes a convenient installer, a complete
Debian system as well as all necessary packages for the Proxmox Backup
Server.
The disk image (ISO file) provided by Proxmox includes a complete Debian system
as well as all necessary packages for the Proxmox Backup Server.
Once you have created an :ref:`installation_medium`, the booted
:ref:`installer <using_the_installer>` will guide you through the
setup process. It will help you to partition your disks, apply basic
settings such as the language, time zone and network configuration,
and finally install all required packages within minutes.
The installer will guide you through the setup process and allow
you to partition the local disk(s), apply basic system configuration
(for example timezone, language, network), and install all required packages.
The provided ISO will get you started in just a few minutes, and is the
recommended method for new and existing users.
As an alternative to the interactive installer, advanced users may
wish to install Proxmox Backup Server
:ref:`unattended <install_pbs_unattended>`.
Alternatively, Proxmox Backup Server can be installed on top of an
existing Debian system.
With sufficient Debian knowledge, you can also install Proxmox Backup
Server :ref:`on top of Debian <install_pbs_on_debian>` yourself.
Install `Proxmox Backup`_ Server using the Installer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
While not recommended, Proxmox Backup Server could also be installed
:ref:`on Proxmox VE <install_pbs_on_pve>`.
Download the ISO from |DOWNLOADS|.
It includes the following:
.. include:: using-the-installer.rst
* The Proxmox Backup Server installer, which partitions the local
disk(s) with ext4, xfs or ZFS, and installs the operating system
.. _install_pbs_unattended:
* Complete operating system (Debian Linux, 64-bit)
Install `Proxmox Backup`_ Server Unattended
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
It is possible to install Proxmox Backup Server automatically in an
unattended manner. This enables you to fully automate the setup process on
bare-metal. Once the installation is complete and the host has booted up,
automation tools like Ansible can be used to further configure the installation.
* Proxmox Linux kernel with ZFS support
The necessary options for the installer must be provided in an answer file.
This file allows the use of filter rules to determine which disks and network
cards should be used.
* Complete tool-set to administer backups and all necessary resources
To use the automated installation, it is first necessary to prepare an
installation ISO. For more details and information on the unattended
installation see `our wiki
<https://pve.proxmox.com/wiki/Automated_Installation>`_.
* Web based management interface
.. note:: During the installation process, the complete server
is used by default and all existing data is removed.
.. _install_pbs_on_debian:
Install `Proxmox Backup`_ Server on Debian
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@ -99,8 +87,6 @@ support, and a set of common and useful packages.
your web browser, using HTTPS on port 8007. For example at
``https://<ip-or-dns-name>:8007``
.. _install_pbs_on_pve:
Install Proxmox Backup Server on `Proxmox VE`_
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@ -121,8 +107,6 @@ After configuring the
your web browser, using HTTPS on port 8007. For example at
``https://<ip-or-dns-name>:8007``
.. _install_pbc:
Client Installation
-------------------
@ -138,26 +122,7 @@ you need to run:
# apt update
# apt install proxmox-backup-client
Install Statically Linked Proxmox Backup Client
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Proxmox provides a statically linked build of the Proxmox backup client that
should run on any modern x86-64 Linux system.
.. note:: The client-only repository should be usable by most recent Debian and
Ubuntu derivatives.
It is currently available as a Debian package. After configuring the
:ref:`package_repositories_client_only_apt`, you need to run:
.. code-block:: console
# apt update
# apt install proxmox-backup-client-static
This package conflicts with the `proxmox-backup-client` package, as both
provide the client as an executable in the `/usr/bin/proxmox-backup-client`
path.
You can copy this executable to other, e.g. non-Debian based Linux systems.
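For example, to copy it to another machine over SSH (host name and target path
are placeholders):
.. code-block:: console
   # scp /usr/bin/proxmox-backup-client root@other-host:/usr/local/bin/proxmox-backup-client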
For details on using the Proxmox Backup Client, see :ref:`client_usage`.
.. include:: package-repositories.rst

View File

@ -121,7 +121,7 @@ Create a new pool with cache (L2ARC)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
It is possible to use a dedicated cache drive partition to increase
the read performance (use SSDs).
the performance (use SSD).
For `<device>`, you can use multiple devices, as is shown in
"Create a new pool with RAID*".
@ -134,7 +134,7 @@ Create a new pool with log (ZIL)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
It is possible to use a dedicated cache drive partition to increase
the write performance (use SSDs).
the performance (SSD).
For `<device>`, you can use multiple devices, as is shown in
"Create a new pool with RAID*".
@ -264,7 +264,6 @@ systems with more than 256 GiB of total memory, where simply setting
# update-initramfs -u
.. _zfs_swap:
Swap on ZFS
^^^^^^^^^^^

View File

@ -46,23 +46,6 @@ Ext.define('LabelSetupPanel', {
let params = view.getValues();
list.getStore().add(params);
},
validitychange: function() {
let me = this;
let isValid = true;
me.getView().query('field').forEach((field) => {
if (!field.isValid()) {
isValid = false;
}
});
me.lookup('addButton').setDisabled(!isValid);
},
control: {
'field': {
validitychange: 'validitychange',
},
},
},
items: [
@ -84,7 +67,7 @@ Ext.define('LabelSetupPanel', {
xtype: 'ltoTapeType',
name: 'tape_type',
fieldLabel: 'Type',
value: 'L9',
value: 'L8',
},
{
xtype: 'ltoLabelStyle',
@ -110,7 +93,6 @@ Ext.define('LabelSetupPanel', {
{
xtype: 'button',
text: 'Add',
reference: 'addButton',
handler: 'onAdd',
},
],

View File

@ -108,7 +108,7 @@ Ext.define('PageCalibration', {
xtype: 'numberfield',
value: 'a4',
name: 's_x',
fieldLabel: 'Measured Start Offset Sx (mm)',
fieldLabel: 'Meassured Start Offset Sx (mm)',
allowBlank: false,
labelWidth: 200,
},
@ -116,7 +116,7 @@ Ext.define('PageCalibration', {
xtype: 'numberfield',
value: 'a4',
name: 'd_x',
fieldLabel: 'Measured Length Dx (mm)',
fieldLabel: 'Meassured Length Dx (mm)',
allowBlank: false,
labelWidth: 200,
},
@ -124,7 +124,7 @@ Ext.define('PageCalibration', {
xtype: 'numberfield',
value: 'a4',
name: 's_y',
fieldLabel: 'Measured Start Offset Sy (mm)',
fieldLabel: 'Meassured Start Offset Sy (mm)',
allowBlank: false,
labelWidth: 200,
},
@ -132,7 +132,7 @@ Ext.define('PageCalibration', {
xtype: 'numberfield',
value: 'a4',
name: 'd_y',
fieldLabel: 'Measured Length Dy (mm)',
fieldLabel: 'Meassured Length Dy (mm)',
allowBlank: false,
labelWidth: 200,
},

View File

@ -11,20 +11,12 @@ Ext.define('LtoTapeType', {
store: {
field: ['value', 'text'],
data: [
{ value: 'L9', text: "LTO-9" },
{ value: 'LZ', text: "LTO-9 (WORM)" },
{ value: 'L8', text: "LTO-8" },
{ value: 'LY', text: "LTO-8 (WORM)" },
{ value: 'L7', text: "LTO-7" },
{ value: 'LX', text: "LTO-7 (WORM)" },
{ value: 'L6', text: "LTO-6" },
{ value: 'LW', text: "LTO-6 (WORM)" },
{ value: 'L5', text: "LTO-5" },
{ value: 'LV', text: "LTO-5 (WORM)" },
{ value: 'L4', text: "LTO-4" },
{ value: 'LU', text: "LTO-4 (WORM)" },
{ value: 'L3', text: "LTO-3" },
{ value: 'LT', text: "LTO-3 (WORM)" },
{ value: 'CU', text: "Cleaning Unit" },
],
},

View File

@ -6,64 +6,35 @@ Maintenance Tasks
Pruning
-------
Prune lets you specify which backup snapshots you want to keep, removing others.
When pruning a snapshot, only the snapshot metadata (manifest, indices, blobs,
log and notes) is removed. The chunks containing the actual backup data, which
were previously referenced by the pruned snapshot, have to be removed by a
garbage collection run.
.. Caution:: Take into consideration that sensitive information stored in a
given data chunk will outlive pruned snapshots and remain present in the
datastore as long as referenced by at least one backup snapshot. Further,
*even* if no snapshot references a given chunk, it will remain present until
removed by the garbage collection.
Moreover, file-level backups created using the change detection mode
``metadata`` can reference backup chunks containing files which have vanished
since the previous backup. These files might still be accessible by reading
the chunks' raw data (client- or server-side).
To remove chunks containing sensitive data, prune any snapshot made while the
data was part of the backup input and run a garbage collection. Further, if
using file-based backups with change detection mode ``metadata``,
additionally prune all snapshots created since the sensitive data was no longer
part of the backup input, and run a garbage collection.
The no longer referenced chunks will then be marked for deletion on the next
garbage collection run and removed by a subsequent run after the grace
period.
The following retention options are available for pruning:
Prune lets you specify which backup snapshots you want to keep.
The following retention options are available:
``keep-last <N>``
Keep the last ``<N>`` backup snapshots.
``keep-hourly <N>``
Keep backups for the last ``<N>`` hours. If there is more than one backup for
a single hour, only the latest is retained. Hours without backups do not
count.
Keep backups for the last ``<N>`` hours. If there is more than one
backup for a single hour, only the latest is retained.
``keep-daily <N>``
Keep backups for the last ``<N>`` days. If there is more than one backup for a
single day, only the latest is retained. Days without backups do not count.
Keep backups for the last ``<N>`` days. If there is more than one
backup for a single day, only the latest is retained.
``keep-weekly <N>``
Keep backups for the last ``<N>`` weeks. If there is more than one backup for
a single week, only the latest is retained. Weeks without backup do not count.
Keep backups for the last ``<N>`` weeks. If there is more than one
backup for a single week, only the latest is retained.
.. note:: Weeks start on Monday and end on Sunday. The software
uses the `ISO week date`_ system and handles weeks at
the end of the year correctly.
``keep-monthly <N>``
Keep backups for the last ``<N>`` months. If there is more than one backup for
a single month, only the latest is retained. Months without backups do not
count.
Keep backups for the last ``<N>`` months. If there is more than one
backup for a single month, only the latest is retained.
``keep-yearly <N>``
Keep backups for the last ``<N>`` years. If there is more than one backup for
a single year, only the latest is retained. Years without backups do not
count.
Keep backups for the last ``<N>`` years. If there is more than one
backup for a single year, only the latest is retained.
The retention options are processed in the order given above. Each option
only covers backups within its time period. The next option does not take care
@ -197,8 +168,6 @@ It's recommended to setup a schedule to ensure that unused space is cleaned up
periodically. For most setups a weekly schedule provides a good interval to
start.
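On the command line, such a schedule can be set per datastore via its
``gc-schedule`` option, for example (the datastore name is a placeholder):
.. code-block:: console
   # proxmox-backup-manager datastore update <store-name> --gc-schedule 'sat 18:15'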
.. _gc_background:
GC Background
^^^^^^^^^^^^^
@ -224,31 +193,17 @@ datastore or interfering with other backups.
The garbage collection (GC) process is performed per datastore and is split
into two phases:
- Phase one (Mark):
- Phase one: Mark
All index files are read, and the access time of the referred chunk files is
updated.
All index files are read, and the access time (``atime``) of the referenced
chunk files is updated.
- Phase two (Sweep):
The task iterates over all chunks and checks their file access time against a
cutoff time. The cutoff time is given by either the oldest backup writer
instance, if present, or 24 hours and 5 minutes before the start of the
garbage collection.
Garbage collection considers chunk files with access time older than the
cutoff time to be neither referenced by any backup snapshot's index, nor part
of any currently running backup job. Therefore, these chunks can safely be
deleted.
Chunks within the grace period will not be deleted; they are logged at the end
of the garbage collection task as *Pending removals*.
.. note:: The grace period for backup chunk removal is not arbitrary, but stems
from the fact that filesystems are typically mounted with the ``relatime``
option by default. This results in better performance by only updating the
``atime`` property if a file has been modified since the last access or the
last access has been at least 24 hours ago.
- Phase two: Sweep
The task iterates over all chunks, checks their file access time, and if it
is older than the cutoff time (i.e., the time when GC started, plus some
headroom for safety and Linux file system behavior), the task knows that the
chunk was neither referred to in any backup index nor part of any currently
running backup that has no index to scan for. As such, the chunk can be
safely deleted.
Manually Starting GC
^^^^^^^^^^^^^^^^^^^^
@ -319,10 +274,26 @@ the **Actions** column in the table.
Notifications
-------------
Proxmox Backup Server can send you notifications about automatically
Proxmox Backup Server can send you notification emails about automatically
scheduled verification, garbage-collection and synchronization tasks results.
Refer to the :ref:`notifications` chapter for more details.
By default, notifications are sent to the email address configured for the
`root@pam` user. You can instead set this user for each datastore.
.. image:: images/screenshots/pbs-gui-datastore-options.png
:target: _images/pbs-gui-datastore-options.png
:align: right
:alt: Datastore Options
You can also change the level of notifications received per task type; the
following options are available:
* Always: send a notification for any scheduled task, independent of the
outcome
* Errors: send a notification for any scheduled task that results in an error
* Never: do not send any notification at all
.. _maintenance_mode:

View File

@ -69,13 +69,6 @@ sync-job`` command. The configuration information for sync jobs is stored at
in the GUI, or use the ``create`` subcommand. After creating a sync job, you can
either start it manually from the GUI or provide it with a schedule (see
:ref:`calendar-event-scheduling`) to run regularly.
Backup snapshots, groups and namespaces which are no longer available on the
**Remote** datastore can be removed from the local datastore as well by setting
the ``remove-vanished`` option for the sync job.
Setting the ``verified-only`` or ``encrypted-only`` flags allows you to limit
sync jobs to backup snapshots which have been verified or encrypted,
respectively. This is of particular interest when sending backups to a less
trusted remote backup server.
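For example, restricting an existing sync job to verified snapshots could look
like this (a sketch, assuming the flag is set as a boolean):

.. code-block:: console
# proxmox-backup-manager sync-job update ID --verified-only true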
.. code-block:: console
@ -123,28 +116,8 @@ of the specified criteria are synced. The available criteria are:
The same filter is applied to local groups, for handling of the
``remove-vanished`` option.
A ``group-filter`` can be inverted by prepending ``exclude:`` to it.
* Regular expression example, excluding the match:
.. code-block:: console
# proxmox-backup-manager sync-job update ID --group-filter exclude:regex:'^vm/1\d{2,3}$'
When mixing include and exclude filters, the following rules apply (see the example after this list):
- no filters: all backup groups
- include: only those matching the include filters
- exclude: all but those matching the exclude filters
- both: those matching the include filters, but without those matching the exclude filters
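For example, to sync all virtual machine groups except those matching a regular
expression, include and exclude filters can be combined. This is a sketch; the
``type:vm`` criterion and the repetition of ``--group-filter`` are assumptions
based on the criteria described above:

.. code-block:: console
# proxmox-backup-manager sync-job update ID --group-filter type:vm --group-filter exclude:regex:'^vm/1\d{2,3}$'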
.. note:: The ``protected`` flag of remote backup snapshots will not be synced.
Enabling the advanced option 'resync-corrupt' will re-sync all snapshots that have
failed to verify during the last :ref:`maintenance_verification`. Hence, a verification
job needs to be run before a sync job with 'resync-corrupt' can be carried out.
Be aware that a sync job with 'resync-corrupt' enabled needs to check the
manifests of all snapshots in a datastore and might take much longer than
regular sync jobs.
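For example, enabling it on an existing sync job (a sketch, assuming the option
is set as a boolean):

.. code-block:: console
# proxmox-backup-manager sync-job update ID --resync-corrupt true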
Namespace Support
^^^^^^^^^^^^^^^^^
@ -231,52 +204,9 @@ Bandwidth Limit
Syncing a datastore to an archive can produce a lot of traffic and impact other
users of the network. In order to avoid network or storage congestion, you can
limit the bandwidth of a sync job in pull direction by setting the ``rate-in``
option either in the web interface or using the ``proxmox-backup-manager``
command-line tool:
limit the bandwidth of the sync job by setting the ``rate-in`` option either in
the web interface or using the ``proxmox-backup-manager`` command-line tool:
.. code-block:: console
# proxmox-backup-manager sync-job update ID --rate-in 20MiB
For sync jobs in push direction use the ``rate-out`` option instead.
Sync Direction Push
^^^^^^^^^^^^^^^^^^^
Sync jobs can be configured for pull or push direction. Sync jobs in push
direction do not behave identically, because of the limited access to the
target datastore via the remote server's API. Most notably, pushed content will
always be owned by the user configured in the remote configuration,
independent of the local user configured in the sync job. The latter is used
exclusively for permission and scope checks on the pushing side.
.. note:: It is strongly advised to create a dedicated remote configuration for
each individual sync job in push direction, using a dedicated user on the
remote. Otherwise, sync jobs pushing to the same target might remove each
other's snapshots and/or groups if the ``remove-vanished`` flag is set, or skip
snapshots if the backup time is not incremental.
This is because the backup groups on the target are owned by the user
given in the remote configuration.
The following permissions are required for a sync job in push direction:
#. ``Remote.Audit`` on ``/remote/{remote}`` and ``Remote.DatastoreBackup`` on
``/remote/{remote}/{remote-store}/{remote-ns}`` path or subnamespace.
#. At least ``Datastore.Read`` and ``Datastore.Audit`` on the local source
datastore namespace (``/datastore/{store}/{ns}``) or ``Datastore.Backup`` if
owner of the sync job.
#. ``Remote.DatastorePrune`` on ``/remote/{remote}/{remote-store}/{remote-ns}``
path to remove vanished snapshots and groups. Make sure to use a dedicated
remote for each sync job in push direction as noted above.
#. ``Remote.DatastoreModify`` on ``/remote/{remote}/{remote-store}/{remote-ns}``
path to remove vanished namespaces. A remote user with limited access should
be used on the remote backup server instance. Consider the implications as
noted below.
.. note:: ``Remote.DatastoreModify`` will allow removing whole namespaces on the
remote target datastore, independent of ownership. Make sure the user
configured in remote.cfg has limited permissions on the remote side.
.. note:: Sync jobs in push direction require namespace support on the remote
Proxmox Backup Server instance (minimum version 2.2).
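As a rough sketch, a sync job in push direction could be created on the command
line as shown below; the ``sync-direction`` parameter and the job, remote and
datastore names are assumptions for illustration and may differ on your version:

.. code-block:: console
# proxmox-backup-manager sync-job create push-job1 --store store1 --remote remote1 --remote-store store2 --sync-direction push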

View File

@ -1,50 +0,0 @@
digraph g {
graph [
rankdir = "LR"
fontname="Helvetica"
];
node [
fontsize = "16"
shape = "record"
];
edge [
];
"archive" [
label = "archive.mpxar"
shape = "record"
];
"rootdir" [
label = "<fv>FORMAT_VERSION\l|PRELUDE\l|<f0>ENTRY\l|\{XATTR\}\* extended attribute list\l|\{ACL_USER\}\* USER ACL entries\l|\{ACL_GROUP\}\* GROUP ACL entries\l|\[ACL_GROUP_OBJ\] the ACL_GROUP_OBJ \l|\[ACL_DEFAULT\] the various default ACL fields\l|\{ACL_DEFAULT_USER\}\* USER ACL entries\l|\{ACL_DEFAULT_GROUP\}\* GROUP ACL entries\l|\[FCAPS\] file capability in Linux disk format\l|\[QUOTA_PROJECT_ID\] the ext4/xfs quota project ID\l|{<pl> PAYLOAD_REF|SYMLINK|DEVICE|{<de> \{DirectoryEntries\}\*|GOODBYE}}"
shape = "record"
];
"entry" [
label = "<f0> size: u64 = 64\l|type: u64 = ENTRY\l|feature_flags: u64\l|mode: u64\l|flags: u64\l|uid: u64\l|gid: u64\l|mtime: u64\l"
labeljust = "l"
shape = "record"
];
"direntry" [
label = "<f0> FILENAME\l|{ENTRY\l|HARDLINK\l}"
shape = "record"
];
"payloadrefentry" [
label = "<f0> offset: u64\l|size: u64\l"
shape = "record"
];
"archive" -> "rootdir":fv
"rootdir":f0 -> "entry":f0
"rootdir":de -> "direntry":f0
"rootdir":pl -> "payloadrefentry":f0
}

View File

@ -1,388 +0,0 @@
.. _notifications:
Notifications
=============
Overview
--------
* Proxmox Backup Server emits :ref:`notification_events` in case of noteworthy
events in the system. These events are handled by the notification system. A
notification event has metadata, for example a timestamp, a severity level, a
type and other metadata fields.
* :ref:`notification_matchers` route a notification event to one or more
notification targets. A matcher can have match rules to selectively route
based on the metadata of a notification event.
* :ref:`notification_targets` are destinations to which a notification event
is routed by a matcher. There are multiple types of targets, for example
mail-based (Sendmail and SMTP), Gotify and Webhook.
Datastores and tape backup jobs have a configurable :ref:`notification_mode`.
It allows you to choose between the notification system and a legacy mode for
sending notification emails. The legacy mode is equivalent to the way
notifications were handled before Proxmox Backup Server 3.2.
The notification system can be configured in the GUI under *Configuration →
Notifications*. The configuration is stored in :ref:`notifications.cfg` and
:ref:`notifications_priv.cfg` - the latter contains sensitive configuration
options such as passwords or authentication tokens for notification targets and
can only be read by ``root``.
.. _notification_targets:
Notification Targets
--------------------
Proxmox Backup Server offers multiple types of notification targets.
.. _notification_targets_sendmail:
Sendmail
^^^^^^^^
The sendmail binary is a program commonly found on Unix-like operating systems
that handles the sending of email messages. It is a command-line utility that
allows users and applications to send emails directly from the command line or
from within scripts.
The sendmail notification target uses the ``sendmail`` binary to send emails to
a list of configured users or email addresses. If a user is selected as a
recipient, the email address configured in the user's settings will be used. For
the ``root@pam`` user, this is the email address entered during installation. A
user's email address can be configured in ``Configuration → Access Control →
User Management``. If a user has no associated email address, no email will be
sent.
.. NOTE:: In standard Proxmox Backup Server installations, the ``sendmail``
binary is provided by Postfix. It may be necessary to configure Postfix so
that it can deliver mails correctly - for example by setting an external
mail relay (smart host). In case of failed delivery, check the system logs
for messages logged by the Postfix daemon.
See :ref:`notifications.cfg` for all configuration options.
.. _notification_targets_smtp:
SMTP
^^^^
SMTP notification targets can send emails directly to an SMTP mail relay. This
target does not use the system's MTA to deliver emails. Similar to sendmail
targets, if a user is selected as a recipient, the user's configured email
address will be used.
.. NOTE:: Unlike sendmail targets, SMTP targets do not have any queuing/retry
mechanism in case of a failed mail delivery.
See :ref:`notifications.cfg` for all configuration options.
.. _notification_targets_gotify:
Gotify
^^^^^^
`Gotify <http://gotify.net>`_ is an open-source self-hosted notification server
that allows you to send push notifications to various devices and applications.
It provides a simple API and web interface, making it easy to integrate with
different platforms and services.
.. NOTE:: Gotify targets will respect the HTTP proxy settings from
Configuration → Other → HTTP proxy
See :ref:`notifications.cfg` for all configuration options.
.. _notification_targets_webhook:
Webhook
^^^^^^^
Webhook notification targets perform HTTP requests to a configurable URL.
The following configuration options are available:
* ``url``: The URL to which to perform the HTTP requests. Supports templating
to inject message contents, metadata and secrets.
* ``method``: HTTP Method to use (POST/PUT/GET)
* ``header``: Array of HTTP headers that should be set for the request.
Supports templating to inject message contents, metadata and secrets.
* ``body``: HTTP body that should be sent. Supports templating to inject
message contents, metadata and secrets.
* ``secret``: Array of secret key-value pairs. These will be stored in a
protected configuration file only readable by root. Secrets can be
accessed in body/header/URL templates via the ``secrets`` namespace.
* ``comment``: Comment for this target.
For configuration options that support templating, the `Handlebars
<https://handlebarsjs.com>`_ syntax can be used to access the following
properties:
* ``{{ title }}``: The rendered notification title
* ``{{ message }}``: The rendered notification body
* ``{{ severity }}``: The severity of the notification (``info``, ``notice``,
``warning``, ``error``, ``unknown``)
* ``{{ timestamp }}``: The notification's timestamp as a UNIX epoch (in
seconds).
* ``{{ fields.<name> }}``: Sub-namespace for any metadata fields of the
notification. For instance, ``fields.type`` contains the notification
type - for all available fields refer to :ref:`notification_events`.
* ``{{ secrets.<name> }}``: Sub-namespace for secrets. For instance, a secret
named ``token`` is accessible via ``secrets.token``.
For convenience, the following helpers are available:
* ``{{ url-encode <value/property> }}``: URL-encode a property/literal.
* ``{{ escape <value/property> }}``: Escape any control characters that cannot
be safely represented as a JSON string.
* ``{{ json <value/property> }}``: Render a value as JSON. This can be useful
to pass a whole sub-namespace (e.g. ``fields``) as a part of a JSON payload
(e.g. ``{{ json fields }}``).
.. NOTE:: Webhook targets will respect the HTTP proxy settings from
Configuration → Other → HTTP proxy
Example - ntfy.sh
"""""""""""""""""
* Method: ``POST``
* URL: ``https://ntfy.sh/{{ secrets.channel }}``
* Headers:
* ``Markdown``: ``Yes``
* Body::
```
{{ message }}
```
* Secrets:
* ``channel``: ``<your ntfy.sh channel>``
Example - Discord
"""""""""""""""""
* Method: ``POST``
* URL: ``https://discord.com/api/webhooks/{{ secrets.token }}``
* Headers:
* ``Content-Type``: ``application/json``
* Body::
{
"content": "``` {{ escape message }}```"
}
* Secrets:
* ``token``: ``<token>``
Example - Slack
"""""""""""""""
* Method: ``POST``
* URL: ``https://hooks.slack.com/services/{{ secrets.token }}``
* Headers:
* ``Content-Type``: ``application/json``
* Body::
{
"text": "``` {{escape message}}```",
"type": "mrkdwn"
}
* Secrets:
* ``token``: ``<token>``
.. _notification_matchers:
Notification Matchers
---------------------
Notification matchers route notifications to notification targets based on
their matching rules. These rules can match certain properties of a
notification, such as the timestamp (``match-calendar``), the severity of the
notification (``match-severity``) or metadata fields (``match-field``). If a
notification is matched by a matcher, all targets configured for the matcher
will receive the notification.
An arbitrary number of matchers can be created, each with their own
matching rules and targets to notify. Every target is notified at most once for
every notification, even if the target is used in multiple matchers.
A matcher without rules matches any notification; the configured targets will
always be notified.
See :ref:`notifications.cfg` for all configuration options.
Calendar Matching Rules
^^^^^^^^^^^^^^^^^^^^^^^
A calendar matcher matches a notification's timestamp.
Examples:
* ``match-calendar 8-12``
* ``match-calendar 8:00-15:30``
* ``match-calendar mon-fri 9:00-17:00``
* ``match-calendar sun,tue-wed,fri 9-17``
Field Matching Rules
^^^^^^^^^^^^^^^^^^^^
Notifications have a selection of metadata fields that can be matched. When
using ``exact`` as a matching mode, a ``,`` can be used as a separator. The
matching rule then matches if the metadata field has **any** of the specified
values.
Examples:
* ``match-field exact:type=gc`` Only match notifications for garbage collection
jobs
* ``match-field exact:type=prune,verify`` Match prune job and verification job
notifications.
* ``match-field regex:datastore=^backup-.*$`` Match any datastore starting with
``backup``.
If a notification does not have the matched field, the rule will **not** match.
For instance, a ``match-field regex:datastore=.*`` directive will match any
notification that has a ``datastore`` metadata field, but will not match if the
field does not exist.
Severity Matching Rules
^^^^^^^^^^^^^^^^^^^^^^^
A notification has an associated severity that can be matched.
Examples:
* ``match-severity error``: Only match errors
* ``match-severity warning,error``: Match warnings and errors
The following severities are in use:
``info``, ``notice``, ``warning``, ``error``, ``unknown``.
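As an illustrative sketch only (the authoritative syntax is documented in
:ref:`notifications.cfg`; the matcher and target names here are made up), a
matcher routing garbage collection errors to a single target could look roughly
like this::

matcher: gc-errors
mode all
match-field exact:type=gc
match-severity error
target mail-to-admin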
.. _notification_events:
Notification Events
-------------------
The following table contains a list of all notification events in Proxmox
Backup Server, their type, severity and additional metadata fields. ``type`` as
well as any other metadata field may be used in ``match-field`` match rules.
================================ ==================== ========== ==============================================================
Event ``type`` Severity Metadata fields (in addition to ``type``)
================================ ==================== ========== ==============================================================
ACME certificate renewal failed ``acme`` ``error`` ``hostname``
Garbage collection failure ``gc`` ``error`` ``datastore``, ``hostname``
Garbage collection success ``gc`` ``info`` ``datastore``, ``hostname``
Package updates available ``package-updates`` ``info`` ``hostname``
Prune job failure ``prune`` ``error`` ``datastore``, ``hostname``, ``job-id``
Prune job success ``prune`` ``info`` ``datastore``, ``hostname``, ``job-id``
Remote sync failure ``sync`` ``error`` ``datastore``, ``hostname``, ``job-id``
Remote sync success ``sync`` ``info`` ``datastore``, ``hostname``, ``job-id``
Tape backup job failure ``tape-backup`` ``error`` ``datastore``, ``hostname``, ``media-pool``, ``job-id``
Tape backup job success ``tape-backup`` ``info`` ``datastore``, ``hostname``, ``media-pool``, ``job-id``
Tape loading request ``tape-load`` ``notice`` ``hostname``
Verification job failure ``verification`` ``error`` ``datastore``, ``hostname``, ``job-id``
Verification job success ``verification`` ``info`` ``datastore``, ``hostname``, ``job-id``
================================ ==================== ========== ==============================================================
The following table contains a description of all metadata fields in use. All of
these can be used in ``match-field`` match rules.
==================== ===================================
Metadata field Description
==================== ===================================
``datastore`` The name of the datastore
``hostname`` The hostname of the backup server
``job-id`` Job ID
``media-pool`` The name of the tape media pool
``type`` Notification event type
==================== ===================================
.. NOTE:: The daily task checking for any available system updates only sends
notifications if the node has an active subscription.
System Mail Forwarding
----------------------
Certain local system daemons, such as ``smartd``, send notification emails to
the local ``root`` user. Proxmox Backup Server will feed these mails into the
notification system as a notification of type ``system-mail`` and with severity
``unknown``.
When the email is forwarded to a sendmail target, the mail's content and
headers are forwarded as-is. For all other targets, the system tries to extract
both a subject line and the main text body from the email content. In instances
where emails solely consist of HTML content, they will be transformed into
plain text format during this process.
Permissions
-----------
In order to modify/view the configuration for notification targets, the
``Sys.Modify/Sys.Audit`` permissions are required for the
``/system/notifications`` ACL node.
.. _notification_mode:
Notification Mode
-----------------
Datastores and tape backup/restore job configuration have a
``notification-mode`` option which can have one of two values:
* ``legacy-sendmail``: Send notification emails via the system's ``sendmail``
command. The notification system will be bypassed and any configured
targets/matchers will be ignored. This mode is equivalent to the notification
behavior for versions before Proxmox Backup Server 3.2.
* ``notification-system``: Use the new, flexible notification system.
If the ``notification-mode`` option is not set, Proxmox Backup Server will
default to ``legacy-sendmail``.
Starting with Proxmox Backup Server 3.2, a datastore created in the UI will
automatically opt in to the new notification system. If the datastore is
created via the API or the ``proxmox-backup-manager`` CLI, the
``notification-mode`` option has to be set explicitly to
``notification-system`` if the notification system is to be used.
The ``legacy-sendmail`` mode might be removed in a later release of
Proxmox Backup Server.
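For an existing datastore, the mode can be switched on the command line, for
example:

.. code-block:: console
# proxmox-backup-manager datastore update <storename> --notification-mode notification-system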
Settings for ``legacy-sendmail`` notification mode
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
If ``notification-mode`` is set to ``legacy-sendmail``, Proxmox Backup Server
will send notification emails via the system's ``sendmail`` command to the
email address configured for the user set in the ``notify-user`` option
(falling back to ``root@pam`` if not set).
For datastores, you can also change the level of notifications received per
task type via the ``notify`` option.
* Always: send a notification for any scheduled task, independent of the
outcome
* Errors: send a notification for any scheduled task that results in an error
* Never: do not send any notification at all
The ``notify-user`` and ``notify`` options are ignored if ``notification-mode``
is set to ``notification-system``.
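A sketch of setting these legacy options on the command line; the value syntax
for ``notify`` and the user name are assumptions for illustration:

.. code-block:: console
# proxmox-backup-manager datastore update <storename> --notify-user admin@pbs --notify gc=errors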
Overriding Notification Templates
---------------------------------
Proxmox Backup Server uses Handlebars templates to render notifications. The
original templates provided by Proxmox Backup Server are stored in
``/usr/share/proxmox-backup/templates/default/``.
Notification templates can be overridden by providing a custom template file in
the override directory at
``/etc/proxmox-backup/notification-templates/default/``. When rendering a
notification of a given type, Proxmox Backup Server will first attempt to load
a template from the override directory. If it does not exist or fails to
render, the original template will be used.
The template files follow the naming convention of
``<type>-<body|subject>.txt.hbs``. For instance, the file
``gc-err-body.txt.hbs`` contains the template for rendering notifications for
garbage collection errors, while ``package-updates-subject.txt.hbs`` is used to
render the subject line of notifications for available package updates.
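For example, to override the body template for garbage collection errors, the
shipped template can be copied into the override directory and edited there:

.. code-block:: console
# mkdir -p /etc/proxmox-backup/notification-templates/default
# cp /usr/share/proxmox-backup/templates/default/gc-err-body.txt.hbs /etc/proxmox-backup/notification-templates/default/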

View File

@ -149,7 +149,7 @@ Currently there's only a client-repository for APT based systems.
.. _package_repositories_client_only_apt:
APT-based Proxmox Backup Client Repository
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
++++++++++++++++++++++++++++++++++++++++++
For modern Linux distributions using `apt` as their package manager, like all
Debian and Ubuntu derivatives do, you may be able to use the APT-based repository.

View File

@ -1,5 +1,3 @@
:orphan:
=======
pbs2to3

View File

@ -1,5 +1,3 @@
:orphan:
===
pmt
===

View File

@ -1,5 +1,3 @@
:orphan:
==========================
pmtx
==========================

View File

@ -1,5 +1,3 @@
:orphan:
=====================
proxmox-backup-client
=====================

View File

@ -1,5 +1,3 @@
:orphan:
====================
proxmox-backup-debug
====================

View File

@ -1,5 +1,3 @@
:orphan:
==========================
proxmox-backup-manager
==========================

View File

@ -1,5 +1,3 @@
:orphan:
==========================
proxmox-backup-proxy
==========================

View File

@ -1,5 +1,3 @@
:orphan:
==========================
proxmox-backup
==========================

View File

@ -1,5 +1,3 @@
:orphan:
====================
proxmox-file-restore
====================

View File

@ -1,5 +1,3 @@
:orphan:
============
proxmox-tape
============

View File

@ -82,13 +82,13 @@ available:</p>
<dd>Keep the last <code class="docutils literal notranslate"><span class="pre">&lt;N&gt;</span></code> backup snapshots.</dd>
<dt><code class="docutils literal notranslate"><span class="pre">keep-hourly</span> <span class="pre">&lt;N&gt;</span></code></dt>
<dd>Keep backups for the last <code class="docutils literal notranslate"><span class="pre">&lt;N&gt;</span></code> hours. If there is more than one
backup for a single hour, only the latest is kept. Hours without backups do not count.</dd>
backup for a single hour, only the latest is kept.</dd>
<dt><code class="docutils literal notranslate"><span class="pre">keep-daily</span> <span class="pre">&lt;N&gt;</span></code></dt>
<dd>Keep backups for the last <code class="docutils literal notranslate"><span class="pre">&lt;N&gt;</span></code> days. If there is more than one
backup for a single day, only the latest is kept. Days without backups do not count.</dd>
backup for a single day, only the latest is kept.</dd>
<dt><code class="docutils literal notranslate"><span class="pre">keep-weekly</span> <span class="pre">&lt;N&gt;</span></code></dt>
<dd>Keep backups for the last <code class="docutils literal notranslate"><span class="pre">&lt;N&gt;</span></code> weeks. If there is more than one
backup for a single week, only the latest is kept. Weeks without backups do not count.
backup for a single week, only the latest is kept.
<div class="last admonition note">
<p class="note-title">Note:</p>
<p class="last">Weeks start on Monday and end on Sunday. The software
@ -98,10 +98,10 @@ the end of the year correctly.</p>
</dd>
<dt><code class="docutils literal notranslate"><span class="pre">keep-monthly</span> <span class="pre">&lt;N&gt;</span></code></dt>
<dd>Keep backups for the last <code class="docutils literal notranslate"><span class="pre">&lt;N&gt;</span></code> months. If there is more than one
backup for a single month, only the latest is kept. Months without backups do not count.</dd>
backup for a single month, only the latest is kept.</dd>
<dt><code class="docutils literal notranslate"><span class="pre">keep-yearly</span> <span class="pre">&lt;N&gt;</span></code></dt>
<dd>Keep backups for the last <code class="docutils literal notranslate"><span class="pre">&lt;N&gt;</span></code> years. If there is more than one
backup for a single year, only the latest is kept. Years without backups do not count.</dd>
backup for a single year, only the latest is kept.</dd>
</dl>
<p>The retention options are processed in the order given above. Each option
only covers backups within its time period. The next option does not take care

View File

@ -126,8 +126,7 @@ Ext.onReady(function() {
if (data.mark !== 'keep') {
return `<div style="text-decoration: line-through;">${text}</div>`;
}
let pruneList = this.up('prunesimulatorPruneList');
if (pruneList.useColors) {
if (me.useColors) {
let bgColor = COLORS[data.keepName];
let textColor = TEXT_COLORS[data.keepName];
return `<div style="background-color: ${bgColor};color: ${textColor};">${text}</div>`;
@ -354,17 +353,12 @@ Ext.onReady(function() {
specValues.forEach(function(value) {
if (value.includes('..')) {
let [start, end] = value.split('..');
let step = 1;
if (end.includes('/')) {
[end, step] = end.split('/');
step = assertValid(step);
}
start = assertValid(start);
end = assertValid(end);
if (start > end) {
throw "interval start is bigger then interval end '" + start + " > " + end + "'";
}
for (let i = start; i <= end; i += step) {
for (let i = start; i <= end; i++) {
matches[i] = 1;
}
} else if (value.includes('/')) {

View File

@ -1,5 +1,3 @@
:orphan:
====
pxar
====

View File

@ -165,74 +165,6 @@ following command creates a new datastore called ``store1`` on
# proxmox-backup-manager datastore create store1 /backup/disk1/store1
Removable Datastores
^^^^^^^^^^^^^^^^^^^^
Removable datastores have a ``backing-device`` associated with them; they can be
mounted and unmounted. Other than that, they behave the same way a normal
datastore would.
They can be created on already correctly formatted partitions, which should be
either ``ext4`` or ``xfs`` as with normal datastores, but most modern file
systems supported by the Proxmox Linux kernel should work.
.. note:: FAT-based file systems do not support the POSIX file ownership
concept and have relatively low limits on the number of files per directory.
Therefore, creating a datastore is not supported on FAT file systems.
Because some external drives are preformatted with such a FAT-based file
system, you may need to reformat the drive before you can use it as a
backing-device for a removable datastore.
It is also possible to create them on completely unused disks through
"Administration" > "Disks / Storage" > "Directory", using this method the disk will
be partitioned and formatted automatically for the datastore.
Devices with only one datastore on them will be mounted automatically. Unmounting has
to be done through the UI by clicking "Unmount" on the summary page or using the CLI.
If unmounting fails, the reason is logged in the unmount task log, and the
datastore will stay in maintenance mode ``unmounting``, which prevents any IO
operations. In such cases, the maintenance mode has to be reset manually using:
.. code-block:: console
# proxmox-backup-manager datastore update --maintenance-mode offline
to prevent any IO, or to clear it use:
.. code-block:: console
# proxmox-backup-manager datastore update --delete maintenance-mode
A single device can house multiple datastores; the only limitation is that they are not
allowed to be nested.
Removable datastores are created on the device with the given relative path that is specified
on creation. In order to use a datastore on multiple PBS instances, it has to be created on one,
and added with ``Reuse existing datastore`` checked on the others. The path you set on creation
is how multiple datastores on a single device are identified. So when adding on a new PBS instance,
it has to match what was set on creation.
.. code-block:: console
# proxmox-backup-manager datastore unmount store1
Both the UI unmount action and this command will wait for any running tasks to finish and unmount the device.
All removable datastores are mounted under /mnt/datastore/<name>, and the specified path
refers to the path on the device.
All datastores present on a device can be listed using ``proxmox-backup-debug``.
.. code-block:: console
# proxmox-backup-debug inspect device /dev/...
Verify, Prune and Garbage Collection jobs are skipped if the removable
datastore is not mounted when they are scheduled. Sync jobs start, but fail
with an error saying the datastore was not mounted. This is intentional, so that
sync jobs not running as scheduled are at least noticeable.
Managing Datastores
^^^^^^^^^^^^^^^^^^^
@ -382,7 +314,7 @@ Options
There are a few per-datastore options:
* :ref:`Notification mode and legacy notification settings <notification_mode>`
* :ref:`Notifications <maintenance_notification>`
* :ref:`Maintenance Mode <maintenance_mode>`
* Verification of incoming backups
@ -435,28 +367,9 @@ There are some tuning related options for the datastore that are more advanced:
This can be set with:
.. code-block:: console
.. code-block:: console
# proxmox-backup-manager datastore update <storename> --tuning 'sync-level=filesystem'
* ``gc-atime-safety-check``: Datastore GC atime update safety check:
You can explicitly `enable` or `disable` the atime update safety check
performed on datastore creation and garbage collection. This checks if atime
updates are handled as expected by garbage collection and therefore avoids the
risk of data loss by unexpected filesystem behavior. It is recommended to set
this to enabled, which is also the default value.
* ``gc-atime-cutoff``: Datastore GC atime cutoff for chunk cleanup:
This allows setting the cutoff for which a chunk is still considered in-use
during phase 2 of garbage collection (given no older writers). If the
``atime`` of the chunk is outside the range, it will be removed.
* ``gc-cache-capacity``: Datastore GC least recently used cache capacity:
Allows controlling the cache capacity used to keep track of chunks for which
the access time has already been updated during phase 1 of garbage collection.
This avoids multiple updates and increases GC runtime performance. Higher
values can reduce GC runtime at the cost of increased memory usage; setting the
value to 0 disables caching.
# proxmox-backup-manager datastore update <storename> --tuning 'sync-level=filesystem'
If you want to set multiple tuning options simultaneously, you can separate them
with a comma, like this:
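For instance, combining the sync level with the GC cache capacity (the chosen
capacity value here is only illustrative):

.. code-block:: console
# proxmox-backup-manager datastore update <storename> --tuning 'sync-level=filesystem,gc-cache-capacity=8192'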
@ -506,7 +419,7 @@ remote-source to avoid that an attacker that took over the source can cause
deletions of backups on the target hosts.
If the source-host became victim of a ransomware attack, there is a good chance
that sync jobs will fail, triggering an :ref:`error notification
<Notification Events>`.
<maintenance_notification>`.
It is also possible to create :ref:`tape backups <tape_backup>` as a second
storage medium. This way, you get an additional copy of your data on a

View File

@ -30,8 +30,6 @@ please refer to the standard Debian documentation.
.. include:: certificate-management.rst
.. include:: external-metric-server.rst
.. include:: services.rst
.. include:: command-line-tools.rst

View File

@ -6,8 +6,6 @@ production. To further decrease the impact of a failed host, you can set up
periodic, efficient, incremental :ref:`datastore synchronization <syncjobs>`
from other Proxmox Backup Server instances.
.. _minimum_system_requirements:
Minimum Server Requirements, for Evaluation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@ -40,8 +38,7 @@ Recommended Server System Requirements
* Backup storage:
* Prefer fast storage that delivers high IOPS for random IO workloads; use
only enterprise SSDs for best results.
* Use only SSDs, for best results
* If HDDs are used: Using a metadata cache is highly recommended, for example,
add a ZFS :ref:`special device mirror <local_zfs_special_device>`.

View File

@ -61,7 +61,6 @@ In general, LTO tapes offer the following advantages:
Note that `Proxmox Backup Server` already stores compressed data, so using the
tape compression feature has no advantage.
.. _tape-supported-hardware:
Supported Hardware
------------------
@ -99,31 +98,6 @@ so it takes 33 hours to read the 12TB needed to fill up an LTO-8 tape. If you wa
to write to your tape at full speed, please make sure that the source
datastore is able to deliver that performance (for example, by using SSDs).
LTO-9+ considerations
~~~~~~~~~~~~~~~~~~~~~
Since LTO-9, it is necessary to initialize new media in your drives; this is
called `Media Optimization`. This usually takes between 40 and 120 minutes per
medium. It is recommended to initialize your media in this manner with the
tools provided by the hardware vendor of your drive or changer. Some tape
changers have a method to 'bulk' initialize your media.
Because of this, formatting tapes is handled differently in Proxmox Backup
Server to avoid re-optimizing on each format/labelling. If you want to format
your media for use with Proxmox Backup Server for the first time, or after use
with another program, either use the functionality of your drive/changer, or
use the 'slow' format on the CLI:
.. code-block:: console
# proxmox-tape format --drive your-drive --fast 0
This will completely remove all pre-existing data and trigger a `Media
Optimization` pass.
If you format a partitioned LTO-9 medium with the 'fast' method (the default or
by setting `--fast 1`), only the first partition will be formatted, so make
sure to use the 'slow' method.
Terminology
-----------
@ -352,25 +326,6 @@ the status output:
│ slot │ 14 │ │ │
└───────────────┴──────────┴────────────┴─────────────┘
Advanced options
^^^^^^^^^^^^^^^^
Since not all tape changers behave the same, there is sometimes a need
to configure advanced options.
Currently, the following options are available:
* `eject-before-unload` : This is needed for some changers that require a tape
to be ejected before unloading from the drive.
You can set these options with `proxmox-tape` like this:
.. code-block:: console
# proxmox-tape changer update sl3 --eject-before-unload true
.. _tape_drive_config:
Tape drives
@ -560,6 +515,8 @@ a single media pool, so a job only uses tapes from that pool.
- Create a new set when the specified Calendar Event triggers.
.. _systemd.time manpage: https://manpages.debian.org/buster/systemd/systemd.time.7.en.html
This allows you to specify points in time by using systemd-like
Calendar Event specifications (see `systemd.time manpage`_).
@ -707,16 +664,16 @@ dust protection than inside a drive:
.. Note:: For failed jobs, the tape remains in the drive.
For tape libraries, the ``export-media-set`` option moves all tapes from
For tape libraries, the ``export-media`` option moves all tapes from
the media set to an export slot, making sure that the following backup
cannot use the tapes. An operator can pick up those tapes and move them
to a vault.
.. code-block:: console
# proxmox-tape backup-job update job2 --export-media-set
# proxmox-tape backup-job update job2 --export-media
.. Note:: The ``export-media-set`` option can be used to force the start
.. Note:: The ``export-media`` option can be used to force the start
of a new media set, because tapes from the current set are no
longer online.
@ -970,8 +927,6 @@ You can restore from a tape even without an existing catalog, but only the
whole media set. If you do this, the catalog will be automatically created.
.. _tape_key_management:
Encryption Key Management
~~~~~~~~~~~~~~~~~~~~~~~~~
@ -1183,159 +1138,3 @@ In combination with fitting prune settings and tape backup schedules, this
achieves long-term storage of some backups, while keeping the recent
backups on smaller media sets that expire roughly every 4 weeks (that is, three
plus the current week).
Disaster Recovery
-----------------
.. _Command-line Tools: command-line-tools.html
In case of major disasters, important data, or even whole servers might be
destroyed or at least damaged up to the point where everything - sometimes
including the backup server - has to be restored from a backup. For such cases,
the following step-by-step guide will help you to set up the Proxmox Backup
Server and restore everything from tape backups.
The following guide will explain the necessary steps using both the web GUI and
the command line tools. For an overview of the command line tools, see
`Command-line Tools`_.
Setting Up a Datastore
~~~~~~~~~~~~~~~~~~~~~~
.. _proxmox-backup-manager: proxmox-backup-manager/man1.html
.. _Installation: installation.html
After you set up a new Proxmox Backup Server, as outlined in the `Installation`_
chapter, first set up a datastore so a tape can be restored to it:
#. Go to **Administration -> Storage / Disks** and make sure that the disk that
will be used as a datastore shows up.
#. Under the **Directory** or **ZFS** tabs, you can either choose to create a
directory or create a ZFS ``zpool``, respectively. Here you can also directly
add the newly created directory or ZFS ``zpool`` as a datastore.
Alternatively, the `proxmox-backup-manager`_ can be used to perform the same
tasks. For more information, check the :ref:`datastore_intro` documentation.
Setting Up the Tape Drive
~~~~~~~~~~~~~~~~~~~~~~~~~
#. Make sure you have a properly working tape drive and/or changer matching the
medium you want to restore from.
#. Connect the tape changer(s) and the tape drive(s) to the backup server. These
should be detected automatically by Linux. You can get a list of available
drives using:
.. code-block:: console
# proxmox-tape drive scan
┌────────────────────────────────┬────────┬─────────────┬────────┐
│ path │ vendor │ model │ serial │
╞════════════════════════════════╪════════╪═════════════╪════════╡
│ /dev/tape/by-id/scsi-12345-sg │ IBM │ ULT3580-TD4 │ 12345 │
└────────────────────────────────┴────────┴─────────────┴────────┘
You can get a list of available changers with:
.. code-block:: console
# proxmox-tape changer scan
┌─────────────────────────────┬─────────┬──────────────┬────────┐
│ path │ vendor │ model │ serial │
╞═════════════════════════════╪═════════╪══════════════╪════════╡
│ /dev/tape/by-id/scsi-CC2C52 │ Quantum │ Superloader3 │ CC2C52 │
└─────────────────────────────┴─────────┴──────────────┴────────┘
For more information, please read the chapters
on :ref:`tape_changer_config` and :ref:`tape_drive_config`.
#. If you have a tape changer, go to the web interface of the Proxmox Backup
Server, go to **Tape Backup -> Changers** and add it. For examples using the
command line, read the chapter on :ref:`tape_changer_config`. If the changer
has been detected correctly by Linux, the changer should show up in the list.
#. In the web interface, go to **Tape Backup -> Drives** and add the tape drive
that will be used to read the tapes. For examples using the command line,
read the chapter on :ref:`tape_drive_config`. If the tape drive has been
detected correctly by Linux, the drive should show up in the list. If the
drive also has a tape changer, make sure to select the changer as well and
assign it the correct drive number.
Restoring Data From the Tape
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. _proxmox-tape: proxmox-tape/man1.html
.. _proxmox-backup-client: proxmox-backup-client/man1.html
.. _Restore: https://pve.proxmox.com/pve-docs/chapter-vzdump.html#vzdump_restore
The following guide will explain the steps necessary to restore data from a
tape, which can be done over either the web GUI or the command line. For details
on the command line, read the documentation on the `proxmox-tape`_ tool.
To restore data from tapes, do the following:
#. Insert the first tape (as displayed on the label) into the tape drive or, if
a tape changer is available, use the tape changer to insert the tape into the
right drive. The web GUI can also be used to load or transfer tapes between
tape drives by selecting the changer.
#. If the backup has been encrypted, the encryption keys need to be restored as
well. In the **Encryption Keys** tab, press **Restore Key**. For more
details or examples that use the command line, read the
:ref:`tape_key_management` chapter.
#. The procedure for restoring data is slightly different depending on whether
you are using a standalone tape drive or a changer:
* For changers, the procedure is simple:
#. Insert all tapes from the media set you want to restore from.
#. Click on the changer in the web GUI, click **Inventory**, make sure
**Restore Catalog** is selected and press OK.
* For standalone drives, the procedure would be:
#. Insert the first tape of the media set.
#. Click **Catalog**.
#. Eject the tape, then repeat the steps for the remaining tapes of the
media set.
#. Go back to **Tape Backup**. In the **Content** tab, press **Restore** and
select the desired media set. Choose the snapshot you want to restore, press
**Next**, select the drive and target datastore and press **Restore**.
#. By going to the datastore where the data has been restored, under the
**Content** tab you should be able to see the restored snapshots. In order to
access the backups from another machine, you will need to configure the
access to the backup server. Go to **Configuration -> Access Control** and
either create a new user, or a new API token (API tokens allow easy
revocation if the token is compromised). Under **Permissions**, add the
desired permissions, e.g. **DatastoreBackup**.
#. You can now perform virtual machine, container or file restores. You now have
the following options:
* If you want to restore files on Linux distributions that are not based on
Proxmox products or you prefer using a command line tool, you can use the
`proxmox-backup-client`_, as explained in the
:ref:`client_restoring_data` chapter. Use the newly created API token to
be able to access the data. You can then restore individual files or
mount an archive to your system.
* If you want to restore virtual machines or containers on a Proxmox VE
server, add the datastore of the backup server as storage and go to
**Backups**. Here you can restore VMs and containers, including their
configuration. For more information on restoring backups in Proxmox VE,
visit the `Restore`_ chapter of the Proxmox VE documentation.

View File

@ -28,9 +28,6 @@ which are not chunked, e.g. the client log), or one or more indexes
When uploading an index, the client first has to read the source data, chunk it
and send the data as chunks with their identifying checksum to the server.
When using the :ref:`change detection mode <change_detection_mode>`, payload
chunks for unchanged files are reused from the previous snapshot, so the
source data does not have to be read again.
If there is a previous Snapshot in the backup group, the client can first
download the chunk list of the previous Snapshot. If it detects a chunk that
@ -56,9 +53,8 @@ The chunks of a datastore are found in
<datastore-root>/.chunks/
This chunk directory is further subdivided into directories grouping chunks by
their checksum's 2-byte prefix (given as 4 hexadecimal digits), so a chunk with
the checksum
This chunk directory is further subdivided by the first four bytes of the
chunk's checksum, so a chunk with the checksum
a342e8151cbf439ce65f3df696b54c67a114982cc0aa751f2852c2f7acc19a8b
@ -134,141 +130,6 @@ This is done to speed up the client part of the backup, since it only needs to
encrypt chunks that are actually getting uploaded. Chunks that exist already in
the previous backup, do not need to be encrypted and uploaded.
Change Detection Mode for File-Based Backups
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The change detection mode controls how files which did not change between
subsequent backup runs are detected and handled, as well as the archive file
format used to encode the directory entries.
There are 3 modes available: the current default ``legacy`` mode, as well as the
``data`` and ``metadata`` modes. While the ``legacy`` mode encodes all contents
in a single ``pxar`` archive, the latter two modes split data and metadata into
``ppxar`` and ``mpxar`` archives. This is done to allow for fast comparison of
metadata with the previous snapshot, used by the ``metadata`` mode to detect
reusable files. The ``data`` mode refrains from reusing unchanged files by
rechunking the file unconditionally. This mode therefore assures that no file
changes are missed even if the metadata are unchanged.
.. NOTE:: ``pxar`` and ``mpxar``/``ppxar`` file formats are different and cannot
be deduplicated as efficiently if a datastore stores archive snapshots of
both types.
As the change detection modes are client-side changes, they are backwards
compatible with older versions of Proxmox Backup Server. However, exploring the
backup contents of the new archive format via the web interface requires a
Proxmox Backup Server with version 3.2.5 or higher. Upgrading to the latest
version is recommended for full feature compatibility.
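A sketch of selecting the mode when creating a backup with the client, assuming
the ``change-detection-mode`` parameter of ``proxmox-backup-client backup``:

.. code-block:: console
# proxmox-backup-client backup root.pxar:/ --change-detection-mode metadata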
.. _change-detection-mode-legacy:
Legacy Mode
+++++++++++
Backup snapshots of filesystems are created by recursively scanning the
directory entries. All entries to be included in the snapshot are read and
serialized by encoding them using the ``pxar``
:ref:`archive format <pxar-format>`. The resulting stream is chunked into
:ref:`dynamically sized chunks <dynamically-sized-chunks>` and uploaded to the
Proxmox Backup Server, deduplicating chunks based on their content digest for
space efficient storage.
File contents are read and chunked unconditionally; no check is performed to
detect unchanged files.
.. _change-detection-mode-data:
Data Mode
+++++++++
Like in ``legacy`` mode, file contents are read and chunked unconditionally; no
check is performed to detect unchanged files.
However, in contrast to ``legacy`` mode, which stores entries metadata and data
in a single self-contained ``pxar`` archive, the ``data`` mode encodes metadata
and file contents into two separate streams. The resulting backup snapshots
therefore contain split archives, an archive in ``mpxar``
:ref:`format <pxar-meta-format>` containing the entries' metadata and an archive
with ``ppxar`` :ref:`format <ppxar-format>`, containing the actual file
contents, separated by payload headers for consistency checks. The metadata
archive stores a reference offset to the corresponding payload archive entry so
the file contents can be accessed. Both of these archives are chunked and
uploaded by the Proxmox backup client, resulting in separated indices and
independent chunks.
The ``mpxar`` archive can be used to efficiently fetch the associated metadata
for archive entries without the overhead of payload data stored within the same
chunks. This is used for example for entry lookups to list the archive contents
or to navigate the mounted filesystem via the FUSE implementation. No dedicated
catalog is therefore created for archives encoded using this mode.
Since metadata is not compared to the previous backup snapshot, no files will be
considered reusable by this mode, in contrast to the ``metadata`` mode.
The latter may even reuse files whose contents have changed, if file size and
mtime were restored to their previous values after the contents were changed.
.. _change-detection-mode-metadata:
Metadata Mode
+++++++++++++
The ``metadata`` mode detects files whose file metadata did not change
in-between subsequent backup runs. The metadata comparison includes file size,
file type, ownership and permission information, as well as ACLs and attributes,
and most importantly the file's mtime; for details see the
:ref:`pxar metadata archive format <pxar-meta-format>`. A file's ctime and inode
number are not stored and used for comparison, since some tools (e.g.
``vzdump``) might sync the contents of the filesystem to a temporary location
before actually performing the backup via the Proxmox backup client. For these
cases, ctime and inode number will always change.
This mode will avoid reading and rechunking the file contents whenever possible
by reusing the file content chunks of unchanged files from the previous backup
snapshot.
To compare the metadata, the previous snapshot's ``mpxar`` metadata archive is
downloaded at the start of the backup run and used as a reference. Further, the
index of the payload archive ``ppxar`` is fetched and used to lookup the file
content chunk's digests, which will be used to reindex pre-existing chunks
without the need to reread and rechunk the file contents.
During backup, the metadata and payload archives are encoded in the same manner
as for the ``data`` mode, but for the ``metadata`` mode each entry is
additionally looked up in the metadata reference archive for comparison first.
If the file did not change as compared to the reference, the file is considered
as unchanged and the Proxmox backup client enters a look-ahead caching mode. In
this mode, the client will keep reading and comparing the following entries in
the filesystem as long as they are reusable. Further, it keeps track of the
payload archive offset range these file contents are stored in. The additional
look-ahead caching is needed, as file boundaries are not required to be aligned
with chunk boundaries, therefore reused chunks can contain possibly wasted chunk
content (also called padding) if reused unconditionally.
The look-ahead cache will greedily cache all unchanged entries up to the point
where either the cache size limit is reached, a file entry with changed
metadata is encountered, or the range of payload chunks considered for reuse is
not continuous. An example for the latter is a file which disappeared in-between
subsequent backup runs, leaving a hole in the range. At this point, the caching
mode is disabled and the client calculates the wasted padding size which would
be introduced by reusing the payload chunks for all the unchanged files cached
up to this point. If the padding is acceptable (below a preset limit of 10% of
the actually reused chunk content), the files are reused by encoding them in the
metadata archive using updated offset references to the contents and reindexing
the pre-existing chunks in the new ``ppxar`` archive. If however the padding is
not acceptable, exceeding the limit, all cached entries are reencoded, not
reusing any of the pre-existing data. The metadata as cached will be encoded in
the metadata archive, no matter if cached file contents are to be reused or
reencoded.
This combination of look-ahead caching and reuse of pre-existing payload archive
chunks for files with unchanged contents therefore speeds up the backup
process by avoiding rereading and rechunking file contents whenever possible.
To reduce paddings and increase chunk reusability, during creation of the
archives in ``data`` mode and ``metadata`` mode the pxar encoder signals
encountered file boundaries as suggested chunk boundaries to the sliding window
chunker. The chunker then decides based on the internal state if the suggested
boundary is accepted or disregarded.
Caveats and Limitations
-----------------------
@ -298,8 +159,8 @@ will see that the probability of a collision in that scenario is:
For context, in a lottery game of guessing 6 numbers out of 45, the chance to
correctly guess all 6 numbers is only :math:`1.2277 * 10^{-7}`. This means the
chance of a collision is lower than winning 8 such lottery games *in a row*:
:math:`(1.2277 * 10^{-7})^{8} = 5.1623 * 10^{-56}`.
chance of a collision is about the same as winning 13 such lottery games *in a
row*.
In conclusion, it is extremely unlikely that such a collision would occur by
accident in a normal datastore.
@ -319,9 +180,6 @@ read all files again for every backup, otherwise it would not be possible to
generate a consistent, independent pxar archive where the original chunks can be
reused. Note that in spite of this, only new or changed chunks will be uploaded.
In order to avoid these limitations, the Change Detection Mode ``metadata`` was
introduced.
Verification of Encrypted Chunks
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

View File

@ -16,8 +16,8 @@ User Configuration
choose the realm when you add a new user. Possible realms are:
:pam: Linux PAM standard authentication. Use this if you want to
authenticate as a Linux system user. The users needs to already exist on
the host system.
authenticate as a Linux system user (users need to exist on the
system).
:pbs: Proxmox Backup Server realm. This type stores hashed passwords in
``/etc/proxmox-backup/shadow.json``.
@ -27,9 +27,6 @@ choose the realm when you add a new user. Possible realms are:
:ldap: LDAP server. Users can authenticate against external LDAP servers.
:ad: Active Directory server. Users can authenticate against external Active
Directory servers.
After installation, there is a single user, ``root@pam``, which corresponds to
the Unix superuser. User configuration information is stored in the file
``/etc/proxmox-backup/user.cfg``. You can use the ``proxmox-backup-manager``
@ -599,32 +596,6 @@ list view in the web UI, or using the command line:
Authentication Realms
---------------------
.. _user_realms_pam:
Linux PAM
~~~~~~~~~
Linux PAM is a framework for system-wide user authentication. These users are
created on the host system with commands such as ``adduser``.
If PAM users exist on the host system, corresponding entries can be added to
Proxmox Backup Server, to allow these users to log in via their system username
and password.
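For example, a system user created with ``adduser`` can then be added as a
Proxmox Backup Server user in the ``pam`` realm (a sketch; the user name is
made up):

.. code-block:: console
# adduser backupuser
# proxmox-backup-manager user create backupuser@pam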
.. _user_realms_pbs:
Proxmox Backup authentication server
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This is a Unix-like password store, which stores hashed passwords in
``/etc/proxmox-backup/shadow.json``. Passwords are hashed using the SHA-256
hashing algorithm.
This is the most convenient realm for small-scale (or even mid-scale)
installations, where users do not need access to anything outside of Proxmox
Backup Server. In this case, users are fully managed by Proxmox Backup Server
and are able to change their own passwords via the GUI.
.. _user_realms_ldap:
LDAP
@ -675,47 +646,15 @@ A full list of all configuration parameters can be found at :ref:`domains.cfg`.
server, you must also add them as a user of that realm in Proxmox Backup
Server. This can be carried out automatically with syncing.
.. _user_realms_ad:
User Synchronization in LDAP realms
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Active Directory
~~~~~~~~~~~~~~~~
Proxmox Backup Server can also utilize external Microsoft Active Directory
servers for user authentication.
To achieve this, a realm of the type ``ad`` has to be configured.
For an Active Directory realm, the authentication domain name and the server
address must be specified. Most options from :ref:`user_realms_ldap` apply to
Active Directory as well, most importantly the bind credentials ``bind-dn``
and ``password``. This is typically required by default for Microsoft Active
Directory. The ``bind-dn`` can be specified either in AD-specific
``user@company.net`` syntax or the common LDAP-DN syntax.
The authentication domain name must only be specified if anonymous bind is
requested. If bind credentials are given, the domain name is automatically
inferred from the bind users' base domain, as reported by the Active Directory
server.
A full list of all configuration parameters can be found at :ref:`domains.cfg`.
.. note:: In order to allow a particular user to authenticate using the Active
Directory server, you must also add them as a user of that realm in Proxmox
Backup Server. This can be carried out automatically with syncing.
.. note:: Currently, case-insensitive usernames are not supported.
User Synchronization in LDAP/AD realms
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
It is possible to automatically sync users for LDAP and AD-based realms, rather
than having to add them to Proxmox Backup Server manually. Synchronization
options can be set in the LDAP realm configuration dialog window in the GUI and
via the ``proxmox-backup-manager ldap`` and ``proxmox-backup-manager ad``
commands, respectively.
User synchronization can be started in the GUI under **Configuration > Access
Control > Realms** by selecting a realm and pressing the `Sync` button. In the
sync dialog, some of the default options set in the realm configuration can be
overridden. Alternatively, user synchronization can also be started via the
``proxmox-backup-manager ldap sync`` and ``proxmox-backup-manager ad sync``
commands, respectively.
It is possible to automatically sync users for LDAP-based realms, rather than
having to add them to Proxmox Backup Server manually. Synchronization options
can be set in the LDAP realm configuration dialog window in the GUI and via the
``proxmox-backup-manager ldap create/update`` command.
User synchronization can be started in the GUI at
Configuration > Access Control > Realms by selecting a realm and pressing the
`Sync` button. In the sync dialog, some of the default options set in the realm
configuration can be overridden. Alternatively, user synchronization can also
be started via the ``proxmox-backup-manager ldap sync`` command.

View File

@ -1,346 +0,0 @@
.. _using_the_installer:
Install `Proxmox Backup`_ Server using the Installer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Download the ISO from |DOWNLOADS|.
It includes the following:
* The Proxmox Backup Server installer, which partitions the local
disk(s) with ext4, xfs or ZFS, and installs the operating system
* Complete operating system (Debian Linux, 64-bit)
* Proxmox Linux kernel with ZFS support
* Complete toolset to administer backups and all necessary resources
* Web-based management interface
.. note:: Any existing data on the selected drives will be overwritten
during the installation process. The installer does not add boot
menu entries for other operating systems.
Please insert the :ref:`installation_medium` (for example, USB flash
drive or DVD) and boot from it.
.. note:: You may need to go into your server's firmware settings, to
enable booting from your installation medium (for example, USB) and
set the desired boot order. When booting an installer prior to
`Proxmox Backup`_ Server version 3.1, Secure Boot needs to be
disabled.
.. image:: images/screenshots/pbs-installer-grub-menu.png
:target: _images/pbs-installer-grub-menu.png
:align: right
:alt: Proxmox Backup Server Installer GRUB Menu
After choosing the correct entry (for example, *Boot from USB*) the
Proxmox Backup Server menu will be displayed, and one of the following
options can be selected:
**Install Proxmox Backup Server (Graphical)**
Starts the normal installation.
TIP: It's possible to use the installation wizard with a keyboard only. Buttons
can be clicked by pressing the ``ALT`` key combined with the underlined character
from the respective button. For example, ``ALT + N`` to press a ``Next`` button.
**Install Proxmox Backup Server (Console)**
Starts the terminal-mode installation wizard. It provides the same overall
installation experience as the graphical installer, but has generally better
compatibility with very old and very new hardware.
**Install Proxmox Backup Server (Terminal UI, Serial Console)**
Starts the terminal-mode installation wizard, additionally setting up the Linux
kernel to use the (first) serial port of the machine for in- and output. This
can be used if the machine is completely headless and only has a serial console
available.
.. image:: images/screenshots/pbs-tui-installer.png
:target: _images/pbs-tui-installer.png
:align: right
:alt: Proxmox Backup Server Terminal UI Installer
Both modes use the same code base for the actual installation process to
benefit from more than a decade of bug fixes and ensure feature parity.
TIP: The *Console* or *Terminal UI* option can be used in case the graphical
installer does not work correctly, for example due to driver issues. See also
:ref:`nomodeset_kernel_param`.
**Advanced Options: Install Proxmox Backup Server (Debug Mode)**
Starts the installation in debug mode. A console will be opened at several
installation steps. This helps to debug the situation if something goes wrong.
To exit a debug console, press ``CTRL-D``. This option can be used to boot a
live system with all basic tools available. You can use it, for example, to
repair a degraded ZFS *rpool* or fix the :ref:`chapter-systembooting` for an
existing Proxmox Backup Server setup.
**Advanced Options: Install Proxmox Backup Server (Terminal UI, Debug Mode)**
Same as the graphical debug mode, but preparing the system to run the
terminal-based installer instead.
**Advanced Options: Install Proxmox Backup Server (Serial Console Debug Mode)**
Same as the terminal-based debug mode, but additionally sets up the Linux kernel to
use the (first) serial port of the machine for in- and output.
**Advanced Options: Rescue Boot**
With this option you can boot an existing installation. It searches all attached
hard disks. If it finds an existing installation, it boots directly into that
disk using the Linux kernel from the ISO. This can be useful if there are
problems with the bootloader (GRUB/``systemd-boot``) or the BIOS/UEFI is unable
to read the boot block from the disk.
**Advanced Options: Test Memory (memtest86+)**
Runs *memtest86+*. This is useful to check if the memory is functional and free
of errors. Secure Boot must be turned off in the UEFI firmware setup utility to
run this option.
You normally select *Install Proxmox Backup Server (Graphical)* to start the
installation.
The first step is to read our EULA (End User License Agreement). Following this,
you can select the target hard disk(s) for the installation.
.. caution:: By default, the whole server is used and all existing data is
removed. Make sure there is no important data on the server before proceeding
with the installation.
The *Options* button lets you select the target file system, which defaults to
``ext4``. The installer uses LVM if you select ``ext4`` or ``xfs`` as a file
system, and offers additional options to restrict LVM space (see :ref:`below
<advanced_lvm_options>`).
.. image:: images/screenshots/pbs-installer-select-disk.png
:target: _images/pbs-installer-select-disk.png
:align: right
:alt: Proxmox Backup Server Installer - Harddisk Selection Dialog
Proxmox Backup Server can also be installed on ZFS. As ZFS offers several
software RAID levels, this is an option for systems that don't have a hardware
RAID controller. The target disks must be selected in the *Options* dialog. More
ZFS specific settings can be changed under :ref:`Advanced Options
<advanced_zfs_options>`.
.. warning:: ZFS on top of any hardware RAID is not supported and can result in
data loss.
.. image:: images/screenshots/pbs-installer-location.png
:target: _images/pbs-installer-location.png
:align: right
:alt: Proxmox Backup Server Installer - Location and timezone configuration
The next page asks for basic configuration options like your location, time
zone, and keyboard layout. The location is used to select a nearby download
server, in order to increase the speed of updates. The installer is usually able
to auto-detect these settings, so you only need to change them in rare
situations when auto-detection fails, or when you want to use a keyboard layout
not commonly used in your country.
.. image:: images/screenshots/pbs-installer-password.png
:target: _images/pbs-installer-password.png
:align: left
:alt: Proxmox Backup Server Installer - Password and email configuration
Next, the password of the superuser (``root``) and an email address need to be
specified. The password must consist of at least 8 characters. It's highly
recommended to use a stronger password. Some guidelines are:
|
|
- Use a minimum password length of at least 12 characters.
- Include lowercase and uppercase alphabetic characters, numbers, and symbols.
- Avoid character repetition, keyboard patterns, common dictionary words,
letter or number sequences, usernames, relative or pet names, romantic links
(current or past), and biographical information (for example ID numbers,
ancestors' names or dates).
The email address is used to send notifications to the system administrator.
For example:
- Information about available package updates.
- Error messages from periodic *cron* jobs.
.. image:: images/screenshots/pbs-installer-network.png
:target: _images/pbs-installer-network.png
:align: right
:alt: Proxmox Backup Server Installer - Network configuration
All those notification mails will be sent to the specified email address.
The last step is the network configuration. Network interfaces that are *UP*
show a filled circle in front of their name in the drop down menu. Please note
that during installation you can either specify an IPv4 or IPv6 address, but not
both. To configure a dual stack node, add additional IP addresses after the
installation.
.. image:: images/screenshots/pbs-installer-progress.png
:target: _images/pbs-installer-progress.png
:align: left
:alt: Proxmox Backup Server Installer - Installation progress
The next step shows a summary of the previously selected options. Please
re-check every setting and use the *Previous* button if a setting needs to be
changed.
After clicking *Install*, the installer will begin to format the disks and copy
packages to the target disk(s). Please wait until this step has finished; then
remove the installation medium and restart your system.
.. image:: images/screenshots/pbs-installer-summary.png
:target: _images/pbs-installer-summary.png
:align: right
:alt: Proxmox Backup Server Installer - Installation summary
Copying the packages usually takes several minutes, mostly depending on the
speed of the installation medium and the target disk performance.
When copying and setting up the packages has finished, you can reboot the
server. This will be done automatically after a few seconds by default.
Installation Failure
^^^^^^^^^^^^^^^^^^^^
If the installation failed, check out specific errors on the second TTY
(``CTRL + ALT + F2``) and ensure that the systems meets the
:ref:`minimum requirements <minimum_system_requirements>`.
If the installation is still not working, look at the :ref:`how to get help
chapter <get_help>`.
Accessing the Management Interface Post-Installation
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. image:: images/screenshots/pbs-gui-login-window.png
:target: _images/pbs-gui-login-window.png
:align: right
:alt: Proxmox Backup Server - Management interface login dialog
After a successful installation and reboot of the system you can use the Proxmox
Backup Server web interface for further configuration.
- Point your browser to the IP address given during the installation and port
8007, for example: https://pbs.yourdomain.tld:8007
- Log in using the ``root`` (realm *Linux PAM standard authentication*) username
and the password chosen during installation.
- Upload your subscription key to gain access to the Enterprise repository.
Otherwise, you will need to set up one of the public, less tested package
repositories to get updates for security fixes, bug fixes, and new features.
- Check the IP configuration and hostname.
- Check the timezone.
.. _advanced_lvm_options:
Advanced LVM Configuration Options
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The installer creates a Volume Group (VG) called ``pbs``, and additional Logical
Volumes (LVs) called ``root`` and ``swap``, if ``ext4`` or ``xfs`` as filesystem
is used. To control the size of these volumes use:
- *hdsize*
Defines the total hard disk size to be used. This way you can reserve free
space on the hard disk for further partitioning.
- *swapsize*
Defines the size of the ``swap`` volume. The default is the size of the
installed memory, minimum 4 GB and maximum 8 GB. The resulting value cannot
be greater than ``hdsize/8``.
If set to ``0``, no ``swap`` volume will be created.
- *minfree*
Defines the amount of free space that should be left in the LVM volume group
``pbs``. With more than 128 GB of storage available, the default is 16 GB;
otherwise ``hdsize/8`` will be used (see the sizing sketch after this list).
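The interaction of these defaults can be illustrated with a short sketch. This
is not the installer's actual code; it merely restates the sizing rules above,
and the function names and whole-GB granularity are assumptions made only for
the illustration:

.. code-block:: rust

   /// Default size of the swap LV: the installed memory, clamped to
   /// 4-8 GB and never more than an eighth of the usable disk size.
   fn default_swapsize_gb(installed_memory_gb: u64, hdsize_gb: u64) -> u64 {
       installed_memory_gb.clamp(4, 8).min(hdsize_gb / 8)
   }

   /// Default minfree: 16 GB when more than 128 GB of storage is
   /// available, otherwise an eighth of the disk size.
   fn default_minfree_gb(hdsize_gb: u64) -> u64 {
       if hdsize_gb > 128 { 16 } else { hdsize_gb / 8 }
   }

   fn main() {
       // Example: 32 GB of RAM and a 1000 GB target disk.
       assert_eq!(default_swapsize_gb(32, 1000), 8); // memory capped at 8 GB
       assert_eq!(default_minfree_gb(1000), 16); // disk larger than 128 GB
   }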
.. _advanced_zfs_options:
Advanced ZFS Configuration Options
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The installer creates the ZFS pool ``rpool``, if ZFS is used. No swap space is
created but you can reserve some unpartitioned space on the install disks for
swap. You can also create a swap zvol after the installation, although this can
lead to problems (see :ref:`ZFS swap notes <zfs_swap>`).
- *ashift*
Defines the *ashift* value for the created pool. The *ashift* needs to be
at least the sector size of the underlying disks (2 to the power of *ashift*
is the sector size; for example, ``ashift=12`` corresponds to 4096-byte
sectors), or of any disk which might later be put in the pool (for example,
the replacement of a defective disk).
- *compress*
Defines whether compression is enabled for ``rpool``.
- *checksum*
Defines which checksumming algorithm should be used for ``rpool``.
- *copies*
Defines the *copies* parameter for ``rpool``. Check the ``zfs(8)`` manpage
for the semantics, and why this does not replace redundancy on disk-level.
- *hdsize*
Defines the total hard disk size to be used. This is useful to reserve free
space on the hard disk(s) for further partitioning (for example, to create a
swap partition). *hdsize* is only honored for bootable disks, that is only
the first disk or mirror for RAID0, RAID1 or RAID10, and all disks in
RAID-Z[123].
ZFS Performance Tips
^^^^^^^^^^^^^^^^^^^^
ZFS works best with a lot of memory. If you intend to use ZFS make sure to have
enough RAM available for it. A good calculation is 4GB plus 1GB RAM for each TB
of raw disk space.
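As a rough worked example of this rule of thumb (the helper below is only an
illustration, not code shipped with the product):

.. code-block:: rust

   /// Rule of thumb from above: 4 GB base plus 1 GB per TB of raw disk space.
   fn recommended_zfs_ram_gb(raw_disk_tb: u64) -> u64 {
       4 + raw_disk_tb
   }

   fn main() {
       // For example, 8 x 4 TB disks (32 TB raw) suggest at least 36 GB of RAM.
       assert_eq!(recommended_zfs_ram_gb(32), 36);
   }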
ZFS can use a dedicated drive as write cache, called the ZFS Intent Log (ZIL).
Use a fast drive (SSD) for it. It can be added after installation with the
following command:
.. code-block:: console
# zpool add <pool-name> log </dev/path_to_fast_ssd>
.. _nomodeset_kernel_param:
Adding the ``nomodeset`` Kernel Parameter
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Problems may arise on very old or very new hardware due to graphics drivers. If
the installation hangs during boot, you can try adding the ``nomodeset``
parameter. This prevents the Linux kernel from loading any graphics drivers and
forces it to continue using the BIOS/UEFI-provided framebuffer.
On the Proxmox Backup Server bootloader menu, navigate to *Install Proxmox
Backup Server (Console)* and press ``e`` to edit the entry. Using the arrow
keys, navigate to the line starting with ``linux``, move the cursor to the end
of that line and add the parameter ``nomodeset``, separated by a space from the
pre-existing last parameter.
Then press ``Ctrl-X`` or ``F10`` to boot the configuration.

View File

@ -2,7 +2,6 @@ include ../defines.mk
UNITS := \
proxmox-backup-daily-update.timer \
removable-device-attach@.service
DYNAMIC_UNITS := \
proxmox-backup-banner.service \

View File

@ -1,8 +0,0 @@
[Unit]
Description=Try to mount the removable device of a datastore with uuid '%i'.
After=proxmox-backup-proxy.service
Requires=proxmox-backup-proxy.service
[Service]
Type=simple
ExecStart=/usr/sbin/proxmox-backup-manager datastore uuid-mount %i

View File

@ -10,7 +10,7 @@ use tokio::net::TcpStream;
// Simple H2 client to test H2 download speed using h2server.rs
struct Process {
body: h2::legacy::RecvStream,
body: h2::RecvStream,
trailers: bool,
bytes: usize,
}
@ -50,11 +50,11 @@ impl Future for Process {
}
fn send_request(
mut client: h2::legacy::client::SendRequest<bytes::Bytes>,
mut client: h2::client::SendRequest<bytes::Bytes>,
) -> impl Future<Output = Result<usize, Error>> {
println!("sending request");
let request = hyper::http::Request::builder()
let request = http::Request::builder()
.uri("http://localhost/")
.body(())
.unwrap();
@ -78,7 +78,7 @@ async fn run() -> Result<(), Error> {
let conn = TcpStream::connect(std::net::SocketAddr::from(([127, 0, 0, 1], 8008))).await?;
conn.set_nodelay(true).unwrap();
let (client, h2) = h2::legacy::client::Builder::new()
let (client, h2) = h2::client::Builder::new()
.initial_connection_window_size(1024 * 1024 * 1024)
.initial_window_size(1024 * 1024 * 1024)
.max_frame_size(4 * 1024 * 1024)

View File

@ -10,7 +10,7 @@ use tokio::net::TcpStream;
// Simple H2 client to test H2 download speed using h2s-server.rs
struct Process {
body: h2::legacy::RecvStream,
body: h2::RecvStream,
trailers: bool,
bytes: usize,
}
@ -50,11 +50,11 @@ impl Future for Process {
}
fn send_request(
mut client: h2::legacy::client::SendRequest<bytes::Bytes>,
mut client: h2::client::SendRequest<bytes::Bytes>,
) -> impl Future<Output = Result<usize, Error>> {
println!("sending request");
let request = hyper::http::Request::builder()
let request = http::Request::builder()
.uri("http://localhost/")
.body(())
.unwrap();
@ -94,7 +94,7 @@ async fn run() -> Result<(), Error> {
.await
.map_err(|err| format_err!("connect failed - {}", err))?;
let (client, h2) = h2::legacy::client::Builder::new()
let (client, h2) = h2::client::Builder::new()
.initial_connection_window_size(1024 * 1024 * 1024)
.initial_window_size(1024 * 1024 * 1024)
.max_frame_size(4 * 1024 * 1024)

View File

@ -8,19 +8,6 @@ use tokio::net::{TcpListener, TcpStream};
use pbs_buildcfg::configdir;
#[derive(Clone, Copy)]
struct H2SExecutor;
impl<Fut> hyper::rt::Executor<Fut> for H2SExecutor
where
Fut: Future + Send + 'static,
Fut::Output: Send,
{
fn execute(&self, fut: Fut) {
tokio::spawn(fut);
}
}
fn main() -> Result<(), Error> {
proxmox_async::runtime::main(run())
}
@ -63,11 +50,12 @@ async fn handle_connection(socket: TcpStream, acceptor: Arc<SslAcceptor>) -> Res
stream.as_mut().accept().await?;
let mut http = hyper::server::conn::http2::Builder::new(H2SExecutor);
let mut http = hyper::server::conn::Http::new();
http.http2_only(true);
// increase window size: todo - find optimal size
let max_window_size = (1 << 31) - 2;
http.initial_stream_window_size(max_window_size);
http.initial_connection_window_size(max_window_size);
http.http2_initial_stream_window_size(max_window_size);
http.http2_initial_connection_window_size(max_window_size);
let service = hyper::service::service_fn(|_req: Request<Body>| {
println!("Got request");
@ -75,11 +63,8 @@ async fn handle_connection(socket: TcpStream, acceptor: Arc<SslAcceptor>) -> Res
let body = Body::from(buffer);
let response = Response::builder()
.status(hyper::http::StatusCode::OK)
.header(
hyper::http::header::CONTENT_TYPE,
"application/octet-stream",
)
.status(http::StatusCode::OK)
.header(http::header::CONTENT_TYPE, "application/octet-stream")
.body(body)
.unwrap();
future::ok::<_, Error>(response)

View File

@ -1,24 +1,9 @@
use std::future::Future;
use anyhow::Error;
use futures::*;
use hyper::{Body, Request, Response};
use tokio::net::{TcpListener, TcpStream};
#[derive(Clone, Copy)]
struct H2Executor;
impl<Fut> hyper::rt::Executor<Fut> for H2Executor
where
Fut: Future + Send + 'static,
Fut::Output: Send,
{
fn execute(&self, fut: Fut) {
tokio::spawn(fut);
}
}
fn main() -> Result<(), Error> {
proxmox_async::runtime::main(run())
}
@ -41,11 +26,12 @@ async fn run() -> Result<(), Error> {
async fn handle_connection(socket: TcpStream) -> Result<(), Error> {
socket.set_nodelay(true).unwrap();
let mut http = hyper::server::conn::http2::Builder::new(H2Executor);
let mut http = hyper::server::conn::Http::new();
http.http2_only(true);
// increase window size: todo - find optimal size
let max_window_size = (1 << 31) - 2;
http.initial_stream_window_size(max_window_size);
http.initial_connection_window_size(max_window_size);
http.http2_initial_stream_window_size(max_window_size);
http.http2_initial_connection_window_size(max_window_size);
let service = hyper::service::service_fn(|_req: Request<Body>| {
println!("Got request");
@ -53,11 +39,8 @@ async fn handle_connection(socket: TcpStream) -> Result<(), Error> {
let body = Body::from(buffer);
let response = Response::builder()
.status(hyper::http::StatusCode::OK)
.header(
hyper::http::header::CONTENT_TYPE,
"application/octet-stream",
)
.status(http::StatusCode::OK)
.header(http::header::CONTENT_TYPE, "application/octet-stream")
.body(body)
.unwrap();
future::ok::<_, Error>(response)

View File

@ -1,91 +0,0 @@
use std::{
fs::File,
io::Read,
time::{Duration, SystemTime},
};
use anyhow::{format_err, Error};
use pbs_tape::TapeWrite;
use proxmox_backup::tape::drive::{LtoTapeHandle, TapeDriver};
const URANDOM_PATH: &str = "/dev/urandom";
const CHUNK_SIZE: usize = 4 * 1024 * 1024; // 4 MiB
const LOG_LIMIT: usize = 4 * 1024 * 1024 * 1024; // 4 GiB
fn write_chunks<'a>(
mut writer: Box<dyn 'a + TapeWrite>,
blob_size: usize,
max_size: usize,
max_time: Duration,
) -> Result<(), Error> {
// prepare chunks in memory
let mut blob: Vec<u8> = vec![0u8; blob_size];
let mut file = File::open(URANDOM_PATH)?;
file.read_exact(&mut blob[..])?;
let start_time = SystemTime::now();
loop {
let iteration_time = SystemTime::now();
let mut count = 0;
let mut bytes_written = 0;
let mut idx = 0;
let mut incr_count = 0;
loop {
if writer.write_all(&blob)? {
eprintln!("LEOM reached");
break;
}
// modifying chunks a bit to mitigate compression/deduplication
blob[idx] = blob[idx].wrapping_add(1);
incr_count += 1;
if incr_count >= 256 {
incr_count = 0;
idx += 1;
}
count += 1;
bytes_written += blob_size;
if bytes_written > max_size {
break;
}
}
let elapsed = iteration_time.elapsed()?.as_secs_f64();
let elapsed_total = start_time.elapsed()?;
eprintln!(
"{:.2}s: wrote {} chunks ({:.2} MB at {:.2} MB/s, average: {:.2} MB/s)",
elapsed_total.as_secs_f64(),
count,
bytes_written as f64 / 1_000_000.0,
(bytes_written as f64) / (1_000_000.0 * elapsed),
(writer.bytes_written() as f64) / (1_000_000.0 * elapsed_total.as_secs_f64()),
);
if elapsed_total > max_time {
break;
}
}
Ok(())
}
fn main() -> Result<(), Error> {
let mut args = std::env::args_os();
args.next(); // binary name
let path = args.next().expect("no path to tape device given");
let file = File::open(path).map_err(|err| format_err!("could not open tape device: {err}"))?;
let mut drive = LtoTapeHandle::new(file)
.map_err(|err| format_err!("error creating drive handle: {err}"))?;
write_chunks(
drive
.write_file()
.map_err(|err| format_err!("error starting file write: {err}"))?,
CHUNK_SIZE,
LOG_LIMIT,
Duration::new(60 * 20, 0),
)
.map_err(|err| format_err!("error writing data to tape: {err}"))?;
Ok(())
}

View File

@ -5,10 +5,10 @@ extern crate proxmox_backup;
use anyhow::Error;
use std::io::{Read, Write};
use pbs_datastore::{Chunker, ChunkerImpl};
use pbs_datastore::Chunker;
struct ChunkWriter {
chunker: ChunkerImpl,
chunker: Chunker,
last_chunk: usize,
chunk_offset: usize,
@ -23,7 +23,7 @@ struct ChunkWriter {
impl ChunkWriter {
fn new(chunk_size: usize) -> Self {
ChunkWriter {
chunker: ChunkerImpl::new(chunk_size),
chunker: Chunker::new(chunk_size),
last_chunk: 0,
chunk_offset: 0,
chunk_count: 0,
@ -69,8 +69,7 @@ impl Write for ChunkWriter {
fn write(&mut self, data: &[u8]) -> std::result::Result<usize, std::io::Error> {
let chunker = &mut self.chunker;
let ctx = pbs_datastore::chunker::Context::default();
let pos = chunker.scan(data, &ctx);
let pos = chunker.scan(data);
if pos > 0 {
self.chunk_offset += pos;

View File

@ -1,6 +1,6 @@
extern crate proxmox_backup;
use pbs_datastore::{Chunker, ChunkerImpl};
use pbs_datastore::Chunker;
fn main() {
let mut buffer = Vec::new();
@ -12,7 +12,7 @@ fn main() {
buffer.push(byte);
}
}
let mut chunker = ChunkerImpl::new(64 * 1024);
let mut chunker = Chunker::new(64 * 1024);
let count = 5;
@ -23,9 +23,8 @@ fn main() {
for _i in 0..count {
let mut pos = 0;
let mut _last = 0;
let ctx = pbs_datastore::chunker::Context::default();
while pos < buffer.len() {
let k = chunker.scan(&buffer[pos..], &ctx);
let k = chunker.scan(&buffer[pos..]);
if k == 0 {
//println!("LAST {}", pos);
break;

View File

@ -1,10 +1,9 @@
use std::str::FromStr;
use anyhow::Error;
use futures::*;
extern crate proxmox_backup;
use pbs_client::ChunkStream;
use proxmox_human_byte::HumanByte;
// Test Chunker with real data read from a file.
//
@ -22,22 +21,12 @@ fn main() {
async fn run() -> Result<(), Error> {
let file = tokio::fs::File::open("random-test.dat").await?;
let mut args = std::env::args();
args.next();
let buffer_size = args.next().unwrap_or("8k".to_string());
let buffer_size = HumanByte::from_str(&buffer_size)?;
println!("Using buffer size {buffer_size}");
let stream = tokio_util::codec::FramedRead::with_capacity(
file,
tokio_util::codec::BytesCodec::new(),
buffer_size.as_u64() as usize,
)
.map_err(Error::from);
let stream = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
.map_ok(|bytes| bytes.to_vec())
.map_err(Error::from);
//let chunk_stream = FixedChunkStream::new(stream, 4*1024*1024);
let mut chunk_stream = ChunkStream::new(stream, None, None, None);
let mut chunk_stream = ChunkStream::new(stream, None);
let start_time = std::time::Instant::now();
@ -51,7 +40,7 @@ async fn run() -> Result<(), Error> {
repeat += 1;
stream_len += chunk.len();
//println!("Got chunk {}", chunk.len());
println!("Got chunk {}", chunk.len());
}
let speed =

View File

@ -18,7 +18,7 @@ async fn upload_speed() -> Result<f64, Error> {
let backup_time = proxmox_time::epoch_i64();
let client = BackupWriter::start(
&client,
client,
None,
datastore,
&BackupNamespace::root(),

23
pbs-api-types/Cargo.toml Normal file
View File

@ -0,0 +1,23 @@
[package]
name = "pbs-api-types"
version = "0.1.0"
authors.workspace = true
edition.workspace = true
description = "general API type helpers for PBS"
[dependencies]
anyhow.workspace = true
hex.workspace = true
lazy_static.workspace = true
percent-encoding.workspace = true
regex.workspace = true
serde.workspace = true
serde_plain.workspace = true
proxmox-auth-api = { workspace = true, features = [ "api-types" ] }
proxmox-human-byte.workspace = true
proxmox-lang.workspace = true
proxmox-schema = { workspace = true, features = [ "api-macro" ] }
proxmox-serde.workspace = true
proxmox-time.workspace = true
proxmox-uuid = { workspace = true, features = [ "serde" ] }

291
pbs-api-types/src/acl.rs Normal file
View File

@ -0,0 +1,291 @@
use std::str::FromStr;
use serde::de::{value, IntoDeserializer};
use serde::{Deserialize, Serialize};
use proxmox_lang::constnamedbitmap;
use proxmox_schema::{
api, const_regex, ApiStringFormat, BooleanSchema, EnumEntry, Schema, StringSchema,
};
const_regex! {
pub ACL_PATH_REGEX = concat!(r"^(?:/|", r"(?:/", PROXMOX_SAFE_ID_REGEX_STR!(), ")+", r")$");
}
// define Privilege bitfield
constnamedbitmap! {
/// Contains a list of privilege name to privilege value mappings.
///
/// The names are used when displaying/persisting privileges anywhere, the values are used to
/// allow easy matching of privileges as bitflags.
PRIVILEGES: u64 => {
/// Sys.Audit allows knowing about the system and its status
PRIV_SYS_AUDIT("Sys.Audit");
/// Sys.Modify allows modifying system-level configuration
PRIV_SYS_MODIFY("Sys.Modify");
/// Sys.PowerManagement allows powering off/rebooting/... the system
PRIV_SYS_POWER_MANAGEMENT("Sys.PowerManagement");
/// Datastore.Audit allows knowing about a datastore,
/// including reading the configuration entry and listing its contents
PRIV_DATASTORE_AUDIT("Datastore.Audit");
/// Datastore.Allocate allows creating or deleting datastores
PRIV_DATASTORE_ALLOCATE("Datastore.Allocate");
/// Datastore.Modify allows modifying a datastore and its contents
PRIV_DATASTORE_MODIFY("Datastore.Modify");
/// Datastore.Read allows reading arbitrary backup contents
PRIV_DATASTORE_READ("Datastore.Read");
/// Allows verifying a datastore
PRIV_DATASTORE_VERIFY("Datastore.Verify");
/// Datastore.Backup allows Datastore.Read|Verify and creating new snapshots,
/// but also requires backup ownership
PRIV_DATASTORE_BACKUP("Datastore.Backup");
/// Datastore.Prune allows deleting snapshots,
/// but also requires backup ownership
PRIV_DATASTORE_PRUNE("Datastore.Prune");
/// Permissions.Modify allows modifying ACLs
PRIV_PERMISSIONS_MODIFY("Permissions.Modify");
/// Remote.Audit allows reading remote.cfg and sync.cfg entries
PRIV_REMOTE_AUDIT("Remote.Audit");
/// Remote.Modify allows modifying remote.cfg
PRIV_REMOTE_MODIFY("Remote.Modify");
/// Remote.Read allows reading data from a configured `Remote`
PRIV_REMOTE_READ("Remote.Read");
/// Sys.Console allows access to the system's console
PRIV_SYS_CONSOLE("Sys.Console");
/// Tape.Audit allows reading tape backup configuration and status
PRIV_TAPE_AUDIT("Tape.Audit");
/// Tape.Modify allows modifying tape backup configuration
PRIV_TAPE_MODIFY("Tape.Modify");
/// Tape.Write allows writing tape media
PRIV_TAPE_WRITE("Tape.Write");
/// Tape.Read allows reading tape backup configuration and media contents
PRIV_TAPE_READ("Tape.Read");
/// Realm.Allocate allows viewing, creating, modifying and deleting realms
PRIV_REALM_ALLOCATE("Realm.Allocate");
}
}
pub fn privs_to_priv_names(privs: u64) -> Vec<&'static str> {
PRIVILEGES
.iter()
.fold(Vec::new(), |mut priv_names, (name, value)| {
if value & privs != 0 {
priv_names.push(name);
}
priv_names
})
}
/// Admin always has all privileges. It can do everything except a few actions
/// which are limited to the ``root@pam`` superuser
pub const ROLE_ADMIN: u64 = u64::MAX;
/// NoAccess can be used to remove privileges from specific (sub-)paths
pub const ROLE_NO_ACCESS: u64 = 0;
#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Audit can view configuration and status information, but not modify it.
pub const ROLE_AUDIT: u64 = 0
| PRIV_SYS_AUDIT
| PRIV_DATASTORE_AUDIT;
#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Datastore.Admin can do anything on the datastore.
pub const ROLE_DATASTORE_ADMIN: u64 = 0
| PRIV_DATASTORE_AUDIT
| PRIV_DATASTORE_MODIFY
| PRIV_DATASTORE_READ
| PRIV_DATASTORE_VERIFY
| PRIV_DATASTORE_BACKUP
| PRIV_DATASTORE_PRUNE;
#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Datastore.Reader can read/verify datastore content and do restore
pub const ROLE_DATASTORE_READER: u64 = 0
| PRIV_DATASTORE_AUDIT
| PRIV_DATASTORE_VERIFY
| PRIV_DATASTORE_READ;
#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Datastore.Backup can do backup and restore, but no prune.
pub const ROLE_DATASTORE_BACKUP: u64 = 0
| PRIV_DATASTORE_BACKUP;
#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Datastore.PowerUser can do backup, restore, and prune.
pub const ROLE_DATASTORE_POWERUSER: u64 = 0
| PRIV_DATASTORE_PRUNE
| PRIV_DATASTORE_BACKUP;
#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Datastore.Audit can audit the datastore.
pub const ROLE_DATASTORE_AUDIT: u64 = 0
| PRIV_DATASTORE_AUDIT;
#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Remote.Audit can audit the remote
pub const ROLE_REMOTE_AUDIT: u64 = 0
| PRIV_REMOTE_AUDIT;
#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Remote.Admin can do anything on the remote.
pub const ROLE_REMOTE_ADMIN: u64 = 0
| PRIV_REMOTE_AUDIT
| PRIV_REMOTE_MODIFY
| PRIV_REMOTE_READ;
#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Remote.SyncOperator can read and prune on the remote.
pub const ROLE_REMOTE_SYNC_OPERATOR: u64 = 0
| PRIV_REMOTE_AUDIT
| PRIV_REMOTE_READ;
#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Tape.Audit can audit the tape backup configuration and media content
pub const ROLE_TAPE_AUDIT: u64 = 0
| PRIV_TAPE_AUDIT;
#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Tape.Admin can do anything on the tape backup
pub const ROLE_TAPE_ADMIN: u64 = 0
| PRIV_TAPE_AUDIT
| PRIV_TAPE_MODIFY
| PRIV_TAPE_READ
| PRIV_TAPE_WRITE;
#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Tape.Operator can do tape backup and restore (but no configuration changes)
pub const ROLE_TAPE_OPERATOR: u64 = 0
| PRIV_TAPE_AUDIT
| PRIV_TAPE_READ
| PRIV_TAPE_WRITE;
#[rustfmt::skip]
#[allow(clippy::identity_op)]
/// Tape.Reader can read and inspect tape content
pub const ROLE_TAPE_READER: u64 = 0
| PRIV_TAPE_AUDIT
| PRIV_TAPE_READ;
/// NoAccess can be used to remove privileges from specific (sub-)paths
pub const ROLE_NAME_NO_ACCESS: &str = "NoAccess";
#[api(
type_text: "<role>",
)]
#[repr(u64)]
#[derive(Serialize, Deserialize)]
/// Enum representing roles via their [PRIVILEGES] combination.
///
/// Since privileges are implemented as bitflags, each unique combination of privileges maps to a
/// single, unique `u64` value that is used in this enum definition.
pub enum Role {
/// Administrator
Admin = ROLE_ADMIN,
/// Auditor
Audit = ROLE_AUDIT,
/// Disable Access
NoAccess = ROLE_NO_ACCESS,
/// Datastore Administrator
DatastoreAdmin = ROLE_DATASTORE_ADMIN,
/// Datastore Reader (inspect datastore content and do restores)
DatastoreReader = ROLE_DATASTORE_READER,
/// Datastore Backup (backup and restore owned backups)
DatastoreBackup = ROLE_DATASTORE_BACKUP,
/// Datastore PowerUser (backup, restore and prune owned backups)
DatastorePowerUser = ROLE_DATASTORE_POWERUSER,
/// Datastore Auditor
DatastoreAudit = ROLE_DATASTORE_AUDIT,
/// Remote Auditor
RemoteAudit = ROLE_REMOTE_AUDIT,
/// Remote Administrator
RemoteAdmin = ROLE_REMOTE_ADMIN,
/// Synchronisation Operator
RemoteSyncOperator = ROLE_REMOTE_SYNC_OPERATOR,
/// Tape Auditor
TapeAudit = ROLE_TAPE_AUDIT,
/// Tape Administrator
TapeAdmin = ROLE_TAPE_ADMIN,
/// Tape Operator
TapeOperator = ROLE_TAPE_OPERATOR,
/// Tape Reader
TapeReader = ROLE_TAPE_READER,
}
impl FromStr for Role {
type Err = value::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
Self::deserialize(s.into_deserializer())
}
}
pub const ACL_PATH_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&ACL_PATH_REGEX);
pub const ACL_PATH_SCHEMA: Schema = StringSchema::new("Access control path.")
.format(&ACL_PATH_FORMAT)
.min_length(1)
.max_length(128)
.schema();
pub const ACL_PROPAGATE_SCHEMA: Schema =
BooleanSchema::new("Allow to propagate (inherit) permissions.")
.default(true)
.schema();
pub const ACL_UGID_TYPE_SCHEMA: Schema = StringSchema::new("Type of 'ugid' property.")
.format(&ApiStringFormat::Enum(&[
EnumEntry::new("user", "User"),
EnumEntry::new("group", "Group"),
]))
.schema();
#[api(
properties: {
propagate: {
schema: ACL_PROPAGATE_SCHEMA,
},
path: {
schema: ACL_PATH_SCHEMA,
},
ugid_type: {
schema: ACL_UGID_TYPE_SCHEMA,
},
ugid: {
type: String,
description: "User or Group ID.",
},
roleid: {
type: Role,
}
}
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
/// ACL list entry.
pub struct AclListItem {
pub path: String,
pub ugid: String,
pub ugid_type: String,
pub propagate: bool,
pub roleid: String,
}
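// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the file above): how the bitflag privileges
// and role constants defined here might be combined and inspected. The `use`
// paths assume these items are re-exported at the crate root; the standalone
// `main` exists only for this example.

use std::str::FromStr;

use pbs_api_types::{
    privs_to_priv_names, Role, PRIV_DATASTORE_PRUNE, ROLE_DATASTORE_BACKUP,
    ROLE_DATASTORE_POWERUSER,
};

fn main() {
    // A role is just a combination of privilege bits, so checking whether it
    // grants a privilege is a plain bit test.
    assert!((ROLE_DATASTORE_POWERUSER & PRIV_DATASTORE_PRUNE) != 0);

    // Convert a privilege bitmap back into its human-readable names.
    let names = privs_to_priv_names(ROLE_DATASTORE_POWERUSER);
    assert_eq!(names, vec!["Datastore.Backup", "Datastore.Prune"]);

    // Role names parse from their string form, e.g. as received via the API.
    let role = Role::from_str("DatastoreBackup").unwrap();
    assert_eq!(role as u64, ROLE_DATASTORE_BACKUP);
}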

View File

@ -0,0 +1,78 @@
//! Predefined Regular Expressions
//!
//! This is a collection of useful regular expressions
use lazy_static::lazy_static;
use regex::Regex;
#[rustfmt::skip]
#[macro_export]
macro_rules! IPV4OCTET { () => (r"(?:25[0-5]|(?:2[0-4]|1[0-9]|[1-9])?[0-9])") }
#[rustfmt::skip]
#[macro_export]
macro_rules! IPV6H16 { () => (r"(?:[0-9a-fA-F]{1,4})") }
#[rustfmt::skip]
#[macro_export]
macro_rules! IPV6LS32 { () => (concat!(r"(?:(?:", IPV4RE!(), "|", IPV6H16!(), ":", IPV6H16!(), "))" )) }
/// Returns the regular expression string to match IPv4 addresses
#[rustfmt::skip]
#[macro_export]
macro_rules! IPV4RE { () => (concat!(r"(?:(?:", IPV4OCTET!(), r"\.){3}", IPV4OCTET!(), ")")) }
/// Returns the regular expression string to match IPv6 addresses
#[rustfmt::skip]
#[macro_export]
macro_rules! IPV6RE { () => (concat!(r"(?:",
r"(?:(?:", r"(?:", IPV6H16!(), r":){6})", IPV6LS32!(), r")|",
r"(?:(?:", r"::(?:", IPV6H16!(), r":){5})", IPV6LS32!(), r")|",
r"(?:(?:(?:", IPV6H16!(), r")?::(?:", IPV6H16!(), r":){4})", IPV6LS32!(), r")|",
r"(?:(?:(?:(?:", IPV6H16!(), r":){0,1}", IPV6H16!(), r")?::(?:", IPV6H16!(), r":){3})", IPV6LS32!(), r")|",
r"(?:(?:(?:(?:", IPV6H16!(), r":){0,2}", IPV6H16!(), r")?::(?:", IPV6H16!(), r":){2})", IPV6LS32!(), r")|",
r"(?:(?:(?:(?:", IPV6H16!(), r":){0,3}", IPV6H16!(), r")?::(?:", IPV6H16!(), r":){1})", IPV6LS32!(), r")|",
r"(?:(?:(?:(?:", IPV6H16!(), r":){0,4}", IPV6H16!(), r")?::", ")", IPV6LS32!(), r")|",
r"(?:(?:(?:(?:", IPV6H16!(), r":){0,5}", IPV6H16!(), r")?::", ")", IPV6H16!(), r")|",
r"(?:(?:(?:(?:", IPV6H16!(), r":){0,6}", IPV6H16!(), r")?::", ")))"))
}
/// Returns the regular expression string to match IP addresses (v4 or v6)
#[rustfmt::skip]
#[macro_export]
macro_rules! IPRE { () => (concat!(r"(?:", IPV4RE!(), "|", IPV6RE!(), ")")) }
/// Regular expression string to match IP addresses where IPv6 addresses require brackets around
/// them, while for IPv4 they are forbidden.
#[rustfmt::skip]
#[macro_export]
macro_rules! IPRE_BRACKET { () => (
concat!(r"(?:",
IPV4RE!(),
r"|\[(?:",
IPV6RE!(),
r")\]",
r")"))
}
lazy_static! {
pub static ref IP_REGEX: Regex = Regex::new(concat!(r"^", IPRE!(), r"$")).unwrap();
pub static ref IP_BRACKET_REGEX: Regex =
Regex::new(concat!(r"^", IPRE_BRACKET!(), r"$")).unwrap();
pub static ref SHA256_HEX_REGEX: Regex = Regex::new(r"^[a-f0-9]{64}$").unwrap();
pub static ref SYSTEMD_DATETIME_REGEX: Regex =
Regex::new(r"^\d{4}-\d{2}-\d{2}( \d{2}:\d{2}(:\d{2})?)?$").unwrap();
}
#[test]
fn test_regexes() {
assert!(IP_REGEX.is_match("127.0.0.1"));
assert!(IP_REGEX.is_match("::1"));
assert!(IP_REGEX.is_match("2014:b3a::27"));
assert!(IP_REGEX.is_match("2014:b3a::192.168.0.1"));
assert!(IP_REGEX.is_match("2014:b3a:0102:adf1:1234:4321:4afA:BCDF"));
assert!(IP_BRACKET_REGEX.is_match("127.0.0.1"));
assert!(IP_BRACKET_REGEX.is_match("[::1]"));
assert!(IP_BRACKET_REGEX.is_match("[2014:b3a::27]"));
assert!(IP_BRACKET_REGEX.is_match("[2014:b3a::192.168.0.1]"));
assert!(IP_BRACKET_REGEX.is_match("[2014:b3a:0102:adf1:1234:4321:4afA:BCDF]"));
}

View File

@ -0,0 +1,95 @@
use std::fmt::{self, Display};
use anyhow::Error;
use serde::{Deserialize, Serialize};
use proxmox_schema::api;
#[api(default: "encrypt")]
#[derive(Copy, Clone, Debug, Eq, PartialEq, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
/// Defines whether data is encrypted (using an AEAD cipher), only signed, or neither.
pub enum CryptMode {
/// Don't encrypt.
None,
/// Encrypt.
Encrypt,
/// Only sign.
SignOnly,
}
#[derive(Debug, Eq, PartialEq, Hash, Clone, Deserialize, Serialize)]
#[serde(transparent)]
/// 32-byte fingerprint, usually calculated with SHA256.
pub struct Fingerprint {
#[serde(with = "bytes_as_fingerprint")]
bytes: [u8; 32],
}
impl Fingerprint {
pub fn new(bytes: [u8; 32]) -> Self {
Self { bytes }
}
pub fn bytes(&self) -> &[u8; 32] {
&self.bytes
}
pub fn signature(&self) -> String {
as_fingerprint(&self.bytes)
}
}
/// Display as short key ID
impl Display for Fingerprint {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", as_fingerprint(&self.bytes[0..8]))
}
}
impl std::str::FromStr for Fingerprint {
type Err = Error;
fn from_str(s: &str) -> Result<Self, Error> {
let mut tmp = s.to_string();
tmp.retain(|c| c != ':');
let mut bytes = [0u8; 32];
hex::decode_to_slice(&tmp, &mut bytes)?;
Ok(Fingerprint::new(bytes))
}
}
fn as_fingerprint(bytes: &[u8]) -> String {
hex::encode(bytes)
.as_bytes()
.chunks(2)
.map(|v| unsafe { std::str::from_utf8_unchecked(v) }) // it's a hex string
.collect::<Vec<&str>>()
.join(":")
}
pub mod bytes_as_fingerprint {
use std::mem::MaybeUninit;
use serde::{Deserialize, Deserializer, Serializer};
pub fn serialize<S>(bytes: &[u8; 32], serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let s = super::as_fingerprint(bytes);
serializer.serialize_str(&s)
}
pub fn deserialize<'de, D>(deserializer: D) -> Result<[u8; 32], D::Error>
where
D: Deserializer<'de>,
{
// TODO: more efficiently implement with a Visitor implementing visit_str using split() and
// hex::decode by-byte
let mut s = String::deserialize(deserializer)?;
s.retain(|c| c != ':');
let mut out = MaybeUninit::<[u8; 32]>::uninit();
hex::decode_to_slice(s.as_bytes(), unsafe { &mut (*out.as_mut_ptr())[..] })
.map_err(serde::de::Error::custom)?;
Ok(unsafe { out.assume_init() })
}
}
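// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the file above): parsing, displaying and
// serializing the types defined here. The `use` paths assume the items are
// re-exported at the crate root; `serde_plain` is a dependency of this crate.

use std::str::FromStr;

use anyhow::Error;
use pbs_api_types::{CryptMode, Fingerprint};

fn main() -> Result<(), Error> {
    // `FromStr` strips colons, so plain or colon-separated hex both work,
    // as long as the input describes exactly 32 bytes.
    let hex = "aa".repeat(32); // 64 hex digits = 32 bytes
    let fp = Fingerprint::from_str(&hex)?;
    assert_eq!(fp.bytes(), &[0xaa; 32]);

    // `Display` prints the short key ID (first 8 bytes) ...
    assert_eq!(fp.to_string(), "aa:aa:aa:aa:aa:aa:aa:aa");
    // ... while `signature()` returns the full colon-separated fingerprint.
    assert_eq!(fp.signature().len(), 64 + 31);

    // `CryptMode` serializes in kebab-case, matching its API representation.
    assert_eq!(serde_plain::to_string(&CryptMode::SignOnly).unwrap(), "sign-only");

    Ok(())
}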

Some files were not shown because too many files have changed in this diff.