Compare commits


No commits in common. "master" and "v3.0.1" have entirely different histories.

533 changed files with 18141 additions and 27935 deletions

View File

@ -3,6 +3,3 @@
directory = "/usr/share/cargo/registry"
[source.crates-io]
replace-with = "debian-packages"
[profile.release]
debug=true

16
.gitignore vendored
View File

@ -14,19 +14,9 @@
/*.deb
/*.dsc
/*.tar*
/.do-cargo-build
/Cargo.lock
/docs/*/synopsis.rst
/docs/config/*/config.rst
/docs/config/acl/roles.rst
/docs/output
/docs/proxmox-backup-client/catalog-shell-synopsis.rst
# all services are generated from a .in file to set the libexec path
/etc/*.service
/proxmox-backup-server-dpkg-contents.txt
/www/.lint-incremental
/www/js/
/etc/proxmox-backup-proxy.service
/etc/proxmox-backup.service
/target
__pycache__/
build/
local.mak
target/

View File

@ -1,5 +1,5 @@
[workspace.package]
version = "3.4.1"
version = "3.0.1"
authors = [
"Dietmar Maurer <dietmar@proxmox.com>",
"Dominik Csapak <d.csapak@proxmox.com>",
@ -13,7 +13,6 @@ authors = [
edition = "2021"
license = "AGPL-3"
repository = "https://git.proxmox.com/?p=proxmox-backup.git"
rust-version = "1.81"
[package]
name = "proxmox-backup"
@ -29,6 +28,7 @@ exclude = [ "build", "debian", "tests/catar_data/test_symlink/symlink1"]
[workspace]
members = [
"pbs-api-types",
"pbs-buildcfg",
"pbs-client",
"pbs-config",
@ -43,6 +43,7 @@ members = [
"proxmox-backup-client",
"proxmox-file-restore",
"proxmox-restore-daemon",
"proxmox-rrd",
"pxar-bin",
]
@ -53,51 +54,41 @@ path = "src/lib.rs"
[workspace.dependencies]
# proxmox workspace
proxmox-apt = { version = "0.11", features = [ "cache" ] }
proxmox-apt-api-types = "1.0.1"
proxmox-async = "0.4"
proxmox-auth-api = "0.4"
proxmox-auth-api = "0.3"
proxmox-borrow = "1"
proxmox-compression = "0.2"
proxmox-config-digest = "0.1.0"
proxmox-daemon = "0.1.0"
proxmox-fuse = "0.1.3"
proxmox-http = { version = "0.9.5", features = [ "client", "http-helpers", "websocket" ] } # see below
proxmox-http = { version = "0.9.0", features = [ "client", "http-helpers", "websocket" ] } # see below
proxmox-human-byte = "0.1"
proxmox-io = "1.0.1" # tools and client use "tokio" feature
proxmox-lang = "1.1"
proxmox-log = "0.2.6"
proxmox-ldap = "0.2.1"
proxmox-metrics = "0.3.1"
proxmox-notify = "0.5.1"
proxmox-openid = "0.10.0"
proxmox-rest-server = { version = "0.8.9", features = [ "templates" ] }
proxmox-ldap = "0.2"
proxmox-metrics = "0.3"
proxmox-rest-server = { version = "0.4.1", features = [ "templates" ] }
# some use "cli", some use "cli" and "server", pbs-config uses nothing
proxmox-router = { version = "3.0.0", default-features = false }
proxmox-rrd = "0.4"
proxmox-rrd-api-types = "1.0.2"
# everything but pbs-config and pbs-client use "api-macro"
proxmox-schema = "4"
proxmox-section-config = "2"
proxmox-router = { version = "1.3.1", default_features = false }
# everything but pbs-config and pbs-client ues "api-macro"
proxmox-schema = "1.3.6"
proxmox-section-config = "1"
proxmox-serde = "0.1.1"
proxmox-shared-cache = "0.1"
proxmox-shared-memory = "0.3.0"
proxmox-sortable-macro = "0.1.2"
proxmox-subscription = { version = "0.5.0", features = [ "api-types" ] }
proxmox-sys = "0.6.7"
proxmox-systemd = "0.1"
proxmox-tfa = { version = "5", features = [ "api", "api-types" ] }
proxmox-time = "2"
proxmox-uuid = { version = "1", features = [ "serde" ] }
proxmox-worker-task = "0.1"
pbs-api-types = "0.2.2"
proxmox-subscription = { version = "0.4", features = [ "api-types" ] }
proxmox-sys = "0.5.0"
proxmox-tfa = { version = "4", features = [ "api", "api-types" ] }
proxmox-time = "1.1.2"
proxmox-uuid = "1"
# other proxmox crates
pathpatterns = "0.3"
proxmox-acme = "0.5.3"
pxar = "0.12.1"
pathpatterns = "0.1.2"
proxmox-acme-rs = "0.4"
proxmox-apt = "0.10.2"
proxmox-openid = "0.10.0"
pxar = "0.10.2"
# PBS workspace
pbs-api-types = { path = "pbs-api-types" }
pbs-buildcfg = { path = "pbs-buildcfg" }
pbs-client = { path = "pbs-client" }
pbs-config = { path = "pbs-config" }
@ -107,28 +98,28 @@ pbs-key-config = { path = "pbs-key-config" }
pbs-pxar-fuse = { path = "pbs-pxar-fuse" }
pbs-tape = { path = "pbs-tape" }
pbs-tools = { path = "pbs-tools" }
proxmox-rrd = { path = "proxmox-rrd" }
# regular crates
anyhow = "1.0"
async-trait = "0.1.56"
apt-pkg-native = "0.3.2"
base64 = "0.13"
bitflags = "2.4"
bitflags = "1.2.1"
bytes = "1.0"
cidr = "0.2.1"
crc32fast = "1"
const_format = "0.2"
crossbeam-channel = "0.5"
endian_trait = { version = "0.6", features = ["arrays"] }
env_logger = "0.11"
env_logger = "0.9"
flate2 = "1.0"
foreign-types = "0.3"
futures = "0.3"
h2 = { version = "0.4", features = [ "legacy", "stream" ] }
h2 = { version = "0.3", features = [ "stream" ] }
handlebars = "3.0"
hex = "0.4.3"
hickory-resolver = { version = "0.24.1", default-features = false, features = [ "system-config", "tokio-runtime" ] }
hyper = { version = "0.14", features = [ "backports", "deprecated", "full" ] }
http = "0.2"
hyper = { version = "0.14", features = [ "full" ] }
lazy_static = "1.4"
libc = "0.2"
log = "0.4.17"
nix = "0.26.1"
@ -141,7 +132,9 @@ pin-project-lite = "0.2"
regex = "1.5.5"
rustyline = "9"
serde = { version = "1.0", features = ["derive"] }
serde_cbor = "0.11.1"
serde_json = "1.0"
serde_plain = "1"
siphasher = "0.3"
syslog = "6"
tar = "0.4"
@ -151,29 +144,31 @@ tokio = "1.6"
tokio-openssl = "0.6.1"
tokio-stream = "0.1.0"
tokio-util = { version = "0.7", features = [ "io" ] }
tracing = "0.1"
tower-service = "0.3.0"
udev = "0.4"
url = "2.1"
walkdir = "2"
xdg = "2.2"
zstd = { version = "0.12", features = [ "bindgen" ] }
zstd-safe = "6.0"
zstd = { version = ">= 0.6, < 0.13", features = [ "bindgen" ] }
[dependencies]
anyhow.workspace = true
async-trait.workspace = true
apt-pkg-native.workspace = true
base64.workspace = true
bitflags.workspace = true
bytes.workspace = true
cidr.workspace = true
const_format.workspace = true
crc32fast.workspace = true
crossbeam-channel.workspace = true
endian_trait.workspace = true
flate2.workspace = true
futures.workspace = true
h2.workspace = true
handlebars.workspace = true
hex.workspace = true
http.workspace = true
hyper.workspace = true
lazy_static.workspace = true
libc.workspace = true
log.workspace = true
nix.workspace = true
@ -186,6 +181,7 @@ regex.workspace = true
rustyline.workspace = true
serde.workspace = true
serde_json.workspace = true
siphasher.workspace = true
syslog.workspace = true
termcolor.workspace = true
thiserror.workspace = true
@ -193,53 +189,44 @@ tokio = { workspace = true, features = [ "fs", "io-util", "io-std", "macros", "n
tokio-openssl.workspace = true
tokio-stream.workspace = true
tokio-util = { workspace = true, features = [ "codec" ] }
tracing.workspace = true
tower-service.workspace = true
udev.workspace = true
url.workspace = true
walkdir.workspace = true
xdg.workspace = true
zstd.workspace = true
#valgrind_request = { git = "https://github.com/edef1c/libvalgrind_request", version = "1.1.0", optional = true }
# proxmox workspace
proxmox-apt.workspace = true
proxmox-apt-api-types.workspace = true
proxmox-async.workspace = true
proxmox-auth-api = { workspace = true, features = [ "api", "pam-authenticator" ] }
proxmox-compression.workspace = true
proxmox-config-digest.workspace = true
proxmox-daemon.workspace = true
proxmox-http = { workspace = true, features = [ "client-trait", "proxmox-async", "rate-limited-stream" ] } # pbs-client doesn't use these
proxmox-human-byte.workspace = true
proxmox-io.workspace = true
proxmox-lang.workspace = true
proxmox-log.workspace = true
proxmox-ldap.workspace = true
proxmox-metrics.workspace = true
proxmox-notify = { workspace = true, features = [ "pbs-context" ] }
proxmox-openid.workspace = true
proxmox-rest-server = { workspace = true, features = [ "rate-limited-stream" ] }
proxmox-router = { workspace = true, features = [ "cli", "server"] }
proxmox-schema = { workspace = true, features = [ "api-macro" ] }
proxmox-section-config.workspace = true
proxmox-serde = { workspace = true, features = [ "serde_json" ] }
proxmox-shared-cache.workspace = true
proxmox-shared-memory.workspace = true
proxmox-sortable-macro.workspace = true
proxmox-subscription.workspace = true
proxmox-sys = { workspace = true, features = [ "timer" ] }
proxmox-systemd.workspace = true
proxmox-tfa.workspace = true
proxmox-time.workspace = true
proxmox-uuid.workspace = true
proxmox-worker-task.workspace = true
pbs-api-types.workspace = true
# in their respective repo
proxmox-acme.workspace = true
pathpatterns.workspace = true
proxmox-acme-rs.workspace = true
proxmox-apt.workspace = true
proxmox-openid.workspace = true
pxar.workspace = true
# proxmox-backup workspace/internal crates
pbs-api-types.workspace = true
pbs-buildcfg.workspace = true
pbs-client.workspace = true
pbs-config.workspace = true
@ -248,49 +235,34 @@ pbs-key-config.workspace = true
pbs-tape.workspace = true
pbs-tools.workspace = true
proxmox-rrd.workspace = true
proxmox-rrd-api-types.workspace = true
# Local path overrides
# NOTE: You must run `cargo update` after changing this for it to take effect!
[patch.crates-io]
#pbs-api-types = { path = "../proxmox/pbs-api-types" }
#proxmox-acme = { path = "../proxmox/proxmox-acme" }
#proxmox-apt = { path = "../proxmox/proxmox-apt" }
#proxmox-apt-api-types = { path = "../proxmox/proxmox-apt-api-types" }
#proxmox-acme-rs = { path = "../proxmox-acme-rs" }
#proxmox-async = { path = "../proxmox/proxmox-async" }
#proxmox-auth-api = { path = "../proxmox/proxmox-auth-api" }
#proxmox-borrow = { path = "../proxmox/proxmox-borrow" }
#proxmox-compression = { path = "../proxmox/proxmox-compression" }
#proxmox-config-digest = { path = "../proxmox/proxmox-config-digest" }
#proxmox-daemon = { path = "../proxmox/proxmox-daemon" }
#proxmox-fuse = { path = "../proxmox-fuse" }
#proxmox-http = { path = "../proxmox/proxmox-http" }
#proxmox-human-byte = { path = "../proxmox/proxmox-human-byte" }
#proxmox-io = { path = "../proxmox/proxmox-io" }
#proxmox-lang = { path = "../proxmox/proxmox-lang" }
#proxmox-log = { path = "../proxmox/proxmox-log" }
#proxmox-ldap = { path = "../proxmox/proxmox-ldap" }
#proxmox-metrics = { path = "../proxmox/proxmox-metrics" }
#proxmox-notify = { path = "../proxmox/proxmox-notify" }
#proxmox-openid = { path = "../proxmox/proxmox-openid" }
#proxmox-rest-server = { path = "../proxmox/proxmox-rest-server" }
#proxmox-router = { path = "../proxmox/proxmox-router" }
#proxmox-rrd = { path = "../proxmox/proxmox-rrd" }
#proxmox-rrd-api-types = { path = "../proxmox/proxmox-rrd-api-types" }
#proxmox-schema = { path = "../proxmox/proxmox-schema" }
#proxmox-section-config = { path = "../proxmox/proxmox-section-config" }
#proxmox-serde = { path = "../proxmox/proxmox-serde" }
#proxmox-shared-memory = { path = "../proxmox/proxmox-shared-memory" }
#proxmox-sortable-macro = { path = "../proxmox/proxmox-sortable-macro" }
#proxmox-subscription = { path = "../proxmox/proxmox-subscription" }
#proxmox-sys = { path = "../proxmox/proxmox-sys" }
#proxmox-systemd = { path = "../proxmox/proxmox-systemd" }
#proxmox-tfa = { path = "../proxmox/proxmox-tfa" }
#proxmox-time = { path = "../proxmox/proxmox-time" }
#proxmox-uuid = { path = "../proxmox/proxmox-uuid" }
#proxmox-worker-task = { path = "../proxmox/proxmox-worker-task" }
#pathpatterns = {path = "../pathpatterns" }
#proxmox-apt = { path = "../proxmox-apt" }
#proxmox-openid = { path = "../proxmox-openid-rs" }
#pxar = { path = "../pxar" }
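As the note above says, changes to these path overrides only take effect after the lockfile is refreshed. A minimal sketch of the workflow, assuming a sibling ``../proxmox`` checkout and using ``proxmox-sys`` purely as an example: uncomment the matching line, then run

    # cargo update
    # cargo build --all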
[features]

View File

@ -1,12 +1,10 @@
include /usr/share/dpkg/default.mk
include /usr/share/rustc/architecture.mk
include defines.mk
PACKAGE := proxmox-backup
ARCH := $(DEB_BUILD_ARCH)
export DEB_HOST_RUST_TYPE
SUBDIRS := etc www docs templates
SUBDIRS := etc www docs
# Binaries usable by users
USR_BIN := \
@ -20,7 +18,7 @@ USR_BIN := \
# Binaries usable by admins
USR_SBIN := \
proxmox-backup-manager \
proxmox-backup-debug
proxmox-backup-debug \
# Binaries for services:
SERVICE_BIN := \
@ -35,23 +33,15 @@ RESTORE_BIN := \
SUBCRATES != cargo metadata --no-deps --format-version=1 \
| jq -r .workspace_members'[]' \
| grep "$$PWD/" \
| sed -e "s!.*$$PWD/!!g" -e 's/\#.*$$//g' -e 's/)$$//g'
STATIC_TARGET_DIR := target/static-build
| awk '!/^proxmox-backup[[:space:]]/ { printf "%s ", $$1 }'
ifeq ($(BUILD_MODE), release)
CARGO_BUILD_ARGS += --release --target $(DEB_HOST_RUST_TYPE)
COMPILEDIR := target/$(DEB_HOST_RUST_TYPE)/release
STATIC_COMPILEDIR := $(STATIC_TARGET_DIR)/$(DEB_HOST_RUST_TYPE)/release
CARGO_BUILD_ARGS += --release
COMPILEDIR := target/release
else
CARGO_BUILD_ARGS += --target $(DEB_HOST_RUST_TYPE)
COMPILEDIR := target/$(DEB_HOST_RUST_TYPE)/debug
STATIC_COMPILEDIR := $(STATIC_TARGET_DIR)/$(DEB_HOST_RUST_TYPE)/debug
COMPILEDIR := target/debug
endif
STATIC_RUSTC_FLAGS := -C target-feature=+crt-static -L $(STATIC_COMPILEDIR)/deps-stubs/
ifeq ($(valgrind), yes)
CARGO_BUILD_ARGS += --features valgrind
endif
@ -61,9 +51,6 @@ CARGO ?= cargo
COMPILED_BINS := \
$(addprefix $(COMPILEDIR)/,$(USR_BIN) $(USR_SBIN) $(SERVICE_BIN) $(RESTORE_BIN))
STATIC_BINS := \
$(addprefix $(STATIC_COMPILEDIR)/,proxmox-backup-client-static pxar-static)
export DEB_VERSION DEB_VERSION_UPSTREAM
SERVER_DEB=$(PACKAGE)-server_$(DEB_VERSION)_$(ARCH).deb
@ -72,12 +59,10 @@ CLIENT_DEB=$(PACKAGE)-client_$(DEB_VERSION)_$(ARCH).deb
CLIENT_DBG_DEB=$(PACKAGE)-client-dbgsym_$(DEB_VERSION)_$(ARCH).deb
RESTORE_DEB=proxmox-backup-file-restore_$(DEB_VERSION)_$(ARCH).deb
RESTORE_DBG_DEB=proxmox-backup-file-restore-dbgsym_$(DEB_VERSION)_$(ARCH).deb
STATIC_CLIENT_DEB=$(PACKAGE)-client-static_$(DEB_VERSION)_$(ARCH).deb
STATIC_CLIENT_DBG_DEB=$(PACKAGE)-client-static-dbgsym_$(DEB_VERSION)_$(ARCH).deb
DOC_DEB=$(PACKAGE)-docs_$(DEB_VERSION)_all.deb
DEBS=$(SERVER_DEB) $(SERVER_DBG_DEB) $(CLIENT_DEB) $(CLIENT_DBG_DEB) \
$(RESTORE_DEB) $(RESTORE_DBG_DEB) $(STATIC_CLIENT_DEB) $(STATIC_CLIENT_DBG_DEB)
$(RESTORE_DEB) $(RESTORE_DBG_DEB) $(DEBUG_DEB) $(DEBUG_DBG_DEB)
DSC = rust-$(PACKAGE)_$(DEB_VERSION).dsc
@ -85,7 +70,7 @@ DESTDIR=
tests ?= --workspace
all: proxmox-backup-client-static $(SUBDIRS)
all: $(SUBDIRS)
.PHONY: $(SUBDIRS)
$(SUBDIRS):
@ -108,7 +93,7 @@ build:
cp -a debian \
Cargo.toml src \
$(SUBCRATES) \
docs etc examples tests www zsh-completions templates \
docs etc examples tests www zsh-completions \
defines.mk Makefile \
./build/
rm -f build/Cargo.lock
@ -123,15 +108,12 @@ proxmox-backup-docs: build
cd build; dpkg-buildpackage -b -us -uc --no-pre-clean
lintian $(DOC_DEB)
.PHONY: deb dsc deb-nodoc deb-nostrip
# copy the local target/ dir as a build-cache
.PHONY: deb dsc deb-nodoc
deb-nodoc: build
cd build; dpkg-buildpackage -b -us -uc --no-pre-clean --build-profiles=nodoc
lintian $(DEBS)
deb-nostrip: build
cd build; DEB_BUILD_OPTIONS=nostrip dpkg-buildpackage -b -us -uc
lintian $(DEBS) $(DOC_DEB)
$(DEBS): deb
deb: build
cd build; dpkg-buildpackage -b -us -uc
@ -155,7 +137,7 @@ clean: clean-deb
$(foreach i,$(SUBDIRS), \
$(MAKE) -C $(i) clean ;)
$(CARGO) clean
rm -f .do-cargo-build .do-static-cargo-build
rm -f .do-cargo-build
# allows one to avoid running cargo clean when one just wants to tidy up after a package build
clean-deb:
@ -163,7 +145,8 @@ clean-deb:
rm -f *.deb *.dsc *.tar.* *.buildinfo *.build *.changes
.PHONY: dinstall
dinstall: $(SERVER_DEB) $(SERVER_DBG_DEB) $(CLIENT_DEB) $(CLIENT_DBG_DEB)
dinstall: $(SERVER_DEB) $(SERVER_DBG_DEB) $(CLIENT_DEB) $(CLIENT_DBG_DEB) \
$(DEBUG_DEB) $(DEBUG_DBG_DEB)
dpkg -i $^
# make sure we build binaries before docs
@ -194,7 +177,6 @@ $(COMPILED_BINS) $(COMPILEDIR)/dump-catalog-shell-cli $(COMPILEDIR)/docgen: .do-
--bin proxmox-restore-daemon \
--package proxmox-backup \
--bin docgen \
--bin pbs2to3 \
--bin proxmox-backup-api \
--bin proxmox-backup-manager \
--bin proxmox-backup-proxy \
@ -204,25 +186,12 @@ $(COMPILED_BINS) $(COMPILEDIR)/dump-catalog-shell-cli $(COMPILEDIR)/docgen: .do-
--bin sg-tape-cmd
touch "$@"
.PHONY: proxmox-backup-client-static
proxmox-backup-client-static:
rm -f .do-static-cargo-build
$(MAKE) $(STATIC_BINS)
$(STATIC_BINS): .do-static-cargo-build
.do-static-cargo-build:
mkdir -p $(STATIC_COMPILEDIR)/deps-stubs/ && \
echo '!<arch>' > $(STATIC_COMPILEDIR)/deps-stubs/libsystemd.a # workaround for to greedy linkage and proxmox-systemd
$(CARGO) rustc $(CARGO_BUILD_ARGS) --package pxar-bin --bin pxar \
--target-dir $(STATIC_TARGET_DIR) -- $(STATIC_RUSTC_FLAGS)
$(CARGO) rustc $(CARGO_BUILD_ARGS) --package proxmox-backup-client --bin proxmox-backup-client \
--target-dir $(STATIC_TARGET_DIR) -- $(STATIC_RUSTC_FLAGS)
.PHONY: lint
lint:
cargo clippy -- -A clippy::all -D clippy::correctness
install: $(COMPILED_BINS) $(STATIC_BINS)
install: $(COMPILED_BINS)
install -dm755 $(DESTDIR)$(BINDIR)
install -dm755 $(DESTDIR)$(ZSH_COMPL_DEST)
$(foreach i,$(USR_BIN), \
@ -232,7 +201,6 @@ install: $(COMPILED_BINS) $(STATIC_BINS)
$(foreach i,$(USR_SBIN), \
install -m755 $(COMPILEDIR)/$(i) $(DESTDIR)$(SBINDIR)/ ; \
install -m644 zsh-completions/_$(i) $(DESTDIR)$(ZSH_COMPL_DEST)/ ;)
install -m755 $(COMPILEDIR)/pbs2to3 $(DESTDIR)$(SBINDIR)/
install -dm755 $(DESTDIR)$(LIBEXECDIR)/proxmox-backup
install -dm755 $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/file-restore
$(foreach i,$(RESTORE_BIN), \
@ -241,19 +209,16 @@ install: $(COMPILED_BINS) $(STATIC_BINS)
install -m4755 -o root -g root $(COMPILEDIR)/sg-tape-cmd $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/sg-tape-cmd
$(foreach i,$(SERVICE_BIN), \
install -m755 $(COMPILEDIR)/$(i) $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/ ;)
install -m755 $(STATIC_COMPILEDIR)/proxmox-backup-client $(DESTDIR)$(BINDIR)/proxmox-backup-client-static
install -m755 $(STATIC_COMPILEDIR)/pxar $(DESTDIR)$(BINDIR)/pxar-static
$(MAKE) -C www install
$(MAKE) -C docs install
$(MAKE) -C templates install
.PHONY: upload
upload: UPLOAD_DIST ?= $(DEB_DISTRIBUTION)
upload: $(SERVER_DEB) $(CLIENT_DEB) $(RESTORE_DEB) $(DOC_DEB) $(STATIC_CLIENT_DEB)
upload: $(SERVER_DEB) $(CLIENT_DEB) $(RESTORE_DEB) $(DOC_DEB) $(DEBUG_DEB)
# check if working directory is clean
git diff --exit-code --stat && git diff --exit-code --stat --staged
tar cf - $(SERVER_DEB) $(SERVER_DBG_DEB) $(DOC_DEB) $(CLIENT_DEB) $(CLIENT_DBG_DEB) \
tar cf - $(SERVER_DEB) $(SERVER_DBG_DEB) $(DOC_DEB) $(CLIENT_DEB) \
$(CLIENT_DBG_DEB) $(DEBUG_DEB) $(DEBUG_DBG_DEB) \
| ssh -X repoman@repo.proxmox.com upload --product pbs --dist $(UPLOAD_DIST)
tar cf - $(CLIENT_DEB) $(CLIENT_DBG_DEB) | ssh -X repoman@repo.proxmox.com upload --product "pve,pmg,pbs-client" --dist $(UPLOAD_DIST)
tar cf - $(STATIC_CLIENT_DEB) $(STATIC_CLIENT_DBG_DEB) | ssh -X repoman@repo.proxmox.com upload --product "pbs-client" --dist $(UPLOAD_DIST)
tar cf - $(RESTORE_DEB) $(RESTORE_DBG_DEB) | ssh -X repoman@repo.proxmox.com upload --product "pve" --dist $(UPLOAD_DIST)

View File

@ -5,11 +5,8 @@ Build & Release Notes
``rustup`` Toolchain
====================
We normally want to build with the ``rustc`` Debian package (see below). If you
still want to use ``rustup`` for other reasons (e.g. to easily switch between
the official stable, beta, and nightly compilers), you should set the following
``rustup`` configuration to use the Debian-provided ``rustc`` compiler
by default:
We normally want to build with the ``rustc`` Debian package. To do that
you can set the following ``rustup`` configuration:
# rustup toolchain link system /usr
# rustup default system
@ -33,7 +30,7 @@ pre-release version number (e.g., "0.1.1-dev.1" instead of "0.1.0").
Local cargo config
==================
This repository ships with a ``.cargo/config.toml`` that replaces the crates.io
This repository ships with a ``.cargo/config`` that replaces the crates.io
registry with packaged crates located in ``/usr/share/cargo/registry``.
A similar config is also applied building with dh_cargo. Cargo.lock needs to be
@ -47,7 +44,7 @@ example for proxmox crate above).
Build
=====
on Debian 12 Bookworm
on Debian 11 Bullseye
Setup:
1. # echo 'deb http://download.proxmox.com/debian/devel/ bookworm main' | sudo tee /etc/apt/sources.list.d/proxmox-devel.list
@ -61,7 +58,7 @@ Note: 2. may be skipped if you already added the PVE or PBS package repository
You are now able to build using the Makefile or cargo itself, e.g.::
# make deb
# make deb-all
# # or for a non-package build
# cargo build --all --release

1093
debian/changelog vendored

File diff suppressed because it is too large

127
debian/control vendored
View File

@ -15,28 +15,28 @@ Build-Depends: bash-completion,
libacl1-dev,
libfuse3-dev,
librust-anyhow-1+default-dev,
librust-async-trait-0.1+default-dev (>= 0.1.56-~~),
librust-apt-pkg-native-0.3+default-dev (>= 0.3.2-~~),
librust-base64-0.13+default-dev,
librust-bitflags-2+default-dev (>= 2.4-~~),
librust-bitflags-1+default-dev (>= 1.2.1-~~),
librust-bytes-1+default-dev,
librust-cidr-0.2+default-dev (>= 0.2.1-~~),
librust-const-format-0.2+default-dev,
librust-crc32fast-1+default-dev,
librust-crossbeam-channel-0.5+default-dev,
librust-endian-trait-0.6+arrays-dev,
librust-endian-trait-0.6+default-dev,
librust-env-logger-0.11+default-dev,
librust-env-logger-0.9+default-dev,
librust-flate2-1+default-dev,
librust-foreign-types-0.3+default-dev,
librust-futures-0.3+default-dev,
librust-h2-0.4+default-dev,
librust-h2-0.4+legacy-dev,
librust-h2-0.4+stream-dev,
librust-h2-0.3+default-dev,
librust-h2-0.3+stream-dev,
librust-handlebars-3+default-dev,
librust-hex-0.4+default-dev (>= 0.4.3-~~),
librust-hex-0.4+serde-dev (>= 0.4.3-~~),
librust-hyper-0.14+backports-dev,
librust-http-0.2+default-dev,
librust-hyper-0.14+default-dev,
librust-hyper-0.14+deprecated-dev,
librust-hyper-0.14+full-dev,
librust-lazy-static-1+default-dev (>= 1.4-~~),
librust-libc-0.2+default-dev,
librust-log-0.4+default-dev (>= 0.4.17-~~),
librust-nix-0.26+default-dev (>= 0.26.1-~~),
@ -44,77 +44,69 @@ Build-Depends: bash-completion,
librust-num-traits-0.2+default-dev,
librust-once-cell-1+default-dev (>= 1.3.1-~~),
librust-openssl-0.10+default-dev (>= 0.10.40-~~),
librust-pathpatterns-0.3+default-dev,
librust-pbs-api-types-0.2+default-dev (>= 0.2.2),
librust-pathpatterns-0.1+default-dev (>= 0.1.2-~~),
librust-percent-encoding-2+default-dev (>= 2.1-~~),
librust-pin-project-lite-0.2+default-dev,
librust-proxmox-acme-0.5+default-dev (>= 0.5.3-~~),
librust-proxmox-apt-0.11+cache-dev,
librust-proxmox-apt-0.11+default-dev,
librust-proxmox-apt-api-types-1+default-dev (>= 1.0.1-~~),
librust-proxmox-acme-rs-0.4+default-dev,
librust-proxmox-apt-0.10+default-dev (>= 0.10.2~~),
librust-proxmox-async-0.4+default-dev,
librust-proxmox-auth-api-0.4+api-dev,
librust-proxmox-auth-api-0.4+default-dev,
librust-proxmox-auth-api-0.4+pam-authenticator-dev,
librust-proxmox-auth-api-0.3+api-dev,
librust-proxmox-auth-api-0.3+api-types-dev,
librust-proxmox-auth-api-0.3+default-dev,
librust-proxmox-auth-api-0.3+pam-authenticator-dev,
librust-proxmox-borrow-1+default-dev,
librust-proxmox-compression-0.2+default-dev,
librust-proxmox-config-digest-0.1+default-dev,
librust-proxmox-daemon-0.1+default-dev,
librust-proxmox-compression-0.2+default-dev (>= 0.2~),
librust-proxmox-fuse-0.1+default-dev (>= 0.1.3-~~),
librust-proxmox-http-0.9+client-dev (>= 0.9.5-~~),
librust-proxmox-http-0.9+client-trait-dev (>= 0.9.5-~~),
librust-proxmox-http-0.9+default-dev (>= 0.9.5-~~),
librust-proxmox-http-0.9+http-helpers-dev (>= 0.9.5-~~),
librust-proxmox-http-0.9+proxmox-async-dev (>= 0.9.5-~~),
librust-proxmox-http-0.9+rate-limited-stream-dev (>= 0.9.5-~~),
librust-proxmox-http-0.9+rate-limiter-dev (>= 0.9.5-~~),
librust-proxmox-http-0.9+websocket-dev (>= 0.9.5-~~),
librust-proxmox-http-0.9+client-dev,
librust-proxmox-http-0.9+client-trait-dev,
librust-proxmox-http-0.9+default-dev,
librust-proxmox-http-0.9+http-helpers-dev,
librust-proxmox-http-0.9+proxmox-async-dev,
librust-proxmox-http-0.9+rate-limited-stream-dev,
librust-proxmox-http-0.9+rate-limiter-dev,
librust-proxmox-http-0.9+websocket-dev,
librust-proxmox-human-byte-0.1+default-dev,
librust-proxmox-io-1+default-dev (>= 1.0.1-~~),
librust-proxmox-io-1+tokio-dev (>= 1.0.1-~~),
librust-proxmox-lang-1+default-dev (>= 1.1-~~),
librust-proxmox-ldap-0.2+default-dev (>= 0.2.1-~~),
librust-proxmox-log-0.2+default-dev (>= 0.2.6-~~),
librust-proxmox-metrics-0.3+default-dev (>= 0.3.1-~~),
librust-proxmox-notify-0.5+default-dev (>= 0.5.1-~~),
librust-proxmox-notify-0.5+pbs-context-dev (>= 0.5.1-~~),
librust-proxmox-openid-0.10+default-dev,
librust-proxmox-rest-server-0.8+default-dev (>= 0.8.9-~~),
librust-proxmox-rest-server-0.8+rate-limited-stream-dev (>= 0.8.9-~~),
librust-proxmox-rest-server-0.8+templates-dev (>= 0.8.9-~~),
librust-proxmox-router-3+cli-dev,
librust-proxmox-router-3+server-dev,
librust-proxmox-rrd-0.4+default-dev,
librust-proxmox-rrd-api-types-1+default-dev (>= 1.0.2-~~),
librust-proxmox-schema-4+api-macro-dev,
librust-proxmox-schema-4+default-dev,
librust-proxmox-section-config-2+default-dev,
librust-proxmox-ldap-0.2+default-dev,
librust-proxmox-metrics-0.3+default-dev,
librust-proxmox-openid-0.10+default-dev (>= 0.10.0-~~),
librust-proxmox-rest-server-0.4+default-dev (>= 0.4.1~~),
librust-proxmox-rest-server-0.4+rate-limited-stream-dev,
librust-proxmox-rest-server-0.4+templates-dev,
librust-proxmox-router-1+cli-dev (>= 1.3.1-~~),
librust-proxmox-router-1+default-dev (>= 1.3.1-~~),
librust-proxmox-router-1+server-dev (>= 1.3.1-~~),
librust-proxmox-schema-1+api-macro-dev (>= 1.3.6-~~),
librust-proxmox-schema-1+default-dev (>= 1.3.6-~~),
librust-proxmox-section-config-1+default-dev,
librust-proxmox-serde-0.1+default-dev (>= 0.1.1-~~),
librust-proxmox-serde-0.1+serde-json-dev (>= 0.1.1-~~),
librust-proxmox-shared-cache-0.1+default-dev,
librust-proxmox-shared-memory-0.3+default-dev,
librust-proxmox-shared-memory-0.3+default-dev (>= 0.3.0-~~),
librust-proxmox-sortable-macro-0.1+default-dev (>= 0.1.2-~~),
librust-proxmox-subscription-0.5+api-types-dev,
librust-proxmox-subscription-0.5+default-dev,
librust-proxmox-sys-0.6+acl-dev (>= 0.6.5-~~),
librust-proxmox-sys-0.6+crypt-dev (>= 0.6.5-~~),
librust-proxmox-sys-0.6+default-dev (>= 0.6.7-~~),
librust-proxmox-sys-0.6+logrotate-dev (>= 0.6.5-~~),
librust-proxmox-sys-0.6+timer-dev (>= 0.6.5-~~),
librust-proxmox-systemd-0.1+default-dev,
librust-proxmox-tfa-5+api-dev,
librust-proxmox-tfa-5+api-types-dev,
librust-proxmox-tfa-5+default-dev,
librust-proxmox-time-2+default-dev,
librust-proxmox-subscription-0.4+api-types-dev,
librust-proxmox-subscription-0.4+default-dev,
librust-proxmox-sys-0.5+acl-dev (>= 0.5.0-~~),
librust-proxmox-sys-0.5+crypt-dev (>= 0.5.0-~~),
librust-proxmox-sys-0.5+default-dev (>= 0.5.0-~~),
librust-proxmox-sys-0.5+logrotate-dev (>= 0.5.0-~~),
librust-proxmox-sys-0.5+timer-dev (>= 0.5.0-~~),
librust-proxmox-tfa-4+api-dev,
librust-proxmox-tfa-4+api-types-dev,
librust-proxmox-tfa-4+default-dev,
librust-proxmox-time-1+default-dev (>= 1.1.2-~~),
librust-proxmox-uuid-1+default-dev,
librust-proxmox-uuid-1+serde-dev,
librust-proxmox-worker-task-0.1+default-dev,
librust-pxar-0.12+default-dev (>= 0.12.1-~~),
librust-pxar-0.10+default-dev (>= 0.10.2-~~),
librust-regex-1+default-dev (>= 1.5.5-~~),
librust-rustyline-9+default-dev,
librust-serde-1+default-dev,
librust-serde-1+derive-dev,
librust-serde-cbor-0.11+default-dev (>= 0.11.1-~~),
librust-serde-json-1+default-dev,
librust-serde-plain-1+default-dev,
librust-siphasher-0.3+default-dev,
librust-syslog-6+default-dev,
librust-tar-0.4+default-dev,
librust-termcolor-1+default-dev (>= 1.1.2-~~),
@ -138,14 +130,12 @@ Build-Depends: bash-completion,
librust-tokio-util-0.7+default-dev,
librust-tokio-util-0.7+io-dev,
librust-tower-service-0.3+default-dev,
librust-tracing-0.1+default-dev,
librust-udev-0.4+default-dev,
librust-url-2+default-dev (>= 2.1-~~),
librust-walkdir-2+default-dev,
librust-xdg-2+default-dev (>= 2.2-~~),
librust-zstd-0.12+bindgen-dev,
librust-zstd-0.12+default-dev,
librust-zstd-safe-6+default-dev,
libsgutils2-dev,
libstd-rust-dev,
libsystemd-dev (>= 246-~~),
@ -172,7 +162,6 @@ Rules-Requires-Root: binary-targets
Package: proxmox-backup-server
Architecture: any
Depends: fonts-font-awesome,
gdisk,
libjs-extjs (>= 7~),
libjs-qrcodejs (>= 1.20201119),
libproxmox-acme-plugins,
@ -184,7 +173,7 @@ Depends: fonts-font-awesome,
postfix | mail-transport-agent,
proxmox-backup-docs,
proxmox-mini-journalreader,
proxmox-widget-toolkit (>= 4.3.3),
proxmox-widget-toolkit (>= 3.5.2),
pve-xtermjs (>= 4.7.0-1),
sg3-utils,
smartmontools,
@ -205,14 +194,6 @@ Description: Proxmox Backup Client tools
This package contains the Proxmox Backup client, which provides a
simple command line tool to create and restore backups.
Package: proxmox-backup-client-static
Architecture: any
Depends: qrencode, ${misc:Depends},
Conflicts: proxmox-backup-client,
Description: Proxmox Backup Client tools (statically linked)
This package contains the Proxmox Backup client, which provides a
simple command line tool to create and restore backups.
Package: proxmox-backup-docs
Build-Profiles: <!nodoc>
Section: doc

2
debian/copyright vendored
View File

@ -1,4 +1,4 @@
Copyright (C) 2019 - 2025 Proxmox Server Solutions GmbH
Copyright (C) 2019 - 2023 Proxmox Server Solutions GmbH
This software is written by Proxmox Server Solutions GmbH <support@proxmox.com>

View File

@ -4,5 +4,4 @@ proxmox-backup-server: elevated-privileges 4755 root/root [usr/lib/x86_64-linux-
proxmox-backup-server: systemd-service-file-refers-to-unusual-wantedby-target getty.target [lib/systemd/system/proxmox-backup-banner.service]
proxmox-backup-server: uses-dpkg-database-directly [usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-api]
proxmox-backup-server: uses-dpkg-database-directly [usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-proxy]
proxmox-backup-server: uses-dpkg-database-directly [usr/sbin/pbs2to3]
proxmox-backup-server: uses-dpkg-database-directly [usr/sbin/proxmox-backup-debug]

15
debian/postinst vendored
View File

@ -20,7 +20,15 @@ case "$1" in
# modeled after dh_systemd_start output
systemctl --system daemon-reload >/dev/null || true
if [ -n "$2" ]; then
_dh_action=try-reload-or-restart
if dpkg --compare-versions "$2" 'lt' '1.0.7-1'; then
# there was an issue with reloading and systemd being confused in older daemon versions
# so restart instead of reload if upgrading from there, see commit 0ec79339f7aebf9
# FIXME: remove with PBS 2.1
echo "Upgrading from older proxmox-backup-server: restart (not reload) daemons"
_dh_action=try-restart
else
_dh_action=try-reload-or-restart
fi
else
_dh_action=start
fi
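For context, ``dpkg --compare-versions`` reports its result solely via the exit status, which is what the branch above keys on; a quick standalone check (the version strings are arbitrary examples):

    # dpkg --compare-versions "1.0.6-1" lt "1.0.7-1" && echo "treat as upgrade from an older version"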
@ -72,11 +80,6 @@ EOF
update_sync_job "$prev_job"
fi
fi
if dpkg --compare-versions "$2" 'lt' '3.3.5~'; then
# ensure old locking is used by the daemon until a reboot happened
touch "/run/proxmox-backup/old-locking"
fi
fi
;;

View File

@ -1,2 +0,0 @@
debian/proxmox-backup-client.bc proxmox-backup-client
debian/pxar.bc pxar

View File

@ -1,4 +0,0 @@
usr/share/man/man1/proxmox-backup-client.1
usr/share/man/man1/pxar.1
usr/share/zsh/vendor-completions/_proxmox-backup-client
usr/share/zsh/vendor-completions/_pxar

View File

@ -9,7 +9,7 @@ update_initramfs() {
CACHE_PATH_DBG="/var/cache/proxmox-backup/file-restore-initramfs-debug.img"
# cleanup first, in case proxmox-file-restore was uninstalled since we do
# not want an unusable image lying around
# not want an unuseable image lying around
rm -f "$CACHE_PATH"
if [ ! -f "$INST_PATH/initramfs.img" ]; then

View File

@ -4,7 +4,6 @@ etc/proxmox-backup-daily-update.service /lib/systemd/system/
etc/proxmox-backup-daily-update.timer /lib/systemd/system/
etc/proxmox-backup-proxy.service /lib/systemd/system/
etc/proxmox-backup.service /lib/systemd/system/
etc/removable-device-attach@.service /lib/systemd/system/
usr/bin/pmt
usr/bin/pmtx
usr/bin/proxmox-tape
@ -13,14 +12,12 @@ usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-banner
usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-proxy
usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-daily-update
usr/lib/x86_64-linux-gnu/proxmox-backup/sg-tape-cmd
usr/sbin/pbs2to3
usr/sbin/proxmox-backup-debug
usr/sbin/proxmox-backup-manager
usr/share/javascript/proxmox-backup/css/ext6-pbs.css
usr/share/javascript/proxmox-backup/images
usr/share/javascript/proxmox-backup/index.hbs
usr/share/javascript/proxmox-backup/js/proxmox-backup-gui.js
usr/share/man/man1/pbs2to3.1
usr/share/man/man1/pmt.1
usr/share/man/man1/pmtx.1
usr/share/man/man1/proxmox-backup-debug.1
@ -31,44 +28,12 @@ usr/share/man/man5/acl.cfg.5
usr/share/man/man5/datastore.cfg.5
usr/share/man/man5/domains.cfg.5
usr/share/man/man5/media-pool.cfg.5
usr/share/man/man5/notifications-priv.cfg.5
usr/share/man/man5/notifications.cfg.5
usr/share/man/man5/proxmox-backup.node.cfg.5
usr/share/man/man5/prune.cfg.5
usr/share/man/man5/remote.cfg.5
usr/share/man/man5/sync.cfg.5
usr/share/man/man5/tape-job.cfg.5
usr/share/man/man5/tape.cfg.5
usr/share/man/man5/user.cfg.5
usr/share/man/man5/verification.cfg.5
usr/share/proxmox-backup/templates/default/acme-err-body.txt.hbs
usr/share/proxmox-backup/templates/default/acme-err-subject.txt.hbs
usr/share/proxmox-backup/templates/default/gc-err-body.txt.hbs
usr/share/proxmox-backup/templates/default/gc-err-subject.txt.hbs
usr/share/proxmox-backup/templates/default/gc-ok-body.txt.hbs
usr/share/proxmox-backup/templates/default/gc-ok-subject.txt.hbs
usr/share/proxmox-backup/templates/default/package-updates-body.txt.hbs
usr/share/proxmox-backup/templates/default/package-updates-subject.txt.hbs
usr/share/proxmox-backup/templates/default/prune-err-body.txt.hbs
usr/share/proxmox-backup/templates/default/prune-err-subject.txt.hbs
usr/share/proxmox-backup/templates/default/prune-ok-body.txt.hbs
usr/share/proxmox-backup/templates/default/prune-ok-subject.txt.hbs
usr/share/proxmox-backup/templates/default/sync-err-body.txt.hbs
usr/share/proxmox-backup/templates/default/sync-err-subject.txt.hbs
usr/share/proxmox-backup/templates/default/sync-ok-body.txt.hbs
usr/share/proxmox-backup/templates/default/sync-ok-subject.txt.hbs
usr/share/proxmox-backup/templates/default/tape-backup-err-body.txt.hbs
usr/share/proxmox-backup/templates/default/tape-backup-err-subject.txt.hbs
usr/share/proxmox-backup/templates/default/tape-backup-ok-body.txt.hbs
usr/share/proxmox-backup/templates/default/tape-backup-ok-subject.txt.hbs
usr/share/proxmox-backup/templates/default/tape-load-body.txt.hbs
usr/share/proxmox-backup/templates/default/tape-load-subject.txt.hbs
usr/share/proxmox-backup/templates/default/test-body.txt.hbs
usr/share/proxmox-backup/templates/default/test-subject.txt.hbs
usr/share/proxmox-backup/templates/default/verify-err-body.txt.hbs
usr/share/proxmox-backup/templates/default/verify-err-subject.txt.hbs
usr/share/proxmox-backup/templates/default/verify-ok-body.txt.hbs
usr/share/proxmox-backup/templates/default/verify-ok-subject.txt.hbs
usr/share/zsh/vendor-completions/_pmt
usr/share/zsh/vendor-completions/_pmtx
usr/share/zsh/vendor-completions/_proxmox-backup-debug

View File

@ -16,6 +16,3 @@ SUBSYSTEM=="scsi_generic", SUBSYSTEMS=="scsi", ATTRS{type}=="1", ENV{ID_SCSI_SER
SYMLINK+="tape/by-id/scsi-$env{ID_SCSI_SERIAL}-sg"
LABEL="persistent_storage_tape_end"
# triggers the mounting of a removable device
ACTION=="add", SUBSYSTEM=="block", ENV{ID_FS_UUID}!="", TAG+="systemd", ENV{SYSTEMD_WANTS}="removable-device-attach@$env{ID_FS_UUID}"

10
debian/rules vendored
View File

@ -8,7 +8,7 @@ include /usr/share/rustc/architecture.mk
export BUILD_MODE=release
export CARGO=/usr/share/cargo/bin/cargo
CARGO=/usr/share/cargo/bin/cargo
export CFLAGS CXXFLAGS CPPFLAGS LDFLAGS
export DEB_HOST_RUST_TYPE DEB_HOST_GNU_TYPE
@ -28,11 +28,6 @@ override_dh_auto_configure:
@perl -ne 'if (/^version\s*=\s*"(\d+(?:\.\d+)+)"/) { my $$v_cargo = $$1; my $$v_deb = "$(DEB_VERSION_UPSTREAM)"; \
die "ERROR: d/changelog <-> Cargo.toml version mismatch: $$v_cargo != $$v_deb\n" if $$v_cargo ne $$v_deb; exit(0); }' Cargo.toml
$(CARGO) prepare-debian $(CURDIR)/debian/cargo_registry --link-from-system
# `cargo build` and `cargo install` have different config precedence, symlink
# the wrapper config into a place where `build` picks it up as well..
# https://doc.rust-lang.org/cargo/commands/cargo-install.html#configuration-discovery
mkdir -p .cargo
ln -s $(CARGO_HOME)/config.toml $(CURDIR)/.cargo/config.toml
dh_auto_configure
override_dh_auto_build:
@ -47,9 +42,6 @@ override_dh_auto_install:
dh_auto_install -- \
PROXY_USER=backup \
LIBDIR=/usr/lib/$(DEB_HOST_MULTIARCH)
mkdir -p debian/proxmox-backup-client-static/usr/bin
mv debian/tmp/usr/bin/proxmox-backup-client-static debian/proxmox-backup-client-static/usr/bin/proxmox-backup-client
mv debian/tmp/usr/bin/pxar-static debian/proxmox-backup-client-static/usr/bin/pxar
override_dh_installsystemd:
dh_installsystemd -pproxmox-backup-server proxmox-backup-daily-update.timer

View File

@ -1,65 +1,54 @@
include ../defines.mk
GENERATED_SYNOPSIS := \
config/acl/roles.rst \
config/datastore/config.rst \
config/domains/config.rst \
proxmox-tape/synopsis.rst \
proxmox-backup-client/synopsis.rst \
proxmox-backup-client/catalog-shell-synopsis.rst \
proxmox-backup-manager/synopsis.rst \
proxmox-backup-debug/synopsis.rst \
proxmox-file-restore/synopsis.rst \
pxar/synopsis.rst \
pmtx/synopsis.rst \
pmt/synopsis.rst \
config/media-pool/config.rst \
config/notifications-priv/config.rst \
config/notifications/config.rst \
config/tape/config.rst \
config/tape-job/config.rst \
config/user/config.rst \
config/remote/config.rst \
config/sync/config.rst \
config/tape-job/config.rst \
config/tape/config.rst \
config/user/config.rst \
config/verification/config.rst \
config/prune/config.rst \
pmt/synopsis.rst \
pmtx/synopsis.rst \
proxmox-backup-client/catalog-shell-synopsis.rst \
proxmox-backup-client/synopsis.rst \
proxmox-backup-debug/synopsis.rst \
proxmox-backup-manager/synopsis.rst \
proxmox-file-restore/synopsis.rst \
proxmox-tape/synopsis.rst \
pxar/synopsis.rst \
config/acl/roles.rst \
config/datastore/config.rst \
config/domains/config.rst
MAN1_PAGES := \
pbs2to3.1 \
pmt.1 \
pmtx.1 \
proxmox-backup-client.1 \
proxmox-backup-debug.1 \
proxmox-backup-manager.1 \
proxmox-backup-proxy.1 \
proxmox-file-restore.1 \
proxmox-tape.1 \
pxar.1 \
pmtx.1 \
pmt.1 \
proxmox-tape.1 \
proxmox-backup-proxy.1 \
proxmox-backup-client.1 \
proxmox-backup-manager.1 \
proxmox-file-restore.1 \
proxmox-backup-debug.1
# FIXME: prefix all man pages that are not directly relating to an existing executable with
# `proxmox-backup.`, like the newer added proxmox-backup.node.cfg but add backwards compatible
# symlinks, e.g. with a "5pbs" man page "suffix section".
MAN5_PAGES := \
acl.cfg.5 \
datastore.cfg.5 \
domains.cfg.5 \
media-pool.cfg.5 \
proxmox-backup.node.cfg.5 \
notifications-priv.cfg.5 \
notifications.cfg.5 \
tape.cfg.5 \
tape-job.cfg.5 \
acl.cfg.5 \
user.cfg.5 \
remote.cfg.5 \
sync.cfg.5 \
tape-job.cfg.5 \
tape.cfg.5 \
user.cfg.5 \
verification.cfg.5 \
prune.cfg.5 \
datastore.cfg.5 \
domains.cfg.5
PRUNE_SIMULATOR_FILES := \
prune-simulator/index.html \
prune-simulator/clear-trigger.png \
prune-simulator/documentation.html \
prune-simulator/prune-simulator.js \
prune-simulator/clear-trigger.png \
prune-simulator/prune-simulator.js
PRUNE_SIMULATOR_JS_SOURCE := \
/usr/share/javascript/proxmox-widget-toolkit-dev/Toolkit.js \
@ -91,15 +80,15 @@ API_VIEWER_FILES := \
/usr/share/javascript/proxmox-widget-toolkit-dev/APIViewer.js \
# Sphinx documentation setup
SPHINXOPTS = -E
SPHINXOPTS =
SPHINXBUILD = sphinx-build
BUILDDIR = output
ifeq ($(BUILD_MODE), release)
COMPILEDIR := ../target/$(DEB_HOST_RUST_TYPE)/release
COMPILEDIR := ../target/release
SPHINXOPTS += -t release
else
COMPILEDIR := ../target/$(DEB_HOST_RUST_TYPE)/debug
COMPILEDIR := ../target/debug
SPHINXOPTS += -t devbuild
endif
@ -148,9 +137,9 @@ lto-barcode/lto-barcode-generator.js: ${LTO_BARCODE_JS_SOURCE}
mv $@.tmp $@
.PHONY: html
html: ${GENERATED_SYNOPSIS} images/proxmox-logo.svg _static/custom.css conf.py ${PRUNE_SIMULATOR_FILES} ${LTO_BARCODE_FILES} ${API_VIEWER_SOURCES}
html: ${GENERATED_SYNOPSIS} images/proxmox-logo.svg custom.css conf.py ${PRUNE_SIMULATOR_FILES} ${LTO_BARCODE_FILES} ${API_VIEWER_SOURCES}
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
install -m 0644 _static/custom.js _static/custom.css images/proxmox-logo.svg $(BUILDDIR)/html/_static/
install -m 0644 custom.js custom.css images/proxmox-logo.svg $(BUILDDIR)/html/_static/
install -dm 0755 $(BUILDDIR)/html/prune-simulator
install -m 0644 ${PRUNE_SIMULATOR_FILES} $(BUILDDIR)/html/prune-simulator
install -dm 0755 $(BUILDDIR)/html/lto-barcode

View File

@ -1,9 +1,7 @@
.. _client_usage:
Backup Client Usage
===================
The command-line client for `Proxmox Backup`_ Server is called
The command-line client for Proxmox Backup Server is called
:command:`proxmox-backup-client`.
.. _client_repository:
@ -28,9 +26,6 @@ brackets (for example, `[fe80::01]`).
You can pass the repository with the ``--repository`` command-line option, or
by setting the ``PBS_REPOSITORY`` environment variable.
The web interface provides copyable repository text in the datastore summary
with the `Show Connection Information` button.
Below are some examples of valid repositories and their corresponding real
values:
@ -46,24 +41,6 @@ user\@pbs!token@host:store ``user@pbs!token`` host:8007 store
[ff80::51]:1234:mydatastore ``root@pam`` [ff80::51]:1234 mydatastore
================================ ================== ================== ===========
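For illustration, both ways of passing the repository described above, with placeholder names (``backup-server.example`` and ``store1`` are assumptions, not taken from this diff):

.. code-block:: console

    # proxmox-backup-client snapshot list --repository root@pam@backup-server.example:store1
    # export PBS_REPOSITORY='root@pam@backup-server.example:store1'
    # proxmox-backup-client snapshot list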
.. _statically_linked_client:
Statically Linked Backup Client
-------------------------------
A statically linked version of the Proxmox Backup client is available for Linux
based systems where the regular client is not available. Please note that it is
recommended to use the regular client when possible, as the statically linked
client is not a full replacement. For example, name resolution will not be
performed via the mechanisms provided by libc, but uses a resolver written
purely in the Rust programming language. Therefore, features and modules
provided by Name Service Switch cannot be used.
The statically linked client is available via the ``pbs-client`` repository as
described in the :ref:`installation <install_pbc>` section.
.. _environment-variables:
Environment Variables
---------------------
@ -109,43 +86,6 @@ Environment Variables
you can add arbitrary comments after the first newline.
System and Service Credentials
------------------------------
Some of the :ref:`environment variables <environment-variables>` above can be
set using `system and service credentials <https://systemd.io/CREDENTIALS/>`_
instead.
============================ ==============================================
Environment Variable Credential Name Equivalent
============================ ==============================================
``PBS_REPOSITORY`` ``proxmox-backup-client.repository``
``PBS_PASSWORD`` ``proxmox-backup-client.password``
``PBS_ENCRYPTION_PASSWORD`` ``proxmox-backup-client.encryption-password``
``PBS_FINGERPRINT`` ``proxmox-backup-client.fingerprint``
============================ ==============================================
For example, the repository password can be stored in an encrypted file as
follows:
.. code-block:: console
# systemd-ask-password -n | systemd-creds encrypt --name=proxmox-backup-client.password - my-api-token.cred
The credential can then be reused inside of unit files or in a transient scope
unit as follows:
.. code-block:: console
# systemd-run --pipe --wait \
--property=LoadCredentialEncrypted=proxmox-backup-client.password:/full/path/to/my-api-token.cred \
--property=SetCredential=proxmox-backup-client.repository:'my_default_repository' \
proxmox-backup-client ...
Additionally, system credentials (e.g. passed down from the hypervisor to a
virtual machine via SMBIOS type 11) can be loaded on a service via
`LoadCredential=` as described in the manual page ``systemd.exec(5)``.
Output Format
-------------
@ -226,7 +166,6 @@ the client. The format is:
<archive-name>.<type>:<source-path>
The ``archive-name`` must contain alphanumerics, hyphens and underscores only.
Common types are ``.pxar`` for file archives and ``.img`` for block
device images. To create a backup of a block device, run the following command:
@ -315,7 +254,7 @@ Restoring this backup will result in:
.. code-block:: console
# ls -aR restored
ls -aR restored
restored/:
. .. .pxarexclude subfolder0 subfolder1
@ -325,69 +264,6 @@ Restoring this backup will result in:
restored/subfolder1:
. .. file2
The same syntax can also be used directly in the cli with the ``--exclude``
parameter. For example:
.. code-block:: console
# proxmox-backup-client backup archive-name.pxar:./linux --exclude /usr
Multiple paths can be excluded like this:
.. code-block:: console
# proxmox-backup-client backup archive-name.pxar:./linux --exclude=/usr --exclude=/rust
.. _client_change_detection_mode:
Change Detection Mode
~~~~~~~~~~~~~~~~~~~~~
File-based backups containing a lot of data can take a long time, as the default
behavior for the Proxmox backup client is to read all data and encode it into a
pxar archive.
The encoded stream is split into variable sized chunks. For each chunk, a digest
is calculated and used to decide whether the chunk needs to be uploaded or can
be indexed without upload, as it is already available on the server (and
therefore deduplicated). If the backed up files are largely unchanged,
re-reading and then detecting the corresponding chunks don't need to be uploaded
after all is time consuming and undesired.
The backup client's ``change-detection-mode`` can be switched from default to
``metadata`` based detection to reduce limitations as described above,
instructing the client to avoid re-reading files with unchanged metadata
whenever possible.
When using this mode, instead of the regular pxar archive, the backup snapshot
is stored into two separate files: the ``mpxar`` containing the archive's
metadata and the ``ppxar`` containing a concatenation of the file contents. This
splitting allows for efficient metadata lookups. When creating the backup
archives, the current file metadata is compared to the one looked up in the
previous ``mpxar`` archive. The operational details are explained more in depth
in the :ref:`technical documentation <change-detection-mode-metadata>`.
Using the ``change-detection-mode`` set to ``data`` allows to create the same
split archive as when using the ``metadata`` mode, but without using a previous
reference and therefore reencoding all file payloads. For details of this mode
please see the :ref:`technical documentation <change-detection-mode-data>`.
.. _client_change_detection_mode_table:
============ ===================================================================
Mode Description
============ ===================================================================
``legacy`` (current default): Encode all files into a self contained pxar
archive.
``data`` Encode all files into a split data and metadata pxar archive.
``metadata`` Encode changed files, reuse unchanged from previous snapshot,
creating a split archive.
============ ===================================================================
The following shows an example for the client invocation with the `metadata`
mode:
.. code-block:: console
# proxmox-backup-client backup archive-name.pxar:./linux --change-detection-mode=metadata
.. _client_encryption:
@ -528,8 +404,6 @@ version of your master key. The following command sends the output of the
proxmox-backup-client key paperkey --output-format text > qrkey.txt
.. _client_restoring_data:
Restoring Data
--------------
@ -755,28 +629,28 @@ following retention options are available:
Keep the last ``<N>`` backup snapshots.
``--keep-hourly <N>``
Keep backups for the last ``<N>`` hours. If there is more than one backup for
a single hour, only the latest is kept. Hours without backups do not count.
Keep backups for the last ``<N>`` hours. If there is more than one
backup for a single hour, only the latest is kept.
``--keep-daily <N>``
Keep backups for the last ``<N>`` days. If there is more than one backup for a
single day, only the latest is kept. Days without backups do not count.
Keep backups for the last ``<N>`` days. If there is more than one
backup for a single day, only the latest is kept.
``--keep-weekly <N>``
Keep backups for the last ``<N>`` weeks. If there is more than one backup for
a single week, only the latest is kept. Weeks without backup do not count.
Keep backups for the last ``<N>`` weeks. If there is more than one
backup for a single week, only the latest is kept.
.. note:: Weeks start on Monday and end on Sunday. The software
uses the `ISO week date`_ system and handles weeks at
the end of the year correctly.
``--keep-monthly <N>``
Keep backups for the last ``<N>`` months. If there is more than one backup for
a single month, only the latest is kept. Months without backups do not count.
Keep backups for the last ``<N>`` months. If there is more than one
backup for a single month, only the latest is kept.
``--keep-yearly <N>``
Keep backups for the last ``<N>`` years. If there is more than one backup for
a single year, only the latest is kept. Years without backups do not count.
Keep backups for the last ``<N>`` years. If there is more than one
backup for a single year, only the latest is kept.
The retention options are processed in the order given above. Each option
only covers backups within its time period. The next option does not take care
@ -841,25 +715,29 @@ Garbage Collection
------------------
The ``prune`` command removes only the backup index files, not the data
from the datastore. Deletion of unused backup data from the datastore is done by
:ref:`garbage collection<_maintenance_gc>`. It is therefore recommended to
schedule garbage collection tasks on a regular basis. The working principle of
garbage collection is described in more details in the related :ref:`background
section <gc_background>`.
from the datastore. This task is left to the garbage collection
command. It is recommended to carry out garbage collection on a regular basis.
To start garbage collection from the client side, run the following command:
.. code-block:: console
# proxmox-backup-client garbage-collect
The garbage collection works in two phases. In the first phase, all
data blocks that are still in use are marked. In the second phase,
unused data blocks are removed.
.. note:: This command needs to read all existing backup index files
and touches the complete chunk-store. This can take a long time
depending on the number of chunks and the speed of the underlying
disks.
The progress of the garbage collection will be displayed as shown in the example
below:
.. note:: The garbage collection will only remove chunks that haven't been used
for at least one day (exactly 24h 5m). This grace period is necessary because
chunks in use are marked by touching the chunk which updates the ``atime``
(access time) property. Filesystems are mounted with the ``relatime`` option
by default. This results in a better performance by only updating the
``atime`` property if the last access has been at least 24 hours ago. The
downside is that touching a chunk within these 24 hours will not always
update its ``atime`` property.
Chunks in the grace period will be logged at the end of the garbage
collection task as *Pending removals*.
.. code-block:: console

View File

@ -1,7 +1,7 @@
Backup Protocol
===============
`Proxmox Backup`_ Server uses a REST-based API. While the management
Proxmox Backup Server uses a REST-based API. While the management
interface uses normal HTTP, the actual backup and restore interface uses
HTTP/2 for improved performance. Both HTTP and HTTP/2 are well known
standards, so the following section assumes that you are familiar with

View File

@ -7,7 +7,7 @@ Introduction and Format
-----------------------
Certain tasks, for example pruning and garbage collection, need to be
performed on a regular basis. `Proxmox Backup`_ Server uses a format inspired
performed on a regular basis. Proxmox Backup Server uses a format inspired
by the systemd Time and Date Specification (see `systemd.time manpage`_)
called `calendar events` for its schedules.
@ -89,11 +89,11 @@ Not all features of systemd calendar events are implemented:
Notes on Scheduling
-------------------
In Proxmox Backup, scheduling for most tasks is done in the
In `Proxmox Backup`_, scheduling for most tasks is done in the
`proxmox-backup-proxy`. This daemon checks all job schedules
every minute, to see if any are due. This means that even though
`calendar events` can contain seconds, it will only be checked
once per minute.
Also, all schedules will be checked against the timezone set
in the Proxmox Backup Server.
in the `Proxmox Backup`_ server.
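For illustration, such a calendar event is typically attached to a job schedule on the server side, for example (``store1`` and the event string are placeholders; the exact option depends on the job type):

.. code-block:: console

    # proxmox-backup-manager datastore update store1 --gc-schedule 'sat 02:30'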

View File

@ -9,7 +9,7 @@ own (self-signed) certificate. This certificate is used for encrypted
communication with the hosts ``proxmox-backup-proxy`` service, for any API
call between a user or backup-client and the web-interface.
Certificate verification when sending backups to a Proxmox Backup Server
Certificate verification when sending backups to a `Proxmox Backup`_ server
is either done based on pinning the certificate fingerprints in the storage/remote
configuration, or by using certificates, signed by a trusted certificate authority.
@ -18,7 +18,7 @@ configuration, or by using certificates, signed by a trusted certificate authori
Certificates for the API and SMTP
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Proxmox Backup stores its certificate and key in:
`Proxmox Backup`_ stores its certificate and key in:
- ``/etc/proxmox-backup/proxy.pem``
@ -33,19 +33,21 @@ You have the following options for the certificate:
commercial Certificate Authority (CA)).
3. Use an ACME provider like Lets Encrypt to get a trusted certificate
with automatic renewal; this is also integrated in the Proxmox Backup
with automatic renewal; this is also integrated in the `Proxmox Backup`_
API and web interface.
Certificates are managed through the Proxmox Backup
web-interface/API or using the ``proxmox-backup-manager`` CLI tool.
Certificates are managed through the `Proxmox Backup`_
web-interface/API or using the the ``proxmox-backup-manager`` CLI tool.
.. _sysadmin_certs_upload_custom:
Upload Custom Certificate
~~~~~~~~~~~~~~~~~~~~~~~~~
If you already have a certificate which you want to use for a `Proxmox Backup`_
host, you can simply upload that certificate over the web interface.
If you already have a certificate which you want to use for a Proxmox
Mail Gateway host, you can simply upload that certificate over the web
interface.
.. image:: images/screenshots/pbs-gui-certs-upload-custom.png
:target: _images/pbs-gui-certs-upload-custom.png
@ -59,9 +61,9 @@ Note that any certificate key files must not be password protected.
Trusted certificates via Lets Encrypt (ACME)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Proxmox Backup includes an implementation of the **A**\ utomatic
`Proxmox Backup`_ includes an implementation of the **A**\ utomatic
**C**\ ertificate **M**\ anagement **E**\ nvironment (**ACME**)
protocol, allowing Proxmox Backup admins to use an ACME provider
protocol, allowing `Proxmox Backup`_ admins to use an ACME provider
like Lets Encrypt for easy setup of TLS certificates, which are
accepted and trusted by modern operating systems and web browsers out of
the box.
@ -110,7 +112,7 @@ ACME Plugins
^^^^^^^^^^^^
The ACME plugins role is to provide automatic verification that you,
and thus the Proxmox Backup Server under your operation, are the
and thus the `Proxmox Backup`_ server under your operation, are the
real owner of a domain. This is the basic building block of automatic
certificate management.
@ -127,7 +129,7 @@ DNS record in the domains zone.
:align: right
:alt: Create ACME Account
Proxmox Backup supports both of those challenge types out of the
`Proxmox Backup`_ supports both of those challenge types out of the
box, you can configure plugins either over the web interface under
``Certificates -> ACME Challenges``, or using the
``proxmox-backup-manager acme plugin add`` command.
@ -178,7 +180,7 @@ with Lets Encrypts ACME.
- There **must** be no other listener on port 80.
- The requested (sub)domain needs to resolve to a public IP of the
Proxmox Backup host.
`Proxmox Backup`_ host.
.. _sysadmin_certs_acme_dns_challenge:
@ -195,7 +197,7 @@ allows provisioning of ``TXT`` records via an API.
Configuring ACME DNS APIs for validation
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Proxmox Backup re-uses the DNS plugins developed for the
`Proxmox Backup`_ re-uses the DNS plugins developed for the
``acme.sh`` [1]_ project. Please refer to its documentation for details
on configuration of specific APIs.
@ -213,7 +215,7 @@ and entering the credential data to access your account over their API.
your provider. Configuration values do not need to be quoted with
single or double quotes; for some plugins that is even an error.
As there are many DNS providers and API endpoints, Proxmox Backup
As there are many DNS providers and API endpoints, `Proxmox Backup`_
automatically generates the form for the credentials, but not all
providers are annotated yet. For those you will see a bigger text area,
into which you simply need to copy all the credentials
@ -229,7 +231,7 @@ domain/DNS server, in case your primary/real DNS does not support
provisioning via an API. Manually set up a permanent ``CNAME`` record
for ``_acme-challenge.domain1.example`` pointing to
``_acme-challenge.domain2.example``, and set the ``alias`` property in
the Proxmox Backup node configuration file ``/etc/proxmox-backup/node.cfg``
the `Proxmox Backup`_ node configuration file ``/etc/proxmox-backup/node.cfg``
to ``domain2.example`` to allow the DNS server of ``domain2.example`` to
validate all challenges for ``domain1.example``.
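A minimal sketch of the corresponding ``acmedomain`` entry in
``/etc/proxmox-backup/node.cfg`` could look like this; the exact
property-string syntax and the plugin name are assumptions, so verify them
against the node configuration documentation of your version:

::

acmedomain0: domain1.example,plugin=example-dns,alias=domain2.example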
@ -277,12 +279,12 @@ expired or if it will expire in the next 30 days.
.. _manually_change_certificate_over_command_line:
Manually Change Certificate over the Command Line
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If you want to get rid of certificate verification warnings, you have to
generate a valid certificate for your server.
Log in to your Proxmox Backup via ssh or use the console:
Log in to your `Proxmox Backup`_ via ssh or use the console:
::
@ -307,9 +309,9 @@ Follow the instructions on the screen, for example:
After you have finished the certificate request, you have to send the
file ``req.pem`` to your Certification Authority (CA). The CA will issue
the certificate (BASE64 encoded), based on your request. Save this file
as ``cert.pem`` to your Proxmox Backup.
as ``cert.pem`` to your `Proxmox Backup`_.
To activate the new certificate, do the following on your Proxmox Backup
To activate the new certificate, do the following on your `Proxmox Backup`_
::
@ -326,7 +328,7 @@ Test your new certificate, using your browser.
.. note::
To transfer files to and from your Proxmox Backup, you can use
To transfer files to and from your `Proxmox Backup`_, you can use
secure copy: If your desktop runs Linux, you can use the ``scp``
command-line tool. If your desktop PC runs Windows, please use an scp
client like WinSCP (see https://winscp.net/).
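For example, to fetch the certificate request from the server and to upload
the issued certificate afterwards (host name and paths are placeholders):

.. code-block:: console

# scp root@<pbs-host>:/root/req.pem .
# scp cert.pem root@<pbs-host>:/root/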

View File

@ -71,7 +71,7 @@ master_doc = 'index'
# General information about the project.
project = 'Proxmox Backup'
copyright = '2019-2025, Proxmox Server Solutions GmbH'
copyright = '2019-2022, Proxmox Server Solutions GmbH'
author = 'Proxmox Support Team'
# The version info for the project you're documenting acts as a replacement for
@ -102,22 +102,17 @@ man_pages = [
('pxar/man1', 'pxar', 'Proxmox File Archive CLI Tool', [author], 1),
('pmt/man1', 'pmt', 'Control Linux Tape Devices', [author], 1),
('pmtx/man1', 'pmtx', 'Control SCSI media changer devices (tape autoloaders)', [author], 1),
('pbs2to3/man1', 'pbs2to3', 'Proxmox Backup Server upgrade checker script for 2.4+ to current 3.x major upgrades', [author], 1),
# configs
('config/acl/man5', 'acl.cfg', 'Access Control Configuration', [author], 5),
('config/datastore/man5', 'datastore.cfg', 'Datastore Configuration', [author], 5),
('config/domains/man5', 'domains.cfg', 'Realm Configuration', [author], 5),
('config/media-pool/man5', 'media-pool.cfg', 'Media Pool Configuration', [author], 5),
('config/node/man5', 'proxmox-backup.node.cfg', 'Proxmox Backup Server - Node Configuration', [author], 5),
('config/remote/man5', 'remote.cfg', 'Remote Server Configuration', [author], 5),
('config/sync/man5', 'sync.cfg', 'Synchronization Job Configuration', [author], 5),
('config/tape-job/man5', 'tape-job.cfg', 'Tape Job Configuration', [author], 5),
('config/tape/man5', 'tape.cfg', 'Tape Drive and Changer Configuration', [author], 5),
('config/user/man5', 'user.cfg', 'User Configuration', [author], 5),
('config/verification/man5', 'verification.cfg', 'Verification Job Configuration', [author], 5),
('config/prune/man5', 'prune.cfg', 'Prune Job Configuration', [author], 5),
('config/notifications/man5', 'notifications.cfg', 'Notification target/matcher configuration', [author], 5),
('config/notifications-priv/man5', 'notifications-priv.cfg', 'Notification target secrets', [author], 5),
]
@ -126,7 +121,7 @@ man_pages = [
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
@ -203,12 +198,11 @@ html_theme_options = {
'show_powered_by': False,
'extra_nav_links': {
'Proxmox Homepage': 'https://www.proxmox.com',
'Proxmox Homepage': 'https://proxmox.com',
'PDF': 'proxmox-backup.pdf',
'API Viewer' : 'api-viewer/index.html',
'Prune Simulator' : 'prune-simulator/index.html',
'LTO Barcode Generator' : 'lto-barcode/index.html',
'Proxmox Backup Server Wiki' : 'https://pbs.proxmox.com'
},
'sidebar_width': '320px',
@ -269,9 +263,6 @@ html_static_path = ['_static']
html_js_files = [
'custom.js',
]
html_css_files = [
'custom.css',
]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
@ -423,8 +414,6 @@ latex_logo = "images/proxmox-logo.png"
#
# latex_domain_indices = True
latex_table_style = ['booktabs', 'colorrows']
# -- Options for Epub output ----------------------------------------------

View File

@ -1,5 +1,3 @@
:orphan:
=======
acl.cfg
=======

View File

@ -1,5 +1,3 @@
:orphan:
=============
datastore.cfg
=============

View File

@ -23,5 +23,5 @@ For LDAP realms, the LDAP bind password is stored in ``ldap_passwords.json``.
user-classes inetorgperson,posixaccount,person,user
You can use the ``proxmox-backup-manager openid``, ``proxmox-backup-manager
ldap`` and ``proxmox-backup-manager ad`` commands to manipulate this file.
You can use the ``proxmox-backup-manager openid`` and ``proxmox-backup-manager ldap`` commands to manipulate
this file.

View File

@ -1,5 +1,3 @@
:orphan:
===========
domains.cfg
===========

View File

@ -1,5 +1,3 @@
:orphan:
==========================
media-pool.cfg
==========================

View File

@ -1,49 +0,0 @@
The file contains these options:
:acme: The ACME account to use on this node.
:acmedomain0: ACME domain.
:acmedomain1: ACME domain.
:acmedomain2: ACME domain.
:acmedomain3: ACME domain.
:acmedomain4: ACME domain.
:http-proxy: Set proxy for apt and subscription checks.
:email-from: Fallback email from which notifications will be sent.
:ciphers-tls-1.3: List of TLS ciphers for TLS 1.3 that will be used by the proxy. Colon-separated and in descending priority (https://docs.openssl.org/master/man1/openssl-ciphers/). (Proxy has to be restarted for changes to take effect.)
:ciphers-tls-1.2: List of TLS ciphers for TLS <= 1.2 that will be used by the proxy. Colon-separated and in descending priority (https://docs.openssl.org/master/man1/openssl-ciphers/). (Proxy has to be restarted for changes to take effect.)
:default-lang: Default language used in the GUI.
:description: Node description.
:task-log-max-days: Maximum days to keep task logs.
For example:
::
acme: local
acmedomain0: first.domain.com
acmedomain1: second.domain.com
acmedomain2: third.domain.com
acmedomain3: fourth.domain.com
acmedomain4: fifth.domain.com
http-proxy: internal.proxy.com
email-from: proxmox@mail.com
ciphers-tls-1.3: TLS_AES_128_GCM_SHA256:TLS_AES_128_CCM_8_SHA256:TLS_CHACHA20_POLY1305_SHA256
ciphers-tls-1.2: RSA_WITH_AES_128_CCM:DHE_RSA_WITH_AES_128_CCM
default-lang: en
description: Primary PBS instance
task-log-max-days: 30
You can use the ``proxmox-backup-manager node`` command to manipulate
this file.

View File

@ -1,18 +0,0 @@
:orphan:
========
node.cfg
========
Description
===========
The file /etc/proxmox-backup/node.cfg is a configuration file for Proxmox
Backup Server. It contains the general configuration regarding this node.
Options
=======
.. include:: format.rst
.. include:: ../../pbs-copyright.rst

View File

@ -1 +0,0 @@
This file contains protected credentials for notification targets.

View File

@ -1,24 +0,0 @@
:orphan:
======================
notifications-priv.cfg
======================
Description
===========
The file /etc/proxmox-backup/notifications-priv.cfg is a configuration file
for Proxmox Backup Server. It contains the protected part of the
notification system configuration.
File Format
===========
.. include:: format.rst
Options
=======
.. include:: config.rst
.. include:: ../../pbs-copyright.rst

View File

@ -1,2 +0,0 @@
This file contains configuration for notification targets and notification
matchers.

View File

@ -1,24 +0,0 @@
:orphan:
==================
notifications.cfg
==================
Description
===========
The file /etc/proxmox-backup/notifications.cfg is a configuration file
for Proxmox Backup Server. It contains the configuration for the
notification system.
File Format
===========
.. include:: format.rst
Options
=======
.. include:: config.rst
.. include:: ../../pbs-copyright.rst

View File

@ -1,14 +0,0 @@
Each entry starts with the header ``prune: <name>``, followed by the job
configuration options.
::
prune: prune-store2
schedule mon..fri 10:30
store my-datastore
prune: ...
You can use the ``proxmox-backup-manager prune-job`` command to manipulate this
file.

View File

@ -1,23 +0,0 @@
:orphan:
=========
prune.cfg
=========
Description
===========
The file /etc/proxmox-backup/prune.cfg is a configuration file for Proxmox
Backup Server. It contains the prune job configuration.
File Format
===========
.. include:: format.rst
Options
=======
.. include:: config.rst
.. include:: ../../pbs-copyright.rst

View File

@ -1,5 +1,3 @@
:orphan:
==========
remote.cfg
==========

View File

@ -1,5 +1,3 @@
:orphan:
========
sync.cfg
========

View File

@ -1,5 +1,3 @@
:orphan:
============
tape-job.cfg
============

View File

@ -1,5 +1,3 @@
:orphan:
========
tape.cfg
========

View File

@ -1,5 +1,3 @@
:orphan:
========
user.cfg
========

View File

@ -1,5 +1,3 @@
:orphan:
================
verification.cfg
================
@ -7,8 +5,8 @@ verification.cfg
Description
===========
The file /etc/proxmox-backup/verification.cfg is a configuration file for
Proxmox Backup Server. It contains the verification job configuration.
The file /etc/proxmox-backup/sync.cfg is a configuration file for Proxmox
Backup Server. It contains the verification job configuration.
File Format
===========

View File

@ -1,7 +1,7 @@
Configuration Files
===================
All `Proxmox Backup`_ Server configuration files reside in the directory
All Proxmox Backup Server configuration files reside in the directory
``/etc/proxmox-backup/``.
@ -67,61 +67,6 @@ Options
.. include:: config/media-pool/config.rst
``node.cfg``
~~~~~~~~~~~~~~~~~~
Options
^^^^^^^
.. include:: config/node/format.rst
.. _notifications.cfg:
``notifications.cfg``
~~~~~~~~~~~~~~~~~~~~~
File Format
^^^^^^^^^^^
.. include:: config/notifications/format.rst
Options
^^^^^^^
.. include:: config/notifications/config.rst
.. _notifications_priv.cfg:
``notifications-priv.cfg``
~~~~~~~~~~~~~~~~~~~~~~~~~~
File Format
^^^^^^^^^^^
.. include:: config/notifications-priv/format.rst
Options
^^^^^^^
.. include:: config/notifications-priv/config.rst
``prune.cfg``
~~~~~~~~~~~~~
File Format
^^^^^^^^^^^
.. include:: config/prune/format.rst
Options
^^^^^^^
.. include:: config/prune/config.rst
``tape.cfg``
~~~~~~~~~~~~

View File

@ -10,15 +10,15 @@
.. _Zstandard: https://en.wikipedia.org/wiki/Zstandard
.. _Proxmox: https://www.proxmox.com
.. _Proxmox Community Forum: https://forum.proxmox.com
.. _Proxmox Virtual Environment: https://www.proxmox.com/proxmox-virtual-environment
.. _Proxmox Backup: https://www.proxmox.com/proxmox-backup-server
.. _Proxmox Backup Server Development List: https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel
.. _Proxmox Virtual Environment: https://www.proxmox.com/proxmox-ve
.. _Proxmox Backup: https://pbs.proxmox.com/wiki/index.php/Main_Page
.. _PBS Development List: https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel
.. _reStructuredText: https://www.sphinx-doc.org/en/master/usage/restructuredtext/index.html
.. _Rust: https://www.rust-lang.org/
.. _SHA-256: https://en.wikipedia.org/wiki/SHA-2
.. _Sphinx: https://www.sphinx-doc.org
.. _Virtual machine: https://en.wikipedia.org/wiki/Virtual_machine
.. _APT: https://en.wikipedia.org/wiki/Advanced_Packaging_Tool
.. _APT: http://en.wikipedia.org/wiki/Advanced_Packaging_Tool
.. _QEMU: https://www.qemu.org/
.. _LXC: https://linuxcontainers.org/lxc/introduction/
@ -27,7 +27,7 @@
.. _GCM: https://en.wikipedia.org/wiki/Galois/Counter_Mode
.. _AGPL3: https://www.gnu.org/licenses/agpl-3.0.en.html
.. _Debian: https://www.debian.org/index.html
.. _Debian Administrator's Handbook: https://debian-handbook.info/
.. _Debian Administrator's Handbook: https://debian-handbook.info/download/stable/debian-handbook.pdf
.. _LVM: https://en.wikipedia.org/wiki/Logical_Volume_Manager_(Linux)
.. _ZFS: https://en.wikipedia.org/wiki/ZFS
@ -37,4 +37,4 @@
.. _UTC: https://en.wikipedia.org/wiki/Coordinated_Universal_Time
.. _ISO Week date: https://en.wikipedia.org/wiki/ISO_week_date
.. _systemd.time manpage: https://manpages.debian.org/stable/systemd/systemd.time.7.en.html
.. _systemd.time manpage: https://manpages.debian.org/buster/systemd/systemd.time.7.en.html

View File

@ -1,55 +0,0 @@
External Metric Server
----------------------
Proxmox Backup Server periodically sends various metrics about your host's memory,
network and disk activity to configured external metric servers.
Currently supported are:
* InfluxDB (HTTP) (see https://docs.influxdata.com/influxdb/v2/ )
* InfluxDB (UDP) (see https://docs.influxdata.com/influxdb/v1/ )
The external metric server definitions are saved in
'/etc/proxmox-backup/metricserver.cfg', and can be edited through the web
interface.
.. note::
Using HTTP is recommended as UDP support has been dropped in InfluxDB v2.
InfluxDB (HTTP) plugin configuration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The plugin can be configured to use the HTTP(s) API of InfluxDB 2.x.
InfluxDB 1.8.x contains a forward-compatible endpoint for this v2 API.
Since InfluxDB's v2 API is only available with authentication, you have to
generate a token that can write into the correct bucket and set it in the
plugin configuration.
In the v2 compatible API of 1.8.x, you can use 'user:password' as token
(if required), and can omit the 'organization' since that has no meaning in InfluxDB 1.x.
You can also set the maximum batch size (default 25000000 bytes) with the
'max-body-size' setting (this corresponds to the InfluxDB setting with the
same name).
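As an illustration only, an InfluxDB 2.x HTTP target in
``/etc/proxmox-backup/metricserver.cfg`` might look roughly like the
following; the section type and key names are assumptions derived from the
options described above, so verify them against your installation (for
example by configuring a target through the web interface first):

::

influxdb-http: influxdb2
url https://influxdb.example.com:8086
organization proxmox
bucket proxmox
token <your-write-token>
max-body-size 25000000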
InfluxDB (UDP) plugin configuration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Proxmox Backup Server can also send data via UDP. This requires the InfluxDB
server to be configured correctly. The MTU can also be configured here if
necessary.
Here is an example configuration for InfluxDB (on your InfluxDB server):
.. code-block:: console
[[udp]]
enabled = true
bind-address = "0.0.0.0:8089"
database = "proxmox"
batch-size = 1000
batch-timeout = "1s"
With this configuration, the InfluxDB server listens on all IP addresses on
port 8089, and writes the data in the *proxmox* database.

View File

@ -1,4 +0,0 @@
Proxmox Backup Version , Debian Version , First Release , Debian EOL , Proxmox Backup EOL
Proxmox Backup 3 , Debian 12 (Bookworm) , 2023-06 , TBA , TBA
Proxmox Backup 2 , Debian 11 (Bullseye) , 2021-07 , 2024-07 , 2024-07
Proxmox Backup 1 , Debian 10 (Buster) , 2020-11 , 2022-08 , 2022-07

View File

@ -4,7 +4,7 @@ FAQ
What distribution is Proxmox Backup Server (PBS) based on?
----------------------------------------------------------
`Proxmox Backup`_ Server is based on `Debian GNU/Linux <https://www.debian.org/>`_.
Proxmox Backup Server is based on `Debian GNU/Linux <https://www.debian.org/>`_.
Which platforms are supported as a backup source (client)?
@ -21,54 +21,17 @@ Proxmox Backup Server only supports 64-bit CPUs (AMD or Intel). There are no
future plans to support 32-bit processors.
.. _faq-support-table:
How long will my Proxmox Backup Server version be supported?
------------------------------------------------------------
.. csv-table::
:file: faq-release-support-table.csv
:widths: 30 26 13 13 18
:header-rows: 1
+-----------------------+----------------------+---------------+------------+--------------------+
|Proxmox Backup Version | Debian Version | First Release | Debian EOL | Proxmox Backup EOL |
+=======================+======================+===============+============+====================+
|Proxmox Backup 2.x | Debian 11 (Bullseye) | 2021-07 | tba | tba |
+-----------------------+----------------------+---------------+------------+--------------------+
|Proxmox Backup 1.x | Debian 10 (Buster) | 2020-11 | 2022-08 | 2022-07 |
+-----------------------+----------------------+---------------+------------+--------------------+
How can I upgrade Proxmox Backup Server to the next point release?
------------------------------------------------------------------
Minor version upgrades, for example upgrading from Proxmox Backup Server in
version 3.1 to 3.2 or 3.3, can be done just like any normal update.
But you should still check the `release notes
<https://pbs.proxmox.com/wiki/index.php/Roadmap>`_ for any relevant notable or
breaking changes.
For the update itself use either the Web UI *Node -> Updates* panel or
through the CLI with:
.. code-block:: console
apt update
apt full-upgrade
.. note:: Always ensure you have correctly set up the
:ref:`package repositories <sysadmin_package_repositories>` and only
continue with the actual upgrade if `apt update` did not hit any error.
.. _faq-upgrade-major:
How can I upgrade Proxmox Backup Server to the next major release?
------------------------------------------------------------------
Major version upgrades, for example going from Proxmox Backup Server 2.4 to
3.1, are also supported.
They must be carefully planned and tested and should **never** be started
without having an off-site copy of the important backups ready, e.g., via
remote sync or tape.
Although the specific upgrade steps depend on your respective setup, we provide
general instructions and advice on how an upgrade should be performed:
* `Upgrade from Proxmox Backup Server 2 to 3 <https://pbs.proxmox.com/wiki/index.php/Upgrade_from_2_to_3>`_
* `Upgrade from Proxmox Backup Server 1 to 2 <https://pbs.proxmox.com/wiki/index.php/Upgrade_from_1.1_to_2.x>`_
Can I copy or synchronize my datastore to another location?
-----------------------------------------------------------
@ -111,7 +74,3 @@ data is then deduplicated on the server. This minimizes both the storage
consumed and the impact on the network. Each backup still references all
data and such is a full backup. For details see the
:ref:`Technical Overview <tech_design_overview>`
.. todo:: document our stability guarantees, i.e., the separate one for, in
increasing duration of how long we'll support it: api compat, backup
protocol compat and backup format compat

View File

@ -8,53 +8,7 @@ Proxmox File Archive Format (``.pxar``)
.. graphviz:: pxar-format-overview.dot
.. _pxar-meta-format:
Proxmox File Archive Format - Meta (``.mpxar``)
-----------------------------------------------
Pxar metadata archive with same structure as a regular pxar archive, with the
exception of regular file payloads not being contained within the archive
itself, but rather being stored as payload references to the corresponding pxar
payload (``.ppxar``) file.
Can be used to look up all the archive entries and metadata without the size
overhead introduced by the file payloads.
.. graphviz:: meta-format-overview.dot
.. _ppxar-format:
Proxmox File Archive Format - Payload (``.ppxar``)
--------------------------------------------------
Pxar payload file storing regular file payloads to be referenced and accessed by
the corresponding pxar metadata (``.mpxar``) archive. Contains a concatenation
of regular file payloads, each prefixed by a `PAYLOAD` header. Further, the
actual referenced payload entries might be separated by padding (full/partial
payloads not referenced), introduced when reusing chunks of a previous backup
run, when chunk boundaries did not align with payload entry offsets.
All headers are stored as little-endian.
.. list-table::
:widths: auto
* - ``PAYLOAD_START_MARKER``
- header of ``[u8; 16]`` consisting of type hash and size;
marks start
* - ``PAYLOAD``
- header of ``[u8; 16]`` consisting of type hash and size;
referenced by metadata archive
* - Payload
- raw regular file payload
* - Padding
- partial/full unreferenced payloads, caused by unaligned chunk boundary
* - ...
- further concatenation of payload header, payload and padding
* - ``PAYLOAD_TAIL_MARKER``
- header of ``[u8; 16]`` consisting of type hash and size;
marks end
.. _data-blob-format:
Data Blob Format (``.blob``)

View File

@ -1,7 +1,7 @@
Graphical User Interface
========================
`Proxmox Backup`_ Server offers an integrated, web-based interface to manage the
Proxmox Backup Server offers an integrated, web-based interface to manage the
server. This means that you can carry out all administration tasks through your
web browser, and that you don't have to worry about installing extra management
tools. The web interface also provides a built-in console, so if you prefer the
@ -40,16 +40,6 @@ Proxmox Backup Server supports various languages and authentication back ends
.. note:: For convenience, you can save the username on the client side, by
selecting the "Save User name" checkbox at the bottom of the window.
.. _consent_banner:
Consent Banner
^^^^^^^^^^^^^^
A custom consent banner that has to be accepted before login can be configured
in **Configuration -> Other -> General -> Consent Text**. If there is no
content, the consent banner will not be displayed. The text will be stored as a
base64 string in the ``/etc/proxmox-backup/node.cfg`` config file.
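The encoding itself is handled by the web interface, but if you want to
inspect or pre-compute the stored value, you can do so on any shell, for
example:

.. code-block:: console

# echo -n "Authorized access only." | base64
QXV0aG9yaXplZCBhY2Nlc3Mgb25seS4=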
GUI Overview
------------

Binary files not shown (8 screenshot images, ranging from 4.6 KiB to 164 KiB, present only in the "before" revision).

View File

@ -34,7 +34,6 @@ in the section entitled "GNU Free Documentation License".
maintenance.rst
sysadmin.rst
network-management.rst
notifications.rst
technical-overview.rst
faq.rst

View File

@ -1,157 +0,0 @@
.. _installation_medium:
Installation Medium
-------------------
Proxmox Backup Server can be installed via :ref:`different methods
<install_pbs>`. The recommended method is to use an installation medium and
simply boot the interactive installer.
Prepare Installation Medium
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Download the installer ISO image from |DOWNLOADS|.
The Proxmox Backup Server installation medium is a hybrid ISO image.
It works in two ways:
- An ISO image file ready to burn to a DVD.
- A raw sector (IMG) image file ready to copy to a USB flash drive (USB stick).
Using a USB flash drive to install Proxmox Backup Server is the
recommended way since it is the faster and more frequently available
option these days.
Prepare a USB Flash Drive as Installation Medium
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The flash drive needs to have at least 2 GB of storage space.
.. note::
Do not use *UNetbootin*. It does not work with the Proxmox Backup
Server installation image.
.. important::
Existing data on the USB flash drive will be overwritten. Therefore, make
sure that it does not contain any data you still need, and unmount it again
before proceeding.
Instructions for GNU/Linux
~~~~~~~~~~~~~~~~~~~~~~~~~~
On Unix-like operating systems use the ``dd`` command to copy the ISO
image to the USB flash drive. First find the correct device name of the
USB flash drive (see below). Then run the ``dd`` command. Depending on
your environment, you will need to have root privileges to execute
``dd``.
.. code-block:: console
# dd bs=1M conv=fdatasync if=./proxmox-backup-server_*.iso of=/dev/XYZ
.. note::
Be sure to replace ``/dev/XYZ`` with the correct device name and adapt
the input filename (*if*) path.
.. caution::
Be very careful, and do not overwrite the wrong disk!
Find the Correct USB Device Name
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
There are two ways to find out the name of the USB flash drive. The
first one is to compare the last lines of the ``dmesg`` command output
before and after plugging in the flash drive. The second way is to
compare the output of the ``lsblk`` command. Open a terminal and run:
.. code-block:: console
# lsblk
Then plug in your USB flash drive and run the command again:
.. code-block:: console
# lsblk
A new device will appear. This is the one you want to use. To be extra safe,
check that the reported size matches your USB flash drive.
Instructions for macOS
~~~~~~~~~~~~~~~~~~~~~~
Open the terminal (query *Terminal* in Spotlight).
Convert the ``.iso`` file to ``.dmg`` format using the convert option of
``hdiutil``, for example:
.. code-block:: console
# hdiutil convert proxmox-backup-server_*.iso -format UDRW -o proxmox-backup-server_*.dmg
.. note::
macOS tends to automatically add ``.dmg`` to the output file name.
To get the current list of devices run the command:
.. code-block:: console
# diskutil list
Now insert the USB flash drive and run this command again to determine
which device node has been assigned to it. (e.g., ``/dev/diskX``).
.. code-block:: console
# diskutil list
# diskutil unmountDisk /dev/diskX
.. note::
replace *X* with the disk number from the last command.
.. code-block:: console
# sudo dd if=proxmox-backup-server_*.dmg bs=1M of=/dev/rdiskX
.. note::
*rdiskX*, instead of *diskX*, in the last command is intended. It
will increase the write speed.
Instructions for Windows
~~~~~~~~~~~~~~~~~~~~~~~~
Using Etcher
^^^^^^^^^^^^
Etcher works out of the box. Download Etcher from https://etcher.io. It
will guide you through the process of selecting the ISO and your USB
flash drive.
Using Rufus
^^^^^^^^^^^
Rufus is a more lightweight alternative, but you need to use the **DD
mode** to make it work. Download Rufus from https://rufus.ie/. Either
install it or use the portable version. Select the destination drive
and the downloaded Proxmox ISO file.
.. important::
Once you click *Start*, you have to click *No* on the dialog asking to
download a different version of Grub. In the next dialog select **DD mode**.
Use the Installation Medium
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Insert the created USB flash drive (or DVD) into your server. Continue
by reading the :ref:`installer <using_the_installer>` chapter, which
also describes possible boot issues.

View File

@ -7,9 +7,7 @@ Debian_ from the provided package repository.
.. include:: system-requirements.rst
.. include:: installation-media.rst
.. _install_pbs:
.. include:: package-repositories.rst
Server Installation
-------------------
@ -18,50 +16,40 @@ The backup server stores the actual backed up data and provides a web based GUI
for various management tasks such as disk management.
.. note:: You always need a backup server. It is not possible to use
Proxmox Backup without the server part.
`Proxmox Backup`_ without the server part.
Using our provided disk image (ISO file) is the recommended
installation method, as it includes a convenient installer, a complete
Debian system as well as all necessary packages for the Proxmox Backup
Server.
The disk image (ISO file) provided by Proxmox includes a complete Debian system
as well as all necessary packages for the `Proxmox Backup`_ Server.
Once you have created an :ref:`installation_medium`, the booted
:ref:`installer <using_the_installer>` will guide you through the
setup process. It will help you to partition your disks, apply basic
settings such as the language, time zone and network configuration,
and finally install all required packages within minutes.
The installer will guide you through the setup process and allow
you to partition the local disk(s), apply basic system configuration
(for example timezone, language, network), and install all required packages.
The provided ISO will get you started in just a few minutes, and is the
recommended method for new and existing users.
As an alternative to the interactive installer, advanced users may
wish to install Proxmox Backup Server
:ref:`unattended <install_pbs_unattended>`.
Alternatively, `Proxmox Backup`_ Server can be installed on top of an
existing Debian system.
With sufficient Debian knowledge, you can also install Proxmox Backup
Server :ref:`on top of Debian <install_pbs_on_debian>` yourself.
Install `Proxmox Backup`_ Server using the Installer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
While not recommended, Proxmox Backup Server could also be installed
:ref:`on Proxmox VE <install_pbs_on_pve>`.
Download the ISO from |DOWNLOADS|.
It includes the following:
.. include:: using-the-installer.rst
* The `Proxmox Backup`_ Server installer, which partitions the local
disk(s) with ext4, xfs or ZFS, and installs the operating system
.. _install_pbs_unattended:
* Complete operating system (Debian Linux, 64-bit)
Install `Proxmox Backup`_ Server Unattended
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
It is possible to install Proxmox Backup Server automatically in an
unattended manner. This enables you to fully automate the setup process on
bare-metal. Once the installation is complete and the host has booted up,
automation tools like Ansible can be used to further configure the installation.
* Proxmox Linux kernel with ZFS support
The necessary options for the installer must be provided in an answer file.
This file allows the use of filter rules to determine which disks and network
cards should be used.
* Complete tool-set to administer backups and all necessary resources
To use the automated installation, it is first necessary to prepare an
installation ISO. For more details and information on the unattended
installation see `our wiki
<https://pve.proxmox.com/wiki/Automated_Installation>`_.
* Web based management interface
.. note:: During the installation process, the complete server
is used by default and all existing data is removed.
.. _install_pbs_on_debian:
Install `Proxmox Backup`_ Server on Debian
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@ -72,8 +60,8 @@ standard Debian installation. After configuring the
.. code-block:: console
# apt update
# apt install proxmox-backup-server
# apt-get update
# apt-get install proxmox-backup-server
The above commands keep the current (Debian) kernel and install a minimal
set of required packages.
@ -83,13 +71,13 @@ does, please use the following:
.. code-block:: console
# apt update
# apt install proxmox-backup
# apt-get update
# apt-get install proxmox-backup
This will install all required packages, the Proxmox kernel with ZFS_
support, and a set of common and useful packages.
.. caution:: Installing Proxmox Backup on top of an existing Debian_
.. caution:: Installing `Proxmox Backup`_ on top of an existing Debian_
installation looks easy, but it assumes that the base system and local
storage have been set up correctly. In general this is not trivial, especially
when LVM_ or ZFS_ is used. The network configuration is completely up to you
@ -99,8 +87,6 @@ support, and a set of common and useful packages.
your web browser, using HTTPS on port 8007. For example at
``https://<ip-or-dns-name>:8007``
.. _install_pbs_on_pve:
Install Proxmox Backup Server on `Proxmox VE`_
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@ -109,8 +95,8 @@ After configuring the
.. code-block:: console
# apt update
# apt install proxmox-backup-server
# apt-get update
# apt-get install proxmox-backup-server
.. caution:: Installing the backup server directly on the hypervisor
is not recommended. It is safer to use a separate physical
@ -121,12 +107,10 @@ After configuring the
your web browser, using HTTPS on port 8007. For example at
``https://<ip-or-dns-name>:8007``
.. _install_pbc:
Client Installation
-------------------
Install Proxmox Backup Client on Debian
Install `Proxmox Backup`_ Client on Debian
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Proxmox ships as a set of Debian packages to be installed on top of a standard
@ -135,29 +119,10 @@ you need to run:
.. code-block:: console
# apt update
# apt install proxmox-backup-client
# apt-get update
# apt-get install proxmox-backup-client
Install Statically Linked Proxmox Backup Client
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Proxmox provides a statically linked build of the Proxmox backup client that
should run on any modern x86-64 Linux system.
.. note:: The client-only repository should be usable by most recent Debian and
Ubuntu derivatives.
It is currently available as a Debian package. After configuring the
:ref:`package_repositories_client_only_apt`, you need to run:
.. code-block:: console
# apt update
# apt install proxmox-backup-client-static
This package conflicts with the `proxmox-backup-client` package, as both
provide the client as an executable in the `/usr/bin/proxmox-backup-client`
path.
You can copy this executable to other, e.g. non-Debian based Linux systems.
For details on using the Proxmox Backup Client, see :ref:`client_usage`.
.. include:: package-repositories.rst

View File

@ -4,8 +4,8 @@ Introduction
What is Proxmox Backup Server?
------------------------------
`Proxmox Backup`_ Server is an enterprise-class, client-server backup solution
that is capable of backing up :term:`virtual machine<Virtual machine>`\ s,
Proxmox Backup Server is an enterprise-class, client-server backup solution that
is capable of backing up :term:`virtual machine<Virtual machine>`\ s,
:term:`container<Container>`\ s, and physical hosts. It is specially optimized
for the `Proxmox Virtual Environment`_ platform and allows you to back up your
data securely, even between remote sites, providing easy management through a
@ -178,7 +178,7 @@ Mailing Lists
Proxmox Backup Server is fully open-source and contributions are welcome! Here
is the primary communication channel for developers:
:Mailing list for developers: `Proxmox Backup Server Development List`_
:Mailing list for developers: `Proxmox Backup Sever Development List`_
Bug Tracker
~~~~~~~~~~~

View File

@ -121,7 +121,7 @@ Create a new pool with cache (L2ARC)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
It is possible to use a dedicated cache drive partition to increase
the read performance (use SSDs).
the performance (use SSD).
For `<device>`, you can use multiple devices, as is shown in
"Create a new pool with RAID*".
@ -134,7 +134,7 @@ Create a new pool with log (ZIL)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
It is possible to use a dedicated cache drive partition to increase
the write performance (use SSDs).
the performance (SSD).
For `<device>`, you can use multiple devices, as is shown in
"Create a new pool with RAID*".
@ -172,7 +172,7 @@ Changing a failed device
Changing a failed bootable device
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Depending on how `Proxmox Backup`_ was installed, it is either using `grub` or
Depending on how Proxmox Backup was installed, it is either using `grub` or
`systemd-boot` as a bootloader.
In either case, the first steps of copying the partition table, reissuing GUIDs
@ -195,7 +195,7 @@ With `systemd-boot`:
# proxmox-boot-tool init <new ESP>
.. NOTE:: `ESP` stands for EFI System Partition, which is setup as partition #2 on
bootable disks setup by the Proxmox Backup installer. For details, see
bootable disks setup by the `Proxmox Backup`_ installer. For details, see
:ref:`Setting up a new partition for use as synced ESP <systembooting-proxmox-boot-setup>`.
With `grub`:
@ -214,17 +214,17 @@ Activate e-mail notification
ZFS comes with an event daemon, ``ZED``, which monitors events generated by the
ZFS kernel module. The daemon can also send emails upon ZFS events, such as pool
errors. Newer ZFS packages ship the daemon in a separate package ``zfs-zed``,
which should already be installed by default in Proxmox Backup.
which should already be installed by default in `Proxmox Backup`_.
You can configure the daemon via the file ``/etc/zfs/zed.d/zed.rc``, using your
preferred editor. The required setting for email notification is
preferred editor. The required setting for email notfication is
``ZED_EMAIL_ADDR``, which is set to ``root`` by default.
.. code-block:: console
ZED_EMAIL_ADDR="root"
Please note that Proxmox Backup forwards mails to `root` to the email address
Please note that `Proxmox Backup`_ forwards mails to `root` to the email address
configured for the root user.
@ -264,7 +264,6 @@ systems with more than 256 GiB of total memory, where simply setting
# update-initramfs -u
.. _zfs_swap:
Swap on ZFS
^^^^^^^^^^^
@ -291,17 +290,17 @@ an editor of your choice and add the following line:
vm.swappiness = 10
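To apply the new value immediately, without rebooting, it can also be set at
runtime (standard ``sysctl`` usage, not specific to Proxmox Backup Server):

.. code-block:: console

# sysctl -w vm.swappiness=10
vm.swappiness = 10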
.. table:: Linux kernel `swappiness` parameter values
:widths: 1, 3
:widths:auto
=================== ===============================================================
Value Strategy
=================== ===============================================================
vm.swappiness = 0 The kernel will swap only to avoid an 'out of memory' condition
vm.swappiness = 1 Minimum amount of swapping without disabling it entirely.
vm.swappiness = 10 Sometimes recommended to improve performance when sufficient memory exists in a system.
vm.swappiness = 60 The default value.
vm.swappiness = 100 The kernel will swap aggressively.
=================== ===============================================================
==================== ===============================================================
Value Strategy
==================== ===============================================================
vm.swappiness = 0 The kernel will swap only to avoid an 'out of memory' condition
vm.swappiness = 1 Minimum amount of swapping without disabling it entirely.
vm.swappiness = 10 Sometimes recommended to improve performance when sufficient memory exists in a system.
vm.swappiness = 60 The default value.
vm.swappiness = 100 The kernel will swap aggressively.
==================== ===============================================================
ZFS compression
^^^^^^^^^^^^^^^

View File

@ -46,23 +46,6 @@ Ext.define('LabelSetupPanel', {
let params = view.getValues();
list.getStore().add(params);
},
validitychange: function() {
let me = this;
let isValid = true;
me.getView().query('field').forEach((field) => {
if (!field.isValid()) {
isValid = false;
}
});
me.lookup('addButton').setDisabled(!isValid);
},
control: {
'field': {
validitychange: 'validitychange',
},
},
},
items: [
@ -84,7 +67,7 @@ Ext.define('LabelSetupPanel', {
xtype: 'ltoTapeType',
name: 'tape_type',
fieldLabel: 'Type',
value: 'L9',
value: 'L8',
},
{
xtype: 'ltoLabelStyle',
@ -110,7 +93,6 @@ Ext.define('LabelSetupPanel', {
{
xtype: 'button',
text: 'Add',
reference: 'addButton',
handler: 'onAdd',
},
],

View File

@ -108,7 +108,7 @@ Ext.define('PageCalibration', {
xtype: 'numberfield',
value: 'a4',
name: 's_x',
fieldLabel: 'Measured Start Offset Sx (mm)',
fieldLabel: 'Meassured Start Offset Sx (mm)',
allowBlank: false,
labelWidth: 200,
},
@ -116,7 +116,7 @@ Ext.define('PageCalibration', {
xtype: 'numberfield',
value: 'a4',
name: 'd_x',
fieldLabel: 'Measured Length Dx (mm)',
fieldLabel: 'Meassured Length Dx (mm)',
allowBlank: false,
labelWidth: 200,
},
@ -124,7 +124,7 @@ Ext.define('PageCalibration', {
xtype: 'numberfield',
value: 'a4',
name: 's_y',
fieldLabel: 'Measured Start Offset Sy (mm)',
fieldLabel: 'Meassured Start Offset Sy (mm)',
allowBlank: false,
labelWidth: 200,
},
@ -132,7 +132,7 @@ Ext.define('PageCalibration', {
xtype: 'numberfield',
value: 'a4',
name: 'd_y',
fieldLabel: 'Measured Length Dy (mm)',
fieldLabel: 'Meassured Length Dy (mm)',
allowBlank: false,
labelWidth: 200,
},

View File

@ -11,20 +11,12 @@ Ext.define('LtoTapeType', {
store: {
field: ['value', 'text'],
data: [
{ value: 'L9', text: "LTO-9" },
{ value: 'LZ', text: "LTO-9 (WORM)" },
{ value: 'L8', text: "LTO-8" },
{ value: 'LY', text: "LTO-8 (WORM)" },
{ value: 'L7', text: "LTO-7" },
{ value: 'LX', text: "LTO-7 (WORM)" },
{ value: 'L6', text: "LTO-6" },
{ value: 'LW', text: "LTO-6 (WORM)" },
{ value: 'L5', text: "LTO-5" },
{ value: 'LV', text: "LTO-5 (WORM)" },
{ value: 'L4', text: "LTO-4" },
{ value: 'LU', text: "LTO-4 (WORM)" },
{ value: 'L3', text: "LTO-3" },
{ value: 'LT', text: "LTO-3 (WORM)" },
{ value: 'CU', text: "Cleaning Unit" },
],
},

View File

@ -6,64 +6,35 @@ Maintenance Tasks
Pruning
-------
Prune lets you specify which backup snapshots you want to keep, removing others.
When pruning a snapshot, only the snapshot metadata (manifest, indices, blobs,
log and notes) is removed. The chunks containing the actual backup data that
were previously referenced by the pruned snapshot have to be removed by a
garbage collection run.
.. Caution:: Take into consideration that sensitive information stored in a
given data chunk will outlive pruned snapshots and remain present in the
datastore as long as referenced by at least one backup snapshot. Further,
*even* if no snapshot references a given chunk, it will remain present until
removed by the garbage collection.
Moreover, file-level backups created using the change detection mode
``metadata`` can reference backup chunks containing files which have vanished
since the previous backup. These files might still be accessible by reading
the chunks' raw data (client- or server-side).
To remove chunks containing sensitive data, prune any snapshot made while the
data was part of the backup input and run a garbage collection. Further, if
using file-based backups with change detection mode ``metadata``,
additionally prune all snapshots since the sensitive data was no longer part
of the backup input and run a garbage collection.
The no longer referenced chunks will then be marked for deletion on the next
garbage collection run and removed by a subsequent run after the grace
period.
The following retention options are available for pruning:
Prune lets you specify which backup snapshots you want to keep.
The following retention options are available:
``keep-last <N>``
Keep the last ``<N>`` backup snapshots.
``keep-hourly <N>``
Keep backups for the last ``<N>`` hours. If there is more than one backup for
a single hour, only the latest is retained. Hours without backups do not
count.
Keep backups for the last ``<N>`` hours. If there is more than one
backup for a single hour, only the latest is retained.
``keep-daily <N>``
Keep backups for the last ``<N>`` days. If there is more than one backup for a
single day, only the latest is retained. Days without backups do not count.
Keep backups for the last ``<N>`` days. If there is more than one
backup for a single day, only the latest is retained.
``keep-weekly <N>``
Keep backups for the last ``<N>`` weeks. If there is more than one backup for
a single week, only the latest is retained. Weeks without backup do not count.
Keep backups for the last ``<N>`` weeks. If there is more than one
backup for a single week, only the latest is retained.
.. note:: Weeks start on Monday and end on Sunday. The software
uses the `ISO week date`_ system and handles weeks at
the end of the year correctly.
``keep-monthly <N>``
Keep backups for the last ``<N>`` months. If there is more than one backup for
a single month, only the latest is retained. Months without backups do not
count.
Keep backups for the last ``<N>`` months. If there is more than one
backup for a single month, only the latest is retained.
``keep-yearly <N>``
Keep backups for the last ``<N>`` years. If there is more than one backup for
a single year, only the latest is retained. Years without backups do not
count.
Keep backups for the last ``<N>`` years. If there is more than one
backup for a single year, only the latest is retained.
The retention options are processed in the order given above. Each option
only covers backups within its time period. The next option does not take care
@ -197,12 +168,10 @@ It's recommended to setup a schedule to ensure that unused space is cleaned up
periodically. For most setups a weekly schedule provides a good interval to
start.
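For example, such a schedule can be set on a datastore from the command line;
the datastore name and the calendar event are placeholders:

.. code-block:: console

# proxmox-backup-manager datastore update store1 --gc-schedule 'sat 18:15'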
.. _gc_background:
GC Background
^^^^^^^^^^^^^
In `Proxmox Backup`_ Server, backup data is not saved directly, but rather as
In Proxmox Backup Server, backup data is not saved directly, but rather as
chunks that are referred to by the indexes of each backup snapshot. This
approach enables reuse of chunks through deduplication, among other benefits
that are detailed in the :ref:`tech_design_overview`.
@ -224,31 +193,17 @@ datastore or interfering with other backups.
The garbage collection (GC) process is performed per datastore and is split
into two phases:
- Phase one (Mark):
- Phase one: Mark
All index files are read, and the access time of the referred chunk files is
updated.
All index files are read, and the access time (``atime``) of the referenced
chunk files is updated.
- Phase two (Sweep):
The task iterates over all chunks and checks their file access time against a
cutoff time. The cutoff time is given by either the oldest backup writer
instance, if present, or 24 hours and 5 minutes before the start of the
garbage collection.
Garbage collection considers chunk files with access time older than the
cutoff time to be neither referenced by any backup snapshot's index, nor part
of any currently running backup job. Therefore, these chunks can safely be
deleted.
Chunks within the grace period will not be deleted; they are logged at the
end of the garbage collection task as *Pending removals*.
.. note:: The grace period for backup chunk removal is not arbitrary, but stems
from the fact that filesystems are typically mounted with the ``relatime``
option by default. This results in better performance by only updating the
``atime`` property if a file has been modified since the last access, or if
the last access was at least 24 hours ago.
- Phase two: Sweep
The task iterates over all chunks, checks their file access time, and if it
is older than the cutoff time (i.e., the time when GC started, plus some
headroom for safety and Linux file system behavior), the task knows that the
chunk was neither referred to in any backup index nor part of any currently
running backup that has no index to scan for. As such, the chunk can be
safely deleted.
Manually Starting GC
^^^^^^^^^^^^^^^^^^^^
@ -319,10 +274,26 @@ the **Actions** column in the table.
Notifications
-------------
Proxmox Backup Server can send you notifications about automatically
Proxmox Backup Server can send you notification emails about automatically
scheduled verification, garbage-collection and synchronization tasks results.
Refer to the :ref:`notifications` chapter for more details.
By default, notifications are sent to the email address configured for the
`root@pam` user. You can instead set this user for each datastore.
.. image:: images/screenshots/pbs-gui-datastore-options.png
:target: _images/pbs-gui-datastore-options.png
:align: right
:alt: Datastore Options
You can also change the level of notification received per task type, the
following options are available:
* Always: send a notification for any scheduled task, independent of the
outcome
* Errors: send a notification for any scheduled task that results in an error
* Never: do not send any notification at all
.. _maintenance_mode:

View File

@ -6,11 +6,11 @@ Managing Remotes & Sync
:term:`Remote`
--------------
A remote refers to a separate `Proxmox Backup`_ Server installation and a user
on that installation, from which you can `sync` datastores to a local datastore
with a `Sync Job`. You can configure remotes in the web interface, under
**Configuration -> Remotes**. Alternatively, you can use the ``remote``
subcommand. The configuration information for remotes is stored in the file
A remote refers to a separate Proxmox Backup Server installation and a user on that
installation, from which you can `sync` datastores to a local datastore with a
`Sync Job`. You can configure remotes in the web interface, under **Configuration
-> Remotes**. Alternatively, you can use the ``remote`` subcommand. The
configuration information for remotes is stored in the file
``/etc/proxmox-backup/remote.cfg``.
.. image:: images/screenshots/pbs-gui-remote-add.png
@ -69,13 +69,6 @@ sync-job`` command. The configuration information for sync jobs is stored at
in the GUI, or use the ``create`` subcommand. After creating a sync job, you can
either start it manually from the GUI or provide it with a schedule (see
:ref:`calendar-event-scheduling`) to run regularly.
Backup snapshots, groups and namespaces which are no longer available on the
**Remote** datastore can be removed from the local datastore as well by setting
the ``remove-vanished`` option for the sync job.
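For example, using the ``sync-job`` subcommand shown elsewhere in this
chapter (the boolean value syntax is an assumption):

.. code-block:: console

# proxmox-backup-manager sync-job update ID --remove-vanished true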
Setting the ``verified-only`` or ``encrypted-only`` flags allows to limit the
sync jobs to backup snapshots which have been verified or encrypted,
respectively. This is particularly of interest when sending backups to a less
trusted remote backup server.
.. code-block:: console
@ -123,28 +116,8 @@ of the specified criteria are synced. The available criteria are:
The same filter is applied to local groups, for handling of the
``remove-vanished`` option.
A ``group-filter`` can be inverted by prepending ``exclude:`` to it.
* Regular expression example, excluding the match:
.. code-block:: console
# proxmox-backup-manager sync-job update ID --group-filter exclude:regex:'^vm/1\d{2,3}$'
When mixing include and exclude filters, the following rules apply:
- no filters: all backup groups
- include: only those matching the include filters
- exclude: all but those matching the exclude filters
- both: those matching the include filters, but without those matching the exclude filters
.. note:: The ``protected`` flag of remote backup snapshots will not be synced.
Enabling the advanced option 'resync-corrupt' will re-sync all snapshots that have
failed to verify during the last :ref:`maintenance_verification`. Hence, a verification
job needs to be run before a sync job with 'resync-corrupt' can be carried out. Be aware
that a 'resync-corrupt'-job needs to check the manifests of all snapshots in a datastore
and might take much longer than regular sync jobs.
Namespace Support
^^^^^^^^^^^^^^^^^
@ -231,52 +204,9 @@ Bandwidth Limit
Syncing a datastore to an archive can produce a lot of traffic and impact other
users of the network. In order to avoid network or storage congestion, you can
limit the bandwidth of a sync job in pull direction by setting the ``rate-in``
option either in the web interface or using the ``proxmox-backup-manager``
command-line tool:
limit the bandwidth of the sync job by setting the ``rate-in`` option either in
the web interface or using the ``proxmox-backup-manager`` command-line tool:
.. code-block:: console
# proxmox-backup-manager sync-job update ID --rate-in 20MiB
For sync jobs in push direction use the ``rate-out`` option instead.
Sync Direction Push
^^^^^^^^^^^^^^^^^^^
Sync jobs can be configured for pull or push direction. Sync jobs in push
direction do not behave identically, because access to the target datastore
is limited to the remote server's API. Most notably, pushed content will
always be owned by the user configured in the remote configuration,
independent of the local user configured in the sync job. The latter is used
exclusively for permission and scope checks on the pushing side.
.. note:: It is strongly advised to create a dedicated remote configuration for
each individual sync job in push direction, using a dedicated user on the
remote. Otherwise, sync jobs pushing to the same target might remove each
other's snapshots and/or groups if the remove-vanished flag is set, or skip
snapshots if the backup time is not incremental.
This is because the backup groups on the target are owned by the user
given in the remote configuration.
The following permissions are required for a sync job in push direction:
#. ``Remote.Audit`` on ``/remote/{remote}`` and ``Remote.DatastoreBackup`` on
``/remote/{remote}/{remote-store}/{remote-ns}`` path or subnamespace.
#. At least ``Datastore.Read`` and ``Datastore.Audit`` on the local source
datastore namespace (``/datastore/{store}/{ns}``) or ``Datastore.Backup`` if
owner of the sync job.
#. ``Remote.DatastorePrune`` on ``/remote/{remote}/{remote-store}/{remote-ns}``
path to remove vanished snapshots and groups. Make sure to use a dedicated
remote for each sync job in push direction as noted above.
#. ``Remote.DatastoreModify`` on ``/remote/{remote}/{remote-store}/{remote-ns}``
path to remove vanished namespaces. A remote user with limited access should
be used on the remote backup server instance. Consider the implications as
noted below.
.. note:: ``Remote.DatastoreModify`` will allow to remove whole namespaces on the
remote target datastore, independent of ownership. Make sure the user as
configured in remote.cfg has limited permissions on the remote side.
.. note:: Sync jobs in push direction require namespace support on the remote
Proxmox Backup Server instance (minimum version 2.2).
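A hypothetical sketch of creating such a job on the command line follows; the
``sync-direction`` parameter name and the other option values are assumptions,
so check the CLI help of your version before relying on them:

.. code-block:: console

# proxmox-backup-manager sync-job create push-to-offsite \
--store local-store --remote offsite --remote-store target-store \
--sync-direction push --schedule daily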

View File

@ -5,12 +5,12 @@ Markdown Primer
"Markdown is a text-to-HTML conversion tool for web writers. Markdown allows
you to write using an easy-to-read, easy-to-write plain text format, then
convert it to structurally valid XHTML (or HTML)."
convertit to structurally valid XHTML (or HTML)."
-- John Gruber, https://daringfireball.net/projects/markdown/
The "Notes" panel of the `Proxmox Backup`_ Server web-interface supports
The "Notes" panel of the Proxmox Backup Server web-interface supports
rendering Markdown text.
Proxmox Backup Server supports CommonMark with most extensions of GFM (GitHub

View File

@ -1,50 +0,0 @@
digraph g {
graph [
rankdir = "LR"
fontname="Helvetica"
];
node [
fontsize = "16"
shape = "record"
];
edge [
];
"archive" [
label = "archive.mpxar"
shape = "record"
];
"rootdir" [
label = "<fv>FORMAT_VERSION\l|PRELUDE\l|<f0>ENTRY\l|\{XATTR\}\* extended attribute list\l|\{ACL_USER\}\* USER ACL entries\l|\{ACL_GROUP\}\* GROUP ACL entries\l|\[ACL_GROUP_OBJ\] the ACL_GROUP_OBJ \l|\[ACL_DEFAULT\] the various default ACL fields\l|\{ACL_DEFAULT_USER\}\* USER ACL entries\l|\{ACL_DEFAULT_GROUP\}\* GROUP ACL entries\l|\[FCAPS\] file capability in Linux disk format\l|\[QUOTA_PROJECT_ID\] the ext4/xfs quota project ID\l|{<pl> PAYLOAD_REF|SYMLINK|DEVICE|{<de> \{DirectoryEntries\}\*|GOODBYE}}"
shape = "record"
];
"entry" [
label = "<f0> size: u64 = 64\l|type: u64 = ENTRY\l|feature_flags: u64\l|mode: u64\l|flags: u64\l|uid: u64\l|gid: u64\l|mtime: u64\l"
labeljust = "l"
shape = "record"
];
"direntry" [
label = "<f0> FILENAME\l|{ENTRY\l|HARDLINK\l}"
shape = "record"
];
"payloadrefentry" [
label = "<f0> offset: u64\l|size: u64\l"
shape = "record"
];
"archive" -> "rootdir":fv
"rootdir":f0 -> "entry":f0
"rootdir":de -> "direntry":f0
"rootdir":pl -> "payloadrefentry":f0
}

View File

@ -8,8 +8,8 @@ Network Management
:align: right
:alt: System and Network Configuration Overview
`Proxmox Backup`_ Server provides both a web interface and a command-line tool
for network configuration. You can find the configuration options in the web
Proxmox Backup Server provides both a web interface and a command-line tool for
network configuration. You can find the configuration options in the web
interface under the **Network Interfaces** section of the **Configuration** menu
tree item. The command-line tool is accessed via the ``network`` subcommand.
These interfaces allow you to carry out some basic network management tasks,

View File

@ -1,388 +0,0 @@
.. _notifications:
Notifications
=============
Overview
--------
* Proxmox Backup Server emits :ref:`notification_events` in case of noteworthy
events in the system. These events are handled by the notification system. A
notification event has metadata, for example a timestamp, a severity level, a
type and other metadata fields.
* :ref:`notification_matchers` route a notification event to one or more
notification targets. A matcher can have match rules to selectively route
based on the metadata of a notification event.
* :ref:`notification_targets` are destinations to which a notification event
is routed by a matcher. There are multiple target types: mail-based
(Sendmail and SMTP), Gotify, and webhook.
Datastores and tape backup jobs have a configurable :ref:`notification_mode`.
It allows you to choose between the notification system and a legacy mode for
sending notification emails. The legacy mode is equivalent to the way
notifications were handled before Proxmox Backup Server 3.2.
The notification system can be configured in the GUI under *Configuration →
Notifications*. The configuration is stored in :ref:`notifications.cfg` and
:ref:`notifications_priv.cfg` - the latter contains sensitive configuration
options such as passwords or authentication tokens for notification targets and
can only be read by ``root``.
.. _notification_targets:
Notification Targets
--------------------
Proxmox Backup Server offers multiple types of notification targets.
.. _notification_targets_sendmail:
Sendmail
^^^^^^^^
The sendmail binary is a program commonly found on Unix-like operating systems
that handles the sending of email messages. It is a command-line utility that
allows users and applications to send emails directly from the command line or
from within scripts.
The sendmail notification target uses the ``sendmail`` binary to send emails to
a list of configured users or email addresses. If a user is selected as a
recipient, the email address configured in the user's settings will be used. For
the ``root@pam`` user, this is the email address entered during installation. A
user's email address can be configured in ``Configuration → Access Control →
User Management``. If a user has no associated email address, no email will be
sent.
.. NOTE:: In standard Proxmox Backup Server installations, the ``sendmail``
binary is provided by Postfix. It may be necessary to configure Postfix so
that it can deliver mails correctly - for example by setting an external
mail relay (smart host). In case of failed delivery, check the system logs
for messages logged by the Postfix daemon.
See :ref:`notifications.cfg` for all configuration options.
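As a rough sketch, a sendmail target entry in ``notifications.cfg`` might look
like the following (the target name is made up here, and the exact field names
should be verified against :ref:`notifications.cfg`)::

   sendmail: mail-to-root
       mailto-user root@pam
       comment Send notifications to the root user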
.. _notification_targets_smtp:
SMTP
^^^^
SMTP notification targets can send emails directly to an SMTP mail relay. This
target does not use the system's MTA to deliver emails. Similar to sendmail
targets, if a user is selected as a recipient, the user's configured email
address will be used.
.. NOTE:: Unlike sendmail targets, SMTP targets do not have any queuing/retry
mechanism in case of a failed mail delivery.
See :ref:`notifications.cfg` for all configuration options.
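For illustration only, an SMTP target might be configured roughly along these
lines (server name, user names and field names such as ``mode`` or ``username``
are assumptions; the password itself is stored separately in
:ref:`notifications_priv.cfg`)::

   smtp: mail-relay
       server mail.example.com
       mode starttls
       username pbs@example.com
       from-address pbs@example.com
       mailto-user root@pam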
.. _notification_targets_gotify:
Gotify
^^^^^^
`Gotify <http://gotify.net>`_ is an open-source self-hosted notification server
that allows you to send push notifications to various devices and applications.
It provides a simple API and web interface, making it easy to integrate with
different platforms and services.
.. NOTE:: Gotify targets will respect the HTTP proxy settings from
Configuration → Other → HTTP proxy
See :ref:`notifications.cfg` for all configuration options.
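A Gotify target in ``notifications.cfg`` could look roughly like the sketch
below (server URL and target name are illustrative; the application token is
stored separately in :ref:`notifications_priv.cfg`)::

   gotify: push-notifications
       server https://gotify.example.com
       comment Push notifications via Gotify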
.. _notification_targets_webhook:
Webhook
^^^^^^^
Webhook notification targets perform HTTP requests to a configurable URL.
The following configuration options are available:
* ``url``: The URL to which to perform the HTTP requests. Supports templating
to inject message contents, metadata and secrets.
* ``method``: HTTP Method to use (POST/PUT/GET)
* ``header``: Array of HTTP headers that should be set for the request.
Supports templating to inject message contents, metadata and secrets.
* ``body``: HTTP body that should be sent. Supports templating to inject
message contents, metadata and secrets.
* ``secret``: Array of secret key-value pairs. These will be stored in a
protected configuration file only readable by root. Secrets can be
accessed in body/header/URL templates via the ``secrets`` namespace.
* ``comment``: Comment for this target.
For configuration options that support templating, the `Handlebars
<https://handlebarsjs.com>`_ syntax can be used to access the following
properties:
* ``{{ title }}``: The rendered notification title
* ``{{ message }}``: The rendered notification body
* ``{{ severity }}``: The severity of the notification (``info``, ``notice``,
``warning``, ``error``, ``unknown``)
* ``{{ timestamp }}``: The notification's timestamp as a UNIX epoch (in
seconds).
* ``{{ fields.<name> }}``: Sub-namespace for any metadata fields of the
notification. For instance, ``fields.type`` contains the notification
type - for all available fields refer to :ref:`notification_events`.
* ``{{ secrets.<name> }}``: Sub-namespace for secrets. For instance, a secret
named ``token`` is accessible via ``secrets.token``.
For convenience, the following helpers are available:
* ``{{ url-encode <value/property> }}``: URL-encode a property/literal.
* ``{{ escape <value/property> }}``: Escape any control characters that cannot
be safely represented as a JSON string.
* ``{{ json <value/property> }}``: Render a value as JSON. This can be useful
to pass a whole sub-namespace (e.g. ``fields``) as a part of a JSON payload
(e.g. ``{{ json fields }}``).
.. NOTE:: Webhook targets will respect the HTTP proxy settings from
Configuration → Other → HTTP proxy
Example - ntfy.sh
"""""""""""""""""
* Method: ``POST``
* URL: ``https://ntfy.sh/{{ secrets.channel }}``
* Headers:
* ``Markdown``: ``Yes``
* Body::
```
{{ message }}
```
* Secrets:
* ``channel``: ``<your ntfy.sh channel>``
Example - Discord
"""""""""""""""""
* Method: ``POST``
* URL: ``https://discord.com/api/webhooks/{{ secrets.token }}``
* Headers:
* ``Content-Type``: ``application/json``
* Body::
{
"content": "``` {{ escape message }}```"
}
* Secrets:
* ``token``: ``<token>``
Example - Slack
"""""""""""""""
* Method: ``POST``
* URL: ``https://hooks.slack.com/services/{{ secrets.token }}``
* Headers:
* ``Content-Type``: ``application/json``
* Body::
{
"text": "``` {{escape message}}```",
"type": "mrkdwn"
}
* Secrets:
* ``token``: ``<token>``
.. _notification_matchers:
Notification Matchers
---------------------
Notification matchers route notifications to notification targets based on
their matching rules. These rules can match certain properties of a
notification, such as the timestamp (``match-calendar``), the severity of the
notification (``match-severity``) or metadata fields (``match-field``). If a
notification is matched by a matcher, all targets configured for the matcher
will receive the notification.
An arbitrary number of matchers can be created, each with their own
matching rules and targets to notify. Every target is notified at most once for
every notification, even if the target is used in multiple matchers.
A matcher without rules matches any notification; the configured targets will
always be notified.
See :ref:`notifications.cfg` for all configuration options.
Calendar Matching Rules
^^^^^^^^^^^^^^^^^^^^^^^
A calendar matcher matches a notification's timestamp.
Examples:
* ``match-calendar 8-12``
* ``match-calendar 8:00-15:30``
* ``match-calendar mon-fri 9:00-17:00``
* ``match-calendar sun,tue-wed,fri 9-17``
Field Matching Rules
^^^^^^^^^^^^^^^^^^^^
Notifications have a selection of metadata fields that can be matched. When
using ``exact`` as a matching mode, a ``,`` can be used as a separator. The
matching rule then matches if the metadata field has **any** of the specified
values.
Examples:
* ``match-field exact:type=gc`` Only match notifications for garbage collection
jobs
* ``match-field exact:type=prune,verify`` Match prune job and verification job
notifications.
* ``match-field regex:datastore=^backup-.*$`` Match any datastore starting with
``backup``.
If a notification does not have the matched field, the rule will **not** match.
For instance, a ``match-field regex:datastore=.*`` directive will match any
notification that has a ``datastore`` metadata field, but will not match if the
field does not exist.
Severity Matching Rules
^^^^^^^^^^^^^^^^^^^^^^^
A notification has an associated severity that can be matched.
Examples:
* ``match-severity error``: Only match errors
* ``match-severity warning,error``: Match warnings and errors
The following severities are in use:
``info``, ``notice``, ``warning``, ``error``, ``unknown``.
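Putting these rule types together, a matcher entry in ``notifications.cfg``
might look roughly like the following sketch (matcher and target names are made
up; check :ref:`notifications.cfg` for the authoritative syntax)::

   matcher: alert-admins
       match-severity warning,error
       match-field exact:type=gc,prune,verify
       match-calendar mon-fri 8-17
       target mail-to-root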
.. _notification_events:
Notification Events
-------------------
The following table contains a list of all notification events in Proxmox
Backup Server, their type, severity and additional metadata fields. ``type`` as
well as any other metadata field may be used in ``match-field`` match rules.
================================ ==================== ========== ==============================================================
Event ``type`` Severity Metadata fields (in addition to ``type``)
================================ ==================== ========== ==============================================================
ACME certificate renewal failed ``acme`` ``error`` ``hostname``
Garbage collection failure ``gc`` ``error`` ``datastore``, ``hostname``
Garbage collection success ``gc`` ``info`` ``datastore``, ``hostname``
Package updates available ``package-updates`` ``info`` ``hostname``
Prune job failure ``prune`` ``error`` ``datastore``, ``hostname``, ``job-id``
Prune job success ``prune`` ``info`` ``datastore``, ``hostname``, ``job-id``
Remote sync failure ``sync`` ``error`` ``datastore``, ``hostname``, ``job-id``
Remote sync success ``sync`` ``info`` ``datastore``, ``hostname``, ``job-id``
Tape backup job failure ``tape-backup`` ``error`` ``datastore``, ``hostname``, ``media-pool``, ``job-id``
Tape backup job success ``tape-backup`` ``info`` ``datastore``, ``hostname``, ``media-pool``, ``job-id``
Tape loading request ``tape-load`` ``notice`` ``hostname``
Verification job failure ``verification`` ``error`` ``datastore``, ``hostname``, ``job-id``
Verification job success ``verification`` ``info`` ``datastore``, ``hostname``, ``job-id``
================================ ==================== ========== ==============================================================
The following table contains a description of all used metadata fields. All of
these can be used in ``match-field`` match rules.
==================== ===================================
Metadata field Description
==================== ===================================
``datastore`` The name of the datastore
``hostname`` The hostname of the backup server
``job-id`` Job ID
``media-pool`` The name of the tape media pool
``type`` Notification event type
==================== ===================================
.. NOTE:: The daily task checking for any available system updates only sends
notifications if the node has an active subscription.
System Mail Forwarding
----------------------
Certain local system daemons, such as ``smartd``, send notification emails to
the local ``root`` user. Proxmox Backup Server will feed these mails into the
notification system as a notification of type ``system-mail`` and with severity
``unknown``.
When the email is forwarded to a sendmail target, the mail's content and
headers are forwarded as-is. For all other targets, the system tries to extract
both a subject line and the main text body from the email content. In instances
where emails solely consist of HTML content, they will be transformed into
plain text format during this process.
Permissions
-----------
In order to modify/view the configuration for notification targets, the
``Sys.Modify/Sys.Audit`` permissions are required for the
``/system/notifications`` ACL node.
.. _notification_mode:
Notification Mode
-----------------
Datastores and tape backup/restore job configuration have a
``notification-mode`` option which can have one of two values:
* ``legacy-sendmail``: Send notification emails via the system's ``sendmail``
command. The notification system will be bypassed and any configured
targets/matchers will be ignored. This mode is equivalent to the notification
behavior for versions before Proxmox Backup Server 3.2.
* ``notification-system``: Use the new, flexible notification system.
If the ``notification-mode`` option is not set, Proxmox Backup Server will
default to ``legacy-sendmail``.
Starting with Proxmox Backup Server 3.2, a datastore created in the UI will
automatically opt in to the new notification system. If the datastore is
created via the API or the ``proxmox-backup-manager`` CLI, the
``notification-mode`` option has to be set explicitly to
``notification-system`` if the notification system shall be used.
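For example, to opt an existing datastore into the notification system from the
command line, something along the following lines should work (assuming the
option is exposed as ``--notification-mode`` on ``datastore update``):

.. code-block:: console

   # proxmox-backup-manager datastore update <storename> --notification-mode notification-system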
The ``legacy-sendmail`` mode might be removed in a later release of
Proxmox Backup Server.
Settings for ``legacy-sendmail`` notification mode
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
If ``notification-mode`` is set to ``legacy-sendmail``, Proxmox Backup Server
will send notification emails via the system's ``sendmail`` command to the
email address configured for the user set in the ``notify-user`` option
(falling back to ``root@pam`` if not set).
For datastores, you can also change the level of notifications received per
task type via the ``notify`` option.
* Always: send a notification for any scheduled task, independent of the
outcome
* Errors: send a notification for any scheduled task that results in an error
* Never: do not send any notification at all
The ``notify-user`` and ``notify`` options are ignored if ``notification-mode``
is set to ``notification-system``.
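As an illustrative sketch, the legacy options could be set via the CLI roughly
as follows (the exact value syntax accepted by ``--notify`` is an assumption
and should be checked against the ``proxmox-backup-manager`` help):

.. code-block:: console

   # proxmox-backup-manager datastore update <storename> --notify-user root@pam --notify 'gc=errors,verify=errors,sync=errors'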
Overriding Notification Templates
---------------------------------
Proxmox Backup Server uses Handlebars templates to render notifications. The
original templates provided by Proxmox Backup Server are stored in
``/usr/share/proxmox-backup/templates/default/``.
Notification templates can be overridden by providing a custom template file in
the override directory at
``/etc/proxmox-backup/notification-templates/default/``. When rendering a
notification of a given type, Proxmox Backup Server will first attempt to load
a template from the override directory. If the override does not exist or fails to
render, the original template will be used.
The template files follow the naming convention of
``<type>-<body|subject>.txt.hbs``. For instance, the file
``gc-err-body.txt.hbs`` contains the template for rendering notifications for
garbage collection errors, while ``package-updates-subject.txt.hbs`` is used to
render the subject line of notifications for available package updates.
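For example, to customize the body template for garbage collection errors, you
could copy the shipped template into the override directory and edit the copy
(paths as documented above):

.. code-block:: console

   # mkdir -p /etc/proxmox-backup/notification-templates/default
   # cp /usr/share/proxmox-backup/templates/default/gc-err-body.txt.hbs \
        /etc/proxmox-backup/notification-templates/default/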

View File

@ -70,7 +70,7 @@ and the md5sum, with the expected output below:
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This is the stable, recommended repository. It is available for
all Proxmox Backup subscription users. It contains the most stable packages,
all `Proxmox Backup`_ subscription users. It contains the most stable packages,
and is suitable for production use. The ``pbs-enterprise`` repository is
enabled by default:
@ -140,7 +140,7 @@ You can access this repository by adding the following line to
Proxmox Backup Client-only Repository
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If you want to :ref:`use the Proxmox Backup Client <client_creating_backups>`
If you want to :ref:`use the the Proxmox Backup Client <client_creating_backups>`
on systems using a Linux distribution not based on Proxmox projects, you can
use the client-only repository.
@ -149,7 +149,7 @@ Currently there's only a client-repository for APT based systems.
.. _package_repositories_client_only_apt:
APT-based Proxmox Backup Client Repository
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
++++++++++++++++++++++++++++++++++++++++++
For modern Linux distributions using `apt` as package manager, as all Debian
and Ubuntu derivatives do, you may be able to use the APT-based repository.

View File

@ -1,16 +0,0 @@
:orphan:
=======
pbs2to3
=======
Description
===========
This tool helps you detect common pitfalls and misconfigurations before and
during the upgrade of a Proxmox Backup Server system. Any failures or
warnings must be addressed prior to the upgrade. If you suspect that a message
is a false positive, you have to verify carefully that it really is one.
.. include:: ../pbs-copyright.rst

View File

@ -1,5 +1,3 @@
:orphan:
===
pmt
===

View File

@ -1,5 +1,3 @@
:orphan:
==========================
pmtx
==========================

View File

@ -1,5 +1,3 @@
:orphan:
=====================
proxmox-backup-client
=====================

View File

@ -1,5 +1,3 @@
:orphan:
====================
proxmox-backup-debug
====================

View File

@ -1,5 +1,3 @@
:orphan:
==========================
proxmox-backup-manager
==========================

View File

@ -1,5 +1,3 @@
:orphan:
==========================
proxmox-backup-proxy
==========================

View File

@ -1,5 +1,3 @@
:orphan:
==========================
proxmox-backup
==========================

View File

@ -1,5 +1,3 @@
:orphan:
====================
proxmox-file-restore
====================

View File

@ -1,5 +1,3 @@
:orphan:
============
proxmox-tape
============

View File

@ -82,13 +82,13 @@ available:</p>
<dd>Keep the last <code class="docutils literal notranslate"><span class="pre">&lt;N&gt;</span></code> backup snapshots.</dd>
<dt><code class="docutils literal notranslate"><span class="pre">keep-hourly</span> <span class="pre">&lt;N&gt;</span></code></dt>
<dd>Keep backups for the last <code class="docutils literal notranslate"><span class="pre">&lt;N&gt;</span></code> hours. If there is more than one
backup for a single hour, only the latest is kept. Hours without backups do not count.</dd>
backup for a single hour, only the latest is kept.</dd>
<dt><code class="docutils literal notranslate"><span class="pre">keep-daily</span> <span class="pre">&lt;N&gt;</span></code></dt>
<dd>Keep backups for the last <code class="docutils literal notranslate"><span class="pre">&lt;N&gt;</span></code> days. If there is more than one
backup for a single day, only the latest is kept. Days without backups do not count.</dd>
backup for a single day, only the latest is kept.</dd>
<dt><code class="docutils literal notranslate"><span class="pre">keep-weekly</span> <span class="pre">&lt;N&gt;</span></code></dt>
<dd>Keep backups for the last <code class="docutils literal notranslate"><span class="pre">&lt;N&gt;</span></code> weeks. If there is more than one
backup for a single week, only the latest is kept. Weeks without backups do not count.
backup for a single week, only the latest is kept.
<div class="last admonition note">
<p class="note-title">Note:</p>
<p class="last">Weeks start on Monday and end on Sunday. The software
@ -98,10 +98,10 @@ the end of the year correctly.</p>
</dd>
<dt><code class="docutils literal notranslate"><span class="pre">keep-monthly</span> <span class="pre">&lt;N&gt;</span></code></dt>
<dd>Keep backups for the last <code class="docutils literal notranslate"><span class="pre">&lt;N&gt;</span></code> months. If there is more than one
backup for a single month, only the latest is kept. Months without backups do not count.</dd>
backup for a single month, only the latest is kept.</dd>
<dt><code class="docutils literal notranslate"><span class="pre">keep-yearly</span> <span class="pre">&lt;N&gt;</span></code></dt>
<dd>Keep backups for the last <code class="docutils literal notranslate"><span class="pre">&lt;N&gt;</span></code> years. If there is more than one
backup for a single year, only the latest is kept. Years without backups do not count.</dd>
backup for a single year, only the latest is kept.</dd>
</dl>
<p>The retention options are processed in the order given above. Each option
only covers backups within its time period. The next option does not take care

View File

@ -126,8 +126,7 @@ Ext.onReady(function() {
if (data.mark !== 'keep') {
return `<div style="text-decoration: line-through;">${text}</div>`;
}
let pruneList = this.up('prunesimulatorPruneList');
if (pruneList.useColors) {
if (me.useColors) {
let bgColor = COLORS[data.keepName];
let textColor = TEXT_COLORS[data.keepName];
return `<div style="background-color: ${bgColor};color: ${textColor};">${text}</div>`;
@ -354,17 +353,12 @@ Ext.onReady(function() {
specValues.forEach(function(value) {
if (value.includes('..')) {
let [start, end] = value.split('..');
let step = 1;
if (end.includes('/')) {
[end, step] = end.split('/');
step = assertValid(step);
}
start = assertValid(start);
end = assertValid(end);
if (start > end) {
throw "interval start is bigger then interval end '" + start + " > " + end + "'";
}
for (let i = start; i <= end; i += step) {
for (let i = start; i <= end; i++) {
matches[i] = 1;
}
} else if (value.includes('/')) {
@ -755,7 +749,7 @@ Ext.onReady(function() {
fieldLabel: 'End Time',
allowBlank: false,
format: 'H:i',
// can't bind value because ExtJS sets the year to 2008 to
// cant bind value because ExtJS sets the year to 2008 to
// protect against DST issues and date picker zeroes hour/minute
value: vm.get('now'),
listeners: {

View File

@ -3,8 +3,8 @@
`Proxmox VE`_ Integration
-------------------------
`Proxmox Backup`_ Server can be integrated into a Proxmox VE standalone or
cluster setup, by adding it as a storage in Proxmox VE.
Proxmox Backup Server can be integrated into a Proxmox VE standalone or cluster
setup, by adding it as a storage in Proxmox VE.
See also the `Proxmox VE Storage - Proxmox Backup Server
<https://pve.proxmox.com/pve-docs/pve-admin-guide.html#storage_pbs>`_ section

View File

@ -3,8 +3,8 @@
It is inspired by `casync file archive format
<http://0pointer.net/blog/casync-a-tool-for-distributing-file-system-images.html>`_,
which caters to a similar use-case.
The ``.pxar`` format is adapted to fulfill the specific needs of the
`Proxmox Backup`_ Server, for example, efficient storage of hard links.
The ``.pxar`` format is adapted to fulfill the specific needs of the Proxmox
Backup Server, for example, efficient storage of hard links.
The format is designed to reduce the required storage on the server by
achieving a high level of deduplication.

View File

@ -1,5 +1,3 @@
:orphan:
====
pxar
====

View File

@ -11,7 +11,7 @@ Disk Management
:align: right
:alt: List of disks
`Proxmox Backup`_ Server comes with a set of disk utilities, which are
Proxmox Backup Server comes with a set of disk utilities, which are
accessed using the ``disk`` subcommand or the web interface. This subcommand
allows you to initialize disks, create various filesystems, and get information
about the disks.
@ -48,12 +48,12 @@ You can create an ``ext4`` or ``xfs`` filesystem on a disk using ``fs
create``, or by navigating to **Administration -> Storage/Disks -> Directory**
in the web interface and creating one from there. The following command creates
an ``ext4`` filesystem and passes the ``--add-datastore`` parameter, in order to
automatically create a datastore on the disk. This will
automatically create a datastore on the disk (in this case ``sdd``). This will
create a datastore at the location ``/mnt/datastore/store1``:
.. code-block:: console
# proxmox-backup-manager disk fs create store1 --disk sdX --filesystem ext4 --add-datastore true
# proxmox-backup-manager disk fs create store1 --disk sdd --filesystem ext4 --add-datastore true
.. image:: images/screenshots/pbs-gui-disks-zfs-create.png
:align: right
@ -61,12 +61,12 @@ create a datastore at the location ``/mnt/datastore/store1``:
You can also create a ``zpool`` with various raid levels from **Administration
-> Storage/Disks -> ZFS** in the web interface, or by using ``zpool create``. The command
below creates a mirrored ``zpool`` using two disks and
below creates a mirrored ``zpool`` using two disks (``sdb`` & ``sdc``) and
mounts it under ``/mnt/datastore/zpool1``:
.. code-block:: console
# proxmox-backup-manager disk zpool create zpool1 --devices sdX,sdY --raidlevel mirror
# proxmox-backup-manager disk zpool create zpool1 --devices sdb,sdc --raidlevel mirror
.. note:: You can also pass the ``--add-datastore`` parameter here, to automatically
create a datastore from the disk.
@ -165,74 +165,6 @@ following command creates a new datastore called ``store1`` on
# proxmox-backup-manager datastore create store1 /backup/disk1/store1
Removable Datastores
^^^^^^^^^^^^^^^^^^^^
Removable datastores have a ``backing-device`` associated with them; they can be
mounted and unmounted. Other than that, they behave the same way a normal
datastore would.
They can be created on already correctly formatted partitions, which should be
either ``ext4`` or ``xfs`` as with normal datastores, but most modern file
systems supported by the Proxmox Linux kernel should work.
.. note:: FAT-based file systems do not support the POSIX file ownership
concept and have relatively low limits on the number of files per directory.
Therefore, creating a datastore is not supported on FAT file systems.
Because some external drives are preformatted with such a FAT-based file
system, you may need to reformat the drive before you can use it as a
backing-device for a removable datastore.
It is also possible to create them on completely unused disks through
"Administration" > "Disks / Storage" > "Directory". Using this method, the disk
will be partitioned and formatted automatically for the datastore.
Devices with only one datastore on them will be mounted automatically. Unmounting has
to be done through the UI by clicking "Unmount" on the summary page or using the CLI.
If unmounting fails, the reason is logged in the unmount task log, and the
datastore will stay in maintenance mode ``unmounting``, which prevents any IO
operations. In such cases, the maintenance mode has to be reset manually using:
.. code-block:: console
# proxmox-backup-manager datastore update --maintenance-mode offline
to prevent any IO, or to clear it use:
.. code-block:: console
# proxmox-backup-manager datastore update --delete maintenance-mode
A single device can house multiple datastores; the only limitation is that they
are not allowed to be nested.
Removable datastores are created on the device with the relative path that is specified
on creation. In order to use a datastore on multiple PBS instances, it has to be created on one,
and added with ``Reuse existing datastore`` checked on the others. The path set on creation
is how multiple datastores on a single device are identified, so when adding the datastore
on a new PBS instance, it has to match what was set on creation.
.. code-block:: console
# proxmox-backup-manager datastore unmount store1
Both methods will wait for any running tasks to finish and unmount the device.
All removable datastores are mounted under /mnt/datastore/<name>, and the specified path
refers to the path on the device.
All datastores present on a device can be listed using ``proxmox-backup-debug``.
.. code-block:: console
# proxmox-backup-debug inspect device /dev/...
Verify, Prune and Garbage Collection jobs are skipped if the removable
datastore is not mounted when they are scheduled. Sync jobs start, but fail
with an error saying the datastore was not mounted. This is intentional, so
that syncs which do not run as scheduled are at least noticeable.
Managing Datastores
^^^^^^^^^^^^^^^^^^^
@ -331,7 +263,7 @@ categorized by checksum, after a backup operation has been executed.
Once you've uploaded some backups or created namespaces, you may see the backup
type (`ct`, `vm`, `host`) and the start of the namespace hierarchy (`ns`).
type (`ct`, `vm`, `host`) and the start of the namespace hierachy (`ns`).
.. _storage_namespaces:
@ -382,7 +314,7 @@ Options
There are a few per-datastore options:
* :ref:`Notification mode and legacy notification settings <notification_mode>`
* :ref:`Notifications <maintenance_notification>`
* :ref:`Maintenance Mode <maintenance_mode>`
* Verification of incoming backups
@ -403,11 +335,11 @@ There are some tuning related options for the datastore that are more advanced:
index file (.fidx/.didx). While this might slow down iterating on many slow
storages, on very fast ones (for example: NVMEs) the collecting and sorting
can take more time than gained through the sorted iterating.
This option can be set with:
This option can be set with:
.. code-block:: console
.. code-block:: console
# proxmox-backup-manager datastore update <storename> --tuning 'chunk-order=none'
# proxmox-backup-manager datastore update <storename> --tuning 'chunk-order=none'
* ``sync-level``: Datastore fsync level:
@ -435,28 +367,9 @@ There are some tuning related options for the datastore that are more advanced:
This can be set with:
.. code-block:: console
.. code-block:: console
# proxmox-backup-manager datastore update <storename> --tuning 'sync-level=filesystem'
* ``gc-atime-safety-check``: Datastore GC atime update safety check:
You can explicitly `enable` or `disable` the atime update safety check
performed on datastore creation and garbage collection. This checks if atime
updates are handled as expected by garbage collection and therefore avoids the
risk of data loss by unexpected filesystem behavior. It is recommended to set
this to enabled, which is also the default value.
* ``gc-atime-cutoff``: Datastore GC atime cutoff for chunk cleanup:
This allows setting the cutoff for which a chunk is still considered in-use
during phase 2 of garbage collection (given no older writers). If the
``atime`` of the chunk is outside the range, it will be removed.
* ``gc-cache-capacity``: Datastore GC least recently used cache capacity:
Allows controlling the cache capacity used to keep track of chunks for which
the access time has already been updated during phase 1 of garbage collection.
This avoids multiple updates and increases GC runtime performance. Higher
values can reduce GC runtime at the cost of increased memory usage; setting the
value to 0 disables caching.
# proxmox-backup-manager datastore update <storename> --tuning 'sync-level=filesystem'
If you want to set multiple tuning options simultaneously, you can separate them
with a comma, like this:
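(The following is an illustrative sketch that combines two of the options
described above.)

.. code-block:: console

   # proxmox-backup-manager datastore update <storename> --tuning 'chunk-order=none,sync-level=filesystem'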
@ -506,7 +419,7 @@ remote-source to avoid that an attacker that took over the source can cause
deletions of backups on the target hosts.
If the source-host became victim of a ransomware attack, there is a good chance
that sync jobs will fail, triggering an :ref:`error notification
<Notification Events>`.
<maintenance_notification>`.
It is also possible to create :ref:`tape backups <tape_backup>` as a second
storage medium. This way, you get an additional copy of your data on a

View File

@ -9,7 +9,7 @@ Debian packages, and that the base system is well documented. The `Debian
Administrator's Handbook`_ is available online, and provides a
comprehensive introduction to the Debian operating system.
A standard Proxmox Backup installation uses the default
A standard `Proxmox Backup`_ installation uses the default
repositories from Debian, so you get bug fixes and security updates
through that channel. In addition, we provide our own package
repository to roll out all Proxmox related packages. This includes
@ -19,8 +19,8 @@ We also deliver a specially optimized Linux kernel, based on the Ubuntu
kernel. This kernel includes drivers for ZFS_.
The following sections will concentrate on backup related topics. They
will explain things which are different on Proxmox Backup, or
tasks which are commonly used on Proxmox Backup. For other topics,
will explain things which are different on `Proxmox Backup`_, or
tasks which are commonly used on `Proxmox Backup`_. For other topics,
please refer to the standard Debian documentation.
@ -30,8 +30,6 @@ please refer to the standard Debian documentation.
.. include:: certificate-management.rst
.. include:: external-metric-server.rst
.. include:: services.rst
.. include:: command-line-tools.rst

View File

@ -8,9 +8,8 @@ Host Bootloader
selected in the installer.
For EFI Systems installed with ZFS as the root filesystem ``systemd-boot`` is
used, unless Secure Boot is enabled. All other deployments use the standard
``grub`` bootloader (this usually also applies to systems which are installed
on top of Debian).
used. All other deployments use the standard ``grub`` bootloader (this usually
also applies to systems which are installed on top of Debian).
.. _systembooting-installer-part-scheme:
@ -18,7 +17,7 @@ on top of Debian).
Partitioning Scheme Used by the Installer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The Proxmox Backup installer creates 3 partitions on all disks selected for
The `Proxmox Backup`_ installer creates 3 partitions on all disks selected for
installation.
The created partitions are:
@ -31,10 +30,9 @@ The created partitions are:
remaining space available for the chosen storage type
Systems using ZFS as a root filesystem are booted with a kernel and initrd image
stored on the 512 MB EFI System Partition. For legacy BIOS systems, and EFI
systems with Secure Boot enabled, ``grub`` is used, for EFI systems without
Secure Boot, ``systemd-boot`` is used. Both are installed and configured to
point to the ESPs.
stored on the 512 MB EFI System Partition. For legacy BIOS systems, ``grub`` is
used, for EFI systems ``systemd-boot`` is used. Both are installed and configured
to point to the ESPs.
``grub`` in BIOS mode (``--target i386-pc``) is installed onto the BIOS Boot
Partition of all selected disks on all systems booted with ``grub`` (that is,
@ -86,10 +84,10 @@ Setting up a New Partition for use as Synced ESP
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
To format and initialize a partition as synced ESP, for example, after replacing a
failed vdev in an rpool, ``proxmox-boot-tool`` from ``proxmox-kernel-helper`` can be used.
failed vdev in an rpool, ``proxmox-boot-tool`` from ``pve-kernel-helper`` can be used.
.. WARNING:: the ``format`` command will format the ``<partition>``. Make sure
to pass in the right device/partition!
WARNING: the ``format`` command will format the ``<partition>``. Make sure to pass
in the right device/partition!
For example, to format an empty partition ``/dev/sda2`` as ESP, run the following:
@ -98,21 +96,12 @@ For example, to format an empty partition ``/dev/sda2`` as ESP, run the followin
# proxmox-boot-tool format /dev/sda2
To setup an existing, unmounted ESP located on ``/dev/sda2`` for inclusion in
Proxmox Backup's kernel update synchronization mechanism, use the following:
`Proxmox Backup`_'s kernel update synchronization mechanism, use the following:
.. code-block:: console
# proxmox-boot-tool init /dev/sda2
or
.. code-block:: console
# proxmox-boot-tool init /dev/sda2 grub
to force initialization with Grub instead of systemd-boot, for example for
Secure Boot support.
Following this, ``/etc/kernel/proxmox-boot-uuids`` should contain a new line with the
UUID of the newly added partition. The ``init`` command will also automatically
trigger a refresh of all configured ESPs.
@ -198,7 +187,7 @@ Determine which Bootloader is Used
:alt: Grub boot screen
The simplest and most reliable way to determine which bootloader is used, is to
watch the boot process of the Proxmox Backup node.
watch the boot process of the `Proxmox Backup`_ node.
You will either see the blue box of ``grub`` or the simple black on white
@ -254,8 +243,6 @@ and is quite well documented
(see the `Grub Manual
<https://www.gnu.org/software/grub/manual/grub/grub.html>`_).
.. _systembooting-grub-config:
Configuration
^^^^^^^^^^^^^
@ -278,8 +265,8 @@ Systemd-boot
``systemd-boot`` is a lightweight EFI bootloader. It reads the kernel and initrd
images directly from the EFI Service Partition (ESP) where it is installed.
The main advantage of directly loading the kernel from the ESP is that it does
not need to reimplement the drivers for accessing the storage. In Proxmox
Backup, :ref:`proxmox-boot-tool <systembooting-proxmox-boot-tool>` is used to
not need to reimplement the drivers for accessing the storage. In `Proxmox
Backup`_, :ref:`proxmox-boot-tool <systembooting-proxmox-boot-tool>` is used to
keep the configuration on the ESPs synchronized.
.. _systembooting-systemd-boot-config:
@ -313,8 +300,6 @@ Editing the Kernel Commandline
You can modify the kernel commandline in the following places, depending on the
bootloader used:
.. _systembooting-kernel-cmdline-grub:
Grub
^^^^
@ -323,8 +308,6 @@ The kernel commandline needs to be placed in the variable
``update-grub`` appends its content to all ``linux`` entries in
``/boot/grub/grub.cfg``.
.. _systembooting-kernel-cmdline-systemd-boot:
systemd-boot
^^^^^^^^^^^^
@ -359,7 +342,7 @@ would run:
# proxmox-boot-tool kernel pin 5.15.30-1-pve
.. TIP:: The pinning functionality works for all Proxmox Backup systems, not only those using
.. TIP:: The pinning functionality works for all `Proxmox Backup`_ systems, not only those using
``proxmox-boot-tool`` to synchronize the contents of the ESPs, if your system
does not use ``proxmox-boot-tool`` for synchronizing, you can also skip the
``proxmox-boot-tool refresh`` call in the end.
@ -392,188 +375,3 @@ content and configuration on the ESPs by running the ``refresh`` subcommand.
.. code-block:: console
# proxmox-boot-tool refresh
.. _systembooting-secure-boot:
Secure Boot
~~~~~~~~~~~
Since Proxmox Backup 3.1, Secure Boot is supported out of the box via signed
packages and integration in ``proxmox-boot-tool``.
The following packages need to be installed for Secure Boot to be enabled:
* ``shim-signed`` (shim bootloader signed by Microsoft)
* ``shim-helpers-amd64-signed`` (fallback bootloader and MOKManager, signed by Proxmox)
* ``grub-efi-amd64-signed`` (Grub EFI bootloader, signed by Proxmox)
* ``proxmox-kernel-6.X.Y-Z-pve-signed`` (Kernel image, signed by Proxmox)
Only Grub as bootloader is supported out of the box, since there are no other
pre-signed bootloader packages available. Any new installation of Proxmox Backup
will automatically have all of the above packages included.
More details about how Secure Boot works, and how to customize the setup, are
available in `our wiki <https://pve.proxmox.com/wiki/Secure_Boot_Setup>`_.
.. _systembooting-secure-boot-existing-installation:
Switching an Existing Installation to Secure Boot
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. WARNING:: This can lead to an unbootable installation in some cases if not
done correctly. Reinstalling the host will setup Secure Boot automatically if
available, without any extra interactions. **Make sure you have a working and
well-tested backup of your Proxmox Backup host!**
An existing UEFI installation can be switched over to Secure Boot if desired,
without having to reinstall Proxmox Backup from scratch.
First, ensure that your system is fully up-to-date. Next, install all the required
pre-signed packages as listed above. Grub automatically creates the needed EFI
boot entry for booting via the default shim.
.. _systembooting-secure-boot-existing-systemd-boot:
**systemd-boot**
""""""""""""""""
If ``systemd-boot`` is used as a bootloader (see
:ref:`Determine which Bootloader is used <systembooting-determine-bootloader>`),
some additional setup is needed. This is only the case if Proxmox Backup was
installed with ZFS-on-root.
To check the latter, run:
.. code-block:: console
# findmnt /
If the host is indeed using ZFS as root filesystem, the ``FSTYPE`` column should
contain ``zfs``:
.. code-block:: console
TARGET SOURCE FSTYPE OPTIONS
/ rpool/ROOT/pbs-1 zfs rw,relatime,xattr,noacl
Next, a suitable potential ESP (EFI system partition) must be found. This can be
done using the ``lsblk`` command as follows:
.. code-block:: console
# lsblk -o +FSTYPE
The output should look something like this:
.. code-block:: console
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINTS FSTYPE
sda 8:0 0 32G 0 disk
├─sda1 8:1 0 1007K 0 part
├─sda2 8:2 0 512M 0 part vfat
└─sda3 8:3 0 31.5G 0 part zfs_member
sdb 8:16 0 32G 0 disk
├─sdb1 8:17 0 1007K 0 part
├─sdb2 8:18 0 512M 0 part vfat
└─sdb3 8:19 0 31.5G 0 part zfs_member
In this case, the partitions ``sda2`` and ``sdb2`` are the targets. They can be
identified by their size of 512M and their ``FSTYPE`` being ``vfat``, in
this case on a ZFS RAID-1 installation.
These partitions must be properly set up for booting through Grub using
``proxmox-boot-tool``. This command (using ``sda2`` as an example) must be run
separately for each individual ESP:
.. code-block:: console
# proxmox-boot-tool init /dev/sda2 grub
Afterwards, you can sanity-check the setup by running the following command:
.. code-block:: console
# efibootmgr -v
This list should contain an entry looking similar to this:
.. code-block:: console
[..]
Boot0009* proxmox HD(2,GPT,..,0x800,0x100000)/File(\EFI\proxmox\shimx64.efi)
[..]
.. NOTE:: The old ``systemd-boot`` bootloader will be kept, but Grub will be
preferred. This way, if booting using Grub in Secure Boot mode does not work
for any reason, the system can still be booted using ``systemd-boot`` with
Secure Boot turned off.
Now the host can be rebooted and Secure Boot enabled in the UEFI firmware setup
utility.
On reboot, a new entry named ``proxmox`` should be selectable in the UEFI
firmware boot menu, which boots using the pre-signed EFI shim.
If, for any reason, no ``proxmox`` entry can be found in the UEFI boot menu, you
can try adding it manually (if supported by the firmware), by adding the file
``\EFI\proxmox\shimx64.efi`` as a custom boot entry.
.. NOTE:: Some UEFI firmwares are known to drop the ``proxmox`` boot option on
reboot. This can happen if the ``proxmox`` boot entry is pointing to a Grub
installation on a disk, where the disk itself is not a boot option. If
possible, try adding the disk as a boot option in the UEFI firmware setup
utility and run ``proxmox-boot-tool`` again.
.. TIP:: To enroll custom keys, see the accompanying `Secure Boot wiki page
<https://pve.proxmox.com/wiki/Secure_Boot_Setup#Setup_instructions_for_db_key_variant>`_.
.. _systembooting-secure-boot-other-modules:
Using DKMS/Third Party Modules With Secure Boot
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
On systems with Secure Boot enabled, the kernel will refuse to load modules
which are not signed by a trusted key. The default set of modules shipped with
the kernel packages is signed with an ephemeral key embedded in the kernel
image which is trusted by that specific version of the kernel image.
In order to load other modules, such as those built with DKMS or manually, they
need to be signed with a key trusted by the Secure Boot stack. The easiest way
to achieve this is to enroll them as Machine Owner Key (``MOK``) with
``mokutil``.
The ``dkms`` tool will automatically generate a keypair and certificate in
``/var/lib/dkms/mok.key`` and ``/var/lib/dkms/mok.pub`` and use it for signing
the kernel modules it builds and installs.
You can view the certificate contents with
.. code-block:: console
# openssl x509 -in /var/lib/dkms/mok.pub -noout -text
and enroll it on your system using the following command:
.. code-block:: console
# mokutil --import /var/lib/dkms/mok.pub
input password:
input password again:
The ``mokutil`` command will ask for a (temporary) password twice, this password
needs to be entered one more time in the next step of the process! Rebooting
the system should automatically boot into the ``MOKManager`` EFI binary, which
allows you to verify the key/certificate and confirm the enrollment using the
password selected when starting the enrollment using ``mokutil``. Afterwards,
the kernel should allow loading modules built with DKMS (which are signed with
the enrolled ``MOK``). The ``MOK`` can also be used to sign custom EFI binaries
and kernel images if desired.
The same procedure can also be used for custom/third-party modules not managed
with DKMS, but the key/certificate generation and signing steps need to be done
manually in that case.

View File

@ -6,8 +6,6 @@ production. To further decrease the impact of a failed host, you can set up
periodic, efficient, incremental :ref:`datastore synchronization <syncjobs>`
from other Proxmox Backup Server instances.
.. _minimum_system_requirements:
Minimum Server Requirements, for Evaluation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@ -40,8 +38,7 @@ Recommended Server System Requirements
* Backup storage:
* Prefer fast storage that delivers high IOPS for random IO workloads; use
only enterprise SSDs for best results.
* Use only SSDs, for best results
* If HDDs are used: Using a metadata cache is highly recommended, for example,
add a ZFS :ref:`special device mirror <local_zfs_special_device>`.

View File

@ -61,12 +61,11 @@ In general, LTO tapes offer the following advantages:
Note that `Proxmox Backup Server` already stores compressed data, so using the
tape compression feature has no advantage.
.. _tape-supported-hardware:
Supported Hardware
------------------
`Proxmox Backup`_ Server supports `Linear Tape-Open`_ generation 5 (LTO-5)
Proxmox Backup Server supports `Linear Tape-Open`_ generation 5 (LTO-5)
or later and has best-effort support for generation 4 (LTO-4). While
many LTO-4 systems are known to work, some might need firmware updates or
do not implement necessary features to work with Proxmox Backup Server.
@ -99,31 +98,6 @@ so it takes 33 hours to read the 12TB needed to fill up an LTO-8 tape. If you wa
to write to your tape at full speed, please make sure that the source
datastore is able to deliver that performance (for example, by using SSDs).
LTO-9+ considerations
~~~~~~~~~~~~~~~~~~~~~
Since LTO-9, it is necessary to initialize new media in your drives; this is
called `Media Optimization`. This usually takes between 40 and 120 minutes per
medium. It is recommended to initialize your media in this manner with the
tools provided by the vendor of your drive or changer. Some tape
changers have a method to 'bulk' initialize media.
Because of this, formatting tapes is handled differently in Proxmox Backup
Server to avoid re-optimizing on each format/labelling. If you want to format
your media for use with Proxmox Backup Server for the first time, or after use
with another program, either use the functionality of your drive/changer, or
use the 'slow' format on the CLI:
.. code-block:: console
# proxmox-tape format --drive your-drive --fast 0
This will completely remove all pre-existing data and trigger a `Media
Optimization` pass.
If you format a partitioned LTO-9 medium with the 'fast' method (the default or
by setting `--fast 1`), only the first partition will be formatted, so make
sure to use the 'slow' method.
Terminology
-----------
@ -352,25 +326,6 @@ the status output:
│ slot │ 14 │ │ │
└───────────────┴──────────┴────────────┴─────────────┘
Advanced options
^^^^^^^^^^^^^^^^
Since not all tape changers behave the same, there is sometimes a need to
configure advanced options.
Currently, the following are available:
* `eject-before-unload`: This is needed for some changers that require a tape
to be ejected before it can be unloaded from the drive.
You can set these options with `proxmox-tape` like this:
.. code-block:: console
# proxmox-tape changer update sl3 --eject-before-unload true
.. _tape_drive_config:
Tape drives
@ -560,6 +515,8 @@ a single media pool, so a job only uses tapes from that pool.
- Create a new set when the specified Calendar Event triggers.
.. _systemd.time manpage: https://manpages.debian.org/buster/systemd/systemd.time.7.en.html
This allows you to specify points in time by using systemd like
Calendar Event specifications (see `systemd.time manpage`_).
@ -610,7 +567,7 @@ a single media pool, so a job only uses tapes from that pool.
will be double encrypted.
The password protected key is stored on each medium, so that it is
possible to `restore the key <tape_restore_encryption_key_>`_ using
possbible to `restore the key <tape_restore_encryption_key_>`_ using
the password. Please make sure to remember the password, in case
you need to restore the key.
@ -707,16 +664,16 @@ dust protection than inside a drive:
.. Note:: For failed jobs, the tape remains in the drive.
For tape libraries, the ``export-media-set`` option moves all tapes from
For tape libraries, the ``export-media`` option moves all tapes from
the media set to an export slot, making sure that the following backup
cannot use the tapes. An operator can pick up those tapes and move them
to a vault.
.. code-block:: console
# proxmox-tape backup-job update job2 --export-media-set
# proxmox-tape backup-job update job2 --export-media
.. Note:: The ``export-media-set`` option can be used to force the start
.. Note:: The ``export-media`` option can be used to force the start
of a new media set, because tapes from the current set are no
longer online.
@ -970,8 +927,6 @@ You can restore from a tape even without an existing catalog, but only the
whole media set. If you do this, the catalog will be automatically created.
.. _tape_key_management:
Encryption Key Management
~~~~~~~~~~~~~~~~~~~~~~~~~
@ -1043,7 +998,7 @@ into the drive. Then run:
.. code-block:: console
# proxmox-tape key restore
Tape Encryption Key Password: ***********
Tepe Encryption Key Password: ***********
If the password is correct, the key will get imported to the
database. Further restore jobs automatically use any available key.
@ -1183,159 +1138,3 @@ In combination with fitting prune settings and tape backup schedules, this
achieves long-term storage of some backups, while keeping the recent
backups on smaller media sets that expire roughly every 4 weeks (that is, three
plus the current week).
Disaster Recovery
-----------------
.. _Command-line Tools: command-line-tools.html
In case of major disasters, important data, or even whole servers might be
destroyed or at least damaged up to the point where everything - sometimes
including the backup server - has to be restored from a backup. For such cases,
the following step-by-step guide will help you to set up the Proxmox Backup
Server and restore everything from tape backups.
The following guide will explain the necessary steps using both the web GUI and
the command line tools. For an overview of the command line tools, see
`Command-line Tools`_.
Setting Up a Datastore
~~~~~~~~~~~~~~~~~~~~~~
.. _proxmox-backup-manager: proxmox-backup-manager/man1.html
.. _Installation: installation.html
After you set up a new Proxmox Backup Server, as outlined in the `Installation`_
chapter, first set up a datastore so a tape can be restored to it:
#. Go to **Administration -> Storage / Disks** and make sure that the disk that
will be used as a datastore shows up.
#. Under the **Directory** or **ZFS** tabs, you can either choose to create a
directory or create a ZFS ``zpool``, respectively. Here you can also directly
add the newly created directory or ZFS ``zpool`` as a datastore.
Alternatively, the `proxmox-backup-manager`_ can be used to perform the same
tasks. For more information, check the :ref:`datastore_intro` documentation.
Setting Up the Tape Drive
~~~~~~~~~~~~~~~~~~~~~~~~~
#. Make sure you have a properly working tape drive and/or changer matching the
medium you want to restore from.
#. Connect the tape changer(s) and the tape drive(s) to the backup server. These
should be detected automatically by Linux. You can get a list of available
drives using:
.. code-block:: console
# proxmox-tape drive scan
┌────────────────────────────────┬────────┬─────────────┬────────┐
│ path │ vendor │ model │ serial │
╞════════════════════════════════╪════════╪═════════════╪════════╡
│ /dev/tape/by-id/scsi-12345-sg │ IBM │ ULT3580-TD4 │ 12345 │
└────────────────────────────────┴────────┴─────────────┴────────┘
You can get a list of available changers with:
.. code-block:: console
# proxmox-tape changer scan
┌─────────────────────────────┬─────────┬──────────────┬────────┐
│ path │ vendor │ model │ serial │
╞═════════════════════════════╪═════════╪══════════════╪════════╡
│ /dev/tape/by-id/scsi-CC2C52 │ Quantum │ Superloader3 │ CC2C52 │
└─────────────────────────────┴─────────┴──────────────┴────────┘
For more information, please read the chapters
on :ref:`tape_changer_config` and :ref:`tape_drive_config`.
#. If you have a tape changer, go to the web interface of the Proxmox Backup
Server, go to **Tape Backup -> Changers** and add it. For examples using the
command line, read the chapter on :ref:`tape_changer_config`. If the changer
has been detected correctly by Linux, the changer should show up in the list.
#. In the web interface, go to **Tape Backup -> Drives** and add the tape drive
that will be used to read the tapes. For examples using the command line,
read the chapter on :ref:`tape_drive_config`. If the tape drive has been
detected correctly by Linux, the drive should show up in the list. If the
drive also has a tape changer, make sure to select the changer as well and
assign it the correct drive number.
Restoring Data From the Tape
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. _proxmox-tape: proxmox-tape/man1.html
.. _proxmox-backup-client: proxmox-backup-client/man1.html
.. _Restore: https://pve.proxmox.com/pve-docs/chapter-vzdump.html#vzdump_restore
The following guide will explain the steps necessary to restore data from a
tape, which can be done over either the web GUI or the command line. For details
on the command line, read the documentation on the `proxmox-tape`_ tool.
To restore data from tapes, do the following:
#. Insert the first tape (as displayed on the label) into the tape drive or, if
a tape changer is available, use the tape changer to insert the tape into the
right drive. The web GUI can also be used to load or transfer tapes between
tape drives by selecting the changer.
#. If the backup has been encrypted, the encryption keys need to be restored as
well. In the **Encryption Keys** tab, press **Restore Key**. For more
details or examples that use the command line, read the
:ref:`tape_key_management` chapter.
#. The procedure for restoring data is slightly different depending on whether
you are using a standalone tape drive or a changer:
* For changers, the procedure is simple:
#. Insert all tapes from the media set you want to restore from.
#. Click on the changer in the web GUI, click **Inventory**, make sure
**Restore Catalog** is selected and press OK.
* For standalone drives, the procedure would be:
#. Insert the first tape of the media set.
#. Click **Catalog**.
#. Eject the tape, then repeat the steps for the remaining tapes of the
media set.
#. Go back to **Tape Backup**. In the **Content** tab, press **Restore** and
select the desired media set. Choose the snapshot you want to restore, press
**Next**, select the drive and target datastore and press **Restore**.
#. By going to the datastore where the data has been restored, under the
**Content** tab you should be able to see the restored snapshots. In order to
access the backups from another machine, you will need to configure the
access to the backup server. Go to **Configuration -> Access Control** and
either create a new user, or a new API token (API tokens allow easy
revocation if the token is compromised). Under **Permissions**, add the
desired permissions, e.g. **DatastoreBackup**.
#. You can now perform virtual machine, container or file restores. You now have
the following options:
* If you want to restore files on Linux distributions that are not based on
Proxmox products or you prefer using a command line tool, you can use the
`proxmox-backup-client`_, as explained in the
:ref:`client_restoring_data` chapter. Use the newly created API token to
be able to access the data. You can then restore individual files or
mount an archive to your system.
* If you want to restore virtual machines or containers on a Proxmox VE
server, add the datastore of the backup server as storage and go to
**Backups**. Here you can restore VMs and containers, including their
configuration. For more information on restoring backups in Proxmox VE,
visit the `Restore`_ chapter of the Proxmox VE documentation.

View File

@ -28,9 +28,6 @@ which are not chunked, e.g. the client log), or one or more indexes
When uploading an index, the client first has to read the source data, chunk it
and send the data as chunks with their identifying checksum to the server.
When using the :ref:`change detection mode <change_detection_mode>`, payload
chunks for unchanged files are reused from the previous snapshot, so the
source data is not read again.
If there is a previous Snapshot in the backup group, the client can first
download the chunk list of the previous Snapshot. If it detects a chunk that
@ -56,9 +53,8 @@ The chunks of a datastore are found in
<datastore-root>/.chunks/
This chunk directory is further subdivided into directories grouping chunks by
their checksums 2 byte prefix (given as 4 hexadecimal digits), so a chunk with
the checksum
This chunk directory is further subdivided by the first four bytes of the
chunk's checksum, so a chunk with the checksum
a342e8151cbf439ce65f3df696b54c67a114982cc0aa751f2852c2f7acc19a8b
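Following the prefix scheme described above (the first four hexadecimal digits
of the digest), such a chunk would be stored under a path like the following
(illustrative layout):

.. code-block:: console

   <datastore-root>/.chunks/a342/a342e8151cbf439ce65f3df696b54c67a114982cc0aa751f2852c2f7acc19a8b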
@ -134,141 +130,6 @@ This is done to speed up the client part of the backup, since it only needs to
encrypt chunks that are actually getting uploaded. Chunks that exist already in
the previous backup, do not need to be encrypted and uploaded.
Change Detection Mode for File-Based Backups
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The change detection mode controls how files which did not change between
subsequent backup runs are detected and handled, as well as the archive file
format used to encode the directory entries.
There are 3 modes available: the current default ``legacy`` mode, as well as the
``data`` and ``metadata`` modes. While the ``legacy`` mode encodes all contents
in a single ``pxar`` archive, the latter two modes split data and metadata into
``ppxar`` and ``mpxar`` archives. This is done to allow for fast comparison of
metadata with the previous snapshot, used by the ``metadata`` mode to detect
reusable files. The ``data`` mode refrains from reusing unchanged files by
rechunking the file unconditionally. This mode therefore assures that no file
changes are missed even if the metadata are unchanged.
.. NOTE:: ``pxar`` and ``mpxar``/``ppxar`` file formats are different and cannot
be deduplicated as efficiently if a datastore stores archive snapshots of
both types.
As the change detection modes are client-side changes, they are backwards
compatible with older versions of Proxmox Backup Server. However, exploring the
backup contents of the new archive format via the web interface requires a
Proxmox Backup Server with version 3.2.5 or higher. Upgrading to the latest
version is recommended for full feature compatibility.
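The change detection mode is selected per archive when creating a backup with
the ``proxmox-backup-client``; a minimal sketch, assuming the repository is
already set via ``PBS_REPOSITORY`` and using the ``--change-detection-mode``
parameter (``legacy``, ``data`` or ``metadata``):

.. code-block:: console

   # proxmox-backup-client backup root.pxar:/ --change-detection-mode=metadata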
.. _change-detection-mode-legacy:
Legacy Mode
+++++++++++
Backup snapshots of filesystems are created by recursively scanning the
directory entries. All entries to be included in the snapshot are read and
serialized by encoding them using the ``pxar``
:ref:`archive format <pxar-format>`. The resulting stream is chunked into
:ref:`dynamically sized chunks <dynamically-sized-chunks>` and uploaded to the
Proxmox Backup Server, deduplicating chunks based on their content digest for
space efficient storage.
File contents are read and chunked unconditionally; no check is performed to
detect unchanged files.
.. _change-detection-mode-data:
Data Mode
+++++++++
As with ``legacy`` mode, file contents are read and chunked unconditionally; no
check is performed to detect unchanged files.

However, in contrast to ``legacy`` mode, which stores entries' metadata and data
in a single self-contained ``pxar`` archive, the ``data`` mode encodes metadata
and file contents into two separate streams. The resulting backup snapshots
therefore contain split archives: an archive in ``mpxar``
:ref:`format <pxar-meta-format>` containing the entries' metadata, and an archive
in ``ppxar`` :ref:`format <ppxar-format>`, containing the actual file
contents, separated by payload headers for consistency checks. The metadata
archive stores a reference offset to the corresponding payload archive entry so
the file contents can be accessed. Both of these archives are chunked and
uploaded by the Proxmox backup client, resulting in separate indices and
independent chunks.
The ``mpxar`` archive can be used to efficiently fetch the associated metadata
for archive entries without the overhead of payload data stored within the same
chunks. This is used for example for entry lookups to list the archive contents
or to navigate the mounted filesystem via the FUSE implementation. No dedicated
catalog is therefore created for archives encoded using this mode.
Since metadata is not compared to the previous backup snapshot, no files will be
considered reusable by this mode, in contrast to the ``metadata`` mode. The
latter can reuse files even if they have changed, as long as file size and mtime
did not change (for example, because the mtime was restored after modifying the
file contents).
.. _change-detection-mode-metadata:
Metadata Mode
+++++++++++++
The ``metadata`` mode detects files whose file metadata did not change
in-between subsequent backup runs. The metadata comparison includes file size,
file type, ownership and permission information, as well as ACLs and attributes,
and most importantly the file's mtime; for details see the
:ref:`pxar metadata archive format <pxar-meta-format>`. The files' ctime and
inode number are neither stored nor used for comparison, since some tools (e.g.
``vzdump``) might sync the contents of the filesystem to a temporary location
before actually performing the backup via the Proxmox backup client. For these
cases, ctime and inode number will always change.
This mode will avoid reading and rechunking the file contents whenever possible
by reusing the file content chunks of unchanged files from the previous backup
snapshot.
To compare the metadata, the previous snapshot's ``mpxar`` metadata archive is
downloaded at the start of the backup run and used as a reference. Further, the
index of the ``ppxar`` payload archive is fetched and used to look up the file
content chunks' digests, which are used to reindex pre-existing chunks
without the need to reread and rechunk the file contents.
During backup, the metadata and payload archives are encoded in the same manner
as for the ``data`` mode, but for the ``metadata`` mode each entry is
additionally looked up in the metadata reference archive for comparison first.
If the file did not change as compared to the reference, the file is considered
unchanged and the Proxmox backup client enters a look-ahead caching mode. In
this mode, the client will keep reading and comparing the following entries in
the filesystem as long as they are reusable. Further, it keeps track of the
payload archive offset range these file contents are stored in. The additional
look-ahead caching is needed because file boundaries are not required to be
aligned with chunk boundaries; reused chunks can therefore contain wasted chunk
content (also called padding) if reused unconditionally.
The look-ahead cache will greedily cache all unchanged entries up to the point
where either the cache size limit is reached, a file entry with changed
metadata is encountered, or the range of payload chunks considered for reuse is
not continuous. An example for the latter is a file which disappeared in-between
subsequent backup runs, leaving a hole in the range. At this point, the caching
mode is disabled and the client calculates the wasted padding size which would
be introduced by reusing the payload chunks for all the unchanged files cached
up to this point. If the padding is acceptable (below a preset limit of 10% of
the actually reused chunk content), the files are reused by encoding them in the
metadata archive using updated offset references to the contents and reindexing
the pre-existing chunks in the new ``ppxar`` archive. If, however, the padding
is not acceptable and exceeds the limit, all cached entries are reencoded
without reusing any of the pre-existing data. The cached metadata is encoded
into the metadata archive either way, regardless of whether the cached file
contents are reused or reencoded.
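As a purely illustrative example of this threshold: if the cached unchanged
files reference payload chunks containing 1 GiB of actually reused content, up
to roughly 100 MiB of padding (10 %) is tolerated; any more than that and the
client gives up on reuse and reencodes the cached entries instead.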
This combination of look-ahead caching and reuse of pre-existing payload archive
chunks for files with unchanged contents therefore speeds up the backup
process by avoiding rereading and rechunking file contents whenever possible.
To reduce padding and increase chunk reusability, during creation of the
archives in ``data`` and ``metadata`` mode the pxar encoder signals encountered
file boundaries as suggested chunk boundaries to the sliding-window chunker.
The chunker then decides, based on its internal state, whether the suggested
boundary is accepted or disregarded.
Caveats and Limitations
-----------------------
@ -298,8 +159,8 @@ will see that the probability of a collision in that scenario is:
For context, in a lottery game of guessing 6 numbers out of 45, the chance to
correctly guess all 6 numbers is only :math:`1.2277 * 10^{-7}`. This means the
chance of a collision is lower than winning 8 such lottery games *in a row*:
:math:`(1.2277 * 10^{-7})^{8} = 5.1623 * 10^{-56}`.
chance of a collision is about the same as winning 13 such lottery games *in a
row*.
In conclusion, it is extremely unlikely that such a collision would occur by
accident in a normal datastore.
@ -319,9 +180,6 @@ read all files again for every backup, otherwise it would not be possible to
generate a consistent, independent pxar archive where the original chunks can be
reused. Note that in spite of this, only new or changed chunks will be uploaded.
In order to avoid these limitations, the Change Detection Mode ``metadata`` was
introduced.
Verification of Encrypted Chunks
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@ -376,8 +234,8 @@ Restore without a Running Proxmox Backup Server
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
It's possible to restore specific files from a snapshot, without a running
`Proxmox Backup`_ Server instance, using the ``recover`` subcommand, provided
you have access to the intact index and chunk files. Note that you also need the
Proxmox Backup Server instance, using the ``recover`` subcommand, provided you
have access to the intact index and chunk files. Note that you also need the
corresponding key file if the backup was encrypted.
.. code-block:: console

View File

@ -17,7 +17,7 @@ backup virtual machine images.
Variable sized chunking needs more CPU power, but is essential to get
good deduplication rates for file archives.
The `Proxmox Backup`_ Server supports both strategies.
The Proxmox Backup Server supports both strategies.
Image Archives: ``<name>.img``

View File

@ -12,12 +12,12 @@ User Configuration
:align: right
:alt: User management
`Proxmox Backup`_ Server supports several authentication realms, and you need to
Proxmox Backup Server supports several authentication realms, and you need to
choose the realm when you add a new user. Possible realms are:
:pam: Linux PAM standard authentication. Use this if you want to
authenticate as a Linux system user. The user needs to already exist on
the host system.
authenticate as a Linux system user (users need to exist on the
system).
:pbs: Proxmox Backup Server realm. This type stores hashed passwords in
``/etc/proxmox-backup/shadow.json``.
@ -27,9 +27,6 @@ choose the realm when you add a new user. Possible realms are:
:ldap: LDAP server. Users can authenticate against external LDAP servers.
:ad: Active Directory server. Users can authenticate against external Active
Directory servers.
After installation, there is a single user, ``root@pam``, which corresponds to
the Unix superuser. User configuration information is stored in the file
``/etc/proxmox-backup/user.cfg``. You can use the ``proxmox-backup-manager``
@ -332,19 +329,16 @@ references are specified in curly brackets.
Some examples are:
.. table::
:align: left
=========================== =========================================================
``/datastore`` Access to *all* datastores on a Proxmox Backup server
``/datastore/{store}`` Access to a specific datastore on a Proxmox Backup server
``/datastore/{store}/{ns}`` Access to a specific namespace on a specific datastore
``/remote`` Access to all remote entries
``/system/network`` Access to configure the host network
``/tape/`` Access to tape devices, pools and jobs
``/access/users`` User administration
``/access/openid/{id}`` Administrative access to a specific OpenID Connect realm
=========================== =========================================================
* `/datastore`: Access to *all* datastores on a Proxmox Backup server
* `/datastore/{store}`: Access to a specific datastore on a Proxmox Backup
server
* `/datastore/{store}/{ns}`: Access to a specific namespace on a specific
datastore
* `/remote`: Access to all remote entries
* `/system/network`: Access to configure the host network
* `/tape/`: Access to tape devices, pools and jobs
* `/access/users`: User administration
* `/access/openid/{id}`: Administrative access to a specific OpenID Connect realm
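Such a permission can also be set from the command line; a minimal sketch,
assuming a hypothetical datastore ``store1`` and user ``john@pbs``:

.. code-block:: console

   # proxmox-backup-manager acl update /datastore/store1 DatastoreAdmin --auth-id john@pbs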
Inheritance
^^^^^^^^^^^
@ -599,32 +593,6 @@ list view in the web UI, or using the command line:
Authentication Realms
---------------------
.. _user_realms_pam:
Linux PAM
~~~~~~~~~
Linux PAM is a framework for system-wide user authentication. These users are
created on the host system with commands such as ``adduser``.
If PAM users exist on the host system, corresponding entries can be added to
Proxmox Backup Server, to allow these users to log in via their system username
and password.
.. _user_realms_pbs:
Proxmox Backup authentication server
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This is a Unix-like password store, which stores hashed passwords in
``/etc/proxmox-backup/shadow.json``. Passwords are hashed using the SHA-256
hashing algorithm.
This is the most convenient realm for small-scale (or even mid-scale)
installations, where users do not need access to anything outside of Proxmox
Backup Server. In this case, users are fully managed by Proxmox Backup Server
and are able to change their own passwords via the GUI.
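For example, a user in this realm can be created on the command line; a sketch
with a hypothetical user ``john@pbs`` (setting a password and further options
can then be done via the web UI):

.. code-block:: console

   # proxmox-backup-manager user create john@pbs --email john@example.com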
.. _user_realms_ldap:
LDAP
@ -675,47 +643,15 @@ A full list of all configuration parameters can be found at :ref:`domains.cfg`.
server, you must also add them as a user of that realm in Proxmox Backup
Server. This can be carried out automatically with syncing.
.. _user_realms_ad:
User Synchronization in LDAP realms
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Active Directory
~~~~~~~~~~~~~~~~
Proxmox Backup Server can also utilize external Microsoft Active Directory
servers for user authentication.
To achieve this, a realm of the type ``ad`` has to be configured.
For an Active Directory realm, the authentication domain name and the server
address must be specified. Most options from :ref:`user_realms_ldap` apply to
Active Directory as well, most importantly the bind credentials ``bind-dn``
and ``password``. These are typically required by default for Microsoft Active
Directory. The ``bind-dn`` can be specified either in AD-specific
``user@company.net`` syntax or the common LDAP-DN syntax.
The authentication domain name must only be specified if anonymous bind is
requested. If bind credentials are given, the domain name is automatically
inferred from the bind user's base domain, as reported by the Active Directory
server.
A full list of all configuration parameters can be found at :ref:`domains.cfg`.
.. note:: In order to allow a particular user to authenticate using the Active
Directory server, you must also add them as a user of that realm in Proxmox
Backup Server. This can be carried out automatically with syncing.
.. note:: Currently, case-insensitive usernames are not supported.
User Synchronization in LDAP/AD realms
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
It is possible to automatically sync users for LDAP and AD-based realms, rather
than having to add them to Proxmox Backup Server manually. Synchronization
options can be set in the LDAP realm configuration dialog window in the GUI and
via the ``proxmox-backup-manager ldap`` and ``proxmox-backup-manager ad``
commands, respectively.
User synchronization can be started in the GUI under **Configuration > Access
Control > Realms** by selecting a realm and pressing the `Sync` button. In the
sync dialog, some of the default options set in the realm configuration can be
overridden. Alternatively, user synchronization can also be started via the
``proxmox-backup-manager ldap sync`` and ``proxmox-backup-manager ad sync``
command, respectively.
It is possible to automatically sync users for LDAP-based realms, rather than
having to add them to Proxmox VE manually. Synchronization options can be set
in the LDAP realm configuration dialog window in the GUI and via the
``proxmox-backup-manager ldap create/update`` command.
User synchronization can be started in the GUI at
Configuration > Access Control > Realms by selecting a realm and pressing the
`Sync` button. In the sync dialog, some of the default options set in the realm
configuration can be overridden. Alternatively, user synchronization can also
be started via the ``proxmox-backup-manager ldap sync`` command.
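A command-line sketch of the sync command mentioned above, assuming a
hypothetical realm named ``company-ldap`` passed as argument:

.. code-block:: console

   # proxmox-backup-manager ldap sync company-ldap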

View File

@ -1,346 +0,0 @@
.. _using_the_installer:
Install `Proxmox Backup`_ Server using the Installer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Download the ISO from |DOWNLOADS|.
It includes the following:
* The Proxmox Backup Server installer, which partitions the local
disk(s) with ext4, xfs or ZFS, and installs the operating system
* Complete operating system (Debian Linux, 64-bit)
* Proxmox Linux kernel with ZFS support
* Complete toolset to administer backups and all necessary resources
* Web-based management interface
.. note:: Any existing data on the selected drives will be overwritten
during the installation process. The installer does not add boot
menu entries for other operating systems.
Please insert the :ref:`installation_medium` (for example, USB flash
drive or DVD) and boot from it.
.. note:: You may need to go into your server's firmware settings, to
enable booting from your installation medium (for example, USB) and
set the desired boot order. When booting an installer prior to
`Proxmox Backup`_ Server version 3.1, Secure Boot needs to be
disabled.
.. image:: images/screenshots/pbs-installer-grub-menu.png
:target: _images/pbs-installer-grub-menu.png
:align: right
:alt: Proxmox Backup Server Installer GRUB Menu
After choosing the correct entry (for example, *Boot from USB*) the
Proxmox Backup Server menu will be displayed, and one of the following
options can be selected:
**Install Proxmox Backup Server (Graphical)**
Starts the normal installation.
TIP: It's possible to use the installation wizard with a keyboard only. Buttons
can be clicked by pressing the ``ALT`` key combined with the underlined character
from the respective button. For example, ``ALT + N`` to press a ``Next`` button.
**Install Proxmox Backup Server (Console)**
Starts the terminal-mode installation wizard. It provides the same overall
installation experience as the graphical installer, but has generally better
compatibility with very old and very new hardware.
**Install Proxmox Backup Server (Terminal UI, Serial Console)**
Starts the terminal-mode installation wizard, additionally setting up the Linux
kernel to use the (first) serial port of the machine for in- and output. This
can be used if the machine is completely headless and only has a serial console
available.
.. image:: images/screenshots/pbs-tui-installer.png
:target: _images/pbs-tui-installer.png
:align: right
:alt: Proxmox Backup Server Terminal UI Installer
Both modes use the same code base for the actual installation process to
benefit from more than a decade of bug fixes and ensure feature parity.
TIP: The *Console* or *Terminal UI* option can be used in case the graphical
installer does not work correctly, due to e.g. driver issues. See also
:ref:`nomodeset_kernel_param`.
**Advanced Options: Install Proxmox Backup Server (Debug Mode)**
Starts the installation in debug mode. A console will be opened at several
installation steps. This helps to debug the situation if something goes wrong.
To exit a debug console, press ``CTRL-D``. This option can be used to boot a
live system with all basic tools available. You can use it, for example, to
repair a degraded ZFS *rpool* or fix the :ref:`chapter-systembooting` for an
existing Proxmox Backup Server setup.
**Advanced Options: Install Proxmox Backup Server (Terminal UI, Debug Mode)**
Same as the graphical debug mode, but preparing the system to run the
terminal-based installer instead.
**Advanced Options: Install Proxmox Backup Server (Serial Console Debug Mode)**
Same as the terminal-based debug mode, but additionally sets up the Linux kernel to
use the (first) serial port of the machine for in- and output.
**Advanced Options: Rescue Boot**
With this option you can boot an existing installation. It searches all attached
hard disks. If it finds an existing installation, it boots directly into that
disk using the Linux kernel from the ISO. This can be useful if there are
problems with the bootloader (GRUB/``systemd-boot``) or the BIOS/UEFI is unable
to read the boot block from the disk.
**Advanced Options: Test Memory (memtest86+)**
Runs *memtest86+*. This is useful to check if the memory is functional and free
of errors. Secure Boot must be turned off in the UEFI firmware setup utility to
run this option.
You normally select *Install Proxmox Backup Server (Graphical)* to start the
installation.
The first step is to read our EULA (End User License Agreement). Following this,
you can select the target hard disk(s) for the installation.
.. caution:: By default, the whole server is used and all existing data is
removed. Make sure there is no important data on the server before proceeding
with the installation.
The *Options* button lets you select the target file system, which defaults to
``ext4``. The installer uses LVM if you select ``ext4`` or ``xfs`` as a file
system, and offers additional options to restrict LVM space (see :ref:`below
<advanced_lvm_options>`).
.. image:: images/screenshots/pbs-installer-select-disk.png
:target: _images/pbs-installer-select-disk.png
:align: right
:alt: Proxmox Backup Server Installer - Harddisk Selection Dialog
Proxmox Backup Server can also be installed on ZFS. As ZFS offers several
software RAID levels, this is an option for systems that don't have a hardware
RAID controller. The target disks must be selected in the *Options* dialog. More
ZFS specific settings can be changed under :ref:`Advanced Options
<advanced_zfs_options>`.
.. warning:: ZFS on top of any hardware RAID is not supported and can result in
data loss.
.. image:: images/screenshots/pbs-installer-location.png
:target: _images/pbs-installer-location.png
:align: right
:alt: Proxmox Backup Server Installer - Location and timezone configuration
The next page asks for basic configuration options like your location, time
zone, and keyboard layout. The location is used to select a nearby download
server, in order to increase the speed of updates. The installer is usually able
to auto-detect these settings, so you only need to change them in rare
situations when auto-detection fails, or when you want to use a keyboard layout
not commonly used in your country.
.. image:: images/screenshots/pbs-installer-password.png
:target: _images/pbs-installer-password.png
:align: left
:alt: Proxmox Backup Server Installer - Password and email configuration
Next the password of the superuser (``root``) and an email address needs to be
specified. The password must consist of at least 8 characters. It's highly
recommended to use a stronger password. Some guidelines are:
|
|
- Use a password length of at least 12 characters.
- Include lowercase and uppercase alphabetic characters, numbers, and symbols.
- Avoid character repetition, keyboard patterns, common dictionary words,
letter or number sequences, usernames, relative or pet names, romantic links
(current or past), and biographical information (for example ID numbers,
ancestors' names or dates).
The email address is used to send notifications to the system administrator.
For example:
- Information about available package updates.
- Error messages from periodic *cron* jobs.
.. image:: images/screenshots/pbs-installer-network.png
:target: _images/pbs-installer-network.png
:align: right
:alt: Proxmox Backup Server Installer - Network configuration
All those notification mails will be sent to the specified email address.
The last step is the network configuration. Network interfaces that are *UP*
show a filled circle in front of their name in the drop down menu. Please note
that during installation you can either specify an IPv4 or IPv6 address, but not
both. To configure a dual stack node, add additional IP addresses after the
installation.
.. image:: images/screenshots/pbs-installer-progress.png
:target: _images/pbs-installer-progress.png
:align: left
:alt: Proxmox Backup Server Installer - Installation progress
The next step shows a summary of the previously selected options. Please
re-check every setting and use the *Previous* button if a setting needs to be
changed.
After clicking *Install*, the installer will begin to format the disks and copy
packages to the target disk(s). Please wait until this step has finished; then
remove the installation medium and restart your system.
.. image:: images/screenshots/pbs-installer-summary.png
:target: _images/pbs-installer-summary.png
:align: right
:alt: Proxmox Backup Server Installer - Installation summary
Copying the packages usually takes several minutes, mostly depending on the
speed of the installation medium and the target disk performance.
When copying and setting up the packages has finished, you can reboot the
server. This will be done automatically after a few seconds by default.
Installation Failure
^^^^^^^^^^^^^^^^^^^^
If the installation failed, check out specific errors on the second TTY
(``CTRL + ALT + F2``) and ensure that the system meets the
:ref:`minimum requirements <minimum_system_requirements>`.
If the installation is still not working, look at the :ref:`how to get help
chapter <get_help>`.
Accessing the Management Interface Post-Installation
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. image:: images/screenshots/pbs-gui-login-window.png
:target: _images/pbs-gui-login-window.png
:align: right
:alt: Proxmox Backup Server - Management interface login dialog
After a successful installation and reboot of the system you can use the Proxmox
Backup Server web interface for further configuration.
- Point your browser to the IP address given during the installation and port
8007, for example: https://pbs.yourdomain.tld:8007
- Log in using the ``root`` (realm *Linux PAM standard authentication*) username
and the password chosen during installation.
- Upload your subscription key to gain access to the Enterprise repository.
Otherwise, you will need to set up one of the public, less tested package
repositories to get updates for security fixes, bug fixes, and new features.
- Check the IP configuration and hostname.
- Check the timezone.
.. _advanced_lvm_options:
Advanced LVM Configuration Options
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The installer creates a Volume Group (VG) called ``pbs``, and additional Logical
Volumes (LVs) called ``root`` and ``swap``, if ``ext4`` or ``xfs`` is used as the
filesystem. To control the size of these volumes, use:
- *hdsize*
Defines the total hard disk size to be used. This way you can reserve free
space on the hard disk for further partitioning.
- *swapsize*
Defines the size of the ``swap`` volume. The default is the size of the
installed memory, minimum 4 GB and maximum 8 GB. The resulting value cannot
be greater than ``hdsize/8``.
If set to ``0``, no ``swap`` volume will be created.
- *minfree*
Defines the amount of free space that should be left in the LVM volume group
``pbs``. With more than 128GB storage available, the default is 16GB,
otherwise ``hdsize/8`` will be used.
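As a worked example of these defaults: with an *hdsize* of 64 GB and 32 GB of
installed memory, ``hdsize/8`` is 8 GB, so the ``swap`` volume is capped at
8 GB and *minfree* also defaults to 8 GB, since less than 128 GB of storage is
available.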
.. _advanced_zfs_options:
Advanced ZFS Configuration Options
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The installer creates the ZFS pool ``rpool``, if ZFS is used. No swap space is
created but you can reserve some unpartitioned space on the install disks for
swap. You can also create a swap zvol after the installation, although this can
lead to problems (see :ref:`ZFS swap notes <zfs_swap>`).
- *ashift*
Defines the *ashift* value for the created pool. The *ashift* needs to be
set at least to the sector-size of the underlying disks (2 to the power of
*ashift* is the sector-size; for example, an *ashift* of 12 corresponds to
4096-byte sectors), or of any disk which might be put in the pool later
(for example the replacement of a defective disk).
- *compress*
Defines whether compression is enabled for ``rpool``.
- *checksum*
Defines which checksumming algorithm should be used for ``rpool``.
- *copies*
Defines the *copies* parameter for ``rpool``. Check the ``zfs(8)`` manpage
for the semantics, and why this does not replace redundancy on disk-level.
- *hdsize*
Defines the total hard disk size to be used. This is useful to save free
space on the hard disk(s) for further partitioning (for example, to create a
swap partition). *hdsize* is only honored for bootable disks, that is only
the first disk or mirror for RAID0, RAID1 or RAID10, and all disks in
RAID-Z[123].
ZFS Performance Tips
^^^^^^^^^^^^^^^^^^^^
ZFS works best with a lot of memory. If you intend to use ZFS make sure to have
enough RAM available for it. A good calculation is 4GB plus 1GB RAM for each TB
of raw disk space.
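For example, following this rule of thumb, a pool built from 16 TB of raw disk
space calls for roughly 4 GB + 16 x 1 GB = 20 GB of RAM.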
ZFS can use a dedicated drive as write cache, called the ZFS Intent Log (ZIL).
Use a fast drive (SSD) for it. It can be added after installation with the
following command:
.. code-block:: console
# zpool add <pool-name> log </dev/path_to_fast_ssd>
.. _nomodeset_kernel_param:
Adding the ``nomodeset`` Kernel Parameter
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Problems may arise on very old or very new hardware due to graphics drivers. If
the installation hangs during boot, you can try adding the ``nomodeset``
parameter. This prevents the Linux kernel from loading any graphics drivers and
forces it to continue using the BIOS/UEFI-provided framebuffer.
On the Proxmox Backup Server bootloader menu, navigate to *Install Proxmox
Backup Server (Console)* and press ``e`` to edit the entry. Using the arrow
keys, navigate to the line starting with ``linux``, move the cursor to the end
of that line and add the parameter ``nomodeset``, separated by a space from the
pre-existing last parameter.
Then press ``Ctrl-X`` or ``F10`` to boot the configuration.

View File

@ -2,7 +2,6 @@ include ../defines.mk
UNITS := \
proxmox-backup-daily-update.timer \
removable-device-attach@.service
DYNAMIC_UNITS := \
proxmox-backup-banner.service \

View File

@ -1,8 +0,0 @@
[Unit]
Description=Try to mount the removable device of a datastore with uuid '%i'.
After=proxmox-backup-proxy.service
Requires=proxmox-backup-proxy.service
[Service]
Type=simple
ExecStart=/usr/sbin/proxmox-backup-manager datastore uuid-mount %i
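As a usage sketch for this template unit, an instance can be started manually
for a given datastore UUID (placeholder shown); systemd substitutes the
instance name for ``%i``:

.. code-block:: console

   # systemctl start removable-device-attach@<uuid>.service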

View File

@ -34,7 +34,7 @@ async fn run() -> Result<(), Error> {
let backup_time = proxmox_time::parse_rfc3339("2019-06-28T10:49:48Z")?;
let client = BackupReader::start(
&client,
client,
None,
"store2",
&BackupNamespace::root(),

Some files were not shown because too many files have changed in this diff.