Compare commits

..

No commits in common. "master" and "v3.4.0" have entirely different histories.

16 changed files with 72 additions and 189 deletions

View File

@ -1,5 +1,5 @@
[workspace.package]
version = "3.4.1"
version = "3.4.0"
authors = [
"Dietmar Maurer <dietmar@proxmox.com>",
"Dominik Csapak <d.csapak@proxmox.com>",

View File

@ -50,8 +50,6 @@ COMPILEDIR := target/$(DEB_HOST_RUST_TYPE)/debug
STATIC_COMPILEDIR := $(STATIC_TARGET_DIR)/$(DEB_HOST_RUST_TYPE)/debug
endif
STATIC_RUSTC_FLAGS := -C target-feature=+crt-static -L $(STATIC_COMPILEDIR)/deps-stubs/
ifeq ($(valgrind), yes)
CARGO_BUILD_ARGS += --features valgrind
endif
@ -61,8 +59,8 @@ CARGO ?= cargo
COMPILED_BINS := \
$(addprefix $(COMPILEDIR)/,$(USR_BIN) $(USR_SBIN) $(SERVICE_BIN) $(RESTORE_BIN))
STATIC_BINS := \
$(addprefix $(STATIC_COMPILEDIR)/,proxmox-backup-client-static pxar-static)
STATIC_BIN := \
$(addprefix $(STATIC_COMPILEDIR)/,proxmox-backup-client-static)
export DEB_VERSION DEB_VERSION_UPSTREAM
@ -155,7 +153,7 @@ clean: clean-deb
$(foreach i,$(SUBDIRS), \
$(MAKE) -C $(i) clean ;)
$(CARGO) clean
rm -f .do-cargo-build .do-static-cargo-build
rm -f .do-cargo-build
# allows one to avoid running cargo clean when one just wants to tidy up after a package build
clean-deb:
@ -204,25 +202,12 @@ $(COMPILED_BINS) $(COMPILEDIR)/dump-catalog-shell-cli $(COMPILEDIR)/docgen: .do-
--bin sg-tape-cmd
touch "$@"
.PHONY: proxmox-backup-client-static
proxmox-backup-client-static:
rm -f .do-static-cargo-build
$(MAKE) $(STATIC_BINS)
$(STATIC_BINS): .do-static-cargo-build
.do-static-cargo-build:
mkdir -p $(STATIC_COMPILEDIR)/deps-stubs/ && \
echo '!<arch>' > $(STATIC_COMPILEDIR)/deps-stubs/libsystemd.a # workaround for too greedy linkage and proxmox-systemd
$(CARGO) rustc $(CARGO_BUILD_ARGS) --package pxar-bin --bin pxar \
--target-dir $(STATIC_TARGET_DIR) -- $(STATIC_RUSTC_FLAGS)
$(CARGO) rustc $(CARGO_BUILD_ARGS) --package proxmox-backup-client --bin proxmox-backup-client \
--target-dir $(STATIC_TARGET_DIR) -- $(STATIC_RUSTC_FLAGS)
.PHONY: lint
lint:
cargo clippy -- -A clippy::all -D clippy::correctness
install: $(COMPILED_BINS) $(STATIC_BINS)
install: $(COMPILED_BINS) $(STATIC_BIN)
install -dm755 $(DESTDIR)$(BINDIR)
install -dm755 $(DESTDIR)$(ZSH_COMPL_DEST)
$(foreach i,$(USR_BIN), \
@ -242,7 +227,6 @@ install: $(COMPILED_BINS) $(STATIC_BINS)
$(foreach i,$(SERVICE_BIN), \
install -m755 $(COMPILEDIR)/$(i) $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/ ;)
install -m755 $(STATIC_COMPILEDIR)/proxmox-backup-client $(DESTDIR)$(BINDIR)/proxmox-backup-client-static
install -m755 $(STATIC_COMPILEDIR)/pxar $(DESTDIR)$(BINDIR)/pxar-static
$(MAKE) -C www install
$(MAKE) -C docs install
$(MAKE) -C templates install
@ -257,3 +241,14 @@ upload: $(SERVER_DEB) $(CLIENT_DEB) $(RESTORE_DEB) $(DOC_DEB) $(STATIC_CLIENT_DE
tar cf - $(CLIENT_DEB) $(CLIENT_DBG_DEB) | ssh -X repoman@repo.proxmox.com upload --product "pve,pmg,pbs-client" --dist $(UPLOAD_DIST)
tar cf - $(STATIC_CLIENT_DEB) $(STATIC_CLIENT_DBG_DEB) | ssh -X repoman@repo.proxmox.com upload --product "pbs-client" --dist $(UPLOAD_DIST)
tar cf - $(RESTORE_DEB) $(RESTORE_DBG_DEB) | ssh -X repoman@repo.proxmox.com upload --product "pve" --dist $(UPLOAD_DIST)
.PHONY: proxmox-backup-client-static
proxmox-backup-client-static:
rm -f $(STATIC_BIN)
$(MAKE) $(STATIC_BIN)
$(STATIC_BIN):
mkdir -p $(STATIC_COMPILEDIR)/deps-stubs/ && \
echo '!<arch>' > $(STATIC_COMPILEDIR)/deps-stubs/libsystemd.a # workaround for too greedy linkage and proxmox-systemd
$(CARGO) rustc $(CARGO_BUILD_ARGS) --package proxmox-backup-client --bin proxmox-backup-client \
--target-dir $(STATIC_TARGET_DIR) -- -C target-feature=+crt-static -L $(STATIC_COMPILEDIR)/deps-stubs/

23
debian/changelog vendored
View File

@ -1,26 +1,3 @@
rust-proxmox-backup (3.4.1-1) bookworm; urgency=medium
* ui: token view: fix typo in 'lose' and rephrase token regenerate dialog
message for more clarity.
* restrict consent-banner text length to 64 KiB.
* docs: describe the intent for the statically linked pbs client.
* api: backup: include previous snapshot name in log message.
* garbage collection: account for created/deleted index files concurrently
to GC to avoid potentially confusing log messages.
* garbage collection: fix rare race in chunk marking phase for setups doing
high frequent backups in quick succession while immediately pruning to a
single backup snapshot being left over after each such backup.
* tape: wait for calibration of LTO-9 tapes in general, not just in the
initial tape format procedure.
-- Proxmox Support Team <support@proxmox.com> Wed, 16 Apr 2025 14:45:37 +0200
rust-proxmox-backup (3.4.0-1) bookworm; urgency=medium
* fix #4788: build statically linked version of the proxmox-backup-client

2
debian/control vendored
View File

@ -208,7 +208,7 @@ Description: Proxmox Backup Client tools
Package: proxmox-backup-client-static
Architecture: any
Depends: qrencode, ${misc:Depends},
Conflicts: proxmox-backup-client,
Conflicts: proxmox-backup-client
Description: Proxmox Backup Client tools (statically linked)
This package contains the Proxmox Backup client, which provides a
simple command line tool to create and restore backups.

View File

@ -1,2 +1 @@
debian/proxmox-backup-client.bc proxmox-backup-client
debian/pxar.bc pxar

View File

@ -1,4 +1,2 @@
usr/share/man/man1/proxmox-backup-client.1
usr/share/man/man1/pxar.1
usr/share/zsh/vendor-completions/_proxmox-backup-client
usr/share/zsh/vendor-completions/_pxar

View File

@ -34,13 +34,13 @@ usr/share/man/man5/media-pool.cfg.5
usr/share/man/man5/notifications-priv.cfg.5
usr/share/man/man5/notifications.cfg.5
usr/share/man/man5/proxmox-backup.node.cfg.5
usr/share/man/man5/prune.cfg.5
usr/share/man/man5/remote.cfg.5
usr/share/man/man5/sync.cfg.5
usr/share/man/man5/tape-job.cfg.5
usr/share/man/man5/tape.cfg.5
usr/share/man/man5/user.cfg.5
usr/share/man/man5/verification.cfg.5
usr/share/man/man5/prune.cfg.5
usr/share/proxmox-backup/templates/default/acme-err-body.txt.hbs
usr/share/proxmox-backup/templates/default/acme-err-subject.txt.hbs
usr/share/proxmox-backup/templates/default/gc-err-body.txt.hbs

1
debian/rules vendored
View File

@ -49,7 +49,6 @@ override_dh_auto_install:
LIBDIR=/usr/lib/$(DEB_HOST_MULTIARCH)
mkdir -p debian/proxmox-backup-client-static/usr/bin
mv debian/tmp/usr/bin/proxmox-backup-client-static debian/proxmox-backup-client-static/usr/bin/proxmox-backup-client
mv debian/tmp/usr/bin/pxar-static debian/proxmox-backup-client-static/usr/bin/pxar
override_dh_installsystemd:
dh_installsystemd -pproxmox-backup-server proxmox-backup-daily-update.timer

View File

@ -46,21 +46,11 @@ user\@pbs!token@host:store ``user@pbs!token`` host:8007 store
[ff80::51]:1234:mydatastore ``root@pam`` [ff80::51]:1234 mydatastore
================================ ================== ================== ===========
.. _statically_linked_client:
Statically Linked Backup Client
-------------------------------
A statically linked version of the Proxmox Backup client is available for Linux
based systems where the regular client is not available. Please note that it is
recommended to use the regular client when possible, as the statically linked
client is not a full replacement. For example, name resolution will not be
performed via the mechanisms provided by libc, but uses a resolver written
purely in the Rust programming language. Therefore, features and modules
provided by Name Service Switch cannot be used.
The statically linked client is available via the ``pbs-client`` repository as
described in the :ref:`installation <install_pbc>` section.
.. Note:: If you are using the statically linked binary of proxmox backup client
name resolution will not be performed via the mechanisms provided by libc,
but uses a resolver written purely in the Rust programming language.
Therefore, features and modules provided by Name Service Switch cannot be
used.
.. _environment-variables:

View File

@ -1031,15 +1031,13 @@ impl DataStore {
Ok(list)
}
// Similar to open index, but return with Ok(None) if index file vanished.
fn open_index_reader(
&self,
absolute_path: &Path,
) -> Result<Option<Box<dyn IndexFile>>, Error> {
// Similar to open index, but ignore index files with blob or unknown archive type.
// Further, do not fail if file vanished.
fn open_index_reader(&self, absolute_path: &Path) -> Result<Option<Box<dyn IndexFile>>, Error> {
let archive_type = match ArchiveType::from_path(absolute_path) {
// ignore archives with unknown archive type
Ok(ArchiveType::Blob) | Err(_) => bail!("unexpected archive type"),
Ok(archive_type) => archive_type,
// ignore archives with unknown archive type
Err(_) => return Ok(None),
};
if absolute_path.is_relative() {
@ -1066,7 +1064,7 @@ impl DataStore {
.with_context(|| format!("can't open dynamic index '{absolute_path:?}'"))?;
Ok(Some(Box::new(reader)))
}
ArchiveType::Blob => bail!("unexpected archive type blob"),
ArchiveType::Blob => Ok(None),
}
}
@ -1131,7 +1129,7 @@ impl DataStore {
// the detected index files not following the iterators logic.
let mut unprocessed_index_list = self.list_index_files()?;
let mut index_count = unprocessed_index_list.len();
let index_count = unprocessed_index_list.len();
let mut chunk_lru_cache = LruCache::new(cache_capacity);
let mut processed_index_files = 0;
@ -1145,108 +1143,57 @@ impl DataStore {
let namespace = namespace.context("iterating namespaces failed")?;
for group in arc_self.iter_backup_groups(namespace)? {
let group = group.context("iterating backup groups failed")?;
let mut snapshots = group.list_backups().context("listing snapshots failed")?;
// Sort by snapshot timestamp to iterate over consecutive snapshots for each image.
BackupInfo::sort_list(&mut snapshots, true);
for snapshot in snapshots {
for file in snapshot.files {
worker.check_abort()?;
worker.fail_on_shutdown()?;
// Avoid race between listing/marking of snapshots by GC and pruning the last
// snapshot in the group, following a new snapshot creation. Otherwise known chunks
// might only be referenced by the new snapshot, so it must be read as well.
let mut retry_counter = 0;
'retry: loop {
let _lock = match retry_counter {
0..=9 => None,
10 => Some(
group
.lock()
.context("exhausted retries and failed to lock group")?,
),
_ => bail!("exhausted retries and unexpected counter overrun"),
};
let mut path = snapshot.backup_dir.full_path();
path.push(file);
let mut snapshots = match group.list_backups() {
Ok(snapshots) => snapshots,
Err(err) => {
if group.exists() {
return Err(err).context("listing snapshots failed")?;
}
break 'retry;
}
};
// Always start iteration with the last snapshot of the group to reduce race
// window with concurrent backup+prune previous last snapshot. Allows to retry
// without the need to keep track of already processed index files for the
// current group.
BackupInfo::sort_list(&mut snapshots, true);
for (count, snapshot) in snapshots.into_iter().rev().enumerate() {
for file in snapshot.files {
worker.check_abort()?;
worker.fail_on_shutdown()?;
match ArchiveType::from_path(&file) {
Ok(ArchiveType::FixedIndex) | Ok(ArchiveType::DynamicIndex) => (),
Ok(ArchiveType::Blob) | Err(_) => continue,
}
let mut path = snapshot.backup_dir.full_path();
path.push(file);
let index = match self.open_index_reader(&path)? {
Some(index) => index,
None => {
unprocessed_index_list.remove(&path);
if count == 0 {
retry_counter += 1;
continue 'retry;
}
continue;
}
};
self.index_mark_used_chunks(
index,
&path,
&mut chunk_lru_cache,
status,
worker,
)?;
if !unprocessed_index_list.remove(&path) {
info!("Encountered new index file '{path:?}', increment total index file count");
index_count += 1;
}
let percentage = (processed_index_files + 1) * 100 / index_count;
if percentage > last_percentage {
info!(
"marked {percentage}% ({} of {index_count} index files)",
processed_index_files + 1,
);
last_percentage = percentage;
}
processed_index_files += 1;
let index = match self.open_index_reader(&path)? {
Some(index) => index,
None => continue,
};
self.index_mark_used_chunks(
index,
&path,
&mut chunk_lru_cache,
status,
worker,
)?;
unprocessed_index_list.remove(&path);
let percentage = (processed_index_files + 1) * 100 / index_count;
if percentage > last_percentage {
info!(
"marked {percentage}% ({} of {index_count} index files)",
processed_index_files + 1,
);
last_percentage = percentage;
}
processed_index_files += 1;
}
break;
}
}
}
let mut strange_paths_count = unprocessed_index_list.len();
let strange_paths_count = unprocessed_index_list.len();
if strange_paths_count > 0 {
warn!("found {strange_paths_count} index files outside of expected directory scheme");
}
for path in unprocessed_index_list {
let index = match self.open_index_reader(&path)? {
Some(index) => index,
None => {
// do not count vanished (pruned) backup snapshots as strange paths.
strange_paths_count -= 1;
continue;
}
None => continue,
};
self.index_mark_used_chunks(index, &path, &mut chunk_lru_cache, status, worker)?;
warn!("Marked chunks for unexpected index file at '{path:?}'");
}
if strange_paths_count > 0 {
warn!("Found {strange_paths_count} index files outside of expected directory scheme");
}
Ok(())
}

View File

@ -659,8 +659,7 @@ impl SgTape {
pub fn wait_until_ready(&mut self, timeout: Option<u64>) -> Result<(), Error> {
let start = SystemTime::now();
let timeout = timeout.unwrap_or(Self::SCSI_TAPE_DEFAULT_TIMEOUT as u64);
let mut max_wait = std::time::Duration::new(timeout, 0);
let mut increased_timeout = false;
let max_wait = std::time::Duration::new(timeout, 0);
loop {
match self.test_unit_ready() {
@ -668,16 +667,6 @@ impl SgTape {
_ => {
std::thread::sleep(std::time::Duration::new(1, 0));
if start.elapsed()? > max_wait {
if !increased_timeout {
if let Ok(DeviceActivity::Calibrating) =
read_device_activity(&mut self.file)
{
log::info!("Detected drive calibration, increasing timeout to 2 hours 5 minutes");
max_wait = std::time::Duration::new(2 * 60 * 60 + 5 * 60, 0);
increased_timeout = true;
continue;
}
}
bail!("wait_until_ready failed - got timeout");
}
}

View File

@ -853,8 +853,8 @@ fn download_previous(
};
if let Some(index) = index {
env.log(format!(
"register chunks in '{archive_name}' from previous backup '{}'.",
last_backup.backup_dir.dir(),
"register chunks in '{}' from previous backup.",
archive_name
));
for pos in 0..index.index_count() {
@ -865,10 +865,7 @@ fn download_previous(
}
}
env.log(format!(
"download '{archive_name}' from previous backup '{}'.",
last_backup.backup_dir.dir(),
));
env.log(format!("download '{}' from previous backup.", archive_name));
crate::api2::helpers::create_download_response(path).await
}
.boxed()

View File

@ -174,11 +174,6 @@ pub enum Translation {
"description" : {
optional: true,
schema: MULTI_LINE_COMMENT_SCHEMA,
},
"consent-text" : {
optional: true,
type: String,
max_length: 64 * 1024,
}
},
)]

View File

@ -480,7 +480,7 @@ impl SyncSource for LocalSource {
) -> Result<Arc<dyn SyncSourceReader>, Error> {
let dir = self.store.backup_dir(ns.clone(), dir.clone())?;
let guard = dir
.lock_shared()
.lock()
.with_context(|| format!("while reading snapshot '{dir:?}' for a sync job"))?;
Ok(Arc::new(LocalSourceReader {
_dir_lock: Arc::new(Mutex::new(guard)),

View File

@ -59,9 +59,6 @@ Ext.define('PBS.NodeOptionView', {
name: 'consent-text',
text: gettext('Consent Text'),
deleteEmpty: true,
fieldOpts: {
maxLength: 64 * 1024,
},
onlineHelp: 'consent_banner',
},
],

View File

@ -193,7 +193,7 @@ Ext.define('PBS.config.TokenView', {
handler: 'regenerateToken',
dangerous: true,
confirmMsg: rec => Ext.String.format(
gettext("Regenerate the secret of the API token '{0}'? All users of the previous token secret will lose access!"),
gettext("Regenerate the secret of the API token '{0}'? All current use-sites will loose access!"),
rec.data.tokenid,
),
},