mirror of
https://git.proxmox.com/git/proxmox-backup
synced 2025-11-02 15:18:42 +00:00
Compare commits
14 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
58fb448be5 | ||
|
|
07a21616c2 | ||
|
|
cb9814e331 | ||
|
|
31dbaf69ab | ||
|
|
af5ff86a26 | ||
|
|
5fc281cd89 | ||
|
|
6c6257b94e | ||
|
|
c644f7bc85 | ||
|
|
4a022e1a3f | ||
|
|
9247d57fdf | ||
|
|
427c687e35 | ||
|
|
f9532a3a84 | ||
|
|
d400673641 | ||
|
|
cdc710a736 |
@ -1,5 +1,5 @@
|
||||
[workspace.package]
|
||||
version = "3.4.0"
|
||||
version = "3.4.1"
|
||||
authors = [
|
||||
"Dietmar Maurer <dietmar@proxmox.com>",
|
||||
"Dominik Csapak <d.csapak@proxmox.com>",
|
||||
|
||||
35
Makefile
35
Makefile
@ -50,6 +50,8 @@ COMPILEDIR := target/$(DEB_HOST_RUST_TYPE)/debug
|
||||
STATIC_COMPILEDIR := $(STATIC_TARGET_DIR)/$(DEB_HOST_RUST_TYPE)/debug
|
||||
endif
|
||||
|
||||
STATIC_RUSTC_FLAGS := -C target-feature=+crt-static -L $(STATIC_COMPILEDIR)/deps-stubs/
|
||||
|
||||
ifeq ($(valgrind), yes)
|
||||
CARGO_BUILD_ARGS += --features valgrind
|
||||
endif
|
||||
@ -59,8 +61,8 @@ CARGO ?= cargo
|
||||
COMPILED_BINS := \
|
||||
$(addprefix $(COMPILEDIR)/,$(USR_BIN) $(USR_SBIN) $(SERVICE_BIN) $(RESTORE_BIN))
|
||||
|
||||
STATIC_BIN := \
|
||||
$(addprefix $(STATIC_COMPILEDIR)/,proxmox-backup-client-static)
|
||||
STATIC_BINS := \
|
||||
$(addprefix $(STATIC_COMPILEDIR)/,proxmox-backup-client-static pxar-static)
|
||||
|
||||
export DEB_VERSION DEB_VERSION_UPSTREAM
|
||||
|
||||
@ -153,7 +155,7 @@ clean: clean-deb
|
||||
$(foreach i,$(SUBDIRS), \
|
||||
$(MAKE) -C $(i) clean ;)
|
||||
$(CARGO) clean
|
||||
rm -f .do-cargo-build
|
||||
rm -f .do-cargo-build .do-static-cargo-build
|
||||
|
||||
# allows one to avoid running cargo clean when one just wants to tidy up after a package build
|
||||
clean-deb:
|
||||
@ -202,12 +204,25 @@ $(COMPILED_BINS) $(COMPILEDIR)/dump-catalog-shell-cli $(COMPILEDIR)/docgen: .do-
|
||||
--bin sg-tape-cmd
|
||||
touch "$@"
|
||||
|
||||
.PHONY: proxmox-backup-client-static
|
||||
proxmox-backup-client-static:
|
||||
rm -f .do-static-cargo-build
|
||||
$(MAKE) $(STATIC_BINS)
|
||||
|
||||
$(STATIC_BINS): .do-static-cargo-build
|
||||
.do-static-cargo-build:
|
||||
mkdir -p $(STATIC_COMPILEDIR)/deps-stubs/ && \
|
||||
echo '!<arch>' > $(STATIC_COMPILEDIR)/deps-stubs/libsystemd.a # workaround for too greedy linkage and proxmox-systemd
|
||||
$(CARGO) rustc $(CARGO_BUILD_ARGS) --package pxar-bin --bin pxar \
|
||||
--target-dir $(STATIC_TARGET_DIR) -- $(STATIC_RUSTC_FLAGS)
|
||||
$(CARGO) rustc $(CARGO_BUILD_ARGS) --package proxmox-backup-client --bin proxmox-backup-client \
|
||||
--target-dir $(STATIC_TARGET_DIR) -- $(STATIC_RUSTC_FLAGS)
|
||||
|
||||
.PHONY: lint
|
||||
lint:
|
||||
cargo clippy -- -A clippy::all -D clippy::correctness
|
||||
|
||||
install: $(COMPILED_BINS) $(STATIC_BIN)
|
||||
install: $(COMPILED_BINS) $(STATIC_BINS)
|
||||
install -dm755 $(DESTDIR)$(BINDIR)
|
||||
install -dm755 $(DESTDIR)$(ZSH_COMPL_DEST)
|
||||
$(foreach i,$(USR_BIN), \
|
||||
@ -227,6 +242,7 @@ install: $(COMPILED_BINS) $(STATIC_BIN)
|
||||
$(foreach i,$(SERVICE_BIN), \
|
||||
install -m755 $(COMPILEDIR)/$(i) $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/ ;)
|
||||
install -m755 $(STATIC_COMPILEDIR)/proxmox-backup-client $(DESTDIR)$(BINDIR)/proxmox-backup-client-static
|
||||
install -m755 $(STATIC_COMPILEDIR)/pxar $(DESTDIR)$(BINDIR)/pxar-static
|
||||
$(MAKE) -C www install
|
||||
$(MAKE) -C docs install
|
||||
$(MAKE) -C templates install
|
||||
@ -241,14 +257,3 @@ upload: $(SERVER_DEB) $(CLIENT_DEB) $(RESTORE_DEB) $(DOC_DEB) $(STATIC_CLIENT_DE
|
||||
tar cf - $(CLIENT_DEB) $(CLIENT_DBG_DEB) | ssh -X repoman@repo.proxmox.com upload --product "pve,pmg,pbs-client" --dist $(UPLOAD_DIST)
|
||||
tar cf - $(STATIC_CLIENT_DEB) $(STATIC_CLIENT_DBG_DEB) | ssh -X repoman@repo.proxmox.com upload --product "pbs-client" --dist $(UPLOAD_DIST)
|
||||
tar cf - $(RESTORE_DEB) $(RESTORE_DBG_DEB) | ssh -X repoman@repo.proxmox.com upload --product "pve" --dist $(UPLOAD_DIST)
|
||||
|
||||
.PHONY: proxmox-backup-client-static
|
||||
proxmox-backup-client-static:
|
||||
rm -f $(STATIC_BIN)
|
||||
$(MAKE) $(STATIC_BIN)
|
||||
|
||||
$(STATIC_BIN):
|
||||
mkdir -p $(STATIC_COMPILEDIR)/deps-stubs/ && \
|
||||
echo '!<arch>' > $(STATIC_COMPILEDIR)/deps-stubs/libsystemd.a # workaround for too greedy linkage and proxmox-systemd
|
||||
$(CARGO) rustc $(CARGO_BUILD_ARGS) --package proxmox-backup-client --bin proxmox-backup-client \
|
||||
--target-dir $(STATIC_TARGET_DIR) -- -C target-feature=+crt-static -L $(STATIC_COMPILEDIR)/deps-stubs/
|
||||
|
||||
23
debian/changelog
vendored
23
debian/changelog
vendored
@ -1,3 +1,26 @@
|
||||
rust-proxmox-backup (3.4.1-1) bookworm; urgency=medium
|
||||
|
||||
* ui: token view: fix typo in 'lose' and rephrase token regenerate dialog
|
||||
message for more clarity.
|
||||
|
||||
* restrict consent-banner text length to 64 KiB.
|
||||
|
||||
* docs: describe the intent for the statically linked pbs client.
|
||||
|
||||
* api: backup: include previous snapshot name in log message.
|
||||
|
||||
* garbage collection: account for created/deleted index files concurrently
|
||||
to GC to avoid potentially confusing log messages.
|
||||
|
||||
* garbage collection: fix rare race in chunk marking phase for setups doing
|
||||
high-frequency backups in quick succession while immediately pruning to a
|
||||
single backup snapshot being left over after each such backup.
|
||||
|
||||
* tape: wait for calibration of LTO-9 tapes in general, not just in the
|
||||
initial tape format procedure.
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Wed, 16 Apr 2025 14:45:37 +0200
|
||||
|
||||
rust-proxmox-backup (3.4.0-1) bookworm; urgency=medium
|
||||
|
||||
* fix #4788: build statically linked version of the proxmox-backup-client
|
||||
|
||||
2
debian/control
vendored
2
debian/control
vendored
@ -208,7 +208,7 @@ Description: Proxmox Backup Client tools
|
||||
Package: proxmox-backup-client-static
|
||||
Architecture: any
|
||||
Depends: qrencode, ${misc:Depends},
|
||||
Conflicts: proxmox-backup-client
|
||||
Conflicts: proxmox-backup-client,
|
||||
Description: Proxmox Backup Client tools (statically linked)
|
||||
This package contains the Proxmox Backup client, which provides a
|
||||
simple command line tool to create and restore backups.
|
||||
|
||||
@ -1 +1,2 @@
|
||||
debian/proxmox-backup-client.bc proxmox-backup-client
|
||||
debian/pxar.bc pxar
|
||||
|
||||
2
debian/proxmox-backup-client-static.install
vendored
2
debian/proxmox-backup-client-static.install
vendored
@ -1,2 +1,4 @@
|
||||
usr/share/man/man1/proxmox-backup-client.1
|
||||
usr/share/man/man1/pxar.1
|
||||
usr/share/zsh/vendor-completions/_proxmox-backup-client
|
||||
usr/share/zsh/vendor-completions/_pxar
|
||||
|
||||
2
debian/proxmox-backup-server.install
vendored
2
debian/proxmox-backup-server.install
vendored
@ -34,13 +34,13 @@ usr/share/man/man5/media-pool.cfg.5
|
||||
usr/share/man/man5/notifications-priv.cfg.5
|
||||
usr/share/man/man5/notifications.cfg.5
|
||||
usr/share/man/man5/proxmox-backup.node.cfg.5
|
||||
usr/share/man/man5/prune.cfg.5
|
||||
usr/share/man/man5/remote.cfg.5
|
||||
usr/share/man/man5/sync.cfg.5
|
||||
usr/share/man/man5/tape-job.cfg.5
|
||||
usr/share/man/man5/tape.cfg.5
|
||||
usr/share/man/man5/user.cfg.5
|
||||
usr/share/man/man5/verification.cfg.5
|
||||
usr/share/man/man5/prune.cfg.5
|
||||
usr/share/proxmox-backup/templates/default/acme-err-body.txt.hbs
|
||||
usr/share/proxmox-backup/templates/default/acme-err-subject.txt.hbs
|
||||
usr/share/proxmox-backup/templates/default/gc-err-body.txt.hbs
|
||||
|
||||
1
debian/rules
vendored
1
debian/rules
vendored
@ -49,6 +49,7 @@ override_dh_auto_install:
|
||||
LIBDIR=/usr/lib/$(DEB_HOST_MULTIARCH)
|
||||
mkdir -p debian/proxmox-backup-client-static/usr/bin
|
||||
mv debian/tmp/usr/bin/proxmox-backup-client-static debian/proxmox-backup-client-static/usr/bin/proxmox-backup-client
|
||||
mv debian/tmp/usr/bin/pxar-static debian/proxmox-backup-client-static/usr/bin/pxar
|
||||
|
||||
override_dh_installsystemd:
|
||||
dh_installsystemd -pproxmox-backup-server proxmox-backup-daily-update.timer
|
||||
|
||||
@ -46,11 +46,21 @@ user\@pbs!token@host:store ``user@pbs!token`` host:8007 store
|
||||
[ff80::51]:1234:mydatastore ``root@pam`` [ff80::51]:1234 mydatastore
|
||||
================================ ================== ================== ===========
|
||||
|
||||
.. Note:: If you are using the statically linked binary of proxmox backup client
|
||||
name resolution will not be performed via the mechanisms provided by libc,
|
||||
but uses a resolver written purely in the Rust programming language.
|
||||
Therefore, features and modules provided by Name Service Switch cannot be
|
||||
used.
|
||||
.. _statically_linked_client:
|
||||
|
||||
Statically Linked Backup Client
|
||||
-------------------------------
|
||||
|
||||
A statically linked version of the Proxmox Backup client is available for Linux
|
||||
based systems where the regular client is not available. Please note that it is
|
||||
recommended to use the regular client when possible, as the statically linked
|
||||
client is not a full replacement. For example, name resolution will not be
|
||||
performed via the mechanisms provided by libc, but uses a resolver written
|
||||
purely in the Rust programming language. Therefore, features and modules
|
||||
provided by Name Service Switch cannot be used.
|
||||
|
||||
The statically linked client is available via the ``pbs-client`` repository as
|
||||
described in the :ref:`installation <install_pbc>` section.
|
||||
|
||||
.. _environment-variables:
|
||||
|
||||
|
||||
@ -1031,13 +1031,15 @@ impl DataStore {
|
||||
Ok(list)
|
||||
}
|
||||
|
||||
// Similar to open index, but ignore index files with blob or unknown archive type.
|
||||
// Further, do not fail if file vanished.
|
||||
fn open_index_reader(&self, absolute_path: &Path) -> Result<Option<Box<dyn IndexFile>>, Error> {
|
||||
// Similar to open index, but return with Ok(None) if index file vanished.
|
||||
fn open_index_reader(
|
||||
&self,
|
||||
absolute_path: &Path,
|
||||
) -> Result<Option<Box<dyn IndexFile>>, Error> {
|
||||
let archive_type = match ArchiveType::from_path(absolute_path) {
|
||||
Ok(archive_type) => archive_type,
|
||||
// ignore archives with unknown archive type
|
||||
Err(_) => return Ok(None),
|
||||
Ok(ArchiveType::Blob) | Err(_) => bail!("unexpected archive type"),
|
||||
Ok(archive_type) => archive_type,
|
||||
};
|
||||
|
||||
if absolute_path.is_relative() {
|
||||
@ -1064,7 +1066,7 @@ impl DataStore {
|
||||
.with_context(|| format!("can't open dynamic index '{absolute_path:?}'"))?;
|
||||
Ok(Some(Box::new(reader)))
|
||||
}
|
||||
ArchiveType::Blob => Ok(None),
|
||||
ArchiveType::Blob => bail!("unexpected archive type blob"),
|
||||
}
|
||||
}
|
||||
|
||||
@ -1129,7 +1131,7 @@ impl DataStore {
|
||||
// the detected index files not following the iterators logic.
|
||||
|
||||
let mut unprocessed_index_list = self.list_index_files()?;
|
||||
let index_count = unprocessed_index_list.len();
|
||||
let mut index_count = unprocessed_index_list.len();
|
||||
|
||||
let mut chunk_lru_cache = LruCache::new(cache_capacity);
|
||||
let mut processed_index_files = 0;
|
||||
@ -1143,57 +1145,108 @@ impl DataStore {
|
||||
let namespace = namespace.context("iterating namespaces failed")?;
|
||||
for group in arc_self.iter_backup_groups(namespace)? {
|
||||
let group = group.context("iterating backup groups failed")?;
|
||||
let mut snapshots = group.list_backups().context("listing snapshots failed")?;
|
||||
// Sort by snapshot timestamp to iterate over consecutive snapshots for each image.
|
||||
BackupInfo::sort_list(&mut snapshots, true);
|
||||
for snapshot in snapshots {
|
||||
for file in snapshot.files {
|
||||
worker.check_abort()?;
|
||||
worker.fail_on_shutdown()?;
|
||||
|
||||
let mut path = snapshot.backup_dir.full_path();
|
||||
path.push(file);
|
||||
// Avoid race between listing/marking of snapshots by GC and pruning the last
|
||||
// snapshot in the group, following a new snapshot creation. Otherwise known chunks
|
||||
// might only be referenced by the new snapshot, so it must be read as well.
|
||||
let mut retry_counter = 0;
|
||||
'retry: loop {
|
||||
let _lock = match retry_counter {
|
||||
0..=9 => None,
|
||||
10 => Some(
|
||||
group
|
||||
.lock()
|
||||
.context("exhausted retries and failed to lock group")?,
|
||||
),
|
||||
_ => bail!("exhausted retries and unexpected counter overrun"),
|
||||
};
|
||||
|
||||
let index = match self.open_index_reader(&path)? {
|
||||
Some(index) => index,
|
||||
None => continue,
|
||||
};
|
||||
self.index_mark_used_chunks(
|
||||
index,
|
||||
&path,
|
||||
&mut chunk_lru_cache,
|
||||
status,
|
||||
worker,
|
||||
)?;
|
||||
|
||||
unprocessed_index_list.remove(&path);
|
||||
|
||||
let percentage = (processed_index_files + 1) * 100 / index_count;
|
||||
if percentage > last_percentage {
|
||||
info!(
|
||||
"marked {percentage}% ({} of {index_count} index files)",
|
||||
processed_index_files + 1,
|
||||
);
|
||||
last_percentage = percentage;
|
||||
let mut snapshots = match group.list_backups() {
|
||||
Ok(snapshots) => snapshots,
|
||||
Err(err) => {
|
||||
if group.exists() {
|
||||
return Err(err).context("listing snapshots failed")?;
|
||||
}
|
||||
break 'retry;
|
||||
}
|
||||
};
|
||||
|
||||
// Always start iteration with the last snapshot of the group to reduce race
|
||||
// window with concurrent backup+prune previous last snapshot. Allows to retry
|
||||
// without the need to keep track of already processed index files for the
|
||||
// current group.
|
||||
BackupInfo::sort_list(&mut snapshots, true);
|
||||
for (count, snapshot) in snapshots.into_iter().rev().enumerate() {
|
||||
for file in snapshot.files {
|
||||
worker.check_abort()?;
|
||||
worker.fail_on_shutdown()?;
|
||||
|
||||
match ArchiveType::from_path(&file) {
|
||||
Ok(ArchiveType::FixedIndex) | Ok(ArchiveType::DynamicIndex) => (),
|
||||
Ok(ArchiveType::Blob) | Err(_) => continue,
|
||||
}
|
||||
|
||||
let mut path = snapshot.backup_dir.full_path();
|
||||
path.push(file);
|
||||
|
||||
let index = match self.open_index_reader(&path)? {
|
||||
Some(index) => index,
|
||||
None => {
|
||||
unprocessed_index_list.remove(&path);
|
||||
if count == 0 {
|
||||
retry_counter += 1;
|
||||
continue 'retry;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
self.index_mark_used_chunks(
|
||||
index,
|
||||
&path,
|
||||
&mut chunk_lru_cache,
|
||||
status,
|
||||
worker,
|
||||
)?;
|
||||
|
||||
if !unprocessed_index_list.remove(&path) {
|
||||
info!("Encountered new index file '{path:?}', increment total index file count");
|
||||
index_count += 1;
|
||||
}
|
||||
|
||||
let percentage = (processed_index_files + 1) * 100 / index_count;
|
||||
if percentage > last_percentage {
|
||||
info!(
|
||||
"marked {percentage}% ({} of {index_count} index files)",
|
||||
processed_index_files + 1,
|
||||
);
|
||||
last_percentage = percentage;
|
||||
}
|
||||
processed_index_files += 1;
|
||||
}
|
||||
processed_index_files += 1;
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let strange_paths_count = unprocessed_index_list.len();
|
||||
if strange_paths_count > 0 {
|
||||
warn!("found {strange_paths_count} index files outside of expected directory scheme");
|
||||
}
|
||||
let mut strange_paths_count = unprocessed_index_list.len();
|
||||
for path in unprocessed_index_list {
|
||||
let index = match self.open_index_reader(&path)? {
|
||||
Some(index) => index,
|
||||
None => continue,
|
||||
None => {
|
||||
// do not count vanished (pruned) backup snapshots as strange paths.
|
||||
strange_paths_count -= 1;
|
||||
continue;
|
||||
}
|
||||
};
|
||||
self.index_mark_used_chunks(index, &path, &mut chunk_lru_cache, status, worker)?;
|
||||
warn!("Marked chunks for unexpected index file at '{path:?}'");
|
||||
}
|
||||
if strange_paths_count > 0 {
|
||||
warn!("Found {strange_paths_count} index files outside of expected directory scheme");
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@ -659,7 +659,8 @@ impl SgTape {
|
||||
pub fn wait_until_ready(&mut self, timeout: Option<u64>) -> Result<(), Error> {
|
||||
let start = SystemTime::now();
|
||||
let timeout = timeout.unwrap_or(Self::SCSI_TAPE_DEFAULT_TIMEOUT as u64);
|
||||
let max_wait = std::time::Duration::new(timeout, 0);
|
||||
let mut max_wait = std::time::Duration::new(timeout, 0);
|
||||
let mut increased_timeout = false;
|
||||
|
||||
loop {
|
||||
match self.test_unit_ready() {
|
||||
@ -667,6 +668,16 @@ impl SgTape {
|
||||
_ => {
|
||||
std::thread::sleep(std::time::Duration::new(1, 0));
|
||||
if start.elapsed()? > max_wait {
|
||||
if !increased_timeout {
|
||||
if let Ok(DeviceActivity::Calibrating) =
|
||||
read_device_activity(&mut self.file)
|
||||
{
|
||||
log::info!("Detected drive calibration, increasing timeout to 2 hours 5 minutes");
|
||||
max_wait = std::time::Duration::new(2 * 60 * 60 + 5 * 60, 0);
|
||||
increased_timeout = true;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
bail!("wait_until_ready failed - got timeout");
|
||||
}
|
||||
}
|
||||
|
||||
@ -853,8 +853,8 @@ fn download_previous(
|
||||
};
|
||||
if let Some(index) = index {
|
||||
env.log(format!(
|
||||
"register chunks in '{}' from previous backup.",
|
||||
archive_name
|
||||
"register chunks in '{archive_name}' from previous backup '{}'.",
|
||||
last_backup.backup_dir.dir(),
|
||||
));
|
||||
|
||||
for pos in 0..index.index_count() {
|
||||
@ -865,7 +865,10 @@ fn download_previous(
|
||||
}
|
||||
}
|
||||
|
||||
env.log(format!("download '{}' from previous backup.", archive_name));
|
||||
env.log(format!(
|
||||
"download '{archive_name}' from previous backup '{}'.",
|
||||
last_backup.backup_dir.dir(),
|
||||
));
|
||||
crate::api2::helpers::create_download_response(path).await
|
||||
}
|
||||
.boxed()
|
||||
|
||||
@ -174,6 +174,11 @@ pub enum Translation {
|
||||
"description" : {
|
||||
optional: true,
|
||||
schema: MULTI_LINE_COMMENT_SCHEMA,
|
||||
},
|
||||
"consent-text" : {
|
||||
optional: true,
|
||||
type: String,
|
||||
max_length: 64 * 1024,
|
||||
}
|
||||
},
|
||||
)]
|
||||
|
||||
@ -480,7 +480,7 @@ impl SyncSource for LocalSource {
|
||||
) -> Result<Arc<dyn SyncSourceReader>, Error> {
|
||||
let dir = self.store.backup_dir(ns.clone(), dir.clone())?;
|
||||
let guard = dir
|
||||
.lock()
|
||||
.lock_shared()
|
||||
.with_context(|| format!("while reading snapshot '{dir:?}' for a sync job"))?;
|
||||
Ok(Arc::new(LocalSourceReader {
|
||||
_dir_lock: Arc::new(Mutex::new(guard)),
|
||||
|
||||
@ -59,6 +59,9 @@ Ext.define('PBS.NodeOptionView', {
|
||||
name: 'consent-text',
|
||||
text: gettext('Consent Text'),
|
||||
deleteEmpty: true,
|
||||
fieldOpts: {
|
||||
maxLength: 64 * 1024,
|
||||
},
|
||||
onlineHelp: 'consent_banner',
|
||||
},
|
||||
],
|
||||
|
||||
@ -193,7 +193,7 @@ Ext.define('PBS.config.TokenView', {
|
||||
handler: 'regenerateToken',
|
||||
dangerous: true,
|
||||
confirmMsg: rec => Ext.String.format(
|
||||
gettext("Regenerate the secret of the API token '{0}'? All current use-sites will loose access!"),
|
||||
gettext("Regenerate the secret of the API token '{0}'? All users of the previous token secret will lose access!"),
|
||||
rec.data.tokenid,
|
||||
),
|
||||
},
|
||||
|
||||
Loading…
Reference in New Issue
Block a user