Mirror of https://github.com/openzfs/zfs.git
Synced 2025-10-01 11:17:22 +00:00

Compare commits: 148 commits, master ... zfs-2.3.0-
.cirrus.yml (new file, 21 lines)
@ -0,0 +1,21 @@
env:
  CIRRUS_CLONE_DEPTH: 1
  ARCH: amd64

build_task:
  matrix:
    freebsd_instance:
      image_family: freebsd-12-4
    freebsd_instance:
      image_family: freebsd-13-2
    freebsd_instance:
      image_family: freebsd-14-0-snap
  prepare_script:
    - pkg install -y autoconf automake libtool gettext-runtime gmake ksh93 py39-packaging py39-cffi py39-sysctl
  configure_script:
    - env MAKE=gmake ./autogen.sh
    - env MAKE=gmake ./configure --with-config="user" --with-python=3.9
  build_script:
    - gmake -j `sysctl -n kern.smp.cpus`
  install_script:
    - gmake install
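For reference, the build task above amounts to the following manual steps on a FreeBSD host. This is only a sketch assembled from the config itself (the package list and configure flags are copied from .cirrus.yml), not an additional CI step:

    # Install the build dependencies listed in .cirrus.yml
    pkg install -y autoconf automake libtool gettext-runtime gmake ksh93 \
        py39-packaging py39-cffi py39-sysctl
    # Configure and build the userspace tools, as the Cirrus task does
    env MAKE=gmake ./autogen.sh
    env MAKE=gmake ./configure --with-config="user" --with-python=3.9
    gmake -j "$(sysctl -n kern.smp.cpus)"
    gmake install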
.github/PULL_REQUEST_TEMPLATE.md (vendored, 6 lines changed)
@ -2,6 +2,11 @@

<!--- Provide a general summary of your changes in the Title above -->

<!---
Documentation on ZFS Buildbot options can be found at
https://openzfs.github.io/openzfs-docs/Developer%20Resources/Buildbot%20Options.html
-->

### Motivation and Context
<!--- Why is this change required? What problem does it solve? -->
<!--- If it fixes an open issue, please link to the issue here. -->
@ -22,7 +27,6 @@
- [ ] New feature (non-breaking change which adds functionality)
- [ ] Performance enhancement (non-breaking change which improves efficiency)
- [ ] Code cleanup (non-breaking change which makes code smaller or more readable)
- [ ] Quality assurance (non-breaking change which makes the code more robust against bugs)
- [ ] Breaking change (fix or feature that would cause existing functionality to change)
- [ ] Library ABI change (libzfs, libzfs\_core, libnvpair, libuutil and libzfsbootenv)
- [ ] Documentation (a change to man pages or other documentation)
.github/codeql-cpp.yml (vendored, 1 line changed)
@ -2,4 +2,3 @@ name: "Custom CodeQL Analysis"

queries:
  - uses: ./.github/codeql/custom-queries/cpp/deprecatedFunctionUsage.ql
  - uses: ./.github/codeql/custom-queries/cpp/dslDatasetHoldReleMismatch.ql
.github/codeql/custom-queries/cpp/dslDatasetHoldReleMismatch.ql (deleted file)
@ -1,34 +0,0 @@
/**
 * @name Detect mismatched dsl_dataset_hold/_rele pairs
 * @description Flags instances of issue #12014 where
 *              - a dataset held with dsl_dataset_hold_obj() ends up in dsl_dataset_rele_flags(), or
 *              - a dataset held with dsl_dataset_hold_obj_flags() ends up in dsl_dataset_rele().
 * @kind problem
 * @severity error
 * @tags correctness
 * @id cpp/dslDatasetHoldReleMismatch
 */

import cpp

from Variable ds, Call holdCall, Call releCall, string message
where
  ds.getType().toString() = "dsl_dataset_t *" and
  holdCall.getASuccessor*() = releCall and
  (
    (holdCall.getTarget().getName() = "dsl_dataset_hold_obj_flags" and
     holdCall.getArgument(4).(AddressOfExpr).getOperand().(VariableAccess).getTarget() = ds and
     releCall.getTarget().getName() = "dsl_dataset_rele" and
     releCall.getArgument(0).(VariableAccess).getTarget() = ds and
     message = "Held with dsl_dataset_hold_obj_flags but released with dsl_dataset_rele")
    or
    (holdCall.getTarget().getName() = "dsl_dataset_hold_obj" and
     holdCall.getArgument(3).(AddressOfExpr).getOperand().(VariableAccess).getTarget() = ds and
     releCall.getTarget().getName() = "dsl_dataset_rele_flags" and
     releCall.getArgument(0).(VariableAccess).getTarget() = ds and
     message = "Held with dsl_dataset_hold_obj but released with dsl_dataset_rele_flags")
  )
select releCall,
  "Mismatched release: held with $@ but released with " + releCall.getTarget().getName() + " for dataset $@",
  holdCall, holdCall.getTarget().getName(),
  ds, ds.toString()
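For context, a query like the one above is run against a CodeQL database built from a full compile. A minimal sketch using the CodeQL CLI, assuming the CLI and its C/C++ extractor are installed; the database directory and output file names are placeholders:

    # Build a CodeQL database while compiling ZFS, then run the custom query.
    codeql database create zfs-codeql-db --language=cpp --command="make -j$(nproc)"
    codeql database analyze zfs-codeql-db \
        .github/codeql/custom-queries/cpp/dslDatasetHoldReleMismatch.ql \
        --format=csv --output=hold-rele-mismatches.csv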
.github/workflows/checkstyle.yaml (vendored, 2 lines changed)
@ -19,7 +19,7 @@ jobs:
      run: |
        # for x in lxd core20 snapd; do sudo snap remove $x; done
        sudo apt-get purge -y snapd google-chrome-stable firefox
        ONLY_DEPS=1 .github/workflows/scripts/qemu-3-deps-vm.sh ubuntu22
        ONLY_DEPS=1 .github/workflows/scripts/qemu-3-deps.sh ubuntu22
        sudo apt-get install -y cppcheck devscripts mandoc pax-utils shellcheck
        sudo python -m pipx install --quiet flake8
        # confirm that the tools are installed
.github/workflows/labels.yml (vendored, deleted file, 49 lines)
@ -1,49 +0,0 @@
name: labels

on:
  pull_request_target:
    types: [ opened, synchronize, reopened, converted_to_draft, ready_for_review ]

permissions:
  pull-requests: write

jobs:
  open:
    runs-on: ubuntu-latest
    if: ${{ github.event.action == 'opened' && github.event.pull_request.draft }}
    steps:
      - env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          ISSUE: ${{ github.event.pull_request.html_url }}
        run: |
          gh pr edit $ISSUE --add-label "Status: Work in Progress"

  push:
    runs-on: ubuntu-latest
    if: ${{ github.event.action == 'synchronize' || github.event.action == 'reopened' }}
    steps:
      - env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          ISSUE: ${{ github.event.pull_request.html_url }}
        run: |
          gh pr edit $ISSUE --remove-label "Status: Accepted,Status: Inactive,Status: Revision Needed,Status: Stale"

  draft:
    runs-on: ubuntu-latest
    if: ${{ github.event.action == 'converted_to_draft' }}
    steps:
      - env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          ISSUE: ${{ github.event.pull_request.html_url }}
        run: |
          gh pr edit $ISSUE --remove-label "Status: Accepted,Status: Code Review Needed,Status: Inactive,Status: Revision Needed,Status: Stale" --add-label "Status: Work in Progress"

  rfr:
    runs-on: ubuntu-latest
    if: ${{ github.event.action == 'ready_for_review' }}
    steps:
      - env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          ISSUE: ${{ github.event.pull_request.html_url }}
        run: |
          gh pr edit $ISSUE --remove-label "Status: Accepted,Status: Inactive,Status: Revision Needed,Status: Stale,Status: Work in Progress" --add-label "Status: Code Review Needed"
@ -29,7 +29,6 @@ FULL_RUN_IGNORE_REGEX = list(map(re.compile, [
Patterns of files that are considered to trigger full CI.
"""
FULL_RUN_REGEX = list(map(re.compile, [
    r'\.github/workflows/scripts/.*',
    r'cmd.*',
    r'configs/.*',
    r'META',
@ -65,7 +64,7 @@ if __name__ == '__main__':

    # check last (HEAD) commit message
    last_commit_message_raw = subprocess.run([
        'git', 'show', '-s', '--format=%B', head
        'git', 'show', '-s', '--format=%B', 'HEAD'
    ], check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    for line in last_commit_message_raw.stdout.decode().splitlines():
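The second hunk passes the literal ref 'HEAD' to git rather than a variable, so the script always inspects the message of the checked-out commit. The equivalent shell command, shown only to illustrate what the script then scans line by line:

    # Print the body of the HEAD commit message (the text the script parses).
    git show -s --format=%B HEAD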
.github/workflows/scripts/qemu-1-setup.sh (vendored, 54 lines changed)
@ -6,23 +6,40 @@

set -eu

# We've been seeing this script take over 15min to run. This may or
# may not be normal. Just to get a little more insight, print out
# a message to stdout with the top running process, and do this every
# 30 seconds. We can delete this watchdog later once we get a better
# handle on what the timeout value should be.
(while [ 1 ] ; do sleep 30 && echo "[watchdog: $(ps -eo cmd --sort=-pcpu | head -n 2 | tail -n 1)}')]"; done) &

# install needed packages
export DEBIAN_FRONTEND="noninteractive"
sudo apt-get -y update
sudo apt-get install -y axel cloud-image-utils daemonize guestfs-tools \
  virt-manager linux-modules-extra-$(uname -r) zfsutils-linux
  ksmtuned virt-manager linux-modules-extra-$(uname -r) zfsutils-linux

# generate ssh keys
rm -f ~/.ssh/id_ed25519
ssh-keygen -t ed25519 -f ~/.ssh/id_ed25519 -q -N ""

# we expect RAM shortage
cat << EOF | sudo tee /etc/ksmtuned.conf > /dev/null
# /etc/ksmtuned.conf - Configuration file for ksmtuned
# https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/7/html/virtualization_tuning_and_optimization_guide/chap-ksm
KSM_MONITOR_INTERVAL=60

# Millisecond sleep between ksm scans for 16Gb server.
# Smaller servers sleep more, bigger sleep less.
KSM_SLEEP_MSEC=30

KSM_NPAGES_BOOST=0
KSM_NPAGES_DECAY=0
KSM_NPAGES_MIN=1000
KSM_NPAGES_MAX=25000

KSM_THRES_COEF=80
KSM_THRES_CONST=8192

LOGFILE=/var/log/ksmtuned.log
DEBUG=1
EOF
sudo systemctl restart ksm
sudo systemctl restart ksmtuned

# not needed
sudo systemctl stop docker.socket
sudo systemctl stop multipathd.socket
@ -48,14 +65,16 @@ $DISK
sync
sleep 1

# swap with same size as RAM (16GiB)
# swap with same size as RAM
sudo mkswap $DISK-part1
sudo swapon $DISK-part1

# JBOD 2xdisk for OpenZFS storage (test vm's)
# 60GB data disk
SSD1="$DISK-part2"
sudo fallocate -l 12G /test.ssd2
SSD2=$(sudo losetup -b 4096 -f /test.ssd2 --show)

# 10GB data disk on ext4
sudo fallocate -l 10G /test.ssd1
SSD2=$(sudo losetup -b 4096 -f /test.ssd1 --show)

# adjust zfs module parameter and create pool
exec 1>/dev/null
@ -64,14 +83,11 @@ ARC_MAX=$((1024*1024*512))
echo $ARC_MIN | sudo tee /sys/module/zfs/parameters/zfs_arc_min
echo $ARC_MAX | sudo tee /sys/module/zfs/parameters/zfs_arc_max
echo 1 | sudo tee /sys/module/zfs/parameters/zvol_use_blk_mq
sudo zpool create -f -o ashift=12 zpool $SSD1 $SSD2 -O relatime=off \
  -O atime=off -O xattr=sa -O compression=lz4 -O sync=disabled \
  -O redundant_metadata=none -O mountpoint=/mnt/tests
sudo zpool create -f -o ashift=12 zpool $SSD1 $SSD2 \
  -O relatime=off -O atime=off -O xattr=sa -O compression=lz4 \
  -O mountpoint=/mnt/tests

# no need for some scheduler
for i in /sys/block/s*/queue/scheduler; do
  echo "none" | sudo tee $i
  echo "none" | sudo tee $i > /dev/null
done

# Kill off our watchdog
kill $(jobs -p)
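A few sanity checks that can be run after this setup script finishes; the commands are standard tools and sysfs paths, listed here only as an illustrative sketch:

    swapon --show                                  # swap on $DISK-part1 is active
    grep -H . /sys/kernel/mm/ksm/pages_shared \
              /sys/kernel/mm/ksm/pages_sharing     # KSM is actually merging pages
    cat /sys/module/zfs/parameters/zfs_arc_max     # ARC limit was applied
    zpool status zpool                             # the "zpool" pool is online
    zfs get mountpoint zpool                       # datasets land under /mnt/tests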
199
.github/workflows/scripts/qemu-2-start.sh
vendored
199
.github/workflows/scripts/qemu-2-start.sh
vendored
@ -12,23 +12,19 @@ OS="$1"
|
||||
# OS variant (virt-install --os-variant list)
|
||||
OSv=$OS
|
||||
|
||||
# FreeBSD urls's
|
||||
FREEBSD_REL="https://download.freebsd.org/releases/CI-IMAGES"
|
||||
FREEBSD_SNAP="https://download.freebsd.org/snapshots/CI-IMAGES"
|
||||
URLxz=""
|
||||
# compressed with .zst extension
|
||||
REPO="https://github.com/mcmilk/openzfs-freebsd-images"
|
||||
FREEBSD="$REPO/releases/download/v2024-12-14"
|
||||
URLzs=""
|
||||
|
||||
# Ubuntu mirrors
|
||||
UBMIRROR="https://cloud-images.ubuntu.com"
|
||||
#UBMIRROR="https://cloud-images.ubuntu.com"
|
||||
#UBMIRROR="https://mirrors.cloud.tencent.com/ubuntu-cloud-images"
|
||||
#UBMIRROR="https://mirror.citrahost.com/ubuntu-cloud-images"
|
||||
UBMIRROR="https://mirror.citrahost.com/ubuntu-cloud-images"
|
||||
|
||||
# default nic model for vm's
|
||||
NIC="virtio"
|
||||
|
||||
# additional options for virt-install
|
||||
OPTS[0]=""
|
||||
OPTS[1]=""
|
||||
|
||||
case "$OS" in
|
||||
almalinux8)
|
||||
OSNAME="AlmaLinux 8"
|
||||
@ -38,14 +34,11 @@ case "$OS" in
|
||||
OSNAME="AlmaLinux 9"
|
||||
URL="https://repo.almalinux.org/almalinux/9/cloud/x86_64/images/AlmaLinux-9-GenericCloud-latest.x86_64.qcow2"
|
||||
;;
|
||||
almalinux10)
|
||||
OSNAME="AlmaLinux 10"
|
||||
OSv="almalinux9"
|
||||
URL="https://repo.almalinux.org/almalinux/10/cloud/x86_64/images/AlmaLinux-10-GenericCloud-latest.x86_64.qcow2"
|
||||
;;
|
||||
archlinux)
|
||||
OSNAME="Archlinux"
|
||||
URL="https://geo.mirror.pkgbuild.com/images/latest/Arch-Linux-x86_64-cloudimg.qcow2"
|
||||
# dns sometimes fails with that url :/
|
||||
echo "89.187.191.12 geo.mirror.pkgbuild.com" | sudo tee /etc/hosts > /dev/null
|
||||
;;
|
||||
centos-stream10)
|
||||
OSNAME="CentOS Stream 10"
|
||||
@ -65,67 +58,60 @@ case "$OS" in
|
||||
OSNAME="Debian 12"
|
||||
URL="https://cloud.debian.org/images/cloud/bookworm/latest/debian-12-generic-amd64.qcow2"
|
||||
;;
|
||||
debian13)
|
||||
OSNAME="Debian 13"
|
||||
# TODO: Overwrite OSv to debian13 for virt-install until it's added to osinfo
|
||||
OSv="debian12"
|
||||
URL="https://cloud.debian.org/images/cloud/trixie/latest/debian-13-generic-amd64.qcow2"
|
||||
OPTS[0]="--boot"
|
||||
OPTS[1]="uefi=on"
|
||||
fedora40)
|
||||
OSNAME="Fedora 40"
|
||||
OSv="fedora-unknown"
|
||||
URL="https://download.fedoraproject.org/pub/fedora/linux/releases/40/Cloud/x86_64/images/Fedora-Cloud-Base-Generic.x86_64-40-1.14.qcow2"
|
||||
;;
|
||||
fedora41)
|
||||
OSNAME="Fedora 41"
|
||||
OSv="fedora-unknown"
|
||||
URL="https://download.fedoraproject.org/pub/fedora/linux/releases/41/Cloud/x86_64/images/Fedora-Cloud-Base-Generic-41-1.4.x86_64.qcow2"
|
||||
;;
|
||||
fedora42)
|
||||
OSNAME="Fedora 42"
|
||||
OSv="fedora-unknown"
|
||||
URL="https://download.fedoraproject.org/pub/fedora/linux/releases/42/Cloud/x86_64/images/Fedora-Cloud-Base-Generic-42-1.1.x86_64.qcow2"
|
||||
;;
|
||||
freebsd13-5r)
|
||||
FreeBSD="13.5-RELEASE"
|
||||
OSNAME="FreeBSD $FreeBSD"
|
||||
freebsd13-3r)
|
||||
OSNAME="FreeBSD 13.3-RELEASE"
|
||||
OSv="freebsd13.0"
|
||||
URLxz="$FREEBSD_REL/$FreeBSD/amd64/Latest/FreeBSD-$FreeBSD-amd64-BASIC-CI.raw.xz"
|
||||
KSRC="$FREEBSD_REL/../amd64/$FreeBSD/src.txz"
|
||||
URLzs="$FREEBSD/amd64-freebsd-13.3-RELEASE.qcow2.zst"
|
||||
BASH="/usr/local/bin/bash"
|
||||
NIC="rtl8139"
|
||||
;;
|
||||
freebsd13-4r)
|
||||
OSNAME="FreeBSD 13.4-RELEASE"
|
||||
OSv="freebsd13.0"
|
||||
URLzs="$FREEBSD/amd64-freebsd-13.4-RELEASE.qcow2.zst"
|
||||
BASH="/usr/local/bin/bash"
|
||||
NIC="rtl8139"
|
||||
;;
|
||||
freebsd14-1r)
|
||||
OSNAME="FreeBSD 14.1-RELEASE"
|
||||
OSv="freebsd14.0"
|
||||
URLzs="$FREEBSD/amd64-freebsd-14.1-RELEASE.qcow2.zst"
|
||||
BASH="/usr/local/bin/bash"
|
||||
;;
|
||||
freebsd14-2r)
|
||||
FreeBSD="14.2-RELEASE"
|
||||
OSNAME="FreeBSD $FreeBSD"
|
||||
OSNAME="FreeBSD 14.2-RELEASE"
|
||||
OSv="freebsd14.0"
|
||||
KSRC="$FREEBSD_REL/../amd64/$FreeBSD/src.txz"
|
||||
URLxz="$FREEBSD_REL/$FreeBSD/amd64/Latest/FreeBSD-$FreeBSD-amd64-BASIC-CI.raw.xz"
|
||||
URLzs="$FREEBSD/amd64-freebsd-14.2-RELEASE.qcow2.zst"
|
||||
BASH="/usr/local/bin/bash"
|
||||
;;
|
||||
freebsd14-3r)
|
||||
FreeBSD="14.3-RELEASE"
|
||||
OSNAME="FreeBSD $FreeBSD"
|
||||
OSv="freebsd14.0"
|
||||
URLxz="$FREEBSD_REL/$FreeBSD/amd64/Latest/FreeBSD-$FreeBSD-amd64-BASIC-CI.raw.xz"
|
||||
KSRC="$FREEBSD_REL/../amd64/$FreeBSD/src.txz"
|
||||
;;
|
||||
freebsd13-5s)
|
||||
FreeBSD="13.5-STABLE"
|
||||
OSNAME="FreeBSD $FreeBSD"
|
||||
freebsd13-4s)
|
||||
OSNAME="FreeBSD 13.4-STABLE"
|
||||
OSv="freebsd13.0"
|
||||
URLxz="$FREEBSD_SNAP/$FreeBSD/amd64/Latest/FreeBSD-$FreeBSD-amd64-BASIC-CI.raw.xz"
|
||||
KSRC="$FREEBSD_SNAP/../amd64/$FreeBSD/src.txz"
|
||||
URLzs="$FREEBSD/amd64-freebsd-13.4-STABLE.qcow2.zst"
|
||||
BASH="/usr/local/bin/bash"
|
||||
NIC="rtl8139"
|
||||
;;
|
||||
freebsd14-3s)
|
||||
FreeBSD="14.3-STABLE"
|
||||
OSNAME="FreeBSD $FreeBSD"
|
||||
freebsd14-2s)
|
||||
OSNAME="FreeBSD 14.2-STABLE"
|
||||
OSv="freebsd14.0"
|
||||
URLxz="$FREEBSD_SNAP/$FreeBSD/amd64/Latest/FreeBSD-$FreeBSD-amd64-BASIC-CI-ufs.raw.xz"
|
||||
KSRC="$FREEBSD_SNAP/../amd64/$FreeBSD/src.txz"
|
||||
URLzs="$FREEBSD/amd64-freebsd-14.2-STABLE.qcow2.zst"
|
||||
BASH="/usr/local/bin/bash"
|
||||
;;
|
||||
freebsd15-0c)
|
||||
FreeBSD="15.0-ALPHA3"
|
||||
OSNAME="FreeBSD $FreeBSD"
|
||||
OSNAME="FreeBSD 15.0-CURRENT"
|
||||
OSv="freebsd14.0"
|
||||
URLxz="$FREEBSD_SNAP/$FreeBSD/amd64/Latest/FreeBSD-$FreeBSD-amd64-BASIC-CI-ufs.raw.xz"
|
||||
KSRC="$FREEBSD_SNAP/../amd64/$FreeBSD/src.txz"
|
||||
URLzs="$FREEBSD/amd64-freebsd-15.0-CURRENT.qcow2.zst"
|
||||
BASH="/usr/local/bin/bash"
|
||||
;;
|
||||
tumbleweed)
|
||||
OSNAME="openSUSE Tumbleweed"
|
||||
@ -133,6 +119,11 @@ case "$OS" in
|
||||
MIRROR="http://opensuse-mirror-gce-us.susecloud.net"
|
||||
URL="$MIRROR/tumbleweed/appliances/openSUSE-MicroOS.x86_64-OpenStack-Cloud.qcow2"
|
||||
;;
|
||||
ubuntu20)
|
||||
OSNAME="Ubuntu 20.04"
|
||||
OSv="ubuntu20.04"
|
||||
URL="$UBMIRROR/focal/current/focal-server-cloudimg-amd64.img"
|
||||
;;
|
||||
ubuntu22)
|
||||
OSNAME="Ubuntu 22.04"
|
||||
OSv="ubuntu22.04"
|
||||
@ -156,7 +147,7 @@ echo "ENV=$ENV" >> $ENV
|
||||
# result path
|
||||
echo 'RESPATH="/var/tmp/test_results"' >> $ENV
|
||||
|
||||
# FreeBSD 13 has problems with: e1000 and virtio
|
||||
# FreeBSD 13 has problems with: e1000+virtio
|
||||
echo "NIC=$NIC" >> $ENV
|
||||
|
||||
# freebsd15 -> used in zfs-qemu.yml
|
||||
@ -168,48 +159,34 @@ echo "OSv=\"$OSv\"" >> $ENV
|
||||
# FreeBSD 15 (Current) -> used for summary
|
||||
echo "OSNAME=\"$OSNAME\"" >> $ENV
|
||||
|
||||
# default vm count for testings
|
||||
VMs=2
|
||||
echo "VMs=\"$VMs\"" >> $ENV
|
||||
|
||||
# default cpu count for testing vm's
|
||||
CPU=2
|
||||
echo "CPU=\"$CPU\"" >> $ENV
|
||||
|
||||
sudo mkdir -p "/mnt/tests"
|
||||
sudo chown -R $(whoami) /mnt/tests
|
||||
|
||||
DISK="/dev/zvol/zpool/openzfs"
|
||||
sudo zfs create -ps -b 64k -V 80g zpool/openzfs
|
||||
while true; do test -b $DISK && break; sleep 1; done
|
||||
|
||||
# we are downloading via axel, curl and wget are mostly slower and
|
||||
# require more return value checking
|
||||
IMG="/mnt/tests/cloud-image"
|
||||
if [ ! -z "$URLxz" ]; then
|
||||
echo "Loading $URLxz ..."
|
||||
time axel -q -o "$IMG" "$URLxz"
|
||||
echo "Loading $KSRC ..."
|
||||
time axel -q -o ~/src.txz $KSRC
|
||||
IMG="/mnt/tests/cloudimg.qcow2"
|
||||
if [ ! -z "$URLzs" ]; then
|
||||
echo "Loading image $URLzs ..."
|
||||
time axel -q -o "$IMG.zst" "$URLzs"
|
||||
zstd -q -d --rm "$IMG.zst"
|
||||
else
|
||||
echo "Loading $URL ..."
|
||||
echo "Loading image $URL ..."
|
||||
time axel -q -o "$IMG" "$URL"
|
||||
fi
|
||||
|
||||
DISK="/dev/zvol/zpool/openzfs"
|
||||
FORMAT="raw"
|
||||
sudo zfs create -ps -b 64k -V 80g zpool/openzfs
|
||||
while true; do test -b $DISK && break; sleep 1; done
|
||||
echo "Importing VM image to zvol..."
|
||||
if [ ! -z "$URLxz" ]; then
|
||||
xzcat -T0 $IMG | sudo dd of=$DISK bs=4M
|
||||
else
|
||||
sudo qemu-img dd -f qcow2 -O raw if=$IMG of=$DISK bs=4M
|
||||
fi
|
||||
sudo qemu-img dd -f qcow2 -O raw if=$IMG of=$DISK bs=4M
|
||||
rm -f $IMG
|
||||
|
||||
PUBKEY=$(cat ~/.ssh/id_ed25519.pub)
|
||||
if [ ${OS:0:7} != "freebsd" ]; then
|
||||
cat <<EOF > /tmp/user-data
|
||||
cat <<EOF > /tmp/user-data
|
||||
#cloud-config
|
||||
|
||||
hostname: $OS
|
||||
fqdn: $OS
|
||||
|
||||
users:
|
||||
- name: root
|
||||
@ -225,19 +202,6 @@ growpart:
|
||||
devices: ['/']
|
||||
ignore_growroot_disabled: false
|
||||
EOF
|
||||
else
|
||||
cat <<EOF > /tmp/user-data
|
||||
#cloud-config
|
||||
|
||||
hostname: $OS
|
||||
|
||||
# minimized config without sudo for nuageinit of FreeBSD
|
||||
growpart:
|
||||
mode: auto
|
||||
devices: ['/']
|
||||
ignore_growroot_disabled: false
|
||||
EOF
|
||||
fi
|
||||
|
||||
sudo virsh net-update default add ip-dhcp-host \
|
||||
"<host mac='52:54:00:83:79:00' ip='192.168.122.10'/>" --live --config
|
||||
@ -253,17 +217,8 @@ sudo virt-install \
|
||||
--graphics none \
|
||||
--network bridge=virbr0,model=$NIC,mac='52:54:00:83:79:00' \
|
||||
--cloud-init user-data=/tmp/user-data \
|
||||
--disk $DISK,bus=virtio,cache=none,format=raw,driver.discard=unmap \
|
||||
--import --noautoconsole ${OPTS[0]} ${OPTS[1]} >/dev/null
|
||||
|
||||
# Give the VMs hostnames so we don't have to refer to them with
|
||||
# hardcoded IP addresses.
|
||||
#
|
||||
# vm0: Initial VM we install dependencies and build ZFS on.
|
||||
# vm1..2 Testing VMs
|
||||
for ((i=0; i<=VMs; i++)); do
|
||||
echo "192.168.122.1$i vm$i" | sudo tee -a /etc/hosts
|
||||
done
|
||||
--disk $DISK,bus=virtio,cache=none,format=$FORMAT,driver.discard=unmap \
|
||||
--import --noautoconsole >/dev/null
|
||||
|
||||
# in case the directory isn't there already
|
||||
mkdir -p $HOME/.ssh
|
||||
@ -275,29 +230,3 @@ StrictHostKeyChecking no
|
||||
# small timeout, used in while loops later
|
||||
ConnectTimeout 1
|
||||
EOF
|
||||
|
||||
if [ ${OS:0:7} != "freebsd" ]; then
|
||||
# enable KSM on Linux
|
||||
sudo virsh dommemstat --domain "openzfs" --period 5
|
||||
sudo virsh node-memory-tune 100 50 1
|
||||
echo 1 | sudo tee /sys/kernel/mm/ksm/run > /dev/null
|
||||
else
|
||||
# on FreeBSD we need some more init stuff, because of nuageinit
|
||||
BASH="/usr/local/bin/bash"
|
||||
while pidof /usr/bin/qemu-system-x86_64 >/dev/null; do
|
||||
ssh 2>/dev/null root@vm0 "uname -a" && break
|
||||
done
|
||||
ssh root@vm0 "pkg install -y bash ca_root_nss git qemu-guest-agent python3 py311-cloud-init"
|
||||
ssh root@vm0 "chsh -s $BASH root"
|
||||
ssh root@vm0 'sysrc qemu_guest_agent_enable="YES"'
|
||||
ssh root@vm0 'sysrc cloudinit_enable="YES"'
|
||||
ssh root@vm0 "pw add user zfs -w no -s $BASH"
|
||||
ssh root@vm0 'mkdir -p ~zfs/.ssh'
|
||||
ssh root@vm0 'echo "zfs ALL=(ALL:ALL) NOPASSWD: ALL" >> /usr/local/etc/sudoers'
|
||||
ssh root@vm0 'echo "PubkeyAuthentication yes" >> /etc/ssh/sshd_config'
|
||||
scp ~/.ssh/id_ed25519.pub "root@vm0:~zfs/.ssh/authorized_keys"
|
||||
ssh root@vm0 'chown -R zfs ~zfs'
|
||||
ssh root@vm0 'service sshd restart'
|
||||
scp ~/src.txz "root@vm0:/tmp/src.txz"
|
||||
ssh root@vm0 'tar -C / -zxf /tmp/src.txz'
|
||||
fi
|
||||
|
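Once virt-install returns, the VM still needs time to boot and run cloud-init. A small sketch of how one might wait for vm0 and confirm the host entries the script adds; the user name and vm0..vm2 hostnames come from the script itself, while the loop is only illustrative:

    # Block until vm0 answers SSH, then list the vm0..vm2 host entries.
    until ssh -o ConnectTimeout=1 zfs@vm0 'uname -a' 2>/dev/null; do
        sleep 5
    done
    for i in 0 1 2; do
        getent hosts "vm$i"
    done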
262
.github/workflows/scripts/qemu-3-deps-vm.sh
vendored
262
.github/workflows/scripts/qemu-3-deps-vm.sh
vendored
@ -1,262 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
######################################################################
|
||||
# 3) install dependencies for compiling and loading
|
||||
#
|
||||
# $1: OS name (like 'fedora41')
|
||||
# $2: (optional) Experimental Fedora kernel version, like "6.14" to
|
||||
# install instead of Fedora defaults.
|
||||
######################################################################
|
||||
|
||||
set -eu
|
||||
|
||||
function archlinux() {
|
||||
echo "##[group]Running pacman -Syu"
|
||||
sudo btrfs filesystem resize max /
|
||||
sudo pacman -Syu --noconfirm
|
||||
echo "##[endgroup]"
|
||||
|
||||
echo "##[group]Install Development Tools"
|
||||
sudo pacman -Sy --noconfirm base-devel bc cpio cryptsetup dhclient dkms \
|
||||
fakeroot fio gdb inetutils jq less linux linux-headers lsscsi nfs-utils \
|
||||
parted pax perf python-packaging python-setuptools qemu-guest-agent ksh \
|
||||
samba strace sysstat rng-tools rsync wget xxhash
|
||||
echo "##[endgroup]"
|
||||
}
|
||||
|
||||
function debian() {
|
||||
export DEBIAN_FRONTEND="noninteractive"
|
||||
|
||||
echo "##[group]Running apt-get update+upgrade"
|
||||
sudo sed -i '/[[:alpha:]]-backports/d' /etc/apt/sources.list
|
||||
sudo apt-get update -y
|
||||
sudo apt-get upgrade -y
|
||||
echo "##[endgroup]"
|
||||
|
||||
echo "##[group]Install Development Tools"
|
||||
sudo apt-get install -y \
|
||||
acl alien attr autoconf bc cpio cryptsetup curl dbench dh-python dkms \
|
||||
fakeroot fio gdb gdebi git ksh lcov isc-dhcp-client jq libacl1-dev \
|
||||
libaio-dev libattr1-dev libblkid-dev libcurl4-openssl-dev libdevmapper-dev \
|
||||
libelf-dev libffi-dev libmount-dev libpam0g-dev libselinux-dev libssl-dev \
|
||||
libtool libtool-bin libudev-dev libunwind-dev linux-headers-$(uname -r) \
|
||||
lsscsi nfs-kernel-server pamtester parted python3 python3-all-dev \
|
||||
python3-cffi python3-dev python3-distlib python3-packaging libtirpc-dev \
|
||||
python3-setuptools python3-sphinx qemu-guest-agent rng-tools rpm2cpio \
|
||||
rsync samba strace sysstat uuid-dev watchdog wget xfslibs-dev xxhash \
|
||||
zlib1g-dev
|
||||
echo "##[endgroup]"
|
||||
}
|
||||
|
||||
function freebsd() {
|
||||
export ASSUME_ALWAYS_YES="YES"
|
||||
|
||||
echo "##[group]Install Development Tools"
|
||||
sudo pkg install -y autoconf automake autotools base64 checkbashisms fio \
|
||||
gdb gettext gettext-runtime git gmake gsed jq ksh lcov libtool lscpu \
|
||||
pkgconf python python3 pamtester pamtester qemu-guest-agent rsync xxhash
|
||||
sudo pkg install -xy \
|
||||
'^samba4[[:digit:]]+$' \
|
||||
'^py3[[:digit:]]+-cffi$' \
|
||||
'^py3[[:digit:]]+-sysctl$' \
|
||||
'^py3[[:digit:]]+-setuptools$' \
|
||||
'^py3[[:digit:]]+-packaging$'
|
||||
echo "##[endgroup]"
|
||||
}
|
||||
|
||||
# common packages for: almalinux, centos, redhat
|
||||
function rhel() {
|
||||
echo "##[group]Running dnf update"
|
||||
echo "max_parallel_downloads=10" | sudo -E tee -a /etc/dnf/dnf.conf
|
||||
sudo dnf clean all
|
||||
sudo dnf update -y --setopt=fastestmirror=1 --refresh
|
||||
echo "##[endgroup]"
|
||||
|
||||
echo "##[group]Install Development Tools"
|
||||
|
||||
# Alma wants "Development Tools", Fedora 41 wants "development-tools"
|
||||
if ! sudo dnf group install -y "Development Tools" ; then
|
||||
echo "Trying 'development-tools' instead of 'Development Tools'"
|
||||
sudo dnf group install -y development-tools
|
||||
fi
|
||||
|
||||
sudo dnf install -y \
|
||||
acl attr bc bzip2 cryptsetup curl dbench dkms elfutils-libelf-devel fio \
|
||||
gdb git jq kernel-rpm-macros ksh libacl-devel libaio-devel \
|
||||
libargon2-devel libattr-devel libblkid-devel libcurl-devel libffi-devel \
|
||||
ncompress libselinux-devel libtirpc-devel libtool libudev-devel \
|
||||
libuuid-devel lsscsi mdadm nfs-utils openssl-devel pam-devel pamtester \
|
||||
parted perf python3 python3-cffi python3-devel python3-packaging \
|
||||
kernel-devel python3-setuptools qemu-guest-agent rng-tools rpcgen \
|
||||
rpm-build rsync samba strace sysstat systemd watchdog wget xfsprogs-devel \
|
||||
xxhash zlib-devel
|
||||
echo "##[endgroup]"
|
||||
}
|
||||
|
||||
function tumbleweed() {
|
||||
echo "##[group]Running zypper is TODO!"
|
||||
sleep 23456
|
||||
echo "##[endgroup]"
|
||||
}
|
||||
|
||||
# $1: Kernel version to install (like '6.14rc7')
|
||||
function install_fedora_experimental_kernel {
|
||||
|
||||
our_version="$1"
|
||||
sudo dnf -y copr enable @kernel-vanilla/stable
|
||||
sudo dnf -y copr enable @kernel-vanilla/mainline
|
||||
all="$(sudo dnf list --showduplicates kernel-* python3-perf* perf* bpftool*)"
|
||||
echo "Available versions:"
|
||||
echo "$all"
|
||||
|
||||
# You can have a bunch of minor variants of the version we want '6.14'.
|
||||
# Pick the newest variant (sorted by version number).
|
||||
specific_version=$(echo "$all" | grep $our_version | awk '{print $2}' | sort -V | tail -n 1)
|
||||
list="$(echo "$all" | grep $specific_version | grep -Ev 'kernel-rt|kernel-selftests|kernel-debuginfo' | sed 's/.x86_64//g' | awk '{print $1"-"$2}')"
|
||||
sudo dnf install -y $list
|
||||
sudo dnf -y copr disable @kernel-vanilla/stable
|
||||
sudo dnf -y copr disable @kernel-vanilla/mainline
|
||||
}
|
||||
|
||||
# Install dependencies
|
||||
case "$1" in
|
||||
almalinux8)
|
||||
echo "##[group]Enable epel and powertools repositories"
|
||||
sudo dnf config-manager -y --set-enabled powertools
|
||||
sudo dnf install -y epel-release
|
||||
echo "##[endgroup]"
|
||||
rhel
|
||||
echo "##[group]Install kernel-abi-whitelists"
|
||||
sudo dnf install -y kernel-abi-whitelists
|
||||
echo "##[endgroup]"
|
||||
;;
|
||||
almalinux9|almalinux10|centos-stream9|centos-stream10)
|
||||
echo "##[group]Enable epel and crb repositories"
|
||||
sudo dnf config-manager -y --set-enabled crb
|
||||
sudo dnf install -y epel-release
|
||||
echo "##[endgroup]"
|
||||
rhel
|
||||
echo "##[group]Install kernel-abi-stablelists"
|
||||
sudo dnf install -y kernel-abi-stablelists
|
||||
echo "##[endgroup]"
|
||||
;;
|
||||
archlinux)
|
||||
archlinux
|
||||
;;
|
||||
debian*)
|
||||
echo 'debconf debconf/frontend select Noninteractive' | sudo debconf-set-selections
|
||||
debian
|
||||
echo "##[group]Install Debian specific"
|
||||
sudo apt-get install -yq linux-perf dh-sequence-dkms
|
||||
echo "##[endgroup]"
|
||||
;;
|
||||
fedora*)
|
||||
rhel
|
||||
sudo dnf install -y libunwind-devel
|
||||
|
||||
# Fedora 42+ moves /usr/bin/script from 'util-linux' to 'util-linux-script'
|
||||
sudo dnf install -y util-linux-script || true
|
||||
|
||||
# Optional: Install an experimental kernel ($2 = kernel version)
|
||||
if [ -n "${2:-}" ] ; then
|
||||
install_fedora_experimental_kernel "$2"
|
||||
fi
|
||||
;;
|
||||
freebsd*)
|
||||
freebsd
|
||||
;;
|
||||
tumbleweed)
|
||||
tumbleweed
|
||||
;;
|
||||
ubuntu*)
|
||||
debian
|
||||
echo "##[group]Install Ubuntu specific"
|
||||
sudo apt-get install -yq linux-tools-common libtirpc-dev \
|
||||
linux-modules-extra-$(uname -r)
|
||||
sudo apt-get install -yq dh-sequence-dkms
|
||||
echo "##[endgroup]"
|
||||
echo "##[group]Delete Ubuntu OpenZFS modules"
|
||||
for i in $(find /lib/modules -name zfs -type d); do sudo rm -rvf $i; done
|
||||
echo "##[endgroup]"
|
||||
;;
|
||||
esac
|
||||
|
||||
# This script is used for checkstyle + zloop deps also.
|
||||
# Install only the needed packages and exit - when used this way.
|
||||
test -z "${ONLY_DEPS:-}" || exit 0
|
||||
|
||||
# Start services
|
||||
echo "##[group]Enable services"
|
||||
case "$1" in
|
||||
freebsd*)
|
||||
# add virtio things
|
||||
echo 'virtio_load="YES"' | sudo -E tee -a /boot/loader.conf
|
||||
for i in balloon blk console random scsi; do
|
||||
echo "virtio_${i}_load=\"YES\"" | sudo -E tee -a /boot/loader.conf
|
||||
done
|
||||
echo "fdescfs /dev/fd fdescfs rw 0 0" | sudo -E tee -a /etc/fstab
|
||||
sudo -E mount /dev/fd
|
||||
sudo -E touch /etc/zfs/exports
|
||||
sudo -E sysrc mountd_flags="/etc/zfs/exports"
|
||||
echo '[global]' | sudo -E tee /usr/local/etc/smb4.conf >/dev/null
|
||||
sudo -E service nfsd enable
|
||||
sudo -E service qemu-guest-agent enable
|
||||
sudo -E service samba_server enable
|
||||
;;
|
||||
debian*|ubuntu*)
|
||||
sudo -E systemctl enable nfs-kernel-server
|
||||
sudo -E systemctl enable qemu-guest-agent
|
||||
sudo -E systemctl enable smbd
|
||||
;;
|
||||
*)
|
||||
# All other linux distros
|
||||
sudo -E systemctl enable nfs-server
|
||||
sudo -E systemctl enable qemu-guest-agent
|
||||
sudo -E systemctl enable smb
|
||||
;;
|
||||
esac
|
||||
echo "##[endgroup]"
|
||||
|
||||
# Setup Kernel cmdline
|
||||
CMDLINE="console=tty0 console=ttyS0,115200n8"
|
||||
CMDLINE="$CMDLINE selinux=0"
|
||||
CMDLINE="$CMDLINE random.trust_cpu=on"
|
||||
CMDLINE="$CMDLINE no_timer_check"
|
||||
case "$1" in
|
||||
almalinux*|centos*|fedora*)
|
||||
GRUB_CFG="/boot/grub2/grub.cfg"
|
||||
GRUB_MKCONFIG="grub2-mkconfig"
|
||||
CMDLINE="$CMDLINE biosdevname=0 net.ifnames=0"
|
||||
echo 'GRUB_SERIAL_COMMAND="serial --speed=115200"' \
|
||||
| sudo tee -a /etc/default/grub >/dev/null
|
||||
;;
|
||||
ubuntu24)
|
||||
GRUB_CFG="/boot/grub/grub.cfg"
|
||||
GRUB_MKCONFIG="grub-mkconfig"
|
||||
echo 'GRUB_DISABLE_OS_PROBER="false"' \
|
||||
| sudo tee -a /etc/default/grub >/dev/null
|
||||
;;
|
||||
*)
|
||||
GRUB_CFG="/boot/grub/grub.cfg"
|
||||
GRUB_MKCONFIG="grub-mkconfig"
|
||||
;;
|
||||
esac
|
||||
|
||||
case "$1" in
|
||||
archlinux|freebsd*)
|
||||
true
|
||||
;;
|
||||
*)
|
||||
echo "##[group]Edit kernel cmdline"
|
||||
sudo sed -i -e '/^GRUB_CMDLINE_LINUX/d' /etc/default/grub || true
|
||||
echo "GRUB_CMDLINE_LINUX=\"$CMDLINE\"" \
|
||||
| sudo tee -a /etc/default/grub >/dev/null
|
||||
sudo $GRUB_MKCONFIG -o $GRUB_CFG
|
||||
echo "##[endgroup]"
|
||||
;;
|
||||
esac
|
||||
|
||||
# reset cloud-init configuration and poweroff
|
||||
sudo cloud-init clean --logs
|
||||
sleep 2 && sudo poweroff &
|
||||
exit 0
|
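Judging from its header comment and the ONLY_DEPS check near the end, the (now removed) script above was used in two ways. Both invocations below are illustrative, with the OS and kernel arguments taken from the script's own comments:

    # Full provisioning inside a VM, optionally with an experimental Fedora kernel:
    ./qemu-3-deps-vm.sh fedora41 6.14
    # Dependencies only (as in checkstyle.yaml), skipping service and GRUB setup:
    ONLY_DEPS=1 .github/workflows/scripts/qemu-3-deps-vm.sh ubuntu22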
247
.github/workflows/scripts/qemu-3-deps.sh
vendored
247
.github/workflows/scripts/qemu-3-deps.sh
vendored
@ -1,28 +1,229 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
######################################################################
|
||||
# 3) Wait for VM to boot from previous step and launch dependencies
|
||||
# script on it.
|
||||
#
|
||||
# $1: OS name (like 'fedora41')
|
||||
# $2: (optional) Experimental kernel version to install on fedora,
|
||||
# like "6.14".
|
||||
# 3) install dependencies for compiling and loading
|
||||
######################################################################
|
||||
|
||||
.github/workflows/scripts/qemu-wait-for-vm.sh vm0
|
||||
set -eu
|
||||
|
||||
# SPECIAL CASE:
|
||||
#
|
||||
# If the user passed in an experimental kernel version to test on Fedora,
|
||||
# we need to update the kernel version in zfs's META file to allow the
|
||||
# build to happen. We update our local copy of META here, since we know
|
||||
# it will be rsync'd up in the next step.
|
||||
if [ -n "${2:-}" ] ; then
|
||||
sed -i -E 's/Linux-Maximum: .+/Linux-Maximum: 99.99/g' META
|
||||
fi
|
||||
function archlinux() {
|
||||
echo "##[group]Running pacman -Syu"
|
||||
sudo btrfs filesystem resize max /
|
||||
sudo pacman -Syu --noconfirm
|
||||
echo "##[endgroup]"
|
||||
|
||||
scp .github/workflows/scripts/qemu-3-deps-vm.sh zfs@vm0:qemu-3-deps-vm.sh
|
||||
PID=`pidof /usr/bin/qemu-system-x86_64`
|
||||
ssh zfs@vm0 '$HOME/qemu-3-deps-vm.sh' "$@"
|
||||
# wait for poweroff to succeed
|
||||
tail --pid=$PID -f /dev/null
|
||||
sleep 5 # avoid this: "error: Domain is already active"
|
||||
rm -f $HOME/.ssh/known_hosts
|
||||
echo "##[group]Install Development Tools"
|
||||
sudo pacman -Sy --noconfirm base-devel bc cpio cryptsetup dhclient dkms \
|
||||
fakeroot fio gdb inetutils jq less linux linux-headers lsscsi nfs-utils \
|
||||
parted pax perf python-packaging python-setuptools qemu-guest-agent ksh \
|
||||
samba sysstat rng-tools rsync wget xxhash
|
||||
echo "##[endgroup]"
|
||||
}
|
||||
|
||||
function debian() {
|
||||
export DEBIAN_FRONTEND="noninteractive"
|
||||
|
||||
echo "##[group]Running apt-get update+upgrade"
|
||||
sudo apt-get update -y
|
||||
sudo apt-get upgrade -y
|
||||
echo "##[endgroup]"
|
||||
|
||||
echo "##[group]Install Development Tools"
|
||||
sudo apt-get install -y \
|
||||
acl alien attr autoconf bc cpio cryptsetup curl dbench dh-python dkms \
|
||||
fakeroot fio gdb gdebi git ksh lcov isc-dhcp-client jq libacl1-dev \
|
||||
libaio-dev libattr1-dev libblkid-dev libcurl4-openssl-dev libdevmapper-dev \
|
||||
libelf-dev libffi-dev libmount-dev libpam0g-dev libselinux-dev libssl-dev \
|
||||
libtool libtool-bin libudev-dev libunwind-dev linux-headers-$(uname -r) \
|
||||
lsscsi nfs-kernel-server pamtester parted python3 python3-all-dev \
|
||||
python3-cffi python3-dev python3-distlib python3-packaging \
|
||||
python3-setuptools python3-sphinx qemu-guest-agent rng-tools rpm2cpio \
|
||||
rsync samba sysstat uuid-dev watchdog wget xfslibs-dev xxhash zlib1g-dev
|
||||
echo "##[endgroup]"
|
||||
}
|
||||
|
||||
function freebsd() {
|
||||
export ASSUME_ALWAYS_YES="YES"
|
||||
|
||||
echo "##[group]Install Development Tools"
|
||||
sudo pkg install -y autoconf automake autotools base64 checkbashisms fio \
|
||||
gdb gettext gettext-runtime git gmake gsed jq ksh93 lcov libtool lscpu \
|
||||
pkgconf python python3 pamtester pamtester qemu-guest-agent rsync xxhash
|
||||
sudo pkg install -xy \
|
||||
'^samba4[[:digit:]]+$' \
|
||||
'^py3[[:digit:]]+-cffi$' \
|
||||
'^py3[[:digit:]]+-sysctl$' \
|
||||
'^py3[[:digit:]]+-packaging$'
|
||||
echo "##[endgroup]"
|
||||
}
|
||||
|
||||
# common packages for: almalinux, centos, redhat
|
||||
function rhel() {
|
||||
echo "##[group]Running dnf update"
|
||||
echo "max_parallel_downloads=10" | sudo -E tee -a /etc/dnf/dnf.conf
|
||||
sudo dnf clean all
|
||||
sudo dnf update -y --setopt=fastestmirror=1 --refresh
|
||||
echo "##[endgroup]"
|
||||
|
||||
echo "##[group]Install Development Tools"
|
||||
|
||||
# Alma wants "Development Tools", Fedora 41 wants "development-tools"
|
||||
if ! sudo dnf group install -y "Development Tools" ; then
|
||||
echo "Trying 'development-tools' instead of 'Development Tools'"
|
||||
sudo dnf group install -y development-tools
|
||||
fi
|
||||
|
||||
sudo dnf install -y \
|
||||
acl attr bc bzip2 cryptsetup curl dbench dkms elfutils-libelf-devel fio \
|
||||
gdb git jq kernel-rpm-macros ksh libacl-devel libaio-devel \
|
||||
libargon2-devel libattr-devel libblkid-devel libcurl-devel libffi-devel \
|
||||
ncompress libselinux-devel libtirpc-devel libtool libudev-devel \
|
||||
libuuid-devel lsscsi mdadm nfs-utils openssl-devel pam-devel pamtester \
|
||||
parted perf python3 python3-cffi python3-devel python3-packaging \
|
||||
kernel-devel python3-setuptools qemu-guest-agent rng-tools rpcgen \
|
||||
rpm-build rsync samba sysstat systemd watchdog wget xfsprogs-devel xxhash \
|
||||
zlib-devel
|
||||
echo "##[endgroup]"
|
||||
}
|
||||
|
||||
function tumbleweed() {
|
||||
echo "##[group]Running zypper is TODO!"
|
||||
sleep 23456
|
||||
echo "##[endgroup]"
|
||||
}
|
||||
|
||||
# Install dependencies
|
||||
case "$1" in
|
||||
almalinux8)
|
||||
echo "##[group]Enable epel and powertools repositories"
|
||||
sudo dnf config-manager -y --set-enabled powertools
|
||||
sudo dnf install -y epel-release
|
||||
echo "##[endgroup]"
|
||||
rhel
|
||||
echo "##[group]Install kernel-abi-whitelists"
|
||||
sudo dnf install -y kernel-abi-whitelists
|
||||
echo "##[endgroup]"
|
||||
;;
|
||||
almalinux9|centos-stream9|centos-stream10)
|
||||
echo "##[group]Enable epel and crb repositories"
|
||||
sudo dnf config-manager -y --set-enabled crb
|
||||
sudo dnf install -y epel-release
|
||||
echo "##[endgroup]"
|
||||
rhel
|
||||
echo "##[group]Install kernel-abi-stablelists"
|
||||
sudo dnf install -y kernel-abi-stablelists
|
||||
echo "##[endgroup]"
|
||||
;;
|
||||
archlinux)
|
||||
archlinux
|
||||
;;
|
||||
debian*)
|
||||
echo 'debconf debconf/frontend select Noninteractive' | sudo debconf-set-selections
|
||||
debian
|
||||
echo "##[group]Install Debian specific"
|
||||
sudo apt-get install -yq linux-perf dh-sequence-dkms
|
||||
echo "##[endgroup]"
|
||||
;;
|
||||
fedora*)
|
||||
rhel
|
||||
;;
|
||||
freebsd*)
|
||||
freebsd
|
||||
;;
|
||||
tumbleweed)
|
||||
tumbleweed
|
||||
;;
|
||||
ubuntu*)
|
||||
debian
|
||||
echo "##[group]Install Ubuntu specific"
|
||||
sudo apt-get install -yq linux-tools-common libtirpc-dev \
|
||||
linux-modules-extra-$(uname -r)
|
||||
if [ "$1" != "ubuntu20" ]; then
|
||||
sudo apt-get install -yq dh-sequence-dkms
|
||||
fi
|
||||
echo "##[endgroup]"
|
||||
echo "##[group]Delete Ubuntu OpenZFS modules"
|
||||
for i in $(find /lib/modules -name zfs -type d); do sudo rm -rvf $i; done
|
||||
echo "##[endgroup]"
|
||||
;;
|
||||
esac
|
||||
|
||||
# This script is used for checkstyle + zloop deps also.
|
||||
# Install only the needed packages and exit - when used this way.
|
||||
test -z "${ONLY_DEPS:-}" || exit 0
|
||||
|
||||
# Start services
|
||||
echo "##[group]Enable services"
|
||||
case "$1" in
|
||||
freebsd*)
|
||||
# add virtio things
|
||||
echo 'virtio_load="YES"' | sudo -E tee -a /boot/loader.conf
|
||||
for i in balloon blk console random scsi; do
|
||||
echo "virtio_${i}_load=\"YES\"" | sudo -E tee -a /boot/loader.conf
|
||||
done
|
||||
echo "fdescfs /dev/fd fdescfs rw 0 0" | sudo -E tee -a /etc/fstab
|
||||
sudo -E mount /dev/fd
|
||||
sudo -E touch /etc/zfs/exports
|
||||
sudo -E sysrc mountd_flags="/etc/zfs/exports"
|
||||
echo '[global]' | sudo -E tee /usr/local/etc/smb4.conf >/dev/null
|
||||
sudo -E service nfsd enable
|
||||
sudo -E service qemu-guest-agent enable
|
||||
sudo -E service samba_server enable
|
||||
;;
|
||||
debian*|ubuntu*)
|
||||
sudo -E systemctl enable nfs-kernel-server
|
||||
sudo -E systemctl enable qemu-guest-agent
|
||||
sudo -E systemctl enable smbd
|
||||
;;
|
||||
*)
|
||||
# All other linux distros
|
||||
sudo -E systemctl enable nfs-server
|
||||
sudo -E systemctl enable qemu-guest-agent
|
||||
sudo -E systemctl enable smb
|
||||
;;
|
||||
esac
|
||||
echo "##[endgroup]"
|
||||
|
||||
# Setup Kernel cmdline
|
||||
CMDLINE="console=tty0 console=ttyS0,115200n8"
|
||||
CMDLINE="$CMDLINE selinux=0"
|
||||
CMDLINE="$CMDLINE random.trust_cpu=on"
|
||||
CMDLINE="$CMDLINE no_timer_check"
|
||||
case "$1" in
|
||||
almalinux*|centos*|fedora*)
|
||||
GRUB_CFG="/boot/grub2/grub.cfg"
|
||||
GRUB_MKCONFIG="grub2-mkconfig"
|
||||
CMDLINE="$CMDLINE biosdevname=0 net.ifnames=0"
|
||||
echo 'GRUB_SERIAL_COMMAND="serial --speed=115200"' \
|
||||
| sudo tee -a /etc/default/grub >/dev/null
|
||||
;;
|
||||
ubuntu24)
|
||||
GRUB_CFG="/boot/grub/grub.cfg"
|
||||
GRUB_MKCONFIG="grub-mkconfig"
|
||||
echo 'GRUB_DISABLE_OS_PROBER="false"' \
|
||||
| sudo tee -a /etc/default/grub >/dev/null
|
||||
;;
|
||||
*)
|
||||
GRUB_CFG="/boot/grub/grub.cfg"
|
||||
GRUB_MKCONFIG="grub-mkconfig"
|
||||
;;
|
||||
esac
|
||||
|
||||
case "$1" in
|
||||
archlinux|freebsd*)
|
||||
true
|
||||
;;
|
||||
*)
|
||||
echo "##[group]Edit kernel cmdline"
|
||||
sudo sed -i -e '/^GRUB_CMDLINE_LINUX/d' /etc/default/grub || true
|
||||
echo "GRUB_CMDLINE_LINUX=\"$CMDLINE\"" \
|
||||
| sudo tee -a /etc/default/grub >/dev/null
|
||||
sudo $GRUB_MKCONFIG -o $GRUB_CFG
|
||||
echo "##[endgroup]"
|
||||
;;
|
||||
esac
|
||||
|
||||
# reset cloud-init configuration and poweroff
|
||||
sudo cloud-init clean --logs
|
||||
sleep 2 && sudo poweroff &
|
||||
exit 0
|
||||
|
396
.github/workflows/scripts/qemu-4-build-vm.sh
vendored
396
.github/workflows/scripts/qemu-4-build-vm.sh
vendored
@ -1,396 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
######################################################################
|
||||
# 4) configure and build openzfs modules. This is run on the VMs.
|
||||
#
|
||||
# Usage:
|
||||
#
|
||||
# qemu-4-build-vm.sh OS [--enable-debug][--dkms][--patch-level NUM]
|
||||
# [--poweroff][--release][--repo][--tarball]
|
||||
#
|
||||
# OS: OS name like 'fedora41'
|
||||
# --enable-debug: Build RPMs with '--enable-debug' (for testing)
|
||||
# --dkms: Build DKMS RPMs as well
|
||||
# --patch-level NUM: Use a custom patch level number for packages.
|
||||
# --poweroff: Power-off the VM after building
|
||||
# --release Build zfs-release*.rpm as well
|
||||
# --repo After building everything, copy RPMs into /tmp/repo
|
||||
# in the ZFS RPM repository file structure. Also
|
||||
# copy tarballs if they were built.
|
||||
# --tarball: Also build a tarball of ZFS source
|
||||
######################################################################
|
||||
|
||||
ENABLE_DEBUG=""
|
||||
DKMS=""
|
||||
PATCH_LEVEL=""
|
||||
POWEROFF=""
|
||||
RELEASE=""
|
||||
REPO=""
|
||||
TARBALL=""
|
||||
while [[ $# -gt 0 ]]; do
|
||||
case $1 in
|
||||
--enable-debug)
|
||||
ENABLE_DEBUG=1
|
||||
shift
|
||||
;;
|
||||
--dkms)
|
||||
DKMS=1
|
||||
shift
|
||||
;;
|
||||
--patch-level)
|
||||
PATCH_LEVEL=$2
|
||||
shift
|
||||
shift
|
||||
;;
|
||||
--poweroff)
|
||||
POWEROFF=1
|
||||
shift
|
||||
;;
|
||||
--release)
|
||||
RELEASE=1
|
||||
shift
|
||||
;;
|
||||
--repo)
|
||||
REPO=1
|
||||
shift
|
||||
;;
|
||||
--tarball)
|
||||
TARBALL=1
|
||||
shift
|
||||
;;
|
||||
*)
|
||||
OS=$1
|
||||
shift
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
set -eu
|
||||
|
||||
function run() {
|
||||
LOG="/var/tmp/build-stderr.txt"
|
||||
echo "****************************************************"
|
||||
echo "$(date) ($*)"
|
||||
echo "****************************************************"
|
||||
($@ || echo $? > /tmp/rv) 3>&1 1>&2 2>&3 | stdbuf -eL -oL tee -a $LOG
|
||||
if [ -f /tmp/rv ]; then
|
||||
RV=$(cat /tmp/rv)
|
||||
echo "****************************************************"
|
||||
echo "exit with value=$RV ($*)"
|
||||
echo "****************************************************"
|
||||
echo 1 > /var/tmp/build-exitcode.txt
|
||||
exit $RV
|
||||
fi
|
||||
}
|
||||
|
||||
# Look at the RPMs in the current directory and copy/move them to
|
||||
# /tmp/repo, using the directory structure we use for the ZFS RPM repos.
|
||||
#
|
||||
# For example:
|
||||
# /tmp/repo/epel-testing/9.5
|
||||
# /tmp/repo/epel-testing/9.5/SRPMS
|
||||
# /tmp/repo/epel-testing/9.5/SRPMS/zfs-2.3.99-1.el9.src.rpm
|
||||
# /tmp/repo/epel-testing/9.5/SRPMS/zfs-kmod-2.3.99-1.el9.src.rpm
|
||||
# /tmp/repo/epel-testing/9.5/kmod
|
||||
# /tmp/repo/epel-testing/9.5/kmod/x86_64
|
||||
# /tmp/repo/epel-testing/9.5/kmod/x86_64/debug
|
||||
# /tmp/repo/epel-testing/9.5/kmod/x86_64/debug/kmod-zfs-debuginfo-2.3.99-1.el9.x86_64.rpm
|
||||
# /tmp/repo/epel-testing/9.5/kmod/x86_64/debug/libnvpair3-debuginfo-2.3.99-1.el9.x86_64.rpm
|
||||
# /tmp/repo/epel-testing/9.5/kmod/x86_64/debug/libuutil3-debuginfo-2.3.99-1.el9.x86_64.rpm
|
||||
# ...
|
||||
function copy_rpms_to_repo {
|
||||
# Pick a RPM to query. It doesn't matter which one - we just want to extract
|
||||
# the 'Build Host' value from it.
|
||||
rpm=$(ls zfs-*.rpm | head -n 1)
|
||||
|
||||
# Get zfs version '2.2.99'
|
||||
zfs_ver=$(rpm -qpi $rpm | awk '/Version/{print $3}')
|
||||
|
||||
# Get "2.1" or "2.2"
|
||||
zfs_major=$(echo $zfs_ver | grep -Eo [0-9]+\.[0-9]+)
|
||||
|
||||
# Get 'almalinux9.5' or 'fedora41' type string
|
||||
build_host=$(rpm -qpi $rpm | awk '/Build Host/{print $4}')
|
||||
|
||||
# Get '9.5' or '41' OS version
|
||||
os_ver=$(echo $build_host | grep -Eo '[0-9\.]+$')
|
||||
|
||||
# Our ZFS version and OS name will determine which repo the RPMs
|
||||
# will go in (regular or testing). Fedora always gets the newest
|
||||
# releases, and Alma gets the older releases.
|
||||
case $build_host in
|
||||
almalinux*)
|
||||
case $zfs_major in
|
||||
2.2)
|
||||
d="epel"
|
||||
;;
|
||||
*)
|
||||
d="epel-testing"
|
||||
;;
|
||||
esac
|
||||
;;
|
||||
fedora*)
|
||||
d="fedora"
|
||||
;;
|
||||
esac
|
||||
|
||||
prefix=/tmp/repo
|
||||
dst="$prefix/$d/$os_ver"
|
||||
|
||||
# Special case: move zfs-release*.rpm out of the way first (if we built them).
|
||||
# This will make filtering the other RPMs easier.
|
||||
mkdir -p $dst
|
||||
mv zfs-release*.rpm $dst || true
|
||||
|
||||
# Copy source RPMs
|
||||
mkdir -p $dst/SRPMS
|
||||
cp $(ls *.src.rpm) $dst/SRPMS/
|
||||
|
||||
if [[ "$build_host" =~ "almalinux" ]] ; then
|
||||
# Copy kmods+userspace
|
||||
mkdir -p $dst/kmod/x86_64/debug
|
||||
cp $(ls *.rpm | grep -Ev 'src.rpm|dkms|debuginfo') $dst/kmod/x86_64
|
||||
cp *debuginfo*.rpm $dst/kmod/x86_64/debug
|
||||
fi
|
||||
|
||||
if [ -n "$DKMS" ] ; then
|
||||
# Copy dkms+userspace
|
||||
mkdir -p $dst/x86_64
|
||||
cp $(ls *.rpm | grep -Ev 'src.rpm|kmod|debuginfo') $dst/x86_64
|
||||
fi
|
||||
|
||||
# Copy debug
|
||||
mkdir -p $dst/x86_64/debug
|
||||
cp $(ls *debuginfo*.rpm | grep -v kmod) $dst/x86_64/debug
|
||||
}
|
||||
|
||||
function freebsd() {
|
||||
extra="${1:-}"
|
||||
|
||||
export MAKE="gmake"
|
||||
echo "##[group]Autogen.sh"
|
||||
run ./autogen.sh
|
||||
echo "##[endgroup]"
|
||||
|
||||
echo "##[group]Configure"
|
||||
run ./configure \
|
||||
--prefix=/usr/local \
|
||||
--with-libintl-prefix=/usr/local \
|
||||
--enable-pyzfs \
|
||||
--enable-debuginfo $extra
|
||||
echo "##[endgroup]"
|
||||
|
||||
echo "##[group]Build"
|
||||
run gmake -j$(sysctl -n hw.ncpu)
|
||||
echo "##[endgroup]"
|
||||
|
||||
echo "##[group]Install"
|
||||
run sudo gmake install
|
||||
echo "##[endgroup]"
|
||||
}
|
||||
|
||||
function linux() {
|
||||
extra="${1:-}"
|
||||
|
||||
echo "##[group]Autogen.sh"
|
||||
run ./autogen.sh
|
||||
echo "##[endgroup]"
|
||||
|
||||
echo "##[group]Configure"
|
||||
run ./configure \
|
||||
--prefix=/usr \
|
||||
--enable-pyzfs \
|
||||
--enable-debuginfo $extra
|
||||
echo "##[endgroup]"
|
||||
|
||||
echo "##[group]Build"
|
||||
run make -j$(nproc)
|
||||
echo "##[endgroup]"
|
||||
|
||||
echo "##[group]Install"
|
||||
run sudo make install
|
||||
echo "##[endgroup]"
|
||||
}
|
||||
|
||||
function rpm_build_and_install() {
|
||||
extra="${1:-}"
|
||||
|
||||
# Build RPMs with XZ compression by default (since gzip decompression is slow)
|
||||
echo "%_binary_payload w7.xzdio" >> ~/.rpmmacros
|
||||
|
||||
echo "##[group]Autogen.sh"
|
||||
run ./autogen.sh
|
||||
echo "##[endgroup]"
|
||||
|
||||
if [ -n "$PATCH_LEVEL" ] ; then
|
||||
sed -i -E 's/(Release:\s+)1/\1'$PATCH_LEVEL'/g' META
|
||||
fi
|
||||
|
||||
echo "##[group]Configure"
|
||||
run ./configure --enable-debuginfo $extra
|
||||
echo "##[endgroup]"
|
||||
|
||||
echo "##[group]Build"
|
||||
run make pkg-kmod pkg-utils
|
||||
echo "##[endgroup]"
|
||||
|
||||
if [ -n "$DKMS" ] ; then
|
||||
echo "##[group]DKMS"
|
||||
make rpm-dkms
|
||||
echo "##[endgroup]"
|
||||
fi
|
||||
|
||||
if [ -n "$REPO" ] ; then
|
||||
echo "Skipping install since we're only building RPMs and nothing else"
|
||||
else
|
||||
echo "##[group]Install"
|
||||
run sudo dnf -y --nobest install $(ls *.rpm | grep -Ev 'dkms|src.rpm')
|
||||
echo "##[endgroup]"
|
||||
fi
|
||||
|
||||
# Optionally build the zfs-release.*.rpm
|
||||
if [ -n "$RELEASE" ] ; then
|
||||
echo "##[group]Release"
|
||||
pushd ~
|
||||
sudo dnf -y install rpm-build || true
|
||||
# Check out a sparse copy of zfsonlinux.github.com.git so we don't get
|
||||
# all the binaries. We just need a few kilobytes of files to build RPMs.
|
||||
git clone --depth 1 --no-checkout \
|
||||
https://github.com/zfsonlinux/zfsonlinux.github.com.git
|
||||
|
||||
cd zfsonlinux.github.com
|
||||
git sparse-checkout set zfs-release
|
||||
git checkout
|
||||
cd zfs-release
|
||||
|
||||
mkdir -p ~/rpmbuild/{BUILDROOT,SPECS,RPMS,SRPMS,SOURCES,BUILD}
|
||||
cp RPM-GPG-KEY-openzfs* *.repo ~/rpmbuild/SOURCES
|
||||
cp zfs-release.spec ~/rpmbuild/SPECS/
|
||||
rpmbuild -ba ~/rpmbuild/SPECS/zfs-release.spec
|
||||
|
||||
# ZFS release RPMs are built. Copy them to the ~/zfs directory just to
|
||||
# keep all the RPMs in the same place.
|
||||
cp ~/rpmbuild/RPMS/noarch/*.rpm ~/zfs
|
||||
cp ~/rpmbuild/SRPMS/*.rpm ~/zfs
|
||||
|
||||
popd
|
||||
rm -fr ~/rpmbuild
|
||||
echo "##[endgroup]"
|
||||
fi
|
||||
|
||||
if [ -n "$REPO" ] ; then
|
||||
echo "##[group]Repo"
|
||||
copy_rpms_to_repo
|
||||
echo "##[endgroup]"
|
||||
fi
|
||||
}
|
||||
|
||||
function deb_build_and_install() {
|
||||
extra="${1:-}"
|
||||
|
||||
echo "##[group]Autogen.sh"
|
||||
run ./autogen.sh
|
||||
echo "##[endgroup]"
|
||||
|
||||
echo "##[group]Configure"
|
||||
run ./configure \
|
||||
--prefix=/usr \
|
||||
--enable-pyzfs \
|
||||
--enable-debuginfo $extra
|
||||
echo "##[endgroup]"
|
||||
|
||||
echo "##[group]Build"
|
||||
run make native-deb-kmod native-deb-utils
|
||||
echo "##[endgroup]"
|
||||
|
||||
echo "##[group]Install"
|
||||
# Do kmod install. Note that when you build the native debs, the
|
||||
# packages themselves are placed in parent directory '../' rather than
|
||||
# in the source directory like the rpms are.
|
||||
run sudo apt-get -y install $(find ../ | grep -E '\.deb$' \
|
||||
| grep -Ev 'dkms|dracut')
|
||||
echo "##[endgroup]"
|
||||
}
|
||||
|
||||
function build_tarball {
|
||||
if [ -n "$REPO" ] ; then
|
||||
./autogen.sh
|
||||
./configure --with-config=srpm
|
||||
make dist
|
||||
mkdir -p /tmp/repo/releases
|
||||
# The tarball name is based off of 'Version' field in the META file.
|
||||
mv *.tar.gz /tmp/repo/releases/
|
||||
fi
|
||||
}
|
||||
|
||||
# Debug: show kernel cmdline
|
||||
if [ -f /proc/cmdline ] ; then
|
||||
cat /proc/cmdline || true
|
||||
fi
|
||||
|
||||
# Set our hostname to our OS name and version number. Specifically, we set the
|
||||
# major and minor number so that when we query the Build Host field in the RPMs
|
||||
# we build, we can see what specific version of Fedora/Almalinux we were using
|
||||
# to build them. This is helpful for matching up KMOD versions.
|
||||
#
|
||||
# Examples:
|
||||
#
|
||||
# rhel8.10
|
||||
# almalinux9.5
|
||||
# fedora42
|
||||
source /etc/os-release
|
||||
if which hostnamectl &> /dev/null ; then
|
||||
# Fedora 42+ use hostnamectl
|
||||
sudo hostnamectl set-hostname "$ID$VERSION_ID"
|
||||
sudo hostnamectl set-hostname --pretty "$ID$VERSION_ID"
|
||||
else
|
||||
sudo hostname "$ID$VERSION_ID"
|
||||
fi

# save some sysinfo
uname -a > /var/tmp/uname.txt

cd $HOME/zfs
export PATH="$PATH:/sbin:/usr/sbin:/usr/local/sbin"

extra=""
if [ -n "$ENABLE_DEBUG" ] ; then
  extra="--enable-debug"
fi

# build
case "$OS" in
  freebsd*)
    freebsd "$extra"
    ;;
  alma*|centos*)
    rpm_build_and_install "--with-spec=redhat $extra"
    ;;
  fedora*)
    rpm_build_and_install "$extra"

    # Historically, we've always built the release tarballs on Fedora, since
    # there was one instance long ago where we built them on CentOS 7, and they
    # didn't work correctly for everyone.
    if [ -n "$TARBALL" ] ; then
      build_tarball
    fi
    ;;
  debian*|ubuntu*)
    deb_build_and_install "$extra"
    ;;
  *)
    linux "$extra"
    ;;
esac


# building the zfs module was ok
echo 0 > /var/tmp/build-exitcode.txt

# reset cloud-init configuration and poweroff
if [ -n "$POWEROFF" ] ; then
  sudo cloud-init clean --logs
  sync && sleep 2 && sudo poweroff &
fi
exit 0
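
Taken together with the thin qemu-4-build.sh wrapper in the next file (which just ssh-es into vm0 and forwards its arguments to this script), the whole build is normally driven from the workflow with a single call; the OS name here is just one value from the test matrix:

# Build with debug enabled and power the VM off when the build finishes.
.github/workflows/scripts/qemu-4-build.sh --poweroff --enable-debug fedora42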
.github/workflows/scripts/qemu-4-build.sh (vendored, 150 lines changed)
@ -3,9 +3,151 @@
|
||||
######################################################################
|
||||
# 4) configure and build openzfs modules
|
||||
######################################################################
|
||||
echo "Build modules in QEMU machine"
|
||||
|
||||
# Bring our VM back up and copy over ZFS source
|
||||
.github/workflows/scripts/qemu-prepare-for-build.sh
|
||||
set -eu
|
||||
|
||||
ssh zfs@vm0 '$HOME/zfs/.github/workflows/scripts/qemu-4-build-vm.sh' $@
|
||||
function run() {
|
||||
LOG="/var/tmp/build-stderr.txt"
|
||||
echo "****************************************************"
|
||||
echo "$(date) ($*)"
|
||||
echo "****************************************************"
|
||||
($@ || echo $? > /tmp/rv) 3>&1 1>&2 2>&3 | stdbuf -eL -oL tee -a $LOG
|
||||
if [ -f /tmp/rv ]; then
|
||||
RV=$(cat /tmp/rv)
|
||||
echo "****************************************************"
|
||||
echo "exit with value=$RV ($*)"
|
||||
echo "****************************************************"
|
||||
echo 1 > /var/tmp/build-exitcode.txt
|
||||
exit $RV
|
||||
fi
|
||||
}
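
The redirection in run() above is the classic file-descriptor swap: 3>&1 1>&2 2>&3 exchanges stdout and stderr, so only the command's stderr flows through the pipe into tee (and on into build-stderr.txt) while normal output still reaches the console. A minimal standalone sketch of the same pattern, with a placeholder log name:

# After the swap, "normal output" bypasses the pipe, while "error output"
# is the only thing tee appends to err-only.log.
( echo "normal output"; echo "error output" >&2 ) 3>&1 1>&2 2>&3 | tee -a err-only.log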
|
||||
|
||||
function freebsd() {
|
||||
export MAKE="gmake"
|
||||
echo "##[group]Autogen.sh"
|
||||
run ./autogen.sh
|
||||
echo "##[endgroup]"
|
||||
|
||||
echo "##[group]Configure"
|
||||
run ./configure \
|
||||
--prefix=/usr/local \
|
||||
--with-libintl-prefix=/usr/local \
|
||||
--enable-pyzfs \
|
||||
--enable-debug \
|
||||
--enable-debuginfo
|
||||
echo "##[endgroup]"
|
||||
|
||||
echo "##[group]Build"
|
||||
run gmake -j$(sysctl -n hw.ncpu)
|
||||
echo "##[endgroup]"
|
||||
|
||||
echo "##[group]Install"
|
||||
run sudo gmake install
|
||||
echo "##[endgroup]"
|
||||
}
|
||||
|
||||
function linux() {
|
||||
echo "##[group]Autogen.sh"
|
||||
run ./autogen.sh
|
||||
echo "##[endgroup]"
|
||||
|
||||
echo "##[group]Configure"
|
||||
run ./configure \
|
||||
--prefix=/usr \
|
||||
--enable-pyzfs \
|
||||
--enable-debug \
|
||||
--enable-debuginfo
|
||||
echo "##[endgroup]"
|
||||
|
||||
echo "##[group]Build"
|
||||
run make -j$(nproc)
|
||||
echo "##[endgroup]"
|
||||
|
||||
echo "##[group]Install"
|
||||
run sudo make install
|
||||
echo "##[endgroup]"
|
||||
}
|
||||
|
||||
function rpm_build_and_install() {
|
||||
EXTRA_CONFIG="${1:-}"
|
||||
echo "##[group]Autogen.sh"
|
||||
run ./autogen.sh
|
||||
echo "##[endgroup]"
|
||||
|
||||
echo "##[group]Configure"
|
||||
run ./configure --enable-debug --enable-debuginfo $EXTRA_CONFIG
|
||||
echo "##[endgroup]"
|
||||
|
||||
echo "##[group]Build"
|
||||
run make pkg-kmod pkg-utils
|
||||
echo "##[endgroup]"
|
||||
|
||||
echo "##[group]Install"
|
||||
run sudo dnf -y --nobest install $(ls *.rpm | grep -v src.rpm)
|
||||
echo "##[endgroup]"
|
||||
|
||||
}
|
||||
|
||||
function deb_build_and_install() {
|
||||
echo "##[group]Autogen.sh"
|
||||
run ./autogen.sh
|
||||
echo "##[endgroup]"
|
||||
|
||||
echo "##[group]Configure"
|
||||
run ./configure \
|
||||
--prefix=/usr \
|
||||
--enable-pyzfs \
|
||||
--enable-debug \
|
||||
--enable-debuginfo
|
||||
echo "##[endgroup]"
|
||||
|
||||
echo "##[group]Build"
|
||||
run make native-deb-kmod native-deb-utils
|
||||
echo "##[endgroup]"
|
||||
|
||||
echo "##[group]Install"
|
||||
# Do kmod install. Note that when you build the native debs, the
|
||||
# packages themselves are placed in parent directory '../' rather than
|
||||
# in the source directory like the rpms are.
|
||||
run sudo apt-get -y install $(find ../ | grep -E '\.deb$' \
|
||||
| grep -Ev 'dkms|dracut')
|
||||
echo "##[endgroup]"
|
||||
}
|
||||
|
||||
# Debug: show kernel cmdline
|
||||
if [ -f /proc/cmdline ] ; then
|
||||
cat /proc/cmdline || true
|
||||
fi
|
||||
|
||||
# save some sysinfo
|
||||
uname -a > /var/tmp/uname.txt
|
||||
|
||||
cd $HOME/zfs
|
||||
export PATH="$PATH:/sbin:/usr/sbin:/usr/local/sbin"
|
||||
|
||||
# build
|
||||
case "$1" in
|
||||
freebsd*)
|
||||
freebsd
|
||||
;;
|
||||
alma*|centos*)
|
||||
rpm_build_and_install "--with-spec=redhat"
|
||||
;;
|
||||
fedora*)
|
||||
rpm_build_and_install
|
||||
;;
|
||||
debian*|ubuntu*)
|
||||
deb_build_and_install
|
||||
;;
|
||||
*)
|
||||
linux
|
||||
;;
|
||||
esac
|
||||
|
||||
# building the zfs module was ok
|
||||
echo 0 > /var/tmp/build-exitcode.txt
|
||||
|
||||
# reset cloud-init configuration and poweroff
|
||||
sudo cloud-init clean --logs
|
||||
sync && sleep 2 && sudo poweroff &
|
||||
exit 0
.github/workflows/scripts/qemu-5-setup.sh (vendored, 73 lines changed)
@ -12,45 +12,41 @@ source /var/tmp/env.txt
|
||||
# wait for poweroff to succeed
|
||||
PID=$(pidof /usr/bin/qemu-system-x86_64)
|
||||
tail --pid=$PID -f /dev/null
|
||||
sudo virsh undefine --nvram openzfs
|
||||
sudo virsh undefine openzfs
|
||||
|
||||
# default values per test vm:
|
||||
VMs=2
|
||||
CPU=2
|
||||
|
||||
# cpu pinning
|
||||
CPUSET=("0,1" "2,3")
|
||||
|
||||
# additional options for virt-install
|
||||
OPTS[0]=""
|
||||
OPTS[1]=""
|
||||
|
||||
case "$OS" in
|
||||
freebsd*)
|
||||
# FreeBSD needs only 6GiB
|
||||
# FreeBSD can't be optimized via ksmtuned
|
||||
RAM=6
|
||||
;;
|
||||
debian13)
|
||||
RAM=8
|
||||
# Boot Debian 13 with uefi=on and secureboot=off (ZFS Kernel Module not signed)
|
||||
OPTS[0]="--boot"
|
||||
OPTS[1]="firmware=efi,firmware.feature0.name=secure-boot,firmware.feature0.enabled=no"
|
||||
;;
|
||||
*)
|
||||
# Linux needs more memory, but can be optimized to share it via KSM
|
||||
# Linux can be optimized via ksmtuned
|
||||
RAM=8
|
||||
;;
|
||||
esac
|
||||
|
||||
# this can be different for each distro
|
||||
echo "VMs=$VMs" >> $ENV
|
||||
|
||||
# create snapshot we can clone later
|
||||
sudo zfs snapshot zpool/openzfs@now
|
||||
|
||||
# setup the testing vm's
|
||||
PUBKEY=$(cat ~/.ssh/id_ed25519.pub)
|
||||
for i in $(seq 1 $VMs); do
|
||||
|
||||
# start testing VMs
|
||||
for ((i=1; i<=VMs; i++)); do
|
||||
echo "Creating disk for vm$i..."
|
||||
DISK="/dev/zvol/zpool/vm$i"
|
||||
FORMAT="raw"
|
||||
sudo zfs clone zpool/openzfs@now zpool/vm$i-system
|
||||
sudo zfs create -ps -b 64k -V 64g zpool/vm$i-tests
|
||||
sudo zfs clone zpool/openzfs@now zpool/vm$i
|
||||
sudo zfs create -ps -b 64k -V 80g zpool/vm$i-2
|
||||
|
||||
cat <<EOF > /tmp/user-data
|
||||
#cloud-config
|
||||
@ -87,51 +83,44 @@ EOF
|
||||
--graphics none \
|
||||
--cloud-init user-data=/tmp/user-data \
|
||||
--network bridge=virbr0,model=$NIC,mac="52:54:00:83:79:0$i" \
|
||||
--disk $DISK-system,bus=virtio,cache=none,format=$FORMAT,driver.discard=unmap \
|
||||
--disk $DISK-tests,bus=virtio,cache=none,format=$FORMAT,driver.discard=unmap \
|
||||
--import --noautoconsole ${OPTS[0]} ${OPTS[1]}
|
||||
--disk $DISK,bus=virtio,cache=none,format=$FORMAT,driver.discard=unmap \
|
||||
--disk $DISK-2,bus=virtio,cache=none,format=$FORMAT,driver.discard=unmap \
|
||||
--import --noautoconsole >/dev/null
|
||||
done
|
||||
|
||||
# generate some memory stats
|
||||
# check the memory state from time to time
|
||||
cat <<EOF > cronjob.sh
|
||||
# $OS
|
||||
exec 1>>/var/tmp/stats.txt
|
||||
exec 2>&1
|
||||
echo "********************************************************************************"
|
||||
echo "*******************************************************"
|
||||
date
|
||||
uptime
|
||||
free -m
|
||||
df -h /mnt/tests
|
||||
zfs list
|
||||
EOF
|
||||
|
||||
sudo chmod +x cronjob.sh
|
||||
sudo mv -f cronjob.sh /root/cronjob.sh
|
||||
echo '*/5 * * * * /root/cronjob.sh' > crontab.txt
|
||||
sudo crontab crontab.txt
|
||||
rm crontab.txt
|
||||
|
||||
# check if the machines are okay
|
||||
echo "Waiting for vm's to come up... (${VMs}x CPU=$CPU RAM=$RAM)"
|
||||
for i in $(seq 1 $VMs); do
|
||||
while true; do
|
||||
ssh 2>/dev/null zfs@192.168.122.1$i "uname -a" && break
|
||||
done
|
||||
done
|
||||
echo "All $VMs VMs are up now."
|
||||
|
||||
# Save the VM's serial output (ttyS0) to /var/tmp/console.txt
|
||||
# - ttyS0 on the VM corresponds to a local /dev/pty/N entry
|
||||
# - use 'virsh ttyconsole' to lookup the /dev/pty/N entry
|
||||
for ((i=1; i<=VMs; i++)); do
|
||||
for i in $(seq 1 $VMs); do
|
||||
mkdir -p $RESPATH/vm$i
|
||||
read "pty" <<< $(sudo virsh ttyconsole vm$i)
|
||||
|
||||
# Create the file so we can tail it, even if there's no output.
|
||||
touch $RESPATH/vm$i/console.txt
|
||||
|
||||
sudo nohup bash -c "cat $pty > $RESPATH/vm$i/console.txt" &
|
||||
|
||||
# Write all VM boot lines to the console to aid in debugging failed boots.
|
||||
# The boot lines from all the VMs will be munged together, so prepend each
|
||||
# line with the vm hostname (like 'vm1:').
|
||||
(while IFS=$'\n' read -r line; do echo "vm$i: $line" ; done < <(sudo tail -f $RESPATH/vm$i/console.txt)) &
|
||||
|
||||
done
|
||||
echo "Console logging for ${VMs}x $OS started."
|
||||
|
||||
|
||||
# check if the machines are okay
|
||||
echo "Waiting for vm's to come up... (${VMs}x CPU=$CPU RAM=$RAM)"
|
||||
for ((i=1; i<=VMs; i++)); do
|
||||
.github/workflows/scripts/qemu-wait-for-vm.sh vm$i
|
||||
done
|
||||
echo "All $VMs VMs are up now."
.github/workflows/scripts/qemu-6-tests.sh (vendored, 30 lines changed)
@ -21,13 +21,11 @@ function prefix() {
|
||||
S=$((DIFF-(M*60)))
|
||||
|
||||
CTR=$(cat /tmp/ctr)
|
||||
echo $LINE| grep -q '^\[.*] Test[: ]' && CTR=$((CTR+1)) && echo $CTR > /tmp/ctr
|
||||
echo $LINE| grep -q "^Test[: ]" && CTR=$((CTR+1)) && echo $CTR > /tmp/ctr
|
||||
|
||||
BASE="$HOME/work/zfs/zfs"
|
||||
COLOR="$BASE/scripts/zfs-tests-color.sh"
|
||||
CLINE=$(echo $LINE| grep '^\[.*] Test[: ]' \
|
||||
| sed -e 's|^\[.*] Test|Test|g' \
|
||||
| sed -e 's|/usr/local|/usr|g' \
|
||||
CLINE=$(echo $LINE| grep "^Test[ :]" | sed -e 's|/usr/local|/usr|g' \
|
||||
| sed -e 's| /usr/share/zfs/zfs-tests/tests/| |g' | $COLOR)
|
||||
if [ -z "$CLINE" ]; then
|
||||
printf "vm${ID}: %s\n" "$LINE"
|
||||
@ -47,7 +45,7 @@ if [ -z ${1:-} ]; then
|
||||
echo 0 > /tmp/ctr
|
||||
date "+%s" > /tmp/tsstart
|
||||
|
||||
for ((i=1; i<=VMs; i++)); do
|
||||
for i in $(seq 1 $VMs); do
|
||||
IP="192.168.122.1$i"
|
||||
daemonize -c /var/tmp -p vm${i}.pid -o vm${i}log.txt -- \
|
||||
$SSH zfs@$IP $TESTS $OS $i $VMs $CI_TYPE
|
||||
@ -60,7 +58,7 @@ if [ -z ${1:-} ]; then
|
||||
done
|
||||
|
||||
# wait for all vm's to finish
|
||||
for ((i=1; i<=VMs; i++)); do
|
||||
for i in $(seq 1 $VMs); do
|
||||
tail --pid=$(cat vm${i}.pid) -f /dev/null
|
||||
pid=$(cat vm${i}log.pid)
|
||||
rm -f vm${i}log.pid
|
||||
@ -74,31 +72,19 @@ fi
|
||||
export PATH="$PATH:/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/sbin:/usr/local/bin"
|
||||
case "$1" in
|
||||
freebsd*)
|
||||
TDIR="/usr/local/share/zfs"
|
||||
sudo kldstat -n zfs 2>/dev/null && sudo kldunload zfs
|
||||
sudo -E ./zfs/scripts/zfs.sh
|
||||
sudo mv -f /var/tmp/*.txt /tmp
|
||||
sudo newfs -U -t -L tmp /dev/vtbd1 >/dev/null
|
||||
sudo mount -o noatime /dev/vtbd1 /var/tmp
|
||||
sudo chmod 1777 /var/tmp
|
||||
sudo mv -f /tmp/*.txt /var/tmp
|
||||
TDIR="/usr/local/share/zfs"
|
||||
;;
|
||||
*)
|
||||
# use xfs @ /var/tmp for all distros
|
||||
TDIR="/usr/share/zfs"
|
||||
sudo -E modprobe zfs
|
||||
sudo mv -f /var/tmp/*.txt /tmp
|
||||
sudo mkfs.xfs -fq /dev/vdb
|
||||
sudo mount -o noatime /dev/vdb /var/tmp
|
||||
sudo chmod 1777 /var/tmp
|
||||
sudo mv -f /tmp/*.txt /var/tmp
|
||||
;;
|
||||
esac
|
||||
|
||||
# enable io_uring on el9/el10
|
||||
case "$1" in
|
||||
almalinux9|almalinux10|centos-stream*)
|
||||
sudo sysctl kernel.io_uring_disabled=0 > /dev/null
|
||||
sudo -E modprobe zfs
|
||||
TDIR="/usr/share/zfs"
|
||||
;;
|
||||
esac
|
||||
|
||||
@ -111,7 +97,7 @@ fi
|
||||
sudo dmesg -c > dmesg-prerun.txt
|
||||
mount > mount.txt
|
||||
df -h > df-prerun.txt
|
||||
$TDIR/zfs-tests.sh -vKO -s 3GB -T $TAGS
|
||||
$TDIR/zfs-tests.sh -vK -s 3GB -T $TAGS
|
||||
RV=$?
|
||||
df -h > df-postrun.txt
|
||||
echo $RV > tests-exitcode.txt
|
||||
|
.github/workflows/scripts/qemu-7-prepare.sh (vendored, 9 lines changed)
@ -28,16 +28,15 @@ BASE="$HOME/work/zfs/zfs"
MERGE="$BASE/.github/workflows/scripts/merge_summary.awk"

# catch result files of testings (vm's should be there)
for ((i=1; i<=VMs; i++)); do
  rsync -arL zfs@vm$i:$RESPATH/current $RESPATH/vm$i || true
  scp zfs@vm$i:"/var/tmp/*.txt" $RESPATH/vm$i || true
  scp zfs@vm$i:"/var/tmp/*.rpm" $RESPATH/vm$i || true
for i in $(seq 1 $VMs); do
  rsync -arL zfs@192.168.122.1$i:$RESPATH/current $RESPATH/vm$i || true
  scp zfs@192.168.122.1$i:"/var/tmp/*.txt" $RESPATH/vm$i || true
done
cp -f /var/tmp/*.txt $RESPATH || true
cd $RESPATH

# prepare result files for summary
for ((i=1; i<=VMs; i++)); do
for i in $(seq 1 $VMs); do
  file="vm$i/build-stderr.txt"
  test -s $file && mv -f $file build-stderr.txt

.github/workflows/scripts/qemu-8-summary.sh (vendored, 2 lines changed)
@ -45,7 +45,7 @@ fi

echo -e "\nFull logs for download:\n $1\n"

for ((i=1; i<=VMs; i++)); do
for i in $(seq 1 $VMs); do
  rv=$(cat vm$i/tests-exitcode.txt)

  if [ $rv = 0 ]; then
@ -1,8 +0,0 @@
#!/usr/bin/env bash

# Helper script to run after installing dependencies. This brings the VM back
# up and copies over the zfs source directory.
echo "Build modules in QEMU machine"
sudo virsh start openzfs
.github/workflows/scripts/qemu-wait-for-vm.sh vm0
rsync -ar $HOME/work/zfs/zfs zfs@vm0:./
.github/workflows/scripts/qemu-test-repo-vm.sh (vendored, 90 lines changed)
@ -1,90 +0,0 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Do a test install of ZFS from an external repository.
|
||||
#
|
||||
# USAGE:
|
||||
#
|
||||
# ./qemu-test-repo-vm [URL]
|
||||
#
|
||||
# URL: URL to use instead of http://download.zfsonlinux.org
|
||||
# If blank, use the default repo from zfs-release RPM.
|
||||
|
||||
set -e
|
||||
|
||||
source /etc/os-release
|
||||
OS="$ID"
|
||||
VERSION="$VERSION_ID"
|
||||
|
||||
ALTHOST=""
|
||||
if [ -n "$1" ] ; then
|
||||
ALTHOST="$1"
|
||||
fi
|
||||
|
||||
# Write summary to /tmp/repo so our artifacts scripts pick it up
|
||||
mkdir /tmp/repo
|
||||
SUMMARY=/tmp/repo/$OS-$VERSION-summary.txt
|
||||
|
||||
# $1: Repo 'zfs' 'zfs-kmod' 'zfs-testing' 'zfs-testing-kmod'
|
||||
# $2: (optional) Alternate host than 'http://download.zfsonlinux.org' to
|
||||
# install from. Blank means use default from zfs-release RPM.
|
||||
function test_install {
|
||||
repo=$1
|
||||
host=""
|
||||
if [ -n "$2" ] ; then
|
||||
host=$2
|
||||
fi
|
||||
|
||||
args="--disablerepo=zfs --enablerepo=$repo"
|
||||
|
||||
# If we supplied an alternate repo URL, and have not already edited
|
||||
# zfs.repo, then update the repo file.
|
||||
if [ -n "$host" ] && ! grep -q $host /etc/yum.repos.d/zfs.repo ; then
|
||||
sudo sed -i "s;baseurl=http://download.zfsonlinux.org;baseurl=$host;g" /etc/yum.repos.d/zfs.repo
|
||||
fi
|
||||
|
||||
sudo dnf -y install $args zfs zfs-test
|
||||
|
||||
# Load modules and create a simple pool as a sanity test.
|
||||
sudo /usr/share/zfs/zfs.sh -r
|
||||
truncate -s 100M /tmp/file
|
||||
sudo zpool create tank /tmp/file
|
||||
sudo zpool status
|
||||
|
||||
# Print out repo name, rpm installed (kmod or dkms), and repo URL
|
||||
baseurl=$(grep -A 5 "\[$repo\]" /etc/yum.repos.d/zfs.repo | awk -F'=' '/baseurl=/{print $2; exit}')
|
||||
package=$(sudo rpm -qa | grep zfs | grep -E 'kmod|dkms')
|
||||
|
||||
echo "$repo $package $baseurl" >> $SUMMARY
|
||||
|
||||
sudo zpool destroy tank
|
||||
sudo rm /tmp/file
|
||||
sudo dnf -y remove zfs
|
||||
}
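
As the almalinux branch below shows, test_install is then simply called once per repo flavour, optionally with an alternate mirror; a hedged usage sketch (the mirror URL is a placeholder):

# Test an install from the zfs-kmod repo, pulling packages from a mirror
# instead of the default download.zfsonlinux.org baseurl.
test_install zfs-kmod "http://mirror.example.internal"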
|
||||
|
||||
echo "##[group]Installing from repo"
|
||||
# The openzfs docs are the authoritative instructions for the install. Use
|
||||
# the specific version of zfs-release RPM it recommends.
|
||||
case $OS in
|
||||
almalinux*)
|
||||
url='https://raw.githubusercontent.com/openzfs/openzfs-docs/refs/heads/master/docs/Getting%20Started/RHEL-based%20distro/index.rst'
|
||||
name=$(curl -Ls $url | grep 'dnf install' | grep -Eo 'zfs-release-[0-9]+-[0-9]+')
|
||||
sudo dnf -y install https://zfsonlinux.org/epel/$name$(rpm --eval "%{dist}").noarch.rpm 2>&1
|
||||
sudo rpm -qi zfs-release
|
||||
test_install zfs $ALTHOST
|
||||
test_install zfs-kmod $ALTHOST
|
||||
test_install zfs-testing $ALTHOST
|
||||
test_install zfs-testing-kmod $ALTHOST
|
||||
;;
|
||||
fedora*)
|
||||
url='https://raw.githubusercontent.com/openzfs/openzfs-docs/refs/heads/master/docs/Getting%20Started/Fedora/index.rst'
|
||||
name=$(curl -Ls $url | grep 'dnf install' | grep -Eo 'zfs-release-[0-9]+-[0-9]+')
|
||||
sudo dnf -y install https://zfsonlinux.org/fedora/$name$(rpm --eval "%{dist}").noarch.rpm
|
||||
test_install zfs $ALTHOST
|
||||
;;
|
||||
esac
|
||||
echo "##[endgroup]"
|
||||
|
||||
# Write out a simple version of the summary here. Later on we will collate all
|
||||
# the summaries and put them into a nice table in the workflow Summary page.
|
||||
echo "Summary: "
|
||||
cat $SUMMARY
.github/workflows/scripts/qemu-wait-for-vm.sh (vendored, 10 lines changed)
@ -1,10 +0,0 @@
#!/bin/bash
#
# Wait for a VM to boot up and become active. This is used in a number of our
# scripts.
#
# $1: VM hostname or IP address

while pidof /usr/bin/qemu-system-x86_64 >/dev/null; do
  ssh 2>/dev/null zfs@$1 "uname -a" && break
done
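
Elsewhere in these workflows the helper is used as a blocking wait right after a domain is started; qemu-5-setup.sh, for example, loops over all test VMs like this:

for ((i=1; i<=VMs; i++)); do
  .github/workflows/scripts/qemu-wait-for-vm.sh vm$i
done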
@ -1,32 +0,0 @@
#!/bin/bash
#
# Recursively go through a directory structure and replace duplicate files with
# symlinks. This cuts down our RPM repo size by ~25%.
#
# replace-dupes-with-symlinks.sh [DIR]
#
# DIR: Directory to traverse. Defaults to current directory if not specified.
#

src="$1"
if [ -z "$src" ] ; then
  src="."
fi

declare -A db

pushd "$src"
while read line ; do
  bn="$(basename $line)"
  if [ -z "${db[$bn]}" ] ; then
    # First time this file has been seen
    db[$bn]="$line"
  else
    if diff -b "$line" "${db[$bn]}" &>/dev/null ; then
      # Files are the same, make a symlink
      rm "$line"
      ln -sr "${db[$bn]}" "$line"
    fi
  fi
done <<< "$(find . -type f)"
popd
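
In the packages workflow further down, this script is run over the assembled repo tree right before it is tarred up for upload; the relative symlinks (ln -sr) keep the tree relocatable inside the artifact. A condensed sketch of that step (the tar name is simplified here):

# Deduplicate identical RPMs across the repo tree, then archive it.
.github/workflows/scripts/replace-dupes-with-symlinks.sh /tmp/repo
tar -cf repo.tar -C /tmp repo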
.github/workflows/zfs-qemu-packages.yml (vendored, 151 lines changed)
@ -1,151 +0,0 @@
|
||||
# This workflow is used to build and test RPM packages. It is a
|
||||
# 'workflow_dispatch' workflow, which means it gets run manually.
|
||||
#
|
||||
# The workflow has a dropdown menu with two options:
|
||||
#
|
||||
# Build RPMs - Build release RPMs and tarballs and put them into an artifact
|
||||
# ZIP file. The directory structure used in the ZIP file mirrors
|
||||
# the ZFS yum repo.
|
||||
#
|
||||
# Test repo - Test install the ZFS RPMs from the ZFS repo. On EL distros, this
|
||||
# will do a DKMS and KMOD test install from both the regular and
|
||||
# testing repos. On Fedora, it will do a DKMS install from the
|
||||
# regular repo. All test install results will be displayed in the
|
||||
# Summary page. Note that the workflow provides an optional
|
||||
# text box where you can specify the full URL to an alternate repo.
|
||||
# If left blank, it will install from the default repo from the
|
||||
# zfs-release RPM (http://download.zfsonlinux.org).
|
||||
#
|
||||
# Most users will never need to use this workflow. It will be used primarily by
|
||||
# ZFS admins for building and testing releases.
|
||||
#
|
||||
name: zfs-qemu-packages
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
test_type:
|
||||
type: choice
|
||||
required: false
|
||||
default: "Build RPMs"
|
||||
description: "Build RPMs or test the repo?"
|
||||
options:
|
||||
- "Build RPMs"
|
||||
- "Test repo"
|
||||
patch_level:
|
||||
type: string
|
||||
required: false
|
||||
default: ""
|
||||
description: "(optional) patch level number"
|
||||
repo_url:
|
||||
type: string
|
||||
required: false
|
||||
default: ""
|
||||
description: "(optional) repo URL (blank: use http://download.zfsonlinux.org)"
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
zfs-qemu-packages-jobs:
|
||||
name: qemu-VMs
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
os: ['almalinux8', 'almalinux9', 'almalinux10', 'fedora41', 'fedora42']
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ github.event.pull_request.head.sha }}
|
||||
|
||||
- name: Setup QEMU
|
||||
timeout-minutes: 10
|
||||
run: .github/workflows/scripts/qemu-1-setup.sh
|
||||
|
||||
- name: Start build machine
|
||||
timeout-minutes: 10
|
||||
run: .github/workflows/scripts/qemu-2-start.sh ${{ matrix.os }}
|
||||
|
||||
- name: Install dependencies
|
||||
timeout-minutes: 20
|
||||
run: |
|
||||
.github/workflows/scripts/qemu-3-deps.sh ${{ matrix.os }}
|
||||
|
||||
- name: Build modules or Test repo
|
||||
timeout-minutes: 30
|
||||
run: |
|
||||
set -e
|
||||
if [ "${{ github.event.inputs.test_type }}" == "Test repo" ] ; then
|
||||
# Bring VM back up and copy over zfs source
|
||||
.github/workflows/scripts/qemu-prepare-for-build.sh
|
||||
|
||||
mkdir -p /tmp/repo
|
||||
ssh zfs@vm0 '$HOME/zfs/.github/workflows/scripts/qemu-test-repo-vm.sh' ${{ github.event.inputs.repo_url }}
|
||||
else
|
||||
EXTRA=""
|
||||
if [ -n "${{ github.event.inputs.patch_level }}" ] ; then
|
||||
EXTRA="--patch-level ${{ github.event.inputs.patch_level }}"
|
||||
fi
|
||||
|
||||
.github/workflows/scripts/qemu-4-build.sh $EXTRA \
|
||||
--repo --release --dkms --tarball ${{ matrix.os }}
|
||||
fi
|
||||
|
||||
- name: Prepare artifacts
|
||||
if: always()
|
||||
timeout-minutes: 10
|
||||
run: |
|
||||
rsync -a zfs@vm0:/tmp/repo /tmp || true
|
||||
.github/workflows/scripts/replace-dupes-with-symlinks.sh /tmp/repo
|
||||
tar -cf ${{ matrix.os }}-repo.tar -C /tmp repo
|
||||
|
||||
- uses: actions/upload-artifact@v4
|
||||
id: artifact-upload
|
||||
if: always()
|
||||
with:
|
||||
name: ${{ matrix.os }}-repo
|
||||
path: ${{ matrix.os }}-repo.tar
|
||||
compression-level: 0
|
||||
retention-days: 2
|
||||
if-no-files-found: ignore
|
||||
|
||||
combine_repos:
|
||||
if: always()
|
||||
needs: [zfs-qemu-packages-jobs]
|
||||
name: "Results"
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/download-artifact@v4
|
||||
id: artifact-download
|
||||
if: always()
|
||||
- name: Test Summary
|
||||
if: always()
|
||||
run: |
|
||||
for i in $(find . -type f -iname "*.tar") ; do
|
||||
tar -xf $i -C /tmp
|
||||
done
|
||||
tar -cf all-repo.tar -C /tmp repo
|
||||
|
||||
# If we're installing from a repo, print out the summary of the versions
|
||||
# that got installed using Markdown.
|
||||
if [ "${{ github.event.inputs.test_type }}" == "Test repo" ] ; then
|
||||
cd /tmp/repo
|
||||
for i in $(ls *.txt) ; do
|
||||
nicename="$(echo $i | sed 's/.txt//g; s/-/ /g')"
|
||||
echo "### $nicename" >> $GITHUB_STEP_SUMMARY
|
||||
echo "|repo|RPM|URL|" >> $GITHUB_STEP_SUMMARY
|
||||
echo "|:---|:---|:---|" >> $GITHUB_STEP_SUMMARY
|
||||
awk '{print "|"$1"|"$2"|"$3"|"}' $i >> $GITHUB_STEP_SUMMARY
|
||||
done
|
||||
fi
|
||||
|
||||
- uses: actions/upload-artifact@v4
|
||||
id: artifact-upload2
|
||||
if: always()
|
||||
with:
|
||||
name: all-repo
|
||||
path: all-repo.tar
|
||||
compression-level: 0
|
||||
retention-days: 5
|
||||
if-no-files-found: ignore
|
.github/workflows/zfs-qemu.yml (vendored, 83 lines changed)
@ -5,11 +5,16 @@ on:
|
||||
pull_request:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
fedora_kernel_ver:
|
||||
type: string
|
||||
include_stream9:
|
||||
type: boolean
|
||||
required: false
|
||||
default: ""
|
||||
description: "(optional) Experimental kernel version to install on Fedora (like '6.14' or '6.13.3-0.rc3')"
|
||||
default: false
|
||||
description: 'Test on CentOS 9 stream'
|
||||
include_stream10:
|
||||
type: boolean
|
||||
required: false
|
||||
default: false
|
||||
description: 'Test on CentOS 10 stream'
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||
@ -29,8 +34,8 @@ jobs:
|
||||
- name: Generate OS config and CI type
|
||||
id: os
|
||||
run: |
|
||||
FULL_OS='["almalinux8", "almalinux9", "almalinux10", "centos-stream9", "centos-stream10", "debian12", "debian13", "fedora41", "fedora42", "freebsd13-5r", "freebsd14-3s", "freebsd15-0c", "ubuntu22", "ubuntu24"]'
|
||||
QUICK_OS='["almalinux8", "almalinux9", "almalinux10", "debian12", "fedora42", "freebsd14-3s", "ubuntu24"]'
|
||||
FULL_OS='["almalinux8", "almalinux9", "debian11", "debian12", "fedora40", "fedora41", "freebsd13-3r", "freebsd13-4s", "freebsd14-1r", "freebsd14-2s", "freebsd15-0c", "ubuntu20", "ubuntu22", "ubuntu24"]'
|
||||
QUICK_OS='["almalinux8", "almalinux9", "debian12", "fedora41", "freebsd13-3r", "freebsd14-2r", "ubuntu24"]'
|
||||
# determine CI type when running on PR
|
||||
ci_type="full"
|
||||
if ${{ github.event_name == 'pull_request' }}; then
|
||||
@ -43,31 +48,35 @@ jobs:
|
||||
else
|
||||
os_selection="$FULL_OS"
|
||||
fi
|
||||
|
||||
if ${{ github.event.inputs.fedora_kernel_ver != '' }}; then
|
||||
# They specified a custom kernel version for Fedora. Use only
|
||||
# Fedora runners.
|
||||
os_json=$(echo ${os_selection} | jq -c '[.[] | select(startswith("fedora"))]')
|
||||
else
|
||||
# Normal case
|
||||
os_json=$(echo ${os_selection} | jq -c)
|
||||
os_json=$(echo ${os_selection} | jq -c)
|
||||
|
||||
# Add optional runners
|
||||
if [ "${{ github.event.inputs.include_stream9 }}" == 'true' ]; then
|
||||
os_json=$(echo $os_json | jq -c '. += ["centos-stream9"]')
|
||||
fi
|
||||
if [ "${{ github.event.inputs.include_stream10 }}" == 'true' ]; then
|
||||
os_json=$(echo $os_json | jq -c '. += ["centos-stream10"]')
|
||||
fi
|
||||
|
||||
echo $os_json
|
||||
echo "os=$os_json" >> $GITHUB_OUTPUT
|
||||
echo "ci_type=$ci_type" >> $GITHUB_OUTPUT
|
||||
|
||||
echo "os=$os_json" | tee -a $GITHUB_OUTPUT
|
||||
echo "ci_type=$ci_type" | tee -a $GITHUB_OUTPUT
|
||||
|
||||
|
||||
|
||||
qemu-vm:
|
||||
name: qemu-x86
|
||||
needs: [ test-config ]
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
# rhl: almalinux8, almalinux9, centos-stream9, fedora4x
|
||||
# debian: debian12, debian13, ubuntu22, ubuntu24
|
||||
# rhl: almalinux8, almalinux9, centos-stream9, fedora40, fedora41
|
||||
# debian: debian11, debian12, ubuntu20, ubuntu22, ubuntu24
|
||||
# misc: archlinux, tumbleweed
|
||||
# FreeBSD variants of 2025-06:
|
||||
# FreeBSD Release: freebsd13-5r, freebsd14-2r, freebsd14-3r
|
||||
# FreeBSD Stable: freebsd13-5s, freebsd14-3s
|
||||
# FreeBSD variants of 2024-12:
|
||||
# FreeBSD Release: freebsd13-3r, freebsd13-4r, freebsd14-1r, freebsd14-2r
|
||||
# FreeBSD Stable: freebsd13-4s, freebsd14-2s
|
||||
# FreeBSD Current: freebsd15-0c
|
||||
os: ${{ fromJson(needs.test-config.outputs.test_os) }}
|
||||
runs-on: ubuntu-24.04
|
||||
@ -77,12 +86,8 @@ jobs:
|
||||
ref: ${{ github.event.pull_request.head.sha }}
|
||||
|
||||
- name: Setup QEMU
|
||||
timeout-minutes: 20
|
||||
run: |
|
||||
# Add a timestamp to each line to debug timeouts
|
||||
while IFS=$'\n' read -r line; do
|
||||
echo "$(date +'%H:%M:%S') $line"
|
||||
done < <(.github/workflows/scripts/qemu-1-setup.sh)
|
||||
timeout-minutes: 10
|
||||
run: .github/workflows/scripts/qemu-1-setup.sh
|
||||
|
||||
- name: Start build machine
|
||||
timeout-minutes: 10
|
||||
@ -90,11 +95,31 @@ jobs:
|
||||
|
||||
- name: Install dependencies
|
||||
timeout-minutes: 20
|
||||
run: .github/workflows/scripts/qemu-3-deps.sh ${{ matrix.os }} ${{ github.event.inputs.fedora_kernel_ver }}
|
||||
run: |
|
||||
echo "Install dependencies in QEMU machine"
|
||||
IP=192.168.122.10
|
||||
while pidof /usr/bin/qemu-system-x86_64 >/dev/null; do
|
||||
ssh 2>/dev/null zfs@$IP "uname -a" && break
|
||||
done
|
||||
scp .github/workflows/scripts/qemu-3-deps.sh zfs@$IP:qemu-3-deps.sh
|
||||
PID=`pidof /usr/bin/qemu-system-x86_64`
|
||||
ssh zfs@$IP '$HOME/qemu-3-deps.sh' ${{ matrix.os }}
|
||||
# wait for poweroff to succeed
|
||||
tail --pid=$PID -f /dev/null
|
||||
sleep 5 # avoid this: "error: Domain is already active"
|
||||
rm -f $HOME/.ssh/known_hosts
|
||||
|
||||
- name: Build modules
|
||||
timeout-minutes: 30
|
||||
run: .github/workflows/scripts/qemu-4-build.sh --poweroff --enable-debug ${{ matrix.os }}
|
||||
run: |
|
||||
echo "Build modules in QEMU machine"
|
||||
sudo virsh start openzfs
|
||||
IP=192.168.122.10
|
||||
while pidof /usr/bin/qemu-system-x86_64 >/dev/null; do
|
||||
ssh 2>/dev/null zfs@$IP "uname -a" && break
|
||||
done
|
||||
rsync -ar $HOME/work/zfs/zfs zfs@$IP:./
|
||||
ssh zfs@$IP '$HOME/zfs/.github/workflows/scripts/qemu-4-build.sh' ${{ matrix.os }}
|
||||
|
||||
- name: Setup testing machines
|
||||
timeout-minutes: 5
|
||||
|
.github/workflows/zloop.yml (vendored, 26 lines changed)
@ -12,8 +12,7 @@ jobs:
|
||||
zloop:
|
||||
runs-on: ubuntu-24.04
|
||||
env:
|
||||
WORK_DIR: /mnt/zloop
|
||||
CORE_DIR: /mnt/zloop/cores
|
||||
TEST_DIR: /var/tmp/zloop
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
@ -21,7 +20,7 @@ jobs:
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
sudo apt-get purge -y snapd google-chrome-stable firefox
|
||||
ONLY_DEPS=1 .github/workflows/scripts/qemu-3-deps-vm.sh ubuntu24
|
||||
ONLY_DEPS=1 .github/workflows/scripts/qemu-3-deps.sh ubuntu24
|
||||
- name: Autogen.sh
|
||||
run: |
|
||||
sed -i '/DEBUG_CFLAGS="-Werror"/s/^/#/' config/zfs-build.m4
|
||||
@ -41,37 +40,38 @@ jobs:
|
||||
sudo modprobe zfs
|
||||
- name: Tests
|
||||
run: |
|
||||
sudo truncate -s 256G /mnt/vdev
|
||||
sudo zpool create cipool -m $WORK_DIR -O compression=on -o autotrim=on /mnt/vdev
|
||||
sudo /usr/share/zfs/zloop.sh -t 600 -I 6 -l -m 1 -c $CORE_DIR -f $WORK_DIR -- -T 120 -P 60
|
||||
sudo mkdir -p $TEST_DIR
|
||||
# run for 10 minutes or at most 6 iterations for a maximum runner
|
||||
# time of 60 minutes.
|
||||
sudo /usr/share/zfs/zloop.sh -t 600 -I 6 -l -m 1 -- -T 120 -P 60
|
||||
- name: Prepare artifacts
|
||||
if: failure()
|
||||
run: |
|
||||
sudo chmod +r -R $WORK_DIR/
|
||||
sudo chmod +r -R $TEST_DIR/
|
||||
- name: Ztest log
|
||||
if: failure()
|
||||
run: |
|
||||
grep -B10 -A1000 'ASSERT' $CORE_DIR/*/ztest.out || tail -n 1000 $CORE_DIR/*/ztest.out
|
||||
grep -B10 -A1000 'ASSERT' $TEST_DIR/*/ztest.out || tail -n 1000 $TEST_DIR/*/ztest.out
|
||||
- name: Gdb log
|
||||
if: failure()
|
||||
run: |
|
||||
sed -n '/Backtraces (full)/q;p' $CORE_DIR/*/ztest.gdb
|
||||
sed -n '/Backtraces (full)/q;p' $TEST_DIR/*/ztest.gdb
|
||||
- name: Zdb log
|
||||
if: failure()
|
||||
run: |
|
||||
cat $CORE_DIR/*/ztest.zdb
|
||||
cat $TEST_DIR/*/ztest.zdb
|
||||
- uses: actions/upload-artifact@v4
|
||||
if: failure()
|
||||
with:
|
||||
name: Logs
|
||||
path: |
|
||||
/mnt/zloop/*/
|
||||
!/mnt/zloop/cores/*/vdev/
|
||||
/var/tmp/zloop/*/
|
||||
!/var/tmp/zloop/*/vdev/
|
||||
if-no-files-found: ignore
|
||||
- uses: actions/upload-artifact@v4
|
||||
if: failure()
|
||||
with:
|
||||
name: Pool files
|
||||
path: |
|
||||
/mnt/zloop/cores/*/vdev/
|
||||
/var/tmp/zloop/*/vdev/
|
||||
if-no-files-found: ignore
|
||||
|
.mailmap (15 lines changed)
@ -23,7 +23,6 @@
|
||||
# These maps are making names consistent where they have varied but the email
|
||||
# address has never changed. In most cases, the full name is in the
|
||||
# Signed-off-by of a commit with a matching author.
|
||||
Achill Gilgenast <achill@achill.org>
|
||||
Ahelenia Ziemiańska <nabijaczleweli@gmail.com>
|
||||
Ahelenia Ziemiańska <nabijaczleweli@nabijaczleweli.xyz>
|
||||
Alex John <alex@stty.io>
|
||||
@ -38,7 +37,6 @@ Crag Wang <crag0715@gmail.com>
|
||||
Damian Szuberski <szuberskidamian@gmail.com>
|
||||
Daniel Kolesa <daniel@octaforge.org>
|
||||
Debabrata Banerjee <dbavatar@gmail.com>
|
||||
Diwakar Kristappagari <diwakar-k@hpe.com>
|
||||
Finix Yan <yanchongwen@hotmail.com>
|
||||
Gaurav Kumar <gauravk.18@gmail.com>
|
||||
Gionatan Danti <g.danti@assyoma.it>
|
||||
@ -73,7 +71,6 @@ Rob Norris <rob.norris@klarasystems.com>
|
||||
Sam Lunt <samuel.j.lunt@gmail.com>
|
||||
Sanjeev Bagewadi <sanjeev.bagewadi@gmail.com>
|
||||
Sebastian Wuerl <s.wuerl@mailbox.org>
|
||||
SHENGYI HONG <aokblast@FreeBSD.org>
|
||||
Stoiko Ivanov <github@nomore.at>
|
||||
Tamas TEVESZ <ice@extreme.hu>
|
||||
WHR <msl0000023508@gmail.com>
|
||||
@ -81,14 +78,9 @@ Yanping Gao <yanping.gao@xtaotech.com>
|
||||
Youzhong Yang <youzhong@gmail.com>
|
||||
|
||||
# Signed-off-by: overriding Author:
|
||||
Alexander Ziaee <ziaee@FreeBSD.org> <concussious@runbox.com>
|
||||
Felix Schmidt <felixschmidt20@aol.com> <f.sch.prototype@gmail.com>
|
||||
Olivier Certner <olce@FreeBSD.org> <olce.freebsd@certner.fr>
|
||||
Phil Sutter <phil@nwl.cc> <p.github@nwl.cc>
|
||||
poscat <poscat@poscat.moe> <poscat0x04@outlook.com>
|
||||
Qiuhao Chen <chenqiuhao1997@gmail.com> <haohao0924@126.com>
|
||||
Ryan <errornointernet@envs.net> <error.nointernet@gmail.com>
|
||||
Sietse <sietse@wizdom.nu> <uglymotha@wizdom.nu>
|
||||
Qiuhao Chen <chenqiuhao1997@gmail.com> <haohao0924@126.com>
|
||||
Yuxin Wang <yuxinwang9999@gmail.com> <Bi11gates9999@gmail.com>
|
||||
Zhenlei Huang <zlei@FreeBSD.org> <zlei.huang@gmail.com>
|
||||
|
||||
@ -105,7 +97,6 @@ Tulsi Jain <tulsi.jain@delphix.com> <tulsi.jain@Tulsi-Jains-MacBook-Pro.local>
|
||||
# Mappings from Github no-reply addresses
|
||||
ajs124 <git@ajs124.de> <ajs124@users.noreply.github.com>
|
||||
Alek Pinchuk <apinchuk@axcient.com> <alek-p@users.noreply.github.com>
|
||||
Aleksandr Liber <aleksandr.liber@perforce.com> <61714074+AleksandrLiber@users.noreply.github.com>
|
||||
Alexander Lobakin <alobakin@pm.me> <solbjorn@users.noreply.github.com>
|
||||
Alexey Smirnoff <fling@member.fsf.org> <fling-@users.noreply.github.com>
|
||||
Allen Holl <allen.m.holl@gmail.com> <65494904+allen-4@users.noreply.github.com>
|
||||
@ -142,12 +133,10 @@ Fedor Uporov <fuporov.vstack@gmail.com> <60701163+fuporovvStack@users.noreply.gi
|
||||
Felix Dörre <felix@dogcraft.de> <felixdoerre@users.noreply.github.com>
|
||||
Felix Neumärker <xdch47@posteo.de> <34678034+xdch47@users.noreply.github.com>
|
||||
Finix Yan <yancw@info2soft.com> <Finix1979@users.noreply.github.com>
|
||||
Friedrich Weber <f.weber@proxmox.com> <56110206+frwbr@users.noreply.github.com>
|
||||
Gaurav Kumar <gauravk.18@gmail.com> <gaurkuma@users.noreply.github.com>
|
||||
George Gaydarov <git@gg7.io> <gg7@users.noreply.github.com>
|
||||
Georgy Yakovlev <gyakovlev@gentoo.org> <168902+gyakovlev@users.noreply.github.com>
|
||||
Gerardwx <gerardw@alum.mit.edu> <Gerardwx@users.noreply.github.com>
|
||||
Germano Massullo <germano.massullo@gmail.com> <Germano0@users.noreply.github.com>
|
||||
Gian-Carlo DeFazio <defazio1@llnl.gov> <defaziogiancarlo@users.noreply.github.com>
|
||||
Giuseppe Di Natale <dinatale2@llnl.gov> <dinatale2@users.noreply.github.com>
|
||||
Hajo Möller <dasjoe@gmail.com> <dasjoe@users.noreply.github.com>
|
||||
@ -167,7 +156,6 @@ John Ramsden <johnramsden@riseup.net> <johnramsden@users.noreply.github.com>
|
||||
Jonathon Fernyhough <jonathon@m2x.dev> <559369+jonathonf@users.noreply.github.com>
|
||||
Jose Luis Duran <jlduran@gmail.com> <jlduran@users.noreply.github.com>
|
||||
Justin Hibbits <chmeeedalf@gmail.com> <chmeeedalf@users.noreply.github.com>
|
||||
Kaitlin Hoang <kthoang@amazon.com> <khoang98@users.noreply.github.com>
|
||||
Kevin Greene <kevin.greene@delphix.com> <104801862+kxgreene@users.noreply.github.com>
|
||||
Kevin Jin <lostking2008@hotmail.com> <33590050+jxdking@users.noreply.github.com>
|
||||
Kevin P. Fleming <kevin@km6g.us> <kpfleming@users.noreply.github.com>
|
||||
@ -219,7 +207,6 @@ Torsten Wörtwein <twoertwein@gmail.com> <twoertwein@users.noreply.github.com>
|
||||
Tulsi Jain <tulsi.jain@delphix.com> <TulsiJain@users.noreply.github.com>
|
||||
Václav Skála <skala@vshosting.cz> <33496485+vaclavskala@users.noreply.github.com>
|
||||
Vaibhav Bhanawat <vaibhav.bhanawat@delphix.com> <88050553+vaibhav-delphix@users.noreply.github.com>
|
||||
Vandana Rungta <vrungta@amazon.com> <46906819+vandanarungta@users.noreply.github.com>
|
||||
Violet Purcell <vimproved@inventati.org> <66446404+vimproved@users.noreply.github.com>
|
||||
Vipin Kumar Verma <vipin.verma@hpe.com> <75025470+vermavipinkumar@users.noreply.github.com>
|
||||
Wolfgang Bumiller <w.bumiller@proxmox.com> <Blub@users.noreply.github.com>
|
||||
|
AUTHORS (37 lines changed)
@ -10,7 +10,6 @@ PAST MAINTAINERS:
|
||||
CONTRIBUTORS:
|
||||
|
||||
Aaron Fineman <abyxcos@gmail.com>
|
||||
Achill Gilgenast <achill@achill.org>
|
||||
Adam D. Moss <c@yotes.com>
|
||||
Adam Leventhal <ahl@delphix.com>
|
||||
Adam Stevko <adam.stevko@gmail.com>
|
||||
@ -30,7 +29,6 @@ CONTRIBUTORS:
|
||||
Alejandro Colomar <Colomar.6.4.3@GMail.com>
|
||||
Alejandro R. Sedeño <asedeno@mit.edu>
|
||||
Alek Pinchuk <alek@nexenta.com>
|
||||
Aleksandr Liber <aleksandr.liber@perforce.com>
|
||||
Aleksa Sarai <cyphar@cyphar.com>
|
||||
Alexander Eremin <a.eremin@nexenta.com>
|
||||
Alexander Lobakin <alobakin@pm.me>
|
||||
@ -38,7 +36,6 @@ CONTRIBUTORS:
|
||||
Alexander Pyhalov <apyhalov@gmail.com>
|
||||
Alexander Richardson <Alexander.Richardson@cl.cam.ac.uk>
|
||||
Alexander Stetsenko <ams@nexenta.com>
|
||||
Alexander Ziaee <ziaee@FreeBSD.org>
|
||||
Alex Braunegg <alex.braunegg@gmail.com>
|
||||
Alexey Shvetsov <alexxy@gentoo.org>
|
||||
Alexey Smirnoff <fling@member.fsf.org>
|
||||
@ -60,7 +57,6 @@ CONTRIBUTORS:
|
||||
Andreas Buschmann <andreas.buschmann@tech.net.de>
|
||||
Andreas Dilger <adilger@intel.com>
|
||||
Andreas Vögele <andreas@andreasvoegele.com>
|
||||
Andres <a-d-j-i@users.noreply.github.com>
|
||||
Andrew Barnes <barnes333@gmail.com>
|
||||
Andrew Hamilton <ahamilto@tjhsst.edu>
|
||||
Andrew Innes <andrew.c12@gmail.com>
|
||||
@ -74,7 +70,6 @@ CONTRIBUTORS:
|
||||
Andrey Prokopenko <job@terem.fr>
|
||||
Andrey Vesnovaty <andrey.vesnovaty@gmail.com>
|
||||
Andriy Gapon <avg@freebsd.org>
|
||||
Andriy Tkachuk <andriy.tkachuk@seagate.com>
|
||||
Andy Bakun <github@thwartedefforts.org>
|
||||
Andy Fiddaman <omnios@citrus-it.co.uk>
|
||||
Aniruddha Shankar <k@191a.net>
|
||||
@ -85,7 +80,6 @@ CONTRIBUTORS:
|
||||
Arne Jansen <arne@die-jansens.de>
|
||||
Aron Xu <happyaron.xu@gmail.com>
|
||||
Arshad Hussain <arshad.hussain@aeoncomputing.com>
|
||||
Artem <artem.vlasenko@ossrevival.org>
|
||||
Arun KV <arun.kv@datacore.com>
|
||||
Arvind Sankar <nivedita@alum.mit.edu>
|
||||
Attila Fülöp <attila@fueloep.org>
|
||||
@ -123,7 +117,6 @@ CONTRIBUTORS:
|
||||
Caleb James DeLisle <calebdelisle@lavabit.com>
|
||||
Cameron Harr <harr1@llnl.gov>
|
||||
Cao Xuewen <cao.xuewen@zte.com.cn>
|
||||
Carl George <carlwgeorge@gmail.com>
|
||||
Carlo Landmeter <clandmeter@gmail.com>
|
||||
Carlos Alberto Lopez Perez <clopez@igalia.com>
|
||||
Cedric Maunoury <cedric.maunoury@gmail.com>
|
||||
@ -204,7 +197,6 @@ CONTRIBUTORS:
|
||||
Dimitri John Ledkov <xnox@ubuntu.com>
|
||||
Dimitry Andric <dimitry@andric.com>
|
||||
Dirkjan Bussink <d.bussink@gmail.com>
|
||||
Diwakar Kristappagari <diwakar-k@hpe.com>
|
||||
Dmitry Khasanov <pik4ez@gmail.com>
|
||||
Dominic Pearson <dsp@technoanimal.net>
|
||||
Dominik Hassler <hadfl@omniosce.org>
|
||||
@ -234,12 +226,10 @@ CONTRIBUTORS:
|
||||
Fedor Uporov <fuporov.vstack@gmail.com>
|
||||
Felix Dörre <felix@dogcraft.de>
|
||||
Felix Neumärker <xdch47@posteo.de>
|
||||
Felix Schmidt <felixschmidt20@aol.com>
|
||||
Feng Sun <loyou85@gmail.com>
|
||||
Finix Yan <yancw@info2soft.com>
|
||||
Francesco Mazzoli <f@mazzo.li>
|
||||
Frederik Wessels <wessels147@gmail.com>
|
||||
Friedrich Weber <f.weber@proxmox.com>
|
||||
Frédéric Vanniere <f.vanniere@planet-work.com>
|
||||
Gabriel A. Devenyi <gdevenyi@gmail.com>
|
||||
Garrett D'Amore <garrett@nexenta.com>
|
||||
@ -255,7 +245,6 @@ CONTRIBUTORS:
|
||||
George Wilson <gwilson@delphix.com>
|
||||
Georgy Yakovlev <ya@sysdump.net>
|
||||
Gerardwx <gerardw@alum.mit.edu>
|
||||
Germano Massullo <germano.massullo@gmail.com>
|
||||
Gian-Carlo DeFazio <defazio1@llnl.gov>
|
||||
Gionatan Danti <g.danti@assyoma.it>
|
||||
Giuseppe Di Natale <guss80@gmail.com>
|
||||
@ -293,14 +282,12 @@ CONTRIBUTORS:
|
||||
Igor K <igor@dilos.org>
|
||||
Igor Kozhukhov <ikozhukhov@gmail.com>
|
||||
Igor Lvovsky <ilvovsky@gmail.com>
|
||||
Igor Ostapenko <pm@igoro.pro>
|
||||
ilbsmart <wgqimut@gmail.com>
|
||||
Ilkka Sovanto <github@ilkka.kapsi.fi>
|
||||
illiliti <illiliti@protonmail.com>
|
||||
ilovezfs <ilovezfs@icloud.com>
|
||||
InsanePrawn <Insane.Prawny@gmail.com>
|
||||
Isaac Huang <he.huang@intel.com>
|
||||
Ivan Volosyuk <Ivan.Volosyuk@gmail.com>
|
||||
Jacek Fefliński <feflik@gmail.com>
|
||||
Jacob Adams <tookmund@gmail.com>
|
||||
Jake Howard <git@theorangeone.net>
|
||||
@ -308,7 +295,6 @@ CONTRIBUTORS:
|
||||
James H <james@kagisoft.co.uk>
|
||||
James Lee <jlee@thestaticvoid.com>
|
||||
James Pan <jiaming.pan@yahoo.com>
|
||||
James Reilly <jreilly1821@gmail.com>
|
||||
James Wah <james@laird-wah.net>
|
||||
Jan Engelhardt <jengelh@inai.de>
|
||||
Jan Kryl <jan.kryl@nexenta.com>
|
||||
@ -320,7 +306,6 @@ CONTRIBUTORS:
|
||||
Jason Lee <jasonlee@lanl.gov>
|
||||
Jason Zaman <jasonzaman@gmail.com>
|
||||
Javen Wu <wu.javen@gmail.com>
|
||||
Jaydeep Kshirsagar <jkshirsagar@maxlinear.com>
|
||||
Jean-Baptiste Lallement <jean-baptiste@ubuntu.com>
|
||||
Jeff Dike <jdike@akamai.com>
|
||||
Jeremy Faulkner <gldisater@gmail.com>
|
||||
@ -328,12 +313,10 @@ CONTRIBUTORS:
|
||||
Jeremy Jones <jeremy@delphix.com>
|
||||
Jeremy Visser <jeremy.visser@gmail.com>
|
||||
Jerry Jelinek <jerry.jelinek@joyent.com>
|
||||
Jerzy Kołosowski <jerzy@kolosowscy.pl>
|
||||
Jessica Clarke <jrtc27@jrtc27.com>
|
||||
Jinshan Xiong <jinshan.xiong@intel.com>
|
||||
Jitendra Patidar <jitendra.patidar@nutanix.com>
|
||||
JK Dingwall <james@dingwall.me.uk>
|
||||
Joel Low <joel@joelsplace.sg>
|
||||
Joe Stein <joe.stein@delphix.com>
|
||||
John-Mark Gurney <jmg@funkthat.com>
|
||||
John Albietz <inthecloud247@gmail.com>
|
||||
@ -382,7 +365,6 @@ CONTRIBUTORS:
|
||||
Kevin Jin <lostking2008@hotmail.com>
|
||||
Kevin P. Fleming <kevin@km6g.us>
|
||||
Kevin Tanguy <kevin.tanguy@ovh.net>
|
||||
khoang98 <khoang98@users.noreply.github.com>
|
||||
KireinaHoro <i@jsteward.moe>
|
||||
Kjeld Schouten-Lebbing <kjeld@schouten-lebbing.nl>
|
||||
Kleber Tarcísio <klebertarcisio@yahoo.com.br>
|
||||
@ -390,7 +372,6 @@ CONTRIBUTORS:
|
||||
Kohsuke Kawaguchi <kk@kohsuke.org>
|
||||
Konstantin Khorenko <khorenko@virtuozzo.com>
|
||||
KORN Andras <korn@elan.rulez.org>
|
||||
kotauskas <v.toncharov@gmail.com>
|
||||
Kristof Provost <github@sigsegv.be>
|
||||
Krzysztof Piecuch <piecuch@kpiecuch.pl>
|
||||
Kyle Blatter <kyleblatter@llnl.gov>
|
||||
@ -456,7 +437,6 @@ CONTRIBUTORS:
|
||||
Max Zettlmeißl <max@zettlmeissl.de>
|
||||
Md Islam <mdnahian@outlook.com>
|
||||
megari <megari@iki.fi>
|
||||
Meriel Luna Mittelbach <lunarlambda@gmail.com>
|
||||
Michael D Labriola <michael.d.labriola@gmail.com>
|
||||
Michael Franzl <michael@franzl.name>
|
||||
Michael Gebetsroither <michael@mgeb.org>
|
||||
@ -472,7 +452,6 @@ CONTRIBUTORS:
|
||||
Mike Swanson <mikeonthecomputer@gmail.com>
|
||||
Milan Jurik <milan.jurik@xylab.cz>
|
||||
Minsoo Choo <minsoochoo0122@proton.me>
|
||||
mnrx <mnrx@users.noreply.github.com>
|
||||
Mohamed Tawfik <m_tawfik@aucegypt.edu>
|
||||
Morgan Jones <mjones@rice.edu>
|
||||
Moritz Maxeiner <moritz@ucworks.org>
|
||||
@ -498,13 +477,12 @@ CONTRIBUTORS:
|
||||
Olaf Faaland <faaland1@llnl.gov>
|
||||
Oleg Drokin <green@linuxhacker.ru>
|
||||
Oleg Stepura <oleg@stepura.com>
|
||||
Olivier Certner <olce@FreeBSD.org>
|
||||
Olivier Certner <olce.freebsd@certner.fr>
|
||||
Olivier Mazouffre <olivier.mazouffre@ims-bordeaux.fr>
|
||||
omni <omni+vagant@hack.org>
|
||||
Orivej Desh <orivej@gmx.fr>
|
||||
Pablo Correa Gómez <ablocorrea@hotmail.com>
|
||||
Palash Gandhi <pbg4930@rit.edu>
|
||||
Patrick Fasano <patrick@patrickfasano.com>
|
||||
Patrick Mooney <pmooney@pfmooney.com>
|
||||
Patrik Greco <sikevux@sikevux.se>
|
||||
Paul B. Henson <henson@acm.org>
|
||||
@ -516,7 +494,6 @@ CONTRIBUTORS:
|
||||
Pawel Jakub Dawidek <pjd@FreeBSD.org>
|
||||
Pedro Giffuni <pfg@freebsd.org>
|
||||
Peng <peng.hse@xtaotech.com>
|
||||
Peng Liu <littlenewton6@gmail.com>
|
||||
Peter Ashford <ashford@accs.com>
|
||||
Peter Dave Hello <hsu@peterdavehello.org>
|
||||
Peter Doherty <peterd@acranox.org>
|
||||
@ -526,18 +503,15 @@ CONTRIBUTORS:
|
||||
Philip Pokorny <ppokorny@penguincomputing.com>
|
||||
Philipp Riederer <pt@philipptoelke.de>
|
||||
Phil Kauffman <philip@kauffman.me>
|
||||
Phil Sutter <phil@nwl.cc>
|
||||
Ping Huang <huangping@smartx.com>
|
||||
Piotr Kubaj <pkubaj@anongoth.pl>
|
||||
Piotr P. Stefaniak <pstef@freebsd.org>
|
||||
poscat <poscat@poscat.moe>
|
||||
Prakash Surya <prakash.surya@delphix.com>
|
||||
Prasad Joshi <prasadjoshi124@gmail.com>
|
||||
privb0x23 <privb0x23@users.noreply.github.com>
|
||||
P.SCH <p88@yahoo.com>
|
||||
Qiuhao Chen <chenqiuhao1997@gmail.com>
|
||||
Quartz <yyhran@163.com>
|
||||
Quentin Thébault <quentin.thebault@defenso.fr>
|
||||
Quentin Zdanis <zdanisq@gmail.com>
|
||||
Rafael Kitover <rkitover@gmail.com>
|
||||
RageLtMan <sempervictus@users.noreply.github.com>
|
||||
@ -546,7 +520,6 @@ CONTRIBUTORS:
|
||||
Remy Blank <remy.blank@pobox.com>
|
||||
renelson <bnelson@nelsonbe.com>
|
||||
Reno Reckling <e-github@wthack.de>
|
||||
René Wirnata <rene.wirnata@pandascience.net>
|
||||
Ricardo M. Correia <ricardo.correia@oracle.com>
|
||||
Riccardo Schirone <rschirone91@gmail.com>
|
||||
Richard Allen <belperite@gmail.com>
|
||||
@ -590,7 +563,6 @@ CONTRIBUTORS:
|
||||
Scot W. Stevenson <scot.stevenson@gmail.com>
|
||||
Sean Eric Fagan <sef@ixsystems.com>
|
||||
Sebastian Gottschall <s.gottschall@dd-wrt.com>
|
||||
Sebastian Pauka <me@spauka.se>
|
||||
Sebastian Wuerl <s.wuerl@mailbox.org>
|
||||
Sebastien Roy <seb@delphix.com>
|
||||
Sen Haerens <sen@senhaerens.be>
|
||||
@ -603,11 +575,9 @@ CONTRIBUTORS:
|
||||
Shaun Tancheff <shaun@aeonazure.com>
|
||||
Shawn Bayern <sbayern@law.fsu.edu>
|
||||
Shengqi Chen <harry-chen@outlook.com>
|
||||
SHENGYI HONG <aokblast@FreeBSD.org>
|
||||
Shen Yan <shenyanxxxy@qq.com>
|
||||
Sietse <sietse@wizdom.nu>
|
||||
Simon Guest <simon.guest@tesujimath.org>
|
||||
Simon Howard <fraggle@soulsphere.org>
|
||||
Simon Klinkert <simon.klinkert@gmail.com>
|
||||
Sowrabha Gopal <sowrabha.gopal@delphix.com>
|
||||
Spencer Kinny <spencerkinny1995@gmail.com>
|
||||
@ -629,7 +599,6 @@ CONTRIBUTORS:
|
||||
Stéphane Lesimple <speed47_github@speed47.net>
|
||||
Suman Chakravartula <schakrava@gmail.com>
|
||||
Sydney Vanda <sydney.m.vanda@intel.com>
|
||||
Syed Shahrukh Hussain <syed.shahrukh@ossrevival.org>
|
||||
Sören Tempel <soeren+git@soeren-tempel.net>
|
||||
Tamas TEVESZ <ice@extreme.hu>
|
||||
Teodor Spæren <teodor_spaeren@riseup.net>
|
||||
@ -647,12 +616,9 @@ CONTRIBUTORS:
|
||||
timor <timor.dd@googlemail.com>
|
||||
Timothy Day <tday141@gmail.com>
|
||||
Tim Schumacher <timschumi@gmx.de>
|
||||
Tim Smith <tim@mondoo.com>
|
||||
Tino Reichardt <milky-zfs@mcmilk.de>
|
||||
tleydxdy <shironeko.github@tesaguri.club>
|
||||
Tobin Harding <me@tobin.cc>
|
||||
Todd Seidelmann <seidelma@users.noreply.github.com>
|
||||
Todd Zullinger <tmz@pobox.com>
|
||||
Tom Caputi <tcaputi@datto.com>
|
||||
Tom Matthews <tom@axiom-partners.com>
|
||||
Tomohiro Kusumi <kusumi.tomohiro@gmail.com>
|
||||
@ -674,7 +640,6 @@ CONTRIBUTORS:
|
||||
Vaibhav Bhanawat <vaibhav.bhanawat@delphix.com>
|
||||
Valmiky Arquissandas <kayvlim@gmail.com>
|
||||
Val Packett <val@packett.cool>
|
||||
Vandana Rungta <vrungta@amazon.com>
|
||||
Vince van Oosten <techhazard@codeforyouand.me>
|
||||
Violet Purcell <vimproved@inventati.org>
|
||||
Vipin Kumar Verma <vipin.verma@hpe.com>
|
||||
META (6 lines changed)
@ -1,10 +1,10 @@
Meta: 1
Name: zfs
Branch: 1.0
Version: 2.4.99
Release: 1
Version: 2.3.0
Release: rc5
Release-Tags: relext
License: CDDL
Author: OpenZFS
Linux-Maximum: 6.17
Linux-Maximum: 6.12
Linux-Minimum: 4.18
@ -1,7 +1,6 @@
|
||||
CLEANFILES =
|
||||
dist_noinst_DATA =
|
||||
INSTALL_DATA_HOOKS =
|
||||
INSTALL_EXEC_HOOKS =
|
||||
ALL_LOCAL =
|
||||
CLEAN_LOCAL =
|
||||
CHECKS = shellcheck checkbashisms
|
||||
@ -72,9 +71,6 @@ all: gitrev
|
||||
PHONY += install-data-hook $(INSTALL_DATA_HOOKS)
|
||||
install-data-hook: $(INSTALL_DATA_HOOKS)
|
||||
|
||||
PHONY += install-exec-hook $(INSTALL_EXEC_HOOKS)
|
||||
install-exec-hook: $(INSTALL_EXEC_HOOKS)
|
||||
|
||||
PHONY += maintainer-clean-local
|
||||
maintainer-clean-local:
|
||||
-$(RM) $(GITREV)
|
||||
@ -116,10 +112,6 @@ commitcheck:
|
||||
${top_srcdir}/scripts/commitcheck.sh; \
|
||||
fi
|
||||
|
||||
CHECKS += spdxcheck
|
||||
spdxcheck:
|
||||
$(AM_V_at)$(top_srcdir)/scripts/spdxcheck.pl
|
||||
|
||||
if HAVE_PARALLEL
|
||||
cstyle_line = -print0 | parallel -X0 ${top_srcdir}/scripts/cstyle.pl -cpP {}
|
||||
else
|
||||
|
@ -28,7 +28,7 @@ Two release branches are maintained for OpenZFS, they are:
|
||||
Minor changes to support these distribution kernels will be applied as
|
||||
needed. New kernel versions released after the OpenZFS LTS release are
|
||||
not supported. LTS releases will receive patches for at least 2 years.
|
||||
The current LTS release is OpenZFS 2.2.
|
||||
The current LTS release is OpenZFS 2.1.
|
||||
|
||||
* OpenZFS current - Tracks the newest MAJOR.MINOR release. This branch
|
||||
includes support for the latest OpenZFS features and recently releases
|
||||
|
@ -98,16 +98,17 @@ endif
|
||||
|
||||
|
||||
if USING_PYTHON
|
||||
bin_SCRIPTS += zarcsummary zarcstat dbufstat zilstat
|
||||
CLEANFILES += zarcsummary zarcstat dbufstat zilstat
|
||||
dist_noinst_DATA += %D%/zarcsummary %D%/zarcstat.in %D%/dbufstat.in %D%/zilstat.in
|
||||
bin_SCRIPTS += arc_summary arcstat dbufstat zilstat
|
||||
CLEANFILES += arc_summary arcstat dbufstat zilstat
|
||||
dist_noinst_DATA += %D%/arc_summary %D%/arcstat.in %D%/dbufstat.in %D%/zilstat.in
|
||||
|
||||
$(call SUBST,zarcstat,%D%/)
|
||||
$(call SUBST,arcstat,%D%/)
|
||||
$(call SUBST,dbufstat,%D%/)
|
||||
$(call SUBST,zilstat,%D%/)
|
||||
zarcsummary: %D%/zarcsummary
|
||||
arc_summary: %D%/arc_summary
|
||||
$(AM_V_at)cp $< $@
|
||||
endif
|
||||
|
||||
|
||||
PHONY += cmd
|
||||
cmd: $(bin_SCRIPTS) $(bin_PROGRAMS) $(sbin_SCRIPTS) $(sbin_PROGRAMS) $(dist_bin_SCRIPTS) $(zfsexec_PROGRAMS) $(mounthelper_PROGRAMS)
|
||||
|
@ -1,5 +1,4 @@
|
||||
#!/usr/bin/env python3
|
||||
# SPDX-License-Identifier: BSD-2-Clause
|
||||
#
|
||||
# Copyright (c) 2008 Ben Rockwood <benr@cuddletech.com>,
|
||||
# Copyright (c) 2010 Martin Matuska <mm@FreeBSD.org>,
|
||||
@ -34,7 +33,7 @@ Provides basic information on the ARC, its efficiency, the L2ARC (if present),
|
||||
the Data Management Unit (DMU), Virtual Devices (VDEVs), and tunables. See
|
||||
the in-source documentation and code at
|
||||
https://github.com/openzfs/zfs/blob/master/module/zfs/arc.c for details.
|
||||
The original introduction to zarcsummary can be found at
|
||||
The original introduction to arc_summary can be found at
|
||||
http://cuddletech.com/?p=454
|
||||
"""
|
||||
|
||||
@ -161,7 +160,7 @@ elif sys.platform.startswith('linux'):
|
||||
return get_params(TUNABLES_PATH)
|
||||
|
||||
def get_version_impl(request):
|
||||
# The original zarcsummary called /sbin/modinfo/{spl,zfs} to get
|
||||
# The original arc_summary called /sbin/modinfo/{spl,zfs} to get
|
||||
# the version information. We switch to /sys/module/{spl,zfs}/version
|
||||
# to make sure we get what is really loaded in the kernel
|
||||
try:
|
||||
@ -439,7 +438,7 @@ def print_header():
|
||||
"""
|
||||
|
||||
# datetime is now recommended over time but we keep the exact formatting
|
||||
# from the older version of zarcsummary in case there are scripts
|
||||
# from the older version of arc_summary in case there are scripts
|
||||
# that expect it in this way
|
||||
daydate = time.strftime(DATE_FORMAT)
|
||||
spc_date = LINE_LENGTH-len(daydate)
|
||||
@ -559,7 +558,6 @@ def section_arc(kstats_dict):
|
||||
print()
|
||||
|
||||
compressed_size = arc_stats['compressed_size']
|
||||
uncompressed_size = arc_stats['uncompressed_size']
|
||||
overhead_size = arc_stats['overhead_size']
|
||||
bonus_size = arc_stats['bonus_size']
|
||||
dnode_size = arc_stats['dnode_size']
|
||||
@ -672,8 +670,6 @@ def section_arc(kstats_dict):
|
||||
print()
|
||||
|
||||
print('ARC misc:')
|
||||
prt_i2('Uncompressed size:', f_perc(uncompressed_size, compressed_size),
|
||||
f_bytes(uncompressed_size))
|
||||
prt_i1('Memory throttles:', arc_stats['memory_throttle_count'])
|
||||
prt_i1('Memory direct reclaims:', arc_stats['memory_direct_count'])
|
||||
prt_i1('Memory indirect reclaims:', arc_stats['memory_indirect_count'])
|
@ -1,8 +1,7 @@
|
||||
#!/usr/bin/env @PYTHON_SHEBANG@
|
||||
# SPDX-License-Identifier: CDDL-1.0
|
||||
#
|
||||
# Print out ZFS ARC Statistics exported via kstat(1)
|
||||
# For a definition of fields, or usage, use zarcstat -v
|
||||
# For a definition of fields, or usage, use arcstat -v
|
||||
#
|
||||
# This script was originally a fork of the original arcstat.pl (0.1)
|
||||
# by Neelakanth Nadgir, originally published on his Sun blog on
|
||||
@ -56,7 +55,6 @@ import time
|
||||
import getopt
|
||||
import re
|
||||
import copy
|
||||
import os
|
||||
|
||||
from signal import signal, SIGINT, SIGWINCH, SIG_DFL
|
||||
|
||||
@ -172,7 +170,7 @@ cols = {
|
||||
"zactive": [7, 1000, "zfetch prefetches active per second"],
|
||||
}
|
||||
|
||||
# ARC structural breakdown from zarcsummary
|
||||
# ARC structural breakdown from arc_summary
|
||||
structfields = {
|
||||
"cmp": ["compressed", "Compressed"],
|
||||
"ovh": ["overhead", "Overhead"],
|
||||
@ -188,7 +186,7 @@ structstats = { # size stats
|
||||
"sz": ["_size", "size"],
|
||||
}
|
||||
|
||||
# ARC types breakdown from zarcsummary
|
||||
# ARC types breakdown from arc_summary
|
||||
typefields = {
|
||||
"data": ["data", "ARC data"],
|
||||
"meta": ["metadata", "ARC metadata"],
|
||||
@ -199,7 +197,7 @@ typestats = { # size stats
|
||||
"sz": ["_size", "size"],
|
||||
}
|
||||
|
||||
# ARC states breakdown from zarcsummary
|
||||
# ARC states breakdown from arc_summary
|
||||
statefields = {
|
||||
"ano": ["anon", "Anonymous"],
|
||||
"mfu": ["mfu", "MFU"],
|
||||
@ -262,7 +260,7 @@ hdr_intr = 20 # Print header every 20 lines of output
|
||||
opfile = None
|
||||
sep = " " # Default separator is 2 spaces
|
||||
l2exist = False
|
||||
cmd = ("Usage: zarcstat [-havxp] [-f fields] [-o file] [-s string] [interval "
|
||||
cmd = ("Usage: arcstat [-havxp] [-f fields] [-o file] [-s string] [interval "
|
||||
"[count]]\n")
|
||||
cur = {}
|
||||
d = {}
|
||||
@ -349,10 +347,10 @@ def usage():
|
||||
"character or string\n")
|
||||
sys.stderr.write("\t -p : Disable auto-scaling of numerical fields\n")
|
||||
sys.stderr.write("\nExamples:\n")
|
||||
sys.stderr.write("\tzarcstat -o /tmp/a.log 2 10\n")
|
||||
sys.stderr.write("\tzarcstat -s \",\" -o /tmp/a.log 2 10\n")
|
||||
sys.stderr.write("\tzarcstat -v\n")
|
||||
sys.stderr.write("\tzarcstat -f time,hit%,dh%,ph%,mh% 1\n")
|
||||
sys.stderr.write("\tarcstat -o /tmp/a.log 2 10\n")
|
||||
sys.stderr.write("\tarcstat -s \",\" -o /tmp/a.log 2 10\n")
|
||||
sys.stderr.write("\tarcstat -v\n")
|
||||
sys.stderr.write("\tarcstat -f time,hit%,dh%,ph%,mh% 1\n")
|
||||
sys.stderr.write("\n")
|
||||
|
||||
sys.exit(1)
|
||||
@ -367,7 +365,7 @@ def snap_stats():
|
||||
|
||||
cur = kstat
|
||||
|
||||
# fill in additional values from zarcsummary
|
||||
# fill in additional values from arc_summary
|
||||
cur["caches_size"] = caches_size = cur["anon_data"]+cur["anon_metadata"]+\
|
||||
cur["mfu_data"]+cur["mfu_metadata"]+cur["mru_data"]+cur["mru_metadata"]+\
|
||||
cur["uncached_data"]+cur["uncached_metadata"]
|
||||
@ -736,14 +734,13 @@ def calculate():
|
||||
v[group["percent"]] if v[group["percent"]] > 0 else 0
|
||||
|
||||
if l2exist:
|
||||
l2asize = cur["l2_asize"]
|
||||
v["l2hits"] = d["l2_hits"] / sint
|
||||
v["l2miss"] = d["l2_misses"] / sint
|
||||
v["l2read"] = v["l2hits"] + v["l2miss"]
|
||||
v["l2hit%"] = 100 * v["l2hits"] / v["l2read"] if v["l2read"] > 0 else 0
|
||||
|
||||
v["l2miss%"] = 100 - v["l2hit%"] if v["l2read"] > 0 else 0
|
||||
v["l2asize"] = l2asize
|
||||
v["l2asize"] = cur["l2_asize"]
|
||||
v["l2size"] = cur["l2_size"]
|
||||
v["l2bytes"] = d["l2_read_bytes"] / sint
|
||||
v["l2wbytes"] = d["l2_write_bytes"] / sint
|
||||
@ -753,11 +750,11 @@ def calculate():
|
||||
v["l2mru"] = cur["l2_mru_asize"]
|
||||
v["l2data"] = cur["l2_bufc_data_asize"]
|
||||
v["l2meta"] = cur["l2_bufc_metadata_asize"]
|
||||
v["l2pref%"] = 100 * v["l2pref"] / l2asize if l2asize > 0 else 0
|
||||
v["l2mfu%"] = 100 * v["l2mfu"] / l2asize if l2asize > 0 else 0
|
||||
v["l2mru%"] = 100 * v["l2mru"] / l2asize if l2asize > 0 else 0
|
||||
v["l2data%"] = 100 * v["l2data"] / l2asize if l2asize > 0 else 0
|
||||
v["l2meta%"] = 100 * v["l2meta"] / l2asize if l2asize > 0 else 0
|
||||
v["l2pref%"] = 100 * v["l2pref"] / v["l2asize"]
|
||||
v["l2mfu%"] = 100 * v["l2mfu"] / v["l2asize"]
|
||||
v["l2mru%"] = 100 * v["l2mru"] / v["l2asize"]
|
||||
v["l2data%"] = 100 * v["l2data"] / v["l2asize"]
|
||||
v["l2meta%"] = 100 * v["l2meta"] / v["l2asize"]
|
||||
|
||||
v["grow"] = 0 if cur["arc_no_grow"] else 1
|
||||
v["need"] = cur["arc_need_free"]
|
||||
@ -767,7 +764,6 @@ def calculate():
|
||||
|
||||
|
||||
def main():
|
||||
|
||||
global sint
|
||||
global count
|
||||
global hdr_intr
|
@ -1,5 +1,4 @@
|
||||
#!/usr/bin/env @PYTHON_SHEBANG@
|
||||
# SPDX-License-Identifier: CDDL-1.0
|
||||
#
|
||||
# Print out statistics for all cached dmu buffers. This information
|
||||
# is available through the dbufs kstat and may be post-processed as
|
||||
|
@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
|
@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
|
@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
|
@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
|
418 cmd/zdb/zdb.c
File diff suppressed because it is too large
@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
@ -29,6 +28,6 @@
|
||||
#define _ZDB_H
|
||||
|
||||
void dump_intent_log(zilog_t *);
|
||||
extern uint8_t dump_opt[512];
|
||||
extern uint8_t dump_opt[256];
|
||||
|
||||
#endif /* _ZDB_H */
|
||||
|
@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
@ -48,6 +47,8 @@
|
||||
|
||||
#include "zdb.h"
|
||||
|
||||
extern uint8_t dump_opt[256];
|
||||
|
||||
static char tab_prefix[4] = "\t\t\t";
|
||||
|
||||
static void
|
||||
@ -174,7 +175,7 @@ zil_prt_rec_write(zilog_t *zilog, int txtype, const void *arg)
|
||||
|
||||
if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
|
||||
(void) printf("%shas blkptr, %s\n", tab_prefix,
|
||||
!BP_IS_HOLE(bp) && BP_GET_BIRTH(bp) >=
|
||||
!BP_IS_HOLE(bp) && BP_GET_LOGICAL_BIRTH(bp) >=
|
||||
spa_min_claim_txg(zilog->zl_spa) ?
|
||||
"will claim" : "won't claim");
|
||||
print_log_bp(bp, tab_prefix);
|
||||
@ -187,7 +188,7 @@ zil_prt_rec_write(zilog_t *zilog, int txtype, const void *arg)
|
||||
(void) printf("%s<hole>\n", tab_prefix);
|
||||
return;
|
||||
}
|
||||
if (BP_GET_BIRTH(bp) < zilog->zl_header->zh_claim_txg) {
|
||||
if (BP_GET_LOGICAL_BIRTH(bp) < zilog->zl_header->zh_claim_txg) {
|
||||
(void) printf("%s<block already committed>\n",
|
||||
tab_prefix);
|
||||
return;
|
||||
@ -238,7 +239,7 @@ zil_prt_rec_write_enc(zilog_t *zilog, int txtype, const void *arg)
|
||||
|
||||
if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
|
||||
(void) printf("%shas blkptr, %s\n", tab_prefix,
|
||||
!BP_IS_HOLE(bp) && BP_GET_BIRTH(bp) >=
|
||||
!BP_IS_HOLE(bp) && BP_GET_LOGICAL_BIRTH(bp) >=
|
||||
spa_min_claim_txg(zilog->zl_spa) ?
|
||||
"will claim" : "won't claim");
|
||||
print_log_bp(bp, tab_prefix);
|
||||
@ -474,7 +475,7 @@ print_log_block(zilog_t *zilog, const blkptr_t *bp, void *arg,
|
||||
|
||||
if (claim_txg != 0)
|
||||
claim = "already claimed";
|
||||
else if (BP_GET_BIRTH(bp) >= spa_min_claim_txg(zilog->zl_spa))
|
||||
else if (BP_GET_LOGICAL_BIRTH(bp) >= spa_min_claim_txg(zilog->zl_spa))
|
||||
claim = "will claim";
|
||||
else
|
||||
claim = "won't claim";
|
||||
|
@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
|
@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
|
@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
|
@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
|
@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
@ -134,13 +133,11 @@ zfs_agent_iter_vdev(zpool_handle_t *zhp, nvlist_t *nvl, void *arg)
|
||||
* of blkid cache and L2ARC VDEV does not contain pool guid in its
|
||||
* blkid, so this is a special case for L2ARC VDEV.
|
||||
*/
|
||||
else if (gsp->gs_vdev_guid != 0 &&
|
||||
else if (gsp->gs_vdev_guid != 0 && gsp->gs_devid == NULL &&
|
||||
nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_GUID, &vdev_guid) == 0 &&
|
||||
gsp->gs_vdev_guid == vdev_guid) {
|
||||
if (gsp->gs_devid == NULL) {
|
||||
(void) nvlist_lookup_string(nvl, ZPOOL_CONFIG_DEVID,
|
||||
&gsp->gs_devid);
|
||||
}
|
||||
(void) nvlist_lookup_string(nvl, ZPOOL_CONFIG_DEVID,
|
||||
&gsp->gs_devid);
|
||||
(void) nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_EXPANSION_TIME,
|
||||
&gsp->gs_vdev_expandtime);
|
||||
return (B_TRUE);
|
||||
@ -158,28 +155,22 @@ zfs_agent_iter_pool(zpool_handle_t *zhp, void *arg)
|
||||
/*
|
||||
* For each vdev in this pool, look for a match by devid
|
||||
*/
|
||||
boolean_t found = B_FALSE;
|
||||
uint64_t pool_guid;
|
||||
if ((config = zpool_get_config(zhp, NULL)) != NULL) {
|
||||
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
|
||||
&nvl) == 0) {
|
||||
(void) zfs_agent_iter_vdev(zhp, nvl, gsp);
|
||||
}
|
||||
}
|
||||
/*
|
||||
* if a match was found then grab the pool guid
|
||||
*/
|
||||
if (gsp->gs_vdev_guid && gsp->gs_devid) {
|
||||
(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
|
||||
&gsp->gs_pool_guid);
|
||||
}
|
||||
|
||||
/* Get pool configuration and extract pool GUID */
|
||||
if ((config = zpool_get_config(zhp, NULL)) == NULL ||
|
||||
nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
|
||||
&pool_guid) != 0)
|
||||
goto out;
|
||||
|
||||
/* Skip this pool if we're looking for a specific pool */
|
||||
if (gsp->gs_pool_guid != 0 && pool_guid != gsp->gs_pool_guid)
|
||||
goto out;
|
||||
|
||||
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvl) == 0)
|
||||
found = zfs_agent_iter_vdev(zhp, nvl, gsp);
|
||||
|
||||
if (found && gsp->gs_pool_guid == 0)
|
||||
gsp->gs_pool_guid = pool_guid;
|
||||
|
||||
out:
|
||||
zpool_close(zhp);
|
||||
return (found);
|
||||
return (gsp->gs_devid != NULL && gsp->gs_vdev_guid != 0);
|
||||
}
|
||||
|
||||
void
|
||||
@ -241,17 +232,20 @@ zfs_agent_post_event(const char *class, const char *subclass, nvlist_t *nvl)
|
||||
* For multipath, spare and l2arc devices ZFS_EV_VDEV_GUID or
|
||||
* ZFS_EV_POOL_GUID may be missing so find them.
|
||||
*/
|
||||
search.gs_devid = devid;
|
||||
search.gs_vdev_guid = vdev_guid;
|
||||
search.gs_pool_guid = pool_guid;
|
||||
zpool_iter(g_zfs_hdl, zfs_agent_iter_pool, &search);
|
||||
if (devid == NULL)
|
||||
devid = search.gs_devid;
|
||||
if (pool_guid == 0)
|
||||
pool_guid = search.gs_pool_guid;
|
||||
if (vdev_guid == 0)
|
||||
vdev_guid = search.gs_vdev_guid;
|
||||
devtype = search.gs_vdev_type;
|
||||
if (devid == NULL || pool_guid == 0 || vdev_guid == 0) {
|
||||
if (devid == NULL)
|
||||
search.gs_vdev_guid = vdev_guid;
|
||||
else
|
||||
search.gs_devid = devid;
|
||||
zpool_iter(g_zfs_hdl, zfs_agent_iter_pool, &search);
|
||||
if (devid == NULL)
|
||||
devid = search.gs_devid;
|
||||
if (pool_guid == 0)
|
||||
pool_guid = search.gs_pool_guid;
|
||||
if (vdev_guid == 0)
|
||||
vdev_guid = search.gs_vdev_guid;
|
||||
devtype = search.gs_vdev_type;
|
||||
}
|
||||
|
||||
/*
|
||||
* We want to avoid reporting "remove" events coming from
|
||||
|
@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
|
@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
|
@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
@ -215,7 +214,6 @@ zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t labeled)
|
||||
vdev_stat_t *vs;
|
||||
char **lines = NULL;
|
||||
int lines_cnt = 0;
|
||||
int rc;
|
||||
|
||||
/*
|
||||
* Get the persistent path, typically under the '/dev/disk/by-id' or
|
||||
@ -407,17 +405,17 @@ zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t labeled)
|
||||
}
|
||||
|
||||
nvlist_lookup_string(vdev, "new_devid", &new_devid);
|
||||
|
||||
if (is_mpath_wholedisk) {
|
||||
/* Don't label device mapper or multipath disks. */
|
||||
zed_log_msg(LOG_INFO,
|
||||
" it's a multipath wholedisk, don't label");
|
||||
rc = zpool_prepare_disk(zhp, vdev, "autoreplace", &lines,
|
||||
&lines_cnt);
|
||||
if (rc != 0) {
|
||||
if (zpool_prepare_disk(zhp, vdev, "autoreplace", &lines,
|
||||
&lines_cnt) != 0) {
|
||||
zed_log_msg(LOG_INFO,
|
||||
" zpool_prepare_disk: could not "
|
||||
"prepare '%s' (%s), path '%s', rc = %d", fullpath,
|
||||
libzfs_error_description(g_zfshdl), path, rc);
|
||||
"prepare '%s' (%s)", fullpath,
|
||||
libzfs_error_description(g_zfshdl));
|
||||
if (lines_cnt > 0) {
|
||||
zed_log_msg(LOG_INFO,
|
||||
" zfs_prepare_disk output:");
|
||||
@ -448,13 +446,12 @@ zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t labeled)
|
||||
* If this is a request to label a whole disk, then attempt to
|
||||
* write out the label.
|
||||
*/
|
||||
rc = zpool_prepare_and_label_disk(g_zfshdl, zhp, leafname,
|
||||
vdev, "autoreplace", &lines, &lines_cnt);
|
||||
if (rc != 0) {
|
||||
if (zpool_prepare_and_label_disk(g_zfshdl, zhp, leafname,
|
||||
vdev, "autoreplace", &lines, &lines_cnt) != 0) {
|
||||
zed_log_msg(LOG_WARNING,
|
||||
" zpool_prepare_and_label_disk: could not "
|
||||
"label '%s' (%s), rc = %d", leafname,
|
||||
libzfs_error_description(g_zfshdl), rc);
|
||||
"label '%s' (%s)", leafname,
|
||||
libzfs_error_description(g_zfshdl));
|
||||
if (lines_cnt > 0) {
|
||||
zed_log_msg(LOG_INFO,
|
||||
" zfs_prepare_disk output:");
|
||||
|
@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
@ -404,7 +403,6 @@ zfs_retire_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl,
|
||||
(state == VDEV_STATE_REMOVED || state == VDEV_STATE_FAULTED))) {
|
||||
const char *devtype;
|
||||
char *devname;
|
||||
boolean_t skip_removal = B_FALSE;
|
||||
|
||||
if (nvlist_lookup_string(nvl, FM_EREPORT_PAYLOAD_ZFS_VDEV_TYPE,
|
||||
&devtype) == 0) {
|
||||
@ -442,28 +440,18 @@ zfs_retire_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl,
|
||||
nvlist_lookup_uint64_array(vdev, ZPOOL_CONFIG_VDEV_STATS,
|
||||
(uint64_t **)&vs, &c);
|
||||
|
||||
if (vs->vs_state == VDEV_STATE_OFFLINE)
|
||||
return;
|
||||
|
||||
/*
|
||||
* If state removed is requested for already removed vdev,
|
||||
* its a loopback event from spa_async_remove(). Just
|
||||
* ignore it.
|
||||
*/
|
||||
if ((vs->vs_state == VDEV_STATE_REMOVED &&
|
||||
state == VDEV_STATE_REMOVED)) {
|
||||
if (strcmp(class, "resource.fs.zfs.removed") == 0 &&
|
||||
nvlist_exists(nvl, "by_kernel")) {
|
||||
skip_removal = B_TRUE;
|
||||
} else {
|
||||
return;
|
||||
}
|
||||
}
|
||||
if ((vs->vs_state == VDEV_STATE_REMOVED && state ==
|
||||
VDEV_STATE_REMOVED) || vs->vs_state == VDEV_STATE_OFFLINE)
|
||||
return;
|
||||
|
||||
/* Remove the vdev since device is unplugged */
|
||||
int remove_status = 0;
|
||||
if (!skip_removal && (l2arc ||
|
||||
(strcmp(class, "resource.fs.zfs.removed") == 0))) {
|
||||
if (l2arc || (strcmp(class, "resource.fs.zfs.removed") == 0)) {
|
||||
remove_status = zpool_vdev_remove_wanted(zhp, devname);
|
||||
fmd_hdl_debug(hdl, "zpool_vdev_remove_wanted '%s'"
|
||||
", err:%d", devname, libzfs_errno(zhdl));
|
||||
|
@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* This file is part of the ZFS Event Daemon (ZED).
|
||||
*
|
||||
|
@ -9,18 +9,18 @@ dist_zedexec_SCRIPTS = \
|
||||
%D%/all-debug.sh \
|
||||
%D%/all-syslog.sh \
|
||||
%D%/data-notify.sh \
|
||||
%D%/deadman-sync-slot_off.sh \
|
||||
%D%/deadman-slot_off.sh \
|
||||
%D%/generic-notify.sh \
|
||||
%D%/pool_import-sync-led.sh \
|
||||
%D%/pool_import-led.sh \
|
||||
%D%/resilver_finish-notify.sh \
|
||||
%D%/resilver_finish-start-scrub.sh \
|
||||
%D%/scrub_finish-notify.sh \
|
||||
%D%/statechange-sync-led.sh \
|
||||
%D%/statechange-led.sh \
|
||||
%D%/statechange-notify.sh \
|
||||
%D%/statechange-sync-slot_off.sh \
|
||||
%D%/statechange-slot_off.sh \
|
||||
%D%/trim_finish-notify.sh \
|
||||
%D%/vdev_attach-sync-led.sh \
|
||||
%D%/vdev_clear-sync-led.sh
|
||||
%D%/vdev_attach-led.sh \
|
||||
%D%/vdev_clear-led.sh
|
||||
|
||||
nodist_zedexec_SCRIPTS = \
|
||||
%D%/history_event-zfs-list-cacher.sh
|
||||
@ -30,17 +30,17 @@ SUBSTFILES += $(nodist_zedexec_SCRIPTS)
|
||||
zedconfdefaults = \
|
||||
all-syslog.sh \
|
||||
data-notify.sh \
|
||||
deadman-sync-slot_off.sh \
|
||||
deadman-slot_off.sh \
|
||||
history_event-zfs-list-cacher.sh \
|
||||
pool_import-sync-led.sh \
|
||||
pool_import-led.sh \
|
||||
resilver_finish-notify.sh \
|
||||
resilver_finish-start-scrub.sh \
|
||||
scrub_finish-notify.sh \
|
||||
statechange-sync-led.sh \
|
||||
statechange-led.sh \
|
||||
statechange-notify.sh \
|
||||
statechange-sync-slot_off.sh \
|
||||
vdev_attach-sync-led.sh \
|
||||
vdev_clear-sync-led.sh
|
||||
statechange-slot_off.sh \
|
||||
vdev_attach-led.sh \
|
||||
vdev_clear-led.sh
|
||||
|
||||
dist_noinst_DATA += %D%/README
|
||||
|
||||
|
1 cmd/zed/zed.d/pool_import-led.sh
Symbolic link
@ -0,0 +1 @@
|
||||
statechange-led.sh
|
@ -1 +0,0 @@
|
||||
statechange-sync-led.sh
|
@ -1,5 +1,4 @@
|
||||
#!/bin/sh
|
||||
# SPDX-License-Identifier: CDDL-1.0
|
||||
# shellcheck disable=SC2154
|
||||
#
|
||||
# CDDL HEADER START
|
||||
|
1 cmd/zed/zed.d/vdev_attach-led.sh
Symbolic link
@ -0,0 +1 @@
|
||||
statechange-led.sh
|
@ -1 +0,0 @@
|
||||
statechange-sync-led.sh
|
1 cmd/zed/zed.d/vdev_clear-led.sh
Symbolic link
@ -0,0 +1 @@
|
||||
statechange-led.sh
|
@ -1 +0,0 @@
|
||||
statechange-sync-led.sh
|
@ -283,11 +283,6 @@ zed_notify_email()
|
||||
if [ "${ZED_EMAIL_OPTS%@SUBJECT@*}" = "${ZED_EMAIL_OPTS}" ] ; then
|
||||
# inject subject header
|
||||
printf "Subject: %s\n" "${subject}"
|
||||
# The following empty line is needed to separate the header from the
|
||||
# body of the message. Otherwise programs like sendmail will skip
|
||||
# everything up to the first empty line (or wont send an email at
|
||||
# all) and will still exit with exit code 0
|
||||
printf "\n"
|
||||
fi
|
||||
# output message
|
||||
cat "${pathname}"
|
||||
@ -441,9 +436,8 @@ zed_notify_slack_webhook()
|
||||
"${pathname}")"
|
||||
|
||||
# Construct the JSON message for posting.
|
||||
# shellcheck disable=SC2016
|
||||
#
|
||||
msg_json="$(printf '{"text": "*%s*\\n```%s```"}' "${subject}" "${msg_body}" )"
|
||||
msg_json="$(printf '{"text": "*%s*\\n%s"}' "${subject}" "${msg_body}" )"
|
||||
|
||||
# Send the POST request and check for errors.
|
||||
#
|
||||
|
@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* This file is part of the ZFS Event Daemon (ZED).
|
||||
*
|
||||
|
@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* This file is part of the ZFS Event Daemon (ZED).
|
||||
*
|
||||
|
@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* This file is part of the ZFS Event Daemon (ZED).
|
||||
*
|
||||
|
@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
|
@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
|
@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* This file is part of the ZFS Event Daemon (ZED).
|
||||
*
|
||||
@ -110,7 +109,7 @@ zed_event_fini(struct zed_conf *zcp)
|
||||
static void
|
||||
_bump_event_queue_length(void)
|
||||
{
|
||||
int zzlm, wr;
|
||||
int zzlm = -1, wr;
|
||||
char qlen_buf[12] = {0}; /* parameter is int => max "-2147483647\n" */
|
||||
long int qlen, orig_qlen;
|
||||
|
||||
|
@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* This file is part of the ZFS Event Daemon (ZED).
|
||||
*
|
||||
|
@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* This file is part of the ZFS Event Daemon (ZED).
|
||||
*
|
||||
@ -196,29 +195,37 @@ _nop(int sig)
|
||||
(void) sig;
|
||||
}
|
||||
|
||||
static void
|
||||
wait_for_children(boolean_t do_pause, boolean_t wait)
|
||||
static void *
|
||||
_reap_children(void *arg)
|
||||
{
|
||||
pid_t pid;
|
||||
struct rusage usage;
|
||||
int status;
|
||||
(void) arg;
|
||||
struct launched_process_node node, *pnode;
|
||||
pid_t pid;
|
||||
int status;
|
||||
struct rusage usage;
|
||||
struct sigaction sa = {};
|
||||
|
||||
(void) sigfillset(&sa.sa_mask);
|
||||
(void) sigdelset(&sa.sa_mask, SIGCHLD);
|
||||
(void) pthread_sigmask(SIG_SETMASK, &sa.sa_mask, NULL);
|
||||
|
||||
(void) sigemptyset(&sa.sa_mask);
|
||||
sa.sa_handler = _nop;
|
||||
sa.sa_flags = SA_NOCLDSTOP;
|
||||
(void) sigaction(SIGCHLD, &sa, NULL);
|
||||
|
||||
for (_reap_children_stop = B_FALSE; !_reap_children_stop; ) {
|
||||
(void) pthread_mutex_lock(&_launched_processes_lock);
|
||||
pid = wait4(0, &status, wait ? 0 : WNOHANG, &usage);
|
||||
pid = wait4(0, &status, WNOHANG, &usage);
|
||||
|
||||
if (pid == 0 || pid == (pid_t)-1) {
|
||||
(void) pthread_mutex_unlock(&_launched_processes_lock);
|
||||
if ((pid == 0) || (errno == ECHILD)) {
|
||||
if (do_pause)
|
||||
pause();
|
||||
} else if (errno != EINTR)
|
||||
if (pid == 0 || errno == ECHILD)
|
||||
pause();
|
||||
else if (errno != EINTR)
|
||||
zed_log_msg(LOG_WARNING,
|
||||
"Failed to wait for children: %s",
|
||||
strerror(errno));
|
||||
if (!do_pause)
|
||||
return;
|
||||
|
||||
} else {
|
||||
memset(&node, 0, sizeof (node));
|
||||
node.pid = pid;
|
||||
@ -270,25 +277,6 @@ wait_for_children(boolean_t do_pause, boolean_t wait)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
static void *
|
||||
_reap_children(void *arg)
|
||||
{
|
||||
(void) arg;
|
||||
struct sigaction sa = {};
|
||||
|
||||
(void) sigfillset(&sa.sa_mask);
|
||||
(void) sigdelset(&sa.sa_mask, SIGCHLD);
|
||||
(void) pthread_sigmask(SIG_SETMASK, &sa.sa_mask, NULL);
|
||||
|
||||
(void) sigemptyset(&sa.sa_mask);
|
||||
sa.sa_handler = _nop;
|
||||
sa.sa_flags = SA_NOCLDSTOP;
|
||||
(void) sigaction(SIGCHLD, &sa, NULL);
|
||||
|
||||
wait_for_children(B_TRUE, B_FALSE);
|
||||
|
||||
return (NULL);
|
||||
}
|
||||
|
||||
@ -317,45 +305,6 @@ zed_exec_fini(void)
|
||||
_reap_children_tid = (pthread_t)-1;
|
||||
}
|
||||
|
||||
/*
|
||||
* Check if the zedlet name indicates if it is a synchronous zedlet
|
||||
*
|
||||
* Synchronous zedlets have a "-sync-" immediately following the event name in
|
||||
* their zedlet filename, like:
|
||||
*
|
||||
* EVENT_NAME-sync-ZEDLETNAME.sh
|
||||
*
|
||||
* For example, if you wanted a synchronous statechange script:
|
||||
*
|
||||
* statechange-sync-myzedlet.sh
|
||||
*
|
||||
* Synchronous zedlets are guaranteed to be the only zedlet running. No other
|
||||
* zedlets may run in parallel with a synchronous zedlet. A synchronous
|
||||
* zedlet will wait for all previously spawned zedlets to finish before running.
|
||||
* Users should be careful to only use synchronous zedlets when needed, since
|
||||
* they decrease parallelism.
|
||||
*/
|
||||
static boolean_t
|
||||
zedlet_is_sync(const char *zedlet, const char *event)
|
||||
{
|
||||
const char *sync_str = "-sync-";
|
||||
size_t sync_str_len;
|
||||
size_t zedlet_len;
|
||||
size_t event_len;
|
||||
|
||||
sync_str_len = strlen(sync_str);
|
||||
zedlet_len = strlen(zedlet);
|
||||
event_len = strlen(event);
|
||||
|
||||
if (event_len + sync_str_len >= zedlet_len)
|
||||
return (B_FALSE);
|
||||
|
||||
if (strncmp(&zedlet[event_len], sync_str, sync_str_len) == 0)
|
||||
return (B_TRUE);
|
||||
|
||||
return (B_FALSE);
|
||||
}
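For illustration only, a minimal shell sketch of the "-sync-" naming rule described in the comment above. The event name and zedlet filenames here are hypothetical; in ZED itself the check is performed by zedlet_is_sync() in C together with the class-prefix match in zed_exec_process().

# Illustrative sketch: a zedlet is treated as synchronous when its filename
# is the event class followed immediately by "-sync-".
event="statechange"
for zedlet in statechange-sync-myzedlet.sh statechange-notify.sh; do
    case "$zedlet" in
        "${event}-sync-"*) echo "$zedlet: synchronous" ;;
        *)                 echo "$zedlet: asynchronous" ;;
    esac
done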
|
||||
|
||||
/*
|
||||
* Process the event [eid] by synchronously invoking all zedlets with a
|
||||
* matching class prefix.
|
||||
@ -418,28 +367,9 @@ zed_exec_process(uint64_t eid, const char *class, const char *subclass,
|
||||
z = zed_strings_next(zcp->zedlets)) {
|
||||
for (csp = class_strings; *csp; csp++) {
|
||||
n = strlen(*csp);
|
||||
if ((strncmp(z, *csp, n) == 0) && !isalpha(z[n])) {
|
||||
boolean_t is_sync = zedlet_is_sync(z, *csp);
|
||||
|
||||
if (is_sync) {
|
||||
/*
|
||||
* Wait for previous zedlets to
|
||||
* finish
|
||||
*/
|
||||
wait_for_children(B_FALSE, B_TRUE);
|
||||
}
|
||||
|
||||
if ((strncmp(z, *csp, n) == 0) && !isalpha(z[n]))
|
||||
_zed_exec_fork_child(eid, zcp->zedlet_dir,
|
||||
z, e, zcp->zevent_fd, zcp->do_foreground);
|
||||
|
||||
if (is_sync) {
|
||||
/*
|
||||
* Wait for sync zedlet we just launched
|
||||
* to finish.
|
||||
*/
|
||||
wait_for_children(B_FALSE, B_TRUE);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
free(e);
|
||||
|
@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* This file is part of the ZFS Event Daemon (ZED).
|
||||
*
|
||||
|
@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* This file is part of the ZFS Event Daemon (ZED).
|
||||
*
|
||||
|
@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* This file is part of the ZFS Event Daemon (ZED).
|
||||
*
|
||||
|
@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* This file is part of the ZFS Event Daemon (ZED).
|
||||
*
|
||||
|
@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* This file is part of the ZFS Event Daemon (ZED).
|
||||
*
|
||||
|
@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* This file is part of the ZFS Event Daemon (ZED).
|
||||
*
|
||||
|
@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* This file is part of the ZFS Event Daemon (ZED).
|
||||
*
|
||||
|
@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
|
@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
|
@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
@ -37,7 +36,6 @@
|
||||
#include <assert.h>
|
||||
#include <ctype.h>
|
||||
#include <sys/debug.h>
|
||||
#include <dirent.h>
|
||||
#include <errno.h>
|
||||
#include <getopt.h>
|
||||
#include <libgen.h>
|
||||
@ -122,7 +120,6 @@ static int zfs_do_change_key(int argc, char **argv);
|
||||
static int zfs_do_project(int argc, char **argv);
|
||||
static int zfs_do_version(int argc, char **argv);
|
||||
static int zfs_do_redact(int argc, char **argv);
|
||||
static int zfs_do_rewrite(int argc, char **argv);
|
||||
static int zfs_do_wait(int argc, char **argv);
|
||||
|
||||
#ifdef __FreeBSD__
|
||||
@ -195,7 +192,6 @@ typedef enum {
|
||||
HELP_CHANGE_KEY,
|
||||
HELP_VERSION,
|
||||
HELP_REDACT,
|
||||
HELP_REWRITE,
|
||||
HELP_JAIL,
|
||||
HELP_UNJAIL,
|
||||
HELP_WAIT,
|
||||
@ -230,7 +226,7 @@ static zfs_command_t command_table[] = {
|
||||
{ "promote", zfs_do_promote, HELP_PROMOTE },
|
||||
{ "rename", zfs_do_rename, HELP_RENAME },
|
||||
{ "bookmark", zfs_do_bookmark, HELP_BOOKMARK },
|
||||
{ "diff", zfs_do_diff, HELP_DIFF },
|
||||
{ "program", zfs_do_channel_program, HELP_CHANNEL_PROGRAM },
|
||||
{ NULL },
|
||||
{ "list", zfs_do_list, HELP_LIST },
|
||||
{ NULL },
|
||||
@ -252,31 +248,27 @@ static zfs_command_t command_table[] = {
|
||||
{ NULL },
|
||||
{ "send", zfs_do_send, HELP_SEND },
|
||||
{ "receive", zfs_do_receive, HELP_RECEIVE },
|
||||
{ "redact", zfs_do_redact, HELP_REDACT },
|
||||
{ NULL },
|
||||
{ "allow", zfs_do_allow, HELP_ALLOW },
|
||||
{ NULL },
|
||||
{ "unallow", zfs_do_unallow, HELP_UNALLOW },
|
||||
{ NULL },
|
||||
{ "hold", zfs_do_hold, HELP_HOLD },
|
||||
{ "holds", zfs_do_holds, HELP_HOLDS },
|
||||
{ "release", zfs_do_release, HELP_RELEASE },
|
||||
{ NULL },
|
||||
{ "diff", zfs_do_diff, HELP_DIFF },
|
||||
{ "load-key", zfs_do_load_key, HELP_LOAD_KEY },
|
||||
{ "unload-key", zfs_do_unload_key, HELP_UNLOAD_KEY },
|
||||
{ "change-key", zfs_do_change_key, HELP_CHANGE_KEY },
|
||||
{ NULL },
|
||||
{ "program", zfs_do_channel_program, HELP_CHANNEL_PROGRAM },
|
||||
{ "rewrite", zfs_do_rewrite, HELP_REWRITE },
|
||||
{ "redact", zfs_do_redact, HELP_REDACT },
|
||||
{ "wait", zfs_do_wait, HELP_WAIT },
|
||||
|
||||
#ifdef __FreeBSD__
|
||||
{ NULL },
|
||||
{ "jail", zfs_do_jail, HELP_JAIL },
|
||||
{ "unjail", zfs_do_unjail, HELP_UNJAIL },
|
||||
#endif
|
||||
|
||||
#ifdef __linux__
|
||||
{ NULL },
|
||||
{ "zone", zfs_do_zone, HELP_ZONE },
|
||||
{ "unzone", zfs_do_unzone, HELP_UNZONE },
|
||||
#endif
|
||||
@ -439,9 +431,6 @@ get_usage(zfs_help_t idx)
|
||||
case HELP_REDACT:
|
||||
return (gettext("\tredact <snapshot> <bookmark> "
|
||||
"<redaction_snapshot> ...\n"));
|
||||
case HELP_REWRITE:
|
||||
return (gettext("\trewrite [-Prvx] [-o <offset>] [-l <length>] "
|
||||
"<directory|file ...>\n"));
|
||||
case HELP_JAIL:
|
||||
return (gettext("\tjail <jailid|jailname> <filesystem>\n"));
|
||||
case HELP_UNJAIL:
|
||||
@ -923,22 +912,26 @@ zfs_do_clone(int argc, char **argv)
|
||||
return (!!ret);
|
||||
|
||||
usage:
|
||||
ASSERT0P(zhp);
|
||||
ASSERT3P(zhp, ==, NULL);
|
||||
nvlist_free(props);
|
||||
usage(B_FALSE);
|
||||
return (-1);
|
||||
}
|
||||
|
||||
/*
|
||||
* Calculate the minimum allocation size based on the top-level vdevs.
|
||||
* Return a default volblocksize for the pool which always uses more than
|
||||
* half of the data sectors. This primarily applies to dRAID which always
|
||||
* writes full stripe widths.
|
||||
*/
|
||||
static uint64_t
|
||||
calculate_volblocksize(nvlist_t *config)
|
||||
default_volblocksize(zpool_handle_t *zhp, nvlist_t *props)
|
||||
{
|
||||
uint64_t asize = SPA_MINBLOCKSIZE;
|
||||
uint64_t volblocksize, asize = SPA_MINBLOCKSIZE;
|
||||
nvlist_t *tree, **vdevs;
|
||||
uint_t nvdevs;
|
||||
|
||||
nvlist_t *config = zpool_get_config(zhp, NULL);
|
||||
|
||||
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree) != 0 ||
|
||||
nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN,
|
||||
&vdevs, &nvdevs) != 0) {
|
||||
@ -969,24 +962,6 @@ calculate_volblocksize(nvlist_t *config)
|
||||
}
|
||||
}
|
||||
|
||||
return (asize);
|
||||
}
|
||||
|
||||
/*
|
||||
* Return a default volblocksize for the pool which always uses more than
|
||||
* half of the data sectors. This primarily applies to dRAID which always
|
||||
* writes full stripe widths.
|
||||
*/
|
||||
static uint64_t
|
||||
default_volblocksize(zpool_handle_t *zhp, nvlist_t *props)
|
||||
{
|
||||
uint64_t volblocksize, asize = SPA_MINBLOCKSIZE;
|
||||
|
||||
nvlist_t *config = zpool_get_config(zhp, NULL);
|
||||
|
||||
if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_MAX_ALLOC, &asize) != 0)
|
||||
asize = calculate_volblocksize(config);
|
||||
|
||||
/*
|
||||
* Calculate the target volblocksize such that more than half
|
||||
* of the asize is used. The following table is for 4k sectors.
|
||||
@ -1988,8 +1963,9 @@ fill_dataset_info(nvlist_t *list, zfs_handle_t *zhp, boolean_t as_int)
|
||||
}
|
||||
|
||||
if (type == ZFS_TYPE_SNAPSHOT) {
|
||||
char *snap = strdup(zfs_get_name(zhp));
|
||||
char *ds = strsep(&snap, "@");
|
||||
char *ds, *snap;
|
||||
ds = snap = strdup(zfs_get_name(zhp));
|
||||
ds = strsep(&snap, "@");
|
||||
fnvlist_add_string(list, "dataset", ds);
|
||||
fnvlist_add_string(list, "snapshot_name", snap);
|
||||
free(ds);
|
||||
@ -2032,7 +2008,8 @@ get_callback(zfs_handle_t *zhp, void *data)
|
||||
nvlist_t *user_props = zfs_get_user_props(zhp);
|
||||
zprop_list_t *pl = cbp->cb_proplist;
|
||||
nvlist_t *propval;
|
||||
nvlist_t *item, *d = NULL, *props = NULL;
|
||||
nvlist_t *item, *d, *props;
|
||||
item = d = props = NULL;
|
||||
const char *strval;
|
||||
const char *sourceval;
|
||||
boolean_t received = is_recvd_column(cbp);
|
||||
@ -3008,8 +2985,7 @@ us_type2str(unsigned field_type)
|
||||
}
|
||||
|
||||
static int
|
||||
userspace_cb(void *arg, const char *domain, uid_t rid, uint64_t space,
|
||||
uint64_t default_quota)
|
||||
userspace_cb(void *arg, const char *domain, uid_t rid, uint64_t space)
|
||||
{
|
||||
us_cbdata_t *cb = (us_cbdata_t *)arg;
|
||||
zfs_userquota_prop_t prop = cb->cb_prop;
|
||||
@ -3165,7 +3141,7 @@ userspace_cb(void *arg, const char *domain, uid_t rid, uint64_t space,
|
||||
prop == ZFS_PROP_PROJECTUSED) {
|
||||
propname = "used";
|
||||
if (!nvlist_exists(props, "quota"))
|
||||
(void) nvlist_add_uint64(props, "quota", default_quota);
|
||||
(void) nvlist_add_uint64(props, "quota", 0);
|
||||
} else if (prop == ZFS_PROP_USERQUOTA || prop == ZFS_PROP_GROUPQUOTA ||
|
||||
prop == ZFS_PROP_PROJECTQUOTA) {
|
||||
propname = "quota";
|
||||
@ -3174,10 +3150,8 @@ userspace_cb(void *arg, const char *domain, uid_t rid, uint64_t space,
|
||||
} else if (prop == ZFS_PROP_USEROBJUSED ||
|
||||
prop == ZFS_PROP_GROUPOBJUSED || prop == ZFS_PROP_PROJECTOBJUSED) {
|
||||
propname = "objused";
|
||||
if (!nvlist_exists(props, "objquota")) {
|
||||
(void) nvlist_add_uint64(props, "objquota",
|
||||
default_quota);
|
||||
}
|
||||
if (!nvlist_exists(props, "objquota"))
|
||||
(void) nvlist_add_uint64(props, "objquota", 0);
|
||||
} else if (prop == ZFS_PROP_USEROBJQUOTA ||
|
||||
prop == ZFS_PROP_GROUPOBJQUOTA ||
|
||||
prop == ZFS_PROP_PROJECTOBJQUOTA) {
|
||||
@ -4465,7 +4439,7 @@ zfs_do_rollback(int argc, char **argv)
|
||||
if (cb.cb_create > 0)
|
||||
min_txg = cb.cb_create;
|
||||
|
||||
if ((ret = zfs_iter_snapshots_sorted_v2(zhp, 0, rollback_check, &cb,
|
||||
if ((ret = zfs_iter_snapshots_v2(zhp, 0, rollback_check, &cb,
|
||||
min_txg, 0)) != 0)
|
||||
goto out;
|
||||
if ((ret = zfs_iter_bookmarks_v2(zhp, 0, rollback_check, &cb)) != 0)
|
||||
@ -5317,9 +5291,7 @@ zfs_do_receive(int argc, char **argv)
|
||||
#define ZFS_DELEG_PERM_MOUNT "mount"
|
||||
#define ZFS_DELEG_PERM_SHARE "share"
|
||||
#define ZFS_DELEG_PERM_SEND "send"
|
||||
#define ZFS_DELEG_PERM_SEND_RAW "send:raw"
|
||||
#define ZFS_DELEG_PERM_RECEIVE "receive"
|
||||
#define ZFS_DELEG_PERM_RECEIVE_APPEND "receive:append"
|
||||
#define ZFS_DELEG_PERM_ALLOW "allow"
|
||||
#define ZFS_DELEG_PERM_USERPROP "userprop"
|
||||
#define ZFS_DELEG_PERM_VSCAN "vscan" /* ??? */
|
||||
@ -5360,7 +5332,6 @@ static zfs_deleg_perm_tab_t zfs_deleg_perm_tbl[] = {
|
||||
{ ZFS_DELEG_PERM_RENAME, ZFS_DELEG_NOTE_RENAME },
|
||||
{ ZFS_DELEG_PERM_ROLLBACK, ZFS_DELEG_NOTE_ROLLBACK },
|
||||
{ ZFS_DELEG_PERM_SEND, ZFS_DELEG_NOTE_SEND },
|
||||
{ ZFS_DELEG_PERM_SEND_RAW, ZFS_DELEG_NOTE_SEND_RAW },
|
||||
{ ZFS_DELEG_PERM_SHARE, ZFS_DELEG_NOTE_SHARE },
|
||||
{ ZFS_DELEG_PERM_SNAPSHOT, ZFS_DELEG_NOTE_SNAPSHOT },
|
||||
{ ZFS_DELEG_PERM_BOOKMARK, ZFS_DELEG_NOTE_BOOKMARK },
|
||||
@ -5893,7 +5864,7 @@ parse_fs_perm_set(fs_perm_set_t *fspset, nvlist_t *nvl)
|
||||
static inline const char *
|
||||
deleg_perm_comment(zfs_deleg_note_t note)
|
||||
{
|
||||
const char *str;
|
||||
const char *str = "";
|
||||
|
||||
/* subcommands */
|
||||
switch (note) {
|
||||
@ -5945,10 +5916,6 @@ deleg_perm_comment(zfs_deleg_note_t note)
|
||||
case ZFS_DELEG_NOTE_SEND:
|
||||
str = gettext("");
|
||||
break;
|
||||
case ZFS_DELEG_NOTE_SEND_RAW:
|
||||
str = gettext("Allow sending ONLY encrypted (raw) replication"
|
||||
"\n\t\t\t\tstreams");
|
||||
break;
|
||||
case ZFS_DELEG_NOTE_SHARE:
|
||||
str = gettext("Allows sharing file systems over NFS or SMB"
|
||||
"\n\t\t\t\tprotocols");
|
||||
@ -6878,17 +6845,17 @@ print_holds(boolean_t scripted, int nwidth, int tagwidth, nvlist_t *nvl,
|
||||
|
||||
if (scripted) {
|
||||
if (parsable) {
|
||||
(void) printf("%s\t%s\t%lld\n", zname,
|
||||
tagname, (long long)time);
|
||||
(void) printf("%s\t%s\t%ld\n", zname,
|
||||
tagname, time);
|
||||
} else {
|
||||
(void) printf("%s\t%s\t%s\n", zname,
|
||||
tagname, tsbuf);
|
||||
}
|
||||
} else {
|
||||
if (parsable) {
|
||||
(void) printf("%-*s %-*s %lld\n",
|
||||
(void) printf("%-*s %-*s %ld\n",
|
||||
nwidth, zname, tagwidth,
|
||||
tagname, (long long)time);
|
||||
tagname, time);
|
||||
} else {
|
||||
(void) printf("%-*s %-*s %s\n",
|
||||
nwidth, zname, tagwidth,
|
||||
@ -7747,7 +7714,6 @@ unshare_unmount_path(int op, char *path, int flags, boolean_t is_manual)
|
||||
struct extmnttab entry;
|
||||
const char *cmdname = (op == OP_SHARE) ? "unshare" : "unmount";
|
||||
ino_t path_inode;
|
||||
char *zfs_mntpnt, *entry_mntpnt;
|
||||
|
||||
/*
|
||||
* Search for the given (major,minor) pair in the mount table.
|
||||
@ -7789,24 +7755,6 @@ unshare_unmount_path(int op, char *path, int flags, boolean_t is_manual)
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
* If the filesystem is mounted, check that the mountpoint matches
|
||||
* the one in the mnttab entry w.r.t. provided path. If it doesn't,
|
||||
* then we should not proceed further.
|
||||
*/
|
||||
entry_mntpnt = strdup(entry.mnt_mountp);
|
||||
if (zfs_is_mounted(zhp, &zfs_mntpnt)) {
|
||||
if (strcmp(zfs_mntpnt, entry_mntpnt) != 0) {
|
||||
(void) fprintf(stderr, gettext("cannot %s '%s': "
|
||||
"not an original mountpoint\n"), cmdname, path);
|
||||
free(zfs_mntpnt);
|
||||
free(entry_mntpnt);
|
||||
goto out;
|
||||
}
|
||||
free(zfs_mntpnt);
|
||||
}
|
||||
free(entry_mntpnt);
|
||||
|
||||
if (op == OP_SHARE) {
|
||||
char nfs_mnt_prop[ZFS_MAXPROPLEN];
|
||||
char smbshare_prop[ZFS_MAXPROPLEN];
|
||||
@ -9063,195 +9011,6 @@ zfs_do_project(int argc, char **argv)
|
||||
return (ret);
|
||||
}
|
||||
|
||||
static int
|
||||
zfs_rewrite_file(const char *path, boolean_t verbose, zfs_rewrite_args_t *args)
|
||||
{
|
||||
int fd, ret = 0;
|
||||
|
||||
fd = open(path, O_WRONLY);
|
||||
if (fd < 0) {
|
||||
ret = errno;
|
||||
(void) fprintf(stderr, gettext("failed to open %s: %s\n"),
|
||||
path, strerror(errno));
|
||||
return (ret);
|
||||
}
|
||||
|
||||
if (ioctl(fd, ZFS_IOC_REWRITE, args) < 0) {
|
||||
ret = errno;
|
||||
(void) fprintf(stderr, gettext("failed to rewrite %s: %s\n"),
|
||||
path, strerror(errno));
|
||||
} else if (verbose) {
|
||||
printf("%s\n", path);
|
||||
}
|
||||
|
||||
close(fd);
|
||||
return (ret);
|
||||
}
|
||||
|
||||
static int
|
||||
zfs_rewrite_dir(const char *path, boolean_t verbose, boolean_t xdev, dev_t dev,
|
||||
zfs_rewrite_args_t *args, nvlist_t *dirs)
|
||||
{
|
||||
struct dirent *ent;
|
||||
DIR *dir;
|
||||
int ret = 0, err;
|
||||
|
||||
dir = opendir(path);
|
||||
if (dir == NULL) {
|
||||
if (errno == ENOENT)
|
||||
return (0);
|
||||
ret = errno;
|
||||
(void) fprintf(stderr, gettext("failed to opendir %s: %s\n"),
|
||||
path, strerror(errno));
|
||||
return (ret);
|
||||
}
|
||||
|
||||
size_t plen = strlen(path) + 1;
|
||||
while ((ent = readdir(dir)) != NULL) {
|
||||
char *fullname;
|
||||
struct stat st;
|
||||
|
||||
if (ent->d_type != DT_REG && ent->d_type != DT_DIR)
|
||||
continue;
|
||||
|
||||
if (strcmp(ent->d_name, ".") == 0 ||
|
||||
strcmp(ent->d_name, "..") == 0)
|
||||
continue;
|
||||
|
||||
if (plen + strlen(ent->d_name) >= PATH_MAX) {
|
||||
(void) fprintf(stderr, gettext("path too long %s/%s\n"),
|
||||
path, ent->d_name);
|
||||
ret = ENAMETOOLONG;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (asprintf(&fullname, "%s/%s", path, ent->d_name) == -1) {
|
||||
(void) fprintf(stderr,
|
||||
gettext("failed to allocate memory\n"));
|
||||
ret = ENOMEM;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (xdev) {
|
||||
if (lstat(fullname, &st) < 0) {
|
||||
ret = errno;
|
||||
(void) fprintf(stderr,
|
||||
gettext("failed to stat %s: %s\n"),
|
||||
fullname, strerror(errno));
|
||||
free(fullname);
|
||||
continue;
|
||||
}
|
||||
if (st.st_dev != dev) {
|
||||
free(fullname);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
if (ent->d_type == DT_REG) {
|
||||
err = zfs_rewrite_file(fullname, verbose, args);
|
||||
if (err)
|
||||
ret = err;
|
||||
} else { /* DT_DIR */
|
||||
fnvlist_add_uint64(dirs, fullname, dev);
|
||||
}
|
||||
|
||||
free(fullname);
|
||||
}
|
||||
|
||||
closedir(dir);
|
||||
return (ret);
|
||||
}
|
||||
|
||||
static int
|
||||
zfs_rewrite_path(const char *path, boolean_t verbose, boolean_t recurse,
|
||||
boolean_t xdev, zfs_rewrite_args_t *args, nvlist_t *dirs)
|
||||
{
|
||||
struct stat st;
|
||||
int ret = 0;
|
||||
|
||||
if (lstat(path, &st) < 0) {
|
||||
ret = errno;
|
||||
(void) fprintf(stderr, gettext("failed to stat %s: %s\n"),
|
||||
path, strerror(errno));
|
||||
return (ret);
|
||||
}
|
||||
|
||||
if (S_ISREG(st.st_mode)) {
|
||||
ret = zfs_rewrite_file(path, verbose, args);
|
||||
} else if (S_ISDIR(st.st_mode) && recurse) {
|
||||
ret = zfs_rewrite_dir(path, verbose, xdev, st.st_dev, args,
|
||||
dirs);
|
||||
}
|
||||
return (ret);
|
||||
}
|
||||
|
||||
static int
|
||||
zfs_do_rewrite(int argc, char **argv)
|
||||
{
|
||||
int ret = 0, err, c;
|
||||
boolean_t recurse = B_FALSE, verbose = B_FALSE, xdev = B_FALSE;
|
||||
|
||||
if (argc < 2)
|
||||
usage(B_FALSE);
|
||||
|
||||
zfs_rewrite_args_t args;
|
||||
memset(&args, 0, sizeof (args));
|
||||
|
||||
while ((c = getopt(argc, argv, "Pl:o:rvx")) != -1) {
|
||||
switch (c) {
|
||||
case 'P':
|
||||
args.flags |= ZFS_REWRITE_PHYSICAL;
|
||||
break;
|
||||
case 'l':
|
||||
args.len = strtoll(optarg, NULL, 0);
|
||||
break;
|
||||
case 'o':
|
||||
args.off = strtoll(optarg, NULL, 0);
|
||||
break;
|
||||
case 'r':
|
||||
recurse = B_TRUE;
|
||||
break;
|
||||
case 'v':
|
||||
verbose = B_TRUE;
|
||||
break;
|
||||
case 'x':
|
||||
xdev = B_TRUE;
|
||||
break;
|
||||
default:
|
||||
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
|
||||
optopt);
|
||||
usage(B_FALSE);
|
||||
}
|
||||
}
|
||||
|
||||
argv += optind;
|
||||
argc -= optind;
|
||||
if (argc == 0) {
|
||||
(void) fprintf(stderr,
|
||||
gettext("missing file or directory target(s)\n"));
|
||||
usage(B_FALSE);
|
||||
}
|
||||
|
||||
nvlist_t *dirs = fnvlist_alloc();
|
||||
for (int i = 0; i < argc; i++) {
|
||||
err = zfs_rewrite_path(argv[i], verbose, recurse, xdev, &args,
|
||||
dirs);
|
||||
if (err)
|
||||
ret = err;
|
||||
}
|
||||
nvpair_t *dir;
|
||||
while ((dir = nvlist_next_nvpair(dirs, NULL)) != NULL) {
|
||||
err = zfs_rewrite_dir(nvpair_name(dir), verbose, xdev,
|
||||
fnvpair_value_uint64(dir), &args, dirs);
|
||||
if (err)
|
||||
ret = err;
|
||||
fnvlist_remove_nvpair(dirs, dir);
|
||||
}
|
||||
fnvlist_free(dirs);
|
||||
|
||||
return (ret);
|
||||
}
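A hedged usage sketch of the new 'zfs rewrite' subcommand wired up above, based on the option string "Pl:o:rvx" and the help text shown earlier; the dataset path, file name, and sizes below are made-up examples, not output from the change itself.

# Rewrite every regular file under /tank/data, staying on one filesystem (-x),
# printing each path as it is rewritten (-v).
zfs rewrite -r -v -x /tank/data

# Rewrite only the first 1 MiB of a single file (offset 0, length 1048576).
zfs rewrite -o 0 -l 1048576 /tank/data/big.db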
|
||||
|
||||
static int
|
||||
zfs_do_wait(int argc, char **argv)
|
||||
{
|
||||
|
@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
|
@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
|
@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
|
@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
|
@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
|
346 cmd/zhack.c
@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
@ -54,7 +53,6 @@
|
||||
#include <sys/dmu_tx.h>
|
||||
#include <zfeature_common.h>
|
||||
#include <libzutil.h>
|
||||
#include <sys/metaslab_impl.h>
|
||||
|
||||
static importargs_t g_importargs;
|
||||
static char *g_pool;
|
||||
@ -70,8 +68,7 @@ static __attribute__((noreturn)) void
|
||||
usage(void)
|
||||
{
|
||||
(void) fprintf(stderr,
|
||||
"Usage: zhack [-o tunable] [-c cachefile] [-d dir] <subcommand> "
|
||||
"<args> ...\n"
|
||||
"Usage: zhack [-c cachefile] [-d dir] <subcommand> <args> ...\n"
|
||||
"where <subcommand> <args> is one of the following:\n"
|
||||
"\n");
|
||||
|
||||
@ -95,10 +92,7 @@ usage(void)
|
||||
" -c repair corrupted label checksums\n"
|
||||
" -u restore the label on a detached device\n"
|
||||
"\n"
|
||||
" <device> : path to vdev\n"
|
||||
"\n"
|
||||
" metaslab leak <pool>\n"
|
||||
" apply allocation map from zdb to specified pool\n");
|
||||
" <device> : path to vdev\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
@ -167,9 +161,9 @@ zhack_import(char *target, boolean_t readonly)
|
||||
|
||||
props = NULL;
|
||||
if (readonly) {
|
||||
VERIFY0(nvlist_alloc(&props, NV_UNIQUE_NAME, 0));
|
||||
VERIFY0(nvlist_add_uint64(props,
|
||||
zpool_prop_to_name(ZPOOL_PROP_READONLY), 1));
|
||||
VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0);
|
||||
VERIFY(nvlist_add_uint64(props,
|
||||
zpool_prop_to_name(ZPOOL_PROP_READONLY), 1) == 0);
|
||||
}
|
||||
|
||||
zfeature_checks_disable = B_TRUE;
|
||||
@ -223,8 +217,8 @@ dump_obj(objset_t *os, uint64_t obj, const char *name)
|
||||
} else {
|
||||
ASSERT(za->za_integer_length == 1);
|
||||
char val[1024];
|
||||
VERIFY0(zap_lookup(os, obj, za->za_name,
|
||||
1, sizeof (val), val));
|
||||
VERIFY(zap_lookup(os, obj, za->za_name,
|
||||
1, sizeof (val), val) == 0);
|
||||
(void) printf("\t%s = %s\n", za->za_name, val);
|
||||
}
|
||||
}
|
||||
@ -368,12 +362,10 @@ feature_incr_sync(void *arg, dmu_tx_t *tx)
|
||||
zfeature_info_t *feature = arg;
|
||||
uint64_t refcount;
|
||||
|
||||
mutex_enter(&spa->spa_feat_stats_lock);
|
||||
VERIFY0(feature_get_refcount_from_disk(spa, feature, &refcount));
|
||||
feature_sync(spa, feature, refcount + 1, tx);
|
||||
spa_history_log_internal(spa, "zhack feature incr", tx,
|
||||
"name=%s", feature->fi_guid);
|
||||
mutex_exit(&spa->spa_feat_stats_lock);
|
||||
}
|
||||
|
||||
static void
|
||||
@ -383,12 +375,10 @@ feature_decr_sync(void *arg, dmu_tx_t *tx)
|
||||
zfeature_info_t *feature = arg;
|
||||
uint64_t refcount;
|
||||
|
||||
mutex_enter(&spa->spa_feat_stats_lock);
|
||||
VERIFY0(feature_get_refcount_from_disk(spa, feature, &refcount));
|
||||
feature_sync(spa, feature, refcount - 1, tx);
|
||||
spa_history_log_internal(spa, "zhack feature decr", tx,
|
||||
"name=%s", feature->fi_guid);
|
||||
mutex_exit(&spa->spa_feat_stats_lock);
|
||||
}
|
||||
|
||||
static void
|
||||
@ -505,186 +495,6 @@ zhack_do_feature(int argc, char **argv)
|
||||
return (0);
|
||||
}
|
||||
|
||||
static boolean_t
|
||||
strstarts(const char *a, const char *b)
|
||||
{
|
||||
return (strncmp(a, b, strlen(b)) == 0);
|
||||
}
|
||||
|
||||
static void
|
||||
metaslab_force_alloc(metaslab_t *msp, uint64_t start, uint64_t size,
|
||||
dmu_tx_t *tx)
|
||||
{
|
||||
ASSERT(msp->ms_disabled);
|
||||
ASSERT(MUTEX_HELD(&msp->ms_lock));
|
||||
uint64_t txg = dmu_tx_get_txg(tx);
|
||||
|
||||
uint64_t off = start;
|
||||
while (off < start + size) {
|
||||
uint64_t ostart, osize;
|
||||
boolean_t found = zfs_range_tree_find_in(msp->ms_allocatable,
|
||||
off, start + size - off, &ostart, &osize);
|
||||
if (!found)
|
||||
break;
|
||||
zfs_range_tree_remove(msp->ms_allocatable, ostart, osize);
|
||||
|
||||
if (zfs_range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
|
||||
vdev_dirty(msp->ms_group->mg_vd, VDD_METASLAB, msp,
|
||||
txg);
|
||||
|
||||
zfs_range_tree_add(msp->ms_allocating[txg & TXG_MASK], ostart,
|
||||
osize);
|
||||
msp->ms_allocating_total += osize;
|
||||
off = ostart + osize;
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
zhack_do_metaslab_leak(int argc, char **argv)
|
||||
{
|
||||
int c;
|
||||
char *target;
|
||||
spa_t *spa;
|
||||
|
||||
optind = 1;
|
||||
boolean_t force = B_FALSE;
|
||||
while ((c = getopt(argc, argv, "f")) != -1) {
|
||||
switch (c) {
|
||||
case 'f':
|
||||
force = B_TRUE;
|
||||
break;
|
||||
default:
|
||||
usage();
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
argc -= optind;
|
||||
argv += optind;
|
||||
|
||||
if (argc < 1) {
|
||||
(void) fprintf(stderr, "error: missing pool name\n");
|
||||
usage();
|
||||
}
|
||||
target = argv[0];
|
||||
|
||||
zhack_spa_open(target, B_FALSE, FTAG, &spa);
|
||||
spa_config_enter(spa, SCL_VDEV | SCL_ALLOC, FTAG, RW_READER);
|
||||
|
||||
char *line = NULL;
|
||||
size_t cap = 0;
|
||||
|
||||
vdev_t *vd = NULL;
|
||||
metaslab_t *prev = NULL;
|
||||
dmu_tx_t *tx = NULL;
|
||||
while (getline(&line, &cap, stdin) > 0) {
|
||||
if (strstarts(line, "\tvdev ")) {
|
||||
uint64_t vdev_id, ms_shift;
|
||||
if (sscanf(line,
|
||||
"\tvdev %10"PRIu64"\t%*s metaslab shift %4"PRIu64,
|
||||
&vdev_id, &ms_shift) == 1) {
|
||||
VERIFY3U(sscanf(line, "\tvdev %"PRIu64
|
||||
"\t metaslab shift %4"PRIu64,
|
||||
&vdev_id, &ms_shift), ==, 2);
|
||||
}
|
||||
vd = vdev_lookup_top(spa, vdev_id);
|
||||
if (vd == NULL) {
|
||||
fprintf(stderr, "error: no such vdev with "
|
||||
"id %"PRIu64"\n", vdev_id);
|
||||
break;
|
||||
}
|
||||
if (tx) {
|
||||
dmu_tx_commit(tx);
|
||||
mutex_exit(&prev->ms_lock);
|
||||
metaslab_enable(prev, B_FALSE, B_FALSE);
|
||||
tx = NULL;
|
||||
prev = NULL;
|
||||
}
|
||||
if (vd->vdev_ms_shift != ms_shift) {
|
||||
fprintf(stderr, "error: ms_shift mismatch: %"
|
||||
PRIu64" != %"PRIu64"\n", vd->vdev_ms_shift,
|
||||
ms_shift);
|
||||
break;
|
||||
}
|
||||
} else if (strstarts(line, "\tmetaslabs ")) {
|
||||
uint64_t ms_count;
|
||||
VERIFY3U(sscanf(line, "\tmetaslabs %"PRIu64, &ms_count),
|
||||
==, 1);
|
||||
ASSERT(vd);
|
||||
if (!force && vd->vdev_ms_count != ms_count) {
|
||||
fprintf(stderr, "error: ms_count mismatch: %"
|
||||
PRIu64" != %"PRIu64"\n", vd->vdev_ms_count,
|
||||
ms_count);
|
||||
break;
|
||||
}
|
||||
} else if (strstarts(line, "ALLOC:")) {
|
||||
uint64_t start, size;
|
||||
VERIFY3U(sscanf(line, "ALLOC: %"PRIu64" %"PRIu64"\n",
|
||||
&start, &size), ==, 2);
|
||||
|
||||
ASSERT(vd);
|
||||
metaslab_t *cur =
|
||||
vd->vdev_ms[start >> vd->vdev_ms_shift];
|
||||
if (prev != cur) {
|
||||
if (prev) {
|
||||
dmu_tx_commit(tx);
|
||||
mutex_exit(&prev->ms_lock);
|
||||
metaslab_enable(prev, B_FALSE, B_FALSE);
|
||||
}
|
||||
ASSERT(cur);
|
||||
metaslab_disable(cur);
|
||||
mutex_enter(&cur->ms_lock);
|
||||
metaslab_load(cur);
|
||||
prev = cur;
|
||||
tx = dmu_tx_create_dd(
|
||||
spa_get_dsl(vd->vdev_spa)->dp_root_dir);
|
||||
dmu_tx_assign(tx, DMU_TX_WAIT);
|
||||
}
|
||||
|
||||
metaslab_force_alloc(cur, start, size, tx);
|
||||
} else {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
if (tx) {
|
||||
dmu_tx_commit(tx);
|
||||
mutex_exit(&prev->ms_lock);
|
||||
metaslab_enable(prev, B_FALSE, B_FALSE);
|
||||
tx = NULL;
|
||||
prev = NULL;
|
||||
}
|
||||
if (line)
|
||||
free(line);
|
||||
|
||||
spa_config_exit(spa, SCL_VDEV | SCL_ALLOC, FTAG);
|
||||
spa_close(spa, FTAG);
|
||||
}
|
||||
|
||||
static int
|
||||
zhack_do_metaslab(int argc, char **argv)
|
||||
{
|
||||
char *subcommand;
|
||||
|
||||
argc--;
|
||||
argv++;
|
||||
if (argc == 0) {
|
||||
(void) fprintf(stderr,
|
||||
"error: no metaslab operation specified\n");
|
||||
usage();
|
||||
}
|
||||
|
||||
subcommand = argv[0];
|
||||
if (strcmp(subcommand, "leak") == 0) {
|
||||
zhack_do_metaslab_leak(argc, argv);
|
||||
} else {
|
||||
(void) fprintf(stderr, "error: unknown subcommand: %s\n",
|
||||
subcommand);
|
||||
usage();
|
||||
}
|
||||
|
||||
return (0);
|
||||
}
|
||||
|
||||
#define ASHIFT_UBERBLOCK_SHIFT(ashift) \
|
||||
MIN(MAX(ashift, UBERBLOCK_SHIFT), \
|
||||
MAX_UBERBLOCK_SHIFT)
|
||||
@ -714,23 +524,6 @@ zhack_repair_read_label(const int fd, vdev_label_t *vl,
|
||||
return (0);
|
||||
}
|
||||
|
||||
static int
|
||||
zhack_repair_get_byteswap(const zio_eck_t *vdev_eck, const int l, int *byteswap)
|
||||
{
|
||||
if (vdev_eck->zec_magic == ZEC_MAGIC) {
|
||||
*byteswap = B_FALSE;
|
||||
} else if (vdev_eck->zec_magic == BSWAP_64((uint64_t)ZEC_MAGIC)) {
|
||||
*byteswap = B_TRUE;
|
||||
} else {
|
||||
(void) fprintf(stderr, "error: label %d: "
|
||||
"Expected the nvlist checksum magic number but instead got "
|
||||
"0x%" PRIx64 "\n",
|
||||
l, vdev_eck->zec_magic);
|
||||
return (1);
|
||||
}
|
||||
return (0);
|
||||
}
|
||||
|
||||
static void
|
||||
zhack_repair_calc_cksum(const int byteswap, void *data, const uint64_t offset,
|
||||
const uint64_t abdsize, zio_eck_t *eck, zio_cksum_t *cksum)
|
||||
@ -757,10 +550,33 @@ zhack_repair_calc_cksum(const int byteswap, void *data, const uint64_t offset,
|
||||
}
|
||||
|
||||
static int
|
||||
zhack_repair_get_ashift(nvlist_t *cfg, const int l, uint64_t *ashift)
|
||||
zhack_repair_check_label(uberblock_t *ub, const int l, const char **cfg_keys,
|
||||
const size_t cfg_keys_len, nvlist_t *cfg, nvlist_t *vdev_tree_cfg,
|
||||
uint64_t *ashift)
|
||||
{
|
||||
int err;
|
||||
nvlist_t *vdev_tree_cfg;
|
||||
|
||||
if (ub->ub_txg != 0) {
|
||||
(void) fprintf(stderr,
|
||||
"error: label %d: UB TXG of 0 expected, but got %"
|
||||
PRIu64 "\n",
|
||||
l, ub->ub_txg);
|
||||
(void) fprintf(stderr, "It would appear the device was not "
|
||||
"properly removed.\n");
|
||||
return (1);
|
||||
}
|
||||
|
||||
for (int i = 0; i < cfg_keys_len; i++) {
|
||||
uint64_t val;
|
||||
err = nvlist_lookup_uint64(cfg, cfg_keys[i], &val);
|
||||
if (err) {
|
||||
(void) fprintf(stderr,
|
||||
"error: label %d, %d: "
|
||||
"cannot find nvlist key %s\n",
|
||||
l, i, cfg_keys[i]);
|
||||
return (err);
|
||||
}
|
||||
}
|
||||
|
||||
err = nvlist_lookup_nvlist(cfg,
|
||||
ZPOOL_CONFIG_VDEV_TREE, &vdev_tree_cfg);
|
||||
@ -784,7 +600,7 @@ zhack_repair_get_ashift(nvlist_t *cfg, const int l, uint64_t *ashift)
|
||||
(void) fprintf(stderr,
|
||||
"error: label %d: nvlist key %s is zero\n",
|
||||
l, ZPOOL_CONFIG_ASHIFT);
|
||||
return (1);
|
||||
return (err);
|
||||
}
|
||||
|
||||
return (0);
|
||||
@ -799,35 +615,30 @@ zhack_repair_undetach(uberblock_t *ub, nvlist_t *cfg, const int l)
|
||||
*/
|
||||
if (BP_GET_LOGICAL_BIRTH(&ub->ub_rootbp) != 0) {
|
||||
const uint64_t txg = BP_GET_LOGICAL_BIRTH(&ub->ub_rootbp);
|
||||
int err;
|
||||
|
||||
ub->ub_txg = txg;
|
||||
|
||||
err = nvlist_remove_all(cfg, ZPOOL_CONFIG_CREATE_TXG);
|
||||
if (err) {
|
||||
if (nvlist_remove_all(cfg, ZPOOL_CONFIG_CREATE_TXG) != 0) {
|
||||
(void) fprintf(stderr,
|
||||
"error: label %d: "
|
||||
"Failed to remove pool creation TXG\n",
|
||||
l);
|
||||
return (err);
|
||||
return (1);
|
||||
}
|
||||
|
||||
err = nvlist_remove_all(cfg, ZPOOL_CONFIG_POOL_TXG);
|
||||
if (err) {
|
||||
if (nvlist_remove_all(cfg, ZPOOL_CONFIG_POOL_TXG) != 0) {
|
||||
(void) fprintf(stderr,
|
||||
"error: label %d: Failed to remove pool TXG to "
|
||||
"be replaced.\n",
|
||||
l);
|
||||
return (err);
|
||||
return (1);
|
||||
}
|
||||
|
||||
err = nvlist_add_uint64(cfg, ZPOOL_CONFIG_POOL_TXG, txg);
|
||||
if (err) {
|
||||
if (nvlist_add_uint64(cfg, ZPOOL_CONFIG_POOL_TXG, txg) != 0) {
|
||||
(void) fprintf(stderr,
|
||||
"error: label %d: "
|
||||
"Failed to add pool TXG of %" PRIu64 "\n",
|
||||
l, txg);
|
||||
return (err);
|
||||
return (1);
|
||||
}
|
||||
}
|
||||
|
||||
@ -921,7 +732,6 @@ zhack_repair_test_cksum(const int byteswap, void *vdev_data,
|
||||
BSWAP_64(ZEC_MAGIC) : ZEC_MAGIC;
|
||||
const uint64_t actual_magic = vdev_eck->zec_magic;
|
||||
int err = 0;
|
||||
|
||||
if (actual_magic != expected_magic) {
|
||||
(void) fprintf(stderr, "error: label %d: "
|
||||
"Expected "
|
||||
@ -943,36 +753,6 @@ zhack_repair_test_cksum(const int byteswap, void *vdev_data,
|
||||
return (err);
|
||||
}
|
||||
|
||||
static int
|
||||
zhack_repair_unpack_cfg(vdev_label_t *vl, const int l, nvlist_t **cfg)
|
||||
{
|
||||
const char *cfg_keys[] = { ZPOOL_CONFIG_VERSION,
|
||||
ZPOOL_CONFIG_POOL_STATE, ZPOOL_CONFIG_GUID };
|
||||
int err;
|
||||
|
||||
err = nvlist_unpack(vl->vl_vdev_phys.vp_nvlist,
|
||||
VDEV_PHYS_SIZE - sizeof (zio_eck_t), cfg, 0);
|
||||
if (err) {
|
||||
(void) fprintf(stderr,
|
||||
"error: cannot unpack nvlist label %d\n", l);
|
||||
return (err);
|
||||
}
|
||||
|
||||
for (int i = 0; i < ARRAY_SIZE(cfg_keys); i++) {
|
||||
uint64_t val;
|
||||
err = nvlist_lookup_uint64(*cfg, cfg_keys[i], &val);
|
||||
if (err) {
|
||||
(void) fprintf(stderr,
|
||||
"error: label %d, %d: "
|
||||
"cannot find nvlist key %s\n",
|
||||
l, i, cfg_keys[i]);
|
||||
return (err);
|
||||
}
|
||||
}
|
||||
|
||||
return (0);
|
||||
}
|
||||
|
||||
static void
|
||||
zhack_repair_one_label(const zhack_repair_op_t op, const int fd,
|
||||
vdev_label_t *vl, const uint64_t label_offset, const int l,
|
||||
@ -986,7 +766,10 @@ zhack_repair_one_label(const zhack_repair_op_t op, const int fd,
|
||||
(zio_eck_t *)((char *)(vdev_data) + VDEV_PHYS_SIZE) - 1;
|
||||
const uint64_t vdev_phys_offset =
|
||||
label_offset + offsetof(vdev_label_t, vl_vdev_phys);
|
||||
const char *cfg_keys[] = { ZPOOL_CONFIG_VERSION,
|
||||
ZPOOL_CONFIG_POOL_STATE, ZPOOL_CONFIG_GUID };
|
||||
nvlist_t *cfg;
|
||||
nvlist_t *vdev_tree_cfg = NULL;
|
||||
uint64_t ashift;
|
||||
int byteswap;
|
||||
|
||||
@ -994,9 +777,18 @@ zhack_repair_one_label(const zhack_repair_op_t op, const int fd,
|
||||
if (err)
|
||||
return;
|
||||
|
||||
err = zhack_repair_get_byteswap(vdev_eck, l, &byteswap);
|
||||
if (err)
|
||||
if (vdev_eck->zec_magic == 0) {
|
||||
(void) fprintf(stderr, "error: label %d: "
|
||||
"Expected the nvlist checksum magic number to not be zero"
|
||||
"\n",
|
||||
l);
|
||||
(void) fprintf(stderr, "There should already be a checksum "
|
||||
"for the label.\n");
|
||||
return;
|
||||
}
|
||||
|
||||
byteswap =
|
||||
(vdev_eck->zec_magic == BSWAP_64((uint64_t)ZEC_MAGIC));
|
||||
|
||||
if (byteswap) {
|
||||
byteswap_uint64_array(&vdev_eck->zec_cksum,
|
||||
@ -1012,7 +804,16 @@ zhack_repair_one_label(const zhack_repair_op_t op, const int fd,
|
||||
return;
|
||||
}
|
||||
|
||||
err = zhack_repair_unpack_cfg(vl, l, &cfg);
|
||||
err = nvlist_unpack(vl->vl_vdev_phys.vp_nvlist,
|
||||
VDEV_PHYS_SIZE - sizeof (zio_eck_t), &cfg, 0);
|
||||
if (err) {
|
||||
(void) fprintf(stderr,
|
||||
"error: cannot unpack nvlist label %d\n", l);
|
||||
return;
|
||||
}
|
||||
|
||||
err = zhack_repair_check_label(ub,
|
||||
l, cfg_keys, ARRAY_SIZE(cfg_keys), cfg, vdev_tree_cfg, &ashift);
|
||||
if (err)
|
||||
return;
|
||||
|
||||
@ -1020,19 +821,6 @@ zhack_repair_one_label(const zhack_repair_op_t op, const int fd,
|
||||
char *buf;
|
||||
size_t buflen;
|
||||
|
||||
if (ub->ub_txg != 0) {
|
||||
(void) fprintf(stderr,
|
||||
"error: label %d: UB TXG of 0 expected, but got %"
|
||||
PRIu64 "\n", l, ub->ub_txg);
|
||||
(void) fprintf(stderr, "It would appear the device was "
|
||||
"not properly detached.\n");
|
||||
return;
|
||||
}
|
||||
|
||||
err = zhack_repair_get_ashift(cfg, l, &ashift);
|
||||
if (err)
|
||||
return;
|
||||
|
||||
err = zhack_repair_undetach(ub, cfg, l);
|
||||
if (err)
|
||||
return;
|
||||
@ -1192,7 +980,7 @@ main(int argc, char **argv)
|
||||
dprintf_setup(&argc, argv);
|
||||
zfs_prop_init();
|
||||
|
||||
while ((c = getopt(argc, argv, "+c:d:o:")) != -1) {
|
||||
while ((c = getopt(argc, argv, "+c:d:")) != -1) {
|
||||
switch (c) {
|
||||
case 'c':
|
||||
g_importargs.cachefile = optarg;
|
||||
@ -1201,10 +989,6 @@ main(int argc, char **argv)
|
||||
assert(g_importargs.paths < MAX_NUM_PATHS);
|
||||
g_importargs.path[g_importargs.paths++] = optarg;
|
||||
break;
|
||||
case 'o':
|
||||
if (handle_tunable_option(optarg, B_FALSE) != 0)
|
||||
exit(1);
|
||||
break;
|
||||
default:
|
||||
usage();
|
||||
break;
|
||||
@ -1226,8 +1010,6 @@ main(int argc, char **argv)
|
||||
rv = zhack_do_feature(argc, argv);
|
||||
} else if (strcmp(subcommand, "label") == 0) {
|
||||
return (zhack_do_label(argc, argv));
|
||||
} else if (strcmp(subcommand, "metaslab") == 0) {
|
||||
rv = zhack_do_metaslab(argc, argv);
|
||||
} else {
|
||||
(void) fprintf(stderr, "error: unknown subcommand: %s\n",
|
||||
subcommand);
|
||||
|
@@ -1,5 +1,4 @@
#!/usr/bin/env @PYTHON_SHEBANG@
# SPDX-License-Identifier: CDDL-1.0
#
# Print out statistics for all zil stats. This information is
# available through the zil kstat.
@@ -47,7 +46,6 @@ cols = {
"cec": [5, 1000, "zil_commit_error_count"],
"csc": [5, 1000, "zil_commit_stall_count"],
"cSc": [5, 1000, "zil_commit_suspend_count"],
"cCc": [5, 1000, "zil_commit_crash_count"],
"ic": [5, 1000, "zil_itx_count"],
"iic": [5, 1000, "zil_itx_indirect_count"],
"iib": [5, 1024, "zil_itx_indirect_bytes"],
|
||||
|
@@ -1,4 +1,3 @@
// SPDX-License-Identifier: CDDL-1.0
/*
* CDDL HEADER START
*
|
||||
|
@@ -1,4 +1,3 @@
// SPDX-License-Identifier: CDDL-1.0
/*
* CDDL HEADER START
*
@@ -23,7 +22,7 @@
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2015 by Delphix. All rights reserved.
* Copyright (c) 2017, Intel Corporation.
* Copyright (c) 2023-2025, Klara, Inc.
* Copyright (c) 2023-2024, Klara Inc.
*/

/*
@@ -243,36 +242,6 @@ err_to_str(int err)
return ("[unknown]");
}

static const char *const iotypestrtable[ZINJECT_IOTYPES] = {
[ZINJECT_IOTYPE_NULL] = "null",
[ZINJECT_IOTYPE_READ] = "read",
[ZINJECT_IOTYPE_WRITE] = "write",
[ZINJECT_IOTYPE_FREE] = "free",
[ZINJECT_IOTYPE_CLAIM] = "claim",
[ZINJECT_IOTYPE_FLUSH] = "flush",
[ZINJECT_IOTYPE_TRIM] = "trim",
[ZINJECT_IOTYPE_ALL] = "all",
[ZINJECT_IOTYPE_PROBE] = "probe",
};

static zinject_iotype_t
str_to_iotype(const char *arg)
{
for (uint_t iotype = 0; iotype < ZINJECT_IOTYPES; iotype++)
if (iotypestrtable[iotype] != NULL &&
strcasecmp(iotypestrtable[iotype], arg) == 0)
return (iotype);
return (ZINJECT_IOTYPES);
}

static const char *
iotype_to_str(zinject_iotype_t iotype)
{
if (iotype >= ZINJECT_IOTYPES || iotypestrtable[iotype] == NULL)
return ("[unknown]");
return (iotypestrtable[iotype]);
}

/*
* Print usage message.
*/
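str_to_iotype() and iotype_to_str() above are a simple table-driven mapping between enum values and their user-visible names. A self-contained sketch of the same pattern, using a stand-in enum rather than the real ZINJECT_IOTYPE_* values from the zinject headers:

#include <stdio.h>
#include <strings.h>

/* Stand-in enum so the example compiles on its own. */
typedef enum { EX_IO_READ, EX_IO_WRITE, EX_IO_FLUSH, EX_IO_TYPES } ex_iotype_t;

static const char *const ex_iotype_names[EX_IO_TYPES] = {
	[EX_IO_READ] = "read",
	[EX_IO_WRITE] = "write",
	[EX_IO_FLUSH] = "flush",
};

static ex_iotype_t
ex_str_to_iotype(const char *arg)
{
	/* Case-insensitive scan of the table, like the zinject helper. */
	for (unsigned i = 0; i < EX_IO_TYPES; i++)
		if (ex_iotype_names[i] != NULL &&
		    strcasecmp(ex_iotype_names[i], arg) == 0)
			return (i);
	return (EX_IO_TYPES);	/* sentinel: not found */
}

int
main(void)
{
	ex_iotype_t t = ex_str_to_iotype("Write");
	printf("parsed to %d (%s)\n", t,
	    t < EX_IO_TYPES ? ex_iotype_names[t] : "[unknown]");
	return (0);
}
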
@ -435,30 +404,26 @@ print_data_handler(int id, const char *pool, zinject_record_t *record,
|
||||
|
||||
if (*count == 0) {
|
||||
(void) printf("%3s %-15s %-6s %-6s %-8s %3s %-4s "
|
||||
"%-15s %-6s %-15s\n", "ID", "POOL", "OBJSET", "OBJECT",
|
||||
"TYPE", "LVL", "DVAs", "RANGE", "MATCH", "INJECT");
|
||||
"%-15s\n", "ID", "POOL", "OBJSET", "OBJECT", "TYPE",
|
||||
"LVL", "DVAs", "RANGE");
|
||||
(void) printf("--- --------------- ------ "
|
||||
"------ -------- --- ---- --------------- "
|
||||
"------ ------\n");
|
||||
"------ -------- --- ---- ---------------\n");
|
||||
}
|
||||
|
||||
*count += 1;
|
||||
|
||||
char rangebuf[32];
|
||||
if (record->zi_start == 0 && record->zi_end == -1ULL)
|
||||
snprintf(rangebuf, sizeof (rangebuf), "all");
|
||||
else
|
||||
snprintf(rangebuf, sizeof (rangebuf), "[%llu, %llu]",
|
||||
(u_longlong_t)record->zi_start,
|
||||
(u_longlong_t)record->zi_end);
|
||||
|
||||
|
||||
(void) printf("%3d %-15s %-6llu %-6llu %-8s %-3d 0x%02x %-15s "
|
||||
"%6" PRIu64 " %6" PRIu64 "\n", id, pool,
|
||||
(u_longlong_t)record->zi_objset,
|
||||
(void) printf("%3d %-15s %-6llu %-6llu %-8s %-3d 0x%02x ",
|
||||
id, pool, (u_longlong_t)record->zi_objset,
|
||||
(u_longlong_t)record->zi_object, type_to_name(record->zi_type),
|
||||
record->zi_level, record->zi_dvas, rangebuf,
|
||||
record->zi_match_count, record->zi_inject_count);
|
||||
record->zi_level, record->zi_dvas);
|
||||
|
||||
|
||||
if (record->zi_start == 0 &&
|
||||
record->zi_end == -1ULL)
|
||||
(void) printf("all\n");
|
||||
else
|
||||
(void) printf("[%llu, %llu]\n", (u_longlong_t)record->zi_start,
|
||||
(u_longlong_t)record->zi_end);
|
||||
|
||||
return (0);
|
||||
}
|
||||
@ -467,6 +432,10 @@ static int
|
||||
print_device_handler(int id, const char *pool, zinject_record_t *record,
|
||||
void *data)
|
||||
{
|
||||
static const char *iotypestr[] = {
|
||||
"null", "read", "write", "free", "claim", "flush", "trim", "all",
|
||||
};
|
||||
|
||||
int *count = data;
|
||||
|
||||
if (record->zi_guid == 0 || record->zi_func[0] != '\0')
|
||||
@ -476,14 +445,11 @@ print_device_handler(int id, const char *pool, zinject_record_t *record,
|
||||
return (0);
|
||||
|
||||
if (*count == 0) {
|
||||
(void) printf("%3s %-15s %-16s %-5s %-10s %-9s "
|
||||
"%-6s %-6s\n",
|
||||
"ID", "POOL", "GUID", "TYPE", "ERROR", "FREQ",
|
||||
"MATCH", "INJECT");
|
||||
(void) printf("%3s %-15s %-16s %-5s %-10s %-9s\n",
|
||||
"ID", "POOL", "GUID", "TYPE", "ERROR", "FREQ");
|
||||
(void) printf(
|
||||
"--- --------------- ---------------- "
|
||||
"----- ---------- --------- "
|
||||
"------ ------\n");
|
||||
"----- ---------- ---------\n");
|
||||
}
|
||||
|
||||
*count += 1;
|
||||
@ -491,11 +457,9 @@ print_device_handler(int id, const char *pool, zinject_record_t *record,
|
||||
double freq = record->zi_freq == 0 ? 100.0f :
|
||||
(((double)record->zi_freq) / ZI_PERCENTAGE_MAX) * 100.0f;
|
||||
|
||||
(void) printf("%3d %-15s %llx %-5s %-10s %8.4f%% "
|
||||
"%6" PRIu64 " %6" PRIu64 "\n", id, pool,
|
||||
(u_longlong_t)record->zi_guid,
|
||||
iotype_to_str(record->zi_iotype), err_to_str(record->zi_error),
|
||||
freq, record->zi_match_count, record->zi_inject_count);
|
||||
(void) printf("%3d %-15s %llx %-5s %-10s %8.4f%%\n", id, pool,
|
||||
(u_longlong_t)record->zi_guid, iotypestr[record->zi_iotype],
|
||||
err_to_str(record->zi_error), freq);
|
||||
|
||||
return (0);
|
||||
}
|
||||
@ -513,26 +477,18 @@ print_delay_handler(int id, const char *pool, zinject_record_t *record,
|
||||
return (0);
|
||||
|
||||
if (*count == 0) {
|
||||
(void) printf("%3s %-15s %-16s %-10s %-5s %-9s "
|
||||
"%-6s %-6s\n",
|
||||
"ID", "POOL", "GUID", "DELAY (ms)", "LANES", "FREQ",
|
||||
"MATCH", "INJECT");
|
||||
(void) printf("--- --------------- ---------------- "
|
||||
"---------- ----- --------- "
|
||||
"------ ------\n");
|
||||
(void) printf("%3s %-15s %-15s %-15s %s\n",
|
||||
"ID", "POOL", "DELAY (ms)", "LANES", "GUID");
|
||||
(void) printf("--- --------------- --------------- "
|
||||
"--------------- ----------------\n");
|
||||
}
|
||||
|
||||
*count += 1;
|
||||
|
||||
double freq = record->zi_freq == 0 ? 100.0f :
|
||||
(((double)record->zi_freq) / ZI_PERCENTAGE_MAX) * 100.0f;
|
||||
|
||||
(void) printf("%3d %-15s %llx %10llu %5llu %8.4f%% "
|
||||
"%6" PRIu64 " %6" PRIu64 "\n", id, pool,
|
||||
(u_longlong_t)record->zi_guid,
|
||||
(void) printf("%3d %-15s %-15llu %-15llu %llx\n", id, pool,
|
||||
(u_longlong_t)NSEC2MSEC(record->zi_timer),
|
||||
(u_longlong_t)record->zi_nlanes,
|
||||
freq, record->zi_match_count, record->zi_inject_count);
|
||||
(u_longlong_t)record->zi_guid);
|
||||
|
||||
return (0);
|
||||
}
|
||||
@ -896,7 +852,7 @@ main(int argc, char **argv)
|
||||
int quiet = 0;
|
||||
int error = 0;
|
||||
int domount = 0;
|
||||
int io_type = ZINJECT_IOTYPE_ALL;
|
||||
int io_type = ZIO_TYPES;
|
||||
int action = VDEV_STATE_UNKNOWN;
|
||||
err_type_t type = TYPE_INVAL;
|
||||
err_type_t label = TYPE_INVAL;
|
||||
@ -1090,8 +1046,19 @@ main(int argc, char **argv)
|
||||
}
|
||||
break;
|
||||
case 'T':
|
||||
io_type = str_to_iotype(optarg);
|
||||
if (io_type == ZINJECT_IOTYPES) {
|
||||
if (strcasecmp(optarg, "read") == 0) {
|
||||
io_type = ZIO_TYPE_READ;
|
||||
} else if (strcasecmp(optarg, "write") == 0) {
|
||||
io_type = ZIO_TYPE_WRITE;
|
||||
} else if (strcasecmp(optarg, "free") == 0) {
|
||||
io_type = ZIO_TYPE_FREE;
|
||||
} else if (strcasecmp(optarg, "claim") == 0) {
|
||||
io_type = ZIO_TYPE_CLAIM;
|
||||
} else if (strcasecmp(optarg, "flush") == 0) {
|
||||
io_type = ZIO_TYPE_FLUSH;
|
||||
} else if (strcasecmp(optarg, "all") == 0) {
|
||||
io_type = ZIO_TYPES;
|
||||
} else {
|
||||
(void) fprintf(stderr, "invalid I/O type "
|
||||
"'%s': must be 'read', 'write', 'free', "
|
||||
"'claim', 'flush' or 'all'\n", optarg);
|
||||
@ -1213,7 +1180,7 @@ main(int argc, char **argv)
|
||||
}
|
||||
|
||||
if (error == EILSEQ &&
|
||||
(record.zi_freq == 0 || io_type != ZINJECT_IOTYPE_READ)) {
|
||||
(record.zi_freq == 0 || io_type != ZIO_TYPE_READ)) {
|
||||
(void) fprintf(stderr, "device corrupt errors require "
|
||||
"io type read and a frequency value\n");
|
||||
libzfs_fini(g_zfs);
|
||||
@ -1228,9 +1195,9 @@ main(int argc, char **argv)
|
||||
|
||||
if (record.zi_nlanes) {
|
||||
switch (io_type) {
|
||||
case ZINJECT_IOTYPE_READ:
|
||||
case ZINJECT_IOTYPE_WRITE:
|
||||
case ZINJECT_IOTYPE_ALL:
|
||||
case ZIO_TYPE_READ:
|
||||
case ZIO_TYPE_WRITE:
|
||||
case ZIO_TYPES:
|
||||
break;
|
||||
default:
|
||||
(void) fprintf(stderr, "I/O type for a delay "
|
||||
|
@@ -1,4 +1,3 @@
// SPDX-License-Identifier: CDDL-1.0
/*
* CDDL HEADER START
*
|
||||
|
@@ -148,7 +148,6 @@ dist_zpoolcompat_DATA = \
%D%/compatibility.d/openzfs-2.1-linux \
%D%/compatibility.d/openzfs-2.2 \
%D%/compatibility.d/openzfs-2.3 \
%D%/compatibility.d/openzfs-2.4 \
%D%/compatibility.d/openzfsonosx-1.7.0 \
%D%/compatibility.d/openzfsonosx-1.8.1 \
%D%/compatibility.d/openzfsonosx-1.9.3 \
@@ -188,9 +187,7 @@ zpoolcompatlinks = \
"openzfs-2.2 openzfs-2.2-linux" \
"openzfs-2.2 openzfs-2.2-freebsd" \
"openzfs-2.3 openzfs-2.3-linux" \
"openzfs-2.3 openzfs-2.3-freebsd" \
"openzfs-2.4 openzfs-2.4-linux" \
"openzfs-2.4 openzfs-2.4-freebsd"
"openzfs-2.3 openzfs-2.3-freebsd"

zpoolconfdir = $(sysconfdir)/zfs/zpool.d
INSTALL_DATA_HOOKS += zpool-install-data-hook
|
||||
|
@@ -1,48 +0,0 @@
# Features supported by OpenZFS 2.4 on Linux and FreeBSD
allocation_classes
async_destroy
blake3
block_cloning
block_cloning_endian
bookmark_v2
bookmark_written
bookmarks
device_rebuild
device_removal
draid
dynamic_gang_header
edonr
embedded_data
empty_bpobj
enabled_txg
encryption
extensible_dataset
fast_dedup
filesystem_limits
head_errlog
hole_birth
large_blocks
large_dnode
large_microzap
livelist
log_spacemap
longname
lz4_compress
multi_vdev_crash_dump
obsolete_counts
physical_rewrite
project_quota
raidz_expansion
redacted_datasets
redaction_bookmarks
redaction_list_spill
resilver_defer
sha512
skein
spacemap_histogram
spacemap_v2
userobj_accounting
vdev_zaps_v2
zilsaxattr
zpool_checkpoint
zstd_compress
|
@@ -1,4 +1,3 @@
// SPDX-License-Identifier: CDDL-1.0
/*
* CDDL HEADER START
*
|
||||
|
@@ -1,4 +1,3 @@
// SPDX-License-Identifier: CDDL-1.0
/*
* CDDL HEADER START
*
@@ -88,8 +87,7 @@

typedef struct vdev_disk_db_entry
{
/* 24 byte name + 1 byte NULL terminator to make GCC happy */
char id[25];
char id[24];
int sector_size;
} vdev_disk_db_entry_t;
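The one-byte difference in the id field above matters because a full 24-character product ID still needs a 25th byte for its NUL terminator. A small stand-alone check of that point (the product string below is made up for illustration):

#include <assert.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	/* A made-up, full-width 24-character product id. */
	const char *prod = "SAMSUNG MZ7LM3T8HMLP-000";
	char id24[24];
	char id25[25];

	assert(strlen(prod) == 24);
	/* 25 bytes keeps the whole string plus the terminator ... */
	snprintf(id25, sizeof (id25), "%s", prod);
	printf("id25 holds '%s'\n", id25);
	/* ... while 24 bytes silently drops the last character. */
	snprintf(id24, sizeof (id24), "%s", prod);
	printf("id24 holds '%s'\n", id24);
	return (0);
}
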
|
||||
|
||||
|
@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
@ -26,7 +25,6 @@
|
||||
|
||||
/*
|
||||
* Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>.
|
||||
* Copyright (c) 2025, Klara, Inc.
|
||||
*/
|
||||
|
||||
#include <libintl.h>
|
||||
@ -53,7 +51,7 @@
|
||||
typedef struct zpool_node {
|
||||
zpool_handle_t *zn_handle;
|
||||
uu_avl_node_t zn_avlnode;
|
||||
hrtime_t zn_last_refresh;
|
||||
int zn_mark;
|
||||
} zpool_node_t;
|
||||
|
||||
struct zpool_list {
|
||||
@ -63,7 +61,6 @@ struct zpool_list {
|
||||
uu_avl_pool_t *zl_pool;
|
||||
zprop_list_t **zl_proplist;
|
||||
zfs_type_t zl_type;
|
||||
hrtime_t zl_last_refresh;
|
||||
};
|
||||
|
||||
static int
|
||||
@ -83,47 +80,32 @@ zpool_compare(const void *larg, const void *rarg, void *unused)
|
||||
* of known pools.
|
||||
*/
|
||||
static int
|
||||
add_pool(zpool_handle_t *zhp, zpool_list_t *zlp)
|
||||
add_pool(zpool_handle_t *zhp, void *data)
|
||||
{
|
||||
zpool_node_t *node, *new = safe_malloc(sizeof (zpool_node_t));
|
||||
zpool_list_t *zlp = data;
|
||||
zpool_node_t *node = safe_malloc(sizeof (zpool_node_t));
|
||||
uu_avl_index_t idx;
|
||||
|
||||
new->zn_handle = zhp;
|
||||
uu_avl_node_init(new, &new->zn_avlnode, zlp->zl_pool);
|
||||
|
||||
node = uu_avl_find(zlp->zl_avl, new, NULL, &idx);
|
||||
if (node == NULL) {
|
||||
node->zn_handle = zhp;
|
||||
uu_avl_node_init(node, &node->zn_avlnode, zlp->zl_pool);
|
||||
if (uu_avl_find(zlp->zl_avl, node, NULL, &idx) == NULL) {
|
||||
if (zlp->zl_proplist &&
|
||||
zpool_expand_proplist(zhp, zlp->zl_proplist,
|
||||
zlp->zl_type, zlp->zl_literal) != 0) {
|
||||
zpool_close(zhp);
|
||||
free(new);
|
||||
free(node);
|
||||
return (-1);
|
||||
}
|
||||
new->zn_last_refresh = zlp->zl_last_refresh;
|
||||
uu_avl_insert(zlp->zl_avl, new, idx);
|
||||
uu_avl_insert(zlp->zl_avl, node, idx);
|
||||
} else {
|
||||
node->zn_last_refresh = zlp->zl_last_refresh;
|
||||
zpool_close(zhp);
|
||||
free(new);
|
||||
free(node);
|
||||
return (-1);
|
||||
}
|
||||
|
||||
return (0);
|
||||
}
|
||||
|
||||
/*
|
||||
* add_pool(), but always returns 0. This allows zpool_iter() to continue
|
||||
* even if a pool exists in the tree, or we fail to get the properties for
|
||||
* a new one.
|
||||
*/
|
||||
static int
|
||||
add_pool_cb(zpool_handle_t *zhp, void *data)
|
||||
{
|
||||
(void) add_pool(zhp, data);
|
||||
return (0);
|
||||
}
|
||||
|
||||
/*
|
||||
* Create a list of pools based on the given arguments. If we're given no
|
||||
* arguments, then iterate over all pools in the system and add them to the AVL
|
||||
@ -152,10 +134,9 @@ pool_list_get(int argc, char **argv, zprop_list_t **proplist, zfs_type_t type,
|
||||
zlp->zl_type = type;
|
||||
|
||||
zlp->zl_literal = literal;
|
||||
zlp->zl_last_refresh = gethrtime();
|
||||
|
||||
if (argc == 0) {
|
||||
(void) zpool_iter(g_zfs, add_pool_cb, zlp);
|
||||
(void) zpool_iter(g_zfs, add_pool, zlp);
|
||||
zlp->zl_findall = B_TRUE;
|
||||
} else {
|
||||
int i;
|
||||
@ -177,69 +158,15 @@ pool_list_get(int argc, char **argv, zprop_list_t **proplist, zfs_type_t type,
|
||||
}
|
||||
|
||||
/*
* Refresh the state of all pools on the list. Additionally, if no options were
* given on the command line, add any new pools and remove any that are no
* longer available.
* Search for any new pools, adding them to the list. We only add pools when no
* options were given on the command line. Otherwise, we keep the list fixed as
* those that were explicitly specified.
*/
int
pool_list_refresh(zpool_list_t *zlp)
void
pool_list_update(zpool_list_t *zlp)
{
zlp->zl_last_refresh = gethrtime();

if (!zlp->zl_findall) {
/*
* This list is a fixed list of pools, so we must not add
* or remove any. Just walk over them and refresh their
* state.
*/
int navail = 0;
for (zpool_node_t *node = uu_avl_first(zlp->zl_avl);
node != NULL; node = uu_avl_next(zlp->zl_avl, node)) {
boolean_t missing;
zpool_refresh_stats(node->zn_handle, &missing);
navail += !missing;
node->zn_last_refresh = zlp->zl_last_refresh;
}
return (navail);
}

/*
* Search for any new pools and add them to the list. zpool_iter()
* will call zpool_refresh_stats() as part of its work, so this has
* the side effect of updating all active handles.
*/
(void) zpool_iter(g_zfs, add_pool_cb, zlp);

/*
* Walk the list for any that weren't refreshed, and update and remove
* them. It's not enough to just skip available ones, as zpool_iter()
* won't update them, so they'll still appear active in our list.
*/
zpool_node_t *node, *next;
for (node = uu_avl_first(zlp->zl_avl); node != NULL; node = next) {
next = uu_avl_next(zlp->zl_avl, node);

/*
* Skip any that were refreshed and are online; they're already
* handled.
*/
if (node->zn_last_refresh == zlp->zl_last_refresh &&
zpool_get_state(node->zn_handle) != POOL_STATE_UNAVAIL)
continue;

/* Do the refresh ourselves, just in case. */
boolean_t missing;
zpool_refresh_stats(node->zn_handle, &missing);
if (missing) {
uu_avl_remove(zlp->zl_avl, node);
zpool_close(node->zn_handle);
free(node);
} else {
node->zn_last_refresh = zlp->zl_last_refresh;
}
}

return (uu_avl_numnodes(zlp->zl_avl));
if (zlp->zl_findall)
(void) zpool_iter(g_zfs, add_pool, zlp);
}
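
The pool_list_refresh() shown above re-scans the system, prunes pools that have gone away, and returns the number still available. A sketch of how a polling caller might use that return value, assuming the in-tree zpool_util.h declarations shown later in this diff and an already-initialized g_zfs handle (this is not the actual iostat loop):

#include <stdio.h>
#include <unistd.h>

#include "zpool_util.h"

static int
print_name(zpool_handle_t *zhp, void *data)
{
	(void) data;
	printf("  %s\n", zpool_get_name(zhp));
	return (0);
}

void
watch_pools(int argc, char **argv)
{
	int ret;
	zpool_list_t *list = pool_list_get(argc, argv, NULL,
	    ZFS_TYPE_POOL, B_FALSE, &ret);

	for (;;) {
		/* Re-scan; returns how many pools are still available. */
		int npools = pool_list_refresh(list);
		if (npools == 0) {
			fprintf(stderr, "no pools available\n");
			break;
		}
		printf("%d pool(s):\n", npools);
		(void) pool_list_iter(list, B_FALSE, print_name, NULL);
		sleep(5);
	}
	pool_list_free(list);
}
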
|
||||
|
||||
/*
|
||||
@ -262,6 +189,23 @@ pool_list_iter(zpool_list_t *zlp, int unavail, zpool_iter_f func,
|
||||
return (ret);
|
||||
}
|
||||
|
||||
/*
|
||||
* Remove the given pool from the list. When running iostat, we want to remove
|
||||
* those pools that no longer exist.
|
||||
*/
|
||||
void
|
||||
pool_list_remove(zpool_list_t *zlp, zpool_handle_t *zhp)
|
||||
{
|
||||
zpool_node_t search, *node;
|
||||
|
||||
search.zn_handle = zhp;
|
||||
if ((node = uu_avl_find(zlp->zl_avl, &search, NULL, NULL)) != NULL) {
|
||||
uu_avl_remove(zlp->zl_avl, node);
|
||||
zpool_close(node->zn_handle);
|
||||
free(node);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Free all the handles associated with this list.
|
||||
*/
|
||||
@ -434,8 +378,8 @@ process_unique_cmd_columns(vdev_cmd_data_list_t *vcdl)
|
||||
static int
|
||||
vdev_process_cmd_output(vdev_cmd_data_t *data, char *line)
|
||||
{
|
||||
char *col;
|
||||
char *val;
|
||||
char *col = NULL;
|
||||
char *val = line;
|
||||
char *equals;
|
||||
char **tmp;
|
||||
|
||||
@ -452,7 +396,6 @@ vdev_process_cmd_output(vdev_cmd_data_t *data, char *line)
|
||||
col = line;
|
||||
val = equals + 1;
|
||||
} else {
|
||||
col = NULL;
|
||||
val = line;
|
||||
}
|
||||
|
||||
|
@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
@ -33,8 +32,8 @@
|
||||
* Copyright (c) 2017, Intel Corporation.
|
||||
* Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>
|
||||
* Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
|
||||
* Copyright (c) 2021, 2023, 2025, Klara, Inc.
|
||||
* Copyright (c) 2021, 2025 Hewlett Packard Enterprise Development LP.
|
||||
* Copyright (c) 2021, 2023, Klara Inc.
|
||||
* Copyright [2021] Hewlett Packard Enterprise Development LP
|
||||
*/
|
||||
|
||||
#include <assert.h>
|
||||
@ -456,7 +455,7 @@ get_usage(zpool_help_t idx)
|
||||
"<pool> <vdev> ...\n"));
|
||||
case HELP_ATTACH:
|
||||
return (gettext("\tattach [-fsw] [-o property=value] "
|
||||
"<pool> <vdev> <new-device>\n"));
|
||||
"<pool> <device> <new-device>\n"));
|
||||
case HELP_CLEAR:
|
||||
return (gettext("\tclear [[--power]|[-nF]] <pool> [device]\n"));
|
||||
case HELP_CREATE:
|
||||
@ -510,22 +509,22 @@ get_usage(zpool_help_t idx)
|
||||
case HELP_REOPEN:
|
||||
return (gettext("\treopen [-n] <pool>\n"));
|
||||
case HELP_INITIALIZE:
|
||||
return (gettext("\tinitialize [-c | -s | -u] [-w] <-a | <pool> "
|
||||
"[<device> ...]>\n"));
|
||||
return (gettext("\tinitialize [-c | -s | -u] [-w] <pool> "
|
||||
"[<device> ...]\n"));
|
||||
case HELP_SCRUB:
|
||||
return (gettext("\tscrub [-e | -s | -p | -C | -E | -S] [-w] "
|
||||
"<-a | <pool> [<pool> ...]>\n"));
|
||||
return (gettext("\tscrub [-e | -s | -p | -C] [-w] "
|
||||
"<pool> ...\n"));
|
||||
case HELP_RESILVER:
|
||||
return (gettext("\tresilver <pool> ...\n"));
|
||||
case HELP_TRIM:
|
||||
return (gettext("\ttrim [-dw] [-r <rate>] [-c | -s] "
|
||||
"<-a | <pool> [<device> ...]>\n"));
|
||||
return (gettext("\ttrim [-dw] [-r <rate>] [-c | -s] <pool> "
|
||||
"[<device> ...]\n"));
|
||||
case HELP_STATUS:
|
||||
return (gettext("\tstatus [-DdegiLPpstvx] "
|
||||
"[-c script1[,script2,...]] ...\n"
|
||||
"\t [-j|--json [--json-flat-vdevs] [--json-int] "
|
||||
"[--json-pool-key-guid]] ...\n"
|
||||
"\t [-T d|u] [--power] [pool] [interval [count]]\n"));
|
||||
return (gettext("\tstatus [--power] [-j [--json-int, "
|
||||
"--json-flat-vdevs, ...\n"
|
||||
"\t --json-pool-key-guid]] [-c [script1,script2,...]] "
|
||||
"[-dDegiLpPstvx] ...\n"
|
||||
"\t [-T d|u] [pool] [interval [count]]\n"));
|
||||
case HELP_UPGRADE:
|
||||
return (gettext("\tupgrade\n"
|
||||
"\tupgrade -v\n"
|
||||
@ -560,6 +559,33 @@ get_usage(zpool_help_t idx)
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
zpool_collect_leaves(zpool_handle_t *zhp, nvlist_t *nvroot, nvlist_t *res)
|
||||
{
|
||||
uint_t children = 0;
|
||||
nvlist_t **child;
|
||||
uint_t i;
|
||||
|
||||
(void) nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
|
||||
&child, &children);
|
||||
|
||||
if (children == 0) {
|
||||
char *path = zpool_vdev_name(g_zfs, zhp, nvroot,
|
||||
VDEV_NAME_PATH);
|
||||
|
||||
if (strcmp(path, VDEV_TYPE_INDIRECT) != 0 &&
|
||||
strcmp(path, VDEV_TYPE_HOLE) != 0)
|
||||
fnvlist_add_boolean(res, path);
|
||||
|
||||
free(path);
|
||||
return;
|
||||
}
|
||||
|
||||
for (i = 0; i < children; i++) {
|
||||
zpool_collect_leaves(zhp, child[i], res);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Callback routine that will print out a pool property value.
|
||||
*/
|
||||
@ -752,11 +778,10 @@ usage(boolean_t requested)
|
||||
}
|
||||
|
||||
/*
|
||||
* zpool initialize [-c | -s | -u] [-w] <-a | pool> [<vdev> ...]
|
||||
* zpool initialize [-c | -s | -u] [-w] <pool> [<vdev> ...]
|
||||
* Initialize all unused blocks in the specified vdevs, or all vdevs in the pool
|
||||
* if none specified.
|
||||
*
|
||||
* -a Use all pools.
|
||||
* -c Cancel. Ends active initializing.
|
||||
* -s Suspend. Initializing can then be restarted with no flags.
|
||||
* -u Uninitialize. Clears initialization state.
|
||||
@ -768,26 +793,22 @@ zpool_do_initialize(int argc, char **argv)
|
||||
int c;
|
||||
char *poolname;
|
||||
zpool_handle_t *zhp;
|
||||
nvlist_t *vdevs;
|
||||
int err = 0;
|
||||
boolean_t wait = B_FALSE;
|
||||
boolean_t initialize_all = B_FALSE;
|
||||
|
||||
struct option long_options[] = {
|
||||
{"cancel", no_argument, NULL, 'c'},
|
||||
{"suspend", no_argument, NULL, 's'},
|
||||
{"uninit", no_argument, NULL, 'u'},
|
||||
{"wait", no_argument, NULL, 'w'},
|
||||
{"all", no_argument, NULL, 'a'},
|
||||
{0, 0, 0, 0}
|
||||
};
|
||||
|
||||
pool_initialize_func_t cmd_type = POOL_INITIALIZE_START;
|
||||
while ((c = getopt_long(argc, argv, "acsuw", long_options,
|
||||
while ((c = getopt_long(argc, argv, "csuw", long_options,
|
||||
NULL)) != -1) {
|
||||
switch (c) {
|
||||
case 'a':
|
||||
initialize_all = B_TRUE;
|
||||
break;
|
||||
case 'c':
|
||||
if (cmd_type != POOL_INITIALIZE_START &&
|
||||
cmd_type != POOL_INITIALIZE_CANCEL) {
|
||||
@ -834,18 +855,7 @@ zpool_do_initialize(int argc, char **argv)
|
||||
argc -= optind;
|
||||
argv += optind;
|
||||
|
||||
initialize_cbdata_t cbdata = {
|
||||
.wait = wait,
|
||||
.cmd_type = cmd_type
|
||||
};
|
||||
|
||||
if (initialize_all && argc > 0) {
|
||||
(void) fprintf(stderr, gettext("-a cannot be combined with "
|
||||
"individual pools or vdevs\n"));
|
||||
usage(B_FALSE);
|
||||
}
|
||||
|
||||
if (argc < 1 && !initialize_all) {
|
||||
if (argc < 1) {
|
||||
(void) fprintf(stderr, gettext("missing pool name argument\n"));
|
||||
usage(B_FALSE);
|
||||
return (-1);
|
||||
@ -857,35 +867,30 @@ zpool_do_initialize(int argc, char **argv)
|
||||
usage(B_FALSE);
|
||||
}
|
||||
|
||||
if (argc == 0 && initialize_all) {
|
||||
/* Initilize each pool recursively */
|
||||
err = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
|
||||
B_FALSE, zpool_initialize_one, &cbdata);
|
||||
return (err);
|
||||
} else if (argc == 1) {
|
||||
/* no individual leaf vdevs specified, initialize the pool */
|
||||
poolname = argv[0];
|
||||
zhp = zpool_open(g_zfs, poolname);
|
||||
if (zhp == NULL)
|
||||
return (-1);
|
||||
err = zpool_initialize_one(zhp, &cbdata);
|
||||
poolname = argv[0];
|
||||
zhp = zpool_open(g_zfs, poolname);
|
||||
if (zhp == NULL)
|
||||
return (-1);
|
||||
|
||||
vdevs = fnvlist_alloc();
|
||||
if (argc == 1) {
|
||||
/* no individual leaf vdevs specified, so add them all */
|
||||
nvlist_t *config = zpool_get_config(zhp, NULL);
|
||||
nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
|
||||
ZPOOL_CONFIG_VDEV_TREE);
|
||||
zpool_collect_leaves(zhp, nvroot, vdevs);
|
||||
} else {
|
||||
/* individual leaf vdevs specified, initialize them */
|
||||
poolname = argv[0];
|
||||
zhp = zpool_open(g_zfs, poolname);
|
||||
if (zhp == NULL)
|
||||
return (-1);
|
||||
nvlist_t *vdevs = fnvlist_alloc();
|
||||
for (int i = 1; i < argc; i++) {
|
||||
fnvlist_add_boolean(vdevs, argv[i]);
|
||||
}
|
||||
if (wait)
|
||||
err = zpool_initialize_wait(zhp, cmd_type, vdevs);
|
||||
else
|
||||
err = zpool_initialize(zhp, cmd_type, vdevs);
|
||||
fnvlist_free(vdevs);
|
||||
}
|
||||
|
||||
if (wait)
|
||||
err = zpool_initialize_wait(zhp, cmd_type, vdevs);
|
||||
else
|
||||
err = zpool_initialize(zhp, cmd_type, vdevs);
|
||||
|
||||
fnvlist_free(vdevs);
|
||||
zpool_close(zhp);
|
||||
|
||||
return (err);
|
||||
@ -1782,7 +1787,7 @@ zpool_do_labelclear(int argc, char **argv)
|
||||
{
|
||||
char vdev[MAXPATHLEN];
|
||||
char *name = NULL;
|
||||
int c, fd, ret = 0;
|
||||
int c, fd = -1, ret = 0;
|
||||
nvlist_t *config;
|
||||
pool_state_t state;
|
||||
boolean_t inuse = B_FALSE;
|
||||
@ -5761,6 +5766,24 @@ children:
|
||||
return (ret);
|
||||
}
|
||||
|
||||
static int
|
||||
refresh_iostat(zpool_handle_t *zhp, void *data)
|
||||
{
|
||||
iostat_cbdata_t *cb = data;
|
||||
boolean_t missing;
|
||||
|
||||
/*
|
||||
* If the pool has disappeared, remove it from the list and continue.
|
||||
*/
|
||||
if (zpool_refresh_stats(zhp, &missing) != 0)
|
||||
return (-1);
|
||||
|
||||
if (missing)
|
||||
pool_list_remove(cb->cb_list, zhp);
|
||||
|
||||
return (0);
|
||||
}
|
||||
|
||||
/*
|
||||
* Callback to print out the iostats for the given pool.
|
||||
*/
|
||||
@ -6133,6 +6156,7 @@ static void
|
||||
get_interval_count_filter_guids(int *argc, char **argv, float *interval,
|
||||
unsigned long *count, iostat_cbdata_t *cb)
|
||||
{
|
||||
char **tmpargv = argv;
|
||||
int argc_for_interval = 0;
|
||||
|
||||
/* Is the last arg an interval value? Or a guid? */
|
||||
@ -6156,7 +6180,7 @@ get_interval_count_filter_guids(int *argc, char **argv, float *interval,
|
||||
}
|
||||
|
||||
/* Point to our list of possible intervals */
|
||||
char **tmpargv = &argv[*argc - argc_for_interval];
|
||||
tmpargv = &argv[*argc - argc_for_interval];
|
||||
|
||||
*argc = *argc - argc_for_interval;
|
||||
get_interval_count(&argc_for_interval, tmpargv,
|
||||
@ -6341,16 +6365,18 @@ get_namewidth_iostat(zpool_handle_t *zhp, void *data)
|
||||
* This command can be tricky because we want to be able to deal with pool
|
||||
* creation/destruction as well as vdev configuration changes. The bulk of this
|
||||
* processing is handled by the pool_list_* routines in zpool_iter.c. We rely
|
||||
* on pool_list_refresh() to detect the addition and removal of pools.
|
||||
* Configuration changes are all handled within libzfs.
|
||||
* on pool_list_update() to detect the addition of new pools. Configuration
|
||||
* changes are all handled within libzfs.
|
||||
*/
|
||||
int
|
||||
zpool_do_iostat(int argc, char **argv)
|
||||
{
|
||||
int c;
|
||||
int ret;
|
||||
int npools;
|
||||
float interval = 0;
|
||||
unsigned long count = 0;
|
||||
int winheight = 24;
|
||||
zpool_list_t *list;
|
||||
boolean_t verbose = B_FALSE;
|
||||
boolean_t latency = B_FALSE, l_histo = B_FALSE, rq_histo = B_FALSE;
|
||||
@ -6599,24 +6625,10 @@ zpool_do_iostat(int argc, char **argv)
|
||||
return (1);
|
||||
}
|
||||
|
||||
int last_npools = 0;
|
||||
for (;;) {
|
||||
/*
|
||||
* Refresh all pools in list, adding or removing pools as
|
||||
* necessary.
|
||||
*/
|
||||
int npools = pool_list_refresh(list);
|
||||
if (npools == 0) {
|
||||
if ((npools = pool_list_count(list)) == 0)
|
||||
(void) fprintf(stderr, gettext("no pools available\n"));
|
||||
} else {
|
||||
/*
|
||||
* If the list of pools has changed since last time
|
||||
* around, reset the iteration count to force the
|
||||
* header to be redisplayed.
|
||||
*/
|
||||
if (last_npools != npools)
|
||||
cb.cb_iteration = 0;
|
||||
|
||||
else {
|
||||
/*
|
||||
* If this is the first iteration and -y was supplied
|
||||
* we skip any printing.
|
||||
@ -6624,6 +6636,15 @@ zpool_do_iostat(int argc, char **argv)
|
||||
boolean_t skip = (omit_since_boot &&
|
||||
cb.cb_iteration == 0);
|
||||
|
||||
/*
|
||||
* Refresh all statistics. This is done as an
|
||||
* explicit step before calculating the maximum name
|
||||
* width, so that any * configuration changes are
|
||||
* properly accounted for.
|
||||
*/
|
||||
(void) pool_list_iter(list, B_FALSE, refresh_iostat,
|
||||
&cb);
|
||||
|
||||
/*
|
||||
* Iterate over all pools to determine the maximum width
|
||||
* for the pool / device name column across all pools.
|
||||
@ -6651,7 +6672,7 @@ zpool_do_iostat(int argc, char **argv)
|
||||
* even when terminal window has its height
|
||||
* changed.
|
||||
*/
|
||||
int winheight = terminal_height();
|
||||
winheight = terminal_height();
|
||||
/*
|
||||
* Are we connected to TTY? If not, headers_once
|
||||
* should be true, to avoid breaking scripts.
|
||||
@ -6714,8 +6735,6 @@ zpool_do_iostat(int argc, char **argv)
|
||||
|
||||
(void) fflush(stdout);
|
||||
(void) fsleep(interval);
|
||||
|
||||
last_npools = npools;
|
||||
}
|
||||
|
||||
pool_list_free(list);
|
||||
@ -7632,7 +7651,7 @@ zpool_do_replace(int argc, char **argv)
|
||||
}
|
||||
|
||||
/*
|
||||
* zpool attach [-fsw] [-o property=value] <pool> <vdev> <new_device>
|
||||
* zpool attach [-fsw] [-o property=value] <pool> <device>|<vdev> <new_device>
|
||||
*
|
||||
* -f Force attach, even if <new_device> appears to be in use.
|
||||
* -s Use sequential instead of healing reconstruction for resilver.
|
||||
@ -7640,9 +7659,9 @@ zpool_do_replace(int argc, char **argv)
|
||||
* -w Wait for resilvering (mirror) or expansion (raidz) to complete
|
||||
* before returning.
|
||||
*
|
||||
* Attach <new_device> to a <vdev>, where the vdev can be of type
|
||||
* device, mirror or raidz. If <vdev> is not part of a mirror, then <vdev> will
|
||||
* be transformed into a mirror of <vdev> and <new_device>. When a mirror
|
||||
* Attach <new_device> to a <device> or <vdev>, where the vdev can be of type
|
||||
* mirror or raidz. If <device> is not part of a mirror, then <device> will
|
||||
* be transformed into a mirror of <device> and <new_device>. When a mirror
|
||||
* is involved, <new_device> will begin life with a DTL of [0, now], and will
|
||||
* immediately begin to resilver itself. For the raidz case, a expansion will
|
||||
* commence and reflow the raidz data across all the disks including the
|
||||
@ -8348,8 +8367,6 @@ zpool_do_reopen(int argc, char **argv)
|
||||
typedef struct scrub_cbdata {
|
||||
int cb_type;
|
||||
pool_scrub_cmd_t cb_scrub_cmd;
|
||||
time_t cb_date_start;
|
||||
time_t cb_date_end;
|
||||
} scrub_cbdata_t;
|
||||
|
||||
static boolean_t
|
||||
@ -8393,8 +8410,8 @@ scrub_callback(zpool_handle_t *zhp, void *data)
|
||||
return (1);
|
||||
}
|
||||
|
||||
err = zpool_scan_range(zhp, cb->cb_type, cb->cb_scrub_cmd,
|
||||
cb->cb_date_start, cb->cb_date_end);
|
||||
err = zpool_scan(zhp, cb->cb_type, cb->cb_scrub_cmd);
|
||||
|
||||
if (err == 0 && zpool_has_checkpoint(zhp) &&
|
||||
cb->cb_type == POOL_SCAN_SCRUB) {
|
||||
(void) printf(gettext("warning: will not scrub state that "
|
||||
@ -8412,35 +8429,10 @@ wait_callback(zpool_handle_t *zhp, void *data)
|
||||
return (zpool_wait(zhp, *act));
|
||||
}
|
||||
|
||||
static time_t
|
||||
date_string_to_sec(const char *timestr, boolean_t rounding)
|
||||
{
|
||||
struct tm tm = {0};
|
||||
int adjustment = rounding ? 1 : 0;
|
||||
|
||||
/* Allow mktime to determine timezone. */
|
||||
tm.tm_isdst = -1;
|
||||
|
||||
if (strptime(timestr, "%Y-%m-%d %H:%M", &tm) == NULL) {
|
||||
if (strptime(timestr, "%Y-%m-%d", &tm) == NULL) {
|
||||
fprintf(stderr, gettext("Failed to parse the date.\n"));
|
||||
usage(B_FALSE);
|
||||
}
|
||||
adjustment *= 24 * 60 * 60;
|
||||
} else {
|
||||
adjustment *= 60;
|
||||
}
|
||||
|
||||
return (mktime(&tm) + adjustment);
|
||||
}
|
||||
|
||||
/*
|
||||
* zpool scrub [-e | -s | -p | -C | -E | -S] [-w] [-a | <pool> ...]
|
||||
* zpool scrub [-e | -s | -p | -C] [-w] <pool> ...
|
||||
*
|
||||
* -a Scrub all pools.
|
||||
* -e Only scrub blocks in the error log.
|
||||
* -E End date of scrub.
|
||||
* -S Start date of scrub.
|
||||
* -s Stop. Stops any in-progress scrub.
|
||||
* -p Pause. Pause in-progress scrub.
|
||||
* -w Wait. Blocks until scrub has completed.
|
||||
@ -8456,36 +8448,21 @@ zpool_do_scrub(int argc, char **argv)
|
||||
|
||||
cb.cb_type = POOL_SCAN_SCRUB;
|
||||
cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;
|
||||
cb.cb_date_start = cb.cb_date_end = 0;
|
||||
|
||||
boolean_t is_error_scrub = B_FALSE;
|
||||
boolean_t is_pause = B_FALSE;
|
||||
boolean_t is_stop = B_FALSE;
|
||||
boolean_t is_txg_continue = B_FALSE;
|
||||
boolean_t scrub_all = B_FALSE;
|
||||
|
||||
/* check options */
|
||||
while ((c = getopt(argc, argv, "aspweCE:S:")) != -1) {
|
||||
while ((c = getopt(argc, argv, "spweC")) != -1) {
|
||||
switch (c) {
|
||||
case 'a':
|
||||
scrub_all = B_TRUE;
|
||||
break;
|
||||
case 'e':
|
||||
is_error_scrub = B_TRUE;
|
||||
break;
|
||||
case 'E':
|
||||
/*
|
||||
* Round the date. It's better to scrub more data than
|
||||
* less. This also makes the date inclusive.
|
||||
*/
|
||||
cb.cb_date_end = date_string_to_sec(optarg, B_TRUE);
|
||||
break;
|
||||
case 's':
|
||||
is_stop = B_TRUE;
|
||||
break;
|
||||
case 'S':
|
||||
cb.cb_date_start = date_string_to_sec(optarg, B_FALSE);
|
||||
break;
|
||||
case 'p':
|
||||
is_pause = B_TRUE;
|
||||
break;
|
||||
@ -8504,19 +8481,19 @@ zpool_do_scrub(int argc, char **argv)
|
||||
|
||||
if (is_pause && is_stop) {
|
||||
(void) fprintf(stderr, gettext("invalid option "
|
||||
"combination: -s and -p are mutually exclusive\n"));
|
||||
"combination :-s and -p are mutually exclusive\n"));
|
||||
usage(B_FALSE);
|
||||
} else if (is_pause && is_txg_continue) {
|
||||
(void) fprintf(stderr, gettext("invalid option "
|
||||
"combination: -p and -C are mutually exclusive\n"));
|
||||
"combination :-p and -C are mutually exclusive\n"));
|
||||
usage(B_FALSE);
|
||||
} else if (is_stop && is_txg_continue) {
|
||||
(void) fprintf(stderr, gettext("invalid option "
|
||||
"combination: -s and -C are mutually exclusive\n"));
|
||||
"combination :-s and -C are mutually exclusive\n"));
|
||||
usage(B_FALSE);
|
||||
} else if (is_error_scrub && is_txg_continue) {
|
||||
(void) fprintf(stderr, gettext("invalid option "
|
||||
"combination: -e and -C are mutually exclusive\n"));
|
||||
"combination :-e and -C are mutually exclusive\n"));
|
||||
usage(B_FALSE);
|
||||
} else {
|
||||
if (is_error_scrub)
|
||||
@ -8533,19 +8510,6 @@ zpool_do_scrub(int argc, char **argv)
|
||||
}
|
||||
}
|
||||
|
||||
if ((cb.cb_date_start != 0 || cb.cb_date_end != 0) &&
|
||||
cb.cb_scrub_cmd != POOL_SCRUB_NORMAL) {
|
||||
(void) fprintf(stderr, gettext("invalid option combination: "
|
||||
"start/end date is available only with normal scrub\n"));
|
||||
usage(B_FALSE);
|
||||
}
|
||||
if (cb.cb_date_start != 0 && cb.cb_date_end != 0 &&
|
||||
cb.cb_date_start > cb.cb_date_end) {
|
||||
(void) fprintf(stderr, gettext("invalid arguments: "
|
||||
"end date has to be later than start date\n"));
|
||||
usage(B_FALSE);
|
||||
}
|
||||
|
||||
if (wait && (cb.cb_type == POOL_SCAN_NONE ||
|
||||
cb.cb_scrub_cmd == POOL_SCRUB_PAUSE)) {
|
||||
(void) fprintf(stderr, gettext("invalid option combination: "
|
||||
@ -8556,7 +8520,7 @@ zpool_do_scrub(int argc, char **argv)
|
||||
argc -= optind;
|
||||
argv += optind;
|
||||
|
||||
if (argc < 1 && !scrub_all) {
|
||||
if (argc < 1) {
|
||||
(void) fprintf(stderr, gettext("missing pool name argument\n"));
|
||||
usage(B_FALSE);
|
||||
}
|
||||
@ -8586,7 +8550,6 @@ zpool_do_resilver(int argc, char **argv)
|
||||
|
||||
cb.cb_type = POOL_SCAN_RESILVER;
|
||||
cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;
|
||||
cb.cb_date_start = cb.cb_date_end = 0;
|
||||
|
||||
/* check options */
|
||||
while ((c = getopt(argc, argv, "")) != -1) {
|
||||
@ -8611,9 +8574,8 @@ zpool_do_resilver(int argc, char **argv)
|
||||
}
|
||||
|
||||
/*
|
||||
* zpool trim [-d] [-r <rate>] [-c | -s] <-a | pool> [<device> ...]
|
||||
* zpool trim [-d] [-r <rate>] [-c | -s] <pool> [<device> ...]
|
||||
*
|
||||
* -a Trim all pools.
|
||||
* -c Cancel. Ends any in-progress trim.
|
||||
* -d Secure trim. Requires kernel and device support.
|
||||
* -r <rate> Sets the TRIM rate in bytes (per second). Supports
|
||||
@ -8630,7 +8592,6 @@ zpool_do_trim(int argc, char **argv)
|
||||
{"rate", required_argument, NULL, 'r'},
|
||||
{"suspend", no_argument, NULL, 's'},
|
||||
{"wait", no_argument, NULL, 'w'},
|
||||
{"all", no_argument, NULL, 'a'},
|
||||
{0, 0, 0, 0}
|
||||
};
|
||||
|
||||
@ -8638,16 +8599,11 @@ zpool_do_trim(int argc, char **argv)
|
||||
uint64_t rate = 0;
|
||||
boolean_t secure = B_FALSE;
|
||||
boolean_t wait = B_FALSE;
|
||||
boolean_t trimall = B_FALSE;
|
||||
int error;
|
||||
|
||||
int c;
|
||||
while ((c = getopt_long(argc, argv, "acdr:sw", long_options, NULL))
|
||||
while ((c = getopt_long(argc, argv, "cdr:sw", long_options, NULL))
|
||||
!= -1) {
|
||||
switch (c) {
|
||||
case 'a':
|
||||
trimall = B_TRUE;
|
||||
break;
|
||||
case 'c':
|
||||
if (cmd_type != POOL_TRIM_START &&
|
||||
cmd_type != POOL_TRIM_CANCEL) {
|
||||
@ -8706,18 +8662,7 @@ zpool_do_trim(int argc, char **argv)
|
||||
argc -= optind;
|
||||
argv += optind;
|
||||
|
||||
trimflags_t trim_flags = {
|
||||
.secure = secure,
|
||||
.rate = rate,
|
||||
.wait = wait,
|
||||
};
|
||||
|
||||
trim_cbdata_t cbdata = {
|
||||
.trim_flags = trim_flags,
|
||||
.cmd_type = cmd_type
|
||||
};
|
||||
|
||||
if (argc < 1 && !trimall) {
|
||||
if (argc < 1) {
|
||||
(void) fprintf(stderr, gettext("missing pool name argument\n"));
|
||||
usage(B_FALSE);
|
||||
return (-1);
|
||||
@ -8725,46 +8670,41 @@ zpool_do_trim(int argc, char **argv)
|
||||
|
||||
if (wait && (cmd_type != POOL_TRIM_START)) {
|
||||
(void) fprintf(stderr, gettext("-w cannot be used with -c or "
|
||||
"-s options\n"));
|
||||
"-s\n"));
|
||||
usage(B_FALSE);
|
||||
}
|
||||
|
||||
if (trimall && argc > 0) {
|
||||
(void) fprintf(stderr, gettext("-a cannot be combined with "
|
||||
"individual zpools or vdevs\n"));
|
||||
usage(B_FALSE);
|
||||
}
|
||||
char *poolname = argv[0];
|
||||
zpool_handle_t *zhp = zpool_open(g_zfs, poolname);
|
||||
if (zhp == NULL)
|
||||
return (-1);
|
||||
|
||||
if (argc == 0 && trimall) {
|
||||
cbdata.trim_flags.fullpool = B_TRUE;
|
||||
/* Trim each pool recursively */
|
||||
error = for_each_pool(argc, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
|
||||
B_FALSE, zpool_trim_one, &cbdata);
|
||||
} else if (argc == 1) {
|
||||
char *poolname = argv[0];
|
||||
zpool_handle_t *zhp = zpool_open(g_zfs, poolname);
|
||||
if (zhp == NULL)
|
||||
return (-1);
|
||||
trimflags_t trim_flags = {
|
||||
.secure = secure,
|
||||
.rate = rate,
|
||||
.wait = wait,
|
||||
};
|
||||
|
||||
nvlist_t *vdevs = fnvlist_alloc();
|
||||
if (argc == 1) {
|
||||
/* no individual leaf vdevs specified, so add them all */
|
||||
error = zpool_trim_one(zhp, &cbdata);
|
||||
zpool_close(zhp);
|
||||
nvlist_t *config = zpool_get_config(zhp, NULL);
|
||||
nvlist_t *nvroot = fnvlist_lookup_nvlist(config,
|
||||
ZPOOL_CONFIG_VDEV_TREE);
|
||||
zpool_collect_leaves(zhp, nvroot, vdevs);
|
||||
trim_flags.fullpool = B_TRUE;
|
||||
} else {
|
||||
char *poolname = argv[0];
|
||||
zpool_handle_t *zhp = zpool_open(g_zfs, poolname);
|
||||
if (zhp == NULL)
|
||||
return (-1);
|
||||
/* leaf vdevs specified, trim only those */
|
||||
cbdata.trim_flags.fullpool = B_FALSE;
|
||||
nvlist_t *vdevs = fnvlist_alloc();
|
||||
trim_flags.fullpool = B_FALSE;
|
||||
for (int i = 1; i < argc; i++) {
|
||||
fnvlist_add_boolean(vdevs, argv[i]);
|
||||
}
|
||||
error = zpool_trim(zhp, cbdata.cmd_type, vdevs,
|
||||
&cbdata.trim_flags);
|
||||
fnvlist_free(vdevs);
|
||||
zpool_close(zhp);
|
||||
}
|
||||
|
||||
int error = zpool_trim(zhp, cmd_type, vdevs, &trim_flags);
|
||||
|
||||
fnvlist_free(vdevs);
|
||||
zpool_close(zhp);
|
||||
|
||||
return (error);
|
||||
}
|
||||
|
||||
@ -10491,9 +10431,10 @@ print_status_reason(zpool_handle_t *zhp, status_cbdata_t *cbp,
|
||||
break;
|
||||
|
||||
case ZPOOL_STATUS_REMOVED_DEV:
|
||||
snprintf(status, ST_SIZE, gettext("One or more devices have "
|
||||
"been removed.\n\tSufficient replicas exist for the pool "
|
||||
"to continue functioning in a\n\tdegraded state.\n"));
|
||||
snprintf(status, ST_SIZE, gettext("One or more devices has "
|
||||
"been removed by the administrator.\n\tSufficient "
|
||||
"replicas exist for the pool to continue functioning in "
|
||||
"a\n\tdegraded state.\n"));
|
||||
snprintf(action, AC_SIZE, gettext("Online the device "
|
||||
"using zpool online' or replace the device with\n\t'zpool "
|
||||
"replace'.\n"));
|
||||
@ -10765,6 +10706,7 @@ status_callback_json(zpool_handle_t *zhp, void *data)
|
||||
uint_t c;
|
||||
vdev_stat_t *vs;
|
||||
nvlist_t *item, *d, *load_info, *vds;
|
||||
item = d = NULL;
|
||||
|
||||
/* If dedup stats were requested, also fetch dedupcached. */
|
||||
if (cbp->cb_dedup_stats > 1)
|
||||
@ -11037,30 +10979,28 @@ status_callback(zpool_handle_t *zhp, void *data)
|
||||
}
|
||||
|
||||
/*
|
||||
* zpool status [-dDegiLpPstvx] [-c [script1,script2,...]] ...
|
||||
* [-j|--json [--json-flat-vdevs] [--json-int] ...
|
||||
* [--json-pool-key-guid]] [--power] [-T d|u] ...
|
||||
* [pool] [interval [count]]
|
||||
* zpool status [-c [script1,script2,...]] [-dDegiLpPstvx] [--power] ...
|
||||
* [-T d|u] [pool] [interval [count]]
|
||||
*
|
||||
* -c CMD For each vdev, run command CMD
|
||||
* -D Display dedup status (undocumented)
|
||||
* -d Display Direct I/O write verify errors
|
||||
* -D Display dedup status (undocumented)
|
||||
* -e Display only unhealthy vdevs
|
||||
* -g Display guid for individual vdev name.
|
||||
* -i Display vdev initialization status.
|
||||
* -j [...] Display output in JSON format
|
||||
* --json-flat-vdevs Display vdevs in flat hierarchy
|
||||
* --json-int Display numbers in integer format instead of string
|
||||
* --json-pool-key-guid Use pool GUID as key for pool objects
|
||||
* -L Follow links when resolving vdev path name.
|
||||
* -P Display full path for vdev name.
|
||||
* -p Display values in parsable (exact) format.
|
||||
* --power Display vdev enclosure slot power status
|
||||
* -P Display full path for vdev name.
|
||||
* -s Display slow IOs column.
|
||||
* -T Display a timestamp in date(1) or Unix format
|
||||
* -t Display vdev TRIM status.
|
||||
* -T Display a timestamp in date(1) or Unix format
|
||||
* -v Display complete error logs
|
||||
* -x Display only pools with potential problems
|
||||
* -j Display output in JSON format
|
||||
* --power Display vdev enclosure slot power status
|
||||
* --json-int Display numbers in inteeger format instead of string
|
||||
* --json-flat-vdevs Display vdevs in flat hierarchy
|
||||
* --json-pool-key-guid Use pool GUID as key for pool objects
|
||||
*
|
||||
* Describes the health status of all pools or some subset.
|
||||
*/
|
||||
@ -11388,8 +11328,7 @@ upgrade_enable_all(zpool_handle_t *zhp, int *countp)
|
||||
const char *fname = spa_feature_table[i].fi_uname;
|
||||
const char *fguid = spa_feature_table[i].fi_guid;
|
||||
|
||||
if (!spa_feature_table[i].fi_zfs_mod_supported ||
|
||||
(spa_feature_table[i].fi_flags & ZFEATURE_FLAG_NO_UPGRADE))
|
||||
if (!spa_feature_table[i].fi_zfs_mod_supported)
|
||||
continue;
|
||||
|
||||
if (!nvlist_exists(enabled, fguid) && requested_features[i]) {
|
||||
@ -11544,11 +11483,7 @@ upgrade_list_disabled_cb(zpool_handle_t *zhp, void *arg)
|
||||
"Note that the pool "
|
||||
"'compatibility' feature can be "
|
||||
"used to inhibit\nfeature "
|
||||
"upgrades.\n\n"
|
||||
"Features marked with (*) are not "
|
||||
"applied automatically on upgrade, "
|
||||
"and\nmust be applied explicitly "
|
||||
"with zpool-set(7).\n\n"));
|
||||
"upgrades.\n\n"));
|
||||
(void) printf(gettext("POOL "
|
||||
"FEATURE\n"));
|
||||
(void) printf(gettext("------"
|
||||
@ -11562,9 +11497,7 @@ upgrade_list_disabled_cb(zpool_handle_t *zhp, void *arg)
|
||||
poolfirst = B_FALSE;
|
||||
}
|
||||
|
||||
(void) printf(gettext(" %s%s\n"), fname,
|
||||
spa_feature_table[i].fi_flags &
|
||||
ZFEATURE_FLAG_NO_UPGRADE ? "(*)" : "");
|
||||
(void) printf(gettext(" %s\n"), fname);
|
||||
}
|
||||
/*
|
||||
* If they did "zpool upgrade -a", then we could
|
||||
@ -12134,11 +12067,6 @@ zpool_do_events_nvprint(nvlist_t *nvl, int depth)
|
||||
zfs_valstr_zio_stage(i32, flagstr,
|
||||
sizeof (flagstr));
|
||||
printf(gettext("0x%x [%s]"), i32, flagstr);
|
||||
} else if (strcmp(name,
|
||||
FM_EREPORT_PAYLOAD_ZFS_ZIO_TYPE) == 0) {
|
||||
zfs_valstr_zio_type(i32, flagstr,
|
||||
sizeof (flagstr));
|
||||
printf(gettext("0x%x [%s]"), i32, flagstr);
|
||||
} else if (strcmp(name,
|
||||
FM_EREPORT_PAYLOAD_ZFS_ZIO_PRIORITY) == 0) {
|
||||
zfs_valstr_zio_priority(i32, flagstr,
|
||||
@ -12365,7 +12293,7 @@ zpool_do_events_next(ev_opts_t *opts)
|
||||
nvlist_free(nvl);
|
||||
}
|
||||
|
||||
VERIFY0(close(zevent_fd));
|
||||
VERIFY(0 == close(zevent_fd));
|
||||
|
||||
return (ret);
|
||||
}
|
||||
@ -12818,13 +12746,11 @@ found:
|
||||
|
||||
if (strcmp(argv[1], "root") == 0)
|
||||
vdev = strdup("root-0");
|
||||
else
|
||||
vdev = strdup(argv[1]);
|
||||
|
||||
/* ... and the rest are vdev names */
|
||||
if (vdev == NULL)
|
||||
cb.cb_vdevs.cb_names = argv + 1;
|
||||
else
|
||||
cb.cb_vdevs.cb_names = &vdev;
|
||||
|
||||
cb.cb_vdevs.cb_names = &vdev;
|
||||
cb.cb_vdevs.cb_names_count = argc - 1;
|
||||
cb.cb_type = ZFS_TYPE_VDEV;
|
||||
argc = 1; /* One pool to process */
|
||||
|
@@ -1,4 +1,3 @@
// SPDX-License-Identifier: CDDL-1.0
/*
* CDDL HEADER START
*

@@ -1,4 +1,3 @@
// SPDX-License-Identifier: CDDL-1.0
/*
* CDDL HEADER START
*
@@ -76,10 +75,11 @@ typedef struct zpool_list zpool_list_t;

zpool_list_t *pool_list_get(int, char **, zprop_list_t **, zfs_type_t,
boolean_t, int *);
int pool_list_refresh(zpool_list_t *);
void pool_list_update(zpool_list_t *);
int pool_list_iter(zpool_list_t *, int unavail, zpool_iter_f, void *);
void pool_list_free(zpool_list_t *);
int pool_list_count(zpool_list_t *);
void pool_list_remove(zpool_list_t *, zpool_handle_t *);

extern libzfs_handle_t *g_zfs;
|
||||
|
||||
|
@ -1,4 +1,3 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
@ -574,6 +573,7 @@ get_replication(nvlist_t *nvroot, boolean_t fatal)
|
||||
nvlist_t *cnv = child[c];
|
||||
const char *path;
|
||||
struct stat64 statbuf;
|
||||
int64_t size = -1LL;
|
||||
const char *childtype;
|
||||
int fd, err;
|
||||
|
||||
@ -608,29 +608,23 @@ get_replication(nvlist_t *nvroot, boolean_t fatal)
|
||||
verify(nvlist_lookup_string(cnv,
|
||||
ZPOOL_CONFIG_PATH, &path) == 0);
|
||||
|
||||
/*
|
||||
* Skip active spares they should never cause
|
||||
* the pool to be evaluated as inconsistent.
|
||||
*/
|
||||
if (is_spare(NULL, path))
|
||||
continue;
|
||||
|
||||
/*
|
||||
* If we have a raidz/mirror that combines disks
|
||||
* with files, only report it as an error when
|
||||
* fatal is set to ensure all the replication
|
||||
* checks aren't skipped in check_replication().
|
||||
* with files, report it as an error.
|
||||
*/
|
||||
if (fatal && !dontreport && type != NULL &&
|
||||
if (!dontreport && type != NULL &&
|
||||
strcmp(type, childtype) != 0) {
|
||||
if (ret != NULL)
|
||||
free(ret);
|
||||
ret = NULL;
|
||||
vdev_error(gettext(
|
||||
"mismatched replication "
|
||||
"level: %s contains both "
|
||||
"files and devices\n"),
|
||||
rep.zprl_type);
|
||||
if (fatal)
|
||||
vdev_error(gettext(
|
||||
"mismatched replication "
|
||||
"level: %s contains both "
|
||||
"files and devices\n"),
|
||||
rep.zprl_type);
|
||||
else
|
||||
return (NULL);
|
||||
dontreport = B_TRUE;
|
||||
}
|
||||
|
||||
@ -661,7 +655,7 @@ get_replication(nvlist_t *nvroot, boolean_t fatal)
|
||||
statbuf.st_size == MAXOFFSET_T)
|
||||
continue;
|
||||
|
||||
int64_t size = statbuf.st_size;
|
||||
size = statbuf.st_size;
|
||||
|
||||
/*
|
||||
* Also make sure that devices and
|
||||
@ -881,18 +875,6 @@ check_replication(nvlist_t *config, nvlist_t *newroot)
|
||||
(u_longlong_t)mirror->zprl_children);
|
||||
ret = -1;
|
||||
}
|
||||
} else if (is_raidz_draid(current, new)) {
|
||||
if (current->zprl_parity != new->zprl_parity) {
|
||||
vdev_error(gettext(
|
||||
"mismatched replication level: pool and "
|
||||
"new vdev with different redundancy, %s "
|
||||
"and %s vdevs, %llu vs. %llu\n"),
|
||||
current->zprl_type,
|
||||
new->zprl_type,
|
||||
(u_longlong_t)current->zprl_parity,
|
||||
(u_longlong_t)new->zprl_parity);
|
||||
ret = -1;
|
||||
}
|
||||
} else if (strcmp(current->zprl_type, new->zprl_type) != 0) {
|
||||
vdev_error(gettext(
|
||||
"mismatched replication level: pool uses %s "
|
||||
@ -1370,7 +1352,7 @@ is_grouping(const char *type, int *mindev, int *maxdev)
|
||||
static int
|
||||
draid_config_by_type(nvlist_t *nv, const char *type, uint64_t children)
|
||||
{
|
||||
uint64_t nparity;
|
||||
uint64_t nparity = 1;
|
||||
uint64_t nspares = 0;
|
||||
uint64_t ndata = UINT64_MAX;
|
||||
uint64_t ngroups = 1;
|
||||
@ -1598,12 +1580,13 @@ construct_spec(nvlist_t *props, int argc, char **argv)
|
||||
is_dedup = is_spare = B_FALSE;
|
||||
}
|
||||
|
||||
if (is_log) {
|
||||
if (is_log || is_special || is_dedup) {
|
||||
if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
|
||||
(void) fprintf(stderr,
|
||||
gettext("invalid vdev "
|
||||
"specification: unsupported 'log' "
|
||||
"device: %s\n"), type);
|
||||
"specification: unsupported '%s' "
|
||||
"device: %s\n"), is_log ? "log" :
|
||||
"special", type);
|
||||
goto spec_out;
|
||||
}
|
||||
nlogs++;
|
||||
|
@@ -1,4 +1,3 @@
// SPDX-License-Identifier: CDDL-1.0
/*
* Gather top-level ZFS pool and resilver/scan statistics and print using
* influxdb line protocol
Some files were not shown because too many files have changed in this diff.