Mirror of https://git.proxmox.com/git/mirror_zfs, synced 2025-05-01 08:58:41 +00:00

Compare commits: 199 commits
Commits in this range (the avatar, author, and date columns of the original table did not survive capture; only the SHA1 column is recoverable):

494aaaed89 522414da3b a8c256046b eb34de04d7 d813aa8530 3b267e72de 349fb77f11 2a953e0ac9
e4985bf5a1 e96675a7b1 d702f86eaf 41c4599cba 56a2a0981e 9b9b09f452 89fcb8c6f9 55dd24c4cc
78287023ce 479dca51c6 87e9e82865 0733fe2aa5 fd836dfe24 e92a680c70 f1659cc782 f863ac3d0f
f6d2e5c075 f2fe4d51a8 76663fe372 44c8ff9b0c f0ffcc3adc e534ba5ce7 1c7048357d 3ec4ea68d4
bd7a02c251 e82e68400a 3f67e012e4 21875dd090 fe9d409e90 7aef672b77 f9a9aea126 8ba748d414
e860cb0200 86c3ed40e1 6e41aca519 79f7de5752 0ef1964c79 eaa62d9951 8ca95d78c5 edebca5dfc
1cc1bf4fa7 0bcd1151f0 78fd79eacd 6d693e20a2 b76724ae47 459c99ff23 95785196f2 2bba9fd479
30ee2ee8ec d7b6e470ff 04186d33be 810fc49a3e 75a7740574 a80e1f1c90 111ae3364c 3990273ffe
da93b72c91 9fa06c5574 8d47d2d579 f6e6e77ed8 120d1787d7 2407f30bda 9be8ddfb3c 3755cde22a
33d7c2d165 2919784be2 8495536f7f bcd010d3a5 c27277daac bf54da84fb 3158b5d718 ba7797c8db
bc77a0c85e 1611b8e56e 8015e2ea66 c53bc3837c e9dc31c74e b04b13ae79 7b1d421adf db5c3b4c76
0d870a1775 608741d062 3079bf2e6c b34bf2d5f6 229ca7d738 9e36c5769f d38f4664a6 99dc1fc340
ba4dbbdae7 8526b12f3d 0ce1b2ca19 0aabd6b482 5f30698670 a199cac6cd 729507d309 3af63683fe
9aa1a2878e cc75c816c5 1c2aee7a52 62677576a7 f7a07d76ee 54c6fbd378 0ce7a068e9 228b064d1b
b9b9cdcdb1 11943656f9 c011ef8c91 cacc599aa2 c7ee59a160 58a707375f 5a22de144a 31a4673c05
3a68f3c50f 8be6308e85 0bf2c5365e d76de9fb17 c0f075c06b 6c2fc56916 e96fbdba34 739db06ce7
4da8c7d11e 32949f2560 79ac1b29d5 7dc2baaa1f 5a7cb0b065 400f56e3f8 63159e5bda 7eabb0af37
c65aaa8387 e99e684b33 1b696429c1 084ff4abd2 ab999406fe d19304ffee 92f095a903 645a7e4d95
95649854ba 895cb689d3 6bdc7259d1 1e488eec60 c418edf1d3 df8c9f351d bb31ded68b c1801cbe59
ffaedf0a44 02ce9030e6 0ae7bfc0a4 bd1eab16eb b3c1807d77 b5e2456333 c47f0f4417 12f2b1f65e
4a104ac047 c24a480631 36d1a3ef4e 2768dc04cc 3366ceaf3a 5d12545da8 a3ea8c8ee6 0426e13271
8aa4f0f0fc 7698503dca b9aa32ff39 571762b290 991834f5dc 41a0f66279 c79d1bae75 70232483b4
c5273e0c31 685ae4429f 81be809a25 8a6fde8213 b6f618f8ff 51a2b59767 8c81c0b05d b221f43943
e037327bfe 1a2e486d25 d8011707cc f5f5a2db95 83b0967c1f 73ba5df31a 1bc244ae93 931dc70550
5299f4f289 f917cf1c03 56ed389a57 e613e4bbe3 b4e630b00c bf6cd30796 1266cebf87
.cirrus.yml: 10 changed lines

```diff
@@ -5,16 +5,16 @@ env:
 build_task:
   matrix:
     freebsd_instance:
-      image_family: freebsd-13-5
+      image_family: freebsd-12-4
     freebsd_instance:
-      image_family: freebsd-14-2
+      image_family: freebsd-13-2
     freebsd_instance:
-      image_family: freebsd-15-0-snap
+      image_family: freebsd-14-0-snap
   prepare_script:
-    - pkg install -y autoconf automake libtool gettext-runtime gmake ksh93 py311-packaging py311-cffi py311-sysctl
+    - pkg install -y autoconf automake libtool gettext-runtime gmake ksh93 py39-packaging py39-cffi py39-sysctl
   configure_script:
     - env MAKE=gmake ./autogen.sh
-    - env MAKE=gmake ./configure --with-config="user" --with-python=3.11
+    - env MAKE=gmake ./configure --with-config="user" --with-python=3.9
   build_script:
     - gmake -j `sysctl -n kern.smp.cpus`
   install_script:
```
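The Cirrus matrix above amounts to a plain user-space build on each FreeBSD image. As a rough local replay of those steps on a FreeBSD host (commands taken from the newer side of the diff; adjust the py311-* package names to whatever Python your ports tree ships):

```bash
# Replay of the Cirrus prepare/configure/build steps by hand.
pkg install -y autoconf automake libtool gettext-runtime gmake ksh93 \
    py311-packaging py311-cffi py311-sysctl
env MAKE=gmake ./autogen.sh
env MAKE=gmake ./configure --with-config="user" --with-python=3.11
gmake -j "$(sysctl -n kern.smp.cpus)"
```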
.github/CONTRIBUTING.md (vendored): 18 changed lines

```diff
@@ -145,18 +145,22 @@ Once everything is in good shape and the details have been worked out you can re
 Any required reviews can then be finalized and the pull request merged.
 
 #### Tests and Benchmarks
-* Every pull request is tested using a GitHub Actions workflow on multiple platforms by running the [zfs-tests.sh and zloop.sh](
+* Every pull request will by tested by the buildbot on multiple platforms by running the [zfs-tests.sh and zloop.sh](
 https://openzfs.github.io/openzfs-docs/Developer%20Resources/Building%20ZFS.html#running-zloop-sh-and-zfs-tests-sh) test suites.
-`.github/workflows/scripts/generate-ci-type.py` is used to determine whether the pull request is nonbehavior, i.e., not introducing behavior changes of any code, configuration or tests. If so, the CI will run on fewer platforms and only essential sanity tests will run. You can always override this by adding `ZFS-CI-Type` line to your commit message:
-  * If your last commit (or `HEAD` in git terms) contains a line `ZFS-CI-Type: quick`, quick mode is forced regardless of what files are changed.
-  * Otherwise, if any commit in a PR contains a line `ZFS-CI-Type: full`, full mode is forced.
 * To verify your changes conform to the [style guidelines](
 https://github.com/openzfs/zfs/blob/master/.github/CONTRIBUTING.md#style-guides
 ), please run `make checkstyle` and resolve any warnings.
-* Code analysis is performed by [CodeQL](https://codeql.github.com/) for each pull request.
-* Test cases should be provided when appropriate. This includes making sure new features have adequate code coverage.
+* Static code analysis of each pull request is performed by the buildbot; run `make lint` to check your changes.
+* Test cases should be provided when appropriate.
+  This includes making sure new features have adequate code coverage.
 * If your pull request improves performance, please include some benchmarks.
-* The pull request must pass all CI checks before being accepted.
+* The pull request must pass all required [ZFS
+  Buildbot](http://build.zfsonlinux.org/) builders before
+  being accepted. If you are experiencing intermittent TEST
+  builder failures, you may be experiencing a [test suite
+  issue](https://github.com/openzfs/zfs/issues?q=is%3Aissue+is%3Aopen+label%3A%22Type%3A+Test+Suite%22).
+  There are also various [buildbot options](https://openzfs.github.io/openzfs-docs/Developer%20Resources/Buildbot%20Options.html)
+  to control how changes are tested.
 
 ### Testing
 All help is appreciated! If you're in a position to run the latest code
```
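The `ZFS-CI-Type` override described in the newer text is just a trailer line in the commit message. A minimal sketch of forcing quick mode for a docs-only change (per the rules above, `quick` is only honored in the HEAD commit, while `full` is honored in any commit of the PR):

```bash
# Hypothetical commit message; the trailer goes on its own line in the body.
git commit -s -F - <<'EOF'
contrib: fix typo in documentation

ZFS-CI-Type: quick
EOF
```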
.github/codeql-cpp.yml (vendored): 4 deleted lines

```diff
@@ -1,4 +0,0 @@
-name: "Custom CodeQL Analysis"
-
-queries:
-  - uses: ./.github/codeql/custom-queries/cpp/deprecatedFunctionUsage.ql
```
.github/codeql-python.yml (vendored): 4 deleted lines

```diff
@@ -1,4 +0,0 @@
-name: "Custom CodeQL Analysis"
-
-paths-ignore:
-  - tests
```
.github/codeql/custom-queries/cpp/deprecatedFunctionUsage.ql: 59 deleted lines (the file header was lost in capture; the name is inferred from the reference in codeql-cpp.yml above)

```diff
@@ -1,59 +0,0 @@
-/**
- * @name Deprecated function usage detection
- * @description Detects functions whose usage is banned from the OpenZFS
- *              codebase due to QA concerns.
- * @kind problem
- * @severity error
- * @id cpp/deprecated-function-usage
- */
-
-import cpp
-
-predicate isDeprecatedFunction(Function f) {
-  f.getName() = "strtok" or
-  f.getName() = "__xpg_basename" or
-  f.getName() = "basename" or
-  f.getName() = "dirname" or
-  f.getName() = "bcopy" or
-  f.getName() = "bcmp" or
-  f.getName() = "bzero" or
-  f.getName() = "asctime" or
-  f.getName() = "asctime_r" or
-  f.getName() = "gmtime" or
-  f.getName() = "localtime" or
-  f.getName() = "strncpy"
-
-}
-
-string getReplacementMessage(Function f) {
-  if f.getName() = "strtok" then
-    result = "Use strtok_r(3) instead!"
-  else if f.getName() = "__xpg_basename" then
-    result = "basename(3) is underspecified. Use zfs_basename() instead!"
-  else if f.getName() = "basename" then
-    result = "basename(3) is underspecified. Use zfs_basename() instead!"
-  else if f.getName() = "dirname" then
-    result = "dirname(3) is underspecified. Use zfs_dirnamelen() instead!"
-  else if f.getName() = "bcopy" then
-    result = "bcopy(3) is deprecated. Use memcpy(3)/memmove(3) instead!"
-  else if f.getName() = "bcmp" then
-    result = "bcmp(3) is deprecated. Use memcmp(3) instead!"
-  else if f.getName() = "bzero" then
-    result = "bzero(3) is deprecated. Use memset(3) instead!"
-  else if f.getName() = "asctime" then
-    result = "Use strftime(3) instead!"
-  else if f.getName() = "asctime_r" then
-    result = "Use strftime(3) instead!"
-  else if f.getName() = "gmtime" then
-    result = "gmtime(3) isn't thread-safe. Use gmtime_r(3) instead!"
-  else if f.getName() = "localtime" then
-    result = "localtime(3) isn't thread-safe. Use localtime_r(3) instead!"
-  else
-    result = "strncpy(3) is deprecated. Use strlcpy(3) instead!"
-}
-
-from FunctionCall fc, Function f
-where
-  fc.getTarget() = f and
-  isDeprecatedFunction(f)
-select fc, getReplacementMessage(f)
```
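For reference, a query like this can also be run outside the workflow with the stock CodeQL CLI. A minimal sketch, with the database and output names invented for illustration:

```bash
# Build a CodeQL database from a configured OpenZFS tree, then run the
# custom query against it ("zfs-db" and "deprecated.sarif" are made up).
codeql database create zfs-db --language=cpp --command="make -j$(nproc)"
codeql database analyze zfs-db \
    .github/codeql/custom-queries/cpp/deprecatedFunctionUsage.ql \
    --format=sarif-latest --output=deprecated.sarif
```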
.github/codeql/custom-queries/cpp/qlpack.yml (vendored): 4 deleted lines

```diff
@@ -1,4 +0,0 @@
-name: openzfs-cpp-queries
-version: 0.0.0
-libraryPathDependencies: codeql-cpp
-suites: openzfs-cpp-suite
```
.github/workflows/README.md (vendored): 46 changed lines

````diff
@@ -4,54 +4,44 @@
 ```mermaid
 flowchart TB
   subgraph CleanUp and Summary
-    CleanUp+Summary
+    Part1-20.04-->CleanUp+nice+Summary
+    Part2-20.04-->CleanUp+nice+Summary
+    PartN-20.04-->CleanUp+nice+Summary
+    Part1-22.04-->CleanUp+nice+Summary
+    Part2-22.04-->CleanUp+nice+Summary
+    PartN-22.04-->CleanUp+nice+Summary
   end
 
   subgraph Functional Testings
-    sanity-checks-20.04
-    zloop-checks-20.04
     functional-testing-20.04-->Part1-20.04
     functional-testing-20.04-->Part2-20.04
-    functional-testing-20.04-->Part3-20.04
-    functional-testing-20.04-->Part4-20.04
+    functional-testing-20.04-->PartN-20.04
     functional-testing-22.04-->Part1-22.04
     functional-testing-22.04-->Part2-22.04
-    functional-testing-22.04-->Part3-22.04
-    functional-testing-22.04-->Part4-22.04
-    sanity-checks-22.04
-    zloop-checks-22.04
+    functional-testing-22.04-->PartN-22.04
   end
 
+  subgraph Sanity and zloop Testings
+    sanity-checks-20.04-->functional-testing-20.04
+    sanity-checks-22.04-->functional-testing-22.04
+    zloop-checks-20.04-->functional
+    zloop-checks-22.04-->functional
+  end
+
   subgraph Code Checking + Building
-    Build-Ubuntu-20.04
     codeql.yml
     checkstyle.yml
-    Build-Ubuntu-22.04
-  end
-
     Build-Ubuntu-20.04-->sanity-checks-20.04
-    Build-Ubuntu-20.04-->zloop-checks-20.04
-    Build-Ubuntu-20.04-->functional-testing-20.04
     Build-Ubuntu-22.04-->sanity-checks-22.04
+    Build-Ubuntu-20.04-->zloop-checks-20.04
     Build-Ubuntu-22.04-->zloop-checks-22.04
-    Build-Ubuntu-22.04-->functional-testing-22.04
+  end
 
-  sanity-checks-20.04-->CleanUp+Summary
-  Part1-20.04-->CleanUp+Summary
-  Part2-20.04-->CleanUp+Summary
-  Part3-20.04-->CleanUp+Summary
-  Part4-20.04-->CleanUp+Summary
-  Part1-22.04-->CleanUp+Summary
-  Part2-22.04-->CleanUp+Summary
-  Part3-22.04-->CleanUp+Summary
-  Part4-22.04-->CleanUp+Summary
-  sanity-checks-22.04-->CleanUp+Summary
 ```
 
 
 1) build zfs modules for Ubuntu 20.04 and 22.04 (~15m)
 2) 2x zloop test (~10m) + 2x sanity test (~25m)
-3) 4x functional testings in parts 1..4 (each ~1h)
+3) functional testings in parts 1..5 (each ~1h)
 4) cleanup and create summary
    - content of summary depends on the results of the steps
````
.github/workflows/build-dependencies.txt (vendored, new file): 57 added lines

```diff
@@ -0,0 +1,57 @@
+acl
+alien
+attr
+autoconf
+bc
+build-essential
+curl
+dbench
+debhelper-compat
+dh-python
+dkms
+fakeroot
+fio
+gdb
+gdebi
+git
+ksh
+lcov
+libacl1-dev
+libaio-dev
+libattr1-dev
+libblkid-dev
+libcurl4-openssl-dev
+libdevmapper-dev
+libelf-dev
+libffi-dev
+libmount-dev
+libpam0g-dev
+libselinux1-dev
+libssl-dev
+libtool
+libudev-dev
+linux-headers-generic
+lsscsi
+mdadm
+nfs-kernel-server
+pamtester
+parted
+po-debconf
+python3
+python3-all-dev
+python3-cffi
+python3-dev
+python3-packaging
+python3-pip
+python3-setuptools
+python3-sphinx
+rng-tools-debian
+rsync
+samba
+sysstat
+uuid-dev
+watchdog
+wget
+xfslibs-dev
+xz-utils
+zlib1g-dev
```
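The older workflows consume this list with xargs (see the checkstyle.yaml hunk below); the same one-liner works on a local Ubuntu build box:

```bash
# Feed the package list straight to apt-get in one install transaction.
sudo xargs --arg-file=.github/workflows/build-dependencies.txt \
    apt-get install -qq
```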
.github/workflows/checkstyle-dependencies.txt (vendored, new file): 5 added lines

```diff
@@ -0,0 +1,5 @@
+cppcheck
+devscripts
+mandoc
+pax-utils
+shellcheck
```
.github/workflows/checkstyle.yaml (vendored): 27 changed lines

```diff
@@ -4,24 +4,24 @@ on:
   push:
   pull_request:
 
-concurrency:
-  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
-  cancel-in-progress: true
-
 jobs:
   checkstyle:
     runs-on: ubuntu-22.04
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v3
         with:
           ref: ${{ github.event.pull_request.head.sha }}
       - name: Install dependencies
         run: |
-          # for x in lxd core20 snapd; do sudo snap remove $x; done
-          sudo apt-get purge -y snapd google-chrome-stable firefox
-          ONLY_DEPS=1 .github/workflows/scripts/qemu-3-deps-vm.sh ubuntu22
-          sudo apt-get install -y cppcheck devscripts mandoc pax-utils shellcheck
-          sudo python -m pipx install --quiet flake8
+          # https://github.com/orgs/community/discussions/47863
+          sudo apt-mark hold grub-efi-amd64-signed
+          sudo apt-get update --fix-missing
+          sudo apt-get upgrade
+          sudo xargs --arg-file=${{ github.workspace }}/.github/workflows/build-dependencies.txt apt-get install -qq
+          sudo xargs --arg-file=${{ github.workspace }}/.github/workflows/checkstyle-dependencies.txt apt-get install -qq
+          sudo python3 -m pip install --quiet flake8
+          sudo apt-get clean
 
           # confirm that the tools are installed
           # the build system doesn't fail when they are not
           checkbashisms --version
@@ -31,13 +31,8 @@ jobs:
           shellcheck --version
       - name: Prepare
         run: |
-          sed -i '/DEBUG_CFLAGS="-Werror"/s/^/#/' config/zfs-build.m4
           ./autogen.sh
-      - name: Configure
-        run: |
           ./configure
-      - name: Make
-        run: |
           make -j$(nproc) --no-print-directory --silent
       - name: Checkstyle
         run: |
@@ -57,7 +52,7 @@ jobs:
         if: failure() && steps.CheckABI.outcome == 'failure'
         run: |
           find -name *.abi | tar -cf abi_files.tar -T -
-      - uses: actions/upload-artifact@v4
+      - uses: actions/upload-artifact@v3
         if: failure() && steps.CheckABI.outcome == 'failure'
         with:
           name: New ABI files (use only if you're sure about interface changes)
```
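Whichever side of this diff applies, the style gate itself is the in-tree make targets, so a local pre-push check is roughly:

```bash
# Assumes ./autogen.sh && ./configure have already been run in the tree.
make -j"$(nproc)" --no-print-directory --silent
make checkstyle
```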
.github/workflows/codeql.yml (vendored): 15 changed lines

```diff
@@ -4,14 +4,10 @@ on:
   push:
   pull_request:
 
-concurrency:
-  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
-  cancel-in-progress: true
-
 jobs:
   analyze:
     name: Analyze
-    runs-on: ubuntu-22.04
+    runs-on: ubuntu-latest
     permissions:
       actions: read
       contents: read
@@ -28,18 +24,17 @@ jobs:
         echo "MAKEFLAGS=-j$(nproc)" >> $GITHUB_ENV
 
     - name: Checkout repository
-      uses: actions/checkout@v4
+      uses: actions/checkout@v3
 
     - name: Initialize CodeQL
-      uses: github/codeql-action/init@v3
+      uses: github/codeql-action/init@v2
       with:
-        config-file: .github/codeql-${{ matrix.language }}.yml
         languages: ${{ matrix.language }}
 
     - name: Autobuild
-      uses: github/codeql-action/autobuild@v3
+      uses: github/codeql-action/autobuild@v2
 
     - name: Perform CodeQL Analysis
-      uses: github/codeql-action/analyze@v3
+      uses: github/codeql-action/analyze@v2
       with:
         category: "/language:${{matrix.language}}"
```
.github/workflows/labels.yml (vendored): 49 deleted lines

```diff
@@ -1,49 +0,0 @@
-name: labels
-
-on:
-  pull_request_target:
-    types: [ opened, synchronize, reopened, converted_to_draft, ready_for_review ]
-
-permissions:
-  pull-requests: write
-
-jobs:
-  open:
-    runs-on: ubuntu-latest
-    if: ${{ github.event.action == 'opened' && github.event.pull_request.draft }}
-    steps:
-      - env:
-          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-          ISSUE: ${{ github.event.pull_request.html_url }}
-        run: |
-          gh pr edit $ISSUE --add-label "Status: Work in Progress"
-
-  push:
-    runs-on: ubuntu-latest
-    if: ${{ github.event.action == 'synchronize' || github.event.action == 'reopened' }}
-    steps:
-      - env:
-          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-          ISSUE: ${{ github.event.pull_request.html_url }}
-        run: |
-          gh pr edit $ISSUE --remove-label "Status: Accepted,Status: Inactive,Status: Revision Needed,Status: Stale"
-
-  draft:
-    runs-on: ubuntu-latest
-    if: ${{ github.event.action == 'converted_to_draft' }}
-    steps:
-      - env:
-          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-          ISSUE: ${{ github.event.pull_request.html_url }}
-        run: |
-          gh pr edit $ISSUE --remove-label "Status: Accepted,Status: Code Review Needed,Status: Inactive,Status: Revision Needed,Status: Stale" --add-label "Status: Work in Progress"
-
-  rfr:
-    runs-on: ubuntu-latest
-    if: ${{ github.event.action == 'ready_for_review' }}
-    steps:
-      - env:
-          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-          ISSUE: ${{ github.event.pull_request.html_url }}
-        run: |
-          gh pr edit $ISSUE --remove-label "Status: Accepted,Status: Inactive,Status: Revision Needed,Status: Stale,Status: Work in Progress" --add-label "Status: Code Review Needed"
```
.github/workflows/scripts/README.md (vendored): 14 deleted lines

```diff
@@ -1,14 +0,0 @@
-
-Workflow for each operating system:
-- install qemu on the github runner
-- download current cloud image of operating system
-- start and init that image via cloud-init
-- install dependencies and poweroff system
-- start system and build openzfs and then poweroff again
-- clone build system and start 2 instances of it
-- run functional testings and complete in around 3h
-- when tests are done, do some logfile preparing
-- show detailed results for each system
-- in the end, generate the job summary
-
-/TR 14.09.2024
```
.github/workflows/scripts/generate-ci-type.py (vendored): 107 deleted lines

```diff
@@ -1,107 +0,0 @@
-#!/usr/bin/env python3
-
-"""
-Determine the CI type based on the change list and commit message.
-
-Prints "quick" if (explicity required by user):
-- the *last* commit message contains 'ZFS-CI-Type: quick'
-or if (heuristics):
-- the files changed are not in the list of specified directories, and
-- all commit messages do not contain 'ZFS-CI-Type: full'
-
-Otherwise prints "full".
-"""
-
-import sys
-import subprocess
-import re
-
-"""
-Patterns of files that are not considered to trigger full CI.
-Note: not using pathlib.Path.match() because it does not support '**'
-"""
-FULL_RUN_IGNORE_REGEX = list(map(re.compile, [
-    r'.*\.md',
-    r'.*\.gitignore'
-]))
-
-"""
-Patterns of files that are considered to trigger full CI.
-"""
-FULL_RUN_REGEX = list(map(re.compile, [
-    r'cmd.*',
-    r'configs/.*',
-    r'META',
-    r'.*\.am',
-    r'.*\.m4',
-    r'autogen\.sh',
-    r'configure\.ac',
-    r'copy-builtin',
-    r'contrib',
-    r'etc',
-    r'include',
-    r'lib/.*',
-    r'module/.*',
-    r'scripts/.*',
-    r'tests/.*',
-    r'udev/.*'
-]))
-
-if __name__ == '__main__':
-
-    prog = sys.argv[0]
-
-    if len(sys.argv) != 3:
-        print(f'Usage: {prog} <head_ref> <base_ref>')
-        sys.exit(1)
-
-    head, base = sys.argv[1:3]
-
-    def output_type(type, reason):
-        print(f'{prog}: will run {type} CI: {reason}', file=sys.stderr)
-        print(type)
-        sys.exit(0)
-
-    # check last (HEAD) commit message
-    last_commit_message_raw = subprocess.run([
-        'git', 'show', '-s', '--format=%B', 'HEAD'
-    ], check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-
-    for line in last_commit_message_raw.stdout.decode().splitlines():
-        if line.strip().lower() == 'zfs-ci-type: quick':
-            output_type('quick', f'explicitly requested by HEAD commit {head}')
-
-    # check all commit messages
-    all_commit_message_raw = subprocess.run([
-        'git', 'show', '-s',
-        '--format=ZFS-CI-Commit: %H%n%B', f'{head}...{base}'
-    ], check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-    all_commit_message = all_commit_message_raw.stdout.decode().splitlines()
-
-    commit_ref = head
-    for line in all_commit_message:
-        if line.startswith('ZFS-CI-Commit:'):
-            commit_ref = line.lstrip('ZFS-CI-Commit:').rstrip()
-        if line.strip().lower() == 'zfs-ci-type: full':
-            output_type('full', f'explicitly requested by commit {commit_ref}')
-
-    # check changed files
-    changed_files_raw = subprocess.run([
-        'git', 'diff', '--name-only', head, base
-    ], check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-    changed_files = changed_files_raw.stdout.decode().splitlines()
-
-    for f in changed_files:
-        for r in FULL_RUN_IGNORE_REGEX:
-            if r.match(f):
-                break
-        else:
-            for r in FULL_RUN_REGEX:
-                if r.match(f):
-                    output_type(
-                        'full',
-                        f'changed file "{f}" matches pattern "{r.pattern}"'
-                    )
-
-    # catch-all
-    output_type('quick', 'no changed file matches full CI patterns')
```
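Per its docstring the script takes two refs and prints the chosen mode on stdout, with the reason going to stderr. A sketch with illustrative ref names (in the workflow these would be the PR head and base):

```bash
# "pr-head" and "origin/master" are placeholders, not names from the workflow.
ci_type=$(.github/workflows/scripts/generate-ci-type.py pr-head origin/master)
echo "selected CI type: $ci_type"   # prints "quick" or "full"
```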
.github/workflows/scripts/generate-summary.sh (vendored, new executable file): 119 added lines

```diff
@@ -0,0 +1,119 @@
+#!/usr/bin/env bash
+
+# for runtime reasons we split functional testings into N parts
+# - use a define to check for missing tarfiles
+FUNCTIONAL_PARTS="4"
+
+ZTS_REPORT="tests/test-runner/bin/zts-report.py"
+chmod +x $ZTS_REPORT
+
+function output() {
+  echo -e $* >> Summary.md
+}
+
+function error() {
+  output ":bangbang: $* :bangbang:\n"
+}
+
+# this function generates the real summary
+# - expects a logfile "log" in current directory
+function generate() {
+  # we issued some error already
+  test ! -s log && return
+
+  # for overview and zts-report
+  cat log | grep '^Test' > list
+
+  # error details
+  awk '/\[FAIL\]|\[KILLED\]/{ show=1; print; next; }
+    /\[SKIP\]|\[PASS\]/{ show=0; } show' log > err
+
+  # summary of errors
+  if [ -s err ]; then
+    output "<pre>"
+    $ZTS_REPORT --no-maybes ./list >> Summary.md
+    output "</pre>"
+
+    # generate seperate error logfile
+    ERRLOGS=$((ERRLOGS+1))
+    errfile="err-$ERRLOGS.md"
+    echo -e "\n## $headline (debugging)\n" >> $errfile
+    echo "<details><summary>Error Listing - with dmesg and dbgmsg</summary><pre>" >> $errfile
+    dd if=err bs=999k count=1 >> $errfile
+    echo "</pre></details>" >> $errfile
+  else
+    output "All tests passed :thumbsup:"
+  fi
+
+  output "<details><summary>Full Listing</summary><pre>"
+  cat list >> Summary.md
+  output "</pre></details>"
+
+  # remove tmp files
+  rm -f err list log
+}
+
+# check tarfiles and untar
+function check_tarfile() {
+  if [ -f "$1" ]; then
+    tar xf "$1" || error "Tarfile $1 returns some error"
+  else
+    error "Tarfile $1 not found"
+  fi
+}
+
+# check logfile and concatenate test results
+function check_logfile() {
+  if [ -f "$1" ]; then
+    cat "$1" >> log
+  else
+    error "Logfile $1 not found"
+  fi
+}
+
+# sanity
+function summarize_s() {
+  headline="$1"
+  output "\n## $headline\n"
+  rm -rf testfiles
+  check_tarfile "$2/sanity.tar"
+  check_logfile "testfiles/log"
+  generate
+}
+
+# functional
+function summarize_f() {
+  headline="$1"
+  output "\n## $headline\n"
+  rm -rf testfiles
+  for i in $(seq 1 $FUNCTIONAL_PARTS); do
+    tarfile="$2/part$i.tar"
+    check_tarfile "$tarfile"
+    check_logfile "testfiles/log"
+  done
+  generate
+}
+
+# https://docs.github.com/en/enterprise-server@3.6/actions/using-workflows/workflow-commands-for-github-actions#step-isolation-and-limits
+# Job summaries are isolated between steps and each step is restricted to a maximum size of 1MiB.
+# [ ] can not show all error findings here
+# [x] split files into smaller ones and create additional steps
+
+ERRLOGS=0
+if [ ! -f Summary/Summary.md ]; then
+  # first call, we do the default summary (~500k)
+  echo -n > Summary.md
+  summarize_s "Sanity Tests Ubuntu 20.04" Logs-20.04-sanity
+  summarize_s "Sanity Tests Ubuntu 22.04" Logs-22.04-sanity
+  summarize_f "Functional Tests Ubuntu 20.04" Logs-20.04-functional
+  summarize_f "Functional Tests Ubuntu 22.04" Logs-22.04-functional
+
+  cat Summary.md >> $GITHUB_STEP_SUMMARY
+  mkdir -p Summary
+  mv *.md Summary
+else
+  # here we get, when errors where returned in first call
+  test -f Summary/err-$1.md && cat Summary/err-$1.md >> $GITHUB_STEP_SUMMARY
+fi
+
+exit 0
```
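The err-N.md split exists because of the 1 MiB per-step job-summary limit cited in the script. As far as the code reads, the first call (no argument, with the Logs-* artifact directories unpacked in the working directory and $GITHUB_STEP_SUMMARY set) writes the main summary, and follow-up calls pass an index:

```bash
# First call: build Summary.md and append it to $GITHUB_STEP_SUMMARY.
.github/workflows/scripts/generate-summary.sh
# Follow-up steps: emit one error chunk each, staying under 1 MiB per step.
.github/workflows/scripts/generate-summary.sh 1
```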
.github/workflows/scripts/merge_summary.awk (vendored): 109 deleted lines

```diff
@@ -1,109 +0,0 @@
-#!/bin/awk -f
-#
-# Merge multiple ZTS tests results summaries into a single summary. This is
-# needed when you're running different parts of ZTS on different tests
-# runners or VMs.
-#
-# Usage:
-#
-# ./merge_summary.awk summary1.txt [summary2.txt] [summary3.txt] ...
-#
-# or:
-#
-# cat summary*.txt | ./merge_summary.awk
-#
-BEGIN {
-    i=-1
-    pass=0
-    fail=0
-    skip=0
-    state=""
-    cl=0
-    el=0
-    upl=0
-    ul=0
-
-    # Total seconds of tests runtime
-    total=0;
-}
-
-# Skip empty lines
-/^\s*$/{next}
-
-# Skip Configuration and Test lines
-/^Test:/{state=""; next}
-/Configuration/{state="";next}
-
-# When we see "test-runner.py" stop saving config lines, and
-# save test runner lines
-/test-runner.py/{state="testrunner"; runner=runner$0"\n"; next}
-
-# We need to differentiate the PASS counts from test result lines that start
-# with PASS, like:
-#
-# PASS mv_files/setup
-#
-# Use state="pass_count" to differentiate
-#
-/Results Summary/{state="pass_count"; next}
-/PASS/{ if (state=="pass_count") {pass += $2}}
-/FAIL/{ if (state=="pass_count") {fail += $2}}
-/SKIP/{ if (state=="pass_count") {skip += $2}}
-/Running Time/{
-    state="";
-    running[i]=$3;
-    split($3, arr, ":")
-    total += arr[1] * 60 * 60;
-    total += arr[2] * 60;
-    total += arr[3]
-    next;
-}
-
-/Tests with results other than PASS that are expected/{state="expected_lines"; next}
-/Tests with result of PASS that are unexpected/{state="unexpected_pass_lines"; next}
-/Tests with results other than PASS that are unexpected/{state="unexpected_lines"; next}
-{
-    if (state == "expected_lines") {
-        expected_lines[el] = $0
-        el++
-    }
-
-    if (state == "unexpected_pass_lines") {
-        unexpected_pass_lines[upl] = $0
-        upl++
-    }
-    if (state == "unexpected_lines") {
-        unexpected_lines[ul] = $0
-        ul++
-    }
-}
-
-# Reproduce summary
-END {
-    print runner;
-    print "\nResults Summary"
-    print "PASS\t"pass
-    print "FAIL\t"fail
-    print "SKIP\t"skip
-    print ""
-    print "Running Time:\t"strftime("%T", total, 1)
-    if (pass+fail+skip > 0) {
-        percent_passed=(pass/(pass+fail+skip) * 100)
-    }
-    printf "Percent passed:\t%3.2f%", percent_passed
-
-    print "\n\nTests with results other than PASS that are expected:"
-    asort(expected_lines, sorted)
-    for (j in sorted)
-        print sorted[j]
-
-    print "\n\nTests with result of PASS that are unexpected:"
-    asort(unexpected_pass_lines, sorted)
-    for (j in sorted)
-        print sorted[j]
-
-    print "\n\nTests with results other than PASS that are unexpected:"
-    asort(unexpected_lines, sorted)
-    for (j in sorted)
-        print sorted[j]
-}
```
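Both invocation styles come straight from the header comment; note that asort() and strftime() in the END block are gawk extensions, so a strictly POSIX awk will not run this script:

```bash
# Merge per-runner ZTS summaries into one combined report.
./merge_summary.awk summary1.txt summary2.txt
cat summary*.txt | ./merge_summary.awk
```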
.github/workflows/scripts/qemu-1-setup.sh (vendored): 93 deleted lines

```diff
@@ -1,93 +0,0 @@
-#!/usr/bin/env bash
-
-######################################################################
-# 1) setup qemu instance on action runner
-######################################################################
-
-set -eu
-
-# install needed packages
-export DEBIAN_FRONTEND="noninteractive"
-sudo apt-get -y update
-sudo apt-get install -y axel cloud-image-utils daemonize guestfs-tools \
-  ksmtuned virt-manager linux-modules-extra-$(uname -r) zfsutils-linux
-
-# generate ssh keys
-rm -f ~/.ssh/id_ed25519
-ssh-keygen -t ed25519 -f ~/.ssh/id_ed25519 -q -N ""
-
-# we expect RAM shortage
-cat << EOF | sudo tee /etc/ksmtuned.conf > /dev/null
-# /etc/ksmtuned.conf - Configuration file for ksmtuned
-# https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/7/html/virtualization_tuning_and_optimization_guide/chap-ksm
-KSM_MONITOR_INTERVAL=60
-
-# Millisecond sleep between ksm scans for 16Gb server.
-# Smaller servers sleep more, bigger sleep less.
-KSM_SLEEP_MSEC=30
-
-KSM_NPAGES_BOOST=0
-KSM_NPAGES_DECAY=0
-KSM_NPAGES_MIN=1000
-KSM_NPAGES_MAX=25000
-
-KSM_THRES_COEF=80
-KSM_THRES_CONST=8192
-
-LOGFILE=/var/log/ksmtuned.log
-DEBUG=1
-EOF
-sudo systemctl restart ksm
-sudo systemctl restart ksmtuned
-
-# not needed
-sudo systemctl stop docker.socket
-sudo systemctl stop multipathd.socket
-
-# remove default swapfile and /mnt
-sudo swapoff -a
-sudo umount -l /mnt
-DISK="/dev/disk/cloud/azure_resource-part1"
-sudo sed -e "s|^$DISK.*||g" -i /etc/fstab
-sudo wipefs -aq $DISK
-sudo systemctl daemon-reload
-
-sudo modprobe loop
-sudo modprobe zfs
-
-# partition the disk as needed
-DISK="/dev/disk/cloud/azure_resource"
-sudo sgdisk --zap-all $DISK
-sudo sgdisk -p \
-  -n 1:0:+16G -c 1:"swap" \
-  -n 2:0:0 -c 2:"tests" \
-  $DISK
-sync
-sleep 1
-
-# swap with same size as RAM
-sudo mkswap $DISK-part1
-sudo swapon $DISK-part1
-
-# 60GB data disk
-SSD1="$DISK-part2"
-
-# 10GB data disk on ext4
-sudo fallocate -l 10G /test.ssd1
-SSD2=$(sudo losetup -b 4096 -f /test.ssd1 --show)
-
-# adjust zfs module parameter and create pool
-exec 1>/dev/null
-ARC_MIN=$((1024*1024*256))
-ARC_MAX=$((1024*1024*512))
-echo $ARC_MIN | sudo tee /sys/module/zfs/parameters/zfs_arc_min
-echo $ARC_MAX | sudo tee /sys/module/zfs/parameters/zfs_arc_max
-echo 1 | sudo tee /sys/module/zfs/parameters/zvol_use_blk_mq
-sudo zpool create -f -o ashift=12 zpool $SSD1 $SSD2 \
-  -O relatime=off -O atime=off -O xattr=sa -O compression=lz4 \
-  -O mountpoint=/mnt/tests
-
-# no need for some scheduler
-for i in /sys/block/s*/queue/scheduler; do
-  echo "none" | sudo tee $i > /dev/null
-done
```
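A few quick sanity checks after the setup script has run, assuming the device and pool names used above:

```bash
swapon --show                              # 16G swap on azure_resource-part1
zpool status zpool                         # the two-vdev test pool
grep . /sys/kernel/mm/ksm/pages_sharing    # nonzero once KSM starts merging
```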
.github/workflows/scripts/qemu-2-start.sh (vendored): 228 deleted lines

```diff
@@ -1,228 +0,0 @@
-#!/usr/bin/env bash
-
-######################################################################
-# 2) start qemu with some operating system, init via cloud-init
-######################################################################
-
-set -eu
-
-# short name used in zfs-qemu.yml
-OS="$1"
-
-# OS variant (virt-install --os-variant list)
-OSv=$OS
-
-# compressed with .zst extension
-REPO="https://github.com/mcmilk/openzfs-freebsd-images"
-FREEBSD="$REPO/releases/download/v2024-12-14"
-URLzs=""
-
-# Ubuntu mirrors
-#UBMIRROR="https://cloud-images.ubuntu.com"
-#UBMIRROR="https://mirrors.cloud.tencent.com/ubuntu-cloud-images"
-UBMIRROR="https://mirror.citrahost.com/ubuntu-cloud-images"
-
-# default nic model for vm's
-NIC="virtio"
-
-case "$OS" in
-  almalinux8)
-    OSNAME="AlmaLinux 8"
-    URL="https://repo.almalinux.org/almalinux/8/cloud/x86_64/images/AlmaLinux-8-GenericCloud-latest.x86_64.qcow2"
-    ;;
-  almalinux9)
-    OSNAME="AlmaLinux 9"
-    URL="https://repo.almalinux.org/almalinux/9/cloud/x86_64/images/AlmaLinux-9-GenericCloud-latest.x86_64.qcow2"
-    ;;
-  archlinux)
-    OSNAME="Archlinux"
-    URL="https://geo.mirror.pkgbuild.com/images/latest/Arch-Linux-x86_64-cloudimg.qcow2"
-    # dns sometimes fails with that url :/
-    echo "89.187.191.12 geo.mirror.pkgbuild.com" | sudo tee /etc/hosts > /dev/null
-    ;;
-  centos-stream10)
-    OSNAME="CentOS Stream 10"
-    # TODO: #16903 Overwrite OSv to stream9 for virt-install until it's added to osinfo
-    OSv="centos-stream9"
-    URL="https://cloud.centos.org/centos/10-stream/x86_64/images/CentOS-Stream-GenericCloud-10-latest.x86_64.qcow2"
-    ;;
-  centos-stream9)
-    OSNAME="CentOS Stream 9"
-    URL="https://cloud.centos.org/centos/9-stream/x86_64/images/CentOS-Stream-GenericCloud-9-latest.x86_64.qcow2"
-    ;;
-  debian11)
-    OSNAME="Debian 11"
-    URL="https://cloud.debian.org/images/cloud/bullseye/latest/debian-11-generic-amd64.qcow2"
-    ;;
-  debian12)
-    OSNAME="Debian 12"
-    URL="https://cloud.debian.org/images/cloud/bookworm/latest/debian-12-generic-amd64.qcow2"
-    ;;
-  fedora40)
-    OSNAME="Fedora 40"
-    OSv="fedora-unknown"
-    URL="https://download.fedoraproject.org/pub/fedora/linux/releases/40/Cloud/x86_64/images/Fedora-Cloud-Base-Generic.x86_64-40-1.14.qcow2"
-    ;;
-  fedora41)
-    OSNAME="Fedora 41"
-    OSv="fedora-unknown"
-    URL="https://download.fedoraproject.org/pub/fedora/linux/releases/41/Cloud/x86_64/images/Fedora-Cloud-Base-Generic-41-1.4.x86_64.qcow2"
-    ;;
-  freebsd13-4r)
-    OSNAME="FreeBSD 13.4-RELEASE"
-    OSv="freebsd13.0"
-    URLzs="$FREEBSD/amd64-freebsd-13.4-RELEASE.qcow2.zst"
-    BASH="/usr/local/bin/bash"
-    NIC="rtl8139"
-    ;;
-  freebsd14-2r)
-    OSNAME="FreeBSD 14.2-RELEASE"
-    OSv="freebsd14.0"
-    URLzs="$FREEBSD/amd64-freebsd-14.2-RELEASE.qcow2.zst"
-    BASH="/usr/local/bin/bash"
-    ;;
-  freebsd13-4s)
-    OSNAME="FreeBSD 13.4-STABLE"
-    OSv="freebsd13.0"
-    URLzs="$FREEBSD/amd64-freebsd-13.4-STABLE.qcow2.zst"
-    BASH="/usr/local/bin/bash"
-    NIC="rtl8139"
-    ;;
-  freebsd14-2s)
-    OSNAME="FreeBSD 14.2-STABLE"
-    OSv="freebsd14.0"
-    URLzs="$FREEBSD/amd64-freebsd-14.2-STABLE.qcow2.zst"
-    BASH="/usr/local/bin/bash"
-    ;;
-  freebsd15-0c)
-    OSNAME="FreeBSD 15.0-CURRENT"
-    OSv="freebsd14.0"
-    URLzs="$FREEBSD/amd64-freebsd-15.0-CURRENT.qcow2.zst"
-    BASH="/usr/local/bin/bash"
-    ;;
-  tumbleweed)
-    OSNAME="openSUSE Tumbleweed"
-    OSv="opensusetumbleweed"
-    MIRROR="http://opensuse-mirror-gce-us.susecloud.net"
-    URL="$MIRROR/tumbleweed/appliances/openSUSE-MicroOS.x86_64-OpenStack-Cloud.qcow2"
-    ;;
-  ubuntu20)
-    OSNAME="Ubuntu 20.04"
-    OSv="ubuntu20.04"
-    URL="$UBMIRROR/focal/current/focal-server-cloudimg-amd64.img"
-    ;;
-  ubuntu22)
-    OSNAME="Ubuntu 22.04"
-    OSv="ubuntu22.04"
-    URL="$UBMIRROR/jammy/current/jammy-server-cloudimg-amd64.img"
-    ;;
-  ubuntu24)
-    OSNAME="Ubuntu 24.04"
-    OSv="ubuntu24.04"
-    URL="$UBMIRROR/noble/current/noble-server-cloudimg-amd64.img"
-    ;;
-  *)
-    echo "Wrong value for OS variable!"
-    exit 111
-    ;;
-esac
-
-# environment file
-ENV="/var/tmp/env.txt"
-echo "ENV=$ENV" >> $ENV
-
-# result path
-echo 'RESPATH="/var/tmp/test_results"' >> $ENV
-
-# FreeBSD 13 has problems with: e1000+virtio
-echo "NIC=$NIC" >> $ENV
-
-# freebsd15 -> used in zfs-qemu.yml
-echo "OS=$OS" >> $ENV
-
-# freebsd14.0 -> used for virt-install
-echo "OSv=\"$OSv\"" >> $ENV
-
-# FreeBSD 15 (Current) -> used for summary
-echo "OSNAME=\"$OSNAME\"" >> $ENV
-
-sudo mkdir -p "/mnt/tests"
-sudo chown -R $(whoami) /mnt/tests
-
-# we are downloading via axel, curl and wget are mostly slower and
-# require more return value checking
-IMG="/mnt/tests/cloudimg.qcow2"
-if [ ! -z "$URLzs" ]; then
-  echo "Loading image $URLzs ..."
-  time axel -q -o "$IMG.zst" "$URLzs"
-  zstd -q -d --rm "$IMG.zst"
-else
-  echo "Loading image $URL ..."
-  time axel -q -o "$IMG" "$URL"
-fi
-
-DISK="/dev/zvol/zpool/openzfs"
-FORMAT="raw"
-sudo zfs create -ps -b 64k -V 80g zpool/openzfs
-while true; do test -b $DISK && break; sleep 1; done
-echo "Importing VM image to zvol..."
-sudo qemu-img dd -f qcow2 -O raw if=$IMG of=$DISK bs=4M
-rm -f $IMG
-
-PUBKEY=$(cat ~/.ssh/id_ed25519.pub)
-cat <<EOF > /tmp/user-data
-#cloud-config
-
-fqdn: $OS
-
-users:
-- name: root
-  shell: $BASH
-- name: zfs
-  sudo: ALL=(ALL) NOPASSWD:ALL
-  shell: $BASH
-  ssh_authorized_keys:
-    - $PUBKEY
-
-growpart:
-  mode: auto
-  devices: ['/']
-  ignore_growroot_disabled: false
-EOF
-
-sudo virsh net-update default add ip-dhcp-host \
-  "<host mac='52:54:00:83:79:00' ip='192.168.122.10'/>" --live --config
-
-sudo virt-install \
-  --os-variant $OSv \
-  --name "openzfs" \
-  --cpu host-passthrough \
-  --virt-type=kvm --hvm \
-  --vcpus=4,sockets=1 \
-  --memory $((1024*12)) \
-  --memballoon model=virtio \
-  --graphics none \
-  --network bridge=virbr0,model=$NIC,mac='52:54:00:83:79:00' \
-  --cloud-init user-data=/tmp/user-data \
-  --disk $DISK,bus=virtio,cache=none,format=$FORMAT,driver.discard=unmap \
-  --import --noautoconsole >/dev/null
-
-# Give the VMs hostnames so we don't have to refer to them with
-# hardcoded IP addresses.
-#
-# vm0: Initial VM we install dependencies and build ZFS on.
-# vm1..2 Testing VMs
-for i in {0..9} ; do
-  echo "192.168.122.1$i vm$i" | sudo tee -a /etc/hosts
-done
-
-# in case the directory isn't there already
-mkdir -p $HOME/.ssh
-
-cat <<EOF >> $HOME/.ssh/config
-# no questions please
-StrictHostKeyChecking no
-
-# small timeout, used in while loops later
-ConnectTimeout 1
-EOF
```
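Once virt-install returns, the guest is reachable under the vm0 name that the script added to /etc/hosts, and the 1-second ConnectTimeout written to ~/.ssh/config makes a polling loop cheap. A sketch of how a later step can wait for it:

```bash
# Wait until cloud-init has brought sshd up, then poke the guest.
until ssh zfs@vm0 true 2>/dev/null; do sleep 1; done
ssh zfs@vm0 uname -a
```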
232
.github/workflows/scripts/qemu-3-deps-vm.sh
vendored
232
.github/workflows/scripts/qemu-3-deps-vm.sh
vendored
@ -1,232 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
|
|
||||||
######################################################################
|
|
||||||
# 3) install dependencies for compiling and loading
|
|
||||||
#
|
|
||||||
# $1: OS name (like 'fedora41')
|
|
||||||
######################################################################
|
|
||||||
|
|
||||||
set -eu
|
|
||||||
|
|
||||||
function archlinux() {
|
|
||||||
echo "##[group]Running pacman -Syu"
|
|
||||||
sudo btrfs filesystem resize max /
|
|
||||||
sudo pacman -Syu --noconfirm
|
|
||||||
echo "##[endgroup]"
|
|
||||||
|
|
||||||
echo "##[group]Install Development Tools"
|
|
||||||
sudo pacman -Sy --noconfirm base-devel bc cpio cryptsetup dhclient dkms \
|
|
||||||
fakeroot fio gdb inetutils jq less linux linux-headers lsscsi nfs-utils \
|
|
||||||
parted pax perf python-packaging python-setuptools qemu-guest-agent ksh \
|
|
||||||
samba sysstat rng-tools rsync wget xxhash
|
|
||||||
echo "##[endgroup]"
|
|
||||||
}
|
|
||||||
|
|
||||||
function debian() {
|
|
||||||
export DEBIAN_FRONTEND="noninteractive"
|
|
||||||
|
|
||||||
echo "##[group]Running apt-get update+upgrade"
|
|
||||||
sudo apt-get update -y
|
|
||||||
sudo apt-get upgrade -y
|
|
||||||
echo "##[endgroup]"
|
|
||||||
|
|
||||||
echo "##[group]Install Development Tools"
|
|
||||||
sudo apt-get install -y \
|
|
||||||
acl alien attr autoconf bc cpio cryptsetup curl dbench dh-python dkms \
|
|
||||||
fakeroot fio gdb gdebi git ksh lcov isc-dhcp-client jq libacl1-dev \
|
|
||||||
libaio-dev libattr1-dev libblkid-dev libcurl4-openssl-dev libdevmapper-dev \
|
|
||||||
libelf-dev libffi-dev libmount-dev libpam0g-dev libselinux-dev libssl-dev \
|
|
||||||
libtool libtool-bin libudev-dev libunwind-dev linux-headers-$(uname -r) \
|
|
||||||
lsscsi nfs-kernel-server pamtester parted python3 python3-all-dev \
|
|
||||||
python3-cffi python3-dev python3-distlib python3-packaging \
|
|
||||||
python3-setuptools python3-sphinx qemu-guest-agent rng-tools rpm2cpio \
|
|
||||||
rsync samba sysstat uuid-dev watchdog wget xfslibs-dev xxhash zlib1g-dev
|
|
||||||
echo "##[endgroup]"
|
|
||||||
}
|
|
||||||
|
|
||||||
function freebsd() {
|
|
||||||
export ASSUME_ALWAYS_YES="YES"
|
|
||||||
|
|
||||||
echo "##[group]Install Development Tools"
|
|
||||||
sudo pkg install -y autoconf automake autotools base64 checkbashisms fio \
|
|
||||||
gdb gettext gettext-runtime git gmake gsed jq ksh93 lcov libtool lscpu \
|
|
||||||
pkgconf python python3 pamtester pamtester qemu-guest-agent rsync xxhash
|
|
||||||
sudo pkg install -xy \
|
|
||||||
'^samba4[[:digit:]]+$' \
|
|
||||||
'^py3[[:digit:]]+-cffi$' \
|
|
||||||
'^py3[[:digit:]]+-sysctl$' \
|
|
||||||
'^py3[[:digit:]]+-packaging$'
|
|
||||||
echo "##[endgroup]"
|
|
||||||
}
|
|
||||||
|
|
||||||
# common packages for: almalinux, centos, redhat
|
|
||||||
function rhel() {
|
|
||||||
echo "##[group]Running dnf update"
|
|
||||||
echo "max_parallel_downloads=10" | sudo -E tee -a /etc/dnf/dnf.conf
|
|
||||||
sudo dnf clean all
|
|
||||||
sudo dnf update -y --setopt=fastestmirror=1 --refresh
|
|
||||||
echo "##[endgroup]"
|
|
||||||
|
|
||||||
echo "##[group]Install Development Tools"
|
|
||||||
|
|
||||||
# Alma wants "Development Tools", Fedora 41 wants "development-tools"
|
|
||||||
if ! sudo dnf group install -y "Development Tools" ; then
|
|
||||||
echo "Trying 'development-tools' instead of 'Development Tools'"
|
|
||||||
sudo dnf group install -y development-tools
|
|
||||||
fi
|
|
||||||
|
|
||||||
sudo dnf install -y \
|
|
||||||
acl attr bc bzip2 cryptsetup curl dbench dkms elfutils-libelf-devel fio \
|
|
||||||
gdb git jq kernel-rpm-macros ksh libacl-devel libaio-devel \
|
|
||||||
libargon2-devel libattr-devel libblkid-devel libcurl-devel libffi-devel \
|
|
||||||
ncompress libselinux-devel libtirpc-devel libtool libudev-devel \
|
|
||||||
libuuid-devel lsscsi mdadm nfs-utils openssl-devel pam-devel pamtester \
|
|
||||||
parted perf python3 python3-cffi python3-devel python3-packaging \
|
|
||||||
kernel-devel python3-setuptools qemu-guest-agent rng-tools rpcgen \
|
|
||||||
rpm-build rsync samba sysstat systemd watchdog wget xfsprogs-devel xxhash \
|
|
||||||
zlib-devel
|
|
||||||
echo "##[endgroup]"
|
|
||||||
}
|
|
||||||
|
|
||||||
function tumbleweed() {
|
|
||||||
echo "##[group]Running zypper is TODO!"
|
|
||||||
sleep 23456
|
|
||||||
echo "##[endgroup]"
|
|
||||||
}
|
|
||||||
|
|
||||||
# Install dependencies
|
|
||||||
case "$1" in
|
|
||||||
almalinux8)
|
|
||||||
echo "##[group]Enable epel and powertools repositories"
|
|
||||||
sudo dnf config-manager -y --set-enabled powertools
|
|
||||||
sudo dnf install -y epel-release
|
|
||||||
echo "##[endgroup]"
|
|
||||||
rhel
|
|
||||||
echo "##[group]Install kernel-abi-whitelists"
|
|
||||||
sudo dnf install -y kernel-abi-whitelists
|
|
||||||
echo "##[endgroup]"
|
|
||||||
;;
|
|
||||||
almalinux9|centos-stream9|centos-stream10)
|
|
||||||
echo "##[group]Enable epel and crb repositories"
|
|
||||||
sudo dnf config-manager -y --set-enabled crb
|
|
||||||
sudo dnf install -y epel-release
|
|
||||||
echo "##[endgroup]"
|
|
||||||
rhel
|
|
||||||
echo "##[group]Install kernel-abi-stablelists"
|
|
||||||
sudo dnf install -y kernel-abi-stablelists
|
|
||||||
echo "##[endgroup]"
|
|
||||||
;;
|
|
||||||
archlinux)
|
|
||||||
archlinux
|
|
||||||
;;
|
|
||||||
debian*)
|
|
||||||
echo 'debconf debconf/frontend select Noninteractive' | sudo debconf-set-selections
|
|
||||||
debian
|
|
||||||
echo "##[group]Install Debian specific"
|
|
||||||
sudo apt-get install -yq linux-perf dh-sequence-dkms
|
|
||||||
echo "##[endgroup]"
|
|
||||||
;;
|
|
||||||
fedora*)
|
|
||||||
rhel
|
|
||||||
sudo dnf install -y libunwind-devel
|
|
||||||
;;
|
|
||||||
freebsd*)
|
|
||||||
freebsd
|
|
||||||
;;
|
|
||||||
tumbleweed)
|
|
||||||
tumbleweed
|
|
||||||
;;
|
|
||||||
ubuntu*)
|
|
||||||
debian
|
|
||||||
echo "##[group]Install Ubuntu specific"
|
|
||||||
sudo apt-get install -yq linux-tools-common libtirpc-dev \
|
|
||||||
linux-modules-extra-$(uname -r)
|
|
||||||
if [ "$1" != "ubuntu20" ]; then
|
|
||||||
sudo apt-get install -yq dh-sequence-dkms
|
|
||||||
fi
|
|
||||||
echo "##[endgroup]"
|
|
||||||
echo "##[group]Delete Ubuntu OpenZFS modules"
|
|
||||||
for i in $(find /lib/modules -name zfs -type d); do sudo rm -rvf $i; done
|
|
||||||
echo "##[endgroup]"
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
|
|
||||||
# This script is used for checkstyle + zloop deps also.
|
|
||||||
# Install only the needed packages and exit - when used this way.
|
|
||||||
test -z "${ONLY_DEPS:-}" || exit 0
|
|
||||||

# Start services
echo "##[group]Enable services"
case "$1" in
  freebsd*)
    # add virtio things
    echo 'virtio_load="YES"' | sudo -E tee -a /boot/loader.conf
    for i in balloon blk console random scsi; do
      echo "virtio_${i}_load=\"YES\"" | sudo -E tee -a /boot/loader.conf
    done
    echo "fdescfs /dev/fd fdescfs rw 0 0" | sudo -E tee -a /etc/fstab
    sudo -E mount /dev/fd
    sudo -E touch /etc/zfs/exports
    sudo -E sysrc mountd_flags="/etc/zfs/exports"
    echo '[global]' | sudo -E tee /usr/local/etc/smb4.conf >/dev/null
    sudo -E service nfsd enable
    sudo -E service qemu-guest-agent enable
    sudo -E service samba_server enable
    ;;
  debian*|ubuntu*)
    sudo -E systemctl enable nfs-kernel-server
    sudo -E systemctl enable qemu-guest-agent
    sudo -E systemctl enable smbd
    ;;
  *)
    # All other linux distros
    sudo -E systemctl enable nfs-server
    sudo -E systemctl enable qemu-guest-agent
    sudo -E systemctl enable smb
    ;;
esac
echo "##[endgroup]"

# Setup Kernel cmdline
CMDLINE="console=tty0 console=ttyS0,115200n8"
CMDLINE="$CMDLINE selinux=0"
CMDLINE="$CMDLINE random.trust_cpu=on"
CMDLINE="$CMDLINE no_timer_check"
case "$1" in
  almalinux*|centos*|fedora*)
    GRUB_CFG="/boot/grub2/grub.cfg"
    GRUB_MKCONFIG="grub2-mkconfig"
    CMDLINE="$CMDLINE biosdevname=0 net.ifnames=0"
    echo 'GRUB_SERIAL_COMMAND="serial --speed=115200"' \
      | sudo tee -a /etc/default/grub >/dev/null
    ;;
  ubuntu24)
    GRUB_CFG="/boot/grub/grub.cfg"
    GRUB_MKCONFIG="grub-mkconfig"
    echo 'GRUB_DISABLE_OS_PROBER="false"' \
      | sudo tee -a /etc/default/grub >/dev/null
    ;;
  *)
    GRUB_CFG="/boot/grub/grub.cfg"
    GRUB_MKCONFIG="grub-mkconfig"
    ;;
esac

case "$1" in
  archlinux|freebsd*)
    true
    ;;
  *)
    echo "##[group]Edit kernel cmdline"
    sudo sed -i -e '/^GRUB_CMDLINE_LINUX/d' /etc/default/grub || true
    echo "GRUB_CMDLINE_LINUX=\"$CMDLINE\"" \
      | sudo tee -a /etc/default/grub >/dev/null
    sudo $GRUB_MKCONFIG -o $GRUB_CFG
    echo "##[endgroup]"
    ;;
esac
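
# Example (derived from the CMDLINE assignments above, not captured output):
# on a distro that takes the default branch, /etc/default/grub ends up with
# a line like:
#
#   GRUB_CMDLINE_LINUX="console=tty0 console=ttyS0,115200n8 selinux=0 random.trust_cpu=on no_timer_check"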

# reset cloud-init configuration and poweroff
sudo cloud-init clean --logs
sleep 2 && sudo poweroff &
exit 0
15  .github/workflows/scripts/qemu-3-deps.sh  vendored
@@ -1,15 +0,0 @@
######################################################################
# 3) Wait for VM to boot from previous step and launch dependencies
#    script on it.
#
# $1: OS name (like 'fedora41')
######################################################################

.github/workflows/scripts/qemu-wait-for-vm.sh vm0
scp .github/workflows/scripts/qemu-3-deps-vm.sh zfs@vm0:qemu-3-deps-vm.sh
PID=`pidof /usr/bin/qemu-system-x86_64`
ssh zfs@vm0 '$HOME/qemu-3-deps-vm.sh' $1
# wait for poweroff to succeed
tail --pid=$PID -f /dev/null
sleep 5 # avoid this: "error: Domain is already active"
rm -f $HOME/.ssh/known_hosts
379  .github/workflows/scripts/qemu-4-build-vm.sh  vendored
@@ -1,379 +0,0 @@
#!/usr/bin/env bash

######################################################################
# 4) configure and build openzfs modules. This is run on the VMs.
#
# Usage:
#
#   qemu-4-build-vm.sh OS [--enable-debug][--dkms][--poweroff]
#                         [--release][--repo][--tarball]
#
#   OS:             OS name like 'fedora41'
#   --enable-debug: Build RPMs with '--enable-debug' (for testing)
#   --dkms:         Build DKMS RPMs as well
#   --poweroff:     Power-off the VM after building
#   --release:      Build zfs-release*.rpm as well
#   --repo:         After building everything, copy RPMs into /tmp/repo
#                   in the ZFS RPM repository file structure. Also
#                   copy tarballs if they were built.
#   --tarball:      Also build a tarball of the ZFS source
######################################################################
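
# Example (hypothetical invocation, mirroring how zfs-qemu-packages.yml
# drives the build with --repo --release --dkms --tarball):
#
#   ./qemu-4-build-vm.sh fedora41 --repo --release --dkms --tarball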

ENABLE_DEBUG=""
DKMS=""
POWEROFF=""
RELEASE=""
REPO=""
TARBALL=""
while [[ $# -gt 0 ]]; do
  case $1 in
    --enable-debug)
      ENABLE_DEBUG=1
      shift
      ;;
    --dkms)
      DKMS=1
      shift
      ;;
    --poweroff)
      POWEROFF=1
      shift
      ;;
    --release)
      RELEASE=1
      shift
      ;;
    --repo)
      REPO=1
      shift
      ;;
    --tarball)
      TARBALL=1
      shift
      ;;
    *)
      OS=$1
      shift
      ;;
  esac
done

set -eu
function run() {
  LOG="/var/tmp/build-stderr.txt"
  echo "****************************************************"
  echo "$(date) ($*)"
  echo "****************************************************"
  ($@ || echo $? > /tmp/rv) 3>&1 1>&2 2>&3 | stdbuf -eL -oL tee -a $LOG
  if [ -f /tmp/rv ]; then
    RV=$(cat /tmp/rv)
    echo "****************************************************"
    echo "exit with value=$RV ($*)"
    echo "****************************************************"
    echo 1 > /var/tmp/build-exitcode.txt
    exit $RV
  fi
}
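
# The redirections in run() swap stdout and stderr around the pipe, so only
# the command's stderr lands in $LOG while stdout passes through unchanged.
# A minimal sketch of the same idiom ('somecmd' is a placeholder):
#
#   (somecmd) 3>&1 1>&2 2>&3 | tee -a stderr.log
#
# fd3 saves the pipe, fd1 is pointed at the old stderr, and fd2 is pointed
# at the saved pipe, i.e. at tee.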

# Look at the RPMs in the current directory and copy/move them to
# /tmp/repo, using the directory structure we use for the ZFS RPM repos.
#
# For example:
#   /tmp/repo/epel-testing/9.5
#   /tmp/repo/epel-testing/9.5/SRPMS
#   /tmp/repo/epel-testing/9.5/SRPMS/zfs-2.3.99-1.el9.src.rpm
#   /tmp/repo/epel-testing/9.5/SRPMS/zfs-kmod-2.3.99-1.el9.src.rpm
#   /tmp/repo/epel-testing/9.5/kmod
#   /tmp/repo/epel-testing/9.5/kmod/x86_64
#   /tmp/repo/epel-testing/9.5/kmod/x86_64/debug
#   /tmp/repo/epel-testing/9.5/kmod/x86_64/debug/kmod-zfs-debuginfo-2.3.99-1.el9.x86_64.rpm
#   /tmp/repo/epel-testing/9.5/kmod/x86_64/debug/libnvpair3-debuginfo-2.3.99-1.el9.x86_64.rpm
#   /tmp/repo/epel-testing/9.5/kmod/x86_64/debug/libuutil3-debuginfo-2.3.99-1.el9.x86_64.rpm
#   ...
function copy_rpms_to_repo {
  # Pick a RPM to query. It doesn't matter which one - we just want to extract
  # the 'Build Host' value from it.
  rpm=$(ls zfs-*.rpm | head -n 1)

  # Get zfs version '2.2.99'
  zfs_ver=$(rpm -qpi $rpm | awk '/Version/{print $3}')

  # Get "2.1" or "2.2"
  zfs_major=$(echo $zfs_ver | grep -Eo '[0-9]+\.[0-9]+')

  # Get 'almalinux9.5' or 'fedora41' type string
  build_host=$(rpm -qpi $rpm | awk '/Build Host/{print $4}')

  # Get '9.5' or '41' OS version
  os_ver=$(echo $build_host | grep -Eo '[0-9\.]+$')

  # Our ZFS version and OS name will determine which repo the RPMs
  # will go in (regular or testing). Fedora always gets the newest
  # releases, and Alma gets the older releases.
  case $build_host in
    almalinux*)
      case $zfs_major in
        2.2)
          d="epel"
          ;;
        *)
          d="epel-testing"
          ;;
      esac
      ;;
    fedora*)
      d="fedora"
      ;;
  esac

  prefix=/tmp/repo
  dst="$prefix/$d/$os_ver"

  # Special case: move zfs-release*.rpm out of the way first (if we built them).
  # This will make filtering the other RPMs easier.
  mkdir -p $dst
  mv zfs-release*.rpm $dst || true

  # Copy source RPMs
  mkdir -p $dst/SRPMS
  cp $(ls *.src.rpm) $dst/SRPMS/

  if [[ "$build_host" =~ "almalinux" ]] ; then
    # Copy kmods+userspace
    mkdir -p $dst/kmod/x86_64/debug
    cp $(ls *.rpm | grep -Ev 'src.rpm|dkms|debuginfo') $dst/kmod/x86_64
    cp *debuginfo*.rpm $dst/kmod/x86_64/debug
  fi

  if [ -n "$DKMS" ] ; then
    # Copy dkms+userspace
    mkdir -p $dst/x86_64
    cp $(ls *.rpm | grep -Ev 'src.rpm|kmod|debuginfo') $dst/x86_64
  fi

  # Copy debug
  mkdir -p $dst/x86_64/debug
  cp $(ls *debuginfo*.rpm | grep -v kmod) $dst/x86_64/debug
}
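
# Sketch of the fields copy_rpms_to_repo scrapes (values illustrative, not
# captured from a real build):
#
#   $ rpm -qpi zfs-2.3.99-1.el9.x86_64.rpm | grep -E 'Version|Build Host'
#   Version     : 2.3.99
#   Build Host  : almalinux9.5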

function freebsd() {
  extra="${1:-}"

  export MAKE="gmake"
  echo "##[group]Autogen.sh"
  run ./autogen.sh
  echo "##[endgroup]"

  echo "##[group]Configure"
  run ./configure \
    --prefix=/usr/local \
    --with-libintl-prefix=/usr/local \
    --enable-pyzfs \
    --enable-debuginfo $extra
  echo "##[endgroup]"

  echo "##[group]Build"
  run gmake -j$(sysctl -n hw.ncpu)
  echo "##[endgroup]"

  echo "##[group]Install"
  run sudo gmake install
  echo "##[endgroup]"
}

function linux() {
  extra="${1:-}"

  echo "##[group]Autogen.sh"
  run ./autogen.sh
  echo "##[endgroup]"

  echo "##[group]Configure"
  run ./configure \
    --prefix=/usr \
    --enable-pyzfs \
    --enable-debuginfo $extra
  echo "##[endgroup]"

  echo "##[group]Build"
  run make -j$(nproc)
  echo "##[endgroup]"

  echo "##[group]Install"
  run sudo make install
  echo "##[endgroup]"
}
function rpm_build_and_install() {
  extra="${1:-}"

  # Build RPMs with XZ compression by default (since gzip decompression is slow)
  echo "%_binary_payload w7.xzdio" >> ~/.rpmmacros

  echo "##[group]Autogen.sh"
  run ./autogen.sh
  echo "##[endgroup]"

  echo "##[group]Configure"
  run ./configure --enable-debuginfo $extra
  echo "##[endgroup]"

  echo "##[group]Build"
  run make pkg-kmod pkg-utils
  echo "##[endgroup]"

  if [ -n "$DKMS" ] ; then
    echo "##[group]DKMS"
    make rpm-dkms
    echo "##[endgroup]"
  fi

  if [ -n "$REPO" ] ; then
    echo "Skipping install since we're only building RPMs and nothing else"
  else
    echo "##[group]Install"
    run sudo dnf -y --nobest install $(ls *.rpm | grep -Ev 'dkms|src.rpm')
    echo "##[endgroup]"
  fi

  # Optionally build the zfs-release.*.rpm
  if [ -n "$RELEASE" ] ; then
    echo "##[group]Release"
    pushd ~
    sudo dnf -y install rpm-build || true
    # Check out a sparse copy of zfsonlinux.github.com.git so we don't get
    # all the binaries. We just need a few kilobytes of files to build RPMs.
    git clone --depth 1 --no-checkout \
      https://github.com/zfsonlinux/zfsonlinux.github.com.git

    cd zfsonlinux.github.com
    git sparse-checkout set zfs-release
    git checkout
    cd zfs-release

    mkdir -p ~/rpmbuild/{BUILDROOT,SPECS,RPMS,SRPMS,SOURCES,BUILD}
    cp RPM-GPG-KEY-openzfs* *.repo ~/rpmbuild/SOURCES
    cp zfs-release.spec ~/rpmbuild/SPECS/
    rpmbuild -ba ~/rpmbuild/SPECS/zfs-release.spec

    # ZFS release RPMs are built. Copy them to the ~/zfs directory just to
    # keep all the RPMs in the same place.
    cp ~/rpmbuild/RPMS/noarch/*.rpm ~/zfs
    cp ~/rpmbuild/SRPMS/*.rpm ~/zfs

    popd
    rm -fr ~/rpmbuild
    echo "##[endgroup]"
  fi

  if [ -n "$REPO" ] ; then
    echo "##[group]Repo"
    copy_rpms_to_repo
    echo "##[endgroup]"
  fi
}
function deb_build_and_install() {
  extra="${1:-}"

  echo "##[group]Autogen.sh"
  run ./autogen.sh
  echo "##[endgroup]"

  echo "##[group]Configure"
  run ./configure \
    --prefix=/usr \
    --enable-pyzfs \
    --enable-debuginfo $extra
  echo "##[endgroup]"

  echo "##[group]Build"
  run make native-deb-kmod native-deb-utils
  echo "##[endgroup]"

  echo "##[group]Install"
  # Do kmod install. Note that when you build the native debs, the
  # packages themselves are placed in parent directory '../' rather than
  # in the source directory like the rpms are.
  run sudo apt-get -y install $(find ../ | grep -E '\.deb$' \
    | grep -Ev 'dkms|dracut')
  echo "##[endgroup]"
}
function build_tarball {
  if [ -n "$REPO" ] ; then
    ./autogen.sh
    ./configure --with-config=srpm
    make dist
    mkdir -p /tmp/repo/releases
    # The tarball name is based off of 'Version' field in the META file.
    mv *.tar.gz /tmp/repo/releases/
  fi
}
# Debug: show kernel cmdline
if [ -f /proc/cmdline ] ; then
  cat /proc/cmdline || true
fi

# Set our hostname to our OS name and version number. Specifically, we set the
# major and minor number so that when we query the Build Host field in the RPMs
# we build, we can see what specific version of Fedora/Almalinux we were using
# to build them. This is helpful for matching up KMOD versions.
#
# Examples:
#
#   rhel8.10
#   almalinux9.5
#   fedora40
source /etc/os-release
sudo hostname "$ID$VERSION_ID"

# save some sysinfo
uname -a > /var/tmp/uname.txt

cd $HOME/zfs
export PATH="$PATH:/sbin:/usr/sbin:/usr/local/sbin"

extra=""
if [ -n "$ENABLE_DEBUG" ] ; then
  extra="--enable-debug"
fi

# build
case "$OS" in
  freebsd*)
    freebsd "$extra"
    ;;
  alma*|centos*)
    rpm_build_and_install "--with-spec=redhat $extra"
    ;;
  fedora*)
    rpm_build_and_install "$extra"

    # Historically, we've always built the release tarballs on Fedora, since
    # there was one instance long ago where we built them on CentOS 7, and they
    # didn't work correctly for everyone.
    if [ -n "$TARBALL" ] ; then
      build_tarball
    fi
    ;;
  debian*|ubuntu*)
    deb_build_and_install "$extra"
    ;;
  *)
    linux "$extra"
    ;;
esac

# building the zfs module was ok
echo 0 > /var/tmp/build-exitcode.txt

# reset cloud-init configuration and poweroff
if [ -n "$POWEROFF" ] ; then
  sudo cloud-init clean --logs
  sync && sleep 2 && sudo poweroff &
fi
exit 0
11  .github/workflows/scripts/qemu-4-build.sh  vendored
@@ -1,11 +0,0 @@
#!/usr/bin/env bash

######################################################################
# 4) configure and build openzfs modules
######################################################################
echo "Build modules in QEMU machine"

# Bring our VM back up and copy over ZFS source
.github/workflows/scripts/qemu-prepare-for-build.sh

ssh zfs@vm0 '$HOME/zfs/.github/workflows/scripts/qemu-4-build-vm.sh' $@
124  .github/workflows/scripts/qemu-5-setup.sh  vendored
@@ -1,124 +0,0 @@
#!/usr/bin/env bash

######################################################################
# 5) start test machines and load openzfs module
######################################################################

set -eu

# read our defined variables
source /var/tmp/env.txt

# wait for poweroff to succeed
PID=$(pidof /usr/bin/qemu-system-x86_64)
tail --pid=$PID -f /dev/null
sudo virsh undefine openzfs

# default values per test vm:
VMs=2
CPU=2

# cpu pinning
CPUSET=("0,1" "2,3")

case "$OS" in
  freebsd*)
    # FreeBSD can't be optimized via ksmtuned
    RAM=6
    ;;
  *)
    # Linux can be optimized via ksmtuned
    RAM=8
    ;;
esac

# this can be different for each distro
echo "VMs=$VMs" >> $ENV

# create a snapshot we can clone later
sudo zfs snapshot zpool/openzfs@now

# set up the testing VMs
PUBKEY=$(cat ~/.ssh/id_ed25519.pub)
for i in $(seq 1 $VMs); do

  echo "Creating disk for vm$i..."
  DISK="/dev/zvol/zpool/vm$i"
  FORMAT="raw"
  sudo zfs clone zpool/openzfs@now zpool/vm$i
  sudo zfs create -ps -b 64k -V 80g zpool/vm$i-2

  cat <<EOF > /tmp/user-data
#cloud-config

fqdn: vm$i

users:
  - name: root
    shell: $BASH
  - name: zfs
    sudo: ALL=(ALL) NOPASSWD:ALL
    shell: $BASH
    ssh_authorized_keys:
      - $PUBKEY

growpart:
  mode: auto
  devices: ['/']
  ignore_growroot_disabled: false
EOF

  sudo virsh net-update default add ip-dhcp-host \
    "<host mac='52:54:00:83:79:0$i' ip='192.168.122.1$i'/>" --live --config

  sudo virt-install \
    --os-variant $OSv \
    --name "vm$i" \
    --cpu host-passthrough \
    --virt-type=kvm --hvm \
    --vcpus=$CPU,sockets=1 \
    --cpuset=${CPUSET[$((i-1))]} \
    --memory $((1024*RAM)) \
    --memballoon model=virtio \
    --graphics none \
    --cloud-init user-data=/tmp/user-data \
    --network bridge=virbr0,model=$NIC,mac="52:54:00:83:79:0$i" \
    --disk $DISK,bus=virtio,cache=none,format=$FORMAT,driver.discard=unmap \
    --disk $DISK-2,bus=virtio,cache=none,format=$FORMAT,driver.discard=unmap \
    --import --noautoconsole >/dev/null
done

# check the memory state from time to time
cat <<EOF > cronjob.sh
# $OS
exec 1>>/var/tmp/stats.txt
exec 2>&1
echo "*******************************************************"
date
uptime
free -m
df -h /mnt/tests
zfs list
EOF
sudo chmod +x cronjob.sh
sudo mv -f cronjob.sh /root/cronjob.sh
echo '*/5 * * * * /root/cronjob.sh' > crontab.txt
sudo crontab crontab.txt
rm crontab.txt

# check if the machines are okay
echo "Waiting for the VMs to come up... (${VMs}x CPU=$CPU RAM=$RAM)"
for i in $(seq 1 $VMs); do
  .github/workflows/scripts/qemu-wait-for-vm.sh vm$i
done
echo "All $VMs VMs are up now."

# Save each VM's serial output (ttyS0) to $RESPATH/vm$i/console.txt
# - ttyS0 on the VM corresponds to a local /dev/pty/N entry
# - use 'virsh ttyconsole' to look up the /dev/pty/N entry
for i in $(seq 1 $VMs); do
  mkdir -p $RESPATH/vm$i
  read "pty" <<< $(sudo virsh ttyconsole vm$i)
  sudo nohup bash -c "cat $pty > $RESPATH/vm$i/console.txt" &
done
echo "Console logging for ${VMs}x $OS started."
105  .github/workflows/scripts/qemu-6-tests.sh  vendored
@@ -1,105 +0,0 @@
#!/usr/bin/env bash

######################################################################
# 6) load openzfs module and run the tests
#
# called on runner:  qemu-6-tests.sh
# called on qemu-vm: qemu-6-tests.sh $OS $2/$3
######################################################################

set -eu

function prefix() {
  ID="$1"
  LINE="$2"
  CURRENT=$(date +%s)
  TSSTART=$(cat /tmp/tsstart)
  DIFF=$((CURRENT-TSSTART))
  H=$((DIFF/3600))
  DIFF=$((DIFF-(H*3600)))
  M=$((DIFF/60))
  S=$((DIFF-(M*60)))

  CTR=$(cat /tmp/ctr)
  echo $LINE | grep -q "^Test[: ]" && CTR=$((CTR+1)) && echo $CTR > /tmp/ctr

  BASE="$HOME/work/zfs/zfs"
  COLOR="$BASE/scripts/zfs-tests-color.sh"
  CLINE=$(echo $LINE | grep "^Test[ :]" | sed -e 's|/usr/local|/usr|g' \
    | sed -e 's| /usr/share/zfs/zfs-tests/tests/| |g' | $COLOR)
  if [ -z "$CLINE" ]; then
    printf "vm${ID}: %s\n" "$LINE"
  else
    # [vm2: 00:15:54  256] Test: functional/checksum/setup (run as root) [00:00] [PASS]
    printf "[vm${ID}: %02d:%02d:%02d %4d] %s\n" \
      "$H" "$M" "$S" "$CTR" "$CLINE"
  fi
}

# called directly on the runner
if [ -z ${1:-} ]; then
  cd "/var/tmp"
  source env.txt
  SSH=$(which ssh)
  TESTS='$HOME/zfs/.github/workflows/scripts/qemu-6-tests.sh'
  echo 0 > /tmp/ctr
  date "+%s" > /tmp/tsstart

  for i in $(seq 1 $VMs); do
    IP="192.168.122.1$i"
    daemonize -c /var/tmp -p vm${i}.pid -o vm${i}log.txt -- \
      $SSH zfs@$IP $TESTS $OS $i $VMs $CI_TYPE
    # handle output line by line and add an info prefix
    stdbuf -oL tail -fq vm${i}log.txt \
      | while read -r line; do prefix "$i" "$line"; done &
    echo $! > vm${i}log.pid
    # don't mix up the initial --- Configuration --- part
    sleep 0.13
  done

  # wait for all VMs to finish
  for i in $(seq 1 $VMs); do
    tail --pid=$(cat vm${i}.pid) -f /dev/null
    pid=$(cat vm${i}log.pid)
    rm -f vm${i}log.pid
    kill $pid
  done

  exit 0
fi

# this part runs inside the qemu vm
export PATH="$PATH:/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/sbin:/usr/local/bin"
case "$1" in
  freebsd*)
    sudo kldstat -n zfs 2>/dev/null && sudo kldunload zfs
    sudo -E ./zfs/scripts/zfs.sh
    TDIR="/usr/local/share/zfs"
    ;;
  *)
    # use xfs @ /var/tmp for all distros
    sudo mv -f /var/tmp/*.txt /tmp
    sudo mkfs.xfs -fq /dev/vdb
    sudo mount -o noatime /dev/vdb /var/tmp
    sudo chmod 1777 /var/tmp
    sudo mv -f /tmp/*.txt /var/tmp
    sudo -E modprobe zfs
    TDIR="/usr/share/zfs"
    ;;
esac

# run the functional tests and save the exit code
cd /var/tmp
TAGS=$2/$3
if [ "$4" == "quick" ]; then
  export RUNFILES="sanity.run"
fi
sudo dmesg -c > dmesg-prerun.txt
mount > mount.txt
df -h > df-prerun.txt
$TDIR/zfs-tests.sh -vK -s 3GB -T $TAGS
RV=$?
df -h > df-postrun.txt
echo $RV > tests-exitcode.txt
sync
exit 0
124  .github/workflows/scripts/qemu-7-prepare.sh  vendored
@@ -1,124 +0,0 @@
#!/usr/bin/env bash

######################################################################
# 7) prepare output of the results
# - this script pre-creates all needed logfiles for the later summary
######################################################################

set -eu

# read our defined variables
cd /var/tmp
source env.txt

mkdir -p $RESPATH

# check if building the module has failed
if [ -z ${VMs:-} ]; then
  cd $RESPATH
  echo ":exclamation: ZFS module didn't build successfully :exclamation:" \
    | tee summary.txt | tee /tmp/summary.txt
  cp /var/tmp/*.txt .
  tar cf /tmp/qemu-$OS.tar -C $RESPATH -h . || true
  exit 0
fi

# build was okay
BASE="$HOME/work/zfs/zfs"
MERGE="$BASE/.github/workflows/scripts/merge_summary.awk"

# collect the test result files (the VMs should be there)
for i in $(seq 1 $VMs); do
  rsync -arL zfs@vm$i:$RESPATH/current $RESPATH/vm$i || true
  scp zfs@vm$i:"/var/tmp/*.txt" $RESPATH/vm$i || true
  scp zfs@vm$i:"/var/tmp/*.rpm" $RESPATH/vm$i || true
done
cp -f /var/tmp/*.txt $RESPATH || true
cd $RESPATH

# prepare result files for the summary
for i in $(seq 1 $VMs); do
  file="vm$i/build-stderr.txt"
  test -s $file && mv -f $file build-stderr.txt

  file="vm$i/build-exitcode.txt"
  test -s $file && mv -f $file build-exitcode.txt

  file="vm$i/uname.txt"
  test -s $file && mv -f $file uname.txt

  file="vm$i/tests-exitcode.txt"
  if [ ! -s $file ]; then
    # XXX - add some checks for kernel panics here
    # tail -n 80 vm$i/console.txt | grep XYZ
    echo 1 > $file
  fi
  rv=$(cat vm$i/tests-exitcode.txt)
  test $rv != 0 && touch /tmp/have_failed_tests

  file="vm$i/current/log"
  if [ -s $file ]; then
    cat $file >> log
    awk '/\[FAIL\]|\[KILLED\]/{ show=1; print; next; }; \
      /\[SKIP\]|\[PASS\]/{ show=0; } show' \
      $file > /tmp/vm${i}dbg.txt
  fi

  file="vm${i}log.txt"
  fileC="/tmp/vm${i}log.txt"
  if [ -s $file ]; then
    cat $file >> summary
    cat $file | $BASE/scripts/zfs-tests-color.sh > $fileC
  fi
done

# create the summary of the tests
if [ -s summary ]; then
  $MERGE summary | grep -v '^/' > summary.txt
  $MERGE summary | $BASE/scripts/zfs-tests-color.sh > /tmp/summary.txt
  rm -f summary
else
  touch summary.txt /tmp/summary.txt
fi

# create a file for debugging
if [ -s log ]; then
  awk '/\[FAIL\]|\[KILLED\]/{ show=1; print; next; }; \
    /\[SKIP\]|\[PASS\]/{ show=0; } show' \
    log > summary-failure-logs.txt
  rm -f log
else
  touch summary-failure-logs.txt
fi

# create a debug overview for the failed tests
cat summary.txt \
  | awk '/\(expected PASS\)/{ if ($1!="SKIP") print $2; next; } show' \
  | while read t; do
    cat summary-failure-logs.txt \
      | awk '$0~/Test[: ]/{ show=0; } $0~v{ show=1; } show' v="$t" \
      > /tmp/fail.txt
    SIZE=$(stat --printf="%s" /tmp/fail.txt)
    SIZE=$((SIZE/1024))
    # Test Summary:
    echo "##[group]$t ($SIZE KiB)" >> /tmp/failed.txt
    cat /tmp/fail.txt | $BASE/scripts/zfs-tests-color.sh >> /tmp/failed.txt
    echo "##[endgroup]" >> /tmp/failed.txt
    # Job Summary:
    echo -e "\n<details>\n<summary>$t ($SIZE KiB)</summary><pre>" >> failed.txt
    cat /tmp/fail.txt >> failed.txt
    echo "</pre></details>" >> failed.txt
  done

if [ -e /tmp/have_failed_tests ]; then
  echo ":warning: Some tests failed!" >> failed.txt
else
  echo ":thumbsup: All tests passed." >> failed.txt
fi

if [ ! -s uname.txt ]; then
  echo ":interrobang: Panic - where is my uname.txt?" > uname.txt
fi

# the artifact is ready now
tar cf /tmp/qemu-$OS.tar -C $RESPATH -h . || true
71  .github/workflows/scripts/qemu-8-summary.sh  vendored
@@ -1,71 +0,0 @@
#!/usr/bin/env bash

######################################################################
# 8) show colored output of results
######################################################################

set -eu

# read our defined variables
source /var/tmp/env.txt
cd $RESPATH

# helper function for showing some content with a headline
function showfile() {
  content=$(dd if=$1 bs=1024 count=400k 2>/dev/null)
  if [ -z "$2" ]; then
    group1=""
    group2=""
  else
    SIZE=$(stat --printf="%s" "$file")
    SIZE=$((SIZE/1024))
    group1="##[group]$2 ($SIZE KiB)"
    group2="##[endgroup]"
  fi
  cat <<EOF > tmp$$
$group1
$content
$group2
EOF
  cat tmp$$
  rm -f tmp$$
}

# overview
cat /tmp/summary.txt
echo ""

if [ -f /tmp/have_failed_tests -a -s /tmp/failed.txt ]; then
  echo "Debuginfo of failed tests:"
  cat /tmp/failed.txt
  echo ""
  cat /tmp/summary.txt | grep -v '^/'
  echo ""
fi

echo -e "\nFull logs for download:\n  $1\n"

for i in $(seq 1 $VMs); do
  rv=$(cat vm$i/tests-exitcode.txt)

  # green VM name on success, red on failure (ANSI color codes)
  if [ $rv = 0 ]; then
    vm=$'\e[92m'"vm$i"$'\e[0m'
  else
    vm=$'\e[1;91m'"vm$i"$'\e[0m'
  fi

  file="vm$i/dmesg-prerun.txt"
  test -s "$file" && showfile "$file" "$vm: dmesg kernel"

  file="/tmp/vm${i}log.txt"
  test -s "$file" && showfile "$file" "$vm: test results"

  file="vm$i/console.txt"
  test -s "$file" && showfile "$file" "$vm: serial console"

  file="/tmp/vm${i}dbg.txt"
  test -s "$file" && showfile "$file" "$vm: failure logfile"
done

test -f /tmp/have_failed_tests && exit 1
exit 0
57  .github/workflows/scripts/qemu-9-summary-page.sh  vendored
@@ -1,57 +0,0 @@
#!/usr/bin/env bash

######################################################################
# 9) generate the github summary page for all the tests
######################################################################

set -eu

function output() {
  echo -e $* >> "out-$logfile.md"
}

function outfile() {
  cat "$1" >> "out-$logfile.md"
}

function outfile_plain() {
  output "<pre>"
  cat "$1" >> "out-$logfile.md"
  output "</pre>"
}

function send2github() {
  test -f "$1" || exit 0
  dd if="$1" bs=1023k count=1 >> $GITHUB_STEP_SUMMARY
}

# https://docs.github.com/en/enterprise-server@3.6/actions/using-workflows/workflow-commands-for-github-actions#step-isolation-and-limits
# Job summaries are isolated between steps and each step is restricted to a maximum size of 1MiB.
# [ ] can not show all error findings here
# [x] split files into smaller ones and create additional steps

# first call, generate all summaries
if [ ! -f out-1.md ]; then
  logfile="1"
  for tarfile in Logs-functional-*/qemu-*.tar; do
    rm -rf vm* *.txt
    if [ ! -s "$tarfile" ]; then
      output "\n## Functional Tests: unknown\n"
      output ":exclamation: Tarfile $tarfile is empty :exclamation:"
      continue
    fi
    tar xf "$tarfile"
    test -s env.txt || continue
    source env.txt
    # when uname.txt is there, the other files are also ok
    test -s uname.txt || continue
    output "\n## Functional Tests: $OSNAME\n"
    outfile_plain uname.txt
    outfile_plain summary.txt
    outfile failed.txt
    logfile=$((logfile+1))
  done
  send2github out-1.md
else
  send2github out-$1.md
fi
@ -1,8 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
|
|
||||||
# Helper script to run after installing dependencies. This brings the VM back
|
|
||||||
# up and copies over the zfs source directory.
|
|
||||||
echo "Build modules in QEMU machine"
|
|
||||||
sudo virsh start openzfs
|
|
||||||
.github/workflows/scripts/qemu-wait-for-vm.sh vm0
|
|
||||||
rsync -ar $HOME/work/zfs/zfs zfs@vm0:./
|
|
90  .github/workflows/scripts/qemu-test-repo-vm.sh  vendored
@@ -1,90 +0,0 @@
#!/bin/bash
#
# Do a test install of ZFS from an external repository.
#
# USAGE:
#
#   ./qemu-test-repo-vm [URL]
#
# URL: URL to use instead of http://download.zfsonlinux.org
#      If blank, use the default repo from the zfs-release RPM.

set -e

source /etc/os-release
OS="$ID"
VERSION="$VERSION_ID"

ALTHOST=""
if [ -n "$1" ] ; then
  ALTHOST="$1"
fi

# Write summary to /tmp/repo so our artifacts scripts pick it up
mkdir /tmp/repo
SUMMARY=/tmp/repo/$OS-$VERSION-summary.txt

# $1: Repo: 'zfs' 'zfs-kmod' 'zfs-testing' 'zfs-testing-kmod'
# $2: (optional) Alternate host than 'http://download.zfsonlinux.org' to
#     install from. Blank means use the default from the zfs-release RPM.
function test_install {
  repo=$1
  host=""
  if [ -n "$2" ] ; then
    host=$2
  fi

  args="--disablerepo=zfs --enablerepo=$repo"

  # If we supplied an alternate repo URL, and have not already edited
  # zfs.repo, then update the repo file.
  if [ -n "$host" ] && ! grep -q $host /etc/yum.repos.d/zfs.repo ; then
    sudo sed -i "s;baseurl=http://download.zfsonlinux.org;baseurl=$host;g" /etc/yum.repos.d/zfs.repo
  fi

  sudo dnf -y install $args zfs zfs-test

  # Load the modules and create a simple pool as a sanity test.
  sudo /usr/share/zfs/zfs.sh -r
  truncate -s 100M /tmp/file
  sudo zpool create tank /tmp/file
  sudo zpool status

  # Print out the repo name, the rpm installed (kmod or dkms), and the repo URL
  baseurl=$(grep -A 5 "\[$repo\]" /etc/yum.repos.d/zfs.repo | awk -F'=' '/baseurl=/{print $2; exit}')
  package=$(sudo rpm -qa | grep zfs | grep -E 'kmod|dkms')

  echo "$repo $package $baseurl" >> $SUMMARY

  sudo zpool destroy tank
  sudo rm /tmp/file
  sudo dnf -y remove zfs
}

echo "##[group]Installing from repo"
# The openzfs docs are the authoritative instructions for the install. Use
# the specific version of the zfs-release RPM they recommend.
case $OS in
  almalinux*)
    url='https://raw.githubusercontent.com/openzfs/openzfs-docs/refs/heads/master/docs/Getting%20Started/RHEL-based%20distro/index.rst'
    name=$(curl -Ls $url | grep 'dnf install' | grep -Eo 'zfs-release-[0-9]+-[0-9]+')
    sudo dnf -y install https://zfsonlinux.org/epel/$name$(rpm --eval "%{dist}").noarch.rpm 2>&1
    sudo rpm -qi zfs-release
    test_install zfs $ALTHOST
    test_install zfs-kmod $ALTHOST
    test_install zfs-testing $ALTHOST
    test_install zfs-testing-kmod $ALTHOST
    ;;
  fedora*)
    url='https://raw.githubusercontent.com/openzfs/openzfs-docs/refs/heads/master/docs/Getting%20Started/Fedora/index.rst'
    name=$(curl -Ls $url | grep 'dnf install' | grep -Eo 'zfs-release-[0-9]+-[0-9]+')
    sudo dnf -y install https://zfsonlinux.org/fedora/$name$(rpm --eval "%{dist}").noarch.rpm
    test_install zfs $ALTHOST
    ;;
esac
echo "##[endgroup]"

# Write out a simple version of the summary here. Later on we will collate all
# the summaries and put them into a nice table in the workflow Summary page.
echo "Summary: "
cat $SUMMARY
10  .github/workflows/scripts/qemu-wait-for-vm.sh  vendored
@@ -1,10 +0,0 @@
#!/bin/bash
#
# Wait for a VM to boot up and become active. This is used in a number of our
# scripts.
#
# $1: VM hostname or IP address

while pidof /usr/bin/qemu-system-x86_64 >/dev/null; do
  ssh 2>/dev/null zfs@$1 "uname -a" && break
done
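
# Typical call site from the other scripts in this directory:
#
#   .github/workflows/scripts/qemu-wait-for-vm.sh vm0
#
# The loop ends either when ssh succeeds or when the qemu process exits.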
@ -1,32 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
#
|
|
||||||
# Recursively go though a directory structure and replace duplicate files with
|
|
||||||
# symlinks. This cuts down our RPM repo size by ~25%.
|
|
||||||
#
|
|
||||||
# replace-dupes-with-symlinks.sh [DIR]
|
|
||||||
#
|
|
||||||
# DIR: Directory to traverse. Defaults to current directory if not specified.
|
|
||||||
#
|
|
||||||
|
|
||||||
src="$1"
|
|
||||||
if [ -z "$src" ] ; then
|
|
||||||
src="."
|
|
||||||
fi
|
|
||||||
|
|
||||||
declare -A db
|
|
||||||
|
|
||||||
pushd "$src"
|
|
||||||
while read line ; do
|
|
||||||
bn="$(basename $line)"
|
|
||||||
if [ -z "${db[$bn]}" ] ; then
|
|
||||||
# First time this file has been seen
|
|
||||||
db[$bn]="$line"
|
|
||||||
else
|
|
||||||
if diff -b "$line" "${db[$bn]}" &>/dev/null ; then
|
|
||||||
# Files are the same, make a symlink
|
|
||||||
rm "$line"
|
|
||||||
ln -sr "${db[$bn]}" "$line"
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
done <<< "$(find . -type f)"
|
|
||||||
popd
|
|
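
# Example from the packaging workflow below, which runs this against the
# collected RPM tree before tarring it up:
#
#   .github/workflows/scripts/replace-dupes-with-symlinks.sh /tmp/repo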
93  .github/workflows/scripts/setup-dependencies.sh  vendored  Executable file
@@ -0,0 +1,93 @@
#!/usr/bin/env bash

set -eu

function prerun() {
  echo "::group::Install build dependencies"
  # remove snap things, update+upgrade will be faster afterwards
  for x in lxd core20 snapd; do sudo snap remove $x; done
  sudo apt-get purge snapd google-chrome-stable firefox
  # https://github.com/orgs/community/discussions/47863
  sudo apt-get remove grub-efi-amd64-bin grub-efi-amd64-signed shim-signed --allow-remove-essential
  sudo apt-get update
  sudo apt upgrade
  sudo xargs --arg-file=.github/workflows/build-dependencies.txt apt-get install -qq
  sudo apt-get clean
  sudo dmesg -c > /var/tmp/dmesg-prerun
  echo "::endgroup::"
}

function mod_build() {
  echo "::group::Generate debian packages"
  ./autogen.sh
  ./configure --enable-debug --enable-debuginfo --enable-asan --enable-ubsan
  make --no-print-directory --silent native-deb-utils native-deb-kmod
  mv ../*.deb .
  rm ./openzfs-zfs-dracut*.deb ./openzfs-zfs-dkms*.deb
  echo "$ImageOS-$ImageVersion" > tests/ImageOS.txt
  echo "::endgroup::"
}
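
# tests/ImageOS.txt pins the runner image the modules were built on; an
# illustrative value, assuming the "$ImageOS-$ImageVersion" format above:
#
#   ubuntu22-20230605.1
#
# mod_install() below rebuilds from scratch whenever the current runner
# image differs from this value.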

function mod_install() {
  # install the pre-built module only on the same runner image
  MOD=`cat tests/ImageOS.txt`
  if [ "$MOD" != "$ImageOS-$ImageVersion" ]; then
    rm -f *.deb
    mod_build
  fi

  echo "::group::Install and load modules"
  # don't use kernel-shipped zfs modules
  sudo sed -i.bak 's/updates/extra updates/' /etc/depmod.d/ubuntu.conf
  sudo apt-get install --fix-missing ./*.deb

  # Native Debian packages enable and start the services
  # Stop zfs-zed daemon, as it may interfere with some ZTS test cases
  sudo systemctl stop zfs-zed
  sudo depmod -a
  sudo modprobe zfs
  sudo dmesg
  sudo dmesg -c > /var/tmp/dmesg-module-load
  echo "::endgroup::"

  echo "::group::Report CPU information"
  lscpu
  cat /proc/spl/kstat/zfs/chksum_bench
  echo "::endgroup::"

  echo "::group::Reclaim and report disk space"
  # remove 4GiB of images
  sudo systemd-run docker system prune --force --all --volumes

  # remove unused software
  sudo systemd-run --wait rm -rf \
    "$AGENT_TOOLSDIRECTORY" \
    /opt/* \
    /usr/local/* \
    /usr/share/az* \
    /usr/share/dotnet \
    /usr/share/gradle* \
    /usr/share/miniconda \
    /usr/share/swift \
    /var/lib/gems \
    /var/lib/mysql \
    /var/lib/snapd

  # trim the cleaned space
  sudo fstrim /

  # disk usage afterwards
  df -h /
  echo "::endgroup::"
}

case "$1" in
  build)
    prerun
    mod_build
    ;;
  tests)
    prerun
    mod_install
    ;;
esac
24  .github/workflows/scripts/setup-functional.sh  vendored  Executable file
@@ -0,0 +1,24 @@
#!/usr/bin/env bash

set -eu

TDIR="/usr/share/zfs/zfs-tests/tests/functional"
echo -n "TODO="
case "$1" in
  part1)
    # ~1h 20m
    echo "cli_root"
    ;;
  part2)
    # ~1h
    ls $TDIR|grep '^[a-m]'|grep -v "cli_root"|xargs|tr -s ' ' ','
    ;;
  part3)
    # ~1h
    ls $TDIR|grep '^[n-qs-z]'|xargs|tr -s ' ' ','
    ;;
  part4)
    # ~1h
    ls $TDIR|grep '^r'|xargs|tr -s ' ' ','
    ;;
esac
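
# The workflow appends this script's stdout to $GITHUB_ENV, so the emitted
# line becomes an environment variable for later steps. Illustrative output
# for part1:
#
#   TODO=cli_root
#
# which zfs-tests.sh then consumes via '-T ${{ env.TODO }}'.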
124  .github/workflows/zfs-linux-tests.yml  vendored  Normal file
@@ -0,0 +1,124 @@
name: zfs-linux-tests

on:
  workflow_call:
    inputs:
      os:
        description: 'The ubuntu version: 20.04 or 22.04'
        required: true
        type: string

jobs:

  zloop:
    runs-on: ubuntu-${{ inputs.os }}
    steps:
    - uses: actions/checkout@v3
      with:
        ref: ${{ github.event.pull_request.head.sha }}
    - uses: actions/download-artifact@v3
      with:
        name: modules-${{ inputs.os }}
    - name: Install modules
      run: |
        tar xzf modules-${{ inputs.os }}.tgz
        .github/workflows/scripts/setup-dependencies.sh tests
    - name: Tests
      timeout-minutes: 30
      run: |
        sudo mkdir -p /var/tmp/zloop
        # run for 10 minutes or at most 2 iterations for a maximum runner
        # time of 20 minutes.
        sudo /usr/share/zfs/zloop.sh -t 600 -I 2 -l -m1 -- -T 120 -P 60
    - name: Prepare artifacts
      if: failure()
      run: |
        sudo chmod +r -R /var/tmp/zloop/
    - uses: actions/upload-artifact@v3
      if: failure()
      with:
        name: Zpool-logs-${{ inputs.os }}
        path: |
          /var/tmp/zloop/*/
          !/var/tmp/zloop/*/vdev/
        retention-days: 14
        if-no-files-found: ignore
    - uses: actions/upload-artifact@v3
      if: failure()
      with:
        name: Zpool-files-${{ inputs.os }}
        path: |
          /var/tmp/zloop/*/vdev/
        retention-days: 14
        if-no-files-found: ignore

  sanity:
    runs-on: ubuntu-${{ inputs.os }}
    steps:
    - uses: actions/checkout@v3
      with:
        ref: ${{ github.event.pull_request.head.sha }}
    - uses: actions/download-artifact@v3
      with:
        name: modules-${{ inputs.os }}
    - name: Install modules
      run: |
        tar xzf modules-${{ inputs.os }}.tgz
        .github/workflows/scripts/setup-dependencies.sh tests
    - name: Tests
      timeout-minutes: 60
      shell: bash
      run: |
        set -o pipefail
        /usr/share/zfs/zfs-tests.sh -vKR -s 3G -r sanity | scripts/zfs-tests-color.sh
    - name: Prepare artifacts
      if: success() || failure()
      run: |
        RESPATH="/var/tmp/test_results"
        mv -f $RESPATH/current $RESPATH/testfiles
        tar cf $RESPATH/sanity.tar -h -C $RESPATH testfiles
    - uses: actions/upload-artifact@v3
      if: success() || failure()
      with:
        name: Logs-${{ inputs.os }}-sanity
        path: /var/tmp/test_results/sanity.tar
        if-no-files-found: ignore

  functional:
    runs-on: ubuntu-${{ inputs.os }}
    strategy:
      fail-fast: false
      matrix:
        tests: [ part1, part2, part3, part4 ]
    steps:
    - uses: actions/checkout@v3
      with:
        ref: ${{ github.event.pull_request.head.sha }}
    - uses: actions/download-artifact@v3
      with:
        name: modules-${{ inputs.os }}
    - name: Install modules
      run: |
        tar xzf modules-${{ inputs.os }}.tgz
        .github/workflows/scripts/setup-dependencies.sh tests
    - name: Setup tests
      run: |
        .github/workflows/scripts/setup-functional.sh ${{ matrix.tests }} >> $GITHUB_ENV
    - name: Tests
      timeout-minutes: 120
      shell: bash
      run: |
        set -o pipefail
        /usr/share/zfs/zfs-tests.sh -vKR -s 3G -T ${{ env.TODO }} | scripts/zfs-tests-color.sh
    - name: Prepare artifacts
      if: success() || failure()
      run: |
        RESPATH="/var/tmp/test_results"
        mv -f $RESPATH/current $RESPATH/testfiles
        tar cf $RESPATH/${{ matrix.tests }}.tar -h -C $RESPATH testfiles
    - uses: actions/upload-artifact@v3
      if: success() || failure()
      with:
        name: Logs-${{ inputs.os }}-functional
        path: /var/tmp/test_results/${{ matrix.tests }}.tar
        if-no-files-found: ignore
64  .github/workflows/zfs-linux.yml  vendored  Normal file
@@ -0,0 +1,64 @@
name: zfs-linux

on:
  push:
  pull_request:

jobs:

  build:
    name: Build
    strategy:
      fail-fast: false
      matrix:
        os: [20.04, 22.04]
    runs-on: ubuntu-${{ matrix.os }}
    steps:
    - uses: actions/checkout@v3
      with:
        ref: ${{ github.event.pull_request.head.sha }}
    - name: Build modules
      run: .github/workflows/scripts/setup-dependencies.sh build
    - name: Prepare modules upload
      run: tar czf modules-${{ matrix.os }}.tgz *.deb .github tests/test-runner tests/ImageOS.txt
    - uses: actions/upload-artifact@v3
      with:
        name: modules-${{ matrix.os }}
        path: modules-${{ matrix.os }}.tgz
        retention-days: 14

  testings:
    name: Testing
    strategy:
      fail-fast: false
      matrix:
        os: [20.04, 22.04]
    needs: build
    uses: ./.github/workflows/zfs-linux-tests.yml
    with:
      os: ${{ matrix.os }}

  cleanup:
    if: always()
    name: Cleanup
    runs-on: ubuntu-22.04
    needs: testings
    steps:
    - uses: actions/download-artifact@v3
    - name: Generating summary
      run: |
        tar xzf modules-22.04/modules-22.04.tgz .github tests
        .github/workflows/scripts/generate-summary.sh
    # up to 4 steps, each can have 1 MiB output (for debugging log files)
    - name: Summary for errors #1
      run: .github/workflows/scripts/generate-summary.sh 1
    - name: Summary for errors #2
      run: .github/workflows/scripts/generate-summary.sh 2
    - name: Summary for errors #3
      run: .github/workflows/scripts/generate-summary.sh 3
    - name: Summary for errors #4
      run: .github/workflows/scripts/generate-summary.sh 4
    - uses: actions/upload-artifact@v3
      with:
        name: Summary Files
        path: Summary/
140  .github/workflows/zfs-qemu-packages.yml  vendored
@@ -1,140 +0,0 @@
# This workflow is used to build and test RPM packages. It is a
# 'workflow_dispatch' workflow, which means it gets run manually.
#
# The workflow has a dropdown menu with two options:
#
# Build RPMs - Build release RPMs and tarballs and put them into an artifact
#              ZIP file. The directory structure used in the ZIP file mirrors
#              the ZFS yum repo.
#
# Test repo -  Test install the ZFS RPMs from the ZFS repo. On EL distros,
#              this will do a DKMS and KMOD test install from both the
#              regular and testing repos. On Fedora, it will do a DKMS
#              install from the regular repo. All test install results will
#              be displayed in the Summary page. Note that the workflow
#              provides an optional text box where you can specify the full
#              URL to an alternate repo. If left blank, it will install from
#              the default repo from the zfs-release RPM
#              (http://download.zfsonlinux.org).
#
# Most users will never need to use this workflow. It will be used primarily
# by ZFS admins for building and testing releases.
#
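# Besides the Actions web UI, a workflow_dispatch run can also be started
# from the CLI; a sketch assuming an authenticated GitHub CLI ('gh') and an
# illustrative repo URL:
#
#   gh workflow run zfs-qemu-packages.yml -f test_type="Test repo" \
#       -f repo_url="https://example.com/zfs-repo"
#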
name: zfs-qemu-packages

on:
  workflow_dispatch:
    inputs:
      test_type:
        type: choice
        required: false
        default: "Build RPMs"
        description: "Build RPMs or test the repo?"
        options:
        - "Build RPMs"
        - "Test repo"
      repo_url:
        type: string
        required: false
        default: ""
        description: "(optional) repo URL (blank: use http://download.zfsonlinux.org)"
concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  zfs-qemu-packages-jobs:
    name: qemu-VMs
    strategy:
      fail-fast: false
      matrix:
        os: ['almalinux8', 'almalinux9', 'fedora40', 'fedora41']
    runs-on: ubuntu-24.04
    steps:
    - uses: actions/checkout@v4
      with:
        ref: ${{ github.event.pull_request.head.sha }}

    - name: Setup QEMU
      timeout-minutes: 10
      run: .github/workflows/scripts/qemu-1-setup.sh

    - name: Start build machine
      timeout-minutes: 10
      run: .github/workflows/scripts/qemu-2-start.sh ${{ matrix.os }}

    - name: Install dependencies
      timeout-minutes: 20
      run: |
        .github/workflows/scripts/qemu-3-deps.sh ${{ matrix.os }}

    - name: Build modules or Test repo
      timeout-minutes: 30
      run: |
        set -e
        if [ "${{ github.event.inputs.test_type }}" == "Test repo" ] ; then
          # Bring VM back up and copy over zfs source
          .github/workflows/scripts/qemu-prepare-for-build.sh

          mkdir -p /tmp/repo
          ssh zfs@vm0 '$HOME/zfs/.github/workflows/scripts/qemu-test-repo-vm.sh' ${{ github.event.inputs.repo_url }}
        else
          .github/workflows/scripts/qemu-4-build.sh --repo --release --dkms --tarball ${{ matrix.os }}
        fi

    - name: Prepare artifacts
      if: always()
      timeout-minutes: 10
      run: |
        rsync -a zfs@vm0:/tmp/repo /tmp || true
        .github/workflows/scripts/replace-dupes-with-symlinks.sh /tmp/repo
        tar -cf ${{ matrix.os }}-repo.tar -C /tmp repo

    - uses: actions/upload-artifact@v4
      id: artifact-upload
      if: always()
      with:
        name: ${{ matrix.os }}-repo
        path: ${{ matrix.os }}-repo.tar
        compression-level: 0
        retention-days: 2
        if-no-files-found: ignore

  combine_repos:
    if: always()
    needs: [zfs-qemu-packages-jobs]
    name: "Results"
    runs-on: ubuntu-latest
    steps:
    - uses: actions/download-artifact@v4
      id: artifact-download
      if: always()
    - name: Test Summary
      if: always()
      run: |
        for i in $(find . -type f -iname "*.tar") ; do
          tar -xf $i -C /tmp
        done
        tar -cf all-repo.tar -C /tmp repo

        # If we're installing from a repo, print out the summary of the
        # versions that got installed, using Markdown.
        if [ "${{ github.event.inputs.test_type }}" == "Test repo" ] ; then
          cd /tmp/repo
          for i in $(ls *.txt) ; do
            nicename="$(echo $i | sed 's/.txt//g; s/-/ /g')"
            echo "### $nicename" >> $GITHUB_STEP_SUMMARY
            echo "|repo|RPM|URL|" >> $GITHUB_STEP_SUMMARY
            echo "|:---|:---|:---|" >> $GITHUB_STEP_SUMMARY
            awk '{print "|"$1"|"$2"|"$3"|"}' $i >> $GITHUB_STEP_SUMMARY
          done
        fi

    - uses: actions/upload-artifact@v4
      id: artifact-upload2
      if: always()
      with:
        name: all-repo
        path: all-repo.tar
        compression-level: 0
        retention-days: 5
        if-no-files-found: ignore
180  .github/workflows/zfs-qemu.yml  vendored
@@ -1,180 +0,0 @@
|
|||||||
name: zfs-qemu

on:
  push:
  pull_request:
  workflow_dispatch:
    inputs:
      include_stream9:
        type: boolean
        required: false
        default: false
        description: 'Test on CentOS 9 stream'
      include_stream10:
        type: boolean
        required: false
        default: false
        description: 'Test on CentOS 10 stream'

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  test-config:
    name: Setup
    runs-on: ubuntu-24.04
    outputs:
      test_os: ${{ steps.os.outputs.os }}
      ci_type: ${{ steps.os.outputs.ci_type }}
    steps:
    - uses: actions/checkout@v4
      with:
        fetch-depth: 0
    - name: Generate OS config and CI type
      id: os
      run: |
        FULL_OS='["almalinux8", "almalinux9", "debian11", "debian12", "fedora40", "fedora41", "freebsd13-4r", "freebsd14-2r", "freebsd15-0c", "ubuntu20", "ubuntu22", "ubuntu24"]'
        QUICK_OS='["almalinux8", "almalinux9", "debian12", "fedora41", "freebsd14-2r", "ubuntu24"]'
        # determine CI type when running on PR
        ci_type="full"
        if ${{ github.event_name == 'pull_request' }}; then
          head=${{ github.event.pull_request.head.sha }}
          base=${{ github.event.pull_request.base.sha }}
          ci_type=$(python3 .github/workflows/scripts/generate-ci-type.py $head $base)
        fi
        if [ "$ci_type" == "quick" ]; then
          os_selection="$QUICK_OS"
        else
          os_selection="$FULL_OS"
        fi
        os_json=$(echo ${os_selection} | jq -c)

        # Add optional runners
        if [ "${{ github.event.inputs.include_stream9 }}" == 'true' ]; then
          os_json=$(echo $os_json | jq -c '. += ["centos-stream9"]')
        fi
        if [ "${{ github.event.inputs.include_stream10 }}" == 'true' ]; then
          os_json=$(echo $os_json | jq -c '. += ["centos-stream10"]')
        fi

        echo $os_json
        echo "os=$os_json" >> $GITHUB_OUTPUT
        echo "ci_type=$ci_type" >> $GITHUB_OUTPUT

  qemu-vm:
    name: qemu-x86
    needs: [ test-config ]
    strategy:
      fail-fast: false
      matrix:
        # rhl: almalinux8, almalinux9, centos-stream9, fedora40, fedora41
        # debian: debian11, debian12, ubuntu20, ubuntu22, ubuntu24
        # misc: archlinux, tumbleweed
        # FreeBSD variants of 2024-12:
        # FreeBSD Release: freebsd13-4r, freebsd14-2r
        # FreeBSD Stable: freebsd13-4s, freebsd14-2s
        # FreeBSD Current: freebsd15-0c
        os: ${{ fromJson(needs.test-config.outputs.test_os) }}
    runs-on: ubuntu-24.04
    steps:
    - uses: actions/checkout@v4
      with:
        ref: ${{ github.event.pull_request.head.sha }}

    - name: Setup QEMU
      timeout-minutes: 10
      run: .github/workflows/scripts/qemu-1-setup.sh

    - name: Start build machine
      timeout-minutes: 10
      run: .github/workflows/scripts/qemu-2-start.sh ${{ matrix.os }}

    - name: Install dependencies
      timeout-minutes: 20
      run: .github/workflows/scripts/qemu-3-deps.sh ${{ matrix.os }}

    - name: Build modules
      timeout-minutes: 30
      run: .github/workflows/scripts/qemu-4-build.sh --poweroff --enable-debug ${{ matrix.os }}

    - name: Setup testing machines
      timeout-minutes: 5
      run: .github/workflows/scripts/qemu-5-setup.sh

    - name: Run tests
      timeout-minutes: 270
      run: .github/workflows/scripts/qemu-6-tests.sh
      env:
        CI_TYPE: ${{ needs.test-config.outputs.ci_type }}

    - name: Prepare artifacts
      if: always()
      timeout-minutes: 10
      run: .github/workflows/scripts/qemu-7-prepare.sh

    - uses: actions/upload-artifact@v4
      id: artifact-upload
      if: always()
      with:
        name: Logs-functional-${{ matrix.os }}
        path: /tmp/qemu-${{ matrix.os }}.tar
        if-no-files-found: ignore

    - name: Test Summary
      if: always()
      run: .github/workflows/scripts/qemu-8-summary.sh '${{ steps.artifact-upload.outputs.artifact-url }}'

  cleanup:
    if: always()
    name: Cleanup
    runs-on: ubuntu-latest
    needs: [ qemu-vm ]

    steps:
    - uses: actions/checkout@v4
      with:
        ref: ${{ github.event.pull_request.head.sha }}
    - uses: actions/download-artifact@v4
    - name: Generating summary
      run: .github/workflows/scripts/qemu-9-summary-page.sh
    - name: Generating summary...
      run: .github/workflows/scripts/qemu-9-summary-page.sh 2
    - name: Generating summary...
      run: .github/workflows/scripts/qemu-9-summary-page.sh 3
    - name: Generating summary...
      run: .github/workflows/scripts/qemu-9-summary-page.sh 4
    - name: Generating summary...
      run: .github/workflows/scripts/qemu-9-summary-page.sh 5
    - name: Generating summary...
      run: .github/workflows/scripts/qemu-9-summary-page.sh 6
    - name: Generating summary...
      run: .github/workflows/scripts/qemu-9-summary-page.sh 7
    - name: Generating summary...
      run: .github/workflows/scripts/qemu-9-summary-page.sh 8
    - name: Generating summary...
      run: .github/workflows/scripts/qemu-9-summary-page.sh 9
    - name: Generating summary...
      run: .github/workflows/scripts/qemu-9-summary-page.sh 10
    - name: Generating summary...
      run: .github/workflows/scripts/qemu-9-summary-page.sh 11
    - name: Generating summary...
      run: .github/workflows/scripts/qemu-9-summary-page.sh 12
    - name: Generating summary...
      run: .github/workflows/scripts/qemu-9-summary-page.sh 13
    - name: Generating summary...
      run: .github/workflows/scripts/qemu-9-summary-page.sh 14
    - name: Generating summary...
      run: .github/workflows/scripts/qemu-9-summary-page.sh 15
    - name: Generating summary...
      run: .github/workflows/scripts/qemu-9-summary-page.sh 16
    - name: Generating summary...
      run: .github/workflows/scripts/qemu-9-summary-page.sh 17
    - name: Generating summary...
      run: .github/workflows/scripts/qemu-9-summary-page.sh 18
    - name: Generating summary...
      run: .github/workflows/scripts/qemu-9-summary-page.sh 19
    - uses: actions/upload-artifact@v4
      with:
        name: Summary Files
        path: out-*
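The Setup job above builds the test matrix as compact JSON so fromJson() can hand it to the strategy matrix. The jq pattern is easy to sanity-check locally; the OS names here are placeholders:

    # Reproduce the matrix-building logic from test-config outside of CI.
    os_json=$(echo '["almalinux9", "fedora41"]' | jq -c '.')
    os_json=$(echo $os_json | jq -c '. += ["centos-stream9"]')
    echo "$os_json"   # ["almalinux9","fedora41","centos-stream9"]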
.github/workflows/zloop.yml (vendored): 77 lines removed
@@ -1,77 +0,0 @@
name: zloop

on:
  push:
  pull_request:

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  zloop:
    runs-on: ubuntu-24.04
    env:
      TEST_DIR: /var/tmp/zloop
    steps:
    - uses: actions/checkout@v4
      with:
        ref: ${{ github.event.pull_request.head.sha }}
    - name: Install dependencies
      run: |
        sudo apt-get purge -y snapd google-chrome-stable firefox
        ONLY_DEPS=1 .github/workflows/scripts/qemu-3-deps-vm.sh ubuntu24
    - name: Autogen.sh
      run: |
        sed -i '/DEBUG_CFLAGS="-Werror"/s/^/#/' config/zfs-build.m4
        ./autogen.sh
    - name: Configure
      run: |
        ./configure --prefix=/usr --enable-debug --enable-debuginfo \
          --enable-asan --enable-ubsan \
          --enable-debug-kmem --enable-debug-kmem-tracking
    - name: Make
      run: |
        make -j$(nproc)
    - name: Install
      run: |
        sudo make install
        sudo depmod
        sudo modprobe zfs
    - name: Tests
      run: |
        sudo mkdir -p $TEST_DIR
        # run for 10 minutes or at most 6 iterations for a maximum runner
        # time of 60 minutes.
        sudo /usr/share/zfs/zloop.sh -t 600 -I 6 -l -m 1 -- -T 120 -P 60
    - name: Prepare artifacts
      if: failure()
      run: |
        sudo chmod +r -R $TEST_DIR/
    - name: Ztest log
      if: failure()
      run: |
        grep -B10 -A1000 'ASSERT' $TEST_DIR/*/ztest.out || tail -n 1000 $TEST_DIR/*/ztest.out
    - name: Gdb log
      if: failure()
      run: |
        sed -n '/Backtraces (full)/q;p' $TEST_DIR/*/ztest.gdb
    - name: Zdb log
      if: failure()
      run: |
        cat $TEST_DIR/*/ztest.zdb
    - uses: actions/upload-artifact@v4
      if: failure()
      with:
        name: Logs
        path: |
          /var/tmp/zloop/*/
          !/var/tmp/zloop/*/vdev/
        if-no-files-found: ignore
    - uses: actions/upload-artifact@v4
      if: failure()
      with:
        name: Pool files
        path: |
          /var/tmp/zloop/*/vdev/
        if-no-files-found: ignore
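The same stress loop can be reproduced outside CI after `sudo make install`. Per the comment in the workflow, -t 600 and -I 6 cap the run at 10 minutes or at most 6 iterations; the reading of the remaining flags (-l, -m 1, and the pass-through ztest arguments after --) is an assumption based on common zloop.sh usage rather than something stated in the file:

    # Sketch of a local zloop run mirroring the CI limits.
    sudo mkdir -p /var/tmp/zloop
    sudo /usr/share/zfs/zloop.sh -t 600 -I 6 -l -m 1 -- -T 120 -P 60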
.mailmap: 32 lines changed
@@ -30,7 +30,6 @@ Andreas Dilger <adilger@dilger.ca>
 Andrew Walker <awalker@ixsystems.com>
 Benedikt Neuffer <github@itfriend.de>
 Chengfei Zhu <chengfeix.zhu@intel.com>
-ChenHao Lu <18302010006@fudan.edu.cn>
 Chris Lindee <chris.lindee+github@gmail.com>
 Colm Buckley <colm@tuatha.org>
 Crag Wang <crag0715@gmail.com>
@@ -44,7 +43,6 @@ Glenn Washburn <development@efficientek.com>
 Gordan Bobic <gordan.bobic@gmail.com>
 Gregory Bartholomew <gregory.lee.bartholomew@gmail.com>
 hedong zhang <h_d_zhang@163.com>
-Ilkka Sovanto <github@ilkka.kapsi.fi>
 InsanePrawn <Insane.Prawny@gmail.com>
 Jason Cohen <jwittlincohen@gmail.com>
 Jason Harmening <jason.harmening@gmail.com>
@@ -59,7 +57,6 @@ KernelOfTruth <kerneloftruth@gmail.com>
 Liu Hua <liu.hua130@zte.com.cn>
 Liu Qing <winglq@gmail.com>
 loli10K <ezomori.nozomu@gmail.com>
-Mart Frauenlob <allkind@fastest.cc>
 Matthias Blankertz <matthias@blankertz.org>
 Michael Gmelin <grembo@FreeBSD.org>
 Olivier Mazouffre <olivier.mazouffre@ims-bordeaux.fr>
@@ -70,24 +67,12 @@ Rob Norris <robn@despairlabs.com>
 Rob Norris <rob.norris@klarasystems.com>
 Sam Lunt <samuel.j.lunt@gmail.com>
 Sanjeev Bagewadi <sanjeev.bagewadi@gmail.com>
-Sebastian Wuerl <s.wuerl@mailbox.org>
-SHENGYI HONG <aokblast@FreeBSD.org>
 Stoiko Ivanov <github@nomore.at>
 Tamas TEVESZ <ice@extreme.hu>
 WHR <msl0000023508@gmail.com>
 Yanping Gao <yanping.gao@xtaotech.com>
 Youzhong Yang <youzhong@gmail.com>
 
-# Signed-off-by: overriding Author:
-Alexander Ziaee <ziaee@FreeBSD.org> <concussious@runbox.com>
-Ryan <errornointernet@envs.net> <error.nointernet@gmail.com>
-Sietse <sietse@wizdom.nu> <uglymotha@wizdom.nu>
-Phil Sutter <phil@nwl.cc> <p.github@nwl.cc>
-poscat <poscat@poscat.moe> <poscat0x04@outlook.com>
-Qiuhao Chen <chenqiuhao1997@gmail.com> <haohao0924@126.com>
-Yuxin Wang <yuxinwang9999@gmail.com> <Bi11gates9999@gmail.com>
-Zhenlei Huang <zlei@FreeBSD.org> <zlei.huang@gmail.com>
-
 # Commits from strange places, long ago
 Brian Behlendorf <behlendorf1@llnl.gov> <behlendo@7e1ea52c-4ff2-0310-8f11-9dd32ca42a1c>
 Brian Behlendorf <behlendorf1@llnl.gov> <behlendo@fedora-17-amd64.(none)>
@@ -104,7 +89,6 @@ Alek Pinchuk <apinchuk@axcient.com> <alek-p@users.noreply.github.com>
 Alexander Lobakin <alobakin@pm.me> <solbjorn@users.noreply.github.com>
 Alexey Smirnoff <fling@member.fsf.org> <fling-@users.noreply.github.com>
 Allen Holl <allen.m.holl@gmail.com> <65494904+allen-4@users.noreply.github.com>
-Alphan Yılmaz <alphanyilmaz@gmail.com> <a1ea321@users.noreply.github.com>
 Ameer Hamza <ahamza@ixsystems.com> <106930537+ixhamza@users.noreply.github.com>
 Andrew J. Hesford <ajh@sideband.org> <48421688+ahesford@users.noreply.github.com>>
 Andrew Sun <me@andrewsun.com> <as-com@users.noreply.github.com>
@@ -112,22 +96,18 @@ Aron Xu <happyaron.xu@gmail.com> <happyaron@users.noreply.github.com>
 Arun KV <arun.kv@datacore.com> <65647132+arun-kv@users.noreply.github.com>
 Ben Wolsieffer <benwolsieffer@gmail.com> <lopsided98@users.noreply.github.com>
 bernie1995 <bernie.pikes@gmail.com> <42413912+bernie1995@users.noreply.github.com>
-Bojan Novković <bnovkov@FreeBSD.org> <72801811+bnovkov@users.noreply.github.com>
 Boris Protopopov <boris.protopopov@actifio.com> <bprotopopov@users.noreply.github.com>
 Brad Forschinger <github@bnjf.id.au> <bnjf@users.noreply.github.com>
 Brandon Thetford <brandon@dodecatec.com> <dodexahedron@users.noreply.github.com>
 buzzingwires <buzzingwires@outlook.com> <131118055+buzzingwires@users.noreply.github.com>
 Cedric Maunoury <cedric.maunoury@gmail.com> <38213715+cedricmaunoury@users.noreply.github.com>
 Charles Suh <charles.suh@gmail.com> <charlessuh@users.noreply.github.com>
-Chris Peredun <chris.peredun@ixsystems.com> <126915832+chrisperedun@users.noreply.github.com>
 Dacian Reece-Stremtan <dacianstremtan@gmail.com> <35844628+dacianstremtan@users.noreply.github.com>
 Damian Szuberski <szuberskidamian@gmail.com> <30863496+szubersk@users.noreply.github.com>
 Daniel Hiepler <d-git@coderdu.de> <32984777+heeplr@users.noreply.github.com>
 Daniel Kobras <d.kobras@science-computing.de> <sckobras@users.noreply.github.com>
 Daniel Reichelt <hacking@nachtgeist.net> <nachtgeist@users.noreply.github.com>
 David Quigley <david.quigley@intel.com> <dpquigl@users.noreply.github.com>
-Dennis R. Friedrichsen <dennis.r.friedrichsen@gmail.com> <31087738+dennisfriedrichsen@users.noreply.github.com>
-Dex Wood <slash2314@gmail.com> <slash2314@users.noreply.github.com>
 DHE <git@dehacked.net> <DeHackEd@users.noreply.github.com>
 Dmitri John Ledkov <dimitri.ledkov@canonical.com> <19779+xnox@users.noreply.github.com>
 Dries Michiels <driesm.michiels@gmail.com> <32487486+driesmp@users.noreply.github.com>
@@ -148,7 +128,6 @@ Harry Mallon <hjmallon@gmail.com> <1816667+hjmallon@users.noreply.github.com>
 Hiếu Lê <leorize+oss@disroot.org> <alaviss@users.noreply.github.com>
 Jake Howard <git@theorangeone.net> <RealOrangeOne@users.noreply.github.com>
 James Cowgill <james.cowgill@mips.com> <jcowgill@users.noreply.github.com>
-Jaron Kent-Dobias <jaron@kent-dobias.com> <kentdobias@users.noreply.github.com>
 Jason King <jason.king@joyent.com> <jasonbking@users.noreply.github.com>
 Jeff Dike <jdike@akamai.com> <52420226+jdike@users.noreply.github.com>
 Jitendra Patidar <jitendra.patidar@nutanix.com> <53164267+jsai20@users.noreply.github.com>
@@ -158,9 +137,7 @@ John L. Hammond <john.hammond@intel.com> <35266395+jhammond-intel@users.noreply.
 John-Mark Gurney <jmg@funkthat.com> <jmgurney@users.noreply.github.com>
 John Ramsden <johnramsden@riseup.net> <johnramsden@users.noreply.github.com>
 Jonathon Fernyhough <jonathon@m2x.dev> <559369+jonathonf@users.noreply.github.com>
-Jose Luis Duran <jlduran@gmail.com> <jlduran@users.noreply.github.com>
 Justin Hibbits <chmeeedalf@gmail.com> <chmeeedalf@users.noreply.github.com>
-Kevin Greene <kevin.greene@delphix.com> <104801862+kxgreene@users.noreply.github.com>
 Kevin Jin <lostking2008@hotmail.com> <33590050+jxdking@users.noreply.github.com>
 Kevin P. Fleming <kevin@km6g.us> <kpfleming@users.noreply.github.com>
 Krzysztof Piecuch <piecuch@kpiecuch.pl> <3964215+pikrzysztof@users.noreply.github.com>
@@ -171,11 +148,9 @@ Lorenz Hüdepohl <dev@stellardeath.org> <lhuedepohl@users.noreply.github.com>
 Luís Henriques <henrix@camandro.org> <73643340+lumigch@users.noreply.github.com>
 Marcin Skarbek <git@skarbek.name> <mskarbek@users.noreply.github.com>
 Matt Fiddaman <github@m.fiddaman.uk> <81489167+matt-fidd@users.noreply.github.com>
-Maxim Filimonov <che@bein.link> <part1zano@users.noreply.github.com>
 Max Zettlmeißl <max@zettlmeissl.de> <6818198+maxz@users.noreply.github.com>
 Michael Niewöhner <foss@mniewoehner.de> <c0d3z3r0@users.noreply.github.com>
 Michael Zhivich <mzhivich@akamai.com> <33133421+mzhivich@users.noreply.github.com>
-MigeljanImeri <ImeriMigel@gmail.com> <78048439+MigeljanImeri@users.noreply.github.com>
 Mo Zhou <cdluminate@gmail.com> <5723047+cdluminate@users.noreply.github.com>
 Nick Mattis <nickm970@gmail.com> <nmattis@users.noreply.github.com>
 omni <omni+vagant@hack.org> <79493359+omnivagant@users.noreply.github.com>
@@ -189,7 +164,6 @@ Ping Huang <huangping@smartx.com> <101400146+hpingfs@users.noreply.github.com>
 Piotr P. Stefaniak <pstef@freebsd.org> <pstef@users.noreply.github.com>
 Richard Allen <belperite@gmail.com> <33836503+belperite@users.noreply.github.com>
 Rich Ercolani <rincebrain@gmail.com> <214141+rincebrain@users.noreply.github.com>
-Rick Macklem <rmacklem@uoguelph.ca> <64620010+rmacklem@users.noreply.github.com>
 Rob Wing <rob.wing@klarasystems.com> <98866084+rob-wing@users.noreply.github.com>
 Roman Strashkin <roman.strashkin@nexenta.com> <Ramzec@users.noreply.github.com>
 Ryan Hirasaki <ryanhirasaki@gmail.com> <4690732+RyanHir@users.noreply.github.com>
@@ -200,22 +174,16 @@ Scott Colby <scott@scolby.com> <scolby33@users.noreply.github.com>
 Sean Eric Fagan <kithrup@mac.com> <kithrup@users.noreply.github.com>
 Spencer Kinny <spencerkinny1995@gmail.com> <30333052+Spencer-Kinny@users.noreply.github.com>
 Srikanth N S <srikanth.nagasubbaraoseetharaman@hpe.com> <75025422+nssrikanth@users.noreply.github.com>
-Stefan Lendl <s.lendl@proxmox.com> <1321542+stfl@users.noreply.github.com>
-Thomas Bertschinger <bertschinger@lanl.gov> <101425190+bertschinger@users.noreply.github.com>
 Thomas Geppert <geppi@digitx.de> <geppi@users.noreply.github.com>
 Tim Crawford <tcrawford@datto.com> <crawfxrd@users.noreply.github.com>
-Todd Seidelmann <18294602+seidelma@users.noreply.github.com>
 Tom Matthews <tom@axiom-partners.com> <tomtastic@users.noreply.github.com>
 Tony Perkins <tperkins@datto.com> <62951051+tony-zfs@users.noreply.github.com>
 Torsten Wörtwein <twoertwein@gmail.com> <twoertwein@users.noreply.github.com>
 Tulsi Jain <tulsi.jain@delphix.com> <TulsiJain@users.noreply.github.com>
 Václav Skála <skala@vshosting.cz> <33496485+vaclavskala@users.noreply.github.com>
-Vaibhav Bhanawat <vaibhav.bhanawat@delphix.com> <88050553+vaibhav-delphix@users.noreply.github.com>
-Vandana Rungta <vrungta@amazon.com> <46906819+vandanarungta@users.noreply.github.com>
 Violet Purcell <vimproved@inventati.org> <66446404+vimproved@users.noreply.github.com>
 Vipin Kumar Verma <vipin.verma@hpe.com> <75025470+vermavipinkumar@users.noreply.github.com>
 Wolfgang Bumiller <w.bumiller@proxmox.com> <Blub@users.noreply.github.com>
-XDTG <click1799@163.com> <35128600+XDTG@users.noreply.github.com>
 xtouqh <xtouqh@hotmail.com> <72357159+xtouqh@users.noreply.github.com>
 Yuri Pankov <yuripv@FreeBSD.org> <113725409+yuripv@users.noreply.github.com>
 Yuri Pankov <yuripv@FreeBSD.org> <82001006+yuripv@users.noreply.github.com>
AUTHORS: 70 lines changed
@@ -36,7 +36,6 @@ CONTRIBUTORS:
 Alexander Pyhalov <apyhalov@gmail.com>
 Alexander Richardson <Alexander.Richardson@cl.cam.ac.uk>
 Alexander Stetsenko <ams@nexenta.com>
-Alexander Ziaee <ziaee@FreeBSD.org>
 Alex Braunegg <alex.braunegg@gmail.com>
 Alexey Shvetsov <alexxy@gentoo.org>
 Alexey Smirnoff <fling@member.fsf.org>
@@ -47,7 +46,6 @@ CONTRIBUTORS:
 Alex Zhuravlev <alexey.zhuravlev@intel.com>
 Allan Jude <allanjude@freebsd.org>
 Allen Holl <allen.m.holl@gmail.com>
-Alphan Yılmaz <alphanyilmaz@gmail.com>
 alteriks <alteriks@gmail.com>
 Alyssa Ross <hi@alyssa.is>
 Ameer Hamza <ahamza@ixsystems.com>
@@ -90,18 +88,15 @@ CONTRIBUTORS:
 Bassu <bassu@phi9.com>
 Ben Allen <bsallen@alcf.anl.gov>
 Ben Cordero <bencord0@condi.me>
-Benda Xu <orv@debian.org>
 Benedikt Neuffer <github@itfriend.de>
 Benjamin Albrecht <git@albrecht.io>
 Benjamin Gentil <benjgentil.pro@gmail.com>
-Benjamin Sherman <benjamin@holyarmy.org>
 Ben McGough <bmcgough@fredhutch.org>
 Ben Rubson <ben.rubson@gmail.com>
 Ben Wolsieffer <benwolsieffer@gmail.com>
 bernie1995 <bernie.pikes@gmail.com>
 Bill McGonigle <bill-github.com-public1@bfccomputing.com>
 Bill Pijewski <wdp@joyent.com>
-Bojan Novković <bnovkov@FreeBSD.org>
 Boris Protopopov <boris.protopopov@nexenta.com>
 Brad Forschinger <github@bnjf.id.au>
 Brad Lewis <brad.lewis@delphix.com>
@@ -116,7 +111,6 @@ CONTRIBUTORS:
 bzzz77 <bzzz.tomas@gmail.com>
 cable2999 <cable2999@users.noreply.github.com>
 Caleb James DeLisle <calebdelisle@lavabit.com>
-Cameron Harr <harr1@llnl.gov>
 Cao Xuewen <cao.xuewen@zte.com.cn>
 Carlo Landmeter <clandmeter@gmail.com>
 Carlos Alberto Lopez Perez <clopez@igalia.com>
@@ -126,15 +120,12 @@ CONTRIBUTORS:
 Chen Can <chen.can2@zte.com.cn>
 Chengfei Zhu <chengfeix.zhu@intel.com>
 Chen Haiquan <oc@yunify.com>
-ChenHao Lu <18302010006@fudan.edu.cn>
 Chip Parker <aparker@enthought.com>
 Chris Burroughs <chris.burroughs@gmail.com>
-Chris Davidson <christopher.davidson@gmail.com>
 Chris Dunlap <cdunlap@llnl.gov>
 Chris Dunlop <chris@onthe.net.au>
 Chris Lindee <chris.lindee+github@gmail.com>
 Chris McDonough <chrism@plope.com>
-Chris Peredun <chris.peredun@ixsystems.com>
 Chris Siden <chris.siden@delphix.com>
 Chris Siebenmann <cks.github@cs.toronto.edu>
 Christer Ekholm <che@chrekh.se>
@@ -153,7 +144,6 @@ CONTRIBUTORS:
 Clint Armstrong <clint@clintarmstrong.net>
 Coleman Kane <ckane@colemankane.org>
 Colin Ian King <colin.king@canonical.com>
-Colin Percival <cperciva@tarsnap.com>
 Colm Buckley <colm@tuatha.org>
 Crag Wang <crag0715@gmail.com>
 Craig Loomis <cloomis@astro.princeton.edu>
@@ -166,12 +156,10 @@ CONTRIBUTORS:
 Damiano Albani <damiano.albani@gmail.com>
 Damian Szuberski <szuberskidamian@gmail.com>
 Damian Wojsław <damian@wojslaw.pl>
-Daniel Berlin <dberlin@dberlin.org>
 Daniel Hiepler <d-git@coderdu.de>
 Daniel Hoffman <dj.hoffman@delphix.com>
 Daniel Kobras <d.kobras@science-computing.de>
 Daniel Kolesa <daniel@octaforge.org>
-Daniel Perry <dtperry@amazon.com>
 Daniel Reichelt <hacking@nachtgeist.net>
 Daniel Stevenson <bot@dstev.net>
 Daniel Verite <daniel@verite.pro>
@@ -188,11 +176,8 @@ CONTRIBUTORS:
 David Quigley <david.quigley@intel.com>
 Debabrata Banerjee <dbanerje@akamai.com>
 D. Ebdrup <debdrup@freebsd.org>
-Dennis R. Friedrichsen <dennis.r.friedrichsen@gmail.com>
 Denys Rtveliashvili <denys@rtveliashvili.name>
 Derek Dai <daiderek@gmail.com>
-Derek Schrock <dereks@lifeofadishwasher.com>
-Dex Wood <slash2314@gmail.com>
 DHE <git@dehacked.net>
 Didier Roche <didrocks@ubuntu.com>
 Dimitri John Ledkov <xnox@ubuntu.com>
@@ -249,14 +234,10 @@ CONTRIBUTORS:
 Gian-Carlo DeFazio <defazio1@llnl.gov>
 Gionatan Danti <g.danti@assyoma.it>
 Giuseppe Di Natale <guss80@gmail.com>
-Gleb Smirnoff <glebius@FreeBSD.org>
 Glenn Washburn <development@efficientek.com>
-glibg10b <glibg10b@users.noreply.github.com>
-gofaster <felix.gofaster@gmail.com>
 Gordan Bobic <gordan@redsleeve.org>
 Gordon Bergling <gbergling@googlemail.com>
 Gordon Ross <gwr@nexenta.com>
-Gordon Tetlow <gordon@freebsd.org>
 Graham Christensen <graham@grahamc.com>
 Graham Perrin <grahamperrin@gmail.com>
 Gregor Kopka <gregor@kopka.net>
@@ -284,12 +265,10 @@ CONTRIBUTORS:
 Igor Kozhukhov <ikozhukhov@gmail.com>
 Igor Lvovsky <ilvovsky@gmail.com>
 ilbsmart <wgqimut@gmail.com>
-Ilkka Sovanto <github@ilkka.kapsi.fi>
 illiliti <illiliti@protonmail.com>
 ilovezfs <ilovezfs@icloud.com>
 InsanePrawn <Insane.Prawny@gmail.com>
 Isaac Huang <he.huang@intel.com>
-Ivan Volosyuk <Ivan.Volosyuk@gmail.com>
 Jacek Fefliński <feflik@gmail.com>
 Jacob Adams <tookmund@gmail.com>
 Jake Howard <git@theorangeone.net>
@@ -297,19 +276,15 @@ CONTRIBUTORS:
 James H <james@kagisoft.co.uk>
 James Lee <jlee@thestaticvoid.com>
 James Pan <jiaming.pan@yahoo.com>
-James Reilly <jreilly1821@gmail.com>
 James Wah <james@laird-wah.net>
 Jan Engelhardt <jengelh@inai.de>
 Jan Kryl <jan.kryl@nexenta.com>
 Jan Sanislo <oystr@cs.washington.edu>
-Jaron Kent-Dobias <jaron@kent-dobias.com>
 Jason Cohen <jwittlincohen@gmail.com>
 Jason Harmening <jason.harmening@gmail.com>
 Jason King <jason.brian.king@gmail.com>
-Jason Lee <jasonlee@lanl.gov>
 Jason Zaman <jasonzaman@gmail.com>
 Javen Wu <wu.javen@gmail.com>
-Jaydeep Kshirsagar <jkshirsagar@maxlinear.com>
 Jean-Baptiste Lallement <jean-baptiste@ubuntu.com>
 Jeff Dike <jdike@akamai.com>
 Jeremy Faulkner <gldisater@gmail.com>
@@ -317,7 +292,6 @@ CONTRIBUTORS:
 Jeremy Jones <jeremy@delphix.com>
 Jeremy Visser <jeremy.visser@gmail.com>
 Jerry Jelinek <jerry.jelinek@joyent.com>
-Jerzy Kołosowski <jerzy@kolosowscy.pl>
 Jessica Clarke <jrtc27@jrtc27.com>
 Jinshan Xiong <jinshan.xiong@intel.com>
 Jitendra Patidar <jitendra.patidar@nutanix.com>
@@ -339,7 +313,6 @@ CONTRIBUTORS:
 Jonathon Fernyhough <jonathon@m2x.dev>
 Jorgen Lundman <lundman@lundman.net>
 Josef 'Jeff' Sipek <josef.sipek@nexenta.com>
-Jose Luis Duran <jlduran@gmail.com>
 Josh Soref <jsoref@users.noreply.github.com>
 Joshua M. Clulow <josh@sysmgr.org>
 José Luis Salvador Rufo <salvador.joseluis@gmail.com>
@@ -363,10 +336,8 @@ CONTRIBUTORS:
 Kash Pande <kash@tripleback.net>
 Kay Pedersen <christianpe96@gmail.com>
 Keith M Wesolowski <wesolows@foobazco.org>
-Kent Ross <k@mad.cash>
 KernelOfTruth <kerneloftruth@gmail.com>
 Kevin Bowling <kevin.bowling@kev009.com>
-Kevin Greene <kevin.greene@delphix.com>
 Kevin Jin <lostking2008@hotmail.com>
 Kevin P. Fleming <kevin@km6g.us>
 Kevin Tanguy <kevin.tanguy@ovh.net>
@@ -377,7 +348,6 @@ CONTRIBUTORS:
 Kohsuke Kawaguchi <kk@kohsuke.org>
 Konstantin Khorenko <khorenko@virtuozzo.com>
 KORN Andras <korn@elan.rulez.org>
-kotauskas <v.toncharov@gmail.com>
 Kristof Provost <github@sigsegv.be>
 Krzysztof Piecuch <piecuch@kpiecuch.pl>
 Kyle Blatter <kyleblatter@llnl.gov>
@@ -419,17 +389,14 @@ CONTRIBUTORS:
 Mark Shellenbaum <Mark.Shellenbaum@Oracle.COM>
 marku89 <mar42@kola.li>
 Mark Wright <markwright@internode.on.net>
-Mart Frauenlob <allkind@fastest.cc>
 Martin Matuska <mm@FreeBSD.org>
 Martin Rüegg <martin.rueegg@metaworx.ch>
-Martin Wagner <martin.wagner.dev@gmail.com>
 Massimo Maggi <me@massimo-maggi.eu>
 Mateusz Guzik <mjguzik@gmail.com>
 Mateusz Piotrowski <0mp@FreeBSD.org>
 Mathieu Velten <matmaul@gmail.com>
 Matt Fiddaman <github@m.fiddaman.uk>
 Matthew Ahrens <matt@delphix.com>
-Matthew Heller <matthew.f.heller@gmail.com>
 Matthew Thode <mthode@mthode.org>
 Matthias Blankertz <matthias@blankertz.org>
 Matt Johnston <matt@fugro-fsi.com.au>
@@ -438,7 +405,6 @@ CONTRIBUTORS:
 Matus Kral <matuskral@me.com>
 Mauricio Faria de Oliveira <mfo@canonical.com>
 Max Grossman <max.grossman@delphix.com>
-Maxim Filimonov <che@bein.link>
 Maximilian Mehnert <maximilian.mehnert@gmx.de>
 Max Zettlmeißl <max@zettlmeissl.de>
 Md Islam <mdnahian@outlook.com>
@@ -451,14 +417,12 @@ CONTRIBUTORS:
 Michael Niewöhner <foss@mniewoehner.de>
 Michael Zhivich <mzhivich@akamai.com>
 Michal Vasilek <michal@vasilek.cz>
-MigeljanImeri <ImeriMigel@gmail.com>
 Mike Gerdts <mike.gerdts@joyent.com>
 Mike Harsch <mike@harschsystems.com>
 Mike Leddy <mike.leddy@gmail.com>
 Mike Swanson <mikeonthecomputer@gmail.com>
 Milan Jurik <milan.jurik@xylab.cz>
 Minsoo Choo <minsoochoo0122@proton.me>
-mnrx <mnrx@users.noreply.github.com>
 Mohamed Tawfik <m_tawfik@aucegypt.edu>
 Morgan Jones <mjones@rice.edu>
 Moritz Maxeiner <moritz@ucworks.org>
@@ -484,7 +448,6 @@ CONTRIBUTORS:
 Olaf Faaland <faaland1@llnl.gov>
 Oleg Drokin <green@linuxhacker.ru>
 Oleg Stepura <oleg@stepura.com>
-Olivier Certner <olce.freebsd@certner.fr>
 Olivier Mazouffre <olivier.mazouffre@ims-bordeaux.fr>
 omni <omni+vagant@hack.org>
 Orivej Desh <orivej@gmx.fr>
@@ -501,27 +464,21 @@ CONTRIBUTORS:
 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 Pedro Giffuni <pfg@freebsd.org>
 Peng <peng.hse@xtaotech.com>
-Peng Liu <littlenewton6@gmail.com>
 Peter Ashford <ashford@accs.com>
 Peter Dave Hello <hsu@peterdavehello.org>
-Peter Doherty <peterd@acranox.org>
 Peter Levine <plevine457@gmail.com>
 Peter Wirdemo <peter.wirdemo@gmail.com>
 Petros Koutoupis <petros@petroskoutoupis.com>
 Philip Pokorny <ppokorny@penguincomputing.com>
 Philipp Riederer <pt@philipptoelke.de>
 Phil Kauffman <philip@kauffman.me>
-Phil Sutter <phil@nwl.cc>
 Ping Huang <huangping@smartx.com>
 Piotr Kubaj <pkubaj@anongoth.pl>
 Piotr P. Stefaniak <pstef@freebsd.org>
-poscat <poscat@poscat.moe>
 Prakash Surya <prakash.surya@delphix.com>
 Prasad Joshi <prasadjoshi124@gmail.com>
 privb0x23 <privb0x23@users.noreply.github.com>
 P.SCH <p88@yahoo.com>
-Qiuhao Chen <chenqiuhao1997@gmail.com>
-Quartz <yyhran@163.com>
 Quentin Zdanis <zdanisq@gmail.com>
 Rafael Kitover <rkitover@gmail.com>
 RageLtMan <sempervictus@users.noreply.github.com>
@@ -534,15 +491,11 @@ CONTRIBUTORS:
 Riccardo Schirone <rschirone91@gmail.com>
 Richard Allen <belperite@gmail.com>
 Richard Elling <Richard.Elling@RichardElling.com>
-Richard Kojedzinszky <richard@kojedz.in>
 Richard Laager <rlaager@wiktel.com>
 Richard Lowe <richlowe@richlowe.net>
 Richard Sharpe <rsharpe@samba.org>
 Richard Yao <ryao@gentoo.org>
 Rich Ercolani <rincebrain@gmail.com>
-Rick Macklem <rmacklem@uoguelph.ca>
-rilysh <nightquick@proton.me>
-Robert Evans <evansr@google.com>
 Robert Novak <sailnfool@gmail.com>
 Roberto Ricci <ricci@disroot.org>
 Rob Norris <robn@despairlabs.com>
@@ -552,14 +505,11 @@ CONTRIBUTORS:
 Roman Strashkin <roman.strashkin@nexenta.com>
 Ross Williams <ross@ross-williams.net>
 Ruben Kerkhof <ruben@rubenkerkhof.com>
-Ryan <errornointernet@envs.net>
 Ryan Hirasaki <ryanhirasaki@gmail.com>
 Ryan Lahfa <masterancpp@gmail.com>
 Ryan Libby <rlibby@FreeBSD.org>
 Ryan Moeller <freqlabs@FreeBSD.org>
-Sam Atkinson <samatk@amazon.com>
 Sam Hathaway <github.com@munkynet.org>
-Sam James <sam@gentoo.org>
 Sam Lunt <samuel.j.lunt@gmail.com>
 Samuel VERSCHELDE <stormi-github@ylix.fr>
 Samuel Wycliffe <samuelwycliffe@gmail.com>
@@ -573,28 +523,20 @@ CONTRIBUTORS:
 Scot W. Stevenson <scot.stevenson@gmail.com>
 Sean Eric Fagan <sef@ixsystems.com>
 Sebastian Gottschall <s.gottschall@dd-wrt.com>
-Sebastian Wuerl <s.wuerl@mailbox.org>
 Sebastien Roy <seb@delphix.com>
 Sen Haerens <sen@senhaerens.be>
 Serapheim Dimitropoulos <serapheim@delphix.com>
 Seth Forshee <seth.forshee@canonical.com>
-Seth Hoffert <Seth.Hoffert@gmail.com>
-Seth Troisi <sethtroisi@google.com>
 Shaan Nobee <sniper111@gmail.com>
 Shampavman <sham.pavman@nexenta.com>
 Shaun Tancheff <shaun@aeonazure.com>
-Shawn Bayern <sbayern@law.fsu.edu>
-Shengqi Chen <harry-chen@outlook.com>
-SHENGYI HONG <aokblast@FreeBSD.org>
 Shen Yan <shenyanxxxy@qq.com>
-Sietse <sietse@wizdom.nu>
 Simon Guest <simon.guest@tesujimath.org>
 Simon Klinkert <simon.klinkert@gmail.com>
 Sowrabha Gopal <sowrabha.gopal@delphix.com>
 Spencer Kinny <spencerkinny1995@gmail.com>
 Srikanth N S <srikanth.nagasubbaraoseetharaman@hpe.com>
 Stanislav Seletskiy <s.seletskiy@gmail.com>
-Stefan Lendl <s.lendl@proxmox.com>
 Steffen Müthing <steffen.muething@iwr.uni-heidelberg.de>
 Stephen Blinick <stephen.blinick@delphix.com>
 sterlingjensen <sterlingjensen@users.noreply.github.com>
@@ -614,9 +556,7 @@ CONTRIBUTORS:
 Tamas TEVESZ <ice@extreme.hu>
 Teodor Spæren <teodor_spaeren@riseup.net>
 TerraTech <TerraTech@users.noreply.github.com>
-Theera K. <tkittich@hotmail.com>
 Thijs Cramer <thijs.cramer@gmail.com>
-Thomas Bertschinger <bertschinger@lanl.gov>
 Thomas Geppert <geppi@digitx.de>
 Thomas Lamprecht <guggentom@hotmail.de>
 Till Maas <opensource@till.name>
@@ -627,11 +567,8 @@ CONTRIBUTORS:
 timor <timor.dd@googlemail.com>
 Timothy Day <tday141@gmail.com>
 Tim Schumacher <timschumi@gmx.de>
-Tim Smith <tim@mondoo.com>
 Tino Reichardt <milky-zfs@mcmilk.de>
-tleydxdy <shironeko.github@tesaguri.club>
 Tobin Harding <me@tobin.cc>
-Todd Seidelmann <seidelma@users.noreply.github.com>
 Tom Caputi <tcaputi@datto.com>
 Tom Matthews <tom@axiom-partners.com>
 Tomohiro Kusumi <kusumi.tomohiro@gmail.com>
@@ -645,15 +582,12 @@ CONTRIBUTORS:
 Trevor Bautista <trevrb@trevrb.net>
 Trey Dockendorf <treydock@gmail.com>
 Troels Nørgaard <tnn@tradeshift.com>
-tstabrawa <tstabrawa@users.noreply.github.com>
 Tulsi Jain <tulsi.jain@delphix.com>
 Turbo Fredriksson <turbo@bayour.com>
 Tyler J. Stachecki <stachecki.tyler@gmail.com>
 Umer Saleem <usaleem@ixsystems.com>
-Vaibhav Bhanawat <vaibhav.bhanawat@delphix.com>
 Valmiky Arquissandas <kayvlim@gmail.com>
 Val Packett <val@packett.cool>
-Vandana Rungta <vrungta@amazon.com>
 Vince van Oosten <techhazard@codeforyouand.me>
 Violet Purcell <vimproved@inventati.org>
 Vipin Kumar Verma <vipin.verma@hpe.com>
@@ -669,7 +603,6 @@ CONTRIBUTORS:
 Windel Bouwman <windel@windel.nl>
 Wojciech Małota-Wójcik <outofforest@users.noreply.github.com>
 Wolfgang Bumiller <w.bumiller@proxmox.com>
-XDTG <click1799@163.com>
 Xin Li <delphij@FreeBSD.org>
 Xinliang Liu <xinliang.liu@linaro.org>
 xtouqh <xtouqh@hotmail.com>
@@ -681,13 +614,10 @@ CONTRIBUTORS:
 yuina822 <ayuichi@club.kyutech.ac.jp>
 YunQiang Su <syq@debian.org>
 Yuri Pankov <yuri.pankov@gmail.com>
-Yuxin Wang <yuxinwang9999@gmail.com>
 Yuxuan Shui <yshuiv7@gmail.com>
 Zachary Bedell <zac@thebedells.org>
 Zach Dykstra <dykstra.zachary@gmail.com>
 zgock <zgock@nuc.base.zgock-lab.net>
-Zhao Yongming <zym@apache.org>
-Zhenlei Huang <zlei@FreeBSD.org>
 Zhu Chuang <chuang@melty.land>
 Érico Nogueira <erico.erc@gmail.com>
 Đoàn Trần Công Danh <congdanhqx@gmail.com>
META: 6 lines changed
@@ -1,10 +1,10 @@
 Meta: 1
 Name: zfs
 Branch: 1.0
-Version: 2.3.99
+Version: 2.2.2
 Release: 1
 Release-Tags: relext
 License: CDDL
 Author: OpenZFS
-Linux-Maximum: 6.14
-Linux-Minimum: 4.18
+Linux-Maximum: 6.6
+Linux-Minimum: 3.10
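META is a flat key/value file, which is why version and kernel-support bumps like the one above show up as single-line diffs; a sketch of reading it with awk, using the field names from the listing above:

    awk -F': *' '$1 == "Version"       {print $2}' META
    awk -F': *' '$1 == "Linux-Maximum" {print $2}' META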
@@ -112,10 +112,6 @@ commitcheck:
 	${top_srcdir}/scripts/commitcheck.sh; \
 	fi
 
-CHECKS += spdxcheck
-spdxcheck:
-	$(AM_V_at)$(top_srcdir)/scripts/spdxcheck.pl
-
 if HAVE_PARALLEL
 cstyle_line = -print0 | parallel -X0 ${top_srcdir}/scripts/cstyle.pl -cpP {}
 else
@@ -32,4 +32,4 @@ For more details see the NOTICE, LICENSE and COPYRIGHT files; `UCRL-CODE-235197`
 
 # Supported Kernels
 * The `META` file contains the officially recognized supported Linux kernel versions.
-* Supported FreeBSD versions are any supported branches and releases starting from 13.0-RELEASE.
+* Supported FreeBSD versions are any supported branches and releases starting from 12.4-RELEASE.
@@ -28,7 +28,7 @@ Two release branches are maintained for OpenZFS, they are:
 Minor changes to support these distribution kernels will be applied as
 needed. New kernel versions released after the OpenZFS LTS release are
 not supported. LTS releases will receive patches for at least 2 years.
-The current LTS release is OpenZFS 2.2.
+The current LTS release is OpenZFS 2.1.
 
 * OpenZFS current - Tracks the newest MAJOR.MINOR release. This branch
 includes support for the latest OpenZFS features and recently releases
@@ -24,7 +24,7 @@ zfs_ids_to_path_LDADD = \
 	libzfs.la
 
 
-zhack_CPPFLAGS = $(AM_CPPFLAGS) $(LIBZPOOL_CPPFLAGS)
+zhack_CPPFLAGS = $(AM_CPPFLAGS) $(FORCEDEBUG_CPPFLAGS)
 
 sbin_PROGRAMS += zhack
 CPPCHECKTARGETS += zhack
@@ -39,7 +39,9 @@ zhack_LDADD = \
 
 
 ztest_CFLAGS = $(AM_CFLAGS) $(KERNEL_CFLAGS)
-ztest_CPPFLAGS = $(AM_CPPFLAGS) $(LIBZPOOL_CPPFLAGS)
+# Get rid of compiler warning for unchecked truncating snprintfs on gcc 7.1.1
+ztest_CFLAGS += $(NO_FORMAT_TRUNCATION)
+ztest_CPPFLAGS = $(AM_CPPFLAGS) $(FORCEDEBUG_CPPFLAGS)
 
 sbin_PROGRAMS += ztest
 CPPCHECKTARGETS += ztest
cmd/arc_summary (164 changed lines)
@@ -1,5 +1,4 @@
 #!/usr/bin/env python3
-# SPDX-License-Identifier: BSD-2-Clause
 #
 # Copyright (c) 2008 Ben Rockwood <benr@cuddletech.com>,
 # Copyright (c) 2010 Martin Matuska <mm@FreeBSD.org>,
@@ -261,34 +260,33 @@ def draw_graph(kstats_dict):
     arc_stats = isolate_section('arcstats', kstats_dict)
 
     GRAPH_INDENT = ' '*4
-    GRAPH_WIDTH = 70
-    arc_max = int(arc_stats['c_max'])
+    GRAPH_WIDTH = 60
     arc_size = f_bytes(arc_stats['size'])
-    arc_perc = f_perc(arc_stats['size'], arc_max)
-    data_size = f_bytes(arc_stats['data_size'])
-    meta_size = f_bytes(arc_stats['metadata_size'])
+    arc_perc = f_perc(arc_stats['size'], arc_stats['c_max'])
+    mfu_size = f_bytes(arc_stats['mfu_size'])
+    mru_size = f_bytes(arc_stats['mru_size'])
+    meta_size = f_bytes(arc_stats['arc_meta_used'])
+    dnode_limit = f_bytes(arc_stats['arc_dnode_limit'])
     dnode_size = f_bytes(arc_stats['dnode_size'])
 
-    info_form = ('ARC: {0} ({1}) Data: {2} Meta: {3} Dnode: {4}')
-    info_line = info_form.format(arc_size, arc_perc, data_size, meta_size,
-                                 dnode_size)
+    info_form = ('ARC: {0} ({1}) MFU: {2} MRU: {3} META: {4} '
+                 'DNODE {5} ({6})')
+    info_line = info_form.format(arc_size, arc_perc, mfu_size, mru_size,
+                                 meta_size, dnode_size, dnode_limit)
     info_spc = ' '*int((GRAPH_WIDTH-len(info_line))/2)
     info_line = GRAPH_INDENT+info_spc+info_line
 
     graph_line = GRAPH_INDENT+'+'+('-'*(GRAPH_WIDTH-2))+'+'
 
-    arc_perc = float(int(arc_stats['size'])/arc_max)
-    data_perc = float(int(arc_stats['data_size'])/arc_max)
-    meta_perc = float(int(arc_stats['metadata_size'])/arc_max)
-    dnode_perc = float(int(arc_stats['dnode_size'])/arc_max)
+    mfu_perc = float(int(arc_stats['mfu_size'])/int(arc_stats['c_max']))
+    mru_perc = float(int(arc_stats['mru_size'])/int(arc_stats['c_max']))
+    arc_perc = float(int(arc_stats['size'])/int(arc_stats['c_max']))
     total_ticks = float(arc_perc)*GRAPH_WIDTH
-    data_ticks = data_perc*GRAPH_WIDTH
-    meta_ticks = meta_perc*GRAPH_WIDTH
-    dnode_ticks = dnode_perc*GRAPH_WIDTH
-    other_ticks = total_ticks-(data_ticks+meta_ticks+dnode_ticks)
+    mfu_ticks = mfu_perc*GRAPH_WIDTH
+    mru_ticks = mru_perc*GRAPH_WIDTH
+    other_ticks = total_ticks-(mfu_ticks+mru_ticks)
 
-    core_form = 'D'*int(data_ticks)+'M'*int(meta_ticks)+'N'*int(dnode_ticks)+\
-        'O'*int(other_ticks)
+    core_form = 'F'*int(mfu_ticks)+'R'*int(mru_ticks)+'O'*int(other_ticks)
     core_spc = ' '*(GRAPH_WIDTH-(2+len(core_form)))
     core_line = GRAPH_INDENT+'|'+core_form+core_spc+'|'
 
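The draw_graph change above swaps the bar's components: the newer column breaks the ARC bar into data/metadata/dnode ticks, while 2.2.2 breaks it into MFU/MRU ticks. As a rough illustration of the tick arithmetic, here is a minimal standalone Python sketch (the sizes are made-up example values, not output of the script itself):

    # Sketch of the newer draw_graph bar arithmetic; all sizes hypothetical.
    GRAPH_WIDTH = 70

    def arc_bar(size, data, meta, dnode, c_max):
        # Each component's share of c_max, expressed in character ticks.
        data_ticks = data / c_max * GRAPH_WIDTH
        meta_ticks = meta / c_max * GRAPH_WIDTH
        dnode_ticks = dnode / c_max * GRAPH_WIDTH
        total_ticks = size / c_max * GRAPH_WIDTH
        other_ticks = total_ticks - (data_ticks + meta_ticks + dnode_ticks)
        core = ('D' * int(data_ticks) + 'M' * int(meta_ticks) +
                'N' * int(dnode_ticks) + 'O' * int(other_ticks))
        return '|' + core.ljust(GRAPH_WIDTH - 2) + '|'

    print(arc_bar(size=6 << 30, data=4 << 30, meta=1 << 30,
                  dnode=1 << 28, c_max=8 << 30))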
@@ -538,87 +536,56 @@ def section_arc(kstats_dict):
 
     arc_stats = isolate_section('arcstats', kstats_dict)
 
-    memory_all = arc_stats['memory_all_bytes']
-    memory_free = arc_stats['memory_free_bytes']
-    memory_avail = arc_stats['memory_available_bytes']
+    throttle = arc_stats['memory_throttle_count']
+    if throttle == '0':
+        health = 'HEALTHY'
+    else:
+        health = 'THROTTLED'
+
+    prt_1('ARC status:', health)
+    prt_i1('Memory throttle count:', throttle)
+    print()
+
     arc_size = arc_stats['size']
     arc_target_size = arc_stats['c']
     arc_max = arc_stats['c_max']
     arc_min = arc_stats['c_min']
-    dnode_limit = arc_stats['arc_dnode_limit']
-
-    print('ARC status:')
-    prt_i1('Total memory size:', f_bytes(memory_all))
-    prt_i2('Min target size:', f_perc(arc_min, memory_all), f_bytes(arc_min))
-    prt_i2('Max target size:', f_perc(arc_max, memory_all), f_bytes(arc_max))
-    prt_i2('Target size (adaptive):',
-           f_perc(arc_size, arc_max), f_bytes(arc_target_size))
-    prt_i2('Current size:', f_perc(arc_size, arc_max), f_bytes(arc_size))
-    prt_i1('Free memory size:', f_bytes(memory_free))
-    prt_i1('Available memory size:', f_bytes(memory_avail))
-    print()
-
-    compressed_size = arc_stats['compressed_size']
-    overhead_size = arc_stats['overhead_size']
-    bonus_size = arc_stats['bonus_size']
-    dnode_size = arc_stats['dnode_size']
-    dbuf_size = arc_stats['dbuf_size']
-    hdr_size = arc_stats['hdr_size']
-    l2_hdr_size = arc_stats['l2_hdr_size']
-    abd_chunk_waste_size = arc_stats['abd_chunk_waste_size']
-
-    prt_1('ARC structural breakdown (current size):', f_bytes(arc_size))
-    prt_i2('Compressed size:',
-           f_perc(compressed_size, arc_size), f_bytes(compressed_size))
-    prt_i2('Overhead size:',
-           f_perc(overhead_size, arc_size), f_bytes(overhead_size))
-    prt_i2('Bonus size:',
-           f_perc(bonus_size, arc_size), f_bytes(bonus_size))
-    prt_i2('Dnode size:',
-           f_perc(dnode_size, arc_size), f_bytes(dnode_size))
-    prt_i2('Dbuf size:',
-           f_perc(dbuf_size, arc_size), f_bytes(dbuf_size))
-    prt_i2('Header size:',
-           f_perc(hdr_size, arc_size), f_bytes(hdr_size))
-    prt_i2('L2 header size:',
-           f_perc(l2_hdr_size, arc_size), f_bytes(l2_hdr_size))
-    prt_i2('ABD chunk waste size:',
-           f_perc(abd_chunk_waste_size, arc_size), f_bytes(abd_chunk_waste_size))
-    print()
-
     meta = arc_stats['meta']
     pd = arc_stats['pd']
     pm = arc_stats['pm']
-    data_size = arc_stats['data_size']
-    metadata_size = arc_stats['metadata_size']
     anon_data = arc_stats['anon_data']
     anon_metadata = arc_stats['anon_metadata']
     mfu_data = arc_stats['mfu_data']
     mfu_metadata = arc_stats['mfu_metadata']
-    mfu_edata = arc_stats['mfu_evictable_data']
-    mfu_emetadata = arc_stats['mfu_evictable_metadata']
     mru_data = arc_stats['mru_data']
     mru_metadata = arc_stats['mru_metadata']
-    mru_edata = arc_stats['mru_evictable_data']
-    mru_emetadata = arc_stats['mru_evictable_metadata']
     mfug_data = arc_stats['mfu_ghost_data']
     mfug_metadata = arc_stats['mfu_ghost_metadata']
     mrug_data = arc_stats['mru_ghost_data']
     mrug_metadata = arc_stats['mru_ghost_metadata']
     unc_data = arc_stats['uncached_data']
     unc_metadata = arc_stats['uncached_metadata']
+    bonus_size = arc_stats['bonus_size']
+    dnode_limit = arc_stats['arc_dnode_limit']
+    dnode_size = arc_stats['dnode_size']
+    dbuf_size = arc_stats['dbuf_size']
+    hdr_size = arc_stats['hdr_size']
+    l2_hdr_size = arc_stats['l2_hdr_size']
+    abd_chunk_waste_size = arc_stats['abd_chunk_waste_size']
+    target_size_ratio = '{0}:1'.format(int(arc_max) // int(arc_min))
+
+    prt_2('ARC size (current):',
+          f_perc(arc_size, arc_max), f_bytes(arc_size))
+    prt_i2('Target size (adaptive):',
+           f_perc(arc_target_size, arc_max), f_bytes(arc_target_size))
+    prt_i2('Min size (hard limit):',
+           f_perc(arc_min, arc_max), f_bytes(arc_min))
+    prt_i2('Max size (high water):',
+           target_size_ratio, f_bytes(arc_max))
     caches_size = int(anon_data)+int(anon_metadata)+\
         int(mfu_data)+int(mfu_metadata)+int(mru_data)+int(mru_metadata)+\
         int(unc_data)+int(unc_metadata)
 
-    prt_1('ARC types breakdown (compressed + overhead):', f_bytes(caches_size))
-    prt_i2('Data size:',
-           f_perc(data_size, caches_size), f_bytes(data_size))
-    prt_i2('Metadata size:',
-           f_perc(metadata_size, caches_size), f_bytes(metadata_size))
-    print()
-
-    prt_1('ARC states breakdown (compressed + overhead):', f_bytes(caches_size))
     prt_i2('Anonymous data size:',
            f_perc(anon_data, caches_size), f_bytes(anon_data))
     prt_i2('Anonymous metadata size:',
@@ -629,41 +596,50 @@ def section_arc(kstats_dict):
            f_bytes(v / 65536 * caches_size / 65536))
     prt_i2('MFU data size:',
            f_perc(mfu_data, caches_size), f_bytes(mfu_data))
-    prt_i2('MFU evictable data size:',
-           f_perc(mfu_edata, caches_size), f_bytes(mfu_edata))
     prt_i1('MFU ghost data size:', f_bytes(mfug_data))
     v = (s-int(pm))*int(meta)/s
     prt_i2('MFU metadata target:', f_perc(v, s),
            f_bytes(v / 65536 * caches_size / 65536))
     prt_i2('MFU metadata size:',
            f_perc(mfu_metadata, caches_size), f_bytes(mfu_metadata))
-    prt_i2('MFU evictable metadata size:',
-           f_perc(mfu_emetadata, caches_size), f_bytes(mfu_emetadata))
     prt_i1('MFU ghost metadata size:', f_bytes(mfug_metadata))
     v = int(pd)*(s-int(meta))/s
     prt_i2('MRU data target:', f_perc(v, s),
            f_bytes(v / 65536 * caches_size / 65536))
     prt_i2('MRU data size:',
            f_perc(mru_data, caches_size), f_bytes(mru_data))
-    prt_i2('MRU evictable data size:',
-           f_perc(mru_edata, caches_size), f_bytes(mru_edata))
     prt_i1('MRU ghost data size:', f_bytes(mrug_data))
     v = int(pm)*int(meta)/s
     prt_i2('MRU metadata target:', f_perc(v, s),
            f_bytes(v / 65536 * caches_size / 65536))
     prt_i2('MRU metadata size:',
            f_perc(mru_metadata, caches_size), f_bytes(mru_metadata))
-    prt_i2('MRU evictable metadata size:',
-           f_perc(mru_emetadata, caches_size), f_bytes(mru_emetadata))
     prt_i1('MRU ghost metadata size:', f_bytes(mrug_metadata))
     prt_i2('Uncached data size:',
            f_perc(unc_data, caches_size), f_bytes(unc_data))
     prt_i2('Uncached metadata size:',
            f_perc(unc_metadata, caches_size), f_bytes(unc_metadata))
+    prt_i2('Bonus size:',
+           f_perc(bonus_size, arc_size), f_bytes(bonus_size))
+    prt_i2('Dnode cache target:',
+           f_perc(dnode_limit, arc_max), f_bytes(dnode_limit))
+    prt_i2('Dnode cache size:',
+           f_perc(dnode_size, dnode_limit), f_bytes(dnode_size))
+    prt_i2('Dbuf size:',
+           f_perc(dbuf_size, arc_size), f_bytes(dbuf_size))
+    prt_i2('Header size:',
+           f_perc(hdr_size, arc_size), f_bytes(hdr_size))
+    prt_i2('L2 header size:',
+           f_perc(l2_hdr_size, arc_size), f_bytes(l2_hdr_size))
+    prt_i2('ABD chunk waste size:',
+           f_perc(abd_chunk_waste_size, arc_size), f_bytes(abd_chunk_waste_size))
     print()
 
     print('ARC hash breakdown:')
-    prt_i1('Elements:', f_hits(arc_stats['hash_elements']))
+    prt_i1('Elements max:', f_hits(arc_stats['hash_elements_max']))
+    prt_i2('Elements current:',
+           f_perc(arc_stats['hash_elements'], arc_stats['hash_elements_max']),
+           f_hits(arc_stats['hash_elements']))
     prt_i1('Collisions:', f_hits(arc_stats['hash_collisions']))
 
     prt_i1('Chain max:', f_hits(arc_stats['hash_chain_max']))
@@ -671,9 +647,6 @@ def section_arc(kstats_dict):
     print()
 
     print('ARC misc:')
-    prt_i1('Memory throttles:', arc_stats['memory_throttle_count'])
-    prt_i1('Memory direct reclaims:', arc_stats['memory_direct_count'])
-    prt_i1('Memory indirect reclaims:', arc_stats['memory_indirect_count'])
     prt_i1('Deleted:', f_hits(arc_stats['deleted']))
     prt_i1('Mutex misses:', f_hits(arc_stats['mutex_miss']))
     prt_i1('Eviction skips:', f_hits(arc_stats['evict_skip']))
@@ -820,27 +793,18 @@ def section_dmu(kstats_dict):
 
     zfetch_stats = isolate_section('zfetchstats', kstats_dict)
 
-    zfetch_access_total = int(zfetch_stats['hits']) +\
-        int(zfetch_stats['future']) + int(zfetch_stats['stride']) +\
-        int(zfetch_stats['past']) + int(zfetch_stats['misses'])
+    zfetch_access_total = int(zfetch_stats['hits'])+int(zfetch_stats['misses'])
 
     prt_1('DMU predictive prefetcher calls:', f_hits(zfetch_access_total))
     prt_i2('Stream hits:',
            f_perc(zfetch_stats['hits'], zfetch_access_total),
            f_hits(zfetch_stats['hits']))
-    future = int(zfetch_stats['future']) + int(zfetch_stats['stride'])
-    prt_i2('Hits ahead of stream:', f_perc(future, zfetch_access_total),
-           f_hits(future))
-    prt_i2('Hits behind stream:',
-           f_perc(zfetch_stats['past'], zfetch_access_total),
-           f_hits(zfetch_stats['past']))
     prt_i2('Stream misses:',
            f_perc(zfetch_stats['misses'], zfetch_access_total),
            f_hits(zfetch_stats['misses']))
     prt_i2('Streams limit reached:',
            f_perc(zfetch_stats['max_streams'], zfetch_stats['misses']),
            f_hits(zfetch_stats['max_streams']))
-    prt_i1('Stream strides:', f_hits(zfetch_stats['stride']))
     prt_i1('Prefetches issued', f_hits(zfetch_stats['io_issued']))
     print()
 
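The section_dmu change is purely an accounting difference: the newer code classifies every prefetcher outcome (hits, future, stride, past, misses), while 2.2.2 only counts plain hits and misses. A sketch with hypothetical counts makes the two totals concrete:

    # Sketch: both zfetch call totals, using made-up zfetchstats values.
    zfetch = {'hits': 900, 'future': 30, 'stride': 20, 'past': 10, 'misses': 40}

    # Newer code counts every classified outcome:
    total_new = (zfetch['hits'] + zfetch['future'] + zfetch['stride'] +
                 zfetch['past'] + zfetch['misses'])

    # 2.2.2 counts only plain hits and misses:
    total_old = zfetch['hits'] + zfetch['misses']

    print(total_new, total_old)  # 1000 940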
cmd/arcstat.in (335 changed lines)
@@ -1,5 +1,4 @@
 #!/usr/bin/env @PYTHON_SHEBANG@
-# SPDX-License-Identifier: CDDL-1.0
 #
 # Print out ZFS ARC Statistics exported via kstat(1)
 # For a definition of fields, or usage, use arcstat -v
@@ -153,108 +152,18 @@ cols = {
     "l2asize": [7, 1024, "Actual (compressed) size of the L2ARC"],
     "l2size": [6, 1024, "Size of the L2ARC"],
     "l2bytes": [7, 1024, "Bytes read per second from the L2ARC"],
-    "l2wbytes": [8, 1024, "Bytes written per second to the L2ARC"],
     "grow": [4, 1000, "ARC grow disabled"],
     "need": [5, 1024, "ARC reclaim need"],
     "free": [5, 1024, "ARC free memory"],
     "avail": [5, 1024, "ARC available memory"],
     "waste": [5, 1024, "Wasted memory due to round up to pagesize"],
-    "ztotal": [6, 1000, "zfetch total prefetcher calls per second"],
-    "zhits": [5, 1000, "zfetch stream hits per second"],
-    "zahead": [6, 1000, "zfetch hits ahead of streams per second"],
-    "zpast": [5, 1000, "zfetch hits behind streams per second"],
-    "zmisses": [7, 1000, "zfetch stream misses per second"],
-    "zmax": [4, 1000, "zfetch limit reached per second"],
-    "zfuture": [7, 1000, "zfetch stream future per second"],
-    "zstride": [7, 1000, "zfetch stream strides per second"],
-    "zissued": [7, 1000, "zfetch prefetches issued per second"],
-    "zactive": [7, 1000, "zfetch prefetches active per second"],
 }
 
-# ARC structural breakdown from arc_summary
-structfields = {
-    "cmp": ["compressed", "Compressed"],
-    "ovh": ["overhead", "Overhead"],
-    "bon": ["bonus", "Bonus"],
-    "dno": ["dnode", "Dnode"],
-    "dbu": ["dbuf", "Dbuf"],
-    "hdr": ["hdr", "Header"],
-    "l2h": ["l2_hdr", "L2 header"],
-    "abd": ["abd_chunk_waste", "ABD chunk waste"],
-}
-structstats = {            # size stats
-    "percent": "size",     # percentage of this value
-    "sz": ["_size", "size"],
-}
-
-# ARC types breakdown from arc_summary
-typefields = {
-    "data": ["data", "ARC data"],
-    "meta": ["metadata", "ARC metadata"],
-}
-typestats = {              # size stats
-    "percent": "cachessz", # percentage of this value
-    "tg": ["_target", "target"],
-    "sz": ["_size", "size"],
-}
-
-# ARC states breakdown from arc_summary
-statefields = {
-    "ano": ["anon", "Anonymous"],
-    "mfu": ["mfu", "MFU"],
-    "mru": ["mru", "MRU"],
-    "unc": ["uncached", "Uncached"],
-}
-targetstats = {
-    "percent": "cachessz",    # percentage of this value
-    "fields": ["mfu", "mru"], # only applicable to these fields
-    "tg": ["_target", "target"],
-    "dt": ["_data_target", "data target"],
-    "mt": ["_metadata_target", "metadata target"],
-}
-statestats = {             # size stats
-    "percent": "cachessz", # percentage of this value
-    "sz": ["_size", "size"],
-    "da": ["_data", "data size"],
-    "me": ["_metadata", "metadata size"],
-    "ed": ["_evictable_data", "evictable data size"],
-    "em": ["_evictable_metadata", "evictable metadata size"],
-}
-ghoststats = {
-    "fields": ["mfu", "mru"], # only applicable to these fields
-    "gsz": ["_ghost_size", "ghost size"],
-    "gd": ["_ghost_data", "ghost data size"],
-    "gm": ["_ghost_metadata", "ghost metadata size"],
-}
-
-# fields and stats
-fieldstats = [
-    [structfields, structstats],
-    [typefields, typestats],
-    [statefields, targetstats, statestats, ghoststats],
-]
-for fs in fieldstats:
-    fields, stats = fs[0], fs[1:]
-    for field, fieldval in fields.items():
-        for group in stats:
-            for stat, statval in group.items():
-                if stat in ["fields", "percent"] or \
-                    ("fields" in group and field not in group["fields"]):
-                    continue
-                colname = field + stat
-                coldesc = fieldval[1] + " " + statval[1]
-                cols[colname] = [len(colname), 1024, coldesc]
-                if "percent" in group:
-                    cols[colname + "%"] = [len(colname) + 1, 100, \
-                        coldesc + " percentage"]
-
 v = {}
 hdr = ["time", "read", "ddread", "ddh%", "dmread", "dmh%", "pread", "ph%",
        "size", "c", "avail"]
 xhdr = ["time", "mfu", "mru", "mfug", "mrug", "unc", "eskip", "mtxmis",
        "dread", "pread", "read"]
-zhdr = ["time", "ztotal", "zhits", "zahead", "zpast", "zmisses", "zmax",
-       "zfuture", "zstride", "zissued", "zactive"]
 sint = 1               # Default interval is 1 second
 count = 1              # Default count is 1
 hdr_intr = 20          # Print header every 20 lines of output
@@ -279,8 +188,6 @@ if sys.platform.startswith('freebsd'):
 
         k = [ctl for ctl in sysctl.filter('kstat.zfs.misc.arcstats')
              if ctl.type != sysctl.CTLTYPE_NODE]
-        k += [ctl for ctl in sysctl.filter('kstat.zfs.misc.zfetchstats')
-             if ctl.type != sysctl.CTLTYPE_NODE]
 
         if not k:
             sys.exit(1)
@@ -292,28 +199,19 @@ if sys.platform.startswith('freebsd'):
                 continue
 
             name, value = s.name, s.value
-            if "arcstats" in name:
-                # Trims 'kstat.zfs.misc.arcstats' from the name
-                kstat[name[24:]] = int(value)
-            else:
-                kstat["zfetch_" + name[27:]] = int(value)
+            # Trims 'kstat.zfs.misc.arcstats' from the name
+            kstat[name[24:]] = int(value)
 
 elif sys.platform.startswith('linux'):
     def kstat_update():
         global kstat
 
-        k1 = [line.strip() for line in open('/proc/spl/kstat/zfs/arcstats')]
+        k = [line.strip() for line in open('/proc/spl/kstat/zfs/arcstats')]
 
-        k2 = ["zfetch_" + line.strip() for line in
-              open('/proc/spl/kstat/zfs/zfetchstats')]
-
-        if k1 is None or k2 is None:
+        if not k:
             sys.exit(1)
 
-        del k1[0:2]
-        del k2[0:2]
-        k = k1 + k2
+        del k[0:2]
         kstat = {}
 
         for s in k:
@@ -341,7 +239,6 @@ def usage():
     sys.stderr.write("\t -v : List all possible field headers and definitions"
                      "\n")
     sys.stderr.write("\t -x : Print extended stats\n")
-    sys.stderr.write("\t -z : Print zfetch stats\n")
     sys.stderr.write("\t -f : Specify specific fields to print (see -v)\n")
     sys.stderr.write("\t -o : Redirect output to the specified file\n")
     sys.stderr.write("\t -s : Override default field separator with custom "
@@ -365,29 +262,6 @@ def snap_stats():
     kstat_update()
 
     cur = kstat
-
-    # fill in additional values from arc_summary
-    cur["caches_size"] = caches_size = cur["anon_data"]+cur["anon_metadata"]+\
-        cur["mfu_data"]+cur["mfu_metadata"]+cur["mru_data"]+cur["mru_metadata"]+\
-        cur["uncached_data"]+cur["uncached_metadata"]
-    s = 4294967296
-    pd = cur["pd"]
-    pm = cur["pm"]
-    meta = cur["meta"]
-    v = (s-int(pd))*(s-int(meta))/s
-    cur["mfu_data_target"] = v / 65536 * caches_size / 65536
-    v = (s-int(pm))*int(meta)/s
-    cur["mfu_metadata_target"] = v / 65536 * caches_size / 65536
-    v = int(pd)*(s-int(meta))/s
-    cur["mru_data_target"] = v / 65536 * caches_size / 65536
-    v = int(pm)*int(meta)/s
-    cur["mru_metadata_target"] = v / 65536 * caches_size / 65536
-
-    cur["data_target"] = cur["mfu_data_target"] + cur["mru_data_target"]
-    cur["metadata_target"] = cur["mfu_metadata_target"] + cur["mru_metadata_target"]
-    cur["mfu_target"] = cur["mfu_data_target"] + cur["mfu_metadata_target"]
-    cur["mru_target"] = cur["mru_data_target"] + cur["mru_metadata_target"]
-
     for key in cur:
         if re.match(key, "class"):
             continue
@@ -397,34 +271,31 @@ def snap_stats():
             d[key] = cur[key]
 
 
-def isint(num):
-    if isinstance(num, float):
-        return num.is_integer()
-    if isinstance(num, int):
-        return True
-    return False
-
-
 def prettynum(sz, scale, num=0):
-    suffix = ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']
+    suffix = [' ', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']
     index = 0
+    save = 0
 
     # Special case for date field
     if scale == -1:
         return "%s" % num
 
-    if scale != 100:
-        while abs(num) > scale and index < 5:
-            num = num / scale
-            index += 1
+    # Rounding error, return 0
+    elif 0 < num < 1:
+        num = 0
 
-    width = sz - (0 if index == 0 else 1)
-    intlen = len("%.0f" % num)  # %.0f rounds to nearest int
-    if sint == 1 and isint(num) or width < intlen + 2:
-        decimal = 0
+    while abs(num) > scale and index < 5:
+        save = num
+        num = num / scale
+        index += 1
+
+    if index == 0:
+        return "%*d" % (sz, num)
+
+    if abs(save / scale) < 10:
+        return "%*.1f%s" % (sz - 1, num, suffix[index])
     else:
-        decimal = 1
-    return "%*.*f%s" % (width, decimal, num, suffix[index])
+        return "%*d%s" % (sz - 1, num, suffix[index])
 
 
 def print_values():
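For comparison, here is a standalone re-implementation of both prettynum variants shown above. It is a simplified sketch: the newer script also consults the sampling interval through isint(), which is approximated here with a plain is_integer() check.

    def pretty_new(sz, scale, num):      # left/newer column above (simplified)
        suffix = ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']
        index = 0
        while abs(num) > scale and index < 5:
            num /= scale
            index += 1
        width = sz - (0 if index == 0 else 1)
        decimal = 0 if float(num).is_integer() else 1
        return "%*.*f%s" % (width, decimal, num, suffix[index])

    def pretty_old(sz, scale, num):      # right/2.2.2 column above
        suffix = [' ', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']
        index, save = 0, 0
        while abs(num) > scale and index < 5:
            save, num = num, num / scale
            index += 1
        if index == 0:
            return "%*d" % (sz, num)
        if abs(save / scale) < 10:
            return "%*.1f%s" % (sz - 1, num, suffix[index])
        return "%*d%s" % (sz - 1, num, suffix[index])

    print(pretty_new(6, 1024, 3200), pretty_old(6, 1024, 3200))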
@@ -486,7 +357,6 @@ def init():
     global count
     global hdr
     global xhdr
-    global zhdr
     global opfile
     global sep
     global out
@@ -498,17 +368,15 @@ def init():
     xflag = False
     hflag = False
     vflag = False
-    zflag = False
     i = 1
 
     try:
         opts, args = getopt.getopt(
             sys.argv[1:],
-            "axzo:hvs:f:p",
+            "axo:hvs:f:p",
             [
                 "all",
                 "extended",
-                "zfetch",
                 "outfile",
                 "help",
                 "verbose",
@@ -542,15 +410,13 @@ def init():
             i += 1
         if opt in ('-p', '--parsable'):
             pretty_print = False
-        if opt in ('-z', '--zfetch'):
-            zflag = True
         i += 1
 
     argv = sys.argv[i:]
     sint = int(argv[0]) if argv else sint
     count = int(argv[1]) if len(argv) > 1 else (0 if len(argv) > 0 else 1)
 
-    if hflag or (xflag and zflag) or ((zflag or xflag) and desired_cols):
+    if hflag or (xflag and desired_cols):
         usage()
 
     if vflag:
@@ -559,9 +425,6 @@ def init():
     if xflag:
         hdr = xhdr
 
-    if zflag:
-        hdr = zhdr
-
     update_hdr_intr()
 
     # check if L2ARC exists
@@ -614,148 +477,120 @@ def calculate():
 
     v = dict()
     v["time"] = time.strftime("%H:%M:%S", time.localtime())
-    v["hits"] = d["hits"] / sint
-    v["iohs"] = d["iohits"] / sint
-    v["miss"] = d["misses"] / sint
+    v["hits"] = d["hits"] // sint
+    v["iohs"] = d["iohits"] // sint
+    v["miss"] = d["misses"] // sint
     v["read"] = v["hits"] + v["iohs"] + v["miss"]
-    v["hit%"] = 100 * v["hits"] / v["read"] if v["read"] > 0 else 0
-    v["ioh%"] = 100 * v["iohs"] / v["read"] if v["read"] > 0 else 0
+    v["hit%"] = 100 * v["hits"] // v["read"] if v["read"] > 0 else 0
+    v["ioh%"] = 100 * v["iohs"] // v["read"] if v["read"] > 0 else 0
     v["miss%"] = 100 - v["hit%"] - v["ioh%"] if v["read"] > 0 else 0
 
-    v["dhit"] = (d["demand_data_hits"] + d["demand_metadata_hits"]) / sint
-    v["dioh"] = (d["demand_data_iohits"] + d["demand_metadata_iohits"]) / sint
-    v["dmis"] = (d["demand_data_misses"] + d["demand_metadata_misses"]) / sint
+    v["dhit"] = (d["demand_data_hits"] + d["demand_metadata_hits"]) // sint
+    v["dioh"] = (d["demand_data_iohits"] + d["demand_metadata_iohits"]) // sint
+    v["dmis"] = (d["demand_data_misses"] + d["demand_metadata_misses"]) // sint
 
     v["dread"] = v["dhit"] + v["dioh"] + v["dmis"]
-    v["dh%"] = 100 * v["dhit"] / v["dread"] if v["dread"] > 0 else 0
-    v["di%"] = 100 * v["dioh"] / v["dread"] if v["dread"] > 0 else 0
+    v["dh%"] = 100 * v["dhit"] // v["dread"] if v["dread"] > 0 else 0
+    v["di%"] = 100 * v["dioh"] // v["dread"] if v["dread"] > 0 else 0
     v["dm%"] = 100 - v["dh%"] - v["di%"] if v["dread"] > 0 else 0
 
-    v["ddhit"] = d["demand_data_hits"] / sint
-    v["ddioh"] = d["demand_data_iohits"] / sint
-    v["ddmis"] = d["demand_data_misses"] / sint
+    v["ddhit"] = d["demand_data_hits"] // sint
+    v["ddioh"] = d["demand_data_iohits"] // sint
+    v["ddmis"] = d["demand_data_misses"] // sint
 
     v["ddread"] = v["ddhit"] + v["ddioh"] + v["ddmis"]
-    v["ddh%"] = 100 * v["ddhit"] / v["ddread"] if v["ddread"] > 0 else 0
-    v["ddi%"] = 100 * v["ddioh"] / v["ddread"] if v["ddread"] > 0 else 0
+    v["ddh%"] = 100 * v["ddhit"] // v["ddread"] if v["ddread"] > 0 else 0
+    v["ddi%"] = 100 * v["ddioh"] // v["ddread"] if v["ddread"] > 0 else 0
     v["ddm%"] = 100 - v["ddh%"] - v["ddi%"] if v["ddread"] > 0 else 0
 
-    v["dmhit"] = d["demand_metadata_hits"] / sint
-    v["dmioh"] = d["demand_metadata_iohits"] / sint
-    v["dmmis"] = d["demand_metadata_misses"] / sint
+    v["dmhit"] = d["demand_metadata_hits"] // sint
+    v["dmioh"] = d["demand_metadata_iohits"] // sint
+    v["dmmis"] = d["demand_metadata_misses"] // sint
 
     v["dmread"] = v["dmhit"] + v["dmioh"] + v["dmmis"]
-    v["dmh%"] = 100 * v["dmhit"] / v["dmread"] if v["dmread"] > 0 else 0
-    v["dmi%"] = 100 * v["dmioh"] / v["dmread"] if v["dmread"] > 0 else 0
+    v["dmh%"] = 100 * v["dmhit"] // v["dmread"] if v["dmread"] > 0 else 0
+    v["dmi%"] = 100 * v["dmioh"] // v["dmread"] if v["dmread"] > 0 else 0
     v["dmm%"] = 100 - v["dmh%"] - v["dmi%"] if v["dmread"] > 0 else 0
 
-    v["phit"] = (d["prefetch_data_hits"] + d["prefetch_metadata_hits"]) / sint
+    v["phit"] = (d["prefetch_data_hits"] + d["prefetch_metadata_hits"]) // sint
     v["pioh"] = (d["prefetch_data_iohits"] +
-                 d["prefetch_metadata_iohits"]) / sint
+                 d["prefetch_metadata_iohits"]) // sint
     v["pmis"] = (d["prefetch_data_misses"] +
-                 d["prefetch_metadata_misses"]) / sint
+                 d["prefetch_metadata_misses"]) // sint
 
     v["pread"] = v["phit"] + v["pioh"] + v["pmis"]
-    v["ph%"] = 100 * v["phit"] / v["pread"] if v["pread"] > 0 else 0
-    v["pi%"] = 100 * v["pioh"] / v["pread"] if v["pread"] > 0 else 0
+    v["ph%"] = 100 * v["phit"] // v["pread"] if v["pread"] > 0 else 0
+    v["pi%"] = 100 * v["pioh"] // v["pread"] if v["pread"] > 0 else 0
     v["pm%"] = 100 - v["ph%"] - v["pi%"] if v["pread"] > 0 else 0
 
-    v["pdhit"] = d["prefetch_data_hits"] / sint
-    v["pdioh"] = d["prefetch_data_iohits"] / sint
-    v["pdmis"] = d["prefetch_data_misses"] / sint
+    v["pdhit"] = d["prefetch_data_hits"] // sint
+    v["pdioh"] = d["prefetch_data_iohits"] // sint
+    v["pdmis"] = d["prefetch_data_misses"] // sint
 
     v["pdread"] = v["pdhit"] + v["pdioh"] + v["pdmis"]
-    v["pdh%"] = 100 * v["pdhit"] / v["pdread"] if v["pdread"] > 0 else 0
-    v["pdi%"] = 100 * v["pdioh"] / v["pdread"] if v["pdread"] > 0 else 0
+    v["pdh%"] = 100 * v["pdhit"] // v["pdread"] if v["pdread"] > 0 else 0
+    v["pdi%"] = 100 * v["pdioh"] // v["pdread"] if v["pdread"] > 0 else 0
     v["pdm%"] = 100 - v["pdh%"] - v["pdi%"] if v["pdread"] > 0 else 0
 
-    v["pmhit"] = d["prefetch_metadata_hits"] / sint
-    v["pmioh"] = d["prefetch_metadata_iohits"] / sint
-    v["pmmis"] = d["prefetch_metadata_misses"] / sint
+    v["pmhit"] = d["prefetch_metadata_hits"] // sint
+    v["pmioh"] = d["prefetch_metadata_iohits"] // sint
+    v["pmmis"] = d["prefetch_metadata_misses"] // sint
 
     v["pmread"] = v["pmhit"] + v["pmioh"] + v["pmmis"]
-    v["pmh%"] = 100 * v["pmhit"] / v["pmread"] if v["pmread"] > 0 else 0
-    v["pmi%"] = 100 * v["pmioh"] / v["pmread"] if v["pmread"] > 0 else 0
+    v["pmh%"] = 100 * v["pmhit"] // v["pmread"] if v["pmread"] > 0 else 0
+    v["pmi%"] = 100 * v["pmioh"] // v["pmread"] if v["pmread"] > 0 else 0
     v["pmm%"] = 100 - v["pmh%"] - v["pmi%"] if v["pmread"] > 0 else 0
 
     v["mhit"] = (d["prefetch_metadata_hits"] +
-                 d["demand_metadata_hits"]) / sint
+                 d["demand_metadata_hits"]) // sint
     v["mioh"] = (d["prefetch_metadata_iohits"] +
-                 d["demand_metadata_iohits"]) / sint
+                 d["demand_metadata_iohits"]) // sint
     v["mmis"] = (d["prefetch_metadata_misses"] +
-                 d["demand_metadata_misses"]) / sint
+                 d["demand_metadata_misses"]) // sint
 
     v["mread"] = v["mhit"] + v["mioh"] + v["mmis"]
-    v["mh%"] = 100 * v["mhit"] / v["mread"] if v["mread"] > 0 else 0
-    v["mi%"] = 100 * v["mioh"] / v["mread"] if v["mread"] > 0 else 0
+    v["mh%"] = 100 * v["mhit"] // v["mread"] if v["mread"] > 0 else 0
+    v["mi%"] = 100 * v["mioh"] // v["mread"] if v["mread"] > 0 else 0
     v["mm%"] = 100 - v["mh%"] - v["mi%"] if v["mread"] > 0 else 0
 
     v["arcsz"] = cur["size"]
     v["size"] = cur["size"]
     v["c"] = cur["c"]
-    v["mfu"] = d["mfu_hits"] / sint
-    v["mru"] = d["mru_hits"] / sint
-    v["mrug"] = d["mru_ghost_hits"] / sint
-    v["mfug"] = d["mfu_ghost_hits"] / sint
-    v["unc"] = d["uncached_hits"] / sint
-    v["eskip"] = d["evict_skip"] / sint
-    v["el2skip"] = d["evict_l2_skip"] / sint
-    v["el2cach"] = d["evict_l2_cached"] / sint
-    v["el2el"] = d["evict_l2_eligible"] / sint
-    v["el2mfu"] = d["evict_l2_eligible_mfu"] / sint
-    v["el2mru"] = d["evict_l2_eligible_mru"] / sint
-    v["el2inel"] = d["evict_l2_ineligible"] / sint
-    v["mtxmis"] = d["mutex_miss"] / sint
+    v["mfu"] = d["mfu_hits"] // sint
+    v["mru"] = d["mru_hits"] // sint
+    v["mrug"] = d["mru_ghost_hits"] // sint
+    v["mfug"] = d["mfu_ghost_hits"] // sint
+    v["unc"] = d["uncached_hits"] // sint
+    v["eskip"] = d["evict_skip"] // sint
+    v["el2skip"] = d["evict_l2_skip"] // sint
+    v["el2cach"] = d["evict_l2_cached"] // sint
+    v["el2el"] = d["evict_l2_eligible"] // sint
+    v["el2mfu"] = d["evict_l2_eligible_mfu"] // sint
+    v["el2mru"] = d["evict_l2_eligible_mru"] // sint
+    v["el2inel"] = d["evict_l2_ineligible"] // sint
+    v["mtxmis"] = d["mutex_miss"] // sint
-    v["ztotal"] = (d["zfetch_hits"] + d["zfetch_future"] + d["zfetch_stride"] +
-                   d["zfetch_past"] + d["zfetch_misses"]) / sint
-    v["zhits"] = d["zfetch_hits"] / sint
-    v["zahead"] = (d["zfetch_future"] + d["zfetch_stride"]) / sint
-    v["zpast"] = d["zfetch_past"] / sint
-    v["zmisses"] = d["zfetch_misses"] / sint
-    v["zmax"] = d["zfetch_max_streams"] / sint
-    v["zfuture"] = d["zfetch_future"] / sint
-    v["zstride"] = d["zfetch_stride"] / sint
-    v["zissued"] = d["zfetch_io_issued"] / sint
-    v["zactive"] = d["zfetch_io_active"] / sint
 
-    # ARC structural breakdown, ARC types breakdown, ARC states breakdown
-    v["cachessz"] = cur["caches_size"]
-    for fs in fieldstats:
-        fields, stats = fs[0], fs[1:]
-        for field, fieldval in fields.items():
-            for group in stats:
-                for stat, statval in group.items():
-                    if stat in ["fields", "percent"] or \
-                        ("fields" in group and field not in group["fields"]):
-                        continue
-                    colname = field + stat
-                    v[colname] = cur[fieldval[0] + statval[0]]
-                    if "percent" in group:
-                        v[colname + "%"] = 100 * v[colname] / \
-                            v[group["percent"]] if v[group["percent"]] > 0 else 0
-
     if l2exist:
-        v["l2hits"] = d["l2_hits"] / sint
-        v["l2miss"] = d["l2_misses"] / sint
+        v["l2hits"] = d["l2_hits"] // sint
+        v["l2miss"] = d["l2_misses"] // sint
         v["l2read"] = v["l2hits"] + v["l2miss"]
-        v["l2hit%"] = 100 * v["l2hits"] / v["l2read"] if v["l2read"] > 0 else 0
+        v["l2hit%"] = 100 * v["l2hits"] // v["l2read"] if v["l2read"] > 0 else 0
 
         v["l2miss%"] = 100 - v["l2hit%"] if v["l2read"] > 0 else 0
         v["l2asize"] = cur["l2_asize"]
         v["l2size"] = cur["l2_size"]
-        v["l2bytes"] = d["l2_read_bytes"] / sint
-        v["l2wbytes"] = d["l2_write_bytes"] / sint
+        v["l2bytes"] = d["l2_read_bytes"] // sint
 
         v["l2pref"] = cur["l2_prefetch_asize"]
         v["l2mfu"] = cur["l2_mfu_asize"]
         v["l2mru"] = cur["l2_mru_asize"]
         v["l2data"] = cur["l2_bufc_data_asize"]
         v["l2meta"] = cur["l2_bufc_metadata_asize"]
-        v["l2pref%"] = 100 * v["l2pref"] / v["l2asize"]
-        v["l2mfu%"] = 100 * v["l2mfu"] / v["l2asize"]
-        v["l2mru%"] = 100 * v["l2mru"] / v["l2asize"]
-        v["l2data%"] = 100 * v["l2data"] / v["l2asize"]
-        v["l2meta%"] = 100 * v["l2meta"] / v["l2asize"]
+        v["l2pref%"] = 100 * v["l2pref"] // v["l2asize"]
+        v["l2mfu%"] = 100 * v["l2mfu"] // v["l2asize"]
+        v["l2mru%"] = 100 * v["l2mru"] // v["l2asize"]
+        v["l2data%"] = 100 * v["l2data"] // v["l2asize"]
+        v["l2meta%"] = 100 * v["l2meta"] // v["l2asize"]
 
     v["grow"] = 0 if cur["arc_no_grow"] else 1
     v["need"] = cur["arc_need_free"]
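Both versions of calculate() follow the same rate model: d[] holds per-field deltas between two kstat snapshots, and each per-second column divides a delta by the sampling interval. The '/' versus '//' difference above only decides whether fractional rates survive. A short sketch with hypothetical deltas:

    # Sketch of arcstat's rate computation; d[] values are hypothetical.
    sint = 5                        # sampling interval in seconds
    d = {'hits': 5123, 'misses': 77}

    hits_true = d['hits'] / sint    # 1024.6  (true division, newer column)
    hits_int = d['hits'] // sint    # 1024    (floor division, 2.2.2 column)
    read = d['hits'] + d['misses']
    hit_pct = 100 * d['hits'] // read if read > 0 else 0
    print(hits_true, hits_int, hit_pct)   # 1024.6 1024 98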
@@ -1,5 +1,4 @@
 #!/usr/bin/env @PYTHON_SHEBANG@
-# SPDX-License-Identifier: CDDL-1.0
 #
 # Print out statistics for all cached dmu buffers. This information
 # is available through the dbufs kstat and may be post-processed as
@@ -38,7 +37,7 @@ import re
 
 bhdr = ["pool", "objset", "object", "level", "blkid", "offset", "dbsize"]
 bxhdr = ["pool", "objset", "object", "level", "blkid", "offset", "dbsize",
-        "usize", "meta", "state", "dbholds", "dbc", "list", "atype", "flags",
+        "meta", "state", "dbholds", "dbc", "list", "atype", "flags",
         "count", "asize", "access", "mru", "gmru", "mfu", "gmfu", "l2",
         "l2_dattr", "l2_asize", "l2_comp", "aholds", "dtype", "btype",
         "data_bs", "meta_bs", "bsize", "lvls", "dholds", "blocks", "dsize"]
@@ -48,17 +47,17 @@ dhdr = ["pool", "objset", "object", "dtype", "cached"]
 dxhdr = ["pool", "objset", "object", "dtype", "btype", "data_bs", "meta_bs",
         "bsize", "lvls", "dholds", "blocks", "dsize", "cached", "direct",
         "indirect", "bonus", "spill"]
-dincompat = ["level", "blkid", "offset", "dbsize", "usize", "meta", "state",
-             "dbholds", "dbc", "list", "atype", "flags", "count", "asize",
-             "access", "mru", "gmru", "mfu", "gmfu", "l2", "l2_dattr",
-             "l2_asize", "l2_comp", "aholds"]
+dincompat = ["level", "blkid", "offset", "dbsize", "meta", "state", "dbholds",
+             "dbc", "list", "atype", "flags", "count", "asize", "access",
+             "mru", "gmru", "mfu", "gmfu", "l2", "l2_dattr", "l2_asize",
+             "l2_comp", "aholds"]
 
 thdr = ["pool", "objset", "dtype", "cached"]
 txhdr = ["pool", "objset", "dtype", "cached", "direct", "indirect",
         "bonus", "spill"]
-tincompat = ["object", "level", "blkid", "offset", "dbsize", "usize", "meta",
-             "state", "dbc", "dbholds", "list", "atype", "flags", "count",
-             "asize", "access", "mru", "gmru", "mfu", "gmfu", "l2", "l2_dattr",
+tincompat = ["object", "level", "blkid", "offset", "dbsize", "meta", "state",
+             "dbc", "dbholds", "list", "atype", "flags", "count", "asize",
+             "access", "mru", "gmru", "mfu", "gmfu", "l2", "l2_dattr",
              "l2_asize", "l2_comp", "aholds", "btype", "data_bs", "meta_bs",
              "bsize", "lvls", "dholds", "blocks", "dsize"]
 
@@ -71,7 +70,6 @@ cols = {
     "blkid": [8, -1, "block number of buffer"],
    "offset": [12, 1024, "offset in object of buffer"],
     "dbsize": [7, 1024, "size of buffer"],
-    "usize": [7, 1024, "size of attached user data"],
     "meta": [4, -1, "is this buffer metadata?"],
     "state": [5, -1, "state of buffer (read, cached, etc)"],
     "dbholds": [7, 1000, "number of holds on buffer"],
@@ -401,7 +399,6 @@ def update_dict(d, k, line, labels):
     key = line[labels[k]]
 
     dbsize = int(line[labels['dbsize']])
-    usize = int(line[labels['usize']])
     blkid = int(line[labels['blkid']])
     level = int(line[labels['level']])
 
@@ -419,7 +416,7 @@ def update_dict(d, k, line, labels):
         d[pool][objset][key]['indirect'] = 0
         d[pool][objset][key]['spill'] = 0
 
-    d[pool][objset][key]['cached'] += dbsize + usize
+    d[pool][objset][key]['cached'] += dbsize
 
     if blkid == -1:
         d[pool][objset][key]['bonus'] += dbsize
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * CDDL HEADER START
  *
@@ -270,7 +269,8 @@ main(int argc, char **argv)
         return (MOUNT_USAGE);
     }
 
-    if (sloppy || libzfs_envvar_is_set("ZFS_MOUNT_HELPER")) {
+    if (!zfsutil || sloppy ||
+        libzfs_envvar_is_set("ZFS_MOUNT_HELPER")) {
         zfs_adjust_mount_options(zhp, mntpoint, mntopts, mtabopt);
     }
 
@@ -337,7 +337,7 @@ main(int argc, char **argv)
         dataset, mntpoint, mntflags, zfsflags, mntopts, mtabopt);
 
     if (!fake) {
-        if (!remount && !sloppy &&
+        if (zfsutil && !sloppy &&
             !libzfs_envvar_is_set("ZFS_MOUNT_HELPER")) {
             error = zfs_mount_at(zhp, mntopts, mntflags, mntpoint);
             if (error) {
@@ -1,5 +1,5 @@
 raidz_test_CFLAGS = $(AM_CFLAGS) $(KERNEL_CFLAGS)
-raidz_test_CPPFLAGS = $(AM_CPPFLAGS) $(LIBZPOOL_CPPFLAGS)
+raidz_test_CPPFLAGS = $(AM_CPPFLAGS) $(FORCEDEBUG_CPPFLAGS)
 
 bin_PROGRAMS += raidz_test
 CPPCHECKTARGETS += raidz_test
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * CDDL HEADER START
  *
@@ -85,10 +84,10 @@ run_gen_bench_impl(const char *impl)
 
     if (rto_opts.rto_expand) {
         rm_bench = vdev_raidz_map_alloc_expanded(
-            &zio_bench,
+            zio_bench.io_abd,
+            zio_bench.io_size, zio_bench.io_offset,
             rto_opts.rto_ashift, ncols+1, ncols,
-            fn+1, rto_opts.rto_expand_offset,
-            0, B_FALSE);
+            fn+1, rto_opts.rto_expand_offset);
     } else {
         rm_bench = vdev_raidz_map_alloc(&zio_bench,
             BENCH_ASHIFT, ncols, fn+1);
@@ -173,10 +172,10 @@ run_rec_bench_impl(const char *impl)
 
     if (rto_opts.rto_expand) {
         rm_bench = vdev_raidz_map_alloc_expanded(
-            &zio_bench,
+            zio_bench.io_abd,
+            zio_bench.io_size, zio_bench.io_offset,
             BENCH_ASHIFT, ncols+1, ncols,
-            PARITY_PQR,
-            rto_opts.rto_expand_offset, 0, B_FALSE);
+            PARITY_PQR, rto_opts.rto_expand_offset);
     } else {
         rm_bench = vdev_raidz_map_alloc(&zio_bench,
             BENCH_ASHIFT, ncols, PARITY_PQR);
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * CDDL HEADER START
  *
@@ -328,12 +327,14 @@ init_raidz_golden_map(raidz_test_opts_t *opts, const int parity)
 
     if (opts->rto_expand) {
         opts->rm_golden =
-            vdev_raidz_map_alloc_expanded(opts->zio_golden,
+            vdev_raidz_map_alloc_expanded(opts->zio_golden->io_abd,
+            opts->zio_golden->io_size, opts->zio_golden->io_offset,
             opts->rto_ashift, total_ncols+1, total_ncols,
-            parity, opts->rto_expand_offset, 0, B_FALSE);
-        rm_test = vdev_raidz_map_alloc_expanded(zio_test,
+            parity, opts->rto_expand_offset);
+        rm_test = vdev_raidz_map_alloc_expanded(zio_test->io_abd,
+            zio_test->io_size, zio_test->io_offset,
             opts->rto_ashift, total_ncols+1, total_ncols,
-            parity, opts->rto_expand_offset, 0, B_FALSE);
+            parity, opts->rto_expand_offset);
     } else {
         opts->rm_golden = vdev_raidz_map_alloc(opts->zio_golden,
             opts->rto_ashift, total_ncols, parity);
@@ -360,6 +361,187 @@ init_raidz_golden_map(raidz_test_opts_t *opts, const int parity)
 	return (err);
 }
 
+/*
+ * If reflow is not in progress, reflow_offset should be UINT64_MAX.
+ * For each row, if the row is entirely before reflow_offset, it will
+ * come from the new location.  Otherwise this row will come from the
+ * old location.  Therefore, rows that straddle the reflow_offset will
+ * come from the old location.
+ *
+ * NOTE: Until raidz expansion is implemented this function is only
+ * needed by raidz_test.c to the multi-row raid_map_t functionality.
+ */
+raidz_map_t *
+vdev_raidz_map_alloc_expanded(abd_t *abd, uint64_t size, uint64_t offset,
+    uint64_t ashift, uint64_t physical_cols, uint64_t logical_cols,
+    uint64_t nparity, uint64_t reflow_offset)
+{
+	/* The zio's size in units of the vdev's minimum sector size. */
+	uint64_t s = size >> ashift;
+	uint64_t q, r, bc, devidx, asize = 0, tot;
+
+	/*
+	 * "Quotient": The number of data sectors for this stripe on all but
+	 * the "big column" child vdevs that also contain "remainder" data.
+	 * AKA "full rows"
+	 */
+	q = s / (logical_cols - nparity);
+
+	/*
+	 * "Remainder": The number of partial stripe data sectors in this I/O.
+	 * This will add a sector to some, but not all, child vdevs.
+	 */
+	r = s - q * (logical_cols - nparity);
+
+	/* The number of "big columns" - those which contain remainder data. */
+	bc = (r == 0 ? 0 : r + nparity);
+
+	/*
+	 * The total number of data and parity sectors associated with
+	 * this I/O.
+	 */
+	tot = s + nparity * (q + (r == 0 ? 0 : 1));
+
+	/* How many rows contain data (not skip) */
+	uint64_t rows = howmany(tot, logical_cols);
+	int cols = MIN(tot, logical_cols);
+
+	raidz_map_t *rm = kmem_zalloc(offsetof(raidz_map_t, rm_row[rows]),
+	    KM_SLEEP);
+	rm->rm_nrows = rows;
+
+	for (uint64_t row = 0; row < rows; row++) {
+		raidz_row_t *rr = kmem_alloc(offsetof(raidz_row_t,
+		    rr_col[cols]), KM_SLEEP);
+		rm->rm_row[row] = rr;
+
+		/* The starting RAIDZ (parent) vdev sector of the row. */
+		uint64_t b = (offset >> ashift) + row * logical_cols;
+
+		/*
+		 * If we are in the middle of a reflow, and any part of this
+		 * row has not been copied, then use the old location of
+		 * this row.
+		 */
+		int row_phys_cols = physical_cols;
+		if (b + (logical_cols - nparity) > reflow_offset >> ashift)
+			row_phys_cols--;
+
+		/* starting child of this row */
+		uint64_t child_id = b % row_phys_cols;
+		/* The starting byte offset on each child vdev. */
+		uint64_t child_offset = (b / row_phys_cols) << ashift;
+
+		/*
+		 * We set cols to the entire width of the block, even
+		 * if this row is shorter.  This is needed because parity
+		 * generation (for Q and R) needs to know the entire width,
+		 * because it treats the short row as though it was
+		 * full-width (and the "phantom" sectors were zero-filled).
+		 *
+		 * Another approach to this would be to set cols shorter
+		 * (to just the number of columns that we might do i/o to)
+		 * and have another mechanism to tell the parity generation
+		 * about the "entire width".  Reconstruction (at least
+		 * vdev_raidz_reconstruct_general()) would also need to
+		 * know about the "entire width".
+		 */
+		rr->rr_cols = cols;
+		rr->rr_bigcols = bc;
+		rr->rr_missingdata = 0;
+		rr->rr_missingparity = 0;
+		rr->rr_firstdatacol = nparity;
+		rr->rr_abd_empty = NULL;
+		rr->rr_nempty = 0;
+
+		for (int c = 0; c < rr->rr_cols; c++, child_id++) {
+			if (child_id >= row_phys_cols) {
+				child_id -= row_phys_cols;
+				child_offset += 1ULL << ashift;
+			}
+			rr->rr_col[c].rc_devidx = child_id;
+			rr->rr_col[c].rc_offset = child_offset;
+			rr->rr_col[c].rc_orig_data = NULL;
+			rr->rr_col[c].rc_error = 0;
+			rr->rr_col[c].rc_tried = 0;
+			rr->rr_col[c].rc_skipped = 0;
+			rr->rr_col[c].rc_need_orig_restore = B_FALSE;
+
+			uint64_t dc = c - rr->rr_firstdatacol;
+			if (c < rr->rr_firstdatacol) {
+				rr->rr_col[c].rc_size = 1ULL << ashift;
+				rr->rr_col[c].rc_abd =
+				    abd_alloc_linear(rr->rr_col[c].rc_size,
+				    B_TRUE);
+			} else if (row == rows - 1 && bc != 0 && c >= bc) {
+				/*
+				 * Past the end, this for parity generation.
+				 */
+				rr->rr_col[c].rc_size = 0;
+				rr->rr_col[c].rc_abd = NULL;
+			} else {
+				/*
+				 * "data column" (col excluding parity)
+				 * Add an ASCII art diagram here
+				 */
+				uint64_t off;
+
+				if (c < bc || r == 0) {
+					off = dc * rows + row;
+				} else {
+					off = r * rows +
+					    (dc - r) * (rows - 1) + row;
+				}
+				rr->rr_col[c].rc_size = 1ULL << ashift;
+				rr->rr_col[c].rc_abd = abd_get_offset_struct(
+				    &rr->rr_col[c].rc_abdstruct,
+				    abd, off << ashift, 1 << ashift);
+			}
+
+			asize += rr->rr_col[c].rc_size;
+		}
+		/*
+		 * If all data stored spans all columns, there's a danger that
+		 * parity will always be on the same device and, since parity
+		 * isn't read during normal operation, that that device's I/O
+		 * bandwidth won't be used effectively. We therefore switch
+		 * the parity every 1MB.
+		 *
+		 * ...at least that was, ostensibly, the theory. As a practical
+		 * matter unless we juggle the parity between all devices
+		 * evenly, we won't see any benefit. Further, occasional writes
+		 * that aren't a multiple of the LCM of the number of children
+		 * and the minimum stripe width are sufficient to avoid pessimal
+		 * behavior. Unfortunately, this decision created an implicit
+		 * on-disk format requirement that we need to support for all
+		 * eternity, but only for single-parity RAID-Z.
+		 *
+		 * If we intend to skip a sector in the zeroth column for
+		 * padding we must make sure to note this swap. We will never
+		 * intend to skip the first column since at least one data and
+		 * one parity column must appear in each row.
+		 */
+		if (rr->rr_firstdatacol == 1 && rr->rr_cols > 1 &&
+		    (offset & (1ULL << 20))) {
+			ASSERT(rr->rr_cols >= 2);
+			ASSERT(rr->rr_col[0].rc_size == rr->rr_col[1].rc_size);
+			devidx = rr->rr_col[0].rc_devidx;
+			uint64_t o = rr->rr_col[0].rc_offset;
+			rr->rr_col[0].rc_devidx = rr->rr_col[1].rc_devidx;
+			rr->rr_col[0].rc_offset = rr->rr_col[1].rc_offset;
+			rr->rr_col[1].rc_devidx = devidx;
+			rr->rr_col[1].rc_offset = o;
+		}
+
+	}
+	ASSERT3U(asize, ==, tot << ashift);
+
+	/* init RAIDZ parity ops */
+	rm->rm_ops = vdev_raidz_math_get_ops();
+
+	return (rm);
+}
+
 static raidz_map_t *
 init_raidz_map(raidz_test_opts_t *opts, zio_t **zio, const int parity)
 {
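The quotient/remainder arithmetic in the function added above fixes the map geometry before any column is filled in. The following standalone sketch replays that math for an assumed 4-data/1-parity layout with 512-byte sectors; the constants are illustrative, not taken from the change itself.

/*
 * Standalone sketch of the geometry math used by
 * vdev_raidz_map_alloc_expanded(); illustrative constants only.
 */
#include <stdio.h>
#include <stdint.h>

#define	HOWMANY(x, y)	(((x) + (y) - 1) / (y))
#define	MIN(a, b)	((a) < (b) ? (a) : (b))

int
main(void)
{
	uint64_t ashift = 9;		/* 512-byte sectors */
	uint64_t logical_cols = 5;	/* assumed 4 data + 1 parity */
	uint64_t nparity = 1;
	uint64_t size = 11 << ashift;	/* an 11-sector write */

	uint64_t s = size >> ashift;			/* 11 sectors */
	uint64_t q = s / (logical_cols - nparity);	/* 2 full rows */
	uint64_t r = s - q * (logical_cols - nparity);	/* 3 leftover */
	uint64_t bc = (r == 0 ? 0 : r + nparity);	/* 4 big columns */
	uint64_t tot = s + nparity * (q + (r == 0 ? 0 : 1));	/* 14 */
	uint64_t rows = HOWMANY(tot, logical_cols);	/* 3 rows */
	uint64_t cols = MIN(tot, logical_cols);		/* 5 columns */

	printf("s=%llu q=%llu r=%llu bc=%llu tot=%llu rows=%llu cols=%llu\n",
	    (unsigned long long)s, (unsigned long long)q,
	    (unsigned long long)r, (unsigned long long)bc,
	    (unsigned long long)tot, (unsigned long long)rows,
	    (unsigned long long)cols);
	return (0);
}

For an 11-sector write this yields two full rows plus a partial third row, with the remainder data landing in the four "big columns".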
@@ -379,9 +561,10 @@ init_raidz_map(raidz_test_opts_t *opts, zio_t **zio, const int parity)
 	init_zio_abd(*zio);
 
 	if (opts->rto_expand) {
-		rm = vdev_raidz_map_alloc_expanded(*zio,
+		rm = vdev_raidz_map_alloc_expanded((*zio)->io_abd,
+		    (*zio)->io_size, (*zio)->io_offset,
 		    opts->rto_ashift, total_ncols+1, total_ncols,
-		    parity, opts->rto_expand_offset, 0, B_FALSE);
+		    parity, opts->rto_expand_offset);
 	} else {
 		rm = vdev_raidz_map_alloc(*zio, opts->rto_ashift,
 		    total_ncols, parity);
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * CDDL HEADER START
  *
@@ -120,4 +119,7 @@ void init_zio_abd(zio_t *zio);
 
 void run_raidz_benchmark(void);
 
+struct raidz_map *vdev_raidz_map_alloc_expanded(abd_t *, uint64_t, uint64_t,
+    uint64_t, uint64_t, uint64_t, uint64_t, uint64_t);
+
 #endif /* RAIDZ_TEST_H */
@@ -1,4 +1,4 @@
-zdb_CPPFLAGS = $(AM_CPPFLAGS) $(LIBZPOOL_CPPFLAGS)
+zdb_CPPFLAGS = $(AM_CPPFLAGS) $(FORCEDEBUG_CPPFLAGS)
 zdb_CFLAGS = $(AM_CFLAGS) $(LIBCRYPTO_CFLAGS)
 
 sbin_PROGRAMS += zdb
@@ -10,7 +10,6 @@ zdb_SOURCES = \
 	%D%/zdb_il.c
 
 zdb_LDADD = \
-	libzdb.la \
 	libzpool.la \
 	libzfs_core.la \
 	libnvpair.la
cmd/zdb/zdb.c: 1381 changed lines (file diff suppressed because it is too large)
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * CDDL HEADER START
  *
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * CDDL HEADER START
  *
@@ -65,22 +64,21 @@ static void
 zil_prt_rec_create(zilog_t *zilog, int txtype, const void *arg)
 {
 	(void) zilog;
-	const lr_create_t *lrc = arg;
-	const _lr_create_t *lr = &lrc->lr_create;
+	const lr_create_t *lr = arg;
 	time_t crtime = lr->lr_crtime[0];
-	const char *name, *link;
+	char *name, *link;
 	lr_attr_t *lrattr;
 
-	name = (const char *)&lrc->lr_data[0];
+	name = (char *)(lr + 1);
 
 	if (lr->lr_common.lrc_txtype == TX_CREATE_ATTR ||
 	    lr->lr_common.lrc_txtype == TX_MKDIR_ATTR) {
-		lrattr = (lr_attr_t *)&lrc->lr_data[0];
+		lrattr = (lr_attr_t *)(lr + 1);
 		name += ZIL_XVAT_SIZE(lrattr->lr_attr_masksize);
 	}
 
 	if (txtype == TX_SYMLINK) {
-		link = (const char *)&lrc->lr_data[strlen(name) + 1];
+		link = name + strlen(name) + 1;
 		(void) printf("%s%s -> %s\n", tab_prefix, name, link);
 	} else if (txtype != TX_MKXATTR) {
 		(void) printf("%s%s\n", tab_prefix, name);
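Both sides of this hunk read the name from the bytes that immediately follow the fixed-size log record: one side through a flexible lr_data[] member, the other by stepping a typed pointer past the struct with (lr + 1). A minimal sketch of the underlying layout, using a hypothetical record type rather than the real lr_create_t:

/*
 * Illustrative only: addressing a string stored after a fixed-size
 * record header. The struct here is hypothetical.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct fake_lr {
	unsigned long lr_doid;
	/* char lr_data[]; -- the flexible-array-member variant */
} fake_lr_t;

int
main(void)
{
	const char *name = "file0";
	fake_lr_t *lr = malloc(sizeof (*lr) + strlen(name) + 1);
	if (lr == NULL)
		return (1);
	lr->lr_doid = 42;
	/* the payload lives immediately after the fixed header */
	memcpy(lr + 1, name, strlen(name) + 1);

	/* (lr + 1) and a trailing lr_data[0] member address the same bytes */
	printf("doid %lu, name %s\n", lr->lr_doid, (char *)(lr + 1));
	free(lr);
	return (0);
}

The two addressing styles are equivalent; the flexible-array form is simply more explicit about the trailing payload.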
@@ -105,7 +103,7 @@ zil_prt_rec_remove(zilog_t *zilog, int txtype, const void *arg)
 	const lr_remove_t *lr = arg;
 
 	(void) printf("%sdoid %llu, name %s\n", tab_prefix,
-	    (u_longlong_t)lr->lr_doid, (const char *)&lr->lr_data[0]);
+	    (u_longlong_t)lr->lr_doid, (char *)(lr + 1));
 }
 
 static void
@@ -116,17 +114,16 @@ zil_prt_rec_link(zilog_t *zilog, int txtype, const void *arg)
 
 	(void) printf("%sdoid %llu, link_obj %llu, name %s\n", tab_prefix,
 	    (u_longlong_t)lr->lr_doid, (u_longlong_t)lr->lr_link_obj,
-	    (const char *)&lr->lr_data[0]);
+	    (char *)(lr + 1));
 }
 
 static void
 zil_prt_rec_rename(zilog_t *zilog, int txtype, const void *arg)
 {
 	(void) zilog, (void) txtype;
-	const lr_rename_t *lrr = arg;
-	const _lr_rename_t *lr = &lrr->lr_rename;
-	const char *snm = (const char *)&lrr->lr_data[0];
-	const char *tnm = (const char *)&lrr->lr_data[strlen(snm) + 1];
+	const lr_rename_t *lr = arg;
+	char *snm = (char *)(lr + 1);
+	char *tnm = snm + strlen(snm) + 1;
 
 	(void) printf("%ssdoid %llu, tdoid %llu\n", tab_prefix,
 	    (u_longlong_t)lr->lr_sdoid, (u_longlong_t)lr->lr_tdoid);
@@ -171,25 +168,23 @@ zil_prt_rec_write(zilog_t *zilog, int txtype, const void *arg)
 	    (u_longlong_t)lr->lr_foid, (u_longlong_t)lr->lr_offset,
 	    (u_longlong_t)lr->lr_length);
 
-	if (txtype == TX_WRITE2 || verbose < 4)
+	if (txtype == TX_WRITE2 || verbose < 5)
 		return;
 
 	if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
 		(void) printf("%shas blkptr, %s\n", tab_prefix,
-		    !BP_IS_HOLE(bp) && BP_GET_LOGICAL_BIRTH(bp) >=
-		    spa_min_claim_txg(zilog->zl_spa) ?
+		    !BP_IS_HOLE(bp) &&
+		    bp->blk_birth >= spa_min_claim_txg(zilog->zl_spa) ?
 		    "will claim" : "won't claim");
 		print_log_bp(bp, tab_prefix);
 
-		if (verbose < 5)
-			return;
 		if (BP_IS_HOLE(bp)) {
 			(void) printf("\t\t\tLSIZE 0x%llx\n",
 			    (u_longlong_t)BP_GET_LSIZE(bp));
 			(void) printf("%s<hole>\n", tab_prefix);
 			return;
 		}
-		if (BP_GET_LOGICAL_BIRTH(bp) < zilog->zl_header->zh_claim_txg) {
+		if (bp->blk_birth < zilog->zl_header->zh_claim_txg) {
 			(void) printf("%s<block already committed>\n",
 			    tab_prefix);
 			return;
@@ -207,12 +202,9 @@ zil_prt_rec_write(zilog_t *zilog, int txtype, const void *arg)
 		if (error)
 			goto out;
 	} else {
-		if (verbose < 5)
-			return;
-
 		/* data is stored after the end of the lr_write record */
 		data = abd_alloc(lr->lr_length, B_FALSE);
-		abd_copy_from_buf(data, &lr->lr_data[0], lr->lr_length);
+		abd_copy_from_buf(data, lr + 1, lr->lr_length);
 	}
 
 	(void) printf("%s", tab_prefix);
@@ -225,28 +217,6 @@ out:
 	abd_free(data);
 }
 
-static void
-zil_prt_rec_write_enc(zilog_t *zilog, int txtype, const void *arg)
-{
-	(void) txtype;
-	const lr_write_t *lr = arg;
-	const blkptr_t *bp = &lr->lr_blkptr;
-	int verbose = MAX(dump_opt['d'], dump_opt['i']);
-
-	(void) printf("%s(encrypted)\n", tab_prefix);
-
-	if (verbose < 4)
-		return;
-
-	if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
-		(void) printf("%shas blkptr, %s\n", tab_prefix,
-		    !BP_IS_HOLE(bp) && BP_GET_LOGICAL_BIRTH(bp) >=
-		    spa_min_claim_txg(zilog->zl_spa) ?
-		    "will claim" : "won't claim");
-		print_log_bp(bp, tab_prefix);
-	}
-}
-
 static void
 zil_prt_rec_truncate(zilog_t *zilog, int txtype, const void *arg)
 {
@@ -310,7 +280,7 @@ zil_prt_rec_setsaxattr(zilog_t *zilog, int txtype, const void *arg)
 	(void) zilog, (void) txtype;
 	const lr_setsaxattr_t *lr = arg;
 
-	const char *name = (const char *)&lr->lr_data[0];
+	char *name = (char *)(lr + 1);
 	(void) printf("%sfoid %llu\n", tab_prefix,
 	    (u_longlong_t)lr->lr_foid);
 
@@ -319,7 +289,7 @@ zil_prt_rec_setsaxattr(zilog_t *zilog, int txtype, const void *arg)
 		(void) printf("%sXAT_VALUE NULL\n", tab_prefix);
 	} else {
 		(void) printf("%sXAT_VALUE ", tab_prefix);
-		const char *val = (const char *)&lr->lr_data[strlen(name) + 1];
+		char *val = name + (strlen(name) + 1);
 		for (int i = 0; i < lr->lr_size; i++) {
 			(void) printf("%c", *val);
 			val++;
@@ -342,34 +312,11 @@ zil_prt_rec_clone_range(zilog_t *zilog, int txtype, const void *arg)
 {
 	(void) zilog, (void) txtype;
 	const lr_clone_range_t *lr = arg;
-	int verbose = MAX(dump_opt['d'], dump_opt['i']);
 
 	(void) printf("%sfoid %llu, offset %llx, length %llx, blksize %llx\n",
 	    tab_prefix, (u_longlong_t)lr->lr_foid, (u_longlong_t)lr->lr_offset,
 	    (u_longlong_t)lr->lr_length, (u_longlong_t)lr->lr_blksz);
 
-	if (verbose < 4)
-		return;
-
-	for (unsigned int i = 0; i < lr->lr_nbps; i++) {
-		(void) printf("%s[%u/%llu] ", tab_prefix, i + 1,
-		    (u_longlong_t)lr->lr_nbps);
-		print_log_bp(&lr->lr_bps[i], "");
-	}
-}
-
-static void
-zil_prt_rec_clone_range_enc(zilog_t *zilog, int txtype, const void *arg)
-{
-	(void) zilog, (void) txtype;
-	const lr_clone_range_t *lr = arg;
-	int verbose = MAX(dump_opt['d'], dump_opt['i']);
-
-	(void) printf("%s(encrypted)\n", tab_prefix);
-
-	if (verbose < 4)
-		return;
-
 	for (unsigned int i = 0; i < lr->lr_nbps; i++) {
 		(void) printf("%s[%u/%llu] ", tab_prefix, i + 1,
 		    (u_longlong_t)lr->lr_nbps);
@@ -380,7 +327,6 @@ zil_prt_rec_clone_range_enc(zilog_t *zilog, int txtype, const void *arg)
 typedef void (*zil_prt_rec_func_t)(zilog_t *, int, const void *);
 typedef struct zil_rec_info {
 	zil_prt_rec_func_t zri_print;
-	zil_prt_rec_func_t zri_print_enc;
 	const char *zri_name;
 	uint64_t zri_count;
 } zil_rec_info_t;
@@ -395,9 +341,7 @@ static zil_rec_info_t zil_rec_info[TX_MAX_TYPE] = {
 	{.zri_print = zil_prt_rec_remove, .zri_name = "TX_RMDIR "},
 	{.zri_print = zil_prt_rec_link, .zri_name = "TX_LINK "},
 	{.zri_print = zil_prt_rec_rename, .zri_name = "TX_RENAME "},
-	{.zri_print = zil_prt_rec_write,
-	    .zri_print_enc = zil_prt_rec_write_enc,
-	    .zri_name = "TX_WRITE "},
+	{.zri_print = zil_prt_rec_write, .zri_name = "TX_WRITE "},
 	{.zri_print = zil_prt_rec_truncate, .zri_name = "TX_TRUNCATE "},
 	{.zri_print = zil_prt_rec_setattr, .zri_name = "TX_SETATTR "},
 	{.zri_print = zil_prt_rec_acl, .zri_name = "TX_ACL_V0 "},
@@ -414,7 +358,6 @@ static zil_rec_info_t zil_rec_info[TX_MAX_TYPE] = {
 	{.zri_print = zil_prt_rec_rename, .zri_name = "TX_RENAME_EXCHANGE "},
 	{.zri_print = zil_prt_rec_rename, .zri_name = "TX_RENAME_WHITEOUT "},
 	{.zri_print = zil_prt_rec_clone_range,
-	    .zri_print_enc = zil_prt_rec_clone_range_enc,
 	    .zri_name = "TX_CLONE_RANGE "},
 };
 
@@ -441,8 +384,6 @@ print_log_record(zilog_t *zilog, const lr_t *lr, void *arg, uint64_t claim_txg)
 	if (txtype && verbose >= 3) {
 		if (!zilog->zl_os->os_encrypted) {
 			zil_rec_info[txtype].zri_print(zilog, txtype, lr);
-		} else if (zil_rec_info[txtype].zri_print_enc) {
-			zil_rec_info[txtype].zri_print_enc(zilog, txtype, lr);
 		} else {
 			(void) printf("%s(encrypted)\n", tab_prefix);
 		}
@@ -476,7 +417,7 @@ print_log_block(zilog_t *zilog, const blkptr_t *bp, void *arg,
 
 	if (claim_txg != 0)
 		claim = "already claimed";
-	else if (BP_GET_LOGICAL_BIRTH(bp) >= spa_min_claim_txg(zilog->zl_spa))
+	else if (bp->blk_birth >= spa_min_claim_txg(zilog->zl_spa))
 		claim = "will claim";
 	else
 		claim = "won't claim";
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * CDDL HEADER START
  *
@@ -23,7 +22,6 @@
  * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
  *
  * Copyright (c) 2016, Intel Corporation.
- * Copyright (c) 2023, Klara Inc.
 */
 
 /*
@@ -233,6 +231,28 @@ fmd_prop_get_int32(fmd_hdl_t *hdl, const char *name)
 	if (strcmp(name, "spare_on_remove") == 0)
 		return (1);
 
+	if (strcmp(name, "io_N") == 0 || strcmp(name, "checksum_N") == 0)
+		return (10);	/* N = 10 events */
+
+	return (0);
+}
+
+int64_t
+fmd_prop_get_int64(fmd_hdl_t *hdl, const char *name)
+{
+	(void) hdl;
+
+	/*
+	 * These can be looked up in mp->modinfo->fmdi_props
+	 * For now we just hard code for phase 2. In the
+	 * future, there can be a ZED based override.
+	 */
+	if (strcmp(name, "remove_timeout") == 0)
+		return (15ULL * 1000ULL * 1000ULL * 1000ULL);	/* 15 sec */
+
+	if (strcmp(name, "io_T") == 0 || strcmp(name, "checksum_T") == 0)
+		return (1000ULL * 1000ULL * 1000ULL * 600ULL);	/* 10 min */
+
 	return (0);
 }
 
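The shim added above returns fixed nanosecond values. A quick standalone check that the hard-coded products match the intended 15-second and 10-minute spans; SEC2NSEC is re-declared locally here as a stand-in for the real macro:

#include <stdio.h>
#include <stdint.h>

#define	SEC2NSEC(s)	((uint64_t)(s) * 1000000000ULL)

int
main(void)
{
	uint64_t remove_timeout = 15ULL * 1000ULL * 1000ULL * 1000ULL;
	uint64_t io_T = 1000ULL * 1000ULL * 1000ULL * 600ULL;

	printf("remove_timeout = %llu ns (%s)\n",
	    (unsigned long long)remove_timeout,
	    remove_timeout == SEC2NSEC(15) ? "15 sec" : "??");
	printf("io_T = %llu ns (%s)\n",
	    (unsigned long long)io_T,
	    io_T == SEC2NSEC(600) ? "600 sec" : "??");
	return (0);
}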
@@ -515,19 +535,6 @@ fmd_serd_exists(fmd_hdl_t *hdl, const char *name)
 	return (fmd_serd_eng_lookup(&mp->mod_serds, name) != NULL);
 }
 
-int
-fmd_serd_active(fmd_hdl_t *hdl, const char *name)
-{
-	fmd_module_t *mp = (fmd_module_t *)hdl;
-	fmd_serd_eng_t *sgp;
-
-	if ((sgp = fmd_serd_eng_lookup(&mp->mod_serds, name)) == NULL) {
-		zed_log_msg(LOG_ERR, "serd engine '%s' does not exist", name);
-		return (0);
-	}
-	return (fmd_serd_eng_fired(sgp) || !fmd_serd_eng_empty(sgp));
-}
-
 void
 fmd_serd_reset(fmd_hdl_t *hdl, const char *name)
 {
@@ -536,10 +543,12 @@ fmd_serd_reset(fmd_hdl_t *hdl, const char *name)
 
 	if ((sgp = fmd_serd_eng_lookup(&mp->mod_serds, name)) == NULL) {
 		zed_log_msg(LOG_ERR, "serd engine '%s' does not exist", name);
-	} else {
-		fmd_serd_eng_reset(sgp);
-		fmd_hdl_debug(hdl, "serd_reset %s", name);
+		return;
 	}
+
+	fmd_serd_eng_reset(sgp);
+
+	fmd_hdl_debug(hdl, "serd_reset %s", name);
 }
 
 int
@@ -547,21 +556,16 @@ fmd_serd_record(fmd_hdl_t *hdl, const char *name, fmd_event_t *ep)
 {
 	fmd_module_t *mp = (fmd_module_t *)hdl;
 	fmd_serd_eng_t *sgp;
+	int err;
 
 	if ((sgp = fmd_serd_eng_lookup(&mp->mod_serds, name)) == NULL) {
 		zed_log_msg(LOG_ERR, "failed to add record to SERD engine '%s'",
 		    name);
 		return (0);
 	}
-	return (fmd_serd_eng_record(sgp, ep->ev_hrt));
-}
+	err = fmd_serd_eng_record(sgp, ep->ev_hrt);
 
-void
-fmd_serd_gc(fmd_hdl_t *hdl)
-{
-	fmd_module_t *mp = (fmd_module_t *)hdl;
-
-	fmd_serd_hash_apply(&mp->mod_serds, fmd_serd_eng_gc, NULL);
+	return (err);
 }
 
 /* FMD Timers */
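fmd_serd_record() feeds one event into a named SERD (soft error rate discrimination) engine, which fires once N events land within a sliding time window T. A toy model of that behavior, independent of the zed implementation:

/*
 * Minimal illustration (assumptions, not the zed code): the engine
 * fires when the most recent N events all fall within T nanoseconds.
 */
#include <stdio.h>
#include <stdint.h>

#define	SERD_N	3
#define	SERD_T	1000000000ULL	/* 1 sec */

static uint64_t events[SERD_N];	/* timestamps of the last N events */
static unsigned count;

static int
serd_record(uint64_t hrt)
{
	if (count == SERD_N) {
		/* slide the window: drop the oldest timestamp */
		for (unsigned i = 1; i < SERD_N; i++)
			events[i - 1] = events[i];
		count--;
	}
	events[count++] = hrt;
	/* fire when the most recent N events all fall within T */
	return (count == SERD_N && hrt - events[0] < SERD_T);
}

int
main(void)
{
	uint64_t t[] = { 0, 100, 200, 300, 2000000000ULL };
	for (unsigned i = 0; i < sizeof (t) / sizeof (t[0]); i++)
		printf("event at %llu -> fired=%d\n",
		    (unsigned long long)t[i], serd_record(t[i]));
	return (0);
}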
@@ -575,7 +579,7 @@ _timer_notify(union sigval sv)
 	const fmd_hdl_ops_t *ops = mp->mod_info->fmdi_ops;
 	struct itimerspec its;
 
-	fmd_hdl_debug(hdl, "%s timer fired (%p)", mp->mod_name, ftp->ft_tid);
+	fmd_hdl_debug(hdl, "timer fired (%p)", ftp->ft_tid);
 
 	/* disarm the timer */
 	memset(&its, 0, sizeof (struct itimerspec));
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * CDDL HEADER START
  *
@@ -152,6 +151,7 @@ extern void fmd_hdl_vdebug(fmd_hdl_t *, const char *, va_list);
 extern void fmd_hdl_debug(fmd_hdl_t *, const char *, ...);
 
 extern int32_t fmd_prop_get_int32(fmd_hdl_t *, const char *);
+extern int64_t fmd_prop_get_int64(fmd_hdl_t *, const char *);
 
 #define	FMD_STAT_NOALLOC	0x0	/* fmd should use caller's memory */
 #define	FMD_STAT_ALLOC		0x1	/* fmd should allocate stats memory */
@@ -195,12 +195,10 @@ extern size_t fmd_buf_size(fmd_hdl_t *, fmd_case_t *, const char *);
 extern void fmd_serd_create(fmd_hdl_t *, const char *, uint_t, hrtime_t);
 extern void fmd_serd_destroy(fmd_hdl_t *, const char *);
 extern int fmd_serd_exists(fmd_hdl_t *, const char *);
-extern int fmd_serd_active(fmd_hdl_t *, const char *);
 extern void fmd_serd_reset(fmd_hdl_t *, const char *);
 extern int fmd_serd_record(fmd_hdl_t *, const char *, fmd_event_t *);
 extern int fmd_serd_fired(fmd_hdl_t *, const char *);
 extern int fmd_serd_empty(fmd_hdl_t *, const char *);
-extern void fmd_serd_gc(fmd_hdl_t *);
 
 extern id_t fmd_timer_install(fmd_hdl_t *, void *, fmd_event_t *, hrtime_t);
 extern void fmd_timer_remove(fmd_hdl_t *, id_t);
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * CDDL HEADER START
  *
@@ -311,9 +310,8 @@ fmd_serd_eng_reset(fmd_serd_eng_t *sgp)
 }
 
 void
-fmd_serd_eng_gc(fmd_serd_eng_t *sgp, void *arg)
+fmd_serd_eng_gc(fmd_serd_eng_t *sgp)
 {
-	(void) arg;
 	fmd_serd_elem_t *sep, *nep;
 	hrtime_t hrt;
 
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * CDDL HEADER START
  *
@@ -78,7 +77,7 @@ extern int fmd_serd_eng_fired(fmd_serd_eng_t *);
 extern int fmd_serd_eng_empty(fmd_serd_eng_t *);
 
 extern void fmd_serd_eng_reset(fmd_serd_eng_t *);
-extern void fmd_serd_eng_gc(fmd_serd_eng_t *, void *);
+extern void fmd_serd_eng_gc(fmd_serd_eng_t *);
 
 #ifdef __cplusplus
 }
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * CDDL HEADER START
  *
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * CDDL HEADER START
  *
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * CDDL HEADER START
 *
@@ -24,7 +23,6 @@
  * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
  * Copyright (c) 2016, Intel Corporation.
- * Copyright (c) 2023, Klara Inc.
 */
 
 #include <stddef.h>
@@ -49,16 +47,11 @@
 #define	DEFAULT_CHECKSUM_T	600	/* seconds */
 #define	DEFAULT_IO_N	10	/* events */
 #define	DEFAULT_IO_T	600	/* seconds */
-#define	DEFAULT_SLOW_IO_N	10	/* events */
-#define	DEFAULT_SLOW_IO_T	30	/* seconds */
-
-#define	CASE_GC_TIMEOUT_SECS	43200	/* 12 hours */
 
 /*
- * Our serd engines are named in the following format:
- * 'zfs_<pool_guid>_<vdev_guid>_{checksum,io,slow_io}'
- * This #define reserves enough space for two 64-bit hex values plus the
- * length of the longest string.
+ * Our serd engines are named 'zfs_<pool_guid>_<vdev_guid>_{checksum,io}'.  This
+ * #define reserves enough space for two 64-bit hex values plus the length of
+ * the longest string.
 */
 #define	MAX_SERDLEN	(16 * 2 + sizeof ("zfs___checksum"))
 
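A quick check that the name format described above actually fits the MAX_SERDLEN sizing; the GUID values are illustrative worst cases, and the format string mirrors the one zfs_serd_name() is said to produce:

#include <assert.h>
#include <stdio.h>
#include <stdint.h>

#define	MAX_SERDLEN	(16 * 2 + sizeof ("zfs___checksum"))

int
main(void)
{
	char buf[MAX_SERDLEN];
	uint64_t pool_guid = UINT64_MAX, vdev_guid = UINT64_MAX;
	/* two 64-bit GUIDs print as at most 16 hex digits each */
	int n = snprintf(buf, sizeof (buf), "zfs_%llx_%llx_%s",
	    (unsigned long long)pool_guid, (unsigned long long)vdev_guid,
	    "checksum");

	assert(n >= 0 && (size_t)n < sizeof (buf));
	printf("%s (%d chars, max %zu)\n", buf, n, sizeof (buf) - 1);
	return (0);
}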
@@ -72,11 +65,9 @@ typedef struct zfs_case_data {
 	uint64_t	zc_ena;
 	uint64_t	zc_pool_guid;
 	uint64_t	zc_vdev_guid;
-	uint64_t	zc_parent_guid;
 	int		zc_pool_state;
 	char		zc_serd_checksum[MAX_SERDLEN];
 	char		zc_serd_io[MAX_SERDLEN];
-	char		zc_serd_slow_io[MAX_SERDLEN];
 	int		zc_has_remove_timer;
 } zfs_case_data_t;
 
@@ -123,8 +114,7 @@ zfs_de_stats_t zfs_stats = {
 	{ "resource_drops", FMD_TYPE_UINT64, "resource related ereports" }
 };
 
-/* wait 15 seconds after a removal */
-static hrtime_t zfs_remove_timeout = SEC2NSEC(15);
+static hrtime_t zfs_remove_timeout;
 
 uu_list_pool_t *zfs_case_pool;
 uu_list_t *zfs_cases;
@@ -134,8 +124,6 @@ uu_list_t *zfs_cases;
 #define	ZFS_MAKE_EREPORT(type)	\
 	FM_EREPORT_CLASS "." ZFS_ERROR_CLASS "." type
 
-static void zfs_purge_cases(fmd_hdl_t *hdl);
-
 /*
  * Write out the persistent representation of an active case.
 */
@@ -182,64 +170,6 @@ zfs_case_unserialize(fmd_hdl_t *hdl, fmd_case_t *cp)
 	return (zcp);
 }
 
-/*
- * Return count of other unique SERD cases under same vdev parent
- */
-static uint_t
-zfs_other_serd_cases(fmd_hdl_t *hdl, const zfs_case_data_t *zfs_case)
-{
-	zfs_case_t *zcp;
-	uint_t cases = 0;
-	static hrtime_t next_check = 0;
-
-	/*
-	 * Note that plumbing in some external GC would require adding locking,
-	 * since most of this module code is not thread safe and assumes there
-	 * is only one thread running against the module. So we perform GC here
-	 * inline periodically so that future delay induced faults will be
-	 * possible once the issue causing multiple vdev delays is resolved.
-	 */
-	if (gethrestime_sec() > next_check) {
-		/* Periodically purge old SERD entries and stale cases */
-		fmd_serd_gc(hdl);
-		zfs_purge_cases(hdl);
-		next_check = gethrestime_sec() + CASE_GC_TIMEOUT_SECS;
-	}
-
-	for (zcp = uu_list_first(zfs_cases); zcp != NULL;
-	    zcp = uu_list_next(zfs_cases, zcp)) {
-		zfs_case_data_t *zcd = &zcp->zc_data;
-
-		/*
-		 * must be same pool and parent vdev but different leaf vdev
-		 */
-		if (zcd->zc_pool_guid != zfs_case->zc_pool_guid ||
-		    zcd->zc_parent_guid != zfs_case->zc_parent_guid ||
-		    zcd->zc_vdev_guid == zfs_case->zc_vdev_guid) {
-			continue;
-		}
-
-		/*
-		 * Check if there is another active serd case besides zfs_case
-		 *
-		 * Only one serd engine will be assigned to the case
-		 */
-		if (zcd->zc_serd_checksum[0] == zfs_case->zc_serd_checksum[0] &&
-		    fmd_serd_active(hdl, zcd->zc_serd_checksum)) {
-			cases++;
-		}
-		if (zcd->zc_serd_io[0] == zfs_case->zc_serd_io[0] &&
-		    fmd_serd_active(hdl, zcd->zc_serd_io)) {
-			cases++;
-		}
-		if (zcd->zc_serd_slow_io[0] == zfs_case->zc_serd_slow_io[0] &&
-		    fmd_serd_active(hdl, zcd->zc_serd_slow_io)) {
-			cases++;
-		}
-	}
-	return (cases);
-}
-
 /*
  * Iterate over any active cases. If any cases are associated with a pool or
  * vdev which is no longer present on the system, close the associated case.
@@ -446,14 +376,6 @@ zfs_serd_name(char *buf, uint64_t pool_guid, uint64_t vdev_guid,
 	    (long long unsigned int)vdev_guid, type);
 }
 
-static void
-zfs_case_retire(fmd_hdl_t *hdl, zfs_case_t *zcp)
-{
-	fmd_hdl_debug(hdl, "retiring case");
-
-	fmd_case_close(hdl, zcp->zc_case);
-}
-
 /*
  * Solve a given ZFS case. This first checks to make sure the diagnosis is
 * still valid, as well as cleaning up any pending timer associated with the
@@ -526,34 +448,6 @@ zfs_ereport_when(fmd_hdl_t *hdl, nvlist_t *nvl, er_timeval_t *when)
 	}
 }
 
-/*
- * Record the specified event in the SERD engine and return a
- * boolean value indicating whether or not the engine fired as
- * the result of inserting this event.
- *
- * When the pool has similar active cases on other vdevs, then
- * the fired state is disregarded and the case is retired.
- */
-static int
-zfs_fm_serd_record(fmd_hdl_t *hdl, const char *name, fmd_event_t *ep,
-    zfs_case_t *zcp, const char *err_type)
-{
-	int fired = fmd_serd_record(hdl, name, ep);
-	int peers = 0;
-
-	if (fired && (peers = zfs_other_serd_cases(hdl, &zcp->zc_data)) > 0) {
-		fmd_hdl_debug(hdl, "pool %llu is tracking %d other %s cases "
-		    "-- skip faulting the vdev %llu",
-		    (u_longlong_t)zcp->zc_data.zc_pool_guid,
-		    peers, err_type,
-		    (u_longlong_t)zcp->zc_data.zc_vdev_guid);
-		zfs_case_retire(hdl, zcp);
-		fired = 0;
-	}
-
-	return (fired);
-}
-
 /*
  * Main fmd entry point.
 */
@@ -562,7 +456,7 @@ zfs_fm_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl, const char *class)
 {
 	zfs_case_t *zcp, *dcp;
 	int32_t pool_state;
-	uint64_t ena, pool_guid, vdev_guid, parent_guid;
+	uint64_t ena, pool_guid, vdev_guid;
 	uint64_t checksum_n, checksum_t;
 	uint64_t io_n, io_t;
 	er_timeval_t pool_load;
@@ -652,9 +546,6 @@ zfs_fm_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl, const char *class)
 	if (nvlist_lookup_uint64(nvl,
 	    FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID, &vdev_guid) != 0)
 		vdev_guid = 0;
-	if (nvlist_lookup_uint64(nvl,
-	    FM_EREPORT_PAYLOAD_ZFS_PARENT_GUID, &parent_guid) != 0)
-		parent_guid = 0;
 	if (nvlist_lookup_uint64(nvl, FM_EREPORT_ENA, &ena) != 0)
 		ena = 0;
 
@@ -741,7 +632,9 @@ zfs_fm_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl, const char *class)
 	if (strcmp(class,
 	    ZFS_MAKE_EREPORT(FM_EREPORT_ZFS_DATA)) == 0 ||
 	    strcmp(class,
-	    ZFS_MAKE_EREPORT(FM_EREPORT_ZFS_CONFIG_CACHE_WRITE)) == 0) {
+	    ZFS_MAKE_EREPORT(FM_EREPORT_ZFS_CONFIG_CACHE_WRITE)) == 0 ||
+	    strcmp(class,
+	    ZFS_MAKE_EREPORT(FM_EREPORT_ZFS_DELAY)) == 0) {
 		zfs_stats.resource_drops.fmds_value.ui64++;
 		return;
 	}
@@ -765,7 +658,6 @@ zfs_fm_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl, const char *class)
 		data.zc_ena = ena;
 		data.zc_pool_guid = pool_guid;
 		data.zc_vdev_guid = vdev_guid;
-		data.zc_parent_guid = parent_guid;
 		data.zc_pool_state = (int)pool_state;
 
 		fmd_buf_write(hdl, cs, CASE_DATA, &data, sizeof (data));
@@ -810,9 +702,6 @@ zfs_fm_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl, const char *class)
 			if (zcp->zc_data.zc_serd_checksum[0] != '\0')
 				fmd_serd_reset(hdl,
 				    zcp->zc_data.zc_serd_checksum);
-			if (zcp->zc_data.zc_serd_slow_io[0] != '\0')
-				fmd_serd_reset(hdl,
-				    zcp->zc_data.zc_serd_slow_io);
 		} else if (fmd_nvl_class_match(hdl, nvl,
 		    ZFS_MAKE_RSRC(FM_RESOURCE_STATECHANGE))) {
 			uint64_t state = 0;
@@ -841,11 +730,7 @@ zfs_fm_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl, const char *class)
 	if (fmd_case_solved(hdl, zcp->zc_case))
 		return;
 
-	if (vdev_guid)
-		fmd_hdl_debug(hdl, "error event '%s', vdev %llu", class,
-		    vdev_guid);
-	else
-		fmd_hdl_debug(hdl, "error event '%s'", class);
+	fmd_hdl_debug(hdl, "error event '%s'", class);
 
 	/*
 	 * Determine if we should solve the case and generate a fault. We solve
@@ -894,12 +779,11 @@ zfs_fm_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl, const char *class)
 	    fmd_nvl_class_match(hdl, nvl,
 	    ZFS_MAKE_EREPORT(FM_EREPORT_ZFS_IO_FAILURE)) ||
 	    fmd_nvl_class_match(hdl, nvl,
-	    ZFS_MAKE_EREPORT(FM_EREPORT_ZFS_DELAY)) ||
-	    fmd_nvl_class_match(hdl, nvl,
 	    ZFS_MAKE_EREPORT(FM_EREPORT_ZFS_PROBE_FAILURE))) {
 		const char *failmode = NULL;
 		boolean_t checkremove = B_FALSE;
 		uint32_t pri = 0;
+		int32_t flags = 0;
 
 		/*
 		 * If this is a checksum or I/O error, then toss it into the
@@ -928,64 +812,22 @@ zfs_fm_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl, const char *class)
 				    SEC2NSEC(io_t));
 				zfs_case_serialize(zcp);
 			}
-			if (zfs_fm_serd_record(hdl, zcp->zc_data.zc_serd_io,
-			    ep, zcp, "io error")) {
+			if (fmd_serd_record(hdl, zcp->zc_data.zc_serd_io, ep))
 				checkremove = B_TRUE;
-			}
-		} else if (fmd_nvl_class_match(hdl, nvl,
-		    ZFS_MAKE_EREPORT(FM_EREPORT_ZFS_DELAY))) {
-			uint64_t slow_io_n, slow_io_t;
-
-			/*
-			 * Create a slow io SERD engine when the VDEV has the
-			 * 'vdev_slow_io_n' and 'vdev_slow_io_n' properties.
-			 */
-			if (zcp->zc_data.zc_serd_slow_io[0] == '\0' &&
-			    nvlist_lookup_uint64(nvl,
-			    FM_EREPORT_PAYLOAD_ZFS_VDEV_SLOW_IO_N,
-			    &slow_io_n) == 0 &&
-			    nvlist_lookup_uint64(nvl,
-			    FM_EREPORT_PAYLOAD_ZFS_VDEV_SLOW_IO_T,
-			    &slow_io_t) == 0) {
-				zfs_serd_name(zcp->zc_data.zc_serd_slow_io,
-				    pool_guid, vdev_guid, "slow_io");
-				fmd_serd_create(hdl,
-				    zcp->zc_data.zc_serd_slow_io,
-				    slow_io_n,
-				    SEC2NSEC(slow_io_t));
-				zfs_case_serialize(zcp);
-			}
-			/* Pass event to SERD engine and see if this triggers */
-			if (zcp->zc_data.zc_serd_slow_io[0] != '\0' &&
-			    zfs_fm_serd_record(hdl,
-			    zcp->zc_data.zc_serd_slow_io, ep, zcp, "slow io")) {
-				zfs_case_solve(hdl, zcp,
-				    "fault.fs.zfs.vdev.slow_io");
-			}
 		} else if (fmd_nvl_class_match(hdl, nvl,
 		    ZFS_MAKE_EREPORT(FM_EREPORT_ZFS_CHECKSUM))) {
-			uint64_t flags = 0;
-			int32_t flags32 = 0;
 			/*
 			 * We ignore ereports for checksum errors generated by
 			 * scrub/resilver I/O to avoid potentially further
 			 * degrading the pool while it's being repaired.
-			 *
-			 * Note that FM_EREPORT_PAYLOAD_ZFS_ZIO_FLAGS used to
-			 * be int32. To allow newer zed to work on older
-			 * kernels, if we don't find the flags, we look for
-			 * the older ones too.
			 */
 			if (((nvlist_lookup_uint32(nvl,
 			    FM_EREPORT_PAYLOAD_ZFS_ZIO_PRIORITY, &pri) == 0) &&
 			    (pri == ZIO_PRIORITY_SCRUB ||
 			    pri == ZIO_PRIORITY_REBUILD)) ||
-			    ((nvlist_lookup_uint64(nvl,
-			    FM_EREPORT_PAYLOAD_ZFS_ZIO_FLAGS, &flags) == 0) &&
-			    (flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER))) ||
 			    ((nvlist_lookup_int32(nvl,
-			    FM_EREPORT_PAYLOAD_ZFS_ZIO_FLAGS, &flags32) == 0) &&
-			    (flags32 & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER)))) {
+			    FM_EREPORT_PAYLOAD_ZFS_ZIO_FLAGS, &flags) == 0) &&
+			    (flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER)))) {
 				fmd_hdl_debug(hdl, "ignoring '%s' for "
 				    "scrub/resilver I/O", class);
 				return;
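The retained branch filters checksum ereports from repair I/O by testing the zio flags. A compact illustration of that mask test, with placeholder values rather than the real ZIO_FLAG_* definitions:

#include <stdio.h>
#include <stdint.h>

#define	ZIO_FLAG_SCRUB		(1U << 0)	/* placeholder value */
#define	ZIO_FLAG_RESILVER	(1U << 1)	/* placeholder value */

static int
ignore_for_repair_io(int32_t flags)
{
	/* drop checksum ereports generated by scrub/resilver I/O */
	return ((flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER)) != 0);
}

int
main(void)
{
	printf("scrub io ignored: %d\n", ignore_for_repair_io(ZIO_FLAG_SCRUB));
	printf("normal io ignored: %d\n", ignore_for_repair_io(0));
	return (0);
}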
@@ -1011,9 +853,8 @@ zfs_fm_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl, const char *class)
 				    SEC2NSEC(checksum_t));
 				zfs_case_serialize(zcp);
 			}
-			if (zfs_fm_serd_record(hdl,
-			    zcp->zc_data.zc_serd_checksum, ep, zcp,
-			    "checksum")) {
+			if (fmd_serd_record(hdl,
+			    zcp->zc_data.zc_serd_checksum, ep)) {
 				zfs_case_solve(hdl, zcp,
 				    "fault.fs.zfs.vdev.checksum");
 			}
@@ -1083,8 +924,6 @@ zfs_fm_close(fmd_hdl_t *hdl, fmd_case_t *cs)
 		fmd_serd_destroy(hdl, zcp->zc_data.zc_serd_checksum);
 	if (zcp->zc_data.zc_serd_io[0] != '\0')
 		fmd_serd_destroy(hdl, zcp->zc_data.zc_serd_io);
-	if (zcp->zc_data.zc_serd_slow_io[0] != '\0')
-		fmd_serd_destroy(hdl, zcp->zc_data.zc_serd_slow_io);
 	if (zcp->zc_data.zc_has_remove_timer)
 		fmd_timer_remove(hdl, zcp->zc_remove_timer);
 
@@ -1093,15 +932,30 @@ zfs_fm_close(fmd_hdl_t *hdl, fmd_case_t *cs)
 	fmd_hdl_free(hdl, zcp, sizeof (zfs_case_t));
 }
 
+/*
+ * We use the fmd gc entry point to look for old cases that no longer apply.
+ * This allows us to keep our set of case data small in a long running system.
+ */
+static void
+zfs_fm_gc(fmd_hdl_t *hdl)
+{
+	zfs_purge_cases(hdl);
+}
+
 static const fmd_hdl_ops_t fmd_ops = {
 	zfs_fm_recv,	/* fmdo_recv */
 	zfs_fm_timeout,	/* fmdo_timeout */
 	zfs_fm_close,	/* fmdo_close */
 	NULL,		/* fmdo_stats */
-	NULL,		/* fmdo_gc */
+	zfs_fm_gc,	/* fmdo_gc */
 };
 
 static const fmd_prop_t fmd_props[] = {
+	{ "checksum_N", FMD_TYPE_UINT32, "10" },
+	{ "checksum_T", FMD_TYPE_TIME, "10min" },
+	{ "io_N", FMD_TYPE_UINT32, "10" },
+	{ "io_T", FMD_TYPE_TIME, "10min" },
+	{ "remove_timeout", FMD_TYPE_TIME, "15sec" },
 	{ NULL, 0, NULL }
 };
 
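On the right-hand side the module registers explicit defaults and later reads remove_timeout back through fmd_prop_get_int64(). FMD_TYPE_TIME strings like "10min" and "15sec" encode durations; a small stand-alone parser sketch with assumed suffix handling, not the fmd implementation:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

static uint64_t
prop_time_to_ns(const char *val)
{
	char *end;
	uint64_t n = strtoull(val, &end, 10);

	if (strcmp(end, "sec") == 0)
		return (n * 1000000000ULL);
	if (strcmp(end, "min") == 0)
		return (n * 60ULL * 1000000000ULL);
	return (n);	/* assume the value is already in nanoseconds */
}

int
main(void)
{
	printf("10min -> %llu ns\n",
	    (unsigned long long)prop_time_to_ns("10min"));
	printf("15sec -> %llu ns\n",
	    (unsigned long long)prop_time_to_ns("15sec"));
	return (0);
}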
@@ -1142,6 +996,8 @@ _zfs_diagnosis_init(fmd_hdl_t *hdl)
 
 	(void) fmd_stat_create(hdl, FMD_STAT_NOALLOC, sizeof (zfs_stats) /
 	    sizeof (fmd_stat_t), (fmd_stat_t *)&zfs_stats);
+
+	zfs_remove_timeout = fmd_prop_get_int64(hdl, "remove_timeout");
 }
 
 void
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * CDDL HEADER START
  *
@@ -215,7 +214,6 @@ zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t labeled)
 	vdev_stat_t *vs;
 	char **lines = NULL;
 	int lines_cnt = 0;
-	int rc;
 
 	/*
 	 * Get the persistent path, typically under the '/dev/disk/by-id' or
@@ -235,12 +233,8 @@ zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t labeled)
 	}
 
 	(void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_PHYS_PATH, &physpath);
-
-	update_vdev_config_dev_sysfs_path(vdev, path,
-	    ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH);
 	(void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH,
 	    &enc_sysfs_path);
-
 	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk);
 	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_OFFLINE, &offline);
 	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_FAULTED, &faulted);
@@ -407,17 +401,17 @@ zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t labeled)
 	}
 
 	nvlist_lookup_string(vdev, "new_devid", &new_devid);
 
 	if (is_mpath_wholedisk) {
 		/* Don't label device mapper or multipath disks. */
 		zed_log_msg(LOG_INFO,
 		    " it's a multipath wholedisk, don't label");
-		rc = zpool_prepare_disk(zhp, vdev, "autoreplace", &lines,
-		    &lines_cnt);
-		if (rc != 0) {
+		if (zpool_prepare_disk(zhp, vdev, "autoreplace", &lines,
+		    &lines_cnt) != 0) {
 			zed_log_msg(LOG_INFO,
 			    " zpool_prepare_disk: could not "
-			    "prepare '%s' (%s), path '%s', rc = %d", fullpath,
-			    libzfs_error_description(g_zfshdl), path, rc);
+			    "prepare '%s' (%s)", fullpath,
+			    libzfs_error_description(g_zfshdl));
 			if (lines_cnt > 0) {
 				zed_log_msg(LOG_INFO,
 				    " zfs_prepare_disk output:");
@@ -448,13 +442,12 @@ zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t labeled)
 		 * If this is a request to label a whole disk, then attempt to
 		 * write out the label.
 		 */
-		rc = zpool_prepare_and_label_disk(g_zfshdl, zhp, leafname,
-		    vdev, "autoreplace", &lines, &lines_cnt);
-		if (rc != 0) {
+		if (zpool_prepare_and_label_disk(g_zfshdl, zhp, leafname,
+		    vdev, "autoreplace", &lines, &lines_cnt) != 0) {
 			zed_log_msg(LOG_WARNING,
 			    " zpool_prepare_and_label_disk: could not "
-			    "label '%s' (%s), rc = %d", leafname,
-			    libzfs_error_description(g_zfshdl), rc);
+			    "label '%s' (%s)", leafname,
+			    libzfs_error_description(g_zfshdl));
 			if (lines_cnt > 0) {
 				zed_log_msg(LOG_INFO,
 				    " zfs_prepare_disk output:");
@@ -705,7 +698,7 @@ zfs_enable_ds(void *arg)
 {
 	unavailpool_t *pool = (unavailpool_t *)arg;
 
-	(void) zpool_enable_datasets(pool->uap_zhp, NULL, 0, 512);
+	(void) zpool_enable_datasets(pool->uap_zhp, NULL, 0);
 	zpool_close(pool->uap_zhp);
 	free(pool);
 }
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * CDDL HEADER START
  *
@@ -404,7 +403,6 @@ zfs_retire_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl,
 	    (state == VDEV_STATE_REMOVED || state == VDEV_STATE_FAULTED))) {
 		const char *devtype;
 		char *devname;
-		boolean_t skip_removal = B_FALSE;
 
 		if (nvlist_lookup_string(nvl, FM_EREPORT_PAYLOAD_ZFS_VDEV_TYPE,
 		    &devtype) == 0) {
@@ -442,28 +440,18 @@ zfs_retire_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl,
 		nvlist_lookup_uint64_array(vdev, ZPOOL_CONFIG_VDEV_STATS,
 		    (uint64_t **)&vs, &c);
 
-		if (vs->vs_state == VDEV_STATE_OFFLINE)
-			return;
-
 		/*
 		 * If state removed is requested for already removed vdev,
 		 * its a loopback event from spa_async_remove(). Just
 		 * ignore it.
 		 */
-		if ((vs->vs_state == VDEV_STATE_REMOVED &&
-		    state == VDEV_STATE_REMOVED)) {
-			if (strcmp(class, "resource.fs.zfs.removed") == 0 &&
-			    nvlist_exists(nvl, "by_kernel")) {
-				skip_removal = B_TRUE;
-			} else {
-				return;
-			}
-		}
+		if (vs->vs_state == VDEV_STATE_REMOVED &&
+		    state == VDEV_STATE_REMOVED)
+			return;
 
 		/* Remove the vdev since device is unplugged */
 		int remove_status = 0;
-		if (!skip_removal && (l2arc ||
-		    (strcmp(class, "resource.fs.zfs.removed") == 0))) {
+		if (l2arc || (strcmp(class, "resource.fs.zfs.removed") == 0)) {
 			remove_status = zpool_vdev_remove_wanted(zhp, devname);
 			fmd_hdl_debug(hdl, "zpool_vdev_remove_wanted '%s'"
 			    ", err:%d", devname, libzfs_errno(zhdl));
@@ -535,9 +523,6 @@ zfs_retire_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl,
 		} else if (fmd_nvl_class_match(hdl, fault,
 		    "fault.fs.zfs.vdev.checksum")) {
 			degrade_device = B_TRUE;
-		} else if (fmd_nvl_class_match(hdl, fault,
-		    "fault.fs.zfs.vdev.slow_io")) {
-			degrade_device = B_TRUE;
 		} else if (fmd_nvl_class_match(hdl, fault,
 		    "fault.fs.zfs.device")) {
 			fault_device = B_FALSE;
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * This file is part of the ZFS Event Daemon (ZED).
  *
@@ -9,7 +9,6 @@ dist_zedexec_SCRIPTS = \
	%D%/all-debug.sh \
	%D%/all-syslog.sh \
	%D%/data-notify.sh \
-	%D%/deadman-slot_off.sh \
	%D%/generic-notify.sh \
	%D%/pool_import-led.sh \
	%D%/resilver_finish-notify.sh \
@@ -30,7 +29,6 @@ SUBSTFILES += $(nodist_zedexec_SCRIPTS)
 zedconfdefaults = \
	all-syslog.sh \
	data-notify.sh \
-	deadman-slot_off.sh \
	history_event-zfs-list-cacher.sh \
	pool_import-led.sh \
	resilver_finish-notify.sh \
@@ -1,71 +0,0 @@
-#!/bin/sh
-# shellcheck disable=SC3014,SC2154,SC2086,SC2034
-#
-# Turn off disk's enclosure slot if an I/O is hung triggering the deadman.
-#
-# It's possible for outstanding I/O to a misbehaving SCSI disk to neither
-# promptly complete or return an error.  This can occur due to retry and
-# recovery actions taken by the SCSI layer, driver, or disk.  When it occurs
-# the pool will be unresponsive even though there may be sufficient redundancy
-# configured to proceeded without this single disk.
-#
-# When a hung I/O is detected by the kmods it will be posted as a deadman
-# event.  By default an I/O is considered to be hung after 5 minutes.  This
-# value can be changed with the zfs_deadman_ziotime_ms module parameter.
-# If ZED_POWER_OFF_ENCLOSURE_SLOT_ON_DEADMAN is set the disk's enclosure
-# slot will be powered off causing the outstanding I/O to fail.  The ZED
-# will then handle this like a normal disk failure and FAULT the vdev.
-#
-# We assume the user will be responsible for turning the slot back on
-# after replacing the disk.
-#
-# Note that this script requires that your enclosure be supported by the
-# Linux SCSI Enclosure services (SES) driver.  The script will do nothing
-# if you have no enclosure, or if your enclosure isn't supported.
-#
-# Exit codes:
-# 0: slot successfully powered off
-# 1: enclosure not available
-# 2: ZED_POWER_OFF_ENCLOSURE_SLOT_ON_DEADMAN disabled
-# 3: System not configured to wait on deadman
-# 4: The enclosure sysfs path passed from ZFS does not exist
-# 5: Enclosure slot didn't actually turn off after we told it to
-
-[ -f "${ZED_ZEDLET_DIR}/zed.rc" ] && . "${ZED_ZEDLET_DIR}/zed.rc"
-. "${ZED_ZEDLET_DIR}/zed-functions.sh"
-
-if [ ! -d /sys/class/enclosure ] ; then
-	# No JBOD enclosure or NVMe slots
-	exit 1
-fi
-
-if [ "${ZED_POWER_OFF_ENCLOSURE_SLOT_ON_DEADMAN}" != "1" ] ; then
-	exit 2
-fi
-
-if [ "$ZEVENT_POOL_FAILMODE" != "wait" ] ; then
-	exit 3
-fi
-
-if [ ! -f "$ZEVENT_VDEV_ENC_SYSFS_PATH/power_status" ] ; then
-	exit 4
-fi
-
-# Turn off the slot and wait for sysfs to report that the slot is off.
-# It can take ~400ms on some enclosures and multiple retries may be needed.
-for i in $(seq 1 20) ; do
-	echo "off" | tee "$ZEVENT_VDEV_ENC_SYSFS_PATH/power_status"
-
-	for j in $(seq 1 5) ; do
-		if [ "$(cat $ZEVENT_VDEV_ENC_SYSFS_PATH/power_status)" == "off" ] ; then
-			break 2
-		fi
-		sleep 0.1
-	done
-done
-
-if [ "$(cat $ZEVENT_VDEV_ENC_SYSFS_PATH/power_status)" != "off" ] ; then
-	exit 5
-fi
-
-zed_log_msg "powered down slot $ZEVENT_VDEV_ENC_SYSFS_PATH for $ZEVENT_VDEV_PATH"
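Note: the removed zedlet above documents its own tuning knobs. As a rough sketch of enabling it on the side that still ships it (the pool name, rc path, and 2-minute value below are assumptions, not part of this diff):

    # Sketch only: turn on slot power-off for deadman events.
    echo 'ZED_POWER_OFF_ENCLOSURE_SLOT_ON_DEADMAN=1' >> /etc/zfs/zed.d/zed.rc
    # Exit code 3 above requires the pool to block on I/O errors:
    zpool set failmode=wait tank
    # Optionally shorten the default 5-minute hung-I/O threshold (value in ms):
    echo 120000 > /sys/module/zfs/parameters/zfs_deadman_ziotime_ms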
@@ -1,5 +1,4 @@
 #!/bin/sh
-# SPDX-License-Identifier: CDDL-1.0
 # shellcheck disable=SC2154
 #
 # CDDL HEADER START
@@ -5,7 +5,7 @@
 #
 # Bad SCSI disks can often "disappear and reappear" causing all sorts of chaos
 # as they flip between FAULTED and ONLINE.  If
-# ZED_POWER_OFF_ENCLOSURE_SLOT_ON_FAULT is set in zed.rc, and the disk gets
+# ZED_POWER_OFF_ENCLOUSRE_SLOT_ON_FAULT is set in zed.rc, and the disk gets
 # FAULTED, then power down the slot via sysfs:
 #
 #	/sys/class/enclosure/<enclosure>/<slot>/power_status
@@ -19,7 +19,7 @@
 # Exit codes:
 # 0: slot successfully powered off
 # 1: enclosure not available
-# 2: ZED_POWER_OFF_ENCLOSURE_SLOT_ON_FAULT disabled
+# 2: ZED_POWER_OFF_ENCLOUSRE_SLOT_ON_FAULT disabled
 # 3: vdev was not FAULTED
 # 4: The enclosure sysfs path passed from ZFS does not exist
 # 5: Enclosure slot didn't actually turn off after we told it to
@@ -32,7 +32,7 @@ if [ ! -d /sys/class/enclosure ] ; then
	exit 1
 fi
 
-if [ "${ZED_POWER_OFF_ENCLOSURE_SLOT_ON_FAULT}" != "1" ] ; then
+if [ "${ZED_POWER_OFF_ENCLOUSRE_SLOT_ON_FAULT}" != "1" ] ; then
	exit 2
 fi
 
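Note: both power-off zedlets drive the same sysfs attribute quoted above. A hedged manual equivalent for checking whether an enclosure supports slot power control (the enclosure/slot path is a made-up example):

    # Sketch: inspect and toggle one slot by hand via the SES driver.
    cat /sys/class/enclosure/0:0:12:0/Slot01/power_status    # prints "on" or "off"
    echo off > /sys/class/enclosure/0:0:12:0/Slot01/power_status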
@@ -205,14 +205,6 @@ zed_notify()
     [ "${rv}" -eq 0 ] && num_success=$((num_success + 1))
     [ "${rv}" -eq 1 ] && num_failure=$((num_failure + 1))
 
-    zed_notify_ntfy "${subject}" "${pathname}"; rv=$?
-    [ "${rv}" -eq 0 ] && num_success=$((num_success + 1))
-    [ "${rv}" -eq 1 ] && num_failure=$((num_failure + 1))
-
-    zed_notify_gotify "${subject}" "${pathname}"; rv=$?
-    [ "${rv}" -eq 0 ] && num_success=$((num_success + 1))
-    [ "${rv}" -eq 1 ] && num_failure=$((num_failure + 1))
-
     [ "${num_success}" -gt 0 ] && return 0
     [ "${num_failure}" -gt 0 ] && return 1
     return 2
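Note: zed_notify() tallies one result per backend; each returns 0 (sent), 1 (failed), or 2 (not configured). A hypothetical extra backend would slot into the removed block the same way (zed_notify_mybackend is invented for illustration):

    zed_notify_mybackend "${subject}" "${pathname}"; rv=$?
    [ "${rv}" -eq 0 ] && num_success=$((num_success + 1))
    [ "${rv}" -eq 1 ] && num_failure=$((num_failure + 1))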
@@ -535,191 +527,6 @@ zed_notify_pushover()
 }
 
 
-# zed_notify_ntfy (subject, pathname)
-#
-# Send a notification via Ntfy.sh <https://ntfy.sh/>.
-# The ntfy topic (ZED_NTFY_TOPIC) identifies the topic that the notification
-# will be sent to Ntfy.sh server. The ntfy url (ZED_NTFY_URL) defines the
-# self-hosted or provided hosted ntfy service location. The ntfy access token
-# <https://docs.ntfy.sh/publish/#access-tokens> (ZED_NTFY_ACCESS_TOKEN) reprsents an
-# access token that could be used if a topic is read/write protected. If a
-# topic can be written to publicaly, a ZED_NTFY_ACCESS_TOKEN is not required.
-#
-# Requires curl and sed executables to be installed in the standard PATH.
-#
-# References
-#   https://docs.ntfy.sh
-#
-# Arguments
-#   subject: notification subject
-#   pathname: pathname containing the notification message (OPTIONAL)
-#
-# Globals
-#   ZED_NTFY_TOPIC
-#   ZED_NTFY_ACCESS_TOKEN (OPTIONAL)
-#   ZED_NTFY_URL
-#
-# Return
-#   0: notification sent
-#   1: notification failed
-#   2: not configured
-#
-zed_notify_ntfy()
-{
-    local subject="$1"
-    local pathname="${2:-"/dev/null"}"
-    local msg_body
-    local msg_out
-    local msg_err
-
-    [ -n "${ZED_NTFY_TOPIC}" ] || return 2
-    local url="${ZED_NTFY_URL:-"https://ntfy.sh"}/${ZED_NTFY_TOPIC}"
-
-    if [ ! -r "${pathname}" ]; then
-        zed_log_err "ntfy cannot read \"${pathname}\""
-        return 1
-    fi
-
-    zed_check_cmd "curl" "sed" || return 1
-
-    # Read the message body in.
-    #
-    msg_body="$(cat "${pathname}")"
-
-    if [ -z "${msg_body}" ]
-    then
-        msg_body=$subject
-        subject=""
-    fi
-
-    # Send the POST request and check for errors.
-    #
-    if [ -n "${ZED_NTFY_ACCESS_TOKEN}" ]; then
-        msg_out="$( \
-            curl \
-            -u ":${ZED_NTFY_ACCESS_TOKEN}" \
-            -H "Title: ${subject}" \
-            -d "${msg_body}" \
-            -H "Priority: high" \
-            "${url}" \
-            2>/dev/null \
-            )"; rv=$?
-    else
-        msg_out="$( \
-            curl \
-            -H "Title: ${subject}" \
-            -d "${msg_body}" \
-            -H "Priority: high" \
-            "${url}" \
-            2>/dev/null \
-            )"; rv=$?
-    fi
-    if [ "${rv}" -ne 0 ]; then
-        zed_log_err "curl exit=${rv}"
-        return 1
-    fi
-    msg_err="$(echo "${msg_out}" \
-        | sed -n -e 's/.*"errors" *:.*\[\(.*\)\].*/\1/p')"
-    if [ -n "${msg_err}" ]; then
-        zed_log_err "ntfy \"${msg_err}\""
-        return 1
-    fi
-    return 0
-}
-
-
-# zed_notify_gotify (subject, pathname)
-#
-# Send a notification via Gotify <https://gotify.net/>.
-# The Gotify URL (ZED_GOTIFY_URL) defines a self-hosted Gotify location.
-# The Gotify application token (ZED_GOTIFY_APPTOKEN) defines a
-# Gotify application token which is associated with a message.
-# The optional Gotify priority value (ZED_GOTIFY_PRIORITY) overrides the
-# default or configured priority at the Gotify server for the application.
-#
-# Requires curl and sed executables to be installed in the standard PATH.
-#
-# References
-#   https://gotify.net/docs/index
-#
-# Arguments
-#   subject: notification subject
-#   pathname: pathname containing the notification message (OPTIONAL)
-#
-# Globals
-#   ZED_GOTIFY_URL
-#   ZED_GOTIFY_APPTOKEN
-#   ZED_GOTIFY_PRIORITY
-#
-# Return
-#   0: notification sent
-#   1: notification failed
-#   2: not configured
-#
-zed_notify_gotify()
-{
-    local subject="$1"
-    local pathname="${2:-"/dev/null"}"
-    local msg_body
-    local msg_out
-    local msg_err
-
-    [ -n "${ZED_GOTIFY_URL}" ] && [ -n "${ZED_GOTIFY_APPTOKEN}" ] || return 2
-    local url="${ZED_GOTIFY_URL}/message?token=${ZED_GOTIFY_APPTOKEN}"
-
-    if [ ! -r "${pathname}" ]; then
-        zed_log_err "gotify cannot read \"${pathname}\""
-        return 1
-    fi
-
-    zed_check_cmd "curl" "sed" || return 1
-
-    # Read the message body in.
-    #
-    msg_body="$(cat "${pathname}")"
-
-    if [ -z "${msg_body}" ]
-    then
-        msg_body=$subject
-        subject=""
-    fi
-
-    # Send the POST request and check for errors.
-    #
-    if [ -n "${ZED_GOTIFY_PRIORITY}" ]; then
-        msg_out="$( \
-            curl \
-            --form-string "title=${subject}" \
-            --form-string "message=${msg_body}" \
-            --form-string "priority=${ZED_GOTIFY_PRIORITY}" \
-            "${url}" \
-            2>/dev/null \
-            )"; rv=$?
-    else
-        msg_out="$( \
-            curl \
-            --form-string "title=${subject}" \
-            --form-string "message=${msg_body}" \
-            "${url}" \
-            2>/dev/null \
-            )"; rv=$?
-    fi
-
-    if [ "${rv}" -ne 0 ]; then
-        zed_log_err "curl exit=${rv}"
-        return 1
-    fi
-    msg_err="$(echo "${msg_out}" \
-        | sed -n -e 's/.*"errors" *:.*\[\(.*\)\].*/\1/p')"
-    if [ -n "${msg_err}" ]; then
-        zed_log_err "gotify \"${msg_err}\""
-        return 1
-    fi
-    return 0
-}
-
-
 # zed_rate_limit (tag, [interval])
 #
 # Check whether an event of a given type [tag] has already occurred within the
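Note: stripped of the ZED plumbing, the removed ntfy helper reduces to a single HTTP publish. A minimal standalone sketch (URL, topic, token, and message text are placeholders):

    url="https://ntfy.sh/my-zed-alerts"
    # Omit the -u option entirely for topics that are publicly writable.
    curl -u ":tk_exampletoken" \
        -H "Title: ZED event" \
        -H "Priority: high" \
        -d "pool tank: vdev faulted" \
        "$url"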
@@ -146,54 +146,4 @@ ZED_SYSLOG_SUBCLASS_EXCLUDE="history_event"
 # Power off the drive's slot in the enclosure if it becomes FAULTED.  This can
 # help silence misbehaving drives.  This assumes your drive enclosure fully
 # supports slot power control via sysfs.
-#ZED_POWER_OFF_ENCLOSURE_SLOT_ON_FAULT=1
-
-##
-# Power off the drive's slot in the enclosure if there is a hung I/O which
-# exceeds the deadman timeout.  This can help prevent a single misbehaving
-# drive from rendering a redundant pool unavailable.  This assumes your drive
-# enclosure fully supports slot power control via sysfs.
-#ZED_POWER_OFF_ENCLOSURE_SLOT_ON_DEADMAN=1
-
-##
-# Ntfy topic
-# This defines which topic will receive the ntfy notification.
-#  <https://docs.ntfy.sh/publish/>
-# Disabled by default; uncomment to enable.
-#ZED_NTFY_TOPIC=""
-
-##
-# Ntfy access token (optional for public topics)
-# This defines an access token which can be used
-# to allow you to authenticate when sending to topics
-#  <https://docs.ntfy.sh/publish/#access-tokens>
-# Disabled by default; uncomment to enable.
-#ZED_NTFY_ACCESS_TOKEN=""
-
-##
-# Ntfy Service URL
-# This defines which service the ntfy call will be directed toward
-#  <https://docs.ntfy.sh/install/>
-# https://ntfy.sh by default; uncomment to enable an alternative service url.
-#ZED_NTFY_URL="https://ntfy.sh"
-
-##
-# Gotify server URL
-# This defines a URL that the Gotify call will be directed toward.
-#  <https://gotify.net/docs/index>
-# Disabled by default; uncomment to enable.
-#ZED_GOTIFY_URL=""
-
-##
-# Gotify application token
-# This defines a Gotify application token which a message is associated with.
-# This token is generated when an application is created on the Gotify server.
-# Disabled by default; uncomment to enable.
-#ZED_GOTIFY_APPTOKEN=""
-
-##
-# Gotify priority (optional)
-# If defined, this overrides the default priority of the
-# Gotify application associated with ZED_GOTIFY_APPTOKEN.
-# Value is an integer 0 and up.
-#ZED_GOTIFY_PRIORITY=""
+#ZED_POWER_OFF_ENCLOUSRE_SLOT_ON_FAULT=1
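Note: filled in, the removed Gotify block of zed.rc would look roughly like this (server URL, token, and priority are placeholder values):

    ZED_GOTIFY_URL="https://gotify.example.net"
    ZED_GOTIFY_APPTOKEN="AbCdEfGh12345"
    ZED_GOTIFY_PRIORITY="8"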
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * This file is part of the ZFS Event Daemon (ZED).
  *
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * This file is part of the ZFS Event Daemon (ZED).
  *
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * This file is part of the ZFS Event Daemon (ZED).
  *
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * CDDL HEADER START
  *
@@ -140,8 +139,7 @@ dev_event_nvlist(struct udev_device *dev)
	 * is /dev/sda.
	 */
	struct udev_device *parent_dev = udev_device_get_parent(dev);
-	if (parent_dev != NULL &&
-	    (value = udev_device_get_sysattr_value(parent_dev, "size"))
+	if ((value = udev_device_get_sysattr_value(parent_dev, "size"))
	    != NULL) {
		uint64_t numval = DEV_BSIZE;
 
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * CDDL HEADER START
  *
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * This file is part of the ZFS Event Daemon (ZED).
  *
@@ -36,7 +35,6 @@
 #include "zed_strings.h"
 
 #include "agents/zfs_agents.h"
-#include <libzutil.h>
 
 #define MAXBUF 4096
 
@@ -924,25 +922,6 @@ _zed_event_add_time_strings(uint64_t eid, zed_strings_t *zsp, int64_t etime[])
	}
 }
 
-
-static void
-_zed_event_update_enc_sysfs_path(nvlist_t *nvl)
-{
-	const char *vdev_path;
-
-	if (nvlist_lookup_string(nvl, FM_EREPORT_PAYLOAD_ZFS_VDEV_PATH,
-	    &vdev_path) != 0) {
-		return; /* some other kind of event, ignore it */
-	}
-
-	if (vdev_path == NULL) {
-		return;
-	}
-
-	update_vdev_config_dev_sysfs_path(nvl, vdev_path,
-	    FM_EREPORT_PAYLOAD_ZFS_VDEV_ENC_SYSFS_PATH);
-}
-
 /*
  * Service the next zevent, blocking until one is available.
  */
@@ -990,17 +969,6 @@ zed_event_service(struct zed_conf *zcp)
			zed_log_msg(LOG_WARNING,
			    "Failed to lookup zevent class (eid=%llu)", eid);
		} else {
-			/*
-			 * Special case: If we can dynamically detect an enclosure sysfs
-			 * path, then use that value rather than the one stored in the
-			 * vd->vdev_enc_sysfs_path.  There have been rare cases where
-			 * vd->vdev_enc_sysfs_path becomes outdated.  However, there
-			 * will be other times when we can not dynamically detect the
-			 * sysfs path (like if a disk disappears) and have to rely on
-			 * the old value for things like turning on the fault LED.
-			 */
-			_zed_event_update_enc_sysfs_path(nvl);
-
			/* let internal modules see this event first */
			zfs_agent_post_event(class, NULL, nvl);
 
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * This file is part of the ZFS Event Daemon (ZED).
  *
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * This file is part of the ZFS Event Daemon (ZED).
  *
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * This file is part of the ZFS Event Daemon (ZED).
  *
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * This file is part of the ZFS Event Daemon (ZED).
  *
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * This file is part of the ZFS Event Daemon (ZED).
  *
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * This file is part of the ZFS Event Daemon (ZED).
  *
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * This file is part of the ZFS Event Daemon (ZED).
  *
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * This file is part of the ZFS Event Daemon (ZED).
  *
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * This file is part of the ZFS Event Daemon (ZED).
  *
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * CDDL HEADER START
  *
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * CDDL HEADER START
  *
[File diff suppressed because it is too large]
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * CDDL HEADER START
  *
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * CDDL HEADER START
  *
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * CDDL HEADER START
  *
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * CDDL HEADER START
  *
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * CDDL HEADER START
  *
cmd/zhack.c (22 changed lines)
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * CDDL HEADER START
  *
@@ -204,27 +203,26 @@ static void
 dump_obj(objset_t *os, uint64_t obj, const char *name)
 {
	zap_cursor_t zc;
-	zap_attribute_t *za = zap_attribute_long_alloc();
+	zap_attribute_t za;
 
	(void) printf("%s_obj:\n", name);
 
	for (zap_cursor_init(&zc, os, obj);
-	    zap_cursor_retrieve(&zc, za) == 0;
+	    zap_cursor_retrieve(&zc, &za) == 0;
	    zap_cursor_advance(&zc)) {
-		if (za->za_integer_length == 8) {
-			ASSERT(za->za_num_integers == 1);
+		if (za.za_integer_length == 8) {
+			ASSERT(za.za_num_integers == 1);
			(void) printf("\t%s = %llu\n",
-			    za->za_name, (u_longlong_t)za->za_first_integer);
+			    za.za_name, (u_longlong_t)za.za_first_integer);
		} else {
-			ASSERT(za->za_integer_length == 1);
+			ASSERT(za.za_integer_length == 1);
			char val[1024];
-			VERIFY(zap_lookup(os, obj, za->za_name,
+			VERIFY(zap_lookup(os, obj, za.za_name,
			    1, sizeof (val), val) == 0);
-			(void) printf("\t%s = %s\n", za->za_name, val);
+			(void) printf("\t%s = %s\n", za.za_name, val);
		}
	}
	zap_cursor_fini(&zc);
-	zap_attribute_free(za);
 }
 
 static void
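Note: dump_obj() above backs zhack's feature inspection output; either side of this change can be exercised the same way (the pool name is a placeholder):

    # Sketch: dump the pool's feature ZAP objects through dump_obj().
    zhack feature stat tank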
@@ -614,8 +612,8 @@ zhack_repair_undetach(uberblock_t *ub, nvlist_t *cfg, const int l)
	 * Uberblock root block pointer has valid birth TXG.
	 * Copying it to the label NVlist
	 */
-	if (BP_GET_LOGICAL_BIRTH(&ub->ub_rootbp) != 0) {
-		const uint64_t txg = BP_GET_LOGICAL_BIRTH(&ub->ub_rootbp);
+	if (ub->ub_rootbp.blk_birth != 0) {
+		const uint64_t txg = ub->ub_rootbp.blk_birth;
		ub->ub_txg = txg;
 
		if (nvlist_remove_all(cfg, ZPOOL_CONFIG_CREATE_TXG) != 0) {
@@ -1,5 +1,4 @@
 #!/usr/bin/env @PYTHON_SHEBANG@
-# SPDX-License-Identifier: CDDL-1.0
 #
 # Print out statistics for all zil stats. This information is
 # available through the zil kstat.
@@ -44,9 +43,6 @@ cols = {
     "obj": [12, -1, "objset"],
     "cc": [5, 1000, "zil_commit_count"],
     "cwc": [5, 1000, "zil_commit_writer_count"],
-    "cec": [5, 1000, "zil_commit_error_count"],
-    "csc": [5, 1000, "zil_commit_stall_count"],
-    "cSc": [5, 1000, "zil_commit_suspend_count"],
     "ic": [5, 1000, "zil_itx_count"],
     "iic": [5, 1000, "zil_itx_indirect_count"],
     "iib": [5, 1024, "zil_itx_indirect_bytes"],
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * CDDL HEADER START
  *
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * CDDL HEADER START
  *
@@ -23,7 +22,6 @@
  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
  * Copyright (c) 2017, Intel Corporation.
- * Copyright (c) 2023-2025, Klara, Inc.
  */
 
 /*
@@ -210,68 +208,6 @@ type_to_name(uint64_t type)
	}
 }
 
-struct errstr {
-	int err;
-	const char *str;
-};
-static const struct errstr errstrtable[] = {
-	{ EIO, "io" },
-	{ ECKSUM, "checksum" },
-	{ EINVAL, "decompress" },
-	{ EACCES, "decrypt" },
-	{ ENXIO, "nxio" },
-	{ ECHILD, "dtl" },
-	{ EILSEQ, "corrupt" },
-	{ ENOSYS, "noop" },
-	{ 0, NULL },
-};
-
-static int
-str_to_err(const char *str)
-{
-	for (int i = 0; errstrtable[i].str != NULL; i++)
-		if (strcasecmp(errstrtable[i].str, str) == 0)
-			return (errstrtable[i].err);
-	return (-1);
-}
-static const char *
-err_to_str(int err)
-{
-	for (int i = 0; errstrtable[i].str != NULL; i++)
-		if (errstrtable[i].err == err)
-			return (errstrtable[i].str);
-	return ("[unknown]");
-}
-
-static const char *const iotypestrtable[ZINJECT_IOTYPES] = {
-	[ZINJECT_IOTYPE_NULL] = "null",
-	[ZINJECT_IOTYPE_READ] = "read",
-	[ZINJECT_IOTYPE_WRITE] = "write",
-	[ZINJECT_IOTYPE_FREE] = "free",
-	[ZINJECT_IOTYPE_CLAIM] = "claim",
-	[ZINJECT_IOTYPE_FLUSH] = "flush",
-	[ZINJECT_IOTYPE_TRIM] = "trim",
-	[ZINJECT_IOTYPE_ALL] = "all",
-	[ZINJECT_IOTYPE_PROBE] = "probe",
-};
-
-static zinject_iotype_t
-str_to_iotype(const char *arg)
-{
-	for (uint_t iotype = 0; iotype < ZINJECT_IOTYPES; iotype++)
-		if (iotypestrtable[iotype] != NULL &&
-		    strcasecmp(iotypestrtable[iotype], arg) == 0)
-			return (iotype);
-	return (ZINJECT_IOTYPES);
-}
-
-static const char *
-iotype_to_str(zinject_iotype_t iotype)
-{
-	if (iotype >= ZINJECT_IOTYPES || iotypestrtable[iotype] == NULL)
-		return ("[unknown]");
-	return (iotypestrtable[iotype]);
-}
 
 /*
  * Print usage message.
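Note: the removed tables are what let zinject translate -e names into errnos and -T names into I/O types. A sketch of the round trip from the command line (device and pool names are placeholders; 'noop' and 'flush' exist only on the side that still has the tables):

    # Inject EILSEQ ("corrupt") on 10% of reads of one device.
    zinject -d sda -e corrupt -T read -f 10 tank
    zinject           # list active handlers
    zinject -c all    # cancel all injections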
@@ -297,12 +233,12 @@ usage(void)
	    "\t\tspa_vdev_exit() will trigger a panic.\n"
	    "\n"
	    "\tzinject -d device [-e errno] [-L <nvlist|uber|pad1|pad2>] [-F]\n"
-	    "\t\t[-T <read|write|free|claim|flush|all>] [-f frequency] pool\n\n"
+	    "\t\t[-T <read|write|free|claim|all>] [-f frequency] pool\n\n"
	    "\t\tInject a fault into a particular device or the device's\n"
	    "\t\tlabel.  Label injection can either be 'nvlist', 'uber',\n "
	    "\t\t'pad1', or 'pad2'.\n"
-	    "\t\t'errno' can be 'nxio' (the default), 'io', 'dtl',\n"
-	    "\t\t'corrupt' (bit flip), or 'noop' (successfully do nothing).\n"
+	    "\t\t'errno' can be 'nxio' (the default), 'io', 'dtl', or\n"
+	    "\t\t'corrupt' (bit flip).\n"
	    "\t\t'frequency' is a value between 0.0001 and 100.0 that limits\n"
	    "\t\tdevice error injection to a percentage of the IOs.\n"
	    "\n"
|
|||||||
"\t\tcreate 3 lanes on the device; one lane with a latency\n"
|
"\t\tcreate 3 lanes on the device; one lane with a latency\n"
|
||||||
"\t\tof 10 ms and two lanes with a 25 ms latency.\n"
|
"\t\tof 10 ms and two lanes with a 25 ms latency.\n"
|
||||||
"\n"
|
"\n"
|
||||||
"\tzinject -P import|export -s <seconds> pool\n"
|
|
||||||
"\t\tAdd an artificial delay to a future pool import or export,\n"
|
|
||||||
"\t\tsuch that the operation takes a minimum of supplied seconds\n"
|
|
||||||
"\t\tto complete.\n"
|
|
||||||
"\n"
|
|
||||||
"\tzinject -I [-s <seconds> | -g <txgs>] pool\n"
|
"\tzinject -I [-s <seconds> | -g <txgs>] pool\n"
|
||||||
"\t\tCause the pool to stop writing blocks yet not\n"
|
"\t\tCause the pool to stop writing blocks yet not\n"
|
||||||
"\t\treport errors for a duration. Simulates buggy hardware\n"
|
"\t\treport errors for a duration. Simulates buggy hardware\n"
|
||||||
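Note: the usage text removed above belongs to the import/export delay injector. On the side that has it, invocation follows the quoted synopsis (the pool name is a placeholder):

    # Delay the next import of 'tank' by at least 60 seconds.
    zinject -P import -s 60 tank
    # Or delay a future export by 30 seconds.
    zinject -P export -s 30 tank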
@@ -428,37 +359,31 @@ print_data_handler(int id, const char *pool, zinject_record_t *record,
 {
	int *count = data;
 
-	if (record->zi_guid != 0 || record->zi_func[0] != '\0' ||
-	    record->zi_duration != 0) {
+	if (record->zi_guid != 0 || record->zi_func[0] != '\0')
		return (0);
-	}
 
	if (*count == 0) {
		(void) printf("%3s %-15s %-6s %-6s %-8s %3s %-4s "
-		    "%-15s %-6s %-15s\n", "ID", "POOL", "OBJSET", "OBJECT",
-		    "TYPE", "LVL", "DVAs", "RANGE", "MATCH", "INJECT");
+		    "%-15s\n", "ID", "POOL", "OBJSET", "OBJECT", "TYPE",
+		    "LVL", "DVAs", "RANGE");
		(void) printf("--- --------------- ------ "
-		    "------ -------- --- ---- --------------- "
-		    "------ ------\n");
+		    "------ -------- --- ---- ---------------\n");
	}
 
	*count += 1;
 
-	char rangebuf[32];
-	if (record->zi_start == 0 && record->zi_end == -1ULL)
-		snprintf(rangebuf, sizeof (rangebuf), "all");
-	else
-		snprintf(rangebuf, sizeof (rangebuf), "[%llu, %llu]",
-		    (u_longlong_t)record->zi_start,
-		    (u_longlong_t)record->zi_end);
-
-
-	(void) printf("%3d %-15s %-6llu %-6llu %-8s %-3d 0x%02x %-15s "
-	    "%6" PRIu64 " %6" PRIu64 "\n", id, pool,
-	    (u_longlong_t)record->zi_objset,
+	(void) printf("%3d %-15s %-6llu %-6llu %-8s %-3d 0x%02x ",
+	    id, pool, (u_longlong_t)record->zi_objset,
	    (u_longlong_t)record->zi_object, type_to_name(record->zi_type),
-	    record->zi_level, record->zi_dvas, rangebuf,
-	    record->zi_match_count, record->zi_inject_count);
+	    record->zi_level, record->zi_dvas);
+
+	if (record->zi_start == 0 &&
+	    record->zi_end == -1ULL)
+		(void) printf("all\n");
+	else
+		(void) printf("[%llu, %llu]\n", (u_longlong_t)record->zi_start,
+		    (u_longlong_t)record->zi_end);
 
	return (0);
 }
@@ -476,26 +401,14 @@ print_device_handler(int id, const char *pool, zinject_record_t *record,
		return (0);
 
	if (*count == 0) {
-		(void) printf("%3s %-15s %-16s %-5s %-10s %-9s "
-		    "%-6s %-6s\n",
-		    "ID", "POOL", "GUID", "TYPE", "ERROR", "FREQ",
-		    "MATCH", "INJECT");
-		(void) printf(
-		    "--- --------------- ---------------- "
-		    "----- ---------- --------- "
-		    "------ ------\n");
+		(void) printf("%3s %-15s %s\n", "ID", "POOL", "GUID");
+		(void) printf("--- --------------- ----------------\n");
	}
 
	*count += 1;
 
-	double freq = record->zi_freq == 0 ? 100.0f :
-	    (((double)record->zi_freq) / ZI_PERCENTAGE_MAX) * 100.0f;
-
-	(void) printf("%3d %-15s %llx %-5s %-10s %8.4f%% "
-	    "%6" PRIu64 " %6" PRIu64 "\n", id, pool,
-	    (u_longlong_t)record->zi_guid,
-	    iotype_to_str(record->zi_iotype), err_to_str(record->zi_error),
-	    freq, record->zi_match_count, record->zi_inject_count);
+	(void) printf("%3d %-15s %llx\n", id, pool,
+	    (u_longlong_t)record->zi_guid);
 
	return (0);
 }
@@ -513,26 +426,18 @@ print_delay_handler(int id, const char *pool, zinject_record_t *record,
		return (0);
 
	if (*count == 0) {
-		(void) printf("%3s %-15s %-16s %-10s %-5s %-9s "
-		    "%-6s %-6s\n",
-		    "ID", "POOL", "GUID", "DELAY (ms)", "LANES", "FREQ",
-		    "MATCH", "INJECT");
-		(void) printf("--- --------------- ---------------- "
-		    "---------- ----- --------- "
-		    "------ ------\n");
+		(void) printf("%3s %-15s %-15s %-15s %s\n",
+		    "ID", "POOL", "DELAY (ms)", "LANES", "GUID");
+		(void) printf("--- --------------- --------------- "
+		    "--------------- ----------------\n");
	}
 
	*count += 1;
 
-	double freq = record->zi_freq == 0 ? 100.0f :
-	    (((double)record->zi_freq) / ZI_PERCENTAGE_MAX) * 100.0f;
-
-	(void) printf("%3d %-15s %llx %10llu %5llu %8.4f%% "
-	    "%6" PRIu64 " %6" PRIu64 "\n", id, pool,
-	    (u_longlong_t)record->zi_guid,
+	(void) printf("%3d %-15s %-15llu %-15llu %llx\n", id, pool,
	    (u_longlong_t)NSEC2MSEC(record->zi_timer),
	    (u_longlong_t)record->zi_nlanes,
-	    freq, record->zi_match_count, record->zi_inject_count);
+	    (u_longlong_t)record->zi_guid);
 
	return (0);
 }
@@ -558,33 +463,6 @@ print_panic_handler(int id, const char *pool, zinject_record_t *record,
	return (0);
 }
 
-static int
-print_pool_delay_handler(int id, const char *pool, zinject_record_t *record,
-    void *data)
-{
-	int *count = data;
-
-	if (record->zi_cmd != ZINJECT_DELAY_IMPORT &&
-	    record->zi_cmd != ZINJECT_DELAY_EXPORT) {
-		return (0);
-	}
-
-	if (*count == 0) {
-		(void) printf("%3s %-19s %-11s %s\n",
-		    "ID", "POOL", "DELAY (sec)", "COMMAND");
-		(void) printf("--- ------------------- -----------"
-		    " -------\n");
-	}
-
-	*count += 1;
-
-	(void) printf("%3d %-19s %-11llu %s\n",
-	    id, pool, (u_longlong_t)record->zi_duration,
-	    record->zi_cmd == ZINJECT_DELAY_IMPORT ? "import": "export");
-
-	return (0);
-}
-
 /*
  * Print all registered error handlers.  Returns the number of handlers
  * registered.
@@ -615,13 +493,6 @@ print_all_handlers(void)
		count = 0;
	}
 
-	(void) iter_handlers(print_pool_delay_handler, &count);
-	if (count > 0) {
-		total += count;
-		(void) printf("\n");
-		count = 0;
-	}
-
	(void) iter_handlers(print_panic_handler, &count);
 
	return (count + total);
@@ -694,27 +565,9 @@ register_handler(const char *pool, int flags, zinject_record_t *record,
	zc.zc_guid = flags;
 
	if (zfs_ioctl(g_zfs, ZFS_IOC_INJECT_FAULT, &zc) != 0) {
-		const char *errmsg = strerror(errno);
-
-		switch (errno) {
-		case EDOM:
-			errmsg = "block level exceeds max level of object";
-			break;
-		case EEXIST:
-			if (record->zi_cmd == ZINJECT_DELAY_IMPORT)
-				errmsg = "pool already imported";
-			if (record->zi_cmd == ZINJECT_DELAY_EXPORT)
-				errmsg = "a handler already exists";
-			break;
-		case ENOENT:
-			/* import delay injector running on older zfs module */
-			if (record->zi_cmd == ZINJECT_DELAY_IMPORT)
-				errmsg = "import delay injector not supported";
-			break;
-		default:
-			break;
-		}
-		(void) fprintf(stderr, "failed to add handler: %s\n", errmsg);
+		(void) fprintf(stderr, "failed to add handler: %s\n",
+		    errno == EDOM ? "block level exceeds max level of object" :
+		    strerror(errno));
		return (1);
	}
 
@@ -739,9 +592,6 @@ register_handler(const char *pool, int flags, zinject_record_t *record,
	} else if (record->zi_duration < 0) {
		(void) printf(" txgs: %lld \n",
		    (u_longlong_t)-record->zi_duration);
-	} else if (record->zi_timer > 0) {
-		(void) printf(" timer: %lld ms\n",
-		    (u_longlong_t)NSEC2MSEC(record->zi_timer));
	} else {
		(void) printf("objset: %llu\n",
		    (u_longlong_t)record->zi_objset);
@@ -896,7 +746,7 @@ main(int argc, char **argv)
	int quiet = 0;
	int error = 0;
	int domount = 0;
-	int io_type = ZINJECT_IOTYPE_ALL;
+	int io_type = ZIO_TYPES;
	int action = VDEV_STATE_UNKNOWN;
	err_type_t type = TYPE_INVAL;
	err_type_t label = TYPE_INVAL;
@@ -940,7 +790,7 @@ main(int argc, char **argv)
	}
 
	while ((c = getopt(argc, argv,
-	    ":aA:b:C:d:D:f:Fg:qhIc:t:T:l:mr:s:e:uL:p:P:")) != -1) {
+	    ":aA:b:C:d:D:f:Fg:qhIc:t:T:l:mr:s:e:uL:p:")) != -1) {
		switch (c) {
		case 'a':
			flags |= ZINJECT_FLUSH_ARC;
@@ -992,12 +842,24 @@ main(int argc, char **argv)
			}
			break;
		case 'e':
-			error = str_to_err(optarg);
-			if (error < 0) {
+			if (strcasecmp(optarg, "io") == 0) {
+				error = EIO;
+			} else if (strcasecmp(optarg, "checksum") == 0) {
+				error = ECKSUM;
+			} else if (strcasecmp(optarg, "decompress") == 0) {
+				error = EINVAL;
+			} else if (strcasecmp(optarg, "decrypt") == 0) {
+				error = EACCES;
+			} else if (strcasecmp(optarg, "nxio") == 0) {
+				error = ENXIO;
+			} else if (strcasecmp(optarg, "dtl") == 0) {
+				error = ECHILD;
+			} else if (strcasecmp(optarg, "corrupt") == 0) {
+				error = EILSEQ;
+			} else {
				(void) fprintf(stderr, "invalid error type "
-				    "'%s': must be one of: io decompress "
-				    "decrypt nxio dtl corrupt noop\n",
-				    optarg);
+				    "'%s': must be 'io', 'checksum' or "
+				    "'nxio'\n", optarg);
				usage();
				libzfs_fini(g_zfs);
				return (1);
@@ -1058,19 +920,6 @@ main(int argc, char **argv)
			    sizeof (record.zi_func));
			record.zi_cmd = ZINJECT_PANIC;
			break;
-		case 'P':
-			if (strcasecmp(optarg, "import") == 0) {
-				record.zi_cmd = ZINJECT_DELAY_IMPORT;
-			} else if (strcasecmp(optarg, "export") == 0) {
-				record.zi_cmd = ZINJECT_DELAY_EXPORT;
-			} else {
-				(void) fprintf(stderr, "invalid command '%s': "
-				    "must be 'import' or 'export'\n", optarg);
-				usage();
-				libzfs_fini(g_zfs);
-				return (1);
-			}
-			break;
		case 'q':
			quiet = 1;
			break;
@@ -1090,11 +939,20 @@ main(int argc, char **argv)
			}
			break;
		case 'T':
-			io_type = str_to_iotype(optarg);
-			if (io_type == ZINJECT_IOTYPES) {
+			if (strcasecmp(optarg, "read") == 0) {
+				io_type = ZIO_TYPE_READ;
+			} else if (strcasecmp(optarg, "write") == 0) {
+				io_type = ZIO_TYPE_WRITE;
+			} else if (strcasecmp(optarg, "free") == 0) {
+				io_type = ZIO_TYPE_FREE;
+			} else if (strcasecmp(optarg, "claim") == 0) {
+				io_type = ZIO_TYPE_CLAIM;
+			} else if (strcasecmp(optarg, "all") == 0) {
+				io_type = ZIO_TYPES;
+			} else {
				(void) fprintf(stderr, "invalid I/O type "
				    "'%s': must be 'read', 'write', 'free', "
-				    "'claim', 'flush' or 'all'\n", optarg);
+				    "'claim' or 'all'\n", optarg);
				usage();
				libzfs_fini(g_zfs);
				return (1);
@@ -1141,7 +999,7 @@ main(int argc, char **argv)
	argc -= optind;
	argv += optind;
 
-	if (record.zi_duration != 0 && record.zi_cmd == 0)
+	if (record.zi_duration != 0)
		record.zi_cmd = ZINJECT_IGNORED_WRITES;
 
	if (cancel != NULL) {
@@ -1213,7 +1071,7 @@ main(int argc, char **argv)
	}
 
	if (error == EILSEQ &&
-	    (record.zi_freq == 0 || io_type != ZINJECT_IOTYPE_READ)) {
+	    (record.zi_freq == 0 || io_type != ZIO_TYPE_READ)) {
		(void) fprintf(stderr, "device corrupt errors require "
		    "io type read and a frequency value\n");
		libzfs_fini(g_zfs);
@@ -1225,22 +1083,6 @@ main(int argc, char **argv)
		libzfs_fini(g_zfs);
		return (1);
	}
 
-	if (record.zi_nlanes) {
-		switch (io_type) {
-		case ZINJECT_IOTYPE_READ:
-		case ZINJECT_IOTYPE_WRITE:
-		case ZINJECT_IOTYPE_ALL:
-			break;
-		default:
-			(void) fprintf(stderr, "I/O type for a delay "
-			    "must be 'read' or 'write'\n");
-			usage();
-			libzfs_fini(g_zfs);
-			return (1);
-		}
-	}
-
	if (!error)
		error = ENXIO;
 
@@ -1287,8 +1129,8 @@ main(int argc, char **argv)
		if (raw != NULL || range != NULL || type != TYPE_INVAL ||
		    level != 0 || device != NULL || record.zi_freq > 0 ||
		    dvas != 0) {
-			(void) fprintf(stderr, "%s incompatible with other "
-			    "options\n", "import|export delay (-P)");
+			(void) fprintf(stderr, "panic (-p) incompatible with "
+			    "other options\n");
			usage();
			libzfs_fini(g_zfs);
			return (2);
@@ -1306,28 +1148,6 @@ main(int argc, char **argv)
		if (argv[1] != NULL)
			record.zi_type = atoi(argv[1]);
		dataset[0] = '\0';
-	} else if (record.zi_cmd == ZINJECT_DELAY_IMPORT ||
-	    record.zi_cmd == ZINJECT_DELAY_EXPORT) {
-		if (raw != NULL || range != NULL || type != TYPE_INVAL ||
-		    level != 0 || device != NULL || record.zi_freq > 0 ||
-		    dvas != 0) {
-			(void) fprintf(stderr, "%s incompatible with other "
-			    "options\n", "import|export delay (-P)");
-			usage();
-			libzfs_fini(g_zfs);
-			return (2);
-		}
-
-		if (argc != 1 || record.zi_duration <= 0) {
-			(void) fprintf(stderr, "import|export delay (-P) "
-			    "injection requires a duration (-s) and a single "
-			    "pool name\n");
-			usage();
-			libzfs_fini(g_zfs);
-			return (2);
-		}
-
-		(void) strlcpy(pool, argv[0], sizeof (pool));
	} else if (record.zi_cmd == ZINJECT_IGNORED_WRITES) {
		if (raw != NULL || range != NULL || type != TYPE_INVAL ||
		    level != 0 || record.zi_freq > 0 || dvas != 0) {
Some files were not shown because too many files have changed in this diff.