mirror of https://github.com/openzfs/zfs.git
synced 2025-10-01 19:56:28 +00:00
Compare commits
290 Commits
(commit SHA1 list; the author and date columns were empty in this mirror view)
c883088df8, c0c4866f8a, b62fd2cef9, d92fbe2150, a4978d2605, a6f6c881ff, fc3d34bd08, 36116b4612, b699dacb4a, d22bf6a9bd,
40e20d808c, 9bb8d26bd5, 08fd5ccc38, 00d85a98ea, 9ef15845f5, 69142125d7, 59112ca27d, 992d8871eb, e6ca28c970, cbd51c5f24,
09e6724e1e, 7466e09a49, ce782d0804, 64afc4e66e, 621dfaff5c, ab653603f8, acc7cd8e99, dd0874cf7e, 7cd666d54b, 0606ce2055,
dd3a0a2715, 9ad150446f, 9da745f5de, cfa29b9945, 09a7961364, 276be5357c, 424d06a298, 6b64acc157, a2e71db664, eb4a36bcef,
52cee9a3eb, 2006ac1f4a, 509526ad21, 4db88c37cc, 8b1c6db3d2, 22e4f08c30, 7bccf98a73, 4d4972ed98, 3425484eb9, 9e0304c363,
c1161e2851, ef527958c6, d2f7b2e557, 83c0ccc7cf, c16d103422, f94a77951d, d8b0b6032b, 387f003be3, 07cf973fe9, 2ecc2dfe42,
9ecd112dc1, a00231a3fc, 9181e94f0b, 3bd23fd78d, ac592318b8, 152a775eac, 976bf9b6a6, f71c16a661, 9c40ae0219, a701548eb4,
b13c91bb29, e09356fa05, 1e1d748cae, dea2d3c6cd, 121924575e, e11b3eb1c6, 3b8f227362, e48195c816, ad47eca195, 2e259c6f00,
a8c29a79df, f13593619b, c34fe8dcbc, 2a59b6bfa9, db2db50e37, d530d5d8a5, 3c502e376b, 03b84099d9, 18a9185165, 3c13601a12,
b3626f0a35, 494aaaed89, 522414da3b, a8c256046b, eb34de04d7, d813aa8530, 3b267e72de, 349fb77f11, 2a953e0ac9, e4985bf5a1,
e96675a7b1, d702f86eaf, 41c4599cba, 56a2a0981e, 9b9b09f452, 89fcb8c6f9, 55dd24c4cc, 78287023ce, 479dca51c6, 87e9e82865,
0733fe2aa5, fd836dfe24, e92a680c70, f1659cc782, f863ac3d0f, f6d2e5c075, f2fe4d51a8, 76663fe372, 44c8ff9b0c, f0ffcc3adc,
e534ba5ce7, 1c7048357d, 3ec4ea68d4, bd7a02c251, e82e68400a, 3f67e012e4, 21875dd090, fe9d409e90, 7aef672b77, f9a9aea126,
8ba748d414, e860cb0200, 86c3ed40e1, 6e41aca519, 79f7de5752, 0ef1964c79, eaa62d9951, 8ca95d78c5, edebca5dfc, 1cc1bf4fa7,
0bcd1151f0, 78fd79eacd, 6d693e20a2, b76724ae47, 459c99ff23, 95785196f2, 2bba9fd479, 30ee2ee8ec, d7b6e470ff, 04186d33be,
810fc49a3e, 75a7740574, a80e1f1c90, 111ae3364c, 3990273ffe, da93b72c91, 9fa06c5574, 8d47d2d579, f6e6e77ed8, 120d1787d7,
2407f30bda, 9be8ddfb3c, 3755cde22a, 33d7c2d165, 2919784be2, 8495536f7f, bcd010d3a5, c27277daac, bf54da84fb, 3158b5d718,
ba7797c8db, bc77a0c85e, 1611b8e56e, 8015e2ea66, c53bc3837c, e9dc31c74e, b04b13ae79, 7b1d421adf, db5c3b4c76, 0d870a1775,
608741d062, 3079bf2e6c, b34bf2d5f6, 229ca7d738, 9e36c5769f, d38f4664a6, 99dc1fc340, ba4dbbdae7, 8526b12f3d, 0ce1b2ca19,
0aabd6b482, 5f30698670, a199cac6cd, 729507d309, 3af63683fe, 9aa1a2878e, cc75c816c5, 1c2aee7a52, 62677576a7, f7a07d76ee,
54c6fbd378, 0ce7a068e9, 228b064d1b, b9b9cdcdb1, 11943656f9, c011ef8c91, cacc599aa2, c7ee59a160, 58a707375f, 5a22de144a,
31a4673c05, 3a68f3c50f, 8be6308e85, 0bf2c5365e, d76de9fb17, c0f075c06b, 6c2fc56916, e96fbdba34, 739db06ce7, 4da8c7d11e,
32949f2560, 79ac1b29d5, 7dc2baaa1f, 5a7cb0b065, 400f56e3f8, 63159e5bda, 7eabb0af37, c65aaa8387, e99e684b33, 1b696429c1,
084ff4abd2, ab999406fe, d19304ffee, 92f095a903, 645a7e4d95, 95649854ba, 895cb689d3, 6bdc7259d1, 1e488eec60, c418edf1d3,
df8c9f351d, bb31ded68b, c1801cbe59, ffaedf0a44, 02ce9030e6, 0ae7bfc0a4, bd1eab16eb, b3c1807d77, b5e2456333, c47f0f4417,
12f2b1f65e, 4a104ac047, c24a480631, 36d1a3ef4e, 2768dc04cc, 3366ceaf3a, 5d12545da8, a3ea8c8ee6, 0426e13271, 8aa4f0f0fc,
7698503dca, b9aa32ff39, 571762b290, 991834f5dc, 41a0f66279, c79d1bae75, 70232483b4, c5273e0c31, 685ae4429f, 81be809a25,
8a6fde8213, b6f618f8ff, 51a2b59767, 8c81c0b05d, b221f43943, e037327bfe, 1a2e486d25, d8011707cc, f5f5a2db95, 83b0967c1f,
73ba5df31a, 1bc244ae93, 931dc70550, 5299f4f289, f917cf1c03, 56ed389a57, e613e4bbe3, b4e630b00c, bf6cd30796, 1266cebf87
.cirrus.yml | 21 (new file)
@@ -0,0 +1,21 @@
env:
  CIRRUS_CLONE_DEPTH: 1
  ARCH: amd64

build_task:
  matrix:
    freebsd_instance:
      image_family: freebsd-12-4
    freebsd_instance:
      image_family: freebsd-13-2
    freebsd_instance:
      image_family: freebsd-14-0-snap
  prepare_script:
    - pkg install -y autoconf automake libtool gettext-runtime gmake ksh93 py39-packaging py39-cffi py39-sysctl
  configure_script:
    - env MAKE=gmake ./autogen.sh
    - env MAKE=gmake ./configure --with-config="user" --with-python=3.9
  build_script:
    - gmake -j `sysctl -n kern.smp.cpus`
  install_script:
    - gmake install
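Taken together, the Cirrus config above amounts to the following manual build sequence on a FreeBSD host (a sketch assembled only from the script lines in the file; run with root privileges):

    pkg install -y autoconf automake libtool gettext-runtime gmake ksh93 \
        py39-packaging py39-cffi py39-sysctl
    env MAKE=gmake ./autogen.sh
    env MAKE=gmake ./configure --with-config="user" --with-python=3.9
    gmake -j `sysctl -n kern.smp.cpus`
    gmake install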
.github/CONTRIBUTING.md | 18 (vendored)
@@ -145,18 +145,22 @@ Once everything is in good shape and the details have been worked out you can re
 Any required reviews can then be finalized and the pull request merged.
 
 #### Tests and Benchmarks
-* Every pull request is tested using a GitHub Actions workflow on multiple platforms by running the [zfs-tests.sh and zloop.sh](
+* Every pull request will by tested by the buildbot on multiple platforms by running the [zfs-tests.sh and zloop.sh](
 https://openzfs.github.io/openzfs-docs/Developer%20Resources/Building%20ZFS.html#running-zloop-sh-and-zfs-tests-sh) test suites.
-`.github/workflows/scripts/generate-ci-type.py` is used to determine whether the pull request is nonbehavior, i.e., not introducing behavior changes of any code, configuration or tests. If so, the CI will run on fewer platforms and only essential sanity tests will run. You can always override this by adding `ZFS-CI-Type` line to your commit message:
-* If your last commit (or `HEAD` in git terms) contains a line `ZFS-CI-Type: quick`, quick mode is forced regardless of what files are changed.
-* Otherwise, if any commit in a PR contains a line `ZFS-CI-Type: full`, full mode is forced.
 * To verify your changes conform to the [style guidelines](
 https://github.com/openzfs/zfs/blob/master/.github/CONTRIBUTING.md#style-guides
 ), please run `make checkstyle` and resolve any warnings.
-* Code analysis is performed by [CodeQL](https://codeql.github.com/) for each pull request.
+* Static code analysis of each pull request is performed by the buildbot; run `make lint` to check your changes.
-* Test cases should be provided when appropriate. This includes making sure new features have adequate code coverage.
+* Test cases should be provided when appropriate.
+  This includes making sure new features have adequate code coverage.
 * If your pull request improves performance, please include some benchmarks.
-* The pull request must pass all CI checks before being accepted.
+* The pull request must pass all required [ZFS
+  Buildbot](http://build.zfsonlinux.org/) builders before
+  being accepted. If you are experiencing intermittent TEST
+  builder failures, you may be experiencing a [test suite
+  issue](https://github.com/openzfs/zfs/issues?q=is%3Aissue+is%3Aopen+label%3A%22Type%3A+Test+Suite%22).
+  There are also various [buildbot options](https://openzfs.github.io/openzfs-docs/Developer%20Resources/Buildbot%20Options.html)
+  to control how changes are tested.
 
 ### Testing
 All help is appreciated! If you're in a position to run the latest code
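For the `ZFS-CI-Type` override described in the removed lines above, the trailer goes on its own line in the commit message body. A hypothetical commit requesting quick CI mode could look like this (the subject line is purely illustrative):

    git commit -s -m "contrib: fix typo in comment

    ZFS-CI-Type: quick"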
.github/ISSUE_TEMPLATE/feature_request.md | 2 (vendored)
@@ -14,7 +14,7 @@ Please check our issue tracker before opening a new feature request.
 Filling out the following template will help other contributors better understand your proposed feature.
 -->
 
-### Describe the feature you would like to see added to OpenZFS
+### Describe the feature would like to see added to OpenZFS
 
 <!--
 Provide a clear and concise description of the feature.
.github/PULL_REQUEST_TEMPLATE.md | 6 (vendored)
@@ -2,6 +2,11 @@
 
 <!--- Provide a general summary of your changes in the Title above -->
 
+<!---
+Documentation on ZFS Buildbot options can be found at
+https://openzfs.github.io/openzfs-docs/Developer%20Resources/Buildbot%20Options.html
+-->
+
 ### Motivation and Context
 <!--- Why is this change required? What problem does it solve? -->
 <!--- If it fixes an open issue, please link to the issue here. -->
@@ -22,7 +27,6 @@
 - [ ] New feature (non-breaking change which adds functionality)
 - [ ] Performance enhancement (non-breaking change which improves efficiency)
 - [ ] Code cleanup (non-breaking change which makes code smaller or more readable)
-- [ ] Quality assurance (non-breaking change which makes the code more robust against bugs)
 - [ ] Breaking change (fix or feature that would cause existing functionality to change)
 - [ ] Library ABI change (libzfs, libzfs\_core, libnvpair, libuutil and libzfsbootenv)
 - [ ] Documentation (a change to man pages or other documentation)
.github/codeql-cpp.yml | 1 (vendored)
@@ -2,4 +2,3 @@ name: "Custom CodeQL Analysis"
 
 queries:
   - uses: ./.github/codeql/custom-queries/cpp/deprecatedFunctionUsage.ql
-  - uses: ./.github/codeql/custom-queries/cpp/dslDatasetHoldReleMismatch.ql
.github/codeql/custom-queries/cpp/dslDatasetHoldReleMismatch.ql | 34 (deleted file)
@@ -1,34 +0,0 @@
/**
 * @name Detect mismatched dsl_dataset_hold/_rele pairs
 * @description Flags instances of issue #12014 where
 *  - a dataset held with dsl_dataset_hold_obj() ends up in dsl_dataset_rele_flags(), or
 *  - a dataset held with dsl_dataset_hold_obj_flags() ends up in dsl_dataset_rele().
 * @kind problem
 * @severity error
 * @tags correctness
 * @id cpp/dslDatasetHoldReleMismatch
 */

import cpp

from Variable ds, Call holdCall, Call releCall, string message
where
  ds.getType().toString() = "dsl_dataset_t *" and
  holdCall.getASuccessor*() = releCall and
  (
    (holdCall.getTarget().getName() = "dsl_dataset_hold_obj_flags" and
     holdCall.getArgument(4).(AddressOfExpr).getOperand().(VariableAccess).getTarget() = ds and
     releCall.getTarget().getName() = "dsl_dataset_rele" and
     releCall.getArgument(0).(VariableAccess).getTarget() = ds and
     message = "Held with dsl_dataset_hold_obj_flags but released with dsl_dataset_rele")
    or
    (holdCall.getTarget().getName() = "dsl_dataset_hold_obj" and
     holdCall.getArgument(3).(AddressOfExpr).getOperand().(VariableAccess).getTarget() = ds and
     releCall.getTarget().getName() = "dsl_dataset_rele_flags" and
     releCall.getArgument(0).(VariableAccess).getTarget() = ds and
     message = "Held with dsl_dataset_hold_obj but released with dsl_dataset_rele_flags")
  )
select releCall,
  "Mismatched release: held with $@ but released with " + releCall.getTarget().getName() + " for dataset $@",
  holdCall, holdCall.getTarget().getName(),
  ds, ds.toString()
.github/workflows/build-dependencies.txt | 57 (vendored, new file)
@@ -0,0 +1,57 @@
acl
alien
attr
autoconf
bc
build-essential
curl
dbench
debhelper-compat
dh-python
dkms
fakeroot
fio
gdb
gdebi
git
ksh
lcov
libacl1-dev
libaio-dev
libattr1-dev
libblkid-dev
libcurl4-openssl-dev
libdevmapper-dev
libelf-dev
libffi-dev
libmount-dev
libpam0g-dev
libselinux1-dev
libssl-dev
libtool
libudev-dev
linux-headers-generic
lsscsi
mdadm
nfs-kernel-server
pamtester
parted
po-debconf
python3
python3-all-dev
python3-cffi
python3-dev
python3-packaging
python3-pip
python3-setuptools
python3-sphinx
rng-tools-debian
rsync
samba
sysstat
uuid-dev
watchdog
wget
xfslibs-dev
xz-utils
zlib1g-dev
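This list is consumed with xargs, exactly as the checkstyle workflow below does; the same one-liner works locally on Debian/Ubuntu:

    sudo xargs --arg-file=.github/workflows/build-dependencies.txt apt-get install -qq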
.github/workflows/checkstyle-dependencies.txt | 5 (vendored, new file)
@@ -0,0 +1,5 @@
cppcheck
devscripts
mandoc
pax-utils
shellcheck
.github/workflows/checkstyle.yaml | 23 (vendored)
@@ -4,10 +4,6 @@ on:
   push:
   pull_request:
 
-concurrency:
-  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
-  cancel-in-progress: true
-
 jobs:
   checkstyle:
     runs-on: ubuntu-22.04
@@ -17,11 +13,15 @@ jobs:
           ref: ${{ github.event.pull_request.head.sha }}
       - name: Install dependencies
         run: |
-          # for x in lxd core20 snapd; do sudo snap remove $x; done
-          sudo apt-get purge -y snapd google-chrome-stable firefox
-          ONLY_DEPS=1 .github/workflows/scripts/qemu-3-deps-vm.sh ubuntu22
-          sudo apt-get install -y cppcheck devscripts mandoc pax-utils shellcheck
-          sudo python -m pipx install --quiet flake8
+          # https://github.com/orgs/community/discussions/47863
+          sudo apt-mark hold grub-efi-amd64-signed
+          sudo apt-get update --fix-missing
+          sudo apt-get upgrade
+          sudo xargs --arg-file=${{ github.workspace }}/.github/workflows/build-dependencies.txt apt-get install -qq
+          sudo xargs --arg-file=${{ github.workspace }}/.github/workflows/checkstyle-dependencies.txt apt-get install -qq
+          sudo python3 -m pip install --quiet flake8
+          sudo apt-get clean
 
           # confirm that the tools are installed
           # the build system doesn't fail when they are not
           checkbashisms --version
@@ -31,13 +31,8 @@ jobs:
           shellcheck --version
       - name: Prepare
         run: |
-          sed -i '/DEBUG_CFLAGS="-Werror"/s/^/#/' config/zfs-build.m4
           ./autogen.sh
-      - name: Configure
-        run: |
           ./configure
-      - name: Make
-        run: |
           make -j$(nproc) --no-print-directory --silent
       - name: Checkstyle
         run: |
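The workflow boils down to the usual local check run; assuming the dependency packages above are installed, the same checks can be reproduced by hand with:

    ./autogen.sh
    ./configure
    make -j$(nproc) --no-print-directory --silent
    make checkstyle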
.github/workflows/codeql.yml | 12 (vendored)
@@ -4,14 +4,10 @@ on:
   push:
   pull_request:
 
-concurrency:
-  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
-  cancel-in-progress: true
-
 jobs:
   analyze:
     name: Analyze
-    runs-on: ubuntu-22.04
+    runs-on: ubuntu-latest
     permissions:
       actions: read
       contents: read
@@ -31,15 +27,15 @@ jobs:
       uses: actions/checkout@v4
 
     - name: Initialize CodeQL
-      uses: github/codeql-action/init@v3
+      uses: github/codeql-action/init@v2
      with:
        config-file: .github/codeql-${{ matrix.language }}.yml
        languages: ${{ matrix.language }}
 
    - name: Autobuild
-      uses: github/codeql-action/autobuild@v3
+      uses: github/codeql-action/autobuild@v2
 
    - name: Perform CodeQL Analysis
-      uses: github/codeql-action/analyze@v3
+      uses: github/codeql-action/analyze@v2
      with:
        category: "/language:${{matrix.language}}"
.github/workflows/labels.yml | 49 (vendored, deleted file)
@@ -1,49 +0,0 @@
name: labels

on:
  pull_request_target:
    types: [ opened, synchronize, reopened, converted_to_draft, ready_for_review ]

permissions:
  pull-requests: write

jobs:
  open:
    runs-on: ubuntu-latest
    if: ${{ github.event.action == 'opened' && github.event.pull_request.draft }}
    steps:
      - env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          ISSUE: ${{ github.event.pull_request.html_url }}
        run: |
          gh pr edit $ISSUE --add-label "Status: Work in Progress"

  push:
    runs-on: ubuntu-latest
    if: ${{ github.event.action == 'synchronize' || github.event.action == 'reopened' }}
    steps:
      - env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          ISSUE: ${{ github.event.pull_request.html_url }}
        run: |
          gh pr edit $ISSUE --remove-label "Status: Accepted,Status: Inactive,Status: Revision Needed,Status: Stale"

  draft:
    runs-on: ubuntu-latest
    if: ${{ github.event.action == 'converted_to_draft' }}
    steps:
      - env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          ISSUE: ${{ github.event.pull_request.html_url }}
        run: |
          gh pr edit $ISSUE --remove-label "Status: Accepted,Status: Code Review Needed,Status: Inactive,Status: Revision Needed,Status: Stale" --add-label "Status: Work in Progress"

  rfr:
    runs-on: ubuntu-latest
    if: ${{ github.event.action == 'ready_for_review' }}
    steps:
      - env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          ISSUE: ${{ github.event.pull_request.html_url }}
        run: |
          gh pr edit $ISSUE --remove-label "Status: Accepted,Status: Inactive,Status: Revision Needed,Status: Stale,Status: Work in Progress" --add-label "Status: Code Review Needed"
.github/workflows/scripts/README.md | 14 (vendored, deleted file)
@@ -1,14 +0,0 @@
Workflow for each operating system:
- install qemu on the github runner
- download current cloud image of operating system
- start and init that image via cloud-init
- install dependencies and poweroff system
- start system and build openzfs and then poweroff again
- clone build system and start 2 instances of it
- run functional testings and complete in around 3h
- when tests are done, do some logfile preparing
- show detailed results for each system
- in the end, generate the job summary

/TR 14.09.2024
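The numbered qemu-*.sh scripts below implement these stages; a hypothetical manual walk-through of the first three stages (the script names come from this compare, the OS argument is just an example) would be:

    .github/workflows/scripts/qemu-1-setup.sh              # prepare the runner
    .github/workflows/scripts/qemu-2-start.sh ubuntu24     # fetch and boot the cloud image
    .github/workflows/scripts/qemu-3-deps-vm.sh ubuntu24   # install build dependencies in the VM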
.github/workflows/scripts/generate-ci-type.py | 108 (vendored, deleted file)
@@ -1,108 +0,0 @@
#!/usr/bin/env python3

"""
Determine the CI type based on the change list and commit message.

Prints "quick" if (explicity required by user):
- the *last* commit message contains 'ZFS-CI-Type: quick'
or if (heuristics):
- the files changed are not in the list of specified directories, and
- all commit messages do not contain 'ZFS-CI-Type: full'

Otherwise prints "full".
"""

import sys
import subprocess
import re

"""
Patterns of files that are not considered to trigger full CI.
Note: not using pathlib.Path.match() because it does not support '**'
"""
FULL_RUN_IGNORE_REGEX = list(map(re.compile, [
    r'.*\.md',
    r'.*\.gitignore'
]))

"""
Patterns of files that are considered to trigger full CI.
"""
FULL_RUN_REGEX = list(map(re.compile, [
    r'\.github/workflows/scripts/.*',
    r'cmd.*',
    r'configs/.*',
    r'META',
    r'.*\.am',
    r'.*\.m4',
    r'autogen\.sh',
    r'configure\.ac',
    r'copy-builtin',
    r'contrib',
    r'etc',
    r'include',
    r'lib/.*',
    r'module/.*',
    r'scripts/.*',
    r'tests/.*',
    r'udev/.*'
]))

if __name__ == '__main__':

    prog = sys.argv[0]

    if len(sys.argv) != 3:
        print(f'Usage: {prog} <head_ref> <base_ref>')
        sys.exit(1)

    head, base = sys.argv[1:3]

    def output_type(type, reason):
        print(f'{prog}: will run {type} CI: {reason}', file=sys.stderr)
        print(type)
        sys.exit(0)

    # check last (HEAD) commit message
    last_commit_message_raw = subprocess.run([
        'git', 'show', '-s', '--format=%B', head
    ], check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    for line in last_commit_message_raw.stdout.decode().splitlines():
        if line.strip().lower() == 'zfs-ci-type: quick':
            output_type('quick', f'explicitly requested by HEAD commit {head}')

    # check all commit messages
    all_commit_message_raw = subprocess.run([
        'git', 'show', '-s',
        '--format=ZFS-CI-Commit: %H%n%B', f'{head}...{base}'
    ], check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    all_commit_message = all_commit_message_raw.stdout.decode().splitlines()

    commit_ref = head
    for line in all_commit_message:
        if line.startswith('ZFS-CI-Commit:'):
            commit_ref = line.lstrip('ZFS-CI-Commit:').rstrip()
        if line.strip().lower() == 'zfs-ci-type: full':
            output_type('full', f'explicitly requested by commit {commit_ref}')

    # check changed files
    changed_files_raw = subprocess.run([
        'git', 'diff', '--name-only', head, base
    ], check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    changed_files = changed_files_raw.stdout.decode().splitlines()

    for f in changed_files:
        for r in FULL_RUN_IGNORE_REGEX:
            if r.match(f):
                break
        else:
            for r in FULL_RUN_REGEX:
                if r.match(f):
                    output_type(
                        'full',
                        f'changed file "{f}" matches pattern "{r.pattern}"'
                    )

    # catch-all
    output_type('quick', 'no changed file matches full CI patterns')
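A sketch of how a workflow step might invoke the script; the ref variables and the GITHUB_OUTPUT handoff are assumptions for illustration, not taken from this compare:

    # hypothetical workflow step: classify the PR, then expose the result to later jobs
    ci_type=$(.github/workflows/scripts/generate-ci-type.py "$HEAD_SHA" "$BASE_SHA")
    echo "ci_type=$ci_type" >> "$GITHUB_OUTPUT"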
.github/workflows/scripts/generate-summary.sh | 119 (vendored, new executable file)
@@ -0,0 +1,119 @@
#!/usr/bin/env bash

# for runtime reasons we split functional testings into N parts
# - use a define to check for missing tarfiles
FUNCTIONAL_PARTS="4"

ZTS_REPORT="tests/test-runner/bin/zts-report.py"
chmod +x $ZTS_REPORT

function output() {
  echo -e $* >> Summary.md
}

function error() {
  output ":bangbang: $* :bangbang:\n"
}

# this function generates the real summary
# - expects a logfile "log" in current directory
function generate() {
  # we issued some error already
  test ! -s log && return

  # for overview and zts-report
  cat log | grep '^Test' > list

  # error details
  awk '/\[FAIL\]|\[KILLED\]/{ show=1; print; next; }
    /\[SKIP\]|\[PASS\]/{ show=0; } show' log > err

  # summary of errors
  if [ -s err ]; then
    output "<pre>"
    $ZTS_REPORT --no-maybes ./list >> Summary.md
    output "</pre>"

    # generate seperate error logfile
    ERRLOGS=$((ERRLOGS+1))
    errfile="err-$ERRLOGS.md"
    echo -e "\n## $headline (debugging)\n" >> $errfile
    echo "<details><summary>Error Listing - with dmesg and dbgmsg</summary><pre>" >> $errfile
    dd if=err bs=999k count=1 >> $errfile
    echo "</pre></details>" >> $errfile
  else
    output "All tests passed :thumbsup:"
  fi

  output "<details><summary>Full Listing</summary><pre>"
  cat list >> Summary.md
  output "</pre></details>"

  # remove tmp files
  rm -f err list log
}

# check tarfiles and untar
function check_tarfile() {
  if [ -f "$1" ]; then
    tar xf "$1" || error "Tarfile $1 returns some error"
  else
    error "Tarfile $1 not found"
  fi
}

# check logfile and concatenate test results
function check_logfile() {
  if [ -f "$1" ]; then
    cat "$1" >> log
  else
    error "Logfile $1 not found"
  fi
}

# sanity
function summarize_s() {
  headline="$1"
  output "\n## $headline\n"
  rm -rf testfiles
  check_tarfile "$2/sanity.tar"
  check_logfile "testfiles/log"
  generate
}

# functional
function summarize_f() {
  headline="$1"
  output "\n## $headline\n"
  rm -rf testfiles
  for i in $(seq 1 $FUNCTIONAL_PARTS); do
    tarfile="$2-part$i/part$i.tar"
    check_tarfile "$tarfile"
    check_logfile "testfiles/log"
  done
  generate
}

# https://docs.github.com/en/enterprise-server@3.6/actions/using-workflows/workflow-commands-for-github-actions#step-isolation-and-limits
# Job summaries are isolated between steps and each step is restricted to a maximum size of 1MiB.
# [ ] can not show all error findings here
# [x] split files into smaller ones and create additional steps

ERRLOGS=0
if [ ! -f Summary/Summary.md ]; then
  # first call, we do the default summary (~500k)
  echo -n > Summary.md
  summarize_s "Sanity Tests Ubuntu 20.04" Logs-20.04-sanity
  summarize_s "Sanity Tests Ubuntu 22.04" Logs-22.04-sanity
  summarize_f "Functional Tests Ubuntu 20.04" Logs-20.04-functional
  summarize_f "Functional Tests Ubuntu 22.04" Logs-22.04-functional

  cat Summary.md >> $GITHUB_STEP_SUMMARY
  mkdir -p Summary
  mv *.md Summary
else
  # here we get, when errors where returned in first call
  test -f Summary/err-$1.md && cat Summary/err-$1.md >> $GITHUB_STEP_SUMMARY
fi

exit 0
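Usage follows from the final if-block: the first call renders Summary.md from the downloaded artifacts, and later calls pass an index to flush the split error logs (a sketch; the index value is an example):

    ./generate-summary.sh       # first call: builds Summary.md and moves it into Summary/
    ./generate-summary.sh 2     # later step: appends Summary/err-2.md to the job summary, if it exists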
.github/workflows/scripts/merge_summary.awk | 109 (vendored, deleted file)
@@ -1,109 +0,0 @@
#!/bin/awk -f
#
# Merge multiple ZTS tests results summaries into a single summary. This is
# needed when you're running different parts of ZTS on different tests
# runners or VMs.
#
# Usage:
#
# ./merge_summary.awk summary1.txt [summary2.txt] [summary3.txt] ...
#
# or:
#
# cat summary*.txt | ./merge_summary.awk
#
BEGIN {
    i=-1
    pass=0
    fail=0
    skip=0
    state=""
    cl=0
    el=0
    upl=0
    ul=0

    # Total seconds of tests runtime
    total=0;
}

# Skip empty lines
/^\s*$/{next}

# Skip Configuration and Test lines
/^Test:/{state=""; next}
/Configuration/{state="";next}

# When we see "test-runner.py" stop saving config lines, and
# save test runner lines
/test-runner.py/{state="testrunner"; runner=runner$0"\n"; next}

# We need to differentiate the PASS counts from test result lines that start
# with PASS, like:
#
#   PASS mv_files/setup
#
# Use state="pass_count" to differentiate
#
/Results Summary/{state="pass_count"; next}
/PASS/{ if (state=="pass_count") {pass += $2}}
/FAIL/{ if (state=="pass_count") {fail += $2}}
/SKIP/{ if (state=="pass_count") {skip += $2}}
/Running Time/{
    state="";
    running[i]=$3;
    split($3, arr, ":")
    total += arr[1] * 60 * 60;
    total += arr[2] * 60;
    total += arr[3]
    next;
}

/Tests with results other than PASS that are expected/{state="expected_lines"; next}
/Tests with result of PASS that are unexpected/{state="unexpected_pass_lines"; next}
/Tests with results other than PASS that are unexpected/{state="unexpected_lines"; next}
{
    if (state == "expected_lines") {
        expected_lines[el] = $0
        el++
    }

    if (state == "unexpected_pass_lines") {
        unexpected_pass_lines[upl] = $0
        upl++
    }
    if (state == "unexpected_lines") {
        unexpected_lines[ul] = $0
        ul++
    }
}

# Reproduce summary
END {
    print runner;
    print "\nResults Summary"
    print "PASS\t"pass
    print "FAIL\t"fail
    print "SKIP\t"skip
    print ""
    print "Running Time:\t"strftime("%T", total, 1)
    if (pass+fail+skip > 0) {
        percent_passed=(pass/(pass+fail+skip) * 100)
    }
    printf "Percent passed:\t%3.2f%", percent_passed

    print "\n\nTests with results other than PASS that are expected:"
    asort(expected_lines, sorted)
    for (j in sorted)
        print sorted[j]

    print "\n\nTests with result of PASS that are unexpected:"
    asort(unexpected_pass_lines, sorted)
    for (j in sorted)
        print sorted[j]

    print "\n\nTests with results other than PASS that are unexpected:"
    asort(unexpected_lines, sorted)
    for (j in sorted)
        print sorted[j]
}
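Note that asort() and strftime() are GNU awk extensions, so invoking the script explicitly through gawk is the portable form of the documented usage:

    gawk -f .github/workflows/scripts/merge_summary.awk summary1.txt summary2.txt > merged.txt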
.github/workflows/scripts/qemu-1-setup.sh | 77 (vendored, deleted file)
@@ -1,77 +0,0 @@
#!/usr/bin/env bash

######################################################################
# 1) setup qemu instance on action runner
######################################################################

set -eu

# We've been seeing this script take over 15min to run. This may or
# may not be normal. Just to get a little more insight, print out
# a message to stdout with the top running process, and do this every
# 30 seconds. We can delete this watchdog later once we get a better
# handle on what the timeout value should be.
(while [ 1 ] ; do sleep 30 && echo "[watchdog: $(ps -eo cmd --sort=-pcpu | head -n 2 | tail -n 1)}')]"; done) &

# install needed packages
export DEBIAN_FRONTEND="noninteractive"
sudo apt-get -y update
sudo apt-get install -y axel cloud-image-utils daemonize guestfs-tools \
  virt-manager linux-modules-extra-$(uname -r) zfsutils-linux

# generate ssh keys
rm -f ~/.ssh/id_ed25519
ssh-keygen -t ed25519 -f ~/.ssh/id_ed25519 -q -N ""

# not needed
sudo systemctl stop docker.socket
sudo systemctl stop multipathd.socket

# remove default swapfile and /mnt
sudo swapoff -a
sudo umount -l /mnt
DISK="/dev/disk/cloud/azure_resource-part1"
sudo sed -e "s|^$DISK.*||g" -i /etc/fstab
sudo wipefs -aq $DISK
sudo systemctl daemon-reload

sudo modprobe loop
sudo modprobe zfs

# partition the disk as needed
DISK="/dev/disk/cloud/azure_resource"
sudo sgdisk --zap-all $DISK
sudo sgdisk -p \
  -n 1:0:+16G -c 1:"swap" \
  -n 2:0:0 -c 2:"tests" \
  $DISK
sync
sleep 1

# swap with same size as RAM (16GiB)
sudo mkswap $DISK-part1
sudo swapon $DISK-part1

# JBOD 2xdisk for OpenZFS storage (test vm's)
SSD1="$DISK-part2"
sudo fallocate -l 12G /test.ssd2
SSD2=$(sudo losetup -b 4096 -f /test.ssd2 --show)

# adjust zfs module parameter and create pool
exec 1>/dev/null
ARC_MIN=$((1024*1024*256))
ARC_MAX=$((1024*1024*512))
echo $ARC_MIN | sudo tee /sys/module/zfs/parameters/zfs_arc_min
echo $ARC_MAX | sudo tee /sys/module/zfs/parameters/zfs_arc_max
echo 1 | sudo tee /sys/module/zfs/parameters/zvol_use_blk_mq
sudo zpool create -f -o ashift=12 zpool $SSD1 $SSD2 -O relatime=off \
  -O atime=off -O xattr=sa -O compression=lz4 -O sync=disabled \
  -O redundant_metadata=none -O mountpoint=/mnt/tests

# no need for some scheduler
for i in /sys/block/s*/queue/scheduler; do
  echo "none" | sudo tee $i
done

# Kill off our watchdog
kill $(jobs -p)
.github/workflows/scripts/qemu-2-start.sh | 303 (vendored, deleted file)
@@ -1,303 +0,0 @@
#!/usr/bin/env bash

######################################################################
# 2) start qemu with some operating system, init via cloud-init
######################################################################

set -eu

# short name used in zfs-qemu.yml
OS="$1"

# OS variant (virt-install --os-variant list)
OSv=$OS

# FreeBSD urls's
FREEBSD_REL="https://download.freebsd.org/releases/CI-IMAGES"
FREEBSD_SNAP="https://download.freebsd.org/snapshots/CI-IMAGES"
URLxz=""

# Ubuntu mirrors
UBMIRROR="https://cloud-images.ubuntu.com"
#UBMIRROR="https://mirrors.cloud.tencent.com/ubuntu-cloud-images"
#UBMIRROR="https://mirror.citrahost.com/ubuntu-cloud-images"

# default nic model for vm's
NIC="virtio"

# additional options for virt-install
OPTS[0]=""
OPTS[1]=""

case "$OS" in
  almalinux8)
    OSNAME="AlmaLinux 8"
    URL="https://repo.almalinux.org/almalinux/8/cloud/x86_64/images/AlmaLinux-8-GenericCloud-latest.x86_64.qcow2"
    ;;
  almalinux9)
    OSNAME="AlmaLinux 9"
    URL="https://repo.almalinux.org/almalinux/9/cloud/x86_64/images/AlmaLinux-9-GenericCloud-latest.x86_64.qcow2"
    ;;
  almalinux10)
    OSNAME="AlmaLinux 10"
    OSv="almalinux9"
    URL="https://repo.almalinux.org/almalinux/10/cloud/x86_64/images/AlmaLinux-10-GenericCloud-latest.x86_64.qcow2"
    ;;
  archlinux)
    OSNAME="Archlinux"
    URL="https://geo.mirror.pkgbuild.com/images/latest/Arch-Linux-x86_64-cloudimg.qcow2"
    ;;
  centos-stream10)
    OSNAME="CentOS Stream 10"
    # TODO: #16903 Overwrite OSv to stream9 for virt-install until it's added to osinfo
    OSv="centos-stream9"
    URL="https://cloud.centos.org/centos/10-stream/x86_64/images/CentOS-Stream-GenericCloud-10-latest.x86_64.qcow2"
    ;;
  centos-stream9)
    OSNAME="CentOS Stream 9"
    URL="https://cloud.centos.org/centos/9-stream/x86_64/images/CentOS-Stream-GenericCloud-9-latest.x86_64.qcow2"
    ;;
  debian11)
    OSNAME="Debian 11"
    URL="https://cloud.debian.org/images/cloud/bullseye/latest/debian-11-generic-amd64.qcow2"
    ;;
  debian12)
    OSNAME="Debian 12"
    URL="https://cloud.debian.org/images/cloud/bookworm/latest/debian-12-generic-amd64.qcow2"
    ;;
  debian13)
    OSNAME="Debian 13"
    # TODO: Overwrite OSv to debian13 for virt-install until it's added to osinfo
    OSv="debian12"
    URL="https://cloud.debian.org/images/cloud/trixie/latest/debian-13-generic-amd64.qcow2"
    OPTS[0]="--boot"
    OPTS[1]="uefi=on"
    ;;
  fedora41)
    OSNAME="Fedora 41"
    OSv="fedora-unknown"
    URL="https://download.fedoraproject.org/pub/fedora/linux/releases/41/Cloud/x86_64/images/Fedora-Cloud-Base-Generic-41-1.4.x86_64.qcow2"
    ;;
  fedora42)
    OSNAME="Fedora 42"
    OSv="fedora-unknown"
    URL="https://download.fedoraproject.org/pub/fedora/linux/releases/42/Cloud/x86_64/images/Fedora-Cloud-Base-Generic-42-1.1.x86_64.qcow2"
    ;;
  freebsd13-5r)
    FreeBSD="13.5-RELEASE"
    OSNAME="FreeBSD $FreeBSD"
    OSv="freebsd13.0"
    URLxz="$FREEBSD_REL/$FreeBSD/amd64/Latest/FreeBSD-$FreeBSD-amd64-BASIC-CI.raw.xz"
    KSRC="$FREEBSD_REL/../amd64/$FreeBSD/src.txz"
    NIC="rtl8139"
    ;;
  freebsd14-2r)
    FreeBSD="14.2-RELEASE"
    OSNAME="FreeBSD $FreeBSD"
    OSv="freebsd14.0"
    KSRC="$FREEBSD_REL/../amd64/$FreeBSD/src.txz"
    URLxz="$FREEBSD_REL/$FreeBSD/amd64/Latest/FreeBSD-$FreeBSD-amd64-BASIC-CI.raw.xz"
    ;;
  freebsd14-3r)
    FreeBSD="14.3-RELEASE"
    OSNAME="FreeBSD $FreeBSD"
    OSv="freebsd14.0"
    URLxz="$FREEBSD_REL/$FreeBSD/amd64/Latest/FreeBSD-$FreeBSD-amd64-BASIC-CI.raw.xz"
    KSRC="$FREEBSD_REL/../amd64/$FreeBSD/src.txz"
    ;;
  freebsd13-5s)
    FreeBSD="13.5-STABLE"
    OSNAME="FreeBSD $FreeBSD"
    OSv="freebsd13.0"
    URLxz="$FREEBSD_SNAP/$FreeBSD/amd64/Latest/FreeBSD-$FreeBSD-amd64-BASIC-CI.raw.xz"
    KSRC="$FREEBSD_SNAP/../amd64/$FreeBSD/src.txz"
    NIC="rtl8139"
    ;;
  freebsd14-3s)
    FreeBSD="14.3-STABLE"
    OSNAME="FreeBSD $FreeBSD"
    OSv="freebsd14.0"
    URLxz="$FREEBSD_SNAP/$FreeBSD/amd64/Latest/FreeBSD-$FreeBSD-amd64-BASIC-CI-ufs.raw.xz"
    KSRC="$FREEBSD_SNAP/../amd64/$FreeBSD/src.txz"
    ;;
  freebsd15-0c)
    FreeBSD="15.0-ALPHA3"
    OSNAME="FreeBSD $FreeBSD"
    OSv="freebsd14.0"
    URLxz="$FREEBSD_SNAP/$FreeBSD/amd64/Latest/FreeBSD-$FreeBSD-amd64-BASIC-CI-ufs.raw.xz"
    KSRC="$FREEBSD_SNAP/../amd64/$FreeBSD/src.txz"
    ;;
  tumbleweed)
    OSNAME="openSUSE Tumbleweed"
    OSv="opensusetumbleweed"
    MIRROR="http://opensuse-mirror-gce-us.susecloud.net"
    URL="$MIRROR/tumbleweed/appliances/openSUSE-MicroOS.x86_64-OpenStack-Cloud.qcow2"
    ;;
  ubuntu22)
    OSNAME="Ubuntu 22.04"
    OSv="ubuntu22.04"
    URL="$UBMIRROR/jammy/current/jammy-server-cloudimg-amd64.img"
    ;;
  ubuntu24)
    OSNAME="Ubuntu 24.04"
    OSv="ubuntu24.04"
    URL="$UBMIRROR/noble/current/noble-server-cloudimg-amd64.img"
    ;;
  *)
    echo "Wrong value for OS variable!"
    exit 111
    ;;
esac

# environment file
ENV="/var/tmp/env.txt"
echo "ENV=$ENV" >> $ENV

# result path
echo 'RESPATH="/var/tmp/test_results"' >> $ENV

# FreeBSD 13 has problems with: e1000 and virtio
echo "NIC=$NIC" >> $ENV

# freebsd15 -> used in zfs-qemu.yml
echo "OS=$OS" >> $ENV

# freebsd14.0 -> used for virt-install
echo "OSv=\"$OSv\"" >> $ENV

# FreeBSD 15 (Current) -> used for summary
echo "OSNAME=\"$OSNAME\"" >> $ENV

# default vm count for testings
VMs=2
echo "VMs=\"$VMs\"" >> $ENV

# default cpu count for testing vm's
CPU=2
echo "CPU=\"$CPU\"" >> $ENV

sudo mkdir -p "/mnt/tests"
sudo chown -R $(whoami) /mnt/tests

DISK="/dev/zvol/zpool/openzfs"
sudo zfs create -ps -b 64k -V 80g zpool/openzfs
while true; do test -b $DISK && break; sleep 1; done

# we are downloading via axel, curl and wget are mostly slower and
# require more return value checking
IMG="/mnt/tests/cloud-image"
if [ ! -z "$URLxz" ]; then
  echo "Loading $URLxz ..."
  time axel -q -o "$IMG" "$URLxz"
  echo "Loading $KSRC ..."
  time axel -q -o ~/src.txz $KSRC
else
  echo "Loading $URL ..."
  time axel -q -o "$IMG" "$URL"
fi

echo "Importing VM image to zvol..."
if [ ! -z "$URLxz" ]; then
  xzcat -T0 $IMG | sudo dd of=$DISK bs=4M
else
  sudo qemu-img dd -f qcow2 -O raw if=$IMG of=$DISK bs=4M
fi
rm -f $IMG

PUBKEY=$(cat ~/.ssh/id_ed25519.pub)
if [ ${OS:0:7} != "freebsd" ]; then
cat <<EOF > /tmp/user-data
#cloud-config

hostname: $OS

users:
- name: root
  shell: $BASH
- name: zfs
  sudo: ALL=(ALL) NOPASSWD:ALL
  shell: $BASH
  ssh_authorized_keys:
    - $PUBKEY

growpart:
  mode: auto
  devices: ['/']
  ignore_growroot_disabled: false
EOF
else
cat <<EOF > /tmp/user-data
#cloud-config

hostname: $OS

# minimized config without sudo for nuageinit of FreeBSD
growpart:
  mode: auto
  devices: ['/']
  ignore_growroot_disabled: false
EOF
fi

sudo virsh net-update default add ip-dhcp-host \
  "<host mac='52:54:00:83:79:00' ip='192.168.122.10'/>" --live --config

sudo virt-install \
  --os-variant $OSv \
  --name "openzfs" \
  --cpu host-passthrough \
  --virt-type=kvm --hvm \
  --vcpus=4,sockets=1 \
  --memory $((1024*12)) \
  --memballoon model=virtio \
  --graphics none \
  --network bridge=virbr0,model=$NIC,mac='52:54:00:83:79:00' \
  --cloud-init user-data=/tmp/user-data \
  --disk $DISK,bus=virtio,cache=none,format=raw,driver.discard=unmap \
  --import --noautoconsole ${OPTS[0]} ${OPTS[1]} >/dev/null

# Give the VMs hostnames so we don't have to refer to them with
# hardcoded IP addresses.
#
# vm0: Initial VM we install dependencies and build ZFS on.
# vm1..2 Testing VMs
for ((i=0; i<=VMs; i++)); do
  echo "192.168.122.1$i vm$i" | sudo tee -a /etc/hosts
done

# in case the directory isn't there already
mkdir -p $HOME/.ssh

cat <<EOF >> $HOME/.ssh/config
# no questions please
StrictHostKeyChecking no

# small timeout, used in while loops later
ConnectTimeout 1
EOF

if [ ${OS:0:7} != "freebsd" ]; then
  # enable KSM on Linux
  sudo virsh dommemstat --domain "openzfs" --period 5
  sudo virsh node-memory-tune 100 50 1
  echo 1 | sudo tee /sys/kernel/mm/ksm/run > /dev/null
else
  # on FreeBSD we need some more init stuff, because of nuageinit
  BASH="/usr/local/bin/bash"
  while pidof /usr/bin/qemu-system-x86_64 >/dev/null; do
    ssh 2>/dev/null root@vm0 "uname -a" && break
  done
  ssh root@vm0 "pkg install -y bash ca_root_nss git qemu-guest-agent python3 py311-cloud-init"
  ssh root@vm0 "chsh -s $BASH root"
  ssh root@vm0 'sysrc qemu_guest_agent_enable="YES"'
  ssh root@vm0 'sysrc cloudinit_enable="YES"'
  ssh root@vm0 "pw add user zfs -w no -s $BASH"
  ssh root@vm0 'mkdir -p ~zfs/.ssh'
  ssh root@vm0 'echo "zfs ALL=(ALL:ALL) NOPASSWD: ALL" >> /usr/local/etc/sudoers'
  ssh root@vm0 'echo "PubkeyAuthentication yes" >> /etc/ssh/sshd_config'
  scp ~/.ssh/id_ed25519.pub "root@vm0:~zfs/.ssh/authorized_keys"
  ssh root@vm0 'chown -R zfs ~zfs'
  ssh root@vm0 'service sshd restart'
  scp ~/src.txz "root@vm0:/tmp/src.txz"
  ssh root@vm0 'tar -C / -zxf /tmp/src.txz'
fi
262
.github/workflows/scripts/qemu-3-deps-vm.sh
vendored
262
.github/workflows/scripts/qemu-3-deps-vm.sh
vendored
@ -1,262 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
|
|
||||||
######################################################################
|
|
||||||
# 3) install dependencies for compiling and loading
|
|
||||||
#
|
|
||||||
# $1: OS name (like 'fedora41')
|
|
||||||
# $2: (optional) Experimental Fedora kernel version, like "6.14" to
|
|
||||||
# install instead of Fedora defaults.
|
|
||||||
######################################################################
|
|
||||||
|
|
||||||
set -eu
|
|
||||||
|
|
||||||
function archlinux() {
|
|
||||||
echo "##[group]Running pacman -Syu"
|
|
||||||
sudo btrfs filesystem resize max /
|
|
||||||
sudo pacman -Syu --noconfirm
|
|
||||||
echo "##[endgroup]"
|
|
||||||
|
|
||||||
echo "##[group]Install Development Tools"
|
|
||||||
sudo pacman -Sy --noconfirm base-devel bc cpio cryptsetup dhclient dkms \
|
|
||||||
fakeroot fio gdb inetutils jq less linux linux-headers lsscsi nfs-utils \
|
|
||||||
parted pax perf python-packaging python-setuptools qemu-guest-agent ksh \
|
|
||||||
samba strace sysstat rng-tools rsync wget xxhash
|
|
||||||
echo "##[endgroup]"
|
|
||||||
}
|
|
||||||
|
|
||||||
function debian() {
|
|
||||||
export DEBIAN_FRONTEND="noninteractive"
|
|
||||||
|
|
||||||
echo "##[group]Running apt-get update+upgrade"
|
|
||||||
  sudo sed -i '/[[:alpha:]]-backports/d' /etc/apt/sources.list
  sudo apt-get update -y
  sudo apt-get upgrade -y
  echo "##[endgroup]"

  echo "##[group]Install Development Tools"
  sudo apt-get install -y \
    acl alien attr autoconf bc cpio cryptsetup curl dbench dh-python dkms \
    fakeroot fio gdb gdebi git ksh lcov isc-dhcp-client jq libacl1-dev \
    libaio-dev libattr1-dev libblkid-dev libcurl4-openssl-dev libdevmapper-dev \
    libelf-dev libffi-dev libmount-dev libpam0g-dev libselinux-dev libssl-dev \
    libtool libtool-bin libudev-dev libunwind-dev linux-headers-$(uname -r) \
    lsscsi nfs-kernel-server pamtester parted python3 python3-all-dev \
    python3-cffi python3-dev python3-distlib python3-packaging libtirpc-dev \
    python3-setuptools python3-sphinx qemu-guest-agent rng-tools rpm2cpio \
    rsync samba strace sysstat uuid-dev watchdog wget xfslibs-dev xxhash \
    zlib1g-dev
  echo "##[endgroup]"
}

function freebsd() {
  export ASSUME_ALWAYS_YES="YES"

  echo "##[group]Install Development Tools"
  sudo pkg install -y autoconf automake autotools base64 checkbashisms fio \
    gdb gettext gettext-runtime git gmake gsed jq ksh lcov libtool lscpu \
    pkgconf python python3 pamtester qemu-guest-agent rsync xxhash
  sudo pkg install -xy \
    '^samba4[[:digit:]]+$' \
    '^py3[[:digit:]]+-cffi$' \
    '^py3[[:digit:]]+-sysctl$' \
    '^py3[[:digit:]]+-setuptools$' \
    '^py3[[:digit:]]+-packaging$'
  echo "##[endgroup]"
}

# common packages for: almalinux, centos, redhat
function rhel() {
  echo "##[group]Running dnf update"
  echo "max_parallel_downloads=10" | sudo -E tee -a /etc/dnf/dnf.conf
  sudo dnf clean all
  sudo dnf update -y --setopt=fastestmirror=1 --refresh
  echo "##[endgroup]"

  echo "##[group]Install Development Tools"

  # Alma wants "Development Tools", Fedora 41 wants "development-tools"
  if ! sudo dnf group install -y "Development Tools" ; then
    echo "Trying 'development-tools' instead of 'Development Tools'"
    sudo dnf group install -y development-tools
  fi

  sudo dnf install -y \
    acl attr bc bzip2 cryptsetup curl dbench dkms elfutils-libelf-devel fio \
    gdb git jq kernel-rpm-macros ksh libacl-devel libaio-devel \
    libargon2-devel libattr-devel libblkid-devel libcurl-devel libffi-devel \
    ncompress libselinux-devel libtirpc-devel libtool libudev-devel \
    libuuid-devel lsscsi mdadm nfs-utils openssl-devel pam-devel pamtester \
    parted perf python3 python3-cffi python3-devel python3-packaging \
    kernel-devel python3-setuptools qemu-guest-agent rng-tools rpcgen \
    rpm-build rsync samba strace sysstat systemd watchdog wget xfsprogs-devel \
    xxhash zlib-devel
  echo "##[endgroup]"
}

function tumbleweed() {
  echo "##[group]Running zypper is TODO!"
  sleep 23456
  echo "##[endgroup]"
}

# $1: Kernel version to install (like '6.14rc7')
function install_fedora_experimental_kernel {

  our_version="$1"
  sudo dnf -y copr enable @kernel-vanilla/stable
  sudo dnf -y copr enable @kernel-vanilla/mainline
  all="$(sudo dnf list --showduplicates kernel-* python3-perf* perf* bpftool*)"
  echo "Available versions:"
  echo "$all"

  # There can be a bunch of minor variants of the version we want ('6.14').
  # Pick the newest variant (sorted by version number).
  specific_version=$(echo "$all" | grep $our_version | awk '{print $2}' | sort -V | tail -n 1)
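  # Illustrative example (not part of the original script): if the copr repos
  # carry both 6.14.1-300.fc42 and 6.14.2-300.fc42, the version sort keeps
  # the newest one:
  #   printf '6.14.1-300.fc42\n6.14.2-300.fc42\n' | sort -V | tail -n 1
  #   -> 6.14.2-300.fc42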
  list="$(echo "$all" | grep $specific_version | grep -Ev 'kernel-rt|kernel-selftests|kernel-debuginfo' | sed 's/.x86_64//g' | awk '{print $1"-"$2}')"
  sudo dnf install -y $list
  sudo dnf -y copr disable @kernel-vanilla/stable
  sudo dnf -y copr disable @kernel-vanilla/mainline
}

# Install dependencies
case "$1" in
  almalinux8)
    echo "##[group]Enable epel and powertools repositories"
    sudo dnf config-manager -y --set-enabled powertools
    sudo dnf install -y epel-release
    echo "##[endgroup]"
    rhel
    echo "##[group]Install kernel-abi-whitelists"
    sudo dnf install -y kernel-abi-whitelists
    echo "##[endgroup]"
    ;;
  almalinux9|almalinux10|centos-stream9|centos-stream10)
    echo "##[group]Enable epel and crb repositories"
    sudo dnf config-manager -y --set-enabled crb
    sudo dnf install -y epel-release
    echo "##[endgroup]"
    rhel
    echo "##[group]Install kernel-abi-stablelists"
    sudo dnf install -y kernel-abi-stablelists
    echo "##[endgroup]"
    ;;
  archlinux)
    archlinux
    ;;
  debian*)
    echo 'debconf debconf/frontend select Noninteractive' | sudo debconf-set-selections
    debian
    echo "##[group]Install Debian specific"
    sudo apt-get install -yq linux-perf dh-sequence-dkms
    echo "##[endgroup]"
    ;;
  fedora*)
    rhel
    sudo dnf install -y libunwind-devel

    # Fedora 42+ moves /usr/bin/script from 'util-linux' to 'util-linux-script'
    sudo dnf install -y util-linux-script || true

    # Optional: Install an experimental kernel ($2 = kernel version)
    if [ -n "${2:-}" ] ; then
      install_fedora_experimental_kernel "$2"
    fi
    ;;
  freebsd*)
    freebsd
    ;;
  tumbleweed)
    tumbleweed
    ;;
  ubuntu*)
    debian
    echo "##[group]Install Ubuntu specific"
    sudo apt-get install -yq linux-tools-common libtirpc-dev \
      linux-modules-extra-$(uname -r)
    sudo apt-get install -yq dh-sequence-dkms
    echo "##[endgroup]"
    echo "##[group]Delete Ubuntu OpenZFS modules"
    for i in $(find /lib/modules -name zfs -type d); do sudo rm -rvf $i; done
    echo "##[endgroup]"
    ;;
esac

# This script is used for checkstyle + zloop deps also.
# Install only the needed packages and exit - when used this way.
test -z "${ONLY_DEPS:-}" || exit 0

# Start services
echo "##[group]Enable services"
case "$1" in
  freebsd*)
    # add virtio things
    echo 'virtio_load="YES"' | sudo -E tee -a /boot/loader.conf
    for i in balloon blk console random scsi; do
      echo "virtio_${i}_load=\"YES\"" | sudo -E tee -a /boot/loader.conf
    done
    echo "fdescfs /dev/fd fdescfs rw 0 0" | sudo -E tee -a /etc/fstab
    sudo -E mount /dev/fd
    sudo -E touch /etc/zfs/exports
    sudo -E sysrc mountd_flags="/etc/zfs/exports"
    echo '[global]' | sudo -E tee /usr/local/etc/smb4.conf >/dev/null
    sudo -E service nfsd enable
    sudo -E service qemu-guest-agent enable
    sudo -E service samba_server enable
    ;;
  debian*|ubuntu*)
    sudo -E systemctl enable nfs-kernel-server
    sudo -E systemctl enable qemu-guest-agent
    sudo -E systemctl enable smbd
    ;;
  *)
    # All other linux distros
    sudo -E systemctl enable nfs-server
    sudo -E systemctl enable qemu-guest-agent
    sudo -E systemctl enable smb
    ;;
esac
echo "##[endgroup]"

# Setup Kernel cmdline
CMDLINE="console=tty0 console=ttyS0,115200n8"
CMDLINE="$CMDLINE selinux=0"
CMDLINE="$CMDLINE random.trust_cpu=on"
CMDLINE="$CMDLINE no_timer_check"

case "$1" in
  almalinux*|centos*|fedora*)
    GRUB_CFG="/boot/grub2/grub.cfg"
    GRUB_MKCONFIG="grub2-mkconfig"
    CMDLINE="$CMDLINE biosdevname=0 net.ifnames=0"
    echo 'GRUB_SERIAL_COMMAND="serial --speed=115200"' \
      | sudo tee -a /etc/default/grub >/dev/null
    ;;
  ubuntu24)
    GRUB_CFG="/boot/grub/grub.cfg"
    GRUB_MKCONFIG="grub-mkconfig"
    echo 'GRUB_DISABLE_OS_PROBER="false"' \
      | sudo tee -a /etc/default/grub >/dev/null
    ;;
  *)
    GRUB_CFG="/boot/grub/grub.cfg"
    GRUB_MKCONFIG="grub-mkconfig"
    ;;
esac

case "$1" in
  archlinux|freebsd*)
    true
    ;;
  *)
    echo "##[group]Edit kernel cmdline"
    sudo sed -i -e '/^GRUB_CMDLINE_LINUX/d' /etc/default/grub || true
    echo "GRUB_CMDLINE_LINUX=\"$CMDLINE\"" \
      | sudo tee -a /etc/default/grub >/dev/null
    sudo $GRUB_MKCONFIG -o $GRUB_CFG
    echo "##[endgroup]"
    ;;
esac

# reset cloud-init configuration and poweroff
sudo cloud-init clean --logs
sleep 2 && sudo poweroff &
exit 0

.github/workflows/scripts/qemu-3-deps.sh (vendored)
@@ -1,28 +0,0 @@

######################################################################
# 3) Wait for VM to boot from previous step and launch dependencies
#    script on it.
#
# $1: OS name (like 'fedora41')
# $2: (optional) Experimental kernel version to install on fedora,
#     like "6.14".
######################################################################

.github/workflows/scripts/qemu-wait-for-vm.sh vm0

# SPECIAL CASE:
#
# If the user passed in an experimental kernel version to test on Fedora,
# we need to update the kernel version in zfs's META file to allow the
# build to happen. We update our local copy of META here, since we know
# it will be rsync'd up in the next step.
if [ -n "${2:-}" ] ; then
  sed -i -E 's/Linux-Maximum: .+/Linux-Maximum: 99.99/g' META
fi
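# Worked example (illustrative; the exact version is hypothetical): a META
# line such as
#   Linux-Maximum: 6.15
# becomes
#   Linux-Maximum: 99.99
# so the configure-time kernel version check accepts the experimental kernel.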

scp .github/workflows/scripts/qemu-3-deps-vm.sh zfs@vm0:qemu-3-deps-vm.sh
PID=$(pidof /usr/bin/qemu-system-x86_64)
ssh zfs@vm0 '$HOME/qemu-3-deps-vm.sh' "$@"
# wait for poweroff to succeed
tail --pid=$PID -f /dev/null
sleep 5 # avoid this: "error: Domain is already active"
rm -f $HOME/.ssh/known_hosts

.github/workflows/scripts/qemu-4-build-vm.sh (vendored)
@@ -1,396 +0,0 @@

#!/usr/bin/env bash

######################################################################
# 4) configure and build openzfs modules. This is run on the VMs.
#
# Usage:
#
#   qemu-4-build-vm.sh OS [--enable-debug][--dkms][--patch-level NUM]
#                         [--poweroff][--release][--repo][--tarball]
#
# OS: OS name like 'fedora41'
# --enable-debug:    Build RPMs with '--enable-debug' (for testing)
# --dkms:            Build DKMS RPMs as well
# --patch-level NUM: Use a custom patch level number for packages.
# --poweroff:        Power-off the VM after building
# --release          Build zfs-release*.rpm as well
# --repo             After building everything, copy RPMs into /tmp/repo
#                    in the ZFS RPM repository file structure. Also
#                    copy tarballs if they were built.
# --tarball:         Also build a tarball of ZFS source
######################################################################

ENABLE_DEBUG=""
DKMS=""
PATCH_LEVEL=""
POWEROFF=""
RELEASE=""
REPO=""
TARBALL=""
while [[ $# -gt 0 ]]; do
  case $1 in
    --enable-debug)
      ENABLE_DEBUG=1
      shift
      ;;
    --dkms)
      DKMS=1
      shift
      ;;
    --patch-level)
      PATCH_LEVEL=$2
      shift
      shift
      ;;
    --poweroff)
      POWEROFF=1
      shift
      ;;
    --release)
      RELEASE=1
      shift
      ;;
    --repo)
      REPO=1
      shift
      ;;
    --tarball)
      TARBALL=1
      shift
      ;;
    *)
      OS=$1
      shift
      ;;
  esac
done

set -eu

function run() {
  LOG="/var/tmp/build-stderr.txt"
  echo "****************************************************"
  echo "$(date) ($*)"
  echo "****************************************************"
  ($@ || echo $? > /tmp/rv) 3>&1 1>&2 2>&3 | stdbuf -eL -oL tee -a $LOG
  if [ -f /tmp/rv ]; then
    RV=$(cat /tmp/rv)
    echo "****************************************************"
    echo "exit with value=$RV ($*)"
    echo "****************************************************"
    echo 1 > /var/tmp/build-exitcode.txt
    exit $RV
  fi
}
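# Explanatory note (not part of the original script): the '3>&1 1>&2 2>&3'
# in run() swaps stdout and stderr, so only the command's stderr flows
# through the pipe into 'tee -a $LOG' while its stdout still reaches the
# console. For example, 'run make -j$(nproc)' collects compiler warnings in
# /var/tmp/build-stderr.txt without logging every make progress line.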

# Look at the RPMs in the current directory and copy/move them to
# /tmp/repo, using the directory structure we use for the ZFS RPM repos.
#
# For example:
#   /tmp/repo/epel-testing/9.5
#   /tmp/repo/epel-testing/9.5/SRPMS
#   /tmp/repo/epel-testing/9.5/SRPMS/zfs-2.3.99-1.el9.src.rpm
#   /tmp/repo/epel-testing/9.5/SRPMS/zfs-kmod-2.3.99-1.el9.src.rpm
#   /tmp/repo/epel-testing/9.5/kmod
#   /tmp/repo/epel-testing/9.5/kmod/x86_64
#   /tmp/repo/epel-testing/9.5/kmod/x86_64/debug
#   /tmp/repo/epel-testing/9.5/kmod/x86_64/debug/kmod-zfs-debuginfo-2.3.99-1.el9.x86_64.rpm
#   /tmp/repo/epel-testing/9.5/kmod/x86_64/debug/libnvpair3-debuginfo-2.3.99-1.el9.x86_64.rpm
#   /tmp/repo/epel-testing/9.5/kmod/x86_64/debug/libuutil3-debuginfo-2.3.99-1.el9.x86_64.rpm
#   ...
function copy_rpms_to_repo {
  # Pick a RPM to query. It doesn't matter which one - we just want to extract
  # the 'Build Host' value from it.
  rpm=$(ls zfs-*.rpm | head -n 1)

  # Get zfs version '2.2.99'
  zfs_ver=$(rpm -qpi $rpm | awk '/Version/{print $3}')

  # Get "2.1" or "2.2"
  zfs_major=$(echo $zfs_ver | grep -Eo [0-9]+\.[0-9]+)

  # Get 'almalinux9.5' or 'fedora41' type string
  build_host=$(rpm -qpi $rpm | awk '/Build Host/{print $4}')

  # Get '9.5' or '41' OS version
  os_ver=$(echo $build_host | grep -Eo '[0-9\.]+$')
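  # Worked example (illustrative values): for an RPM with 'Version: 2.3.99'
  # built on host 'almalinux9.5', the extractions above give
  #   zfs_ver=2.3.99  zfs_major=2.3  build_host=almalinux9.5  os_ver=9.5
  # so the RPMs land under /tmp/repo/epel-testing/9.5/ below.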

  # Our ZFS version and OS name will determine which repo the RPMs
  # will go in (regular or testing). Fedora always gets the newest
  # releases, and Alma gets the older releases.
  case $build_host in
    almalinux*)
      case $zfs_major in
        2.2)
          d="epel"
          ;;
        *)
          d="epel-testing"
          ;;
      esac
      ;;
    fedora*)
      d="fedora"
      ;;
  esac

  prefix=/tmp/repo
  dst="$prefix/$d/$os_ver"

  # Special case: move zfs-release*.rpm out of the way first (if we built them).
  # This will make filtering the other RPMs easier.
  mkdir -p $dst
  mv zfs-release*.rpm $dst || true

  # Copy source RPMs
  mkdir -p $dst/SRPMS
  cp $(ls *.src.rpm) $dst/SRPMS/

  if [[ "$build_host" =~ "almalinux" ]] ; then
    # Copy kmods+userspace
    mkdir -p $dst/kmod/x86_64/debug
    cp $(ls *.rpm | grep -Ev 'src.rpm|dkms|debuginfo') $dst/kmod/x86_64
    cp *debuginfo*.rpm $dst/kmod/x86_64/debug
  fi

  if [ -n "$DKMS" ] ; then
    # Copy dkms+userspace
    mkdir -p $dst/x86_64
    cp $(ls *.rpm | grep -Ev 'src.rpm|kmod|debuginfo') $dst/x86_64
  fi

  # Copy debug
  mkdir -p $dst/x86_64/debug
  cp $(ls *debuginfo*.rpm | grep -v kmod) $dst/x86_64/debug
}

function freebsd() {
  extra="${1:-}"

  export MAKE="gmake"
  echo "##[group]Autogen.sh"
  run ./autogen.sh
  echo "##[endgroup]"

  echo "##[group]Configure"
  run ./configure \
    --prefix=/usr/local \
    --with-libintl-prefix=/usr/local \
    --enable-pyzfs \
    --enable-debuginfo $extra
  echo "##[endgroup]"

  echo "##[group]Build"
  run gmake -j$(sysctl -n hw.ncpu)
  echo "##[endgroup]"

  echo "##[group]Install"
  run sudo gmake install
  echo "##[endgroup]"
}

function linux() {
  extra="${1:-}"

  echo "##[group]Autogen.sh"
  run ./autogen.sh
  echo "##[endgroup]"

  echo "##[group]Configure"
  run ./configure \
    --prefix=/usr \
    --enable-pyzfs \
    --enable-debuginfo $extra
  echo "##[endgroup]"

  echo "##[group]Build"
  run make -j$(nproc)
  echo "##[endgroup]"

  echo "##[group]Install"
  run sudo make install
  echo "##[endgroup]"
}

function rpm_build_and_install() {
  extra="${1:-}"

  # Build RPMs with XZ compression by default (since gzip decompression is slow)
  echo "%_binary_payload w7.xzdio" >> ~/.rpmmacros

  echo "##[group]Autogen.sh"
  run ./autogen.sh
  echo "##[endgroup]"

  if [ -n "$PATCH_LEVEL" ] ; then
    sed -i -E 's/(Release:\s+)1/\1'$PATCH_LEVEL'/g' META
  fi

  echo "##[group]Configure"
  run ./configure --enable-debuginfo $extra
  echo "##[endgroup]"

  echo "##[group]Build"
  run make pkg-kmod pkg-utils
  echo "##[endgroup]"

  if [ -n "$DKMS" ] ; then
    echo "##[group]DKMS"
    make rpm-dkms
    echo "##[endgroup]"
  fi

  if [ -n "$REPO" ] ; then
    echo "Skipping install since we're only building RPMs and nothing else"
  else
    echo "##[group]Install"
    run sudo dnf -y --nobest install $(ls *.rpm | grep -Ev 'dkms|src.rpm')
    echo "##[endgroup]"
  fi

  # Optionally build the zfs-release.*.rpm
  if [ -n "$RELEASE" ] ; then
    echo "##[group]Release"
    pushd ~
    sudo dnf -y install rpm-build || true
    # Check out a sparse copy of zfsonlinux.github.com.git so we don't get
    # all the binaries. We just need a few kilobytes of files to build RPMs.
    git clone --depth 1 --no-checkout \
      https://github.com/zfsonlinux/zfsonlinux.github.com.git

    cd zfsonlinux.github.com
    git sparse-checkout set zfs-release
    git checkout
    cd zfs-release

    mkdir -p ~/rpmbuild/{BUILDROOT,SPECS,RPMS,SRPMS,SOURCES,BUILD}
    cp RPM-GPG-KEY-openzfs* *.repo ~/rpmbuild/SOURCES
    cp zfs-release.spec ~/rpmbuild/SPECS/
    rpmbuild -ba ~/rpmbuild/SPECS/zfs-release.spec

    # ZFS release RPMs are built. Copy them to the ~/zfs directory just to
    # keep all the RPMs in the same place.
    cp ~/rpmbuild/RPMS/noarch/*.rpm ~/zfs
    cp ~/rpmbuild/SRPMS/*.rpm ~/zfs

    popd
    rm -fr ~/rpmbuild
    echo "##[endgroup]"
  fi

  if [ -n "$REPO" ] ; then
    echo "##[group]Repo"
    copy_rpms_to_repo
    echo "##[endgroup]"
  fi
}

function deb_build_and_install() {
  extra="${1:-}"

  echo "##[group]Autogen.sh"
  run ./autogen.sh
  echo "##[endgroup]"

  echo "##[group]Configure"
  run ./configure \
    --prefix=/usr \
    --enable-pyzfs \
    --enable-debuginfo $extra
  echo "##[endgroup]"

  echo "##[group]Build"
  run make native-deb-kmod native-deb-utils
  echo "##[endgroup]"

  echo "##[group]Install"
  # Do kmod install. Note that when you build the native debs, the
  # packages themselves are placed in parent directory '../' rather than
  # in the source directory like the rpms are.
  run sudo apt-get -y install $(find ../ | grep -E '\.deb$' \
    | grep -Ev 'dkms|dracut')
  echo "##[endgroup]"
}

function build_tarball {
  if [ -n "$REPO" ] ; then
    ./autogen.sh
    ./configure --with-config=srpm
    make dist
    mkdir -p /tmp/repo/releases
    # The tarball name is based off of 'Version' field in the META file.
    mv *.tar.gz /tmp/repo/releases/
  fi
}

# Debug: show kernel cmdline
if [ -f /proc/cmdline ] ; then
  cat /proc/cmdline || true
fi

# Set our hostname to our OS name and version number. Specifically, we set the
# major and minor number so that when we query the Build Host field in the RPMs
# we build, we can see what specific version of Fedora/Almalinux we were using
# to build them. This is helpful for matching up KMOD versions.
#
# Examples:
#
#   rhel8.10
#   almalinux9.5
#   fedora42
source /etc/os-release
if which hostnamectl &> /dev/null ; then
  # Fedora 42+ use hostnamectl
  sudo hostnamectl set-hostname "$ID$VERSION_ID"
  sudo hostnamectl set-hostname --pretty "$ID$VERSION_ID"
else
  sudo hostname "$ID$VERSION_ID"
fi

# save some sysinfo
uname -a > /var/tmp/uname.txt

cd $HOME/zfs
export PATH="$PATH:/sbin:/usr/sbin:/usr/local/sbin"

extra=""
if [ -n "$ENABLE_DEBUG" ] ; then
  extra="--enable-debug"
fi

# build
case "$OS" in
  freebsd*)
    freebsd "$extra"
    ;;
  alma*|centos*)
    rpm_build_and_install "--with-spec=redhat $extra"
    ;;
  fedora*)
    rpm_build_and_install "$extra"

    # Historically, we've always built the release tarballs on Fedora, since
    # there was one instance long ago where we built them on CentOS 7, and they
    # didn't work correctly for everyone.
    if [ -n "$TARBALL" ] ; then
      build_tarball
    fi
    ;;
  debian*|ubuntu*)
    deb_build_and_install "$extra"
    ;;
  *)
    linux "$extra"
    ;;
esac

# building the zfs module was ok
echo 0 > /var/tmp/build-exitcode.txt

# reset cloud-init configuration and poweroff
if [ -n "$POWEROFF" ] ; then
  sudo cloud-init clean --logs
  sync && sleep 2 && sudo poweroff &
fi
exit 0

.github/workflows/scripts/qemu-4-build.sh (vendored)
@@ -1,11 +0,0 @@

#!/usr/bin/env bash

######################################################################
# 4) configure and build openzfs modules
######################################################################
echo "Build modules in QEMU machine"

# Bring our VM back up and copy over ZFS source
.github/workflows/scripts/qemu-prepare-for-build.sh

ssh zfs@vm0 '$HOME/zfs/.github/workflows/scripts/qemu-4-build-vm.sh' $@

.github/workflows/scripts/qemu-5-setup.sh (vendored)
@@ -1,137 +0,0 @@

#!/usr/bin/env bash

######################################################################
# 5) start test machines and load openzfs module
######################################################################

set -eu

# read our defined variables
source /var/tmp/env.txt

# wait for poweroff to succeed
PID=$(pidof /usr/bin/qemu-system-x86_64)
tail --pid=$PID -f /dev/null
sudo virsh undefine --nvram openzfs

# cpu pinning
CPUSET=("0,1" "2,3")

# additional options for virt-install
OPTS[0]=""
OPTS[1]=""

case "$OS" in
  freebsd*)
    # FreeBSD needs only 6GiB
    RAM=6
    ;;
  debian13)
    RAM=8
    # Boot Debian 13 with uefi=on and secureboot=off (ZFS Kernel Module not signed)
    OPTS[0]="--boot"
    OPTS[1]="firmware=efi,firmware.feature0.name=secure-boot,firmware.feature0.enabled=no"
    ;;
  *)
    # Linux needs more memory, but can be optimized to share it via KSM
    RAM=8
    ;;
esac

# create snapshot we can clone later
sudo zfs snapshot zpool/openzfs@now

# setup the testing vm's
PUBKEY=$(cat ~/.ssh/id_ed25519.pub)

# start testing VMs
for ((i=1; i<=VMs; i++)); do
  echo "Creating disk for vm$i..."
  DISK="/dev/zvol/zpool/vm$i"
  FORMAT="raw"
  sudo zfs clone zpool/openzfs@now zpool/vm$i-system
  sudo zfs create -ps -b 64k -V 64g zpool/vm$i-tests

  cat <<EOF > /tmp/user-data
#cloud-config

fqdn: vm$i

users:
- name: root
  shell: $BASH
- name: zfs
  sudo: ALL=(ALL) NOPASSWD:ALL
  shell: $BASH
  ssh_authorized_keys:
    - $PUBKEY

growpart:
  mode: auto
  devices: ['/']
  ignore_growroot_disabled: false
EOF

  sudo virsh net-update default add ip-dhcp-host \
    "<host mac='52:54:00:83:79:0$i' ip='192.168.122.1$i'/>" --live --config

  sudo virt-install \
    --os-variant $OSv \
    --name "vm$i" \
    --cpu host-passthrough \
    --virt-type=kvm --hvm \
    --vcpus=$CPU,sockets=1 \
    --cpuset=${CPUSET[$((i-1))]} \
    --memory $((1024*RAM)) \
    --memballoon model=virtio \
    --graphics none \
    --cloud-init user-data=/tmp/user-data \
    --network bridge=virbr0,model=$NIC,mac="52:54:00:83:79:0$i" \
    --disk $DISK-system,bus=virtio,cache=none,format=$FORMAT,driver.discard=unmap \
    --disk $DISK-tests,bus=virtio,cache=none,format=$FORMAT,driver.discard=unmap \
    --import --noautoconsole ${OPTS[0]} ${OPTS[1]}
done

# generate some memory stats
cat <<EOF > cronjob.sh
exec 1>>/var/tmp/stats.txt
exec 2>&1
echo "********************************************************************************"
uptime
free -m
zfs list
EOF

sudo chmod +x cronjob.sh
sudo mv -f cronjob.sh /root/cronjob.sh
echo '*/5 * * * * /root/cronjob.sh' > crontab.txt
sudo crontab crontab.txt
rm crontab.txt

# Save the VM's serial output (ttyS0) to /var/tmp/console.txt
#   - ttyS0 on the VM corresponds to a local /dev/pty/N entry
#   - use 'virsh ttyconsole' to lookup the /dev/pty/N entry
for ((i=1; i<=VMs; i++)); do
  mkdir -p $RESPATH/vm$i
  read "pty" <<< $(sudo virsh ttyconsole vm$i)

  # Create the file so we can tail it, even if there's no output.
  touch $RESPATH/vm$i/console.txt

  sudo nohup bash -c "cat $pty > $RESPATH/vm$i/console.txt" &

  # Write all VM boot lines to the console to aid in debugging failed boots.
  # The boot lines from all the VMs will be munged together, so prepend each
  # line with the vm hostname (like 'vm1:').
  (while IFS=$'\n' read -r line; do echo "vm$i: $line" ; done < <(sudo tail -f $RESPATH/vm$i/console.txt)) &

done
echo "Console logging for ${VMs}x $OS started."

# check if the machines are okay
echo "Waiting for vm's to come up... (${VMs}x CPU=$CPU RAM=$RAM)"
for ((i=1; i<=VMs; i++)); do
  .github/workflows/scripts/qemu-wait-for-vm.sh vm$i
done
echo "All $VMs VMs are up now."

.github/workflows/scripts/qemu-6-tests.sh (vendored)
@@ -1,119 +0,0 @@

#!/usr/bin/env bash

######################################################################
# 6) load openzfs module and run the tests
#
# called on runner:  qemu-6-tests.sh
# called on qemu-vm: qemu-6-tests.sh $OS $2/$3
######################################################################

set -eu

function prefix() {
  ID="$1"
  LINE="$2"
  CURRENT=$(date +%s)
  TSSTART=$(cat /tmp/tsstart)
  DIFF=$((CURRENT-TSSTART))
  H=$((DIFF/3600))
  DIFF=$((DIFF-(H*3600)))
  M=$((DIFF/60))
  S=$((DIFF-(M*60)))
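  # Worked example (explanatory note, not in the original): for DIFF=3723
  # seconds since the start timestamp, H=1, M=2 and S=3, which is printed
  # below as '01:02:03'.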

  CTR=$(cat /tmp/ctr)
  echo $LINE| grep -q '^\[.*] Test[: ]' && CTR=$((CTR+1)) && echo $CTR > /tmp/ctr

  BASE="$HOME/work/zfs/zfs"
  COLOR="$BASE/scripts/zfs-tests-color.sh"
  CLINE=$(echo $LINE| grep '^\[.*] Test[: ]' \
    | sed -e 's|^\[.*] Test|Test|g' \
    | sed -e 's|/usr/local|/usr|g' \
    | sed -e 's| /usr/share/zfs/zfs-tests/tests/| |g' | $COLOR)
  if [ -z "$CLINE" ]; then
    printf "vm${ID}: %s\n" "$LINE"
  else
    # [vm2: 00:15:54  256] Test: functional/checksum/setup (run as root) [00:00] [PASS]
    printf "[vm${ID}: %02d:%02d:%02d %4d] %s\n" \
      "$H" "$M" "$S" "$CTR" "$CLINE"
  fi
}

# called directly on the runner
if [ -z ${1:-} ]; then
  cd "/var/tmp"
  source env.txt
  SSH=$(which ssh)
  TESTS='$HOME/zfs/.github/workflows/scripts/qemu-6-tests.sh'
  echo 0 > /tmp/ctr
  date "+%s" > /tmp/tsstart

  for ((i=1; i<=VMs; i++)); do
    IP="192.168.122.1$i"
    daemonize -c /var/tmp -p vm${i}.pid -o vm${i}log.txt -- \
      $SSH zfs@$IP $TESTS $OS $i $VMs $CI_TYPE
    # handle the output line by line and add an info prefix
    stdbuf -oL tail -fq vm${i}log.txt \
      | while read -r line; do prefix "$i" "$line"; done &
    echo $! > vm${i}log.pid
    # don't mix up the initial --- Configuration --- part
    sleep 0.13
  done

  # wait for all vm's to finish
  for ((i=1; i<=VMs; i++)); do
    tail --pid=$(cat vm${i}.pid) -f /dev/null
    pid=$(cat vm${i}log.pid)
    rm -f vm${i}log.pid
    kill $pid
  done

  exit 0
fi

# this part runs inside qemu vm
export PATH="$PATH:/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/sbin:/usr/local/bin"
case "$1" in
  freebsd*)
    TDIR="/usr/local/share/zfs"
    sudo kldstat -n zfs 2>/dev/null && sudo kldunload zfs
    sudo -E ./zfs/scripts/zfs.sh
    sudo mv -f /var/tmp/*.txt /tmp
    sudo newfs -U -t -L tmp /dev/vtbd1 >/dev/null
    sudo mount -o noatime /dev/vtbd1 /var/tmp
    sudo chmod 1777 /var/tmp
    sudo mv -f /tmp/*.txt /var/tmp
    ;;
  *)
    # use xfs @ /var/tmp for all distros
    TDIR="/usr/share/zfs"
    sudo -E modprobe zfs
    sudo mv -f /var/tmp/*.txt /tmp
    sudo mkfs.xfs -fq /dev/vdb
    sudo mount -o noatime /dev/vdb /var/tmp
    sudo chmod 1777 /var/tmp
    sudo mv -f /tmp/*.txt /var/tmp
    ;;
esac

# enable io_uring on el9/el10
case "$1" in
  almalinux9|almalinux10|centos-stream*)
    sudo sysctl kernel.io_uring_disabled=0 > /dev/null
    ;;
esac

# run the functional tests and save the exit code
cd /var/tmp
TAGS=$2/$3
if [ "$4" == "quick" ]; then
  export RUNFILES="sanity.run"
fi
sudo dmesg -c > dmesg-prerun.txt
mount > mount.txt
df -h > df-prerun.txt
$TDIR/zfs-tests.sh -vKO -s 3GB -T $TAGS
RV=$?
df -h > df-postrun.txt
echo $RV > tests-exitcode.txt
sync
exit 0

.github/workflows/scripts/qemu-7-prepare.sh (vendored)
@@ -1,124 +0,0 @@

#!/usr/bin/env bash

######################################################################
# 7) prepare output of the results
#    - this script pre-creates all needed logfiles for later summary
######################################################################

set -eu

# read our defined variables
cd /var/tmp
source env.txt

mkdir -p $RESPATH

# check if building the module has failed
if [ -z ${VMs:-} ]; then
  cd $RESPATH
  echo ":exclamation: ZFS module didn't build successfully :exclamation:" \
    | tee summary.txt | tee /tmp/summary.txt
  cp /var/tmp/*.txt .
  tar cf /tmp/qemu-$OS.tar -C $RESPATH -h . || true
  exit 0
fi

# build was okay
BASE="$HOME/work/zfs/zfs"
MERGE="$BASE/.github/workflows/scripts/merge_summary.awk"

# collect the result files of the tests (the VMs should be up)
for ((i=1; i<=VMs; i++)); do
  rsync -arL zfs@vm$i:$RESPATH/current $RESPATH/vm$i || true
  scp zfs@vm$i:"/var/tmp/*.txt" $RESPATH/vm$i || true
  scp zfs@vm$i:"/var/tmp/*.rpm" $RESPATH/vm$i || true
done
cp -f /var/tmp/*.txt $RESPATH || true
cd $RESPATH

# prepare result files for summary
for ((i=1; i<=VMs; i++)); do
  file="vm$i/build-stderr.txt"
  test -s $file && mv -f $file build-stderr.txt

  file="vm$i/build-exitcode.txt"
  test -s $file && mv -f $file build-exitcode.txt

  file="vm$i/uname.txt"
  test -s $file && mv -f $file uname.txt

  file="vm$i/tests-exitcode.txt"
  if [ ! -s $file ]; then
    # XXX - add some tests for kernel panics here
    # tail -n 80 vm$i/console.txt | grep XYZ
    echo 1 > $file
  fi
  rv=$(cat vm$i/tests-exitcode.txt)
  test $rv != 0 && touch /tmp/have_failed_tests

  file="vm$i/current/log"
  if [ -s $file ]; then
    cat $file >> log
    awk '/\[FAIL\]|\[KILLED\]/{ show=1; print; next; }; \
      /\[SKIP\]|\[PASS\]/{ show=0; } show' \
      $file > /tmp/vm${i}dbg.txt
  fi

  file="vm${i}log.txt"
  fileC="/tmp/vm${i}log.txt"
  if [ -s $file ]; then
    cat $file >> summary
    cat $file | $BASE/scripts/zfs-tests-color.sh > $fileC
  fi
done

# create summary of tests
if [ -s summary ]; then
  $MERGE summary | grep -v '^/' > summary.txt
  $MERGE summary | $BASE/scripts/zfs-tests-color.sh > /tmp/summary.txt
  rm -f summary
else
  touch summary.txt /tmp/summary.txt
fi

# create file for debugging
if [ -s log ]; then
  awk '/\[FAIL\]|\[KILLED\]/{ show=1; print; next; }; \
    /\[SKIP\]|\[PASS\]/{ show=0; } show' \
    log > summary-failure-logs.txt
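  # Explanatory note (not part of the original script): 'show' acts as a
  # sticky flag in the awk filter - a [FAIL] or [KILLED] line switches it on
  # (and prints), a [SKIP] or [PASS] line switches it off, and the bare
  # 'show' pattern prints every line in between, so each failure is kept
  # together with its full log block.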
  rm -f log
else
  touch summary-failure-logs.txt
fi

# create debug overview for failed tests
cat summary.txt \
  | awk '/\(expected PASS\)/{ if ($1!="SKIP") print $2; next; } show' \
  | while read t; do
    cat summary-failure-logs.txt \
      | awk '$0~/Test[: ]/{ show=0; } $0~v{ show=1; } show' v="$t" \
      > /tmp/fail.txt
    SIZE=$(stat --printf="%s" /tmp/fail.txt)
    SIZE=$((SIZE/1024))
    # Test Summary:
    echo "##[group]$t ($SIZE KiB)" >> /tmp/failed.txt
    cat /tmp/fail.txt | $BASE/scripts/zfs-tests-color.sh >> /tmp/failed.txt
    echo "##[endgroup]" >> /tmp/failed.txt
    # Job Summary:
    echo -e "\n<details>\n<summary>$t ($SIZE KiB)</summary><pre>" >> failed.txt
    cat /tmp/fail.txt >> failed.txt
    echo "</pre></details>" >> failed.txt
  done

if [ -e /tmp/have_failed_tests ]; then
  echo ":warning: Some tests failed!" >> failed.txt
else
  echo ":thumbsup: All tests passed." >> failed.txt
fi

if [ ! -s uname.txt ]; then
  echo ":interrobang: Panic - where is my uname.txt?" > uname.txt
fi

# artifact ready now
tar cf /tmp/qemu-$OS.tar -C $RESPATH -h . || true

.github/workflows/scripts/qemu-8-summary.sh (vendored)
@@ -1,71 +0,0 @@

#!/usr/bin/env bash

######################################################################
# 8) show colored output of results
######################################################################

set -eu

# read our defined variables
source /var/tmp/env.txt
cd $RESPATH

# helper function for showing some content with headline
function showfile() {
  content=$(dd if=$1 bs=1024 count=400k 2>/dev/null)
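  # Size note (explanatory, not in the original): 'bs=1024 count=400k' caps
  # the dump at 400k blocks of 1 KiB, i.e. about 400 MiB, so a runaway
  # console log cannot flood the runner output.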
  if [ -z "$2" ]; then
    group1=""
    group2=""
  else
    SIZE=$(stat --printf="%s" "$file")
    SIZE=$((SIZE/1024))
    group1="##[group]$2 ($SIZE KiB)"
    group2="##[endgroup]"
  fi
  cat <<EOF > tmp$$
$group1
$content
$group2
EOF
  cat tmp$$
  rm -f tmp$$
}

# overview
cat /tmp/summary.txt
echo ""

if [ -f /tmp/have_failed_tests -a -s /tmp/failed.txt ]; then
  echo "Debuginfo of failed tests:"
  cat /tmp/failed.txt
  echo ""
  cat /tmp/summary.txt | grep -v '^/'
  echo ""
fi

echo -e "\nFull logs for download:\n $1\n"

for ((i=1; i<=VMs; i++)); do
  rv=$(cat vm$i/tests-exitcode.txt)

  if [ $rv = 0 ]; then
    vm=$'\e[92m'"vm$i"$'\e[0m'
  else
    vm=$'\e[1;91m'"vm$i"$'\e[0m'
  fi

  file="vm$i/dmesg-prerun.txt"
  test -s "$file" && showfile "$file" "$vm: dmesg kernel"

  file="/tmp/vm${i}log.txt"
  test -s "$file" && showfile "$file" "$vm: test results"

  file="vm$i/console.txt"
  test -s "$file" && showfile "$file" "$vm: serial console"

  file="/tmp/vm${i}dbg.txt"
  test -s "$file" && showfile "$file" "$vm: failure logfile"
done

test -f /tmp/have_failed_tests && exit 1
exit 0

.github/workflows/scripts/qemu-9-summary-page.sh (vendored)
@@ -1,57 +0,0 @@

#!/usr/bin/env bash

######################################################################
# 9) generate github summary page of all the tests
######################################################################

set -eu

function output() {
  echo -e $* >> "out-$logfile.md"
}

function outfile() {
  cat "$1" >> "out-$logfile.md"
}

function outfile_plain() {
  output "<pre>"
  cat "$1" >> "out-$logfile.md"
  output "</pre>"
}

function send2github() {
  test -f "$1" || exit 0
  dd if="$1" bs=1023k count=1 >> $GITHUB_STEP_SUMMARY
}

# https://docs.github.com/en/enterprise-server@3.6/actions/using-workflows/workflow-commands-for-github-actions#step-isolation-and-limits
# Job summaries are isolated between steps and each step is restricted to a maximum size of 1MiB.
# [ ] can not show all error findings here
# [x] split files into smaller ones and create additional steps

# first call, generate all summaries
if [ ! -f out-1.md ]; then
  logfile="1"
  for tarfile in Logs-functional-*/qemu-*.tar; do
    rm -rf vm* *.txt
    if [ ! -s "$tarfile" ]; then
      output "\n## Functional Tests: unknown\n"
      output ":exclamation: Tarfile $tarfile is empty :exclamation:"
      continue
    fi
    tar xf "$tarfile"
    test -s env.txt || continue
    source env.txt
    # when uname.txt is there, the other files are also ok
    test -s uname.txt || continue
    output "\n## Functional Tests: $OSNAME\n"
    outfile_plain uname.txt
    outfile_plain summary.txt
    outfile failed.txt
    logfile=$((logfile+1))
  done
  send2github out-1.md
else
  send2github out-$1.md
fi

@@ -1,8 +0,0 @@

#!/usr/bin/env bash

# Helper script to run after installing dependencies. This brings the VM back
# up and copies over the zfs source directory.
echo "Build modules in QEMU machine"
sudo virsh start openzfs
.github/workflows/scripts/qemu-wait-for-vm.sh vm0
rsync -ar $HOME/work/zfs/zfs zfs@vm0:./

.github/workflows/scripts/qemu-test-repo-vm.sh (vendored)
@@ -1,90 +0,0 @@

#!/bin/bash
#
# Do a test install of ZFS from an external repository.
#
# USAGE:
#
#   ./qemu-test-repo-vm [URL]
#
# URL: URL to use instead of http://download.zfsonlinux.org
#      If blank, use the default repo from zfs-release RPM.

set -e

source /etc/os-release
OS="$ID"
VERSION="$VERSION_ID"

ALTHOST=""
if [ -n "$1" ] ; then
  ALTHOST="$1"
fi

# Write summary to /tmp/repo so our artifacts scripts pick it up
mkdir /tmp/repo
SUMMARY=/tmp/repo/$OS-$VERSION-summary.txt

# $1: Repo 'zfs' 'zfs-kmod' 'zfs-testing' 'zfs-testing-kmod'
# $2: (optional) Alternate host than 'http://download.zfsonlinux.org' to
#     install from. Blank means use default from zfs-release RPM.
function test_install {
  repo=$1
  host=""
  if [ -n "$2" ] ; then
    host=$2
  fi

  args="--disablerepo=zfs --enablerepo=$repo"

  # If we supplied an alternate repo URL, and have not already edited
  # zfs.repo, then update the repo file.
  if [ -n "$host" ] && ! grep -q $host /etc/yum.repos.d/zfs.repo ; then
    sudo sed -i "s;baseurl=http://download.zfsonlinux.org;baseurl=$host;g" /etc/yum.repos.d/zfs.repo
  fi

  sudo dnf -y install $args zfs zfs-test

  # Load modules and create a simple pool as a sanity test.
  sudo /usr/share/zfs/zfs.sh -r
  truncate -s 100M /tmp/file
  sudo zpool create tank /tmp/file
  sudo zpool status

  # Print out repo name, rpm installed (kmod or dkms), and repo URL
  baseurl=$(grep -A 5 "\[$repo\]" /etc/yum.repos.d/zfs.repo | awk -F'=' '/baseurl=/{print $2; exit}')
  package=$(sudo rpm -qa | grep zfs | grep -E 'kmod|dkms')

  echo "$repo $package $baseurl" >> $SUMMARY

  sudo zpool destroy tank
  sudo rm /tmp/file
  sudo dnf -y remove zfs
}

echo "##[group]Installing from repo"
# The openzfs docs are the authoritative instructions for the install. Use
# the specific version of zfs-release RPM it recommends.
case $OS in
  almalinux*)
    url='https://raw.githubusercontent.com/openzfs/openzfs-docs/refs/heads/master/docs/Getting%20Started/RHEL-based%20distro/index.rst'
    name=$(curl -Ls $url | grep 'dnf install' | grep -Eo 'zfs-release-[0-9]+-[0-9]+')
    sudo dnf -y install https://zfsonlinux.org/epel/$name$(rpm --eval "%{dist}").noarch.rpm 2>&1
    sudo rpm -qi zfs-release
    test_install zfs $ALTHOST
    test_install zfs-kmod $ALTHOST
    test_install zfs-testing $ALTHOST
    test_install zfs-testing-kmod $ALTHOST
    ;;
  fedora*)
    url='https://raw.githubusercontent.com/openzfs/openzfs-docs/refs/heads/master/docs/Getting%20Started/Fedora/index.rst'
    name=$(curl -Ls $url | grep 'dnf install' | grep -Eo 'zfs-release-[0-9]+-[0-9]+')
    sudo dnf -y install https://zfsonlinux.org/fedora/$name$(rpm --eval "%{dist}").noarch.rpm
    test_install zfs $ALTHOST
    ;;
esac
echo "##[endgroup]"

# Write out a simple version of the summary here. Later on we will collate all
# the summaries and put them into a nice table in the workflow Summary page.
echo "Summary: "
cat $SUMMARY

.github/workflows/scripts/qemu-wait-for-vm.sh (vendored)
@@ -1,10 +0,0 @@

#!/bin/bash
#
# Wait for a VM to boot up and become active. This is used in a number of our
# scripts.
#
# $1: VM hostname or IP address

while pidof /usr/bin/qemu-system-x86_64 >/dev/null; do
  ssh 2>/dev/null zfs@$1 "uname -a" && break
done

@@ -1,32 +0,0 @@

#!/bin/bash
#
# Recursively go through a directory structure and replace duplicate files
# with symlinks. This cuts down our RPM repo size by ~25%.
#
# replace-dupes-with-symlinks.sh [DIR]
#
# DIR: Directory to traverse. Defaults to current directory if not specified.
#

src="$1"
if [ -z "$src" ] ; then
  src="."
fi

declare -A db

pushd "$src"
while read line ; do
  bn="$(basename $line)"
  if [ -z "${db[$bn]}" ] ; then
    # First time this file has been seen
    db[$bn]="$line"
  else
    if diff -b "$line" "${db[$bn]}" &>/dev/null ; then
      # Files are the same, make a symlink
      rm "$line"
      ln -sr "${db[$bn]}" "$line"
    fi
  fi
done <<< "$(find . -type f)"
popd
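# Design note (explanatory, not part of the original script): files are
# keyed by basename in the 'db' associative array, so only same-named files
# are ever compared; the first copy found stays a regular file, and later
# copies that 'diff -b' reports as equal are replaced by relative symlinks
# ('ln -sr'), which keep working when the repo tree is moved as a whole.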

.github/workflows/scripts/setup-dependencies.sh (vendored, new executable file)
@@ -0,0 +1,88 @@

#!/usr/bin/env bash

set -eu

function prerun() {
  echo "::group::Install build dependencies"
  # remove snap things; update+upgrade will be faster afterwards
  for x in lxd core20 snapd; do sudo snap remove $x; done
  sudo apt-get purge snapd google-chrome-stable firefox
  # https://github.com/orgs/community/discussions/47863
  sudo apt-get remove grub-efi-amd64-bin grub-efi-amd64-signed shim-signed --allow-remove-essential
  sudo apt-get update
  sudo apt upgrade
  sudo xargs --arg-file=.github/workflows/build-dependencies.txt apt-get install -qq
  sudo apt-get clean
  sudo dmesg -c > /var/tmp/dmesg-prerun
  echo "::endgroup::"
}

function mod_build() {
  echo "::group::Generate debian packages"
  ./autogen.sh
  ./configure --enable-debug --enable-debuginfo --enable-asan --enable-ubsan
  make --no-print-directory --silent native-deb-utils native-deb-kmod
  mv ../*.deb .
  rm ./openzfs-zfs-dracut*.deb ./openzfs-zfs-dkms*.deb
  echo "$ImageOS-$ImageVersion" > tests/ImageOS.txt
  echo "::endgroup::"
}

function mod_install() {
  # install the pre-built module only on the same runner image
  MOD=`cat tests/ImageOS.txt`
  if [ "$MOD" != "$ImageOS-$ImageVersion" ]; then
    rm -f *.deb
    mod_build
  fi

  echo "::group::Install and load modules"
  # don't use kernel-shipped zfs modules
  sudo sed -i.bak 's/updates/extra updates/' /etc/depmod.d/ubuntu.conf
  sudo apt-get install --fix-missing ./*.deb

  # Native Debian packages enable and start the services
  # Stop zfs-zed daemon, as it may interfere with some ZTS test cases
  sudo systemctl stop zfs-zed
  sudo depmod -a
  sudo modprobe zfs
  sudo dmesg
  sudo dmesg -c > /var/tmp/dmesg-module-load
  echo "::endgroup::"

  echo "::group::Report CPU information"
  lscpu
  cat /proc/spl/kstat/zfs/chksum_bench
  echo "::endgroup::"

  echo "::group::Optimize storage for ZFS testing"
  # remove swap and umount fast storage
  # 89GiB -> rootfs + bootfs with ~80MB/s -> don't care
  # 64GiB -> /mnt with 420MB/s -> new testing ssd
  sudo swapoff -a

  # this one is fast and mounted @ /mnt
  # -> we reformat with ext4 + move it to /var/tmp
  DEV="/dev/disk/azure/resource-part1"
  sudo umount /mnt
  sudo mkfs.ext4 -O ^has_journal -F $DEV
  sudo mount -o noatime,barrier=0 $DEV /var/tmp
  sudo chmod 1777 /var/tmp

  # disk usage afterwards
  sudo df -h /
  sudo df -h /var/tmp
  sudo fstrim -a
  echo "::endgroup::"
}

case "$1" in
  build)
    prerun
    mod_build
    ;;
  tests)
    prerun
    mod_install
    ;;
esac

.github/workflows/scripts/setup-functional.sh (vendored, new executable file)
@@ -0,0 +1,24 @@

#!/usr/bin/env bash

set -eu

TDIR="/usr/share/zfs/zfs-tests/tests/functional"
echo -n "TODO="
case "$1" in
  part1)
    # ~1h 20m
    echo "cli_root"
    ;;
  part2)
    # ~1h
    ls $TDIR|grep '^[a-m]'|grep -v "cli_root"|xargs|tr -s ' ' ','
    ;;
  part3)
    # ~1h
    ls $TDIR|grep '^[n-qs-z]'|xargs|tr -s ' ' ','
    ;;
  part4)
    # ~1h
    ls $TDIR|grep '^r'|xargs|tr -s ' ' ','
    ;;
esac
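# Output sketch (illustrative; the test-suite directory names below are
# hypothetical): './setup-functional.sh part4' prints one line such as
#   TODO=raidz,redacted_send,redundancy,refquota
# which the workflow appends to $GITHUB_ENV so a later step can run
# zfs-tests.sh with '-T ${{ env.TODO }}'.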

.github/workflows/zfs-linux-tests.yml (vendored, new file)
@@ -0,0 +1,124 @@

name: zfs-linux-tests

on:
  workflow_call:
    inputs:
      os:
        description: 'The ubuntu version: 20.04 or 22.04'
        required: true
        type: string

jobs:

  zloop:
    runs-on: ubuntu-${{ inputs.os }}
    steps:
      - uses: actions/checkout@v4
        with:
          ref: ${{ github.event.pull_request.head.sha }}
      - uses: actions/download-artifact@v4
        with:
          name: modules-${{ inputs.os }}
      - name: Install modules
        run: |
          tar xzf modules-${{ inputs.os }}.tgz
          .github/workflows/scripts/setup-dependencies.sh tests
      - name: Tests
        timeout-minutes: 30
        run: |
          sudo mkdir -p /var/tmp/zloop
          # run for 10 minutes or at most 2 iterations for a maximum runner
          # time of 20 minutes.
          sudo /usr/share/zfs/zloop.sh -t 600 -I 2 -l -m1 -- -T 120 -P 60
      - name: Prepare artifacts
        if: failure()
        run: |
          sudo chmod +r -R /var/tmp/zloop/
      - uses: actions/upload-artifact@v4
        if: failure()
        with:
          name: Zpool-logs-${{ inputs.os }}
          path: |
            /var/tmp/zloop/*/
            !/var/tmp/zloop/*/vdev/
          retention-days: 14
          if-no-files-found: ignore
      - uses: actions/upload-artifact@v4
        if: failure()
        with:
          name: Zpool-files-${{ inputs.os }}
          path: |
            /var/tmp/zloop/*/vdev/
          retention-days: 14
          if-no-files-found: ignore

  sanity:
    runs-on: ubuntu-${{ inputs.os }}
    steps:
      - uses: actions/checkout@v4
        with:
          ref: ${{ github.event.pull_request.head.sha }}
      - uses: actions/download-artifact@v4
        with:
          name: modules-${{ inputs.os }}
      - name: Install modules
        run: |
          tar xzf modules-${{ inputs.os }}.tgz
          .github/workflows/scripts/setup-dependencies.sh tests
      - name: Tests
        timeout-minutes: 60
        shell: bash
        run: |
          set -o pipefail
          /usr/share/zfs/zfs-tests.sh -vKR -s 3G -r sanity | scripts/zfs-tests-color.sh
      - name: Prepare artifacts
        if: success() || failure()
        run: |
          RESPATH="/var/tmp/test_results"
          mv -f $RESPATH/current $RESPATH/testfiles
          tar cf $RESPATH/sanity.tar -h -C $RESPATH testfiles
      - uses: actions/upload-artifact@v4
        if: success() || failure()
        with:
          name: Logs-${{ inputs.os }}-sanity
          path: /var/tmp/test_results/sanity.tar
          if-no-files-found: ignore

  functional:
    runs-on: ubuntu-${{ inputs.os }}
    strategy:
      fail-fast: false
      matrix:
        tests: [ part1, part2, part3, part4 ]
    steps:
      - uses: actions/checkout@v4
        with:
          ref: ${{ github.event.pull_request.head.sha }}
      - uses: actions/download-artifact@v4
        with:
          name: modules-${{ inputs.os }}
      - name: Install modules
        run: |
          tar xzf modules-${{ inputs.os }}.tgz
          .github/workflows/scripts/setup-dependencies.sh tests
      - name: Setup tests
        run: |
          .github/workflows/scripts/setup-functional.sh ${{ matrix.tests }} >> $GITHUB_ENV
      - name: Tests
        timeout-minutes: 120
        shell: bash
        run: |
          set -o pipefail
          /usr/share/zfs/zfs-tests.sh -vKR -s 3G -T ${{ env.TODO }} | scripts/zfs-tests-color.sh
      - name: Prepare artifacts
        if: success() || failure()
        run: |
          RESPATH="/var/tmp/test_results"
          mv -f $RESPATH/current $RESPATH/testfiles
          tar cf $RESPATH/${{ matrix.tests }}.tar -h -C $RESPATH testfiles
      - uses: actions/upload-artifact@v4
        if: success() || failure()
        with:
          name: Logs-${{ inputs.os }}-functional-${{ matrix.tests }}
          path: /var/tmp/test_results/${{ matrix.tests }}.tar
          if-no-files-found: ignore

.github/workflows/zfs-linux.yml (vendored, new file)
@@ -0,0 +1,64 @@
name: zfs-linux

on:
  push:
  pull_request:

jobs:

  build:
    name: Build
    strategy:
      fail-fast: false
      matrix:
        os: [20.04, 22.04]
    runs-on: ubuntu-${{ matrix.os }}
    steps:
      - uses: actions/checkout@v4
        with:
          ref: ${{ github.event.pull_request.head.sha }}
      - name: Build modules
        run: .github/workflows/scripts/setup-dependencies.sh build
      - name: Prepare modules upload
        run: tar czf modules-${{ matrix.os }}.tgz *.deb .github tests/test-runner tests/ImageOS.txt
      - uses: actions/upload-artifact@v4
        with:
          name: modules-${{ matrix.os }}
          path: modules-${{ matrix.os }}.tgz
          retention-days: 14

  testings:
    name: Testing
    strategy:
      fail-fast: false
      matrix:
        os: [20.04, 22.04]
    needs: build
    uses: ./.github/workflows/zfs-linux-tests.yml
    with:
      os: ${{ matrix.os }}

  cleanup:
    if: always()
    name: Cleanup
    runs-on: ubuntu-22.04
    needs: testings
    steps:
      - uses: actions/download-artifact@v4
      - name: Generating summary
        run: |
          tar xzf modules-22.04/modules-22.04.tgz .github tests
          .github/workflows/scripts/generate-summary.sh
      # up to 4 steps, each can have 1 MiB output (for debugging log files)
      - name: Summary for errors #1
        run: .github/workflows/scripts/generate-summary.sh 1
      - name: Summary for errors #2
        run: .github/workflows/scripts/generate-summary.sh 2
      - name: Summary for errors #3
        run: .github/workflows/scripts/generate-summary.sh 3
      - name: Summary for errors #4
        run: .github/workflows/scripts/generate-summary.sh 4
      - uses: actions/upload-artifact@v4
        with:
          name: Summary Files
          path: Summary/
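A note on the cleanup job above: actions/download-artifact@v4 with no name: fetches every artifact from the run, unpacking each into a directory named after that artifact, which is why the summary step addresses modules-22.04/modules-22.04.tgz. A sketch of the expected working directory at that point (the artifact names match those produced earlier in the run; the listing itself is illustrative):

    ls
    #   Logs-20.04-functional-part1/  Logs-22.04-functional-part1/  ...
    #   modules-20.04/modules-20.04.tgz
    #   modules-22.04/modules-22.04.tgz
    tar xzf modules-22.04/modules-22.04.tgz .github tests   # extract only scripts and test metadata
    .github/workflows/scripts/generate-summary.sh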
151 .github/workflows/zfs-qemu-packages.yml vendored
@@ -1,151 +0,0 @@
# This workflow is used to build and test RPM packages. It is a
# 'workflow_dispatch' workflow, which means it gets run manually.
#
# The workflow has a dropdown menu with two options:
#
# Build RPMs - Build release RPMs and tarballs and put them into an artifact
#              ZIP file. The directory structure used in the ZIP file mirrors
#              the ZFS yum repo.
#
# Test repo -  Test install the ZFS RPMs from the ZFS repo. On EL distros,
#              this will do a DKMS and KMOD test install from both the regular
#              and testing repos. On Fedora, it will do a DKMS install from the
#              regular repo. All test install results will be displayed in the
#              Summary page. Note that the workflow provides an optional text
#              box where you can specify the full URL to an alternate repo.
#              If left blank, it will install from the default repo from the
#              zfs-release RPM (http://download.zfsonlinux.org).
#
# Most users will never need to use this workflow. It will be used primarily
# by ZFS admins for building and testing releases.
#
name: zfs-qemu-packages

on:
  workflow_dispatch:
    inputs:
      test_type:
        type: choice
        required: false
        default: "Build RPMs"
        description: "Build RPMs or test the repo?"
        options:
          - "Build RPMs"
          - "Test repo"
      patch_level:
        type: string
        required: false
        default: ""
        description: "(optional) patch level number"
      repo_url:
        type: string
        required: false
        default: ""
        description: "(optional) repo URL (blank: use http://download.zfsonlinux.org)"

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  zfs-qemu-packages-jobs:
    name: qemu-VMs
    strategy:
      fail-fast: false
      matrix:
        os: ['almalinux8', 'almalinux9', 'almalinux10', 'fedora41', 'fedora42']
    runs-on: ubuntu-24.04
    steps:
      - uses: actions/checkout@v4
        with:
          ref: ${{ github.event.pull_request.head.sha }}

      - name: Setup QEMU
        timeout-minutes: 10
        run: .github/workflows/scripts/qemu-1-setup.sh

      - name: Start build machine
        timeout-minutes: 10
        run: .github/workflows/scripts/qemu-2-start.sh ${{ matrix.os }}

      - name: Install dependencies
        timeout-minutes: 20
        run: |
          .github/workflows/scripts/qemu-3-deps.sh ${{ matrix.os }}

      - name: Build modules or Test repo
        timeout-minutes: 30
        run: |
          set -e
          if [ "${{ github.event.inputs.test_type }}" == "Test repo" ] ; then
            # Bring VM back up and copy over zfs source
            .github/workflows/scripts/qemu-prepare-for-build.sh

            mkdir -p /tmp/repo
            ssh zfs@vm0 '$HOME/zfs/.github/workflows/scripts/qemu-test-repo-vm.sh' ${{ github.event.inputs.repo_url }}
          else
            EXTRA=""
            if [ -n "${{ github.event.inputs.patch_level }}" ] ; then
              EXTRA="--patch-level ${{ github.event.inputs.patch_level }}"
            fi

            .github/workflows/scripts/qemu-4-build.sh $EXTRA \
                --repo --release --dkms --tarball ${{ matrix.os }}
          fi

      - name: Prepare artifacts
        if: always()
        timeout-minutes: 10
        run: |
          rsync -a zfs@vm0:/tmp/repo /tmp || true
          .github/workflows/scripts/replace-dupes-with-symlinks.sh /tmp/repo
          tar -cf ${{ matrix.os }}-repo.tar -C /tmp repo

      - uses: actions/upload-artifact@v4
        id: artifact-upload
        if: always()
        with:
          name: ${{ matrix.os }}-repo
          path: ${{ matrix.os }}-repo.tar
          compression-level: 0
          retention-days: 2
          if-no-files-found: ignore

  combine_repos:
    if: always()
    needs: [zfs-qemu-packages-jobs]
    name: "Results"
    runs-on: ubuntu-latest
    steps:
      - uses: actions/download-artifact@v4
        id: artifact-download
        if: always()
      - name: Test Summary
        if: always()
        run: |
          for i in $(find . -type f -iname "*.tar") ; do
            tar -xf $i -C /tmp
          done
          tar -cf all-repo.tar -C /tmp repo

          # If we're installing from a repo, print out the summary of the versions
          # that got installed using Markdown.
          if [ "${{ github.event.inputs.test_type }}" == "Test repo" ] ; then
            cd /tmp/repo
            for i in $(ls *.txt) ; do
              nicename="$(echo $i | sed 's/.txt//g; s/-/ /g')"
              echo "### $nicename" >> $GITHUB_STEP_SUMMARY
              echo "|repo|RPM|URL|" >> $GITHUB_STEP_SUMMARY
              echo "|:---|:---|:---|" >> $GITHUB_STEP_SUMMARY
              awk '{print "|"$1"|"$2"|"$3"|"}' $i >> $GITHUB_STEP_SUMMARY
            done
          fi

      - uses: actions/upload-artifact@v4
        id: artifact-upload2
        if: always()
        with:
          name: all-repo
          path: all-repo.tar
          compression-level: 0
          retention-days: 5
          if-no-files-found: ignore
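Since this is a workflow_dispatch workflow, it can also be started from the command line instead of the Actions web UI. A sketch using the GitHub CLI, assuming gh is installed and authenticated against the repository; the -f values mirror the inputs declared above, and the repo URL is a hypothetical placeholder:

    # Test-install packages from an alternate repo URL:
    gh workflow run zfs-qemu-packages.yml \
        -f test_type="Test repo" \
        -f repo_url="https://example.com/zfs-testing"   # hypothetical URL

    # Build release RPMs with an explicit patch level:
    gh workflow run zfs-qemu-packages.yml \
        -f test_type="Build RPMs" \
        -f patch_level="2"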
178 .github/workflows/zfs-qemu.yml vendored
@@ -1,178 +0,0 @@
name: zfs-qemu

on:
  push:
  pull_request:
  workflow_dispatch:
    inputs:
      fedora_kernel_ver:
        type: string
        required: false
        default: ""
        description: "(optional) Experimental kernel version to install on Fedora (like '6.14' or '6.13.3-0.rc3')"

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  test-config:
    name: Setup
    runs-on: ubuntu-24.04
    outputs:
      test_os: ${{ steps.os.outputs.os }}
      ci_type: ${{ steps.os.outputs.ci_type }}
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Generate OS config and CI type
        id: os
        run: |
          FULL_OS='["almalinux8", "almalinux9", "almalinux10", "centos-stream9", "centos-stream10", "debian12", "debian13", "fedora41", "fedora42", "freebsd13-5r", "freebsd14-3s", "freebsd15-0c", "ubuntu22", "ubuntu24"]'
          QUICK_OS='["almalinux8", "almalinux9", "almalinux10", "debian12", "fedora42", "freebsd14-3s", "ubuntu24"]'
          # determine CI type when running on PR
          ci_type="full"
          if ${{ github.event_name == 'pull_request' }}; then
            head=${{ github.event.pull_request.head.sha }}
            base=${{ github.event.pull_request.base.sha }}
            ci_type=$(python3 .github/workflows/scripts/generate-ci-type.py $head $base)
          fi
          if [ "$ci_type" == "quick" ]; then
            os_selection="$QUICK_OS"
          else
            os_selection="$FULL_OS"
          fi

          if ${{ github.event.inputs.fedora_kernel_ver != '' }}; then
            # They specified a custom kernel version for Fedora. Use only
            # Fedora runners.
            os_json=$(echo ${os_selection} | jq -c '[.[] | select(startswith("fedora"))]')
          else
            # Normal case
            os_json=$(echo ${os_selection} | jq -c)
          fi

          echo "os=$os_json" | tee -a $GITHUB_OUTPUT
          echo "ci_type=$ci_type" | tee -a $GITHUB_OUTPUT

  qemu-vm:
    name: qemu-x86
    needs: [ test-config ]
    strategy:
      fail-fast: false
      matrix:
        # rhl: almalinux8, almalinux9, centos-stream9, fedora4x
        # debian: debian12, debian13, ubuntu22, ubuntu24
        # misc: archlinux, tumbleweed
        # FreeBSD variants of 2025-06:
        # FreeBSD Release: freebsd13-5r, freebsd14-2r, freebsd14-3r
        # FreeBSD Stable: freebsd13-5s, freebsd14-3s
        # FreeBSD Current: freebsd15-0c
        os: ${{ fromJson(needs.test-config.outputs.test_os) }}
    runs-on: ubuntu-24.04
    steps:
      - uses: actions/checkout@v4
        with:
          ref: ${{ github.event.pull_request.head.sha }}

      - name: Setup QEMU
        timeout-minutes: 20
        run: |
          # Add a timestamp to each line to debug timeouts
          while IFS=$'\n' read -r line; do
            echo "$(date +'%H:%M:%S') $line"
          done < <(.github/workflows/scripts/qemu-1-setup.sh)

      - name: Start build machine
        timeout-minutes: 10
        run: .github/workflows/scripts/qemu-2-start.sh ${{ matrix.os }}

      - name: Install dependencies
        timeout-minutes: 20
        run: .github/workflows/scripts/qemu-3-deps.sh ${{ matrix.os }} ${{ github.event.inputs.fedora_kernel_ver }}

      - name: Build modules
        timeout-minutes: 30
        run: .github/workflows/scripts/qemu-4-build.sh --poweroff --enable-debug ${{ matrix.os }}

      - name: Setup testing machines
        timeout-minutes: 5
        run: .github/workflows/scripts/qemu-5-setup.sh

      - name: Run tests
        timeout-minutes: 270
        run: .github/workflows/scripts/qemu-6-tests.sh
        env:
          CI_TYPE: ${{ needs.test-config.outputs.ci_type }}

      - name: Prepare artifacts
        if: always()
        timeout-minutes: 10
        run: .github/workflows/scripts/qemu-7-prepare.sh

      - uses: actions/upload-artifact@v4
        id: artifact-upload
        if: always()
        with:
          name: Logs-functional-${{ matrix.os }}
          path: /tmp/qemu-${{ matrix.os }}.tar
          if-no-files-found: ignore

      - name: Test Summary
        if: always()
        run: .github/workflows/scripts/qemu-8-summary.sh '${{ steps.artifact-upload.outputs.artifact-url }}'

  cleanup:
    if: always()
    name: Cleanup
    runs-on: ubuntu-latest
    needs: [ qemu-vm ]

    steps:
      - uses: actions/checkout@v4
        with:
          ref: ${{ github.event.pull_request.head.sha }}
      - uses: actions/download-artifact@v4
      - name: Generating summary
        run: .github/workflows/scripts/qemu-9-summary-page.sh
      - name: Generating summary...
        run: .github/workflows/scripts/qemu-9-summary-page.sh 2
      - name: Generating summary...
        run: .github/workflows/scripts/qemu-9-summary-page.sh 3
      - name: Generating summary...
        run: .github/workflows/scripts/qemu-9-summary-page.sh 4
      - name: Generating summary...
        run: .github/workflows/scripts/qemu-9-summary-page.sh 5
      - name: Generating summary...
        run: .github/workflows/scripts/qemu-9-summary-page.sh 6
      - name: Generating summary...
        run: .github/workflows/scripts/qemu-9-summary-page.sh 7
      - name: Generating summary...
        run: .github/workflows/scripts/qemu-9-summary-page.sh 8
      - name: Generating summary...
        run: .github/workflows/scripts/qemu-9-summary-page.sh 9
      - name: Generating summary...
        run: .github/workflows/scripts/qemu-9-summary-page.sh 10
      - name: Generating summary...
        run: .github/workflows/scripts/qemu-9-summary-page.sh 11
      - name: Generating summary...
        run: .github/workflows/scripts/qemu-9-summary-page.sh 12
      - name: Generating summary...
        run: .github/workflows/scripts/qemu-9-summary-page.sh 13
      - name: Generating summary...
        run: .github/workflows/scripts/qemu-9-summary-page.sh 14
      - name: Generating summary...
        run: .github/workflows/scripts/qemu-9-summary-page.sh 15
      - name: Generating summary...
        run: .github/workflows/scripts/qemu-9-summary-page.sh 16
      - name: Generating summary...
        run: .github/workflows/scripts/qemu-9-summary-page.sh 17
      - name: Generating summary...
        run: .github/workflows/scripts/qemu-9-summary-page.sh 18
      - name: Generating summary...
        run: .github/workflows/scripts/qemu-9-summary-page.sh 19
      - uses: actions/upload-artifact@v4
        with:
          name: Summary Files
          path: out-*
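The OS-selection logic in the Setup job is plain shell plus jq, so it can be checked locally. A minimal reproduction of the two branches (list shortened for readability):

    QUICK_OS='["almalinux8", "almalinux9", "debian12", "fedora42", "ubuntu24"]'

    # fedora_kernel_ver was given: keep only the Fedora runners
    echo "$QUICK_OS" | jq -c '[.[] | select(startswith("fedora"))]'
    # => ["fedora42"]

    # Normal case: pass the selection through unchanged (compacted)
    echo "$QUICK_OS" | jq -c
    # => ["almalinux8","almalinux9","debian12","fedora42","ubuntu24"]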
77 .github/workflows/zloop.yml vendored
@@ -1,77 +0,0 @@
name: zloop

on:
  push:
  pull_request:

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  zloop:
    runs-on: ubuntu-24.04
    env:
      WORK_DIR: /mnt/zloop
      CORE_DIR: /mnt/zloop/cores
    steps:
      - uses: actions/checkout@v4
        with:
          ref: ${{ github.event.pull_request.head.sha }}
      - name: Install dependencies
        run: |
          sudo apt-get purge -y snapd google-chrome-stable firefox
          ONLY_DEPS=1 .github/workflows/scripts/qemu-3-deps-vm.sh ubuntu24
      - name: Autogen.sh
        run: |
          sed -i '/DEBUG_CFLAGS="-Werror"/s/^/#/' config/zfs-build.m4
          ./autogen.sh
      - name: Configure
        run: |
          ./configure --prefix=/usr --enable-debug --enable-debuginfo \
            --enable-asan --enable-ubsan \
            --enable-debug-kmem --enable-debug-kmem-tracking
      - name: Make
        run: |
          make -j$(nproc)
      - name: Install
        run: |
          sudo make install
          sudo depmod
          sudo modprobe zfs
      - name: Tests
        run: |
          sudo truncate -s 256G /mnt/vdev
          sudo zpool create cipool -m $WORK_DIR -O compression=on -o autotrim=on /mnt/vdev
          sudo /usr/share/zfs/zloop.sh -t 600 -I 6 -l -m 1 -c $CORE_DIR -f $WORK_DIR -- -T 120 -P 60
      - name: Prepare artifacts
        if: failure()
        run: |
          sudo chmod +r -R $WORK_DIR/
      - name: Ztest log
        if: failure()
        run: |
          grep -B10 -A1000 'ASSERT' $CORE_DIR/*/ztest.out || tail -n 1000 $CORE_DIR/*/ztest.out
      - name: Gdb log
        if: failure()
        run: |
          sed -n '/Backtraces (full)/q;p' $CORE_DIR/*/ztest.gdb
      - name: Zdb log
        if: failure()
        run: |
          cat $CORE_DIR/*/ztest.zdb
      - uses: actions/upload-artifact@v4
        if: failure()
        with:
          name: Logs
          path: |
            /mnt/zloop/*/
            !/mnt/zloop/cores/*/vdev/
          if-no-files-found: ignore
      - uses: actions/upload-artifact@v4
        if: failure()
        with:
          name: Pool files
          path: |
            /mnt/zloop/cores/*/vdev/
          if-no-files-found: ignore
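The Tests step builds its pool on a sparse file: truncate -s 256G allocates the backing store lazily, so it costs almost no disk until ztest actually writes. A stand-alone sketch of the same pattern, with illustrative paths and a smaller size:

    truncate -s 4G /tmp/vdev-demo            # sparse backing file
    sudo zpool create demopool /tmp/vdev-demo
    zpool list demopool                      # 4G capacity, near-zero allocated
    sudo zpool destroy demopool
    rm /tmp/vdev-demo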
40 .mailmap
@@ -23,7 +23,6 @@
 # These maps are making names consistent where they have varied but the email
 # address has never changed. In most cases, the full name is in the
 # Signed-off-by of a commit with a matching author.
-Achill Gilgenast <achill@achill.org>
 Ahelenia Ziemiańska <nabijaczleweli@gmail.com>
 Ahelenia Ziemiańska <nabijaczleweli@nabijaczleweli.xyz>
 Alex John <alex@stty.io>
@@ -31,14 +30,12 @@ Andreas Dilger <adilger@dilger.ca>
 Andrew Walker <awalker@ixsystems.com>
 Benedikt Neuffer <github@itfriend.de>
 Chengfei Zhu <chengfeix.zhu@intel.com>
-ChenHao Lu <18302010006@fudan.edu.cn>
 Chris Lindee <chris.lindee+github@gmail.com>
 Colm Buckley <colm@tuatha.org>
 Crag Wang <crag0715@gmail.com>
 Damian Szuberski <szuberskidamian@gmail.com>
 Daniel Kolesa <daniel@octaforge.org>
 Debabrata Banerjee <dbavatar@gmail.com>
-Diwakar Kristappagari <diwakar-k@hpe.com>
 Finix Yan <yanchongwen@hotmail.com>
 Gaurav Kumar <gauravk.18@gmail.com>
 Gionatan Danti <g.danti@assyoma.it>
@@ -46,7 +43,6 @@ Glenn Washburn <development@efficientek.com>
 Gordan Bobic <gordan.bobic@gmail.com>
 Gregory Bartholomew <gregory.lee.bartholomew@gmail.com>
 hedong zhang <h_d_zhang@163.com>
-Ilkka Sovanto <github@ilkka.kapsi.fi>
 InsanePrawn <Insane.Prawny@gmail.com>
 Jason Cohen <jwittlincohen@gmail.com>
 Jason Harmening <jason.harmening@gmail.com>
@@ -61,7 +57,6 @@ KernelOfTruth <kerneloftruth@gmail.com>
 Liu Hua <liu.hua130@zte.com.cn>
 Liu Qing <winglq@gmail.com>
 loli10K <ezomori.nozomu@gmail.com>
-Mart Frauenlob <allkind@fastest.cc>
 Matthias Blankertz <matthias@blankertz.org>
 Michael Gmelin <grembo@FreeBSD.org>
 Olivier Mazouffre <olivier.mazouffre@ims-bordeaux.fr>
@@ -72,26 +67,12 @@ Rob Norris <robn@despairlabs.com>
 Rob Norris <rob.norris@klarasystems.com>
 Sam Lunt <samuel.j.lunt@gmail.com>
 Sanjeev Bagewadi <sanjeev.bagewadi@gmail.com>
-Sebastian Wuerl <s.wuerl@mailbox.org>
-SHENGYI HONG <aokblast@FreeBSD.org>
 Stoiko Ivanov <github@nomore.at>
 Tamas TEVESZ <ice@extreme.hu>
 WHR <msl0000023508@gmail.com>
 Yanping Gao <yanping.gao@xtaotech.com>
 Youzhong Yang <youzhong@gmail.com>
-
-# Signed-off-by: overriding Author:
-Alexander Ziaee <ziaee@FreeBSD.org> <concussious@runbox.com>
-Felix Schmidt <felixschmidt20@aol.com> <f.sch.prototype@gmail.com>
-Olivier Certner <olce@FreeBSD.org> <olce.freebsd@certner.fr>
-Phil Sutter <phil@nwl.cc> <p.github@nwl.cc>
-poscat <poscat@poscat.moe> <poscat0x04@outlook.com>
-Qiuhao Chen <chenqiuhao1997@gmail.com> <haohao0924@126.com>
-Ryan <errornointernet@envs.net> <error.nointernet@gmail.com>
-Sietse <sietse@wizdom.nu> <uglymotha@wizdom.nu>
-Yuxin Wang <yuxinwang9999@gmail.com> <Bi11gates9999@gmail.com>
-Zhenlei Huang <zlei@FreeBSD.org> <zlei.huang@gmail.com>

 # Commits from strange places, long ago
 Brian Behlendorf <behlendorf1@llnl.gov> <behlendo@7e1ea52c-4ff2-0310-8f11-9dd32ca42a1c>
 Brian Behlendorf <behlendorf1@llnl.gov> <behlendo@fedora-17-amd64.(none)>
@@ -105,11 +86,9 @@ Tulsi Jain <tulsi.jain@delphix.com> <tulsi.jain@Tulsi-Jains-MacBook-Pro.local>
 # Mappings from Github no-reply addresses
 ajs124 <git@ajs124.de> <ajs124@users.noreply.github.com>
 Alek Pinchuk <apinchuk@axcient.com> <alek-p@users.noreply.github.com>
-Aleksandr Liber <aleksandr.liber@perforce.com> <61714074+AleksandrLiber@users.noreply.github.com>
 Alexander Lobakin <alobakin@pm.me> <solbjorn@users.noreply.github.com>
 Alexey Smirnoff <fling@member.fsf.org> <fling-@users.noreply.github.com>
 Allen Holl <allen.m.holl@gmail.com> <65494904+allen-4@users.noreply.github.com>
-Alphan Yılmaz <alphanyilmaz@gmail.com> <a1ea321@users.noreply.github.com>
 Ameer Hamza <ahamza@ixsystems.com> <106930537+ixhamza@users.noreply.github.com>
 Andrew J. Hesford <ajh@sideband.org> <48421688+ahesford@users.noreply.github.com>
 Andrew Sun <me@andrewsun.com> <as-com@users.noreply.github.com>
@@ -117,22 +96,18 @@ Aron Xu <happyaron.xu@gmail.com> <happyaron@users.noreply.github.com>
 Arun KV <arun.kv@datacore.com> <65647132+arun-kv@users.noreply.github.com>
 Ben Wolsieffer <benwolsieffer@gmail.com> <lopsided98@users.noreply.github.com>
 bernie1995 <bernie.pikes@gmail.com> <42413912+bernie1995@users.noreply.github.com>
-Bojan Novković <bnovkov@FreeBSD.org> <72801811+bnovkov@users.noreply.github.com>
 Boris Protopopov <boris.protopopov@actifio.com> <bprotopopov@users.noreply.github.com>
 Brad Forschinger <github@bnjf.id.au> <bnjf@users.noreply.github.com>
 Brandon Thetford <brandon@dodecatec.com> <dodexahedron@users.noreply.github.com>
 buzzingwires <buzzingwires@outlook.com> <131118055+buzzingwires@users.noreply.github.com>
 Cedric Maunoury <cedric.maunoury@gmail.com> <38213715+cedricmaunoury@users.noreply.github.com>
 Charles Suh <charles.suh@gmail.com> <charlessuh@users.noreply.github.com>
-Chris Peredun <chris.peredun@ixsystems.com> <126915832+chrisperedun@users.noreply.github.com>
 Dacian Reece-Stremtan <dacianstremtan@gmail.com> <35844628+dacianstremtan@users.noreply.github.com>
 Damian Szuberski <szuberskidamian@gmail.com> <30863496+szubersk@users.noreply.github.com>
 Daniel Hiepler <d-git@coderdu.de> <32984777+heeplr@users.noreply.github.com>
 Daniel Kobras <d.kobras@science-computing.de> <sckobras@users.noreply.github.com>
 Daniel Reichelt <hacking@nachtgeist.net> <nachtgeist@users.noreply.github.com>
 David Quigley <david.quigley@intel.com> <dpquigl@users.noreply.github.com>
-Dennis R. Friedrichsen <dennis.r.friedrichsen@gmail.com> <31087738+dennisfriedrichsen@users.noreply.github.com>
-Dex Wood <slash2314@gmail.com> <slash2314@users.noreply.github.com>
 DHE <git@dehacked.net> <DeHackEd@users.noreply.github.com>
 Dmitri John Ledkov <dimitri.ledkov@canonical.com> <19779+xnox@users.noreply.github.com>
 Dries Michiels <driesm.michiels@gmail.com> <32487486+driesmp@users.noreply.github.com>
@@ -142,12 +117,10 @@ Fedor Uporov <fuporov.vstack@gmail.com> <60701163+fuporovvStack@users.noreply.github.com>
 Felix Dörre <felix@dogcraft.de> <felixdoerre@users.noreply.github.com>
 Felix Neumärker <xdch47@posteo.de> <34678034+xdch47@users.noreply.github.com>
 Finix Yan <yancw@info2soft.com> <Finix1979@users.noreply.github.com>
-Friedrich Weber <f.weber@proxmox.com> <56110206+frwbr@users.noreply.github.com>
 Gaurav Kumar <gauravk.18@gmail.com> <gaurkuma@users.noreply.github.com>
 George Gaydarov <git@gg7.io> <gg7@users.noreply.github.com>
 Georgy Yakovlev <gyakovlev@gentoo.org> <168902+gyakovlev@users.noreply.github.com>
 Gerardwx <gerardw@alum.mit.edu> <Gerardwx@users.noreply.github.com>
-Germano Massullo <germano.massullo@gmail.com> <Germano0@users.noreply.github.com>
 Gian-Carlo DeFazio <defazio1@llnl.gov> <defaziogiancarlo@users.noreply.github.com>
 Giuseppe Di Natale <dinatale2@llnl.gov> <dinatale2@users.noreply.github.com>
 Hajo Möller <dasjoe@gmail.com> <dasjoe@users.noreply.github.com>
@@ -155,7 +128,6 @@ Harry Mallon <hjmallon@gmail.com> <1816667+hjmallon@users.noreply.github.com>
 Hiếu Lê <leorize+oss@disroot.org> <alaviss@users.noreply.github.com>
 Jake Howard <git@theorangeone.net> <RealOrangeOne@users.noreply.github.com>
 James Cowgill <james.cowgill@mips.com> <jcowgill@users.noreply.github.com>
-Jaron Kent-Dobias <jaron@kent-dobias.com> <kentdobias@users.noreply.github.com>
 Jason King <jason.king@joyent.com> <jasonbking@users.noreply.github.com>
 Jeff Dike <jdike@akamai.com> <52420226+jdike@users.noreply.github.com>
 Jitendra Patidar <jitendra.patidar@nutanix.com> <53164267+jsai20@users.noreply.github.com>
@@ -165,10 +137,7 @@ John L. Hammond <john.hammond@intel.com> <35266395+jhammond-intel@users.noreply.github.com>
 John-Mark Gurney <jmg@funkthat.com> <jmgurney@users.noreply.github.com>
 John Ramsden <johnramsden@riseup.net> <johnramsden@users.noreply.github.com>
 Jonathon Fernyhough <jonathon@m2x.dev> <559369+jonathonf@users.noreply.github.com>
-Jose Luis Duran <jlduran@gmail.com> <jlduran@users.noreply.github.com>
 Justin Hibbits <chmeeedalf@gmail.com> <chmeeedalf@users.noreply.github.com>
-Kaitlin Hoang <kthoang@amazon.com> <khoang98@users.noreply.github.com>
-Kevin Greene <kevin.greene@delphix.com> <104801862+kxgreene@users.noreply.github.com>
 Kevin Jin <lostking2008@hotmail.com> <33590050+jxdking@users.noreply.github.com>
 Kevin P. Fleming <kevin@km6g.us> <kpfleming@users.noreply.github.com>
 Krzysztof Piecuch <piecuch@kpiecuch.pl> <3964215+pikrzysztof@users.noreply.github.com>
@@ -179,11 +148,9 @@ Lorenz Hüdepohl <dev@stellardeath.org> <lhuedepohl@users.noreply.github.com>
 Luís Henriques <henrix@camandro.org> <73643340+lumigch@users.noreply.github.com>
 Marcin Skarbek <git@skarbek.name> <mskarbek@users.noreply.github.com>
 Matt Fiddaman <github@m.fiddaman.uk> <81489167+matt-fidd@users.noreply.github.com>
-Maxim Filimonov <che@bein.link> <part1zano@users.noreply.github.com>
 Max Zettlmeißl <max@zettlmeissl.de> <6818198+maxz@users.noreply.github.com>
 Michael Niewöhner <foss@mniewoehner.de> <c0d3z3r0@users.noreply.github.com>
 Michael Zhivich <mzhivich@akamai.com> <33133421+mzhivich@users.noreply.github.com>
-MigeljanImeri <ImeriMigel@gmail.com> <78048439+MigeljanImeri@users.noreply.github.com>
 Mo Zhou <cdluminate@gmail.com> <5723047+cdluminate@users.noreply.github.com>
 Nick Mattis <nickm970@gmail.com> <nmattis@users.noreply.github.com>
 omni <omni+vagant@hack.org> <79493359+omnivagant@users.noreply.github.com>
@@ -197,7 +164,6 @@ Ping Huang <huangping@smartx.com> <101400146+hpingfs@users.noreply.github.com>
 Piotr P. Stefaniak <pstef@freebsd.org> <pstef@users.noreply.github.com>
 Richard Allen <belperite@gmail.com> <33836503+belperite@users.noreply.github.com>
 Rich Ercolani <rincebrain@gmail.com> <214141+rincebrain@users.noreply.github.com>
-Rick Macklem <rmacklem@uoguelph.ca> <64620010+rmacklem@users.noreply.github.com>
 Rob Wing <rob.wing@klarasystems.com> <98866084+rob-wing@users.noreply.github.com>
 Roman Strashkin <roman.strashkin@nexenta.com> <Ramzec@users.noreply.github.com>
 Ryan Hirasaki <ryanhirasaki@gmail.com> <4690732+RyanHir@users.noreply.github.com>
@@ -208,22 +174,16 @@ Scott Colby <scott@scolby.com> <scolby33@users.noreply.github.com>
 Sean Eric Fagan <kithrup@mac.com> <kithrup@users.noreply.github.com>
 Spencer Kinny <spencerkinny1995@gmail.com> <30333052+Spencer-Kinny@users.noreply.github.com>
 Srikanth N S <srikanth.nagasubbaraoseetharaman@hpe.com> <75025422+nssrikanth@users.noreply.github.com>
-Stefan Lendl <s.lendl@proxmox.com> <1321542+stfl@users.noreply.github.com>
-Thomas Bertschinger <bertschinger@lanl.gov> <101425190+bertschinger@users.noreply.github.com>
 Thomas Geppert <geppi@digitx.de> <geppi@users.noreply.github.com>
 Tim Crawford <tcrawford@datto.com> <crawfxrd@users.noreply.github.com>
-Todd Seidelmann <18294602+seidelma@users.noreply.github.com>
 Tom Matthews <tom@axiom-partners.com> <tomtastic@users.noreply.github.com>
 Tony Perkins <tperkins@datto.com> <62951051+tony-zfs@users.noreply.github.com>
 Torsten Wörtwein <twoertwein@gmail.com> <twoertwein@users.noreply.github.com>
 Tulsi Jain <tulsi.jain@delphix.com> <TulsiJain@users.noreply.github.com>
 Václav Skála <skala@vshosting.cz> <33496485+vaclavskala@users.noreply.github.com>
-Vaibhav Bhanawat <vaibhav.bhanawat@delphix.com> <88050553+vaibhav-delphix@users.noreply.github.com>
-Vandana Rungta <vrungta@amazon.com> <46906819+vandanarungta@users.noreply.github.com>
 Violet Purcell <vimproved@inventati.org> <66446404+vimproved@users.noreply.github.com>
 Vipin Kumar Verma <vipin.verma@hpe.com> <75025470+vermavipinkumar@users.noreply.github.com>
 Wolfgang Bumiller <w.bumiller@proxmox.com> <Blub@users.noreply.github.com>
-XDTG <click1799@163.com> <35128600+XDTG@users.noreply.github.com>
 xtouqh <xtouqh@hotmail.com> <72357159+xtouqh@users.noreply.github.com>
 Yuri Pankov <yuripv@FreeBSD.org> <113725409+yuripv@users.noreply.github.com>
 Yuri Pankov <yuripv@FreeBSD.org> <82001006+yuripv@users.noreply.github.com>
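These .mailmap entries are what make aggregated views such as git shortlog report one canonical identity per contributor. A quick way to verify a single mapping from a clone of the repository:

    git check-mailmap "ajs124 <ajs124@users.noreply.github.com>"
    # => ajs124 <git@ajs124.de>

    git shortlog -se | head   # the same mapping applied in aggregate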
91 AUTHORS
@@ -10,7 +10,6 @@ PAST MAINTAINERS:
 CONTRIBUTORS:

 Aaron Fineman <abyxcos@gmail.com>
-Achill Gilgenast <achill@achill.org>
 Adam D. Moss <c@yotes.com>
 Adam Leventhal <ahl@delphix.com>
 Adam Stevko <adam.stevko@gmail.com>
@@ -30,7 +29,6 @@ CONTRIBUTORS:
 Alejandro Colomar <Colomar.6.4.3@GMail.com>
 Alejandro R. Sedeño <asedeno@mit.edu>
 Alek Pinchuk <alek@nexenta.com>
-Aleksandr Liber <aleksandr.liber@perforce.com>
 Aleksa Sarai <cyphar@cyphar.com>
 Alexander Eremin <a.eremin@nexenta.com>
 Alexander Lobakin <alobakin@pm.me>
@@ -38,7 +36,6 @@ CONTRIBUTORS:
 Alexander Pyhalov <apyhalov@gmail.com>
 Alexander Richardson <Alexander.Richardson@cl.cam.ac.uk>
 Alexander Stetsenko <ams@nexenta.com>
-Alexander Ziaee <ziaee@FreeBSD.org>
 Alex Braunegg <alex.braunegg@gmail.com>
 Alexey Shvetsov <alexxy@gentoo.org>
 Alexey Smirnoff <fling@member.fsf.org>
@@ -49,7 +46,6 @@ CONTRIBUTORS:
 Alex Zhuravlev <alexey.zhuravlev@intel.com>
 Allan Jude <allanjude@freebsd.org>
 Allen Holl <allen.m.holl@gmail.com>
-Alphan Yılmaz <alphanyilmaz@gmail.com>
 alteriks <alteriks@gmail.com>
 Alyssa Ross <hi@alyssa.is>
 Ameer Hamza <ahamza@ixsystems.com>
@@ -60,7 +56,6 @@ CONTRIBUTORS:
 Andreas Buschmann <andreas.buschmann@tech.net.de>
 Andreas Dilger <adilger@intel.com>
 Andreas Vögele <andreas@andreasvoegele.com>
-Andres <a-d-j-i@users.noreply.github.com>
 Andrew Barnes <barnes333@gmail.com>
 Andrew Hamilton <ahamilto@tjhsst.edu>
 Andrew Innes <andrew.c12@gmail.com>
@@ -74,7 +69,6 @@ CONTRIBUTORS:
 Andrey Prokopenko <job@terem.fr>
 Andrey Vesnovaty <andrey.vesnovaty@gmail.com>
 Andriy Gapon <avg@freebsd.org>
-Andriy Tkachuk <andriy.tkachuk@seagate.com>
 Andy Bakun <github@thwartedefforts.org>
 Andy Fiddaman <omnios@citrus-it.co.uk>
 Aniruddha Shankar <k@191a.net>
@@ -85,7 +79,6 @@ CONTRIBUTORS:
 Arne Jansen <arne@die-jansens.de>
 Aron Xu <happyaron.xu@gmail.com>
 Arshad Hussain <arshad.hussain@aeoncomputing.com>
-Artem <artem.vlasenko@ossrevival.org>
 Arun KV <arun.kv@datacore.com>
 Arvind Sankar <nivedita@alum.mit.edu>
 Attila Fülöp <attila@fueloep.org>
@@ -95,18 +88,15 @@ CONTRIBUTORS:
 Bassu <bassu@phi9.com>
 Ben Allen <bsallen@alcf.anl.gov>
 Ben Cordero <bencord0@condi.me>
-Benda Xu <orv@debian.org>
 Benedikt Neuffer <github@itfriend.de>
 Benjamin Albrecht <git@albrecht.io>
 Benjamin Gentil <benjgentil.pro@gmail.com>
-Benjamin Sherman <benjamin@holyarmy.org>
 Ben McGough <bmcgough@fredhutch.org>
 Ben Rubson <ben.rubson@gmail.com>
 Ben Wolsieffer <benwolsieffer@gmail.com>
 bernie1995 <bernie.pikes@gmail.com>
 Bill McGonigle <bill-github.com-public1@bfccomputing.com>
 Bill Pijewski <wdp@joyent.com>
-Bojan Novković <bnovkov@FreeBSD.org>
 Boris Protopopov <boris.protopopov@nexenta.com>
 Brad Forschinger <github@bnjf.id.au>
 Brad Lewis <brad.lewis@delphix.com>
@@ -121,9 +111,7 @@ CONTRIBUTORS:
 bzzz77 <bzzz.tomas@gmail.com>
 cable2999 <cable2999@users.noreply.github.com>
 Caleb James DeLisle <calebdelisle@lavabit.com>
-Cameron Harr <harr1@llnl.gov>
 Cao Xuewen <cao.xuewen@zte.com.cn>
-Carl George <carlwgeorge@gmail.com>
 Carlo Landmeter <clandmeter@gmail.com>
 Carlos Alberto Lopez Perez <clopez@igalia.com>
 Cedric Maunoury <cedric.maunoury@gmail.com>
@@ -132,15 +120,12 @@ CONTRIBUTORS:
 Chen Can <chen.can2@zte.com.cn>
 Chengfei Zhu <chengfeix.zhu@intel.com>
 Chen Haiquan <oc@yunify.com>
-ChenHao Lu <18302010006@fudan.edu.cn>
 Chip Parker <aparker@enthought.com>
 Chris Burroughs <chris.burroughs@gmail.com>
-Chris Davidson <christopher.davidson@gmail.com>
 Chris Dunlap <cdunlap@llnl.gov>
 Chris Dunlop <chris@onthe.net.au>
 Chris Lindee <chris.lindee+github@gmail.com>
 Chris McDonough <chrism@plope.com>
-Chris Peredun <chris.peredun@ixsystems.com>
 Chris Siden <chris.siden@delphix.com>
 Chris Siebenmann <cks.github@cs.toronto.edu>
 Christer Ekholm <che@chrekh.se>
@@ -159,7 +144,6 @@ CONTRIBUTORS:
 Clint Armstrong <clint@clintarmstrong.net>
 Coleman Kane <ckane@colemankane.org>
 Colin Ian King <colin.king@canonical.com>
-Colin Percival <cperciva@tarsnap.com>
 Colm Buckley <colm@tuatha.org>
 Crag Wang <crag0715@gmail.com>
 Craig Loomis <cloomis@astro.princeton.edu>
@@ -172,12 +156,10 @@ CONTRIBUTORS:
 Damiano Albani <damiano.albani@gmail.com>
 Damian Szuberski <szuberskidamian@gmail.com>
 Damian Wojsław <damian@wojslaw.pl>
-Daniel Berlin <dberlin@dberlin.org>
 Daniel Hiepler <d-git@coderdu.de>
 Daniel Hoffman <dj.hoffman@delphix.com>
 Daniel Kobras <d.kobras@science-computing.de>
 Daniel Kolesa <daniel@octaforge.org>
-Daniel Perry <dtperry@amazon.com>
 Daniel Reichelt <hacking@nachtgeist.net>
 Daniel Stevenson <bot@dstev.net>
 Daniel Verite <daniel@verite.pro>
@@ -194,17 +176,13 @@ CONTRIBUTORS:
 David Quigley <david.quigley@intel.com>
 Debabrata Banerjee <dbanerje@akamai.com>
 D. Ebdrup <debdrup@freebsd.org>
-Dennis R. Friedrichsen <dennis.r.friedrichsen@gmail.com>
 Denys Rtveliashvili <denys@rtveliashvili.name>
 Derek Dai <daiderek@gmail.com>
-Derek Schrock <dereks@lifeofadishwasher.com>
-Dex Wood <slash2314@gmail.com>
 DHE <git@dehacked.net>
 Didier Roche <didrocks@ubuntu.com>
 Dimitri John Ledkov <xnox@ubuntu.com>
 Dimitry Andric <dimitry@andric.com>
 Dirkjan Bussink <d.bussink@gmail.com>
-Diwakar Kristappagari <diwakar-k@hpe.com>
 Dmitry Khasanov <pik4ez@gmail.com>
 Dominic Pearson <dsp@technoanimal.net>
 Dominik Hassler <hadfl@omniosce.org>
@@ -234,12 +212,10 @@ CONTRIBUTORS:
 Fedor Uporov <fuporov.vstack@gmail.com>
 Felix Dörre <felix@dogcraft.de>
 Felix Neumärker <xdch47@posteo.de>
-Felix Schmidt <felixschmidt20@aol.com>
 Feng Sun <loyou85@gmail.com>
 Finix Yan <yancw@info2soft.com>
 Francesco Mazzoli <f@mazzo.li>
 Frederik Wessels <wessels147@gmail.com>
-Friedrich Weber <f.weber@proxmox.com>
 Frédéric Vanniere <f.vanniere@planet-work.com>
 Gabriel A. Devenyi <gdevenyi@gmail.com>
 Garrett D'Amore <garrett@nexenta.com>
@@ -255,18 +231,13 @@ CONTRIBUTORS:
 George Wilson <gwilson@delphix.com>
 Georgy Yakovlev <ya@sysdump.net>
 Gerardwx <gerardw@alum.mit.edu>
-Germano Massullo <germano.massullo@gmail.com>
 Gian-Carlo DeFazio <defazio1@llnl.gov>
 Gionatan Danti <g.danti@assyoma.it>
 Giuseppe Di Natale <guss80@gmail.com>
-Gleb Smirnoff <glebius@FreeBSD.org>
 Glenn Washburn <development@efficientek.com>
-glibg10b <glibg10b@users.noreply.github.com>
-gofaster <felix.gofaster@gmail.com>
 Gordan Bobic <gordan@redsleeve.org>
 Gordon Bergling <gbergling@googlemail.com>
 Gordon Ross <gwr@nexenta.com>
-Gordon Tetlow <gordon@freebsd.org>
 Graham Christensen <graham@grahamc.com>
 Graham Perrin <grahamperrin@gmail.com>
 Gregor Kopka <gregor@kopka.net>
@@ -293,14 +264,11 @@ CONTRIBUTORS:
 Igor K <igor@dilos.org>
 Igor Kozhukhov <ikozhukhov@gmail.com>
 Igor Lvovsky <ilvovsky@gmail.com>
-Igor Ostapenko <pm@igoro.pro>
 ilbsmart <wgqimut@gmail.com>
-Ilkka Sovanto <github@ilkka.kapsi.fi>
 illiliti <illiliti@protonmail.com>
 ilovezfs <ilovezfs@icloud.com>
 InsanePrawn <Insane.Prawny@gmail.com>
 Isaac Huang <he.huang@intel.com>
-Ivan Volosyuk <Ivan.Volosyuk@gmail.com>
 Jacek Fefliński <feflik@gmail.com>
 Jacob Adams <tookmund@gmail.com>
 Jake Howard <git@theorangeone.net>
@@ -308,19 +276,15 @@ CONTRIBUTORS:
 James H <james@kagisoft.co.uk>
 James Lee <jlee@thestaticvoid.com>
 James Pan <jiaming.pan@yahoo.com>
-James Reilly <jreilly1821@gmail.com>
 James Wah <james@laird-wah.net>
 Jan Engelhardt <jengelh@inai.de>
 Jan Kryl <jan.kryl@nexenta.com>
 Jan Sanislo <oystr@cs.washington.edu>
-Jaron Kent-Dobias <jaron@kent-dobias.com>
 Jason Cohen <jwittlincohen@gmail.com>
 Jason Harmening <jason.harmening@gmail.com>
 Jason King <jason.brian.king@gmail.com>
-Jason Lee <jasonlee@lanl.gov>
 Jason Zaman <jasonzaman@gmail.com>
 Javen Wu <wu.javen@gmail.com>
-Jaydeep Kshirsagar <jkshirsagar@maxlinear.com>
 Jean-Baptiste Lallement <jean-baptiste@ubuntu.com>
 Jeff Dike <jdike@akamai.com>
 Jeremy Faulkner <gldisater@gmail.com>
@@ -328,12 +292,10 @@ CONTRIBUTORS:
 Jeremy Jones <jeremy@delphix.com>
 Jeremy Visser <jeremy.visser@gmail.com>
 Jerry Jelinek <jerry.jelinek@joyent.com>
-Jerzy Kołosowski <jerzy@kolosowscy.pl>
 Jessica Clarke <jrtc27@jrtc27.com>
 Jinshan Xiong <jinshan.xiong@intel.com>
 Jitendra Patidar <jitendra.patidar@nutanix.com>
 JK Dingwall <james@dingwall.me.uk>
-Joel Low <joel@joelsplace.sg>
 Joe Stein <joe.stein@delphix.com>
 John-Mark Gurney <jmg@funkthat.com>
 John Albietz <inthecloud247@gmail.com>
@@ -351,7 +313,6 @@ CONTRIBUTORS:
 Jonathon Fernyhough <jonathon@m2x.dev>
 Jorgen Lundman <lundman@lundman.net>
 Josef 'Jeff' Sipek <josef.sipek@nexenta.com>
-Jose Luis Duran <jlduran@gmail.com>
 Josh Soref <jsoref@users.noreply.github.com>
 Joshua M. Clulow <josh@sysmgr.org>
 José Luis Salvador Rufo <salvador.joseluis@gmail.com>
@@ -375,14 +336,11 @@ CONTRIBUTORS:
 Kash Pande <kash@tripleback.net>
 Kay Pedersen <christianpe96@gmail.com>
 Keith M Wesolowski <wesolows@foobazco.org>
-Kent Ross <k@mad.cash>
 KernelOfTruth <kerneloftruth@gmail.com>
 Kevin Bowling <kevin.bowling@kev009.com>
-Kevin Greene <kevin.greene@delphix.com>
 Kevin Jin <lostking2008@hotmail.com>
 Kevin P. Fleming <kevin@km6g.us>
 Kevin Tanguy <kevin.tanguy@ovh.net>
-khoang98 <khoang98@users.noreply.github.com>
 KireinaHoro <i@jsteward.moe>
 Kjeld Schouten-Lebbing <kjeld@schouten-lebbing.nl>
 Kleber Tarcísio <klebertarcisio@yahoo.com.br>
@@ -390,7 +348,6 @@ CONTRIBUTORS:
 Kohsuke Kawaguchi <kk@kohsuke.org>
 Konstantin Khorenko <khorenko@virtuozzo.com>
 KORN Andras <korn@elan.rulez.org>
-kotauskas <v.toncharov@gmail.com>
 Kristof Provost <github@sigsegv.be>
 Krzysztof Piecuch <piecuch@kpiecuch.pl>
 Kyle Blatter <kyleblatter@llnl.gov>
@@ -432,17 +389,14 @@ CONTRIBUTORS:
 Mark Shellenbaum <Mark.Shellenbaum@Oracle.COM>
 marku89 <mar42@kola.li>
 Mark Wright <markwright@internode.on.net>
-Mart Frauenlob <allkind@fastest.cc>
 Martin Matuska <mm@FreeBSD.org>
 Martin Rüegg <martin.rueegg@metaworx.ch>
-Martin Wagner <martin.wagner.dev@gmail.com>
 Massimo Maggi <me@massimo-maggi.eu>
 Mateusz Guzik <mjguzik@gmail.com>
 Mateusz Piotrowski <0mp@FreeBSD.org>
 Mathieu Velten <matmaul@gmail.com>
 Matt Fiddaman <github@m.fiddaman.uk>
 Matthew Ahrens <matt@delphix.com>
-Matthew Heller <matthew.f.heller@gmail.com>
 Matthew Thode <mthode@mthode.org>
 Matthias Blankertz <matthias@blankertz.org>
 Matt Johnston <matt@fugro-fsi.com.au>
@@ -451,12 +405,10 @@ CONTRIBUTORS:
 Matus Kral <matuskral@me.com>
 Mauricio Faria de Oliveira <mfo@canonical.com>
 Max Grossman <max.grossman@delphix.com>
-Maxim Filimonov <che@bein.link>
 Maximilian Mehnert <maximilian.mehnert@gmx.de>
 Max Zettlmeißl <max@zettlmeissl.de>
 Md Islam <mdnahian@outlook.com>
 megari <megari@iki.fi>
-Meriel Luna Mittelbach <lunarlambda@gmail.com>
 Michael D Labriola <michael.d.labriola@gmail.com>
 Michael Franzl <michael@franzl.name>
 Michael Gebetsroither <michael@mgeb.org>
@@ -465,14 +417,12 @@ CONTRIBUTORS:
 Michael Niewöhner <foss@mniewoehner.de>
 Michael Zhivich <mzhivich@akamai.com>
 Michal Vasilek <michal@vasilek.cz>
-MigeljanImeri <ImeriMigel@gmail.com>
 Mike Gerdts <mike.gerdts@joyent.com>
 Mike Harsch <mike@harschsystems.com>
 Mike Leddy <mike.leddy@gmail.com>
 Mike Swanson <mikeonthecomputer@gmail.com>
 Milan Jurik <milan.jurik@xylab.cz>
 Minsoo Choo <minsoochoo0122@proton.me>
-mnrx <mnrx@users.noreply.github.com>
 Mohamed Tawfik <m_tawfik@aucegypt.edu>
|
||||||
Morgan Jones <mjones@rice.edu>
|
Morgan Jones <mjones@rice.edu>
|
||||||
Moritz Maxeiner <moritz@ucworks.org>
|
Moritz Maxeiner <moritz@ucworks.org>
|
||||||
@ -498,13 +448,11 @@ CONTRIBUTORS:
|
|||||||
Olaf Faaland <faaland1@llnl.gov>
|
Olaf Faaland <faaland1@llnl.gov>
|
||||||
Oleg Drokin <green@linuxhacker.ru>
|
Oleg Drokin <green@linuxhacker.ru>
|
||||||
Oleg Stepura <oleg@stepura.com>
|
Oleg Stepura <oleg@stepura.com>
|
||||||
Olivier Certner <olce@FreeBSD.org>
|
|
||||||
Olivier Mazouffre <olivier.mazouffre@ims-bordeaux.fr>
|
Olivier Mazouffre <olivier.mazouffre@ims-bordeaux.fr>
|
||||||
omni <omni+vagant@hack.org>
|
omni <omni+vagant@hack.org>
|
||||||
Orivej Desh <orivej@gmx.fr>
|
Orivej Desh <orivej@gmx.fr>
|
||||||
Pablo Correa Gómez <ablocorrea@hotmail.com>
|
Pablo Correa Gómez <ablocorrea@hotmail.com>
|
||||||
Palash Gandhi <pbg4930@rit.edu>
|
Palash Gandhi <pbg4930@rit.edu>
|
||||||
Patrick Fasano <patrick@patrickfasano.com>
|
|
||||||
Patrick Mooney <pmooney@pfmooney.com>
|
Patrick Mooney <pmooney@pfmooney.com>
|
||||||
Patrik Greco <sikevux@sikevux.se>
|
Patrik Greco <sikevux@sikevux.se>
|
||||||
Paul B. Henson <henson@acm.org>
|
Paul B. Henson <henson@acm.org>
|
||||||
@ -516,28 +464,21 @@ CONTRIBUTORS:
|
|||||||
Pawel Jakub Dawidek <pjd@FreeBSD.org>
|
Pawel Jakub Dawidek <pjd@FreeBSD.org>
|
||||||
Pedro Giffuni <pfg@freebsd.org>
|
Pedro Giffuni <pfg@freebsd.org>
|
||||||
Peng <peng.hse@xtaotech.com>
|
Peng <peng.hse@xtaotech.com>
|
||||||
Peng Liu <littlenewton6@gmail.com>
|
|
||||||
Peter Ashford <ashford@accs.com>
|
Peter Ashford <ashford@accs.com>
|
||||||
Peter Dave Hello <hsu@peterdavehello.org>
|
Peter Dave Hello <hsu@peterdavehello.org>
|
||||||
Peter Doherty <peterd@acranox.org>
|
|
||||||
Peter Levine <plevine457@gmail.com>
|
Peter Levine <plevine457@gmail.com>
|
||||||
Peter Wirdemo <peter.wirdemo@gmail.com>
|
Peter Wirdemo <peter.wirdemo@gmail.com>
|
||||||
Petros Koutoupis <petros@petroskoutoupis.com>
|
Petros Koutoupis <petros@petroskoutoupis.com>
|
||||||
Philip Pokorny <ppokorny@penguincomputing.com>
|
Philip Pokorny <ppokorny@penguincomputing.com>
|
||||||
Philipp Riederer <pt@philipptoelke.de>
|
Philipp Riederer <pt@philipptoelke.de>
|
||||||
Phil Kauffman <philip@kauffman.me>
|
Phil Kauffman <philip@kauffman.me>
|
||||||
Phil Sutter <phil@nwl.cc>
|
|
||||||
Ping Huang <huangping@smartx.com>
|
Ping Huang <huangping@smartx.com>
|
||||||
Piotr Kubaj <pkubaj@anongoth.pl>
|
Piotr Kubaj <pkubaj@anongoth.pl>
|
||||||
Piotr P. Stefaniak <pstef@freebsd.org>
|
Piotr P. Stefaniak <pstef@freebsd.org>
|
||||||
poscat <poscat@poscat.moe>
|
|
||||||
Prakash Surya <prakash.surya@delphix.com>
|
Prakash Surya <prakash.surya@delphix.com>
|
||||||
Prasad Joshi <prasadjoshi124@gmail.com>
|
Prasad Joshi <prasadjoshi124@gmail.com>
|
||||||
privb0x23 <privb0x23@users.noreply.github.com>
|
privb0x23 <privb0x23@users.noreply.github.com>
|
||||||
P.SCH <p88@yahoo.com>
|
P.SCH <p88@yahoo.com>
|
||||||
Qiuhao Chen <chenqiuhao1997@gmail.com>
|
|
||||||
Quartz <yyhran@163.com>
|
|
||||||
Quentin Thébault <quentin.thebault@defenso.fr>
|
|
||||||
Quentin Zdanis <zdanisq@gmail.com>
|
Quentin Zdanis <zdanisq@gmail.com>
|
||||||
Rafael Kitover <rkitover@gmail.com>
|
Rafael Kitover <rkitover@gmail.com>
|
||||||
RageLtMan <sempervictus@users.noreply.github.com>
|
RageLtMan <sempervictus@users.noreply.github.com>
|
||||||
@ -546,20 +487,15 @@ CONTRIBUTORS:
|
|||||||
Remy Blank <remy.blank@pobox.com>
|
Remy Blank <remy.blank@pobox.com>
|
||||||
renelson <bnelson@nelsonbe.com>
|
renelson <bnelson@nelsonbe.com>
|
||||||
Reno Reckling <e-github@wthack.de>
|
Reno Reckling <e-github@wthack.de>
|
||||||
René Wirnata <rene.wirnata@pandascience.net>
|
|
||||||
Ricardo M. Correia <ricardo.correia@oracle.com>
|
Ricardo M. Correia <ricardo.correia@oracle.com>
|
||||||
Riccardo Schirone <rschirone91@gmail.com>
|
Riccardo Schirone <rschirone91@gmail.com>
|
||||||
Richard Allen <belperite@gmail.com>
|
Richard Allen <belperite@gmail.com>
|
||||||
Richard Elling <Richard.Elling@RichardElling.com>
|
Richard Elling <Richard.Elling@RichardElling.com>
|
||||||
Richard Kojedzinszky <richard@kojedz.in>
|
|
||||||
Richard Laager <rlaager@wiktel.com>
|
Richard Laager <rlaager@wiktel.com>
|
||||||
Richard Lowe <richlowe@richlowe.net>
|
Richard Lowe <richlowe@richlowe.net>
|
||||||
Richard Sharpe <rsharpe@samba.org>
|
Richard Sharpe <rsharpe@samba.org>
|
||||||
Richard Yao <ryao@gentoo.org>
|
Richard Yao <ryao@gentoo.org>
|
||||||
Rich Ercolani <rincebrain@gmail.com>
|
Rich Ercolani <rincebrain@gmail.com>
|
||||||
Rick Macklem <rmacklem@uoguelph.ca>
|
|
||||||
rilysh <nightquick@proton.me>
|
|
||||||
Robert Evans <evansr@google.com>
|
|
||||||
Robert Novak <sailnfool@gmail.com>
|
Robert Novak <sailnfool@gmail.com>
|
||||||
Roberto Ricci <ricci@disroot.org>
|
Roberto Ricci <ricci@disroot.org>
|
||||||
Rob Norris <robn@despairlabs.com>
|
Rob Norris <robn@despairlabs.com>
|
||||||
@ -569,14 +505,11 @@ CONTRIBUTORS:
|
|||||||
Roman Strashkin <roman.strashkin@nexenta.com>
|
Roman Strashkin <roman.strashkin@nexenta.com>
|
||||||
Ross Williams <ross@ross-williams.net>
|
Ross Williams <ross@ross-williams.net>
|
||||||
Ruben Kerkhof <ruben@rubenkerkhof.com>
|
Ruben Kerkhof <ruben@rubenkerkhof.com>
|
||||||
Ryan <errornointernet@envs.net>
|
|
||||||
Ryan Hirasaki <ryanhirasaki@gmail.com>
|
Ryan Hirasaki <ryanhirasaki@gmail.com>
|
||||||
Ryan Lahfa <masterancpp@gmail.com>
|
Ryan Lahfa <masterancpp@gmail.com>
|
||||||
Ryan Libby <rlibby@FreeBSD.org>
|
Ryan Libby <rlibby@FreeBSD.org>
|
||||||
Ryan Moeller <freqlabs@FreeBSD.org>
|
Ryan Moeller <freqlabs@FreeBSD.org>
|
||||||
Sam Atkinson <samatk@amazon.com>
|
|
||||||
Sam Hathaway <github.com@munkynet.org>
|
Sam Hathaway <github.com@munkynet.org>
|
||||||
Sam James <sam@gentoo.org>
|
|
||||||
Sam Lunt <samuel.j.lunt@gmail.com>
|
Sam Lunt <samuel.j.lunt@gmail.com>
|
||||||
Samuel VERSCHELDE <stormi-github@ylix.fr>
|
Samuel VERSCHELDE <stormi-github@ylix.fr>
|
||||||
Samuel Wycliffe <samuelwycliffe@gmail.com>
|
Samuel Wycliffe <samuelwycliffe@gmail.com>
|
||||||
@ -590,30 +523,20 @@ CONTRIBUTORS:
|
|||||||
Scot W. Stevenson <scot.stevenson@gmail.com>
|
Scot W. Stevenson <scot.stevenson@gmail.com>
|
||||||
Sean Eric Fagan <sef@ixsystems.com>
|
Sean Eric Fagan <sef@ixsystems.com>
|
||||||
Sebastian Gottschall <s.gottschall@dd-wrt.com>
|
Sebastian Gottschall <s.gottschall@dd-wrt.com>
|
||||||
Sebastian Pauka <me@spauka.se>
|
|
||||||
Sebastian Wuerl <s.wuerl@mailbox.org>
|
|
||||||
Sebastien Roy <seb@delphix.com>
|
Sebastien Roy <seb@delphix.com>
|
||||||
Sen Haerens <sen@senhaerens.be>
|
Sen Haerens <sen@senhaerens.be>
|
||||||
Serapheim Dimitropoulos <serapheim@delphix.com>
|
Serapheim Dimitropoulos <serapheim@delphix.com>
|
||||||
Seth Forshee <seth.forshee@canonical.com>
|
Seth Forshee <seth.forshee@canonical.com>
|
||||||
Seth Hoffert <Seth.Hoffert@gmail.com>
|
|
||||||
Seth Troisi <sethtroisi@google.com>
|
|
||||||
Shaan Nobee <sniper111@gmail.com>
|
Shaan Nobee <sniper111@gmail.com>
|
||||||
Shampavman <sham.pavman@nexenta.com>
|
Shampavman <sham.pavman@nexenta.com>
|
||||||
Shaun Tancheff <shaun@aeonazure.com>
|
Shaun Tancheff <shaun@aeonazure.com>
|
||||||
Shawn Bayern <sbayern@law.fsu.edu>
|
|
||||||
Shengqi Chen <harry-chen@outlook.com>
|
|
||||||
SHENGYI HONG <aokblast@FreeBSD.org>
|
|
||||||
Shen Yan <shenyanxxxy@qq.com>
|
Shen Yan <shenyanxxxy@qq.com>
|
||||||
Sietse <sietse@wizdom.nu>
|
|
||||||
Simon Guest <simon.guest@tesujimath.org>
|
Simon Guest <simon.guest@tesujimath.org>
|
||||||
Simon Howard <fraggle@soulsphere.org>
|
|
||||||
Simon Klinkert <simon.klinkert@gmail.com>
|
Simon Klinkert <simon.klinkert@gmail.com>
|
||||||
Sowrabha Gopal <sowrabha.gopal@delphix.com>
|
Sowrabha Gopal <sowrabha.gopal@delphix.com>
|
||||||
Spencer Kinny <spencerkinny1995@gmail.com>
|
Spencer Kinny <spencerkinny1995@gmail.com>
|
||||||
Srikanth N S <srikanth.nagasubbaraoseetharaman@hpe.com>
|
Srikanth N S <srikanth.nagasubbaraoseetharaman@hpe.com>
|
||||||
Stanislav Seletskiy <s.seletskiy@gmail.com>
|
Stanislav Seletskiy <s.seletskiy@gmail.com>
|
||||||
Stefan Lendl <s.lendl@proxmox.com>
|
|
||||||
Steffen Müthing <steffen.muething@iwr.uni-heidelberg.de>
|
Steffen Müthing <steffen.muething@iwr.uni-heidelberg.de>
|
||||||
Stephen Blinick <stephen.blinick@delphix.com>
|
Stephen Blinick <stephen.blinick@delphix.com>
|
||||||
sterlingjensen <sterlingjensen@users.noreply.github.com>
|
sterlingjensen <sterlingjensen@users.noreply.github.com>
|
||||||
@ -629,14 +552,11 @@ CONTRIBUTORS:
|
|||||||
Stéphane Lesimple <speed47_github@speed47.net>
|
Stéphane Lesimple <speed47_github@speed47.net>
|
||||||
Suman Chakravartula <schakrava@gmail.com>
|
Suman Chakravartula <schakrava@gmail.com>
|
||||||
Sydney Vanda <sydney.m.vanda@intel.com>
|
Sydney Vanda <sydney.m.vanda@intel.com>
|
||||||
Syed Shahrukh Hussain <syed.shahrukh@ossrevival.org>
|
|
||||||
Sören Tempel <soeren+git@soeren-tempel.net>
|
Sören Tempel <soeren+git@soeren-tempel.net>
|
||||||
Tamas TEVESZ <ice@extreme.hu>
|
Tamas TEVESZ <ice@extreme.hu>
|
||||||
Teodor Spæren <teodor_spaeren@riseup.net>
|
Teodor Spæren <teodor_spaeren@riseup.net>
|
||||||
TerraTech <TerraTech@users.noreply.github.com>
|
TerraTech <TerraTech@users.noreply.github.com>
|
||||||
Theera K. <tkittich@hotmail.com>
|
|
||||||
Thijs Cramer <thijs.cramer@gmail.com>
|
Thijs Cramer <thijs.cramer@gmail.com>
|
||||||
Thomas Bertschinger <bertschinger@lanl.gov>
|
|
||||||
Thomas Geppert <geppi@digitx.de>
|
Thomas Geppert <geppi@digitx.de>
|
||||||
Thomas Lamprecht <guggentom@hotmail.de>
|
Thomas Lamprecht <guggentom@hotmail.de>
|
||||||
Till Maas <opensource@till.name>
|
Till Maas <opensource@till.name>
|
||||||
@ -647,12 +567,8 @@ CONTRIBUTORS:
|
|||||||
timor <timor.dd@googlemail.com>
|
timor <timor.dd@googlemail.com>
|
||||||
Timothy Day <tday141@gmail.com>
|
Timothy Day <tday141@gmail.com>
|
||||||
Tim Schumacher <timschumi@gmx.de>
|
Tim Schumacher <timschumi@gmx.de>
|
||||||
Tim Smith <tim@mondoo.com>
|
|
||||||
Tino Reichardt <milky-zfs@mcmilk.de>
|
Tino Reichardt <milky-zfs@mcmilk.de>
|
||||||
tleydxdy <shironeko.github@tesaguri.club>
|
|
||||||
Tobin Harding <me@tobin.cc>
|
Tobin Harding <me@tobin.cc>
|
||||||
Todd Seidelmann <seidelma@users.noreply.github.com>
|
|
||||||
Todd Zullinger <tmz@pobox.com>
|
|
||||||
Tom Caputi <tcaputi@datto.com>
|
Tom Caputi <tcaputi@datto.com>
|
||||||
Tom Matthews <tom@axiom-partners.com>
|
Tom Matthews <tom@axiom-partners.com>
|
||||||
Tomohiro Kusumi <kusumi.tomohiro@gmail.com>
|
Tomohiro Kusumi <kusumi.tomohiro@gmail.com>
|
||||||
@ -666,15 +582,12 @@ CONTRIBUTORS:
|
|||||||
Trevor Bautista <trevrb@trevrb.net>
|
Trevor Bautista <trevrb@trevrb.net>
|
||||||
Trey Dockendorf <treydock@gmail.com>
|
Trey Dockendorf <treydock@gmail.com>
|
||||||
Troels Nørgaard <tnn@tradeshift.com>
|
Troels Nørgaard <tnn@tradeshift.com>
|
||||||
tstabrawa <tstabrawa@users.noreply.github.com>
|
|
||||||
Tulsi Jain <tulsi.jain@delphix.com>
|
Tulsi Jain <tulsi.jain@delphix.com>
|
||||||
Turbo Fredriksson <turbo@bayour.com>
|
Turbo Fredriksson <turbo@bayour.com>
|
||||||
Tyler J. Stachecki <stachecki.tyler@gmail.com>
|
Tyler J. Stachecki <stachecki.tyler@gmail.com>
|
||||||
Umer Saleem <usaleem@ixsystems.com>
|
Umer Saleem <usaleem@ixsystems.com>
|
||||||
Vaibhav Bhanawat <vaibhav.bhanawat@delphix.com>
|
|
||||||
Valmiky Arquissandas <kayvlim@gmail.com>
|
Valmiky Arquissandas <kayvlim@gmail.com>
|
||||||
Val Packett <val@packett.cool>
|
Val Packett <val@packett.cool>
|
||||||
Vandana Rungta <vrungta@amazon.com>
|
|
||||||
Vince van Oosten <techhazard@codeforyouand.me>
|
Vince van Oosten <techhazard@codeforyouand.me>
|
||||||
Violet Purcell <vimproved@inventati.org>
|
Violet Purcell <vimproved@inventati.org>
|
||||||
Vipin Kumar Verma <vipin.verma@hpe.com>
|
Vipin Kumar Verma <vipin.verma@hpe.com>
|
||||||
@ -690,7 +603,6 @@ CONTRIBUTORS:
|
|||||||
Windel Bouwman <windel@windel.nl>
|
Windel Bouwman <windel@windel.nl>
|
||||||
Wojciech Małota-Wójcik <outofforest@users.noreply.github.com>
|
Wojciech Małota-Wójcik <outofforest@users.noreply.github.com>
|
||||||
Wolfgang Bumiller <w.bumiller@proxmox.com>
|
Wolfgang Bumiller <w.bumiller@proxmox.com>
|
||||||
XDTG <click1799@163.com>
|
|
||||||
Xin Li <delphij@FreeBSD.org>
|
Xin Li <delphij@FreeBSD.org>
|
||||||
Xinliang Liu <xinliang.liu@linaro.org>
|
Xinliang Liu <xinliang.liu@linaro.org>
|
||||||
xtouqh <xtouqh@hotmail.com>
|
xtouqh <xtouqh@hotmail.com>
|
||||||
@ -702,13 +614,10 @@ CONTRIBUTORS:
|
|||||||
yuina822 <ayuichi@club.kyutech.ac.jp>
|
yuina822 <ayuichi@club.kyutech.ac.jp>
|
||||||
YunQiang Su <syq@debian.org>
|
YunQiang Su <syq@debian.org>
|
||||||
Yuri Pankov <yuri.pankov@gmail.com>
|
Yuri Pankov <yuri.pankov@gmail.com>
|
||||||
Yuxin Wang <yuxinwang9999@gmail.com>
|
|
||||||
Yuxuan Shui <yshuiv7@gmail.com>
|
Yuxuan Shui <yshuiv7@gmail.com>
|
||||||
Zachary Bedell <zac@thebedells.org>
|
Zachary Bedell <zac@thebedells.org>
|
||||||
Zach Dykstra <dykstra.zachary@gmail.com>
|
Zach Dykstra <dykstra.zachary@gmail.com>
|
||||||
zgock <zgock@nuc.base.zgock-lab.net>
|
zgock <zgock@nuc.base.zgock-lab.net>
|
||||||
Zhao Yongming <zym@apache.org>
|
|
||||||
Zhenlei Huang <zlei@FreeBSD.org>
|
|
||||||
Zhu Chuang <chuang@melty.land>
|
Zhu Chuang <chuang@melty.land>
|
||||||
Érico Nogueira <erico.erc@gmail.com>
|
Érico Nogueira <erico.erc@gmail.com>
|
||||||
Đoàn Trần Công Danh <congdanhqx@gmail.com>
|
Đoàn Trần Công Danh <congdanhqx@gmail.com>
|
||||||
|
META
@@ -1,10 +1,10 @@
 Meta: 1
 Name: zfs
 Branch: 1.0
-Version: 2.4.99
+Version: 2.2.3
 Release: 1
 Release-Tags: relext
 License: CDDL
 Author: OpenZFS
-Linux-Maximum: 6.17
-Linux-Minimum: 4.18
+Linux-Maximum: 6.7
+Linux-Minimum: 3.10
@@ -1,7 +1,6 @@
 CLEANFILES =
 dist_noinst_DATA =
 INSTALL_DATA_HOOKS =
-INSTALL_EXEC_HOOKS =
 ALL_LOCAL =
 CLEAN_LOCAL =
 CHECKS = shellcheck checkbashisms
@@ -72,9 +71,6 @@ all: gitrev
 PHONY += install-data-hook $(INSTALL_DATA_HOOKS)
 install-data-hook: $(INSTALL_DATA_HOOKS)
 
-PHONY += install-exec-hook $(INSTALL_EXEC_HOOKS)
-install-exec-hook: $(INSTALL_EXEC_HOOKS)
-
 PHONY += maintainer-clean-local
 maintainer-clean-local:
 	-$(RM) $(GITREV)
@@ -116,10 +112,6 @@ commitcheck:
 		${top_srcdir}/scripts/commitcheck.sh; \
 	fi
 
-CHECKS += spdxcheck
-spdxcheck:
-	$(AM_V_at)$(top_srcdir)/scripts/spdxcheck.pl
-
 if HAVE_PARALLEL
 cstyle_line = -print0 | parallel -X0 ${top_srcdir}/scripts/cstyle.pl -cpP {}
 else
@@ -32,4 +32,4 @@ For more details see the NOTICE, LICENSE and COPYRIGHT files; `UCRL-CODE-235197`
 
 # Supported Kernels
 * The `META` file contains the officially recognized supported Linux kernel versions.
-* Supported FreeBSD versions are any supported branches and releases starting from 13.0-RELEASE.
+* Supported FreeBSD versions are any supported branches and releases starting from 12.4-RELEASE.
@@ -28,7 +28,7 @@ Two release branches are maintained for OpenZFS, they are:
 Minor changes to support these distribution kernels will be applied as
 needed. New kernel versions released after the OpenZFS LTS release are
 not supported. LTS releases will receive patches for at least 2 years.
-The current LTS release is OpenZFS 2.2.
+The current LTS release is OpenZFS 2.1.
 
 * OpenZFS current - Tracks the newest MAJOR.MINOR release. This branch
   includes support for the latest OpenZFS features and recently releases
@@ -24,7 +24,7 @@ zfs_ids_to_path_LDADD = \
 	libzfs.la
 
 
-zhack_CPPFLAGS = $(AM_CPPFLAGS) $(LIBZPOOL_CPPFLAGS)
+zhack_CPPFLAGS = $(AM_CPPFLAGS) $(FORCEDEBUG_CPPFLAGS)
 
 sbin_PROGRAMS += zhack
 CPPCHECKTARGETS += zhack
@@ -39,7 +39,9 @@ zhack_LDADD = \
 
 
 ztest_CFLAGS = $(AM_CFLAGS) $(KERNEL_CFLAGS)
-ztest_CPPFLAGS = $(AM_CPPFLAGS) $(LIBZPOOL_CPPFLAGS)
+# Get rid of compiler warning for unchecked truncating snprintfs on gcc 7.1.1
+ztest_CFLAGS += $(NO_FORMAT_TRUNCATION)
+ztest_CPPFLAGS = $(AM_CPPFLAGS) $(FORCEDEBUG_CPPFLAGS)
 
 sbin_PROGRAMS += ztest
 CPPCHECKTARGETS += ztest
@@ -98,16 +100,17 @@ endif
 
 
 if USING_PYTHON
-bin_SCRIPTS += zarcsummary zarcstat dbufstat zilstat
-CLEANFILES += zarcsummary zarcstat dbufstat zilstat
-dist_noinst_DATA += %D%/zarcsummary %D%/zarcstat.in %D%/dbufstat.in %D%/zilstat.in
+bin_SCRIPTS += arc_summary arcstat dbufstat zilstat
+CLEANFILES += arc_summary arcstat dbufstat zilstat
+dist_noinst_DATA += %D%/arc_summary %D%/arcstat.in %D%/dbufstat.in %D%/zilstat.in
 
-$(call SUBST,zarcstat,%D%/)
+$(call SUBST,arcstat,%D%/)
 $(call SUBST,dbufstat,%D%/)
 $(call SUBST,zilstat,%D%/)
-zarcsummary: %D%/zarcsummary
+arc_summary: %D%/arc_summary
 	$(AM_V_at)cp $< $@
 endif
 
+
 PHONY += cmd
 cmd: $(bin_SCRIPTS) $(bin_PROGRAMS) $(sbin_SCRIPTS) $(sbin_PROGRAMS) $(dist_bin_SCRIPTS) $(zfsexec_PROGRAMS) $(mounthelper_PROGRAMS)
@@ -1,5 +1,4 @@
 #!/usr/bin/env python3
-# SPDX-License-Identifier: BSD-2-Clause
 #
 # Copyright (c) 2008 Ben Rockwood <benr@cuddletech.com>,
 # Copyright (c) 2010 Martin Matuska <mm@FreeBSD.org>,
@@ -34,7 +33,7 @@ Provides basic information on the ARC, its efficiency, the L2ARC (if present),
 the Data Management Unit (DMU), Virtual Devices (VDEVs), and tunables. See
 the in-source documentation and code at
 https://github.com/openzfs/zfs/blob/master/module/zfs/arc.c for details.
-The original introduction to zarcsummary can be found at
+The original introduction to arc_summary can be found at
 http://cuddletech.com/?p=454
 """
 
@@ -161,7 +160,7 @@ elif sys.platform.startswith('linux'):
         return get_params(TUNABLES_PATH)
 
     def get_version_impl(request):
-        # The original zarcsummary called /sbin/modinfo/{spl,zfs} to get
+        # The original arc_summary called /sbin/modinfo/{spl,zfs} to get
         # the version information. We switch to /sys/module/{spl,zfs}/version
         # to make sure we get what is really loaded in the kernel
         try:
|
|||||||
arc_stats = isolate_section('arcstats', kstats_dict)
|
arc_stats = isolate_section('arcstats', kstats_dict)
|
||||||
|
|
||||||
GRAPH_INDENT = ' '*4
|
GRAPH_INDENT = ' '*4
|
||||||
GRAPH_WIDTH = 70
|
GRAPH_WIDTH = 60
|
||||||
arc_max = int(arc_stats['c_max'])
|
|
||||||
arc_size = f_bytes(arc_stats['size'])
|
arc_size = f_bytes(arc_stats['size'])
|
||||||
arc_perc = f_perc(arc_stats['size'], arc_max)
|
arc_perc = f_perc(arc_stats['size'], arc_stats['c_max'])
|
||||||
data_size = f_bytes(arc_stats['data_size'])
|
mfu_size = f_bytes(arc_stats['mfu_size'])
|
||||||
meta_size = f_bytes(arc_stats['metadata_size'])
|
mru_size = f_bytes(arc_stats['mru_size'])
|
||||||
|
meta_size = f_bytes(arc_stats['arc_meta_used'])
|
||||||
|
dnode_limit = f_bytes(arc_stats['arc_dnode_limit'])
|
||||||
dnode_size = f_bytes(arc_stats['dnode_size'])
|
dnode_size = f_bytes(arc_stats['dnode_size'])
|
||||||
|
|
||||||
info_form = ('ARC: {0} ({1}) Data: {2} Meta: {3} Dnode: {4}')
|
info_form = ('ARC: {0} ({1}) MFU: {2} MRU: {3} META: {4} '
|
||||||
info_line = info_form.format(arc_size, arc_perc, data_size, meta_size,
|
'DNODE {5} ({6})')
|
||||||
dnode_size)
|
info_line = info_form.format(arc_size, arc_perc, mfu_size, mru_size,
|
||||||
|
meta_size, dnode_size, dnode_limit)
|
||||||
info_spc = ' '*int((GRAPH_WIDTH-len(info_line))/2)
|
info_spc = ' '*int((GRAPH_WIDTH-len(info_line))/2)
|
||||||
info_line = GRAPH_INDENT+info_spc+info_line
|
info_line = GRAPH_INDENT+info_spc+info_line
|
||||||
|
|
||||||
graph_line = GRAPH_INDENT+'+'+('-'*(GRAPH_WIDTH-2))+'+'
|
graph_line = GRAPH_INDENT+'+'+('-'*(GRAPH_WIDTH-2))+'+'
|
||||||
|
|
||||||
arc_perc = float(int(arc_stats['size'])/arc_max)
|
mfu_perc = float(int(arc_stats['mfu_size'])/int(arc_stats['c_max']))
|
||||||
data_perc = float(int(arc_stats['data_size'])/arc_max)
|
mru_perc = float(int(arc_stats['mru_size'])/int(arc_stats['c_max']))
|
||||||
meta_perc = float(int(arc_stats['metadata_size'])/arc_max)
|
arc_perc = float(int(arc_stats['size'])/int(arc_stats['c_max']))
|
||||||
dnode_perc = float(int(arc_stats['dnode_size'])/arc_max)
|
|
||||||
total_ticks = float(arc_perc)*GRAPH_WIDTH
|
total_ticks = float(arc_perc)*GRAPH_WIDTH
|
||||||
data_ticks = data_perc*GRAPH_WIDTH
|
mfu_ticks = mfu_perc*GRAPH_WIDTH
|
||||||
meta_ticks = meta_perc*GRAPH_WIDTH
|
mru_ticks = mru_perc*GRAPH_WIDTH
|
||||||
dnode_ticks = dnode_perc*GRAPH_WIDTH
|
other_ticks = total_ticks-(mfu_ticks+mru_ticks)
|
||||||
other_ticks = total_ticks-(data_ticks+meta_ticks+dnode_ticks)
|
|
||||||
|
|
||||||
core_form = 'D'*int(data_ticks)+'M'*int(meta_ticks)+'N'*int(dnode_ticks)+\
|
core_form = 'F'*int(mfu_ticks)+'R'*int(mru_ticks)+'O'*int(other_ticks)
|
||||||
'O'*int(other_ticks)
|
|
||||||
core_spc = ' '*(GRAPH_WIDTH-(2+len(core_form)))
|
core_spc = ' '*(GRAPH_WIDTH-(2+len(core_form)))
|
||||||
core_line = GRAPH_INDENT+'|'+core_form+core_spc+'|'
|
core_line = GRAPH_INDENT+'|'+core_form+core_spc+'|'
|
||||||
|
|
||||||
@ -439,7 +437,7 @@ def print_header():
|
|||||||
"""
|
"""
|
||||||
|
|
||||||
# datetime is now recommended over time but we keep the exact formatting
|
# datetime is now recommended over time but we keep the exact formatting
|
||||||
# from the older version of zarcsummary in case there are scripts
|
# from the older version of arc_summary in case there are scripts
|
||||||
# that expect it in this way
|
# that expect it in this way
|
||||||
daydate = time.strftime(DATE_FORMAT)
|
daydate = time.strftime(DATE_FORMAT)
|
||||||
spc_date = LINE_LENGTH-len(daydate)
|
spc_date = LINE_LENGTH-len(daydate)
|
||||||
@ -538,88 +536,56 @@ def section_arc(kstats_dict):
|
|||||||
|
|
||||||
arc_stats = isolate_section('arcstats', kstats_dict)
|
arc_stats = isolate_section('arcstats', kstats_dict)
|
||||||
|
|
||||||
memory_all = arc_stats['memory_all_bytes']
|
throttle = arc_stats['memory_throttle_count']
|
||||||
memory_free = arc_stats['memory_free_bytes']
|
|
||||||
memory_avail = arc_stats['memory_available_bytes']
|
if throttle == '0':
|
||||||
|
health = 'HEALTHY'
|
||||||
|
else:
|
||||||
|
health = 'THROTTLED'
|
||||||
|
|
||||||
|
prt_1('ARC status:', health)
|
||||||
|
prt_i1('Memory throttle count:', throttle)
|
||||||
|
print()
|
||||||
|
|
||||||
arc_size = arc_stats['size']
|
arc_size = arc_stats['size']
|
||||||
arc_target_size = arc_stats['c']
|
arc_target_size = arc_stats['c']
|
||||||
arc_max = arc_stats['c_max']
|
arc_max = arc_stats['c_max']
|
||||||
arc_min = arc_stats['c_min']
|
arc_min = arc_stats['c_min']
|
||||||
dnode_limit = arc_stats['arc_dnode_limit']
|
|
||||||
|
|
||||||
print('ARC status:')
|
|
||||||
prt_i1('Total memory size:', f_bytes(memory_all))
|
|
||||||
prt_i2('Min target size:', f_perc(arc_min, memory_all), f_bytes(arc_min))
|
|
||||||
prt_i2('Max target size:', f_perc(arc_max, memory_all), f_bytes(arc_max))
|
|
||||||
prt_i2('Target size (adaptive):',
|
|
||||||
f_perc(arc_size, arc_max), f_bytes(arc_target_size))
|
|
||||||
prt_i2('Current size:', f_perc(arc_size, arc_max), f_bytes(arc_size))
|
|
||||||
prt_i1('Free memory size:', f_bytes(memory_free))
|
|
||||||
prt_i1('Available memory size:', f_bytes(memory_avail))
|
|
||||||
print()
|
|
||||||
|
|
||||||
compressed_size = arc_stats['compressed_size']
|
|
||||||
uncompressed_size = arc_stats['uncompressed_size']
|
|
||||||
overhead_size = arc_stats['overhead_size']
|
|
||||||
bonus_size = arc_stats['bonus_size']
|
|
||||||
dnode_size = arc_stats['dnode_size']
|
|
||||||
dbuf_size = arc_stats['dbuf_size']
|
|
||||||
hdr_size = arc_stats['hdr_size']
|
|
||||||
l2_hdr_size = arc_stats['l2_hdr_size']
|
|
||||||
abd_chunk_waste_size = arc_stats['abd_chunk_waste_size']
|
|
||||||
|
|
||||||
prt_1('ARC structural breakdown (current size):', f_bytes(arc_size))
|
|
||||||
prt_i2('Compressed size:',
|
|
||||||
f_perc(compressed_size, arc_size), f_bytes(compressed_size))
|
|
||||||
prt_i2('Overhead size:',
|
|
||||||
f_perc(overhead_size, arc_size), f_bytes(overhead_size))
|
|
||||||
prt_i2('Bonus size:',
|
|
||||||
f_perc(bonus_size, arc_size), f_bytes(bonus_size))
|
|
||||||
prt_i2('Dnode size:',
|
|
||||||
f_perc(dnode_size, arc_size), f_bytes(dnode_size))
|
|
||||||
prt_i2('Dbuf size:',
|
|
||||||
f_perc(dbuf_size, arc_size), f_bytes(dbuf_size))
|
|
||||||
prt_i2('Header size:',
|
|
||||||
f_perc(hdr_size, arc_size), f_bytes(hdr_size))
|
|
||||||
prt_i2('L2 header size:',
|
|
||||||
f_perc(l2_hdr_size, arc_size), f_bytes(l2_hdr_size))
|
|
||||||
prt_i2('ABD chunk waste size:',
|
|
||||||
f_perc(abd_chunk_waste_size, arc_size), f_bytes(abd_chunk_waste_size))
|
|
||||||
print()
|
|
||||||
|
|
||||||
meta = arc_stats['meta']
|
meta = arc_stats['meta']
|
||||||
pd = arc_stats['pd']
|
pd = arc_stats['pd']
|
||||||
pm = arc_stats['pm']
|
pm = arc_stats['pm']
|
||||||
data_size = arc_stats['data_size']
|
|
||||||
metadata_size = arc_stats['metadata_size']
|
|
||||||
anon_data = arc_stats['anon_data']
|
anon_data = arc_stats['anon_data']
|
||||||
anon_metadata = arc_stats['anon_metadata']
|
anon_metadata = arc_stats['anon_metadata']
|
||||||
mfu_data = arc_stats['mfu_data']
|
mfu_data = arc_stats['mfu_data']
|
||||||
mfu_metadata = arc_stats['mfu_metadata']
|
mfu_metadata = arc_stats['mfu_metadata']
|
||||||
mfu_edata = arc_stats['mfu_evictable_data']
|
|
||||||
mfu_emetadata = arc_stats['mfu_evictable_metadata']
|
|
||||||
mru_data = arc_stats['mru_data']
|
mru_data = arc_stats['mru_data']
|
||||||
mru_metadata = arc_stats['mru_metadata']
|
mru_metadata = arc_stats['mru_metadata']
|
||||||
mru_edata = arc_stats['mru_evictable_data']
|
|
||||||
mru_emetadata = arc_stats['mru_evictable_metadata']
|
|
||||||
mfug_data = arc_stats['mfu_ghost_data']
|
mfug_data = arc_stats['mfu_ghost_data']
|
||||||
mfug_metadata = arc_stats['mfu_ghost_metadata']
|
mfug_metadata = arc_stats['mfu_ghost_metadata']
|
||||||
mrug_data = arc_stats['mru_ghost_data']
|
mrug_data = arc_stats['mru_ghost_data']
|
||||||
mrug_metadata = arc_stats['mru_ghost_metadata']
|
mrug_metadata = arc_stats['mru_ghost_metadata']
|
||||||
unc_data = arc_stats['uncached_data']
|
unc_data = arc_stats['uncached_data']
|
||||||
unc_metadata = arc_stats['uncached_metadata']
|
unc_metadata = arc_stats['uncached_metadata']
|
||||||
|
bonus_size = arc_stats['bonus_size']
|
||||||
|
dnode_limit = arc_stats['arc_dnode_limit']
|
||||||
|
dnode_size = arc_stats['dnode_size']
|
||||||
|
dbuf_size = arc_stats['dbuf_size']
|
||||||
|
hdr_size = arc_stats['hdr_size']
|
||||||
|
l2_hdr_size = arc_stats['l2_hdr_size']
|
||||||
|
abd_chunk_waste_size = arc_stats['abd_chunk_waste_size']
|
||||||
|
target_size_ratio = '{0}:1'.format(int(arc_max) // int(arc_min))
|
||||||
|
|
||||||
|
prt_2('ARC size (current):',
|
||||||
|
f_perc(arc_size, arc_max), f_bytes(arc_size))
|
||||||
|
prt_i2('Target size (adaptive):',
|
||||||
|
f_perc(arc_target_size, arc_max), f_bytes(arc_target_size))
|
||||||
|
prt_i2('Min size (hard limit):',
|
||||||
|
f_perc(arc_min, arc_max), f_bytes(arc_min))
|
||||||
|
prt_i2('Max size (high water):',
|
||||||
|
target_size_ratio, f_bytes(arc_max))
|
||||||
caches_size = int(anon_data)+int(anon_metadata)+\
|
caches_size = int(anon_data)+int(anon_metadata)+\
|
||||||
int(mfu_data)+int(mfu_metadata)+int(mru_data)+int(mru_metadata)+\
|
int(mfu_data)+int(mfu_metadata)+int(mru_data)+int(mru_metadata)+\
|
||||||
int(unc_data)+int(unc_metadata)
|
int(unc_data)+int(unc_metadata)
|
||||||
|
|
||||||
prt_1('ARC types breakdown (compressed + overhead):', f_bytes(caches_size))
|
|
||||||
prt_i2('Data size:',
|
|
||||||
f_perc(data_size, caches_size), f_bytes(data_size))
|
|
||||||
prt_i2('Metadata size:',
|
|
||||||
f_perc(metadata_size, caches_size), f_bytes(metadata_size))
|
|
||||||
print()
|
|
||||||
|
|
||||||
prt_1('ARC states breakdown (compressed + overhead):', f_bytes(caches_size))
|
|
||||||
prt_i2('Anonymous data size:',
|
prt_i2('Anonymous data size:',
|
||||||
f_perc(anon_data, caches_size), f_bytes(anon_data))
|
f_perc(anon_data, caches_size), f_bytes(anon_data))
|
||||||
prt_i2('Anonymous metadata size:',
|
prt_i2('Anonymous metadata size:',
|
||||||
@ -630,41 +596,50 @@ def section_arc(kstats_dict):
|
|||||||
f_bytes(v / 65536 * caches_size / 65536))
|
f_bytes(v / 65536 * caches_size / 65536))
|
||||||
prt_i2('MFU data size:',
|
prt_i2('MFU data size:',
|
||||||
f_perc(mfu_data, caches_size), f_bytes(mfu_data))
|
f_perc(mfu_data, caches_size), f_bytes(mfu_data))
|
||||||
prt_i2('MFU evictable data size:',
|
|
||||||
f_perc(mfu_edata, caches_size), f_bytes(mfu_edata))
|
|
||||||
prt_i1('MFU ghost data size:', f_bytes(mfug_data))
|
prt_i1('MFU ghost data size:', f_bytes(mfug_data))
|
||||||
v = (s-int(pm))*int(meta)/s
|
v = (s-int(pm))*int(meta)/s
|
||||||
prt_i2('MFU metadata target:', f_perc(v, s),
|
prt_i2('MFU metadata target:', f_perc(v, s),
|
||||||
f_bytes(v / 65536 * caches_size / 65536))
|
f_bytes(v / 65536 * caches_size / 65536))
|
||||||
prt_i2('MFU metadata size:',
|
prt_i2('MFU metadata size:',
|
||||||
f_perc(mfu_metadata, caches_size), f_bytes(mfu_metadata))
|
f_perc(mfu_metadata, caches_size), f_bytes(mfu_metadata))
|
||||||
prt_i2('MFU evictable metadata size:',
|
|
||||||
f_perc(mfu_emetadata, caches_size), f_bytes(mfu_emetadata))
|
|
||||||
prt_i1('MFU ghost metadata size:', f_bytes(mfug_metadata))
|
prt_i1('MFU ghost metadata size:', f_bytes(mfug_metadata))
|
||||||
v = int(pd)*(s-int(meta))/s
|
v = int(pd)*(s-int(meta))/s
|
||||||
prt_i2('MRU data target:', f_perc(v, s),
|
prt_i2('MRU data target:', f_perc(v, s),
|
||||||
f_bytes(v / 65536 * caches_size / 65536))
|
f_bytes(v / 65536 * caches_size / 65536))
|
||||||
prt_i2('MRU data size:',
|
prt_i2('MRU data size:',
|
||||||
f_perc(mru_data, caches_size), f_bytes(mru_data))
|
f_perc(mru_data, caches_size), f_bytes(mru_data))
|
||||||
prt_i2('MRU evictable data size:',
|
|
||||||
f_perc(mru_edata, caches_size), f_bytes(mru_edata))
|
|
||||||
prt_i1('MRU ghost data size:', f_bytes(mrug_data))
|
prt_i1('MRU ghost data size:', f_bytes(mrug_data))
|
||||||
v = int(pm)*int(meta)/s
|
v = int(pm)*int(meta)/s
|
||||||
prt_i2('MRU metadata target:', f_perc(v, s),
|
prt_i2('MRU metadata target:', f_perc(v, s),
|
||||||
f_bytes(v / 65536 * caches_size / 65536))
|
f_bytes(v / 65536 * caches_size / 65536))
|
||||||
prt_i2('MRU metadata size:',
|
prt_i2('MRU metadata size:',
|
||||||
f_perc(mru_metadata, caches_size), f_bytes(mru_metadata))
|
f_perc(mru_metadata, caches_size), f_bytes(mru_metadata))
|
||||||
prt_i2('MRU evictable metadata size:',
|
|
||||||
f_perc(mru_emetadata, caches_size), f_bytes(mru_emetadata))
|
|
||||||
prt_i1('MRU ghost metadata size:', f_bytes(mrug_metadata))
|
prt_i1('MRU ghost metadata size:', f_bytes(mrug_metadata))
|
||||||
prt_i2('Uncached data size:',
|
prt_i2('Uncached data size:',
|
||||||
f_perc(unc_data, caches_size), f_bytes(unc_data))
|
f_perc(unc_data, caches_size), f_bytes(unc_data))
|
||||||
prt_i2('Uncached metadata size:',
|
prt_i2('Uncached metadata size:',
|
||||||
f_perc(unc_metadata, caches_size), f_bytes(unc_metadata))
|
f_perc(unc_metadata, caches_size), f_bytes(unc_metadata))
|
||||||
|
prt_i2('Bonus size:',
|
||||||
|
f_perc(bonus_size, arc_size), f_bytes(bonus_size))
|
||||||
|
prt_i2('Dnode cache target:',
|
||||||
|
f_perc(dnode_limit, arc_max), f_bytes(dnode_limit))
|
||||||
|
prt_i2('Dnode cache size:',
|
||||||
|
f_perc(dnode_size, dnode_limit), f_bytes(dnode_size))
|
||||||
|
prt_i2('Dbuf size:',
|
||||||
|
f_perc(dbuf_size, arc_size), f_bytes(dbuf_size))
|
||||||
|
prt_i2('Header size:',
|
||||||
|
f_perc(hdr_size, arc_size), f_bytes(hdr_size))
|
||||||
|
prt_i2('L2 header size:',
|
||||||
|
f_perc(l2_hdr_size, arc_size), f_bytes(l2_hdr_size))
|
||||||
|
prt_i2('ABD chunk waste size:',
|
||||||
|
f_perc(abd_chunk_waste_size, arc_size), f_bytes(abd_chunk_waste_size))
|
||||||
print()
|
print()
|
||||||
|
|
||||||
print('ARC hash breakdown:')
|
print('ARC hash breakdown:')
|
||||||
prt_i1('Elements:', f_hits(arc_stats['hash_elements']))
|
prt_i1('Elements max:', f_hits(arc_stats['hash_elements_max']))
|
||||||
|
prt_i2('Elements current:',
|
||||||
|
f_perc(arc_stats['hash_elements'], arc_stats['hash_elements_max']),
|
||||||
|
f_hits(arc_stats['hash_elements']))
|
||||||
prt_i1('Collisions:', f_hits(arc_stats['hash_collisions']))
|
prt_i1('Collisions:', f_hits(arc_stats['hash_collisions']))
|
||||||
|
|
||||||
prt_i1('Chain max:', f_hits(arc_stats['hash_chain_max']))
|
prt_i1('Chain max:', f_hits(arc_stats['hash_chain_max']))
|
||||||
@ -672,11 +647,6 @@ def section_arc(kstats_dict):
|
|||||||
print()
|
print()
|
||||||
|
|
||||||
print('ARC misc:')
|
print('ARC misc:')
|
||||||
prt_i2('Uncompressed size:', f_perc(uncompressed_size, compressed_size),
|
|
||||||
f_bytes(uncompressed_size))
|
|
||||||
prt_i1('Memory throttles:', arc_stats['memory_throttle_count'])
|
|
||||||
prt_i1('Memory direct reclaims:', arc_stats['memory_direct_count'])
|
|
||||||
prt_i1('Memory indirect reclaims:', arc_stats['memory_indirect_count'])
|
|
||||||
prt_i1('Deleted:', f_hits(arc_stats['deleted']))
|
prt_i1('Deleted:', f_hits(arc_stats['deleted']))
|
||||||
prt_i1('Mutex misses:', f_hits(arc_stats['mutex_miss']))
|
prt_i1('Mutex misses:', f_hits(arc_stats['mutex_miss']))
|
||||||
prt_i1('Eviction skips:', f_hits(arc_stats['evict_skip']))
|
prt_i1('Eviction skips:', f_hits(arc_stats['evict_skip']))
|
||||||
@ -823,27 +793,18 @@ def section_dmu(kstats_dict):
|
|||||||
|
|
||||||
zfetch_stats = isolate_section('zfetchstats', kstats_dict)
|
zfetch_stats = isolate_section('zfetchstats', kstats_dict)
|
||||||
|
|
||||||
zfetch_access_total = int(zfetch_stats['hits']) +\
|
zfetch_access_total = int(zfetch_stats['hits'])+int(zfetch_stats['misses'])
|
||||||
int(zfetch_stats['future']) + int(zfetch_stats['stride']) +\
|
|
||||||
int(zfetch_stats['past']) + int(zfetch_stats['misses'])
|
|
||||||
|
|
||||||
prt_1('DMU predictive prefetcher calls:', f_hits(zfetch_access_total))
|
prt_1('DMU predictive prefetcher calls:', f_hits(zfetch_access_total))
|
||||||
prt_i2('Stream hits:',
|
prt_i2('Stream hits:',
|
||||||
f_perc(zfetch_stats['hits'], zfetch_access_total),
|
f_perc(zfetch_stats['hits'], zfetch_access_total),
|
||||||
f_hits(zfetch_stats['hits']))
|
f_hits(zfetch_stats['hits']))
|
||||||
future = int(zfetch_stats['future']) + int(zfetch_stats['stride'])
|
|
||||||
prt_i2('Hits ahead of stream:', f_perc(future, zfetch_access_total),
|
|
||||||
f_hits(future))
|
|
||||||
prt_i2('Hits behind stream:',
|
|
||||||
f_perc(zfetch_stats['past'], zfetch_access_total),
|
|
||||||
f_hits(zfetch_stats['past']))
|
|
||||||
prt_i2('Stream misses:',
|
prt_i2('Stream misses:',
|
||||||
f_perc(zfetch_stats['misses'], zfetch_access_total),
|
f_perc(zfetch_stats['misses'], zfetch_access_total),
|
||||||
f_hits(zfetch_stats['misses']))
|
f_hits(zfetch_stats['misses']))
|
||||||
prt_i2('Streams limit reached:',
|
prt_i2('Streams limit reached:',
|
||||||
f_perc(zfetch_stats['max_streams'], zfetch_stats['misses']),
|
f_perc(zfetch_stats['max_streams'], zfetch_stats['misses']),
|
||||||
f_hits(zfetch_stats['max_streams']))
|
f_hits(zfetch_stats['max_streams']))
|
||||||
prt_i1('Stream strides:', f_hits(zfetch_stats['stride']))
|
|
||||||
prt_i1('Prefetches issued', f_hits(zfetch_stats['io_issued']))
|
prt_i1('Prefetches issued', f_hits(zfetch_stats['io_issued']))
|
||||||
print()
|
print()
|
||||||
|
|
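Aside (not part of the diff): the two sides of the section_dmu() hunk account prefetcher activity differently; the newer side splits hits into plain, ahead-of-stream (future/stride) and behind-stream (past) buckets, while the 2.2 side only knows hits and misses. A minimal sketch with a hypothetical stats dict:

    zfetch_stats = {'hits': '900', 'future': '40', 'stride': '30',
                    'past': '10', 'misses': '20'}

    # Newer scheme: every bucket counts toward total prefetcher calls.
    total_new = sum(int(zfetch_stats[k])
                    for k in ('hits', 'future', 'stride', 'past', 'misses'))

    # 2.2 scheme: only plain hits and misses exist.
    total_old = int(zfetch_stats['hits']) + int(zfetch_stats['misses'])

    print(total_new, total_old)  # 1000 920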
@@ -1,8 +1,7 @@
 #!/usr/bin/env @PYTHON_SHEBANG@
-# SPDX-License-Identifier: CDDL-1.0
 #
 # Print out ZFS ARC Statistics exported via kstat(1)
-# For a definition of fields, or usage, use zarcstat -v
+# For a definition of fields, or usage, use arcstat -v
 #
 # This script was originally a fork of the original arcstat.pl (0.1)
 # by Neelakanth Nadgir, originally published on his Sun blog on
@@ -56,7 +55,6 @@ import time
 import getopt
 import re
 import copy
-import os
 
 from signal import signal, SIGINT, SIGWINCH, SIG_DFL
 
|
|||||||
"l2asize": [7, 1024, "Actual (compressed) size of the L2ARC"],
|
"l2asize": [7, 1024, "Actual (compressed) size of the L2ARC"],
|
||||||
"l2size": [6, 1024, "Size of the L2ARC"],
|
"l2size": [6, 1024, "Size of the L2ARC"],
|
||||||
"l2bytes": [7, 1024, "Bytes read per second from the L2ARC"],
|
"l2bytes": [7, 1024, "Bytes read per second from the L2ARC"],
|
||||||
"l2wbytes": [8, 1024, "Bytes written per second to the L2ARC"],
|
|
||||||
"grow": [4, 1000, "ARC grow disabled"],
|
"grow": [4, 1000, "ARC grow disabled"],
|
||||||
"need": [5, 1024, "ARC reclaim need"],
|
"need": [5, 1024, "ARC reclaim need"],
|
||||||
"free": [5, 1024, "ARC free memory"],
|
"free": [5, 1024, "ARC free memory"],
|
||||||
"avail": [5, 1024, "ARC available memory"],
|
"avail": [5, 1024, "ARC available memory"],
|
||||||
"waste": [5, 1024, "Wasted memory due to round up to pagesize"],
|
"waste": [5, 1024, "Wasted memory due to round up to pagesize"],
|
||||||
"ztotal": [6, 1000, "zfetch total prefetcher calls per second"],
|
|
||||||
"zhits": [5, 1000, "zfetch stream hits per second"],
|
|
||||||
"zahead": [6, 1000, "zfetch hits ahead of streams per second"],
|
|
||||||
"zpast": [5, 1000, "zfetch hits behind streams per second"],
|
|
||||||
"zmisses": [7, 1000, "zfetch stream misses per second"],
|
|
||||||
"zmax": [4, 1000, "zfetch limit reached per second"],
|
|
||||||
"zfuture": [7, 1000, "zfetch stream future per second"],
|
|
||||||
"zstride": [7, 1000, "zfetch stream strides per second"],
|
|
||||||
"zissued": [7, 1000, "zfetch prefetches issued per second"],
|
|
||||||
"zactive": [7, 1000, "zfetch prefetches active per second"],
|
|
||||||
}
|
}
|
||||||
|
|
||||||
# ARC structural breakdown from zarcsummary
|
|
||||||
structfields = {
|
|
||||||
"cmp": ["compressed", "Compressed"],
|
|
||||||
"ovh": ["overhead", "Overhead"],
|
|
||||||
"bon": ["bonus", "Bonus"],
|
|
||||||
"dno": ["dnode", "Dnode"],
|
|
||||||
"dbu": ["dbuf", "Dbuf"],
|
|
||||||
"hdr": ["hdr", "Header"],
|
|
||||||
"l2h": ["l2_hdr", "L2 header"],
|
|
||||||
"abd": ["abd_chunk_waste", "ABD chunk waste"],
|
|
||||||
}
|
|
||||||
structstats = { # size stats
|
|
||||||
"percent": "size", # percentage of this value
|
|
||||||
"sz": ["_size", "size"],
|
|
||||||
}
|
|
||||||
|
|
||||||
# ARC types breakdown from zarcsummary
|
|
||||||
typefields = {
|
|
||||||
"data": ["data", "ARC data"],
|
|
||||||
"meta": ["metadata", "ARC metadata"],
|
|
||||||
}
|
|
||||||
typestats = { # size stats
|
|
||||||
"percent": "cachessz", # percentage of this value
|
|
||||||
"tg": ["_target", "target"],
|
|
||||||
"sz": ["_size", "size"],
|
|
||||||
}
|
|
||||||
|
|
||||||
# ARC states breakdown from zarcsummary
|
|
||||||
statefields = {
|
|
||||||
"ano": ["anon", "Anonymous"],
|
|
||||||
"mfu": ["mfu", "MFU"],
|
|
||||||
"mru": ["mru", "MRU"],
|
|
||||||
"unc": ["uncached", "Uncached"],
|
|
||||||
}
|
|
||||||
targetstats = {
|
|
||||||
"percent": "cachessz", # percentage of this value
|
|
||||||
"fields": ["mfu", "mru"], # only applicable to these fields
|
|
||||||
"tg": ["_target", "target"],
|
|
||||||
"dt": ["_data_target", "data target"],
|
|
||||||
"mt": ["_metadata_target", "metadata target"],
|
|
||||||
}
|
|
||||||
statestats = { # size stats
|
|
||||||
"percent": "cachessz", # percentage of this value
|
|
||||||
"sz": ["_size", "size"],
|
|
||||||
"da": ["_data", "data size"],
|
|
||||||
"me": ["_metadata", "metadata size"],
|
|
||||||
"ed": ["_evictable_data", "evictable data size"],
|
|
||||||
"em": ["_evictable_metadata", "evictable metadata size"],
|
|
||||||
}
|
|
||||||
ghoststats = {
|
|
||||||
"fields": ["mfu", "mru"], # only applicable to these fields
|
|
||||||
"gsz": ["_ghost_size", "ghost size"],
|
|
||||||
"gd": ["_ghost_data", "ghost data size"],
|
|
||||||
"gm": ["_ghost_metadata", "ghost metadata size"],
|
|
||||||
}
|
|
||||||
|
|
||||||
# fields and stats
|
|
||||||
fieldstats = [
|
|
||||||
[structfields, structstats],
|
|
||||||
[typefields, typestats],
|
|
||||||
[statefields, targetstats, statestats, ghoststats],
|
|
||||||
]
|
|
||||||
for fs in fieldstats:
|
|
||||||
fields, stats = fs[0], fs[1:]
|
|
||||||
for field, fieldval in fields.items():
|
|
||||||
for group in stats:
|
|
||||||
for stat, statval in group.items():
|
|
||||||
if stat in ["fields", "percent"] or \
|
|
||||||
("fields" in group and field not in group["fields"]):
|
|
||||||
continue
|
|
||||||
colname = field + stat
|
|
||||||
coldesc = fieldval[1] + " " + statval[1]
|
|
||||||
cols[colname] = [len(colname), 1024, coldesc]
|
|
||||||
if "percent" in group:
|
|
||||||
cols[colname + "%"] = [len(colname) + 1, 100, \
|
|
||||||
coldesc + " percentage"]
|
|
||||||
|
|
||||||
v = {}
|
v = {}
|
||||||
hdr = ["time", "read", "ddread", "ddh%", "dmread", "dmh%", "pread", "ph%",
|
hdr = ["time", "read", "ddread", "ddh%", "dmread", "dmh%", "pread", "ph%",
|
||||||
"size", "c", "avail"]
|
"size", "c", "avail"]
|
||||||
xhdr = ["time", "mfu", "mru", "mfug", "mrug", "unc", "eskip", "mtxmis",
|
xhdr = ["time", "mfu", "mru", "mfug", "mrug", "unc", "eskip", "mtxmis",
|
||||||
"dread", "pread", "read"]
|
"dread", "pread", "read"]
|
||||||
zhdr = ["time", "ztotal", "zhits", "zahead", "zpast", "zmisses", "zmax",
|
|
||||||
"zfuture", "zstride", "zissued", "zactive"]
|
|
||||||
sint = 1 # Default interval is 1 second
|
sint = 1 # Default interval is 1 second
|
||||||
count = 1 # Default count is 1
|
count = 1 # Default count is 1
|
||||||
hdr_intr = 20 # Print header every 20 lines of output
|
hdr_intr = 20 # Print header every 20 lines of output
|
||||||
opfile = None
|
opfile = None
|
||||||
sep = " " # Default separator is 2 spaces
|
sep = " " # Default separator is 2 spaces
|
||||||
l2exist = False
|
l2exist = False
|
||||||
cmd = ("Usage: zarcstat [-havxp] [-f fields] [-o file] [-s string] [interval "
|
cmd = ("Usage: arcstat [-havxp] [-f fields] [-o file] [-s string] [interval "
|
||||||
"[count]]\n")
|
"[count]]\n")
|
||||||
cur = {}
|
cur = {}
|
||||||
d = {}
|
d = {}
|
||||||
@ -280,8 +188,6 @@ if sys.platform.startswith('freebsd'):
|
|||||||
|
|
||||||
k = [ctl for ctl in sysctl.filter('kstat.zfs.misc.arcstats')
|
k = [ctl for ctl in sysctl.filter('kstat.zfs.misc.arcstats')
|
||||||
if ctl.type != sysctl.CTLTYPE_NODE]
|
if ctl.type != sysctl.CTLTYPE_NODE]
|
||||||
k += [ctl for ctl in sysctl.filter('kstat.zfs.misc.zfetchstats')
|
|
||||||
if ctl.type != sysctl.CTLTYPE_NODE]
|
|
||||||
|
|
||||||
if not k:
|
if not k:
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
@ -293,28 +199,19 @@ if sys.platform.startswith('freebsd'):
|
|||||||
continue
|
continue
|
||||||
|
|
||||||
name, value = s.name, s.value
|
name, value = s.name, s.value
|
||||||
|
# Trims 'kstat.zfs.misc.arcstats' from the name
|
||||||
if "arcstats" in name:
|
kstat[name[24:]] = int(value)
|
||||||
# Trims 'kstat.zfs.misc.arcstats' from the name
|
|
||||||
kstat[name[24:]] = int(value)
|
|
||||||
else:
|
|
||||||
kstat["zfetch_" + name[27:]] = int(value)
|
|
||||||
|
|
||||||
elif sys.platform.startswith('linux'):
|
elif sys.platform.startswith('linux'):
|
||||||
def kstat_update():
|
def kstat_update():
|
||||||
global kstat
|
global kstat
|
||||||
|
|
||||||
k1 = [line.strip() for line in open('/proc/spl/kstat/zfs/arcstats')]
|
k = [line.strip() for line in open('/proc/spl/kstat/zfs/arcstats')]
|
||||||
|
|
||||||
k2 = ["zfetch_" + line.strip() for line in
|
if not k:
|
||||||
open('/proc/spl/kstat/zfs/zfetchstats')]
|
|
||||||
|
|
||||||
if k1 is None or k2 is None:
|
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
|
|
||||||
del k1[0:2]
|
del k[0:2]
|
||||||
del k2[0:2]
|
|
||||||
k = k1 + k2
|
|
||||||
kstat = {}
|
kstat = {}
|
||||||
|
|
||||||
for s in k:
|
for s in k:
|
||||||
@ -342,17 +239,16 @@ def usage():
|
|||||||
sys.stderr.write("\t -v : List all possible field headers and definitions"
|
sys.stderr.write("\t -v : List all possible field headers and definitions"
|
||||||
"\n")
|
"\n")
|
||||||
sys.stderr.write("\t -x : Print extended stats\n")
|
sys.stderr.write("\t -x : Print extended stats\n")
|
||||||
sys.stderr.write("\t -z : Print zfetch stats\n")
|
|
||||||
sys.stderr.write("\t -f : Specify specific fields to print (see -v)\n")
|
sys.stderr.write("\t -f : Specify specific fields to print (see -v)\n")
|
||||||
sys.stderr.write("\t -o : Redirect output to the specified file\n")
|
sys.stderr.write("\t -o : Redirect output to the specified file\n")
|
||||||
sys.stderr.write("\t -s : Override default field separator with custom "
|
sys.stderr.write("\t -s : Override default field separator with custom "
|
||||||
"character or string\n")
|
"character or string\n")
|
||||||
sys.stderr.write("\t -p : Disable auto-scaling of numerical fields\n")
|
sys.stderr.write("\t -p : Disable auto-scaling of numerical fields\n")
|
||||||
sys.stderr.write("\nExamples:\n")
|
sys.stderr.write("\nExamples:\n")
|
||||||
sys.stderr.write("\tzarcstat -o /tmp/a.log 2 10\n")
|
sys.stderr.write("\tarcstat -o /tmp/a.log 2 10\n")
|
||||||
sys.stderr.write("\tzarcstat -s \",\" -o /tmp/a.log 2 10\n")
|
sys.stderr.write("\tarcstat -s \",\" -o /tmp/a.log 2 10\n")
|
||||||
sys.stderr.write("\tzarcstat -v\n")
|
sys.stderr.write("\tarcstat -v\n")
|
||||||
sys.stderr.write("\tzarcstat -f time,hit%,dh%,ph%,mh% 1\n")
|
sys.stderr.write("\tarcstat -f time,hit%,dh%,ph%,mh% 1\n")
|
||||||
sys.stderr.write("\n")
|
sys.stderr.write("\n")
|
||||||
|
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
@ -366,29 +262,6 @@ def snap_stats():
|
|||||||
kstat_update()
|
kstat_update()
|
||||||
|
|
||||||
cur = kstat
|
cur = kstat
|
||||||
|
|
||||||
# fill in additional values from zarcsummary
|
|
||||||
cur["caches_size"] = caches_size = cur["anon_data"]+cur["anon_metadata"]+\
|
|
||||||
cur["mfu_data"]+cur["mfu_metadata"]+cur["mru_data"]+cur["mru_metadata"]+\
|
|
||||||
cur["uncached_data"]+cur["uncached_metadata"]
|
|
||||||
s = 4294967296
|
|
||||||
pd = cur["pd"]
|
|
||||||
pm = cur["pm"]
|
|
||||||
meta = cur["meta"]
|
|
||||||
v = (s-int(pd))*(s-int(meta))/s
|
|
||||||
cur["mfu_data_target"] = v / 65536 * caches_size / 65536
|
|
||||||
v = (s-int(pm))*int(meta)/s
|
|
||||||
cur["mfu_metadata_target"] = v / 65536 * caches_size / 65536
|
|
||||||
v = int(pd)*(s-int(meta))/s
|
|
||||||
cur["mru_data_target"] = v / 65536 * caches_size / 65536
|
|
||||||
v = int(pm)*int(meta)/s
|
|
||||||
cur["mru_metadata_target"] = v / 65536 * caches_size / 65536
|
|
||||||
|
|
||||||
cur["data_target"] = cur["mfu_data_target"] + cur["mru_data_target"]
|
|
||||||
cur["metadata_target"] = cur["mfu_metadata_target"] + cur["mru_metadata_target"]
|
|
||||||
cur["mfu_target"] = cur["mfu_data_target"] + cur["mfu_metadata_target"]
|
|
||||||
cur["mru_target"] = cur["mru_data_target"] + cur["mru_metadata_target"]
|
|
||||||
|
|
||||||
for key in cur:
|
for key in cur:
|
||||||
if re.match(key, "class"):
|
if re.match(key, "class"):
|
||||||
continue
|
continue
|
||||||
@ -398,34 +271,31 @@ def snap_stats():
|
|||||||
d[key] = cur[key]
|
d[key] = cur[key]
|
||||||
|
|
||||||
|
|
||||||
def isint(num):
|
|
||||||
if isinstance(num, float):
|
|
||||||
return num.is_integer()
|
|
||||||
if isinstance(num, int):
|
|
||||||
return True
|
|
||||||
return False
|
|
||||||
|
|
||||||
|
|
||||||
def prettynum(sz, scale, num=0):
|
def prettynum(sz, scale, num=0):
|
||||||
suffix = ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']
|
suffix = [' ', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']
|
||||||
index = 0
|
index = 0
|
||||||
|
save = 0
|
||||||
|
|
||||||
# Special case for date field
|
# Special case for date field
|
||||||
if scale == -1:
|
if scale == -1:
|
||||||
return "%s" % num
|
return "%s" % num
|
||||||
|
|
||||||
if scale != 100:
|
# Rounding error, return 0
|
||||||
while abs(num) > scale and index < 5:
|
elif 0 < num < 1:
|
||||||
num = num / scale
|
num = 0
|
||||||
index += 1
|
|
||||||
|
|
||||||
width = sz - (0 if index == 0 else 1)
|
while abs(num) > scale and index < 5:
|
||||||
intlen = len("%.0f" % num) # %.0f rounds to nearest int
|
save = num
|
||||||
if sint == 1 and isint(num) or width < intlen + 2:
|
num = num / scale
|
||||||
decimal = 0
|
index += 1
|
||||||
|
|
||||||
|
if index == 0:
|
||||||
|
return "%*d" % (sz, num)
|
||||||
|
|
||||||
|
if abs(save / scale) < 10:
|
||||||
|
return "%*.1f%s" % (sz - 1, num, suffix[index])
|
||||||
else:
|
else:
|
||||||
decimal = 1
|
return "%*d%s" % (sz - 1, num, suffix[index])
|
||||||
return "%*.*f%s" % (width, decimal, num, suffix[index])
|
|
||||||
|
|
||||||
|
|
||||||
def print_values():
|
def print_values():
|
||||||
@ -487,7 +357,6 @@ def init():
|
|||||||
global count
|
global count
|
||||||
global hdr
|
global hdr
|
||||||
global xhdr
|
global xhdr
|
||||||
global zhdr
|
|
||||||
global opfile
|
global opfile
|
||||||
global sep
|
global sep
|
||||||
global out
|
global out
|
||||||
@ -499,17 +368,15 @@ def init():
|
|||||||
xflag = False
|
xflag = False
|
||||||
hflag = False
|
hflag = False
|
||||||
vflag = False
|
vflag = False
|
||||||
zflag = False
|
|
||||||
i = 1
|
i = 1
|
||||||
|
|
||||||
try:
|
try:
|
||||||
opts, args = getopt.getopt(
|
opts, args = getopt.getopt(
|
||||||
sys.argv[1:],
|
sys.argv[1:],
|
||||||
"axzo:hvs:f:p",
|
"axo:hvs:f:p",
|
||||||
[
|
[
|
||||||
"all",
|
"all",
|
||||||
"extended",
|
"extended",
|
||||||
"zfetch",
|
|
||||||
"outfile",
|
"outfile",
|
||||||
"help",
|
"help",
|
||||||
"verbose",
|
"verbose",
|
||||||
@ -543,15 +410,13 @@ def init():
|
|||||||
i += 1
|
i += 1
|
||||||
if opt in ('-p', '--parsable'):
|
if opt in ('-p', '--parsable'):
|
||||||
pretty_print = False
|
pretty_print = False
|
||||||
if opt in ('-z', '--zfetch'):
|
|
||||||
zflag = True
|
|
||||||
i += 1
|
i += 1
|
||||||
|
|
||||||
argv = sys.argv[i:]
|
argv = sys.argv[i:]
|
||||||
sint = int(argv[0]) if argv else sint
|
sint = int(argv[0]) if argv else sint
|
||||||
count = int(argv[1]) if len(argv) > 1 else (0 if len(argv) > 0 else 1)
|
count = int(argv[1]) if len(argv) > 1 else (0 if len(argv) > 0 else 1)
|
||||||
|
|
||||||
if hflag or (xflag and zflag) or ((zflag or xflag) and desired_cols):
|
if hflag or (xflag and desired_cols):
|
||||||
usage()
|
usage()
|
||||||
|
|
||||||
if vflag:
|
if vflag:
|
||||||
@ -560,9 +425,6 @@ def init():
|
|||||||
if xflag:
|
if xflag:
|
||||||
hdr = xhdr
|
hdr = xhdr
|
||||||
|
|
||||||
if zflag:
|
|
||||||
hdr = zhdr
|
|
||||||
|
|
||||||
update_hdr_intr()
|
update_hdr_intr()
|
||||||
|
|
||||||
# check if L2ARC exists
|
# check if L2ARC exists
|
||||||
@@ -615,149 +477,120 @@ def calculate():
 
     v = dict()
     v["time"] = time.strftime("%H:%M:%S", time.localtime())
-    v["hits"] = d["hits"] / sint
-    v["iohs"] = d["iohits"] / sint
-    v["miss"] = d["misses"] / sint
+    v["hits"] = d["hits"] // sint
+    v["iohs"] = d["iohits"] // sint
+    v["miss"] = d["misses"] // sint
     v["read"] = v["hits"] + v["iohs"] + v["miss"]
-    v["hit%"] = 100 * v["hits"] / v["read"] if v["read"] > 0 else 0
-    v["ioh%"] = 100 * v["iohs"] / v["read"] if v["read"] > 0 else 0
+    v["hit%"] = 100 * v["hits"] // v["read"] if v["read"] > 0 else 0
+    v["ioh%"] = 100 * v["iohs"] // v["read"] if v["read"] > 0 else 0
     v["miss%"] = 100 - v["hit%"] - v["ioh%"] if v["read"] > 0 else 0
 
-    v["dhit"] = (d["demand_data_hits"] + d["demand_metadata_hits"]) / sint
-    v["dioh"] = (d["demand_data_iohits"] + d["demand_metadata_iohits"]) / sint
-    v["dmis"] = (d["demand_data_misses"] + d["demand_metadata_misses"]) / sint
+    v["dhit"] = (d["demand_data_hits"] + d["demand_metadata_hits"]) // sint
+    v["dioh"] = (d["demand_data_iohits"] + d["demand_metadata_iohits"]) // sint
+    v["dmis"] = (d["demand_data_misses"] + d["demand_metadata_misses"]) // sint
 
     v["dread"] = v["dhit"] + v["dioh"] + v["dmis"]
-    v["dh%"] = 100 * v["dhit"] / v["dread"] if v["dread"] > 0 else 0
-    v["di%"] = 100 * v["dioh"] / v["dread"] if v["dread"] > 0 else 0
+    v["dh%"] = 100 * v["dhit"] // v["dread"] if v["dread"] > 0 else 0
+    v["di%"] = 100 * v["dioh"] // v["dread"] if v["dread"] > 0 else 0
     v["dm%"] = 100 - v["dh%"] - v["di%"] if v["dread"] > 0 else 0
 
-    v["ddhit"] = d["demand_data_hits"] / sint
-    v["ddioh"] = d["demand_data_iohits"] / sint
-    v["ddmis"] = d["demand_data_misses"] / sint
+    v["ddhit"] = d["demand_data_hits"] // sint
+    v["ddioh"] = d["demand_data_iohits"] // sint
+    v["ddmis"] = d["demand_data_misses"] // sint
 
     v["ddread"] = v["ddhit"] + v["ddioh"] + v["ddmis"]
-    v["ddh%"] = 100 * v["ddhit"] / v["ddread"] if v["ddread"] > 0 else 0
-    v["ddi%"] = 100 * v["ddioh"] / v["ddread"] if v["ddread"] > 0 else 0
+    v["ddh%"] = 100 * v["ddhit"] // v["ddread"] if v["ddread"] > 0 else 0
+    v["ddi%"] = 100 * v["ddioh"] // v["ddread"] if v["ddread"] > 0 else 0
     v["ddm%"] = 100 - v["ddh%"] - v["ddi%"] if v["ddread"] > 0 else 0
 
-    v["dmhit"] = d["demand_metadata_hits"] / sint
-    v["dmioh"] = d["demand_metadata_iohits"] / sint
-    v["dmmis"] = d["demand_metadata_misses"] / sint
+    v["dmhit"] = d["demand_metadata_hits"] // sint
+    v["dmioh"] = d["demand_metadata_iohits"] // sint
+    v["dmmis"] = d["demand_metadata_misses"] // sint
 
     v["dmread"] = v["dmhit"] + v["dmioh"] + v["dmmis"]
-    v["dmh%"] = 100 * v["dmhit"] / v["dmread"] if v["dmread"] > 0 else 0
-    v["dmi%"] = 100 * v["dmioh"] / v["dmread"] if v["dmread"] > 0 else 0
+    v["dmh%"] = 100 * v["dmhit"] // v["dmread"] if v["dmread"] > 0 else 0
+    v["dmi%"] = 100 * v["dmioh"] // v["dmread"] if v["dmread"] > 0 else 0
     v["dmm%"] = 100 - v["dmh%"] - v["dmi%"] if v["dmread"] > 0 else 0
 
-    v["phit"] = (d["prefetch_data_hits"] + d["prefetch_metadata_hits"]) / sint
+    v["phit"] = (d["prefetch_data_hits"] + d["prefetch_metadata_hits"]) // sint
     v["pioh"] = (d["prefetch_data_iohits"] +
-                 d["prefetch_metadata_iohits"]) / sint
+                 d["prefetch_metadata_iohits"]) // sint
     v["pmis"] = (d["prefetch_data_misses"] +
-                 d["prefetch_metadata_misses"]) / sint
+                 d["prefetch_metadata_misses"]) // sint
 
     v["pread"] = v["phit"] + v["pioh"] + v["pmis"]
-    v["ph%"] = 100 * v["phit"] / v["pread"] if v["pread"] > 0 else 0
-    v["pi%"] = 100 * v["pioh"] / v["pread"] if v["pread"] > 0 else 0
+    v["ph%"] = 100 * v["phit"] // v["pread"] if v["pread"] > 0 else 0
+    v["pi%"] = 100 * v["pioh"] // v["pread"] if v["pread"] > 0 else 0
     v["pm%"] = 100 - v["ph%"] - v["pi%"] if v["pread"] > 0 else 0
 
-    v["pdhit"] = d["prefetch_data_hits"] / sint
-    v["pdioh"] = d["prefetch_data_iohits"] / sint
-    v["pdmis"] = d["prefetch_data_misses"] / sint
+    v["pdhit"] = d["prefetch_data_hits"] // sint
+    v["pdioh"] = d["prefetch_data_iohits"] // sint
+    v["pdmis"] = d["prefetch_data_misses"] // sint
 
     v["pdread"] = v["pdhit"] + v["pdioh"] + v["pdmis"]
-    v["pdh%"] = 100 * v["pdhit"] / v["pdread"] if v["pdread"] > 0 else 0
-    v["pdi%"] = 100 * v["pdioh"] / v["pdread"] if v["pdread"] > 0 else 0
+    v["pdh%"] = 100 * v["pdhit"] // v["pdread"] if v["pdread"] > 0 else 0
+    v["pdi%"] = 100 * v["pdioh"] // v["pdread"] if v["pdread"] > 0 else 0
    v["pdm%"] = 100 - v["pdh%"] - v["pdi%"] if v["pdread"] > 0 else 0
 
-    v["pmhit"] = d["prefetch_metadata_hits"] / sint
-    v["pmioh"] = d["prefetch_metadata_iohits"] / sint
-    v["pmmis"] = d["prefetch_metadata_misses"] / sint
+    v["pmhit"] = d["prefetch_metadata_hits"] // sint
+    v["pmioh"] = d["prefetch_metadata_iohits"] // sint
+    v["pmmis"] = d["prefetch_metadata_misses"] // sint
 
     v["pmread"] = v["pmhit"] + v["pmioh"] + v["pmmis"]
-    v["pmh%"] = 100 * v["pmhit"] / v["pmread"] if v["pmread"] > 0 else 0
-    v["pmi%"] = 100 * v["pmioh"] / v["pmread"] if v["pmread"] > 0 else 0
+    v["pmh%"] = 100 * v["pmhit"] // v["pmread"] if v["pmread"] > 0 else 0
+    v["pmi%"] = 100 * v["pmioh"] // v["pmread"] if v["pmread"] > 0 else 0
     v["pmm%"] = 100 - v["pmh%"] - v["pmi%"] if v["pmread"] > 0 else 0
 
     v["mhit"] = (d["prefetch_metadata_hits"] +
-                 d["demand_metadata_hits"]) / sint
+                 d["demand_metadata_hits"]) // sint
     v["mioh"] = (d["prefetch_metadata_iohits"] +
-                 d["demand_metadata_iohits"]) / sint
+                 d["demand_metadata_iohits"]) // sint
     v["mmis"] = (d["prefetch_metadata_misses"] +
-                 d["demand_metadata_misses"]) / sint
+                 d["demand_metadata_misses"]) // sint
 
     v["mread"] = v["mhit"] + v["mioh"] + v["mmis"]
-    v["mh%"] = 100 * v["mhit"] / v["mread"] if v["mread"] > 0 else 0
-    v["mi%"] = 100 * v["mioh"] / v["mread"] if v["mread"] > 0 else 0
+    v["mh%"] = 100 * v["mhit"] // v["mread"] if v["mread"] > 0 else 0
+    v["mi%"] = 100 * v["mioh"] // v["mread"] if v["mread"] > 0 else 0
     v["mm%"] = 100 - v["mh%"] - v["mi%"] if v["mread"] > 0 else 0
 
     v["arcsz"] = cur["size"]
     v["size"] = cur["size"]
     v["c"] = cur["c"]
-    v["mfu"] = d["mfu_hits"] / sint
-    v["mru"] = d["mru_hits"] / sint
-    v["mrug"] = d["mru_ghost_hits"] / sint
-    v["mfug"] = d["mfu_ghost_hits"] / sint
-    v["unc"] = d["uncached_hits"] / sint
-    v["eskip"] = d["evict_skip"] / sint
-    v["el2skip"] = d["evict_l2_skip"] / sint
-    v["el2cach"] = d["evict_l2_cached"] / sint
-    v["el2el"] = d["evict_l2_eligible"] / sint
-    v["el2mfu"] = d["evict_l2_eligible_mfu"] / sint
-    v["el2mru"] = d["evict_l2_eligible_mru"] / sint
-    v["el2inel"] = d["evict_l2_ineligible"] / sint
-    v["mtxmis"] = d["mutex_miss"] / sint
-    v["ztotal"] = (d["zfetch_hits"] + d["zfetch_future"] + d["zfetch_stride"] +
-                   d["zfetch_past"] + d["zfetch_misses"]) / sint
-    v["zhits"] = d["zfetch_hits"] / sint
-    v["zahead"] = (d["zfetch_future"] + d["zfetch_stride"]) / sint
-    v["zpast"] = d["zfetch_past"] / sint
-    v["zmisses"] = d["zfetch_misses"] / sint
-    v["zmax"] = d["zfetch_max_streams"] / sint
-    v["zfuture"] = d["zfetch_future"] / sint
-    v["zstride"] = d["zfetch_stride"] / sint
-    v["zissued"] = d["zfetch_io_issued"] / sint
-    v["zactive"] = d["zfetch_io_active"] / sint
-
-    # ARC structural breakdown, ARC types breakdown, ARC states breakdown
-    v["cachessz"] = cur["caches_size"]
-    for fs in fieldstats:
-        fields, stats = fs[0], fs[1:]
-        for field, fieldval in fields.items():
-            for group in stats:
-                for stat, statval in group.items():
-                    if stat in ["fields", "percent"] or \
-                       ("fields" in group and field not in group["fields"]):
-                        continue
-                    colname = field + stat
-                    v[colname] = cur[fieldval[0] + statval[0]]
-                    if "percent" in group:
-                        v[colname + "%"] = 100 * v[colname] / \
-                            v[group["percent"]] if v[group["percent"]] > 0 else 0
+    v["mfu"] = d["mfu_hits"] // sint
+    v["mru"] = d["mru_hits"] // sint
+    v["mrug"] = d["mru_ghost_hits"] // sint
+    v["mfug"] = d["mfu_ghost_hits"] // sint
+    v["unc"] = d["uncached_hits"] // sint
+    v["eskip"] = d["evict_skip"] // sint
+    v["el2skip"] = d["evict_l2_skip"] // sint
+    v["el2cach"] = d["evict_l2_cached"] // sint
+    v["el2el"] = d["evict_l2_eligible"] // sint
+    v["el2mfu"] = d["evict_l2_eligible_mfu"] // sint
+    v["el2mru"] = d["evict_l2_eligible_mru"] // sint
+    v["el2inel"] = d["evict_l2_ineligible"] // sint
+    v["mtxmis"] = d["mutex_miss"] // sint
 
     if l2exist:
-        l2asize = cur["l2_asize"]
-        v["l2hits"] = d["l2_hits"] / sint
-        v["l2miss"] = d["l2_misses"] / sint
+        v["l2hits"] = d["l2_hits"] // sint
+        v["l2miss"] = d["l2_misses"] // sint
         v["l2read"] = v["l2hits"] + v["l2miss"]
-        v["l2hit%"] = 100 * v["l2hits"] / v["l2read"] if v["l2read"] > 0 else 0
+        v["l2hit%"] = 100 * v["l2hits"] // v["l2read"] if v["l2read"] > 0 else 0
 
         v["l2miss%"] = 100 - v["l2hit%"] if v["l2read"] > 0 else 0
-        v["l2asize"] = l2asize
+        v["l2asize"] = cur["l2_asize"]
         v["l2size"] = cur["l2_size"]
-        v["l2bytes"] = d["l2_read_bytes"] / sint
-        v["l2wbytes"] = d["l2_write_bytes"] / sint
+        v["l2bytes"] = d["l2_read_bytes"] // sint
 
         v["l2pref"] = cur["l2_prefetch_asize"]
         v["l2mfu"] = cur["l2_mfu_asize"]
         v["l2mru"] = cur["l2_mru_asize"]
         v["l2data"] = cur["l2_bufc_data_asize"]
         v["l2meta"] = cur["l2_bufc_metadata_asize"]
-        v["l2pref%"] = 100 * v["l2pref"] / l2asize if l2asize > 0 else 0
-        v["l2mfu%"] = 100 * v["l2mfu"] / l2asize if l2asize > 0 else 0
-        v["l2mru%"] = 100 * v["l2mru"] / l2asize if l2asize > 0 else 0
-        v["l2data%"] = 100 * v["l2data"] / l2asize if l2asize > 0 else 0
-        v["l2meta%"] = 100 * v["l2meta"] / l2asize if l2asize > 0 else 0
+        v["l2pref%"] = 100 * v["l2pref"] // v["l2asize"]
+        v["l2mfu%"] = 100 * v["l2mfu"] // v["l2asize"]
+        v["l2mru%"] = 100 * v["l2mru"] // v["l2asize"]
+        v["l2data%"] = 100 * v["l2data"] // v["l2asize"]
+        v["l2meta%"] = 100 * v["l2meta"] // v["l2asize"]
 
     v["grow"] = 0 if cur["arc_no_grow"] else 1
     v["need"] = cur["arc_need_free"]
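Two behavioral differences are visible in this hunk: the base side computes all rates with true division (/) so prettynum() can render decimals, where the older side floors with //, and the base side guards the L2ARC breakdown percentages against a zero l2_asize. A standalone illustration with made-up interval deltas (sint = 1):

    hits, misses = 7, 3
    reads = hits + misses

    print(100 * hits // reads)  # 70   -- floored percentage (older side)
    print(100 * hits / reads)   # 70.0 -- true division (base side)

    # The base side also avoids a ZeroDivisionError when no L2ARC
    # space has been allocated yet:
    l2asize, l2pref = 0, 0
    print(100 * l2pref / l2asize if l2asize > 0 else 0)  # 0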
@@ -767,7 +600,6 @@ def calculate():
 
-
 def main():
 
     global sint
     global count
     global hdr_intr
@@ -1,5 +1,4 @@
 #!/usr/bin/env @PYTHON_SHEBANG@
-# SPDX-License-Identifier: CDDL-1.0
 #
 # Print out statistics for all cached dmu buffers. This information
 # is available through the dbufs kstat and may be post-processed as
@@ -38,7 +37,7 @@ import re
 
 bhdr = ["pool", "objset", "object", "level", "blkid", "offset", "dbsize"]
 bxhdr = ["pool", "objset", "object", "level", "blkid", "offset", "dbsize",
-         "usize", "meta", "state", "dbholds", "dbc", "list", "atype", "flags",
+         "meta", "state", "dbholds", "dbc", "list", "atype", "flags",
          "count", "asize", "access", "mru", "gmru", "mfu", "gmfu", "l2",
         "l2_dattr", "l2_asize", "l2_comp", "aholds", "dtype", "btype",
         "data_bs", "meta_bs", "bsize", "lvls", "dholds", "blocks", "dsize"]
@@ -48,17 +47,17 @@ dhdr = ["pool", "objset", "object", "dtype", "cached"]
 dxhdr = ["pool", "objset", "object", "dtype", "btype", "data_bs", "meta_bs",
         "bsize", "lvls", "dholds", "blocks", "dsize", "cached", "direct",
         "indirect", "bonus", "spill"]
-dincompat = ["level", "blkid", "offset", "dbsize", "usize", "meta", "state",
-             "dbholds", "dbc", "list", "atype", "flags", "count", "asize",
-             "access", "mru", "gmru", "mfu", "gmfu", "l2", "l2_dattr",
-             "l2_asize", "l2_comp", "aholds"]
+dincompat = ["level", "blkid", "offset", "dbsize", "meta", "state", "dbholds",
+             "dbc", "list", "atype", "flags", "count", "asize", "access",
+             "mru", "gmru", "mfu", "gmfu", "l2", "l2_dattr", "l2_asize",
+             "l2_comp", "aholds"]
 
 thdr = ["pool", "objset", "dtype", "cached"]
 txhdr = ["pool", "objset", "dtype", "cached", "direct", "indirect",
         "bonus", "spill"]
-tincompat = ["object", "level", "blkid", "offset", "dbsize", "usize", "meta",
-             "state", "dbc", "dbholds", "list", "atype", "flags", "count",
-             "asize", "access", "mru", "gmru", "mfu", "gmfu", "l2", "l2_dattr",
+tincompat = ["object", "level", "blkid", "offset", "dbsize", "meta", "state",
+             "dbc", "dbholds", "list", "atype", "flags", "count", "asize",
+             "access", "mru", "gmru", "mfu", "gmfu", "l2", "l2_dattr",
             "l2_asize", "l2_comp", "aholds", "btype", "data_bs", "meta_bs",
             "bsize", "lvls", "dholds", "blocks", "dsize"]
 
@@ -71,7 +70,6 @@ cols = {
     "blkid": [8, -1, "block number of buffer"],
     "offset": [12, 1024, "offset in object of buffer"],
     "dbsize": [7, 1024, "size of buffer"],
-    "usize": [7, 1024, "size of attached user data"],
     "meta": [4, -1, "is this buffer metadata?"],
     "state": [5, -1, "state of buffer (read, cached, etc)"],
     "dbholds": [7, 1000, "number of holds on buffer"],
@@ -401,7 +399,6 @@ def update_dict(d, k, line, labels):
     key = line[labels[k]]
 
     dbsize = int(line[labels['dbsize']])
-    usize = int(line[labels['usize']])
     blkid = int(line[labels['blkid']])
     level = int(line[labels['level']])
 
@@ -419,7 +416,7 @@ def update_dict(d, k, line, labels):
         d[pool][objset][key]['indirect'] = 0
         d[pool][objset][key]['spill'] = 0
 
-    d[pool][objset][key]['cached'] += dbsize + usize
+    d[pool][objset][key]['cached'] += dbsize
 
     if blkid == -1:
         d[pool][objset][key]['bonus'] += dbsize
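The usize column removed here tracks user data attached to each dbuf; on the base side it is folded into the per-object 'cached' total. A toy version of that aggregation (the sample rows are made up; the real script parses the dbufs kstat):

    from collections import defaultdict

    # Sum buffer size plus attached user-data size per
    # (pool, objset, object), as the base-side update_dict() does.
    cached = defaultdict(int)
    rows = [
        {"pool": "tank", "objset": 54, "object": 7, "dbsize": 16384, "usize": 512},
        {"pool": "tank", "objset": 54, "object": 7, "dbsize": 131072, "usize": 0},
    ]
    for r in rows:
        cached[(r["pool"], r["objset"], r["object"])] += r["dbsize"] + r["usize"]

    print(cached[("tank", 54, 7)])  # 147968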
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * CDDL HEADER START
  *
@@ -270,7 +269,8 @@ main(int argc, char **argv)
         return (MOUNT_USAGE);
     }
 
-    if (sloppy || libzfs_envvar_is_set("ZFS_MOUNT_HELPER")) {
+    if (!zfsutil || sloppy ||
+        libzfs_envvar_is_set("ZFS_MOUNT_HELPER")) {
         zfs_adjust_mount_options(zhp, mntpoint, mntopts, mtabopt);
     }
 
@@ -337,7 +337,7 @@ main(int argc, char **argv)
         dataset, mntpoint, mntflags, zfsflags, mntopts, mtabopt);
 
     if (!fake) {
-        if (!remount && !sloppy &&
+        if (zfsutil && !sloppy &&
             !libzfs_envvar_is_set("ZFS_MOUNT_HELPER")) {
             error = zfs_mount_at(zhp, mntopts, mntflags, mntpoint);
             if (error) {
@@ -1,5 +1,5 @@
 raidz_test_CFLAGS = $(AM_CFLAGS) $(KERNEL_CFLAGS)
-raidz_test_CPPFLAGS = $(AM_CPPFLAGS) $(LIBZPOOL_CPPFLAGS)
+raidz_test_CPPFLAGS = $(AM_CPPFLAGS) $(FORCEDEBUG_CPPFLAGS)
 
 bin_PROGRAMS += raidz_test
 CPPCHECKTARGETS += raidz_test
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * CDDL HEADER START
  *
@@ -85,10 +84,10 @@ run_gen_bench_impl(const char *impl)
 
         if (rto_opts.rto_expand) {
             rm_bench = vdev_raidz_map_alloc_expanded(
-                &zio_bench,
+                zio_bench.io_abd,
+                zio_bench.io_size, zio_bench.io_offset,
                 rto_opts.rto_ashift, ncols+1, ncols,
-                fn+1, rto_opts.rto_expand_offset,
-                0, B_FALSE);
+                fn+1, rto_opts.rto_expand_offset);
         } else {
             rm_bench = vdev_raidz_map_alloc(&zio_bench,
                 BENCH_ASHIFT, ncols, fn+1);
@@ -173,10 +172,10 @@ run_rec_bench_impl(const char *impl)
 
         if (rto_opts.rto_expand) {
             rm_bench = vdev_raidz_map_alloc_expanded(
-                &zio_bench,
+                zio_bench.io_abd,
+                zio_bench.io_size, zio_bench.io_offset,
                 BENCH_ASHIFT, ncols+1, ncols,
-                PARITY_PQR,
-                rto_opts.rto_expand_offset, 0, B_FALSE);
+                PARITY_PQR, rto_opts.rto_expand_offset);
         } else {
             rm_bench = vdev_raidz_map_alloc(&zio_bench,
                 BENCH_ASHIFT, ncols, PARITY_PQR);
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * CDDL HEADER START
  *
@@ -328,12 +327,14 @@ init_raidz_golden_map(raidz_test_opts_t *opts, const int parity)
 
     if (opts->rto_expand) {
         opts->rm_golden =
-            vdev_raidz_map_alloc_expanded(opts->zio_golden,
+            vdev_raidz_map_alloc_expanded(opts->zio_golden->io_abd,
+            opts->zio_golden->io_size, opts->zio_golden->io_offset,
             opts->rto_ashift, total_ncols+1, total_ncols,
-            parity, opts->rto_expand_offset, 0, B_FALSE);
-        rm_test = vdev_raidz_map_alloc_expanded(zio_test,
+            parity, opts->rto_expand_offset);
+        rm_test = vdev_raidz_map_alloc_expanded(zio_test->io_abd,
+            zio_test->io_size, zio_test->io_offset,
             opts->rto_ashift, total_ncols+1, total_ncols,
-            parity, opts->rto_expand_offset, 0, B_FALSE);
+            parity, opts->rto_expand_offset);
     } else {
         opts->rm_golden = vdev_raidz_map_alloc(opts->zio_golden,
             opts->rto_ashift, total_ncols, parity);
@@ -360,6 +361,187 @@ init_raidz_golden_map(raidz_test_opts_t *opts, const int parity)
     return (err);
 }
 
+/*
+ * If reflow is not in progress, reflow_offset should be UINT64_MAX.
+ * For each row, if the row is entirely before reflow_offset, it will
+ * come from the new location. Otherwise this row will come from the
+ * old location. Therefore, rows that straddle the reflow_offset will
+ * come from the old location.
+ *
+ * NOTE: Until raidz expansion is implemented this function is only
+ * needed by raidz_test.c to the multi-row raid_map_t functionality.
+ */
+raidz_map_t *
+vdev_raidz_map_alloc_expanded(abd_t *abd, uint64_t size, uint64_t offset,
+    uint64_t ashift, uint64_t physical_cols, uint64_t logical_cols,
+    uint64_t nparity, uint64_t reflow_offset)
+{
+    /* The zio's size in units of the vdev's minimum sector size. */
+    uint64_t s = size >> ashift;
+    uint64_t q, r, bc, devidx, asize = 0, tot;
+
+    /*
+     * "Quotient": The number of data sectors for this stripe on all but
+     * the "big column" child vdevs that also contain "remainder" data.
+     * AKA "full rows"
+     */
+    q = s / (logical_cols - nparity);
+
+    /*
+     * "Remainder": The number of partial stripe data sectors in this I/O.
+     * This will add a sector to some, but not all, child vdevs.
+     */
+    r = s - q * (logical_cols - nparity);
+
+    /* The number of "big columns" - those which contain remainder data. */
+    bc = (r == 0 ? 0 : r + nparity);
+
+    /*
+     * The total number of data and parity sectors associated with
+     * this I/O.
+     */
+    tot = s + nparity * (q + (r == 0 ? 0 : 1));
+
+    /* How many rows contain data (not skip) */
+    uint64_t rows = howmany(tot, logical_cols);
+    int cols = MIN(tot, logical_cols);
+
+    raidz_map_t *rm = kmem_zalloc(offsetof(raidz_map_t, rm_row[rows]),
+        KM_SLEEP);
+    rm->rm_nrows = rows;
+
+    for (uint64_t row = 0; row < rows; row++) {
+        raidz_row_t *rr = kmem_alloc(offsetof(raidz_row_t,
+            rr_col[cols]), KM_SLEEP);
+        rm->rm_row[row] = rr;
+
+        /* The starting RAIDZ (parent) vdev sector of the row. */
+        uint64_t b = (offset >> ashift) + row * logical_cols;
+
+        /*
+         * If we are in the middle of a reflow, and any part of this
+         * row has not been copied, then use the old location of
+         * this row.
+         */
+        int row_phys_cols = physical_cols;
+        if (b + (logical_cols - nparity) > reflow_offset >> ashift)
+            row_phys_cols--;
+
+        /* starting child of this row */
+        uint64_t child_id = b % row_phys_cols;
+        /* The starting byte offset on each child vdev. */
+        uint64_t child_offset = (b / row_phys_cols) << ashift;
+
+        /*
+         * We set cols to the entire width of the block, even
+         * if this row is shorter. This is needed because parity
+         * generation (for Q and R) needs to know the entire width,
+         * because it treats the short row as though it was
+         * full-width (and the "phantom" sectors were zero-filled).
+         *
+         * Another approach to this would be to set cols shorter
+         * (to just the number of columns that we might do i/o to)
+         * and have another mechanism to tell the parity generation
+         * about the "entire width". Reconstruction (at least
+         * vdev_raidz_reconstruct_general()) would also need to
+         * know about the "entire width".
+         */
+        rr->rr_cols = cols;
+        rr->rr_bigcols = bc;
+        rr->rr_missingdata = 0;
+        rr->rr_missingparity = 0;
+        rr->rr_firstdatacol = nparity;
+        rr->rr_abd_empty = NULL;
+        rr->rr_nempty = 0;
+
+        for (int c = 0; c < rr->rr_cols; c++, child_id++) {
+            if (child_id >= row_phys_cols) {
+                child_id -= row_phys_cols;
+                child_offset += 1ULL << ashift;
+            }
+            rr->rr_col[c].rc_devidx = child_id;
+            rr->rr_col[c].rc_offset = child_offset;
+            rr->rr_col[c].rc_orig_data = NULL;
+            rr->rr_col[c].rc_error = 0;
+            rr->rr_col[c].rc_tried = 0;
+            rr->rr_col[c].rc_skipped = 0;
+            rr->rr_col[c].rc_need_orig_restore = B_FALSE;
+
+            uint64_t dc = c - rr->rr_firstdatacol;
+            if (c < rr->rr_firstdatacol) {
+                rr->rr_col[c].rc_size = 1ULL << ashift;
+                rr->rr_col[c].rc_abd =
+                    abd_alloc_linear(rr->rr_col[c].rc_size,
+                    B_TRUE);
+            } else if (row == rows - 1 && bc != 0 && c >= bc) {
+                /*
+                 * Past the end, this for parity generation.
+                 */
+                rr->rr_col[c].rc_size = 0;
+                rr->rr_col[c].rc_abd = NULL;
+            } else {
+                /*
+                 * "data column" (col excluding parity)
+                 * Add an ASCII art diagram here
+                 */
+                uint64_t off;
+
+                if (c < bc || r == 0) {
+                    off = dc * rows + row;
+                } else {
+                    off = r * rows +
+                        (dc - r) * (rows - 1) + row;
+                }
+                rr->rr_col[c].rc_size = 1ULL << ashift;
+                rr->rr_col[c].rc_abd = abd_get_offset_struct(
+                    &rr->rr_col[c].rc_abdstruct,
+                    abd, off << ashift, 1 << ashift);
+            }
+
+            asize += rr->rr_col[c].rc_size;
+        }
+        /*
+         * If all data stored spans all columns, there's a danger that
+         * parity will always be on the same device and, since parity
+         * isn't read during normal operation, that that device's I/O
+         * bandwidth won't be used effectively. We therefore switch
+         * the parity every 1MB.
+         *
+         * ...at least that was, ostensibly, the theory. As a practical
+         * matter unless we juggle the parity between all devices
+         * evenly, we won't see any benefit. Further, occasional writes
+         * that aren't a multiple of the LCM of the number of children
+         * and the minimum stripe width are sufficient to avoid pessimal
+         * behavior. Unfortunately, this decision created an implicit
+         * on-disk format requirement that we need to support for all
+         * eternity, but only for single-parity RAID-Z.
+         *
+         * If we intend to skip a sector in the zeroth column for
+         * padding we must make sure to note this swap. We will never
+         * intend to skip the first column since at least one data and
+         * one parity column must appear in each row.
+         */
+        if (rr->rr_firstdatacol == 1 && rr->rr_cols > 1 &&
+            (offset & (1ULL << 20))) {
+            ASSERT(rr->rr_cols >= 2);
+            ASSERT(rr->rr_col[0].rc_size == rr->rr_col[1].rc_size);
+            devidx = rr->rr_col[0].rc_devidx;
+            uint64_t o = rr->rr_col[0].rc_offset;
+            rr->rr_col[0].rc_devidx = rr->rr_col[1].rc_devidx;
+            rr->rr_col[0].rc_offset = rr->rr_col[1].rc_offset;
+            rr->rr_col[1].rc_devidx = devidx;
+            rr->rr_col[1].rc_offset = o;
+        }
+
+    }
+    ASSERT3U(asize, ==, tot << ashift);
+
+    /* init RAIDZ parity ops */
+    rm->rm_ops = vdev_raidz_math_get_ops();
+
+    return (rm);
+}
+
 static raidz_map_t *
 init_raidz_map(raidz_test_opts_t *opts, zio_t **zio, const int parity)
 {
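The row-geometry arithmetic in the restored vdev_raidz_map_alloc_expanded() above is easy to sanity-check by hand. A small Python rendition of the quotient/remainder/big-column computation (variable names mirror the C; the example values are made up, and howmany() is ceiling division):

    def raidz_geometry(s, logical_cols, nparity):
        q = s // (logical_cols - nparity)        # full rows of data
        r = s - q * (logical_cols - nparity)     # leftover data sectors
        bc = 0 if r == 0 else r + nparity        # "big columns"
        tot = s + nparity * (q + (0 if r == 0 else 1))  # data + parity
        rows = -(-tot // logical_cols)           # howmany(tot, logical_cols)
        cols = min(tot, logical_cols)
        return q, r, bc, tot, rows, cols

    # A 5-sector write to a 5-wide single-parity map:
    print(raidz_geometry(s=5, logical_cols=5, nparity=1))
    # (1, 1, 2, 7, 2, 5): one full row, one remainder sector, two big
    # columns, and 7 total sectors laid out across 2 rows.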
@@ -379,9 +561,10 @@ init_raidz_map(raidz_test_opts_t *opts, zio_t **zio, const int parity)
     init_zio_abd(*zio);
 
     if (opts->rto_expand) {
-        rm = vdev_raidz_map_alloc_expanded(*zio,
+        rm = vdev_raidz_map_alloc_expanded((*zio)->io_abd,
+            (*zio)->io_size, (*zio)->io_offset,
             opts->rto_ashift, total_ncols+1, total_ncols,
-            parity, opts->rto_expand_offset, 0, B_FALSE);
+            parity, opts->rto_expand_offset);
     } else {
         rm = vdev_raidz_map_alloc(*zio, opts->rto_ashift,
             total_ncols, parity);
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * CDDL HEADER START
  *
@@ -120,4 +119,7 @@ void init_zio_abd(zio_t *zio);
 
 void run_raidz_benchmark(void);
 
+struct raidz_map *vdev_raidz_map_alloc_expanded(abd_t *, uint64_t, uint64_t,
+    uint64_t, uint64_t, uint64_t, uint64_t, uint64_t);
+
 #endif /* RAIDZ_TEST_H */
@@ -1,4 +1,4 @@
-zdb_CPPFLAGS = $(AM_CPPFLAGS) $(LIBZPOOL_CPPFLAGS)
+zdb_CPPFLAGS = $(AM_CPPFLAGS) $(FORCEDEBUG_CPPFLAGS)
 zdb_CFLAGS = $(AM_CFLAGS) $(LIBCRYPTO_CFLAGS)
 
 sbin_PROGRAMS += zdb
@@ -10,7 +10,6 @@ zdb_SOURCES = \
     %D%/zdb_il.c
 
 zdb_LDADD = \
-    libzdb.la \
     libzpool.la \
     libzfs_core.la \
     libnvpair.la
cmd/zdb/zdb.c: 1631 lines changed (diff suppressed because it is too large)
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * CDDL HEADER START
  *
@@ -29,6 +28,6 @@
 #define _ZDB_H
 
 void dump_intent_log(zilog_t *);
-extern uint8_t dump_opt[512];
+extern uint8_t dump_opt[256];
 
 #endif /* _ZDB_H */
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * CDDL HEADER START
  *
@@ -48,6 +47,8 @@
 
 #include "zdb.h"
 
+extern uint8_t dump_opt[256];
+
 static char tab_prefix[4] = "\t\t\t";
 
 static void
@@ -63,22 +64,21 @@ static void
 zil_prt_rec_create(zilog_t *zilog, int txtype, const void *arg)
 {
     (void) zilog;
-    const lr_create_t *lrc = arg;
-    const _lr_create_t *lr = &lrc->lr_create;
+    const lr_create_t *lr = arg;
     time_t crtime = lr->lr_crtime[0];
-    const char *name, *link;
+    char *name, *link;
     lr_attr_t *lrattr;
 
-    name = (const char *)&lrc->lr_data[0];
+    name = (char *)(lr + 1);
 
     if (lr->lr_common.lrc_txtype == TX_CREATE_ATTR ||
         lr->lr_common.lrc_txtype == TX_MKDIR_ATTR) {
-        lrattr = (lr_attr_t *)&lrc->lr_data[0];
+        lrattr = (lr_attr_t *)(lr + 1);
         name += ZIL_XVAT_SIZE(lrattr->lr_attr_masksize);
     }
 
     if (txtype == TX_SYMLINK) {
-        link = (const char *)&lrc->lr_data[strlen(name) + 1];
+        link = name + strlen(name) + 1;
         (void) printf("%s%s -> %s\n", tab_prefix, name, link);
     } else if (txtype != TX_MKXATTR) {
         (void) printf("%s%s\n", tab_prefix, name);
@@ -103,7 +103,7 @@ zil_prt_rec_remove(zilog_t *zilog, int txtype, const void *arg)
     const lr_remove_t *lr = arg;
 
     (void) printf("%sdoid %llu, name %s\n", tab_prefix,
-        (u_longlong_t)lr->lr_doid, (const char *)&lr->lr_data[0]);
+        (u_longlong_t)lr->lr_doid, (char *)(lr + 1));
 }
 
 static void
@@ -114,17 +114,16 @@ zil_prt_rec_link(zilog_t *zilog, int txtype, const void *arg)
 
     (void) printf("%sdoid %llu, link_obj %llu, name %s\n", tab_prefix,
         (u_longlong_t)lr->lr_doid, (u_longlong_t)lr->lr_link_obj,
-        (const char *)&lr->lr_data[0]);
+        (char *)(lr + 1));
 }
 
 static void
 zil_prt_rec_rename(zilog_t *zilog, int txtype, const void *arg)
 {
     (void) zilog, (void) txtype;
-    const lr_rename_t *lrr = arg;
-    const _lr_rename_t *lr = &lrr->lr_rename;
-    const char *snm = (const char *)&lrr->lr_data[0];
-    const char *tnm = (const char *)&lrr->lr_data[strlen(snm) + 1];
+    const lr_rename_t *lr = arg;
+    char *snm = (char *)(lr + 1);
+    char *tnm = snm + strlen(snm) + 1;
 
     (void) printf("%ssdoid %llu, tdoid %llu\n", tab_prefix,
         (u_longlong_t)lr->lr_sdoid, (u_longlong_t)lr->lr_tdoid);
@@ -174,8 +173,8 @@ zil_prt_rec_write(zilog_t *zilog, int txtype, const void *arg)
 
     if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
         (void) printf("%shas blkptr, %s\n", tab_prefix,
-            !BP_IS_HOLE(bp) && BP_GET_BIRTH(bp) >=
-            spa_min_claim_txg(zilog->zl_spa) ?
+            !BP_IS_HOLE(bp) &&
+            bp->blk_birth >= spa_min_claim_txg(zilog->zl_spa) ?
             "will claim" : "won't claim");
         print_log_bp(bp, tab_prefix);
 
@@ -187,7 +186,7 @@ zil_prt_rec_write(zilog_t *zilog, int txtype, const void *arg)
             (void) printf("%s<hole>\n", tab_prefix);
             return;
         }
-        if (BP_GET_BIRTH(bp) < zilog->zl_header->zh_claim_txg) {
+        if (bp->blk_birth < zilog->zl_header->zh_claim_txg) {
             (void) printf("%s<block already committed>\n",
                 tab_prefix);
             return;
@@ -210,7 +209,7 @@ zil_prt_rec_write(zilog_t *zilog, int txtype, const void *arg)
 
         /* data is stored after the end of the lr_write record */
         data = abd_alloc(lr->lr_length, B_FALSE);
-        abd_copy_from_buf(data, &lr->lr_data[0], lr->lr_length);
+        abd_copy_from_buf(data, lr + 1, lr->lr_length);
     }
 
     (void) printf("%s", tab_prefix);
@@ -238,8 +237,8 @@ zil_prt_rec_write_enc(zilog_t *zilog, int txtype, const void *arg)
 
     if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
         (void) printf("%shas blkptr, %s\n", tab_prefix,
-            !BP_IS_HOLE(bp) && BP_GET_BIRTH(bp) >=
-            spa_min_claim_txg(zilog->zl_spa) ?
+            !BP_IS_HOLE(bp) &&
+            bp->blk_birth >= spa_min_claim_txg(zilog->zl_spa) ?
             "will claim" : "won't claim");
         print_log_bp(bp, tab_prefix);
     }
@@ -308,7 +307,7 @@ zil_prt_rec_setsaxattr(zilog_t *zilog, int txtype, const void *arg)
     (void) zilog, (void) txtype;
     const lr_setsaxattr_t *lr = arg;
 
-    const char *name = (const char *)&lr->lr_data[0];
+    char *name = (char *)(lr + 1);
     (void) printf("%sfoid %llu\n", tab_prefix,
         (u_longlong_t)lr->lr_foid);
 
@@ -317,7 +316,7 @@ zil_prt_rec_setsaxattr(zilog_t *zilog, int txtype, const void *arg)
         (void) printf("%sXAT_VALUE NULL\n", tab_prefix);
     } else {
         (void) printf("%sXAT_VALUE ", tab_prefix);
-        const char *val = (const char *)&lr->lr_data[strlen(name) + 1];
+        char *val = name + (strlen(name) + 1);
         for (int i = 0; i < lr->lr_size; i++) {
             (void) printf("%c", *val);
             val++;
@@ -474,7 +473,7 @@ print_log_block(zilog_t *zilog, const blkptr_t *bp, void *arg,
 
     if (claim_txg != 0)
         claim = "already claimed";
-    else if (BP_GET_BIRTH(bp) >= spa_min_claim_txg(zilog->zl_spa))
+    else if (bp->blk_birth >= spa_min_claim_txg(zilog->zl_spa))
         claim = "will claim";
     else
         claim = "won't claim";
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * CDDL HEADER START
  *
@@ -23,7 +22,6 @@
  * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
  *
  * Copyright (c) 2016, Intel Corporation.
- * Copyright (c) 2023, Klara Inc.
  */
 
 /*
@@ -233,6 +231,28 @@ fmd_prop_get_int32(fmd_hdl_t *hdl, const char *name)
     if (strcmp(name, "spare_on_remove") == 0)
         return (1);
 
+    if (strcmp(name, "io_N") == 0 || strcmp(name, "checksum_N") == 0)
+        return (10);    /* N = 10 events */
+
+    return (0);
+}
+
+int64_t
+fmd_prop_get_int64(fmd_hdl_t *hdl, const char *name)
+{
+    (void) hdl;
+
+    /*
+     * These can be looked up in mp->modinfo->fmdi_props
+     * For now we just hard code for phase 2. In the
+     * future, there can be a ZED based override.
+     */
+    if (strcmp(name, "remove_timeout") == 0)
+        return (15ULL * 1000ULL * 1000ULL * 1000ULL);   /* 15 sec */
+
+    if (strcmp(name, "io_T") == 0 || strcmp(name, "checksum_T") == 0)
+        return (1000ULL * 1000ULL * 1000ULL * 600ULL);  /* 10 min */
+
     return (0);
 }
 
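The restored stubs hard-code the parameters the ZED diagnosis engine asks its SERD engines for: fire after N = 10 events within T = 600 seconds (plus a 15-second remove timeout). A minimal sketch of that N-events-within-T window check (plain Python with nanosecond timestamps mirroring hrtime_t; the real engine lives in fmd_serd.c):

    from collections import deque

    class Serd:
        def __init__(self, n, t_ns):
            self.n, self.t_ns = n, t_ns
            self.events = deque()

        def record(self, hrt):
            self.events.append(hrt)
            # Drop events that fell out of the sliding window.
            while self.events and hrt - self.events[0] > self.t_ns:
                self.events.popleft()
            return len(self.events) >= self.n   # True == engine fires

    eng = Serd(n=10, t_ns=600 * 10**9)          # io_N / io_T defaults above
    fired = [eng.record(i * 10**9) for i in range(12)]
    print(fired.index(True))                    # 9: fires on the 10th event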
@@ -515,19 +535,6 @@ fmd_serd_exists(fmd_hdl_t *hdl, const char *name)
     return (fmd_serd_eng_lookup(&mp->mod_serds, name) != NULL);
 }
 
-int
-fmd_serd_active(fmd_hdl_t *hdl, const char *name)
-{
-    fmd_module_t *mp = (fmd_module_t *)hdl;
-    fmd_serd_eng_t *sgp;
-
-    if ((sgp = fmd_serd_eng_lookup(&mp->mod_serds, name)) == NULL) {
-        zed_log_msg(LOG_ERR, "serd engine '%s' does not exist", name);
-        return (0);
-    }
-    return (fmd_serd_eng_fired(sgp) || !fmd_serd_eng_empty(sgp));
-}
-
 void
 fmd_serd_reset(fmd_hdl_t *hdl, const char *name)
 {
@@ -536,10 +543,12 @@ fmd_serd_reset(fmd_hdl_t *hdl, const char *name)
 
     if ((sgp = fmd_serd_eng_lookup(&mp->mod_serds, name)) == NULL) {
         zed_log_msg(LOG_ERR, "serd engine '%s' does not exist", name);
-    } else {
-        fmd_serd_eng_reset(sgp);
-        fmd_hdl_debug(hdl, "serd_reset %s", name);
+        return;
     }
+
+    fmd_serd_eng_reset(sgp);
+
+    fmd_hdl_debug(hdl, "serd_reset %s", name);
 }
 
 int
@@ -547,21 +556,16 @@ fmd_serd_record(fmd_hdl_t *hdl, const char *name, fmd_event_t *ep)
 {
     fmd_module_t *mp = (fmd_module_t *)hdl;
     fmd_serd_eng_t *sgp;
+    int err;
 
     if ((sgp = fmd_serd_eng_lookup(&mp->mod_serds, name)) == NULL) {
         zed_log_msg(LOG_ERR, "failed to add record to SERD engine '%s'",
             name);
         return (0);
     }
-    return (fmd_serd_eng_record(sgp, ep->ev_hrt));
-}
+    err = fmd_serd_eng_record(sgp, ep->ev_hrt);
 
-void
-fmd_serd_gc(fmd_hdl_t *hdl)
-{
-    fmd_module_t *mp = (fmd_module_t *)hdl;
-
-    fmd_serd_hash_apply(&mp->mod_serds, fmd_serd_eng_gc, NULL);
+    return (err);
 }
 
 /* FMD Timers */
@@ -575,7 +579,7 @@ _timer_notify(union sigval sv)
     const fmd_hdl_ops_t *ops = mp->mod_info->fmdi_ops;
     struct itimerspec its;
 
-    fmd_hdl_debug(hdl, "%s timer fired (%p)", mp->mod_name, ftp->ft_tid);
+    fmd_hdl_debug(hdl, "timer fired (%p)", ftp->ft_tid);
 
     /* disarm the timer */
     memset(&its, 0, sizeof (struct itimerspec));
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * CDDL HEADER START
  *
@@ -152,6 +151,7 @@ extern void fmd_hdl_vdebug(fmd_hdl_t *, const char *, va_list);
 extern void fmd_hdl_debug(fmd_hdl_t *, const char *, ...);
 
 extern int32_t fmd_prop_get_int32(fmd_hdl_t *, const char *);
+extern int64_t fmd_prop_get_int64(fmd_hdl_t *, const char *);
 
 #define FMD_STAT_NOALLOC    0x0 /* fmd should use caller's memory */
 #define FMD_STAT_ALLOC      0x1 /* fmd should allocate stats memory */
@@ -195,12 +195,10 @@ extern size_t fmd_buf_size(fmd_hdl_t *, fmd_case_t *, const char *);
 extern void fmd_serd_create(fmd_hdl_t *, const char *, uint_t, hrtime_t);
 extern void fmd_serd_destroy(fmd_hdl_t *, const char *);
 extern int fmd_serd_exists(fmd_hdl_t *, const char *);
-extern int fmd_serd_active(fmd_hdl_t *, const char *);
 extern void fmd_serd_reset(fmd_hdl_t *, const char *);
 extern int fmd_serd_record(fmd_hdl_t *, const char *, fmd_event_t *);
 extern int fmd_serd_fired(fmd_hdl_t *, const char *);
 extern int fmd_serd_empty(fmd_hdl_t *, const char *);
-extern void fmd_serd_gc(fmd_hdl_t *);
 
 extern id_t fmd_timer_install(fmd_hdl_t *, void *, fmd_event_t *, hrtime_t);
 extern void fmd_timer_remove(fmd_hdl_t *, id_t);
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * CDDL HEADER START
  *
@@ -311,9 +310,8 @@ fmd_serd_eng_reset(fmd_serd_eng_t *sgp)
 }
 
 void
-fmd_serd_eng_gc(fmd_serd_eng_t *sgp, void *arg)
+fmd_serd_eng_gc(fmd_serd_eng_t *sgp)
 {
-    (void) arg;
     fmd_serd_elem_t *sep, *nep;
     hrtime_t hrt;
 
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * CDDL HEADER START
  *
@@ -78,7 +77,7 @@ extern int fmd_serd_eng_fired(fmd_serd_eng_t *);
 extern int fmd_serd_eng_empty(fmd_serd_eng_t *);
 
 extern void fmd_serd_eng_reset(fmd_serd_eng_t *);
-extern void fmd_serd_eng_gc(fmd_serd_eng_t *, void *);
+extern void fmd_serd_eng_gc(fmd_serd_eng_t *);
 
 #ifdef __cplusplus
 }
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * CDDL HEADER START
  *
@@ -134,13 +133,11 @@ zfs_agent_iter_vdev(zpool_handle_t *zhp, nvlist_t *nvl, void *arg)
     * of blkid cache and L2ARC VDEV does not contain pool guid in its
     * blkid, so this is a special case for L2ARC VDEV.
     */
-    else if (gsp->gs_vdev_guid != 0 &&
+    else if (gsp->gs_vdev_guid != 0 && gsp->gs_devid == NULL &&
         nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_GUID, &vdev_guid) == 0 &&
         gsp->gs_vdev_guid == vdev_guid) {
-        if (gsp->gs_devid == NULL) {
-            (void) nvlist_lookup_string(nvl, ZPOOL_CONFIG_DEVID,
-                &gsp->gs_devid);
-        }
+        (void) nvlist_lookup_string(nvl, ZPOOL_CONFIG_DEVID,
+            &gsp->gs_devid);
         (void) nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_EXPANSION_TIME,
             &gsp->gs_vdev_expandtime);
         return (B_TRUE);
@@ -158,28 +155,22 @@ zfs_agent_iter_pool(zpool_handle_t *zhp, void *arg)
     /*
     * For each vdev in this pool, look for a match by devid
     */
-    boolean_t found = B_FALSE;
-    uint64_t pool_guid;
+    if ((config = zpool_get_config(zhp, NULL)) != NULL) {
+        if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
+            &nvl) == 0) {
+            (void) zfs_agent_iter_vdev(zhp, nvl, gsp);
+        }
+    }
+    /*
+     * if a match was found then grab the pool guid
+     */
+    if (gsp->gs_vdev_guid && gsp->gs_devid) {
+        (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
+            &gsp->gs_pool_guid);
+    }
 
-    /* Get pool configuration and extract pool GUID */
-    if ((config = zpool_get_config(zhp, NULL)) == NULL ||
-        nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
-        &pool_guid) != 0)
-        goto out;
-
-    /* Skip this pool if we're looking for a specific pool */
-    if (gsp->gs_pool_guid != 0 && pool_guid != gsp->gs_pool_guid)
-        goto out;
-
-    if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvl) == 0)
-        found = zfs_agent_iter_vdev(zhp, nvl, gsp);
-
-    if (found && gsp->gs_pool_guid == 0)
-        gsp->gs_pool_guid = pool_guid;
-
-out:
     zpool_close(zhp);
-    return (found);
+    return (gsp->gs_devid != NULL && gsp->gs_vdev_guid != 0);
 }
 
 void
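The base-side rewrite of zfs_agent_iter_pool() filters by pool GUID up front and only adopts a GUID once a vdev actually matched. That control flow is easy to mirror in a few lines (a sketch only; names follow the C, and the pool objects here are stand-ins for zpool_iter() handles):

    def find_vdev(pools, search_pool_guid, match_vdev):
        for pool in pools:
            # Skip pools that can't match when a specific pool was requested.
            if search_pool_guid and pool["guid"] != search_pool_guid:
                continue
            if any(match_vdev(v) for v in pool["vdevs"]):
                return pool["guid"]     # adopt the GUID only on a match
        return 0

    pools = [{"guid": 111, "vdevs": ["sda"]}, {"guid": 222, "vdevs": ["sdb"]}]
    print(find_vdev(pools, 0, lambda v: v == "sdb"))  # 222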
@@ -241,17 +232,20 @@ zfs_agent_post_event(const char *class, const char *subclass, nvlist_t *nvl)
     * For multipath, spare and l2arc devices ZFS_EV_VDEV_GUID or
     * ZFS_EV_POOL_GUID may be missing so find them.
     */
-    search.gs_devid = devid;
-    search.gs_vdev_guid = vdev_guid;
-    search.gs_pool_guid = pool_guid;
-    zpool_iter(g_zfs_hdl, zfs_agent_iter_pool, &search);
-    if (devid == NULL)
-        devid = search.gs_devid;
-    if (pool_guid == 0)
-        pool_guid = search.gs_pool_guid;
-    if (vdev_guid == 0)
-        vdev_guid = search.gs_vdev_guid;
-    devtype = search.gs_vdev_type;
+    if (devid == NULL || pool_guid == 0 || vdev_guid == 0) {
+        if (devid == NULL)
+            search.gs_vdev_guid = vdev_guid;
+        else
+            search.gs_devid = devid;
+        zpool_iter(g_zfs_hdl, zfs_agent_iter_pool, &search);
+        if (devid == NULL)
+            devid = search.gs_devid;
+        if (pool_guid == 0)
+            pool_guid = search.gs_pool_guid;
+        if (vdev_guid == 0)
+            vdev_guid = search.gs_vdev_guid;
+        devtype = search.gs_vdev_type;
+    }
 
     /*
     * We want to avoid reporting "remove" events coming from
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * CDDL HEADER START
  *
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * CDDL HEADER START
  *
@ -24,7 +23,6 @@
|
|||||||
* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
|
||||||
* Copyright 2015 Nexenta Systems, Inc. All rights reserved.
|
* Copyright 2015 Nexenta Systems, Inc. All rights reserved.
|
||||||
* Copyright (c) 2016, Intel Corporation.
|
* Copyright (c) 2016, Intel Corporation.
|
||||||
* Copyright (c) 2023, Klara Inc.
|
|
||||||
*/
|
*/
|
||||||
|
|
||||||
#include <stddef.h>
|
#include <stddef.h>
|
||||||
@ -49,16 +47,11 @@
|
|||||||
#define DEFAULT_CHECKSUM_T 600 /* seconds */
|
#define DEFAULT_CHECKSUM_T 600 /* seconds */
|
||||||
#define DEFAULT_IO_N 10 /* events */
|
#define DEFAULT_IO_N 10 /* events */
|
||||||
#define DEFAULT_IO_T 600 /* seconds */
|
#define DEFAULT_IO_T 600 /* seconds */
|
||||||
#define DEFAULT_SLOW_IO_N 10 /* events */
|
|
||||||
#define DEFAULT_SLOW_IO_T 30 /* seconds */
|
|
||||||
|
|
||||||
#define CASE_GC_TIMEOUT_SECS 43200 /* 12 hours */
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Our serd engines are named in the following format:
|
* Our serd engines are named 'zfs_<pool_guid>_<vdev_guid>_{checksum,io}'. This
|
||||||
* 'zfs_<pool_guid>_<vdev_guid>_{checksum,io,slow_io}'
|
* #define reserves enough space for two 64-bit hex values plus the length of
|
||||||
* This #define reserves enough space for two 64-bit hex values plus the
|
* the longest string.
|
||||||
* length of the longest string.
|
|
||||||
*/
|
*/
|
||||||
#define MAX_SERDLEN (16 * 2 + sizeof ("zfs___checksum"))
|
#define MAX_SERDLEN (16 * 2 + sizeof ("zfs___checksum"))
|
||||||
|
|
||||||
@ -72,11 +65,9 @@ typedef struct zfs_case_data {
|
|||||||
uint64_t zc_ena;
|
uint64_t zc_ena;
|
||||||
uint64_t zc_pool_guid;
|
uint64_t zc_pool_guid;
|
||||||
uint64_t zc_vdev_guid;
|
uint64_t zc_vdev_guid;
|
||||||
uint64_t zc_parent_guid;
|
|
||||||
int zc_pool_state;
|
int zc_pool_state;
|
||||||
char zc_serd_checksum[MAX_SERDLEN];
|
char zc_serd_checksum[MAX_SERDLEN];
|
||||||
char zc_serd_io[MAX_SERDLEN];
|
char zc_serd_io[MAX_SERDLEN];
|
||||||
char zc_serd_slow_io[MAX_SERDLEN];
|
|
||||||
int zc_has_remove_timer;
|
int zc_has_remove_timer;
|
||||||
} zfs_case_data_t;
|
} zfs_case_data_t;
|
||||||
|
|
||||||
@ -123,8 +114,7 @@ zfs_de_stats_t zfs_stats = {
|
|||||||
{ "resource_drops", FMD_TYPE_UINT64, "resource related ereports" }
|
{ "resource_drops", FMD_TYPE_UINT64, "resource related ereports" }
|
||||||
};
|
};
|
||||||
|
|
||||||
/* wait 15 seconds after a removal */
|
static hrtime_t zfs_remove_timeout;
|
||||||
static hrtime_t zfs_remove_timeout = SEC2NSEC(15);
|
|
||||||
|
|
||||||
uu_list_pool_t *zfs_case_pool;
|
uu_list_pool_t *zfs_case_pool;
|
||||||
uu_list_t *zfs_cases;
|
uu_list_t *zfs_cases;
|
||||||
@@ -134,8 +124,6 @@ uu_list_t *zfs_cases;
 #define ZFS_MAKE_EREPORT(type) \
     FM_EREPORT_CLASS "." ZFS_ERROR_CLASS "." type
 
-static void zfs_purge_cases(fmd_hdl_t *hdl);
-
 /*
  * Write out the persistent representation of an active case.
  */
@@ -182,64 +170,6 @@ zfs_case_unserialize(fmd_hdl_t *hdl, fmd_case_t *cp)
     return (zcp);
 }
 
-/*
- * Return count of other unique SERD cases under same vdev parent
- */
-static uint_t
-zfs_other_serd_cases(fmd_hdl_t *hdl, const zfs_case_data_t *zfs_case)
-{
-    zfs_case_t *zcp;
-    uint_t cases = 0;
-    static hrtime_t next_check = 0;
-
-    /*
-     * Note that plumbing in some external GC would require adding locking,
-     * since most of this module code is not thread safe and assumes there
-     * is only one thread running against the module. So we perform GC here
-     * inline periodically so that future delay induced faults will be
-     * possible once the issue causing multiple vdev delays is resolved.
-     */
-    if (gethrestime_sec() > next_check) {
-        /* Periodically purge old SERD entries and stale cases */
-        fmd_serd_gc(hdl);
-        zfs_purge_cases(hdl);
-        next_check = gethrestime_sec() + CASE_GC_TIMEOUT_SECS;
-    }
-
-    for (zcp = uu_list_first(zfs_cases); zcp != NULL;
-        zcp = uu_list_next(zfs_cases, zcp)) {
-        zfs_case_data_t *zcd = &zcp->zc_data;
-
-        /*
-         * must be same pool and parent vdev but different leaf vdev
-         */
-        if (zcd->zc_pool_guid != zfs_case->zc_pool_guid ||
-            zcd->zc_parent_guid != zfs_case->zc_parent_guid ||
-            zcd->zc_vdev_guid == zfs_case->zc_vdev_guid) {
-            continue;
-        }
-
-        /*
-         * Check if there is another active serd case besides zfs_case
-         *
-         * Only one serd engine will be assigned to the case
-         */
-        if (zcd->zc_serd_checksum[0] == zfs_case->zc_serd_checksum[0] &&
-            fmd_serd_active(hdl, zcd->zc_serd_checksum)) {
-            cases++;
-        }
-        if (zcd->zc_serd_io[0] == zfs_case->zc_serd_io[0] &&
-            fmd_serd_active(hdl, zcd->zc_serd_io)) {
-            cases++;
-        }
-        if (zcd->zc_serd_slow_io[0] == zfs_case->zc_serd_slow_io[0] &&
-            fmd_serd_active(hdl, zcd->zc_serd_slow_io)) {
-            cases++;
-        }
-    }
-    return (cases);
-}
-
 /*
  * Iterate over any active cases. If any cases are associated with a pool or
  * vdev which is no longer present on the system, close the associated case.
@@ -446,14 +376,6 @@ zfs_serd_name(char *buf, uint64_t pool_guid, uint64_t vdev_guid,
     (long long unsigned int)vdev_guid, type);
 }
 
-static void
-zfs_case_retire(fmd_hdl_t *hdl, zfs_case_t *zcp)
-{
-    fmd_hdl_debug(hdl, "retiring case");
-
-    fmd_case_close(hdl, zcp->zc_case);
-}
-
 /*
  * Solve a given ZFS case. This first checks to make sure the diagnosis is
  * still valid, as well as cleaning up any pending timer associated with the
@@ -526,34 +448,6 @@ zfs_ereport_when(fmd_hdl_t *hdl, nvlist_t *nvl, er_timeval_t *when)
     }
 }
 
-/*
- * Record the specified event in the SERD engine and return a
- * boolean value indicating whether or not the engine fired as
- * the result of inserting this event.
- *
- * When the pool has similar active cases on other vdevs, then
- * the fired state is disregarded and the case is retired.
- */
-static int
-zfs_fm_serd_record(fmd_hdl_t *hdl, const char *name, fmd_event_t *ep,
-    zfs_case_t *zcp, const char *err_type)
-{
-    int fired = fmd_serd_record(hdl, name, ep);
-    int peers = 0;
-
-    if (fired && (peers = zfs_other_serd_cases(hdl, &zcp->zc_data)) > 0) {
-        fmd_hdl_debug(hdl, "pool %llu is tracking %d other %s cases "
-            "-- skip faulting the vdev %llu",
-            (u_longlong_t)zcp->zc_data.zc_pool_guid,
-            peers, err_type,
-            (u_longlong_t)zcp->zc_data.zc_vdev_guid);
-        zfs_case_retire(hdl, zcp);
-        fired = 0;
-    }
-
-    return (fired);
-}
-
 /*
  * Main fmd entry point.
  */
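
For readers unfamiliar with SERD (Soft Error Rate Discrimination): an engine "fires" once N qualifying events are recorded within a time window T, which is what the fmd_serd_record() return value consumed above reports. A simplified self-contained sketch of that rule, assuming a fixed rather than sliding window (the type and function names here are hypothetical; the real fmd engine tracks individual event timestamps):

    #include <stdint.h>

    typedef struct serd_sketch {
        uint32_t n;        /* events required to fire */
        int64_t t_ns;      /* window length, nanoseconds */
        uint32_t count;    /* events in the current window */
        int64_t start_ns;  /* when the current window opened */
    } serd_sketch_t;

    /* Record one event at now_ns; returns nonzero when N-in-T is met. */
    static int
    serd_record_sketch(serd_sketch_t *s, int64_t now_ns)
    {
        if (s->count == 0 || now_ns - s->start_ns > s->t_ns) {
            s->start_ns = now_ns;   /* open a new window */
            s->count = 0;
        }
        s->count++;
        return (s->count >= s->n);
    }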
@@ -562,7 +456,7 @@ zfs_fm_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl, const char *class)
 {
     zfs_case_t *zcp, *dcp;
     int32_t pool_state;
-    uint64_t ena, pool_guid, vdev_guid, parent_guid;
+    uint64_t ena, pool_guid, vdev_guid;
     uint64_t checksum_n, checksum_t;
     uint64_t io_n, io_t;
     er_timeval_t pool_load;
@@ -652,9 +546,6 @@ zfs_fm_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl, const char *class)
     if (nvlist_lookup_uint64(nvl,
         FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID, &vdev_guid) != 0)
         vdev_guid = 0;
-    if (nvlist_lookup_uint64(nvl,
-        FM_EREPORT_PAYLOAD_ZFS_PARENT_GUID, &parent_guid) != 0)
-        parent_guid = 0;
     if (nvlist_lookup_uint64(nvl, FM_EREPORT_ENA, &ena) != 0)
         ena = 0;
 
@@ -741,7 +632,9 @@ zfs_fm_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl, const char *class)
     if (strcmp(class,
         ZFS_MAKE_EREPORT(FM_EREPORT_ZFS_DATA)) == 0 ||
         strcmp(class,
-        ZFS_MAKE_EREPORT(FM_EREPORT_ZFS_CONFIG_CACHE_WRITE)) == 0) {
+        ZFS_MAKE_EREPORT(FM_EREPORT_ZFS_CONFIG_CACHE_WRITE)) == 0 ||
+        strcmp(class,
+        ZFS_MAKE_EREPORT(FM_EREPORT_ZFS_DELAY)) == 0) {
         zfs_stats.resource_drops.fmds_value.ui64++;
         return;
     }
@@ -765,7 +658,6 @@ zfs_fm_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl, const char *class)
         data.zc_ena = ena;
         data.zc_pool_guid = pool_guid;
         data.zc_vdev_guid = vdev_guid;
-        data.zc_parent_guid = parent_guid;
         data.zc_pool_state = (int)pool_state;
 
         fmd_buf_write(hdl, cs, CASE_DATA, &data, sizeof (data));
@@ -810,9 +702,6 @@ zfs_fm_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl, const char *class)
             if (zcp->zc_data.zc_serd_checksum[0] != '\0')
                 fmd_serd_reset(hdl,
                     zcp->zc_data.zc_serd_checksum);
-            if (zcp->zc_data.zc_serd_slow_io[0] != '\0')
-                fmd_serd_reset(hdl,
-                    zcp->zc_data.zc_serd_slow_io);
     } else if (fmd_nvl_class_match(hdl, nvl,
         ZFS_MAKE_RSRC(FM_RESOURCE_STATECHANGE))) {
         uint64_t state = 0;
@@ -841,11 +730,7 @@ zfs_fm_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl, const char *class)
     if (fmd_case_solved(hdl, zcp->zc_case))
         return;
 
-    if (vdev_guid)
-        fmd_hdl_debug(hdl, "error event '%s', vdev %llu", class,
-            vdev_guid);
-    else
-        fmd_hdl_debug(hdl, "error event '%s'", class);
+    fmd_hdl_debug(hdl, "error event '%s'", class);
 
     /*
      * Determine if we should solve the case and generate a fault. We solve
@@ -894,12 +779,11 @@ zfs_fm_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl, const char *class)
         fmd_nvl_class_match(hdl, nvl,
         ZFS_MAKE_EREPORT(FM_EREPORT_ZFS_IO_FAILURE)) ||
         fmd_nvl_class_match(hdl, nvl,
-        ZFS_MAKE_EREPORT(FM_EREPORT_ZFS_DELAY)) ||
-        fmd_nvl_class_match(hdl, nvl,
         ZFS_MAKE_EREPORT(FM_EREPORT_ZFS_PROBE_FAILURE))) {
         const char *failmode = NULL;
         boolean_t checkremove = B_FALSE;
         uint32_t pri = 0;
+        int32_t flags = 0;
 
         /*
          * If this is a checksum or I/O error, then toss it into the
@@ -928,64 +812,22 @@ zfs_fm_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl, const char *class)
                     SEC2NSEC(io_t));
                 zfs_case_serialize(zcp);
             }
-            if (zfs_fm_serd_record(hdl, zcp->zc_data.zc_serd_io,
-                ep, zcp, "io error")) {
+            if (fmd_serd_record(hdl, zcp->zc_data.zc_serd_io, ep))
                 checkremove = B_TRUE;
-            }
-        } else if (fmd_nvl_class_match(hdl, nvl,
-            ZFS_MAKE_EREPORT(FM_EREPORT_ZFS_DELAY))) {
-            uint64_t slow_io_n, slow_io_t;
-
-            /*
-             * Create a slow io SERD engine when the VDEV has the
-             * 'vdev_slow_io_n' and 'vdev_slow_io_n' properties.
-             */
-            if (zcp->zc_data.zc_serd_slow_io[0] == '\0' &&
-                nvlist_lookup_uint64(nvl,
-                FM_EREPORT_PAYLOAD_ZFS_VDEV_SLOW_IO_N,
-                &slow_io_n) == 0 &&
-                nvlist_lookup_uint64(nvl,
-                FM_EREPORT_PAYLOAD_ZFS_VDEV_SLOW_IO_T,
-                &slow_io_t) == 0) {
-                zfs_serd_name(zcp->zc_data.zc_serd_slow_io,
-                    pool_guid, vdev_guid, "slow_io");
-                fmd_serd_create(hdl,
-                    zcp->zc_data.zc_serd_slow_io,
-                    slow_io_n,
-                    SEC2NSEC(slow_io_t));
-                zfs_case_serialize(zcp);
-            }
-            /* Pass event to SERD engine and see if this triggers */
-            if (zcp->zc_data.zc_serd_slow_io[0] != '\0' &&
-                zfs_fm_serd_record(hdl,
-                zcp->zc_data.zc_serd_slow_io, ep, zcp, "slow io")) {
-                zfs_case_solve(hdl, zcp,
-                    "fault.fs.zfs.vdev.slow_io");
-            }
         } else if (fmd_nvl_class_match(hdl, nvl,
             ZFS_MAKE_EREPORT(FM_EREPORT_ZFS_CHECKSUM))) {
-            uint64_t flags = 0;
-            int32_t flags32 = 0;
             /*
              * We ignore ereports for checksum errors generated by
              * scrub/resilver I/O to avoid potentially further
              * degrading the pool while it's being repaired.
-             *
-             * Note that FM_EREPORT_PAYLOAD_ZFS_ZIO_FLAGS used to
-             * be int32. To allow newer zed to work on older
-             * kernels, if we don't find the flags, we look for
-             * the older ones too.
             */
             if (((nvlist_lookup_uint32(nvl,
                 FM_EREPORT_PAYLOAD_ZFS_ZIO_PRIORITY, &pri) == 0) &&
                 (pri == ZIO_PRIORITY_SCRUB ||
                 pri == ZIO_PRIORITY_REBUILD)) ||
-                ((nvlist_lookup_uint64(nvl,
-                FM_EREPORT_PAYLOAD_ZFS_ZIO_FLAGS, &flags) == 0) &&
-                (flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER))) ||
                 ((nvlist_lookup_int32(nvl,
-                FM_EREPORT_PAYLOAD_ZFS_ZIO_FLAGS, &flags32) == 0) &&
-                (flags32 & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER)))) {
+                FM_EREPORT_PAYLOAD_ZFS_ZIO_FLAGS, &flags) == 0) &&
+                (flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER)))) {
                 fmd_hdl_debug(hdl, "ignoring '%s' for "
                     "scrub/resilver I/O", class);
                 return;
@@ -1011,9 +853,8 @@ zfs_fm_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl, const char *class)
                     SEC2NSEC(checksum_t));
                 zfs_case_serialize(zcp);
             }
-            if (zfs_fm_serd_record(hdl,
-                zcp->zc_data.zc_serd_checksum, ep, zcp,
-                "checksum")) {
+            if (fmd_serd_record(hdl,
+                zcp->zc_data.zc_serd_checksum, ep)) {
                 zfs_case_solve(hdl, zcp,
                     "fault.fs.zfs.vdev.checksum");
             }
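
Taken together, the hunks above and below show the whole per-vdev SERD lifecycle on the left-hand side of this diff. A condensed sketch, using only calls that appear elsewhere in this file (control flow and error handling elided; not a drop-in snippet):

    /* on the first error: name and create the engine, then persist it */
    zfs_serd_name(zcp->zc_data.zc_serd_io, pool_guid, vdev_guid, "io");
    fmd_serd_create(hdl, zcp->zc_data.zc_serd_io, io_n, SEC2NSEC(io_t));
    zfs_case_serialize(zcp);

    /* on every matching ereport: nonzero return means N events within T */
    if (fmd_serd_record(hdl, zcp->zc_data.zc_serd_io, ep))
        checkremove = B_TRUE;

    /* on a vdev state change: start counting afresh */
    fmd_serd_reset(hdl, zcp->zc_data.zc_serd_io);

    /* when the case closes: tear the engine down */
    fmd_serd_destroy(hdl, zcp->zc_data.zc_serd_io);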
@@ -1083,8 +924,6 @@ zfs_fm_close(fmd_hdl_t *hdl, fmd_case_t *cs)
         fmd_serd_destroy(hdl, zcp->zc_data.zc_serd_checksum);
     if (zcp->zc_data.zc_serd_io[0] != '\0')
         fmd_serd_destroy(hdl, zcp->zc_data.zc_serd_io);
-    if (zcp->zc_data.zc_serd_slow_io[0] != '\0')
-        fmd_serd_destroy(hdl, zcp->zc_data.zc_serd_slow_io);
     if (zcp->zc_data.zc_has_remove_timer)
         fmd_timer_remove(hdl, zcp->zc_remove_timer);
 
@@ -1093,15 +932,30 @@ zfs_fm_close(fmd_hdl_t *hdl, fmd_case_t *cs)
     fmd_hdl_free(hdl, zcp, sizeof (zfs_case_t));
 }
 
+/*
+ * We use the fmd gc entry point to look for old cases that no longer apply.
+ * This allows us to keep our set of case data small in a long running system.
+ */
+static void
+zfs_fm_gc(fmd_hdl_t *hdl)
+{
+    zfs_purge_cases(hdl);
+}
+
 static const fmd_hdl_ops_t fmd_ops = {
     zfs_fm_recv,    /* fmdo_recv */
     zfs_fm_timeout, /* fmdo_timeout */
     zfs_fm_close,   /* fmdo_close */
     NULL,           /* fmdo_stats */
-    NULL,           /* fmdo_gc */
+    zfs_fm_gc,      /* fmdo_gc */
 };
 
 static const fmd_prop_t fmd_props[] = {
+    { "checksum_N", FMD_TYPE_UINT32, "10" },
+    { "checksum_T", FMD_TYPE_TIME, "10min" },
+    { "io_N", FMD_TYPE_UINT32, "10" },
+    { "io_T", FMD_TYPE_TIME, "10min" },
+    { "remove_timeout", FMD_TYPE_TIME, "15sec" },
     { NULL, 0, NULL }
 };
 
@@ -1142,6 +996,8 @@ _zfs_diagnosis_init(fmd_hdl_t *hdl)
 
     (void) fmd_stat_create(hdl, FMD_STAT_NOALLOC, sizeof (zfs_stats) /
         sizeof (fmd_stat_t), (fmd_stat_t *)&zfs_stats);
+
+    zfs_remove_timeout = fmd_prop_get_int64(hdl, "remove_timeout");
 }
 
 void

@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * CDDL HEADER START
  *
@@ -215,7 +214,6 @@ zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t labeled)
     vdev_stat_t *vs;
     char **lines = NULL;
     int lines_cnt = 0;
-    int rc;
 
     /*
      * Get the persistent path, typically under the '/dev/disk/by-id' or
@@ -407,17 +405,17 @@ zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t labeled)
     }
 
     nvlist_lookup_string(vdev, "new_devid", &new_devid);
 
     if (is_mpath_wholedisk) {
         /* Don't label device mapper or multipath disks. */
         zed_log_msg(LOG_INFO,
             " it's a multipath wholedisk, don't label");
-        rc = zpool_prepare_disk(zhp, vdev, "autoreplace", &lines,
-            &lines_cnt);
-        if (rc != 0) {
+        if (zpool_prepare_disk(zhp, vdev, "autoreplace", &lines,
+            &lines_cnt) != 0) {
             zed_log_msg(LOG_INFO,
                 " zpool_prepare_disk: could not "
-                "prepare '%s' (%s), path '%s', rc = %d", fullpath,
-                libzfs_error_description(g_zfshdl), path, rc);
+                "prepare '%s' (%s)", fullpath,
+                libzfs_error_description(g_zfshdl));
             if (lines_cnt > 0) {
                 zed_log_msg(LOG_INFO,
                     " zfs_prepare_disk output:");
@@ -448,13 +446,12 @@ zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t labeled)
          * If this is a request to label a whole disk, then attempt to
          * write out the label.
          */
-        rc = zpool_prepare_and_label_disk(g_zfshdl, zhp, leafname,
-            vdev, "autoreplace", &lines, &lines_cnt);
-        if (rc != 0) {
+        if (zpool_prepare_and_label_disk(g_zfshdl, zhp, leafname,
+            vdev, "autoreplace", &lines, &lines_cnt) != 0) {
             zed_log_msg(LOG_WARNING,
                 " zpool_prepare_and_label_disk: could not "
-                "label '%s' (%s), rc = %d", leafname,
-                libzfs_error_description(g_zfshdl), rc);
+                "label '%s' (%s)", leafname,
+                libzfs_error_description(g_zfshdl));
             if (lines_cnt > 0) {
                 zed_log_msg(LOG_INFO,
                     " zfs_prepare_disk output:");
@@ -705,7 +702,7 @@ zfs_enable_ds(void *arg)
 {
     unavailpool_t *pool = (unavailpool_t *)arg;
 
-    (void) zpool_enable_datasets(pool->uap_zhp, NULL, 0, 512);
+    (void) zpool_enable_datasets(pool->uap_zhp, NULL, 0);
     zpool_close(pool->uap_zhp);
     free(pool);
 }

@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * CDDL HEADER START
  *
@@ -404,7 +403,6 @@ zfs_retire_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl,
     (state == VDEV_STATE_REMOVED || state == VDEV_STATE_FAULTED))) {
         const char *devtype;
         char *devname;
-        boolean_t skip_removal = B_FALSE;
 
         if (nvlist_lookup_string(nvl, FM_EREPORT_PAYLOAD_ZFS_VDEV_TYPE,
             &devtype) == 0) {
@@ -442,28 +440,18 @@ zfs_retire_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl,
         nvlist_lookup_uint64_array(vdev, ZPOOL_CONFIG_VDEV_STATS,
             (uint64_t **)&vs, &c);
 
-        if (vs->vs_state == VDEV_STATE_OFFLINE)
-            return;
-
         /*
          * If state removed is requested for already removed vdev,
          * its a loopback event from spa_async_remove(). Just
          * ignore it.
          */
-        if ((vs->vs_state == VDEV_STATE_REMOVED &&
-            state == VDEV_STATE_REMOVED)) {
-            if (strcmp(class, "resource.fs.zfs.removed") == 0 &&
-                nvlist_exists(nvl, "by_kernel")) {
-                skip_removal = B_TRUE;
-            } else {
-                return;
-            }
-        }
+        if (vs->vs_state == VDEV_STATE_REMOVED &&
+            state == VDEV_STATE_REMOVED)
+            return;
 
         /* Remove the vdev since device is unplugged */
         int remove_status = 0;
-        if (!skip_removal && (l2arc ||
-            (strcmp(class, "resource.fs.zfs.removed") == 0))) {
+        if (l2arc || (strcmp(class, "resource.fs.zfs.removed") == 0)) {
             remove_status = zpool_vdev_remove_wanted(zhp, devname);
             fmd_hdl_debug(hdl, "zpool_vdev_remove_wanted '%s'"
                 ", err:%d", devname, libzfs_errno(zhdl));
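
Restating the gate the left-hand side adds above, linearly and slightly condensed (same calls and values as in the hunk): a "removed" state change for a vdev that is already VDEV_STATE_REMOVED is normally a loopback from spa_async_remove() and is ignored, unless it is a kernel-posted resource.fs.zfs.removed event carrying the by_kernel payload flag, in which case processing continues but the explicit remove request is skipped:

    boolean_t skip_removal = B_FALSE;

    if (vs->vs_state == VDEV_STATE_REMOVED &&
        state == VDEV_STATE_REMOVED) {
        if (strcmp(class, "resource.fs.zfs.removed") == 0 &&
            nvlist_exists(nvl, "by_kernel"))
            skip_removal = B_TRUE;  /* kernel already handled it */
        else
            return;                 /* loopback: ignore */
    }

    if (!skip_removal && (l2arc ||
        strcmp(class, "resource.fs.zfs.removed") == 0))
        remove_status = zpool_vdev_remove_wanted(zhp, devname);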
@@ -535,9 +523,6 @@ zfs_retire_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl,
     } else if (fmd_nvl_class_match(hdl, fault,
         "fault.fs.zfs.vdev.checksum")) {
         degrade_device = B_TRUE;
-    } else if (fmd_nvl_class_match(hdl, fault,
-        "fault.fs.zfs.vdev.slow_io")) {
-        degrade_device = B_TRUE;
     } else if (fmd_nvl_class_match(hdl, fault,
         "fault.fs.zfs.device")) {
         fault_device = B_FALSE;

@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * This file is part of the ZFS Event Daemon (ZED).
  *
@@ -9,18 +9,17 @@ dist_zedexec_SCRIPTS = \
 	%D%/all-debug.sh \
 	%D%/all-syslog.sh \
 	%D%/data-notify.sh \
-	%D%/deadman-sync-slot_off.sh \
 	%D%/generic-notify.sh \
-	%D%/pool_import-sync-led.sh \
+	%D%/pool_import-led.sh \
 	%D%/resilver_finish-notify.sh \
 	%D%/resilver_finish-start-scrub.sh \
 	%D%/scrub_finish-notify.sh \
-	%D%/statechange-sync-led.sh \
+	%D%/statechange-led.sh \
 	%D%/statechange-notify.sh \
-	%D%/statechange-sync-slot_off.sh \
+	%D%/statechange-slot_off.sh \
 	%D%/trim_finish-notify.sh \
-	%D%/vdev_attach-sync-led.sh \
-	%D%/vdev_clear-sync-led.sh
+	%D%/vdev_attach-led.sh \
+	%D%/vdev_clear-led.sh
 
 nodist_zedexec_SCRIPTS = \
 	%D%/history_event-zfs-list-cacher.sh
@@ -30,17 +29,16 @@ SUBSTFILES += $(nodist_zedexec_SCRIPTS)
 zedconfdefaults = \
 	all-syslog.sh \
 	data-notify.sh \
-	deadman-sync-slot_off.sh \
 	history_event-zfs-list-cacher.sh \
-	pool_import-sync-led.sh \
+	pool_import-led.sh \
 	resilver_finish-notify.sh \
 	resilver_finish-start-scrub.sh \
 	scrub_finish-notify.sh \
-	statechange-sync-led.sh \
+	statechange-led.sh \
 	statechange-notify.sh \
-	statechange-sync-slot_off.sh \
-	vdev_attach-sync-led.sh \
-	vdev_clear-sync-led.sh
+	statechange-slot_off.sh \
+	vdev_attach-led.sh \
+	vdev_clear-led.sh
 
 dist_noinst_DATA += %D%/README
 

@@ -1,71 +0,0 @@
-#!/bin/sh
-# shellcheck disable=SC3014,SC2154,SC2086,SC2034
-#
-# Turn off disk's enclosure slot if an I/O is hung triggering the deadman.
-#
-# It's possible for outstanding I/O to a misbehaving SCSI disk to neither
-# promptly complete or return an error. This can occur due to retry and
-# recovery actions taken by the SCSI layer, driver, or disk. When it occurs
-# the pool will be unresponsive even though there may be sufficient redundancy
-# configured to proceeded without this single disk.
-#
-# When a hung I/O is detected by the kmods it will be posted as a deadman
-# event. By default an I/O is considered to be hung after 5 minutes. This
-# value can be changed with the zfs_deadman_ziotime_ms module parameter.
-# If ZED_POWER_OFF_ENCLOSURE_SLOT_ON_DEADMAN is set the disk's enclosure
-# slot will be powered off causing the outstanding I/O to fail. The ZED
-# will then handle this like a normal disk failure and FAULT the vdev.
-#
-# We assume the user will be responsible for turning the slot back on
-# after replacing the disk.
-#
-# Note that this script requires that your enclosure be supported by the
-# Linux SCSI Enclosure services (SES) driver. The script will do nothing
-# if you have no enclosure, or if your enclosure isn't supported.
-#
-# Exit codes:
-#   0: slot successfully powered off
-#   1: enclosure not available
-#   2: ZED_POWER_OFF_ENCLOSURE_SLOT_ON_DEADMAN disabled
-#   3: System not configured to wait on deadman
-#   4: The enclosure sysfs path passed from ZFS does not exist
-#   5: Enclosure slot didn't actually turn off after we told it to
-
-[ -f "${ZED_ZEDLET_DIR}/zed.rc" ] && . "${ZED_ZEDLET_DIR}/zed.rc"
-. "${ZED_ZEDLET_DIR}/zed-functions.sh"
-
-if [ ! -d /sys/class/enclosure ] ; then
-    # No JBOD enclosure or NVMe slots
-    exit 1
-fi
-
-if [ "${ZED_POWER_OFF_ENCLOSURE_SLOT_ON_DEADMAN}" != "1" ] ; then
-    exit 2
-fi
-
-if [ "$ZEVENT_POOL_FAILMODE" != "wait" ] ; then
-    exit 3
-fi
-
-if [ ! -f "$ZEVENT_VDEV_ENC_SYSFS_PATH/power_status" ] ; then
-    exit 4
-fi
-
-# Turn off the slot and wait for sysfs to report that the slot is off.
-# It can take ~400ms on some enclosures and multiple retries may be needed.
-for i in $(seq 1 20) ; do
-    echo "off" | tee "$ZEVENT_VDEV_ENC_SYSFS_PATH/power_status"
-
-    for j in $(seq 1 5) ; do
-        if [ "$(cat $ZEVENT_VDEV_ENC_SYSFS_PATH/power_status)" == "off" ] ; then
-            break 2
-        fi
-        sleep 0.1
-    done
-done
-
-if [ "$(cat $ZEVENT_VDEV_ENC_SYSFS_PATH/power_status)" != "off" ] ; then
-    exit 5
-fi
-
-zed_log_msg "powered down slot $ZEVENT_VDEV_ENC_SYSFS_PATH for $ZEVENT_VDEV_PATH"

cmd/zed/zed.d/pool_import-led.sh (symbolic link):
@@ -0,0 +1 @@
+statechange-led.sh
@@ -1 +0,0 @@
-statechange-sync-led.sh

@@ -1,5 +1,4 @@
 #!/bin/sh
-# SPDX-License-Identifier: CDDL-1.0
 # shellcheck disable=SC2154
 #
 # CDDL HEADER START

cmd/zed/zed.d/vdev_attach-led.sh (symbolic link):
@@ -0,0 +1 @@
+statechange-led.sh
@@ -1 +0,0 @@
-statechange-sync-led.sh

cmd/zed/zed.d/vdev_clear-led.sh (symbolic link):
@@ -0,0 +1 @@
+statechange-led.sh
@@ -1 +0,0 @@
-statechange-sync-led.sh

@@ -209,10 +209,6 @@ zed_notify()
     [ "${rv}" -eq 0 ] && num_success=$((num_success + 1))
     [ "${rv}" -eq 1 ] && num_failure=$((num_failure + 1))
 
-    zed_notify_gotify "${subject}" "${pathname}"; rv=$?
-    [ "${rv}" -eq 0 ] && num_success=$((num_success + 1))
-    [ "${rv}" -eq 1 ] && num_failure=$((num_failure + 1))
-
     [ "${num_success}" -gt 0 ] && return 0
     [ "${num_failure}" -gt 0 ] && return 1
     return 2
@@ -283,11 +279,6 @@ zed_notify_email()
     if [ "${ZED_EMAIL_OPTS%@SUBJECT@*}" = "${ZED_EMAIL_OPTS}" ] ; then
         # inject subject header
         printf "Subject: %s\n" "${subject}"
-        # The following empty line is needed to separate the header from the
-        # body of the message. Otherwise programs like sendmail will skip
-        # everything up to the first empty line (or wont send an email at
-        # all) and will still exit with exit code 0
-        printf "\n"
     fi
     # output message
     cat "${pathname}"
@@ -441,9 +432,8 @@ zed_notify_slack_webhook()
         "${pathname}")"
 
     # Construct the JSON message for posting.
-    # shellcheck disable=SC2016
     #
-    msg_json="$(printf '{"text": "*%s*\\n```%s```"}' "${subject}" "${msg_body}" )"
+    msg_json="$(printf '{"text": "*%s*\\n%s"}' "${subject}" "${msg_body}" )"
 
     # Send the POST request and check for errors.
     #
@@ -634,97 +624,6 @@ zed_notify_ntfy()
 }
 
-
-# zed_notify_gotify (subject, pathname)
-#
-# Send a notification via Gotify <https://gotify.net/>.
-# The Gotify URL (ZED_GOTIFY_URL) defines a self-hosted Gotify location.
-# The Gotify application token (ZED_GOTIFY_APPTOKEN) defines a
-# Gotify application token which is associated with a message.
-# The optional Gotify priority value (ZED_GOTIFY_PRIORITY) overrides the
-# default or configured priority at the Gotify server for the application.
-#
-# Requires curl and sed executables to be installed in the standard PATH.
-#
-# References
-#   https://gotify.net/docs/index
-#
-# Arguments
-#   subject: notification subject
-#   pathname: pathname containing the notification message (OPTIONAL)
-#
-# Globals
-#   ZED_GOTIFY_URL
-#   ZED_GOTIFY_APPTOKEN
-#   ZED_GOTIFY_PRIORITY
-#
-# Return
-#   0: notification sent
-#   1: notification failed
-#   2: not configured
-#
-zed_notify_gotify()
-{
-    local subject="$1"
-    local pathname="${2:-"/dev/null"}"
-    local msg_body
-    local msg_out
-    local msg_err
-
-    [ -n "${ZED_GOTIFY_URL}" ] && [ -n "${ZED_GOTIFY_APPTOKEN}" ] || return 2
-    local url="${ZED_GOTIFY_URL}/message?token=${ZED_GOTIFY_APPTOKEN}"
-
-    if [ ! -r "${pathname}" ]; then
-        zed_log_err "gotify cannot read \"${pathname}\""
-        return 1
-    fi
-
-    zed_check_cmd "curl" "sed" || return 1
-
-    # Read the message body in.
-    #
-    msg_body="$(cat "${pathname}")"
-
-    if [ -z "${msg_body}" ]
-    then
-        msg_body=$subject
-        subject=""
-    fi
-
-    # Send the POST request and check for errors.
-    #
-    if [ -n "${ZED_GOTIFY_PRIORITY}" ]; then
-        msg_out="$( \
-            curl \
-            --form-string "title=${subject}" \
-            --form-string "message=${msg_body}" \
-            --form-string "priority=${ZED_GOTIFY_PRIORITY}" \
-            "${url}" \
-            2>/dev/null \
-            )"; rv=$?
-    else
-        msg_out="$( \
-            curl \
-            --form-string "title=${subject}" \
-            --form-string "message=${msg_body}" \
-            "${url}" \
-            2>/dev/null \
-            )"; rv=$?
-    fi
-
-    if [ "${rv}" -ne 0 ]; then
-        zed_log_err "curl exit=${rv}"
-        return 1
-    fi
-    msg_err="$(echo "${msg_out}" \
-        | sed -n -e 's/.*"errors" *:.*\[\(.*\)\].*/\1/p')"
-    if [ -n "${msg_err}" ]; then
-        zed_log_err "gotify \"${msg_err}\""
-        return 1
-    fi
-    return 0
-}
-
 
 
 # zed_rate_limit (tag, [interval])
 #
@@ -148,13 +148,6 @@ ZED_SYSLOG_SUBCLASS_EXCLUDE="history_event"
 # supports slot power control via sysfs.
 #ZED_POWER_OFF_ENCLOSURE_SLOT_ON_FAULT=1
 
-##
-# Power off the drive's slot in the enclosure if there is a hung I/O which
-# exceeds the deadman timeout. This can help prevent a single misbehaving
-# drive from rendering a redundant pool unavailable. This assumes your drive
-# enclosure fully supports slot power control via sysfs.
-#ZED_POWER_OFF_ENCLOSURE_SLOT_ON_DEADMAN=1
-
 ##
 # Ntfy topic
 # This defines which topic will receive the ntfy notification.
@@ -176,24 +169,3 @@ ZED_SYSLOG_SUBCLASS_EXCLUDE="history_event"
 # <https://docs.ntfy.sh/install/>
 # https://ntfy.sh by default; uncomment to enable an alternative service url.
 #ZED_NTFY_URL="https://ntfy.sh"
-
-##
-# Gotify server URL
-# This defines a URL that the Gotify call will be directed toward.
-# <https://gotify.net/docs/index>
-# Disabled by default; uncomment to enable.
-#ZED_GOTIFY_URL=""
-
-##
-# Gotify application token
-# This defines a Gotify application token which a message is associated with.
-# This token is generated when an application is created on the Gotify server.
-# Disabled by default; uncomment to enable.
-#ZED_GOTIFY_APPTOKEN=""
-
-##
-# Gotify priority (optional)
-# If defined, this overrides the default priority of the
-# Gotify application associated with ZED_GOTIFY_APPTOKEN.
-# Value is an integer 0 and up.
-#ZED_GOTIFY_PRIORITY=""

@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * This file is part of the ZFS Event Daemon (ZED).
  *

@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * This file is part of the ZFS Event Daemon (ZED).
  *

@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * This file is part of the ZFS Event Daemon (ZED).
  *

@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * CDDL HEADER START
  *
@@ -140,8 +139,7 @@ dev_event_nvlist(struct udev_device *dev)
      * is /dev/sda.
      */
     struct udev_device *parent_dev = udev_device_get_parent(dev);
-    if (parent_dev != NULL &&
-        (value = udev_device_get_sysattr_value(parent_dev, "size"))
+    if ((value = udev_device_get_sysattr_value(parent_dev, "size"))
         != NULL) {
         uint64_t numval = DEV_BSIZE;
 
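
For context, the call pattern in this hunk comes from libudev: a partition device's whole-disk size is read from its parent device. A self-contained sketch of the guarded lookup the left-hand side uses (the "sda1" device name is an assumption for illustration):

    #include <libudev.h>
    #include <stdio.h>

    int
    main(void)
    {
        struct udev *udev = udev_new();
        struct udev_device *dev = udev_device_new_from_subsystem_sysname(
            udev, "block", "sda1");   /* assumed device name */
        struct udev_device *parent_dev = udev_device_get_parent(dev);
        const char *value;

        if (parent_dev != NULL &&
            (value = udev_device_get_sysattr_value(parent_dev,
            "size")) != NULL)
            printf("%s 512-byte sectors\n", value);

        udev_device_unref(dev);   /* parent ref is owned by the child */
        udev_unref(udev);
        return (0);
    }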

@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * CDDL HEADER START
  *

@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * This file is part of the ZFS Event Daemon (ZED).
  *
@@ -110,7 +109,7 @@ zed_event_fini(struct zed_conf *zcp)
 static void
 _bump_event_queue_length(void)
 {
-    int zzlm, wr;
+    int zzlm = -1, wr;
     char qlen_buf[12] = {0}; /* parameter is int => max "-2147483647\n" */
     long int qlen, orig_qlen;
 

@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * This file is part of the ZFS Event Daemon (ZED).
  *

@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * This file is part of the ZFS Event Daemon (ZED).
  *
@@ -196,29 +195,37 @@ _nop(int sig)
     (void) sig;
 }
 
-static void
-wait_for_children(boolean_t do_pause, boolean_t wait)
+static void *
+_reap_children(void *arg)
 {
-    pid_t pid;
-    struct rusage usage;
-    int status;
+    (void) arg;
     struct launched_process_node node, *pnode;
+    pid_t pid;
+    int status;
+    struct rusage usage;
+    struct sigaction sa = {};
+
+    (void) sigfillset(&sa.sa_mask);
+    (void) sigdelset(&sa.sa_mask, SIGCHLD);
+    (void) pthread_sigmask(SIG_SETMASK, &sa.sa_mask, NULL);
+
+    (void) sigemptyset(&sa.sa_mask);
+    sa.sa_handler = _nop;
+    sa.sa_flags = SA_NOCLDSTOP;
+    (void) sigaction(SIGCHLD, &sa, NULL);
+
     for (_reap_children_stop = B_FALSE; !_reap_children_stop; ) {
         (void) pthread_mutex_lock(&_launched_processes_lock);
-        pid = wait4(0, &status, wait ? 0 : WNOHANG, &usage);
+        pid = wait4(0, &status, WNOHANG, &usage);
 
         if (pid == 0 || pid == (pid_t)-1) {
             (void) pthread_mutex_unlock(&_launched_processes_lock);
-            if ((pid == 0) || (errno == ECHILD)) {
-                if (do_pause)
-                    pause();
-            } else if (errno != EINTR)
+            if (pid == 0 || errno == ECHILD)
+                pause();
+            else if (errno != EINTR)
                 zed_log_msg(LOG_WARNING,
                     "Failed to wait for children: %s",
                     strerror(errno));
-            if (!do_pause)
-                return;
-
         } else {
             memset(&node, 0, sizeof (node));
             node.pid = pid;
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
static void *
|
|
||||||
_reap_children(void *arg)
|
|
||||||
{
|
|
||||||
(void) arg;
|
|
||||||
struct sigaction sa = {};
|
|
||||||
|
|
||||||
(void) sigfillset(&sa.sa_mask);
|
|
||||||
(void) sigdelset(&sa.sa_mask, SIGCHLD);
|
|
||||||
(void) pthread_sigmask(SIG_SETMASK, &sa.sa_mask, NULL);
|
|
||||||
|
|
||||||
(void) sigemptyset(&sa.sa_mask);
|
|
||||||
sa.sa_handler = _nop;
|
|
||||||
sa.sa_flags = SA_NOCLDSTOP;
|
|
||||||
(void) sigaction(SIGCHLD, &sa, NULL);
|
|
||||||
|
|
||||||
wait_for_children(B_TRUE, B_FALSE);
|
|
||||||
|
|
||||||
return (NULL);
|
return (NULL);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -317,45 +305,6 @@ zed_exec_fini(void)
|
|||||||
_reap_children_tid = (pthread_t)-1;
|
_reap_children_tid = (pthread_t)-1;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
|
||||||
* Check if the zedlet name indicates if it is a synchronous zedlet
|
|
||||||
*
|
|
||||||
* Synchronous zedlets have a "-sync-" immediately following the event name in
|
|
||||||
* their zedlet filename, like:
|
|
||||||
*
|
|
||||||
* EVENT_NAME-sync-ZEDLETNAME.sh
|
|
||||||
*
|
|
||||||
* For example, if you wanted a synchronous statechange script:
|
|
||||||
*
|
|
||||||
* statechange-sync-myzedlet.sh
|
|
||||||
*
|
|
||||||
* Synchronous zedlets are guaranteed to be the only zedlet running. No other
|
|
||||||
* zedlets may run in parallel with a synchronous zedlet. A synchronous
|
|
||||||
* zedlet will wait for all previously spawned zedlets to finish before running.
|
|
||||||
* Users should be careful to only use synchronous zedlets when needed, since
|
|
||||||
* they decrease parallelism.
|
|
||||||
*/
|
|
||||||
static boolean_t
|
|
||||||
zedlet_is_sync(const char *zedlet, const char *event)
|
|
||||||
{
|
|
||||||
const char *sync_str = "-sync-";
|
|
||||||
size_t sync_str_len;
|
|
||||||
size_t zedlet_len;
|
|
||||||
size_t event_len;
|
|
||||||
|
|
||||||
sync_str_len = strlen(sync_str);
|
|
||||||
zedlet_len = strlen(zedlet);
|
|
||||||
event_len = strlen(event);
|
|
||||||
|
|
||||||
if (event_len + sync_str_len >= zedlet_len)
|
|
||||||
return (B_FALSE);
|
|
||||||
|
|
||||||
if (strncmp(&zedlet[event_len], sync_str, sync_str_len) == 0)
|
|
||||||
return (B_TRUE);
|
|
||||||
|
|
||||||
return (B_FALSE);
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Process the event [eid] by synchronously invoking all zedlets with a
|
* Process the event [eid] by synchronously invoking all zedlets with a
|
||||||
* matching class prefix.
|
* matching class prefix.
|
||||||
@@ -418,28 +367,9 @@ zed_exec_process(uint64_t eid, const char *class, const char *subclass,
         z = zed_strings_next(zcp->zedlets)) {
         for (csp = class_strings; *csp; csp++) {
             n = strlen(*csp);
-            if ((strncmp(z, *csp, n) == 0) && !isalpha(z[n])) {
-                boolean_t is_sync = zedlet_is_sync(z, *csp);
-
-                if (is_sync) {
-                    /*
-                     * Wait for previous zedlets to
-                     * finish
-                     */
-                    wait_for_children(B_FALSE, B_TRUE);
-                }
-
+            if ((strncmp(z, *csp, n) == 0) && !isalpha(z[n]))
                 _zed_exec_fork_child(eid, zcp->zedlet_dir,
                     z, e, zcp->zevent_fd, zcp->do_foreground);
-
-                if (is_sync) {
-                    /*
-                     * Wait for sync zedlet we just launched
-                     * to finish.
-                     */
-                    wait_for_children(B_FALSE, B_TRUE);
-                }
-            }
         }
     }
     free(e);
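
The left-hand side's dispatch rule, condensed (same helpers as in the hunks above): a zedlet whose filename matches EVENT_NAME-sync-* is serialized against every other zedlet by draining already-running children before the launch and waiting again afterwards:

    boolean_t is_sync = zedlet_is_sync(z, *csp);

    if (is_sync)
        wait_for_children(B_FALSE, B_TRUE); /* drain earlier zedlets */

    _zed_exec_fork_child(eid, zcp->zedlet_dir, z, e,
        zcp->zevent_fd, zcp->do_foreground);

    if (is_sync)
        wait_for_children(B_FALSE, B_TRUE); /* wait for this one to finish */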

@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * This file is part of the ZFS Event Daemon (ZED).
  *

@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * This file is part of the ZFS Event Daemon (ZED).
  *

@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * This file is part of the ZFS Event Daemon (ZED).
  *

@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * This file is part of the ZFS Event Daemon (ZED).
  *

@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * This file is part of the ZFS Event Daemon (ZED).
  *

@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * This file is part of the ZFS Event Daemon (ZED).
  *

@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * This file is part of the ZFS Event Daemon (ZED).
  *

@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * CDDL HEADER START
  *

@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * CDDL HEADER START
  *

(File diff suppressed because it is too large.)

@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * CDDL HEADER START
  *

@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * CDDL HEADER START
  *

@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: CDDL-1.0
 /*
  * CDDL HEADER START
  *

Some files were not shown because too many files have changed in this diff.