mirror of
https://github.com/openzfs/zfs.git
synced 2025-10-01 19:56:28 +00:00
Compare commits
1564 Commits
Author | SHA1 | Date | |
---|---|---|---|
![]() |
8869caae5f | ||
![]() |
fa4d4b1f80 | ||
![]() |
32ce74ff32 | ||
![]() |
102ff2a640 | ||
![]() |
e4a407f29f | ||
![]() |
f0a95e8971 | ||
![]() |
75be5f2973 | ||
![]() |
8d4c3ee9e6 | ||
![]() |
4ff25e9013 | ||
![]() |
a44985315e | ||
![]() |
79be201806 | ||
![]() |
aecd6deeb3 | ||
![]() |
5c38029f4b | ||
![]() |
26b0f561be | ||
![]() |
c722bf8812 | ||
![]() |
0e1a53a8c0 | ||
![]() |
3e9347c9f7 | ||
![]() |
b2196fbedf | ||
![]() |
ea37c30fcb | ||
![]() |
876f705cc4 | ||
![]() |
11787965e0 | ||
![]() |
6ba51da93b | ||
![]() |
545d66204d | ||
![]() |
3387d34093 | ||
![]() |
ab8cc63c77 | ||
![]() |
ab93b4b70e | ||
![]() |
ffe93aee0a | ||
![]() |
1d2d812986 | ||
![]() |
d36684201f | ||
![]() |
45ac6045cc | ||
![]() |
faf2db3435 | ||
![]() |
d147ed7d26 | ||
![]() |
58b84289e8 | ||
![]() |
5c46baa1ce | ||
![]() |
f330b463de | ||
![]() |
4b764fb01a | ||
![]() |
f319ff3570 | ||
![]() |
3f4312a0a4 | ||
![]() |
9b772f328b | ||
![]() |
455c36156c | ||
![]() |
bc8bcfc71a | ||
![]() |
cb5f9aa582 | ||
![]() |
35f47cb4f4 | ||
![]() |
37cd30f714 | ||
![]() |
955fbc5ade | ||
![]() |
7b1cc9eb61 | ||
![]() |
654f2dcb42 | ||
![]() |
bc0b5318aa | ||
![]() |
d64711c202 | ||
![]() |
0620c979a5 | ||
![]() |
7f7c58389e | ||
![]() |
cd6db758f3 | ||
![]() |
bc4aac0395 | ||
![]() |
8f15d2e4d5 | ||
![]() |
92ca3ae56a | ||
![]() |
9ae20cf03d | ||
![]() |
a5571a0dd1 | ||
![]() |
d3429a75b0 | ||
![]() |
c6fe41cac5 | ||
![]() |
fe2f7cf6d7 | ||
![]() |
7939bad5e7 | ||
![]() |
9e5e95c24d | ||
![]() |
4b83891db0 | ||
![]() |
e29bfa5bd0 | ||
![]() |
a2424312c4 | ||
![]() |
59f8f5dfe1 | ||
![]() |
8266fa5858 | ||
![]() |
e3c3e86c04 | ||
![]() |
ced72fdd69 | ||
![]() |
b9c6b0e09b | ||
![]() |
c69b7ea6ca | ||
![]() |
ffba31c236 | ||
![]() |
dfc2c32590 | ||
![]() |
11b5c50238 | ||
![]() |
0e88a0e1ea | ||
![]() |
69b65dda8a | ||
![]() |
64d3143e82 | ||
![]() |
9acedbacee | ||
![]() |
5a8ba4520b | ||
![]() |
ccf5a8a6fc | ||
![]() |
1da2c30bed | ||
![]() |
976f765341 | ||
![]() |
ee7c362645 | ||
![]() |
14bad10f96 | ||
![]() |
e903177b56 | ||
![]() |
d247538e15 | ||
![]() |
0d54ae2880 | ||
![]() |
b6bd3228bb | ||
![]() |
e7485d04f1 | ||
![]() |
f1f74577cb | ||
![]() |
00dfa094ac | ||
![]() |
28ff57505b | ||
![]() |
94413bc75d | ||
![]() |
574eec2964 | ||
![]() |
aa6f0f878b | ||
![]() |
eecff1b4a9 | ||
![]() |
f3e49b0cf5 | ||
![]() |
3abf72b251 | ||
![]() |
14d2480708 | ||
![]() |
79a59ae787 | ||
![]() |
2d7928428b | ||
![]() |
a9410ccbd9 | ||
![]() |
2c877e8453 | ||
![]() |
dcd73069f0 | ||
![]() |
8a0e5e8b54 | ||
![]() |
96f9d271ea | ||
![]() |
b2c792778c | ||
![]() |
6bb8fe5528 | ||
![]() |
30a915efed | ||
![]() |
7b54567c1f | ||
![]() |
f65321e30c | ||
![]() |
9b0a9b410e | ||
![]() |
5061f959d1 | ||
![]() |
d151432073 | ||
![]() |
8d35a022e4 | ||
![]() |
28433c4547 | ||
![]() |
930f9cc66c | ||
![]() |
bb9225ea86 | ||
![]() |
885d929cf8 | ||
![]() |
3cfd670e74 | ||
![]() |
077269bfed | ||
![]() |
d3c1d27afd | ||
![]() |
f8bc01c79f | ||
![]() |
22671c4da4 | ||
![]() |
152e34822b | ||
![]() |
1ccae433e9 | ||
![]() |
e0e60d319c | ||
![]() |
531568f438 | ||
![]() |
7c9adc6858 | ||
![]() |
f562e0f691 | ||
![]() |
92da3e18c8 | ||
![]() |
508c546975 | ||
![]() |
6b3333de2d | ||
![]() |
1d0b94c4e7 | ||
![]() |
2fd145b578 | ||
![]() |
90a1e13df2 | ||
![]() |
ef4058fcdc | ||
![]() |
3d6ee9a68c | ||
![]() |
391e85f519 | ||
![]() |
72602f6ad9 | ||
![]() |
99a5f5d1ba | ||
![]() |
967b15b888 | ||
![]() |
1f8c39ddb2 | ||
![]() |
b270663e8a | ||
![]() |
82d6f7b047 | ||
![]() |
f7bdd84328 | ||
![]() |
611b95da18 | ||
![]() |
5c7df3bcac | ||
![]() |
c39e076f23 | ||
![]() |
2564308cb2 | ||
![]() |
e44e51f28d | ||
![]() |
e6eb03a991 | ||
![]() |
3e671f2353 | ||
![]() |
0c376d0f59 | ||
![]() |
3e004369f7 | ||
![]() |
25930cb8a1 | ||
![]() |
03592417cb | ||
![]() |
8302b6e32b | ||
![]() |
60f714e6e2 | ||
![]() |
4ae8bf406b | ||
![]() |
894edd084e | ||
![]() |
c3496b5cc6 | ||
![]() |
fb7a8503bc | ||
![]() |
a18c9edda6 | ||
![]() |
7ac5440ecf | ||
![]() |
31c4fa93bb | ||
![]() |
8ecf044d62 | ||
![]() |
2c8beeece0 | ||
![]() |
48c9b2e79d | ||
![]() |
0b6fd024a7 | ||
![]() |
0f8a1105ee | ||
![]() |
1aec627c60 | ||
![]() |
92da9e0e93 | ||
![]() |
cb5e7e097d | ||
![]() |
2957eabbef | ||
![]() |
dea0fc969b | ||
![]() |
ce9c3b4b94 | ||
![]() |
f70c85086b | ||
![]() |
10a78e2647 | ||
![]() |
b6e8db509d | ||
![]() |
fc885f308f | ||
![]() |
f23e040a37 | ||
![]() |
cf146460c1 | ||
![]() |
4bd7a2eaa5 | ||
![]() |
a8646a8186 | ||
![]() |
5a9b9c7f87 | ||
![]() |
00ce064d8f | ||
![]() |
bf38c15071 | ||
![]() |
d2b9e66b88 | ||
![]() |
e9d249d7e4 | ||
![]() |
2755e2aa60 | ||
![]() |
9292071565 | ||
![]() |
1c483cf3d0 | ||
![]() |
96d20d7d59 | ||
![]() |
cecff09faa | ||
![]() |
a7a144e655 | ||
![]() |
be1e991a1a | ||
![]() |
2669b00f13 | ||
![]() |
d7ab07dfb4 | ||
![]() |
c1e51c55f5 | ||
![]() |
b21e04e8d9 | ||
![]() |
d323fbf49c | ||
![]() |
ee2a2d941a | ||
![]() |
1b84bd1dff | ||
![]() |
fce18e04d5 | ||
![]() |
cb9742e532 | ||
![]() |
967ce75669 | ||
![]() |
3a494c6d2a | ||
![]() |
fe3b2b76cf | ||
![]() |
8de8e0df9f | ||
![]() |
2461e6f636 | ||
![]() |
d6dcae3166 | ||
![]() |
f66b57c87d | ||
![]() |
ea38787f2e | ||
![]() |
a981cb69e4 | ||
![]() |
e845be28e7 | ||
![]() |
4c2a7f85d5 | ||
![]() |
523d9d6007 | ||
![]() |
6af8db61b1 | ||
![]() |
92d3b4ee2c | ||
![]() |
ee0cb4cb89 | ||
![]() |
d411ea2e4d | ||
![]() |
c98a393cb6 | ||
![]() |
4e92aee233 | ||
![]() |
17ee0fd4fa | ||
![]() |
6d838ec0b6 | ||
![]() |
bf846dcb7d | ||
![]() |
eacf618a65 | ||
![]() |
dee62e074a | ||
![]() |
69ee01aa4b | ||
![]() |
ea076d6921 | ||
![]() |
0a2163d194 | ||
![]() |
d461a67d0a | ||
![]() |
46a4075100 | ||
![]() |
48ce292ea0 | ||
![]() |
8170eb6ebc | ||
![]() |
5e5253be84 | ||
![]() |
717213d431 | ||
![]() |
585dbbf13b | ||
![]() |
6cf17f6538 | ||
![]() |
bd27b75401 | ||
![]() |
b4ebba0e04 | ||
![]() |
238eab7dc1 | ||
![]() |
aa964ce61b | ||
![]() |
d1c88cbd4c | ||
![]() |
e3f5e317e0 | ||
![]() |
52352dd748 | ||
![]() |
c1b7bc52fe | ||
![]() |
e0ef4d2768 | ||
![]() |
66ec7fb269 | ||
![]() |
4ae931aa93 | ||
![]() |
3ff2eca0be | ||
![]() |
1987498b66 | ||
![]() |
fbfda270d5 | ||
![]() |
560e3170ef | ||
![]() |
ba227e2cc2 | ||
![]() |
bcd0430236 | ||
![]() |
0e9e2e2501 | ||
![]() |
46b82de618 | ||
![]() |
af7d609592 | ||
![]() |
b96f1a4b1f | ||
![]() |
b7f919d228 | ||
![]() |
68817d28c5 | ||
![]() |
108562344c | ||
![]() |
e0edfcbd4e | ||
![]() |
e1677d9ee1 | ||
![]() |
e8e602d987 | ||
![]() |
90011644ce | ||
![]() |
2d33c8edb6 | ||
![]() |
f03f9c9bde | ||
![]() |
08bf660ac4 | ||
![]() |
a38376b37a | ||
![]() |
1bd225ed8a | ||
![]() |
008c9666ef | ||
![]() |
f5a6dd8b70 | ||
![]() |
b3b3cd1e4f | ||
![]() |
1d482ca6e3 | ||
![]() |
5764e218ba | ||
![]() |
44e3266894 | ||
![]() |
0c94d3838d | ||
![]() |
3084336ae4 | ||
![]() |
3dfa98d013 | ||
![]() |
fa697b94e6 | ||
![]() |
00360efa35 | ||
![]() |
9392be427e | ||
![]() |
dd4e2f99f0 | ||
![]() |
589d99171f | ||
![]() |
6e7e7ea7ef | ||
![]() |
58235f52af | ||
![]() |
7b183f1918 | ||
![]() |
b0e053a10d | ||
![]() |
d1724b59dc | ||
![]() |
5ef91c2bee | ||
![]() |
906ced88df | ||
![]() |
c464f1d014 | ||
![]() |
0372def8c9 | ||
![]() |
2303775fea | ||
![]() |
55d035e866 | ||
![]() |
4653e2f7d3 | ||
![]() |
ac2e579521 | ||
![]() |
468d22d60c | ||
![]() |
8487945034 | ||
![]() |
06fa8f3f69 | ||
![]() |
087d7d80c7 | ||
![]() |
fc617645a3 | ||
![]() |
284580c878 | ||
![]() |
2a91d577b1 | ||
![]() |
b048bfa9c1 | ||
![]() |
9d76950d67 | ||
![]() |
b01d7bd32d | ||
![]() |
92157c840c | ||
![]() |
cdb4c44684 | ||
![]() |
ddf28f27c5 | ||
![]() |
5c30b24381 | ||
![]() |
a6f20250de | ||
![]() |
a387b7599c | ||
![]() |
c4c3917b2a | ||
![]() |
f454cc1723 | ||
![]() |
2e5e4bb0f8 | ||
![]() |
83fa80a550 | ||
![]() |
f0baaa329a | ||
![]() |
841be1d049 | ||
![]() |
bb740d66de | ||
![]() |
d8a33bc0a5 | ||
![]() |
ea74cdedda | ||
![]() |
e55225be3e | ||
![]() |
d5616ad34a | ||
![]() |
086105f4c4 | ||
![]() |
b6916f995e | ||
![]() |
89a8a91582 | ||
![]() |
0aa83dce99 | ||
![]() |
b55256e5bb | ||
![]() |
734eba251d | ||
![]() |
e2ba0f7643 | ||
![]() |
485a2d0112 | ||
![]() |
ae2caf9cb0 | ||
![]() |
8e318fda80 | ||
![]() |
b2284aedab | ||
![]() |
49fbdd4533 | ||
![]() |
9aae14a14a | ||
![]() |
8b9c4e643b | ||
![]() |
2ee5b51a57 | ||
![]() |
c17bdc4914 | ||
![]() |
1a8f5ad3b0 | ||
![]() |
f13d760aa8 | ||
![]() |
4800181b3b | ||
![]() |
78628a5c15 | ||
![]() |
b1ccab1721 | ||
![]() |
c5a6b4417d | ||
![]() |
246e5883bb | ||
![]() |
a7de203c86 | ||
![]() |
f85c96edf7 | ||
![]() |
f86d9af16b | ||
![]() |
634c172ee8 | ||
![]() |
a6cca8a7da | ||
![]() |
f40ab9e399 | ||
![]() |
3b18877269 | ||
![]() |
27f3d94940 | ||
![]() |
aa46cc9812 | ||
![]() |
81dec433b0 | ||
![]() |
c8fa39b46c | ||
![]() |
ba17cedf65 | ||
![]() |
63de2d2dbd | ||
![]() |
88ec6c4f40 | ||
![]() |
d947b9aedd | ||
![]() |
1ef706c4ad | ||
![]() |
1b4826b9a2 | ||
![]() |
7031a48c70 | ||
![]() |
37a3e26552 | ||
![]() |
38c3a8be83 | ||
![]() |
6afb405d96 | ||
![]() |
8f2c2dea3c | ||
![]() |
cb49e7701f | ||
![]() |
5f5321effa | ||
![]() |
1d8f625233 | ||
![]() |
8d1489735b | ||
![]() |
ba03054c83 | ||
![]() |
155847c72d | ||
![]() |
4866c2fabf | ||
![]() |
131df3bbf2 | ||
![]() |
189dc26296 | ||
![]() |
5ab601771c | ||
![]() |
ab9bb193f9 | ||
![]() |
a497c5fc8b | ||
![]() |
8f08dbfbe1 | ||
![]() |
88e3885cf4 | ||
![]() |
09fc7bb47e | ||
![]() |
78a7c78bdf | ||
![]() |
b14b3e3985 | ||
![]() |
c050b7315d | ||
![]() |
7bb13950b4 | ||
![]() |
c967faf19e | ||
![]() |
7c4ff2a051 | ||
![]() |
6f6c504700 | ||
![]() |
9cb9a59e1c | ||
![]() |
20705a8430 | ||
![]() |
2a8d9d9607 | ||
![]() |
7be9fa259e | ||
![]() |
5b0c27cd14 | ||
![]() |
029c4ae03a | ||
![]() |
047803e906 | ||
![]() |
87f8bf6b0c | ||
![]() |
301da593ad | ||
![]() |
367d34b3aa | ||
![]() |
11ca12dbd3 | ||
![]() |
75e921da6f | ||
![]() |
5b29e70ae1 | ||
![]() |
30cc2331f4 | ||
![]() |
dd2a46b5e6 | ||
![]() |
4abc21b28c | ||
![]() |
50d87fed6a | ||
![]() |
240fc4a6d1 | ||
![]() |
a0e62718cf | ||
![]() |
9611dfdc70 | ||
![]() |
885f87fa3e | ||
![]() |
fd018248d5 | ||
![]() |
ef81812726 | ||
![]() |
e759a86fa5 | ||
![]() |
1d4505d7a1 | ||
![]() |
b386bf87c1 | ||
![]() |
73494f3352 | ||
![]() |
530ddcd5f1 | ||
![]() |
94a3fabcb0 | ||
![]() |
3862ebbf1f | ||
![]() |
45e9b54e9e | ||
![]() |
d28d2e3007 | ||
![]() |
5b5a514955 | ||
![]() |
9250403ba6 | ||
![]() |
94b9cbbe1e | ||
![]() |
676b7ef104 | ||
![]() |
83fa051ceb | ||
![]() |
d033f26765 | ||
![]() |
3cd9934a48 | ||
![]() |
6e89095873 | ||
![]() |
cb4b854838 | ||
![]() |
f69631992d | ||
![]() |
21850f519b | ||
![]() |
f5312d2996 | ||
![]() |
a8847a7e4f | ||
![]() |
bf754d6010 | ||
![]() |
44dc09614e | ||
![]() |
9fd7401d75 | ||
![]() |
ff28d9fc6f | ||
![]() |
f8d29099e7 | ||
![]() |
f83431b3bd | ||
![]() |
c404ffb446 | ||
![]() |
7d8dd8d9a5 | ||
![]() |
4eafa9e5e8 | ||
![]() |
137045be98 | ||
![]() |
eb9098ed47 | ||
![]() |
201d262949 | ||
![]() |
1b495eeab3 | ||
![]() |
084531bcce | ||
![]() |
0ea44e576b | ||
![]() |
09f4dd06c3 | ||
![]() |
13ec35ce3b | ||
![]() |
0433523ca2 | ||
![]() |
4ddaf45ba4 | ||
![]() |
41823a0ede | ||
![]() |
62a9d372f8 | ||
![]() |
fe674998bb | ||
![]() |
57f192fcaa | ||
![]() |
fc460bfbaf | ||
![]() |
a44f423b00 | ||
![]() |
72c0fde609 | ||
![]() |
60031906b4 | ||
![]() |
2c897e0666 | ||
![]() |
4581c4fcbe | ||
![]() |
cc9180d338 | ||
![]() |
4afec534cc | ||
![]() |
f65fc98a8c | ||
![]() |
7f05fface3 | ||
![]() |
88b0594f93 | ||
![]() |
ece35e0e66 | ||
![]() |
ab3db6d15d | ||
![]() |
701093c44f | ||
![]() |
a5fb5c55be | ||
![]() |
d7d2744711 | ||
![]() |
523e3adac9 | ||
![]() |
ecc44c45cb | ||
![]() |
ee8803adc2 | ||
![]() |
682c5f6a0a | ||
![]() |
c43df8bbbf | ||
![]() |
6a2f7b3844 | ||
![]() |
68473c4fd8 | ||
![]() |
d4a5a7e3aa | ||
![]() |
db62886d98 | ||
![]() |
b901d4a0b6 | ||
![]() |
b8c73ab780 | ||
![]() |
d2147de319 | ||
![]() |
c2458ba921 | ||
![]() |
88020b993c | ||
![]() |
40496514b8 | ||
![]() |
2cccbacefc | ||
![]() |
0be1da26cb | ||
![]() |
2ca91ba3cf | ||
![]() |
7ef6b70e96 | ||
![]() |
390f6c1190 | ||
![]() |
21205f6488 | ||
![]() |
387ed5ca41 | ||
![]() |
1e32c57893 | ||
![]() |
12f0baf348 | ||
![]() |
34205715e1 | ||
![]() |
26e38aec46 | ||
![]() |
dfdc5ea993 | ||
![]() |
59a77512ca | ||
![]() |
4acce93fde | ||
![]() |
722cf7bfd5 | ||
![]() |
b8c0c154ad | ||
![]() |
919bc4d10e | ||
![]() |
788e69ca5d | ||
![]() |
aaeb7fa3d0 | ||
![]() |
fe44c5ae27 | ||
![]() |
2aa3fbe761 | ||
![]() |
fae4c664a4 | ||
![]() |
b8e09c7007 | ||
![]() |
18c67d2418 | ||
![]() |
3a445f2ef5 | ||
![]() |
dc0324bfa9 | ||
![]() |
939e0237c5 | ||
![]() |
50cbb14641 | ||
![]() |
ee3bde9dad | ||
![]() |
c02e1cf055 | ||
![]() |
e94549d868 | ||
![]() |
478b09577a | ||
![]() |
d35f9f2e84 | ||
![]() |
d6b4110d71 | ||
![]() |
8dc15ef4b3 | ||
![]() |
3c2267a873 | ||
![]() |
25238baad5 | ||
![]() |
54126fdb5b | ||
![]() |
9dd5fe1095 | ||
![]() |
a153397f41 | ||
![]() |
b66d910113 | ||
![]() |
8bf1e83eef | ||
![]() |
c4e5fa5e17 | ||
![]() |
03b7cfdef3 | ||
![]() |
779c5a5deb | ||
![]() |
89f796dec6 | ||
![]() |
c37a2ddaaa | ||
![]() |
1acd246964 | ||
![]() |
219a89cbbf | ||
![]() |
f00a57a786 | ||
![]() |
e5ac7786bd | ||
![]() |
ab7cbbe789 | ||
![]() |
830a531249 | ||
![]() |
882a809983 | ||
![]() |
c6442bd3b6 | ||
![]() |
acda137d8c | ||
![]() |
22259fb24d | ||
![]() |
ff6266ee9b | ||
![]() |
586304ac44 | ||
![]() |
46e06feded | ||
![]() |
fbea92432a | ||
![]() |
76f57ab9f7 | ||
![]() |
ecc0970e3e | ||
![]() |
6c9b4f18d3 | ||
![]() |
19a04e5ad2 | ||
![]() |
e0039c7057 | ||
![]() |
0de8ae56f7 | ||
![]() |
ba00a6f9a3 | ||
![]() |
903895ea5f | ||
![]() |
a44eaf1690 | ||
![]() |
e8b333e4d3 | ||
![]() |
44446dccdb | ||
![]() |
2507db612d | ||
![]() |
a01504b35c | ||
![]() |
c33a55b0c2 | ||
![]() |
0e020bf3e1 | ||
![]() |
4b4e346b9f | ||
![]() |
1cd2419ece | ||
![]() |
654ade8ca2 | ||
![]() |
ae00c807dc | ||
![]() |
027b3e06ed | ||
![]() |
6e3c109bc0 | ||
![]() |
3e9ba0f223 | ||
![]() |
c8a326aab7 | ||
![]() |
f29dcc25c7 | ||
![]() |
0ffa6f3464 | ||
![]() |
d2b0ca953f | ||
![]() |
d0a91b9f88 | ||
![]() |
b3b0ce64d5 | ||
![]() |
38c0324c0f | ||
![]() |
ae1d11882d | ||
![]() |
225e76cd7d | ||
![]() |
9a81484e35 | ||
![]() |
d02257c280 | ||
![]() |
d76d79fd27 | ||
![]() |
457f8b76e7 | ||
![]() |
49a377aa30 | ||
![]() |
0ca82c5680 | ||
![]() |
534688948c | ||
![]() |
ffe2112795 | ||
![]() |
e08e832b10 | ||
![]() |
0d6306be8c | ||
![]() |
ff3df1211c | ||
![]() |
fd6e8c1d2a | ||
![]() |
309ce6303f | ||
![]() |
1ee251bdde | ||
![]() |
483087b06f | ||
![]() |
648873f020 | ||
![]() |
de2e9a5c6b | ||
![]() |
3462f3bd50 | ||
![]() |
8dc452d907 | ||
![]() |
46c4f2ce0b | ||
![]() |
a60ed3822b | ||
![]() |
178682506f | ||
![]() |
25eb538778 | ||
![]() |
1c9a4c8cb4 | ||
![]() |
3a0a142f1c | ||
![]() |
57fc5971f6 | ||
![]() |
4a7a0a0290 | ||
![]() |
187f931372 | ||
![]() |
5945676bcc | ||
![]() |
7b6e9675da | ||
![]() |
f38e2d239f | ||
![]() |
60c202cca4 | ||
![]() |
e17a9698fa | ||
![]() |
5e726779f5 | ||
![]() |
5a2333b10f | ||
![]() |
c8aed9f973 | ||
![]() |
b16e096198 | ||
![]() |
91bd12dfeb | ||
![]() |
1c7d4b4c94 | ||
![]() |
db2354534d | ||
![]() |
673efbbf5d | ||
![]() |
acb6e71eda | ||
![]() |
3c650bec15 | ||
![]() |
e7425ae624 | ||
![]() |
63bafe60ec | ||
![]() |
ae93aeb849 | ||
![]() |
6187b19434 | ||
![]() |
2bf1520211 | ||
![]() |
c480e06d88 | ||
![]() |
e5d1f68167 | ||
![]() |
94a03dd1e4 | ||
![]() |
2e4e092822 | ||
![]() |
aefc2da8a5 | ||
![]() |
152ae5c9bc | ||
![]() |
96f382d113 | ||
![]() |
21cba06bef | ||
![]() |
b2f6de7b58 | ||
![]() |
a9851ea3dd | ||
![]() |
fba6a90696 | ||
![]() |
b85c564161 | ||
![]() |
2596a75306 | ||
![]() |
c7e47b3d9a | ||
![]() |
0a001f3088 | ||
![]() |
27e8f56102 | ||
![]() |
38a04f0a7c | ||
![]() |
e0bf43d64e | ||
![]() |
e7b64159f8 | ||
![]() |
c642e985e5 | ||
![]() |
48dfe39747 | ||
![]() |
97ba7c210c | ||
![]() |
9f3f80c0cc | ||
![]() |
34efa8e2d8 | ||
![]() |
7e4be92750 | ||
![]() |
7bf525530a | ||
![]() |
b4e4cbeb20 | ||
![]() |
efeb60b86a | ||
![]() |
4319e71402 | ||
![]() |
cefef28e98 | ||
![]() |
75dda92dc3 | ||
![]() |
65a94ffa80 | ||
![]() |
ca0141f325 | ||
![]() |
ab777f436c | ||
![]() |
e8f0aa143e | ||
![]() |
c59d5495fe | ||
![]() |
0b4dcbe5b4 | ||
![]() |
995a3a61fd | ||
![]() |
87ca6ba9a8 | ||
![]() |
437227a9cc | ||
![]() |
3a9fca901b | ||
![]() |
45addf7605 | ||
![]() |
cc9e36a42e | ||
![]() |
c204c3f340 | ||
![]() |
42ce4b11e7 | ||
![]() |
4ebe674d91 | ||
![]() |
0d77e738e6 | ||
![]() |
3d0175d10e | ||
![]() |
17a2b35be5 | ||
![]() |
224393a321 | ||
![]() |
412105977c | ||
![]() |
d34d4f97a8 | ||
![]() |
86737c5927 | ||
![]() |
e8cbb5952d | ||
![]() |
0cf14bf4b5 | ||
![]() |
1c7f2f6a50 | ||
![]() |
df3b9d881b | ||
![]() |
d6b8c17f1d | ||
![]() |
bc96b80550 | ||
![]() |
20232ecfaa | ||
![]() |
3cf2bfa570 | ||
![]() |
141368a4b6 | ||
![]() |
7e957fde73 | ||
![]() |
d6cb544669 | ||
![]() |
c84a37ae93 | ||
![]() |
5591505299 | ||
![]() |
b1958b531b | ||
![]() |
834b90fb81 | ||
![]() |
6f50f8e16b | ||
![]() |
ac8837d4d6 | ||
![]() |
d7ab2816ee | ||
![]() |
ab1b87e747 | ||
![]() |
b052035990 | ||
![]() |
b2ca5105b7 | ||
![]() |
05a7a9594e | ||
![]() |
48d1be254f | ||
![]() |
832f66b218 | ||
![]() |
ccc420acd5 | ||
![]() |
ed7d224ee1 | ||
![]() |
4b5fbf70f2 | ||
![]() |
d40d40913d | ||
![]() |
9c03b22f8f | ||
![]() |
56ab541521 | ||
![]() |
78e9e987e1 | ||
![]() |
410287f7f8 | ||
![]() |
e419a63bf4 | ||
![]() |
1d84c9eb66 | ||
![]() |
d565835c47 | ||
![]() |
5520fdde29 | ||
![]() |
9122772b2a | ||
![]() |
53b77c32af | ||
![]() |
3014dcb762 | ||
![]() |
2aafc2ea1f | ||
![]() |
aeb79f2b29 | ||
![]() |
80645d6582 | ||
![]() |
f9d4f1b480 | ||
![]() |
5d01243964 | ||
![]() |
45a4a94b64 | ||
![]() |
7a965b9bc8 | ||
![]() |
01852ffbf8 | ||
![]() |
b32d48a625 | ||
![]() |
522f2629c8 | ||
![]() |
4bf6a2ab87 | ||
![]() |
e8ede2ba78 | ||
![]() |
c22d56e3ed | ||
![]() |
4c9b59e541 | ||
![]() |
305d0a5fba | ||
![]() |
d70b2c0687 | ||
![]() |
8fc0beb66b | ||
![]() |
b7e43d6e7f | ||
![]() |
a83762b3f4 | ||
![]() |
a877b39624 | ||
![]() |
0ae4460c61 | ||
![]() |
1c35206124 | ||
![]() |
4d469acd17 | ||
![]() |
ddf5f34f06 | ||
![]() |
f245541e24 | ||
![]() |
fa330646b9 | ||
![]() |
7cdfda3934 | ||
![]() |
0807423369 | ||
![]() |
5df65ca9c1 | ||
![]() |
696d7a71a0 | ||
![]() |
1cb46d9d1a | ||
![]() |
c57d268a78 | ||
![]() |
a298801426 | ||
![]() |
0a61e51736 | ||
![]() |
d4e5538014 | ||
![]() |
7a02229293 | ||
![]() |
df9795f2d7 | ||
![]() |
f6661d1153 | ||
![]() |
dcb8e5ec7c | ||
![]() |
1bf93713d8 | ||
![]() |
ec48dd0976 | ||
![]() |
8d9cb04ea8 | ||
![]() |
0a00804585 | ||
![]() |
de10132c34 | ||
![]() |
2de203163d | ||
![]() |
b32b6ac6e5 | ||
![]() |
8396c84346 | ||
![]() |
4059455cda | ||
![]() |
7736b68376 | ||
![]() |
6e625bd7bd | ||
![]() |
f95d76cc28 | ||
![]() |
b3458270b5 | ||
![]() |
2c84b59e73 | ||
![]() |
536a0a8a84 | ||
![]() |
940f5d5658 | ||
![]() |
a5b3a87030 | ||
![]() |
80d7f0f98e | ||
![]() |
06c34465b7 | ||
![]() |
a817992559 | ||
![]() |
54af0088fb | ||
![]() |
f4c4df1638 | ||
![]() |
3008e691a2 | ||
![]() |
6e3b863df3 | ||
![]() |
d69bb93c9f | ||
![]() |
30a2907ce9 | ||
![]() |
6a00b01385 | ||
![]() |
0f15852981 | ||
![]() |
cde79fe11a | ||
![]() |
1af510b550 | ||
![]() |
e055f0e053 | ||
![]() |
e6713cfd54 | ||
![]() |
79c307def9 | ||
![]() |
72d3fa215f | ||
![]() |
df35eab0bf | ||
![]() |
432f6e9eec | ||
![]() |
846b598519 | ||
![]() |
95d85f032f | ||
![]() |
7cc89f83ff | ||
![]() |
a1832d1ecb | ||
![]() |
d1a07741f8 | ||
![]() |
6374171bb4 | ||
![]() |
9b6f93a72f | ||
![]() |
6a17061213 | ||
![]() |
bbc52ed501 | ||
![]() |
ddf5122a29 | ||
![]() |
43bf9a712b | ||
![]() |
afcc0fb0fa | ||
![]() |
96b0785f52 | ||
![]() |
9a1c7240ba | ||
![]() |
230bc538cb | ||
![]() |
9914684d36 | ||
![]() |
733317966f | ||
![]() |
eb73000dbb | ||
![]() |
a987057c67 | ||
![]() |
c9e8d0e0b5 | ||
![]() |
2bba420245 | ||
![]() |
99c143a5a1 | ||
![]() |
ed048fdc5b | ||
![]() |
ec6ba977b7 | ||
![]() |
6a28491f8e | ||
![]() |
9f5c9af77c | ||
![]() |
1a64c06ec0 | ||
![]() |
72be1f4062 | ||
![]() |
f3d30f1ce0 | ||
![]() |
b545b07b2f | ||
![]() |
d60d4ad809 | ||
![]() |
6840e3b18b | ||
![]() |
3d37b1d6d4 | ||
![]() |
67b0c883df | ||
![]() |
583e2e25b9 | ||
![]() |
f07485c46e | ||
![]() |
1b522c4583 | ||
![]() |
eb230c789a | ||
![]() |
d4bbe2ff38 | ||
![]() |
714d7666e5 | ||
![]() |
cf006e3496 | ||
![]() |
7af642af4d | ||
![]() |
6a1d8a9cf0 | ||
![]() |
257e40f9d9 | ||
![]() |
525f06b5f6 | ||
![]() |
f70ffacdfc | ||
![]() |
8e002ee26e | ||
![]() |
dc6af4a4b5 | ||
![]() |
92f7ec6075 | ||
![]() |
efc293e371 | ||
![]() |
147c82bd5e | ||
![]() |
609559e5b9 | ||
![]() |
233bed67a8 | ||
![]() |
02f4b63db9 | ||
![]() |
2b069768ab | ||
![]() |
29c9e6c324 | ||
![]() |
ec0209418f | ||
![]() |
90af1e83e8 | ||
![]() |
ac04407ffe | ||
![]() |
bca9b64e7b | ||
![]() |
c4d1a19b33 | ||
![]() |
5cb3e2861e | ||
![]() |
4999f49513 | ||
![]() |
a10e552b99 | ||
![]() |
1713aa7b4d | ||
![]() |
308f7c2f14 | ||
![]() |
63253dbf4f | ||
![]() |
88433e640d | ||
![]() |
8be2f4c3d2 | ||
![]() |
5c67820265 | ||
![]() |
b109925820 | ||
![]() |
17dd66deda | ||
![]() |
82ff9aafd6 | ||
![]() |
d4d79451cb | ||
![]() |
4a4f7b019f | ||
![]() |
bf8c61f489 | ||
![]() |
b3b7491615 | ||
![]() |
92fca1c2d0 | ||
![]() |
50b32cb925 | ||
![]() |
73866cf346 | ||
![]() |
6be8bf5552 | ||
![]() |
2420ee6e12 | ||
![]() |
34118eac06 | ||
![]() |
cb36f4f352 | ||
![]() |
a537d90734 | ||
![]() |
a9c94bea9f | ||
![]() |
f62e6e1f98 | ||
![]() |
d3c12383c9 | ||
![]() |
522816498c | ||
![]() |
dd0c08f9c6 | ||
![]() |
e119483a95 | ||
![]() |
b4d81b1a6a | ||
![]() |
5eede0d5fd | ||
![]() |
ba2209ec9e | ||
![]() |
9e15877dfb | ||
![]() |
b69bebb535 | ||
![]() |
5b9e695392 | ||
![]() |
7a5b4355e2 | ||
![]() |
2b7d9a7863 | ||
![]() |
b3f4e4e1ec | ||
![]() |
bbe8512a93 | ||
![]() |
a2c4e95cfd | ||
![]() |
8e6a9aabb1 | ||
![]() |
816d2b2bfc | ||
![]() |
3f8fd3cae0 | ||
![]() |
db40fe4cf6 | ||
![]() |
f0ad031cd9 | ||
![]() |
06a7b123ac | ||
![]() |
77a797a382 | ||
![]() |
a60e15d6b9 | ||
![]() |
0d2707815d | ||
![]() |
a1902f4950 | ||
![]() |
cd69ba3d49 | ||
![]() |
cbb9ef0a4c | ||
![]() |
592f38900d | ||
![]() |
27e9cb5f80 | ||
![]() |
f4aeb23f52 | ||
![]() |
0ba5f503c5 | ||
![]() |
4d686c3da5 | ||
![]() |
d17ab631a9 | ||
![]() |
d63f5d7e50 | ||
![]() |
2b131d7345 | ||
![]() |
db2b1fdb79 | ||
![]() |
bdf4d6be1d | ||
![]() |
5807de90a1 | ||
![]() |
fb432660c3 | ||
![]() |
f2f4ada240 | ||
![]() |
963e6c9f3f | ||
![]() |
83f359245a | ||
![]() |
d2ccc21552 | ||
![]() |
244ea5c488 | ||
![]() |
d06de4f007 | ||
![]() |
2633075e09 | ||
![]() |
3abffc8781 | ||
![]() |
f5236fe47a | ||
![]() |
0b741a0351 | ||
![]() |
22619523f6 | ||
![]() |
7e98d30f46 | ||
![]() |
e95b732e49 | ||
![]() |
767b37019f | ||
![]() |
9c56b8ec78 | ||
![]() |
6caff8447d | ||
![]() |
ed0db1cc8b | ||
![]() |
cf6e8b218d | ||
![]() |
b0bf14cdb5 | ||
![]() |
f87fe67b44 | ||
![]() |
8041b2f019 | ||
![]() |
3ae05e34e5 | ||
![]() |
5b9f3b7664 | ||
![]() |
aef452f108 | ||
![]() |
24e6585e76 | ||
![]() |
2558518c5d | ||
![]() |
7ceb9ad630 | ||
![]() |
b38fccc646 | ||
![]() |
5536c0dee2 | ||
![]() |
0ccd4b9d01 | ||
![]() |
dea8fabf73 | ||
![]() |
1f5bf91a85 | ||
![]() |
bd949b10be | ||
![]() |
cbcb522439 | ||
![]() |
90a02d7063 | ||
![]() |
02a9f7fed7 | ||
![]() |
dab810014e | ||
![]() |
959e963c81 | ||
![]() |
4e6b3f7e1d | ||
![]() |
eb2b824bde | ||
![]() |
5cbdd5ea4f | ||
![]() |
cad4c0ef1a | ||
![]() |
443abfc71d | ||
![]() |
aa15b60e58 | ||
![]() |
6c7d41a643 | ||
![]() |
670147be53 | ||
![]() |
88aab1d2d0 | ||
![]() |
9b9a3934ad | ||
![]() |
6c82951d11 | ||
![]() |
e9f51ebd94 | ||
![]() |
8d4ad5adc7 | ||
![]() |
46ebd0af8a | ||
![]() |
1fdcb653bc | ||
![]() |
cdd53fea1e | ||
![]() |
c8184d714b | ||
![]() |
c092bddfe7 | ||
![]() |
d60debbf59 | ||
![]() |
c21dc56ea3 | ||
![]() |
ec580bc520 | ||
![]() |
d4b5517ef9 | ||
![]() |
d54d0fff39 | ||
![]() |
e26b3771ee | ||
![]() |
ed87d456e4 | ||
![]() |
1a3e32e6a2 | ||
![]() |
62e7d3c89e | ||
![]() |
2ed1aebaf6 | ||
![]() |
7ddc1f737f | ||
![]() |
2fc646160f | ||
![]() |
55427add3c | ||
![]() |
c7ada64bb6 | ||
![]() |
82f281ad99 | ||
![]() |
a1be921673 | ||
![]() |
dbe07928ba | ||
![]() |
fb6d8cf229 | ||
![]() |
6657f89eca | ||
![]() |
9dfc5c4a0c | ||
![]() |
37275fd109 | ||
![]() |
aea42e1379 | ||
![]() |
1147a27978 | ||
![]() |
ab6d9bd89a | ||
![]() |
dc91e74524 | ||
![]() |
5de3ac2236 | ||
![]() |
393b7ad695 | ||
![]() |
41902c8e6d | ||
![]() |
f2ebbe46f6 | ||
![]() |
7ca7bb7fd7 | ||
![]() |
e951dba48a | ||
![]() |
b409892ae5 | ||
![]() |
4ee66cdf4e | ||
![]() |
a7fc4c85e3 | ||
![]() |
c87cb22ba9 | ||
![]() |
f7d8b13336 | ||
![]() |
398e675f58 | ||
![]() |
cbd95a950a | ||
![]() |
a10faf5ce6 | ||
![]() |
156a64161b | ||
![]() |
4367312760 | ||
![]() |
326040b285 | ||
![]() |
9ffe441361 | ||
![]() |
f72e081fbf | ||
![]() |
fd51786f86 | ||
![]() |
5f220c62e1 | ||
![]() |
49f3ce3385 | ||
![]() |
c98295eed2 | ||
![]() |
121a2d3354 | ||
![]() |
20c8bdd85e | ||
![]() |
4de260efe3 | ||
![]() |
b558f0a9d6 | ||
![]() |
23a489a411 | ||
![]() |
a72751a342 | ||
![]() |
4e714c0be1 | ||
![]() |
ae512620d0 | ||
![]() |
f39241aeb3 | ||
![]() |
10de12e9ed | ||
![]() |
1291c46ea4 | ||
![]() |
94f1e56e41 | ||
![]() |
4ed91dc26e | ||
![]() |
57249bcddc | ||
![]() |
4185179190 | ||
![]() |
a301dc364c | ||
![]() |
e2357561b9 | ||
![]() |
5137c132a5 | ||
![]() |
ae22044da9 | ||
![]() |
01c8efdd59 | ||
![]() |
6b95031f56 | ||
![]() |
800d59d577 | ||
![]() |
02c5aa9b09 | ||
![]() |
8865dfbcaa | ||
![]() |
d0aa9dbccf | ||
![]() |
34906f8bbe | ||
![]() |
708be0f415 | ||
![]() |
7572e8ca04 | ||
![]() |
08648cf0da | ||
![]() |
efbef9e6cc | ||
![]() |
fec16b93c4 | ||
![]() |
d0d7c0d8f9 | ||
![]() |
a043b60f1e | ||
![]() |
e675852bc1 | ||
![]() |
3c941d1818 | ||
![]() |
fa99d9cd9c | ||
![]() |
1ea8c59441 | ||
![]() |
3974ef045e | ||
![]() |
e7b451941b | ||
![]() |
91c46d4399 | ||
![]() |
0a543db371 | ||
![]() |
515c4dd213 | ||
![]() |
adda768e3e | ||
![]() |
cc38691534 | ||
![]() |
b64afa41d5 | ||
![]() |
eced2e2f1e | ||
![]() |
f625d038d2 | ||
![]() |
89acef992b | ||
![]() |
975a13259b | ||
![]() |
abec7dcd30 | ||
![]() |
136c053211 | ||
![]() |
41ae864b69 | ||
![]() |
1ede0c716b | ||
![]() |
414acbd37e | ||
![]() |
3400127a75 | ||
![]() |
af5dbed319 | ||
![]() |
a0f3c8aaf1 | ||
![]() |
2dff7527d4 | ||
![]() |
04bae5ec95 | ||
![]() |
8f1b7a6fa6 | ||
![]() |
645b833079 | ||
![]() |
8fd3a5d02f | ||
![]() |
051460b8b2 | ||
![]() |
2152c405ba | ||
![]() |
dec697ad68 | ||
![]() |
394800200e | ||
![]() |
4429ad9276 | ||
![]() |
7ac00d3c26 | ||
![]() |
a6edc0adb2 | ||
![]() |
c3f2f1aa2d | ||
![]() |
b28461b7c6 | ||
![]() |
db499e68f9 | ||
![]() |
4840f023af | ||
![]() |
21bc066ece | ||
![]() |
317b31eedb | ||
![]() |
5044c4e3ff | ||
![]() |
67d13998b3 | ||
![]() |
1f940de072 | ||
![]() |
87d81d1d13 | ||
![]() |
4036b8d027 | ||
![]() |
c346068e5e | ||
![]() |
7e52795aad | ||
![]() |
cdae59e153 | ||
![]() |
9b43d7ba85 | ||
![]() |
c183d164aa | ||
![]() |
f4f156157d | ||
![]() |
9f83eec039 | ||
![]() |
26d49fec5f | ||
![]() |
f75574cbaa | ||
![]() |
cd3e6b4f4c | ||
![]() |
35bf258485 | ||
![]() |
454c0b0e46 | ||
![]() |
cf60db6ebe | ||
![]() |
90ba19eb7b | ||
![]() |
c6da985e28 | ||
![]() |
4725e543be | ||
![]() |
f22b110f60 | ||
![]() |
b181b2e604 | ||
![]() |
d7605ae77b | ||
![]() |
b613709c46 | ||
![]() |
c9c838aa1f | ||
![]() |
cac416f106 | ||
![]() |
a100a195fa | ||
![]() |
e2035cdbf7 | ||
![]() |
1bf649cb0a | ||
![]() |
bc27c49404 | ||
![]() |
44f337be30 | ||
![]() |
e5ddecd1a7 | ||
![]() |
e5e2a5a3b8 | ||
![]() |
d98973dbdd | ||
![]() |
997f85b4d3 | ||
![]() |
9e63631dea | ||
![]() |
162cc80b81 | ||
![]() |
f07389d3ad | ||
![]() |
aa5445c28b | ||
![]() |
5e5fd0a178 | ||
![]() |
eeca9a91d6 | ||
![]() |
76d1dde94c | ||
![]() |
ba9f587a77 | ||
![]() |
03987f71e3 | ||
![]() |
c13400c9a2 | ||
![]() |
99741bde59 | ||
![]() |
30c4eba4ea | ||
![]() |
66929f6829 | ||
![]() |
ea2862cdda | ||
![]() |
b21b967bd5 | ||
![]() |
b6bbaa8372 | ||
![]() |
756e10b0a1 | ||
![]() |
fa480fe5ba | ||
![]() |
ca678bc0bc | ||
![]() |
6097a7ba8b | ||
![]() |
e3120f73d0 | ||
![]() |
917ff75e95 | ||
![]() |
a9a4290173 | ||
![]() |
b12738182c | ||
![]() |
39be46f43f | ||
![]() |
2553f94c42 | ||
![]() |
cfb96c772b | ||
![]() |
c0aab8b8f9 | ||
![]() |
b1e46f869e | ||
![]() |
e39e20b6dc | ||
![]() |
0c8eb974ff | ||
![]() |
b403427624 | ||
![]() |
a89d209bb6 | ||
![]() |
8cd8ccca53 | ||
![]() |
c6be6ce175 | ||
![]() |
72fd834c47 | ||
![]() |
df2169d141 | ||
![]() |
06a196020e | ||
![]() |
c4a13ba483 | ||
![]() |
867178ae1d | ||
![]() |
f3b85d706b | ||
![]() |
390b448726 | ||
![]() |
df04efe321 | ||
![]() |
f68bde7236 | ||
![]() |
493fcce9be | ||
![]() |
4616b96a64 | ||
![]() |
80cc516295 | ||
![]() |
bf8f72359d | ||
![]() |
102b468b5e | ||
![]() |
c28f94f32e | ||
![]() |
f1b368359b | ||
![]() |
2c01cae8b9 | ||
![]() |
5c4a4f82c8 | ||
![]() |
c9d8f6c59a | ||
![]() |
45e23abed5 | ||
![]() |
ef08a4d406 | ||
![]() |
90ff732358 | ||
![]() |
8f2f6cd2ac | ||
![]() |
c00c085bfb | ||
![]() |
5600dff0ef | ||
![]() |
af4da5ccf2 | ||
![]() |
d6a3d3f12a | ||
![]() |
5720b00632 | ||
![]() |
d961954688 | ||
![]() |
406562c563 | ||
![]() |
2cffddd405 | ||
![]() |
9029278dde | ||
![]() |
5ee0f9c649 | ||
![]() |
3bad70040a | ||
![]() |
c8f694fe39 | ||
![]() |
8e414fcdf4 | ||
![]() |
909006049f | ||
![]() |
5973854153 | ||
![]() |
0cb1ef60ae | ||
![]() |
5c4cc21fd4 | ||
![]() |
86e91c030c | ||
![]() |
d3bafe4554 | ||
![]() |
e0bd8118d0 | ||
![]() |
a0635ae731 | ||
![]() |
6cc93ccde7 | ||
![]() |
79c6dffa6b | ||
![]() |
d0d2733204 | ||
![]() |
a5a725440b | ||
![]() |
cbe882298e | ||
![]() |
229b9f4ed0 | ||
![]() |
0823388752 | ||
![]() |
6dccdf501e | ||
![]() |
a0d3fe72bf | ||
![]() |
06e25f9c4b | ||
![]() |
2e6b3c4d94 | ||
![]() |
7a2e54b7d3 | ||
![]() |
7692d86de4 | ||
![]() |
84980ee0e6 | ||
![]() |
386d6a7533 | ||
![]() |
a41d0b29a9 | ||
![]() |
0c11f7c96f | ||
![]() |
8161b73272 | ||
![]() |
401c3563d4 | ||
![]() |
692f0daba3 | ||
![]() |
0cbf135293 | ||
![]() |
c3fd7a5217 | ||
![]() |
9ad362c1de | ||
![]() |
3965c9ba38 | ||
![]() |
78e8c1f844 | ||
![]() |
884a48d991 | ||
![]() |
aeb33776f5 | ||
![]() |
fb27698cfc | ||
![]() |
e7af89d972 | ||
![]() |
dac0bae561 | ||
![]() |
a4bf6baaeb | ||
![]() |
435b173fd9 | ||
![]() |
d9cb42da99 | ||
![]() |
a0b2a93c41 | ||
![]() |
e3d3d772de | ||
![]() |
1494e8fbaa | ||
![]() |
f45dd90f34 | ||
![]() |
f0bf7a247d | ||
![]() |
ef00da803d | ||
![]() |
29ea6faf8f | ||
![]() |
b64be1624c | ||
![]() |
2df2a58dc1 | ||
![]() |
d9885b3776 | ||
![]() |
1f5bf96001 | ||
![]() |
995734ed12 | ||
![]() |
a1771d243a | ||
![]() |
363368c670 | ||
![]() |
5a703d1368 | ||
![]() |
3bddc4daec | ||
![]() |
6138af86b3 | ||
![]() |
66670ba9f0 | ||
![]() |
c4fa674367 | ||
![]() |
20dd16d9f7 | ||
![]() |
1a11ad9d20 | ||
![]() |
d8b2686603 | ||
![]() |
a382e21194 | ||
![]() |
e78aca3b33 | ||
![]() |
255741fc97 | ||
![]() |
7ecaa07580 | ||
![]() |
bd3f90c0c1 | ||
![]() |
9e0d12e310 | ||
![]() |
7b89149c9f | ||
![]() |
07e95b4670 | ||
![]() |
4cf4bc7334 | ||
![]() |
233d34e47e | ||
![]() |
5a4915660c | ||
![]() |
eff77a802d | ||
![]() |
a9520e6e59 | ||
![]() |
6930ecbb75 | ||
![]() |
957dc1037a | ||
![]() |
1d324aceef | ||
![]() |
db4fc559cc | ||
![]() |
00f40961e0 | ||
![]() |
dbda45160f | ||
![]() |
9b1677fb5a | ||
![]() |
86e115e21e | ||
![]() |
86063d9031 | ||
![]() |
f4b97c1e00 | ||
![]() |
a9b937e066 | ||
![]() |
b1748eaee0 | ||
![]() |
9f6ad4dcb6 | ||
![]() |
8ad73bf449 | ||
![]() |
e53e60c0bd | ||
![]() |
2ebb9a4811 | ||
![]() |
5ff43969e7 | ||
![]() |
3c7650491b | ||
![]() |
450f2d0b08 | ||
![]() |
f0cb6482e1 | ||
![]() |
4836d293c0 | ||
![]() |
11656234b5 | ||
![]() |
9743d09635 | ||
![]() |
2aa3a482ab | ||
![]() |
f9765b182e | ||
![]() |
86239a5b9c | ||
![]() |
687e4d7f9c | ||
![]() |
727497ccdf | ||
![]() |
5f2700eee5 | ||
![]() |
c7b6119268 | ||
![]() |
55b764e062 | ||
![]() |
014265f4e6 | ||
![]() |
bcd83ccd25 | ||
![]() |
adcea23cb0 | ||
![]() |
3e4bef52b0 | ||
![]() |
735ba3a7b7 | ||
![]() |
a03ebd9bee | ||
![]() |
8adf2e3066 | ||
![]() |
7d68900af3 | ||
![]() |
1c38cdfe98 | ||
![]() |
2a27fd4111 | ||
![]() |
b94ce4e17d | ||
![]() |
4340f69be1 | ||
![]() |
b3a985fa42 | ||
![]() |
688514e470 | ||
![]() |
67894a597f | ||
![]() |
30d581121b | ||
![]() |
acb33ee1c1 | ||
![]() |
c1a47de86f | ||
![]() |
213d682967 | ||
![]() |
803a9c12c9 | ||
![]() |
3551a32e5e | ||
![]() |
95b68eb693 | ||
![]() |
cf33166336 | ||
![]() |
126efb5889 | ||
![]() |
a490875103 | ||
![]() |
27d8c23c58 | ||
![]() |
7bbd42ef49 | ||
![]() |
a94860a6de | ||
![]() |
cd67bc0ae4 | ||
![]() |
5a3bffab10 | ||
![]() |
22c8c33a58 | ||
![]() |
92dc4ad83d | ||
![]() |
6c6fae6fae | ||
![]() |
03e9caaec0 | ||
![]() |
5796e3a742 | ||
![]() |
35da345160 | ||
![]() |
da51bd17e5 | ||
![]() |
2319656802 | ||
![]() |
786641dcf9 | ||
![]() |
3a8d9b8487 | ||
![]() |
40fccc423a | ||
![]() |
15a8fa76b2 | ||
![]() |
887a3c533b | ||
![]() |
1c1be60fa2 | ||
![]() |
a160c153e2 | ||
![]() |
3a81bf4ad2 | ||
![]() |
5caeef02fa | ||
![]() |
9198de8f10 | ||
![]() |
dc45a00eac | ||
![]() |
3d86999c75 | ||
![]() |
78ac868824 | ||
![]() |
020f6fd093 | ||
![]() |
58398cbd03 | ||
![]() |
e36ff84c33 | ||
![]() |
9ce567c6ff | ||
![]() |
f4cd1bac72 | ||
![]() |
358ce2cf28 | ||
![]() |
2a154b8484 | ||
![]() |
3bd4df3841 | ||
![]() |
0527774066 | ||
![]() |
41e55b476b | ||
![]() |
763ca47fa8 | ||
![]() |
cba99a046e | ||
![]() |
9ccdb8becd | ||
![]() |
60387facd2 | ||
![]() |
dbe839a9ca | ||
![]() |
799e09f75a | ||
![]() |
514d661ca1 | ||
![]() |
05a7348a7e | ||
![]() |
c3773de168 | ||
![]() |
043c6ee3b6 | ||
![]() |
97a0b5be50 | ||
![]() |
6a629f3234 | ||
![]() |
3afdc97d91 | ||
![]() |
05c4710e89 | ||
![]() |
7c9b6fed16 | ||
![]() |
252f46be7d | ||
![]() |
e007908a16 | ||
![]() |
07345ac252 | ||
![]() |
e57909265b | ||
![]() |
e9725abd83 | ||
![]() |
b5e6091885 | ||
![]() |
797f55ef12 | ||
![]() |
57b4098562 | ||
![]() |
4fbc524955 | ||
![]() |
de7b1ae30a | ||
![]() |
b29e98fa8d | ||
![]() |
0d6cec418e | ||
![]() |
b9384b9498 | ||
![]() |
ea30b5a9e0 | ||
![]() |
f0f330e121 | ||
![]() |
c0e58995e3 | ||
![]() |
380c25f640 | ||
![]() |
8a74070128 | ||
![]() |
fd51286227 | ||
![]() |
1b310dfb1d | ||
![]() |
9facf2d1ad | ||
![]() |
822b32ee51 | ||
![]() |
bc29124b1b | ||
![]() |
483ccf0c63 | ||
![]() |
aefb6a2bd6 | ||
![]() |
28096c7c13 | ||
![]() |
000cfca068 | ||
![]() |
dc1d3303d2 | ||
![]() |
250349ffff | ||
![]() |
954a380e19 | ||
![]() |
008baa091f | ||
![]() |
c57ff818f6 | ||
![]() |
66b81b3497 | ||
![]() |
74ed1ae08a | ||
![]() |
54b1b1d893 | ||
![]() |
8f5aa8cb00 | ||
![]() |
5b8688e620 | ||
![]() |
342357cd9e | ||
![]() |
75a2eb7fac | ||
![]() |
96b9cf42e0 | ||
![]() |
82d2291398 | ||
![]() |
4848a0898e | ||
![]() |
2a6c62109c | ||
![]() |
f795e90a11 | ||
![]() |
eb955f6e93 | ||
![]() |
e14293a4e5 | ||
![]() |
e69ade32e1 | ||
![]() |
4e16964e1c | ||
![]() |
249d759caf | ||
![]() |
fe4d055b36 | ||
![]() |
e135388564 | ||
![]() |
f9c39dc862 | ||
![]() |
ba769ea351 | ||
![]() |
5551dcd762 | ||
![]() |
ec994486b1 | ||
![]() |
a59e294e21 | ||
![]() |
87725534db | ||
![]() |
8a9ba769ee | ||
![]() |
2e2a46e0a5 | ||
![]() |
e5d70f4677 | ||
![]() |
7ac56b86cd | ||
![]() |
01d00dfa9e | ||
![]() |
b37f29341b | ||
![]() |
2dc89b922b | ||
![]() |
b53077a9e7 | ||
![]() |
4647353c8b | ||
![]() |
6d9bc3ec9f | ||
![]() |
90149552b1 | ||
![]() |
5f1479d92f | ||
![]() |
506fe78c48 | ||
![]() |
01d9283af3 | ||
![]() |
c63aabaf1c | ||
![]() |
bbac1d2977 | ||
![]() |
7228ba1114 | ||
![]() |
2076011e0c | ||
![]() |
741c215bab | ||
![]() |
6cb933c56e | ||
![]() |
e923bcd16c | ||
![]() |
ee720ad7bc | ||
![]() |
529bec7d7b | ||
![]() |
9192ab7777 | ||
![]() |
8af8d2abb1 | ||
![]() |
4d1b70175c | ||
![]() |
0ee9b02390 | ||
![]() |
5cc1876f14 | ||
![]() |
12ce45f260 | ||
![]() |
3602775330 | ||
![]() |
95f71c019d | ||
![]() |
71472bf375 | ||
![]() |
bcb1159c09 | ||
![]() |
9da6b60417 | ||
![]() |
b1b99e10a6 | ||
![]() |
bbcf18c293 | ||
![]() |
010c003e5f | ||
![]() |
cad00d5180 | ||
![]() |
bee9cfb813 | ||
![]() |
11326f8eb1 | ||
![]() |
277f2e587b | ||
![]() |
f0e34c8879 | ||
![]() |
d54358ff59 | ||
![]() |
804414aad2 | ||
![]() |
ed39d668ea | ||
![]() |
eda3fcd56f | ||
![]() |
11fbcacf37 | ||
![]() |
cae502c175 | ||
![]() |
8e20e0ff39 | ||
![]() |
bdb7df4245 | ||
![]() |
8ce2eba9e6 | ||
![]() |
8150090257 | ||
![]() |
b8c9070d09 | ||
![]() |
019dea0a55 | ||
![]() |
683edb32b7 | ||
![]() |
36261c8238 | ||
![]() |
fdb8fff916 | ||
![]() |
6c94e64963 | ||
![]() |
a97b8fc2dd | ||
![]() |
e47e9bbe86 | ||
![]() |
a5fdba1185 | ||
![]() |
12373b0cc7 | ||
![]() |
ead3eea3e0 | ||
![]() |
fcd61d937f | ||
![]() |
114a39964f | ||
![]() |
a21ca18d4d | ||
![]() |
6751634d77 | ||
![]() |
325505e5c4 | ||
![]() |
43e8f6e37f | ||
![]() |
3b8e318b77 | ||
![]() |
b35374fd64 | ||
![]() |
e9c59310f7 | ||
![]() |
b22bab2547 | ||
![]() |
5bdfff5cfc | ||
![]() |
704c80f048 | ||
![]() |
782312c612 | ||
![]() |
2848de11e5 | ||
![]() |
48d0e9465d | ||
![]() |
6b0a4be5fe | ||
![]() |
9927f219f1 | ||
![]() |
5a35c68b67 | ||
![]() |
f6facd2429 | ||
![]() |
d4edecd1a2 | ||
![]() |
87a6e135c5 | ||
![]() |
8d21c002c6 | ||
![]() |
2cb992a99c | ||
![]() |
fb344f5aeb | ||
![]() |
13ec73a028 | ||
![]() |
46adb2820a | ||
![]() |
34b3d498a9 | ||
![]() |
28430b51e3 | ||
![]() |
6fd87e1d8d | ||
![]() |
cf2a225b24 | ||
![]() |
ab0b0393cb | ||
![]() |
2d8a2b51dc | ||
![]() |
d9bb583c25 | ||
![]() |
929173ab42 | ||
![]() |
4d2dad04aa | ||
![]() |
e6ea31de9f | ||
![]() |
7d0df5422c | ||
![]() |
74f8ce4ca5 | ||
![]() |
8beabfd3bf | ||
![]() |
d3d63cac4d | ||
![]() |
3a3e0d6fbc | ||
![]() |
c4e8742149 | ||
![]() |
67c5e1ba4f | ||
![]() |
fdba8cbb79 | ||
![]() |
6db781d52c | ||
![]() |
6c9aa1d2a6 | ||
![]() |
58f4a094b4 | ||
![]() |
736d5962b4 | ||
![]() |
ca960ce56c |
21
.cirrus.yml
21
.cirrus.yml
@ -1,21 +0,0 @@
|
||||
env:
|
||||
CIRRUS_CLONE_DEPTH: 1
|
||||
ARCH: amd64
|
||||
|
||||
build_task:
|
||||
matrix:
|
||||
freebsd_instance:
|
||||
image_family: freebsd-12-4
|
||||
freebsd_instance:
|
||||
image_family: freebsd-13-2
|
||||
freebsd_instance:
|
||||
image_family: freebsd-14-0-snap
|
||||
prepare_script:
|
||||
- pkg install -y autoconf automake libtool gettext-runtime gmake ksh93 py39-packaging py39-cffi py39-sysctl
|
||||
configure_script:
|
||||
- env MAKE=gmake ./autogen.sh
|
||||
- env MAKE=gmake ./configure --with-config="user" --with-python=3.9
|
||||
build_script:
|
||||
- gmake -j `sysctl -n kern.smp.cpus`
|
||||
install_script:
|
||||
- gmake install
|
18
.github/CONTRIBUTING.md
vendored
18
.github/CONTRIBUTING.md
vendored
@ -145,22 +145,18 @@ Once everything is in good shape and the details have been worked out you can re
|
||||
Any required reviews can then be finalized and the pull request merged.
|
||||
|
||||
#### Tests and Benchmarks
|
||||
* Every pull request will by tested by the buildbot on multiple platforms by running the [zfs-tests.sh and zloop.sh](
|
||||
* Every pull request is tested using a GitHub Actions workflow on multiple platforms by running the [zfs-tests.sh and zloop.sh](
|
||||
https://openzfs.github.io/openzfs-docs/Developer%20Resources/Building%20ZFS.html#running-zloop-sh-and-zfs-tests-sh) test suites.
|
||||
`.github/workflows/scripts/generate-ci-type.py` is used to determine whether the pull request is nonbehavior, i.e., not introducing behavior changes of any code, configuration or tests. If so, the CI will run on fewer platforms and only essential sanity tests will run. You can always override this by adding `ZFS-CI-Type` line to your commit message:
|
||||
* If your last commit (or `HEAD` in git terms) contains a line `ZFS-CI-Type: quick`, quick mode is forced regardless of what files are changed.
|
||||
* Otherwise, if any commit in a PR contains a line `ZFS-CI-Type: full`, full mode is forced.
|
||||
* To verify your changes conform to the [style guidelines](
|
||||
https://github.com/openzfs/zfs/blob/master/.github/CONTRIBUTING.md#style-guides
|
||||
), please run `make checkstyle` and resolve any warnings.
|
||||
* Static code analysis of each pull request is performed by the buildbot; run `make lint` to check your changes.
|
||||
* Test cases should be provided when appropriate.
|
||||
This includes making sure new features have adequate code coverage.
|
||||
* Code analysis is performed by [CodeQL](https://codeql.github.com/) for each pull request.
|
||||
* Test cases should be provided when appropriate. This includes making sure new features have adequate code coverage.
|
||||
* If your pull request improves performance, please include some benchmarks.
|
||||
* The pull request must pass all required [ZFS
|
||||
Buildbot](http://build.zfsonlinux.org/) builders before
|
||||
being accepted. If you are experiencing intermittent TEST
|
||||
builder failures, you may be experiencing a [test suite
|
||||
issue](https://github.com/openzfs/zfs/issues?q=is%3Aissue+is%3Aopen+label%3A%22Type%3A+Test+Suite%22).
|
||||
There are also various [buildbot options](https://openzfs.github.io/openzfs-docs/Developer%20Resources/Buildbot%20Options.html)
|
||||
to control how changes are tested.
|
||||
* The pull request must pass all CI checks before being accepted.
|
||||
|
||||
### Testing
|
||||
All help is appreciated! If you're in a position to run the latest code
|
||||
|
2
.github/ISSUE_TEMPLATE/feature_request.md
vendored
2
.github/ISSUE_TEMPLATE/feature_request.md
vendored
@ -14,7 +14,7 @@ Please check our issue tracker before opening a new feature request.
|
||||
Filling out the following template will help other contributors better understand your proposed feature.
|
||||
-->
|
||||
|
||||
### Describe the feature would like to see added to OpenZFS
|
||||
### Describe the feature you would like to see added to OpenZFS
|
||||
|
||||
<!--
|
||||
Provide a clear and concise description of the feature.
|
||||
|
6
.github/PULL_REQUEST_TEMPLATE.md
vendored
6
.github/PULL_REQUEST_TEMPLATE.md
vendored
@ -2,11 +2,6 @@
|
||||
|
||||
<!--- Provide a general summary of your changes in the Title above -->
|
||||
|
||||
<!---
|
||||
Documentation on ZFS Buildbot options can be found at
|
||||
https://openzfs.github.io/openzfs-docs/Developer%20Resources/Buildbot%20Options.html
|
||||
-->
|
||||
|
||||
### Motivation and Context
|
||||
<!--- Why is this change required? What problem does it solve? -->
|
||||
<!--- If it fixes an open issue, please link to the issue here. -->
|
||||
@ -27,6 +22,7 @@ https://openzfs.github.io/openzfs-docs/Developer%20Resources/Buildbot%20Options.
|
||||
- [ ] New feature (non-breaking change which adds functionality)
|
||||
- [ ] Performance enhancement (non-breaking change which improves efficiency)
|
||||
- [ ] Code cleanup (non-breaking change which makes code smaller or more readable)
|
||||
- [ ] Quality assurance (non-breaking change which makes the code more robust against bugs)
|
||||
- [ ] Breaking change (fix or feature that would cause existing functionality to change)
|
||||
- [ ] Library ABI change (libzfs, libzfs\_core, libnvpair, libuutil and libzfsbootenv)
|
||||
- [ ] Documentation (a change to man pages or other documentation)
|
||||
|
1
.github/codeql-cpp.yml
vendored
1
.github/codeql-cpp.yml
vendored
@ -2,3 +2,4 @@ name: "Custom CodeQL Analysis"
|
||||
|
||||
queries:
|
||||
- uses: ./.github/codeql/custom-queries/cpp/deprecatedFunctionUsage.ql
|
||||
- uses: ./.github/codeql/custom-queries/cpp/dslDatasetHoldReleMismatch.ql
|
||||
|
34
.github/codeql/custom-queries/cpp/dslDatasetHoldReleMismatch.ql
vendored
Normal file
34
.github/codeql/custom-queries/cpp/dslDatasetHoldReleMismatch.ql
vendored
Normal file
@ -0,0 +1,34 @@
|
||||
/**
|
||||
* @name Detect mismatched dsl_dataset_hold/_rele pairs
|
||||
* @description Flags instances of issue #12014 where
|
||||
* - a dataset held with dsl_dataset_hold_obj() ends up in dsl_dataset_rele_flags(), or
|
||||
* - a dataset held with dsl_dataset_hold_obj_flags() ends up in dsl_dataset_rele().
|
||||
* @kind problem
|
||||
* @severity error
|
||||
* @tags correctness
|
||||
* @id cpp/dslDatasetHoldReleMismatch
|
||||
*/
|
||||
|
||||
import cpp
|
||||
|
||||
from Variable ds, Call holdCall, Call releCall, string message
|
||||
where
|
||||
ds.getType().toString() = "dsl_dataset_t *" and
|
||||
holdCall.getASuccessor*() = releCall and
|
||||
(
|
||||
(holdCall.getTarget().getName() = "dsl_dataset_hold_obj_flags" and
|
||||
holdCall.getArgument(4).(AddressOfExpr).getOperand().(VariableAccess).getTarget() = ds and
|
||||
releCall.getTarget().getName() = "dsl_dataset_rele" and
|
||||
releCall.getArgument(0).(VariableAccess).getTarget() = ds and
|
||||
message = "Held with dsl_dataset_hold_obj_flags but released with dsl_dataset_rele")
|
||||
or
|
||||
(holdCall.getTarget().getName() = "dsl_dataset_hold_obj" and
|
||||
holdCall.getArgument(3).(AddressOfExpr).getOperand().(VariableAccess).getTarget() = ds and
|
||||
releCall.getTarget().getName() = "dsl_dataset_rele_flags" and
|
||||
releCall.getArgument(0).(VariableAccess).getTarget() = ds and
|
||||
message = "Held with dsl_dataset_hold_obj but released with dsl_dataset_rele_flags")
|
||||
)
|
||||
select releCall,
|
||||
"Mismatched release: held with $@ but released with " + releCall.getTarget().getName() + " for dataset $@",
|
||||
holdCall, holdCall.getTarget().getName(),
|
||||
ds, ds.toString()
|
57
.github/workflows/build-dependencies.txt
vendored
57
.github/workflows/build-dependencies.txt
vendored
@ -1,57 +0,0 @@
|
||||
acl
|
||||
alien
|
||||
attr
|
||||
autoconf
|
||||
bc
|
||||
build-essential
|
||||
curl
|
||||
dbench
|
||||
debhelper-compat
|
||||
dh-python
|
||||
dkms
|
||||
fakeroot
|
||||
fio
|
||||
gdb
|
||||
gdebi
|
||||
git
|
||||
ksh
|
||||
lcov
|
||||
libacl1-dev
|
||||
libaio-dev
|
||||
libattr1-dev
|
||||
libblkid-dev
|
||||
libcurl4-openssl-dev
|
||||
libdevmapper-dev
|
||||
libelf-dev
|
||||
libffi-dev
|
||||
libmount-dev
|
||||
libpam0g-dev
|
||||
libselinux1-dev
|
||||
libssl-dev
|
||||
libtool
|
||||
libudev-dev
|
||||
linux-headers-generic
|
||||
lsscsi
|
||||
mdadm
|
||||
nfs-kernel-server
|
||||
pamtester
|
||||
parted
|
||||
po-debconf
|
||||
python3
|
||||
python3-all-dev
|
||||
python3-cffi
|
||||
python3-dev
|
||||
python3-packaging
|
||||
python3-pip
|
||||
python3-setuptools
|
||||
python3-sphinx
|
||||
rng-tools-debian
|
||||
rsync
|
||||
samba
|
||||
sysstat
|
||||
uuid-dev
|
||||
watchdog
|
||||
wget
|
||||
xfslibs-dev
|
||||
xz-utils
|
||||
zlib1g-dev
|
@ -1,5 +0,0 @@
|
||||
cppcheck
|
||||
devscripts
|
||||
mandoc
|
||||
pax-utils
|
||||
shellcheck
|
23
.github/workflows/checkstyle.yaml
vendored
23
.github/workflows/checkstyle.yaml
vendored
@ -4,6 +4,10 @@ on:
|
||||
push:
|
||||
pull_request:
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
checkstyle:
|
||||
runs-on: ubuntu-22.04
|
||||
@ -13,15 +17,11 @@ jobs:
|
||||
ref: ${{ github.event.pull_request.head.sha }}
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
# https://github.com/orgs/community/discussions/47863
|
||||
sudo apt-mark hold grub-efi-amd64-signed
|
||||
sudo apt-get update --fix-missing
|
||||
sudo apt-get upgrade
|
||||
sudo xargs --arg-file=${{ github.workspace }}/.github/workflows/build-dependencies.txt apt-get install -qq
|
||||
sudo xargs --arg-file=${{ github.workspace }}/.github/workflows/checkstyle-dependencies.txt apt-get install -qq
|
||||
sudo python3 -m pip install --quiet flake8
|
||||
sudo apt-get clean
|
||||
|
||||
# for x in lxd core20 snapd; do sudo snap remove $x; done
|
||||
sudo apt-get purge -y snapd google-chrome-stable firefox
|
||||
ONLY_DEPS=1 .github/workflows/scripts/qemu-3-deps-vm.sh ubuntu22
|
||||
sudo apt-get install -y cppcheck devscripts mandoc pax-utils shellcheck
|
||||
sudo python -m pipx install --quiet flake8
|
||||
# confirm that the tools are installed
|
||||
# the build system doesn't fail when they are not
|
||||
checkbashisms --version
|
||||
@ -31,8 +31,13 @@ jobs:
|
||||
shellcheck --version
|
||||
- name: Prepare
|
||||
run: |
|
||||
sed -i '/DEBUG_CFLAGS="-Werror"/s/^/#/' config/zfs-build.m4
|
||||
./autogen.sh
|
||||
- name: Configure
|
||||
run: |
|
||||
./configure
|
||||
- name: Make
|
||||
run: |
|
||||
make -j$(nproc) --no-print-directory --silent
|
||||
- name: Checkstyle
|
||||
run: |
|
||||
|
12
.github/workflows/codeql.yml
vendored
12
.github/workflows/codeql.yml
vendored
@ -4,10 +4,14 @@ on:
|
||||
push:
|
||||
pull_request:
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
analyze:
|
||||
name: Analyze
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-22.04
|
||||
permissions:
|
||||
actions: read
|
||||
contents: read
|
||||
@ -27,15 +31,15 @@ jobs:
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Initialize CodeQL
|
||||
uses: github/codeql-action/init@v2
|
||||
uses: github/codeql-action/init@v3
|
||||
with:
|
||||
config-file: .github/codeql-${{ matrix.language }}.yml
|
||||
languages: ${{ matrix.language }}
|
||||
|
||||
- name: Autobuild
|
||||
uses: github/codeql-action/autobuild@v2
|
||||
uses: github/codeql-action/autobuild@v3
|
||||
|
||||
- name: Perform CodeQL Analysis
|
||||
uses: github/codeql-action/analyze@v2
|
||||
uses: github/codeql-action/analyze@v3
|
||||
with:
|
||||
category: "/language:${{matrix.language}}"
|
||||
|
49
.github/workflows/labels.yml
vendored
Normal file
49
.github/workflows/labels.yml
vendored
Normal file
@ -0,0 +1,49 @@
|
||||
name: labels
|
||||
|
||||
on:
|
||||
pull_request_target:
|
||||
types: [ opened, synchronize, reopened, converted_to_draft, ready_for_review ]
|
||||
|
||||
permissions:
|
||||
pull-requests: write
|
||||
|
||||
jobs:
|
||||
open:
|
||||
runs-on: ubuntu-latest
|
||||
if: ${{ github.event.action == 'opened' && github.event.pull_request.draft }}
|
||||
steps:
|
||||
- env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
ISSUE: ${{ github.event.pull_request.html_url }}
|
||||
run: |
|
||||
gh pr edit $ISSUE --add-label "Status: Work in Progress"
|
||||
|
||||
push:
|
||||
runs-on: ubuntu-latest
|
||||
if: ${{ github.event.action == 'synchronize' || github.event.action == 'reopened' }}
|
||||
steps:
|
||||
- env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
ISSUE: ${{ github.event.pull_request.html_url }}
|
||||
run: |
|
||||
gh pr edit $ISSUE --remove-label "Status: Accepted,Status: Inactive,Status: Revision Needed,Status: Stale"
|
||||
|
||||
draft:
|
||||
runs-on: ubuntu-latest
|
||||
if: ${{ github.event.action == 'converted_to_draft' }}
|
||||
steps:
|
||||
- env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
ISSUE: ${{ github.event.pull_request.html_url }}
|
||||
run: |
|
||||
gh pr edit $ISSUE --remove-label "Status: Accepted,Status: Code Review Needed,Status: Inactive,Status: Revision Needed,Status: Stale" --add-label "Status: Work in Progress"
|
||||
|
||||
rfr:
|
||||
runs-on: ubuntu-latest
|
||||
if: ${{ github.event.action == 'ready_for_review' }}
|
||||
steps:
|
||||
- env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
ISSUE: ${{ github.event.pull_request.html_url }}
|
||||
run: |
|
||||
gh pr edit $ISSUE --remove-label "Status: Accepted,Status: Inactive,Status: Revision Needed,Status: Stale,Status: Work in Progress" --add-label "Status: Code Review Needed"
|
14
.github/workflows/scripts/README.md
vendored
Normal file
14
.github/workflows/scripts/README.md
vendored
Normal file
@ -0,0 +1,14 @@
|
||||
|
||||
Workflow for each operating system:
|
||||
- install qemu on the github runner
|
||||
- download current cloud image of operating system
|
||||
- start and init that image via cloud-init
|
||||
- install dependencies and poweroff system
|
||||
- start system and build openzfs and then poweroff again
|
||||
- clone build system and start 2 instances of it
|
||||
- run functional testings and complete in around 3h
|
||||
- when tests are done, do some logfile preparing
|
||||
- show detailed results for each system
|
||||
- in the end, generate the job summary
|
||||
|
||||
/TR 14.09.2024
|
108
.github/workflows/scripts/generate-ci-type.py
vendored
Executable file
108
.github/workflows/scripts/generate-ci-type.py
vendored
Executable file
@ -0,0 +1,108 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
"""
|
||||
Determine the CI type based on the change list and commit message.
|
||||
|
||||
Prints "quick" if (explicity required by user):
|
||||
- the *last* commit message contains 'ZFS-CI-Type: quick'
|
||||
or if (heuristics):
|
||||
- the files changed are not in the list of specified directories, and
|
||||
- all commit messages do not contain 'ZFS-CI-Type: full'
|
||||
|
||||
Otherwise prints "full".
|
||||
"""
|
||||
|
||||
import sys
|
||||
import subprocess
|
||||
import re
|
||||
|
||||
"""
|
||||
Patterns of files that are not considered to trigger full CI.
|
||||
Note: not using pathlib.Path.match() because it does not support '**'
|
||||
"""
|
||||
FULL_RUN_IGNORE_REGEX = list(map(re.compile, [
|
||||
r'.*\.md',
|
||||
r'.*\.gitignore'
|
||||
]))
|
||||
|
||||
"""
|
||||
Patterns of files that are considered to trigger full CI.
|
||||
"""
|
||||
FULL_RUN_REGEX = list(map(re.compile, [
|
||||
r'\.github/workflows/scripts/.*',
|
||||
r'cmd.*',
|
||||
r'configs/.*',
|
||||
r'META',
|
||||
r'.*\.am',
|
||||
r'.*\.m4',
|
||||
r'autogen\.sh',
|
||||
r'configure\.ac',
|
||||
r'copy-builtin',
|
||||
r'contrib',
|
||||
r'etc',
|
||||
r'include',
|
||||
r'lib/.*',
|
||||
r'module/.*',
|
||||
r'scripts/.*',
|
||||
r'tests/.*',
|
||||
r'udev/.*'
|
||||
]))
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
||||
prog = sys.argv[0]
|
||||
|
||||
if len(sys.argv) != 3:
|
||||
print(f'Usage: {prog} <head_ref> <base_ref>')
|
||||
sys.exit(1)
|
||||
|
||||
head, base = sys.argv[1:3]
|
||||
|
||||
def output_type(type, reason):
|
||||
print(f'{prog}: will run {type} CI: {reason}', file=sys.stderr)
|
||||
print(type)
|
||||
sys.exit(0)
|
||||
|
||||
# check last (HEAD) commit message
|
||||
last_commit_message_raw = subprocess.run([
|
||||
'git', 'show', '-s', '--format=%B', head
|
||||
], check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
|
||||
for line in last_commit_message_raw.stdout.decode().splitlines():
|
||||
if line.strip().lower() == 'zfs-ci-type: quick':
|
||||
output_type('quick', f'explicitly requested by HEAD commit {head}')
|
||||
|
||||
# check all commit messages
|
||||
all_commit_message_raw = subprocess.run([
|
||||
'git', 'show', '-s',
|
||||
'--format=ZFS-CI-Commit: %H%n%B', f'{head}...{base}'
|
||||
], check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
all_commit_message = all_commit_message_raw.stdout.decode().splitlines()
|
||||
|
||||
commit_ref = head
|
||||
for line in all_commit_message:
|
||||
if line.startswith('ZFS-CI-Commit:'):
|
||||
commit_ref = line.lstrip('ZFS-CI-Commit:').rstrip()
|
||||
if line.strip().lower() == 'zfs-ci-type: full':
|
||||
output_type('full', f'explicitly requested by commit {commit_ref}')
|
||||
|
||||
# check changed files
|
||||
changed_files_raw = subprocess.run([
|
||||
'git', 'diff', '--name-only', head, base
|
||||
], check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
changed_files = changed_files_raw.stdout.decode().splitlines()
|
||||
|
||||
for f in changed_files:
|
||||
for r in FULL_RUN_IGNORE_REGEX:
|
||||
if r.match(f):
|
||||
break
|
||||
else:
|
||||
for r in FULL_RUN_REGEX:
|
||||
if r.match(f):
|
||||
output_type(
|
||||
'full',
|
||||
f'changed file "{f}" matches pattern "{r.pattern}"'
|
||||
)
|
||||
|
||||
# catch-all
|
||||
output_type('quick', 'no changed file matches full CI patterns')
|
119
.github/workflows/scripts/generate-summary.sh
vendored
119
.github/workflows/scripts/generate-summary.sh
vendored
@ -1,119 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# for runtime reasons we split functional testings into N parts
|
||||
# - use a define to check for missing tarfiles
|
||||
FUNCTIONAL_PARTS="4"
|
||||
|
||||
ZTS_REPORT="tests/test-runner/bin/zts-report.py"
|
||||
chmod +x $ZTS_REPORT
|
||||
|
||||
function output() {
|
||||
echo -e $* >> Summary.md
|
||||
}
|
||||
|
||||
function error() {
|
||||
output ":bangbang: $* :bangbang:\n"
|
||||
}
|
||||
|
||||
# this function generates the real summary
|
||||
# - expects a logfile "log" in current directory
|
||||
function generate() {
|
||||
# we issued some error already
|
||||
test ! -s log && return
|
||||
|
||||
# for overview and zts-report
|
||||
cat log | grep '^Test' > list
|
||||
|
||||
# error details
|
||||
awk '/\[FAIL\]|\[KILLED\]/{ show=1; print; next; }
|
||||
/\[SKIP\]|\[PASS\]/{ show=0; } show' log > err
|
||||
|
||||
# summary of errors
|
||||
if [ -s err ]; then
|
||||
output "<pre>"
|
||||
$ZTS_REPORT --no-maybes ./list >> Summary.md
|
||||
output "</pre>"
|
||||
|
||||
# generate seperate error logfile
|
||||
ERRLOGS=$((ERRLOGS+1))
|
||||
errfile="err-$ERRLOGS.md"
|
||||
echo -e "\n## $headline (debugging)\n" >> $errfile
|
||||
echo "<details><summary>Error Listing - with dmesg and dbgmsg</summary><pre>" >> $errfile
|
||||
dd if=err bs=999k count=1 >> $errfile
|
||||
echo "</pre></details>" >> $errfile
|
||||
else
|
||||
output "All tests passed :thumbsup:"
|
||||
fi
|
||||
|
||||
output "<details><summary>Full Listing</summary><pre>"
|
||||
cat list >> Summary.md
|
||||
output "</pre></details>"
|
||||
|
||||
# remove tmp files
|
||||
rm -f err list log
|
||||
}
|
||||
|
||||
# check tarfiles and untar
|
||||
function check_tarfile() {
|
||||
if [ -f "$1" ]; then
|
||||
tar xf "$1" || error "Tarfile $1 returns some error"
|
||||
else
|
||||
error "Tarfile $1 not found"
|
||||
fi
|
||||
}
|
||||
|
||||
# check logfile and concatenate test results
|
||||
function check_logfile() {
|
||||
if [ -f "$1" ]; then
|
||||
cat "$1" >> log
|
||||
else
|
||||
error "Logfile $1 not found"
|
||||
fi
|
||||
}
|
||||
|
||||
# sanity
|
||||
function summarize_s() {
|
||||
headline="$1"
|
||||
output "\n## $headline\n"
|
||||
rm -rf testfiles
|
||||
check_tarfile "$2/sanity.tar"
|
||||
check_logfile "testfiles/log"
|
||||
generate
|
||||
}
|
||||
|
||||
# functional
|
||||
function summarize_f() {
|
||||
headline="$1"
|
||||
output "\n## $headline\n"
|
||||
rm -rf testfiles
|
||||
for i in $(seq 1 $FUNCTIONAL_PARTS); do
|
||||
tarfile="$2-part$i/part$i.tar"
|
||||
check_tarfile "$tarfile"
|
||||
check_logfile "testfiles/log"
|
||||
done
|
||||
generate
|
||||
}
|
||||
|
||||
# https://docs.github.com/en/enterprise-server@3.6/actions/using-workflows/workflow-commands-for-github-actions#step-isolation-and-limits
|
||||
# Job summaries are isolated between steps and each step is restricted to a maximum size of 1MiB.
|
||||
# [ ] can not show all error findings here
|
||||
# [x] split files into smaller ones and create additional steps
|
||||
|
||||
ERRLOGS=0
|
||||
if [ ! -f Summary/Summary.md ]; then
|
||||
# first call, we do the default summary (~500k)
|
||||
echo -n > Summary.md
|
||||
summarize_s "Sanity Tests Ubuntu 20.04" Logs-20.04-sanity
|
||||
summarize_s "Sanity Tests Ubuntu 22.04" Logs-22.04-sanity
|
||||
summarize_f "Functional Tests Ubuntu 20.04" Logs-20.04-functional
|
||||
summarize_f "Functional Tests Ubuntu 22.04" Logs-22.04-functional
|
||||
|
||||
cat Summary.md >> $GITHUB_STEP_SUMMARY
|
||||
mkdir -p Summary
|
||||
mv *.md Summary
|
||||
else
|
||||
# here we get, when errors where returned in first call
|
||||
test -f Summary/err-$1.md && cat Summary/err-$1.md >> $GITHUB_STEP_SUMMARY
|
||||
fi
|
||||
|
||||
exit 0
|
109
.github/workflows/scripts/merge_summary.awk
vendored
Executable file
109
.github/workflows/scripts/merge_summary.awk
vendored
Executable file
@ -0,0 +1,109 @@
|
||||
#!/bin/awk -f
|
||||
#
|
||||
# Merge multiple ZTS tests results summaries into a single summary. This is
|
||||
# needed when you're running different parts of ZTS on different tests
|
||||
# runners or VMs.
|
||||
#
|
||||
# Usage:
|
||||
#
|
||||
# ./merge_summary.awk summary1.txt [summary2.txt] [summary3.txt] ...
|
||||
#
|
||||
# or:
|
||||
#
|
||||
# cat summary*.txt | ./merge_summary.awk
|
||||
#
|
||||
BEGIN {
|
||||
i=-1
|
||||
pass=0
|
||||
fail=0
|
||||
skip=0
|
||||
state=""
|
||||
cl=0
|
||||
el=0
|
||||
upl=0
|
||||
ul=0
|
||||
|
||||
# Total seconds of tests runtime
|
||||
total=0;
|
||||
}
|
||||
|
||||
# Skip empty lines
|
||||
/^\s*$/{next}
|
||||
|
||||
# Skip Configuration and Test lines
|
||||
/^Test:/{state=""; next}
|
||||
/Configuration/{state="";next}
|
||||
|
||||
# When we see "test-runner.py" stop saving config lines, and
|
||||
# save test runner lines
|
||||
/test-runner.py/{state="testrunner"; runner=runner$0"\n"; next}
|
||||
|
||||
# We need to differentiate the PASS counts from test result lines that start
|
||||
# with PASS, like:
|
||||
#
|
||||
# PASS mv_files/setup
|
||||
#
|
||||
# Use state="pass_count" to differentiate
|
||||
#
|
||||
/Results Summary/{state="pass_count"; next}
|
||||
/PASS/{ if (state=="pass_count") {pass += $2}}
|
||||
/FAIL/{ if (state=="pass_count") {fail += $2}}
|
||||
/SKIP/{ if (state=="pass_count") {skip += $2}}
|
||||
/Running Time/{
|
||||
state="";
|
||||
running[i]=$3;
|
||||
split($3, arr, ":")
|
||||
total += arr[1] * 60 * 60;
|
||||
total += arr[2] * 60;
|
||||
total += arr[3]
|
||||
next;
|
||||
}
|
||||
|
||||
/Tests with results other than PASS that are expected/{state="expected_lines"; next}
|
||||
/Tests with result of PASS that are unexpected/{state="unexpected_pass_lines"; next}
|
||||
/Tests with results other than PASS that are unexpected/{state="unexpected_lines"; next}
|
||||
{
|
||||
if (state == "expected_lines") {
|
||||
expected_lines[el] = $0
|
||||
el++
|
||||
}
|
||||
|
||||
if (state == "unexpected_pass_lines") {
|
||||
unexpected_pass_lines[upl] = $0
|
||||
upl++
|
||||
}
|
||||
if (state == "unexpected_lines") {
|
||||
unexpected_lines[ul] = $0
|
||||
ul++
|
||||
}
|
||||
}
|
||||
|
||||
# Reproduce summary
|
||||
END {
|
||||
print runner;
|
||||
print "\nResults Summary"
|
||||
print "PASS\t"pass
|
||||
print "FAIL\t"fail
|
||||
print "SKIP\t"skip
|
||||
print ""
|
||||
print "Running Time:\t"strftime("%T", total, 1)
|
||||
if (pass+fail+skip > 0) {
|
||||
percent_passed=(pass/(pass+fail+skip) * 100)
|
||||
}
|
||||
printf "Percent passed:\t%3.2f%", percent_passed
|
||||
|
||||
print "\n\nTests with results other than PASS that are expected:"
|
||||
asort(expected_lines, sorted)
|
||||
for (j in sorted)
|
||||
print sorted[j]
|
||||
|
||||
print "\n\nTests with result of PASS that are unexpected:"
|
||||
asort(unexpected_pass_lines, sorted)
|
||||
for (j in sorted)
|
||||
print sorted[j]
|
||||
|
||||
print "\n\nTests with results other than PASS that are unexpected:"
|
||||
asort(unexpected_lines, sorted)
|
||||
for (j in sorted)
|
||||
print sorted[j]
|
||||
}
|
77
.github/workflows/scripts/qemu-1-setup.sh
vendored
Executable file
77
.github/workflows/scripts/qemu-1-setup.sh
vendored
Executable file
@ -0,0 +1,77 @@
|
||||
#!/usr/bin/env bash

######################################################################
# 1) setup qemu instance on action runner
######################################################################

set -eu

# We've been seeing this script take over 15min to run.  This may or
# may not be normal.  Just to get a little more insight, print out
# a message to stdout with the top running process, and do this every
# 30 seconds.  We can delete this watchdog later once we get a better
# handle on what the timeout value should be.
#
# FIX: the original message had stray "}')"  characters left over from
# an earlier edit, which garbled the watchdog output.
(while true ; do sleep 30 && echo "[watchdog: $(ps -eo cmd --sort=-pcpu | head -n 2 | tail -n 1)]"; done) &

# install needed packages
export DEBIAN_FRONTEND="noninteractive"
sudo apt-get -y update
sudo apt-get install -y axel cloud-image-utils daemonize guestfs-tools \
  virt-manager linux-modules-extra-$(uname -r) zfsutils-linux

# generate fresh ssh keys, used later for logging into the VMs
rm -f ~/.ssh/id_ed25519
ssh-keygen -t ed25519 -f ~/.ssh/id_ed25519 -q -N ""

# not needed
sudo systemctl stop docker.socket
sudo systemctl stop multipathd.socket

# remove default swapfile and /mnt so the ephemeral disk can be repartitioned
sudo swapoff -a
sudo umount -l /mnt
DISK="/dev/disk/cloud/azure_resource-part1"
sudo sed -e "s|^$DISK.*||g" -i /etc/fstab
sudo wipefs -aq $DISK
sudo systemctl daemon-reload

sudo modprobe loop
sudo modprobe zfs

# partition the disk as needed
DISK="/dev/disk/cloud/azure_resource"
sudo sgdisk --zap-all $DISK
sudo sgdisk -p \
  -n 1:0:+16G -c 1:"swap" \
  -n 2:0:0 -c 2:"tests" \
  $DISK
sync
sleep 1

# swap with same size as RAM (16GiB)
sudo mkswap $DISK-part1
sudo swapon $DISK-part1

# JBOD 2xdisk for OpenZFS storage (test vm's); the second "disk" is a
# 4K-sector loop device backed by a sparse file.
SSD1="$DISK-part2"
sudo fallocate -l 12G /test.ssd2
SSD2=$(sudo losetup -b 4096 -f /test.ssd2 --show)

# adjust zfs module parameter and create pool
exec 1>/dev/null
ARC_MIN=$((1024*1024*256))
ARC_MAX=$((1024*1024*512))
echo $ARC_MIN | sudo tee /sys/module/zfs/parameters/zfs_arc_min
echo $ARC_MAX | sudo tee /sys/module/zfs/parameters/zfs_arc_max
echo 1 | sudo tee /sys/module/zfs/parameters/zvol_use_blk_mq
sudo zpool create -f -o ashift=12 zpool $SSD1 $SSD2 -O relatime=off \
  -O atime=off -O xattr=sa -O compression=lz4 -O sync=disabled \
  -O redundant_metadata=none -O mountpoint=/mnt/tests

# no need for some scheduler
for i in /sys/block/s*/queue/scheduler; do
  echo "none" | sudo tee $i
done

# Kill off our watchdog
kill $(jobs -p)
|
303
.github/workflows/scripts/qemu-2-start.sh
vendored
Executable file
303
.github/workflows/scripts/qemu-2-start.sh
vendored
Executable file
@@ -0,0 +1,303 @@
|
||||
#!/usr/bin/env bash

######################################################################
# 2) start qemu with some operating system, init via cloud-init
######################################################################

set -eu

# short name used in zfs-qemu.yml
OS="$1"

# OS variant (virt-install --os-variant list)
OSv=$OS

# FreeBSD urls's
FREEBSD_REL="https://download.freebsd.org/releases/CI-IMAGES"
FREEBSD_SNAP="https://download.freebsd.org/snapshots/CI-IMAGES"
URLxz=""

# Ubuntu mirrors
UBMIRROR="https://cloud-images.ubuntu.com"
#UBMIRROR="https://mirrors.cloud.tencent.com/ubuntu-cloud-images"
#UBMIRROR="https://mirror.citrahost.com/ubuntu-cloud-images"

# default nic model for vm's
NIC="virtio"

# additional options for virt-install
OPTS[0]=""
OPTS[1]=""

# Map the short OS name to: a display name (OSNAME), a virt-install
# variant (OSv), a cloud image URL (URL, or URLxz for xz-compressed
# FreeBSD raw images + KSRC kernel sources), and NIC/boot overrides.
case "$OS" in
  almalinux8)
    OSNAME="AlmaLinux 8"
    URL="https://repo.almalinux.org/almalinux/8/cloud/x86_64/images/AlmaLinux-8-GenericCloud-latest.x86_64.qcow2"
    ;;
  almalinux9)
    OSNAME="AlmaLinux 9"
    URL="https://repo.almalinux.org/almalinux/9/cloud/x86_64/images/AlmaLinux-9-GenericCloud-latest.x86_64.qcow2"
    ;;
  almalinux10)
    OSNAME="AlmaLinux 10"
    OSv="almalinux9"
    URL="https://repo.almalinux.org/almalinux/10/cloud/x86_64/images/AlmaLinux-10-GenericCloud-latest.x86_64.qcow2"
    ;;
  archlinux)
    OSNAME="Archlinux"
    URL="https://geo.mirror.pkgbuild.com/images/latest/Arch-Linux-x86_64-cloudimg.qcow2"
    ;;
  centos-stream10)
    OSNAME="CentOS Stream 10"
    # TODO: #16903 Overwrite OSv to stream9 for virt-install until it's added to osinfo
    OSv="centos-stream9"
    URL="https://cloud.centos.org/centos/10-stream/x86_64/images/CentOS-Stream-GenericCloud-10-latest.x86_64.qcow2"
    ;;
  centos-stream9)
    OSNAME="CentOS Stream 9"
    URL="https://cloud.centos.org/centos/9-stream/x86_64/images/CentOS-Stream-GenericCloud-9-latest.x86_64.qcow2"
    ;;
  debian11)
    OSNAME="Debian 11"
    URL="https://cloud.debian.org/images/cloud/bullseye/latest/debian-11-generic-amd64.qcow2"
    ;;
  debian12)
    OSNAME="Debian 12"
    URL="https://cloud.debian.org/images/cloud/bookworm/latest/debian-12-generic-amd64.qcow2"
    ;;
  debian13)
    OSNAME="Debian 13"
    # TODO: Overwrite OSv to debian13 for virt-install until it's added to osinfo
    OSv="debian12"
    URL="https://cloud.debian.org/images/cloud/trixie/latest/debian-13-generic-amd64.qcow2"
    OPTS[0]="--boot"
    OPTS[1]="uefi=on"
    ;;
  fedora41)
    OSNAME="Fedora 41"
    OSv="fedora-unknown"
    URL="https://download.fedoraproject.org/pub/fedora/linux/releases/41/Cloud/x86_64/images/Fedora-Cloud-Base-Generic-41-1.4.x86_64.qcow2"
    ;;
  fedora42)
    OSNAME="Fedora 42"
    OSv="fedora-unknown"
    URL="https://download.fedoraproject.org/pub/fedora/linux/releases/42/Cloud/x86_64/images/Fedora-Cloud-Base-Generic-42-1.1.x86_64.qcow2"
    ;;
  freebsd13-5r)
    FreeBSD="13.5-RELEASE"
    OSNAME="FreeBSD $FreeBSD"
    OSv="freebsd13.0"
    URLxz="$FREEBSD_REL/$FreeBSD/amd64/Latest/FreeBSD-$FreeBSD-amd64-BASIC-CI.raw.xz"
    KSRC="$FREEBSD_REL/../amd64/$FreeBSD/src.txz"
    NIC="rtl8139"
    ;;
  freebsd14-2r)
    FreeBSD="14.2-RELEASE"
    OSNAME="FreeBSD $FreeBSD"
    OSv="freebsd14.0"
    KSRC="$FREEBSD_REL/../amd64/$FreeBSD/src.txz"
    URLxz="$FREEBSD_REL/$FreeBSD/amd64/Latest/FreeBSD-$FreeBSD-amd64-BASIC-CI.raw.xz"
    ;;
  freebsd14-3r)
    FreeBSD="14.3-RELEASE"
    OSNAME="FreeBSD $FreeBSD"
    OSv="freebsd14.0"
    URLxz="$FREEBSD_REL/$FreeBSD/amd64/Latest/FreeBSD-$FreeBSD-amd64-BASIC-CI.raw.xz"
    KSRC="$FREEBSD_REL/../amd64/$FreeBSD/src.txz"
    ;;
  freebsd13-5s)
    FreeBSD="13.5-STABLE"
    OSNAME="FreeBSD $FreeBSD"
    OSv="freebsd13.0"
    URLxz="$FREEBSD_SNAP/$FreeBSD/amd64/Latest/FreeBSD-$FreeBSD-amd64-BASIC-CI.raw.xz"
    KSRC="$FREEBSD_SNAP/../amd64/$FreeBSD/src.txz"
    NIC="rtl8139"
    ;;
  freebsd14-3s)
    FreeBSD="14.3-STABLE"
    OSNAME="FreeBSD $FreeBSD"
    OSv="freebsd14.0"
    URLxz="$FREEBSD_SNAP/$FreeBSD/amd64/Latest/FreeBSD-$FreeBSD-amd64-BASIC-CI-ufs.raw.xz"
    KSRC="$FREEBSD_SNAP/../amd64/$FreeBSD/src.txz"
    ;;
  freebsd15-0c)
    FreeBSD="15.0-ALPHA3"
    OSNAME="FreeBSD $FreeBSD"
    OSv="freebsd14.0"
    URLxz="$FREEBSD_SNAP/$FreeBSD/amd64/Latest/FreeBSD-$FreeBSD-amd64-BASIC-CI-ufs.raw.xz"
    KSRC="$FREEBSD_SNAP/../amd64/$FreeBSD/src.txz"
    ;;
  tumbleweed)
    OSNAME="openSUSE Tumbleweed"
    OSv="opensusetumbleweed"
    MIRROR="http://opensuse-mirror-gce-us.susecloud.net"
    URL="$MIRROR/tumbleweed/appliances/openSUSE-MicroOS.x86_64-OpenStack-Cloud.qcow2"
    ;;
  ubuntu22)
    OSNAME="Ubuntu 22.04"
    OSv="ubuntu22.04"
    URL="$UBMIRROR/jammy/current/jammy-server-cloudimg-amd64.img"
    ;;
  ubuntu24)
    OSNAME="Ubuntu 24.04"
    OSv="ubuntu24.04"
    URL="$UBMIRROR/noble/current/noble-server-cloudimg-amd64.img"
    ;;
  *)
    echo "Wrong value for OS variable!"
    exit 111
    ;;
esac

# environment file - sourced by the follow-up scripts to share settings
ENV="/var/tmp/env.txt"
echo "ENV=$ENV" >> $ENV

# result path
echo 'RESPATH="/var/tmp/test_results"' >> $ENV

# FreeBSD 13 has problems with: e1000 and virtio
echo "NIC=$NIC" >> $ENV

# freebsd15 -> used in zfs-qemu.yml
echo "OS=$OS" >> $ENV

# freebsd14.0 -> used for virt-install
echo "OSv=\"$OSv\"" >> $ENV

# FreeBSD 15 (Current) -> used for summary
echo "OSNAME=\"$OSNAME\"" >> $ENV

# default vm count for testings
VMs=2
echo "VMs=\"$VMs\"" >> $ENV

# default cpu count for testing vm's
CPU=2
echo "CPU=\"$CPU\"" >> $ENV

sudo mkdir -p "/mnt/tests"
sudo chown -R $(whoami) /mnt/tests

# Back the build VM's disk with a sparse zvol; wait until the device
# node appears before using it.
DISK="/dev/zvol/zpool/openzfs"
sudo zfs create -ps -b 64k -V 80g zpool/openzfs
while true; do test -b $DISK && break; sleep 1; done

# we are downloading via axel, curl and wget are mostly slower and
# require more return value checking
IMG="/mnt/tests/cloud-image"
if [ ! -z "$URLxz" ]; then
  echo "Loading $URLxz ..."
  time axel -q -o "$IMG" "$URLxz"
  echo "Loading $KSRC ..."
  time axel -q -o ~/src.txz $KSRC
else
  echo "Loading $URL ..."
  time axel -q -o "$IMG" "$URL"
fi

echo "Importing VM image to zvol..."
if [ ! -z "$URLxz" ]; then
  # FreeBSD images are xz-compressed raw images
  xzcat -T0 $IMG | sudo dd of=$DISK bs=4M
else
  # everything else is qcow2 - convert while copying
  sudo qemu-img dd -f qcow2 -O raw if=$IMG of=$DISK bs=4M
fi
rm -f $IMG

# Generate the cloud-init user-data.  On Linux it creates the 'zfs'
# user with passwordless sudo; FreeBSD's nuageinit only supports a
# minimized config, so the user setup is done over ssh further below.
PUBKEY=$(cat ~/.ssh/id_ed25519.pub)
if [ ${OS:0:7} != "freebsd" ]; then
cat <<EOF > /tmp/user-data
#cloud-config

hostname: $OS

users:
  - name: root
    shell: $BASH
  - name: zfs
    sudo: ALL=(ALL) NOPASSWD:ALL
    shell: $BASH
    ssh_authorized_keys:
      - $PUBKEY

growpart:
  mode: auto
  devices: ['/']
  ignore_growroot_disabled: false
EOF
else
cat <<EOF > /tmp/user-data
#cloud-config

hostname: $OS

# minimized config without sudo for nuageinit of FreeBSD
growpart:
  mode: auto
  devices: ['/']
  ignore_growroot_disabled: false
EOF
fi

# Pin the build VM's MAC to a fixed DHCP address (192.168.122.10 = vm0)
sudo virsh net-update default add ip-dhcp-host \
  "<host mac='52:54:00:83:79:00' ip='192.168.122.10'/>" --live --config

sudo virt-install \
  --os-variant $OSv \
  --name "openzfs" \
  --cpu host-passthrough \
  --virt-type=kvm --hvm \
  --vcpus=4,sockets=1 \
  --memory $((1024*12)) \
  --memballoon model=virtio \
  --graphics none \
  --network bridge=virbr0,model=$NIC,mac='52:54:00:83:79:00' \
  --cloud-init user-data=/tmp/user-data \
  --disk $DISK,bus=virtio,cache=none,format=raw,driver.discard=unmap \
  --import --noautoconsole ${OPTS[0]} ${OPTS[1]} >/dev/null

# Give the VMs hostnames so we don't have to refer to them with
# hardcoded IP addresses.
#
# vm0: Initial VM we install dependencies and build ZFS on.
# vm1..2 Testing VMs
for ((i=0; i<=VMs; i++)); do
  echo "192.168.122.1$i vm$i" | sudo tee -a /etc/hosts
done

# in case the directory isn't there already
mkdir -p $HOME/.ssh

cat <<EOF >> $HOME/.ssh/config
# no questions please
StrictHostKeyChecking no

# small timeout, used in while loops later
ConnectTimeout 1
EOF

if [ ${OS:0:7} != "freebsd" ]; then
  # enable KSM on Linux
  sudo virsh dommemstat --domain "openzfs" --period 5
  sudo virsh node-memory-tune 100 50 1
  echo 1 | sudo tee /sys/kernel/mm/ksm/run > /dev/null
else
  # on FreeBSD we need some more init stuff, because of nuageinit
  BASH="/usr/local/bin/bash"
  # poll until sshd inside the guest answers (or qemu exits)
  while pidof /usr/bin/qemu-system-x86_64 >/dev/null; do
    ssh 2>/dev/null root@vm0 "uname -a" && break
  done
  ssh root@vm0 "pkg install -y bash ca_root_nss git qemu-guest-agent python3 py311-cloud-init"
  ssh root@vm0 "chsh -s $BASH root"
  ssh root@vm0 'sysrc qemu_guest_agent_enable="YES"'
  ssh root@vm0 'sysrc cloudinit_enable="YES"'
  ssh root@vm0 "pw add user zfs -w no -s $BASH"
  ssh root@vm0 'mkdir -p ~zfs/.ssh'
  ssh root@vm0 'echo "zfs ALL=(ALL:ALL) NOPASSWD: ALL" >> /usr/local/etc/sudoers'
  ssh root@vm0 'echo "PubkeyAuthentication yes" >> /etc/ssh/sshd_config'
  scp ~/.ssh/id_ed25519.pub "root@vm0:~zfs/.ssh/authorized_keys"
  ssh root@vm0 'chown -R zfs ~zfs'
  ssh root@vm0 'service sshd restart'
  scp ~/src.txz "root@vm0:/tmp/src.txz"
  ssh root@vm0 'tar -C / -zxf /tmp/src.txz'
fi
|
262
.github/workflows/scripts/qemu-3-deps-vm.sh
vendored
Executable file
262
.github/workflows/scripts/qemu-3-deps-vm.sh
vendored
Executable file
@@ -0,0 +1,262 @@
|
||||
#!/usr/bin/env bash

######################################################################
# 3) install dependencies for compiling and loading
#
# $1: OS name (like 'fedora41')
# $2: (optional) Experimental Fedora kernel version, like "6.14" to
#     install instead of Fedora defaults.
######################################################################

# Abort on any command failure and on use of unset variables.
set -eu
|
||||
|
||||
# Upgrade the Arch cloud image and install build/test dependencies.
function archlinux() {
  echo "##[group]Running pacman -Syu"
  # Grow the btrfs root to the full disk before the system upgrade.
  sudo btrfs filesystem resize max /
  sudo pacman -Syu --noconfirm
  echo "##[endgroup]"

  echo "##[group]Install Development Tools"
  sudo pacman -Sy --noconfirm base-devel bc cpio cryptsetup dhclient dkms \
    fakeroot fio gdb inetutils jq less linux linux-headers lsscsi nfs-utils \
    parted pax perf python-packaging python-setuptools qemu-guest-agent ksh \
    samba strace sysstat rng-tools rsync wget xxhash
  echo "##[endgroup]"
}
|
||||
|
||||
# Update a Debian/Ubuntu system and install build/test dependencies.
# Also used for Ubuntu (see the dispatch case at the bottom of the file).
function debian() {
  export DEBIAN_FRONTEND="noninteractive"

  echo "##[group]Running apt-get update+upgrade"
  # Remove any '-backports' entries from the apt sources before updating.
  sudo sed -i '/[[:alpha:]]-backports/d' /etc/apt/sources.list
  sudo apt-get update -y
  sudo apt-get upgrade -y
  echo "##[endgroup]"

  echo "##[group]Install Development Tools"
  sudo apt-get install -y \
    acl alien attr autoconf bc cpio cryptsetup curl dbench dh-python dkms \
    fakeroot fio gdb gdebi git ksh lcov isc-dhcp-client jq libacl1-dev \
    libaio-dev libattr1-dev libblkid-dev libcurl4-openssl-dev libdevmapper-dev \
    libelf-dev libffi-dev libmount-dev libpam0g-dev libselinux-dev libssl-dev \
    libtool libtool-bin libudev-dev libunwind-dev linux-headers-$(uname -r) \
    lsscsi nfs-kernel-server pamtester parted python3 python3-all-dev \
    python3-cffi python3-dev python3-distlib python3-packaging libtirpc-dev \
    python3-setuptools python3-sphinx qemu-guest-agent rng-tools rpm2cpio \
    rsync samba strace sysstat uuid-dev watchdog wget xfslibs-dev xxhash \
    zlib1g-dev
  echo "##[endgroup]"
}
|
||||
|
||||
# Install build/test dependencies on FreeBSD via pkg.
# The second pkg invocation uses -x (regex) to pick up the
# version-suffixed samba/python package names.
function freebsd() {
  export ASSUME_ALWAYS_YES="YES"

  echo "##[group]Install Development Tools"
  # FIX: 'pamtester' was listed twice in the original package list.
  sudo pkg install -y autoconf automake autotools base64 checkbashisms fio \
    gdb gettext gettext-runtime git gmake gsed jq ksh lcov libtool lscpu \
    pkgconf python python3 pamtester qemu-guest-agent rsync xxhash
  sudo pkg install -xy \
    '^samba4[[:digit:]]+$' \
    '^py3[[:digit:]]+-cffi$' \
    '^py3[[:digit:]]+-sysctl$' \
    '^py3[[:digit:]]+-setuptools$' \
    '^py3[[:digit:]]+-packaging$'
  echo "##[endgroup]"
}
|
||||
|
||||
# common packages for: almalinux, centos, redhat
function rhel() {
  echo "##[group]Running dnf update"
  # Parallel downloads speed up the large install below.
  echo "max_parallel_downloads=10" | sudo -E tee -a /etc/dnf/dnf.conf
  sudo dnf clean all
  sudo dnf update -y --setopt=fastestmirror=1 --refresh
  echo "##[endgroup]"

  echo "##[group]Install Development Tools"

  # Alma wants "Development Tools", Fedora 41 wants "development-tools"
  if ! sudo dnf group install -y "Development Tools" ; then
    echo "Trying 'development-tools' instead of 'Development Tools'"
    sudo dnf group install -y development-tools
  fi

  sudo dnf install -y \
    acl attr bc bzip2 cryptsetup curl dbench dkms elfutils-libelf-devel fio \
    gdb git jq kernel-rpm-macros ksh libacl-devel libaio-devel \
    libargon2-devel libattr-devel libblkid-devel libcurl-devel libffi-devel \
    ncompress libselinux-devel libtirpc-devel libtool libudev-devel \
    libuuid-devel lsscsi mdadm nfs-utils openssl-devel pam-devel pamtester \
    parted perf python3 python3-cffi python3-devel python3-packaging \
    kernel-devel python3-setuptools qemu-guest-agent rng-tools rpcgen \
    rpm-build rsync samba strace sysstat systemd watchdog wget xfsprogs-devel \
    xxhash zlib-devel
  echo "##[endgroup]"
}
|
||||
|
||||
# openSUSE Tumbleweed support is not implemented yet; this is a
# deliberate placeholder that stalls until the CI job times out.
function tumbleweed() {
  echo "##[group]Running zypper is TODO!"
  sleep 23456
  echo "##[endgroup]"
}
|
||||
|
||||
# $1: Kernel version to install (like '6.14rc7')
#
# Enables the kernel-vanilla COPR repos, installs the newest available
# build matching the requested version, then disables the repos again.
function install_fedora_experimental_kernel {

  our_version="$1"
  sudo dnf -y copr enable @kernel-vanilla/stable
  sudo dnf -y copr enable @kernel-vanilla/mainline
  # Quote the globs so the shell cannot expand them against files in
  # the current directory - they are meant for dnf.
  all="$(sudo dnf list --showduplicates 'kernel-*' 'python3-perf*' 'perf*' 'bpftool*')"
  echo "Available versions:"
  echo "$all"

  # You can have a bunch of minor variants of the version we want '6.14'.
  # Pick the newest variant (sorted by version number).
  #
  # Use 'grep -F' with a quoted argument: the version string contains a
  # '.' which is a regex metacharacter ('6.14' would also match '6X14'),
  # and an unquoted expansion would be subject to word-splitting.
  specific_version=$(echo "$all" | grep -F "$our_version" | awk '{print $2}' | sort -V | tail -n 1)
  list="$(echo "$all" | grep -F "$specific_version" | grep -Ev 'kernel-rt|kernel-selftests|kernel-debuginfo' | sed 's/.x86_64//g' | awk '{print $1"-"$2}')"
  # $list is intentionally unquoted: it holds multiple package names.
  sudo dnf install -y $list
  sudo dnf -y copr disable @kernel-vanilla/stable
  sudo dnf -y copr disable @kernel-vanilla/mainline
}
|
||||
|
||||
# Install dependencies - dispatch on the OS name ($1) to the matching
# per-distro helper function defined above, plus any distro-specific
# extras.
case "$1" in
  almalinux8)
    echo "##[group]Enable epel and powertools repositories"
    sudo dnf config-manager -y --set-enabled powertools
    sudo dnf install -y epel-release
    echo "##[endgroup]"
    rhel
    echo "##[group]Install kernel-abi-whitelists"
    sudo dnf install -y kernel-abi-whitelists
    echo "##[endgroup]"
    ;;
  almalinux9|almalinux10|centos-stream9|centos-stream10)
    echo "##[group]Enable epel and crb repositories"
    sudo dnf config-manager -y --set-enabled crb
    sudo dnf install -y epel-release
    echo "##[endgroup]"
    rhel
    # EL9+ renamed kernel-abi-whitelists to kernel-abi-stablelists
    echo "##[group]Install kernel-abi-stablelists"
    sudo dnf install -y kernel-abi-stablelists
    echo "##[endgroup]"
    ;;
  archlinux)
    archlinux
    ;;
  debian*)
    echo 'debconf debconf/frontend select Noninteractive' | sudo debconf-set-selections
    debian
    echo "##[group]Install Debian specific"
    sudo apt-get install -yq linux-perf dh-sequence-dkms
    echo "##[endgroup]"
    ;;
  fedora*)
    rhel
    sudo dnf install -y libunwind-devel

    # Fedora 42+ moves /usr/bin/script from 'util-linux' to 'util-linux-script'
    sudo dnf install -y util-linux-script || true

    # Optional: Install an experimental kernel ($2 = kernel version)
    if [ -n "${2:-}" ] ; then
      install_fedora_experimental_kernel "$2"
    fi
    ;;
  freebsd*)
    freebsd
    ;;
  tumbleweed)
    tumbleweed
    ;;
  ubuntu*)
    debian
    echo "##[group]Install Ubuntu specific"
    sudo apt-get install -yq linux-tools-common libtirpc-dev \
      linux-modules-extra-$(uname -r)
    sudo apt-get install -yq dh-sequence-dkms
    echo "##[endgroup]"
    # Remove Ubuntu's prebuilt ZFS modules so the freshly built ones
    # are the only candidates for loading.
    echo "##[group]Delete Ubuntu OpenZFS modules"
    for i in $(find /lib/modules -name zfs -type d); do sudo rm -rvf $i; done
    echo "##[endgroup]"
    ;;
esac
|
||||
|
||||
# This script is used for checkstyle + zloop deps also.
# Install only the needed packages and exit - when used this way.
test -z "${ONLY_DEPS:-}" || exit 0

# Start services
echo "##[group]Enable services"
case "$1" in
  freebsd*)
    # add virtio things
    echo 'virtio_load="YES"' | sudo -E tee -a /boot/loader.conf
    for i in balloon blk console random scsi; do
      echo "virtio_${i}_load=\"YES\"" | sudo -E tee -a /boot/loader.conf
    done
    echo "fdescfs /dev/fd fdescfs rw 0 0" | sudo -E tee -a /etc/fstab
    sudo -E mount /dev/fd
    # ZFS share support: mountd reads exports from /etc/zfs/exports
    sudo -E touch /etc/zfs/exports
    sudo -E sysrc mountd_flags="/etc/zfs/exports"
    echo '[global]' | sudo -E tee /usr/local/etc/smb4.conf >/dev/null
    sudo -E service nfsd enable
    sudo -E service qemu-guest-agent enable
    sudo -E service samba_server enable
    ;;
  debian*|ubuntu*)
    sudo -E systemctl enable nfs-kernel-server
    sudo -E systemctl enable qemu-guest-agent
    sudo -E systemctl enable smbd
    ;;
  *)
    # All other linux distros
    sudo -E systemctl enable nfs-server
    sudo -E systemctl enable qemu-guest-agent
    sudo -E systemctl enable smb
    ;;
esac
echo "##[endgroup]"

# Setup Kernel cmdline: serial console for log capture, selinux off,
# and no_timer_check to quiet timer warnings under virtualization.
CMDLINE="console=tty0 console=ttyS0,115200n8"
CMDLINE="$CMDLINE selinux=0"
CMDLINE="$CMDLINE random.trust_cpu=on"
CMDLINE="$CMDLINE no_timer_check"
case "$1" in
  almalinux*|centos*|fedora*)
    GRUB_CFG="/boot/grub2/grub.cfg"
    GRUB_MKCONFIG="grub2-mkconfig"
    CMDLINE="$CMDLINE biosdevname=0 net.ifnames=0"
    echo 'GRUB_SERIAL_COMMAND="serial --speed=115200"' \
      | sudo tee -a /etc/default/grub >/dev/null
    ;;
  ubuntu24)
    GRUB_CFG="/boot/grub/grub.cfg"
    GRUB_MKCONFIG="grub-mkconfig"
    echo 'GRUB_DISABLE_OS_PROBER="false"' \
      | sudo tee -a /etc/default/grub >/dev/null
    ;;
  *)
    GRUB_CFG="/boot/grub/grub.cfg"
    GRUB_MKCONFIG="grub-mkconfig"
    ;;
esac

# Apply the cmdline (archlinux and FreeBSD don't use grub here)
case "$1" in
  archlinux|freebsd*)
    true
    ;;
  *)
    echo "##[group]Edit kernel cmdline"
    sudo sed -i -e '/^GRUB_CMDLINE_LINUX/d' /etc/default/grub || true
    echo "GRUB_CMDLINE_LINUX=\"$CMDLINE\"" \
      | sudo tee -a /etc/default/grub >/dev/null
    sudo $GRUB_MKCONFIG -o $GRUB_CFG
    echo "##[endgroup]"
    ;;
esac

# reset cloud-init configuration and poweroff
sudo cloud-init clean --logs
sleep 2 && sudo poweroff &
exit 0
|
28
.github/workflows/scripts/qemu-3-deps.sh
vendored
Executable file
28
.github/workflows/scripts/qemu-3-deps.sh
vendored
Executable file
@@ -0,0 +1,28 @@
|
||||
#!/usr/bin/env bash

######################################################################
# 3) Wait for VM to boot from previous step and launch dependencies
# script on it.
#
# $1: OS name (like 'fedora41')
# $2: (optional) Experimental kernel version to install on fedora,
#     like "6.14".
######################################################################

.github/workflows/scripts/qemu-wait-for-vm.sh vm0

# SPECIAL CASE:
#
# If the user passed in an experimental kernel version to test on Fedora,
# we need to update the kernel version in zfs's META file to allow the
# build to happen.  We update our local copy of META here, since we know
# it will be rsync'd up in the next step.
if [ -n "${2:-}" ] ; then
  sed -i -E 's/Linux-Maximum: .+/Linux-Maximum: 99.99/g' META
fi

scp .github/workflows/scripts/qemu-3-deps-vm.sh zfs@vm0:qemu-3-deps-vm.sh
# Remember the qemu PID now: the deps script powers the VM off when it
# finishes, and we wait on that PID below.  ($() instead of deprecated
# backticks; the file previously also lacked a shebang line.)
PID=$(pidof /usr/bin/qemu-system-x86_64)
ssh zfs@vm0 '$HOME/qemu-3-deps-vm.sh' "$@"
# wait for poweroff to succeed
tail --pid=$PID -f /dev/null
sleep 5 # avoid this: "error: Domain is already active"
rm -f $HOME/.ssh/known_hosts
|
396
.github/workflows/scripts/qemu-4-build-vm.sh
vendored
Executable file
396
.github/workflows/scripts/qemu-4-build-vm.sh
vendored
Executable file
@@ -0,0 +1,396 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
######################################################################
|
||||
# 4) configure and build openzfs modules. This is run on the VMs.
|
||||
#
|
||||
# Usage:
|
||||
#
|
||||
# qemu-4-build-vm.sh OS [--enable-debug][--dkms][--patch-level NUM]
|
||||
# [--poweroff][--release][--repo][--tarball]
|
||||
#
|
||||
# OS: OS name like 'fedora41'
|
||||
# --enable-debug: Build RPMs with '--enable-debug' (for testing)
|
||||
# --dkms: Build DKMS RPMs as well
|
||||
# --patch-level NUM: Use a custom patch level number for packages.
|
||||
# --poweroff: Power-off the VM after building
|
||||
# --release Build zfs-release*.rpm as well
|
||||
# --repo After building everything, copy RPMs into /tmp/repo
|
||||
# in the ZFS RPM repository file structure. Also
|
||||
# copy tarballs if they were built.
|
||||
# --tarball: Also build a tarball of ZFS source
|
||||
######################################################################
|
||||
|
||||
ENABLE_DEBUG=""
|
||||
DKMS=""
|
||||
PATCH_LEVEL=""
|
||||
POWEROFF=""
|
||||
RELEASE=""
|
||||
REPO=""
|
||||
TARBALL=""
|
||||
while [[ $# -gt 0 ]]; do
|
||||
case $1 in
|
||||
--enable-debug)
|
||||
ENABLE_DEBUG=1
|
||||
shift
|
||||
;;
|
||||
--dkms)
|
||||
DKMS=1
|
||||
shift
|
||||
;;
|
||||
--patch-level)
|
||||
PATCH_LEVEL=$2
|
||||
shift
|
||||
shift
|
||||
;;
|
||||
--poweroff)
|
||||
POWEROFF=1
|
||||
shift
|
||||
;;
|
||||
--release)
|
||||
RELEASE=1
|
||||
shift
|
||||
;;
|
||||
--repo)
|
||||
REPO=1
|
||||
shift
|
||||
;;
|
||||
--tarball)
|
||||
TARBALL=1
|
||||
shift
|
||||
;;
|
||||
*)
|
||||
OS=$1
|
||||
shift
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
set -eu
|
||||
|
||||
function run() {
|
||||
LOG="/var/tmp/build-stderr.txt"
|
||||
echo "****************************************************"
|
||||
echo "$(date) ($*)"
|
||||
echo "****************************************************"
|
||||
($@ || echo $? > /tmp/rv) 3>&1 1>&2 2>&3 | stdbuf -eL -oL tee -a $LOG
|
||||
if [ -f /tmp/rv ]; then
|
||||
RV=$(cat /tmp/rv)
|
||||
echo "****************************************************"
|
||||
echo "exit with value=$RV ($*)"
|
||||
echo "****************************************************"
|
||||
echo 1 > /var/tmp/build-exitcode.txt
|
||||
exit $RV
|
||||
fi
|
||||
}
|
||||
|
||||
# Look at the RPMs in the current directory and copy/move them to
|
||||
# /tmp/repo, using the directory structure we use for the ZFS RPM repos.
|
||||
#
|
||||
# For example:
|
||||
# /tmp/repo/epel-testing/9.5
|
||||
# /tmp/repo/epel-testing/9.5/SRPMS
|
||||
# /tmp/repo/epel-testing/9.5/SRPMS/zfs-2.3.99-1.el9.src.rpm
|
||||
# /tmp/repo/epel-testing/9.5/SRPMS/zfs-kmod-2.3.99-1.el9.src.rpm
|
||||
# /tmp/repo/epel-testing/9.5/kmod
|
||||
# /tmp/repo/epel-testing/9.5/kmod/x86_64
|
||||
# /tmp/repo/epel-testing/9.5/kmod/x86_64/debug
|
||||
# /tmp/repo/epel-testing/9.5/kmod/x86_64/debug/kmod-zfs-debuginfo-2.3.99-1.el9.x86_64.rpm
|
||||
# /tmp/repo/epel-testing/9.5/kmod/x86_64/debug/libnvpair3-debuginfo-2.3.99-1.el9.x86_64.rpm
|
||||
# /tmp/repo/epel-testing/9.5/kmod/x86_64/debug/libuutil3-debuginfo-2.3.99-1.el9.x86_64.rpm
|
||||
# ...
|
||||
function copy_rpms_to_repo {
|
||||
# Pick a RPM to query. It doesn't matter which one - we just want to extract
|
||||
# the 'Build Host' value from it.
|
||||
rpm=$(ls zfs-*.rpm | head -n 1)
|
||||
|
||||
# Get zfs version '2.2.99'
|
||||
zfs_ver=$(rpm -qpi $rpm | awk '/Version/{print $3}')
|
||||
|
||||
# Get "2.1" or "2.2"
|
||||
zfs_major=$(echo $zfs_ver | grep -Eo [0-9]+\.[0-9]+)
|
||||
|
||||
# Get 'almalinux9.5' or 'fedora41' type string
|
||||
build_host=$(rpm -qpi $rpm | awk '/Build Host/{print $4}')
|
||||
|
||||
# Get '9.5' or '41' OS version
|
||||
os_ver=$(echo $build_host | grep -Eo '[0-9\.]+$')
|
||||
|
||||
# Our ZFS version and OS name will determine which repo the RPMs
|
||||
# will go in (regular or testing). Fedora always gets the newest
|
||||
# releases, and Alma gets the older releases.
|
||||
case $build_host in
|
||||
almalinux*)
|
||||
case $zfs_major in
|
||||
2.2)
|
||||
d="epel"
|
||||
;;
|
||||
*)
|
||||
d="epel-testing"
|
||||
;;
|
||||
esac
|
||||
;;
|
||||
fedora*)
|
||||
d="fedora"
|
||||
;;
|
||||
esac
|
||||
|
||||
prefix=/tmp/repo
|
||||
dst="$prefix/$d/$os_ver"
|
||||
|
||||
# Special case: move zfs-release*.rpm out of the way first (if we built them).
|
||||
# This will make filtering the other RPMs easier.
|
||||
mkdir -p $dst
|
||||
mv zfs-release*.rpm $dst || true
|
||||
|
||||
# Copy source RPMs
|
||||
mkdir -p $dst/SRPMS
|
||||
cp $(ls *.src.rpm) $dst/SRPMS/
|
||||
|
||||
if [[ "$build_host" =~ "almalinux" ]] ; then
|
||||
# Copy kmods+userspace
|
||||
mkdir -p $dst/kmod/x86_64/debug
|
||||
cp $(ls *.rpm | grep -Ev 'src.rpm|dkms|debuginfo') $dst/kmod/x86_64
|
||||
cp *debuginfo*.rpm $dst/kmod/x86_64/debug
|
||||
fi
|
||||
|
||||
if [ -n "$DKMS" ] ; then
|
||||
# Copy dkms+userspace
|
||||
mkdir -p $dst/x86_64
|
||||
cp $(ls *.rpm | grep -Ev 'src.rpm|kmod|debuginfo') $dst/x86_64
|
||||
fi
|
||||
|
||||
# Copy debug
|
||||
mkdir -p $dst/x86_64/debug
|
||||
cp $(ls *debuginfo*.rpm | grep -v kmod) $dst/x86_64/debug
|
||||
}
|
||||
|
||||
function freebsd() {
|
||||
extra="${1:-}"
|
||||
|
||||
export MAKE="gmake"
|
||||
echo "##[group]Autogen.sh"
|
||||
run ./autogen.sh
|
||||
echo "##[endgroup]"
|
||||
|
||||
echo "##[group]Configure"
|
||||
run ./configure \
|
||||
--prefix=/usr/local \
|
||||
--with-libintl-prefix=/usr/local \
|
||||
--enable-pyzfs \
|
||||
--enable-debuginfo $extra
|
||||
echo "##[endgroup]"
|
||||
|
||||
echo "##[group]Build"
|
||||
run gmake -j$(sysctl -n hw.ncpu)
|
||||
echo "##[endgroup]"
|
||||
|
||||
echo "##[group]Install"
|
||||
run sudo gmake install
|
||||
echo "##[endgroup]"
|
||||
}
|
||||
|
||||
function linux() {
|
||||
extra="${1:-}"
|
||||
|
||||
echo "##[group]Autogen.sh"
|
||||
run ./autogen.sh
|
||||
echo "##[endgroup]"
|
||||
|
||||
echo "##[group]Configure"
|
||||
run ./configure \
|
||||
--prefix=/usr \
|
||||
--enable-pyzfs \
|
||||
--enable-debuginfo $extra
|
||||
echo "##[endgroup]"
|
||||
|
||||
echo "##[group]Build"
|
||||
run make -j$(nproc)
|
||||
echo "##[endgroup]"
|
||||
|
||||
echo "##[group]Install"
|
||||
run sudo make install
|
||||
echo "##[endgroup]"
|
||||
}
|
||||
|
||||
function rpm_build_and_install() {
|
||||
extra="${1:-}"
|
||||
|
||||
# Build RPMs with XZ compression by default (since gzip decompression is slow)
|
||||
echo "%_binary_payload w7.xzdio" >> ~/.rpmmacros
|
||||
|
||||
echo "##[group]Autogen.sh"
|
||||
run ./autogen.sh
|
||||
echo "##[endgroup]"
|
||||
|
||||
if [ -n "$PATCH_LEVEL" ] ; then
|
||||
sed -i -E 's/(Release:\s+)1/\1'$PATCH_LEVEL'/g' META
|
||||
fi
|
||||
|
||||
echo "##[group]Configure"
|
||||
run ./configure --enable-debuginfo $extra
|
||||
echo "##[endgroup]"
|
||||
|
||||
echo "##[group]Build"
|
||||
run make pkg-kmod pkg-utils
|
||||
echo "##[endgroup]"
|
||||
|
||||
if [ -n "$DKMS" ] ; then
|
||||
echo "##[group]DKMS"
|
||||
make rpm-dkms
|
||||
echo "##[endgroup]"
|
||||
fi
|
||||
|
||||
if [ -n "$REPO" ] ; then
|
||||
echo "Skipping install since we're only building RPMs and nothing else"
|
||||
else
|
||||
echo "##[group]Install"
|
||||
run sudo dnf -y --nobest install $(ls *.rpm | grep -Ev 'dkms|src.rpm')
|
||||
echo "##[endgroup]"
|
||||
fi
|
||||
|
||||
# Optionally build the zfs-release.*.rpm
|
||||
if [ -n "$RELEASE" ] ; then
|
||||
echo "##[group]Release"
|
||||
pushd ~
|
||||
sudo dnf -y install rpm-build || true
|
||||
# Check out a sparse copy of zfsonlinux.github.com.git so we don't get
|
||||
# all the binaries. We just need a few kilobytes of files to build RPMs.
|
||||
git clone --depth 1 --no-checkout \
|
||||
https://github.com/zfsonlinux/zfsonlinux.github.com.git
|
||||
|
||||
cd zfsonlinux.github.com
|
||||
git sparse-checkout set zfs-release
|
||||
git checkout
|
||||
cd zfs-release
|
||||
|
||||
mkdir -p ~/rpmbuild/{BUILDROOT,SPECS,RPMS,SRPMS,SOURCES,BUILD}
|
||||
cp RPM-GPG-KEY-openzfs* *.repo ~/rpmbuild/SOURCES
|
||||
cp zfs-release.spec ~/rpmbuild/SPECS/
|
||||
rpmbuild -ba ~/rpmbuild/SPECS/zfs-release.spec
|
||||
|
||||
# ZFS release RPMs are built. Copy them to the ~/zfs directory just to
|
||||
# keep all the RPMs in the same place.
|
||||
cp ~/rpmbuild/RPMS/noarch/*.rpm ~/zfs
|
||||
cp ~/rpmbuild/SRPMS/*.rpm ~/zfs
|
||||
|
||||
popd
|
||||
rm -fr ~/rpmbuild
|
||||
echo "##[endgroup]"
|
||||
fi
|
||||
|
||||
if [ -n "$REPO" ] ; then
|
||||
echo "##[group]Repo"
|
||||
copy_rpms_to_repo
|
||||
echo "##[endgroup]"
|
||||
fi
|
||||
}
|
||||
|
||||
function deb_build_and_install() {
|
||||
extra="${1:-}"
|
||||
|
||||
echo "##[group]Autogen.sh"
|
||||
run ./autogen.sh
|
||||
echo "##[endgroup]"
|
||||
|
||||
echo "##[group]Configure"
|
||||
run ./configure \
|
||||
--prefix=/usr \
|
||||
--enable-pyzfs \
|
||||
--enable-debuginfo $extra
|
||||
echo "##[endgroup]"
|
||||
|
||||
echo "##[group]Build"
|
||||
run make native-deb-kmod native-deb-utils
|
||||
echo "##[endgroup]"
|
||||
|
||||
echo "##[group]Install"
|
||||
# Do kmod install. Note that when you build the native debs, the
|
||||
# packages themselves are placed in parent directory '../' rather than
|
||||
# in the source directory like the rpms are.
|
||||
run sudo apt-get -y install $(find ../ | grep -E '\.deb$' \
|
||||
| grep -Ev 'dkms|dracut')
|
||||
echo "##[endgroup]"
|
||||
}
|
||||
|
||||
function build_tarball {
|
||||
if [ -n "$REPO" ] ; then
|
||||
./autogen.sh
|
||||
./configure --with-config=srpm
|
||||
make dist
|
||||
mkdir -p /tmp/repo/releases
|
||||
# The tarball name is based off of 'Version' field in the META file.
|
||||
mv *.tar.gz /tmp/repo/releases/
|
||||
fi
|
||||
}
|
||||
|
||||
# Debug: show kernel cmdline
|
||||
if [ -f /proc/cmdline ] ; then
|
||||
cat /proc/cmdline || true
|
||||
fi
|
||||
|
||||
# Set our hostname to our OS name and version number. Specifically, we set the
|
||||
# major and minor number so that when we query the Build Host field in the RPMs
|
||||
# we build, we can see what specific version of Fedora/Almalinux we were using
|
||||
# to build them. This is helpful for matching up KMOD versions.
|
||||
#
|
||||
# Examples:
|
||||
#
|
||||
# rhel8.10
|
||||
# almalinux9.5
|
||||
# fedora42
|
||||
source /etc/os-release
|
||||
if which hostnamectl &> /dev/null ; then
|
||||
# Fedora 42+ use hostnamectl
|
||||
sudo hostnamectl set-hostname "$ID$VERSION_ID"
|
||||
sudo hostnamectl set-hostname --pretty "$ID$VERSION_ID"
|
||||
else
|
||||
sudo hostname "$ID$VERSION_ID"
|
||||
fi
|
||||
|
||||
# save some sysinfo
|
||||
uname -a > /var/tmp/uname.txt
|
||||
|
||||
cd $HOME/zfs
|
||||
export PATH="$PATH:/sbin:/usr/sbin:/usr/local/sbin"
|
||||
|
||||
extra=""
|
||||
if [ -n "$ENABLE_DEBUG" ] ; then
|
||||
extra="--enable-debug"
|
||||
fi
|
||||
|
||||
# build
|
||||
case "$OS" in
|
||||
freebsd*)
|
||||
freebsd "$extra"
|
||||
;;
|
||||
alma*|centos*)
|
||||
rpm_build_and_install "--with-spec=redhat $extra"
|
||||
;;
|
||||
fedora*)
|
||||
rpm_build_and_install "$extra"
|
||||
|
||||
# Historically, we've always built the release tarballs on Fedora, since
|
||||
# there was one instance long ago where we built them on CentOS 7, and they
|
||||
# didn't work correctly for everyone.
|
||||
if [ -n "$TARBALL" ] ; then
|
||||
build_tarball
|
||||
fi
|
||||
;;
|
||||
debian*|ubuntu*)
|
||||
deb_build_and_install "$extra"
|
||||
;;
|
||||
*)
|
||||
linux "$extra"
|
||||
;;
|
||||
esac
|
||||
|
||||
|
||||
# building the zfs module was ok
|
||||
echo 0 > /var/tmp/build-exitcode.txt
|
||||
|
||||
# reset cloud-init configuration and poweroff
|
||||
if [ -n "$POWEROFF" ] ; then
|
||||
sudo cloud-init clean --logs
|
||||
sync && sleep 2 && sudo poweroff &
|
||||
fi
|
||||
exit 0
|
11
.github/workflows/scripts/qemu-4-build.sh
vendored
Executable file
11
.github/workflows/scripts/qemu-4-build.sh
vendored
Executable file
@ -0,0 +1,11 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
######################################################################
|
||||
# 4) configure and build openzfs modules
|
||||
######################################################################
|
||||
echo "Build modules in QEMU machine"
|
||||
|
||||
# Bring our VM back up and copy over ZFS source
|
||||
.github/workflows/scripts/qemu-prepare-for-build.sh
|
||||
|
||||
ssh zfs@vm0 '$HOME/zfs/.github/workflows/scripts/qemu-4-build-vm.sh' $@
|
137
.github/workflows/scripts/qemu-5-setup.sh
vendored
Executable file
137
.github/workflows/scripts/qemu-5-setup.sh
vendored
Executable file
@ -0,0 +1,137 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
######################################################################
|
||||
# 5) start test machines and load openzfs module
|
||||
######################################################################
|
||||
|
||||
set -eu
|
||||
|
||||
# read our defined variables
|
||||
source /var/tmp/env.txt
|
||||
|
||||
# wait for poweroff to succeed
|
||||
PID=$(pidof /usr/bin/qemu-system-x86_64)
|
||||
tail --pid=$PID -f /dev/null
|
||||
sudo virsh undefine --nvram openzfs
|
||||
|
||||
# cpu pinning
|
||||
CPUSET=("0,1" "2,3")
|
||||
|
||||
# additional options for virt-install
|
||||
OPTS[0]=""
|
||||
OPTS[1]=""
|
||||
|
||||
case "$OS" in
|
||||
freebsd*)
|
||||
# FreeBSD needs only 6GiB
|
||||
RAM=6
|
||||
;;
|
||||
debian13)
|
||||
RAM=8
|
||||
# Boot Debian 13 with uefi=on and secureboot=off (ZFS Kernel Module not signed)
|
||||
OPTS[0]="--boot"
|
||||
OPTS[1]="firmware=efi,firmware.feature0.name=secure-boot,firmware.feature0.enabled=no"
|
||||
;;
|
||||
*)
|
||||
# Linux needs more memory, but can be optimized to share it via KSM
|
||||
RAM=8
|
||||
;;
|
||||
esac
|
||||
|
||||
# create snapshot we can clone later
|
||||
sudo zfs snapshot zpool/openzfs@now
|
||||
|
||||
# setup the testing vm's
|
||||
PUBKEY=$(cat ~/.ssh/id_ed25519.pub)
|
||||
|
||||
# start testing VMs
|
||||
for ((i=1; i<=VMs; i++)); do
|
||||
echo "Creating disk for vm$i..."
|
||||
DISK="/dev/zvol/zpool/vm$i"
|
||||
FORMAT="raw"
|
||||
sudo zfs clone zpool/openzfs@now zpool/vm$i-system
|
||||
sudo zfs create -ps -b 64k -V 64g zpool/vm$i-tests
|
||||
|
||||
cat <<EOF > /tmp/user-data
|
||||
#cloud-config
|
||||
|
||||
fqdn: vm$i
|
||||
|
||||
users:
|
||||
- name: root
|
||||
shell: $BASH
|
||||
- name: zfs
|
||||
sudo: ALL=(ALL) NOPASSWD:ALL
|
||||
shell: $BASH
|
||||
ssh_authorized_keys:
|
||||
- $PUBKEY
|
||||
|
||||
growpart:
|
||||
mode: auto
|
||||
devices: ['/']
|
||||
ignore_growroot_disabled: false
|
||||
EOF
|
||||
|
||||
sudo virsh net-update default add ip-dhcp-host \
|
||||
"<host mac='52:54:00:83:79:0$i' ip='192.168.122.1$i'/>" --live --config
|
||||
|
||||
sudo virt-install \
|
||||
--os-variant $OSv \
|
||||
--name "vm$i" \
|
||||
--cpu host-passthrough \
|
||||
--virt-type=kvm --hvm \
|
||||
--vcpus=$CPU,sockets=1 \
|
||||
--cpuset=${CPUSET[$((i-1))]} \
|
||||
--memory $((1024*RAM)) \
|
||||
--memballoon model=virtio \
|
||||
--graphics none \
|
||||
--cloud-init user-data=/tmp/user-data \
|
||||
--network bridge=virbr0,model=$NIC,mac="52:54:00:83:79:0$i" \
|
||||
--disk $DISK-system,bus=virtio,cache=none,format=$FORMAT,driver.discard=unmap \
|
||||
--disk $DISK-tests,bus=virtio,cache=none,format=$FORMAT,driver.discard=unmap \
|
||||
--import --noautoconsole ${OPTS[0]} ${OPTS[1]}
|
||||
done
|
||||
|
||||
# generate some memory stats
|
||||
cat <<EOF > cronjob.sh
|
||||
exec 1>>/var/tmp/stats.txt
|
||||
exec 2>&1
|
||||
echo "********************************************************************************"
|
||||
uptime
|
||||
free -m
|
||||
zfs list
|
||||
EOF
|
||||
|
||||
sudo chmod +x cronjob.sh
|
||||
sudo mv -f cronjob.sh /root/cronjob.sh
|
||||
echo '*/5 * * * * /root/cronjob.sh' > crontab.txt
|
||||
sudo crontab crontab.txt
|
||||
rm crontab.txt
|
||||
|
||||
# Save the VM's serial output (ttyS0) to /var/tmp/console.txt
|
||||
# - ttyS0 on the VM corresponds to a local /dev/pty/N entry
|
||||
# - use 'virsh ttyconsole' to lookup the /dev/pty/N entry
|
||||
for ((i=1; i<=VMs; i++)); do
|
||||
mkdir -p $RESPATH/vm$i
|
||||
read "pty" <<< $(sudo virsh ttyconsole vm$i)
|
||||
|
||||
# Create the file so we can tail it, even if there's no output.
|
||||
touch $RESPATH/vm$i/console.txt
|
||||
|
||||
sudo nohup bash -c "cat $pty > $RESPATH/vm$i/console.txt" &
|
||||
|
||||
# Write all VM boot lines to the console to aid in debugging failed boots.
|
||||
# The boot lines from all the VMs will be munged together, so prepend each
|
||||
# line with the vm hostname (like 'vm1:').
|
||||
(while IFS=$'\n' read -r line; do echo "vm$i: $line" ; done < <(sudo tail -f $RESPATH/vm$i/console.txt)) &
|
||||
|
||||
done
|
||||
echo "Console logging for ${VMs}x $OS started."
|
||||
|
||||
|
||||
# check if the machines are okay
|
||||
echo "Waiting for vm's to come up... (${VMs}x CPU=$CPU RAM=$RAM)"
|
||||
for ((i=1; i<=VMs; i++)); do
|
||||
.github/workflows/scripts/qemu-wait-for-vm.sh vm$i
|
||||
done
|
||||
echo "All $VMs VMs are up now."
|
119
.github/workflows/scripts/qemu-6-tests.sh
vendored
Executable file
119
.github/workflows/scripts/qemu-6-tests.sh
vendored
Executable file
@ -0,0 +1,119 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
######################################################################
|
||||
# 6) load openzfs module and run the tests
|
||||
#
|
||||
# called on runner: qemu-6-tests.sh
|
||||
# called on qemu-vm: qemu-6-tests.sh $OS $2/$3
|
||||
######################################################################
|
||||
|
||||
set -eu
|
||||
|
||||
function prefix() {
|
||||
ID="$1"
|
||||
LINE="$2"
|
||||
CURRENT=$(date +%s)
|
||||
TSSTART=$(cat /tmp/tsstart)
|
||||
DIFF=$((CURRENT-TSSTART))
|
||||
H=$((DIFF/3600))
|
||||
DIFF=$((DIFF-(H*3600)))
|
||||
M=$((DIFF/60))
|
||||
S=$((DIFF-(M*60)))
|
||||
|
||||
CTR=$(cat /tmp/ctr)
|
||||
echo $LINE| grep -q '^\[.*] Test[: ]' && CTR=$((CTR+1)) && echo $CTR > /tmp/ctr
|
||||
|
||||
BASE="$HOME/work/zfs/zfs"
|
||||
COLOR="$BASE/scripts/zfs-tests-color.sh"
|
||||
CLINE=$(echo $LINE| grep '^\[.*] Test[: ]' \
|
||||
| sed -e 's|^\[.*] Test|Test|g' \
|
||||
| sed -e 's|/usr/local|/usr|g' \
|
||||
| sed -e 's| /usr/share/zfs/zfs-tests/tests/| |g' | $COLOR)
|
||||
if [ -z "$CLINE" ]; then
|
||||
printf "vm${ID}: %s\n" "$LINE"
|
||||
else
|
||||
# [vm2: 00:15:54 256] Test: functional/checksum/setup (run as root) [00:00] [PASS]
|
||||
printf "[vm${ID}: %02d:%02d:%02d %4d] %s\n" \
|
||||
"$H" "$M" "$S" "$CTR" "$CLINE"
|
||||
fi
|
||||
}
|
||||
|
||||
# called directly on the runner
|
||||
if [ -z ${1:-} ]; then
|
||||
cd "/var/tmp"
|
||||
source env.txt
|
||||
SSH=$(which ssh)
|
||||
TESTS='$HOME/zfs/.github/workflows/scripts/qemu-6-tests.sh'
|
||||
echo 0 > /tmp/ctr
|
||||
date "+%s" > /tmp/tsstart
|
||||
|
||||
for ((i=1; i<=VMs; i++)); do
|
||||
IP="192.168.122.1$i"
|
||||
daemonize -c /var/tmp -p vm${i}.pid -o vm${i}log.txt -- \
|
||||
$SSH zfs@$IP $TESTS $OS $i $VMs $CI_TYPE
|
||||
# handly line by line and add info prefix
|
||||
stdbuf -oL tail -fq vm${i}log.txt \
|
||||
| while read -r line; do prefix "$i" "$line"; done &
|
||||
echo $! > vm${i}log.pid
|
||||
# don't mix up the initial --- Configuration --- part
|
||||
sleep 0.13
|
||||
done
|
||||
|
||||
# wait for all vm's to finish
|
||||
for ((i=1; i<=VMs; i++)); do
|
||||
tail --pid=$(cat vm${i}.pid) -f /dev/null
|
||||
pid=$(cat vm${i}log.pid)
|
||||
rm -f vm${i}log.pid
|
||||
kill $pid
|
||||
done
|
||||
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# this part runs inside qemu vm
|
||||
export PATH="$PATH:/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/sbin:/usr/local/bin"
|
||||
case "$1" in
|
||||
freebsd*)
|
||||
TDIR="/usr/local/share/zfs"
|
||||
sudo kldstat -n zfs 2>/dev/null && sudo kldunload zfs
|
||||
sudo -E ./zfs/scripts/zfs.sh
|
||||
sudo mv -f /var/tmp/*.txt /tmp
|
||||
sudo newfs -U -t -L tmp /dev/vtbd1 >/dev/null
|
||||
sudo mount -o noatime /dev/vtbd1 /var/tmp
|
||||
sudo chmod 1777 /var/tmp
|
||||
sudo mv -f /tmp/*.txt /var/tmp
|
||||
;;
|
||||
*)
|
||||
# use xfs @ /var/tmp for all distros
|
||||
TDIR="/usr/share/zfs"
|
||||
sudo -E modprobe zfs
|
||||
sudo mv -f /var/tmp/*.txt /tmp
|
||||
sudo mkfs.xfs -fq /dev/vdb
|
||||
sudo mount -o noatime /dev/vdb /var/tmp
|
||||
sudo chmod 1777 /var/tmp
|
||||
sudo mv -f /tmp/*.txt /var/tmp
|
||||
;;
|
||||
esac
|
||||
|
||||
# enable io_uring on el9/el10
|
||||
case "$1" in
|
||||
almalinux9|almalinux10|centos-stream*)
|
||||
sudo sysctl kernel.io_uring_disabled=0 > /dev/null
|
||||
;;
|
||||
esac
|
||||
|
||||
# run functional testings and save exitcode
|
||||
cd /var/tmp
|
||||
TAGS=$2/$3
|
||||
if [ "$4" == "quick" ]; then
|
||||
export RUNFILES="sanity.run"
|
||||
fi
|
||||
sudo dmesg -c > dmesg-prerun.txt
|
||||
mount > mount.txt
|
||||
df -h > df-prerun.txt
|
||||
$TDIR/zfs-tests.sh -vKO -s 3GB -T $TAGS
|
||||
RV=$?
|
||||
df -h > df-postrun.txt
|
||||
echo $RV > tests-exitcode.txt
|
||||
sync
|
||||
exit 0
|
124
.github/workflows/scripts/qemu-7-prepare.sh
vendored
Executable file
124
.github/workflows/scripts/qemu-7-prepare.sh
vendored
Executable file
@ -0,0 +1,124 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
######################################################################
|
||||
# 7) prepare output of the results
|
||||
# - this script pre-creates all needed logfiles for later summary
|
||||
######################################################################
|
||||
|
||||
set -eu
|
||||
|
||||
# read our defined variables
|
||||
cd /var/tmp
|
||||
source env.txt
|
||||
|
||||
mkdir -p $RESPATH
|
||||
|
||||
# check if building the module has failed
|
||||
if [ -z ${VMs:-} ]; then
|
||||
cd $RESPATH
|
||||
echo ":exclamation: ZFS module didn't build successfully :exclamation:" \
|
||||
| tee summary.txt | tee /tmp/summary.txt
|
||||
cp /var/tmp/*.txt .
|
||||
tar cf /tmp/qemu-$OS.tar -C $RESPATH -h . || true
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# build was okay
|
||||
BASE="$HOME/work/zfs/zfs"
|
||||
MERGE="$BASE/.github/workflows/scripts/merge_summary.awk"
|
||||
|
||||
# catch result files of testings (vm's should be there)
|
||||
for ((i=1; i<=VMs; i++)); do
|
||||
rsync -arL zfs@vm$i:$RESPATH/current $RESPATH/vm$i || true
|
||||
scp zfs@vm$i:"/var/tmp/*.txt" $RESPATH/vm$i || true
|
||||
scp zfs@vm$i:"/var/tmp/*.rpm" $RESPATH/vm$i || true
|
||||
done
|
||||
cp -f /var/tmp/*.txt $RESPATH || true
|
||||
cd $RESPATH
|
||||
|
||||
# prepare result files for summary
|
||||
for ((i=1; i<=VMs; i++)); do
|
||||
file="vm$i/build-stderr.txt"
|
||||
test -s $file && mv -f $file build-stderr.txt
|
||||
|
||||
file="vm$i/build-exitcode.txt"
|
||||
test -s $file && mv -f $file build-exitcode.txt
|
||||
|
||||
file="vm$i/uname.txt"
|
||||
test -s $file && mv -f $file uname.txt
|
||||
|
||||
file="vm$i/tests-exitcode.txt"
|
||||
if [ ! -s $file ]; then
|
||||
# XXX - add some tests for kernel panic's here
|
||||
# tail -n 80 vm$i/console.txt | grep XYZ
|
||||
echo 1 > $file
|
||||
fi
|
||||
rv=$(cat vm$i/tests-exitcode.txt)
|
||||
test $rv != 0 && touch /tmp/have_failed_tests
|
||||
|
||||
file="vm$i/current/log"
|
||||
if [ -s $file ]; then
|
||||
cat $file >> log
|
||||
awk '/\[FAIL\]|\[KILLED\]/{ show=1; print; next; }; \
|
||||
/\[SKIP\]|\[PASS\]/{ show=0; } show' \
|
||||
$file > /tmp/vm${i}dbg.txt
|
||||
fi
|
||||
|
||||
file="vm${i}log.txt"
|
||||
fileC="/tmp/vm${i}log.txt"
|
||||
if [ -s $file ]; then
|
||||
cat $file >> summary
|
||||
cat $file | $BASE/scripts/zfs-tests-color.sh > $fileC
|
||||
fi
|
||||
done
|
||||
|
||||
# create summary of tests
|
||||
if [ -s summary ]; then
|
||||
$MERGE summary | grep -v '^/' > summary.txt
|
||||
$MERGE summary | $BASE/scripts/zfs-tests-color.sh > /tmp/summary.txt
|
||||
rm -f summary
|
||||
else
|
||||
touch summary.txt /tmp/summary.txt
|
||||
fi
|
||||
|
||||
# create file for debugging
|
||||
if [ -s log ]; then
|
||||
awk '/\[FAIL\]|\[KILLED\]/{ show=1; print; next; }; \
|
||||
/\[SKIP\]|\[PASS\]/{ show=0; } show' \
|
||||
log > summary-failure-logs.txt
|
||||
rm -f log
|
||||
else
|
||||
touch summary-failure-logs.txt
|
||||
fi
|
||||
|
||||
# create debug overview for failed tests
|
||||
cat summary.txt \
|
||||
| awk '/\(expected PASS\)/{ if ($1!="SKIP") print $2; next; } show' \
|
||||
| while read t; do
|
||||
cat summary-failure-logs.txt \
|
||||
| awk '$0~/Test[: ]/{ show=0; } $0~v{ show=1; } show' v="$t" \
|
||||
> /tmp/fail.txt
|
||||
SIZE=$(stat --printf="%s" /tmp/fail.txt)
|
||||
SIZE=$((SIZE/1024))
|
||||
# Test Summary:
|
||||
echo "##[group]$t ($SIZE KiB)" >> /tmp/failed.txt
|
||||
cat /tmp/fail.txt | $BASE/scripts/zfs-tests-color.sh >> /tmp/failed.txt
|
||||
echo "##[endgroup]" >> /tmp/failed.txt
|
||||
# Job Summary:
|
||||
echo -e "\n<details>\n<summary>$t ($SIZE KiB)</summary><pre>" >> failed.txt
|
||||
cat /tmp/fail.txt >> failed.txt
|
||||
echo "</pre></details>" >> failed.txt
|
||||
done
|
||||
|
||||
if [ -e /tmp/have_failed_tests ]; then
|
||||
echo ":warning: Some tests failed!" >> failed.txt
|
||||
else
|
||||
echo ":thumbsup: All tests passed." >> failed.txt
|
||||
fi
|
||||
|
||||
if [ ! -s uname.txt ]; then
|
||||
echo ":interrobang: Panic - where is my uname.txt?" > uname.txt
|
||||
fi
|
||||
|
||||
# artifact ready now
|
||||
tar cf /tmp/qemu-$OS.tar -C $RESPATH -h . || true
|
71
.github/workflows/scripts/qemu-8-summary.sh
vendored
Executable file
71
.github/workflows/scripts/qemu-8-summary.sh
vendored
Executable file
@ -0,0 +1,71 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
######################################################################
|
||||
# 8) show colored output of results
|
||||
######################################################################
|
||||
|
||||
set -eu
|
||||
|
||||
# read our defined variables
|
||||
source /var/tmp/env.txt
|
||||
cd $RESPATH
|
||||
|
||||
# helper function for showing some content with headline
|
||||
function showfile() {
|
||||
content=$(dd if=$1 bs=1024 count=400k 2>/dev/null)
|
||||
if [ -z "$2" ]; then
|
||||
group1=""
|
||||
group2=""
|
||||
else
|
||||
SIZE=$(stat --printf="%s" "$file")
|
||||
SIZE=$((SIZE/1024))
|
||||
group1="##[group]$2 ($SIZE KiB)"
|
||||
group2="##[endgroup]"
|
||||
fi
|
||||
cat <<EOF > tmp$$
|
||||
$group1
|
||||
$content
|
||||
$group2
|
||||
EOF
|
||||
cat tmp$$
|
||||
rm -f tmp$$
|
||||
}
|
||||
|
||||
# overview
|
||||
cat /tmp/summary.txt
|
||||
echo ""
|
||||
|
||||
if [ -f /tmp/have_failed_tests -a -s /tmp/failed.txt ]; then
|
||||
echo "Debuginfo of failed tests:"
|
||||
cat /tmp/failed.txt
|
||||
echo ""
|
||||
cat /tmp/summary.txt | grep -v '^/'
|
||||
echo ""
|
||||
fi
|
||||
|
||||
echo -e "\nFull logs for download:\n $1\n"
|
||||
|
||||
for ((i=1; i<=VMs; i++)); do
|
||||
rv=$(cat vm$i/tests-exitcode.txt)
|
||||
|
||||
if [ $rv = 0 ]; then
|
||||
vm="[92mvm$i[0m"
|
||||
else
|
||||
vm="[1;91mvm$i[0m"
|
||||
fi
|
||||
|
||||
file="vm$i/dmesg-prerun.txt"
|
||||
test -s "$file" && showfile "$file" "$vm: dmesg kernel"
|
||||
|
||||
file="/tmp/vm${i}log.txt"
|
||||
test -s "$file" && showfile "$file" "$vm: test results"
|
||||
|
||||
file="vm$i/console.txt"
|
||||
test -s "$file" && showfile "$file" "$vm: serial console"
|
||||
|
||||
file="/tmp/vm${i}dbg.txt"
|
||||
test -s "$file" && showfile "$file" "$vm: failure logfile"
|
||||
done
|
||||
|
||||
test -f /tmp/have_failed_tests && exit 1
|
||||
exit 0
|
57
.github/workflows/scripts/qemu-9-summary-page.sh
vendored
Executable file
57
.github/workflows/scripts/qemu-9-summary-page.sh
vendored
Executable file
@ -0,0 +1,57 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
######################################################################
|
||||
# 9) generate github summary page of all the testings
|
||||
######################################################################
|
||||
|
||||
set -eu
|
||||
|
||||
function output() {
|
||||
echo -e $* >> "out-$logfile.md"
|
||||
}
|
||||
|
||||
function outfile() {
|
||||
cat "$1" >> "out-$logfile.md"
|
||||
}
|
||||
|
||||
function outfile_plain() {
|
||||
output "<pre>"
|
||||
cat "$1" >> "out-$logfile.md"
|
||||
output "</pre>"
|
||||
}
|
||||
|
||||
function send2github() {
|
||||
test -f "$1" || exit 0
|
||||
dd if="$1" bs=1023k count=1 >> $GITHUB_STEP_SUMMARY
|
||||
}
|
||||
|
||||
# https://docs.github.com/en/enterprise-server@3.6/actions/using-workflows/workflow-commands-for-github-actions#step-isolation-and-limits
|
||||
# Job summaries are isolated between steps and each step is restricted to a maximum size of 1MiB.
|
||||
# [ ] can not show all error findings here
|
||||
# [x] split files into smaller ones and create additional steps
|
||||
|
||||
# first call, generate all summaries
|
||||
if [ ! -f out-1.md ]; then
|
||||
logfile="1"
|
||||
for tarfile in Logs-functional-*/qemu-*.tar; do
|
||||
rm -rf vm* *.txt
|
||||
if [ ! -s "$tarfile" ]; then
|
||||
output "\n## Functional Tests: unknown\n"
|
||||
output ":exclamation: Tarfile $tarfile is empty :exclamation:"
|
||||
continue
|
||||
fi
|
||||
tar xf "$tarfile"
|
||||
test -s env.txt || continue
|
||||
source env.txt
|
||||
# when uname.txt is there, the other files are also ok
|
||||
test -s uname.txt || continue
|
||||
output "\n## Functional Tests: $OSNAME\n"
|
||||
outfile_plain uname.txt
|
||||
outfile_plain summary.txt
|
||||
outfile failed.txt
|
||||
logfile=$((logfile+1))
|
||||
done
|
||||
send2github out-1.md
|
||||
else
|
||||
send2github out-$1.md
|
||||
fi
|
8
.github/workflows/scripts/qemu-prepare-for-build.sh
vendored
Executable file
8
.github/workflows/scripts/qemu-prepare-for-build.sh
vendored
Executable file
@ -0,0 +1,8 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Helper script to run after installing dependencies. This brings the VM back
|
||||
# up and copies over the zfs source directory.
|
||||
echo "Build modules in QEMU machine"
|
||||
sudo virsh start openzfs
|
||||
.github/workflows/scripts/qemu-wait-for-vm.sh vm0
|
||||
rsync -ar $HOME/work/zfs/zfs zfs@vm0:./
|
90
.github/workflows/scripts/qemu-test-repo-vm.sh
vendored
Executable file
90
.github/workflows/scripts/qemu-test-repo-vm.sh
vendored
Executable file
@ -0,0 +1,90 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Do a test install of ZFS from an external repository.
|
||||
#
|
||||
# USAGE:
|
||||
#
|
||||
# ./qemu-test-repo-vm [URL]
|
||||
#
|
||||
# URL: URL to use instead of http://download.zfsonlinux.org
|
||||
# If blank, use the default repo from zfs-release RPM.
|
||||
|
||||
set -e
|
||||
|
||||
source /etc/os-release
|
||||
OS="$ID"
|
||||
VERSION="$VERSION_ID"
|
||||
|
||||
ALTHOST=""
|
||||
if [ -n "$1" ] ; then
|
||||
ALTHOST="$1"
|
||||
fi
|
||||
|
||||
# Write summary to /tmp/repo so our artifacts scripts pick it up
|
||||
mkdir /tmp/repo
|
||||
SUMMARY=/tmp/repo/$OS-$VERSION-summary.txt
|
||||
|
||||
# $1: Repo 'zfs' 'zfs-kmod' 'zfs-testing' 'zfs-testing-kmod'
|
||||
# $2: (optional) Alternate host than 'http://download.zfsonlinux.org' to
|
||||
# install from. Blank means use default from zfs-release RPM.
|
||||
function test_install {
|
||||
repo=$1
|
||||
host=""
|
||||
if [ -n "$2" ] ; then
|
||||
host=$2
|
||||
fi
|
||||
|
||||
args="--disablerepo=zfs --enablerepo=$repo"
|
||||
|
||||
# If we supplied an alternate repo URL, and have not already edited
|
||||
# zfs.repo, then update the repo file.
|
||||
if [ -n "$host" ] && ! grep -q $host /etc/yum.repos.d/zfs.repo ; then
|
||||
sudo sed -i "s;baseurl=http://download.zfsonlinux.org;baseurl=$host;g" /etc/yum.repos.d/zfs.repo
|
||||
fi
|
||||
|
||||
sudo dnf -y install $args zfs zfs-test
|
||||
|
||||
# Load modules and create a simple pool as a sanity test.
|
||||
sudo /usr/share/zfs/zfs.sh -r
|
||||
truncate -s 100M /tmp/file
|
||||
sudo zpool create tank /tmp/file
|
||||
sudo zpool status
|
||||
|
||||
# Print out repo name, rpm installed (kmod or dkms), and repo URL
|
||||
baseurl=$(grep -A 5 "\[$repo\]" /etc/yum.repos.d/zfs.repo | awk -F'=' '/baseurl=/{print $2; exit}')
|
||||
package=$(sudo rpm -qa | grep zfs | grep -E 'kmod|dkms')
|
||||
|
||||
echo "$repo $package $baseurl" >> $SUMMARY
|
||||
|
||||
sudo zpool destroy tank
|
||||
sudo rm /tmp/file
|
||||
sudo dnf -y remove zfs
|
||||
}
|
||||
|
||||
echo "##[group]Installing from repo"
|
||||
# The openzfs docs are the authoritative instructions for the install. Use
|
||||
# the specific version of zfs-release RPM it recommends.
|
||||
case $OS in
|
||||
almalinux*)
|
||||
url='https://raw.githubusercontent.com/openzfs/openzfs-docs/refs/heads/master/docs/Getting%20Started/RHEL-based%20distro/index.rst'
|
||||
name=$(curl -Ls $url | grep 'dnf install' | grep -Eo 'zfs-release-[0-9]+-[0-9]+')
|
||||
sudo dnf -y install https://zfsonlinux.org/epel/$name$(rpm --eval "%{dist}").noarch.rpm 2>&1
|
||||
sudo rpm -qi zfs-release
|
||||
test_install zfs $ALTHOST
|
||||
test_install zfs-kmod $ALTHOST
|
||||
test_install zfs-testing $ALTHOST
|
||||
test_install zfs-testing-kmod $ALTHOST
|
||||
;;
|
||||
fedora*)
|
||||
url='https://raw.githubusercontent.com/openzfs/openzfs-docs/refs/heads/master/docs/Getting%20Started/Fedora/index.rst'
|
||||
name=$(curl -Ls $url | grep 'dnf install' | grep -Eo 'zfs-release-[0-9]+-[0-9]+')
|
||||
sudo dnf -y install -y https://zfsonlinux.org/fedora/$name$(rpm --eval "%{dist}").noarch.rpm
|
||||
test_install zfs $ALTHOST
|
||||
;;
|
||||
esac
|
||||
echo "##[endgroup]"
|
||||
|
||||
# Write out a simple version of the summary here. Later on we will collate all
|
||||
# the summaries and put them into a nice table in the workflow Summary page.
|
||||
echo "Summary: "
|
||||
cat $SUMMARY
|
10
.github/workflows/scripts/qemu-wait-for-vm.sh
vendored
Executable file
10
.github/workflows/scripts/qemu-wait-for-vm.sh
vendored
Executable file
@ -0,0 +1,10 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Wait for a VM to boot up and become active. This is used in a number of our
|
||||
# scripts.
|
||||
#
|
||||
# $1: VM hostname or IP address
|
||||
|
||||
while pidof /usr/bin/qemu-system-x86_64 >/dev/null; do
|
||||
ssh 2>/dev/null zfs@$1 "uname -a" && break
|
||||
done
|
32
.github/workflows/scripts/replace-dupes-with-symlinks.sh
vendored
Executable file
32
.github/workflows/scripts/replace-dupes-with-symlinks.sh
vendored
Executable file
@ -0,0 +1,32 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Recursively go though a directory structure and replace duplicate files with
|
||||
# symlinks. This cuts down our RPM repo size by ~25%.
|
||||
#
|
||||
# replace-dupes-with-symlinks.sh [DIR]
|
||||
#
|
||||
# DIR: Directory to traverse. Defaults to current directory if not specified.
|
||||
#
|
||||
|
||||
src="$1"
|
||||
if [ -z "$src" ] ; then
|
||||
src="."
|
||||
fi
|
||||
|
||||
declare -A db
|
||||
|
||||
pushd "$src"
|
||||
while read line ; do
|
||||
bn="$(basename $line)"
|
||||
if [ -z "${db[$bn]}" ] ; then
|
||||
# First time this file has been seen
|
||||
db[$bn]="$line"
|
||||
else
|
||||
if diff -b "$line" "${db[$bn]}" &>/dev/null ; then
|
||||
# Files are the same, make a symlink
|
||||
rm "$line"
|
||||
ln -sr "${db[$bn]}" "$line"
|
||||
fi
|
||||
fi
|
||||
done <<< "$(find . -type f)"
|
||||
popd
|
88
.github/workflows/scripts/setup-dependencies.sh
vendored
88
.github/workflows/scripts/setup-dependencies.sh
vendored
@ -1,88 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -eu
|
||||
|
||||
function prerun() {
|
||||
echo "::group::Install build dependencies"
|
||||
# remove snap things, update+upgrade will be faster then
|
||||
for x in lxd core20 snapd; do sudo snap remove $x; done
|
||||
sudo apt-get purge snapd google-chrome-stable firefox
|
||||
# https://github.com/orgs/community/discussions/47863
|
||||
sudo apt-get remove grub-efi-amd64-bin grub-efi-amd64-signed shim-signed --allow-remove-essential
|
||||
sudo apt-get update
|
||||
sudo apt upgrade
|
||||
sudo xargs --arg-file=.github/workflows/build-dependencies.txt apt-get install -qq
|
||||
sudo apt-get clean
|
||||
sudo dmesg -c > /var/tmp/dmesg-prerun
|
||||
echo "::endgroup::"
|
||||
}
|
||||
|
||||
function mod_build() {
|
||||
echo "::group::Generate debian packages"
|
||||
./autogen.sh
|
||||
./configure --enable-debug --enable-debuginfo --enable-asan --enable-ubsan
|
||||
make --no-print-directory --silent native-deb-utils native-deb-kmod
|
||||
mv ../*.deb .
|
||||
rm ./openzfs-zfs-dracut*.deb ./openzfs-zfs-dkms*.deb
|
||||
echo "$ImageOS-$ImageVersion" > tests/ImageOS.txt
|
||||
echo "::endgroup::"
|
||||
}
|
||||
|
||||
function mod_install() {
|
||||
# install the pre-built module only on the same runner image
|
||||
MOD=`cat tests/ImageOS.txt`
|
||||
if [ "$MOD" != "$ImageOS-$ImageVersion" ]; then
|
||||
rm -f *.deb
|
||||
mod_build
|
||||
fi
|
||||
|
||||
echo "::group::Install and load modules"
|
||||
# don't use kernel-shipped zfs modules
|
||||
sudo sed -i.bak 's/updates/extra updates/' /etc/depmod.d/ubuntu.conf
|
||||
sudo apt-get install --fix-missing ./*.deb
|
||||
|
||||
# Native Debian packages enable and start the services
|
||||
# Stop zfs-zed daemon, as it may interfere with some ZTS test cases
|
||||
sudo systemctl stop zfs-zed
|
||||
sudo depmod -a
|
||||
sudo modprobe zfs
|
||||
sudo dmesg
|
||||
sudo dmesg -c > /var/tmp/dmesg-module-load
|
||||
echo "::endgroup::"
|
||||
|
||||
echo "::group::Report CPU information"
|
||||
lscpu
|
||||
cat /proc/spl/kstat/zfs/chksum_bench
|
||||
echo "::endgroup::"
|
||||
|
||||
echo "::group::Optimize storage for ZFS testings"
|
||||
# remove swap and umount fast storage
|
||||
# 89GiB -> rootfs + bootfs with ~80MB/s -> don't care
|
||||
# 64GiB -> /mnt with 420MB/s -> new testing ssd
|
||||
sudo swapoff -a
|
||||
|
||||
# this one is fast and mounted @ /mnt
|
||||
# -> we reformat with ext4 + move it to /var/tmp
|
||||
DEV="/dev/disk/azure/resource-part1"
|
||||
sudo umount /mnt
|
||||
sudo mkfs.ext4 -O ^has_journal -F $DEV
|
||||
sudo mount -o noatime,barrier=0 $DEV /var/tmp
|
||||
sudo chmod 1777 /var/tmp
|
||||
|
||||
# disk usage afterwards
|
||||
sudo df -h /
|
||||
sudo df -h /var/tmp
|
||||
sudo fstrim -a
|
||||
echo "::endgroup::"
|
||||
}
|
||||
|
||||
case "$1" in
|
||||
build)
|
||||
prerun
|
||||
mod_build
|
||||
;;
|
||||
tests)
|
||||
prerun
|
||||
mod_install
|
||||
;;
|
||||
esac
|
24
.github/workflows/scripts/setup-functional.sh
vendored
24
.github/workflows/scripts/setup-functional.sh
vendored
@ -1,24 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -eu
|
||||
|
||||
TDIR="/usr/share/zfs/zfs-tests/tests/functional"
|
||||
echo -n "TODO="
|
||||
case "$1" in
|
||||
part1)
|
||||
# ~1h 20m
|
||||
echo "cli_root"
|
||||
;;
|
||||
part2)
|
||||
# ~1h
|
||||
ls $TDIR|grep '^[a-m]'|grep -v "cli_root"|xargs|tr -s ' ' ','
|
||||
;;
|
||||
part3)
|
||||
# ~1h
|
||||
ls $TDIR|grep '^[n-qs-z]'|xargs|tr -s ' ' ','
|
||||
;;
|
||||
part4)
|
||||
# ~1h
|
||||
ls $TDIR|grep '^r'|xargs|tr -s ' ' ','
|
||||
;;
|
||||
esac
|
124
.github/workflows/zfs-linux-tests.yml
vendored
124
.github/workflows/zfs-linux-tests.yml
vendored
@ -1,124 +0,0 @@
|
||||
name: zfs-linux-tests
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
os:
|
||||
description: 'The ubuntu version: 20.02 or 22.04'
|
||||
required: true
|
||||
type: string
|
||||
|
||||
jobs:
|
||||
|
||||
zloop:
|
||||
runs-on: ubuntu-${{ inputs.os }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ github.event.pull_request.head.sha }}
|
||||
- uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: modules-${{ inputs.os }}
|
||||
- name: Install modules
|
||||
run: |
|
||||
tar xzf modules-${{ inputs.os }}.tgz
|
||||
.github/workflows/scripts/setup-dependencies.sh tests
|
||||
- name: Tests
|
||||
timeout-minutes: 30
|
||||
run: |
|
||||
sudo mkdir -p /var/tmp/zloop
|
||||
# run for 10 minutes or at most 2 iterations for a maximum runner
|
||||
# time of 20 minutes.
|
||||
sudo /usr/share/zfs/zloop.sh -t 600 -I 2 -l -m1 -- -T 120 -P 60
|
||||
- name: Prepare artifacts
|
||||
if: failure()
|
||||
run: |
|
||||
sudo chmod +r -R /var/tmp/zloop/
|
||||
- uses: actions/upload-artifact@v4
|
||||
if: failure()
|
||||
with:
|
||||
name: Zpool-logs-${{ inputs.os }}
|
||||
path: |
|
||||
/var/tmp/zloop/*/
|
||||
!/var/tmp/zloop/*/vdev/
|
||||
retention-days: 14
|
||||
if-no-files-found: ignore
|
||||
- uses: actions/upload-artifact@v4
|
||||
if: failure()
|
||||
with:
|
||||
name: Zpool-files-${{ inputs.os }}
|
||||
path: |
|
||||
/var/tmp/zloop/*/vdev/
|
||||
retention-days: 14
|
||||
if-no-files-found: ignore
|
||||
|
||||
sanity:
|
||||
runs-on: ubuntu-${{ inputs.os }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ github.event.pull_request.head.sha }}
|
||||
- uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: modules-${{ inputs.os }}
|
||||
- name: Install modules
|
||||
run: |
|
||||
tar xzf modules-${{ inputs.os }}.tgz
|
||||
.github/workflows/scripts/setup-dependencies.sh tests
|
||||
- name: Tests
|
||||
timeout-minutes: 60
|
||||
shell: bash
|
||||
run: |
|
||||
set -o pipefail
|
||||
/usr/share/zfs/zfs-tests.sh -vKR -s 3G -r sanity | scripts/zfs-tests-color.sh
|
||||
- name: Prepare artifacts
|
||||
if: success() || failure()
|
||||
run: |
|
||||
RESPATH="/var/tmp/test_results"
|
||||
mv -f $RESPATH/current $RESPATH/testfiles
|
||||
tar cf $RESPATH/sanity.tar -h -C $RESPATH testfiles
|
||||
- uses: actions/upload-artifact@v4
|
||||
if: success() || failure()
|
||||
with:
|
||||
name: Logs-${{ inputs.os }}-sanity
|
||||
path: /var/tmp/test_results/sanity.tar
|
||||
if-no-files-found: ignore
|
||||
|
||||
functional:
|
||||
runs-on: ubuntu-${{ inputs.os }}
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
tests: [ part1, part2, part3, part4 ]
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ github.event.pull_request.head.sha }}
|
||||
- uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: modules-${{ inputs.os }}
|
||||
- name: Install modules
|
||||
run: |
|
||||
tar xzf modules-${{ inputs.os }}.tgz
|
||||
.github/workflows/scripts/setup-dependencies.sh tests
|
||||
- name: Setup tests
|
||||
run: |
|
||||
.github/workflows/scripts/setup-functional.sh ${{ matrix.tests }} >> $GITHUB_ENV
|
||||
- name: Tests
|
||||
timeout-minutes: 120
|
||||
shell: bash
|
||||
run: |
|
||||
set -o pipefail
|
||||
/usr/share/zfs/zfs-tests.sh -vKR -s 3G -T ${{ env.TODO }} | scripts/zfs-tests-color.sh
|
||||
- name: Prepare artifacts
|
||||
if: success() || failure()
|
||||
run: |
|
||||
RESPATH="/var/tmp/test_results"
|
||||
mv -f $RESPATH/current $RESPATH/testfiles
|
||||
tar cf $RESPATH/${{ matrix.tests }}.tar -h -C $RESPATH testfiles
|
||||
- uses: actions/upload-artifact@v4
|
||||
if: success() || failure()
|
||||
with:
|
||||
name: Logs-${{ inputs.os }}-functional-${{ matrix.tests }}
|
||||
path: /var/tmp/test_results/${{ matrix.tests }}.tar
|
||||
if-no-files-found: ignore
|
64
.github/workflows/zfs-linux.yml
vendored
64
.github/workflows/zfs-linux.yml
vendored
@ -1,64 +0,0 @@
|
||||
name: zfs-linux
|
||||
|
||||
on:
|
||||
push:
|
||||
pull_request:
|
||||
|
||||
jobs:
|
||||
|
||||
build:
|
||||
name: Build
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
os: [20.04, 22.04]
|
||||
runs-on: ubuntu-${{ matrix.os }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ github.event.pull_request.head.sha }}
|
||||
- name: Build modules
|
||||
run: .github/workflows/scripts/setup-dependencies.sh build
|
||||
- name: Prepare modules upload
|
||||
run: tar czf modules-${{ matrix.os }}.tgz *.deb .github tests/test-runner tests/ImageOS.txt
|
||||
- uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: modules-${{ matrix.os }}
|
||||
path: modules-${{ matrix.os }}.tgz
|
||||
retention-days: 14
|
||||
|
||||
testings:
|
||||
name: Testing
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
os: [20.04, 22.04]
|
||||
needs: build
|
||||
uses: ./.github/workflows/zfs-linux-tests.yml
|
||||
with:
|
||||
os: ${{ matrix.os }}
|
||||
|
||||
cleanup:
|
||||
if: always()
|
||||
name: Cleanup
|
||||
runs-on: ubuntu-22.04
|
||||
needs: testings
|
||||
steps:
|
||||
- uses: actions/download-artifact@v4
|
||||
- name: Generating summary
|
||||
run: |
|
||||
tar xzf modules-22.04/modules-22.04.tgz .github tests
|
||||
.github/workflows/scripts/generate-summary.sh
|
||||
# up to 4 steps, each can have 1 MiB output (for debugging log files)
|
||||
- name: Summary for errors #1
|
||||
run: .github/workflows/scripts/generate-summary.sh 1
|
||||
- name: Summary for errors #2
|
||||
run: .github/workflows/scripts/generate-summary.sh 2
|
||||
- name: Summary for errors #3
|
||||
run: .github/workflows/scripts/generate-summary.sh 3
|
||||
- name: Summary for errors #4
|
||||
run: .github/workflows/scripts/generate-summary.sh 4
|
||||
- uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: Summary Files
|
||||
path: Summary/
|
151
.github/workflows/zfs-qemu-packages.yml
vendored
Normal file
151
.github/workflows/zfs-qemu-packages.yml
vendored
Normal file
@ -0,0 +1,151 @@
|
||||
# This workflow is used to build and test RPM packages. It is a
|
||||
# 'workflow_dispatch' workflow, which means it gets run manually.
|
||||
#
|
||||
# The workflow has a dropdown menu with two options:
|
||||
#
|
||||
# Build RPMs - Build release RPMs and tarballs and put them into an artifact
|
||||
# ZIP file. The directory structure used in the ZIP file mirrors
|
||||
# the ZFS yum repo.
|
||||
#
|
||||
# Test repo - Test install the ZFS RPMs from the ZFS repo. On EL distos, this
|
||||
# will do a DKMS and KMOD test install from both the regular and
|
||||
# testing repos. On Fedora, it will do a DKMS install from the
|
||||
# regular repo. All test install results will be displayed in the
|
||||
# Summary page. Note that the workflow provides an optional text
|
||||
# text box where you can specify the full URL to an alternate repo.
|
||||
# If left blank, it will install from the default repo from the
|
||||
# zfs-release RPM (http://download.zfsonlinux.org).
|
||||
#
|
||||
# Most users will never need to use this workflow. It will be used primary by
|
||||
# ZFS admins for building and testing releases.
|
||||
#
|
||||
name: zfs-qemu-packages
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
test_type:
|
||||
type: choice
|
||||
required: false
|
||||
default: "Build RPMs"
|
||||
description: "Build RPMs or test the repo?"
|
||||
options:
|
||||
- "Build RPMs"
|
||||
- "Test repo"
|
||||
patch_level:
|
||||
type: string
|
||||
required: false
|
||||
default: ""
|
||||
description: "(optional) patch level number"
|
||||
repo_url:
|
||||
type: string
|
||||
required: false
|
||||
default: ""
|
||||
description: "(optional) repo URL (blank: use http://download.zfsonlinux.org)"
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
zfs-qemu-packages-jobs:
|
||||
name: qemu-VMs
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
os: ['almalinux8', 'almalinux9', 'almalinux10', 'fedora41', 'fedora42']
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ github.event.pull_request.head.sha }}
|
||||
|
||||
- name: Setup QEMU
|
||||
timeout-minutes: 10
|
||||
run: .github/workflows/scripts/qemu-1-setup.sh
|
||||
|
||||
- name: Start build machine
|
||||
timeout-minutes: 10
|
||||
run: .github/workflows/scripts/qemu-2-start.sh ${{ matrix.os }}
|
||||
|
||||
- name: Install dependencies
|
||||
timeout-minutes: 20
|
||||
run: |
|
||||
.github/workflows/scripts/qemu-3-deps.sh ${{ matrix.os }}
|
||||
|
||||
- name: Build modules or Test repo
|
||||
timeout-minutes: 30
|
||||
run: |
|
||||
set -e
|
||||
if [ "${{ github.event.inputs.test_type }}" == "Test repo" ] ; then
|
||||
# Bring VM back up and copy over zfs source
|
||||
.github/workflows/scripts/qemu-prepare-for-build.sh
|
||||
|
||||
mkdir -p /tmp/repo
|
||||
ssh zfs@vm0 '$HOME/zfs/.github/workflows/scripts/qemu-test-repo-vm.sh' ${{ github.event.inputs.repo_url }}
|
||||
else
|
||||
EXTRA=""
|
||||
if [ -n "${{ github.event.inputs.patch_level }}" ] ; then
|
||||
EXTRA="--patch-level ${{ github.event.inputs.patch_level }}"
|
||||
fi
|
||||
|
||||
.github/workflows/scripts/qemu-4-build.sh $EXTRA \
|
||||
--repo --release --dkms --tarball ${{ matrix.os }}
|
||||
fi
|
||||
|
||||
- name: Prepare artifacts
|
||||
if: always()
|
||||
timeout-minutes: 10
|
||||
run: |
|
||||
rsync -a zfs@vm0:/tmp/repo /tmp || true
|
||||
.github/workflows/scripts/replace-dupes-with-symlinks.sh /tmp/repo
|
||||
tar -cf ${{ matrix.os }}-repo.tar -C /tmp repo
|
||||
|
||||
- uses: actions/upload-artifact@v4
|
||||
id: artifact-upload
|
||||
if: always()
|
||||
with:
|
||||
name: ${{ matrix.os }}-repo
|
||||
path: ${{ matrix.os }}-repo.tar
|
||||
compression-level: 0
|
||||
retention-days: 2
|
||||
if-no-files-found: ignore
|
||||
|
||||
combine_repos:
|
||||
if: always()
|
||||
needs: [zfs-qemu-packages-jobs]
|
||||
name: "Results"
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/download-artifact@v4
|
||||
id: artifact-download
|
||||
if: always()
|
||||
- name: Test Summary
|
||||
if: always()
|
||||
run: |
|
||||
for i in $(find . -type f -iname "*.tar") ; do
|
||||
tar -xf $i -C /tmp
|
||||
done
|
||||
tar -cf all-repo.tar -C /tmp repo
|
||||
|
||||
# If we're installing from a repo, print out the summary of the versions
|
||||
# that got installed using Markdown.
|
||||
if [ "${{ github.event.inputs.test_type }}" == "Test repo" ] ; then
|
||||
cd /tmp/repo
|
||||
for i in $(ls *.txt) ; do
|
||||
nicename="$(echo $i | sed 's/.txt//g; s/-/ /g')"
|
||||
echo "### $nicename" >> $GITHUB_STEP_SUMMARY
|
||||
echo "|repo|RPM|URL|" >> $GITHUB_STEP_SUMMARY
|
||||
echo "|:---|:---|:---|" >> $GITHUB_STEP_SUMMARY
|
||||
awk '{print "|"$1"|"$2"|"$3"|"}' $i >> $GITHUB_STEP_SUMMARY
|
||||
done
|
||||
fi
|
||||
|
||||
- uses: actions/upload-artifact@v4
|
||||
id: artifact-upload2
|
||||
if: always()
|
||||
with:
|
||||
name: all-repo
|
||||
path: all-repo.tar
|
||||
compression-level: 0
|
||||
retention-days: 5
|
||||
if-no-files-found: ignore
|
178
.github/workflows/zfs-qemu.yml
vendored
Normal file
178
.github/workflows/zfs-qemu.yml
vendored
Normal file
@ -0,0 +1,178 @@
|
||||
name: zfs-qemu
|
||||
|
||||
on:
|
||||
push:
|
||||
pull_request:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
fedora_kernel_ver:
|
||||
type: string
|
||||
required: false
|
||||
default: ""
|
||||
description: "(optional) Experimental kernel version to install on Fedora (like '6.14' or '6.13.3-0.rc3')"
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
test-config:
|
||||
name: Setup
|
||||
runs-on: ubuntu-24.04
|
||||
outputs:
|
||||
test_os: ${{ steps.os.outputs.os }}
|
||||
ci_type: ${{ steps.os.outputs.ci_type }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: Generate OS config and CI type
|
||||
id: os
|
||||
run: |
|
||||
FULL_OS='["almalinux8", "almalinux9", "almalinux10", "centos-stream9", "centos-stream10", "debian12", "debian13", "fedora41", "fedora42", "freebsd13-5r", "freebsd14-3s", "freebsd15-0c", "ubuntu22", "ubuntu24"]'
|
||||
QUICK_OS='["almalinux8", "almalinux9", "almalinux10", "debian12", "fedora42", "freebsd14-3s", "ubuntu24"]'
|
||||
# determine CI type when running on PR
|
||||
ci_type="full"
|
||||
if ${{ github.event_name == 'pull_request' }}; then
|
||||
head=${{ github.event.pull_request.head.sha }}
|
||||
base=${{ github.event.pull_request.base.sha }}
|
||||
ci_type=$(python3 .github/workflows/scripts/generate-ci-type.py $head $base)
|
||||
fi
|
||||
if [ "$ci_type" == "quick" ]; then
|
||||
os_selection="$QUICK_OS"
|
||||
else
|
||||
os_selection="$FULL_OS"
|
||||
fi
|
||||
|
||||
if ${{ github.event.inputs.fedora_kernel_ver != '' }}; then
|
||||
# They specified a custom kernel version for Fedora. Use only
|
||||
# Fedora runners.
|
||||
os_json=$(echo ${os_selection} | jq -c '[.[] | select(startswith("fedora"))]')
|
||||
else
|
||||
# Normal case
|
||||
os_json=$(echo ${os_selection} | jq -c)
|
||||
fi
|
||||
|
||||
echo "os=$os_json" | tee -a $GITHUB_OUTPUT
|
||||
echo "ci_type=$ci_type" | tee -a $GITHUB_OUTPUT
|
||||
|
||||
qemu-vm:
|
||||
name: qemu-x86
|
||||
needs: [ test-config ]
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
# rhl: almalinux8, almalinux9, centos-stream9, fedora4x
|
||||
# debian: debian12, debian13, ubuntu22, ubuntu24
|
||||
# misc: archlinux, tumbleweed
|
||||
# FreeBSD variants of 2025-06:
|
||||
# FreeBSD Release: freebsd13-5r, freebsd14-2r, freebsd14-3r
|
||||
# FreeBSD Stable: freebsd13-5s, freebsd14-3s
|
||||
# FreeBSD Current: freebsd15-0c
|
||||
os: ${{ fromJson(needs.test-config.outputs.test_os) }}
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ github.event.pull_request.head.sha }}
|
||||
|
||||
- name: Setup QEMU
|
||||
timeout-minutes: 20
|
||||
run: |
|
||||
# Add a timestamp to each line to debug timeouts
|
||||
while IFS=$'\n' read -r line; do
|
||||
echo "$(date +'%H:%M:%S') $line"
|
||||
done < <(.github/workflows/scripts/qemu-1-setup.sh)
|
||||
|
||||
- name: Start build machine
|
||||
timeout-minutes: 10
|
||||
run: .github/workflows/scripts/qemu-2-start.sh ${{ matrix.os }}
|
||||
|
||||
- name: Install dependencies
|
||||
timeout-minutes: 20
|
||||
run: .github/workflows/scripts/qemu-3-deps.sh ${{ matrix.os }} ${{ github.event.inputs.fedora_kernel_ver }}
|
||||
|
||||
- name: Build modules
|
||||
timeout-minutes: 30
|
||||
run: .github/workflows/scripts/qemu-4-build.sh --poweroff --enable-debug ${{ matrix.os }}
|
||||
|
||||
- name: Setup testing machines
|
||||
timeout-minutes: 5
|
||||
run: .github/workflows/scripts/qemu-5-setup.sh
|
||||
|
||||
- name: Run tests
|
||||
timeout-minutes: 270
|
||||
run: .github/workflows/scripts/qemu-6-tests.sh
|
||||
env:
|
||||
CI_TYPE: ${{ needs.test-config.outputs.ci_type }}
|
||||
|
||||
- name: Prepare artifacts
|
||||
if: always()
|
||||
timeout-minutes: 10
|
||||
run: .github/workflows/scripts/qemu-7-prepare.sh
|
||||
|
||||
- uses: actions/upload-artifact@v4
|
||||
id: artifact-upload
|
||||
if: always()
|
||||
with:
|
||||
name: Logs-functional-${{ matrix.os }}
|
||||
path: /tmp/qemu-${{ matrix.os }}.tar
|
||||
if-no-files-found: ignore
|
||||
|
||||
- name: Test Summary
|
||||
if: always()
|
||||
run: .github/workflows/scripts/qemu-8-summary.sh '${{ steps.artifact-upload.outputs.artifact-url }}'
|
||||
|
||||
cleanup:
|
||||
if: always()
|
||||
name: Cleanup
|
||||
runs-on: ubuntu-latest
|
||||
needs: [ qemu-vm ]
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ github.event.pull_request.head.sha }}
|
||||
- uses: actions/download-artifact@v4
|
||||
- name: Generating summary
|
||||
run: .github/workflows/scripts/qemu-9-summary-page.sh
|
||||
- name: Generating summary...
|
||||
run: .github/workflows/scripts/qemu-9-summary-page.sh 2
|
||||
- name: Generating summary...
|
||||
run: .github/workflows/scripts/qemu-9-summary-page.sh 3
|
||||
- name: Generating summary...
|
||||
run: .github/workflows/scripts/qemu-9-summary-page.sh 4
|
||||
- name: Generating summary...
|
||||
run: .github/workflows/scripts/qemu-9-summary-page.sh 5
|
||||
- name: Generating summary...
|
||||
run: .github/workflows/scripts/qemu-9-summary-page.sh 6
|
||||
- name: Generating summary...
|
||||
run: .github/workflows/scripts/qemu-9-summary-page.sh 7
|
||||
- name: Generating summary...
|
||||
run: .github/workflows/scripts/qemu-9-summary-page.sh 8
|
||||
- name: Generating summary...
|
||||
run: .github/workflows/scripts/qemu-9-summary-page.sh 9
|
||||
- name: Generating summary...
|
||||
run: .github/workflows/scripts/qemu-9-summary-page.sh 10
|
||||
- name: Generating summary...
|
||||
run: .github/workflows/scripts/qemu-9-summary-page.sh 11
|
||||
- name: Generating summary...
|
||||
run: .github/workflows/scripts/qemu-9-summary-page.sh 12
|
||||
- name: Generating summary...
|
||||
run: .github/workflows/scripts/qemu-9-summary-page.sh 13
|
||||
- name: Generating summary...
|
||||
run: .github/workflows/scripts/qemu-9-summary-page.sh 14
|
||||
- name: Generating summary...
|
||||
run: .github/workflows/scripts/qemu-9-summary-page.sh 15
|
||||
- name: Generating summary...
|
||||
run: .github/workflows/scripts/qemu-9-summary-page.sh 16
|
||||
- name: Generating summary...
|
||||
run: .github/workflows/scripts/qemu-9-summary-page.sh 17
|
||||
- name: Generating summary...
|
||||
run: .github/workflows/scripts/qemu-9-summary-page.sh 18
|
||||
- name: Generating summary...
|
||||
run: .github/workflows/scripts/qemu-9-summary-page.sh 19
|
||||
- uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: Summary Files
|
||||
path: out-*
|
77
.github/workflows/zloop.yml
vendored
Normal file
77
.github/workflows/zloop.yml
vendored
Normal file
@ -0,0 +1,77 @@
|
||||
name: zloop
|
||||
|
||||
on:
|
||||
push:
|
||||
pull_request:
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
zloop:
|
||||
runs-on: ubuntu-24.04
|
||||
env:
|
||||
WORK_DIR: /mnt/zloop
|
||||
CORE_DIR: /mnt/zloop/cores
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ github.event.pull_request.head.sha }}
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
sudo apt-get purge -y snapd google-chrome-stable firefox
|
||||
ONLY_DEPS=1 .github/workflows/scripts/qemu-3-deps-vm.sh ubuntu24
|
||||
- name: Autogen.sh
|
||||
run: |
|
||||
sed -i '/DEBUG_CFLAGS="-Werror"/s/^/#/' config/zfs-build.m4
|
||||
./autogen.sh
|
||||
- name: Configure
|
||||
run: |
|
||||
./configure --prefix=/usr --enable-debug --enable-debuginfo \
|
||||
--enable-asan --enable-ubsan \
|
||||
--enable-debug-kmem --enable-debug-kmem-tracking
|
||||
- name: Make
|
||||
run: |
|
||||
make -j$(nproc)
|
||||
- name: Install
|
||||
run: |
|
||||
sudo make install
|
||||
sudo depmod
|
||||
sudo modprobe zfs
|
||||
- name: Tests
|
||||
run: |
|
||||
sudo truncate -s 256G /mnt/vdev
|
||||
sudo zpool create cipool -m $WORK_DIR -O compression=on -o autotrim=on /mnt/vdev
|
||||
sudo /usr/share/zfs/zloop.sh -t 600 -I 6 -l -m 1 -c $CORE_DIR -f $WORK_DIR -- -T 120 -P 60
|
||||
- name: Prepare artifacts
|
||||
if: failure()
|
||||
run: |
|
||||
sudo chmod +r -R $WORK_DIR/
|
||||
- name: Ztest log
|
||||
if: failure()
|
||||
run: |
|
||||
grep -B10 -A1000 'ASSERT' $CORE_DIR/*/ztest.out || tail -n 1000 $CORE_DIR/*/ztest.out
|
||||
- name: Gdb log
|
||||
if: failure()
|
||||
run: |
|
||||
sed -n '/Backtraces (full)/q;p' $CORE_DIR/*/ztest.gdb
|
||||
- name: Zdb log
|
||||
if: failure()
|
||||
run: |
|
||||
cat $CORE_DIR/*/ztest.zdb
|
||||
- uses: actions/upload-artifact@v4
|
||||
if: failure()
|
||||
with:
|
||||
name: Logs
|
||||
path: |
|
||||
/mnt/zloop/*/
|
||||
!/mnt/zloop/cores/*/vdev/
|
||||
if-no-files-found: ignore
|
||||
- uses: actions/upload-artifact@v4
|
||||
if: failure()
|
||||
with:
|
||||
name: Pool files
|
||||
path: |
|
||||
/mnt/zloop/cores/*/vdev/
|
||||
if-no-files-found: ignore
|
18
.mailmap
18
.mailmap
@ -23,6 +23,7 @@
|
||||
# These maps are making names consistent where they have varied but the email
|
||||
# address has never changed. In most cases, the full name is in the
|
||||
# Signed-off-by of a commit with a matching author.
|
||||
Achill Gilgenast <achill@achill.org>
|
||||
Ahelenia Ziemiańska <nabijaczleweli@gmail.com>
|
||||
Ahelenia Ziemiańska <nabijaczleweli@nabijaczleweli.xyz>
|
||||
Alex John <alex@stty.io>
|
||||
@ -37,6 +38,7 @@ Crag Wang <crag0715@gmail.com>
|
||||
Damian Szuberski <szuberskidamian@gmail.com>
|
||||
Daniel Kolesa <daniel@octaforge.org>
|
||||
Debabrata Banerjee <dbavatar@gmail.com>
|
||||
Diwakar Kristappagari <diwakar-k@hpe.com>
|
||||
Finix Yan <yanchongwen@hotmail.com>
|
||||
Gaurav Kumar <gauravk.18@gmail.com>
|
||||
Gionatan Danti <g.danti@assyoma.it>
|
||||
@ -70,6 +72,8 @@ Rob Norris <robn@despairlabs.com>
|
||||
Rob Norris <rob.norris@klarasystems.com>
|
||||
Sam Lunt <samuel.j.lunt@gmail.com>
|
||||
Sanjeev Bagewadi <sanjeev.bagewadi@gmail.com>
|
||||
Sebastian Wuerl <s.wuerl@mailbox.org>
|
||||
SHENGYI HONG <aokblast@FreeBSD.org>
|
||||
Stoiko Ivanov <github@nomore.at>
|
||||
Tamas TEVESZ <ice@extreme.hu>
|
||||
WHR <msl0000023508@gmail.com>
|
||||
@ -77,8 +81,14 @@ Yanping Gao <yanping.gao@xtaotech.com>
|
||||
Youzhong Yang <youzhong@gmail.com>
|
||||
|
||||
# Signed-off-by: overriding Author:
|
||||
Ryan <errornointernet@envs.net> <error.nointernet@gmail.com>
|
||||
Alexander Ziaee <ziaee@FreeBSD.org> <concussious@runbox.com>
|
||||
Felix Schmidt <felixschmidt20@aol.com> <f.sch.prototype@gmail.com>
|
||||
Olivier Certner <olce@FreeBSD.org> <olce.freebsd@certner.fr>
|
||||
Phil Sutter <phil@nwl.cc> <p.github@nwl.cc>
|
||||
poscat <poscat@poscat.moe> <poscat0x04@outlook.com>
|
||||
Qiuhao Chen <chenqiuhao1997@gmail.com> <haohao0924@126.com>
|
||||
Ryan <errornointernet@envs.net> <error.nointernet@gmail.com>
|
||||
Sietse <sietse@wizdom.nu> <uglymotha@wizdom.nu>
|
||||
Yuxin Wang <yuxinwang9999@gmail.com> <Bi11gates9999@gmail.com>
|
||||
Zhenlei Huang <zlei@FreeBSD.org> <zlei.huang@gmail.com>
|
||||
|
||||
@ -95,6 +105,7 @@ Tulsi Jain <tulsi.jain@delphix.com> <tulsi.jain@Tulsi-Jains-MacBook-Pro.local>
|
||||
# Mappings from Github no-reply addresses
|
||||
ajs124 <git@ajs124.de> <ajs124@users.noreply.github.com>
|
||||
Alek Pinchuk <apinchuk@axcient.com> <alek-p@users.noreply.github.com>
|
||||
Aleksandr Liber <aleksandr.liber@perforce.com> <61714074+AleksandrLiber@users.noreply.github.com>
|
||||
Alexander Lobakin <alobakin@pm.me> <solbjorn@users.noreply.github.com>
|
||||
Alexey Smirnoff <fling@member.fsf.org> <fling-@users.noreply.github.com>
|
||||
Allen Holl <allen.m.holl@gmail.com> <65494904+allen-4@users.noreply.github.com>
|
||||
@ -131,10 +142,12 @@ Fedor Uporov <fuporov.vstack@gmail.com> <60701163+fuporovvStack@users.noreply.gi
|
||||
Felix Dörre <felix@dogcraft.de> <felixdoerre@users.noreply.github.com>
|
||||
Felix Neumärker <xdch47@posteo.de> <34678034+xdch47@users.noreply.github.com>
|
||||
Finix Yan <yancw@info2soft.com> <Finix1979@users.noreply.github.com>
|
||||
Friedrich Weber <f.weber@proxmox.com> <56110206+frwbr@users.noreply.github.com>
|
||||
Gaurav Kumar <gauravk.18@gmail.com> <gaurkuma@users.noreply.github.com>
|
||||
George Gaydarov <git@gg7.io> <gg7@users.noreply.github.com>
|
||||
Georgy Yakovlev <gyakovlev@gentoo.org> <168902+gyakovlev@users.noreply.github.com>
|
||||
Gerardwx <gerardw@alum.mit.edu> <Gerardwx@users.noreply.github.com>
|
||||
Germano Massullo <germano.massullo@gmail.com> <Germano0@users.noreply.github.com>
|
||||
Gian-Carlo DeFazio <defazio1@llnl.gov> <defaziogiancarlo@users.noreply.github.com>
|
||||
Giuseppe Di Natale <dinatale2@llnl.gov> <dinatale2@users.noreply.github.com>
|
||||
Hajo Möller <dasjoe@gmail.com> <dasjoe@users.noreply.github.com>
|
||||
@ -154,6 +167,7 @@ John Ramsden <johnramsden@riseup.net> <johnramsden@users.noreply.github.com>
|
||||
Jonathon Fernyhough <jonathon@m2x.dev> <559369+jonathonf@users.noreply.github.com>
|
||||
Jose Luis Duran <jlduran@gmail.com> <jlduran@users.noreply.github.com>
|
||||
Justin Hibbits <chmeeedalf@gmail.com> <chmeeedalf@users.noreply.github.com>
|
||||
Kaitlin Hoang <kthoang@amazon.com> <khoang98@users.noreply.github.com>
|
||||
Kevin Greene <kevin.greene@delphix.com> <104801862+kxgreene@users.noreply.github.com>
|
||||
Kevin Jin <lostking2008@hotmail.com> <33590050+jxdking@users.noreply.github.com>
|
||||
Kevin P. Fleming <kevin@km6g.us> <kpfleming@users.noreply.github.com>
|
||||
@ -205,9 +219,11 @@ Torsten Wörtwein <twoertwein@gmail.com> <twoertwein@users.noreply.github.com>
|
||||
Tulsi Jain <tulsi.jain@delphix.com> <TulsiJain@users.noreply.github.com>
|
||||
Václav Skála <skala@vshosting.cz> <33496485+vaclavskala@users.noreply.github.com>
|
||||
Vaibhav Bhanawat <vaibhav.bhanawat@delphix.com> <88050553+vaibhav-delphix@users.noreply.github.com>
|
||||
Vandana Rungta <vrungta@amazon.com> <46906819+vandanarungta@users.noreply.github.com>
|
||||
Violet Purcell <vimproved@inventati.org> <66446404+vimproved@users.noreply.github.com>
|
||||
Vipin Kumar Verma <vipin.verma@hpe.com> <75025470+vermavipinkumar@users.noreply.github.com>
|
||||
Wolfgang Bumiller <w.bumiller@proxmox.com> <Blub@users.noreply.github.com>
|
||||
XDTG <click1799@163.com> <35128600+XDTG@users.noreply.github.com>
|
||||
xtouqh <xtouqh@hotmail.com> <72357159+xtouqh@users.noreply.github.com>
|
||||
Yuri Pankov <yuripv@FreeBSD.org> <113725409+yuripv@users.noreply.github.com>
|
||||
Yuri Pankov <yuripv@FreeBSD.org> <82001006+yuripv@users.noreply.github.com>
|
||||
|
45
AUTHORS
45
AUTHORS
@ -10,6 +10,7 @@ PAST MAINTAINERS:
|
||||
CONTRIBUTORS:
|
||||
|
||||
Aaron Fineman <abyxcos@gmail.com>
|
||||
Achill Gilgenast <achill@achill.org>
|
||||
Adam D. Moss <c@yotes.com>
|
||||
Adam Leventhal <ahl@delphix.com>
|
||||
Adam Stevko <adam.stevko@gmail.com>
|
||||
@ -29,6 +30,7 @@ CONTRIBUTORS:
|
||||
Alejandro Colomar <Colomar.6.4.3@GMail.com>
|
||||
Alejandro R. Sedeño <asedeno@mit.edu>
|
||||
Alek Pinchuk <alek@nexenta.com>
|
||||
Aleksandr Liber <aleksandr.liber@perforce.com>
|
||||
Aleksa Sarai <cyphar@cyphar.com>
|
||||
Alexander Eremin <a.eremin@nexenta.com>
|
||||
Alexander Lobakin <alobakin@pm.me>
|
||||
@ -36,6 +38,7 @@ CONTRIBUTORS:
|
||||
Alexander Pyhalov <apyhalov@gmail.com>
|
||||
Alexander Richardson <Alexander.Richardson@cl.cam.ac.uk>
|
||||
Alexander Stetsenko <ams@nexenta.com>
|
||||
Alexander Ziaee <ziaee@FreeBSD.org>
|
||||
Alex Braunegg <alex.braunegg@gmail.com>
|
||||
Alexey Shvetsov <alexxy@gentoo.org>
|
||||
Alexey Smirnoff <fling@member.fsf.org>
|
||||
@ -57,6 +60,7 @@ CONTRIBUTORS:
|
||||
Andreas Buschmann <andreas.buschmann@tech.net.de>
|
||||
Andreas Dilger <adilger@intel.com>
|
||||
Andreas Vögele <andreas@andreasvoegele.com>
|
||||
Andres <a-d-j-i@users.noreply.github.com>
|
||||
Andrew Barnes <barnes333@gmail.com>
|
||||
Andrew Hamilton <ahamilto@tjhsst.edu>
|
||||
Andrew Innes <andrew.c12@gmail.com>
|
||||
@ -70,6 +74,7 @@ CONTRIBUTORS:
|
||||
Andrey Prokopenko <job@terem.fr>
|
||||
Andrey Vesnovaty <andrey.vesnovaty@gmail.com>
|
||||
Andriy Gapon <avg@freebsd.org>
|
||||
Andriy Tkachuk <andriy.tkachuk@seagate.com>
|
||||
Andy Bakun <github@thwartedefforts.org>
|
||||
Andy Fiddaman <omnios@citrus-it.co.uk>
|
||||
Aniruddha Shankar <k@191a.net>
|
||||
@ -80,6 +85,7 @@ CONTRIBUTORS:
|
||||
Arne Jansen <arne@die-jansens.de>
|
||||
Aron Xu <happyaron.xu@gmail.com>
|
||||
Arshad Hussain <arshad.hussain@aeoncomputing.com>
|
||||
Artem <artem.vlasenko@ossrevival.org>
|
||||
Arun KV <arun.kv@datacore.com>
|
||||
Arvind Sankar <nivedita@alum.mit.edu>
|
||||
Attila Fülöp <attila@fueloep.org>
|
||||
@ -117,6 +123,7 @@ CONTRIBUTORS:
|
||||
Caleb James DeLisle <calebdelisle@lavabit.com>
|
||||
Cameron Harr <harr1@llnl.gov>
|
||||
Cao Xuewen <cao.xuewen@zte.com.cn>
|
||||
Carl George <carlwgeorge@gmail.com>
|
||||
Carlo Landmeter <clandmeter@gmail.com>
|
||||
Carlos Alberto Lopez Perez <clopez@igalia.com>
|
||||
Cedric Maunoury <cedric.maunoury@gmail.com>
|
||||
@ -197,6 +204,7 @@ CONTRIBUTORS:
|
||||
Dimitri John Ledkov <xnox@ubuntu.com>
|
||||
Dimitry Andric <dimitry@andric.com>
|
||||
Dirkjan Bussink <d.bussink@gmail.com>
|
||||
Diwakar Kristappagari <diwakar-k@hpe.com>
|
||||
Dmitry Khasanov <pik4ez@gmail.com>
|
||||
Dominic Pearson <dsp@technoanimal.net>
|
||||
Dominik Hassler <hadfl@omniosce.org>
|
||||
@ -226,10 +234,12 @@ CONTRIBUTORS:
|
||||
Fedor Uporov <fuporov.vstack@gmail.com>
|
||||
Felix Dörre <felix@dogcraft.de>
|
||||
Felix Neumärker <xdch47@posteo.de>
|
||||
Felix Schmidt <felixschmidt20@aol.com>
|
||||
Feng Sun <loyou85@gmail.com>
|
||||
Finix Yan <yancw@info2soft.com>
|
||||
Francesco Mazzoli <f@mazzo.li>
|
||||
Frederik Wessels <wessels147@gmail.com>
|
||||
Friedrich Weber <f.weber@proxmox.com>
|
||||
Frédéric Vanniere <f.vanniere@planet-work.com>
|
||||
Gabriel A. Devenyi <gdevenyi@gmail.com>
|
||||
Garrett D'Amore <garrett@nexenta.com>
|
||||
@ -245,9 +255,11 @@ CONTRIBUTORS:
|
||||
George Wilson <gwilson@delphix.com>
|
||||
Georgy Yakovlev <ya@sysdump.net>
|
||||
Gerardwx <gerardw@alum.mit.edu>
|
||||
Germano Massullo <germano.massullo@gmail.com>
|
||||
Gian-Carlo DeFazio <defazio1@llnl.gov>
|
||||
Gionatan Danti <g.danti@assyoma.it>
|
||||
Giuseppe Di Natale <guss80@gmail.com>
|
||||
Gleb Smirnoff <glebius@FreeBSD.org>
|
||||
Glenn Washburn <development@efficientek.com>
|
||||
glibg10b <glibg10b@users.noreply.github.com>
|
||||
gofaster <felix.gofaster@gmail.com>
|
||||
@ -281,12 +293,14 @@ CONTRIBUTORS:
|
||||
Igor K <igor@dilos.org>
|
||||
Igor Kozhukhov <ikozhukhov@gmail.com>
|
||||
Igor Lvovsky <ilvovsky@gmail.com>
|
||||
Igor Ostapenko <pm@igoro.pro>
|
||||
ilbsmart <wgqimut@gmail.com>
|
||||
Ilkka Sovanto <github@ilkka.kapsi.fi>
|
||||
illiliti <illiliti@protonmail.com>
|
||||
ilovezfs <ilovezfs@icloud.com>
|
||||
InsanePrawn <Insane.Prawny@gmail.com>
|
||||
Isaac Huang <he.huang@intel.com>
|
||||
Ivan Volosyuk <Ivan.Volosyuk@gmail.com>
|
||||
Jacek Fefliński <feflik@gmail.com>
|
||||
Jacob Adams <tookmund@gmail.com>
|
||||
Jake Howard <git@theorangeone.net>
|
||||
@ -294,6 +308,7 @@ CONTRIBUTORS:
|
||||
James H <james@kagisoft.co.uk>
|
||||
James Lee <jlee@thestaticvoid.com>
|
||||
James Pan <jiaming.pan@yahoo.com>
|
||||
James Reilly <jreilly1821@gmail.com>
|
||||
James Wah <james@laird-wah.net>
|
||||
Jan Engelhardt <jengelh@inai.de>
|
||||
Jan Kryl <jan.kryl@nexenta.com>
|
||||
@ -305,6 +320,7 @@ CONTRIBUTORS:
|
||||
Jason Lee <jasonlee@lanl.gov>
|
||||
Jason Zaman <jasonzaman@gmail.com>
|
||||
Javen Wu <wu.javen@gmail.com>
|
||||
Jaydeep Kshirsagar <jkshirsagar@maxlinear.com>
|
||||
Jean-Baptiste Lallement <jean-baptiste@ubuntu.com>
|
||||
Jeff Dike <jdike@akamai.com>
|
||||
Jeremy Faulkner <gldisater@gmail.com>
|
||||
@ -312,10 +328,12 @@ CONTRIBUTORS:
|
||||
Jeremy Jones <jeremy@delphix.com>
|
||||
Jeremy Visser <jeremy.visser@gmail.com>
|
||||
Jerry Jelinek <jerry.jelinek@joyent.com>
|
||||
Jerzy Kołosowski <jerzy@kolosowscy.pl>
|
||||
Jessica Clarke <jrtc27@jrtc27.com>
|
||||
Jinshan Xiong <jinshan.xiong@intel.com>
|
||||
Jitendra Patidar <jitendra.patidar@nutanix.com>
|
||||
JK Dingwall <james@dingwall.me.uk>
|
||||
Joel Low <joel@joelsplace.sg>
|
||||
Joe Stein <joe.stein@delphix.com>
|
||||
John-Mark Gurney <jmg@funkthat.com>
|
||||
John Albietz <inthecloud247@gmail.com>
|
||||
@ -364,6 +382,7 @@ CONTRIBUTORS:
|
||||
Kevin Jin <lostking2008@hotmail.com>
|
||||
Kevin P. Fleming <kevin@km6g.us>
|
||||
Kevin Tanguy <kevin.tanguy@ovh.net>
|
||||
khoang98 <khoang98@users.noreply.github.com>
|
||||
KireinaHoro <i@jsteward.moe>
|
||||
Kjeld Schouten-Lebbing <kjeld@schouten-lebbing.nl>
|
||||
Kleber Tarcísio <klebertarcisio@yahoo.com.br>
|
||||
@ -371,6 +390,7 @@ CONTRIBUTORS:
|
||||
Kohsuke Kawaguchi <kk@kohsuke.org>
|
||||
Konstantin Khorenko <khorenko@virtuozzo.com>
|
||||
KORN Andras <korn@elan.rulez.org>
|
||||
kotauskas <v.toncharov@gmail.com>
|
||||
Kristof Provost <github@sigsegv.be>
|
||||
Krzysztof Piecuch <piecuch@kpiecuch.pl>
|
||||
Kyle Blatter <kyleblatter@llnl.gov>
|
||||
@ -422,6 +442,7 @@ CONTRIBUTORS:
|
||||
Mathieu Velten <matmaul@gmail.com>
|
||||
Matt Fiddaman <github@m.fiddaman.uk>
|
||||
Matthew Ahrens <matt@delphix.com>
|
||||
Matthew Heller <matthew.f.heller@gmail.com>
|
||||
Matthew Thode <mthode@mthode.org>
|
||||
Matthias Blankertz <matthias@blankertz.org>
|
||||
Matt Johnston <matt@fugro-fsi.com.au>
|
||||
@ -435,6 +456,7 @@ CONTRIBUTORS:
|
||||
Max Zettlmeißl <max@zettlmeissl.de>
|
||||
Md Islam <mdnahian@outlook.com>
|
||||
megari <megari@iki.fi>
|
||||
Meriel Luna Mittelbach <lunarlambda@gmail.com>
|
||||
Michael D Labriola <michael.d.labriola@gmail.com>
|
||||
Michael Franzl <michael@franzl.name>
|
||||
Michael Gebetsroither <michael@mgeb.org>
|
||||
@ -450,6 +472,7 @@ CONTRIBUTORS:
|
||||
Mike Swanson <mikeonthecomputer@gmail.com>
|
||||
Milan Jurik <milan.jurik@xylab.cz>
|
||||
Minsoo Choo <minsoochoo0122@proton.me>
|
||||
mnrx <mnrx@users.noreply.github.com>
|
||||
Mohamed Tawfik <m_tawfik@aucegypt.edu>
|
||||
Morgan Jones <mjones@rice.edu>
|
||||
Moritz Maxeiner <moritz@ucworks.org>
|
||||
@ -475,12 +498,13 @@ CONTRIBUTORS:
|
||||
Olaf Faaland <faaland1@llnl.gov>
|
||||
Oleg Drokin <green@linuxhacker.ru>
|
||||
Oleg Stepura <oleg@stepura.com>
|
||||
Olivier Certner <olce.freebsd@certner.fr>
|
||||
Olivier Certner <olce@FreeBSD.org>
|
||||
Olivier Mazouffre <olivier.mazouffre@ims-bordeaux.fr>
|
||||
omni <omni+vagant@hack.org>
|
||||
Orivej Desh <orivej@gmx.fr>
|
||||
Pablo Correa Gómez <ablocorrea@hotmail.com>
|
||||
Palash Gandhi <pbg4930@rit.edu>
|
||||
Patrick Fasano <patrick@patrickfasano.com>
|
||||
Patrick Mooney <pmooney@pfmooney.com>
|
||||
Patrik Greco <sikevux@sikevux.se>
|
||||
Paul B. Henson <henson@acm.org>
|
||||
@ -492,6 +516,7 @@ CONTRIBUTORS:
|
||||
Pawel Jakub Dawidek <pjd@FreeBSD.org>
|
||||
Pedro Giffuni <pfg@freebsd.org>
|
||||
Peng <peng.hse@xtaotech.com>
|
||||
Peng Liu <littlenewton6@gmail.com>
|
||||
Peter Ashford <ashford@accs.com>
|
||||
Peter Dave Hello <hsu@peterdavehello.org>
|
||||
Peter Doherty <peterd@acranox.org>
|
||||
@ -501,15 +526,18 @@ CONTRIBUTORS:
|
||||
Philip Pokorny <ppokorny@penguincomputing.com>
|
||||
Philipp Riederer <pt@philipptoelke.de>
|
||||
Phil Kauffman <philip@kauffman.me>
|
||||
Phil Sutter <phil@nwl.cc>
|
||||
Ping Huang <huangping@smartx.com>
|
||||
Piotr Kubaj <pkubaj@anongoth.pl>
|
||||
Piotr P. Stefaniak <pstef@freebsd.org>
|
||||
poscat <poscat@poscat.moe>
|
||||
Prakash Surya <prakash.surya@delphix.com>
|
||||
Prasad Joshi <prasadjoshi124@gmail.com>
|
||||
privb0x23 <privb0x23@users.noreply.github.com>
|
||||
P.SCH <p88@yahoo.com>
|
||||
Qiuhao Chen <chenqiuhao1997@gmail.com>
|
||||
Quartz <yyhran@163.com>
|
||||
Quentin Thébault <quentin.thebault@defenso.fr>
|
||||
Quentin Zdanis <zdanisq@gmail.com>
|
||||
Rafael Kitover <rkitover@gmail.com>
|
||||
RageLtMan <sempervictus@users.noreply.github.com>
|
||||
@ -518,6 +546,7 @@ CONTRIBUTORS:
|
||||
Remy Blank <remy.blank@pobox.com>
|
||||
renelson <bnelson@nelsonbe.com>
|
||||
Reno Reckling <e-github@wthack.de>
|
||||
René Wirnata <rene.wirnata@pandascience.net>
|
||||
Ricardo M. Correia <ricardo.correia@oracle.com>
|
||||
Riccardo Schirone <rschirone91@gmail.com>
|
||||
Richard Allen <belperite@gmail.com>
|
||||
@ -561,18 +590,24 @@ CONTRIBUTORS:
|
||||
Scot W. Stevenson <scot.stevenson@gmail.com>
|
||||
Sean Eric Fagan <sef@ixsystems.com>
|
||||
Sebastian Gottschall <s.gottschall@dd-wrt.com>
|
||||
Sebastian Pauka <me@spauka.se>
|
||||
Sebastian Wuerl <s.wuerl@mailbox.org>
|
||||
Sebastien Roy <seb@delphix.com>
|
||||
Sen Haerens <sen@senhaerens.be>
|
||||
Serapheim Dimitropoulos <serapheim@delphix.com>
|
||||
Seth Forshee <seth.forshee@canonical.com>
|
||||
Seth Hoffert <Seth.Hoffert@gmail.com>
|
||||
Seth Troisi <sethtroisi@google.com>
|
||||
Shaan Nobee <sniper111@gmail.com>
|
||||
Shampavman <sham.pavman@nexenta.com>
|
||||
Shaun Tancheff <shaun@aeonazure.com>
|
||||
Shawn Bayern <sbayern@law.fsu.edu>
|
||||
Shengqi Chen <harry-chen@outlook.com>
|
||||
SHENGYI HONG <aokblast@FreeBSD.org>
|
||||
Shen Yan <shenyanxxxy@qq.com>
|
||||
Sietse <sietse@wizdom.nu>
|
||||
Simon Guest <simon.guest@tesujimath.org>
|
||||
Simon Howard <fraggle@soulsphere.org>
|
||||
Simon Klinkert <simon.klinkert@gmail.com>
|
||||
Sowrabha Gopal <sowrabha.gopal@delphix.com>
|
||||
Spencer Kinny <spencerkinny1995@gmail.com>
|
||||
@ -594,10 +629,12 @@ CONTRIBUTORS:
|
||||
Stéphane Lesimple <speed47_github@speed47.net>
|
||||
Suman Chakravartula <schakrava@gmail.com>
|
||||
Sydney Vanda <sydney.m.vanda@intel.com>
|
||||
Syed Shahrukh Hussain <syed.shahrukh@ossrevival.org>
|
||||
Sören Tempel <soeren+git@soeren-tempel.net>
|
||||
Tamas TEVESZ <ice@extreme.hu>
|
||||
Teodor Spæren <teodor_spaeren@riseup.net>
|
||||
TerraTech <TerraTech@users.noreply.github.com>
|
||||
Theera K. <tkittich@hotmail.com>
|
||||
Thijs Cramer <thijs.cramer@gmail.com>
|
||||
Thomas Bertschinger <bertschinger@lanl.gov>
|
||||
Thomas Geppert <geppi@digitx.de>
|
||||
@ -610,9 +647,12 @@ CONTRIBUTORS:
|
||||
timor <timor.dd@googlemail.com>
|
||||
Timothy Day <tday141@gmail.com>
|
||||
Tim Schumacher <timschumi@gmx.de>
|
||||
Tim Smith <tim@mondoo.com>
|
||||
Tino Reichardt <milky-zfs@mcmilk.de>
|
||||
tleydxdy <shironeko.github@tesaguri.club>
|
||||
Tobin Harding <me@tobin.cc>
|
||||
Todd Seidelmann <seidelma@users.noreply.github.com>
|
||||
Todd Zullinger <tmz@pobox.com>
|
||||
Tom Caputi <tcaputi@datto.com>
|
||||
Tom Matthews <tom@axiom-partners.com>
|
||||
Tomohiro Kusumi <kusumi.tomohiro@gmail.com>
|
||||
@ -626,6 +666,7 @@ CONTRIBUTORS:
|
||||
Trevor Bautista <trevrb@trevrb.net>
|
||||
Trey Dockendorf <treydock@gmail.com>
|
||||
Troels Nørgaard <tnn@tradeshift.com>
|
||||
tstabrawa <tstabrawa@users.noreply.github.com>
|
||||
Tulsi Jain <tulsi.jain@delphix.com>
|
||||
Turbo Fredriksson <turbo@bayour.com>
|
||||
Tyler J. Stachecki <stachecki.tyler@gmail.com>
|
||||
@ -633,6 +674,7 @@ CONTRIBUTORS:
|
||||
Vaibhav Bhanawat <vaibhav.bhanawat@delphix.com>
|
||||
Valmiky Arquissandas <kayvlim@gmail.com>
|
||||
Val Packett <val@packett.cool>
|
||||
Vandana Rungta <vrungta@amazon.com>
|
||||
Vince van Oosten <techhazard@codeforyouand.me>
|
||||
Violet Purcell <vimproved@inventati.org>
|
||||
Vipin Kumar Verma <vipin.verma@hpe.com>
|
||||
@ -648,6 +690,7 @@ CONTRIBUTORS:
|
||||
Windel Bouwman <windel@windel.nl>
|
||||
Wojciech Małota-Wójcik <outofforest@users.noreply.github.com>
|
||||
Wolfgang Bumiller <w.bumiller@proxmox.com>
|
||||
XDTG <click1799@163.com>
|
||||
Xin Li <delphij@FreeBSD.org>
|
||||
Xinliang Liu <xinliang.liu@linaro.org>
|
||||
xtouqh <xtouqh@hotmail.com>
|
||||
|
6
META
6
META
@ -1,10 +1,10 @@
|
||||
Meta: 1
|
||||
Name: zfs
|
||||
Branch: 1.0
|
||||
Version: 2.2.5
|
||||
Version: 2.4.99
|
||||
Release: 1
|
||||
Release-Tags: relext
|
||||
License: CDDL
|
||||
Author: OpenZFS
|
||||
Linux-Maximum: 6.9
|
||||
Linux-Minimum: 3.10
|
||||
Linux-Maximum: 6.17
|
||||
Linux-Minimum: 4.18
|
||||
|
@ -1,6 +1,7 @@
|
||||
CLEANFILES =
|
||||
dist_noinst_DATA =
|
||||
INSTALL_DATA_HOOKS =
|
||||
INSTALL_EXEC_HOOKS =
|
||||
ALL_LOCAL =
|
||||
CLEAN_LOCAL =
|
||||
CHECKS = shellcheck checkbashisms
|
||||
@ -71,6 +72,9 @@ all: gitrev
|
||||
PHONY += install-data-hook $(INSTALL_DATA_HOOKS)
|
||||
install-data-hook: $(INSTALL_DATA_HOOKS)
|
||||
|
||||
PHONY += install-exec-hook $(INSTALL_EXEC_HOOKS)
|
||||
install-exec-hook: $(INSTALL_EXEC_HOOKS)
|
||||
|
||||
PHONY += maintainer-clean-local
|
||||
maintainer-clean-local:
|
||||
-$(RM) $(GITREV)
|
||||
@ -112,6 +116,10 @@ commitcheck:
|
||||
${top_srcdir}/scripts/commitcheck.sh; \
|
||||
fi
|
||||
|
||||
CHECKS += spdxcheck
|
||||
spdxcheck:
|
||||
$(AM_V_at)$(top_srcdir)/scripts/spdxcheck.pl
|
||||
|
||||
if HAVE_PARALLEL
|
||||
cstyle_line = -print0 | parallel -X0 ${top_srcdir}/scripts/cstyle.pl -cpP {}
|
||||
else
|
||||
|
@ -32,4 +32,4 @@ For more details see the NOTICE, LICENSE and COPYRIGHT files; `UCRL-CODE-235197`
|
||||
|
||||
# Supported Kernels
|
||||
* The `META` file contains the officially recognized supported Linux kernel versions.
|
||||
* Supported FreeBSD versions are any supported branches and releases starting from 12.4-RELEASE.
|
||||
* Supported FreeBSD versions are any supported branches and releases starting from 13.0-RELEASE.
|
||||
|
@ -28,7 +28,7 @@ Two release branches are maintained for OpenZFS, they are:
|
||||
Minor changes to support these distribution kernels will be applied as
|
||||
needed. New kernel versions released after the OpenZFS LTS release are
|
||||
not supported. LTS releases will receive patches for at least 2 years.
|
||||
The current LTS release is OpenZFS 2.1.
|
||||
The current LTS release is OpenZFS 2.2.
|
||||
|
||||
* OpenZFS current - Tracks the newest MAJOR.MINOR release. This branch
|
||||
includes support for the latest OpenZFS features and recently releases
|
||||
|
@ -24,7 +24,7 @@ zfs_ids_to_path_LDADD = \
|
||||
libzfs.la
|
||||
|
||||
|
||||
zhack_CPPFLAGS = $(AM_CPPFLAGS) $(FORCEDEBUG_CPPFLAGS)
|
||||
zhack_CPPFLAGS = $(AM_CPPFLAGS) $(LIBZPOOL_CPPFLAGS)
|
||||
|
||||
sbin_PROGRAMS += zhack
|
||||
CPPCHECKTARGETS += zhack
|
||||
@ -39,9 +39,7 @@ zhack_LDADD = \
|
||||
|
||||
|
||||
ztest_CFLAGS = $(AM_CFLAGS) $(KERNEL_CFLAGS)
|
||||
# Get rid of compiler warning for unchecked truncating snprintfs on gcc 7.1.1
|
||||
ztest_CFLAGS += $(NO_FORMAT_TRUNCATION)
|
||||
ztest_CPPFLAGS = $(AM_CPPFLAGS) $(FORCEDEBUG_CPPFLAGS)
|
||||
ztest_CPPFLAGS = $(AM_CPPFLAGS) $(LIBZPOOL_CPPFLAGS)
|
||||
|
||||
sbin_PROGRAMS += ztest
|
||||
CPPCHECKTARGETS += ztest
|
||||
@ -100,17 +98,16 @@ endif
|
||||
|
||||
|
||||
if USING_PYTHON
|
||||
bin_SCRIPTS += arc_summary arcstat dbufstat zilstat
|
||||
CLEANFILES += arc_summary arcstat dbufstat zilstat
|
||||
dist_noinst_DATA += %D%/arc_summary %D%/arcstat.in %D%/dbufstat.in %D%/zilstat.in
|
||||
bin_SCRIPTS += zarcsummary zarcstat dbufstat zilstat
|
||||
CLEANFILES += zarcsummary zarcstat dbufstat zilstat
|
||||
dist_noinst_DATA += %D%/zarcsummary %D%/zarcstat.in %D%/dbufstat.in %D%/zilstat.in
|
||||
|
||||
$(call SUBST,arcstat,%D%/)
|
||||
$(call SUBST,zarcstat,%D%/)
|
||||
$(call SUBST,dbufstat,%D%/)
|
||||
$(call SUBST,zilstat,%D%/)
|
||||
arc_summary: %D%/arc_summary
|
||||
zarcsummary: %D%/zarcsummary
|
||||
$(AM_V_at)cp $< $@
|
||||
endif
|
||||
|
||||
|
||||
PHONY += cmd
|
||||
cmd: $(bin_SCRIPTS) $(bin_PROGRAMS) $(sbin_SCRIPTS) $(sbin_PROGRAMS) $(dist_bin_SCRIPTS) $(zfsexec_PROGRAMS) $(mounthelper_PROGRAMS)
|
||||
|
@ -1,4 +1,5 @@
|
||||
#!/usr/bin/env @PYTHON_SHEBANG@
|
||||
# SPDX-License-Identifier: CDDL-1.0
|
||||
#
|
||||
# Print out statistics for all cached dmu buffers. This information
|
||||
# is available through the dbufs kstat and may be post-processed as
|
||||
@ -37,7 +38,7 @@ import re
|
||||
|
||||
bhdr = ["pool", "objset", "object", "level", "blkid", "offset", "dbsize"]
|
||||
bxhdr = ["pool", "objset", "object", "level", "blkid", "offset", "dbsize",
|
||||
"meta", "state", "dbholds", "dbc", "list", "atype", "flags",
|
||||
"usize", "meta", "state", "dbholds", "dbc", "list", "atype", "flags",
|
||||
"count", "asize", "access", "mru", "gmru", "mfu", "gmfu", "l2",
|
||||
"l2_dattr", "l2_asize", "l2_comp", "aholds", "dtype", "btype",
|
||||
"data_bs", "meta_bs", "bsize", "lvls", "dholds", "blocks", "dsize"]
|
||||
@ -47,17 +48,17 @@ dhdr = ["pool", "objset", "object", "dtype", "cached"]
|
||||
dxhdr = ["pool", "objset", "object", "dtype", "btype", "data_bs", "meta_bs",
|
||||
"bsize", "lvls", "dholds", "blocks", "dsize", "cached", "direct",
|
||||
"indirect", "bonus", "spill"]
|
||||
dincompat = ["level", "blkid", "offset", "dbsize", "meta", "state", "dbholds",
|
||||
"dbc", "list", "atype", "flags", "count", "asize", "access",
|
||||
"mru", "gmru", "mfu", "gmfu", "l2", "l2_dattr", "l2_asize",
|
||||
"l2_comp", "aholds"]
|
||||
dincompat = ["level", "blkid", "offset", "dbsize", "usize", "meta", "state",
|
||||
"dbholds", "dbc", "list", "atype", "flags", "count", "asize",
|
||||
"access", "mru", "gmru", "mfu", "gmfu", "l2", "l2_dattr",
|
||||
"l2_asize", "l2_comp", "aholds"]
|
||||
|
||||
thdr = ["pool", "objset", "dtype", "cached"]
|
||||
txhdr = ["pool", "objset", "dtype", "cached", "direct", "indirect",
|
||||
"bonus", "spill"]
|
||||
tincompat = ["object", "level", "blkid", "offset", "dbsize", "meta", "state",
|
||||
"dbc", "dbholds", "list", "atype", "flags", "count", "asize",
|
||||
"access", "mru", "gmru", "mfu", "gmfu", "l2", "l2_dattr",
|
||||
tincompat = ["object", "level", "blkid", "offset", "dbsize", "usize", "meta",
|
||||
"state", "dbc", "dbholds", "list", "atype", "flags", "count",
|
||||
"asize", "access", "mru", "gmru", "mfu", "gmfu", "l2", "l2_dattr",
|
||||
"l2_asize", "l2_comp", "aholds", "btype", "data_bs", "meta_bs",
|
||||
"bsize", "lvls", "dholds", "blocks", "dsize"]
|
||||
|
||||
@ -70,6 +71,7 @@ cols = {
|
||||
"blkid": [8, -1, "block number of buffer"],
|
||||
"offset": [12, 1024, "offset in object of buffer"],
|
||||
"dbsize": [7, 1024, "size of buffer"],
|
||||
"usize": [7, 1024, "size of attached user data"],
|
||||
"meta": [4, -1, "is this buffer metadata?"],
|
||||
"state": [5, -1, "state of buffer (read, cached, etc)"],
|
||||
"dbholds": [7, 1000, "number of holds on buffer"],
|
||||
@ -399,6 +401,7 @@ def update_dict(d, k, line, labels):
|
||||
key = line[labels[k]]
|
||||
|
||||
dbsize = int(line[labels['dbsize']])
|
||||
usize = int(line[labels['usize']])
|
||||
blkid = int(line[labels['blkid']])
|
||||
level = int(line[labels['level']])
|
||||
|
||||
@ -416,7 +419,7 @@ def update_dict(d, k, line, labels):
|
||||
d[pool][objset][key]['indirect'] = 0
|
||||
d[pool][objset][key]['spill'] = 0
|
||||
|
||||
d[pool][objset][key]['cached'] += dbsize
|
||||
d[pool][objset][key]['cached'] += dbsize + usize
|
||||
|
||||
if blkid == -1:
|
||||
d[pool][objset][key]['bonus'] += dbsize
|
||||
|
@ -1,3 +1,4 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
@ -269,8 +270,7 @@ main(int argc, char **argv)
|
||||
return (MOUNT_USAGE);
|
||||
}
|
||||
|
||||
if (!zfsutil || sloppy ||
|
||||
libzfs_envvar_is_set("ZFS_MOUNT_HELPER")) {
|
||||
if (sloppy || libzfs_envvar_is_set("ZFS_MOUNT_HELPER")) {
|
||||
zfs_adjust_mount_options(zhp, mntpoint, mntopts, mtabopt);
|
||||
}
|
||||
|
||||
@ -337,7 +337,7 @@ main(int argc, char **argv)
|
||||
dataset, mntpoint, mntflags, zfsflags, mntopts, mtabopt);
|
||||
|
||||
if (!fake) {
|
||||
if (zfsutil && !sloppy &&
|
||||
if (!remount && !sloppy &&
|
||||
!libzfs_envvar_is_set("ZFS_MOUNT_HELPER")) {
|
||||
error = zfs_mount_at(zhp, mntopts, mntflags, mntpoint);
|
||||
if (error) {
|
||||
|
@ -1,5 +1,5 @@
|
||||
raidz_test_CFLAGS = $(AM_CFLAGS) $(KERNEL_CFLAGS)
|
||||
raidz_test_CPPFLAGS = $(AM_CPPFLAGS) $(FORCEDEBUG_CPPFLAGS)
|
||||
raidz_test_CPPFLAGS = $(AM_CPPFLAGS) $(LIBZPOOL_CPPFLAGS)
|
||||
|
||||
bin_PROGRAMS += raidz_test
|
||||
CPPCHECKTARGETS += raidz_test
|
||||
|
@ -1,3 +1,4 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
@ -84,10 +85,10 @@ run_gen_bench_impl(const char *impl)
|
||||
|
||||
if (rto_opts.rto_expand) {
|
||||
rm_bench = vdev_raidz_map_alloc_expanded(
|
||||
zio_bench.io_abd,
|
||||
zio_bench.io_size, zio_bench.io_offset,
|
||||
&zio_bench,
|
||||
rto_opts.rto_ashift, ncols+1, ncols,
|
||||
fn+1, rto_opts.rto_expand_offset);
|
||||
fn+1, rto_opts.rto_expand_offset,
|
||||
0, B_FALSE);
|
||||
} else {
|
||||
rm_bench = vdev_raidz_map_alloc(&zio_bench,
|
||||
BENCH_ASHIFT, ncols, fn+1);
|
||||
@ -172,10 +173,10 @@ run_rec_bench_impl(const char *impl)
|
||||
|
||||
if (rto_opts.rto_expand) {
|
||||
rm_bench = vdev_raidz_map_alloc_expanded(
|
||||
zio_bench.io_abd,
|
||||
zio_bench.io_size, zio_bench.io_offset,
|
||||
&zio_bench,
|
||||
BENCH_ASHIFT, ncols+1, ncols,
|
||||
PARITY_PQR, rto_opts.rto_expand_offset);
|
||||
PARITY_PQR,
|
||||
rto_opts.rto_expand_offset, 0, B_FALSE);
|
||||
} else {
|
||||
rm_bench = vdev_raidz_map_alloc(&zio_bench,
|
||||
BENCH_ASHIFT, ncols, PARITY_PQR);
|
||||
|
@ -1,3 +1,4 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
@ -327,14 +328,12 @@ init_raidz_golden_map(raidz_test_opts_t *opts, const int parity)
|
||||
|
||||
if (opts->rto_expand) {
|
||||
opts->rm_golden =
|
||||
vdev_raidz_map_alloc_expanded(opts->zio_golden->io_abd,
|
||||
opts->zio_golden->io_size, opts->zio_golden->io_offset,
|
||||
vdev_raidz_map_alloc_expanded(opts->zio_golden,
|
||||
opts->rto_ashift, total_ncols+1, total_ncols,
|
||||
parity, opts->rto_expand_offset);
|
||||
rm_test = vdev_raidz_map_alloc_expanded(zio_test->io_abd,
|
||||
zio_test->io_size, zio_test->io_offset,
|
||||
parity, opts->rto_expand_offset, 0, B_FALSE);
|
||||
rm_test = vdev_raidz_map_alloc_expanded(zio_test,
|
||||
opts->rto_ashift, total_ncols+1, total_ncols,
|
||||
parity, opts->rto_expand_offset);
|
||||
parity, opts->rto_expand_offset, 0, B_FALSE);
|
||||
} else {
|
||||
opts->rm_golden = vdev_raidz_map_alloc(opts->zio_golden,
|
||||
opts->rto_ashift, total_ncols, parity);
|
||||
@ -361,187 +360,6 @@ init_raidz_golden_map(raidz_test_opts_t *opts, const int parity)
|
||||
return (err);
|
||||
}
|
||||
|
||||
/*
|
||||
* If reflow is not in progress, reflow_offset should be UINT64_MAX.
|
||||
* For each row, if the row is entirely before reflow_offset, it will
|
||||
* come from the new location. Otherwise this row will come from the
|
||||
* old location. Therefore, rows that straddle the reflow_offset will
|
||||
* come from the old location.
|
||||
*
|
||||
* NOTE: Until raidz expansion is implemented this function is only
|
||||
* needed by raidz_test.c to the multi-row raid_map_t functionality.
|
||||
*/
|
||||
raidz_map_t *
|
||||
vdev_raidz_map_alloc_expanded(abd_t *abd, uint64_t size, uint64_t offset,
|
||||
uint64_t ashift, uint64_t physical_cols, uint64_t logical_cols,
|
||||
uint64_t nparity, uint64_t reflow_offset)
|
||||
{
|
||||
/* The zio's size in units of the vdev's minimum sector size. */
|
||||
uint64_t s = size >> ashift;
|
||||
uint64_t q, r, bc, devidx, asize = 0, tot;
|
||||
|
||||
/*
|
||||
* "Quotient": The number of data sectors for this stripe on all but
|
||||
* the "big column" child vdevs that also contain "remainder" data.
|
||||
* AKA "full rows"
|
||||
*/
|
||||
q = s / (logical_cols - nparity);
|
||||
|
||||
/*
|
||||
* "Remainder": The number of partial stripe data sectors in this I/O.
|
||||
* This will add a sector to some, but not all, child vdevs.
|
||||
*/
|
||||
r = s - q * (logical_cols - nparity);
|
||||
|
||||
/* The number of "big columns" - those which contain remainder data. */
|
||||
bc = (r == 0 ? 0 : r + nparity);
|
||||
|
||||
/*
|
||||
* The total number of data and parity sectors associated with
|
||||
* this I/O.
|
||||
*/
|
||||
tot = s + nparity * (q + (r == 0 ? 0 : 1));
|
||||
|
||||
/* How many rows contain data (not skip) */
|
||||
uint64_t rows = howmany(tot, logical_cols);
|
||||
int cols = MIN(tot, logical_cols);
|
||||
|
||||
raidz_map_t *rm = kmem_zalloc(offsetof(raidz_map_t, rm_row[rows]),
|
||||
KM_SLEEP);
|
||||
rm->rm_nrows = rows;
|
||||
|
||||
for (uint64_t row = 0; row < rows; row++) {
|
||||
raidz_row_t *rr = kmem_alloc(offsetof(raidz_row_t,
|
||||
rr_col[cols]), KM_SLEEP);
|
||||
rm->rm_row[row] = rr;
|
||||
|
||||
/* The starting RAIDZ (parent) vdev sector of the row. */
|
||||
uint64_t b = (offset >> ashift) + row * logical_cols;
|
||||
|
||||
/*
|
||||
* If we are in the middle of a reflow, and any part of this
|
||||
* row has not been copied, then use the old location of
|
||||
* this row.
|
||||
*/
|
||||
int row_phys_cols = physical_cols;
|
||||
if (b + (logical_cols - nparity) > reflow_offset >> ashift)
|
||||
row_phys_cols--;
|
||||
|
||||
/* starting child of this row */
|
||||
uint64_t child_id = b % row_phys_cols;
|
||||
/* The starting byte offset on each child vdev. */
|
||||
uint64_t child_offset = (b / row_phys_cols) << ashift;
|
||||
|
||||
/*
|
||||
* We set cols to the entire width of the block, even
|
||||
* if this row is shorter. This is needed because parity
|
||||
* generation (for Q and R) needs to know the entire width,
|
||||
* because it treats the short row as though it was
|
||||
* full-width (and the "phantom" sectors were zero-filled).
|
||||
*
|
||||
* Another approach to this would be to set cols shorter
|
||||
* (to just the number of columns that we might do i/o to)
|
||||
* and have another mechanism to tell the parity generation
|
||||
* about the "entire width". Reconstruction (at least
|
||||
* vdev_raidz_reconstruct_general()) would also need to
|
||||
* know about the "entire width".
|
||||
*/
|
||||
rr->rr_cols = cols;
|
||||
rr->rr_bigcols = bc;
|
||||
rr->rr_missingdata = 0;
|
||||
rr->rr_missingparity = 0;
|
||||
rr->rr_firstdatacol = nparity;
|
||||
rr->rr_abd_empty = NULL;
|
||||
rr->rr_nempty = 0;
|
||||
|
||||
for (int c = 0; c < rr->rr_cols; c++, child_id++) {
|
||||
if (child_id >= row_phys_cols) {
|
||||
child_id -= row_phys_cols;
|
||||
child_offset += 1ULL << ashift;
|
||||
}
|
||||
rr->rr_col[c].rc_devidx = child_id;
|
||||
rr->rr_col[c].rc_offset = child_offset;
|
||||
rr->rr_col[c].rc_orig_data = NULL;
|
||||
rr->rr_col[c].rc_error = 0;
|
||||
rr->rr_col[c].rc_tried = 0;
|
||||
rr->rr_col[c].rc_skipped = 0;
|
||||
rr->rr_col[c].rc_need_orig_restore = B_FALSE;
|
||||
|
||||
uint64_t dc = c - rr->rr_firstdatacol;
|
||||
if (c < rr->rr_firstdatacol) {
|
||||
rr->rr_col[c].rc_size = 1ULL << ashift;
|
||||
rr->rr_col[c].rc_abd =
|
||||
abd_alloc_linear(rr->rr_col[c].rc_size,
|
||||
B_TRUE);
|
||||
} else if (row == rows - 1 && bc != 0 && c >= bc) {
|
||||
/*
|
||||
* Past the end, this for parity generation.
|
||||
*/
|
||||
rr->rr_col[c].rc_size = 0;
|
||||
rr->rr_col[c].rc_abd = NULL;
|
||||
} else {
|
||||
/*
|
||||
* "data column" (col excluding parity)
|
||||
* Add an ASCII art diagram here
|
||||
*/
|
||||
uint64_t off;
|
||||
|
||||
if (c < bc || r == 0) {
|
||||
off = dc * rows + row;
|
||||
} else {
|
||||
off = r * rows +
|
||||
(dc - r) * (rows - 1) + row;
|
||||
}
|
||||
rr->rr_col[c].rc_size = 1ULL << ashift;
|
||||
rr->rr_col[c].rc_abd = abd_get_offset_struct(
|
||||
&rr->rr_col[c].rc_abdstruct,
|
||||
abd, off << ashift, 1 << ashift);
|
||||
}
|
||||
|
||||
asize += rr->rr_col[c].rc_size;
|
||||
}
|
||||
/*
|
||||
* If all data stored spans all columns, there's a danger that
|
||||
* parity will always be on the same device and, since parity
|
||||
* isn't read during normal operation, that that device's I/O
|
||||
* bandwidth won't be used effectively. We therefore switch
|
||||
* the parity every 1MB.
|
||||
*
|
||||
* ...at least that was, ostensibly, the theory. As a practical
|
||||
* matter unless we juggle the parity between all devices
|
||||
* evenly, we won't see any benefit. Further, occasional writes
|
||||
* that aren't a multiple of the LCM of the number of children
|
||||
* and the minimum stripe width are sufficient to avoid pessimal
|
||||
* behavior. Unfortunately, this decision created an implicit
|
||||
* on-disk format requirement that we need to support for all
|
||||
* eternity, but only for single-parity RAID-Z.
|
||||
*
|
||||
* If we intend to skip a sector in the zeroth column for
|
||||
* padding we must make sure to note this swap. We will never
|
||||
* intend to skip the first column since at least one data and
|
||||
* one parity column must appear in each row.
|
||||
*/
|
||||
if (rr->rr_firstdatacol == 1 && rr->rr_cols > 1 &&
|
||||
(offset & (1ULL << 20))) {
|
||||
ASSERT(rr->rr_cols >= 2);
|
||||
ASSERT(rr->rr_col[0].rc_size == rr->rr_col[1].rc_size);
|
||||
devidx = rr->rr_col[0].rc_devidx;
|
||||
uint64_t o = rr->rr_col[0].rc_offset;
|
||||
rr->rr_col[0].rc_devidx = rr->rr_col[1].rc_devidx;
|
||||
rr->rr_col[0].rc_offset = rr->rr_col[1].rc_offset;
|
||||
rr->rr_col[1].rc_devidx = devidx;
|
||||
rr->rr_col[1].rc_offset = o;
|
||||
}
|
||||
|
||||
}
|
||||
ASSERT3U(asize, ==, tot << ashift);
|
||||
|
||||
/* init RAIDZ parity ops */
|
||||
rm->rm_ops = vdev_raidz_math_get_ops();
|
||||
|
||||
return (rm);
|
||||
}
|
||||
|
||||
static raidz_map_t *
|
||||
init_raidz_map(raidz_test_opts_t *opts, zio_t **zio, const int parity)
|
||||
{
|
||||
@ -561,10 +379,9 @@ init_raidz_map(raidz_test_opts_t *opts, zio_t **zio, const int parity)
|
||||
init_zio_abd(*zio);
|
||||
|
||||
if (opts->rto_expand) {
|
||||
rm = vdev_raidz_map_alloc_expanded((*zio)->io_abd,
|
||||
(*zio)->io_size, (*zio)->io_offset,
|
||||
rm = vdev_raidz_map_alloc_expanded(*zio,
|
||||
opts->rto_ashift, total_ncols+1, total_ncols,
|
||||
parity, opts->rto_expand_offset);
|
||||
parity, opts->rto_expand_offset, 0, B_FALSE);
|
||||
} else {
|
||||
rm = vdev_raidz_map_alloc(*zio, opts->rto_ashift,
|
||||
total_ncols, parity);
|
||||
|
@ -1,3 +1,4 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
@ -119,7 +120,4 @@ void init_zio_abd(zio_t *zio);
|
||||
|
||||
void run_raidz_benchmark(void);
|
||||
|
||||
struct raidz_map *vdev_raidz_map_alloc_expanded(abd_t *, uint64_t, uint64_t,
|
||||
uint64_t, uint64_t, uint64_t, uint64_t, uint64_t);
|
||||
|
||||
#endif /* RAIDZ_TEST_H */
|
||||
|
@ -1,7 +1,8 @@
|
||||
#!/usr/bin/env @PYTHON_SHEBANG@
|
||||
# SPDX-License-Identifier: CDDL-1.0
|
||||
#
|
||||
# Print out ZFS ARC Statistics exported via kstat(1)
|
||||
# For a definition of fields, or usage, use arcstat -v
|
||||
# For a definition of fields, or usage, use zarcstat -v
|
||||
#
|
||||
# This script was originally a fork of the original arcstat.pl (0.1)
|
||||
# by Neelakanth Nadgir, originally published on his Sun blog on
|
||||
@ -55,6 +56,7 @@ import time
|
||||
import getopt
|
||||
import re
|
||||
import copy
|
||||
import os
|
||||
|
||||
from signal import signal, SIGINT, SIGWINCH, SIG_DFL
|
||||
|
||||
@ -152,6 +154,7 @@ cols = {
|
||||
"l2asize": [7, 1024, "Actual (compressed) size of the L2ARC"],
|
||||
"l2size": [6, 1024, "Size of the L2ARC"],
|
||||
"l2bytes": [7, 1024, "Bytes read per second from the L2ARC"],
|
||||
"l2wbytes": [8, 1024, "Bytes written per second to the L2ARC"],
|
||||
"grow": [4, 1000, "ARC grow disabled"],
|
||||
"need": [5, 1024, "ARC reclaim need"],
|
||||
"free": [5, 1024, "ARC free memory"],
|
||||
@ -169,6 +172,83 @@ cols = {
|
||||
"zactive": [7, 1000, "zfetch prefetches active per second"],
|
||||
}
|
||||
|
||||
# ARC structural breakdown from zarcsummary
|
||||
structfields = {
|
||||
"cmp": ["compressed", "Compressed"],
|
||||
"ovh": ["overhead", "Overhead"],
|
||||
"bon": ["bonus", "Bonus"],
|
||||
"dno": ["dnode", "Dnode"],
|
||||
"dbu": ["dbuf", "Dbuf"],
|
||||
"hdr": ["hdr", "Header"],
|
||||
"l2h": ["l2_hdr", "L2 header"],
|
||||
"abd": ["abd_chunk_waste", "ABD chunk waste"],
|
||||
}
|
||||
structstats = { # size stats
|
||||
"percent": "size", # percentage of this value
|
||||
"sz": ["_size", "size"],
|
||||
}
|
||||
|
||||
# ARC types breakdown from zarcsummary
|
||||
typefields = {
|
||||
"data": ["data", "ARC data"],
|
||||
"meta": ["metadata", "ARC metadata"],
|
||||
}
|
||||
typestats = { # size stats
|
||||
"percent": "cachessz", # percentage of this value
|
||||
"tg": ["_target", "target"],
|
||||
"sz": ["_size", "size"],
|
||||
}
|
||||
|
||||
# ARC states breakdown from zarcsummary
|
||||
statefields = {
|
||||
"ano": ["anon", "Anonymous"],
|
||||
"mfu": ["mfu", "MFU"],
|
||||
"mru": ["mru", "MRU"],
|
||||
"unc": ["uncached", "Uncached"],
|
||||
}
|
||||
targetstats = {
|
||||
"percent": "cachessz", # percentage of this value
|
||||
"fields": ["mfu", "mru"], # only applicable to these fields
|
||||
"tg": ["_target", "target"],
|
||||
"dt": ["_data_target", "data target"],
|
||||
"mt": ["_metadata_target", "metadata target"],
|
||||
}
|
||||
statestats = { # size stats
|
||||
"percent": "cachessz", # percentage of this value
|
||||
"sz": ["_size", "size"],
|
||||
"da": ["_data", "data size"],
|
||||
"me": ["_metadata", "metadata size"],
|
||||
"ed": ["_evictable_data", "evictable data size"],
|
||||
"em": ["_evictable_metadata", "evictable metadata size"],
|
||||
}
|
||||
ghoststats = {
|
||||
"fields": ["mfu", "mru"], # only applicable to these fields
|
||||
"gsz": ["_ghost_size", "ghost size"],
|
||||
"gd": ["_ghost_data", "ghost data size"],
|
||||
"gm": ["_ghost_metadata", "ghost metadata size"],
|
||||
}
|
||||
|
||||
# fields and stats
|
||||
fieldstats = [
|
||||
[structfields, structstats],
|
||||
[typefields, typestats],
|
||||
[statefields, targetstats, statestats, ghoststats],
|
||||
]
|
||||
for fs in fieldstats:
|
||||
fields, stats = fs[0], fs[1:]
|
||||
for field, fieldval in fields.items():
|
||||
for group in stats:
|
||||
for stat, statval in group.items():
|
||||
if stat in ["fields", "percent"] or \
|
||||
("fields" in group and field not in group["fields"]):
|
||||
continue
|
||||
colname = field + stat
|
||||
coldesc = fieldval[1] + " " + statval[1]
|
||||
cols[colname] = [len(colname), 1024, coldesc]
|
||||
if "percent" in group:
|
||||
cols[colname + "%"] = [len(colname) + 1, 100, \
|
||||
coldesc + " percentage"]
|
||||
|
||||
v = {}
|
||||
hdr = ["time", "read", "ddread", "ddh%", "dmread", "dmh%", "pread", "ph%",
|
||||
"size", "c", "avail"]
|
||||
@ -182,7 +262,7 @@ hdr_intr = 20 # Print header every 20 lines of output
|
||||
opfile = None
|
||||
sep = " " # Default separator is 2 spaces
|
||||
l2exist = False
|
||||
cmd = ("Usage: arcstat [-havxp] [-f fields] [-o file] [-s string] [interval "
|
||||
cmd = ("Usage: zarcstat [-havxp] [-f fields] [-o file] [-s string] [interval "
|
||||
"[count]]\n")
|
||||
cur = {}
|
||||
d = {}
|
||||
@ -269,10 +349,10 @@ def usage():
|
||||
"character or string\n")
|
||||
sys.stderr.write("\t -p : Disable auto-scaling of numerical fields\n")
|
||||
sys.stderr.write("\nExamples:\n")
|
||||
sys.stderr.write("\tarcstat -o /tmp/a.log 2 10\n")
|
||||
sys.stderr.write("\tarcstat -s \",\" -o /tmp/a.log 2 10\n")
|
||||
sys.stderr.write("\tarcstat -v\n")
|
||||
sys.stderr.write("\tarcstat -f time,hit%,dh%,ph%,mh% 1\n")
|
||||
sys.stderr.write("\tzarcstat -o /tmp/a.log 2 10\n")
|
||||
sys.stderr.write("\tzarcstat -s \",\" -o /tmp/a.log 2 10\n")
|
||||
sys.stderr.write("\tzarcstat -v\n")
|
||||
sys.stderr.write("\tzarcstat -f time,hit%,dh%,ph%,mh% 1\n")
|
||||
sys.stderr.write("\n")
|
||||
|
||||
sys.exit(1)
|
||||
@ -286,6 +366,29 @@ def snap_stats():
|
||||
kstat_update()
|
||||
|
||||
cur = kstat
|
||||
|
||||
# fill in additional values from zarcsummary
|
||||
cur["caches_size"] = caches_size = cur["anon_data"]+cur["anon_metadata"]+\
|
||||
cur["mfu_data"]+cur["mfu_metadata"]+cur["mru_data"]+cur["mru_metadata"]+\
|
||||
cur["uncached_data"]+cur["uncached_metadata"]
|
||||
s = 4294967296
|
||||
pd = cur["pd"]
|
||||
pm = cur["pm"]
|
||||
meta = cur["meta"]
|
||||
v = (s-int(pd))*(s-int(meta))/s
|
||||
cur["mfu_data_target"] = v / 65536 * caches_size / 65536
|
||||
v = (s-int(pm))*int(meta)/s
|
||||
cur["mfu_metadata_target"] = v / 65536 * caches_size / 65536
|
||||
v = int(pd)*(s-int(meta))/s
|
||||
cur["mru_data_target"] = v / 65536 * caches_size / 65536
|
||||
v = int(pm)*int(meta)/s
|
||||
cur["mru_metadata_target"] = v / 65536 * caches_size / 65536
|
||||
|
||||
cur["data_target"] = cur["mfu_data_target"] + cur["mru_data_target"]
|
||||
cur["metadata_target"] = cur["mfu_metadata_target"] + cur["mru_metadata_target"]
|
||||
cur["mfu_target"] = cur["mfu_data_target"] + cur["mfu_metadata_target"]
|
||||
cur["mru_target"] = cur["mru_data_target"] + cur["mru_metadata_target"]
|
||||
|
||||
for key in cur:
|
||||
if re.match(key, "class"):
|
||||
continue
|
||||
@ -295,31 +398,34 @@ def snap_stats():
|
||||
d[key] = cur[key]
|
||||
|
||||
|
||||
def isint(num):
|
||||
if isinstance(num, float):
|
||||
return num.is_integer()
|
||||
if isinstance(num, int):
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def prettynum(sz, scale, num=0):
|
||||
suffix = [' ', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']
|
||||
suffix = ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']
|
||||
index = 0
|
||||
save = 0
|
||||
|
||||
# Special case for date field
|
||||
if scale == -1:
|
||||
return "%s" % num
|
||||
|
||||
# Rounding error, return 0
|
||||
elif 0 < num < 1:
|
||||
num = 0
|
||||
if scale != 100:
|
||||
while abs(num) > scale and index < 5:
|
||||
num = num / scale
|
||||
index += 1
|
||||
|
||||
while abs(num) > scale and index < 5:
|
||||
save = num
|
||||
num = num / scale
|
||||
index += 1
|
||||
|
||||
if index == 0:
|
||||
return "%*d" % (sz, num)
|
||||
|
||||
if abs(save / scale) < 10:
|
||||
return "%*.1f%s" % (sz - 1, num, suffix[index])
|
||||
width = sz - (0 if index == 0 else 1)
|
||||
intlen = len("%.0f" % num) # %.0f rounds to nearest int
|
||||
if sint == 1 and isint(num) or width < intlen + 2:
|
||||
decimal = 0
|
||||
else:
|
||||
return "%*d%s" % (sz - 1, num, suffix[index])
|
||||
decimal = 1
|
||||
return "%*.*f%s" % (width, decimal, num, suffix[index])
|
||||
|
||||
|
||||
def print_values():
|
||||
@ -509,131 +615,149 @@ def calculate():
|
||||
|
||||
v = dict()
|
||||
v["time"] = time.strftime("%H:%M:%S", time.localtime())
|
||||
v["hits"] = d["hits"] // sint
|
||||
v["iohs"] = d["iohits"] // sint
|
||||
v["miss"] = d["misses"] // sint
|
||||
v["hits"] = d["hits"] / sint
|
||||
v["iohs"] = d["iohits"] / sint
|
||||
v["miss"] = d["misses"] / sint
|
||||
v["read"] = v["hits"] + v["iohs"] + v["miss"]
|
||||
v["hit%"] = 100 * v["hits"] // v["read"] if v["read"] > 0 else 0
|
||||
v["ioh%"] = 100 * v["iohs"] // v["read"] if v["read"] > 0 else 0
|
||||
v["hit%"] = 100 * v["hits"] / v["read"] if v["read"] > 0 else 0
|
||||
v["ioh%"] = 100 * v["iohs"] / v["read"] if v["read"] > 0 else 0
|
||||
v["miss%"] = 100 - v["hit%"] - v["ioh%"] if v["read"] > 0 else 0
|
||||
|
||||
v["dhit"] = (d["demand_data_hits"] + d["demand_metadata_hits"]) // sint
|
||||
v["dioh"] = (d["demand_data_iohits"] + d["demand_metadata_iohits"]) // sint
|
||||
v["dmis"] = (d["demand_data_misses"] + d["demand_metadata_misses"]) // sint
|
||||
v["dhit"] = (d["demand_data_hits"] + d["demand_metadata_hits"]) / sint
|
||||
v["dioh"] = (d["demand_data_iohits"] + d["demand_metadata_iohits"]) / sint
|
||||
v["dmis"] = (d["demand_data_misses"] + d["demand_metadata_misses"]) / sint
|
||||
|
||||
v["dread"] = v["dhit"] + v["dioh"] + v["dmis"]
|
||||
v["dh%"] = 100 * v["dhit"] // v["dread"] if v["dread"] > 0 else 0
|
||||
v["di%"] = 100 * v["dioh"] // v["dread"] if v["dread"] > 0 else 0
|
||||
v["dh%"] = 100 * v["dhit"] / v["dread"] if v["dread"] > 0 else 0
|
||||
v["di%"] = 100 * v["dioh"] / v["dread"] if v["dread"] > 0 else 0
|
||||
v["dm%"] = 100 - v["dh%"] - v["di%"] if v["dread"] > 0 else 0
|
||||
|
||||
v["ddhit"] = d["demand_data_hits"] // sint
|
||||
v["ddioh"] = d["demand_data_iohits"] // sint
|
||||
v["ddmis"] = d["demand_data_misses"] // sint
|
||||
v["ddhit"] = d["demand_data_hits"] / sint
|
||||
v["ddioh"] = d["demand_data_iohits"] / sint
|
||||
v["ddmis"] = d["demand_data_misses"] / sint
|
||||
|
||||
v["ddread"] = v["ddhit"] + v["ddioh"] + v["ddmis"]
|
||||
v["ddh%"] = 100 * v["ddhit"] // v["ddread"] if v["ddread"] > 0 else 0
|
||||
v["ddi%"] = 100 * v["ddioh"] // v["ddread"] if v["ddread"] > 0 else 0
|
||||
v["ddh%"] = 100 * v["ddhit"] / v["ddread"] if v["ddread"] > 0 else 0
|
||||
v["ddi%"] = 100 * v["ddioh"] / v["ddread"] if v["ddread"] > 0 else 0
|
||||
v["ddm%"] = 100 - v["ddh%"] - v["ddi%"] if v["ddread"] > 0 else 0
|
||||
|
||||
v["dmhit"] = d["demand_metadata_hits"] // sint
|
||||
v["dmioh"] = d["demand_metadata_iohits"] // sint
|
||||
v["dmmis"] = d["demand_metadata_misses"] // sint
|
||||
v["dmhit"] = d["demand_metadata_hits"] / sint
|
||||
v["dmioh"] = d["demand_metadata_iohits"] / sint
|
||||
v["dmmis"] = d["demand_metadata_misses"] / sint
|
||||
|
||||
v["dmread"] = v["dmhit"] + v["dmioh"] + v["dmmis"]
|
||||
v["dmh%"] = 100 * v["dmhit"] // v["dmread"] if v["dmread"] > 0 else 0
|
||||
v["dmi%"] = 100 * v["dmioh"] // v["dmread"] if v["dmread"] > 0 else 0
|
||||
v["dmh%"] = 100 * v["dmhit"] / v["dmread"] if v["dmread"] > 0 else 0
|
||||
v["dmi%"] = 100 * v["dmioh"] / v["dmread"] if v["dmread"] > 0 else 0
|
||||
v["dmm%"] = 100 - v["dmh%"] - v["dmi%"] if v["dmread"] > 0 else 0
|
||||
|
||||
v["phit"] = (d["prefetch_data_hits"] + d["prefetch_metadata_hits"]) // sint
|
||||
v["phit"] = (d["prefetch_data_hits"] + d["prefetch_metadata_hits"]) / sint
|
||||
v["pioh"] = (d["prefetch_data_iohits"] +
|
||||
d["prefetch_metadata_iohits"]) // sint
|
||||
d["prefetch_metadata_iohits"]) / sint
|
||||
v["pmis"] = (d["prefetch_data_misses"] +
|
||||
d["prefetch_metadata_misses"]) // sint
|
||||
d["prefetch_metadata_misses"]) / sint
|
||||
|
||||
v["pread"] = v["phit"] + v["pioh"] + v["pmis"]
|
||||
v["ph%"] = 100 * v["phit"] // v["pread"] if v["pread"] > 0 else 0
|
||||
v["pi%"] = 100 * v["pioh"] // v["pread"] if v["pread"] > 0 else 0
|
||||
v["ph%"] = 100 * v["phit"] / v["pread"] if v["pread"] > 0 else 0
|
||||
v["pi%"] = 100 * v["pioh"] / v["pread"] if v["pread"] > 0 else 0
|
||||
v["pm%"] = 100 - v["ph%"] - v["pi%"] if v["pread"] > 0 else 0
|
||||
|
||||
v["pdhit"] = d["prefetch_data_hits"] // sint
|
||||
v["pdioh"] = d["prefetch_data_iohits"] // sint
|
||||
v["pdmis"] = d["prefetch_data_misses"] // sint
|
||||
v["pdhit"] = d["prefetch_data_hits"] / sint
|
||||
v["pdioh"] = d["prefetch_data_iohits"] / sint
|
||||
v["pdmis"] = d["prefetch_data_misses"] / sint
|
||||
|
||||
v["pdread"] = v["pdhit"] + v["pdioh"] + v["pdmis"]
|
||||
v["pdh%"] = 100 * v["pdhit"] // v["pdread"] if v["pdread"] > 0 else 0
|
||||
v["pdi%"] = 100 * v["pdioh"] // v["pdread"] if v["pdread"] > 0 else 0
|
||||
v["pdh%"] = 100 * v["pdhit"] / v["pdread"] if v["pdread"] > 0 else 0
|
||||
v["pdi%"] = 100 * v["pdioh"] / v["pdread"] if v["pdread"] > 0 else 0
|
||||
v["pdm%"] = 100 - v["pdh%"] - v["pdi%"] if v["pdread"] > 0 else 0
|
||||
|
||||
v["pmhit"] = d["prefetch_metadata_hits"] // sint
|
||||
v["pmioh"] = d["prefetch_metadata_iohits"] // sint
|
||||
v["pmmis"] = d["prefetch_metadata_misses"] // sint
|
||||
v["pmhit"] = d["prefetch_metadata_hits"] / sint
|
||||
v["pmioh"] = d["prefetch_metadata_iohits"] / sint
|
||||
v["pmmis"] = d["prefetch_metadata_misses"] / sint
|
||||
|
||||
v["pmread"] = v["pmhit"] + v["pmioh"] + v["pmmis"]
|
||||
v["pmh%"] = 100 * v["pmhit"] // v["pmread"] if v["pmread"] > 0 else 0
|
||||
v["pmi%"] = 100 * v["pmioh"] // v["pmread"] if v["pmread"] > 0 else 0
|
||||
v["pmh%"] = 100 * v["pmhit"] / v["pmread"] if v["pmread"] > 0 else 0
|
||||
v["pmi%"] = 100 * v["pmioh"] / v["pmread"] if v["pmread"] > 0 else 0
|
||||
v["pmm%"] = 100 - v["pmh%"] - v["pmi%"] if v["pmread"] > 0 else 0
|
||||
|
||||
v["mhit"] = (d["prefetch_metadata_hits"] +
|
||||
d["demand_metadata_hits"]) // sint
|
||||
d["demand_metadata_hits"]) / sint
|
||||
v["mioh"] = (d["prefetch_metadata_iohits"] +
|
||||
d["demand_metadata_iohits"]) // sint
|
||||
d["demand_metadata_iohits"]) / sint
|
||||
v["mmis"] = (d["prefetch_metadata_misses"] +
|
||||
d["demand_metadata_misses"]) // sint
|
||||
d["demand_metadata_misses"]) / sint
|
||||
|
||||
v["mread"] = v["mhit"] + v["mioh"] + v["mmis"]
|
||||
v["mh%"] = 100 * v["mhit"] // v["mread"] if v["mread"] > 0 else 0
|
||||
v["mi%"] = 100 * v["mioh"] // v["mread"] if v["mread"] > 0 else 0
|
||||
v["mh%"] = 100 * v["mhit"] / v["mread"] if v["mread"] > 0 else 0
|
||||
v["mi%"] = 100 * v["mioh"] / v["mread"] if v["mread"] > 0 else 0
|
||||
v["mm%"] = 100 - v["mh%"] - v["mi%"] if v["mread"] > 0 else 0
|
||||
|
||||
v["arcsz"] = cur["size"]
|
||||
v["size"] = cur["size"]
|
||||
v["c"] = cur["c"]
|
||||
v["mfu"] = d["mfu_hits"] // sint
|
||||
v["mru"] = d["mru_hits"] // sint
|
||||
v["mrug"] = d["mru_ghost_hits"] // sint
|
||||
v["mfug"] = d["mfu_ghost_hits"] // sint
|
||||
v["unc"] = d["uncached_hits"] // sint
|
||||
v["eskip"] = d["evict_skip"] // sint
|
||||
v["el2skip"] = d["evict_l2_skip"] // sint
|
||||
v["el2cach"] = d["evict_l2_cached"] // sint
|
||||
v["el2el"] = d["evict_l2_eligible"] // sint
|
||||
v["el2mfu"] = d["evict_l2_eligible_mfu"] // sint
|
||||
v["el2mru"] = d["evict_l2_eligible_mru"] // sint
|
||||
v["el2inel"] = d["evict_l2_ineligible"] // sint
|
||||
v["mtxmis"] = d["mutex_miss"] // sint
|
||||
v["mfu"] = d["mfu_hits"] / sint
|
||||
v["mru"] = d["mru_hits"] / sint
|
||||
v["mrug"] = d["mru_ghost_hits"] / sint
|
||||
v["mfug"] = d["mfu_ghost_hits"] / sint
|
||||
v["unc"] = d["uncached_hits"] / sint
|
||||
v["eskip"] = d["evict_skip"] / sint
|
||||
v["el2skip"] = d["evict_l2_skip"] / sint
|
||||
v["el2cach"] = d["evict_l2_cached"] / sint
|
||||
v["el2el"] = d["evict_l2_eligible"] / sint
|
||||
v["el2mfu"] = d["evict_l2_eligible_mfu"] / sint
|
||||
v["el2mru"] = d["evict_l2_eligible_mru"] / sint
|
||||
v["el2inel"] = d["evict_l2_ineligible"] / sint
|
||||
v["mtxmis"] = d["mutex_miss"] / sint
|
||||
v["ztotal"] = (d["zfetch_hits"] + d["zfetch_future"] + d["zfetch_stride"] +
|
||||
d["zfetch_past"] + d["zfetch_misses"]) // sint
|
||||
v["zhits"] = d["zfetch_hits"] // sint
|
||||
v["zahead"] = (d["zfetch_future"] + d["zfetch_stride"]) // sint
|
||||
v["zpast"] = d["zfetch_past"] // sint
|
||||
v["zmisses"] = d["zfetch_misses"] // sint
|
||||
v["zmax"] = d["zfetch_max_streams"] // sint
|
||||
v["zfuture"] = d["zfetch_future"] // sint
|
||||
v["zstride"] = d["zfetch_stride"] // sint
|
||||
v["zissued"] = d["zfetch_io_issued"] // sint
|
||||
v["zactive"] = d["zfetch_io_active"] // sint
|
||||
d["zfetch_past"] + d["zfetch_misses"]) / sint
|
||||
v["zhits"] = d["zfetch_hits"] / sint
|
||||
v["zahead"] = (d["zfetch_future"] + d["zfetch_stride"]) / sint
|
||||
v["zpast"] = d["zfetch_past"] / sint
|
||||
v["zmisses"] = d["zfetch_misses"] / sint
|
||||
v["zmax"] = d["zfetch_max_streams"] / sint
|
||||
v["zfuture"] = d["zfetch_future"] / sint
|
||||
v["zstride"] = d["zfetch_stride"] / sint
|
||||
v["zissued"] = d["zfetch_io_issued"] / sint
|
||||
v["zactive"] = d["zfetch_io_active"] / sint
|
||||
|
||||
# ARC structural breakdown, ARC types breakdown, ARC states breakdown
|
||||
v["cachessz"] = cur["caches_size"]
|
||||
for fs in fieldstats:
|
||||
fields, stats = fs[0], fs[1:]
|
||||
for field, fieldval in fields.items():
|
||||
for group in stats:
|
||||
for stat, statval in group.items():
|
||||
if stat in ["fields", "percent"] or \
|
||||
("fields" in group and field not in group["fields"]):
|
||||
continue
|
||||
colname = field + stat
|
||||
v[colname] = cur[fieldval[0] + statval[0]]
|
||||
if "percent" in group:
|
||||
v[colname + "%"] = 100 * v[colname] / \
|
||||
v[group["percent"]] if v[group["percent"]] > 0 else 0
|
||||
|
||||
if l2exist:
|
||||
v["l2hits"] = d["l2_hits"] // sint
|
||||
v["l2miss"] = d["l2_misses"] // sint
|
||||
l2asize = cur["l2_asize"]
|
||||
v["l2hits"] = d["l2_hits"] / sint
|
||||
v["l2miss"] = d["l2_misses"] / sint
|
||||
v["l2read"] = v["l2hits"] + v["l2miss"]
|
||||
v["l2hit%"] = 100 * v["l2hits"] // v["l2read"] if v["l2read"] > 0 else 0
|
||||
v["l2hit%"] = 100 * v["l2hits"] / v["l2read"] if v["l2read"] > 0 else 0
|
||||
|
||||
v["l2miss%"] = 100 - v["l2hit%"] if v["l2read"] > 0 else 0
|
||||
v["l2asize"] = cur["l2_asize"]
|
||||
v["l2asize"] = l2asize
|
||||
v["l2size"] = cur["l2_size"]
|
||||
v["l2bytes"] = d["l2_read_bytes"] // sint
|
||||
v["l2bytes"] = d["l2_read_bytes"] / sint
|
||||
v["l2wbytes"] = d["l2_write_bytes"] / sint
|
||||
|
||||
v["l2pref"] = cur["l2_prefetch_asize"]
|
||||
v["l2mfu"] = cur["l2_mfu_asize"]
|
||||
v["l2mru"] = cur["l2_mru_asize"]
|
||||
v["l2data"] = cur["l2_bufc_data_asize"]
|
||||
v["l2meta"] = cur["l2_bufc_metadata_asize"]
|
||||
v["l2pref%"] = 100 * v["l2pref"] // v["l2asize"]
|
||||
v["l2mfu%"] = 100 * v["l2mfu"] // v["l2asize"]
|
||||
v["l2mru%"] = 100 * v["l2mru"] // v["l2asize"]
|
||||
v["l2data%"] = 100 * v["l2data"] // v["l2asize"]
|
||||
v["l2meta%"] = 100 * v["l2meta"] // v["l2asize"]
|
||||
v["l2pref%"] = 100 * v["l2pref"] / l2asize if l2asize > 0 else 0
|
||||
v["l2mfu%"] = 100 * v["l2mfu"] / l2asize if l2asize > 0 else 0
|
||||
v["l2mru%"] = 100 * v["l2mru"] / l2asize if l2asize > 0 else 0
|
||||
v["l2data%"] = 100 * v["l2data"] / l2asize if l2asize > 0 else 0
|
||||
v["l2meta%"] = 100 * v["l2meta"] / l2asize if l2asize > 0 else 0
|
||||
|
||||
v["grow"] = 0 if cur["arc_no_grow"] else 1
|
||||
v["need"] = cur["arc_need_free"]
|
||||
@ -643,6 +767,7 @@ def calculate():
|
||||
|
||||
|
||||
def main():
|
||||
|
||||
global sint
|
||||
global count
|
||||
global hdr_intr
|
@ -1,4 +1,5 @@
|
||||
#!/usr/bin/env python3
|
||||
# SPDX-License-Identifier: BSD-2-Clause
|
||||
#
|
||||
# Copyright (c) 2008 Ben Rockwood <benr@cuddletech.com>,
|
||||
# Copyright (c) 2010 Martin Matuska <mm@FreeBSD.org>,
|
||||
@ -33,7 +34,7 @@ Provides basic information on the ARC, its efficiency, the L2ARC (if present),
|
||||
the Data Management Unit (DMU), Virtual Devices (VDEVs), and tunables. See
|
||||
the in-source documentation and code at
|
||||
https://github.com/openzfs/zfs/blob/master/module/zfs/arc.c for details.
|
||||
The original introduction to arc_summary can be found at
|
||||
The original introduction to zarcsummary can be found at
|
||||
http://cuddletech.com/?p=454
|
||||
"""
|
||||
|
||||
@ -160,7 +161,7 @@ elif sys.platform.startswith('linux'):
|
||||
return get_params(TUNABLES_PATH)
|
||||
|
||||
def get_version_impl(request):
|
||||
# The original arc_summary called /sbin/modinfo/{spl,zfs} to get
|
||||
# The original zarcsummary called /sbin/modinfo/{spl,zfs} to get
|
||||
# the version information. We switch to /sys/module/{spl,zfs}/version
|
||||
# to make sure we get what is really loaded in the kernel
|
||||
try:
|
||||
@ -260,33 +261,34 @@ def draw_graph(kstats_dict):
|
||||
arc_stats = isolate_section('arcstats', kstats_dict)
|
||||
|
||||
GRAPH_INDENT = ' '*4
|
||||
GRAPH_WIDTH = 60
|
||||
GRAPH_WIDTH = 70
|
||||
arc_max = int(arc_stats['c_max'])
|
||||
arc_size = f_bytes(arc_stats['size'])
|
||||
arc_perc = f_perc(arc_stats['size'], arc_stats['c_max'])
|
||||
mfu_size = f_bytes(arc_stats['mfu_size'])
|
||||
mru_size = f_bytes(arc_stats['mru_size'])
|
||||
meta_size = f_bytes(arc_stats['arc_meta_used'])
|
||||
dnode_limit = f_bytes(arc_stats['arc_dnode_limit'])
|
||||
arc_perc = f_perc(arc_stats['size'], arc_max)
|
||||
data_size = f_bytes(arc_stats['data_size'])
|
||||
meta_size = f_bytes(arc_stats['metadata_size'])
|
||||
dnode_size = f_bytes(arc_stats['dnode_size'])
|
||||
|
||||
info_form = ('ARC: {0} ({1}) MFU: {2} MRU: {3} META: {4} '
|
||||
'DNODE {5} ({6})')
|
||||
info_line = info_form.format(arc_size, arc_perc, mfu_size, mru_size,
|
||||
meta_size, dnode_size, dnode_limit)
|
||||
info_form = ('ARC: {0} ({1}) Data: {2} Meta: {3} Dnode: {4}')
|
||||
info_line = info_form.format(arc_size, arc_perc, data_size, meta_size,
|
||||
dnode_size)
|
||||
info_spc = ' '*int((GRAPH_WIDTH-len(info_line))/2)
|
||||
info_line = GRAPH_INDENT+info_spc+info_line
|
||||
|
||||
graph_line = GRAPH_INDENT+'+'+('-'*(GRAPH_WIDTH-2))+'+'
|
||||
|
||||
mfu_perc = float(int(arc_stats['mfu_size'])/int(arc_stats['c_max']))
|
||||
mru_perc = float(int(arc_stats['mru_size'])/int(arc_stats['c_max']))
|
||||
arc_perc = float(int(arc_stats['size'])/int(arc_stats['c_max']))
|
||||
arc_perc = float(int(arc_stats['size'])/arc_max)
|
||||
data_perc = float(int(arc_stats['data_size'])/arc_max)
|
||||
meta_perc = float(int(arc_stats['metadata_size'])/arc_max)
|
||||
dnode_perc = float(int(arc_stats['dnode_size'])/arc_max)
|
||||
total_ticks = float(arc_perc)*GRAPH_WIDTH
|
||||
mfu_ticks = mfu_perc*GRAPH_WIDTH
|
||||
mru_ticks = mru_perc*GRAPH_WIDTH
|
||||
other_ticks = total_ticks-(mfu_ticks+mru_ticks)
|
||||
data_ticks = data_perc*GRAPH_WIDTH
|
||||
meta_ticks = meta_perc*GRAPH_WIDTH
|
||||
dnode_ticks = dnode_perc*GRAPH_WIDTH
|
||||
other_ticks = total_ticks-(data_ticks+meta_ticks+dnode_ticks)
|
||||
|
||||
core_form = 'F'*int(mfu_ticks)+'R'*int(mru_ticks)+'O'*int(other_ticks)
|
||||
core_form = 'D'*int(data_ticks)+'M'*int(meta_ticks)+'N'*int(dnode_ticks)+\
|
||||
'O'*int(other_ticks)
|
||||
core_spc = ' '*(GRAPH_WIDTH-(2+len(core_form)))
|
||||
core_line = GRAPH_INDENT+'|'+core_form+core_spc+'|'
|
||||
|
||||
@ -437,7 +439,7 @@ def print_header():
|
||||
"""
|
||||
|
||||
# datetime is now recommended over time but we keep the exact formatting
|
||||
# from the older version of arc_summary in case there are scripts
|
||||
# from the older version of zarcsummary in case there are scripts
|
||||
# that expect it in this way
|
||||
daydate = time.strftime(DATE_FORMAT)
|
||||
spc_date = LINE_LENGTH-len(daydate)
|
||||
@ -536,95 +538,45 @@ def section_arc(kstats_dict):
|
||||
|
||||
arc_stats = isolate_section('arcstats', kstats_dict)
|
||||
|
||||
throttle = arc_stats['memory_throttle_count']
|
||||
|
||||
if throttle == '0':
|
||||
health = 'HEALTHY'
|
||||
else:
|
||||
health = 'THROTTLED'
|
||||
|
||||
prt_1('ARC status:', health)
|
||||
prt_i1('Memory throttle count:', throttle)
|
||||
print()
|
||||
|
||||
memory_all = arc_stats['memory_all_bytes']
|
||||
memory_free = arc_stats['memory_free_bytes']
|
||||
memory_avail = arc_stats['memory_available_bytes']
|
||||
arc_size = arc_stats['size']
|
||||
arc_target_size = arc_stats['c']
|
||||
arc_max = arc_stats['c_max']
|
||||
arc_min = arc_stats['c_min']
|
||||
meta = arc_stats['meta']
|
||||
pd = arc_stats['pd']
|
||||
pm = arc_stats['pm']
|
||||
anon_data = arc_stats['anon_data']
|
||||
anon_metadata = arc_stats['anon_metadata']
|
||||
mfu_data = arc_stats['mfu_data']
|
||||
mfu_metadata = arc_stats['mfu_metadata']
|
||||
mru_data = arc_stats['mru_data']
|
||||
mru_metadata = arc_stats['mru_metadata']
|
||||
mfug_data = arc_stats['mfu_ghost_data']
|
||||
mfug_metadata = arc_stats['mfu_ghost_metadata']
|
||||
mrug_data = arc_stats['mru_ghost_data']
|
||||
mrug_metadata = arc_stats['mru_ghost_metadata']
|
||||
unc_data = arc_stats['uncached_data']
|
||||
unc_metadata = arc_stats['uncached_metadata']
|
||||
bonus_size = arc_stats['bonus_size']
|
||||
dnode_limit = arc_stats['arc_dnode_limit']
|
||||
|
||||
print('ARC status:')
|
||||
prt_i1('Total memory size:', f_bytes(memory_all))
|
||||
prt_i2('Min target size:', f_perc(arc_min, memory_all), f_bytes(arc_min))
|
||||
prt_i2('Max target size:', f_perc(arc_max, memory_all), f_bytes(arc_max))
|
||||
prt_i2('Target size (adaptive):',
|
||||
f_perc(arc_size, arc_max), f_bytes(arc_target_size))
|
||||
prt_i2('Current size:', f_perc(arc_size, arc_max), f_bytes(arc_size))
|
||||
prt_i1('Free memory size:', f_bytes(memory_free))
|
||||
prt_i1('Available memory size:', f_bytes(memory_avail))
|
||||
print()
|
||||
|
||||
compressed_size = arc_stats['compressed_size']
|
||||
uncompressed_size = arc_stats['uncompressed_size']
|
||||
overhead_size = arc_stats['overhead_size']
|
||||
bonus_size = arc_stats['bonus_size']
|
||||
dnode_size = arc_stats['dnode_size']
|
||||
dbuf_size = arc_stats['dbuf_size']
|
||||
hdr_size = arc_stats['hdr_size']
|
||||
l2_hdr_size = arc_stats['l2_hdr_size']
|
||||
abd_chunk_waste_size = arc_stats['abd_chunk_waste_size']
|
||||
target_size_ratio = '{0}:1'.format(int(arc_max) // int(arc_min))
|
||||
|
||||
prt_2('ARC size (current):',
|
||||
f_perc(arc_size, arc_max), f_bytes(arc_size))
|
||||
prt_i2('Target size (adaptive):',
|
||||
f_perc(arc_target_size, arc_max), f_bytes(arc_target_size))
|
||||
prt_i2('Min size (hard limit):',
|
||||
f_perc(arc_min, arc_max), f_bytes(arc_min))
|
||||
prt_i2('Max size (high water):',
|
||||
target_size_ratio, f_bytes(arc_max))
|
||||
caches_size = int(anon_data)+int(anon_metadata)+\
|
||||
int(mfu_data)+int(mfu_metadata)+int(mru_data)+int(mru_metadata)+\
|
||||
int(unc_data)+int(unc_metadata)
|
||||
prt_i2('Anonymous data size:',
|
||||
f_perc(anon_data, caches_size), f_bytes(anon_data))
|
||||
prt_i2('Anonymous metadata size:',
|
||||
f_perc(anon_metadata, caches_size), f_bytes(anon_metadata))
|
||||
s = 4294967296
|
||||
v = (s-int(pd))*(s-int(meta))/s
|
||||
prt_i2('MFU data target:', f_perc(v, s),
|
||||
f_bytes(v / 65536 * caches_size / 65536))
|
||||
prt_i2('MFU data size:',
|
||||
f_perc(mfu_data, caches_size), f_bytes(mfu_data))
|
||||
prt_i1('MFU ghost data size:', f_bytes(mfug_data))
|
||||
v = (s-int(pm))*int(meta)/s
|
||||
prt_i2('MFU metadata target:', f_perc(v, s),
|
||||
f_bytes(v / 65536 * caches_size / 65536))
|
||||
prt_i2('MFU metadata size:',
|
||||
f_perc(mfu_metadata, caches_size), f_bytes(mfu_metadata))
|
||||
prt_i1('MFU ghost metadata size:', f_bytes(mfug_metadata))
|
||||
v = int(pd)*(s-int(meta))/s
|
||||
prt_i2('MRU data target:', f_perc(v, s),
|
||||
f_bytes(v / 65536 * caches_size / 65536))
|
||||
prt_i2('MRU data size:',
|
||||
f_perc(mru_data, caches_size), f_bytes(mru_data))
|
||||
prt_i1('MRU ghost data size:', f_bytes(mrug_data))
|
||||
v = int(pm)*int(meta)/s
|
||||
prt_i2('MRU metadata target:', f_perc(v, s),
|
||||
f_bytes(v / 65536 * caches_size / 65536))
|
||||
prt_i2('MRU metadata size:',
|
||||
f_perc(mru_metadata, caches_size), f_bytes(mru_metadata))
|
||||
prt_i1('MRU ghost metadata size:', f_bytes(mrug_metadata))
|
||||
prt_i2('Uncached data size:',
|
||||
f_perc(unc_data, caches_size), f_bytes(unc_data))
|
||||
prt_i2('Uncached metadata size:',
|
||||
f_perc(unc_metadata, caches_size), f_bytes(unc_metadata))
|
||||
prt_1('ARC structural breakdown (current size):', f_bytes(arc_size))
|
||||
prt_i2('Compressed size:',
|
||||
f_perc(compressed_size, arc_size), f_bytes(compressed_size))
|
||||
prt_i2('Overhead size:',
|
||||
f_perc(overhead_size, arc_size), f_bytes(overhead_size))
|
||||
prt_i2('Bonus size:',
|
||||
f_perc(bonus_size, arc_size), f_bytes(bonus_size))
|
||||
prt_i2('Dnode cache target:',
|
||||
f_perc(dnode_limit, arc_max), f_bytes(dnode_limit))
|
||||
prt_i2('Dnode cache size:',
|
||||
f_perc(dnode_size, dnode_limit), f_bytes(dnode_size))
|
||||
prt_i2('Dnode size:',
|
||||
f_perc(dnode_size, arc_size), f_bytes(dnode_size))
|
||||
prt_i2('Dbuf size:',
|
||||
f_perc(dbuf_size, arc_size), f_bytes(dbuf_size))
|
||||
prt_i2('Header size:',
|
||||
@ -635,11 +587,84 @@ def section_arc(kstats_dict):
|
||||
f_perc(abd_chunk_waste_size, arc_size), f_bytes(abd_chunk_waste_size))
|
||||
print()
|
||||
|
||||
meta = arc_stats['meta']
|
||||
pd = arc_stats['pd']
|
||||
pm = arc_stats['pm']
|
||||
data_size = arc_stats['data_size']
|
||||
metadata_size = arc_stats['metadata_size']
|
||||
anon_data = arc_stats['anon_data']
|
||||
anon_metadata = arc_stats['anon_metadata']
|
||||
mfu_data = arc_stats['mfu_data']
|
||||
mfu_metadata = arc_stats['mfu_metadata']
|
||||
mfu_edata = arc_stats['mfu_evictable_data']
|
||||
mfu_emetadata = arc_stats['mfu_evictable_metadata']
|
||||
mru_data = arc_stats['mru_data']
|
||||
mru_metadata = arc_stats['mru_metadata']
|
||||
mru_edata = arc_stats['mru_evictable_data']
|
||||
mru_emetadata = arc_stats['mru_evictable_metadata']
|
||||
mfug_data = arc_stats['mfu_ghost_data']
|
||||
mfug_metadata = arc_stats['mfu_ghost_metadata']
|
||||
mrug_data = arc_stats['mru_ghost_data']
|
||||
mrug_metadata = arc_stats['mru_ghost_metadata']
|
||||
unc_data = arc_stats['uncached_data']
|
||||
unc_metadata = arc_stats['uncached_metadata']
|
||||
caches_size = int(anon_data)+int(anon_metadata)+\
|
||||
int(mfu_data)+int(mfu_metadata)+int(mru_data)+int(mru_metadata)+\
|
||||
int(unc_data)+int(unc_metadata)
|
||||
|
||||
prt_1('ARC types breakdown (compressed + overhead):', f_bytes(caches_size))
|
||||
prt_i2('Data size:',
|
||||
f_perc(data_size, caches_size), f_bytes(data_size))
|
||||
prt_i2('Metadata size:',
|
||||
f_perc(metadata_size, caches_size), f_bytes(metadata_size))
|
||||
print()
|
||||
|
||||
prt_1('ARC states breakdown (compressed + overhead):', f_bytes(caches_size))
|
||||
prt_i2('Anonymous data size:',
|
||||
f_perc(anon_data, caches_size), f_bytes(anon_data))
|
||||
prt_i2('Anonymous metadata size:',
|
||||
f_perc(anon_metadata, caches_size), f_bytes(anon_metadata))
|
||||
s = 4294967296
|
||||
v = (s-int(pd))*(s-int(meta))/s
|
||||
prt_i2('MFU data target:', f_perc(v, s),
|
||||
f_bytes(v / 65536 * caches_size / 65536))
|
||||
prt_i2('MFU data size:',
|
||||
f_perc(mfu_data, caches_size), f_bytes(mfu_data))
|
||||
prt_i2('MFU evictable data size:',
|
||||
f_perc(mfu_edata, caches_size), f_bytes(mfu_edata))
|
||||
prt_i1('MFU ghost data size:', f_bytes(mfug_data))
|
||||
v = (s-int(pm))*int(meta)/s
|
||||
prt_i2('MFU metadata target:', f_perc(v, s),
|
||||
f_bytes(v / 65536 * caches_size / 65536))
|
||||
prt_i2('MFU metadata size:',
|
||||
f_perc(mfu_metadata, caches_size), f_bytes(mfu_metadata))
|
||||
prt_i2('MFU evictable metadata size:',
|
||||
f_perc(mfu_emetadata, caches_size), f_bytes(mfu_emetadata))
|
||||
prt_i1('MFU ghost metadata size:', f_bytes(mfug_metadata))
|
||||
v = int(pd)*(s-int(meta))/s
|
||||
prt_i2('MRU data target:', f_perc(v, s),
|
||||
f_bytes(v / 65536 * caches_size / 65536))
|
||||
prt_i2('MRU data size:',
|
||||
f_perc(mru_data, caches_size), f_bytes(mru_data))
|
||||
prt_i2('MRU evictable data size:',
|
||||
f_perc(mru_edata, caches_size), f_bytes(mru_edata))
|
||||
prt_i1('MRU ghost data size:', f_bytes(mrug_data))
|
||||
v = int(pm)*int(meta)/s
|
||||
prt_i2('MRU metadata target:', f_perc(v, s),
|
||||
f_bytes(v / 65536 * caches_size / 65536))
|
||||
prt_i2('MRU metadata size:',
|
||||
f_perc(mru_metadata, caches_size), f_bytes(mru_metadata))
|
||||
prt_i2('MRU evictable metadata size:',
|
||||
f_perc(mru_emetadata, caches_size), f_bytes(mru_emetadata))
|
||||
prt_i1('MRU ghost metadata size:', f_bytes(mrug_metadata))
|
||||
prt_i2('Uncached data size:',
|
||||
f_perc(unc_data, caches_size), f_bytes(unc_data))
|
||||
prt_i2('Uncached metadata size:',
|
||||
f_perc(unc_metadata, caches_size), f_bytes(unc_metadata))
|
||||
print()
|
||||
|
||||
print('ARC hash breakdown:')
|
||||
prt_i1('Elements max:', f_hits(arc_stats['hash_elements_max']))
|
||||
prt_i2('Elements current:',
|
||||
f_perc(arc_stats['hash_elements'], arc_stats['hash_elements_max']),
|
||||
f_hits(arc_stats['hash_elements']))
|
||||
prt_i1('Elements:', f_hits(arc_stats['hash_elements']))
|
||||
prt_i1('Collisions:', f_hits(arc_stats['hash_collisions']))
|
||||
|
||||
prt_i1('Chain max:', f_hits(arc_stats['hash_chain_max']))
|
||||
@ -647,6 +672,11 @@ def section_arc(kstats_dict):
|
||||
print()
|
||||
|
||||
print('ARC misc:')
|
||||
prt_i2('Uncompressed size:', f_perc(uncompressed_size, compressed_size),
|
||||
f_bytes(uncompressed_size))
|
||||
prt_i1('Memory throttles:', arc_stats['memory_throttle_count'])
|
||||
prt_i1('Memory direct reclaims:', arc_stats['memory_direct_count'])
|
||||
prt_i1('Memory indirect reclaims:', arc_stats['memory_indirect_count'])
|
||||
prt_i1('Deleted:', f_hits(arc_stats['deleted']))
|
||||
prt_i1('Mutex misses:', f_hits(arc_stats['mutex_miss']))
|
||||
prt_i1('Eviction skips:', f_hits(arc_stats['evict_skip']))
|
@ -1,4 +1,4 @@
|
||||
zdb_CPPFLAGS = $(AM_CPPFLAGS) $(FORCEDEBUG_CPPFLAGS)
|
||||
zdb_CPPFLAGS = $(AM_CPPFLAGS) $(LIBZPOOL_CPPFLAGS)
|
||||
zdb_CFLAGS = $(AM_CFLAGS) $(LIBCRYPTO_CFLAGS)
|
||||
|
||||
sbin_PROGRAMS += zdb
|
||||
@ -10,6 +10,7 @@ zdb_SOURCES = \
|
||||
%D%/zdb_il.c
|
||||
|
||||
zdb_LDADD = \
|
||||
libzdb.la \
|
||||
libzpool.la \
|
||||
libzfs_core.la \
|
||||
libnvpair.la
|
||||
|
1586
cmd/zdb/zdb.c
1586
cmd/zdb/zdb.c
File diff suppressed because it is too large
Load Diff
@ -1,3 +1,4 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
@ -28,6 +29,6 @@
|
||||
#define _ZDB_H
|
||||
|
||||
void dump_intent_log(zilog_t *);
|
||||
extern uint8_t dump_opt[256];
|
||||
extern uint8_t dump_opt[512];
|
||||
|
||||
#endif /* _ZDB_H */
|
||||
|
@ -1,3 +1,4 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
@ -47,8 +48,6 @@
|
||||
|
||||
#include "zdb.h"
|
||||
|
||||
extern uint8_t dump_opt[256];
|
||||
|
||||
static char tab_prefix[4] = "\t\t\t";
|
||||
|
||||
static void
|
||||
@ -64,21 +63,22 @@ static void
|
||||
zil_prt_rec_create(zilog_t *zilog, int txtype, const void *arg)
|
||||
{
|
||||
(void) zilog;
|
||||
const lr_create_t *lr = arg;
|
||||
const lr_create_t *lrc = arg;
|
||||
const _lr_create_t *lr = &lrc->lr_create;
|
||||
time_t crtime = lr->lr_crtime[0];
|
||||
char *name, *link;
|
||||
const char *name, *link;
|
||||
lr_attr_t *lrattr;
|
||||
|
||||
name = (char *)(lr + 1);
|
||||
name = (const char *)&lrc->lr_data[0];
|
||||
|
||||
if (lr->lr_common.lrc_txtype == TX_CREATE_ATTR ||
|
||||
lr->lr_common.lrc_txtype == TX_MKDIR_ATTR) {
|
||||
lrattr = (lr_attr_t *)(lr + 1);
|
||||
lrattr = (lr_attr_t *)&lrc->lr_data[0];
|
||||
name += ZIL_XVAT_SIZE(lrattr->lr_attr_masksize);
|
||||
}
|
||||
|
||||
if (txtype == TX_SYMLINK) {
|
||||
link = name + strlen(name) + 1;
|
||||
link = (const char *)&lrc->lr_data[strlen(name) + 1];
|
||||
(void) printf("%s%s -> %s\n", tab_prefix, name, link);
|
||||
} else if (txtype != TX_MKXATTR) {
|
||||
(void) printf("%s%s\n", tab_prefix, name);
|
||||
@ -103,7 +103,7 @@ zil_prt_rec_remove(zilog_t *zilog, int txtype, const void *arg)
|
||||
const lr_remove_t *lr = arg;
|
||||
|
||||
(void) printf("%sdoid %llu, name %s\n", tab_prefix,
|
||||
(u_longlong_t)lr->lr_doid, (char *)(lr + 1));
|
||||
(u_longlong_t)lr->lr_doid, (const char *)&lr->lr_data[0]);
|
||||
}
|
||||
|
||||
static void
|
||||
@ -114,16 +114,17 @@ zil_prt_rec_link(zilog_t *zilog, int txtype, const void *arg)
|
||||
|
||||
(void) printf("%sdoid %llu, link_obj %llu, name %s\n", tab_prefix,
|
||||
(u_longlong_t)lr->lr_doid, (u_longlong_t)lr->lr_link_obj,
|
||||
(char *)(lr + 1));
|
||||
(const char *)&lr->lr_data[0]);
|
||||
}
|
||||
|
||||
static void
|
||||
zil_prt_rec_rename(zilog_t *zilog, int txtype, const void *arg)
|
||||
{
|
||||
(void) zilog, (void) txtype;
|
||||
const lr_rename_t *lr = arg;
|
||||
char *snm = (char *)(lr + 1);
|
||||
char *tnm = snm + strlen(snm) + 1;
|
||||
const lr_rename_t *lrr = arg;
|
||||
const _lr_rename_t *lr = &lrr->lr_rename;
|
||||
const char *snm = (const char *)&lrr->lr_data[0];
|
||||
const char *tnm = (const char *)&lrr->lr_data[strlen(snm) + 1];
|
||||
|
||||
(void) printf("%ssdoid %llu, tdoid %llu\n", tab_prefix,
|
||||
(u_longlong_t)lr->lr_sdoid, (u_longlong_t)lr->lr_tdoid);
|
||||
@ -173,8 +174,8 @@ zil_prt_rec_write(zilog_t *zilog, int txtype, const void *arg)
|
||||
|
||||
if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
|
||||
(void) printf("%shas blkptr, %s\n", tab_prefix,
|
||||
!BP_IS_HOLE(bp) &&
|
||||
bp->blk_birth >= spa_min_claim_txg(zilog->zl_spa) ?
|
||||
!BP_IS_HOLE(bp) && BP_GET_BIRTH(bp) >=
|
||||
spa_min_claim_txg(zilog->zl_spa) ?
|
||||
"will claim" : "won't claim");
|
||||
print_log_bp(bp, tab_prefix);
|
||||
|
||||
@ -186,7 +187,7 @@ zil_prt_rec_write(zilog_t *zilog, int txtype, const void *arg)
|
||||
(void) printf("%s<hole>\n", tab_prefix);
|
||||
return;
|
||||
}
|
||||
if (bp->blk_birth < zilog->zl_header->zh_claim_txg) {
|
||||
if (BP_GET_BIRTH(bp) < zilog->zl_header->zh_claim_txg) {
|
||||
(void) printf("%s<block already committed>\n",
|
||||
tab_prefix);
|
||||
return;
|
||||
@ -209,7 +210,7 @@ zil_prt_rec_write(zilog_t *zilog, int txtype, const void *arg)
|
||||
|
||||
/* data is stored after the end of the lr_write record */
|
||||
data = abd_alloc(lr->lr_length, B_FALSE);
|
||||
abd_copy_from_buf(data, lr + 1, lr->lr_length);
|
||||
abd_copy_from_buf(data, &lr->lr_data[0], lr->lr_length);
|
||||
}
|
||||
|
||||
(void) printf("%s", tab_prefix);
|
||||
@ -237,8 +238,8 @@ zil_prt_rec_write_enc(zilog_t *zilog, int txtype, const void *arg)
|
||||
|
||||
if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
|
||||
(void) printf("%shas blkptr, %s\n", tab_prefix,
|
||||
!BP_IS_HOLE(bp) &&
|
||||
bp->blk_birth >= spa_min_claim_txg(zilog->zl_spa) ?
|
||||
!BP_IS_HOLE(bp) && BP_GET_BIRTH(bp) >=
|
||||
spa_min_claim_txg(zilog->zl_spa) ?
|
||||
"will claim" : "won't claim");
|
||||
print_log_bp(bp, tab_prefix);
|
||||
}
|
||||
@ -307,7 +308,7 @@ zil_prt_rec_setsaxattr(zilog_t *zilog, int txtype, const void *arg)
|
||||
(void) zilog, (void) txtype;
|
||||
const lr_setsaxattr_t *lr = arg;
|
||||
|
||||
char *name = (char *)(lr + 1);
|
||||
const char *name = (const char *)&lr->lr_data[0];
|
||||
(void) printf("%sfoid %llu\n", tab_prefix,
|
||||
(u_longlong_t)lr->lr_foid);
|
||||
|
||||
@ -316,7 +317,7 @@ zil_prt_rec_setsaxattr(zilog_t *zilog, int txtype, const void *arg)
|
||||
(void) printf("%sXAT_VALUE NULL\n", tab_prefix);
|
||||
} else {
|
||||
(void) printf("%sXAT_VALUE ", tab_prefix);
|
||||
char *val = name + (strlen(name) + 1);
|
||||
const char *val = (const char *)&lr->lr_data[strlen(name) + 1];
|
||||
for (int i = 0; i < lr->lr_size; i++) {
|
||||
(void) printf("%c", *val);
|
||||
val++;
|
||||
@ -473,7 +474,7 @@ print_log_block(zilog_t *zilog, const blkptr_t *bp, void *arg,
|
||||
|
||||
if (claim_txg != 0)
|
||||
claim = "already claimed";
|
||||
else if (bp->blk_birth >= spa_min_claim_txg(zilog->zl_spa))
|
||||
else if (BP_GET_BIRTH(bp) >= spa_min_claim_txg(zilog->zl_spa))
|
||||
claim = "will claim";
|
||||
else
|
||||
claim = "won't claim";
|
||||
|
@ -1,3 +1,4 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
|
@ -1,3 +1,4 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
|
@ -1,3 +1,4 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
|
@ -1,3 +1,4 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
|
@ -1,3 +1,4 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
@ -133,11 +134,13 @@ zfs_agent_iter_vdev(zpool_handle_t *zhp, nvlist_t *nvl, void *arg)
|
||||
* of blkid cache and L2ARC VDEV does not contain pool guid in its
|
||||
* blkid, so this is a special case for L2ARC VDEV.
|
||||
*/
|
||||
else if (gsp->gs_vdev_guid != 0 && gsp->gs_devid == NULL &&
|
||||
else if (gsp->gs_vdev_guid != 0 &&
|
||||
nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_GUID, &vdev_guid) == 0 &&
|
||||
gsp->gs_vdev_guid == vdev_guid) {
|
||||
(void) nvlist_lookup_string(nvl, ZPOOL_CONFIG_DEVID,
|
||||
&gsp->gs_devid);
|
||||
if (gsp->gs_devid == NULL) {
|
||||
(void) nvlist_lookup_string(nvl, ZPOOL_CONFIG_DEVID,
|
||||
&gsp->gs_devid);
|
||||
}
|
||||
(void) nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_EXPANSION_TIME,
|
||||
&gsp->gs_vdev_expandtime);
|
||||
return (B_TRUE);
|
||||
@ -155,22 +158,28 @@ zfs_agent_iter_pool(zpool_handle_t *zhp, void *arg)
|
||||
/*
|
||||
* For each vdev in this pool, look for a match by devid
|
||||
*/
|
||||
if ((config = zpool_get_config(zhp, NULL)) != NULL) {
|
||||
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
|
||||
&nvl) == 0) {
|
||||
(void) zfs_agent_iter_vdev(zhp, nvl, gsp);
|
||||
}
|
||||
}
|
||||
/*
|
||||
* if a match was found then grab the pool guid
|
||||
*/
|
||||
if (gsp->gs_vdev_guid && gsp->gs_devid) {
|
||||
(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
|
||||
&gsp->gs_pool_guid);
|
||||
}
|
||||
boolean_t found = B_FALSE;
|
||||
uint64_t pool_guid;
|
||||
|
||||
/* Get pool configuration and extract pool GUID */
|
||||
if ((config = zpool_get_config(zhp, NULL)) == NULL ||
|
||||
nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
|
||||
&pool_guid) != 0)
|
||||
goto out;
|
||||
|
||||
/* Skip this pool if we're looking for a specific pool */
|
||||
if (gsp->gs_pool_guid != 0 && pool_guid != gsp->gs_pool_guid)
|
||||
goto out;
|
||||
|
||||
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvl) == 0)
|
||||
found = zfs_agent_iter_vdev(zhp, nvl, gsp);
|
||||
|
||||
if (found && gsp->gs_pool_guid == 0)
|
||||
gsp->gs_pool_guid = pool_guid;
|
||||
|
||||
out:
|
||||
zpool_close(zhp);
|
||||
return (gsp->gs_devid != NULL && gsp->gs_vdev_guid != 0);
|
||||
return (found);
|
||||
}
|
||||
|
||||
void
|
||||
@ -232,20 +241,17 @@ zfs_agent_post_event(const char *class, const char *subclass, nvlist_t *nvl)
|
||||
* For multipath, spare and l2arc devices ZFS_EV_VDEV_GUID or
|
||||
* ZFS_EV_POOL_GUID may be missing so find them.
|
||||
*/
|
||||
if (devid == NULL || pool_guid == 0 || vdev_guid == 0) {
|
||||
if (devid == NULL)
|
||||
search.gs_vdev_guid = vdev_guid;
|
||||
else
|
||||
search.gs_devid = devid;
|
||||
zpool_iter(g_zfs_hdl, zfs_agent_iter_pool, &search);
|
||||
if (devid == NULL)
|
||||
devid = search.gs_devid;
|
||||
if (pool_guid == 0)
|
||||
pool_guid = search.gs_pool_guid;
|
||||
if (vdev_guid == 0)
|
||||
vdev_guid = search.gs_vdev_guid;
|
||||
devtype = search.gs_vdev_type;
|
||||
}
|
||||
search.gs_devid = devid;
|
||||
search.gs_vdev_guid = vdev_guid;
|
||||
search.gs_pool_guid = pool_guid;
|
||||
zpool_iter(g_zfs_hdl, zfs_agent_iter_pool, &search);
|
||||
if (devid == NULL)
|
||||
devid = search.gs_devid;
|
||||
if (pool_guid == 0)
|
||||
pool_guid = search.gs_pool_guid;
|
||||
if (vdev_guid == 0)
|
||||
vdev_guid = search.gs_vdev_guid;
|
||||
devtype = search.gs_vdev_type;
|
||||
|
||||
/*
|
||||
* We want to avoid reporting "remove" events coming from
|
||||
|
@ -1,3 +1,4 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
|
@ -1,3 +1,4 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
@ -71,6 +72,7 @@ typedef struct zfs_case_data {
|
||||
uint64_t zc_ena;
|
||||
uint64_t zc_pool_guid;
|
||||
uint64_t zc_vdev_guid;
|
||||
uint64_t zc_parent_guid;
|
||||
int zc_pool_state;
|
||||
char zc_serd_checksum[MAX_SERDLEN];
|
||||
char zc_serd_io[MAX_SERDLEN];
|
||||
@ -181,10 +183,10 @@ zfs_case_unserialize(fmd_hdl_t *hdl, fmd_case_t *cp)
|
||||
}
|
||||
|
||||
/*
|
||||
* count other unique slow-io cases in a pool
|
||||
* Return count of other unique SERD cases under same vdev parent
|
||||
*/
|
||||
static uint_t
|
||||
zfs_other_slow_cases(fmd_hdl_t *hdl, const zfs_case_data_t *zfs_case)
|
||||
zfs_other_serd_cases(fmd_hdl_t *hdl, const zfs_case_data_t *zfs_case)
|
||||
{
|
||||
zfs_case_t *zcp;
|
||||
uint_t cases = 0;
|
||||
@ -206,10 +208,32 @@ zfs_other_slow_cases(fmd_hdl_t *hdl, const zfs_case_data_t *zfs_case)
|
||||
|
||||
for (zcp = uu_list_first(zfs_cases); zcp != NULL;
|
||||
zcp = uu_list_next(zfs_cases, zcp)) {
|
||||
if (zcp->zc_data.zc_pool_guid == zfs_case->zc_pool_guid &&
|
||||
zcp->zc_data.zc_vdev_guid != zfs_case->zc_vdev_guid &&
|
||||
zcp->zc_data.zc_serd_slow_io[0] != '\0' &&
|
||||
fmd_serd_active(hdl, zcp->zc_data.zc_serd_slow_io)) {
|
||||
zfs_case_data_t *zcd = &zcp->zc_data;
|
||||
|
||||
/*
|
||||
* must be same pool and parent vdev but different leaf vdev
|
||||
*/
|
||||
if (zcd->zc_pool_guid != zfs_case->zc_pool_guid ||
|
||||
zcd->zc_parent_guid != zfs_case->zc_parent_guid ||
|
||||
zcd->zc_vdev_guid == zfs_case->zc_vdev_guid) {
|
||||
continue;
|
||||
}
|
||||
|
||||
/*
|
||||
* Check if there is another active serd case besides zfs_case
|
||||
*
|
||||
* Only one serd engine will be assigned to the case
|
||||
*/
|
||||
if (zcd->zc_serd_checksum[0] == zfs_case->zc_serd_checksum[0] &&
|
||||
fmd_serd_active(hdl, zcd->zc_serd_checksum)) {
|
||||
cases++;
|
||||
}
|
||||
if (zcd->zc_serd_io[0] == zfs_case->zc_serd_io[0] &&
|
||||
fmd_serd_active(hdl, zcd->zc_serd_io)) {
|
||||
cases++;
|
||||
}
|
||||
if (zcd->zc_serd_slow_io[0] == zfs_case->zc_serd_slow_io[0] &&
|
||||
fmd_serd_active(hdl, zcd->zc_serd_slow_io)) {
|
||||
cases++;
|
||||
}
|
||||
}
|
||||
@ -502,6 +526,34 @@ zfs_ereport_when(fmd_hdl_t *hdl, nvlist_t *nvl, er_timeval_t *when)
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Record the specified event in the SERD engine and return a
|
||||
* boolean value indicating whether or not the engine fired as
|
||||
* the result of inserting this event.
|
||||
*
|
||||
* When the pool has similar active cases on other vdevs, then
|
||||
* the fired state is disregarded and the case is retired.
|
||||
*/
|
||||
static int
|
||||
zfs_fm_serd_record(fmd_hdl_t *hdl, const char *name, fmd_event_t *ep,
|
||||
zfs_case_t *zcp, const char *err_type)
|
||||
{
|
||||
int fired = fmd_serd_record(hdl, name, ep);
|
||||
int peers = 0;
|
||||
|
||||
if (fired && (peers = zfs_other_serd_cases(hdl, &zcp->zc_data)) > 0) {
|
||||
fmd_hdl_debug(hdl, "pool %llu is tracking %d other %s cases "
|
||||
"-- skip faulting the vdev %llu",
|
||||
(u_longlong_t)zcp->zc_data.zc_pool_guid,
|
||||
peers, err_type,
|
||||
(u_longlong_t)zcp->zc_data.zc_vdev_guid);
|
||||
zfs_case_retire(hdl, zcp);
|
||||
fired = 0;
|
||||
}
|
||||
|
||||
return (fired);
|
||||
}
|
||||
|
||||
/*
|
||||
* Main fmd entry point.
|
||||
*/
|
||||
@ -510,7 +562,7 @@ zfs_fm_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl, const char *class)
|
||||
{
|
||||
zfs_case_t *zcp, *dcp;
|
||||
int32_t pool_state;
|
||||
uint64_t ena, pool_guid, vdev_guid;
|
||||
uint64_t ena, pool_guid, vdev_guid, parent_guid;
|
||||
uint64_t checksum_n, checksum_t;
|
||||
uint64_t io_n, io_t;
|
||||
er_timeval_t pool_load;
|
||||
@ -600,6 +652,9 @@ zfs_fm_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl, const char *class)
|
||||
if (nvlist_lookup_uint64(nvl,
|
||||
FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID, &vdev_guid) != 0)
|
||||
vdev_guid = 0;
|
||||
if (nvlist_lookup_uint64(nvl,
|
||||
FM_EREPORT_PAYLOAD_ZFS_PARENT_GUID, &parent_guid) != 0)
|
||||
parent_guid = 0;
|
||||
if (nvlist_lookup_uint64(nvl, FM_EREPORT_ENA, &ena) != 0)
|
||||
ena = 0;
|
||||
|
||||
@ -710,6 +765,7 @@ zfs_fm_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl, const char *class)
|
||||
data.zc_ena = ena;
|
||||
data.zc_pool_guid = pool_guid;
|
||||
data.zc_vdev_guid = vdev_guid;
|
||||
data.zc_parent_guid = parent_guid;
|
||||
data.zc_pool_state = (int)pool_state;
|
||||
|
||||
fmd_buf_write(hdl, cs, CASE_DATA, &data, sizeof (data));
|
||||
@ -844,7 +900,6 @@ zfs_fm_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl, const char *class)
|
||||
const char *failmode = NULL;
|
||||
boolean_t checkremove = B_FALSE;
|
||||
uint32_t pri = 0;
|
||||
int32_t flags = 0;
|
||||
|
||||
/*
|
||||
* If this is a checksum or I/O error, then toss it into the
|
||||
@ -873,8 +928,10 @@ zfs_fm_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl, const char *class)
|
||||
SEC2NSEC(io_t));
|
||||
zfs_case_serialize(zcp);
|
||||
}
|
||||
if (fmd_serd_record(hdl, zcp->zc_data.zc_serd_io, ep))
|
||||
if (zfs_fm_serd_record(hdl, zcp->zc_data.zc_serd_io,
|
||||
ep, zcp, "io error")) {
|
||||
checkremove = B_TRUE;
|
||||
}
|
||||
} else if (fmd_nvl_class_match(hdl, nvl,
|
||||
ZFS_MAKE_EREPORT(FM_EREPORT_ZFS_DELAY))) {
|
||||
uint64_t slow_io_n, slow_io_t;
|
||||
@ -900,40 +957,35 @@ zfs_fm_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl, const char *class)
|
||||
}
|
||||
/* Pass event to SERD engine and see if this triggers */
|
||||
if (zcp->zc_data.zc_serd_slow_io[0] != '\0' &&
|
||||
fmd_serd_record(hdl, zcp->zc_data.zc_serd_slow_io,
|
||||
ep)) {
|
||||
/*
|
||||
* Ignore a slow io diagnosis when other
|
||||
* VDEVs in the pool show signs of being slow.
|
||||
*/
|
||||
if (zfs_other_slow_cases(hdl, &zcp->zc_data)) {
|
||||
zfs_case_retire(hdl, zcp);
|
||||
fmd_hdl_debug(hdl, "pool %llu has "
|
||||
"multiple slow io cases -- skip "
|
||||
"degrading vdev %llu",
|
||||
(u_longlong_t)
|
||||
zcp->zc_data.zc_pool_guid,
|
||||
(u_longlong_t)
|
||||
zcp->zc_data.zc_vdev_guid);
|
||||
} else {
|
||||
zfs_case_solve(hdl, zcp,
|
||||
"fault.fs.zfs.vdev.slow_io");
|
||||
}
|
||||
zfs_fm_serd_record(hdl,
|
||||
zcp->zc_data.zc_serd_slow_io, ep, zcp, "slow io")) {
|
||||
zfs_case_solve(hdl, zcp,
|
||||
"fault.fs.zfs.vdev.slow_io");
|
||||
}
|
||||
} else if (fmd_nvl_class_match(hdl, nvl,
|
||||
ZFS_MAKE_EREPORT(FM_EREPORT_ZFS_CHECKSUM))) {
|
||||
uint64_t flags = 0;
|
||||
int32_t flags32 = 0;
|
||||
/*
|
||||
* We ignore ereports for checksum errors generated by
|
||||
* scrub/resilver I/O to avoid potentially further
|
||||
* degrading the pool while it's being repaired.
|
||||
*
|
||||
* Note that FM_EREPORT_PAYLOAD_ZFS_ZIO_FLAGS used to
|
||||
* be int32. To allow newer zed to work on older
|
||||
* kernels, if we don't find the flags, we look for
|
||||
* the older ones too.
|
||||
*/
|
||||
if (((nvlist_lookup_uint32(nvl,
|
||||
FM_EREPORT_PAYLOAD_ZFS_ZIO_PRIORITY, &pri) == 0) &&
|
||||
(pri == ZIO_PRIORITY_SCRUB ||
|
||||
pri == ZIO_PRIORITY_REBUILD)) ||
|
||||
((nvlist_lookup_int32(nvl,
|
||||
((nvlist_lookup_uint64(nvl,
|
||||
FM_EREPORT_PAYLOAD_ZFS_ZIO_FLAGS, &flags) == 0) &&
|
||||
(flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER)))) {
|
||||
(flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER))) ||
|
||||
((nvlist_lookup_int32(nvl,
|
||||
FM_EREPORT_PAYLOAD_ZFS_ZIO_FLAGS, &flags32) == 0) &&
|
||||
(flags32 & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER)))) {
|
||||
fmd_hdl_debug(hdl, "ignoring '%s' for "
|
||||
"scrub/resilver I/O", class);
|
||||
return;
|
||||
@ -959,8 +1011,9 @@ zfs_fm_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl, const char *class)
|
||||
SEC2NSEC(checksum_t));
|
||||
zfs_case_serialize(zcp);
|
||||
}
|
||||
if (fmd_serd_record(hdl,
|
||||
zcp->zc_data.zc_serd_checksum, ep)) {
|
||||
if (zfs_fm_serd_record(hdl,
|
||||
zcp->zc_data.zc_serd_checksum, ep, zcp,
|
||||
"checksum")) {
|
||||
zfs_case_solve(hdl, zcp,
|
||||
"fault.fs.zfs.vdev.checksum");
|
||||
}
|
||||
|
@ -1,3 +1,4 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
@ -214,6 +215,7 @@ zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t labeled)
|
||||
vdev_stat_t *vs;
|
||||
char **lines = NULL;
|
||||
int lines_cnt = 0;
|
||||
int rc;
|
||||
|
||||
/*
|
||||
* Get the persistent path, typically under the '/dev/disk/by-id' or
|
||||
@ -405,17 +407,17 @@ zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t labeled)
|
||||
}
|
||||
|
||||
nvlist_lookup_string(vdev, "new_devid", &new_devid);
|
||||
|
||||
if (is_mpath_wholedisk) {
|
||||
/* Don't label device mapper or multipath disks. */
|
||||
zed_log_msg(LOG_INFO,
|
||||
" it's a multipath wholedisk, don't label");
|
||||
if (zpool_prepare_disk(zhp, vdev, "autoreplace", &lines,
|
||||
&lines_cnt) != 0) {
|
||||
rc = zpool_prepare_disk(zhp, vdev, "autoreplace", &lines,
|
||||
&lines_cnt);
|
||||
if (rc != 0) {
|
||||
zed_log_msg(LOG_INFO,
|
||||
" zpool_prepare_disk: could not "
|
||||
"prepare '%s' (%s)", fullpath,
|
||||
libzfs_error_description(g_zfshdl));
|
||||
"prepare '%s' (%s), path '%s', rc = %d", fullpath,
|
||||
libzfs_error_description(g_zfshdl), path, rc);
|
||||
if (lines_cnt > 0) {
|
||||
zed_log_msg(LOG_INFO,
|
||||
" zfs_prepare_disk output:");
|
||||
@ -446,12 +448,13 @@ zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t labeled)
|
||||
* If this is a request to label a whole disk, then attempt to
|
||||
* write out the label.
|
||||
*/
|
||||
if (zpool_prepare_and_label_disk(g_zfshdl, zhp, leafname,
|
||||
vdev, "autoreplace", &lines, &lines_cnt) != 0) {
|
||||
rc = zpool_prepare_and_label_disk(g_zfshdl, zhp, leafname,
|
||||
vdev, "autoreplace", &lines, &lines_cnt);
|
||||
if (rc != 0) {
|
||||
zed_log_msg(LOG_WARNING,
|
||||
" zpool_prepare_and_label_disk: could not "
|
||||
"label '%s' (%s)", leafname,
|
||||
libzfs_error_description(g_zfshdl));
|
||||
"label '%s' (%s), rc = %d", leafname,
|
||||
libzfs_error_description(g_zfshdl), rc);
|
||||
if (lines_cnt > 0) {
|
||||
zed_log_msg(LOG_INFO,
|
||||
" zfs_prepare_disk output:");
|
||||
@ -702,7 +705,7 @@ zfs_enable_ds(void *arg)
|
||||
{
|
||||
unavailpool_t *pool = (unavailpool_t *)arg;
|
||||
|
||||
(void) zpool_enable_datasets(pool->uap_zhp, NULL, 0);
|
||||
(void) zpool_enable_datasets(pool->uap_zhp, NULL, 0, 512);
|
||||
zpool_close(pool->uap_zhp);
|
||||
free(pool);
|
||||
}
|
||||
|
@ -1,3 +1,4 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
@ -403,6 +404,7 @@ zfs_retire_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl,
|
||||
(state == VDEV_STATE_REMOVED || state == VDEV_STATE_FAULTED))) {
|
||||
const char *devtype;
|
||||
char *devname;
|
||||
boolean_t skip_removal = B_FALSE;
|
||||
|
||||
if (nvlist_lookup_string(nvl, FM_EREPORT_PAYLOAD_ZFS_VDEV_TYPE,
|
||||
&devtype) == 0) {
|
||||
@ -440,18 +442,28 @@ zfs_retire_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl,
|
||||
nvlist_lookup_uint64_array(vdev, ZPOOL_CONFIG_VDEV_STATS,
|
||||
(uint64_t **)&vs, &c);
|
||||
|
||||
if (vs->vs_state == VDEV_STATE_OFFLINE)
|
||||
return;
|
||||
|
||||
/*
|
||||
* If state removed is requested for already removed vdev,
|
||||
* its a loopback event from spa_async_remove(). Just
|
||||
* ignore it.
|
||||
*/
|
||||
if (vs->vs_state == VDEV_STATE_REMOVED &&
|
||||
state == VDEV_STATE_REMOVED)
|
||||
return;
|
||||
if ((vs->vs_state == VDEV_STATE_REMOVED &&
|
||||
state == VDEV_STATE_REMOVED)) {
|
||||
if (strcmp(class, "resource.fs.zfs.removed") == 0 &&
|
||||
nvlist_exists(nvl, "by_kernel")) {
|
||||
skip_removal = B_TRUE;
|
||||
} else {
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
/* Remove the vdev since device is unplugged */
|
||||
int remove_status = 0;
|
||||
if (l2arc || (strcmp(class, "resource.fs.zfs.removed") == 0)) {
|
||||
if (!skip_removal && (l2arc ||
|
||||
(strcmp(class, "resource.fs.zfs.removed") == 0))) {
|
||||
remove_status = zpool_vdev_remove_wanted(zhp, devname);
|
||||
fmd_hdl_debug(hdl, "zpool_vdev_remove_wanted '%s'"
|
||||
", err:%d", devname, libzfs_errno(zhdl));
|
||||
|
@ -1,3 +1,4 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* This file is part of the ZFS Event Daemon (ZED).
|
||||
*
|
||||
|
@ -9,17 +9,18 @@ dist_zedexec_SCRIPTS = \
|
||||
%D%/all-debug.sh \
|
||||
%D%/all-syslog.sh \
|
||||
%D%/data-notify.sh \
|
||||
%D%/deadman-sync-slot_off.sh \
|
||||
%D%/generic-notify.sh \
|
||||
%D%/pool_import-led.sh \
|
||||
%D%/pool_import-sync-led.sh \
|
||||
%D%/resilver_finish-notify.sh \
|
||||
%D%/resilver_finish-start-scrub.sh \
|
||||
%D%/scrub_finish-notify.sh \
|
||||
%D%/statechange-led.sh \
|
||||
%D%/statechange-sync-led.sh \
|
||||
%D%/statechange-notify.sh \
|
||||
%D%/statechange-slot_off.sh \
|
||||
%D%/statechange-sync-slot_off.sh \
|
||||
%D%/trim_finish-notify.sh \
|
||||
%D%/vdev_attach-led.sh \
|
||||
%D%/vdev_clear-led.sh
|
||||
%D%/vdev_attach-sync-led.sh \
|
||||
%D%/vdev_clear-sync-led.sh
|
||||
|
||||
nodist_zedexec_SCRIPTS = \
|
||||
%D%/history_event-zfs-list-cacher.sh
|
||||
@ -29,16 +30,17 @@ SUBSTFILES += $(nodist_zedexec_SCRIPTS)
|
||||
zedconfdefaults = \
|
||||
all-syslog.sh \
|
||||
data-notify.sh \
|
||||
deadman-sync-slot_off.sh \
|
||||
history_event-zfs-list-cacher.sh \
|
||||
pool_import-led.sh \
|
||||
pool_import-sync-led.sh \
|
||||
resilver_finish-notify.sh \
|
||||
resilver_finish-start-scrub.sh \
|
||||
scrub_finish-notify.sh \
|
||||
statechange-led.sh \
|
||||
statechange-sync-led.sh \
|
||||
statechange-notify.sh \
|
||||
statechange-slot_off.sh \
|
||||
vdev_attach-led.sh \
|
||||
vdev_clear-led.sh
|
||||
statechange-sync-slot_off.sh \
|
||||
vdev_attach-sync-led.sh \
|
||||
vdev_clear-sync-led.sh
|
||||
|
||||
dist_noinst_DATA += %D%/README
|
||||
|
||||
|
71
cmd/zed/zed.d/deadman-sync-slot_off.sh
Executable file
71
cmd/zed/zed.d/deadman-sync-slot_off.sh
Executable file
@ -0,0 +1,71 @@
|
||||
#!/bin/sh
|
||||
# shellcheck disable=SC3014,SC2154,SC2086,SC2034
|
||||
#
|
||||
# Turn off disk's enclosure slot if an I/O is hung triggering the deadman.
|
||||
#
|
||||
# It's possible for outstanding I/O to a misbehaving SCSI disk to neither
|
||||
# promptly complete or return an error. This can occur due to retry and
|
||||
# recovery actions taken by the SCSI layer, driver, or disk. When it occurs
|
||||
# the pool will be unresponsive even though there may be sufficient redundancy
|
||||
# configured to proceeded without this single disk.
|
||||
#
|
||||
# When a hung I/O is detected by the kmods it will be posted as a deadman
|
||||
# event. By default an I/O is considered to be hung after 5 minutes. This
|
||||
# value can be changed with the zfs_deadman_ziotime_ms module parameter.
|
||||
# If ZED_POWER_OFF_ENCLOSURE_SLOT_ON_DEADMAN is set the disk's enclosure
|
||||
# slot will be powered off causing the outstanding I/O to fail. The ZED
|
||||
# will then handle this like a normal disk failure and FAULT the vdev.
|
||||
#
|
||||
# We assume the user will be responsible for turning the slot back on
|
||||
# after replacing the disk.
|
||||
#
|
||||
# Note that this script requires that your enclosure be supported by the
|
||||
# Linux SCSI Enclosure services (SES) driver. The script will do nothing
|
||||
# if you have no enclosure, or if your enclosure isn't supported.
|
||||
#
|
||||
# Exit codes:
|
||||
# 0: slot successfully powered off
|
||||
# 1: enclosure not available
|
||||
# 2: ZED_POWER_OFF_ENCLOSURE_SLOT_ON_DEADMAN disabled
|
||||
# 3: System not configured to wait on deadman
|
||||
# 4: The enclosure sysfs path passed from ZFS does not exist
|
||||
# 5: Enclosure slot didn't actually turn off after we told it to
|
||||
|
||||
[ -f "${ZED_ZEDLET_DIR}/zed.rc" ] && . "${ZED_ZEDLET_DIR}/zed.rc"
|
||||
. "${ZED_ZEDLET_DIR}/zed-functions.sh"
|
||||
|
||||
if [ ! -d /sys/class/enclosure ] ; then
|
||||
# No JBOD enclosure or NVMe slots
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ "${ZED_POWER_OFF_ENCLOSURE_SLOT_ON_DEADMAN}" != "1" ] ; then
|
||||
exit 2
|
||||
fi
|
||||
|
||||
if [ "$ZEVENT_POOL_FAILMODE" != "wait" ] ; then
|
||||
exit 3
|
||||
fi
|
||||
|
||||
if [ ! -f "$ZEVENT_VDEV_ENC_SYSFS_PATH/power_status" ] ; then
|
||||
exit 4
|
||||
fi
|
||||
|
||||
# Turn off the slot and wait for sysfs to report that the slot is off.
|
||||
# It can take ~400ms on some enclosures and multiple retries may be needed.
|
||||
for i in $(seq 1 20) ; do
|
||||
echo "off" | tee "$ZEVENT_VDEV_ENC_SYSFS_PATH/power_status"
|
||||
|
||||
for j in $(seq 1 5) ; do
|
||||
if [ "$(cat $ZEVENT_VDEV_ENC_SYSFS_PATH/power_status)" == "off" ] ; then
|
||||
break 2
|
||||
fi
|
||||
sleep 0.1
|
||||
done
|
||||
done
|
||||
|
||||
if [ "$(cat $ZEVENT_VDEV_ENC_SYSFS_PATH/power_status)" != "off" ] ; then
|
||||
exit 5
|
||||
fi
|
||||
|
||||
zed_log_msg "powered down slot $ZEVENT_VDEV_ENC_SYSFS_PATH for $ZEVENT_VDEV_PATH"
|
@ -1 +0,0 @@
|
||||
statechange-led.sh
|
1
cmd/zed/zed.d/pool_import-sync-led.sh
Symbolic link
1
cmd/zed/zed.d/pool_import-sync-led.sh
Symbolic link
@ -0,0 +1 @@
|
||||
statechange-sync-led.sh
|
@ -1,4 +1,5 @@
|
||||
#!/bin/sh
|
||||
# SPDX-License-Identifier: CDDL-1.0
|
||||
# shellcheck disable=SC2154
|
||||
#
|
||||
# CDDL HEADER START
|
||||
|
@ -1 +0,0 @@
|
||||
statechange-led.sh
|
1
cmd/zed/zed.d/vdev_attach-sync-led.sh
Symbolic link
1
cmd/zed/zed.d/vdev_attach-sync-led.sh
Symbolic link
@ -0,0 +1 @@
|
||||
statechange-sync-led.sh
|
@ -1 +0,0 @@
|
||||
statechange-led.sh
|
1
cmd/zed/zed.d/vdev_clear-sync-led.sh
Symbolic link
1
cmd/zed/zed.d/vdev_clear-sync-led.sh
Symbolic link
@ -0,0 +1 @@
|
||||
statechange-sync-led.sh
|
@ -209,6 +209,10 @@ zed_notify()
|
||||
[ "${rv}" -eq 0 ] && num_success=$((num_success + 1))
|
||||
[ "${rv}" -eq 1 ] && num_failure=$((num_failure + 1))
|
||||
|
||||
zed_notify_gotify "${subject}" "${pathname}"; rv=$?
|
||||
[ "${rv}" -eq 0 ] && num_success=$((num_success + 1))
|
||||
[ "${rv}" -eq 1 ] && num_failure=$((num_failure + 1))
|
||||
|
||||
[ "${num_success}" -gt 0 ] && return 0
|
||||
[ "${num_failure}" -gt 0 ] && return 1
|
||||
return 2
|
||||
@ -279,6 +283,11 @@ zed_notify_email()
|
||||
if [ "${ZED_EMAIL_OPTS%@SUBJECT@*}" = "${ZED_EMAIL_OPTS}" ] ; then
|
||||
# inject subject header
|
||||
printf "Subject: %s\n" "${subject}"
|
||||
# The following empty line is needed to separate the header from the
|
||||
# body of the message. Otherwise programs like sendmail will skip
|
||||
# everything up to the first empty line (or wont send an email at
|
||||
# all) and will still exit with exit code 0
|
||||
printf "\n"
|
||||
fi
|
||||
# output message
|
||||
cat "${pathname}"
|
||||
@ -432,8 +441,9 @@ zed_notify_slack_webhook()
|
||||
"${pathname}")"
|
||||
|
||||
# Construct the JSON message for posting.
|
||||
# shellcheck disable=SC2016
|
||||
#
|
||||
msg_json="$(printf '{"text": "*%s*\\n%s"}' "${subject}" "${msg_body}" )"
|
||||
msg_json="$(printf '{"text": "*%s*\\n```%s```"}' "${subject}" "${msg_body}" )"
|
||||
|
||||
# Send the POST request and check for errors.
|
||||
#
|
||||
@ -624,6 +634,97 @@ zed_notify_ntfy()
|
||||
}
|
||||
|
||||
|
||||
# zed_notify_gotify (subject, pathname)
|
||||
#
|
||||
# Send a notification via Gotify <https://gotify.net/>.
|
||||
# The Gotify URL (ZED_GOTIFY_URL) defines a self-hosted Gotify location.
|
||||
# The Gotify application token (ZED_GOTIFY_APPTOKEN) defines a
|
||||
# Gotify application token which is associated with a message.
|
||||
# The optional Gotify priority value (ZED_GOTIFY_PRIORITY) overrides the
|
||||
# default or configured priority at the Gotify server for the application.
|
||||
#
|
||||
# Requires curl and sed executables to be installed in the standard PATH.
|
||||
#
|
||||
# References
|
||||
# https://gotify.net/docs/index
|
||||
#
|
||||
# Arguments
|
||||
# subject: notification subject
|
||||
# pathname: pathname containing the notification message (OPTIONAL)
|
||||
#
|
||||
# Globals
|
||||
# ZED_GOTIFY_URL
|
||||
# ZED_GOTIFY_APPTOKEN
|
||||
# ZED_GOTIFY_PRIORITY
|
||||
#
|
||||
# Return
|
||||
# 0: notification sent
|
||||
# 1: notification failed
|
||||
# 2: not configured
|
||||
#
|
||||
zed_notify_gotify()
|
||||
{
|
||||
local subject="$1"
|
||||
local pathname="${2:-"/dev/null"}"
|
||||
local msg_body
|
||||
local msg_out
|
||||
local msg_err
|
||||
|
||||
[ -n "${ZED_GOTIFY_URL}" ] && [ -n "${ZED_GOTIFY_APPTOKEN}" ] || return 2
|
||||
local url="${ZED_GOTIFY_URL}/message?token=${ZED_GOTIFY_APPTOKEN}"
|
||||
|
||||
if [ ! -r "${pathname}" ]; then
|
||||
zed_log_err "gotify cannot read \"${pathname}\""
|
||||
return 1
|
||||
fi
|
||||
|
||||
zed_check_cmd "curl" "sed" || return 1
|
||||
|
||||
# Read the message body in.
|
||||
#
|
||||
msg_body="$(cat "${pathname}")"
|
||||
|
||||
if [ -z "${msg_body}" ]
|
||||
then
|
||||
msg_body=$subject
|
||||
subject=""
|
||||
fi
|
||||
|
||||
# Send the POST request and check for errors.
|
||||
#
|
||||
if [ -n "${ZED_GOTIFY_PRIORITY}" ]; then
|
||||
msg_out="$( \
|
||||
curl \
|
||||
--form-string "title=${subject}" \
|
||||
--form-string "message=${msg_body}" \
|
||||
--form-string "priority=${ZED_GOTIFY_PRIORITY}" \
|
||||
"${url}" \
|
||||
2>/dev/null \
|
||||
)"; rv=$?
|
||||
else
|
||||
msg_out="$( \
|
||||
curl \
|
||||
--form-string "title=${subject}" \
|
||||
--form-string "message=${msg_body}" \
|
||||
"${url}" \
|
||||
2>/dev/null \
|
||||
)"; rv=$?
|
||||
fi
|
||||
|
||||
if [ "${rv}" -ne 0 ]; then
|
||||
zed_log_err "curl exit=${rv}"
|
||||
return 1
|
||||
fi
|
||||
msg_err="$(echo "${msg_out}" \
|
||||
| sed -n -e 's/.*"errors" *:.*\[\(.*\)\].*/\1/p')"
|
||||
if [ -n "${msg_err}" ]; then
|
||||
zed_log_err "gotify \"${msg_err}"\"
|
||||
return 1
|
||||
fi
|
||||
return 0
|
||||
}
|
||||
|
||||
|
||||
|
||||
# zed_rate_limit (tag, [interval])
|
||||
#
|
||||
|
@ -148,6 +148,13 @@ ZED_SYSLOG_SUBCLASS_EXCLUDE="history_event"
|
||||
# supports slot power control via sysfs.
|
||||
#ZED_POWER_OFF_ENCLOSURE_SLOT_ON_FAULT=1
|
||||
|
||||
##
|
||||
# Power off the drive's slot in the enclosure if there is a hung I/O which
|
||||
# exceeds the deadman timeout. This can help prevent a single misbehaving
|
||||
# drive from rendering a redundant pool unavailable. This assumes your drive
|
||||
# enclosure fully supports slot power control via sysfs.
|
||||
#ZED_POWER_OFF_ENCLOSURE_SLOT_ON_DEADMAN=1
|
||||
|
||||
##
|
||||
# Ntfy topic
|
||||
# This defines which topic will receive the ntfy notification.
|
||||
@ -169,3 +176,24 @@ ZED_SYSLOG_SUBCLASS_EXCLUDE="history_event"
|
||||
# <https://docs.ntfy.sh/install/>
|
||||
# https://ntfy.sh by default; uncomment to enable an alternative service url.
|
||||
#ZED_NTFY_URL="https://ntfy.sh"
|
||||
|
||||
##
|
||||
# Gotify server URL
|
||||
# This defines a URL that the Gotify call will be directed toward.
|
||||
# <https://gotify.net/docs/index>
|
||||
# Disabled by default; uncomment to enable.
|
||||
#ZED_GOTIFY_URL=""
|
||||
|
||||
##
|
||||
# Gotify application token
|
||||
# This defines a Gotify application token which a message is associated with.
|
||||
# This token is generated when an application is created on the Gotify server.
|
||||
# Disabled by default; uncomment to enable.
|
||||
#ZED_GOTIFY_APPTOKEN=""
|
||||
|
||||
##
|
||||
# Gotify priority (optional)
|
||||
# If defined, this overrides the default priority of the
|
||||
# Gotify application associated with ZED_GOTIFY_APPTOKEN.
|
||||
# Value is an integer 0 and up.
|
||||
#ZED_GOTIFY_PRIORITY=""
|
||||
|
@ -1,3 +1,4 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* This file is part of the ZFS Event Daemon (ZED).
|
||||
*
|
||||
|
@ -1,3 +1,4 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* This file is part of the ZFS Event Daemon (ZED).
|
||||
*
|
||||
|
@ -1,3 +1,4 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* This file is part of the ZFS Event Daemon (ZED).
|
||||
*
|
||||
|
@ -1,3 +1,4 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
@ -139,7 +140,8 @@ dev_event_nvlist(struct udev_device *dev)
|
||||
* is /dev/sda.
|
||||
*/
|
||||
struct udev_device *parent_dev = udev_device_get_parent(dev);
|
||||
if ((value = udev_device_get_sysattr_value(parent_dev, "size"))
|
||||
if (parent_dev != NULL &&
|
||||
(value = udev_device_get_sysattr_value(parent_dev, "size"))
|
||||
!= NULL) {
|
||||
uint64_t numval = DEV_BSIZE;
|
||||
|
||||
|
@ -1,3 +1,4 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
|
@ -1,3 +1,4 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* This file is part of the ZFS Event Daemon (ZED).
|
||||
*
|
||||
@ -109,7 +110,7 @@ zed_event_fini(struct zed_conf *zcp)
|
||||
static void
|
||||
_bump_event_queue_length(void)
|
||||
{
|
||||
int zzlm = -1, wr;
|
||||
int zzlm, wr;
|
||||
char qlen_buf[12] = {0}; /* parameter is int => max "-2147483647\n" */
|
||||
long int qlen, orig_qlen;
|
||||
|
||||
|
@ -1,3 +1,4 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* This file is part of the ZFS Event Daemon (ZED).
|
||||
*
|
||||
|
@ -1,3 +1,4 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* This file is part of the ZFS Event Daemon (ZED).
|
||||
*
|
||||
@ -195,37 +196,29 @@ _nop(int sig)
|
||||
(void) sig;
|
||||
}
|
||||
|
||||
static void *
|
||||
_reap_children(void *arg)
|
||||
static void
|
||||
wait_for_children(boolean_t do_pause, boolean_t wait)
|
||||
{
|
||||
(void) arg;
|
||||
struct launched_process_node node, *pnode;
|
||||
pid_t pid;
|
||||
int status;
|
||||
struct rusage usage;
|
||||
struct sigaction sa = {};
|
||||
|
||||
(void) sigfillset(&sa.sa_mask);
|
||||
(void) sigdelset(&sa.sa_mask, SIGCHLD);
|
||||
(void) pthread_sigmask(SIG_SETMASK, &sa.sa_mask, NULL);
|
||||
|
||||
(void) sigemptyset(&sa.sa_mask);
|
||||
sa.sa_handler = _nop;
|
||||
sa.sa_flags = SA_NOCLDSTOP;
|
||||
(void) sigaction(SIGCHLD, &sa, NULL);
|
||||
int status;
|
||||
struct launched_process_node node, *pnode;
|
||||
|
||||
for (_reap_children_stop = B_FALSE; !_reap_children_stop; ) {
|
||||
(void) pthread_mutex_lock(&_launched_processes_lock);
|
||||
pid = wait4(0, &status, WNOHANG, &usage);
|
||||
|
||||
pid = wait4(0, &status, wait ? 0 : WNOHANG, &usage);
|
||||
if (pid == 0 || pid == (pid_t)-1) {
|
||||
(void) pthread_mutex_unlock(&_launched_processes_lock);
|
||||
if (pid == 0 || errno == ECHILD)
|
||||
pause();
|
||||
else if (errno != EINTR)
|
||||
if ((pid == 0) || (errno == ECHILD)) {
|
||||
if (do_pause)
|
||||
pause();
|
||||
} else if (errno != EINTR)
|
||||
zed_log_msg(LOG_WARNING,
|
||||
"Failed to wait for children: %s",
|
||||
strerror(errno));
|
||||
if (!do_pause)
|
||||
return;
|
||||
|
||||
} else {
|
||||
memset(&node, 0, sizeof (node));
|
||||
node.pid = pid;
|
||||
@ -277,6 +270,25 @@ _reap_children(void *arg)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
static void *
|
||||
_reap_children(void *arg)
|
||||
{
|
||||
(void) arg;
|
||||
struct sigaction sa = {};
|
||||
|
||||
(void) sigfillset(&sa.sa_mask);
|
||||
(void) sigdelset(&sa.sa_mask, SIGCHLD);
|
||||
(void) pthread_sigmask(SIG_SETMASK, &sa.sa_mask, NULL);
|
||||
|
||||
(void) sigemptyset(&sa.sa_mask);
|
||||
sa.sa_handler = _nop;
|
||||
sa.sa_flags = SA_NOCLDSTOP;
|
||||
(void) sigaction(SIGCHLD, &sa, NULL);
|
||||
|
||||
wait_for_children(B_TRUE, B_FALSE);
|
||||
|
||||
return (NULL);
|
||||
}
|
||||
|
||||
@ -305,6 +317,45 @@ zed_exec_fini(void)
|
||||
_reap_children_tid = (pthread_t)-1;
|
||||
}
|
||||
|
||||
/*
|
||||
* Check if the zedlet name indicates if it is a synchronous zedlet
|
||||
*
|
||||
* Synchronous zedlets have a "-sync-" immediately following the event name in
|
||||
* their zedlet filename, like:
|
||||
*
|
||||
* EVENT_NAME-sync-ZEDLETNAME.sh
|
||||
*
|
||||
* For example, if you wanted a synchronous statechange script:
|
||||
*
|
||||
* statechange-sync-myzedlet.sh
|
||||
*
|
||||
* Synchronous zedlets are guaranteed to be the only zedlet running. No other
|
||||
* zedlets may run in parallel with a synchronous zedlet. A synchronous
|
||||
* zedlet will wait for all previously spawned zedlets to finish before running.
|
||||
* Users should be careful to only use synchronous zedlets when needed, since
|
||||
* they decrease parallelism.
|
||||
*/
|
||||
static boolean_t
|
||||
zedlet_is_sync(const char *zedlet, const char *event)
|
||||
{
|
||||
const char *sync_str = "-sync-";
|
||||
size_t sync_str_len;
|
||||
size_t zedlet_len;
|
||||
size_t event_len;
|
||||
|
||||
sync_str_len = strlen(sync_str);
|
||||
zedlet_len = strlen(zedlet);
|
||||
event_len = strlen(event);
|
||||
|
||||
if (event_len + sync_str_len >= zedlet_len)
|
||||
return (B_FALSE);
|
||||
|
||||
if (strncmp(&zedlet[event_len], sync_str, sync_str_len) == 0)
|
||||
return (B_TRUE);
|
||||
|
||||
return (B_FALSE);
|
||||
}
|
||||
|
||||
/*
|
||||
* Process the event [eid] by synchronously invoking all zedlets with a
|
||||
* matching class prefix.
|
||||
@ -367,9 +418,28 @@ zed_exec_process(uint64_t eid, const char *class, const char *subclass,
|
||||
z = zed_strings_next(zcp->zedlets)) {
|
||||
for (csp = class_strings; *csp; csp++) {
|
||||
n = strlen(*csp);
|
||||
if ((strncmp(z, *csp, n) == 0) && !isalpha(z[n]))
|
||||
if ((strncmp(z, *csp, n) == 0) && !isalpha(z[n])) {
|
||||
boolean_t is_sync = zedlet_is_sync(z, *csp);
|
||||
|
||||
if (is_sync) {
|
||||
/*
|
||||
* Wait for previous zedlets to
|
||||
* finish
|
||||
*/
|
||||
wait_for_children(B_FALSE, B_TRUE);
|
||||
}
|
||||
|
||||
_zed_exec_fork_child(eid, zcp->zedlet_dir,
|
||||
z, e, zcp->zevent_fd, zcp->do_foreground);
|
||||
|
||||
if (is_sync) {
|
||||
/*
|
||||
* Wait for sync zedlet we just launched
|
||||
* to finish.
|
||||
*/
|
||||
wait_for_children(B_FALSE, B_TRUE);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
free(e);
|
||||
|
@ -1,3 +1,4 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* This file is part of the ZFS Event Daemon (ZED).
|
||||
*
|
||||
|
@ -1,3 +1,4 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* This file is part of the ZFS Event Daemon (ZED).
|
||||
*
|
||||
|
@ -1,3 +1,4 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* This file is part of the ZFS Event Daemon (ZED).
|
||||
*
|
||||
|
@ -1,3 +1,4 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* This file is part of the ZFS Event Daemon (ZED).
|
||||
*
|
||||
|
@ -1,3 +1,4 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* This file is part of the ZFS Event Daemon (ZED).
|
||||
*
|
||||
|
@ -1,3 +1,4 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* This file is part of the ZFS Event Daemon (ZED).
|
||||
*
|
||||
|
@ -1,3 +1,4 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* This file is part of the ZFS Event Daemon (ZED).
|
||||
*
|
||||
|
@ -1,3 +1,4 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
|
@ -1,3 +1,4 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
|
File diff suppressed because it is too large
Load Diff
@ -1,3 +1,4 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
|
@ -1,3 +1,4 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
|
@ -1,3 +1,4 @@
|
||||
// SPDX-License-Identifier: CDDL-1.0
|
||||
/*
|
||||
* CDDL HEADER START
|
||||
*
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user