Mirror of https://github.com/openzfs/zfs.git, synced 2025-10-02 04:33:38 +00:00

Compare commits: master ... zfs-2.3.0- (22 commits)

Commit SHAs (author and date columns were not captured):
0409c47fe0
b5a3825244
77df762a1b
56871e465a
c645b07eaa
5bc27acf51
7f830d783b
58162960a1
666903610d
26ecd8b993
774dcba86d
09f6b2ebe3
2609d93b65
10f46d2aba
0df10dc911
0fbe9d352c
84f44ec07f
fc9608e2e6
d32c05949a
1ebb6b866f
f019b445f3
03822a61be

.github/workflows/codeql.yml (vendored): 2 lines changed
@@ -11,7 +11,7 @@ concurrency:
 jobs:
 analyze:
 name: Analyze
-runs-on: ubuntu-latest
+runs-on: ubuntu-22.04
 permissions:
 actions: read
 contents: read

.github/workflows/scripts/qemu-1-setup.sh (vendored): 16 lines changed
@@ -18,19 +18,21 @@ ssh-keygen -t ed25519 -f ~/.ssh/id_ed25519 -q -N ""

 # we expect RAM shortage
 cat << EOF | sudo tee /etc/ksmtuned.conf > /dev/null
+# /etc/ksmtuned.conf - Configuration file for ksmtuned
 # https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/7/html/virtualization_tuning_and_optimization_guide/chap-ksm
 KSM_MONITOR_INTERVAL=60

 # Millisecond sleep between ksm scans for 16Gb server.
 # Smaller servers sleep more, bigger sleep less.
-KSM_SLEEP_MSEC=10
+KSM_SLEEP_MSEC=30
-KSM_NPAGES_BOOST=300
-KSM_NPAGES_DECAY=-50
-KSM_NPAGES_MIN=64
-KSM_NPAGES_MAX=2048

-KSM_THRES_COEF=25
-KSM_THRES_CONST=2048
+KSM_NPAGES_BOOST=0
+KSM_NPAGES_DECAY=0
+KSM_NPAGES_MIN=1000
+KSM_NPAGES_MAX=25000
+
+KSM_THRES_COEF=80
+KSM_THRES_CONST=8192

 LOGFILE=/var/log/ksmtuned.log
 DEBUG=1
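
The retuned ksmtuned values above slow the scan cadence (KSM_SLEEP_MSEC=30) but raise the page budget and thresholds, which suits the RAM-short 16 GB runners. A minimal sketch for checking that KSM is actually merging pages on such a host, using the standard /sys/kernel/mm/ksm counters (not part of the workflow itself):

```sh
#!/usr/bin/env bash
# Dump the kernel's KSM counters; a growing pages_sharing value means memory is being deduplicated.
cd /sys/kernel/mm/ksm || exit 1
for f in run pages_shared pages_sharing pages_unshared full_scans; do
    printf '%-16s %s\n' "$f" "$(cat "$f")"
done
```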

.github/workflows/scripts/qemu-2-start.sh (vendored): 32 lines changed
@@ -14,7 +14,7 @@ OSv=$OS

 # compressed with .zst extension
 REPO="https://github.com/mcmilk/openzfs-freebsd-images"
-FREEBSD="$REPO/releases/download/v2024-09-16"
+FREEBSD="$REPO/releases/download/v2024-10-05"
 URLzs=""

 # Ubuntu mirrors
@@ -62,33 +62,45 @@ case "$OS" in
 OSv="fedora39"
 URL="https://download.fedoraproject.org/pub/fedora/linux/releases/40/Cloud/x86_64/images/Fedora-Cloud-Base-Generic.x86_64-40-1.14.qcow2"
 ;;
-freebsd13r)
+freebsd13-3r)
+OSNAME="FreeBSD 13.3-RELEASE"
+OSv="freebsd13.0"
+URLzs="$FREEBSD/amd64-freebsd-13.3-RELEASE.qcow2.zst"
+BASH="/usr/local/bin/bash"
+NIC="rtl8139"
+;;
+freebsd13-4r)
 OSNAME="FreeBSD 13.4-RELEASE"
 OSv="freebsd13.0"
 URLzs="$FREEBSD/amd64-freebsd-13.4-RELEASE.qcow2.zst"
 BASH="/usr/local/bin/bash"
 NIC="rtl8139"
 ;;
-freebsd13)
-OSNAME="FreeBSD 13.4-STABLE"
-OSv="freebsd13.0"
-URLzs="$FREEBSD/amd64-freebsd-13.4-STABLE.qcow2.zst"
+freebsd14-0r)
+OSNAME="FreeBSD 14.0-RELEASE"
+OSv="freebsd14.0"
+URLzs="$FREEBSD/amd64-freebsd-14.0-RELEASE.qcow2.zst"
 BASH="/usr/local/bin/bash"
-NIC="rtl8139"
 ;;
-freebsd14r)
+freebsd14-1r)
 OSNAME="FreeBSD 14.1-RELEASE"
 OSv="freebsd14.0"
 URLzs="$FREEBSD/amd64-freebsd-14.1-RELEASE.qcow2.zst"
 BASH="/usr/local/bin/bash"
 ;;
-freebsd14)
+freebsd13-4s)
+OSNAME="FreeBSD 13.4-STABLE"
+OSv="freebsd13.0"
+URLzs="$FREEBSD/amd64-freebsd-13.4-STABLE.qcow2.zst"
+BASH="/usr/local/bin/bash"
+;;
+freebsd14-1s)
 OSNAME="FreeBSD 14.1-STABLE"
 OSv="freebsd14.0"
 URLzs="$FREEBSD/amd64-freebsd-14.1-STABLE.qcow2.zst"
 BASH="/usr/local/bin/bash"
 ;;
-freebsd15)
+freebsd15-0c)
 OSNAME="FreeBSD 15.0-CURRENT"
 OSv="freebsd14.0"
 URLzs="$FREEBSD/amd64-freebsd-15.0-CURRENT.qcow2.zst"

.github/workflows/scripts/qemu-5-setup.sh (vendored): 17 lines changed
@@ -14,17 +14,21 @@ PID=$(pidof /usr/bin/qemu-system-x86_64)
 tail --pid=$PID -f /dev/null
 sudo virsh undefine openzfs

-# definitions of per operating system
+# default values per test vm:
+VMs=2
+CPU=2
+
+# cpu pinning
+CPUSET=("0,1" "2,3")
+
 case "$OS" in
 freebsd*)
-VMs=2
-CPU=3
+# FreeBSD can't be optimized via ksmtuned
 RAM=6
 ;;
 *)
-VMs=2
-CPU=3
-RAM=7
+# Linux can be optimized via ksmtuned
+RAM=8
 ;;
 esac

@@ -73,6 +77,7 @@ EOF
 --cpu host-passthrough \
 --virt-type=kvm --hvm \
 --vcpus=$CPU,sockets=1 \
+--cpuset=${CPUSET[$((i-1))]} \
 --memory $((1024*RAM)) \
 --memballoon model=virtio \
 --graphics none \
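
The new CPUSET array pins each test VM to its own pair of host cores through virt-install's --cpuset option, so the two guests no longer compete for the same CPUs. A small sketch for verifying the pinning once the guests are defined; the domain names are discovered rather than assumed from the script:

```sh
# Show vCPU-to-host-CPU affinity for each defined libvirt domain.
for dom in $(sudo virsh list --all --name); do
    echo "== $dom =="
    sudo virsh vcpupin "$dom"   # with no extra arguments this prints the current pinning
done
```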

@@ -11,12 +11,10 @@ function output() {
 }

 function outfile() {
-test -s "$1" || return
 cat "$1" >> "out-$logfile.md"
 }

 function outfile_plain() {
-test -s "$1" || return
 output "<pre>"
 cat "$1" >> "out-$logfile.md"
 output "</pre>"
@@ -45,6 +43,8 @@ if [ ! -f out-1.md ]; then
 tar xf "$tarfile"
 test -s env.txt || continue
 source env.txt
+# when uname.txt is there, the other files are also ok
+test -s uname.txt || continue
 output "\n## Functional Tests: $OSNAME\n"
 outfile_plain uname.txt
 outfile_plain summary.txt

.github/workflows/zfs-qemu.yml (vendored): 14 lines changed
@@ -22,8 +22,8 @@ jobs:
 - name: Generate OS config and CI type
 id: os
 run: |
-FULL_OS='["almalinux8", "almalinux9", "centos-stream9", "debian11", "debian12", "fedora39", "fedora40", "freebsd13", "freebsd13r", "freebsd14", "freebsd14r", "ubuntu20", "ubuntu22", "ubuntu24"]'
-QUICK_OS='["almalinux8", "almalinux9", "debian12", "fedora40", "freebsd13", "freebsd14", "ubuntu24"]'
+FULL_OS='["almalinux8", "almalinux9", "centos-stream9", "debian11", "debian12", "fedora39", "fedora40", "freebsd13-4r", "freebsd14-0r", "freebsd14-1s", "ubuntu20", "ubuntu22", "ubuntu24"]'
+QUICK_OS='["almalinux8", "almalinux9", "debian12", "fedora40", "freebsd13-3r", "freebsd14-1r", "ubuntu24"]'
 # determine CI type when running on PR
 ci_type="full"
 if ${{ github.event_name == 'pull_request' }}; then
@@ -46,10 +46,12 @@ jobs:
 strategy:
 fail-fast: false
 matrix:
-# all:
-# os: [almalinux8, almalinux9, archlinux, centos-stream9, fedora39, fedora40, debian11, debian12, freebsd13, freebsd13r, freebsd14, freebsd14r, freebsd15, ubuntu20, ubuntu22, ubuntu24]
-# openzfs:
-# os: [almalinux8, almalinux9, centos-stream9, debian11, debian12, fedora39, fedora40, freebsd13, freebsd13r, freebsd14, freebsd14r, ubuntu20, ubuntu22, ubuntu24]
+# rhl: almalinux8, almalinux9, centos-stream9, fedora39, fedora40
+# debian: debian11, debian12, ubuntu20, ubuntu22, ubuntu24
+# misc: archlinux, tumbleweed
+# FreeBSD Release: freebsd13-3r, freebsd13-4r, freebsd14-0r, freebsd14-1r
+# FreeBSD Stable: freebsd13-4s, freebsd14-1s
+# FreeBSD Current: freebsd15-0c
 os: ${{ fromJson(needs.test-config.outputs.test_os) }}
 runs-on: ubuntu-24.04
 steps:

META: 2 lines changed
@@ -2,7 +2,7 @@ Meta: 1
 Name: zfs
 Branch: 1.0
 Version: 2.3.0
-Release: rc1
+Release: rc2
 Release-Tags: relext
 License: CDDL
 Author: OpenZFS

@@ -1131,7 +1131,7 @@ dump_zap(objset_t *os, uint64_t object, void *data, size_t size)
 !!(zap_getflags(zc.zc_zap) & ZAP_FLAG_UINT64_KEY);

 if (key64)
-(void) printf("\t\t0x%010lx = ",
+(void) printf("\t\t0x%010" PRIu64 "x = ",
 *(uint64_t *)attrp->za_name);
 else
 (void) printf("\t\t%s = ", attrp->za_name);

@@ -2162,6 +2162,7 @@ zfs_do_get(int argc, char **argv)
 cb.cb_type = ZFS_TYPE_DATASET;

 struct option long_options[] = {
+{"json", no_argument, NULL, 'j'},
 {"json-int", no_argument, NULL, ZFS_OPTION_JSON_NUMS_AS_INT},
 {0, 0, 0, 0}
 };
@@ -3852,6 +3853,7 @@ zfs_do_list(int argc, char **argv)
 nvlist_t *data = NULL;

 struct option long_options[] = {
+{"json", no_argument, NULL, 'j'},
 {"json-int", no_argument, NULL, ZFS_OPTION_JSON_NUMS_AS_INT},
 {0, 0, 0, 0}
 };
@@ -7436,9 +7438,15 @@ share_mount(int op, int argc, char **argv)
 uint_t nthr;
 jsobj = data = item = NULL;

+struct option long_options[] = {
+{"json", no_argument, NULL, 'j'},
+{0, 0, 0, 0}
+};
+
 /* check options */
-while ((c = getopt(argc, argv, op == OP_MOUNT ? ":ajRlvo:Of" : "al"))
-!= -1) {
+while ((c = getopt_long(argc, argv,
+op == OP_MOUNT ? ":ajRlvo:Of" : "al",
+op == OP_MOUNT ? long_options : NULL, NULL)) != -1) {
 switch (c) {
 case 'a':
 do_all = 1;
@@ -8374,8 +8382,14 @@ zfs_do_channel_program(int argc, char **argv)
 boolean_t sync_flag = B_TRUE, json_output = B_FALSE;
 zpool_handle_t *zhp;

+struct option long_options[] = {
+{"json", no_argument, NULL, 'j'},
+{0, 0, 0, 0}
+};
+
 /* check options */
-while ((c = getopt(argc, argv, "nt:m:j")) != -1) {
+while ((c = getopt_long(argc, argv, "nt:m:j", long_options,
+NULL)) != -1) {
 switch (c) {
 case 't':
 case 'm': {
@@ -9083,7 +9097,13 @@ zfs_do_version(int argc, char **argv)
 int c;
 nvlist_t *jsobj = NULL, *zfs_ver = NULL;
 boolean_t json = B_FALSE;
-while ((c = getopt(argc, argv, "j")) != -1) {
+struct option long_options[] = {
+{"json", no_argument, NULL, 'j'},
+{0, 0, 0, 0}
+};
+
+while ((c = getopt_long(argc, argv, "j", long_options, NULL)) != -1) {
 switch (c) {
 case 'j':
 json = B_TRUE;
@@ -9187,7 +9207,7 @@ main(int argc, char **argv)
 * Special case '-V|--version'
 */
 if ((strcmp(cmdname, "-V") == 0) || (strcmp(cmdname, "--version") == 0))
-return (zfs_do_version(argc, argv));
+return (zfs_version_print() != 0);

 /*
 * Special case 'help'
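
All of the conversions above swap getopt() for getopt_long() so that every JSON-capable subcommand also accepts a --json long option alongside -j. A hedged usage sketch, assuming a dataset named pool/fs exists:

```sh
# --json is now accepted wherever -j was; --json-int is unchanged where supported.
zfs version --json
zfs mount --json
zfs list --json --json-int pool/fs
zfs get --json all pool/fs | python3 -m json.tool | head
```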

@@ -7340,6 +7340,7 @@ zpool_do_list(int argc, char **argv)
 current_prop_type = ZFS_TYPE_POOL;

 struct option long_options[] = {
+{"json", no_argument, NULL, 'j'},
 {"json-int", no_argument, NULL, ZPOOL_OPTION_JSON_NUMS_AS_INT},
 {"json-pool-key-guid", no_argument, NULL,
 ZPOOL_OPTION_POOL_KEY_GUID},
@@ -9224,6 +9225,12 @@ vdev_stats_nvlist(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv,
 }
 }

+if (cb->cb_print_dio_verify) {
+nice_num_str_nvlist(vds, "dio_verify_errors",
+vs->vs_dio_verify_errors, cb->cb_literal,
+cb->cb_json_as_int, ZFS_NICENUM_1024);
+}
+
 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
 &notpresent) == 0) {
 nice_num_str_nvlist(vds, ZPOOL_CONFIG_NOT_PRESENT,
@@ -10975,6 +10982,7 @@ zpool_do_status(int argc, char **argv)

 struct option long_options[] = {
 {"power", no_argument, NULL, ZPOOL_OPTION_POWER},
+{"json", no_argument, NULL, 'j'},
 {"json-int", no_argument, NULL, ZPOOL_OPTION_JSON_NUMS_AS_INT},
 {"json-flat-vdevs", no_argument, NULL,
 ZPOOL_OPTION_JSON_FLAT_VDEVS},
@@ -12583,6 +12591,7 @@ zpool_do_get(int argc, char **argv)
 current_prop_type = cb.cb_type;

 struct option long_options[] = {
+{"json", no_argument, NULL, 'j'},
 {"json-int", no_argument, NULL, ZPOOL_OPTION_JSON_NUMS_AS_INT},
 {"json-pool-key-guid", no_argument, NULL,
 ZPOOL_OPTION_POOL_KEY_GUID},
@@ -13497,7 +13506,12 @@ zpool_do_version(int argc, char **argv)
 int c;
 nvlist_t *jsobj = NULL, *zfs_ver = NULL;
 boolean_t json = B_FALSE;
-while ((c = getopt(argc, argv, "j")) != -1) {
+struct option long_options[] = {
+{"json", no_argument, NULL, 'j'},
+};
+
+while ((c = getopt_long(argc, argv, "j", long_options, NULL)) != -1) {
 switch (c) {
 case 'j':
 json = B_TRUE;
@@ -13613,7 +13627,7 @@ main(int argc, char **argv)
 * Special case '-V|--version'
 */
 if ((strcmp(cmdname, "-V") == 0) || (strcmp(cmdname, "--version") == 0))
-return (zpool_do_version(argc, argv));
+return (zfs_version_print() != 0);

 /*
 * Special case 'help'

cmd/ztest.c: 19 lines changed
@@ -6717,6 +6717,17 @@ out:
 *
 * Only after a full scrub has been completed is it safe to start injecting
 * data corruption. See the comment in zfs_fault_inject().
+*
+* EBUSY may be returned for the following six cases. It's the callers
+* responsibility to handle them accordingly.
+*
+* Current state                Requested
+* 1. Normal Scrub Running      Normal Scrub or Error Scrub
+* 2. Normal Scrub Paused       Error Scrub
+* 3. Normal Scrub Paused       Pause Normal Scrub
+* 4. Error Scrub Running       Normal Scrub or Error Scrub
+* 5. Error Scrub Paused        Pause Error Scrub
+* 6. Resilvering               Anything else
 */
 static int
 ztest_scrub_impl(spa_t *spa)
@@ -8082,8 +8093,14 @@ ztest_raidz_expand_check(spa_t *spa)
 (void) printf("verifying an interrupted raidz "
 "expansion using a pool scrub ...\n");
 }

 /* Will fail here if there is non-recoverable corruption detected */
-VERIFY0(ztest_scrub_impl(spa));
+int error = ztest_scrub_impl(spa);
+if (error == EBUSY)
+error = 0;
+
+VERIFY0(error);

 if (ztest_opts.zo_verbose >= 1) {
 (void) printf("raidz expansion scrub check complete\n");
 }

@@ -58,9 +58,9 @@ deb-utils: deb-local rpm-utils-initramfs
 pkg1=$${name}-$${version}.$${arch}.rpm; \
 pkg2=libnvpair3-$${version}.$${arch}.rpm; \
 pkg3=libuutil3-$${version}.$${arch}.rpm; \
-pkg4=libzfs5-$${version}.$${arch}.rpm; \
-pkg5=libzpool5-$${version}.$${arch}.rpm; \
-pkg6=libzfs5-devel-$${version}.$${arch}.rpm; \
+pkg4=libzfs6-$${version}.$${arch}.rpm; \
+pkg5=libzpool6-$${version}.$${arch}.rpm; \
+pkg6=libzfs6-devel-$${version}.$${arch}.rpm; \
 pkg7=$${name}-test-$${version}.$${arch}.rpm; \
 pkg8=$${name}-dracut-$${version}.noarch.rpm; \
 pkg9=$${name}-initramfs-$${version}.$${arch}.rpm; \
@@ -72,7 +72,7 @@ deb-utils: deb-local rpm-utils-initramfs
 path_prepend=`mktemp -d /tmp/intercept.XXXXXX`; \
 echo "#!$(SHELL)" > $${path_prepend}/dh_shlibdeps; \
 echo "`which dh_shlibdeps` -- \
--xlibuutil3linux -xlibnvpair3linux -xlibzfs5linux -xlibzpool5linux" \
+-xlibuutil3linux -xlibnvpair3linux -xlibzfs6linux -xlibzpool6linux" \
 >> $${path_prepend}/dh_shlibdeps; \
 ## These -x arguments are passed to dpkg-shlibdeps, which exclude the
 ## Debianized packages from the auto-generated dependencies of the new debs,
@@ -93,13 +93,17 @@ debian:
 cp -r contrib/debian debian; chmod +x debian/rules;

 native-deb-utils: native-deb-local debian
+while [ -f debian/deb-build.lock ]; do sleep 1; done; \
+echo "native-deb-utils" > debian/deb-build.lock; \
 cp contrib/debian/control debian/control; \
-$(DPKGBUILD) -b -rfakeroot -us -uc;
+$(DPKGBUILD) -b -rfakeroot -us -uc; \
+$(RM) -f debian/deb-build.lock

 native-deb-kmod: native-deb-local debian
+while [ -f debian/deb-build.lock ]; do sleep 1; done; \
+echo "native-deb-kmod" > debian/deb-build.lock; \
 sh scripts/make_gitrev.sh; \
-fakeroot debian/rules override_dh_binary-modules;
+fakeroot debian/rules override_dh_binary-modules; \
+$(RM) -f debian/deb-build.lock

 native-deb: native-deb-utils native-deb-kmod

-.NOTPARALLEL: native-deb native-deb-utils native-deb-kmod
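
The new debian/deb-build.lock handshake replaces the removed .NOTPARALLEL line: native-deb-utils and native-deb-kmod both stage into the same debian/ tree, so each recipe now waits for the lock, claims it, builds, and releases it. The same idea in isolation, as a sketch rather than the Makefile itself:

```sh
# Cooperative file lock shared by two independent build steps (sketch only).
wait_and_lock() {
    while [ -f debian/deb-build.lock ]; do sleep 1; done   # wait for the other step
    echo "$1" > debian/deb-build.lock                      # record who holds it
}
wait_and_lock "my-build-step"
# ... run the packaging commands here ...
rm -f debian/deb-build.lock                                # let the next step proceed
```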

@@ -33,7 +33,7 @@ AC_DEFUN([ZFS_AC_CONFIG_USER], [
 ZFS_AC_CONFIG_USER_MAKEDEV_IN_MKDEV
 ZFS_AC_CONFIG_USER_ZFSEXEC

-AC_CHECK_FUNCS([execvpe issetugid mlockall strlcat strlcpy gettid])
+AC_CHECK_FUNCS([execvpe issetugid mlockall strerror_l strlcat strlcpy gettid])

 AC_SUBST(RM)
 ])
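
AC_CHECK_FUNCS defines HAVE_STRERROR_L when the function is found; the zfs_strerror() wrapper shown further down keys off that define to fall back to plain strerror(). A quick, hedged way to see what configure detected (the generated header is assumed to be zfs_config.h in the build tree):

```sh
./configure > /dev/null
grep STRERROR_L zfs_config.h   # expect "#define HAVE_STRERROR_L 1" where the libc provides it
```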

@@ -12,14 +12,14 @@ dist_noinst_DATA += %D%/openzfs-libpam-zfs.postinst
 dist_noinst_DATA += %D%/openzfs-libpam-zfs.prerm
 dist_noinst_DATA += %D%/openzfs-libuutil3.docs
 dist_noinst_DATA += %D%/openzfs-libuutil3.install.in
-dist_noinst_DATA += %D%/openzfs-libzfs4.docs
-dist_noinst_DATA += %D%/openzfs-libzfs4.install.in
+dist_noinst_DATA += %D%/openzfs-libzfs6.docs
+dist_noinst_DATA += %D%/openzfs-libzfs6.install.in
 dist_noinst_DATA += %D%/openzfs-libzfsbootenv1.docs
 dist_noinst_DATA += %D%/openzfs-libzfsbootenv1.install.in
 dist_noinst_DATA += %D%/openzfs-libzfs-dev.docs
 dist_noinst_DATA += %D%/openzfs-libzfs-dev.install.in
-dist_noinst_DATA += %D%/openzfs-libzpool5.docs
-dist_noinst_DATA += %D%/openzfs-libzpool5.install.in
+dist_noinst_DATA += %D%/openzfs-libzpool6.docs
+dist_noinst_DATA += %D%/openzfs-libzpool6.install.in
 dist_noinst_DATA += %D%/openzfs-python3-pyzfs.install
 dist_noinst_DATA += %D%/openzfs-zfs-dkms.config
 dist_noinst_DATA += %D%/openzfs-zfs-dkms.dkms

@@ -6,6 +6,6 @@ contrib/pyzfs/libzfs_core/bindings/__pycache__/
 contrib/pyzfs/pyzfs.egg-info/
 debian/openzfs-libnvpair3.install
 debian/openzfs-libuutil3.install
-debian/openzfs-libzfs4.install
+debian/openzfs-libzfs6.install
 debian/openzfs-libzfs-dev.install
-debian/openzfs-libzpool5.install
+debian/openzfs-libzpool6.install

@@ -78,9 +78,9 @@ Architecture: linux-any
 Depends: libssl-dev | libssl1.0-dev,
 openzfs-libnvpair3 (= ${binary:Version}),
 openzfs-libuutil3 (= ${binary:Version}),
-openzfs-libzfs4 (= ${binary:Version}),
+openzfs-libzfs6 (= ${binary:Version}),
 openzfs-libzfsbootenv1 (= ${binary:Version}),
-openzfs-libzpool5 (= ${binary:Version}),
+openzfs-libzpool6 (= ${binary:Version}),
 ${misc:Depends}
 Replaces: libzfslinux-dev
 Conflicts: libzfslinux-dev
@@ -90,18 +90,18 @@ Description: OpenZFS filesystem development files for Linux
 libraries of OpenZFS filesystem.
 .
 This package includes the development files of libnvpair3, libuutil3,
-libzpool5 and libzfs4.
+libzpool6 and libzfs6.

-Package: openzfs-libzfs4
+Package: openzfs-libzfs6
 Section: contrib/libs
 Architecture: linux-any
 Depends: ${misc:Depends}, ${shlibs:Depends}
 # The libcurl4 is loaded through dlopen("libcurl.so.4").
 # https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=988521
 Recommends: libcurl4
-Breaks: libzfs2, libzfs4
-Replaces: libzfs2, libzfs4, libzfs4linux
-Conflicts: libzfs4linux
+Breaks: libzfs2, libzfs4, libzfs4linux, libzfs6linux
+Replaces: libzfs2, libzfs4, libzfs4linux, libzfs6linux
+Conflicts: libzfs6linux
 Description: OpenZFS filesystem library for Linux - general support
 OpenZFS is a storage platform that encompasses the functionality of
 traditional filesystems and volume managers. It supports data checksums,
@@ -123,13 +123,13 @@ Description: OpenZFS filesystem library for Linux - label info support
 .
 The zfsbootenv library provides support for modifying ZFS label information.

-Package: openzfs-libzpool5
+Package: openzfs-libzpool6
 Section: contrib/libs
 Architecture: linux-any
 Depends: ${misc:Depends}, ${shlibs:Depends}
-Breaks: libzpool2, libzpool5
-Replaces: libzpool2, libzpool5, libzpool5linux
-Conflicts: libzpool5linux
+Breaks: libzpool2, libzpool5, libzpool5linux, libzpool6linux
+Replaces: libzpool2, libzpool5, libzpool5linux, libzpool6linux
+Conflicts: libzpool6linux
 Description: OpenZFS pool library for Linux
 OpenZFS is a storage platform that encompasses the functionality of
 traditional filesystems and volume managers. It supports data checksums,
@@ -246,8 +246,8 @@ Architecture: linux-any
 Pre-Depends: ${misc:Pre-Depends}
 Depends: openzfs-libnvpair3 (= ${binary:Version}),
 openzfs-libuutil3 (= ${binary:Version}),
-openzfs-libzfs4 (= ${binary:Version}),
-openzfs-libzpool5 (= ${binary:Version}),
+openzfs-libzfs6 (= ${binary:Version}),
+openzfs-libzpool6 (= ${binary:Version}),
 python3,
 ${misc:Depends},
 ${shlibs:Depends}

@@ -98,6 +98,7 @@ usr/share/man/man8/zpool-attach.8
 usr/share/man/man8/zpool-checkpoint.8
 usr/share/man/man8/zpool-clear.8
 usr/share/man/man8/zpool-create.8
+usr/share/man/man8/zpool-ddtprune.8
 usr/share/man/man8/zpool-destroy.8
 usr/share/man/man8/zpool-detach.8
 usr/share/man/man8/zpool-ddtprune.8
@@ -113,6 +114,7 @@ usr/share/man/man8/zpool-list.8
 usr/share/man/man8/zpool-offline.8
 usr/share/man/man8/zpool-online.8
 usr/share/man/man8/zpool-prefetch.8
+usr/share/man/man8/zpool-prefetch.8
 usr/share/man/man8/zpool-reguid.8
 usr/share/man/man8/zpool-remove.8
 usr/share/man/man8/zpool-reopen.8

@@ -276,7 +276,11 @@ _LIBZUTIL_H void update_vdev_config_dev_sysfs_path(nvlist_t *nv,
 * Thread-safe strerror() for use in ZFS libraries
 */
 static inline char *zfs_strerror(int errnum) {
+#ifdef HAVE_STRERROR_L
 return (strerror_l(errnum, uselocale(0)));
+#else
+return (strerror(errnum));
+#endif
 }

 #ifdef __cplusplus

@@ -26,8 +26,10 @@
 #ifndef _ABD_OS_H
 #define _ABD_OS_H

+#ifdef _KERNEL
 #include <sys/vm.h>
 #include <vm/vm_page.h>
+#endif

 #ifdef __cplusplus
 extern "C" {
@@ -47,8 +49,10 @@ struct abd_linear {
 #endif
 };

+#ifdef _KERNEL
 __attribute__((malloc))
 struct abd *abd_alloc_from_pages(vm_page_t *, unsigned long, uint64_t);
+#endif

 #ifdef __cplusplus
 }

@@ -42,7 +42,8 @@ extern "C" {
 #define FM_EREPORT_ZFS_DATA "data"
 #define FM_EREPORT_ZFS_DELAY "delay"
 #define FM_EREPORT_ZFS_DEADMAN "deadman"
-#define FM_EREPORT_ZFS_DIO_VERIFY "dio_verify"
+#define FM_EREPORT_ZFS_DIO_VERIFY_WR "dio_verify_wr"
+#define FM_EREPORT_ZFS_DIO_VERIFY_RD "dio_verify_rd"
 #define FM_EREPORT_ZFS_POOL "zpool"
 #define FM_EREPORT_ZFS_DEVICE_UNKNOWN "vdev.unknown"
 #define FM_EREPORT_ZFS_DEVICE_OPEN_FAILED "vdev.open_failed"
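
With the split above, Direct I/O verify failures post either a dio_verify_wr or a dio_verify_rd ereport instead of the old single dio_verify class. A hedged way to watch for them on a running system, assuming the usual ereport.fs.zfs.* event naming:

```sh
# Follow the ZFS event stream and show only Direct I/O checksum verify ereports.
sudo zpool events -f | grep -E 'dio_verify_(rd|wr)'
# Or inspect full details of anything already logged:
sudo zpool events -v | grep -A 20 dio_verify
```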

@@ -57,7 +57,7 @@ void vdev_raidz_reconstruct(struct raidz_map *, const int *, int);
 void vdev_raidz_child_done(zio_t *);
 void vdev_raidz_io_done(zio_t *);
 void vdev_raidz_checksum_error(zio_t *, struct raidz_col *, abd_t *);
-struct raidz_row *vdev_raidz_row_alloc(int);
+struct raidz_row *vdev_raidz_row_alloc(int, zio_t *);
 void vdev_raidz_reflow_copy_scratch(spa_t *);
 void raidz_dtl_reassessed(vdev_t *);

@@ -208,25 +208,25 @@ typedef uint64_t zio_flag_t;
 #define ZIO_FLAG_PROBE (1ULL << 16)
 #define ZIO_FLAG_TRYHARD (1ULL << 17)
 #define ZIO_FLAG_OPTIONAL (1ULL << 18)
+#define ZIO_FLAG_DIO_READ (1ULL << 19)
 #define ZIO_FLAG_VDEV_INHERIT (ZIO_FLAG_DONT_QUEUE - 1)

 /*
 * Flags not inherited by any children.
 */
-#define ZIO_FLAG_DONT_QUEUE (1ULL << 19) /* must be first for INHERIT */
-#define ZIO_FLAG_DONT_PROPAGATE (1ULL << 20)
-#define ZIO_FLAG_IO_BYPASS (1ULL << 21)
-#define ZIO_FLAG_IO_REWRITE (1ULL << 22)
-#define ZIO_FLAG_RAW_COMPRESS (1ULL << 23)
-#define ZIO_FLAG_RAW_ENCRYPT (1ULL << 24)
-#define ZIO_FLAG_GANG_CHILD (1ULL << 25)
-#define ZIO_FLAG_DDT_CHILD (1ULL << 26)
-#define ZIO_FLAG_GODFATHER (1ULL << 27)
-#define ZIO_FLAG_NOPWRITE (1ULL << 28)
-#define ZIO_FLAG_REEXECUTED (1ULL << 29)
-#define ZIO_FLAG_DELEGATED (1ULL << 30)
-#define ZIO_FLAG_DIO_CHKSUM_ERR (1ULL << 31)
+#define ZIO_FLAG_DONT_QUEUE (1ULL << 20) /* must be first for INHERIT */
+#define ZIO_FLAG_DONT_PROPAGATE (1ULL << 21)
+#define ZIO_FLAG_IO_BYPASS (1ULL << 22)
+#define ZIO_FLAG_IO_REWRITE (1ULL << 23)
+#define ZIO_FLAG_RAW_COMPRESS (1ULL << 24)
+#define ZIO_FLAG_RAW_ENCRYPT (1ULL << 25)
+#define ZIO_FLAG_GANG_CHILD (1ULL << 26)
+#define ZIO_FLAG_DDT_CHILD (1ULL << 27)
+#define ZIO_FLAG_GODFATHER (1ULL << 28)
+#define ZIO_FLAG_NOPWRITE (1ULL << 29)
+#define ZIO_FLAG_REEXECUTED (1ULL << 30)
+#define ZIO_FLAG_DELEGATED (1ULL << 31)
+#define ZIO_FLAG_DIO_CHKSUM_ERR (1ULL << 32)

 #define ZIO_ALLOCATOR_NONE (-1)
 #define ZIO_HAS_ALLOCATOR(zio) ((zio)->io_allocator != ZIO_ALLOCATOR_NONE)
@@ -647,6 +647,7 @@ extern void zio_vdev_io_redone(zio_t *zio);
 extern void zio_change_priority(zio_t *pio, zio_priority_t priority);

 extern void zio_checksum_verified(zio_t *zio);
+extern void zio_dio_chksum_verify_error_report(zio_t *zio);
 extern int zio_worst_error(int e1, int e2);

 extern enum zio_checksum zio_checksum_select(enum zio_checksum child,

@@ -92,20 +92,20 @@ zfs_dio_page_aligned(void *buf)
 static inline boolean_t
 zfs_dio_offset_aligned(uint64_t offset, uint64_t blksz)
 {
-return (IS_P2ALIGNED(offset, blksz));
+return ((IS_P2ALIGNED(offset, blksz)) ? B_TRUE : B_FALSE);
 }

 static inline boolean_t
 zfs_dio_size_aligned(uint64_t size, uint64_t blksz)
 {
-return ((size % blksz) == 0);
+return (((size % blksz) == 0) ? B_TRUE : B_FALSE);
 }

 static inline boolean_t
 zfs_dio_aligned(uint64_t offset, uint64_t size, uint64_t blksz)
 {
-return (zfs_dio_offset_aligned(offset, blksz) &&
-zfs_dio_size_aligned(size, blksz));
+return ((zfs_dio_offset_aligned(offset, blksz) &&
+zfs_dio_size_aligned(size, blksz)) ? B_TRUE : B_FALSE);
 }

 static inline void

@@ -70,7 +70,7 @@ if BUILD_FREEBSD
 libzfs_la_LIBADD += -lutil -lgeom
 endif

-libzfs_la_LDFLAGS += -version-info 5:0:1
+libzfs_la_LDFLAGS += -version-info 6:0:0

 pkgconfig_DATA += %D%/libzfs.pc

@@ -1,4 +1,4 @@
-<abi-corpus version='2.0' architecture='elf-amd-x86_64' soname='libzfs.so.4'>
+<abi-corpus version='2.0' architecture='elf-amd-x86_64' soname='libzfs.so.6'>
 <elf-needed>
 <dependency name='libzfs_core.so.3'/>
 <dependency name='libnvpair.so.3'/>

@@ -2796,7 +2796,7 @@ zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func, pool_scrub_cmd_t cmd)
 }

 /*
-* With EBUSY, five cases are possible:
+* With EBUSY, six cases are possible:
 *
 * Current state                Requested
 * 1. Normal Scrub Running      Normal Scrub or Error Scrub

@@ -212,7 +212,7 @@ if BUILD_FREEBSD
 libzpool_la_LIBADD += -lgeom
 endif

-libzpool_la_LDFLAGS += -version-info 5:0:0
+libzpool_la_LDFLAGS += -version-info 6:0:0

 if TARGET_CPU_POWERPC
 module/zfs/libzpool_la-vdev_raidz_math_powerpc_altivec.$(OBJEXT) : CFLAGS += -maltivec

@@ -436,7 +436,7 @@ write.
 It can also help to identify if reported checksum errors are tied to Direct I/O
 writes.
 Each verify error causes a
-.Sy dio_verify
+.Sy dio_verify_wr
 zevent.
 Direct Write I/O checkum verify errors can be seen with
 .Nm zpool Cm status Fl d .

@@ -92,6 +92,11 @@ before a generic mapping for the same slot.
 In this way a custom mapping may be applied to a particular channel
 and a default mapping applied to the others.
 .
+.It Sy zpad_slot Ar digits
+Pad slot numbers with zeros to make them
+.Ar digits
+long, which can help to make disk names a consistent length and easier to sort.
+.
 .It Sy multipath Sy yes Ns | Ns Sy no
 Specifies whether
 .Xr vdev_id 8
@@ -122,7 +127,7 @@ device is connected to.
 The default is
 .Sy 4 .
 .
-.It Sy slot Sy bay Ns | Ns Sy phy Ns | Ns Sy port Ns | Ns Sy id Ns | Ns Sy lun Ns | Ns Sy ses
+.It Sy slot Sy bay Ns | Ns Sy phy Ns | Ns Sy port Ns | Ns Sy id Ns | Ns Sy lun Ns | Ns Sy bay_lun Ns | Ns Sy ses
 Specifies from which element of a SAS identifier the slot number is
 taken.
 The default is
@@ -138,6 +143,9 @@ use the SAS port as the slot number.
 use the scsi id as the slot number.
 .It Sy lun
 use the scsi lun as the slot number.
+.It Sy bay_lun
+read the slot number from the bay identifier and append the lun number.
+Useful for multi-lun multi-actuator hard drives.
 .It Sy ses
 use the SCSI Enclosure Services (SES) enclosure device slot number,
 as reported by
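
A hedged /etc/zfs/vdev_id.conf fragment putting the two new keywords together: zero-padded slot numbers plus the bay_lun slot scheme for multi-actuator drives. The channel mappings are illustrative only:

```sh
# Illustrative config: pad slots to two digits and derive slots from bay id + lun.
cat <<'EOF' | sudo tee /etc/zfs/vdev_id.conf
multipath     no
zpad_slot     2
slot          bay_lun
#       PCI_SLOT  HBA PORT  CHANNEL NAME
channel 85:00.0   1         A
channel 85:00.0   0         B
EOF
sudo udevadm trigger --subsystem-match=block   # re-run the udev rules (assumed sufficient here)
```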

@@ -71,7 +71,7 @@ The following fields are displayed:
 Used for scripting mode.
 Do not print headers and separate fields by a single tab instead of arbitrary
 white space.
-.It Fl j Op Ar --json-int
+.It Fl j , -json Op Ar --json-int
 Print the output in JSON format.
 Specify
 .Sy --json-int
@@ -59,7 +59,7 @@
 .Xc
 Displays all ZFS file systems currently mounted.
 .Bl -tag -width "-j"
-.It Fl j
+.It Fl j , -json
 Displays all mounted file systems in JSON format.
 .El
 .It Xo
@@ -50,7 +50,7 @@ and any attempts to access or modify other pools will cause an error.
 .
 .Sh OPTIONS
 .Bl -tag -width "-t"
-.It Fl j
+.It Fl j , -json
 Display channel program output in JSON format.
 When this flag is specified and standard output is empty -
 channel program encountered an error.
@@ -130,7 +130,7 @@ The value
 can be used to display all properties that apply to the given dataset's type
 .Pq Sy filesystem , volume , snapshot , No or Sy bookmark .
 .Bl -tag -width "-s source"
-.It Fl j Op Ar --json-int
+.It Fl j , -json Op Ar --json-int
 Display the output in JSON format.
 Specify
 .Sy --json-int

@@ -98,7 +98,10 @@ This can be an indicator of problems with the underlying storage device.
 The number of delay events is ratelimited by the
 .Sy zfs_slow_io_events_per_second
 module parameter.
-.It Sy dio_verify
+.It Sy dio_verify_rd
+Issued when there was a checksum verify error after a Direct I/O read has been
+issued.
+.It Sy dio_verify_wr
 Issued when there was a checksum verify error after a Direct I/O write has been
 issued.
 This event can only take place if the module parameter

@@ -98,7 +98,7 @@ See the
 .Xr zpoolprops 7
 manual page for more information on the available pool properties.
 .Bl -tag -compact -offset Ds -width "-o field"
-.It Fl j Op Ar --json-int, --json-pool-key-guid
+.It Fl j , -json Op Ar --json-int, --json-pool-key-guid
 Display the list of properties in JSON format.
 Specify
 .Sy --json-int
@@ -157,7 +157,7 @@ See the
 .Xr vdevprops 7
 manual page for more information on the available pool properties.
 .Bl -tag -compact -offset Ds -width "-o field"
-.It Fl j Op Ar --json-int
+.It Fl j , -json Op Ar --json-int
 Display the list of properties in JSON format.
 Specify
 .Sy --json-int
@@ -59,7 +59,7 @@ is specified, the command exits after
 .Ar count
 reports are printed.
 .Bl -tag -width Ds
-.It Fl j Op Ar --json-int, --json-pool-key-guid
+.It Fl j , -json Op Ar --json-int, --json-pool-key-guid
 Display the list of pools in JSON format.
 Specify
 .Sy --json-int

@@ -70,7 +70,7 @@ See the
 option of
 .Nm zpool Cm iostat
 for complete details.
-.It Fl j Op Ar --json-int, --json-flat-vdevs, --json-pool-key-guid
+.It Fl j , -json Op Ar --json-int, --json-flat-vdevs, --json-pool-key-guid
 Display the status for ZFS pools in JSON format.
 Specify
 .Sy --json-int
@@ -82,14 +82,18 @@ Specify
 .Sy --json-pool-key-guid
 to set pool GUID as key for pool objects instead of pool names.
 .It Fl d
-Display the number of Direct I/O write checksum verify errors that have occured
-on a top-level VDEV.
+Display the number of Direct I/O read/write checksum verify errors that have
+occured on a top-level VDEV.
 See
 .Sx zfs_vdev_direct_write_verify
 in
 .Xr zfs 4
 for details about the conditions that can cause Direct I/O write checksum
 verify failures to occur.
+Direct I/O reads checksum verify errors can also occur if the contents of the
+buffer are being manipulated after the I/O has been issued and is in flight.
+In the case of Direct I/O read checksum verify errors, the I/O will be reissued
+through the ARC.
 .It Fl D
 Display a histogram of deduplication statistics, showing the allocated
 .Pq physically present on disk
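
Taken together: -d now reports both read and write Direct I/O verify errors per top-level vdev, and -j gains a --json spelling. A usage sketch with an assumed pool name of tank:

```sh
zpool status -d tank        # extra column with Direct I/O checksum verify error counts
zpool status --json --json-flat-vdevs tank | python3 -m json.tool | head
```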

@@ -620,9 +620,16 @@ abd_borrow_buf_copy(abd_t *abd, size_t n)

 /*
 * Return a borrowed raw buffer to an ABD. If the ABD is scattered, this will
-* no change the contents of the ABD and will ASSERT that you didn't modify
-* the buffer since it was borrowed. If you want any changes you made to buf to
-* be copied back to abd, use abd_return_buf_copy() instead.
+* not change the contents of the ABD. If you want any changes you made to
+* buf to be copied back to abd, use abd_return_buf_copy() instead. If the
+* ABD is not constructed from user pages from Direct I/O then an ASSERT
+* checks to make sure the contents of the buffer have not changed since it was
+* borrowed. We can not ASSERT the contents of the buffer have not changed if
+* it is composed of user pages. While Direct I/O write pages are placed under
+* write protection and can not be changed, this is not the case for Direct I/O
+* reads. The pages of a Direct I/O read could be manipulated at any time.
+* Checksum verifications in the ZIO pipeline check for this issue and handle
+* it by returning an error on checksum verification failure.
 */
 void
 abd_return_buf(abd_t *abd, void *buf, size_t n)
@@ -632,8 +639,34 @@ abd_return_buf(abd_t *abd, void *buf, size_t n)
 #ifdef ZFS_DEBUG
 (void) zfs_refcount_remove_many(&abd->abd_children, n, buf);
 #endif
-if (abd_is_linear(abd)) {
+if (abd_is_from_pages(abd)) {
+if (!abd_is_linear_page(abd))
+zio_buf_free(buf, n);
+} else if (abd_is_linear(abd)) {
 ASSERT3P(buf, ==, abd_to_buf(abd));
+} else if (abd_is_gang(abd)) {
+#ifdef ZFS_DEBUG
+/*
+* We have to be careful with gang ABD's that we do not ASSERT
+* for any ABD's that contain user pages from Direct I/O. See
+* the comment above about Direct I/O read buffers possibly
+* being manipulated. In order to handle this, we jsut iterate
+* through the gang ABD and only verify ABD's that are not from
+* user pages.
+*/
+void *cmp_buf = buf;
+
+for (abd_t *cabd = list_head(&ABD_GANG(abd).abd_gang_chain);
+cabd != NULL;
+cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd)) {
+if (!abd_is_from_pages(cabd)) {
+ASSERT0(abd_cmp_buf(cabd, cmp_buf,
+cabd->abd_size));
+}
+cmp_buf = (char *)cmp_buf + cabd->abd_size;
+}
+#endif
+zio_buf_free(buf, n);
 } else {
 ASSERT0(abd_cmp_buf(abd, buf, n));
 zio_buf_free(buf, n);

@@ -1008,7 +1008,9 @@ abd_borrow_buf_copy(abd_t *abd, size_t n)
 * borrowed. We can not ASSERT that the contents of the buffer have not changed
 * if it is composed of user pages because the pages can not be placed under
 * write protection and the user could have possibly changed the contents in
-* the pages at any time.
+* the pages at any time. This is also an issue for Direct I/O reads. Checksum
+* verifications in the ZIO pipeline check for this issue and handle it by
+* returning an error on checksum verification failure.
 */
 void
 abd_return_buf(abd_t *abd, void *buf, size_t n)

@@ -206,6 +206,7 @@ _VALSTR_BITFIELD_IMPL(zio_flag,
 { '.', "PR", "PROBE" },
 { '.', "TH", "TRYHARD" },
 { '.', "OP", "OPTIONAL" },
+{ '.', "RD", "DIO_READ" },
 { '.', "DQ", "DONT_QUEUE" },
 { '.', "DP", "DONT_PROPAGATE" },
 { '.', "BY", "IO_BYPASS" },

@@ -330,7 +330,7 @@ dmu_read_abd(dnode_t *dn, uint64_t offset, uint64_t size,
 */
 zio_t *cio = zio_read(rio, spa, bp, mbuf, db->db.db_size,
 dmu_read_abd_done, NULL, ZIO_PRIORITY_SYNC_READ,
-ZIO_FLAG_CANFAIL, &zb);
+ZIO_FLAG_CANFAIL | ZIO_FLAG_DIO_READ, &zb);
 mutex_exit(&db->db_mtx);

 zfs_racct_read(spa, db->db.db_size, 1, flags);

@@ -2987,6 +2987,7 @@ dsl_dataset_rename_snapshot_sync_impl(dsl_pool_t *dp,
 dsl_dataset_t *ds;
 uint64_t val;
 dmu_tx_t *tx = ddrsa->ddrsa_tx;
+char *oldname, *newname;
 int error;

 error = dsl_dataset_snap_lookup(hds, ddrsa->ddrsa_oldsnapname, &val);
@@ -3011,8 +3012,14 @@ dsl_dataset_rename_snapshot_sync_impl(dsl_pool_t *dp,
 VERIFY0(zap_add(dp->dp_meta_objset,
 dsl_dataset_phys(hds)->ds_snapnames_zapobj,
 ds->ds_snapname, 8, 1, &ds->ds_object, tx));
-zvol_rename_minors(dp->dp_spa, ddrsa->ddrsa_oldsnapname,
-ddrsa->ddrsa_newsnapname, B_TRUE);
+
+oldname = kmem_asprintf("%s@%s", ddrsa->ddrsa_fsname,
+ddrsa->ddrsa_oldsnapname);
+newname = kmem_asprintf("%s@%s", ddrsa->ddrsa_fsname,
+ddrsa->ddrsa_newsnapname);
+zvol_rename_minors(dp->dp_spa, oldname, newname, B_TRUE);
+kmem_strfree(oldname);
+kmem_strfree(newname);

 dsl_dataset_rele(ds, FTAG);
 return (0);

@@ -1026,7 +1026,7 @@ vdev_draid_map_alloc_row(zio_t *zio, raidz_row_t **rrp, uint64_t io_offset,

 ASSERT3U(vdc->vdc_nparity, >, 0);

-raidz_row_t *rr = vdev_raidz_row_alloc(groupwidth);
+raidz_row_t *rr = vdev_raidz_row_alloc(groupwidth, zio);
 rr->rr_bigcols = bc;
 rr->rr_firstdatacol = vdc->vdc_nparity;
 #ifdef ZFS_DEBUG
@ -34,6 +34,7 @@
|
|||||||
#include <sys/zap.h>
|
#include <sys/zap.h>
|
||||||
#include <sys/abd.h>
|
#include <sys/abd.h>
|
||||||
#include <sys/zthr.h>
|
#include <sys/zthr.h>
|
||||||
|
#include <sys/fm/fs/zfs.h>
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* An indirect vdev corresponds to a vdev that has been removed. Since
|
* An indirect vdev corresponds to a vdev that has been removed. Since
|
||||||
@ -1832,6 +1833,19 @@ vdev_indirect_io_done(zio_t *zio)
|
|||||||
|
|
||||||
zio_bad_cksum_t zbc;
|
zio_bad_cksum_t zbc;
|
||||||
int ret = zio_checksum_error(zio, &zbc);
|
int ret = zio_checksum_error(zio, &zbc);
|
||||||
|
/*
|
||||||
|
* Any Direct I/O read that has a checksum error must be treated as
|
||||||
|
* suspicious as the contents of the buffer could be getting
|
||||||
|
* manipulated while the I/O is taking place. The checksum verify error
|
||||||
|
* will be reported to the top-level VDEV.
|
||||||
|
*/
|
||||||
|
if (zio->io_flags & ZIO_FLAG_DIO_READ && ret == ECKSUM) {
|
||||||
|
zio->io_error = ret;
|
||||||
|
zio->io_flags |= ZIO_FLAG_DIO_CHKSUM_ERR;
|
||||||
|
zio_dio_chksum_verify_error_report(zio);
|
||||||
|
ret = 0;
|
||||||
|
}
|
||||||
|
|
||||||
if (ret == 0) {
|
if (ret == 0) {
|
||||||
zio_checksum_verified(zio);
|
zio_checksum_verified(zio);
|
||||||
return;
|
return;
|
@@ -764,6 +764,27 @@ vdev_mirror_io_done(zio_t *zio)

 	ASSERT(zio->io_type == ZIO_TYPE_READ);

+	/*
+	 * Any Direct I/O read that has a checksum error must be treated as
+	 * suspicious as the contents of the buffer could be getting
+	 * manipulated while the I/O is taking place. The checksum verify error
+	 * will be reported to the top-level Mirror VDEV.
+	 *
+	 * There will be no attempt at reading any additional data copies. If
+	 * the buffer is still being manipulated while attempting to read from
+	 * another child, there exists a possibility that the checksum could be
+	 * verified as valid. However, the buffer contents could again get
+	 * manipulated after verifying the checksum. This would lead to bad data
+	 * being written out during self healing.
+	 */
+	if ((zio->io_flags & ZIO_FLAG_DIO_READ) &&
+	    (zio->io_flags & ZIO_FLAG_DIO_CHKSUM_ERR)) {
+		zio_dio_chksum_verify_error_report(zio);
+		zio->io_error = vdev_mirror_worst_error(mm);
+		ASSERT3U(zio->io_error, ==, ECKSUM);
+		return;
+	}
+
 	/*
 	 * If we don't have a good copy yet, keep trying other children.
 	 */

@@ -433,7 +433,7 @@ const zio_vsd_ops_t vdev_raidz_vsd_ops = {
 };

 raidz_row_t *
-vdev_raidz_row_alloc(int cols)
+vdev_raidz_row_alloc(int cols, zio_t *zio)
 {
 	raidz_row_t *rr =
 	    kmem_zalloc(offsetof(raidz_row_t, rr_col[cols]), KM_SLEEP);
@@ -445,7 +445,17 @@ vdev_raidz_row_alloc(int cols)
 		raidz_col_t *rc = &rr->rr_col[c];
 		rc->rc_shadow_devidx = INT_MAX;
 		rc->rc_shadow_offset = UINT64_MAX;
-		rc->rc_allow_repair = 1;
+		/*
+		 * We can not allow self healing to take place for Direct I/O
+		 * reads. There is nothing that stops the buffer contents from
+		 * being manipulated while the I/O is in flight. It is possible
+		 * that the checksum could be verified on the buffer and then
+		 * the contents of that buffer are manipulated afterwards. This
+		 * could lead to bad data being written out during self
+		 * healing.
+		 */
+		if (!(zio->io_flags & ZIO_FLAG_DIO_READ))
+			rc->rc_allow_repair = 1;
 	}
 	return (rr);
 }
@@ -619,7 +629,7 @@ vdev_raidz_map_alloc(zio_t *zio, uint64_t ashift, uint64_t dcols,
 	}

 	ASSERT3U(acols, <=, scols);
-	rr = vdev_raidz_row_alloc(scols);
+	rr = vdev_raidz_row_alloc(scols, zio);
 	rm->rm_row[0] = rr;
 	rr->rr_cols = acols;
 	rr->rr_bigcols = bc;
@@ -765,7 +775,7 @@ vdev_raidz_map_alloc_expanded(zio_t *zio,

 	for (uint64_t row = 0; row < rows; row++) {
 		boolean_t row_use_scratch = B_FALSE;
-		raidz_row_t *rr = vdev_raidz_row_alloc(cols);
+		raidz_row_t *rr = vdev_raidz_row_alloc(cols, zio);
 		rm->rm_row[row] = rr;

 		/* The starting RAIDZ (parent) vdev sector of the row. */
@@ -2633,6 +2643,20 @@ raidz_checksum_verify(zio_t *zio)
 	raidz_map_t *rm = zio->io_vsd;

 	int ret = zio_checksum_error(zio, &zbc);
+	/*
+	 * Any Direct I/O read that has a checksum error must be treated as
+	 * suspicious as the contents of the buffer could be getting
+	 * manipulated while the I/O is taking place. The checksum verify error
+	 * will be reported to the top-level RAIDZ VDEV.
+	 */
+	if (zio->io_flags & ZIO_FLAG_DIO_READ && ret == ECKSUM) {
+		zio->io_error = ret;
+		zio->io_flags |= ZIO_FLAG_DIO_CHKSUM_ERR;
+		zio_dio_chksum_verify_error_report(zio);
+		zio_checksum_verified(zio);
+		return (0);
+	}
+
 	if (ret != 0 && zbc.zbc_injected != 0)
 		rm->rm_ecksuminjected = 1;

@@ -2776,6 +2800,11 @@ vdev_raidz_io_done_verified(zio_t *zio, raidz_row_t *rr)
 		    (rc->rc_error == 0 || rc->rc_size == 0)) {
 			continue;
 		}
+		/*
+		 * We do not allow self healing for Direct I/O reads.
+		 * See comment in vdev_raidz_row_alloc().
+		 */
+		ASSERT0(zio->io_flags & ZIO_FLAG_DIO_READ);

 		zfs_dbgmsg("zio=%px repairing c=%u devidx=%u "
 		    "offset=%llx",
@@ -2979,6 +3008,8 @@ raidz_reconstruct(zio_t *zio, int *ltgts, int ntgts, int nparity)

 	/* Check for success */
 	if (raidz_checksum_verify(zio) == 0) {
+		if (zio->io_flags & ZIO_FLAG_DIO_CHKSUM_ERR)
+			return (0);

 		/* Reconstruction succeeded - report errors */
 		for (int i = 0; i < rm->rm_nrows; i++) {
@@ -3379,7 +3410,6 @@ vdev_raidz_io_done_unrecoverable(zio_t *zio)
 		zio_bad_cksum_t zbc;
 		zbc.zbc_has_cksum = 0;
 		zbc.zbc_injected = rm->rm_ecksuminjected;
-
 		mutex_enter(&cvd->vdev_stat_lock);
 		cvd->vdev_stat.vs_checksum_errors++;
 		mutex_exit(&cvd->vdev_stat_lock);
@@ -3444,6 +3474,9 @@ vdev_raidz_io_done(zio_t *zio)
 		}

 		if (raidz_checksum_verify(zio) == 0) {
+			if (zio->io_flags & ZIO_FLAG_DIO_CHKSUM_ERR)
+				goto done;
+
 			for (int i = 0; i < rm->rm_nrows; i++) {
 				raidz_row_t *rr = rm->rm_row[i];
 				vdev_raidz_io_done_verified(zio, rr);
@@ -3538,6 +3571,7 @@ vdev_raidz_io_done(zio_t *zio)
 			}
 		}
 	}
+done:
 	if (rm->rm_lr != NULL) {
 		zfs_rangelock_exit(rm->rm_lr);
 		rm->rm_lr = NULL;
@@ -303,6 +303,7 @@ zfs_read(struct znode *zp, zfs_uio_t *uio, int ioflag, cred_t *cr)
 	(void) cr;
 	int error = 0;
 	boolean_t frsync = B_FALSE;
+	boolean_t dio_checksum_failure = B_FALSE;

 	zfsvfs_t *zfsvfs = ZTOZSB(zp);
 	if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
@@ -424,8 +425,26 @@ zfs_read(struct znode *zp, zfs_uio_t *uio, int ioflag, cred_t *cr)

 		if (error) {
 			/* convert checksum errors into IO errors */
-			if (error == ECKSUM)
-				error = SET_ERROR(EIO);
+			if (error == ECKSUM) {
+				/*
+				 * If a Direct I/O read returned a checksum
+				 * verify error, then it must be treated as
+				 * suspicious. The contents of the buffer could
+				 * have been manipulated while the I/O was in
+				 * flight. In this case, the remainder of the
+				 * I/O request will just be reissued through
+				 * the ARC.
+				 */
+				if (uio->uio_extflg & UIO_DIRECT) {
+					dio_checksum_failure = B_TRUE;
+					uio->uio_extflg &= ~UIO_DIRECT;
+					n += dio_remaining_resid;
+					dio_remaining_resid = 0;
+					continue;
+				} else {
+					error = SET_ERROR(EIO);
+				}
+			}

 #if defined(__linux__)
 			/*
@@ -472,6 +491,9 @@ zfs_read(struct znode *zp, zfs_uio_t *uio, int ioflag, cred_t *cr)
 out:
 	zfs_rangelock_exit(lr);

+	if (dio_checksum_failure == B_TRUE)
+		uio->uio_extflg |= UIO_DIRECT;
+
 	/*
 	 * Cleanup for Direct I/O if requested.
 	 */
module/zfs/zio.c (120 lines changed)

@@ -804,11 +804,11 @@ zio_notify_parent(zio_t *pio, zio_t *zio, enum zio_wait_type wait,
 		pio->io_reexecute |= zio->io_reexecute;
 		ASSERT3U(*countp, >, 0);

-		if (zio->io_flags & ZIO_FLAG_DIO_CHKSUM_ERR) {
-			ASSERT3U(*errorp, ==, EIO);
-			ASSERT3U(pio->io_child_type, ==, ZIO_CHILD_LOGICAL);
+		/*
+		 * Propagate the Direct I/O checksum verify failure to the parent.
+		 */
+		if (zio->io_flags & ZIO_FLAG_DIO_CHKSUM_ERR)
 			pio->io_flags |= ZIO_FLAG_DIO_CHKSUM_ERR;
-		}

 		(*countp)--;

@@ -1573,6 +1573,14 @@ zio_vdev_child_io(zio_t *pio, blkptr_t *bp, vdev_t *vd, uint64_t offset,
 		 */
 		pipeline |= ZIO_STAGE_CHECKSUM_VERIFY;
 		pio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
+		/*
+		 * We never allow the mirror VDEV to attempt reading from any
+		 * additional data copies after the first Direct I/O checksum
+		 * verify failure. This is to avoid bad data being written out
+		 * through the mirror during self healing. See comment in
+		 * vdev_mirror_io_done() for more details.
+		 */
+		ASSERT0(pio->io_flags & ZIO_FLAG_DIO_CHKSUM_ERR);
 	} else if (type == ZIO_TYPE_WRITE &&
 	    pio->io_prop.zp_direct_write == B_TRUE) {
 		/*
@@ -4555,18 +4563,18 @@ zio_vdev_io_assess(zio_t *zio)
 	}

 	/*
-	 * If a Direct I/O write checksum verify error has occurred then this
-	 * I/O should not attempt to be issued again. Instead the EIO will
-	 * be returned.
+	 * If a Direct I/O operation has a checksum verify error then this I/O
+	 * should not attempt to be issued again.
 	 */
 	if (zio->io_flags & ZIO_FLAG_DIO_CHKSUM_ERR) {
-		ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_LOGICAL);
-		ASSERT3U(zio->io_error, ==, EIO);
+		if (zio->io_type == ZIO_TYPE_WRITE) {
+			ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_LOGICAL);
+			ASSERT3U(zio->io_error, ==, EIO);
+		}
 		zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
 		return (zio);
 	}

 	if (zio_injection_enabled && zio->io_error == 0)
 		zio->io_error = zio_handle_fault_injection(zio, EIO);
@@ -4864,16 +4872,40 @@ zio_checksum_verify(zio_t *zio)
 			ASSERT3U(zio->io_prop.zp_checksum, ==, ZIO_CHECKSUM_LABEL);
 	}

+	ASSERT0(zio->io_flags & ZIO_FLAG_DIO_CHKSUM_ERR);
+	IMPLY(zio->io_flags & ZIO_FLAG_DIO_READ,
+	    !(zio->io_flags & ZIO_FLAG_SPECULATIVE));
+
 	if ((error = zio_checksum_error(zio, &info)) != 0) {
 		zio->io_error = error;
 		if (error == ECKSUM &&
 		    !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
-			mutex_enter(&zio->io_vd->vdev_stat_lock);
-			zio->io_vd->vdev_stat.vs_checksum_errors++;
-			mutex_exit(&zio->io_vd->vdev_stat_lock);
-			(void) zfs_ereport_start_checksum(zio->io_spa,
-			    zio->io_vd, &zio->io_bookmark, zio,
-			    zio->io_offset, zio->io_size, &info);
+			if (zio->io_flags & ZIO_FLAG_DIO_READ) {
+				zio->io_flags |= ZIO_FLAG_DIO_CHKSUM_ERR;
+				zio_t *pio = zio_unique_parent(zio);
+				/*
+				 * Any Direct I/O read that has a checksum
+				 * error must be treated as suspicious as the
+				 * contents of the buffer could be getting
+				 * manipulated while the I/O is taking place.
+				 *
+				 * The checksum verify error will only be
+				 * reported here for disk and file VDEV's and
+				 * will be reported on those that the failure
+				 * occurred on. Other types of VDEV's report the
+				 * verify failure in their own code paths.
+				 */
+				if (pio->io_child_type == ZIO_CHILD_LOGICAL) {
+					zio_dio_chksum_verify_error_report(zio);
+				}
+			} else {
+				mutex_enter(&zio->io_vd->vdev_stat_lock);
+				zio->io_vd->vdev_stat.vs_checksum_errors++;
+				mutex_exit(&zio->io_vd->vdev_stat_lock);
+				(void) zfs_ereport_start_checksum(zio->io_spa,
+				    zio->io_vd, &zio->io_bookmark, zio,
+				    zio->io_offset, zio->io_size, &info);
+			}
 		}
 	}

@@ -4899,22 +4931,8 @@ zio_dio_checksum_verify(zio_t *zio)
 	if ((error = zio_checksum_error(zio, NULL)) != 0) {
 		zio->io_error = error;
 		if (error == ECKSUM) {
-			mutex_enter(&zio->io_vd->vdev_stat_lock);
-			zio->io_vd->vdev_stat.vs_dio_verify_errors++;
-			mutex_exit(&zio->io_vd->vdev_stat_lock);
-			zio->io_error = SET_ERROR(EIO);
 			zio->io_flags |= ZIO_FLAG_DIO_CHKSUM_ERR;
-
-			/*
-			 * The EIO error must be propagated up to the logical
-			 * parent ZIO in zio_notify_parent() so it can be
-			 * returned to dmu_write_abd().
-			 */
-			zio->io_flags &= ~ZIO_FLAG_DONT_PROPAGATE;
-
-			(void) zfs_ereport_post(FM_EREPORT_ZFS_DIO_VERIFY,
-			    zio->io_spa, zio->io_vd, &zio->io_bookmark,
-			    zio, 0);
+			zio_dio_chksum_verify_error_report(zio);
 		}
 	}

@@ -4932,6 +4950,39 @@ zio_checksum_verified(zio_t *zio)
 	zio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
 }

+/*
+ * Report Direct I/O checksum verify error and create ZED event.
+ */
+void
+zio_dio_chksum_verify_error_report(zio_t *zio)
+{
+	ASSERT(zio->io_flags & ZIO_FLAG_DIO_CHKSUM_ERR);
+
+	if (zio->io_child_type == ZIO_CHILD_LOGICAL)
+		return;
+
+	mutex_enter(&zio->io_vd->vdev_stat_lock);
+	zio->io_vd->vdev_stat.vs_dio_verify_errors++;
+	mutex_exit(&zio->io_vd->vdev_stat_lock);
+	if (zio->io_type == ZIO_TYPE_WRITE) {
+		/*
+		 * Convert checksum error for writes into EIO.
+		 */
+		zio->io_error = SET_ERROR(EIO);
+		/*
+		 * Report dio_verify_wr ZED event.
+		 */
+		(void) zfs_ereport_post(FM_EREPORT_ZFS_DIO_VERIFY_WR,
+		    zio->io_spa, zio->io_vd, &zio->io_bookmark, zio, 0);
+	} else {
+		/*
+		 * Report dio_verify_rd ZED event.
+		 */
+		(void) zfs_ereport_post(FM_EREPORT_ZFS_DIO_VERIFY_RD,
+		    zio->io_spa, zio->io_vd, &zio->io_bookmark, zio, 0);
+	}
+}
+
 /*
  * ==========================================================================
  * Error rank. Error are ranked in the order 0, ENXIO, ECKSUM, EIO, other.

@@ -5343,10 +5394,9 @@ zio_done(zio_t *zio)

 	if (zio->io_reexecute) {
 		/*
-		 * A Direct I/O write that has a checksum verify error should
-		 * not attempt to reexecute. Instead, EAGAIN should just be
-		 * propagated back up so the write can be attempt to be issued
-		 * through the ARC.
+		 * A Direct I/O operation that has a checksum verify error
+		 * should not attempt to reexecute. Instead, the error should
+		 * just be propagated back.
 		 */
 		ASSERT(!(zio->io_flags & ZIO_FLAG_DIO_CHKSUM_ERR));
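The zio_dio_chksum_verify_error_report() path added above bumps a per-vdev vs_dio_verify_errors counter and posts dio_verify_rd / dio_verify_wr ereports instead of ordinary checksum errors. As a rough sketch of how those surface to an administrator (the pool name "tank" is an example; the commands mirror what the dio.kshlib test helpers later in this change do):

    zpool status -d tank                        # per-vdev Direct I/O checksum verify counts
    zpool events tank | grep -c dio_verify_rd   # ZED events from failed Direct I/O read verifies
    zpool events tank | grep -c dio_verify_wr   # ZED events from failed Direct I/O write verifies
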
@@ -99,10 +99,10 @@ License: @ZFS_META_LICENSE@
 URL: https://github.com/openzfs/zfs
 Source0: %{name}-%{version}.tar.gz
 BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n)
-Requires: libzpool5%{?_isa} = %{version}-%{release}
+Requires: libzpool6%{?_isa} = %{version}-%{release}
 Requires: libnvpair3%{?_isa} = %{version}-%{release}
 Requires: libuutil3%{?_isa} = %{version}-%{release}
-Requires: libzfs5%{?_isa} = %{version}-%{release}
+Requires: libzfs6%{?_isa} = %{version}-%{release}
 Requires: %{name}-kmod = %{version}
 Provides: %{name}-kmod-common = %{version}-%{release}
 Obsoletes: spl <= %{version}
@@ -150,21 +150,22 @@ Requires: sysstat
 %description
 This package contains the core ZFS command line utilities.

-%package -n libzpool5
+%package -n libzpool6
 Summary: Native ZFS pool library for Linux
 Group: System Environment/Kernel
 Obsoletes: libzpool2 <= %{version}
 Obsoletes: libzpool4 <= %{version}
+Obsoletes: libzpool5 <= %{version}

-%description -n libzpool5
+%description -n libzpool6
 This package contains the zpool library, which provides support
 for managing zpools

 %if %{defined ldconfig_scriptlets}
-%ldconfig_scriptlets -n libzpool5
+%ldconfig_scriptlets -n libzpool6
 %else
-%post -n libzpool5 -p /sbin/ldconfig
-%postun -n libzpool5 -p /sbin/ldconfig
+%post -n libzpool6 -p /sbin/ldconfig
+%postun -n libzpool6 -p /sbin/ldconfig
 %endif

 %package -n libnvpair3
@@ -211,37 +212,39 @@ This library provides a variety of compatibility functions for OpenZFS:
 # The library version is encoded in the package name. When updating the
 # version information it is important to add an obsoletes line below for
 # the previous version of the package.
-%package -n libzfs5
+%package -n libzfs6
 Summary: Native ZFS filesystem library for Linux
 Group: System Environment/Kernel
 Obsoletes: libzfs2 <= %{version}
 Obsoletes: libzfs4 <= %{version}
+Obsoletes: libzfs5 <= %{version}

-%description -n libzfs5
+%description -n libzfs6
 This package provides support for managing ZFS filesystems

 %if %{defined ldconfig_scriptlets}
-%ldconfig_scriptlets -n libzfs5
+%ldconfig_scriptlets -n libzfs6
 %else
-%post -n libzfs5 -p /sbin/ldconfig
-%postun -n libzfs5 -p /sbin/ldconfig
+%post -n libzfs6 -p /sbin/ldconfig
+%postun -n libzfs6 -p /sbin/ldconfig
 %endif

-%package -n libzfs5-devel
+%package -n libzfs6-devel
 Summary: Development headers
 Group: System Environment/Kernel
-Requires: libzfs5%{?_isa} = %{version}-%{release}
-Requires: libzpool5%{?_isa} = %{version}-%{release}
+Requires: libzfs6%{?_isa} = %{version}-%{release}
+Requires: libzpool6%{?_isa} = %{version}-%{release}
 Requires: libnvpair3%{?_isa} = %{version}-%{release}
 Requires: libuutil3%{?_isa} = %{version}-%{release}
-Provides: libzpool5-devel = %{version}-%{release}
+Provides: libzpool6-devel = %{version}-%{release}
 Provides: libnvpair3-devel = %{version}-%{release}
 Provides: libuutil3-devel = %{version}-%{release}
 Obsoletes: zfs-devel <= %{version}
 Obsoletes: libzfs2-devel <= %{version}
 Obsoletes: libzfs4-devel <= %{version}
+Obsoletes: libzfs5-devel <= %{version}

-%description -n libzfs5-devel
+%description -n libzfs6-devel
 This package contains the header files needed for building additional
 applications against the ZFS libraries.

@@ -290,7 +293,7 @@ Summary: Python %{python_version} wrapper for libzfs_core
 Group: Development/Languages/Python
 License: Apache-2.0
 BuildArch: noarch
-Requires: libzfs5 = %{version}-%{release}
+Requires: libzfs6 = %{version}-%{release}
 Requires: libnvpair3 = %{version}-%{release}
 Requires: libffi
 Requires: python%{__python_pkg_version}
@@ -534,7 +537,7 @@ systemctl --system daemon-reload >/dev/null || true
 %config(noreplace) %{_bashcompletiondir}/zfs
 %config(noreplace) %{_bashcompletiondir}/zpool

-%files -n libzpool5
+%files -n libzpool6
 %{_libdir}/libzpool.so.*

 %files -n libnvpair3
@@ -543,10 +546,10 @@ systemctl --system daemon-reload >/dev/null || true
 %files -n libuutil3
 %{_libdir}/libuutil.so.*

-%files -n libzfs5
+%files -n libzfs6
 %{_libdir}/libzfs*.so.*

-%files -n libzfs5-devel
+%files -n libzfs6-devel
 %{_pkgconfigdir}/libzfs.pc
 %{_pkgconfigdir}/libzfsbootenv.pc
 %{_pkgconfigdir}/libzfs_core.pc
@@ -697,8 +697,8 @@ tags = ['functional', 'delegate']
 tests = ['dio_aligned_block', 'dio_async_always', 'dio_async_fio_ioengines',
     'dio_compression', 'dio_dedup', 'dio_encryption', 'dio_grow_block',
     'dio_max_recordsize', 'dio_mixed', 'dio_mmap', 'dio_overwrites',
-    'dio_property', 'dio_random', 'dio_recordsize', 'dio_unaligned_block',
-    'dio_unaligned_filesize']
+    'dio_property', 'dio_random', 'dio_read_verify', 'dio_recordsize',
+    'dio_unaligned_block', 'dio_unaligned_filesize']
 tags = ['functional', 'direct']

 [tests/functional/exec]
@@ -20,7 +20,7 @@
 */

 /*
- * Copyright (c) 2022 by Triad National Security, LLC.
+ * Copyright (c) 2024 by Triad National Security, LLC.
 */

 #include <sys/types.h>
@@ -39,51 +39,59 @@
 #define MIN(a, b) ((a) < (b)) ? (a) : (b)
 #endif

-static char *outputfile = NULL;
+static char *filename = NULL;
 static int blocksize = 131072; /* 128K */
-static int wr_err_expected = 0;
+static int err_expected = 0;
+static int read_op = 0;
+static int write_op = 0;
 static int numblocks = 100;
 static char *execname = NULL;
 static int print_usage = 0;
 static int randompattern = 0;
-static int ofd;
+static int fd;
 char *buf = NULL;

 typedef struct {
-	int entire_file_written;
+	int entire_file_completed;
 } pthread_args_t;

 static void
 usage(void)
 {
 	(void) fprintf(stderr,
-	    "usage %s -o outputfile [-b blocksize] [-e wr_error_expected]\n"
-	    " [-n numblocks] [-p randpattern] [-h help]\n"
+	    "usage %s -f filename [-b blocksize] [-e wr_error_expected]\n"
+	    " [-n numblocks] [-p randompattern] -r read_op \n"
+	    " -w write_op [-h help]\n"
 	    "\n"
 	    "Testing whether checksum verify works correctly for O_DIRECT.\n"
 	    "when manipulating the contents of a userspace buffer.\n"
 	    "\n"
-	    " outputfile: File to write to.\n"
+	    " filename: File to read or write to.\n"
 	    " blocksize: Size of each block to write (must be at \n"
 	    " least >= 512).\n"
-	    " wr_err_expected: Whether pwrite() is expected to return EIO\n"
+	    " err_expected: Whether write() is expected to return EIO\n"
 	    " while manipulating the contents of the\n"
 	    " buffer.\n"
 	    " numblocks: Total number of blocksized blocks to\n"
 	    " write.\n"
-	    " randpattern: Fill data buffer with random data. Default\n"
-	    " behavior is to fill the buffer with the \n"
-	    " known data pattern (0xdeadbeef).\n"
+	    " read_op: Perform reads to the filename file while\n"
+	    " manipulating the buffer contents\n"
+	    " write_op: Perform writes to the filename file while\n"
+	    " manipulating the buffer contents\n"
+	    " randompattern: Fill data buffer with random data for \n"
+	    " writes. Default behavior is to fill the \n"
+	    " buffer with known data pattern (0xdeadbeef)\n"
 	    " help: Print usage information and exit.\n"
 	    "\n"
 	    " Required parameters:\n"
-	    " outputfile\n"
+	    " filename\n"
+	    " read_op or write_op\n"
 	    "\n"
 	    " Default Values:\n"
 	    " blocksize -> 131072\n"
 	    " wr_err_expexted -> false\n"
 	    " numblocks -> 100\n"
-	    " randpattern -> false\n",
+	    " randompattern -> false\n",
 	    execname);
 	(void) exit(1);
 }
@@ -97,16 +105,21 @@ parse_options(int argc, char *argv[])
 	extern int optind, optopt;
 	execname = argv[0];

-	while ((c = getopt(argc, argv, "b:ehn:o:p")) != -1) {
+	while ((c = getopt(argc, argv, "b:ef:hn:rw")) != -1) {
 		switch (c) {
 			case 'b':
 				blocksize = atoi(optarg);
 				break;

 			case 'e':
-				wr_err_expected = 1;
+				err_expected = 1;
 				break;

+			case 'f':
+				filename = optarg;
+				break;
+
 			case 'h':
 				print_usage = 1;
 				break;
@@ -115,12 +128,12 @@ parse_options(int argc, char *argv[])
 				numblocks = atoi(optarg);
 				break;

-			case 'o':
-				outputfile = optarg;
+			case 'r':
+				read_op = 1;
 				break;

-			case 'p':
-				randompattern = 1;
+			case 'w':
+				write_op = 1;
 				break;

 			case ':':
@@ -141,7 +154,8 @@ parse_options(int argc, char *argv[])
 	if (errflag || print_usage == 1)
 		(void) usage();

-	if (blocksize < 512 || outputfile == NULL || numblocks <= 0) {
+	if (blocksize < 512 || filename == NULL || numblocks <= 0 ||
+	    (read_op == 0 && write_op == 0)) {
 		(void) fprintf(stderr,
 		    "Required paramater(s) missing or invalid.\n");
 		(void) usage();
@@ -160,10 +174,10 @@ write_thread(void *arg)
 	ssize_t wrote = 0;
 	pthread_args_t *args = (pthread_args_t *)arg;

-	while (!args->entire_file_written) {
-		wrote = pwrite(ofd, buf, blocksize, offset);
+	while (!args->entire_file_completed) {
+		wrote = pwrite(fd, buf, blocksize, offset);
 		if (wrote != blocksize) {
-			if (wr_err_expected)
+			if (err_expected)
 				assert(errno == EIO);
 			else
 				exit(2);
@@ -173,7 +187,35 @@ write_thread(void *arg)
 		left -= blocksize;

 		if (left == 0)
-			args->entire_file_written = 1;
+			args->entire_file_completed = 1;
+	}
+
+	pthread_exit(NULL);
+}
+
+/*
+ * Read blocksize * numblocks from the file using O_DIRECT.
+ */
+static void *
+read_thread(void *arg)
+{
+	size_t offset = 0;
+	int total_data = blocksize * numblocks;
+	int left = total_data;
+	ssize_t read = 0;
+	pthread_args_t *args = (pthread_args_t *)arg;
+
+	while (!args->entire_file_completed) {
+		read = pread(fd, buf, blocksize, offset);
+		if (read != blocksize) {
+			exit(2);
+		}
+
+		offset = ((offset + blocksize) % total_data);
+		left -= blocksize;
+
+		if (left == 0)
+			args->entire_file_completed = 1;
 	}

 	pthread_exit(NULL);
@@ -189,7 +231,7 @@ manipulate_buf_thread(void *arg)
 	char rand_char;
 	pthread_args_t *args = (pthread_args_t *)arg;

-	while (!args->entire_file_written) {
+	while (!args->entire_file_completed) {
 		rand_offset = (rand() % blocksize);
 		rand_char = (rand() % (126 - 33) + 33);
 		buf[rand_offset] = rand_char;
@@ -202,9 +244,9 @@ int
 main(int argc, char *argv[])
 {
 	const char *datapattern = "0xdeadbeef";
-	int ofd_flags = O_WRONLY | O_CREAT | O_DIRECT;
+	int fd_flags = O_DIRECT;
 	mode_t mode = S_IRUSR | S_IWUSR;
-	pthread_t write_thr;
+	pthread_t io_thr;
 	pthread_t manipul_thr;
 	int left = blocksize;
 	int offset = 0;
@@ -213,9 +255,15 @@ main(int argc, char *argv[])

 	parse_options(argc, argv);

-	ofd = open(outputfile, ofd_flags, mode);
-	if (ofd == -1) {
-		(void) fprintf(stderr, "%s, %s\n", execname, outputfile);
+	if (write_op) {
+		fd_flags |= (O_WRONLY | O_CREAT);
+	} else {
+		fd_flags |= O_RDONLY;
+	}
+
+	fd = open(filename, fd_flags, mode);
+	if (fd == -1) {
+		(void) fprintf(stderr, "%s, %s\n", execname, filename);
 		perror("open");
 		exit(2);
 	}
@@ -228,24 +276,22 @@ main(int argc, char *argv[])
 		exit(2);
 	}

-	if (!randompattern) {
-		/* Putting known data pattern in buffer */
-		while (left) {
-			size_t amt = MIN(strlen(datapattern), left);
-			memcpy(&buf[offset], datapattern, amt);
-			offset += amt;
-			left -= amt;
+	if (write_op) {
+		if (!randompattern) {
+			/* Putting known data pattern in buffer */
+			while (left) {
+				size_t amt = MIN(strlen(datapattern), left);
+				memcpy(&buf[offset], datapattern, amt);
+				offset += amt;
+				left -= amt;
+			}
+		} else {
+			/* Putting random data in buffer */
+			for (int i = 0; i < blocksize; i++)
+				buf[i] = rand();
 		}
-	} else {
-		/* Putting random data in buffer */
-		for (int i = 0; i < blocksize; i++)
-			buf[i] = rand();
 	}

-	/*
-	 * Writing using O_DIRECT while manipulating the buffer contents until
-	 * the entire file is written.
-	 */
 	if ((rc = pthread_create(&manipul_thr, NULL, manipulate_buf_thread,
 	    &args))) {
 		fprintf(stderr, "error: pthreads_create, manipul_thr, "
@@ -253,18 +299,34 @@ main(int argc, char *argv[])
 		exit(2);
 	}

-	if ((rc = pthread_create(&write_thr, NULL, write_thread, &args))) {
-		fprintf(stderr, "error: pthreads_create, write_thr, "
-		    "rc: %d\n", rc);
-		exit(2);
+	if (write_op) {
+		/*
+		 * Writing using O_DIRECT while manipulating the buffer contents
+		 * until the entire file is written.
+		 */
+		if ((rc = pthread_create(&io_thr, NULL, write_thread, &args))) {
+			fprintf(stderr, "error: pthreads_create, io_thr, "
+			    "rc: %d\n", rc);
+			exit(2);
+		}
+	} else {
+		/*
+		 * Reading using O_DIRECT while manipulating the buffer contents
+		 * until the entire file is read.
+		 */
+		if ((rc = pthread_create(&io_thr, NULL, read_thread, &args))) {
+			fprintf(stderr, "error: pthreads_create, io_thr, "
+			    "rc: %d\n", rc);
+			exit(2);
+		}
 	}

-	pthread_join(write_thr, NULL);
+	pthread_join(io_thr, NULL);
 	pthread_join(manipul_thr, NULL);

-	assert(args.entire_file_written == 1);
+	assert(args.entire_file_completed == 1);

-	(void) close(ofd);
+	(void) close(fd);

 	free(buf);
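With the options reworked above, the same helper can drive either a write or a read workload against one file while a second thread scribbles on the user buffer. A minimal sketch of the two invocations (the path is an example; the option letters, and the 131072-byte / 100-block defaults, come from the code above):

    # O_DIRECT writes while the buffer is being modified (file is created if needed)
    manipulate_user_buffer -f /testpool/fs/direct-write.iso -w

    # O_DIRECT reads of the same file under the same buffer manipulation
    manipulate_user_buffer -f /testpool/fs/direct-write.iso -r -n 100 -b 131072
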
@@ -1477,6 +1477,7 @@ nobase_dist_datadir_zfs_tests_tests_SCRIPTS += \
 	functional/direct/dio_overwrites.ksh \
 	functional/direct/dio_property.ksh \
 	functional/direct/dio_random.ksh \
+	functional/direct/dio_read_verify.ksh \
 	functional/direct/dio_recordsize.ksh \
 	functional/direct/dio_unaligned_block.ksh \
 	functional/direct/dio_unaligned_filesize.ksh \
@@ -30,28 +30,39 @@
 # STRATEGY:
 # 1. Run different zfs/zpool -j commands and check for valid JSON

+#
+# -j and --json mean the same thing. Each command will be run twice, replacing
+# JSONFLAG with the flag under test.
 list=(
-    "zpool status -j -g --json-int --json-flat-vdevs --json-pool-key-guid"
-    "zpool status -p -j -g --json-int --json-flat-vdevs --json-pool-key-guid"
-    "zpool status -j -c upath"
-    "zpool status -j"
-    "zpool status -j testpool1"
-    "zpool list -j"
-    "zpool list -j -g"
-    "zpool list -j -o fragmentation"
-    "zpool get -j size"
-    "zpool get -j all"
-    "zpool version -j"
-    "zfs list -j"
-    "zfs list -j testpool1"
-    "zfs get -j all"
-    "zfs get -j available"
-    "zfs mount -j"
-    "zfs version -j"
+    "zpool status JSONFLAG -g --json-int --json-flat-vdevs --json-pool-key-guid"
+    "zpool status -p JSONFLAG -g --json-int --json-flat-vdevs --json-pool-key-guid"
+    "zpool status JSONFLAG -c upath"
+    "zpool status JSONFLAG"
+    "zpool status JSONFLAG testpool1"
+    "zpool list JSONFLAG"
+    "zpool list JSONFLAG -g"
+    "zpool list JSONFLAG -o fragmentation"
+    "zpool get JSONFLAG size"
+    "zpool get JSONFLAG all"
+    "zpool version JSONFLAG"
+    "zfs list JSONFLAG"
+    "zfs list JSONFLAG testpool1"
+    "zfs get JSONFLAG all"
+    "zfs get JSONFLAG available"
+    "zfs mount JSONFLAG"
+    "zfs version JSONFLAG"
 )

-for cmd in "${list[@]}" ; do
-	log_must eval "$cmd | jq > /dev/null"
-done
+function run_json_tests
+{
+	typeset flag=$1
+	for cmd in "${list[@]}" ; do
+		cmd=${cmd//JSONFLAG/$flag}
+		log_must eval "$cmd | jq > /dev/null"
+	done
+}
+
+log_must run_json_tests -j
+log_must run_json_tests --json

 log_pass "zpool and zfs commands outputted valid JSON"
@@ -113,7 +113,7 @@ wait
 parallel_time=$SECONDS
 log_note "asyncronously imported 4 pools in $parallel_time seconds"

-log_must test $parallel_time -lt $(($sequential_time / 3))
+log_must test $parallel_time -lt $(($sequential_time / 2))

 #
 # export pools with import delay injectors
@@ -132,6 +132,6 @@ log_must zpool import -a -d $DEVICE_DIR -f
 parallel_time=$SECONDS
 log_note "asyncronously imported 4 pools in $parallel_time seconds"

-log_must test $parallel_time -lt $(($sequential_time / 3))
+log_must test $parallel_time -lt $(($sequential_time / 2))

 log_pass "Pool imports occur in parallel"
@@ -221,7 +221,7 @@ function ddt_dedup_vdev_limit
 	# For here, we just set the entry count a little higher than what we
 	# expect to allow for some instability.
 	#
-	log_must test $(ddt_entries) -le 600000
+	log_must test $(ddt_entries) -le 650000

 	do_clean
 }
@@ -84,8 +84,9 @@ function get_zpool_status_chksum_verify_failures # pool_name vdev_type
 function get_zed_dio_verify_events # pool
 {
 	typeset pool=$1
+	typeset op=$2

-	val=$(zpool events $pool | grep -c dio_verify)
+	val=$(zpool events $pool | grep -c "dio_verify_${op}")

 	echo "$val"
 }
@@ -96,11 +97,12 @@ function get_zed_dio_verify_events # pool
 # zpool events
 # After getting that counts will clear the out the ZPool errors and events
 #
-function check_dio_write_chksum_verify_failures # pool vdev_type expect_errors
+function check_dio_chksum_verify_failures # pool vdev_type op expect_errors
 {
 	typeset pool=$1
 	typeset vdev_type=$2
 	typeset expect_errors=$3
+	typeset op=$4
 	typeset note_str="expecting none"

 	if [[ $expect_errors -ne 0 ]]; then
@@ -108,10 +110,10 @@ function check_dio_write_chksum_verify_failures # pool vdev_type expect_errors
 	fi

 	log_note "Checking for Direct I/O write checksum verify errors \
-	    $note_str on ZPool: $pool"
+	    $note_str on ZPool: $pool with $vdev_type"

 	status_failures=$(get_zpool_status_chksum_verify_failures $pool $vdev_type)
-	zed_dio_verify_events=$(get_zed_dio_verify_events $pool)
+	zed_dio_verify_events=$(get_zed_dio_verify_events $pool $op)

 	if [[ $expect_errors -ne 0 ]]; then
 		if [[ $status_failures -eq 0 ||

tests/zfs-tests/tests/functional/direct/dio_read_verify.ksh (new executable file, 107 lines)

@@ -0,0 +1,107 @@
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2024 by Triad National Security, LLC.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/direct/dio.cfg
+. $STF_SUITE/tests/functional/direct/dio.kshlib
+
+#
+# DESCRIPTION:
+#	Verify checksum verify works for Direct I/O reads.
+#
+# STRATEGY:
+#	1. Create a zpool from each vdev type.
+#	2. Start a Direct I/O read workload while manipulating the user buffer
+#	   contents.
+#	3. Verify there are Direct I/O read verify failures using
+#	   zpool status -d and checking for zevents. We also make sure there
+#	   are no reported data errors.
+#
+
+verify_runnable "global"
+
+log_assert "Verify checksum verify works for Direct I/O reads."
+
+log_onexit dio_cleanup
+
+NUMBLOCKS=300
+BS=$((128 * 1024)) # 128k
+
+log_must truncate -s $MINVDEVSIZE $DIO_VDEVS
+
+# We will verify that there are no checksum errors for every Direct I/O read
+# while manipulating the buffer contents while the I/O is still in flight and
+# also that Direct I/O checksum verify failures and dio_verify_rd zevents are
+# reported.
+
+for type in "" "mirror" "raidz" "draid"; do
+	typeset vdev_type=$type
+	if [[ "${vdev_type}" == "" ]]; then
+		vdev_type="stripe"
+	fi
+
+	log_note "Verifying every Direct I/O read verify with VDEV type \
+	    ${vdev_type}"
+
+	create_pool $TESTPOOL1 $type $DIO_VDEVS
+	log_must eval "zfs create -o recordsize=128k -o compression=off \
+	    $TESTPOOL1/$TESTFS1"
+
+	mntpnt=$(get_prop mountpoint $TESTPOOL1/$TESTFS1)
+	prev_dio_rd=$(get_iostats_stat $TESTPOOL1 direct_read_count)
+	prev_arc_rd=$(get_iostats_stat $TESTPOOL1 arc_read_count)
+
+	# Create the file before trying to manipulate the contents
+	log_must stride_dd -o "$mntpnt/direct-write.iso" -i /dev/urandom \
+	    -b $BS -c $NUMBLOCKS -D
+	# Manipulate the buffer contents while reading the file with Direct I/O
+	log_must manipulate_user_buffer -f "$mntpnt/direct-write.iso" \
+	    -n $NUMBLOCKS -b $BS -r
+
+	# Getting new Direct I/O and ARC read counts.
+	curr_dio_rd=$(get_iostats_stat $TESTPOOL1 direct_read_count)
+	curr_arc_rd=$(get_iostats_stat $TESTPOOL1 arc_read_count)
+	total_dio_rd=$((curr_dio_rd - prev_dio_rd))
+	total_arc_rd=$((curr_arc_rd - prev_arc_rd))
+
+	log_note "Making sure there are no checksum errors with the ZPool"
+	log_must check_pool_status $TESTPOOL "errors" "No known data errors"
+
+	log_note "Making sure we have Direct I/O and ARC reads logged"
+	if [[ $total_dio_rd -lt 1 ]]; then
+		log_fail "No Direct I/O reads $total_dio_rd"
+	fi
+	if [[ $total_arc_rd -lt 1 ]]; then
+		log_fail "No ARC reads $total_arc_rd"
+	fi
+
+	log_note "Making sure we have Direct I/O read checksum verifies with ZPool"
+	check_dio_chksum_verify_failures "$TESTPOOL1" "$vdev_type" 1 "rd"
+	destroy_pool $TESTPOOL1
+done
+
+log_pass "Verified checksum verify works for Direct I/O reads."
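Tying this back to the zfs_read() change earlier in the set: a failed Direct I/O read verify is not returned to the application; the remainder of the request is reissued through the ARC while the failure is still counted against the vdevs. A rough illustration of what that looks like from userspace (file and pool names are examples):

    dd if=/testpool1/fs/direct-write.iso of=/dev/null bs=128k iflag=direct   # the read still succeeds
    zpool status -d testpool1    # but the Direct I/O verify counters on the affected vdevs increase
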
log_pass "Verified checksum verify works for Direct I/O reads."
|
@ -46,7 +46,7 @@ verify_runnable "global"
|
|||||||
function cleanup
|
function cleanup
|
||||||
{
|
{
|
||||||
log_must rm -f "$mntpnt/direct-write.iso"
|
log_must rm -f "$mntpnt/direct-write.iso"
|
||||||
check_dio_write_chksum_verify_failures $TESTPOOL "raidz" 0
|
check_dio_chksum_verify_failures $TESTPOOL "raidz" 0 "wr"
|
||||||
}
|
}
|
||||||
|
|
||||||
log_assert "Verify stable pages work for Direct I/O writes."
|
log_assert "Verify stable pages work for Direct I/O writes."
|
||||||
@ -76,8 +76,8 @@ do
|
|||||||
|
|
||||||
# Manipulate the user's buffer while running O_DIRECT write
|
# Manipulate the user's buffer while running O_DIRECT write
|
||||||
# workload with the buffer.
|
# workload with the buffer.
|
||||||
log_must manipulate_user_buffer -o "$mntpnt/direct-write.iso" \
|
log_must manipulate_user_buffer -f "$mntpnt/direct-write.iso" \
|
||||||
-n $NUMBLOCKS -b $BS
|
-n $NUMBLOCKS -b $BS -w
|
||||||
|
|
||||||
# Reading back the contents of the file
|
# Reading back the contents of the file
|
||||||
log_must stride_dd -i $mntpnt/direct-write.iso -o /dev/null \
|
log_must stride_dd -i $mntpnt/direct-write.iso -o /dev/null \
|
||||||
|
@ -91,8 +91,8 @@ log_must set_tunable32 VDEV_DIRECT_WR_VERIFY 0
|
|||||||
log_note "Verifying no panics for Direct I/O writes with compression"
|
log_note "Verifying no panics for Direct I/O writes with compression"
|
||||||
log_must zfs set compression=on $TESTPOOL/$TESTFS
|
log_must zfs set compression=on $TESTPOOL/$TESTFS
|
||||||
prev_dio_wr=$(get_iostats_stat $TESTPOOL direct_write_count)
|
prev_dio_wr=$(get_iostats_stat $TESTPOOL direct_write_count)
|
||||||
log_must manipulate_user_buffer -o "$mntpnt/direct-write.iso" -n $NUMBLOCKS \
|
log_must manipulate_user_buffer -f "$mntpnt/direct-write.iso" -n $NUMBLOCKS \
|
||||||
-b $BS
|
-b $BS -w
|
||||||
curr_dio_wr=$(get_iostats_stat $TESTPOOL direct_write_count)
|
curr_dio_wr=$(get_iostats_stat $TESTPOOL direct_write_count)
|
||||||
total_dio_wr=$((curr_dio_wr - prev_dio_wr))
|
total_dio_wr=$((curr_dio_wr - prev_dio_wr))
|
||||||
|
|
||||||
@ -116,8 +116,8 @@ for i in $(seq 1 $ITERATIONS); do
|
|||||||
$i of $ITERATIONS with zfs_vdev_direct_write_verify=0"
|
$i of $ITERATIONS with zfs_vdev_direct_write_verify=0"
|
||||||
|
|
||||||
prev_dio_wr=$(get_iostats_stat $TESTPOOL direct_write_count)
|
prev_dio_wr=$(get_iostats_stat $TESTPOOL direct_write_count)
|
||||||
log_must manipulate_user_buffer -o "$mntpnt/direct-write.iso" \
|
log_must manipulate_user_buffer -f "$mntpnt/direct-write.iso" \
|
||||||
-n $NUMBLOCKS -b $BS
|
-n $NUMBLOCKS -b $BS -w
|
||||||
|
|
||||||
# Reading file back to verify checksum errors
|
# Reading file back to verify checksum errors
|
||||||
filesize=$(get_file_size "$mntpnt/direct-write.iso")
|
filesize=$(get_file_size "$mntpnt/direct-write.iso")
|
||||||
@ -144,7 +144,7 @@ for i in $(seq 1 $ITERATIONS); do
|
|||||||
fi
|
fi
|
||||||
log_note "Making sure we have no Direct I/O write checksum verifies \
|
log_note "Making sure we have no Direct I/O write checksum verifies \
|
||||||
with ZPool"
|
with ZPool"
|
||||||
check_dio_write_chksum_verify_failures $TESTPOOL "raidz" 0
|
check_dio_chksum_verify_failures $TESTPOOL "raidz" 0 "wr"
|
||||||
|
|
||||||
log_must rm -f "$mntpnt/direct-write.iso"
|
log_must rm -f "$mntpnt/direct-write.iso"
|
||||||
done
|
done
|
||||||
@ -166,8 +166,8 @@ for i in $(seq 1 $ITERATIONS); do
|
|||||||
$ITERATIONS with zfs_vdev_direct_write_verify=1"
|
$ITERATIONS with zfs_vdev_direct_write_verify=1"
|
||||||
|
|
||||||
prev_dio_wr=$(get_iostats_stat $TESTPOOL direct_write_count)
|
prev_dio_wr=$(get_iostats_stat $TESTPOOL direct_write_count)
|
||||||
log_must manipulate_user_buffer -o "$mntpnt/direct-write.iso" \
|
log_must manipulate_user_buffer -f "$mntpnt/direct-write.iso" \
|
||||||
-n $NUMBLOCKS -b $BS -e
|
-n $NUMBLOCKS -b $BS -e -w
|
||||||
|
|
||||||
# Reading file back to verify there no are checksum errors
|
# Reading file back to verify there no are checksum errors
|
||||||
filesize=$(get_file_size "$mntpnt/direct-write.iso")
|
filesize=$(get_file_size "$mntpnt/direct-write.iso")
|
||||||
@ -175,7 +175,7 @@ for i in $(seq 1 $ITERATIONS); do
|
|||||||
log_must stride_dd -i "$mntpnt/direct-write.iso" -o /dev/null -b $BS \
|
log_must stride_dd -i "$mntpnt/direct-write.iso" -o /dev/null -b $BS \
|
||||||
-c $num_blocks
|
-c $num_blocks
|
||||||
|
|
||||||
# Getting new Direct I/O and ARC Write counts.
|
# Getting new Direct I/O write counts.
|
||||||
curr_dio_wr=$(get_iostats_stat $TESTPOOL direct_write_count)
|
curr_dio_wr=$(get_iostats_stat $TESTPOOL direct_write_count)
|
||||||
total_dio_wr=$((curr_dio_wr - prev_dio_wr))
|
total_dio_wr=$((curr_dio_wr - prev_dio_wr))
|
||||||
|
|
||||||
@ -188,7 +188,7 @@ for i in $(seq 1 $ITERATIONS); do
|
|||||||
fi
|
fi
|
||||||
|
|
||||||
log_note "Making sure we have Direct I/O write checksum verifies with ZPool"
|
log_note "Making sure we have Direct I/O write checksum verifies with ZPool"
|
||||||
check_dio_write_chksum_verify_failures "$TESTPOOL" "raidz" 1
|
check_dio_chksum_verify_failures "$TESTPOOL" "raidz" 1 "wr"
|
||||||
done
|
done
|
||||||
|
|
||||||
log_must rm -f "$mntpnt/direct-write.iso"
|
log_must rm -f "$mntpnt/direct-write.iso"
|
@@ -48,6 +48,8 @@ function cleanup
 	log_must set_tunable32 RESILVER_MIN_TIME_MS $ORIG_RESILVER_MIN_TIME
 	log_must set_tunable32 SCAN_SUSPEND_PROGRESS \
 	    $ORIG_SCAN_SUSPEND_PROGRESS
+	log_must set_tunable32 RESILVER_DEFER_PERCENT \
+	    $ORIG_RESILVER_DEFER_PERCENT
 	log_must set_tunable32 ZEVENT_LEN_MAX $ORIG_ZFS_ZEVENT_LEN_MAX
 	log_must zinject -c all
 	destroy_pool $TESTPOOL1
@@ -90,6 +92,7 @@ log_assert "Check for unnecessary resilver restarts"

 ORIG_RESILVER_MIN_TIME=$(get_tunable RESILVER_MIN_TIME_MS)
 ORIG_SCAN_SUSPEND_PROGRESS=$(get_tunable SCAN_SUSPEND_PROGRESS)
+ORIG_RESILVER_DEFER_PERCENT=$(get_tunable RESILVER_DEFER_PERCENT)
 ORIG_ZFS_ZEVENT_LEN_MAX=$(get_tunable ZEVENT_LEN_MAX)

 set -A RESTARTS -- '1' '2' '2' '2'
@@ -117,5 +117,18 @@ log_must zfs set snapdev=visible $TESTPOOL
 verify_inherited 'snapdev' 'hidden' $SUBZVOL $VOLFS
 blockdev_missing $SUBSNAPDEV
 blockdev_exists $SNAPDEV
+log_must zfs destroy $SNAP
+
+# 4. Verify "rename" is correctly reflected when "snapdev=visible"
+# 4.1 First create a snapshot and verify the device is present
+log_must zfs snapshot $SNAP
+log_must zfs set snapdev=visible $ZVOL
+blockdev_exists $SNAPDEV
+# 4.2 rename the snapshot and verify the devices are updated
+log_must zfs rename $SNAP $SNAP-new
+blockdev_missing $SNAPDEV
+blockdev_exists $SNAPDEV-new
+# 4.3 cleanup
+log_must zfs destroy $SNAP-new

 log_pass "ZFS volume property 'snapdev' works as expected"
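The new test case 4 above exercises the dsl_dataset_rename_snapshot_sync_impl() change from earlier in this diff: zvol_rename_minors() is now handed full dataset@snapshot names, so renaming a visible zvol snapshot moves its device node. Roughly, with example pool and volume names:

    zfs set snapdev=visible tank/vol
    zfs snapshot tank/vol@snap
    ls /dev/zvol/tank/vol@snap          # node exists
    zfs rename tank/vol@snap tank/vol@snap-new
    ls /dev/zvol/tank/vol@snap-new      # old node disappears, renamed node appears
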
udev/vdev_id (18 lines changed)

@@ -124,6 +124,7 @@ TOPOLOGY=
 BAY=
 ENCL_ID=""
 UNIQ_ENCL_ID=""
+ZPAD=1

 usage() {
 	cat << EOF
@@ -154,7 +155,7 @@ map_slot() {
 	if [ -z "$MAPPED_SLOT" ] ; then
 		MAPPED_SLOT=$LINUX_SLOT
 	fi
-	printf "%d" "${MAPPED_SLOT}"
+	printf "%0${ZPAD}d" "${MAPPED_SLOT}"
 }

 map_channel() {
@@ -430,6 +431,15 @@ sas_handler() {
 			d=$(eval echo '$'{$i})
 			SLOT=$(echo "$d" | sed -e 's/^.*://')
 			;;
+		"bay_lun")
+			# Like 'bay' but with the LUN number appended. Added for SAS
+			# multi-actuator HDDs, where one physical drive has multiple
+			# LUNs, thus multiple logical drives share the same bay number
+			i=$((i + 2))
+			d=$(eval echo '$'{$i})
+			LUN="-lun$(echo "$d" | sed -e 's/^.*://')"
+			SLOT=$(cat "$end_device_dir/bay_identifier" 2>/dev/null)
+			;;
 		"ses")
 			# look for this SAS path in all SCSI Enclosure Services
 			# (SES) enclosures
@@ -460,7 +470,7 @@ sas_handler() {
 		if [ -z "$CHAN" ] ; then
 			return
 		fi
-		echo "${CHAN}"-"${JBOD}"-"${SLOT}${PART}"
+		echo "${CHAN}"-"${JBOD}"-"${SLOT}${LUN}${PART}"
 	else
 		CHAN=$(map_channel "$PCI_ID" "$PORT")
 		SLOT=$(map_slot "$SLOT" "$CHAN")
@@ -468,7 +478,7 @@ sas_handler() {
 		if [ -z "$CHAN" ] ; then
 			return
 		fi
-		echo "${CHAN}${SLOT}${PART}"
+		echo "${CHAN}${SLOT}${LUN}${PART}"
 	fi
 }

@@ -748,6 +758,8 @@ if [ -z "$BAY" ] ; then
 	BAY=$(awk '($1 == "slot") {print $2; exit}' "$CONFIG")
 fi

+ZPAD=$(awk '($1 == "zpad_slot") {print $2; exit}' "$CONFIG")
+
 TOPOLOGY=${TOPOLOGY:-sas_direct}

 # Should we create /dev/by-enclosure symlinks?
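Both new knobs above are driven from /etc/zfs/vdev_id.conf: zpad_slot pads the slot number that map_slot() prints, and the bay_lun slot mode appends a -lunN suffix for multi-actuator drives. A hypothetical configuration sketch, with made-up channel/PCI values, might look like:

    # /etc/zfs/vdev_id.conf (illustrative values only)
    topology    sas_direct
    slot        bay_lun     # bay number plus a "-lunN" suffix per logical drive
    zpad_slot   2           # zero-pad slots to two digits, e.g. A01-lun0
    channel     85:00.0  1  A
    channel     86:00.0  1  B
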