Remove unnecessary CSTYLED escapes on top-level macro invocations

cstyle can handle these cases now, so we don't need to disable it.

Sponsored-by: https://despairlabs.com/sponsor/
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Rob Norris <robn@despairlabs.com>
Closes #16840
Rob Norris 2024-12-04 17:49:09 +11:00 committed by Brian Behlendorf
parent 73a73cba71
commit 0d51852ec7
43 changed files with 5 additions and 196 deletions
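For context: the pattern at issue is a macro invoked at file scope to generate code. Such invocations carry no trailing semicolon, so cstyle previously could not parse them and they had to be fenced with CSTYLED escape comments. A minimal hypothetical illustration (the names below are invented, not from this patch):

    struct conf { int timeout; unsigned retries; };

    /* Hypothetical generator macro, illustration only. */
    #define DEFINE_GETTER(name, type) \
    type \
    get_##name(const struct conf *c) \
    { \
        return (c->name); \
    }

    /*
     * Top-level invocations like these are what the commit title
     * refers to: each expands to a whole function definition.
     */
    DEFINE_GETTER(timeout, int)
    DEFINE_GETTER(retries, unsigned)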


@@ -31,9 +31,9 @@
 #include_next <sys/sdt.h>
 #ifdef KDTRACE_HOOKS
-/* BEGIN CSTYLED */
 SDT_PROBE_DECLARE(sdt, , , set__error);
+/* BEGIN CSTYLED */
 #define SET_ERROR(err) ({ \
     SDT_PROBE1(sdt, , , set__error, (uintptr_t)err); \
     err; \
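Note the escape here appears to move rather than vanish: the bare SDT_PROBE_DECLARE() invocation no longer needs it, while the SET_ERROR() definition presumably still does because of its multi-line statement-expression body. SET_ERROR() is OpenZFS's idiom for marking the point where an error code originates so the sdt probe can fire; a typical call site looks like this (illustrative, not from this patch):

    if (nbytes > SPA_MAXBLOCKSIZE)
        return (SET_ERROR(EINVAL));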


@@ -35,7 +35,6 @@
     (void) __atomic_add_fetch(target, 1, __ATOMIC_SEQ_CST); \
 }
-/* BEGIN CSTYLED */
 ATOMIC_INC(8, uint8_t)
 ATOMIC_INC(16, uint16_t)
 ATOMIC_INC(32, uint32_t)
@@ -44,7 +43,6 @@ ATOMIC_INC(uchar, uchar_t)
 ATOMIC_INC(ushort, ushort_t)
 ATOMIC_INC(uint, uint_t)
 ATOMIC_INC(ulong, ulong_t)
-/* END CSTYLED */
 #define ATOMIC_DEC(name, type) \
@@ -53,7 +51,6 @@ ATOMIC_INC(ulong, ulong_t)
     (void) __atomic_sub_fetch(target, 1, __ATOMIC_SEQ_CST); \
 }
-/* BEGIN CSTYLED */
 ATOMIC_DEC(8, uint8_t)
 ATOMIC_DEC(16, uint16_t)
 ATOMIC_DEC(32, uint32_t)
@@ -62,7 +59,6 @@ ATOMIC_DEC(uchar, uchar_t)
 ATOMIC_DEC(ushort, ushort_t)
 ATOMIC_DEC(uint, uint_t)
 ATOMIC_DEC(ulong, ulong_t)
-/* END CSTYLED */
 #define ATOMIC_ADD(name, type1, type2) \
@@ -77,7 +73,6 @@ atomic_add_ptr(volatile void *target, ssize_t bits)
     (void) __atomic_add_fetch((void **)target, bits, __ATOMIC_SEQ_CST);
 }
-/* BEGIN CSTYLED */
 ATOMIC_ADD(8, uint8_t, int8_t)
 ATOMIC_ADD(16, uint16_t, int16_t)
 ATOMIC_ADD(32, uint32_t, int32_t)
@@ -86,7 +81,6 @@ ATOMIC_ADD(char, uchar_t, signed char)
 ATOMIC_ADD(short, ushort_t, short)
 ATOMIC_ADD(int, uint_t, int)
 ATOMIC_ADD(long, ulong_t, long)
-/* END CSTYLED */
 #define ATOMIC_SUB(name, type1, type2) \
@@ -101,7 +95,6 @@ atomic_sub_ptr(volatile void *target, ssize_t bits)
     (void) __atomic_sub_fetch((void **)target, bits, __ATOMIC_SEQ_CST);
 }
-/* BEGIN CSTYLED */
 ATOMIC_SUB(8, uint8_t, int8_t)
 ATOMIC_SUB(16, uint16_t, int16_t)
 ATOMIC_SUB(32, uint32_t, int32_t)
@@ -110,7 +103,6 @@ ATOMIC_SUB(char, uchar_t, signed char)
 ATOMIC_SUB(short, ushort_t, short)
 ATOMIC_SUB(int, uint_t, int)
 ATOMIC_SUB(long, ulong_t, long)
-/* END CSTYLED */
 #define ATOMIC_OR(name, type) \
@@ -119,7 +111,6 @@ ATOMIC_SUB(long, ulong_t, long)
     (void) __atomic_or_fetch(target, bits, __ATOMIC_SEQ_CST); \
 }
-/* BEGIN CSTYLED */
 ATOMIC_OR(8, uint8_t)
 ATOMIC_OR(16, uint16_t)
 ATOMIC_OR(32, uint32_t)
@@ -128,7 +119,6 @@ ATOMIC_OR(uchar, uchar_t)
 ATOMIC_OR(ushort, ushort_t)
 ATOMIC_OR(uint, uint_t)
 ATOMIC_OR(ulong, ulong_t)
-/* END CSTYLED */
 #define ATOMIC_AND(name, type) \
@@ -137,7 +127,6 @@ ATOMIC_OR(ulong, ulong_t)
     (void) __atomic_and_fetch(target, bits, __ATOMIC_SEQ_CST); \
 }
-/* BEGIN CSTYLED */
 ATOMIC_AND(8, uint8_t)
 ATOMIC_AND(16, uint16_t)
 ATOMIC_AND(32, uint32_t)
@@ -146,7 +135,6 @@ ATOMIC_AND(uchar, uchar_t)
 ATOMIC_AND(ushort, ushort_t)
 ATOMIC_AND(uint, uint_t)
 ATOMIC_AND(ulong, ulong_t)
-/* END CSTYLED */
 /*
@@ -159,7 +147,6 @@ ATOMIC_AND(ulong, ulong_t)
     return (__atomic_add_fetch(target, 1, __ATOMIC_SEQ_CST)); \
 }
-/* BEGIN CSTYLED */
 ATOMIC_INC_NV(8, uint8_t)
 ATOMIC_INC_NV(16, uint16_t)
 ATOMIC_INC_NV(32, uint32_t)
@@ -168,7 +155,6 @@ ATOMIC_INC_NV(uchar, uchar_t)
 ATOMIC_INC_NV(ushort, ushort_t)
 ATOMIC_INC_NV(uint, uint_t)
 ATOMIC_INC_NV(ulong, ulong_t)
-/* END CSTYLED */
 #define ATOMIC_DEC_NV(name, type) \
@@ -177,7 +163,6 @@ ATOMIC_INC_NV(ulong, ulong_t)
     return (__atomic_sub_fetch(target, 1, __ATOMIC_SEQ_CST)); \
 }
-/* BEGIN CSTYLED */
 ATOMIC_DEC_NV(8, uint8_t)
 ATOMIC_DEC_NV(16, uint16_t)
 ATOMIC_DEC_NV(32, uint32_t)
@@ -186,7 +171,6 @@ ATOMIC_DEC_NV(uchar, uchar_t)
 ATOMIC_DEC_NV(ushort, ushort_t)
 ATOMIC_DEC_NV(uint, uint_t)
 ATOMIC_DEC_NV(ulong, ulong_t)
-/* END CSTYLED */
 #define ATOMIC_ADD_NV(name, type1, type2) \
@@ -201,7 +185,6 @@ atomic_add_ptr_nv(volatile void *target, ssize_t bits)
     return (__atomic_add_fetch((void **)target, bits, __ATOMIC_SEQ_CST));
 }
-/* BEGIN CSTYLED */
 ATOMIC_ADD_NV(8, uint8_t, int8_t)
 ATOMIC_ADD_NV(16, uint16_t, int16_t)
 ATOMIC_ADD_NV(32, uint32_t, int32_t)
@@ -210,7 +193,6 @@ ATOMIC_ADD_NV(char, uchar_t, signed char)
 ATOMIC_ADD_NV(short, ushort_t, short)
 ATOMIC_ADD_NV(int, uint_t, int)
 ATOMIC_ADD_NV(long, ulong_t, long)
-/* END CSTYLED */
 #define ATOMIC_SUB_NV(name, type1, type2) \
@@ -225,7 +207,6 @@ atomic_sub_ptr_nv(volatile void *target, ssize_t bits)
     return (__atomic_sub_fetch((void **)target, bits, __ATOMIC_SEQ_CST));
 }
-/* BEGIN CSTYLED */
 ATOMIC_SUB_NV(8, uint8_t, int8_t)
 ATOMIC_SUB_NV(char, uchar_t, signed char)
 ATOMIC_SUB_NV(16, uint16_t, int16_t)
@@ -234,7 +215,6 @@ ATOMIC_SUB_NV(32, uint32_t, int32_t)
 ATOMIC_SUB_NV(int, uint_t, int)
 ATOMIC_SUB_NV(long, ulong_t, long)
 ATOMIC_SUB_NV(64, uint64_t, int64_t)
-/* END CSTYLED */
 #define ATOMIC_OR_NV(name, type) \
@@ -243,7 +223,6 @@ ATOMIC_SUB_NV(64, uint64_t, int64_t)
     return (__atomic_or_fetch(target, bits, __ATOMIC_SEQ_CST)); \
 }
-/* BEGIN CSTYLED */
 ATOMIC_OR_NV(8, uint8_t)
 ATOMIC_OR_NV(16, uint16_t)
 ATOMIC_OR_NV(32, uint32_t)
@@ -252,7 +231,6 @@ ATOMIC_OR_NV(uchar, uchar_t)
 ATOMIC_OR_NV(ushort, ushort_t)
 ATOMIC_OR_NV(uint, uint_t)
 ATOMIC_OR_NV(ulong, ulong_t)
-/* END CSTYLED */
 #define ATOMIC_AND_NV(name, type) \
@@ -261,7 +239,6 @@ ATOMIC_OR_NV(ulong, ulong_t)
     return (__atomic_and_fetch(target, bits, __ATOMIC_SEQ_CST)); \
 }
-/* BEGIN CSTYLED */
 ATOMIC_AND_NV(8, uint8_t)
 ATOMIC_AND_NV(16, uint16_t)
 ATOMIC_AND_NV(32, uint32_t)
@@ -270,7 +247,6 @@ ATOMIC_AND_NV(uchar, uchar_t)
 ATOMIC_AND_NV(ushort, ushort_t)
 ATOMIC_AND_NV(uint, uint_t)
 ATOMIC_AND_NV(ulong, ulong_t)
-/* END CSTYLED */
 /*
@@ -300,7 +276,6 @@ atomic_cas_ptr(volatile void *target, void *exp, void *des)
     return (exp);
 }
-/* BEGIN CSTYLED */
 ATOMIC_CAS(8, uint8_t)
 ATOMIC_CAS(16, uint16_t)
 ATOMIC_CAS(32, uint32_t)
@@ -309,7 +284,6 @@ ATOMIC_CAS(uchar, uchar_t)
 ATOMIC_CAS(ushort, ushort_t)
 ATOMIC_CAS(uint, uint_t)
 ATOMIC_CAS(ulong, ulong_t)
-/* END CSTYLED */
 /*
@@ -322,7 +296,6 @@ ATOMIC_CAS(ulong, ulong_t)
     return (__atomic_exchange_n(target, bits, __ATOMIC_SEQ_CST)); \
 }
-/* BEGIN CSTYLED */
 ATOMIC_SWAP(8, uint8_t)
 ATOMIC_SWAP(16, uint16_t)
 ATOMIC_SWAP(32, uint32_t)
@@ -331,7 +304,6 @@ ATOMIC_SWAP(uchar, uchar_t)
 ATOMIC_SWAP(ushort, ushort_t)
 ATOMIC_SWAP(uint, uint_t)
 ATOMIC_SWAP(ulong, ulong_t)
-/* END CSTYLED */
 void *
 atomic_swap_ptr(volatile void *target, void *bits)
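Each ATOMIC_* macro above stamps out one function per name/type pair, so the invocation lists are function definitions in disguise. As a concrete illustration (reconstructed from the macro bodies visible in these hunks; the naming follows the libspl atomic_inc_8() interface), ATOMIC_INC(8, uint8_t) expands to roughly:

    void
    atomic_inc_8(volatile uint8_t *target)
    {
        (void) __atomic_add_fetch(target, 1, __ATOMIC_SEQ_CST);
    }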


@@ -3281,7 +3281,6 @@ nvs_xdr_nvp_##type(XDR *xdrs, void *ptr, ...) \
 #endif
-/* BEGIN CSTYLED */
 NVS_BUILD_XDRPROC_T(char);
 NVS_BUILD_XDRPROC_T(short);
 NVS_BUILD_XDRPROC_T(u_short);
@@ -3289,7 +3288,6 @@ NVS_BUILD_XDRPROC_T(int);
 NVS_BUILD_XDRPROC_T(u_int);
 NVS_BUILD_XDRPROC_T(longlong_t);
 NVS_BUILD_XDRPROC_T(u_longlong_t);
-/* END CSTYLED */
 /*
  * The format of xdr encoded nvpair is:


@@ -31,5 +31,4 @@
 #include <sys/queue.h>
 #include <sys/sdt.h>
-/* CSTYLED */
 SDT_PROBE_DEFINE1(sdt, , , set__error, "int");


@@ -187,12 +187,10 @@ param_set_arc_max(SYSCTL_HANDLER_ARGS)
     return (0);
 }
-/* BEGIN CSTYLED */
 SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_max,
     CTLTYPE_ULONG | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
     NULL, 0, param_set_arc_max, "LU",
     "Maximum ARC size in bytes (LEGACY)");
-/* END CSTYLED */
 int
 param_set_arc_min(SYSCTL_HANDLER_ARGS)
@@ -218,12 +216,10 @@ param_set_arc_min(SYSCTL_HANDLER_ARGS)
     return (0);
 }
-/* BEGIN CSTYLED */
 SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_min,
     CTLTYPE_ULONG | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
     NULL, 0, param_set_arc_min, "LU",
     "Minimum ARC size in bytes (LEGACY)");
-/* END CSTYLED */
 extern uint_t zfs_arc_free_target;
@@ -252,13 +248,11 @@ param_set_arc_free_target(SYSCTL_HANDLER_ARGS)
  * NOTE: This sysctl is CTLFLAG_RW not CTLFLAG_RWTUN due to its dependency on
  * pagedaemon initialization.
  */
-/* BEGIN CSTYLED */
 SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_free_target,
     CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE,
     NULL, 0, param_set_arc_free_target, "IU",
     "Desired number of free pages below which ARC triggers reclaim"
     " (LEGACY)");
-/* END CSTYLED */
 int
 param_set_arc_no_grow_shift(SYSCTL_HANDLER_ARGS)
@@ -278,84 +272,64 @@ param_set_arc_no_grow_shift(SYSCTL_HANDLER_ARGS)
     return (0);
 }
-/* BEGIN CSTYLED */
 SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_no_grow_shift,
     CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
     NULL, 0, param_set_arc_no_grow_shift, "I",
     "log2(fraction of ARC which must be free to allow growing) (LEGACY)");
-/* END CSTYLED */
 extern uint64_t l2arc_write_max;
-/* BEGIN CSTYLED */
 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_write_max,
     CTLFLAG_RWTUN, &l2arc_write_max, 0,
     "Max write bytes per interval (LEGACY)");
-/* END CSTYLED */
 extern uint64_t l2arc_write_boost;
-/* BEGIN CSTYLED */
 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_write_boost,
     CTLFLAG_RWTUN, &l2arc_write_boost, 0,
     "Extra write bytes during device warmup (LEGACY)");
-/* END CSTYLED */
 extern uint64_t l2arc_headroom;
-/* BEGIN CSTYLED */
 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_headroom,
     CTLFLAG_RWTUN, &l2arc_headroom, 0,
     "Number of max device writes to precache (LEGACY)");
-/* END CSTYLED */
 extern uint64_t l2arc_headroom_boost;
-/* BEGIN CSTYLED */
 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_headroom_boost,
     CTLFLAG_RWTUN, &l2arc_headroom_boost, 0,
     "Compressed l2arc_headroom multiplier (LEGACY)");
-/* END CSTYLED */
 extern uint64_t l2arc_feed_secs;
-/* BEGIN CSTYLED */
 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_feed_secs,
     CTLFLAG_RWTUN, &l2arc_feed_secs, 0,
     "Seconds between L2ARC writing (LEGACY)");
-/* END CSTYLED */
 extern uint64_t l2arc_feed_min_ms;
-/* BEGIN CSTYLED */
 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_feed_min_ms,
     CTLFLAG_RWTUN, &l2arc_feed_min_ms, 0,
     "Min feed interval in milliseconds (LEGACY)");
-/* END CSTYLED */
 extern int l2arc_noprefetch;
-/* BEGIN CSTYLED */
 SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_noprefetch,
     CTLFLAG_RWTUN, &l2arc_noprefetch, 0,
     "Skip caching prefetched buffers (LEGACY)");
-/* END CSTYLED */
 extern int l2arc_feed_again;
-/* BEGIN CSTYLED */
 SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_feed_again,
     CTLFLAG_RWTUN, &l2arc_feed_again, 0,
     "Turbo L2ARC warmup (LEGACY)");
-/* END CSTYLED */
 extern int l2arc_norw;
-/* BEGIN CSTYLED */
 SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_norw,
     CTLFLAG_RWTUN, &l2arc_norw, 0,
     "No reads during writes (LEGACY)");
-/* END CSTYLED */
 static int
 param_get_arc_state_size(SYSCTL_HANDLER_ARGS)
@@ -370,7 +344,6 @@ param_get_arc_state_size(SYSCTL_HANDLER_ARGS)
 extern arc_state_t ARC_anon;
-/* BEGIN CSTYLED */
 SYSCTL_PROC(_vfs_zfs, OID_AUTO, anon_size,
     CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
     &ARC_anon, 0, param_get_arc_state_size, "Q",
@@ -381,11 +354,9 @@ SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_metadata_esize, CTLFLAG_RD,
 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_data_esize, CTLFLAG_RD,
     &ARC_anon.arcs_esize[ARC_BUFC_DATA].rc_count, 0,
     "size of evictable data in anonymous state");
-/* END CSTYLED */
 extern arc_state_t ARC_mru;
-/* BEGIN CSTYLED */
 SYSCTL_PROC(_vfs_zfs, OID_AUTO, mru_size,
     CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
     &ARC_mru, 0, param_get_arc_state_size, "Q",
@@ -396,11 +367,9 @@ SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_metadata_esize, CTLFLAG_RD,
 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_data_esize, CTLFLAG_RD,
     &ARC_mru.arcs_esize[ARC_BUFC_DATA].rc_count, 0,
     "size of evictable data in mru state");
-/* END CSTYLED */
 extern arc_state_t ARC_mru_ghost;
-/* BEGIN CSTYLED */
 SYSCTL_PROC(_vfs_zfs, OID_AUTO, mru_ghost_size,
     CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
     &ARC_mru_ghost, 0, param_get_arc_state_size, "Q",
@@ -411,11 +380,9 @@ SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_metadata_esize, CTLFLAG_RD,
 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_data_esize, CTLFLAG_RD,
     &ARC_mru_ghost.arcs_esize[ARC_BUFC_DATA].rc_count, 0,
     "size of evictable data in mru ghost state");
-/* END CSTYLED */
 extern arc_state_t ARC_mfu;
-/* BEGIN CSTYLED */
 SYSCTL_PROC(_vfs_zfs, OID_AUTO, mfu_size,
     CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
     &ARC_mfu, 0, param_get_arc_state_size, "Q",
@@ -426,11 +393,9 @@ SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_metadata_esize, CTLFLAG_RD,
 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_data_esize, CTLFLAG_RD,
     &ARC_mfu.arcs_esize[ARC_BUFC_DATA].rc_count, 0,
     "size of evictable data in mfu state");
-/* END CSTYLED */
 extern arc_state_t ARC_mfu_ghost;
-/* BEGIN CSTYLED */
 SYSCTL_PROC(_vfs_zfs, OID_AUTO, mfu_ghost_size,
     CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
     &ARC_mfu_ghost, 0, param_get_arc_state_size, "Q",
@@ -441,11 +406,9 @@ SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_metadata_esize, CTLFLAG_RD,
 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_data_esize, CTLFLAG_RD,
     &ARC_mfu_ghost.arcs_esize[ARC_BUFC_DATA].rc_count, 0,
     "size of evictable data in mfu ghost state");
-/* END CSTYLED */
 extern arc_state_t ARC_uncached;
-/* BEGIN CSTYLED */
 SYSCTL_PROC(_vfs_zfs, OID_AUTO, uncached_size,
     CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
     &ARC_uncached, 0, param_get_arc_state_size, "Q",
@@ -456,16 +419,13 @@ SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, uncached_metadata_esize, CTLFLAG_RD,
 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, uncached_data_esize, CTLFLAG_RD,
     &ARC_uncached.arcs_esize[ARC_BUFC_DATA].rc_count, 0,
     "size of evictable data in uncached state");
-/* END CSTYLED */
 extern arc_state_t ARC_l2c_only;
-/* BEGIN CSTYLED */
 SYSCTL_PROC(_vfs_zfs, OID_AUTO, l2c_only_size,
     CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
     &ARC_l2c_only, 0, param_get_arc_state_size, "Q",
     "size of l2c_only state");
-/* END CSTYLED */
 /* dbuf.c */
@@ -477,19 +437,15 @@ SYSCTL_NODE(_vfs_zfs, OID_AUTO, zfetch, CTLFLAG_RW, 0, "ZFS ZFETCH (LEGACY)");
 extern uint32_t zfetch_max_distance;
-/* BEGIN CSTYLED */
 SYSCTL_UINT(_vfs_zfs_zfetch, OID_AUTO, max_distance,
     CTLFLAG_RWTUN, &zfetch_max_distance, 0,
     "Max bytes to prefetch per stream (LEGACY)");
-/* END CSTYLED */
 extern uint32_t zfetch_max_idistance;
-/* BEGIN CSTYLED */
 SYSCTL_UINT(_vfs_zfs_zfetch, OID_AUTO, max_idistance,
     CTLFLAG_RWTUN, &zfetch_max_idistance, 0,
     "Max bytes to prefetch indirects for per stream (LEGACY)");
-/* END CSTYLED */
 /* dsl_pool.c */
@@ -527,12 +483,10 @@ param_set_active_allocator(SYSCTL_HANDLER_ARGS)
  */
 extern int zfs_metaslab_sm_blksz_no_log;
-/* BEGIN CSTYLED */
 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, sm_blksz_no_log,
     CTLFLAG_RDTUN, &zfs_metaslab_sm_blksz_no_log, 0,
     "Block size for space map in pools with log space map disabled. "
     "Power of 2 greater than 4096.");
-/* END CSTYLED */
 /*
  * When the log space map feature is enabled, we accumulate a lot of
@@ -541,12 +495,10 @@ SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, sm_blksz_no_log,
  */
 extern int zfs_metaslab_sm_blksz_with_log;
-/* BEGIN CSTYLED */
 SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, sm_blksz_with_log,
     CTLFLAG_RDTUN, &zfs_metaslab_sm_blksz_with_log, 0,
     "Block size for space map in pools with log space map enabled. "
     "Power of 2 greater than 4096.");
-/* END CSTYLED */
 /*
  * The in-core space map representation is more compact than its on-disk form.
@@ -556,29 +508,23 @@ SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, sm_blksz_with_log,
  */
 extern uint_t zfs_condense_pct;
-/* BEGIN CSTYLED */
 SYSCTL_UINT(_vfs_zfs, OID_AUTO, condense_pct,
     CTLFLAG_RWTUN, &zfs_condense_pct, 0,
     "Condense on-disk spacemap when it is more than this many percents"
     " of in-memory counterpart");
-/* END CSTYLED */
 extern uint_t zfs_remove_max_segment;
-/* BEGIN CSTYLED */
 SYSCTL_UINT(_vfs_zfs, OID_AUTO, remove_max_segment,
     CTLFLAG_RWTUN, &zfs_remove_max_segment, 0,
     "Largest contiguous segment ZFS will attempt to allocate when removing"
     " a device");
-/* END CSTYLED */
 extern int zfs_removal_suspend_progress;
-/* BEGIN CSTYLED */
 SYSCTL_INT(_vfs_zfs, OID_AUTO, removal_suspend_progress,
     CTLFLAG_RWTUN, &zfs_removal_suspend_progress, 0,
     "Ensures certain actions can happen while in the middle of a removal");
-/* END CSTYLED */
 /*
  * Minimum size which forces the dynamic allocator to change
@@ -588,12 +534,10 @@ SYSCTL_INT(_vfs_zfs, OID_AUTO, removal_suspend_progress,
  */
 extern uint64_t metaslab_df_alloc_threshold;
-/* BEGIN CSTYLED */
 SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, df_alloc_threshold,
     CTLFLAG_RWTUN, &metaslab_df_alloc_threshold, 0,
     "Minimum size which forces the dynamic allocator to change its"
     " allocation strategy");
-/* END CSTYLED */
 /*
  * The minimum free space, in percent, which must be available
@@ -603,12 +547,10 @@ SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, df_alloc_threshold,
  */
 extern uint_t metaslab_df_free_pct;
-/* BEGIN CSTYLED */
 SYSCTL_UINT(_vfs_zfs_metaslab, OID_AUTO, df_free_pct,
     CTLFLAG_RWTUN, &metaslab_df_free_pct, 0,
     "The minimum free space, in percent, which must be available in a"
     " space map to continue allocations in a first-fit fashion");
-/* END CSTYLED */
 /* mmp.c */
@@ -631,28 +573,22 @@ param_set_multihost_interval(SYSCTL_HANDLER_ARGS)
 extern int zfs_ccw_retry_interval;
-/* BEGIN CSTYLED */
 SYSCTL_INT(_vfs_zfs, OID_AUTO, ccw_retry_interval,
     CTLFLAG_RWTUN, &zfs_ccw_retry_interval, 0,
     "Configuration cache file write, retry after failure, interval"
     " (seconds)");
-/* END CSTYLED */
 extern uint64_t zfs_max_missing_tvds_cachefile;
-/* BEGIN CSTYLED */
 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, max_missing_tvds_cachefile,
     CTLFLAG_RWTUN, &zfs_max_missing_tvds_cachefile, 0,
     "Allow importing pools with missing top-level vdevs in cache file");
-/* END CSTYLED */
 extern uint64_t zfs_max_missing_tvds_scan;
-/* BEGIN CSTYLED */
 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, max_missing_tvds_scan,
     CTLFLAG_RWTUN, &zfs_max_missing_tvds_scan, 0,
     "Allow importing pools with missing top-level vdevs during scan");
-/* END CSTYLED */
 /* spa_misc.c */
@@ -681,11 +617,9 @@ sysctl_vfs_zfs_debug_flags(SYSCTL_HANDLER_ARGS)
     return (0);
 }
-/* BEGIN CSTYLED */
 SYSCTL_PROC(_vfs_zfs, OID_AUTO, debugflags,
     CTLTYPE_UINT | CTLFLAG_MPSAFE | CTLFLAG_RWTUN, NULL, 0,
     sysctl_vfs_zfs_debug_flags, "IU", "Debug flags for ZFS testing.");
-/* END CSTYLED */
 int
 param_set_deadman_synctime(SYSCTL_HANDLER_ARGS)
@@ -768,10 +702,8 @@ param_set_slop_shift(SYSCTL_HANDLER_ARGS)
 extern int space_map_ibs;
-/* BEGIN CSTYLED */
 SYSCTL_INT(_vfs_zfs, OID_AUTO, space_map_ibs, CTLFLAG_RWTUN,
     &space_map_ibs, 0, "Space map indirect block shift");
-/* END CSTYLED */
 /* vdev.c */
@@ -795,13 +727,11 @@ param_set_min_auto_ashift(SYSCTL_HANDLER_ARGS)
     return (0);
 }
-/* BEGIN CSTYLED */
 SYSCTL_PROC(_vfs_zfs, OID_AUTO, min_auto_ashift,
     CTLTYPE_UINT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
     &zfs_vdev_min_auto_ashift, sizeof (zfs_vdev_min_auto_ashift),
     param_set_min_auto_ashift, "IU",
     "Min ashift used when creating new top-level vdev. (LEGACY)");
-/* END CSTYLED */
 int
 param_set_max_auto_ashift(SYSCTL_HANDLER_ARGS)
@@ -822,14 +752,12 @@ param_set_max_auto_ashift(SYSCTL_HANDLER_ARGS)
     return (0);
 }
-/* BEGIN CSTYLED */
 SYSCTL_PROC(_vfs_zfs, OID_AUTO, max_auto_ashift,
     CTLTYPE_UINT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
     &zfs_vdev_max_auto_ashift, sizeof (zfs_vdev_max_auto_ashift),
     param_set_max_auto_ashift, "IU",
     "Max ashift used when optimizing for logical -> physical sector size on"
     " new top-level vdevs. (LEGACY)");
-/* END CSTYLED */
 /*
  * Since the DTL space map of a vdev is not expected to have a lot of
@@ -837,11 +765,9 @@ SYSCTL_PROC(_vfs_zfs, OID_AUTO, max_auto_ashift,
  */
 extern int zfs_vdev_dtl_sm_blksz;
-/* BEGIN CSTYLED */
 SYSCTL_INT(_vfs_zfs, OID_AUTO, dtl_sm_blksz,
     CTLFLAG_RDTUN, &zfs_vdev_dtl_sm_blksz, 0,
     "Block size for DTL space map. Power of 2 greater than 4096.");
-/* END CSTYLED */
 /*
  * vdev-wide space maps that have lots of entries written to them at
@@ -850,19 +776,15 @@ SYSCTL_INT(_vfs_zfs, OID_AUTO, dtl_sm_blksz,
  */
 extern int zfs_vdev_standard_sm_blksz;
-/* BEGIN CSTYLED */
 SYSCTL_INT(_vfs_zfs, OID_AUTO, standard_sm_blksz,
     CTLFLAG_RDTUN, &zfs_vdev_standard_sm_blksz, 0,
     "Block size for standard space map. Power of 2 greater than 4096.");
-/* END CSTYLED */
 extern int vdev_validate_skip;
-/* BEGIN CSTYLED */
 SYSCTL_INT(_vfs_zfs, OID_AUTO, validate_skip,
     CTLFLAG_RDTUN, &vdev_validate_skip, 0,
     "Enable to bypass vdev_validate().");
-/* END CSTYLED */
 /* vdev_mirror.c */
@@ -870,17 +792,13 @@ SYSCTL_INT(_vfs_zfs, OID_AUTO, validate_skip,
 extern uint_t zfs_vdev_max_active;
-/* BEGIN CSTYLED */
 SYSCTL_UINT(_vfs_zfs, OID_AUTO, top_maxinflight,
     CTLFLAG_RWTUN, &zfs_vdev_max_active, 0,
     "The maximum number of I/Os of all types active for each device."
     " (LEGACY)");
-/* END CSTYLED */
 /* zio.c */
-/* BEGIN CSTYLED */
 SYSCTL_INT(_vfs_zfs_zio, OID_AUTO, exclude_metadata,
     CTLFLAG_RDTUN, &zio_exclude_metadata, 0,
     "Exclude metadata buffers from dumps as well");
-/* END CSTYLED */
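All of the SYSCTL_* lines in this file are FreeBSD's declarative sysctl-registration macros invoked at file scope, each creating a node under vfs.zfs; they are statement-like but live outside any function, which is exactly the construct cstyle used to reject. A minimal self-contained example of the same pattern (hypothetical knob, not part of this patch):

    #include <sys/param.h>
    #include <sys/kernel.h>
    #include <sys/sysctl.h>

    /* Hypothetical read-write tunable, reachable as vfs.zfs.example_knob. */
    static u_int example_knob = 16;
    SYSCTL_UINT(_vfs_zfs, OID_AUTO, example_knob,
        CTLFLAG_RWTUN, &example_knob, 0,
        "An example tunable (illustration only)");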


@@ -1823,7 +1823,6 @@ error:
 }
 #if defined(_KERNEL) && defined(HAVE_SPL)
-/* CSTYLED */
 module_param(zfs_key_max_salt_uses, ulong, 0644);
 MODULE_PARM_DESC(zfs_key_max_salt_uses, "Max number of times a salt value "
     "can be used for generating encryption keys before it is rotated");


@@ -33,7 +33,6 @@
  * But we would still default to the current default of not to do that.
  */
 static unsigned int spl_panic_halt;
-/* CSTYLED */
 module_param(spl_panic_halt, uint, 0644);
 MODULE_PARM_DESC(spl_panic_halt, "Cause kernel panic on assertion failures");


@@ -54,7 +54,6 @@
 unsigned long spl_hostid = 0;
 EXPORT_SYMBOL(spl_hostid);
-/* CSTYLED */
 module_param(spl_hostid, ulong, 0644);
 MODULE_PARM_DESC(spl_hostid, "The system hostid.");
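The Linux-side equivalent of the FreeBSD sysctl declarations is this module_param()/MODULE_PARM_DESC() pair: again bare macro invocations at file scope. A standalone sketch of the pattern, using the standard kernel APIs but a hypothetical parameter name:

    #include <linux/module.h>
    #include <linux/moduleparam.h>

    /* Hypothetical tunable mirroring the spl_hostid declaration above. */
    static unsigned long example_hostid = 0;
    module_param(example_hostid, ulong, 0644);
    MODULE_PARM_DESC(example_hostid, "An example hostid-style tunable");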


@@ -48,7 +48,6 @@
 #define smp_mb__after_atomic(x) smp_mb__after_clear_bit(x)
 #endif
-/* BEGIN CSTYLED */
 /*
  * Cache magazines are an optimization designed to minimize the cost of
  * allocating memory. They do this by keeping a per-cpu cache of recently
@@ -97,7 +96,6 @@ static unsigned int spl_kmem_cache_kmem_threads = 4;
 module_param(spl_kmem_cache_kmem_threads, uint, 0444);
 MODULE_PARM_DESC(spl_kmem_cache_kmem_threads,
     "Number of spl_kmem_cache threads");
-/* END CSTYLED */
 /*
  * Slab allocation interfaces
/* /*
* Slab allocation interfaces * Slab allocation interfaces


@@ -26,7 +26,6 @@
 #include <sys/kmem.h>
 #include <sys/vmem.h>
-/* BEGIN CSTYLED */
 /*
  * As a general rule kmem_alloc() allocations should be small, preferably
  * just a few pages since they must by physically contiguous. Therefore, a
@@ -62,7 +61,6 @@ module_param(spl_kmem_alloc_max, uint, 0644);
 MODULE_PARM_DESC(spl_kmem_alloc_max,
     "Maximum size in bytes for a kmem_alloc()");
 EXPORT_SYMBOL(spl_kmem_alloc_max);
-/* END CSTYLED */
 int
 kmem_debugging(void)


@@ -117,9 +117,7 @@ module_param(spl_taskq_thread_bind, int, 0644);
 MODULE_PARM_DESC(spl_taskq_thread_bind, "Bind taskq thread to CPU by default");
 static uint_t spl_taskq_thread_timeout_ms = 5000;
-/* BEGIN CSTYLED */
 module_param(spl_taskq_thread_timeout_ms, uint, 0644);
-/* END CSTYLED */
 MODULE_PARM_DESC(spl_taskq_thread_timeout_ms,
     "Minimum idle threads exit interval for dynamic taskqs");
@@ -133,9 +131,7 @@ MODULE_PARM_DESC(spl_taskq_thread_priority,
     "Allow non-default priority for taskq threads");
 static uint_t spl_taskq_thread_sequential = 4;
-/* BEGIN CSTYLED */
 module_param(spl_taskq_thread_sequential, uint, 0644);
-/* END CSTYLED */
 MODULE_PARM_DESC(spl_taskq_thread_sequential,
     "Create new taskq threads after N sequential tasks");


@@ -1346,7 +1346,6 @@ MODULE_PARM_DESC(zfs_abd_scatter_enabled,
 module_param(zfs_abd_scatter_min_size, int, 0644);
 MODULE_PARM_DESC(zfs_abd_scatter_min_size,
     "Minimum size of scatter allocations.");
-/* CSTYLED */
 module_param(zfs_abd_scatter_max_order, uint, 0644);
 MODULE_PARM_DESC(zfs_abd_scatter_max_order,
     "Maximum order allocation used for a scatter ABD.");


@@ -214,7 +214,5 @@ __dprintf(boolean_t dprint, const char *file, const char *func,
 module_param(zfs_dbgmsg_enable, int, 0644);
 MODULE_PARM_DESC(zfs_dbgmsg_enable, "Enable ZFS debug message log");
-/* BEGIN CSTYLED */
 module_param(zfs_dbgmsg_maxsize, uint, 0644);
-/* END CSTYLED */
 MODULE_PARM_DESC(zfs_dbgmsg_maxsize, "Maximum ZFS debug log size");


@@ -4345,7 +4345,6 @@ EXPORT_SYMBOL(zfs_putpage);
 EXPORT_SYMBOL(zfs_dirty_inode);
 EXPORT_SYMBOL(zfs_map);
-/* CSTYLED */
 module_param(zfs_delete_blocks, ulong, 0644);
 MODULE_PARM_DESC(zfs_delete_blocks, "Delete files larger than N blocks async");
 #endif


@@ -1967,7 +1967,6 @@ zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
 EXPORT_SYMBOL(zfs_create_fs);
 EXPORT_SYMBOL(zfs_obj_to_path);
-/* CSTYLED */
 module_param(zfs_object_mutex_size, uint, 0644);
 MODULE_PARM_DESC(zfs_object_mutex_size, "Size of znode hold array");
 module_param(zfs_unlink_suspend_progress, int, 0644);


@@ -2073,7 +2073,6 @@ error:
 }
 #if defined(_KERNEL)
-/* CSTYLED */
 module_param(zfs_key_max_salt_uses, ulong, 0644);
 MODULE_PARM_DESC(zfs_key_max_salt_uses, "Max number of times a salt value "
     "can be used for generating encryption keys before it is rotated");


@@ -1143,7 +1143,6 @@ const struct file_operations zpl_dir_file_operations = {
 #endif
 };
-/* CSTYLED */
 module_param(zfs_fallocate_reserve_percent, uint, 0644);
 MODULE_PARM_DESC(zfs_fallocate_reserve_percent,
     "Percentage of length to use for the available capacity check");


@@ -1899,7 +1899,6 @@ zvol_fini(void)
     ida_destroy(&zvol_ida);
 }
-/* BEGIN CSTYLED */
 module_param(zvol_inhibit_dev, uint, 0644);
 MODULE_PARM_DESC(zvol_inhibit_dev, "Do not create zvol device nodes");
@@ -1939,5 +1938,3 @@ MODULE_PARM_DESC(zvol_blk_mq_blocks_per_thread,
 module_param(zvol_open_timeout_ms, uint, 0644);
 MODULE_PARM_DESC(zvol_open_timeout_ms, "Timeout for ZVOL open retries");
 #endif
-/* END CSTYLED */


@@ -185,7 +185,6 @@ zfs_valstr_ ## name(int v, char *out, size_t outlen) \
 /* String tables */
 /* ZIO flags: zio_flag_t, typically zio->io_flags */
-/* BEGIN CSTYLED */
 _VALSTR_BITFIELD_IMPL(zio_flag,
     { '.', "DA", "DONT_AGGREGATE" },
     { '.', "RP", "IO_REPAIR" },
@@ -221,13 +220,11 @@ _VALSTR_BITFIELD_IMPL(zio_flag,
     { '.', "DG", "DELEGATED" },
     { '.', "DC", "DIO_CHKSUM_ERR" },
 )
-/* END CSTYLED */
 /*
  * ZIO pipeline stage(s): enum zio_stage, typically zio->io_stage or
  * zio->io_pipeline.
  */
-/* BEGIN CSTYLED */
 _VALSTR_BITFIELD_IMPL(zio_stage,
     { 'O', "O ", "OPEN" },
     { 'I', "RI", "READ_BP_INIT" },
@@ -257,10 +254,8 @@ _VALSTR_BITFIELD_IMPL(zio_stage,
     { 'C', "DC", "DIO_CHECKSUM_VERIFY" },
     { 'X', "X ", "DONE" },
 )
-/* END CSTYLED */
 /* ZIO priority: zio_priority_t, typically zio->io_priority */
-/* BEGIN CSTYLED */
 _VALSTR_ENUM_IMPL(zio_priority,
     "SYNC_READ",
     "SYNC_WRITE",
@@ -274,7 +269,6 @@ _VALSTR_ENUM_IMPL(zio_priority,
     "[NUM_QUEUEABLE]",
     "NOW",
 )
-/* END CSTYLED */
 #undef _VALSTR_BITFIELD_IMPL
 #undef _VALSTR_ENUM_IMPL
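_VALSTR_BITFIELD_IMPL (its generated signature is partially visible in the hunk header above) produces a zfs_valstr_<name>(int v, char *out, size_t outlen) function that renders each set bit of v as a short code from the table. A simplified, self-contained sketch of the idea, not the actual OpenZFS implementation:

    #include <stdio.h>

    static const char *const example_codes[] = { "DA", "RP", "DG", "DC" };

    /* Render the set bits of v as comma-separated two-letter codes. */
    static void
    example_valstr(int v, char *out, size_t outlen)
    {
        size_t n = 0;

        out[0] = '\0';
        for (int b = 0; b < 4; b++) {
            if (!(v & (1 << b)))
                continue;
            n += snprintf(out + n, outlen - n, "%s%s",
                n > 0 ? "," : "", example_codes[b]);
            if (n >= outlen)
                break;
        }
    }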


@@ -1473,11 +1473,9 @@ brt_unload(spa_t *spa)
     spa->spa_brt_rangesize = 0;
 }
-/* BEGIN CSTYLED */
 ZFS_MODULE_PARAM(zfs_brt, , brt_zap_prefetch, INT, ZMOD_RW,
     "Enable prefetching of BRT ZAP entries");
 ZFS_MODULE_PARAM(zfs_brt, , brt_zap_default_bs, UINT, ZMOD_RW,
     "BRT ZAP leaf blockshift");
 ZFS_MODULE_PARAM(zfs_brt, , brt_zap_default_ibs, UINT, ZMOD_RW,
     "BRT ZAP indirect blockshift");
-/* END CSTYLED */


@@ -2208,8 +2208,6 @@ zfs_btree_verify(zfs_btree_t *tree)
     zfs_btree_verify_poison(tree);
 }
-/* BEGIN CSTYLED */
 ZFS_MODULE_PARAM(zfs, zfs_, btree_verify_intensity, UINT, ZMOD_RW,
     "Enable btree verification. Levels above 4 require ZFS be built "
     "with debugging");
-/* END CSTYLED */


@@ -258,9 +258,7 @@ const ddt_ops_t ddt_zap_ops = {
     ddt_zap_count,
 };
-/* BEGIN CSTYLED */
 ZFS_MODULE_PARAM(zfs_dedup, , ddt_zap_default_bs, UINT, ZMOD_RW,
     "DDT ZAP leaf blockshift");
 ZFS_MODULE_PARAM(zfs_dedup, , ddt_zap_default_ibs, UINT, ZMOD_RW,
     "DDT ZAP indirect blockshift");
-/* END CSTYLED */


@@ -2942,10 +2942,8 @@ ZFS_MODULE_PARAM(zfs, zfs_, per_txg_dirty_frees_percent, UINT, ZMOD_RW,
 ZFS_MODULE_PARAM(zfs, zfs_, dmu_offset_next_sync, INT, ZMOD_RW,
     "Enable forcing txg sync to find holes");
-/* CSTYLED */
 ZFS_MODULE_PARAM(zfs, , dmu_prefetch_max, UINT, ZMOD_RW,
     "Limit one prefetch call to this size");
-/* CSTYLED */
 ZFS_MODULE_PARAM(zfs, , dmu_ddt_copies, UINT, ZMOD_RW,
     "Override copies= for dedup objects");


@@ -519,7 +519,5 @@ EXPORT_SYMBOL(dmu_object_next);
 EXPORT_SYMBOL(dmu_object_zapify);
 EXPORT_SYMBOL(dmu_object_free_zapified);
-/* BEGIN CSTYLED */
 ZFS_MODULE_PARAM(zfs, , dmu_object_alloc_chunk_shift, UINT, ZMOD_RW,
     "CPU-specific allocator grabs 2^N objects at once");
-/* END CSTYLED */
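ZFS_MODULE_PARAM is OpenZFS's platform-neutral tunable macro: the first argument names the FreeBSD sysctl scope, and on Linux the invocation reduces (roughly — the real macro also maps the type and ZMOD_* permission tokens to their kernel equivalents) to a module_param()/MODULE_PARM_DESC() pair. For the parameter above, the approximate Linux expansion would be:

    /* Approximate Linux expansion of the ZFS_MODULE_PARAM line above. */
    module_param(dmu_object_alloc_chunk_shift, uint, 0644);
    MODULE_PARM_DESC(dmu_object_alloc_chunk_shift,
        "CPU-specific allocator grabs 2^N objects at once");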


@@ -3843,4 +3843,3 @@ ZFS_MODULE_PARAM(zfs_recv, zfs_recv_, write_batch_size, UINT, ZMOD_RW,
 ZFS_MODULE_PARAM(zfs_recv, zfs_recv_, best_effort_corrective, INT, ZMOD_RW,
     "Ignore errors during corrective receive");
-/* END CSTYLED */


@@ -818,6 +818,5 @@ MODULE_PARM_DESC(ignore_hole_birth,
     "Alias for send_holes_without_birth_time");
 #endif
-/* CSTYLED */
 ZFS_MODULE_PARAM(zfs, , send_holes_without_birth_time, INT, ZMOD_RW,
     "Ignore hole_birth txg for zfs send");


@@ -2494,6 +2494,5 @@ EXPORT_SYMBOL(dsl_dir_set_quota);
 EXPORT_SYMBOL(dsl_dir_set_reservation);
 #endif
-/* CSTYLED */
 ZFS_MODULE_PARAM(zfs, , zvol_enforce_quotas, INT, ZMOD_RW,
     "Enable strict ZVOL quota enforcment");


@@ -5345,4 +5345,3 @@ ZFS_MODULE_PARAM(zfs, zfs_, resilver_defer_percent, UINT, ZMOD_RW,
 ZFS_MODULE_PARAM(zfs, zfs_, scrub_error_blocks_per_txg, UINT, ZMOD_RW,
     "Error blocks to be scrubbed in one txg");
-/* END CSTYLED */


@@ -6226,7 +6226,6 @@ ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, unload_delay, UINT, ZMOD_RW,
 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, unload_delay_ms, UINT, ZMOD_RW,
     "Delay in milliseconds after metaslab was last used before unloading");
-/* BEGIN CSTYLED */
 ZFS_MODULE_PARAM(zfs_mg, zfs_mg_, noalloc_threshold, UINT, ZMOD_RW,
     "Percentage of metaslab group size that should be free to make it "
     "eligible for allocation");
@@ -6239,7 +6238,6 @@ ZFS_MODULE_PARAM(zfs_mg, zfs_mg_, fragmentation_threshold, UINT, ZMOD_RW,
 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, fragmentation_factor_enabled, INT,
     ZMOD_RW,
     "Use the fragmentation metric to prefer less fragmented metaslabs");
-/* END CSTYLED */
 ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, fragmentation_threshold, UINT,
     ZMOD_RW, "Fragmentation for metaslab to allow allocation");
@@ -6280,8 +6278,6 @@ ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, try_hard_before_gang, INT,
 ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, find_max_tries, UINT, ZMOD_RW,
     "Normally only consider this many of the best metaslabs in each vdev");
-/* BEGIN CSTYLED */
 ZFS_MODULE_PARAM_CALL(zfs, zfs_, active_allocator,
     param_set_active_allocator, param_get_charp, ZMOD_RW,
     "SPA active allocator");
-/* END CSTYLED */


@@ -736,11 +736,9 @@ mmp_signal_all_threads(void)
     mutex_exit(&spa_namespace_lock);
 }
-/* BEGIN CSTYLED */
 ZFS_MODULE_PARAM_CALL(zfs_multihost, zfs_multihost_, interval,
     param_set_multihost_interval, spl_param_get_u64, ZMOD_RW,
     "Milliseconds between mmp writes to each leaf");
-/* END CSTYLED */
 ZFS_MODULE_PARAM(zfs_multihost, zfs_multihost_, fail_intervals, UINT, ZMOD_RW,
     "Max allowed period without a successful mmp write");


@@ -349,11 +349,9 @@ EXPORT_SYMBOL(zfs_refcount_add);
 EXPORT_SYMBOL(zfs_refcount_remove);
 EXPORT_SYMBOL(zfs_refcount_held);
-/* BEGIN CSTYLED */
 ZFS_MODULE_PARAM(zfs, , reference_tracking_enable, INT, ZMOD_RW,
     "Track reference holders to refcount_t objects");
 ZFS_MODULE_PARAM(zfs, , reference_history, UINT, ZMOD_RW,
     "Maximum reference holders being tracked");
-/* END CSTYLED */
 #endif /* ZFS_DEBUG */


@@ -11011,11 +11011,9 @@ EXPORT_SYMBOL(spa_event_notify);
 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, preload_pct, UINT, ZMOD_RW,
     "Percentage of CPUs to run a metaslab preload taskq");
-/* BEGIN CSTYLED */
 ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_shift, UINT, ZMOD_RW,
     "log2 fraction of arc that can be used by inflight I/Os when "
     "verifying pool during import");
-/* END CSTYLED */
 ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_metadata, INT, ZMOD_RW,
     "Set to traverse metadata on pool import");
@@ -11032,11 +11030,9 @@ ZFS_MODULE_PARAM(zfs_zio, zio_, taskq_batch_pct, UINT, ZMOD_RW,
 ZFS_MODULE_PARAM(zfs_zio, zio_, taskq_batch_tpq, UINT, ZMOD_RW,
     "Number of threads per IO worker taskqueue");
-/* BEGIN CSTYLED */
 ZFS_MODULE_PARAM(zfs, zfs_, max_missing_tvds, U64, ZMOD_RW,
     "Allow importing pool with up to this number of missing top-level "
     "vdevs (in read-only mode)");
-/* END CSTYLED */
 ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, zthr_pause, INT,
     ZMOD_RW, "Set the livelist condense zthr to pause");
@@ -11044,7 +11040,6 @@ ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, zthr_pause, INT,
 ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, sync_pause, INT,
     ZMOD_RW, "Set the livelist condense synctask to pause");
-/* BEGIN CSTYLED */
 ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, sync_cancel,
     INT, ZMOD_RW,
     "Whether livelist condensing was canceled in the synctask");
@@ -11066,7 +11061,6 @@ ZFS_MODULE_VIRTUAL_PARAM_CALL(zfs_zio, zio_, taskq_write,
     spa_taskq_write_param_set, spa_taskq_write_param_get, ZMOD_RW,
     "Configure IO queues for write IO");
 #endif
-/* END CSTYLED */
 ZFS_MODULE_PARAM(zfs_zio, zio_, taskq_write_tpq, UINT, ZMOD_RW,
     "Number of CPUs per write issue taskq");


@@ -633,8 +633,6 @@ EXPORT_SYMBOL(spa_checkpoint_get_stats);
 EXPORT_SYMBOL(spa_checkpoint_discard_thread);
 EXPORT_SYMBOL(spa_checkpoint_discard_thread_check);
-/* BEGIN CSTYLED */
 ZFS_MODULE_PARAM(zfs_spa, zfs_spa_, discard_memory_limit, U64, ZMOD_RW,
     "Limit for memory used in prefetching the checkpoint space map done "
     "on each vdev while discarding the checkpoint");
-/* END CSTYLED */


@@ -1491,8 +1491,6 @@ EXPORT_SYMBOL(zep_to_zb);
 EXPORT_SYMBOL(name_to_errphys);
 #endif
-/* BEGIN CSTYLED */
 ZFS_MODULE_PARAM(zfs_spa, spa_, upgrade_errlog_limit, UINT, ZMOD_RW,
     "Limit the number of errors which will be upgraded to the new "
     "on-disk error log when enabling head_errlog");
-/* END CSTYLED */


@@ -1364,7 +1364,6 @@ spa_ld_log_spacemaps(spa_t *spa)
     return (error);
 }
-/* BEGIN CSTYLED */
 ZFS_MODULE_PARAM(zfs, zfs_, unflushed_max_mem_amt, U64, ZMOD_RW,
     "Specific hard-limit in memory that ZFS allows to be used for "
     "unflushed changes");
@@ -1399,7 +1398,6 @@ ZFS_MODULE_PARAM(zfs, zfs_, max_log_walking, U64, ZMOD_RW,
 ZFS_MODULE_PARAM(zfs, zfs_, keep_log_spacemaps_at_export, INT, ZMOD_RW,
     "Prevent the log spacemaps from being flushed and destroyed "
     "during pool export/destroy");
-/* END CSTYLED */
 ZFS_MODULE_PARAM(zfs, zfs_, max_logsm_summary_length, U64, ZMOD_RW,
     "Maximum number of rows allowed in the summary of the spacemap log");


@@ -3123,7 +3123,6 @@ ZFS_MODULE_PARAM(zfs, zfs_, ddt_data_is_special, INT, ZMOD_RW,
ZFS_MODULE_PARAM(zfs, zfs_, user_indirect_is_special, INT, ZMOD_RW,
	"Place user data indirect blocks into the special class");

-/* BEGIN CSTYLED */
ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, failmode,
	param_set_deadman_failmode, param_get_charp, ZMOD_RW,
	"Failmode for deadman timer");
@@ -3139,7 +3138,6 @@ ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, ziotime_ms,
ZFS_MODULE_PARAM(zfs, zfs_, special_class_metadata_reserve_pct, UINT, ZMOD_RW,
	"Small file blocks in special vdevs depends on this much "
	"free space available");
-/* END CSTYLED */

ZFS_MODULE_PARAM_CALL(zfs_spa, spa_, slop_shift, param_set_slop_shift,
	param_get_uint, ZMOD_RW, "Reserved free space in pool");


@@ -6551,7 +6551,6 @@ ZFS_MODULE_PARAM(zfs, zfs_, deadman_events_per_second, UINT, ZMOD_RW,
ZFS_MODULE_PARAM(zfs, zfs_, dio_write_verify_events_per_second, UINT, ZMOD_RW,
	"Rate Direct I/O write verify events to this many per second");

-/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, direct_write_verify, UINT, ZMOD_RW,
	"Direct I/O writes will perform for checksum verification before "
	"commiting write");
@@ -6559,7 +6558,6 @@ ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, direct_write_verify, UINT, ZMOD_RW,
ZFS_MODULE_PARAM(zfs, zfs_, checksum_events_per_second, UINT, ZMOD_RW,
	"Rate limit checksum events to this many checksum errors per second "
	"(do not set below ZED threshold).");
-/* END CSTYLED */

ZFS_MODULE_PARAM(zfs, zfs_, scan_ignore_errors, INT, ZMOD_RW,
	"Ignore errors during resilver/scrub");
@@ -6573,7 +6571,6 @@ ZFS_MODULE_PARAM(zfs, zfs_, nocacheflush, INT, ZMOD_RW,
ZFS_MODULE_PARAM(zfs, zfs_, embedded_slog_min_ms, UINT, ZMOD_RW,
	"Minimum number of metaslabs required to dedicate one for log blocks");

-/* BEGIN CSTYLED */
ZFS_MODULE_PARAM_CALL(zfs_vdev, zfs_vdev_, min_auto_ashift,
	param_set_min_auto_ashift, param_get_uint, ZMOD_RW,
	"Minimum ashift used when creating new top-level vdevs");
@@ -6582,4 +6579,3 @@ ZFS_MODULE_PARAM_CALL(zfs_vdev, zfs_vdev_, max_auto_ashift,
	param_set_max_auto_ashift, param_get_uint, ZMOD_RW,
	"Maximum ashift used when optimizing for logical -> physical sector "
	"size on new top-level vdevs");
-/* END CSTYLED */


@@ -1897,7 +1897,6 @@ EXPORT_SYMBOL(vdev_indirect_sync_obsolete);
EXPORT_SYMBOL(vdev_obsolete_counts_are_precise);
EXPORT_SYMBOL(vdev_obsolete_sm_object);

-/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs_condense, zfs_condense_, indirect_vdevs_enable, INT,
	ZMOD_RW, "Whether to attempt condensing indirect vdev mappings");

@@ -1922,4 +1921,3 @@ ZFS_MODULE_PARAM(zfs_condense, zfs_condense_, indirect_commit_entry_delay_ms,
ZFS_MODULE_PARAM(zfs_reconstruct, zfs_reconstruct_, indirect_combinations_max,
	UINT, ZMOD_RW,
	"Maximum number of combinations when reconstructing split segments");
-/* END CSTYLED */


@@ -1047,12 +1047,10 @@ ZFS_MODULE_PARAM(zfs_vdev_mirror, zfs_vdev_mirror_, rotating_inc, INT, ZMOD_RW,
ZFS_MODULE_PARAM(zfs_vdev_mirror, zfs_vdev_mirror_, rotating_seek_inc, INT,
	ZMOD_RW, "Rotating media load increment for seeking I/Os");

-/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs_vdev_mirror, zfs_vdev_mirror_, rotating_seek_offset, INT,
	ZMOD_RW,
	"Offset in bytes from the last I/O which triggers "
	"a reduced rotating media seek increment");
-/* END CSTYLED */

ZFS_MODULE_PARAM(zfs_vdev_mirror, zfs_vdev_mirror_, non_rotating_inc, INT,
	ZMOD_RW, "Non-rotating media load increment for non-seeking I/Os");


@@ -5111,7 +5111,6 @@ vdev_ops_t vdev_raidz_ops = {
	.vdev_op_leaf = B_FALSE /* not a leaf vdev */
};

-/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs_vdev, raidz_, expand_max_reflow_bytes, ULONG, ZMOD_RW,
	"For testing, pause RAIDZ expansion after reflowing this many bytes");
ZFS_MODULE_PARAM(zfs_vdev, raidz_, expand_max_copy_bytes, ULONG, ZMOD_RW,
@@ -5121,4 +5120,3 @@ ZFS_MODULE_PARAM(zfs_vdev, raidz_, io_aggregate_rows, ULONG, ZMOD_RW,
ZFS_MODULE_PARAM(zfs, zfs_, scrub_after_expand, INT, ZMOD_RW,
	"For expanded RAIDZ, automatically start a pool scrub when expansion "
	"completes");
-/* END CSTYLED */


@@ -2551,11 +2551,9 @@ ZFS_MODULE_PARAM(zfs_vdev, zfs_, remove_max_segment, UINT, ZMOD_RW,
ZFS_MODULE_PARAM(zfs_vdev, vdev_, removal_max_span, UINT, ZMOD_RW,
	"Largest span of free chunks a remap segment can span");

-/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs_vdev, zfs_, removal_suspend_progress, UINT, ZMOD_RW,
	"Pause device removal after this many bytes are copied "
	"(debug use only - causes removal to hang)");
-/* END CSTYLED */

EXPORT_SYMBOL(free_from_removing_vdev);
EXPORT_SYMBOL(spa_removal_get_stats);


@@ -1706,10 +1706,8 @@ zap_shrink(zap_name_t *zn, zap_leaf_t *l, dmu_tx_t *tx)
	return (err);
}

-/* CSTYLED */
ZFS_MODULE_PARAM(zfs, , zap_iterate_prefetch, INT, ZMOD_RW,
	"When iterating ZAP object, prefetch it");

-/* CSTYLED */
ZFS_MODULE_PARAM(zfs, , zap_shrink_enabled, INT, ZMOD_RW,
	"Enable ZAP shrinking");


@@ -2030,7 +2030,6 @@ EXPORT_SYMBOL(zap_cursor_serialize);
EXPORT_SYMBOL(zap_cursor_init_serialized);
EXPORT_SYMBOL(zap_get_stats);

-/* CSTYLED */
ZFS_MODULE_PARAM(zfs, , zap_micro_max_size, INT, ZMOD_RW,
	"Maximum micro ZAP size, before converting to a fat ZAP, in bytes");
#endif