Mirror of https://git.proxmox.com/git/mirror_zfs, synced 2025-04-28 06:00:44 +00:00
dmu_tx: rename dmu_tx_assign() flags from TXG_* to DMU_TX_* (#17143)
This helps to avoid confusion with the similarly-named txg_wait_synced().

Sponsored-by: Klara, Inc.
Sponsored-by: Wasabi Technology, Inc.
Signed-off-by: Rob Norris <rob.norris@klarasystems.com>
Reviewed-by: Alexander Motin <mav@FreeBSD.org>
Reviewed-by: Mariusz Zaborski <mariusz.zaborski@klarasystems.com>
Reviewed-by: Tony Hutter <hutter2@llnl.gov>
parent 21850f519b
commit f69631992d
cmd/ztest.c | 33
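Before the hunks, a quick orientation: the patch is a mechanical rename of the dmu_tx_assign() flag namespace (TXG_NOWAIT -> DMU_TX_NOWAIT, TXG_WAIT -> DMU_TX_WAIT, TXG_NOTHROTTLE -> DMU_TX_NOTHROTTLE), with comments updated to match. A minimal sketch of a caller after the rename; my_fill_object() and its arguments are hypothetical, while the dmu_tx_* calls are the API being renamed:

/*
 * Hypothetical DMU consumer, sketched to show the rename; the
 * create/hold/assign/commit sequence mirrors the call sites below.
 */
static int
my_fill_object(objset_t *os, uint64_t object, uint64_t off, uint64_t len)
{
    dmu_tx_t *tx = dmu_tx_create(os);
    int error;

    dmu_tx_hold_write(tx, object, off, len);
    /* Was: error = dmu_tx_assign(tx, TXG_WAIT); */
    error = dmu_tx_assign(tx, DMU_TX_WAIT);
    if (error != 0) {
        dmu_tx_abort(tx);   /* an unassigned tx must be aborted */
        return (error);
    }
    /* ... dirty the held range here ... */
    dmu_tx_commit(tx);
    return (0);
}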
@@ -1812,7 +1812,8 @@ ztest_zd_fini(ztest_ds_t *zd)
 		ztest_rll_destroy(&zd->zd_range_lock[l]);
 }
 
-#define	TXG_MIGHTWAIT (ztest_random(10) == 0 ? TXG_NOWAIT : TXG_WAIT)
+#define	DMU_TX_MIGHTWAIT \
+	(ztest_random(10) == 0 ? DMU_TX_NOWAIT : DMU_TX_WAIT)
 
 static uint64_t
 ztest_tx_assign(dmu_tx_t *tx, uint64_t txg_how, const char *tag)
@@ -1826,7 +1827,7 @@ ztest_tx_assign(dmu_tx_t *tx, uint64_t txg_how, const char *tag)
 	error = dmu_tx_assign(tx, txg_how);
 	if (error) {
 		if (error == ERESTART) {
-			ASSERT3U(txg_how, ==, TXG_NOWAIT);
+			ASSERT3U(txg_how, ==, DMU_TX_NOWAIT);
 			dmu_tx_wait(tx);
 		} else {
 			ASSERT3U(error, ==, ENOSPC);
@@ -2073,7 +2074,7 @@ ztest_replay_create(void *arg1, void *arg2, boolean_t byteswap)
 		dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
 	}
 
-	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
+	txg = ztest_tx_assign(tx, DMU_TX_WAIT, FTAG);
 	if (txg == 0)
 		return (ENOSPC);
 
@@ -2163,7 +2164,7 @@ ztest_replay_remove(void *arg1, void *arg2, boolean_t byteswap)
 	dmu_tx_hold_zap(tx, lr->lr_doid, B_FALSE, name);
 	dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
 
-	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
+	txg = ztest_tx_assign(tx, DMU_TX_WAIT, FTAG);
 	if (txg == 0) {
 		ztest_object_unlock(zd, object);
 		return (ENOSPC);
@@ -2245,7 +2246,7 @@ ztest_replay_write(void *arg1, void *arg2, boolean_t byteswap)
 	    P2PHASE(offset, length) == 0)
 		abuf = dmu_request_arcbuf(db, length);
 
-	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
+	txg = ztest_tx_assign(tx, DMU_TX_WAIT, FTAG);
 	if (txg == 0) {
 		if (abuf != NULL)
 			dmu_return_arcbuf(abuf);
@@ -2343,7 +2344,7 @@ ztest_replay_truncate(void *arg1, void *arg2, boolean_t byteswap)
 
 	dmu_tx_hold_free(tx, lr->lr_foid, lr->lr_offset, lr->lr_length);
 
-	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
+	txg = ztest_tx_assign(tx, DMU_TX_WAIT, FTAG);
 	if (txg == 0) {
 		ztest_range_unlock(rl);
 		ztest_object_unlock(zd, lr->lr_foid);
@@ -2384,7 +2385,7 @@ ztest_replay_setattr(void *arg1, void *arg2, boolean_t byteswap)
 	tx = dmu_tx_create(os);
 	dmu_tx_hold_bonus(tx, lr->lr_foid);
 
-	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
+	txg = ztest_tx_assign(tx, DMU_TX_WAIT, FTAG);
 	if (txg == 0) {
 		dmu_buf_rele(db, FTAG);
 		ztest_object_unlock(zd, lr->lr_foid);
@@ -2802,7 +2803,7 @@ ztest_prealloc(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size)
 
 	dmu_tx_hold_write(tx, object, offset, size);
 
-	txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
+	txg = ztest_tx_assign(tx, DMU_TX_WAIT, FTAG);
 
 	if (txg != 0) {
 		dmu_prealloc(os, object, offset, size, tx);
@@ -5170,7 +5171,7 @@ ztest_dmu_read_write(ztest_ds_t *zd, uint64_t id)
 	/* This accounts for setting the checksum/compression. */
 	dmu_tx_hold_bonus(tx, bigobj);
 
-	txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
+	txg = ztest_tx_assign(tx, DMU_TX_MIGHTWAIT, FTAG);
 	if (txg == 0) {
 		umem_free(packbuf, packsize);
 		umem_free(bigbuf, bigsize);
@@ -5471,7 +5472,7 @@ ztest_dmu_read_write_zcopy(ztest_ds_t *zd, uint64_t id)
 	dmu_tx_hold_write(tx, packobj, packoff, packsize);
 	dmu_tx_hold_write(tx, bigobj, bigoff, bigsize);
 
-	txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
+	txg = ztest_tx_assign(tx, DMU_TX_MIGHTWAIT, FTAG);
 	if (txg == 0) {
 		umem_free(packbuf, packsize);
 		umem_free(bigbuf, bigsize);
@@ -5691,7 +5692,7 @@ ztest_zap(ztest_ds_t *zd, uint64_t id)
 	 */
 	tx = dmu_tx_create(os);
 	dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
-	txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
+	txg = ztest_tx_assign(tx, DMU_TX_MIGHTWAIT, FTAG);
 	if (txg == 0)
 		goto out;
 	for (i = 0; i < 2; i++) {
@@ -5759,7 +5760,7 @@ ztest_zap(ztest_ds_t *zd, uint64_t id)
 	 */
 	tx = dmu_tx_create(os);
 	dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
-	txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
+	txg = ztest_tx_assign(tx, DMU_TX_MIGHTWAIT, FTAG);
 	if (txg == 0)
 		goto out;
 
@@ -5793,7 +5794,7 @@ ztest_zap(ztest_ds_t *zd, uint64_t id)
 
 	tx = dmu_tx_create(os);
 	dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
-	txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
+	txg = ztest_tx_assign(tx, DMU_TX_MIGHTWAIT, FTAG);
 	if (txg == 0)
 		goto out;
 	VERIFY0(zap_remove(os, object, txgname, tx));
@@ -5836,7 +5837,7 @@ ztest_fzap(ztest_ds_t *zd, uint64_t id)
 
 	tx = dmu_tx_create(os);
 	dmu_tx_hold_zap(tx, object, B_TRUE, name);
-	txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
+	txg = ztest_tx_assign(tx, DMU_TX_MIGHTWAIT, FTAG);
 	if (txg == 0)
 		goto out;
 	error = zap_add(os, object, name, sizeof (uint64_t), 1,
@@ -5907,7 +5908,7 @@ ztest_zap_parallel(ztest_ds_t *zd, uint64_t id)
 	if (i >= 2) {
 		tx = dmu_tx_create(os);
 		dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
-		txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
+		txg = ztest_tx_assign(tx, DMU_TX_MIGHTWAIT, FTAG);
 		if (txg == 0) {
 			umem_free(od, sizeof (ztest_od_t));
 			return;
@@ -6073,7 +6074,7 @@ ztest_dmu_commit_callbacks(ztest_ds_t *zd, uint64_t id)
 			error = -1;
 
 	if (!error)
-		error = dmu_tx_assign(tx, TXG_NOWAIT);
+		error = dmu_tx_assign(tx, DMU_TX_NOWAIT);
 
 	txg = error ? 0 : dmu_tx_get_txg(tx);
 
@@ -29,6 +29,7 @@
  * Copyright 2013 Saso Kiselkov. All rights reserved.
  * Copyright (c) 2017, Intel Corporation.
  * Copyright (c) 2022 Hewlett Packard Enterprise Development LP.
+ * Copyright (c) 2025, Klara, Inc.
  */
 
 /* Portions Copyright 2010 Robert Milkowski */
@@ -276,13 +277,13 @@ typedef enum dmu_object_type {
 } dmu_object_type_t;
 
 /*
- * These flags are intended to be used to specify the "txg_how"
- * parameter when calling the dmu_tx_assign() function. See the comment
- * above dmu_tx_assign() for more details on the meaning of these flags.
+ * These flags are for the dmu_tx_assign() function and describe what to do if
+ * the transaction is full. See the comment above dmu_tx_assign() for more
+ * details on the meaning of these flags.
  */
-#define	TXG_NOWAIT	(0ULL)
-#define	TXG_WAIT	(1ULL<<0)
-#define	TXG_NOTHROTTLE	(1ULL<<1)
+#define	DMU_TX_NOWAIT	(0ULL)
+#define	DMU_TX_WAIT	(1ULL<<0)
+#define	DMU_TX_NOTHROTTLE	(1ULL<<1)
 
 void byteswap_uint64_array(void *buf, size_t size);
 void byteswap_uint32_array(void *buf, size_t size);
@@ -827,7 +828,7 @@ void dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object);
 void dmu_tx_hold_sa(dmu_tx_t *tx, struct sa_handle *hdl, boolean_t may_grow);
 void dmu_tx_hold_sa_create(dmu_tx_t *tx, int total_size);
 void dmu_tx_abort(dmu_tx_t *tx);
-int dmu_tx_assign(dmu_tx_t *tx, uint64_t txg_how);
+int dmu_tx_assign(dmu_tx_t *tx, uint64_t flags);
 void dmu_tx_wait(dmu_tx_t *tx);
 void dmu_tx_commit(dmu_tx_t *tx);
 void dmu_tx_mark_netfree(dmu_tx_t *tx);
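The renamed flags keep their old values and bit positions, so bitmask composition is unchanged. A sketch of the retry idiom the flags support, condensed from the zfs_vnops_os.c comment later in this diff; the lock and hold steps are placeholders:

    boolean_t waited = B_FALSE;
top:
    /* grab ZPL locks, create the tx, dmu_tx_hold_*() ... */
    error = dmu_tx_assign(tx,
        (waited ? DMU_TX_NOTHROTTLE : 0) | DMU_TX_NOWAIT);
    if (error) {
        /* drop locks before waiting */
        if (error == ERESTART) {
            waited = B_TRUE;
            dmu_tx_wait(tx);
            dmu_tx_abort(tx);
            goto top;
        }
        dmu_tx_abort(tx);
        return (error);
    }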
@@ -1998,7 +1998,7 @@ top:
 	}
 
 	zfs_sa_upgrade_txholds(tx, zp);
-	error = dmu_tx_assign(tx, TXG_NOWAIT);
+	error = dmu_tx_assign(tx, DMU_TX_NOWAIT);
 	if (error) {
 		mutex_exit(&zp->z_acl_lock);
@@ -337,7 +337,7 @@ zfs_unlinked_drain(zfsvfs_t *zfsvfs)
 		if (zp->z_links != 0) {
 			tx = dmu_tx_create(zfsvfs->z_os);
 			dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
-			error = dmu_tx_assign(tx, TXG_WAIT);
+			error = dmu_tx_assign(tx, DMU_TX_WAIT);
 			if (error != 0) {
 				dmu_tx_abort(tx);
 				vput(ZTOV(zp));
@@ -401,7 +401,7 @@ zfs_purgedir(znode_t *dzp)
 		/* Is this really needed ? */
 		zfs_sa_upgrade_txholds(tx, xzp);
 		dmu_tx_mark_netfree(tx);
-		error = dmu_tx_assign(tx, TXG_WAIT);
+		error = dmu_tx_assign(tx, DMU_TX_WAIT);
 		if (error) {
 			dmu_tx_abort(tx);
 			vput(ZTOV(xzp));
@@ -503,7 +503,7 @@ zfs_rmnode(znode_t *zp)
 		dmu_tx_hold_free(tx, acl_obj, 0, DMU_OBJECT_END);
 
 	zfs_sa_upgrade_txholds(tx, zp);
-	error = dmu_tx_assign(tx, TXG_WAIT);
+	error = dmu_tx_assign(tx, DMU_TX_WAIT);
 	if (error) {
 		/*
 		 * Not enough space to delete the file.  Leave it in the
@@ -848,7 +848,7 @@ zfs_make_xattrdir(znode_t *zp, vattr_t *vap, znode_t **xvpp, cred_t *cr)
 	fuid_dirtied = zfsvfs->z_fuid_dirty;
 	if (fuid_dirtied)
 		zfs_fuid_txhold(zfsvfs, tx);
-	error = dmu_tx_assign(tx, TXG_WAIT);
+	error = dmu_tx_assign(tx, DMU_TX_WAIT);
 	if (error) {
 		zfs_acl_ids_free(&acl_ids);
 		dmu_tx_abort(tx);
@@ -2198,7 +2198,7 @@ zfs_set_version(zfsvfs_t *zfsvfs, uint64_t newvers)
 		    ZFS_SA_ATTRS);
 		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
 	}
-	error = dmu_tx_assign(tx, TXG_WAIT);
+	error = dmu_tx_assign(tx, DMU_TX_WAIT);
 	if (error) {
 		dmu_tx_abort(tx);
 		return (error);
@@ -143,14 +143,14 @@ typedef ulong_t cookie_t;
  *  (3) All range locks must be grabbed before calling dmu_tx_assign(),
  *      as they can span dmu_tx_assign() calls.
  *
- *  (4) If ZPL locks are held, pass TXG_NOWAIT as the second argument to
+ *  (4) If ZPL locks are held, pass DMU_TX_NOWAIT as the second argument to
  *      dmu_tx_assign().  This is critical because we don't want to block
  *      while holding locks.
 *
- *      If no ZPL locks are held (aside from zfs_enter()), use TXG_WAIT.  This
- *      reduces lock contention and CPU usage when we must wait (note that if
- *      throughput is constrained by the storage, nearly every transaction
- *      must wait).
+ *      If no ZPL locks are held (aside from zfs_enter()), use DMU_TX_WAIT.
+ *      This reduces lock contention and CPU usage when we must wait (note
+ *      that if throughput is constrained by the storage, nearly every
+ *      transaction must wait).
 *
 *      Note, in particular, that if a lock is sometimes acquired before
 *      the tx assigns, and sometimes after (e.g. z_lock), then failing
@@ -158,15 +158,16 @@ typedef ulong_t cookie_t;
 *
 *      Thread A has grabbed a lock before calling dmu_tx_assign().
 *      Thread B is in an already-assigned tx, and blocks for this lock.
- *      Thread A calls dmu_tx_assign(TXG_WAIT) and blocks in txg_wait_open()
- *      forever, because the previous txg can't quiesce until B's tx commits.
+ *      Thread A calls dmu_tx_assign(DMU_TX_WAIT) and blocks in
+ *      txg_wait_open() forever, because the previous txg can't quiesce
+ *      until B's tx commits.
 *
- *      If dmu_tx_assign() returns ERESTART and zfsvfs->z_assign is TXG_NOWAIT,
- *      then drop all locks, call dmu_tx_wait(), and try again.  On subsequent
- *      calls to dmu_tx_assign(), pass TXG_NOTHROTTLE in addition to TXG_NOWAIT,
- *      to indicate that this operation has already called dmu_tx_wait().
- *      This will ensure that we don't retry forever, waiting a short bit
- *      each time.
+ *      If dmu_tx_assign() returns ERESTART and zfsvfs->z_assign is
+ *      DMU_TX_NOWAIT, then drop all locks, call dmu_tx_wait(), and try
+ *      again.  On subsequent calls to dmu_tx_assign(), pass
+ *      DMU_TX_NOTHROTTLE in addition to DMU_TX_NOWAIT, to indicate that
+ *      this operation has already called dmu_tx_wait().  This will ensure
+ *      that we don't retry forever, waiting a short bit each time.
 *
 *  (5) If the operation succeeded, generate the intent log entry for it
 *      before dropping locks.  This ensures that the ordering of events
@@ -188,7 +189,8 @@ typedef ulong_t cookie_t;
 *	rw_enter(...);			// grab any other locks you need
 *	tx = dmu_tx_create(...);	// get DMU tx
 *	dmu_tx_hold_*();		// hold each object you might modify
- *	error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
+ *	error = dmu_tx_assign(tx,
+ *	    (waited ? DMU_TX_NOTHROTTLE : 0) | DMU_TX_NOWAIT);
 *	if (error) {
 *		rw_exit(...);		// drop locks
 *		zfs_dirent_unlock(dl);	// unlock directory entry
@@ -1045,7 +1047,7 @@ zfs_create(znode_t *dzp, const char *name, vattr_t *vap, int excl, int mode,
 		dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
 		    0, acl_ids.z_aclp->z_acl_bytes);
 	}
-	error = dmu_tx_assign(tx, TXG_WAIT);
+	error = dmu_tx_assign(tx, DMU_TX_WAIT);
 	if (error) {
 		zfs_acl_ids_free(&acl_ids);
 		dmu_tx_abort(tx);
@@ -1186,7 +1188,7 @@ zfs_remove_(vnode_t *dvp, vnode_t *vp, const char *name, cred_t *cr)
 	 */
 	dmu_tx_mark_netfree(tx);
 
-	error = dmu_tx_assign(tx, TXG_WAIT);
+	error = dmu_tx_assign(tx, DMU_TX_WAIT);
 	if (error) {
 		dmu_tx_abort(tx);
 		zfs_exit(zfsvfs, FTAG);
@@ -1409,7 +1411,7 @@ zfs_mkdir(znode_t *dzp, const char *dirname, vattr_t *vap, znode_t **zpp,
 	dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
 	    ZFS_SA_BASE_ATTR_SIZE);
 
-	error = dmu_tx_assign(tx, TXG_WAIT);
+	error = dmu_tx_assign(tx, DMU_TX_WAIT);
 	if (error) {
 		zfs_acl_ids_free(&acl_ids);
 		dmu_tx_abort(tx);
@@ -1511,7 +1513,7 @@ zfs_rmdir_(vnode_t *dvp, vnode_t *vp, const char *name, cred_t *cr)
 	zfs_sa_upgrade_txholds(tx, zp);
 	zfs_sa_upgrade_txholds(tx, dzp);
 	dmu_tx_mark_netfree(tx);
-	error = dmu_tx_assign(tx, TXG_WAIT);
+	error = dmu_tx_assign(tx, DMU_TX_WAIT);
 	if (error) {
 		dmu_tx_abort(tx);
 		zfs_exit(zfsvfs, FTAG);
@@ -2543,7 +2545,7 @@ zfs_setattr(znode_t *zp, vattr_t *vap, int flags, cred_t *cr, zidmap_t *mnt_ns)
 
 	zfs_sa_upgrade_txholds(tx, zp);
 
-	err = dmu_tx_assign(tx, TXG_WAIT);
+	err = dmu_tx_assign(tx, DMU_TX_WAIT);
 	if (err)
 		goto out;
 
@@ -3244,7 +3246,7 @@ zfs_do_rename_impl(vnode_t *sdvp, vnode_t **svpp, struct componentname *scnp,
 
 	zfs_sa_upgrade_txholds(tx, szp);
 	dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
-	error = dmu_tx_assign(tx, TXG_WAIT);
+	error = dmu_tx_assign(tx, DMU_TX_WAIT);
 	if (error) {
 		dmu_tx_abort(tx);
 		goto out_seq;
@@ -3445,7 +3447,7 @@ zfs_symlink(znode_t *dzp, const char *name, vattr_t *vap,
 	}
 	if (fuid_dirtied)
 		zfs_fuid_txhold(zfsvfs, tx);
-	error = dmu_tx_assign(tx, TXG_WAIT);
+	error = dmu_tx_assign(tx, DMU_TX_WAIT);
 	if (error) {
 		zfs_acl_ids_free(&acl_ids);
 		dmu_tx_abort(tx);
@@ -3664,7 +3666,7 @@ zfs_link(znode_t *tdzp, znode_t *szp, const char *name, cred_t *cr,
 	dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, name);
 	zfs_sa_upgrade_txholds(tx, szp);
 	zfs_sa_upgrade_txholds(tx, tdzp);
-	error = dmu_tx_assign(tx, TXG_WAIT);
+	error = dmu_tx_assign(tx, DMU_TX_WAIT);
 	if (error) {
 		dmu_tx_abort(tx);
 		zfs_exit(zfsvfs, FTAG);
@@ -3793,7 +3795,7 @@ zfs_inactive(vnode_t *vp, cred_t *cr, caller_context_t *ct)
 
 		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
 		zfs_sa_upgrade_txholds(tx, zp);
-		error = dmu_tx_assign(tx, TXG_WAIT);
+		error = dmu_tx_assign(tx, DMU_TX_WAIT);
 		if (error) {
 			dmu_tx_abort(tx);
 		} else {
@@ -4164,7 +4166,7 @@ zfs_putpages(struct vnode *vp, vm_page_t *ma, size_t len, int flags,
 
 	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
 	zfs_sa_upgrade_txholds(tx, zp);
-	err = dmu_tx_assign(tx, TXG_WAIT);
+	err = dmu_tx_assign(tx, DMU_TX_WAIT);
 	if (err != 0) {
 		dmu_tx_abort(tx);
 		goto out;
@@ -1412,7 +1412,7 @@ zfs_extend(znode_t *zp, uint64_t end)
 		newblksz = 0;
 	}
 
-	error = dmu_tx_assign(tx, TXG_WAIT);
+	error = dmu_tx_assign(tx, DMU_TX_WAIT);
 	if (error) {
 		dmu_tx_abort(tx);
 		zfs_rangelock_exit(lr);
@@ -1530,7 +1530,7 @@ zfs_trunc(znode_t *zp, uint64_t end)
 	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
 	zfs_sa_upgrade_txholds(tx, zp);
 	dmu_tx_mark_netfree(tx);
-	error = dmu_tx_assign(tx, TXG_WAIT);
+	error = dmu_tx_assign(tx, DMU_TX_WAIT);
 	if (error) {
 		dmu_tx_abort(tx);
 		zfs_rangelock_exit(lr);
@@ -1611,7 +1611,7 @@ log:
 	tx = dmu_tx_create(zfsvfs->z_os);
 	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
 	zfs_sa_upgrade_txholds(tx, zp);
-	error = dmu_tx_assign(tx, TXG_WAIT);
+	error = dmu_tx_assign(tx, DMU_TX_WAIT);
 	if (error) {
 		dmu_tx_abort(tx);
 		return (error);
@@ -737,7 +737,7 @@ zvol_geom_bio_strategy(struct bio *bp)
 
 	if (bp->bio_cmd == BIO_DELETE) {
 		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
-		error = dmu_tx_assign(tx, TXG_WAIT);
+		error = dmu_tx_assign(tx, DMU_TX_WAIT);
 		if (error != 0) {
 			dmu_tx_abort(tx);
 		} else {
@@ -757,7 +757,7 @@ zvol_geom_bio_strategy(struct bio *bp)
 		} else {
 			dmu_tx_t *tx = dmu_tx_create(os);
 			dmu_tx_hold_write_by_dnode(tx, zv->zv_dn, off, size);
-			error = dmu_tx_assign(tx, TXG_WAIT);
+			error = dmu_tx_assign(tx, DMU_TX_WAIT);
 			if (error) {
 				dmu_tx_abort(tx);
 			} else {
@@ -904,7 +904,7 @@ zvol_cdev_write(struct cdev *dev, struct uio *uio_s, int ioflag)
 			bytes = volsize - off;
 
 		dmu_tx_hold_write_by_dnode(tx, zv->zv_dn, off, bytes);
-		error = dmu_tx_assign(tx, TXG_WAIT);
+		error = dmu_tx_assign(tx, DMU_TX_WAIT);
 		if (error) {
 			dmu_tx_abort(tx);
 			break;
@@ -1153,7 +1153,7 @@ zvol_cdev_ioctl(struct cdev *dev, ulong_t cmd, caddr_t data,
 		lr = zfs_rangelock_enter(&zv->zv_rangelock, offset, length,
 		    RL_WRITER);
 		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
-		error = dmu_tx_assign(tx, TXG_WAIT);
+		error = dmu_tx_assign(tx, DMU_TX_WAIT);
 		if (error != 0) {
 			sync = FALSE;
 			dmu_tx_abort(tx);
@@ -2188,7 +2188,7 @@ top:
 	}
 
 	zfs_sa_upgrade_txholds(tx, zp);
-	error = dmu_tx_assign(tx, TXG_NOWAIT);
+	error = dmu_tx_assign(tx, DMU_TX_NOWAIT);
 	if (error) {
 		mutex_exit(&zp->z_acl_lock);
 		mutex_exit(&zp->z_lock);
@@ -623,7 +623,7 @@ zfs_purgedir(znode_t *dzp)
 		/* Is this really needed ? */
 		zfs_sa_upgrade_txholds(tx, xzp);
 		dmu_tx_mark_netfree(tx);
-		error = dmu_tx_assign(tx, TXG_WAIT);
+		error = dmu_tx_assign(tx, DMU_TX_WAIT);
 		if (error) {
 			dmu_tx_abort(tx);
 			zfs_zrele_async(xzp);
@@ -729,7 +729,7 @@ zfs_rmnode(znode_t *zp)
 		dmu_tx_hold_free(tx, acl_obj, 0, DMU_OBJECT_END);
 
 	zfs_sa_upgrade_txholds(tx, zp);
-	error = dmu_tx_assign(tx, TXG_WAIT);
+	error = dmu_tx_assign(tx, DMU_TX_WAIT);
 	if (error) {
 		/*
 		 * Not enough space to delete the file.  Leave it in the
@@ -1150,7 +1150,7 @@ zfs_make_xattrdir(znode_t *zp, vattr_t *vap, znode_t **xzpp, cred_t *cr)
 	fuid_dirtied = zfsvfs->z_fuid_dirty;
 	if (fuid_dirtied)
 		zfs_fuid_txhold(zfsvfs, tx);
-	error = dmu_tx_assign(tx, TXG_WAIT);
+	error = dmu_tx_assign(tx, DMU_TX_WAIT);
 	if (error) {
 		zfs_acl_ids_free(&acl_ids);
 		dmu_tx_abort(tx);
@@ -1962,7 +1962,7 @@ zfs_set_version(zfsvfs_t *zfsvfs, uint64_t newvers)
 		    ZFS_SA_ATTRS);
 		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
 	}
-	error = dmu_tx_assign(tx, TXG_WAIT);
+	error = dmu_tx_assign(tx, DMU_TX_WAIT);
 	if (error) {
 		dmu_tx_abort(tx);
 		return (error);
@@ -105,14 +105,14 @@
  *  (3) All range locks must be grabbed before calling dmu_tx_assign(),
  *      as they can span dmu_tx_assign() calls.
  *
- *  (4) If ZPL locks are held, pass TXG_NOWAIT as the second argument to
+ *  (4) If ZPL locks are held, pass DMU_TX_NOWAIT as the second argument to
  *      dmu_tx_assign().  This is critical because we don't want to block
  *      while holding locks.
 *
- *      If no ZPL locks are held (aside from zfs_enter()), use TXG_WAIT.  This
- *      reduces lock contention and CPU usage when we must wait (note that if
- *      throughput is constrained by the storage, nearly every transaction
- *      must wait).
+ *      If no ZPL locks are held (aside from zfs_enter()), use DMU_TX_WAIT.
+ *      This reduces lock contention and CPU usage when we must wait (note
+ *      that if throughput is constrained by the storage, nearly every
+ *      transaction must wait).
 *
 *      Note, in particular, that if a lock is sometimes acquired before
 *      the tx assigns, and sometimes after (e.g. z_lock), then failing
@@ -120,15 +120,16 @@
 *
 *      Thread A has grabbed a lock before calling dmu_tx_assign().
 *      Thread B is in an already-assigned tx, and blocks for this lock.
- *      Thread A calls dmu_tx_assign(TXG_WAIT) and blocks in txg_wait_open()
- *      forever, because the previous txg can't quiesce until B's tx commits.
+ *      Thread A calls dmu_tx_assign(DMU_TX_WAIT) and blocks in
+ *      txg_wait_open() forever, because the previous txg can't quiesce
+ *      until B's tx commits.
 *
- *      If dmu_tx_assign() returns ERESTART and zfsvfs->z_assign is TXG_NOWAIT,
- *      then drop all locks, call dmu_tx_wait(), and try again.  On subsequent
- *      calls to dmu_tx_assign(), pass TXG_NOTHROTTLE in addition to TXG_NOWAIT,
- *      to indicate that this operation has already called dmu_tx_wait().
- *      This will ensure that we don't retry forever, waiting a short bit
- *      each time.
+ *      If dmu_tx_assign() returns ERESTART and zfsvfs->z_assign is
+ *      DMU_TX_NOWAIT, then drop all locks, call dmu_tx_wait(), and try
+ *      again.  On subsequent calls to dmu_tx_assign(), pass
+ *      DMU_TX_NOTHROTTLE in addition to DMU_TX_NOWAIT, to indicate that
+ *      this operation has already called dmu_tx_wait().  This will ensure
+ *      that we don't retry forever, waiting a short bit each time.
 *
 *  (5) If the operation succeeded, generate the intent log entry for it
 *      before dropping locks.  This ensures that the ordering of events
@@ -150,7 +151,8 @@
 *	rw_enter(...);			// grab any other locks you need
 *	tx = dmu_tx_create(...);	// get DMU tx
 *	dmu_tx_hold_*();		// hold each object you might modify
- *	error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
+ *	error = dmu_tx_assign(tx,
+ *	    (waited ? DMU_TX_NOTHROTTLE : 0) | DMU_TX_NOWAIT);
 *	if (error) {
 *		rw_exit(...);		// drop locks
 *		zfs_dirent_unlock(dl);	// unlock directory entry
@@ -737,7 +739,7 @@ top:
 	}
 
 	error = dmu_tx_assign(tx,
-	    (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
+	    (waited ? DMU_TX_NOTHROTTLE : 0) | DMU_TX_NOWAIT);
 	if (error) {
 		zfs_dirent_unlock(dl);
 		if (error == ERESTART) {
@@ -928,7 +930,8 @@ top:
 		dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
 		    0, acl_ids.z_aclp->z_acl_bytes);
 	}
-	error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
+	error = dmu_tx_assign(tx,
+	    (waited ? DMU_TX_NOTHROTTLE : 0) | DMU_TX_NOWAIT);
 	if (error) {
 		if (error == ERESTART) {
 			waited = B_TRUE;
@@ -1092,7 +1095,8 @@ top:
 	 */
 	dmu_tx_mark_netfree(tx);
 
-	error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
+	error = dmu_tx_assign(tx,
+	    (waited ? DMU_TX_NOTHROTTLE : 0) | DMU_TX_NOWAIT);
 	if (error) {
 		zfs_dirent_unlock(dl);
 		if (error == ERESTART) {
@@ -1338,7 +1342,8 @@ top:
 	dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
 	    ZFS_SA_BASE_ATTR_SIZE);
 
-	error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
+	error = dmu_tx_assign(tx,
+	    (waited ? DMU_TX_NOTHROTTLE : 0) | DMU_TX_NOWAIT);
 	if (error) {
 		zfs_dirent_unlock(dl);
 		if (error == ERESTART) {
@@ -1482,7 +1487,8 @@ top:
 	zfs_sa_upgrade_txholds(tx, zp);
 	zfs_sa_upgrade_txholds(tx, dzp);
 	dmu_tx_mark_netfree(tx);
-	error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
+	error = dmu_tx_assign(tx,
+	    (waited ? DMU_TX_NOTHROTTLE : 0) | DMU_TX_NOWAIT);
 	if (error) {
 		rw_exit(&zp->z_parent_lock);
 		rw_exit(&zp->z_name_lock);
@@ -1814,7 +1820,7 @@ zfs_setattr_dir(znode_t *dzp)
 		else
 			dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
 
-		err = dmu_tx_assign(tx, TXG_WAIT);
+		err = dmu_tx_assign(tx, DMU_TX_WAIT);
 		if (err)
 			break;
 
@@ -2387,7 +2393,7 @@ top:
 
 	zfs_sa_upgrade_txholds(tx, zp);
 
-	err = dmu_tx_assign(tx, TXG_WAIT);
+	err = dmu_tx_assign(tx, DMU_TX_WAIT);
 	if (err)
 		goto out;
 
@@ -3071,7 +3077,8 @@ top:
 	zfs_fuid_txhold(zfsvfs, tx);
 	zfs_sa_upgrade_txholds(tx, szp);
 	dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
-	error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
+	error = dmu_tx_assign(tx,
+	    (waited ? DMU_TX_NOTHROTTLE : 0) | DMU_TX_NOWAIT);
 	if (error) {
 		if (zl != NULL)
 			zfs_rename_unlock(&zl);
@@ -3366,7 +3373,8 @@ top:
 	}
 	if (fuid_dirtied)
 		zfs_fuid_txhold(zfsvfs, tx);
-	error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
+	error = dmu_tx_assign(tx,
+	    (waited ? DMU_TX_NOTHROTTLE : 0) | DMU_TX_NOWAIT);
 	if (error) {
 		zfs_dirent_unlock(dl);
 		if (error == ERESTART) {
@@ -3616,7 +3624,8 @@ top:
 
 	zfs_sa_upgrade_txholds(tx, szp);
 	zfs_sa_upgrade_txholds(tx, tdzp);
-	error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
+	error = dmu_tx_assign(tx,
+	    (waited ? DMU_TX_NOTHROTTLE : 0) | DMU_TX_NOWAIT);
 	if (error) {
 		zfs_dirent_unlock(dl);
 		if (error == ERESTART) {
@@ -3853,7 +3862,7 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc,
 	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
 	zfs_sa_upgrade_txholds(tx, zp);
 
-	err = dmu_tx_assign(tx, TXG_WAIT);
+	err = dmu_tx_assign(tx, DMU_TX_WAIT);
 	if (err != 0) {
 		dmu_tx_abort(tx);
 #ifdef HAVE_VFS_FILEMAP_DIRTY_FOLIO
@@ -3968,7 +3977,7 @@ zfs_dirty_inode(struct inode *ip, int flags)
 	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
 	zfs_sa_upgrade_txholds(tx, zp);
 
-	error = dmu_tx_assign(tx, TXG_WAIT);
+	error = dmu_tx_assign(tx, DMU_TX_WAIT);
 	if (error) {
 		dmu_tx_abort(tx);
 		goto out;
@@ -4027,7 +4036,7 @@ zfs_inactive(struct inode *ip)
 
 		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
 		zfs_sa_upgrade_txholds(tx, zp);
-		error = dmu_tx_assign(tx, TXG_WAIT);
+		error = dmu_tx_assign(tx, DMU_TX_WAIT);
 		if (error) {
 			dmu_tx_abort(tx);
 		} else {
@@ -1518,7 +1518,7 @@ zfs_extend(znode_t *zp, uint64_t end)
 		newblksz = 0;
 	}
 
-	error = dmu_tx_assign(tx, TXG_WAIT);
+	error = dmu_tx_assign(tx, DMU_TX_WAIT);
 	if (error) {
 		dmu_tx_abort(tx);
 		zfs_rangelock_exit(lr);
@@ -1704,7 +1704,7 @@ zfs_trunc(znode_t *zp, uint64_t end)
 	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
 	zfs_sa_upgrade_txholds(tx, zp);
 	dmu_tx_mark_netfree(tx);
-	error = dmu_tx_assign(tx, TXG_WAIT);
+	error = dmu_tx_assign(tx, DMU_TX_WAIT);
 	if (error) {
 		dmu_tx_abort(tx);
 		zfs_rangelock_exit(lr);
@@ -1775,7 +1775,7 @@ log:
 	tx = dmu_tx_create(zfsvfs->z_os);
 	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
 	zfs_sa_upgrade_txholds(tx, zp);
-	error = dmu_tx_assign(tx, TXG_WAIT);
+	error = dmu_tx_assign(tx, DMU_TX_WAIT);
 	if (error) {
 		dmu_tx_abort(tx);
 		goto out;
@@ -300,7 +300,7 @@ zvol_write(zv_request_t *zvr)
 		dmu_tx_hold_write_by_dnode(tx, zv->zv_dn, off, bytes);
 
 		/* This will only fail for ENOSPC */
-		error = dmu_tx_assign(tx, TXG_WAIT);
+		error = dmu_tx_assign(tx, DMU_TX_WAIT);
 		if (error) {
 			dmu_tx_abort(tx);
 			break;
@@ -396,7 +396,7 @@ zvol_discard(zv_request_t *zvr)
 
 	tx = dmu_tx_create(zv->zv_objset);
 	dmu_tx_mark_netfree(tx);
-	error = dmu_tx_assign(tx, TXG_WAIT);
+	error = dmu_tx_assign(tx, DMU_TX_WAIT);
 	if (error != 0) {
 		dmu_tx_abort(tx);
 	} else {
@@ -1051,7 +1051,7 @@ dmu_free_long_range_impl(objset_t *os, dnode_t *dn, uint64_t offset,
 		 * reduction in space used.
 		 */
 		dmu_tx_mark_netfree(tx);
-		err = dmu_tx_assign(tx, TXG_WAIT);
+		err = dmu_tx_assign(tx, DMU_TX_WAIT);
 		if (err) {
 			dmu_tx_abort(tx);
 			return (err);
@@ -1142,7 +1142,7 @@ dmu_free_long_object(objset_t *os, uint64_t object)
 	dmu_tx_hold_bonus(tx, object);
 	dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
 	dmu_tx_mark_netfree(tx);
-	err = dmu_tx_assign(tx, TXG_WAIT);
+	err = dmu_tx_assign(tx, DMU_TX_WAIT);
 	if (err == 0) {
 		err = dmu_object_free(os, object, tx);
 		dmu_tx_commit(tx);
@@ -1996,7 +1996,7 @@ dmu_sync_late_arrival(zio_t *pio, objset_t *os, dmu_sync_cb_t *done, zgd_t *zgd,
 	 * which time the log block we are writing will be obsolete, so we can
 	 * skip waiting and just return error here instead.
 	 */
-	if (dmu_tx_assign(tx, TXG_NOWAIT | TXG_NOTHROTTLE) != 0) {
+	if (dmu_tx_assign(tx, DMU_TX_NOWAIT | DMU_TX_NOTHROTTLE) != 0) {
 		dmu_tx_abort(tx);
 		/* Make zl_get_data do txg_waited_synced() */
 		return (SET_ERROR(EIO));
@@ -2452,7 +2452,7 @@ dmu_objset_space_upgrade(objset_t *os)
 			continue;
 		tx = dmu_tx_create(os);
 		dmu_tx_hold_bonus(tx, obj);
-		objerr = dmu_tx_assign(tx, TXG_WAIT);
+		objerr = dmu_tx_assign(tx, DMU_TX_WAIT);
 		if (objerr != 0) {
 			dmu_buf_rele(db, FTAG);
 			dmu_tx_abort(tx);
@@ -2024,7 +2024,7 @@ receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
 	tx = dmu_tx_create(rwa->os);
 	dmu_tx_hold_bonus(tx, object_to_hold);
 	dmu_tx_hold_write(tx, object_to_hold, 0, 0);
-	err = dmu_tx_assign(tx, TXG_WAIT);
+	err = dmu_tx_assign(tx, DMU_TX_WAIT);
 	if (err != 0) {
 		dmu_tx_abort(tx);
 		return (err);
@@ -2228,7 +2228,7 @@ flush_write_batch_impl(struct receive_writer_arg *rwa)
 	dmu_tx_hold_write_by_dnode(tx, dn, first_drrw->drr_offset,
 	    last_drrw->drr_offset - first_drrw->drr_offset +
 	    last_drrw->drr_logical_size);
-	err = dmu_tx_assign(tx, TXG_WAIT);
+	err = dmu_tx_assign(tx, DMU_TX_WAIT);
 	if (err != 0) {
 		dmu_tx_abort(tx);
 		dnode_rele(dn, FTAG);
@@ -2501,7 +2501,7 @@ receive_write_embedded(struct receive_writer_arg *rwa,
 
 	dmu_tx_hold_write(tx, drrwe->drr_object,
 	    drrwe->drr_offset, drrwe->drr_length);
-	err = dmu_tx_assign(tx, TXG_WAIT);
+	err = dmu_tx_assign(tx, DMU_TX_WAIT);
 	if (err != 0) {
 		dmu_tx_abort(tx);
 		return (err);
@@ -2564,7 +2564,7 @@ receive_spill(struct receive_writer_arg *rwa, struct drr_spill *drrs,
 
 	dmu_tx_hold_spill(tx, db->db_object);
 
-	err = dmu_tx_assign(tx, TXG_WAIT);
+	err = dmu_tx_assign(tx, DMU_TX_WAIT);
 	if (err != 0) {
 		dmu_buf_rele(db, FTAG);
 		dmu_buf_rele(db_spill, FTAG);
@@ -568,7 +568,7 @@ commit_rl_updates(objset_t *os, struct merge_data *md, uint64_t object,
 {
 	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(os->os_spa)->dp_mos_dir);
 	dmu_tx_hold_space(tx, sizeof (struct redact_block_list_node));
-	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
+	VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT));
 	uint64_t txg = dmu_tx_get_txg(tx);
 	if (!md->md_synctask_txg[txg & TXG_MASK]) {
 		dsl_sync_task_nowait(dmu_tx_pool(tx),
@@ -1017,7 +1017,7 @@ dmu_tx_delay(dmu_tx_t *tx, uint64_t dirty)
  * decreasing performance.
  */
 static int
-dmu_tx_try_assign(dmu_tx_t *tx, uint64_t txg_how)
+dmu_tx_try_assign(dmu_tx_t *tx, uint64_t flags)
 {
 	spa_t *spa = tx->tx_pool->dp_spa;
 
@@ -1037,11 +1037,11 @@ dmu_tx_try_assign(dmu_tx_t *tx, uint64_t txg_how)
 		 * Otherwise, return EIO so that an error can get
 		 * propagated back to the VOP calls.
 		 *
-		 * Note that we always honor the txg_how flag regardless
+		 * Note that we always honor the `flags` flag regardless
 		 * of the failuremode setting.
 		 */
 		if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE &&
-		    !(txg_how & TXG_WAIT))
+		    !(flags & DMU_TX_WAIT))
 			return (SET_ERROR(EIO));
 
 		return (SET_ERROR(ERESTART));
@@ -1165,20 +1165,20 @@ dmu_tx_unassign(dmu_tx_t *tx)
 }
 
 /*
- * Assign tx to a transaction group; txg_how is a bitmask:
+ * Assign tx to a transaction group; `flags` is a bitmask:
 *
- * If TXG_WAIT is set and the currently open txg is full, this function
+ * If DMU_TX_WAIT is set and the currently open txg is full, this function
 * will wait until there's a new txg.  This should be used when no locks
 * are being held.  With this bit set, this function will only fail if
 * we're truly out of space (or over quota).
 *
- * If TXG_WAIT is *not* set and we can't assign into the currently open
+ * If DMU_TX_WAIT is *not* set and we can't assign into the currently open
 * txg without blocking, this function will return immediately with
 * ERESTART.  This should be used whenever locks are being held.  On an
 * ERESTART error, the caller should drop all locks, call dmu_tx_wait(),
 * and try again.
 *
- * If TXG_NOTHROTTLE is set, this indicates that this tx should not be
+ * If DMU_TX_NOTHROTTLE is set, this indicates that this tx should not be
 * delayed due on the ZFS Write Throttle (see comments in dsl_pool.c for
 * details on the throttle).  This is used by the VFS operations, after
 * they have already called dmu_tx_wait() (though most likely on a
@@ -1201,24 +1201,24 @@ dmu_tx_unassign(dmu_tx_t *tx)
 *	1 <- dmu_tx_get_txg(T3)
 */
 int
-dmu_tx_assign(dmu_tx_t *tx, uint64_t txg_how)
+dmu_tx_assign(dmu_tx_t *tx, uint64_t flags)
 {
 	int err;
 
 	ASSERT(tx->tx_txg == 0);
-	ASSERT0(txg_how & ~(TXG_WAIT | TXG_NOTHROTTLE));
+	ASSERT0(flags & ~(DMU_TX_WAIT | DMU_TX_NOTHROTTLE));
 	ASSERT(!dsl_pool_sync_context(tx->tx_pool));
 
 	/* If we might wait, we must not hold the config lock. */
-	IMPLY((txg_how & TXG_WAIT), !dsl_pool_config_held(tx->tx_pool));
+	IMPLY((flags & DMU_TX_WAIT), !dsl_pool_config_held(tx->tx_pool));
 
-	if ((txg_how & TXG_NOTHROTTLE))
+	if ((flags & DMU_TX_NOTHROTTLE))
 		tx->tx_dirty_delayed = B_TRUE;
 
-	while ((err = dmu_tx_try_assign(tx, txg_how)) != 0) {
+	while ((err = dmu_tx_try_assign(tx, flags)) != 0) {
 		dmu_tx_unassign(tx);
 
-		if (err != ERESTART || !(txg_how & TXG_WAIT))
+		if (err != ERESTART || !(flags & DMU_TX_WAIT))
 			return (err);
 
 		dmu_tx_wait(tx);
@@ -1263,9 +1263,9 @@ dmu_tx_wait(dmu_tx_t *tx)
 
 		/*
 		 * Note: setting tx_dirty_delayed only has effect if the
-		 * caller used TX_WAIT.  Otherwise they are going to
+		 * caller used DMU_TX_WAIT.  Otherwise they are going to
 		 * destroy this tx and try again.  The common case,
-		 * zfs_write(), uses TX_WAIT.
+		 * zfs_write(), uses DMU_TX_WAIT.
 		 */
 		tx->tx_dirty_delayed = B_TRUE;
 	} else if (spa_suspended(spa) || tx->tx_lasttried_txg == 0) {
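The assertions above pin the contract down: only DMU_TX_WAIT and DMU_TX_NOTHROTTLE may be passed, and a DMU_TX_WAIT caller must not hold the pool config lock. Condensed from the call sites in this patch, the two styles look like this (lock handling elided):

    /* No locks held: block until assigned; failure means out of space/quota. */
    VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT));

    /* Locks held: fail fast, then drop locks, wait, and retry. */
    error = dmu_tx_assign(tx, DMU_TX_NOWAIT);
    if (error == ERESTART) {
        /* drop locks here, then: */
        dmu_tx_wait(tx);
        dmu_tx_abort(tx);
        /* retry with DMU_TX_NOTHROTTLE | DMU_TX_NOWAIT */
    }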
@@ -1434,7 +1434,7 @@ dsl_scan_restart_resilver(dsl_pool_t *dp, uint64_t txg)
 	if (txg == 0) {
 		dmu_tx_t *tx;
 		tx = dmu_tx_create_dd(dp->dp_mos_dir);
-		VERIFY(0 == dmu_tx_assign(tx, TXG_WAIT));
+		VERIFY(0 == dmu_tx_assign(tx, DMU_TX_WAIT));
 
 		txg = dmu_tx_get_txg(tx);
 		dp->dp_scan->scn_restart_txg = txg;
@@ -58,7 +58,7 @@ dsl_sync_task_common(const char *pool, dsl_checkfunc_t *checkfunc,
 
 top:
 	tx = dmu_tx_create_dd(dp->dp_mos_dir);
-	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
+	VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT));
 
 	dst.dst_pool = dp;
 	dst.dst_txg = dmu_tx_get_txg(tx);
@@ -1984,7 +1984,7 @@ static void
 spa_unload_log_sm_flush_all(spa_t *spa)
 {
 	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
-	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
+	VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT));
 
 	ASSERT3U(spa->spa_log_flushall_txg, ==, 0);
 	spa->spa_log_flushall_txg = dmu_tx_get_txg(tx);
@@ -3262,7 +3262,7 @@ spa_livelist_condense_cb(void *arg, zthr_t *t)
 	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
 	dmu_tx_mark_netfree(tx);
 	dmu_tx_hold_space(tx, 1);
-	err = dmu_tx_assign(tx, TXG_NOWAIT | TXG_NOTHROTTLE);
+	err = dmu_tx_assign(tx, DMU_TX_NOWAIT | DMU_TX_NOTHROTTLE);
 	if (err == 0) {
 		/*
 		 * Prevent the condense zthr restarting before
@@ -8593,7 +8593,7 @@ spa_vdev_split_mirror(spa_t *spa, const char *newname, nvlist_t *config,
 	/* finally, update the original pool's config */
 	txg = spa_vdev_config_enter(spa);
 	tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
-	error = dmu_tx_assign(tx, TXG_WAIT);
+	error = dmu_tx_assign(tx, DMU_TX_WAIT);
 	if (error != 0)
 		dmu_tx_abort(tx);
 	for (c = 0; c < children; c++) {
@@ -385,7 +385,7 @@ spa_history_log_nvl(spa_t *spa, nvlist_t *nvl)
 	}
 
 	tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
-	err = dmu_tx_assign(tx, TXG_WAIT);
+	err = dmu_tx_assign(tx, DMU_TX_WAIT);
 	if (err) {
 		dmu_tx_abort(tx);
 		return (err);
@@ -561,7 +561,7 @@ spa_history_log_internal(spa_t *spa, const char *operation,
 	/* create a tx if we didn't get one */
 	if (tx == NULL) {
 		htx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
-		if (dmu_tx_assign(htx, TXG_WAIT) != 0) {
+		if (dmu_tx_assign(htx, DMU_TX_WAIT) != 0) {
 			dmu_tx_abort(htx);
 			return;
 		}
@@ -569,7 +569,7 @@ spa_condense_indirect_commit_entry(spa_t *spa,
 
 	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
 	dmu_tx_hold_space(tx, sizeof (*vimep) + sizeof (count));
-	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
+	VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT));
 	int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;
 
 	/*
@@ -158,7 +158,7 @@ vdev_initialize_change_state(vdev_t *vd, vdev_initializing_state_t new_state)
 		vd->vdev_initialize_state = new_state;
 
 	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
-	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
+	VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT));
 
 	if (new_state != VDEV_INITIALIZE_NONE) {
 		dsl_sync_task_nowait(spa_get_dsl(spa),
@@ -250,7 +250,7 @@ vdev_initialize_write(vdev_t *vd, uint64_t start, uint64_t size, abd_t *data)
 	mutex_exit(&vd->vdev_initialize_io_lock);
 
 	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
-	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
+	VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT));
 	uint64_t txg = dmu_tx_get_txg(tx);
 
 	spa_config_enter(spa, SCL_STATE_ALL, vd, RW_READER);
@@ -4625,7 +4625,7 @@ spa_raidz_expand_thread(void *arg, zthr_t *zthr)
 		dmu_tx_t *tx =
 		    dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
 
-		VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
+		VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT));
 		uint64_t txg = dmu_tx_get_txg(tx);
 
 		/*
@@ -287,7 +287,7 @@ vdev_rebuild_initiate(vdev_t *vd)
 	ASSERT(!vd->vdev_rebuilding);
 
 	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
-	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
+	VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT));
 
 	vd->vdev_rebuilding = B_TRUE;
 
@@ -592,7 +592,7 @@ vdev_rebuild_range(vdev_rebuild_t *vr, uint64_t start, uint64_t size)
 	mutex_exit(&vr->vr_io_lock);
 
 	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
-	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
+	VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT));
 	uint64_t txg = dmu_tx_get_txg(tx);
 
 	spa_config_enter(spa, SCL_STATE_ALL, vd, RW_READER);
@@ -932,7 +932,7 @@ vdev_rebuild_thread(void *arg)
 
 	dsl_pool_t *dp = spa_get_dsl(spa);
 	dmu_tx_t *tx = dmu_tx_create_dd(dp->dp_mos_dir);
-	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
+	VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT));
 
 	mutex_enter(&vd->vdev_rebuild_lock);
 	if (error == 0) {
@@ -1717,7 +1717,7 @@ spa_vdev_remove_thread(void *arg)
 			dmu_tx_t *tx =
 			    dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
 
-			VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
+			VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT));
 			uint64_t txg = dmu_tx_get_txg(tx);
 
 			/*
@@ -342,7 +342,7 @@ vdev_trim_change_state(vdev_t *vd, vdev_trim_state_t new_state,
 		vd->vdev_trim_state = new_state;
 
 	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
-	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
+	VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT));
 	dsl_sync_task_nowait(spa_get_dsl(spa), vdev_trim_zap_update_sync,
 	    guid, tx);
 
@@ -527,7 +527,7 @@ vdev_trim_range(trim_args_t *ta, uint64_t start, uint64_t size)
 	mutex_exit(&vd->vdev_trim_io_lock);
 
 	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
-	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
+	VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT));
 	uint64_t txg = dmu_tx_get_txg(tx);
 
 	spa_config_enter(spa, SCL_STATE_ALL, vd, RW_READER);
@@ -340,7 +340,7 @@ zfs_set_userquota(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type,
 	}
 	if (fuid_dirtied)
 		zfs_fuid_txhold(zfsvfs, tx);
-	err = dmu_tx_assign(tx, TXG_WAIT);
+	err = dmu_tx_assign(tx, DMU_TX_WAIT);
 	if (err) {
 		dmu_tx_abort(tx);
 		return (err);
@@ -900,7 +900,7 @@ top:
 
 		zp->z_size = end;
 		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
-		error = dmu_tx_assign(tx, TXG_WAIT);
+		error = dmu_tx_assign(tx, DMU_TX_WAIT);
 		if (error) {
 			zrele(zp);
 			if (error == ERESTART) {
@@ -265,7 +265,7 @@ zfs_sa_set_xattr(znode_t *zp, const char *name, const void *value, size_t vsize)
 	dmu_tx_hold_sa_create(tx, size);
 	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
 
-	error = dmu_tx_assign(tx, TXG_WAIT);
+	error = dmu_tx_assign(tx, DMU_TX_WAIT);
 	if (error) {
 		dmu_tx_abort(tx);
 	} else {
@@ -835,7 +835,7 @@ zfs_write(znode_t *zp, zfs_uio_t *uio, int ioflag, cred_t *cr)
 		dmu_tx_hold_write_by_dnode(tx, DB_DNODE(db), woff, nbytes);
 		DB_DNODE_EXIT(db);
 		zfs_sa_upgrade_txholds(tx, zp);
-		error = dmu_tx_assign(tx, TXG_WAIT);
+		error = dmu_tx_assign(tx, DMU_TX_WAIT);
 		if (error) {
 			dmu_tx_abort(tx);
 			if (abuf != NULL)
@@ -1660,7 +1660,7 @@ zfs_clone_range(znode_t *inzp, uint64_t *inoffp, znode_t *outzp,
 		dmu_tx_hold_clone_by_dnode(tx, DB_DNODE(db), outoff, size);
 		DB_DNODE_EXIT(db);
 		zfs_sa_upgrade_txholds(tx, outzp);
-		error = dmu_tx_assign(tx, TXG_WAIT);
+		error = dmu_tx_assign(tx, DMU_TX_WAIT);
 		if (error != 0) {
 			dmu_tx_abort(tx);
 			break;
@@ -1827,7 +1827,7 @@ zfs_clone_range_replay(znode_t *zp, uint64_t off, uint64_t len, uint64_t blksz,
 	dmu_tx_hold_clone_by_dnode(tx, DB_DNODE(db), off, len);
 	DB_DNODE_EXIT(db);
 	zfs_sa_upgrade_txholds(tx, zp);
-	error = dmu_tx_assign(tx, TXG_WAIT);
+	error = dmu_tx_assign(tx, DMU_TX_WAIT);
 	if (error != 0) {
 		dmu_tx_abort(tx);
 		zfs_exit(zfsvfs, FTAG);
@@ -957,7 +957,7 @@ zil_commit_activate_saxattr_feature(zilog_t *zilog)
 	    dmu_objset_type(zilog->zl_os) != DMU_OST_ZVOL &&
 	    !dsl_dataset_feature_is_active(ds, SPA_FEATURE_ZILSAXATTR)) {
 		tx = dmu_tx_create(zilog->zl_os);
-		VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
+		VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT));
 		dsl_dataset_dirty(ds, tx);
 		txg = dmu_tx_get_txg(tx);
 
@@ -1003,7 +1003,7 @@ zil_create(zilog_t *zilog)
 	 */
 	if (BP_IS_HOLE(&blk) || BP_SHOULD_BYTESWAP(&blk)) {
 		tx = dmu_tx_create(zilog->zl_os);
-		VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
+		VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT));
 		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
 		txg = dmu_tx_get_txg(tx);
 
@@ -1093,7 +1093,7 @@ zil_destroy(zilog_t *zilog, boolean_t keep_first)
 		return (B_FALSE);
 
 	tx = dmu_tx_create(zilog->zl_os);
-	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
+	VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT));
 	dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
 	txg = dmu_tx_get_txg(tx);
 
@@ -1977,7 +1977,7 @@ next_lwb:
 	 * Open transaction to allocate the next block pointer.
 	 */
 	dmu_tx_t *tx = dmu_tx_create(zilog->zl_os);
-	VERIFY0(dmu_tx_assign(tx, TXG_WAIT | TXG_NOTHROTTLE));
+	VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT | DMU_TX_NOTHROTTLE));
 	dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
 	uint64_t txg = dmu_tx_get_txg(tx);
 
@@ -3454,9 +3454,9 @@ zil_commit_itx_assign(zilog_t *zilog, zil_commit_waiter_t *zcw)
 	 * Since we are not going to create any new dirty data, and we
 	 * can even help with clearing the existing dirty data, we
 	 * should not be subject to the dirty data based delays. We
-	 * use TXG_NOTHROTTLE to bypass the delay mechanism.
+	 * use DMU_TX_NOTHROTTLE to bypass the delay mechanism.
 	 */
-	VERIFY0(dmu_tx_assign(tx, TXG_WAIT | TXG_NOTHROTTLE));
+	VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT | DMU_TX_NOTHROTTLE));
 
 	itx_t *itx = zil_itx_create(TX_COMMIT, sizeof (lr_t));
 	itx->itx_sync = B_TRUE;
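The zil.c call sites above combine both flags: the commit path cannot tolerate assignment failure (hence VERIFY0 with DMU_TX_WAIT) and, as the zil_commit_itx_assign() comment explains, creates no new dirty data, so it opts out of the write throttle with DMU_TX_NOTHROTTLE. Condensed from the hunks above:

    /* ZIL path: must succeed, and bypasses the dirty-data throttle. */
    dmu_tx_t *tx = dmu_tx_create(zilog->zl_os);
    VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT | DMU_TX_NOTHROTTLE));
    dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
    uint64_t txg = dmu_tx_get_txg(tx);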
@@ -276,7 +276,7 @@ zvol_update_volsize(uint64_t volsize, objset_t *os)
 	tx = dmu_tx_create(os);
 	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
 	dmu_tx_mark_netfree(tx);
-	error = dmu_tx_assign(tx, TXG_WAIT);
+	error = dmu_tx_assign(tx, DMU_TX_WAIT);
 	if (error) {
 		dmu_tx_abort(tx);
 		return (SET_ERROR(error));
@@ -459,7 +459,7 @@ zvol_replay_truncate(void *arg1, void *arg2, boolean_t byteswap)
 
 	dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
 	dmu_tx_mark_netfree(tx);
-	int error = dmu_tx_assign(tx, TXG_WAIT);
+	int error = dmu_tx_assign(tx, DMU_TX_WAIT);
 	if (error != 0) {
 		dmu_tx_abort(tx);
 	} else {
@@ -506,7 +506,7 @@ zvol_replay_write(void *arg1, void *arg2, boolean_t byteswap)
 
 	tx = dmu_tx_create(os);
 	dmu_tx_hold_write(tx, ZVOL_OBJ, offset, length);
-	error = dmu_tx_assign(tx, TXG_WAIT);
+	error = dmu_tx_assign(tx, DMU_TX_WAIT);
 	if (error) {
 		dmu_tx_abort(tx);
 	} else {
@@ -557,7 +557,7 @@ zvol_replay_clone_range(void *arg1, void *arg2, boolean_t byteswap)
 		return (error);
 	tx = dmu_tx_create(os);
 	dmu_tx_hold_clone_by_dnode(tx, zv->zv_dn, off, len);
-	error = dmu_tx_assign(tx, TXG_WAIT);
+	error = dmu_tx_assign(tx, DMU_TX_WAIT);
 	if (error != 0) {
 		dmu_tx_abort(tx);
 		goto out;
@@ -722,7 +722,7 @@ zvol_clone_range(zvol_state_t *zv_src, uint64_t inoff, zvol_state_t *zv_dst,
 
 	tx = dmu_tx_create(zv_dst->zv_objset);
 	dmu_tx_hold_clone_by_dnode(tx, zv_dst->zv_dn, outoff, size);
-	error = dmu_tx_assign(tx, TXG_WAIT);
+	error = dmu_tx_assign(tx, DMU_TX_WAIT);
 	if (error != 0) {
 		dmu_tx_abort(tx);
 		break;