file-posix: Fix aio=threads performance regression after enabling FUA

For aio=threads, we currently don't implement REQ_FUA in any useful
way, but just issue a separate raw_co_flush_to_disk() call. This changes
behaviour compared to the old state, which used bdrv_co_flush() with its
optimisations. As a quick fix, call bdrv_co_flush() again like before.
Eventually, we can use pwritev2() to make use of RWF_DSYNC if available,
but we'll still have to keep this code path as a fallback, so this fix
is required either way.
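
As a sketch of that eventual direction (illustrative only; fua_pwritev()
is a hypothetical helper, not QEMU code, and the errno handling is an
assumption), a FUA write via pwritev2() with a write-plus-flush fallback
could look roughly like this:

    #define _GNU_SOURCE
    #include <errno.h>
    #include <sys/uio.h>
    #include <unistd.h>

    /* Hypothetical helper: perform a FUA write on fd. */
    static ssize_t fua_pwritev(int fd, const struct iovec *iov, int iovcnt,
                               off_t offset)
    {
        ssize_t ret;

    #ifdef RWF_DSYNC
        /* Preferred path: one syscall, data (and the metadata needed to
         * read it back) is durable when pwritev2() returns. */
        ret = pwritev2(fd, iov, iovcnt, offset, RWF_DSYNC);
        if (ret >= 0 || (errno != EOPNOTSUPP && errno != EINVAL)) {
            return ret;
        }
        /* Flag not supported here; fall back to write followed by flush. */
    #endif

        ret = pwritev(fd, iov, iovcnt, offset);
        if (ret < 0) {
            return ret;
        }
        return fdatasync(fd) == 0 ? ret : -1;
    }

The fallback branch is exactly the shape of the code path this patch
touches: an ordinary write followed by an explicit flush.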

While the fix itself is a one-liner, some new graph locking annotations
are needed to convince TSA that the locking is correct.
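
As background, the GRAPH_RDLOCK marker added below builds on clang's
Thread Safety Analysis (TSA). A rough sketch of the mechanism, assuming
definitions along the lines of include/block/graph-lock.h (the exact
macro names here are illustrative):

    /* Illustrative sketch, not a verbatim copy of QEMU's headers: the
     * attribute tells clang's Thread Safety Analysis that callers must
     * hold the block graph lock (shared, i.e. for reading) when calling
     * an annotated function. */
    #define TSA(x)                 __attribute__((x))
    #define TSA_REQUIRES_SHARED(x) TSA(requires_shared_capability(x))
    #define GRAPH_RDLOCK           TSA_REQUIRES_SHARED(graph_lock)

With functions like raw_co_prw() annotated this way, the compiler can
check at build time that every caller holds the graph read lock.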

Cc: qemu-stable@nongnu.org
Fixes: 984a32f17e ("file-posix: Support FUA writes")
Buglink: https://issues.redhat.com/browse/RHEL-96854
Reported-by: Tingting Mao <timao@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Message-ID: <20250625085019.27735-1-kwolf@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Author: Kevin Wolf <kwolf@redhat.com>
Date:   2025-06-25 10:50:19 +02:00
parent 430e2be81e
commit d402da1360

@@ -2564,9 +2564,9 @@ static inline bool raw_check_linux_aio(BDRVRawState *s)
 }
 #endif
 
-static int coroutine_fn raw_co_prw(BlockDriverState *bs, int64_t *offset_ptr,
-                                   uint64_t bytes, QEMUIOVector *qiov, int type,
-                                   int flags)
+static int coroutine_fn GRAPH_RDLOCK
+raw_co_prw(BlockDriverState *bs, int64_t *offset_ptr, uint64_t bytes,
+           QEMUIOVector *qiov, int type, int flags)
 {
     BDRVRawState *s = bs->opaque;
     RawPosixAIOData acb;
@@ -2625,7 +2625,7 @@ static int coroutine_fn raw_co_prw(BlockDriverState *bs, int64_t *offset_ptr,
         ret = raw_thread_pool_submit(handle_aiocb_rw, &acb);
         if (ret == 0 && (flags & BDRV_REQ_FUA)) {
             /* TODO Use pwritev2() instead if it's available */
-            ret = raw_co_flush_to_disk(bs);
+            ret = bdrv_co_flush(bs);
         }
         goto out; /* Avoid the compiler err of unused label */
     }
@@ -2660,16 +2660,16 @@ out:
     return ret;
 }
 
-static int coroutine_fn raw_co_preadv(BlockDriverState *bs, int64_t offset,
-                                      int64_t bytes, QEMUIOVector *qiov,
-                                      BdrvRequestFlags flags)
+static int coroutine_fn GRAPH_RDLOCK
+raw_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
+              QEMUIOVector *qiov, BdrvRequestFlags flags)
 {
     return raw_co_prw(bs, &offset, bytes, qiov, QEMU_AIO_READ, flags);
 }
 
-static int coroutine_fn raw_co_pwritev(BlockDriverState *bs, int64_t offset,
-                                       int64_t bytes, QEMUIOVector *qiov,
-                                       BdrvRequestFlags flags)
+static int coroutine_fn GRAPH_RDLOCK
+raw_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
+               QEMUIOVector *qiov, BdrvRequestFlags flags)
 {
     return raw_co_prw(bs, &offset, bytes, qiov, QEMU_AIO_WRITE, flags);
 }
@@ -3606,10 +3606,11 @@ static int coroutine_fn raw_co_zone_mgmt(BlockDriverState *bs, BlockZoneOp op,
 #endif
 
 #if defined(CONFIG_BLKZONED)
-static int coroutine_fn raw_co_zone_append(BlockDriverState *bs,
-                                           int64_t *offset,
-                                           QEMUIOVector *qiov,
-                                           BdrvRequestFlags flags) {
+static int coroutine_fn GRAPH_RDLOCK
+raw_co_zone_append(BlockDriverState *bs,
+                   int64_t *offset,
+                   QEMUIOVector *qiov,
+                   BdrvRequestFlags flags) {
     assert(flags == 0);
     int64_t zone_size_mask = bs->bl.zone_size - 1;
     int64_t iov_len = 0;