Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
iomap: inline iomap_dio_bio_opflags()
It is neater to build blk_opf_t fully in one place, so inline
iomap_dio_bio_opflags() in iomap_dio_bio_iter(). Also tidy up the logic
for dealing with IOMAP_DIO_CALLER_COMP, and in general separate the
logic for flags associated with reads from that for writes.

Originally-from: Christoph Hellwig <hch@lst.de>
Reviewed-by: "Ritesh Harjani (IBM)" <ritesh.list@gmail.com>
Signed-off-by: John Garry <john.g.garry@oracle.com>
Link: https://lore.kernel.org/r/20250320120250.4087011-2-john.g.garry@oracle.com
Signed-off-by: Christian Brauner <brauner@kernel.org>
Commit: d279c80e0b
Parent: b26816b4e3
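Before this change, the read/write operation flags were assembled in
iomap_dio_bio_opflags() while related dio-flag updates happened back in
the caller; afterwards everything accumulates in bio_opf inside
iomap_dio_bio_iter(). A minimal userspace sketch of the resulting
control flow (the constants and function below are hypothetical
stand-ins, not the kernel's blk_opf_t bits):

/*
 * Sketch of the flag construction this patch arrives at: operation
 * flags accumulate in one place, with one branch for writes and one
 * for reads. The enum values are stand-ins, not the kernel's.
 */
#include <stdbool.h>
#include <stdio.h>

enum {
	REQ_SYNC     = 1 << 0,
	REQ_IDLE     = 1 << 1,
	REQ_OP_READ  = 1 << 2,
	REQ_OP_WRITE = 1 << 3,
	REQ_FUA      = 1 << 4,
	REQ_ATOMIC   = 1 << 5,
};

static unsigned int build_opflags(bool is_write, bool is_atomic, bool use_fua)
{
	unsigned int opf = REQ_SYNC | REQ_IDLE;	/* common base flags */

	if (is_write) {
		opf |= REQ_OP_WRITE;
		if (is_atomic)
			opf |= REQ_ATOMIC;	/* hardware-atomic write */
		if (use_fua)
			opf |= REQ_FUA;		/* skip post-I/O cache flush */
	} else {
		opf |= REQ_OP_READ;
	}
	return opf;
}

int main(void)
{
	printf("atomic FUA write -> %#x\n", build_opflags(true, true, true));
	printf("plain read       -> %#x\n", build_opflags(false, false, false));
	return 0;
}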
diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
--- a/fs/iomap/direct-io.c
+++ b/fs/iomap/direct-io.c
@@ -312,27 +312,20 @@ static int iomap_dio_zero(const struct iomap_iter *iter, struct iomap_dio *dio,
 }
 
 /*
- * Figure out the bio's operation flags from the dio request, the
- * mapping, and whether or not we want FUA. Note that we can end up
- * clearing the WRITE_THROUGH flag in the dio request.
+ * Use a FUA write if we need datasync semantics and this is a pure data I/O
+ * that doesn't require any metadata updates (including after I/O completion
+ * such as unwritten extent conversion) and the underlying device either
+ * doesn't have a volatile write cache or supports FUA.
+ * This allows us to avoid cache flushes on I/O completion.
  */
-static inline blk_opf_t iomap_dio_bio_opflags(struct iomap_dio *dio,
-		const struct iomap *iomap, bool use_fua, bool atomic_hw)
+static inline bool iomap_dio_can_use_fua(const struct iomap *iomap,
+		struct iomap_dio *dio)
 {
-	blk_opf_t opflags = REQ_SYNC | REQ_IDLE;
-
-	if (!(dio->flags & IOMAP_DIO_WRITE))
-		return REQ_OP_READ;
-
-	opflags |= REQ_OP_WRITE;
-	if (use_fua)
-		opflags |= REQ_FUA;
-	else
-		dio->flags &= ~IOMAP_DIO_WRITE_THROUGH;
-	if (atomic_hw)
-		opflags |= REQ_ATOMIC;
-
-	return opflags;
+	if (iomap->flags & (IOMAP_F_SHARED | IOMAP_F_DIRTY))
+		return false;
+	if (!(dio->flags & IOMAP_DIO_WRITE_THROUGH))
+		return false;
+	return !bdev_write_cache(iomap->bdev) || bdev_fua(iomap->bdev);
 }
 
 static int iomap_dio_bio_iter(struct iomap_iter *iter, struct iomap_dio *dio)
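The replacement helper reduces the FUA decision to three checks. A
hedged userspace restatement, with booleans standing in for the
mapping-flag tests and the bdev_write_cache()/bdev_fua() capability
queries:

#include <stdbool.h>

/*
 * Restatement of the iomap_dio_can_use_fua() decision (sketch; the
 * parameters stand in for the kernel's mapping-flag tests and
 * block-device capability queries).
 */
static bool can_use_fua(bool shared_or_dirty, bool write_through,
			bool volatile_cache, bool has_fua)
{
	if (shared_or_dirty)	/* completion needs metadata updates */
		return false;
	if (!write_through)	/* no datasync semantics requested */
		return false;
	/* safe if no volatile cache to flush, or the device has FUA */
	return !volatile_cache || has_fua;
}

When the helper says no, the caller clears IOMAP_DIO_WRITE_THROUGH so
completion knows a cache flush is still owed.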
@@ -340,24 +333,28 @@ static int iomap_dio_bio_iter(struct iomap_iter *iter, struct iomap_dio *dio)
 	const struct iomap *iomap = &iter->iomap;
 	struct inode *inode = iter->inode;
 	unsigned int fs_block_size = i_blocksize(inode), pad;
-	bool atomic_hw = iter->flags & IOMAP_ATOMIC_HW;
 	const loff_t length = iomap_length(iter);
 	loff_t pos = iter->pos;
-	blk_opf_t bio_opf;
+	blk_opf_t bio_opf = REQ_SYNC | REQ_IDLE;
 	struct bio *bio;
 	bool need_zeroout = false;
-	bool use_fua = false;
 	int nr_pages, ret = 0;
 	u64 copied = 0;
 	size_t orig_count;
 
-	if (atomic_hw && length != iter->len)
-		return -EINVAL;
-
 	if ((pos | length) & (bdev_logical_block_size(iomap->bdev) - 1) ||
 	    !bdev_iter_is_aligned(iomap->bdev, dio->submit.iter))
 		return -EINVAL;
 
+	if (dio->flags & IOMAP_DIO_WRITE) {
+		bio_opf |= REQ_OP_WRITE;
+
+		if (iter->flags & IOMAP_ATOMIC_HW) {
+			if (length != iter->len)
+				return -EINVAL;
+			bio_opf |= REQ_ATOMIC;
+		}
+
 	if (iomap->type == IOMAP_UNWRITTEN) {
 		dio->flags |= IOMAP_DIO_UNWRITTEN;
 		need_zeroout = true;
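Two things to note here. First, the side-by-side view this was captured
from appears to suppress whitespace-only changes, so lines the patch
merely re-indents inside the new write branch (the IOMAP_UNWRITTEN
block and the code that follows) render as unchanged context. Second,
the hardware-atomic length check moves inline: an atomic write must be
covered by a single mapping. A sketch of that rule (hypothetical
simplified signature):

#include <errno.h>

/*
 * Sketch of the hw-atomic validation: a hardware-atomic write cannot
 * be split across mappings, so the current mapping must cover the
 * entire remaining request.
 */
static int check_atomic_extent(long long mapped_len, long long total_len)
{
	if (mapped_len != total_len)
		return -EINVAL;	/* would require splitting the atomic I/O */
	return 0;
}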
@@ -369,23 +366,26 @@ static int iomap_dio_bio_iter(struct iomap_iter *iter, struct iomap_dio *dio)
 	if (iomap->flags & IOMAP_F_NEW) {
 		need_zeroout = true;
 	} else if (iomap->type == IOMAP_MAPPED) {
+		if (iomap_dio_can_use_fua(iomap, dio))
+			bio_opf |= REQ_FUA;
+		else
+			dio->flags &= ~IOMAP_DIO_WRITE_THROUGH;
+	}
+
 	/*
-	 * Use a FUA write if we need datasync semantics, this is a pure
-	 * data IO that doesn't require any metadata updates (including
-	 * after IO completion such as unwritten extent conversion) and
-	 * the underlying device either supports FUA or doesn't have
-	 * a volatile write cache. This allows us to avoid cache flushes
-	 * on IO completion. If we can't use writethrough and need to
-	 * sync, disable in-task completions as dio completion will
-	 * need to call generic_write_sync() which will do a blocking
-	 * fsync / cache flush call.
+	 * We can only do deferred completion for pure overwrites that
+	 * don't require additional I/O at completion time.
+	 *
+	 * This rules out writes that need zeroing or extent conversion,
+	 * extend the file size, or issue metadata I/O or cache flushes
+	 * during completion processing.
 	 */
-	if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) &&
-	    (dio->flags & IOMAP_DIO_WRITE_THROUGH) &&
-	    (bdev_fua(iomap->bdev) || !bdev_write_cache(iomap->bdev)))
-		use_fua = true;
-	else if (dio->flags & IOMAP_DIO_NEED_SYNC)
+	if (need_zeroout || (pos >= i_size_read(inode)) ||
+	    ((dio->flags & IOMAP_DIO_NEED_SYNC) &&
+	     !(bio_opf & REQ_FUA)))
 		dio->flags &= ~IOMAP_DIO_CALLER_COMP;
+	} else {
+		bio_opf |= REQ_OP_READ;
+	}
 	}
 
 	/*
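The deferred-completion comment and condition move up here from later
in the function (see the next hunk) and are evaluated once bio_opf is
final, so the test can read !(bio_opf & REQ_FUA) directly instead of
carrying a separate use_fua local. Restated as a predicate (a sketch;
the booleans stand in for the patch's need_zeroout, i_size comparison,
and dio-flag tests):

#include <stdbool.h>

/*
 * Sketch of the deferred-completion rule: caller (deferred) completion
 * is only kept for pure overwrites that need no further work at I/O
 * completion time.
 */
static bool keep_caller_completion(bool need_zeroout, bool extends_size,
				   bool need_sync, bool fua_used)
{
	if (need_zeroout)		/* zeroing or extent conversion follows */
		return false;
	if (extends_size)		/* i_size update at completion */
		return false;
	if (need_sync && !fua_used)	/* completion must flush the cache */
		return false;
	return true;
}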
@@ -399,18 +399,6 @@ static int iomap_dio_bio_iter(struct iomap_iter *iter, struct iomap_dio *dio)
 	if (!iov_iter_count(dio->submit.iter))
 		goto out;
 
-	/*
-	 * We can only do deferred completion for pure overwrites that
-	 * don't require additional IO at completion. This rules out
-	 * writes that need zeroing or extent conversion, extend
-	 * the file size, or issue journal IO or cache flushes
-	 * during completion processing.
-	 */
-	if (need_zeroout ||
-	    ((dio->flags & IOMAP_DIO_NEED_SYNC) && !use_fua) ||
-	    ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode)))
-		dio->flags &= ~IOMAP_DIO_CALLER_COMP;
-
 	/*
 	 * The rules for polled IO completions follow the guidelines as the
 	 * ones we set for inline and deferred completions. If none of those
@@ -428,8 +416,6 @@ static int iomap_dio_bio_iter(struct iomap_iter *iter, struct iomap_dio *dio)
 		goto out;
 	}
 
-	bio_opf = iomap_dio_bio_opflags(dio, iomap, use_fua, atomic_hw);
-
 	nr_pages = bio_iov_vecs_to_alloc(dio->submit.iter, BIO_MAX_VECS);
 	do {
 		size_t n;
@@ -461,7 +447,7 @@ static int iomap_dio_bio_iter(struct iomap_iter *iter, struct iomap_dio *dio)
 		}
 
 		n = bio->bi_iter.bi_size;
-		if (WARN_ON_ONCE(atomic_hw && n != length)) {
+		if (WARN_ON_ONCE((bio_opf & REQ_ATOMIC) && n != length)) {
 			/*
 			 * This bio should have covered the complete length,
 			 * which it doesn't, so error. We may need to zero out
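With bio_opf now fully built before the bio submission loop, this final
check can test REQ_ATOMIC on the operation flags themselves, which is
what lets the atomic_hw local (along with use_fua) disappear.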