mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson
synced 2025-09-02 16:44:59 +00:00
dm-table: atomic writes support
Support stacking atomic write limits for DM devices. All the pre-existing code in blk_stack_atomic_writes_limits() already takes care of finding the aggregate limits from the bottom devices. Feature flag DM_TARGET_ATOMIC_WRITES is introduced so that atomic writes can be enabled on personalities selectively. This is to ensure that atomic writes are only enabled when verified to be working properly (for a specific personality). In addition, it just may not make sense to enable atomic writes on some personalities (so this flag also helps there). Signed-off-by: John Garry <john.g.garry@oracle.com> Reviewed-by: Mike Snitzer <snitzer@kernel.org> Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
This commit is contained in:
parent
a38425935f
commit
3194e36488
@ -1806,6 +1806,32 @@ static bool dm_table_supports_secure_erase(struct dm_table *t)
|
|||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
 * iterate_devices callback: report whether one underlying device of a
 * target CANNOT handle atomic writes.  Returns nonzero for a non-capable
 * bdev, so an iterate_devices() pass returning true means "found at least
 * one device without atomic write support".  ti, start, len and data are
 * unused here; the decision rests solely on the bottom block device.
 */
static int device_not_atomic_write_capable(struct dm_target *ti,
			struct dm_dev *dev, sector_t start,
			sector_t len, void *data)
{
	return !bdev_can_atomic_write(dev->bdev);
}
|
||||||
|
|
||||||
|
/*
 * Return true only when every target in the table can do atomic writes:
 * each target type must have opted in via DM_TARGET_ATOMIC_WRITES and
 * every bottom device it maps to must be atomic-write capable.  Used by
 * dm_table_set_restrictions() to decide whether to set
 * BLK_FEAT_ATOMIC_WRITES on the queue limits.
 */
static bool dm_table_supports_atomic_writes(struct dm_table *t)
{
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		/*
		 * The target type must explicitly advertise atomic write
		 * support; the feature is opt-in per personality.
		 */
		if (!dm_target_supports_atomic_writes(ti->type))
			return false;

		/*
		 * Without an iterate_devices hook the bottom devices
		 * cannot be inspected, so be conservative and refuse.
		 */
		if (!ti->type->iterate_devices)
			return false;

		/*
		 * Nonzero means the callback flagged at least one
		 * underlying device that lacks atomic write capability.
		 */
		if (ti->type->iterate_devices(ti,
			device_not_atomic_write_capable, NULL)) {
			return false;
		}
	}
	return true;
}
|
||||||
|
|
||||||
int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
|
int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
|
||||||
struct queue_limits *limits)
|
struct queue_limits *limits)
|
||||||
{
|
{
|
||||||
@ -1854,6 +1880,9 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
|
|||||||
return r;
|
return r;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (dm_table_supports_atomic_writes(t))
|
||||||
|
limits->features |= BLK_FEAT_ATOMIC_WRITES;
|
||||||
|
|
||||||
r = queue_limits_set(q, limits);
|
r = queue_limits_set(q, limits);
|
||||||
if (r)
|
if (r)
|
||||||
return r;
|
return r;
|
||||||
|
@ -299,6 +299,9 @@ struct target_type {
|
|||||||
#define dm_target_supports_mixed_zoned_model(type) (false)
|
#define dm_target_supports_mixed_zoned_model(type) (false)
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
#define DM_TARGET_ATOMIC_WRITES 0x00000400
|
||||||
|
#define dm_target_supports_atomic_writes(type) ((type)->features & DM_TARGET_ATOMIC_WRITES)
|
||||||
|
|
||||||
struct dm_target {
|
struct dm_target {
|
||||||
struct dm_table *table;
|
struct dm_table *table;
|
||||||
struct target_type *type;
|
struct target_type *type;
|
||||||
|
@ -286,9 +286,9 @@ enum {
|
|||||||
#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
|
#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
|
||||||
|
|
||||||
#define DM_VERSION_MAJOR 4
|
#define DM_VERSION_MAJOR 4
|
||||||
#define DM_VERSION_MINOR 48
|
#define DM_VERSION_MINOR 49
|
||||||
#define DM_VERSION_PATCHLEVEL 0
|
#define DM_VERSION_PATCHLEVEL 0
|
||||||
#define DM_VERSION_EXTRA "-ioctl (2023-03-01)"
|
#define DM_VERSION_EXTRA "-ioctl (2025-01-17)"
|
||||||
|
|
||||||
/* Status bits */
|
/* Status bits */
|
||||||
#define DM_READONLY_FLAG (1 << 0) /* In/Out */
|
#define DM_READONLY_FLAG (1 << 0) /* In/Out */
|
||||||
|
Loading…
Reference in New Issue
Block a user