ublk: properly serialize all FETCH_REQs

Most uring_cmds issued against ublk character devices are serialized because each command affects only one queue, and there is an early check which only allows a single task (the queue's ubq_daemon) to issue uring_cmds against that queue. However, this mechanism does not work for FETCH_REQs, since they are expected before ubq_daemon is set. Since FETCH_REQs are only used at initialization and not in the fast path, serialize them using the per-ublk-device mutex. This fixes a number of data races that were previously possible if a badly behaved ublk server decided to issue multiple FETCH_REQs against the same qid/tag concurrently.

Reported-by: Caleb Sander Mateos <csander@purestorage.com>
Signed-off-by: Uday Shankar <ushankar@purestorage.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20250416035444.99569-2-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in: parent 3bf540609c, commit b69b8edfb2
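The serialization pattern the commit message describes (taking the per-device mutex around both the check that the queue is not yet set up and the update that marks it ready) can be illustrated with a minimal userspace sketch. This is a pthreads analogy with made-up names (struct queue, fetch(), server_thread()), not the driver code; the actual change is in the diff below.

/*
 * Pthreads analogy (illustrative names, not driver code): the check that
 * the queue is not yet fully set up and the update that marks it ready
 * happen under one mutex hold, so two concurrent FETCH-like requests can
 * never both claim the same slot.
 */
#include <pthread.h>
#include <stdio.h>

struct queue {
	pthread_mutex_t *dev_mutex;	/* plays the role of ub->mutex */
	int nr_io_ready;
	int queue_depth;
	pthread_t daemon;		/* plays the role of ubq->ubq_daemon */
};

/* analogous to ublk_fetch(): validate and mark ready in one critical section */
static int fetch(struct queue *q)
{
	int ret = 0;

	pthread_mutex_lock(q->dev_mutex);
	if (q->nr_io_ready == q->queue_depth) {
		ret = -1;	/* queue already set up; -EBUSY in the driver */
		goto out;
	}
	if (++q->nr_io_ready == q->queue_depth)
		q->daemon = pthread_self();	/* exactly one task can win */
out:
	pthread_mutex_unlock(q->dev_mutex);
	return ret;
}

static void *server_thread(void *arg)
{
	fetch(arg);
	return NULL;
}

int main(void)
{
	pthread_mutex_t dev_mutex = PTHREAD_MUTEX_INITIALIZER;
	struct queue q = { .dev_mutex = &dev_mutex, .queue_depth = 2 };
	pthread_t a, b;

	pthread_create(&a, NULL, server_thread, &q);
	pthread_create(&b, NULL, server_thread, &q);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("nr_io_ready=%d (never exceeds queue_depth)\n", q.nr_io_ready);
	return 0;
}

Before this patch, the equivalent of that check ran outside the mutex (only ublk_mark_io_ready() took it), so two tasks could both pass validation for the same qid/tag; the diff widens the critical section instead.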
@@ -1832,8 +1832,8 @@ static void ublk_nosrv_work(struct work_struct *work)
 
 /* device can only be started after all IOs are ready */
 static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
+	__must_hold(&ub->mutex)
 {
-	mutex_lock(&ub->mutex);
 	ubq->nr_io_ready++;
 	if (ublk_queue_ready(ubq)) {
 		ubq->ubq_daemon = current;
@@ -1845,7 +1845,6 @@ static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
 	}
 	if (ub->nr_queues_ready == ub->dev_info.nr_hw_queues)
 		complete_all(&ub->completion);
-	mutex_unlock(&ub->mutex);
 }
 
 static void ublk_handle_need_get_data(struct ublk_device *ub, int q_id,
@@ -1929,6 +1928,52 @@ static int ublk_unregister_io_buf(struct io_uring_cmd *cmd,
 	return io_buffer_unregister_bvec(cmd, index, issue_flags);
 }
 
+static int ublk_fetch(struct io_uring_cmd *cmd, struct ublk_queue *ubq,
+		      struct ublk_io *io, __u64 buf_addr)
+{
+	struct ublk_device *ub = ubq->dev;
+	int ret = 0;
+
+	/*
+	 * When handling FETCH command for setting up ublk uring queue,
+	 * ub->mutex is the innermost lock, and we won't block for handling
+	 * FETCH, so it is fine even for IO_URING_F_NONBLOCK.
+	 */
+	mutex_lock(&ub->mutex);
+	/* UBLK_IO_FETCH_REQ is only allowed before queue is setup */
+	if (ublk_queue_ready(ubq)) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	/* allow each command to be FETCHed at most once */
+	if (io->flags & UBLK_IO_FLAG_ACTIVE) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV);
+
+	if (ublk_need_map_io(ubq)) {
+		/*
+		 * FETCH_RQ has to provide IO buffer if NEED GET
+		 * DATA is not enabled
+		 */
+		if (!buf_addr && !ublk_need_get_data(ubq))
+			goto out;
+	} else if (buf_addr) {
+		/* User copy requires addr to be unset */
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ublk_fill_io_cmd(io, cmd, buf_addr);
+	ublk_mark_io_ready(ub, ubq);
+out:
+	mutex_unlock(&ub->mutex);
+	return ret;
+}
+
 static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
 			       unsigned int issue_flags,
 			       const struct ublksrv_io_cmd *ub_cmd)
@@ -1985,33 +2030,9 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
 	case UBLK_IO_UNREGISTER_IO_BUF:
 		return ublk_unregister_io_buf(cmd, ub_cmd->addr, issue_flags);
 	case UBLK_IO_FETCH_REQ:
-		/* UBLK_IO_FETCH_REQ is only allowed before queue is setup */
-		if (ublk_queue_ready(ubq)) {
-			ret = -EBUSY;
-			goto out;
-		}
-		/*
-		 * The io is being handled by server, so COMMIT_RQ is expected
-		 * instead of FETCH_REQ
-		 */
-		if (io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)
-			goto out;
-
-		if (ublk_need_map_io(ubq)) {
-			/*
-			 * FETCH_RQ has to provide IO buffer if NEED GET
-			 * DATA is not enabled
-			 */
-			if (!ub_cmd->addr && !ublk_need_get_data(ubq))
-				goto out;
-		} else if (ub_cmd->addr) {
-			/* User copy requires addr to be unset */
-			ret = -EINVAL;
-			goto out;
-		}
-
-		ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
-		ublk_mark_io_ready(ub, ubq);
+		ret = ublk_fetch(cmd, ubq, io, ub_cmd->addr);
+		if (ret)
+			goto out;
 		break;
 	case UBLK_IO_COMMIT_AND_FETCH_REQ:
 		req = blk_mq_tag_to_rq(ub->tag_set.tags[ub_cmd->q_id], tag);
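One detail worth noting from the first hunk: instead of locking inside ublk_mark_io_ready(), the patch annotates it with __must_hold(&ub->mutex), which documents that callers must already hold the mutex and lets the kernel's sparse checker verify the lock context. Below is a standalone sketch of that convention with the annotation stubbed out so it builds outside the kernel; struct dev, mark_ready() and fetch() are illustrative names, not the driver's.

/*
 * Sketch of the __must_hold caller-holds-the-lock convention. In the kernel,
 * __must_hold(x) expands to a sparse context attribute; here it is stubbed
 * to nothing so the example compiles on its own.
 */
#include <pthread.h>
#include <stdio.h>

#define __must_hold(x)	/* sparse would check the lock is held on entry and exit */

struct dev {
	pthread_mutex_t mutex;
	int nr_queues_ready;
};

/* helper assumes the caller holds d->mutex, like ublk_mark_io_ready() after the patch */
static void mark_ready(struct dev *d)
	__must_hold(&d->mutex)
{
	d->nr_queues_ready++;
}

/* the caller takes the mutex once around validation and marking, like ublk_fetch() */
static void fetch(struct dev *d)
{
	pthread_mutex_lock(&d->mutex);
	/* ... validation would go here ... */
	mark_ready(d);
	pthread_mutex_unlock(&d->mutex);
}

int main(void)
{
	struct dev d = { .mutex = PTHREAD_MUTEX_INITIALIZER, .nr_queues_ready = 0 };

	fetch(&d);
	printf("nr_queues_ready=%d\n", d.nr_queues_ready);
	return 0;
}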