block: add bdrv_graph_wrlock_drained() convenience wrapper
Many write-locked sections are also drained sections. A new bdrv_graph_wrlock_drained() convenience wrapper around bdrv_graph_wrlock() is introduced, which will begin a drained section first. A global variable is used so bdrv_graph_wrunlock() knows if it also needs to end such a drained section. Both the aio_poll call in bdrv_graph_wrlock() and the aio_bh_poll() in bdrv_graph_wrunlock() can re-enter a write-locked section. While for the latter, ending the drain could be moved to before the call, the former requires that the variable is a counter and not just a boolean.

Since the wrapper calls bdrv_drain_all_begin(), which must be called with the graph unlocked, mark the wrapper as GRAPH_UNLOCKED too.

The switch to the new helper was generated with the following commands and then manually checked:

find . -name '*.c' -exec sed -i -z 's/bdrv_drain_all_begin();\n\s*bdrv_graph_wrlock();/bdrv_graph_wrlock_drained();/g' {} ';'
find . -name '*.c' -exec sed -i -z 's/bdrv_graph_wrunlock();\n\s*bdrv_drain_all_end();/bdrv_graph_wrunlock();/g' {} ';'

Suggested-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Fiona Ebner <f.ebner@proxmox.com>
Message-ID: <20250530151125.955508-25-f.ebner@proxmox.com>
[kwolf: Removed redundant GRAPH_UNLOCKED]
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
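As a quick illustration of the conversion performed by the two sed commands above, here is a sketch of a typical caller before and after the patch. This is not part of the patch itself: example_detach_child() is a hypothetical function, while the bdrv_* calls are the real block-layer APIs touched by this commit.

/* Before: callers opened a drained section explicitly around the
 * write-locked section. */
static void example_detach_child(BlockDriverState *bs, BdrvChild *child)
{
    bdrv_drain_all_begin();
    bdrv_graph_wrlock();
    bdrv_unref_child(bs, child);
    bdrv_graph_wrunlock();
    bdrv_drain_all_end();
}

/* After: the wrapper begins the drained section and bumps the global
 * counter, so bdrv_graph_wrunlock() knows it must end the drained
 * section as well. */
static void example_detach_child(BlockDriverState *bs, BdrvChild *child)
{
    bdrv_graph_wrlock_drained();
    bdrv_unref_child(bs, child);
    bdrv_graph_wrunlock();
}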
This commit is contained in:
parent 502f00c51a
commit 6b89e851fa

block.c | 20
@@ -1721,14 +1721,12 @@ bdrv_open_driver(BlockDriverState *bs, BlockDriver *drv, const char *node_name,
 open_failed:
     bs->drv = NULL;
 
-    bdrv_drain_all_begin();
-    bdrv_graph_wrlock();
+    bdrv_graph_wrlock_drained();
     if (bs->file != NULL) {
         bdrv_unref_child(bs, bs->file);
         assert(!bs->file);
     }
     bdrv_graph_wrunlock();
-    bdrv_drain_all_end();
 
     g_free(bs->opaque);
     bs->opaque = NULL;
@@ -3602,11 +3600,9 @@ int bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd,
     int ret;
     GLOBAL_STATE_CODE();
 
-    bdrv_drain_all_begin();
-    bdrv_graph_wrlock();
+    bdrv_graph_wrlock_drained();
     ret = bdrv_set_backing_hd_drained(bs, backing_hd, errp);
     bdrv_graph_wrunlock();
-    bdrv_drain_all_end();
 
     return ret;
 }
@@ -3797,12 +3793,10 @@ static BdrvChild *bdrv_open_child_common(const char *filename,
         return NULL;
     }
 
-    bdrv_drain_all_begin();
-    bdrv_graph_wrlock();
+    bdrv_graph_wrlock_drained();
     child = bdrv_attach_child(parent, bs, bdref_key, child_class, child_role,
                               errp);
     bdrv_graph_wrunlock();
-    bdrv_drain_all_end();
 
     return child;
 }
@@ -5180,8 +5174,7 @@ static void bdrv_close(BlockDriverState *bs)
         bs->drv = NULL;
     }
 
-    bdrv_drain_all_begin();
-    bdrv_graph_wrlock();
+    bdrv_graph_wrlock_drained();
     QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
         bdrv_unref_child(bs, child);
     }
@@ -5189,7 +5182,6 @@ static void bdrv_close(BlockDriverState *bs)
     assert(!bs->backing);
     assert(!bs->file);
     bdrv_graph_wrunlock();
-    bdrv_drain_all_end();
 
     g_free(bs->opaque);
     bs->opaque = NULL;
@@ -5515,8 +5507,7 @@ int bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top,
     assert(!bs_new->backing);
     bdrv_graph_rdunlock_main_loop();
 
-    bdrv_drain_all_begin();
-    bdrv_graph_wrlock();
+    bdrv_graph_wrlock_drained();
 
     child = bdrv_attach_child_noperm(bs_new, bs_top, "backing",
                                      &child_of_bds, bdrv_backing_role(bs_new),
@@ -5537,7 +5528,6 @@ out:
 
     bdrv_refresh_limits(bs_top, NULL, NULL);
     bdrv_graph_wrunlock();
-    bdrv_drain_all_end();
 
     return ret;
 }
@@ -498,12 +498,10 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
     block_copy_set_speed(bcs, speed);
 
     /* Required permissions are taken by copy-before-write filter target */
-    bdrv_drain_all_begin();
-    bdrv_graph_wrlock();
+    bdrv_graph_wrlock_drained();
     block_job_add_bdrv(&job->common, "target", target, 0, BLK_PERM_ALL,
                        &error_abort);
     bdrv_graph_wrunlock();
-    bdrv_drain_all_end();
 
     return &job->common;
 
@@ -281,11 +281,9 @@ static int blk_log_writes_open(BlockDriverState *bs, QDict *options, int flags,
     ret = 0;
 fail_log:
     if (ret < 0) {
-        bdrv_drain_all_begin();
-        bdrv_graph_wrlock();
+        bdrv_graph_wrlock_drained();
         bdrv_unref_child(bs, s->log_file);
         bdrv_graph_wrunlock();
-        bdrv_drain_all_end();
         s->log_file = NULL;
         qemu_mutex_destroy(&s->mutex);
     }
@@ -298,12 +296,10 @@ static void blk_log_writes_close(BlockDriverState *bs)
 {
     BDRVBlkLogWritesState *s = bs->opaque;
 
-    bdrv_drain_all_begin();
-    bdrv_graph_wrlock();
+    bdrv_graph_wrlock_drained();
     bdrv_unref_child(bs, s->log_file);
     s->log_file = NULL;
     bdrv_graph_wrunlock();
-    bdrv_drain_all_end();
     qemu_mutex_destroy(&s->mutex);
 }
 
@@ -151,12 +151,10 @@ static void blkverify_close(BlockDriverState *bs)
 {
     BDRVBlkverifyState *s = bs->opaque;
 
-    bdrv_drain_all_begin();
-    bdrv_graph_wrlock();
+    bdrv_graph_wrlock_drained();
     bdrv_unref_child(bs, s->test_file);
     s->test_file = NULL;
     bdrv_graph_wrunlock();
-    bdrv_drain_all_end();
 }
 
 static int64_t coroutine_fn GRAPH_RDLOCK
@@ -889,11 +889,9 @@ void blk_remove_bs(BlockBackend *blk)
     root = blk->root;
     blk->root = NULL;
 
-    bdrv_drain_all_begin();
-    bdrv_graph_wrlock();
+    bdrv_graph_wrlock_drained();
     bdrv_root_unref_child(root);
     bdrv_graph_wrunlock();
-    bdrv_drain_all_end();
 }
 
 /*
@@ -906,8 +904,7 @@ int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp)
 
     GLOBAL_STATE_CODE();
     bdrv_ref(bs);
-    bdrv_drain_all_begin();
-    bdrv_graph_wrlock();
+    bdrv_graph_wrlock_drained();
 
     if ((bs->open_flags & BDRV_O_INACTIVE) && blk_can_inactivate(blk)) {
         blk->disable_perm = true;
@@ -922,7 +919,6 @@ int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp)
                                        BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY,
                                        perm, shared_perm, blk, errp);
     bdrv_graph_wrunlock();
-    bdrv_drain_all_end();
     if (blk->root == NULL) {
         return -EPERM;
     }
@@ -392,8 +392,7 @@ void commit_start(const char *job_id, BlockDriverState *bs,
      * this is the responsibility of the interface (i.e. whoever calls
      * commit_start()).
      */
-    bdrv_drain_all_begin();
-    bdrv_graph_wrlock();
+    bdrv_graph_wrlock_drained();
     s->base_overlay = bdrv_find_overlay(top, base);
     assert(s->base_overlay);
 
@@ -425,21 +424,18 @@ void commit_start(const char *job_id, BlockDriverState *bs,
                                  iter_shared_perms, errp);
         if (ret < 0) {
             bdrv_graph_wrunlock();
-            bdrv_drain_all_end();
             goto fail;
         }
     }
 
     if (bdrv_freeze_backing_chain(commit_top_bs, base, errp) < 0) {
         bdrv_graph_wrunlock();
-        bdrv_drain_all_end();
         goto fail;
     }
     s->chain_frozen = true;
 
     ret = block_job_add_bdrv(&s->common, "base", base, 0, BLK_PERM_ALL, errp);
     bdrv_graph_wrunlock();
-    bdrv_drain_all_end();
 
     if (ret < 0) {
         goto fail;
@@ -33,6 +33,17 @@ static QemuMutex aio_context_list_lock;
 /* Written and read with atomic operations. */
 static int has_writer;
 
+/*
+ * Many write-locked sections are also drained sections. There is a convenience
+ * wrapper bdrv_graph_wrlock_drained() which begins a drained section before
+ * acquiring the lock. This variable here is used so bdrv_graph_wrunlock() knows
+ * if it also needs to end such a drained section. It needs to be a counter,
+ * because the aio_poll() call in bdrv_graph_wrlock() might re-enter
+ * bdrv_graph_wrlock_drained(). And note that aio_bh_poll() in
+ * bdrv_graph_wrunlock() might also re-enter a write-locked section.
+ */
+static int wrlock_quiesced_counter;
+
 /*
  * A reader coroutine could move from an AioContext to another.
  * If this happens, there is no problem from the point of view of
@@ -112,8 +123,14 @@ void no_coroutine_fn bdrv_graph_wrlock(void)
     assert(!qatomic_read(&has_writer));
     assert(!qemu_in_coroutine());
 
-    /* Make sure that constantly arriving new I/O doesn't cause starvation */
-    bdrv_drain_all_begin_nopoll();
+    bool need_drain = wrlock_quiesced_counter == 0;
+
+    if (need_drain) {
+        /*
+         * Make sure that constantly arriving new I/O doesn't cause starvation
+         */
+        bdrv_drain_all_begin_nopoll();
+    }
 
     /*
      * reader_count == 0: this means writer will read has_reader as 1
@@ -139,7 +156,18 @@ void no_coroutine_fn bdrv_graph_wrlock(void)
         smp_mb();
     } while (reader_count() >= 1);
 
-    bdrv_drain_all_end();
+    if (need_drain) {
+        bdrv_drain_all_end();
+    }
 }
 
+void no_coroutine_fn bdrv_graph_wrlock_drained(void)
+{
+    GLOBAL_STATE_CODE();
+
+    bdrv_drain_all_begin();
+    wrlock_quiesced_counter++;
+    bdrv_graph_wrlock();
+}
+
 void no_coroutine_fn bdrv_graph_wrunlock(void)
@@ -168,6 +196,12 @@ void no_coroutine_fn bdrv_graph_wrunlock(void)
      * progress.
      */
     aio_bh_poll(qemu_get_aio_context());
+
+    if (wrlock_quiesced_counter > 0) {
+        bdrv_drain_all_end();
+        wrlock_quiesced_counter--;
+    }
+
 }
 
 void coroutine_fn bdrv_graph_co_rdlock(void)
@@ -2014,15 +2014,13 @@ static BlockJob *mirror_start_job(
      */
     bdrv_disable_dirty_bitmap(s->dirty_bitmap);
 
-    bdrv_drain_all_begin();
-    bdrv_graph_wrlock();
+    bdrv_graph_wrlock_drained();
     ret = block_job_add_bdrv(&s->common, "source", bs, 0,
                              BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE |
                              BLK_PERM_CONSISTENT_READ,
                              errp);
     if (ret < 0) {
         bdrv_graph_wrunlock();
-        bdrv_drain_all_end();
         goto fail;
     }
 
@@ -2068,19 +2066,16 @@ static BlockJob *mirror_start_job(
                                      iter_shared_perms, errp);
             if (ret < 0) {
                 bdrv_graph_wrunlock();
-                bdrv_drain_all_end();
                 goto fail;
             }
         }
 
         if (bdrv_freeze_backing_chain(mirror_top_bs, target, errp) < 0) {
             bdrv_graph_wrunlock();
-            bdrv_drain_all_end();
             goto fail;
         }
     }
     bdrv_graph_wrunlock();
-    bdrv_drain_all_end();
 
     QTAILQ_INIT(&s->ops_in_flight);
 
@@ -2823,11 +2823,9 @@ qcow2_do_close(BlockDriverState *bs, bool close_data_file)
     if (close_data_file && has_data_file(bs)) {
         GLOBAL_STATE_CODE();
         bdrv_graph_rdunlock_main_loop();
-        bdrv_drain_all_begin();
-        bdrv_graph_wrlock();
+        bdrv_graph_wrlock_drained();
         bdrv_unref_child(bs, s->data_file);
         bdrv_graph_wrunlock();
-        bdrv_drain_all_end();
         s->data_file = NULL;
         bdrv_graph_rdlock_main_loop();
     }
@@ -1037,8 +1037,7 @@ static int quorum_open(BlockDriverState *bs, QDict *options, int flags,
 
 close_exit:
     /* cleanup on error */
-    bdrv_drain_all_begin();
-    bdrv_graph_wrlock();
+    bdrv_graph_wrlock_drained();
     for (i = 0; i < s->num_children; i++) {
         if (!opened[i]) {
             continue;
@@ -1046,7 +1045,6 @@ close_exit:
         bdrv_unref_child(bs, s->children[i]);
     }
     bdrv_graph_wrunlock();
-    bdrv_drain_all_end();
     g_free(s->children);
     g_free(opened);
 exit:
@@ -1059,13 +1057,11 @@ static void quorum_close(BlockDriverState *bs)
     BDRVQuorumState *s = bs->opaque;
     int i;
 
-    bdrv_drain_all_begin();
-    bdrv_graph_wrlock();
+    bdrv_graph_wrlock_drained();
     for (i = 0; i < s->num_children; i++) {
         bdrv_unref_child(bs, s->children[i]);
     }
     bdrv_graph_wrunlock();
-    bdrv_drain_all_end();
 
     g_free(s->children);
 }
@@ -540,8 +540,7 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
         return;
     }
 
-    bdrv_drain_all_begin();
-    bdrv_graph_wrlock();
+    bdrv_graph_wrlock_drained();
 
     bdrv_ref(hidden_disk->bs);
     s->hidden_disk = bdrv_attach_child(bs, hidden_disk->bs, "hidden disk",
@@ -550,7 +549,6 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
     if (local_err) {
         error_propagate(errp, local_err);
         bdrv_graph_wrunlock();
-        bdrv_drain_all_end();
         return;
     }
 
@@ -561,7 +559,6 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
     if (local_err) {
         error_propagate(errp, local_err);
         bdrv_graph_wrunlock();
-        bdrv_drain_all_end();
         return;
     }
 
@@ -574,14 +571,12 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
         !check_top_bs(top_bs, bs)) {
         error_setg(errp, "No top_bs or it is invalid");
         bdrv_graph_wrunlock();
-        bdrv_drain_all_end();
         reopen_backing_file(bs, false, NULL);
         return;
     }
     bdrv_op_block_all(top_bs, s->blocker);
 
     bdrv_graph_wrunlock();
-    bdrv_drain_all_end();
 
     s->backup_job = backup_job_create(
                                 NULL, s->secondary_disk->bs, s->hidden_disk->bs,
@@ -656,14 +651,12 @@ static void replication_done(void *opaque, int ret)
     if (ret == 0) {
         s->stage = BLOCK_REPLICATION_DONE;
 
-        bdrv_drain_all_begin();
-        bdrv_graph_wrlock();
+        bdrv_graph_wrlock_drained();
         bdrv_unref_child(bs, s->secondary_disk);
         s->secondary_disk = NULL;
         bdrv_unref_child(bs, s->hidden_disk);
         s->hidden_disk = NULL;
         bdrv_graph_wrunlock();
-        bdrv_drain_all_end();
 
         s->error = 0;
     } else {
@@ -291,11 +291,9 @@ int bdrv_snapshot_goto(BlockDriverState *bs,
         }
 
         /* .bdrv_open() will re-attach it */
-        bdrv_drain_all_begin();
-        bdrv_graph_wrlock();
+        bdrv_graph_wrlock_drained();
        bdrv_unref_child(bs, fallback);
        bdrv_graph_wrunlock();
-        bdrv_drain_all_end();
 
        ret = bdrv_snapshot_goto(fallback_bs, snapshot_id, errp);
        memset(bs->opaque, 0, drv->instance_size);
@@ -371,12 +371,10 @@ void stream_start(const char *job_id, BlockDriverState *bs,
      * already have our own plans. Also don't allow resize as the image size is
      * queried only at the job start and then cached.
      */
-    bdrv_drain_all_begin();
-    bdrv_graph_wrlock();
+    bdrv_graph_wrlock_drained();
     if (block_job_add_bdrv(&s->common, "active node", bs, 0,
                            basic_flags | BLK_PERM_WRITE, errp)) {
         bdrv_graph_wrunlock();
-        bdrv_drain_all_end();
         goto fail;
     }
 
@@ -397,12 +395,10 @@ void stream_start(const char *job_id, BlockDriverState *bs,
                                  basic_flags, errp);
         if (ret < 0) {
             bdrv_graph_wrunlock();
-            bdrv_drain_all_end();
             goto fail;
         }
     }
     bdrv_graph_wrunlock();
-    bdrv_drain_all_end();
 
     s->base_overlay = base_overlay;
     s->above_base = above_base;
block/vmdk.c | 20
@@ -271,8 +271,7 @@ static void vmdk_free_extents(BlockDriverState *bs)
     BDRVVmdkState *s = bs->opaque;
     VmdkExtent *e;
 
-    bdrv_drain_all_begin();
-    bdrv_graph_wrlock();
+    bdrv_graph_wrlock_drained();
     for (i = 0; i < s->num_extents; i++) {
         e = &s->extents[i];
         g_free(e->l1_table);
@@ -284,7 +283,6 @@ static void vmdk_free_extents(BlockDriverState *bs)
         }
     }
     bdrv_graph_wrunlock();
-    bdrv_drain_all_end();
 
     g_free(s->extents);
 }
@@ -1249,11 +1247,9 @@ vmdk_parse_extents(const char *desc, BlockDriverState *bs, QDict *options,
                                   0, 0, 0, 0, 0, &extent, errp);
             if (ret < 0) {
                 bdrv_graph_rdunlock_main_loop();
-                bdrv_drain_all_begin();
-                bdrv_graph_wrlock();
+                bdrv_graph_wrlock_drained();
                 bdrv_unref_child(bs, extent_file);
                 bdrv_graph_wrunlock();
-                bdrv_drain_all_end();
                 bdrv_graph_rdlock_main_loop();
                 goto out;
             }
@@ -1270,11 +1266,9 @@ vmdk_parse_extents(const char *desc, BlockDriverState *bs, QDict *options,
             g_free(buf);
             if (ret) {
                 bdrv_graph_rdunlock_main_loop();
-                bdrv_drain_all_begin();
-                bdrv_graph_wrlock();
+                bdrv_graph_wrlock_drained();
                 bdrv_unref_child(bs, extent_file);
                 bdrv_graph_wrunlock();
-                bdrv_drain_all_end();
                 bdrv_graph_rdlock_main_loop();
                 goto out;
             }
@@ -1283,11 +1277,9 @@ vmdk_parse_extents(const char *desc, BlockDriverState *bs, QDict *options,
             ret = vmdk_open_se_sparse(bs, extent_file, bs->open_flags, errp);
             if (ret) {
                 bdrv_graph_rdunlock_main_loop();
-                bdrv_drain_all_begin();
-                bdrv_graph_wrlock();
+                bdrv_graph_wrlock_drained();
                 bdrv_unref_child(bs, extent_file);
                 bdrv_graph_wrunlock();
-                bdrv_drain_all_end();
                 bdrv_graph_rdlock_main_loop();
                 goto out;
             }
@@ -1295,11 +1287,9 @@ vmdk_parse_extents(const char *desc, BlockDriverState *bs, QDict *options,
         } else {
             error_setg(errp, "Unsupported extent type '%s'", type);
             bdrv_graph_rdunlock_main_loop();
-            bdrv_drain_all_begin();
-            bdrv_graph_wrlock();
+            bdrv_graph_wrlock_drained();
             bdrv_unref_child(bs, extent_file);
             bdrv_graph_wrunlock();
-            bdrv_drain_all_end();
             bdrv_graph_rdlock_main_loop();
             ret = -ENOTSUP;
             goto out;
@@ -3561,8 +3561,7 @@ void qmp_x_blockdev_change(const char *parent, const char *child,
     BlockDriverState *parent_bs, *new_bs = NULL;
     BdrvChild *p_child;
 
-    bdrv_drain_all_begin();
-    bdrv_graph_wrlock();
+    bdrv_graph_wrlock_drained();
 
     parent_bs = bdrv_lookup_bs(parent, parent, errp);
     if (!parent_bs) {
@@ -3599,7 +3598,6 @@ void qmp_x_blockdev_change(const char *parent, const char *child,
 
 out:
     bdrv_graph_wrunlock();
-    bdrv_drain_all_end();
 }
 
 BlockJobInfoList *qmp_query_block_jobs(Error **errp)
blockjob.c | 10
@@ -198,8 +198,7 @@ void block_job_remove_all_bdrv(BlockJob *job)
      * one to make sure that such a concurrent access does not attempt
      * to process an already freed BdrvChild.
      */
-    bdrv_drain_all_begin();
-    bdrv_graph_wrlock();
+    bdrv_graph_wrlock_drained();
     while (job->nodes) {
         GSList *l = job->nodes;
         BdrvChild *c = l->data;
@@ -212,7 +211,6 @@ void block_job_remove_all_bdrv(BlockJob *job)
         g_slist_free_1(l);
     }
     bdrv_graph_wrunlock();
-    bdrv_drain_all_end();
 }
 
 bool block_job_has_bdrv(BlockJob *job, BlockDriverState *bs)
@@ -498,8 +496,7 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver,
     int ret;
     GLOBAL_STATE_CODE();
 
-    bdrv_drain_all_begin();
-    bdrv_graph_wrlock();
+    bdrv_graph_wrlock_drained();
 
     if (job_id == NULL && !(flags & JOB_INTERNAL)) {
         job_id = bdrv_get_device_name(bs);
@@ -509,7 +506,6 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver,
                      flags, cb, opaque, errp);
     if (job == NULL) {
         bdrv_graph_wrunlock();
-        bdrv_drain_all_end();
         return NULL;
     }
 
@@ -548,12 +544,10 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver,
     }
 
     bdrv_graph_wrunlock();
-    bdrv_drain_all_end();
     return job;
 
 fail:
     bdrv_graph_wrunlock();
-    bdrv_drain_all_end();
     job_early_fail(&job->job);
     return NULL;
 }
@@ -112,10 +112,21 @@ void unregister_aiocontext(AioContext *ctx);
 void no_coroutine_fn TSA_ACQUIRE(graph_lock) TSA_NO_TSA
 bdrv_graph_wrlock(void);
 
+/*
+ * bdrv_graph_wrlock_drained:
+ * Similar to bdrv_graph_wrlock, but will begin a drained section before
+ * locking.
+ */
+void no_coroutine_fn TSA_ACQUIRE(graph_lock) TSA_NO_TSA
+bdrv_graph_wrlock_drained(void);
+
 /*
  * bdrv_graph_wrunlock:
  * Write finished, reset global has_writer to 0 and restart
  * all readers that are waiting.
+ *
+ * Also ends the drained section if bdrv_graph_wrlock_drained() was used to lock
+ * the graph.
  */
 void no_coroutine_fn TSA_RELEASE(graph_lock) TSA_NO_TSA
 bdrv_graph_wrunlock(void);
@@ -772,11 +772,9 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type,
     tjob->bs = src;
     job = &tjob->common;
 
-    bdrv_drain_all_begin();
-    bdrv_graph_wrlock();
+    bdrv_graph_wrlock_drained();
     block_job_add_bdrv(job, "target", target, 0, BLK_PERM_ALL, &error_abort);
     bdrv_graph_wrunlock();
-    bdrv_drain_all_end();
 
     switch (result) {
     case TEST_JOB_SUCCESS:
@@ -955,13 +953,11 @@ static void bdrv_test_top_close(BlockDriverState *bs)
 {
     BdrvChild *c, *next_c;
 
-    bdrv_drain_all_begin();
-    bdrv_graph_wrlock();
+    bdrv_graph_wrlock_drained();
     QLIST_FOREACH_SAFE(c, &bs->children, next, next_c) {
         bdrv_unref_child(bs, c);
     }
     bdrv_graph_wrunlock();
-    bdrv_drain_all_end();
 }
 
 static int coroutine_fn GRAPH_RDLOCK
@@ -1053,12 +1049,10 @@ static void do_test_delete_by_drain(bool detach_instead_of_delete,
 
     null_bs = bdrv_open("null-co://", NULL, NULL, BDRV_O_RDWR | BDRV_O_PROTOCOL,
                         &error_abort);
-    bdrv_drain_all_begin();
-    bdrv_graph_wrlock();
+    bdrv_graph_wrlock_drained();
     bdrv_attach_child(bs, null_bs, "null-child", &child_of_bds,
                       BDRV_CHILD_DATA, &error_abort);
     bdrv_graph_wrunlock();
-    bdrv_drain_all_end();
 
     /* This child will be the one to pass to requests through to, and
      * it will stall until a drain occurs */
@@ -1066,25 +1060,21 @@ static void do_test_delete_by_drain(bool detach_instead_of_delete,
                               &error_abort);
     child_bs->total_sectors = 65536 >> BDRV_SECTOR_BITS;
     /* Takes our reference to child_bs */
-    bdrv_drain_all_begin();
-    bdrv_graph_wrlock();
+    bdrv_graph_wrlock_drained();
     tts->wait_child = bdrv_attach_child(bs, child_bs, "wait-child",
                                         &child_of_bds,
                                         BDRV_CHILD_DATA | BDRV_CHILD_PRIMARY,
                                         &error_abort);
     bdrv_graph_wrunlock();
-    bdrv_drain_all_end();
 
     /* This child is just there to be deleted
      * (for detach_instead_of_delete == true) */
     null_bs = bdrv_open("null-co://", NULL, NULL, BDRV_O_RDWR | BDRV_O_PROTOCOL,
                         &error_abort);
-    bdrv_drain_all_begin();
-    bdrv_graph_wrlock();
+    bdrv_graph_wrlock_drained();
     bdrv_attach_child(bs, null_bs, "null-child", &child_of_bds, BDRV_CHILD_DATA,
                       &error_abort);
     bdrv_graph_wrunlock();
-    bdrv_drain_all_end();
 
     blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
     blk_insert_bs(blk, bs, &error_abort);
@@ -1167,8 +1157,7 @@ static void no_coroutine_fn detach_indirect_bh(void *opaque)
 
     bdrv_dec_in_flight(data->child_b->bs);
 
-    bdrv_drain_all_begin();
-    bdrv_graph_wrlock();
+    bdrv_graph_wrlock_drained();
     bdrv_unref_child(data->parent_b, data->child_b);
 
     bdrv_ref(data->c);
@@ -1176,7 +1165,6 @@ static void no_coroutine_fn detach_indirect_bh(void *opaque)
                                       &child_of_bds, BDRV_CHILD_DATA,
                                       &error_abort);
     bdrv_graph_wrunlock();
-    bdrv_drain_all_end();
 }
 
 static void coroutine_mixed_fn detach_by_parent_aio_cb(void *opaque, int ret)
@@ -1274,8 +1262,7 @@ static void TSA_NO_TSA test_detach_indirect(bool by_parent_cb)
     /* Set child relationships */
     bdrv_ref(b);
     bdrv_ref(a);
-    bdrv_drain_all_begin();
-    bdrv_graph_wrlock();
+    bdrv_graph_wrlock_drained();
     child_b = bdrv_attach_child(parent_b, b, "PB-B", &child_of_bds,
                                 BDRV_CHILD_DATA, &error_abort);
     child_a = bdrv_attach_child(parent_b, a, "PB-A", &child_of_bds,
@@ -1286,7 +1273,6 @@ static void TSA_NO_TSA test_detach_indirect(bool by_parent_cb)
                       by_parent_cb ? &child_of_bds : &detach_by_driver_cb_class,
                       BDRV_CHILD_DATA, &error_abort);
     bdrv_graph_wrunlock();
-    bdrv_drain_all_end();
 
     g_assert_cmpint(parent_a->refcnt, ==, 1);
     g_assert_cmpint(parent_b->refcnt, ==, 1);
@@ -1699,8 +1685,7 @@ static void test_drop_intermediate_poll(void)
      * Establish the chain last, so the chain links are the first
      * elements in the BDS.parents lists
      */
-    bdrv_drain_all_begin();
-    bdrv_graph_wrlock();
+    bdrv_graph_wrlock_drained();
     for (i = 0; i < 3; i++) {
         if (i) {
             /* Takes the reference to chain[i - 1] */
@@ -1709,7 +1694,6 @@ static void test_drop_intermediate_poll(void)
         }
     }
     bdrv_graph_wrunlock();
-    bdrv_drain_all_end();
 
     job = block_job_create("job", &test_simple_job_driver, NULL, job_node,
                            0, BLK_PERM_ALL, 0, 0, NULL, NULL, &error_abort);
@@ -1956,12 +1940,10 @@ static void do_test_replace_child_mid_drain(int old_drain_count,
     new_child_bs->total_sectors = 1;
 
     bdrv_ref(old_child_bs);
-    bdrv_drain_all_begin();
-    bdrv_graph_wrlock();
+    bdrv_graph_wrlock_drained();
     bdrv_attach_child(parent_bs, old_child_bs, "child", &child_of_bds,
                       BDRV_CHILD_COW, &error_abort);
     bdrv_graph_wrunlock();
-    bdrv_drain_all_end();
     parent_s->setup_completed = true;
 
     for (i = 0; i < old_drain_count; i++) {
@@ -137,12 +137,10 @@ static void test_update_perm_tree(void)
 
     blk_insert_bs(root, bs, &error_abort);
 
-    bdrv_drain_all_begin();
-    bdrv_graph_wrlock();
+    bdrv_graph_wrlock_drained();
     bdrv_attach_child(filter, bs, "child", &child_of_bds,
                       BDRV_CHILD_DATA, &error_abort);
     bdrv_graph_wrunlock();
-    bdrv_drain_all_end();
 
     ret = bdrv_append(filter, bs, NULL);
     g_assert_cmpint(ret, <, 0);
@@ -206,13 +204,11 @@ static void test_should_update_child(void)
 
     bdrv_set_backing_hd(target, bs, &error_abort);
 
-    bdrv_drain_all_begin();
-    bdrv_graph_wrlock();
+    bdrv_graph_wrlock_drained();
     g_assert(target->backing->bs == bs);
     bdrv_attach_child(filter, target, "target", &child_of_bds,
                       BDRV_CHILD_DATA, &error_abort);
     bdrv_graph_wrunlock();
-    bdrv_drain_all_end();
     bdrv_append(filter, bs, &error_abort);
 
     bdrv_graph_rdlock_main_loop();
@@ -248,8 +244,7 @@ static void test_parallel_exclusive_write(void)
     bdrv_ref(base);
     bdrv_ref(fl1);
 
-    bdrv_drain_all_begin();
-    bdrv_graph_wrlock();
+    bdrv_graph_wrlock_drained();
     bdrv_attach_child(top, fl1, "backing", &child_of_bds,
                       BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY,
                       &error_abort);
@@ -262,7 +257,6 @@ static void test_parallel_exclusive_write(void)
 
     bdrv_replace_node(fl1, fl2, &error_abort);
     bdrv_graph_wrunlock();
-    bdrv_drain_all_end();
 
     bdrv_drained_end(fl2);
     bdrv_drained_end(fl1);
@@ -369,8 +363,7 @@ static void test_parallel_perm_update(void)
      */
     bdrv_ref(base);
 
-    bdrv_drain_all_begin();
-    bdrv_graph_wrlock();
+    bdrv_graph_wrlock_drained();
     bdrv_attach_child(top, ws, "file", &child_of_bds, BDRV_CHILD_DATA,
                       &error_abort);
     c_fl1 = bdrv_attach_child(ws, fl1, "first", &child_of_bds,
@@ -384,7 +377,6 @@ static void test_parallel_perm_update(void)
                               BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY,
                               &error_abort);
     bdrv_graph_wrunlock();
-    bdrv_drain_all_end();
 
     /* Select fl1 as first child to be active */
     s->selected = c_fl1;
@@ -438,13 +430,11 @@ static void test_append_greedy_filter(void)
     BlockDriverState *base = no_perm_node("base");
     BlockDriverState *fl = exclusive_writer_node("fl1");
 
-    bdrv_drain_all_begin();
-    bdrv_graph_wrlock();
+    bdrv_graph_wrlock_drained();
     bdrv_attach_child(top, base, "backing", &child_of_bds,
                       BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY,
                       &error_abort);
     bdrv_graph_wrunlock();
-    bdrv_drain_all_end();
 
     bdrv_append(fl, base, &error_abort);
     bdrv_unref(fl);