migration: Use automatic rcu_read unlock in ram.c

Use the automatic read unlocker in migration/ram.c

Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
Message-Id: <20191007143642.301445-4-dgilbert@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Dr. David Alan Gilbert 2019-10-07 15:36:39 +01:00
parent 0e6ebd4877
commit 89ac5a1d2a
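The guard macros this patch switches to were added earlier in the same series: RCU_READ_LOCK_GUARD() keeps the RCU read lock held until the end of the enclosing scope, and WITH_RCU_READ_LOCK_GUARD() { ... } scopes it to a block. QEMU's real versions live in include/qemu/rcu.h and are built on glib's g_autoptr cleanup machinery; the sketch below shows the same technique in self-contained form, using the compiler's cleanup attribute directly. Every name in it (rcu_guard_cleanup, _rcu_guard, _rcu_once) is illustrative, not QEMU's.

/*
 * Minimal sketch of scope-based RCU read guards, assuming only the
 * GCC/Clang __attribute__((cleanup)) extension.  Not QEMU code.
 */
#include <stdio.h>

static void rcu_read_lock(void)   { puts("rcu_read_lock"); }
static void rcu_read_unlock(void) { puts("rcu_read_unlock"); }

/* Runs automatically when the guard variable goes out of scope. */
static void rcu_guard_cleanup(int *guard)
{
    (void)guard;
    rcu_read_unlock();
}

/* Function-scope form: lock now, unlock on every path out of the scope. */
#define RCU_READ_LOCK_GUARD() \
    __attribute__((cleanup(rcu_guard_cleanup), unused)) \
    int _rcu_guard = (rcu_read_lock(), 0)

/*
 * Block form: the for statement runs the body exactly once with the
 * lock held; the cleanup fires when control leaves the block, whether
 * normally or via return/goto.  (As with QEMU's macro, a bare break
 * inside the body exits the guarded block itself.)
 */
#define WITH_RCU_READ_LOCK_GUARD() \
    for (__attribute__((cleanup(rcu_guard_cleanup))) \
         int _rcu_once = (rcu_read_lock(), 1); _rcu_once; _rcu_once = 0)

int main(void)
{
    WITH_RCU_READ_LOCK_GUARD() {
        puts("in RCU critical section");
    }                       /* "rcu_read_unlock" is printed here */
    return 0;
}

The property the conversion relies on is that the unlock runs on every path out of the guarded scope, so early returns and goto statements no longer need a matching rcu_read_unlock(), which is exactly what the hunks below delete.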

migration/ram.c

@@ -181,14 +181,14 @@ int foreach_not_ignored_block(RAMBlockIterFunc func, void *opaque)
     RAMBlock *block;
     int ret = 0;
 
-    rcu_read_lock();
+    RCU_READ_LOCK_GUARD();
+
     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
         ret = func(block, opaque);
         if (ret) {
             break;
         }
     }
-    rcu_read_unlock();
     return ret;
 }
@@ -1848,12 +1848,12 @@ static void migration_bitmap_sync(RAMState *rs)
     memory_global_dirty_log_sync();
 
     qemu_mutex_lock(&rs->bitmap_mutex);
-    rcu_read_lock();
+    WITH_RCU_READ_LOCK_GUARD() {
     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
         ramblock_sync_dirty_bitmap(rs, block);
     }
     ram_counters.remaining = ram_bytes_remaining();
-    rcu_read_unlock();
+    }
     qemu_mutex_unlock(&rs->bitmap_mutex);
 
     memory_global_after_dirty_log_sync();
@@ -2397,13 +2397,12 @@ static void migration_page_queue_free(RAMState *rs)
     /* This queue generally should be empty - but in the case of a failed
      * migration might have some droppings in.
      */
-    rcu_read_lock();
+    RCU_READ_LOCK_GUARD();
     QSIMPLEQ_FOREACH_SAFE(mspr, &rs->src_page_requests, next_req, next_mspr) {
         memory_region_unref(mspr->rb->mr);
         QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
         g_free(mspr);
     }
-    rcu_read_unlock();
 }
 
 /**
@@ -2424,7 +2423,8 @@ int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len)
     RAMState *rs = ram_state;
 
     ram_counters.postcopy_requests++;
-    rcu_read_lock();
+    RCU_READ_LOCK_GUARD();
+
     if (!rbname) {
         /* Reuse last RAMBlock */
         ramblock = rs->last_req_rb;
@@ -2466,12 +2466,10 @@ int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len)
     QSIMPLEQ_INSERT_TAIL(&rs->src_page_requests, new_entry, next_req);
     migration_make_urgent_request();
     qemu_mutex_unlock(&rs->src_page_req_mutex);
-    rcu_read_unlock();
 
     return 0;
 
 err:
-    rcu_read_unlock();
     return -1;
 }
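The two unlock calls deleted above sat on different exit paths of ram_save_queue_pages(), one on the success return and one under the err label. That duplication is what the guard removes. A hypothetical reduction of the shape, reusing the sketch macros from the first example (queue_page is a stand-in name, not QEMU code):

int queue_page(const char *rbname)
{
    RCU_READ_LOCK_GUARD();      /* was: rcu_read_lock(); */

    if (!rbname) {
        goto err;
    }
    /* ... look up the RAMBlock and queue the request ... */
    return 0;                   /* was: rcu_read_unlock(); return 0; */

err:
    return -1;                  /* was: rcu_read_unlock(); return -1; */
}

The cleanup attribute fires when the guard variable leaves scope, so both returns, including the one reached through goto, drop the read lock exactly once.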
@@ -2700,7 +2698,8 @@ static uint64_t ram_bytes_total_common(bool count_ignored)
     RAMBlock *block;
     uint64_t total = 0;
 
-    rcu_read_lock();
+    RCU_READ_LOCK_GUARD();
+
     if (count_ignored) {
         RAMBLOCK_FOREACH_MIGRATABLE(block) {
             total += block->used_length;
@@ -2710,7 +2709,6 @@ static uint64_t ram_bytes_total_common(bool count_ignored)
             total += block->used_length;
         }
     }
-    rcu_read_unlock();
     return total;
 }
@@ -3034,7 +3032,7 @@ int ram_postcopy_send_discard_bitmap(MigrationState *ms)
     RAMBlock *block;
     int ret;
 
-    rcu_read_lock();
+    RCU_READ_LOCK_GUARD();
 
     /* This should be our last sync, the src is now paused */
     migration_bitmap_sync(rs);
@@ -3048,7 +3046,6 @@ int ram_postcopy_send_discard_bitmap(MigrationState *ms)
         /* Deal with TPS != HPS and huge pages */
         ret = postcopy_chunk_hostpages(ms, block);
         if (ret) {
-            rcu_read_unlock();
             return ret;
         }
@@ -3060,7 +3057,6 @@ int ram_postcopy_send_discard_bitmap(MigrationState *ms)
     trace_ram_postcopy_send_discard_bitmap();
 
     ret = postcopy_each_ram_send_discard(ms);
-    rcu_read_unlock();
 
     return ret;
 }
@@ -3081,7 +3077,7 @@ int ram_discard_range(const char *rbname, uint64_t start, size_t length)
     trace_ram_discard_range(rbname, start, length);
 
-    rcu_read_lock();
+    RCU_READ_LOCK_GUARD();
     RAMBlock *rb = qemu_ram_block_by_name(rbname);
 
     if (!rb) {
@@ -3101,8 +3097,6 @@ int ram_discard_range(const char *rbname, uint64_t start, size_t length)
     ret = ram_block_discard_range(rb, start, length);
 
 err:
-    rcu_read_unlock();
-
     return ret;
 }
@@ -3231,13 +3225,12 @@ static void ram_init_bitmaps(RAMState *rs)
     /* For memory_global_dirty_log_start below.  */
     qemu_mutex_lock_iothread();
     qemu_mutex_lock_ramlist();
-    rcu_read_lock();
 
+    WITH_RCU_READ_LOCK_GUARD() {
     ram_list_init_bitmaps();
     memory_global_dirty_log_start();
     migration_bitmap_sync_precopy(rs);
+    }
 
-    rcu_read_unlock();
     qemu_mutex_unlock_ramlist();
     qemu_mutex_unlock_iothread();
 }
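In ram_init_bitmaps() the RCU section sits between two other locks. The WITH form makes that nesting visible and enforces it: the automatic unlock runs when the block closes, before either mutex is released. A sketch of the locking shape, again assuming the illustrative macros from the first example are in scope (the stub_* helpers stand in for qemu_mutex_lock_iothread(), qemu_mutex_lock_ramlist() and their unlock counterparts):

static void stub_lock(const char *name)   { printf("lock %s\n", name); }
static void stub_unlock(const char *name) { printf("unlock %s\n", name); }

void init_bitmaps_shape(void)
{
    stub_lock("iothread");
    stub_lock("ramlist");

    WITH_RCU_READ_LOCK_GUARD() {
        /* ... bitmap setup and first sync under the RCU read lock ... */
    }   /* rcu_read_unlock runs here, strictly inside the mutexes */

    stub_unlock("ramlist");
    stub_unlock("iothread");
}

Scoping the RCU section to a block rather than to the whole function keeps its lifetime shorter than the surrounding mutexes, so the lock and unlock order cannot drift apart as the function grows.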
@@ -3424,7 +3417,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
         goto out;
     }
 
-    rcu_read_lock();
+    WITH_RCU_READ_LOCK_GUARD() {
     if (ram_list.version != rs->last_version) {
         ram_state_reset(rs);
     }
@@ -3458,13 +3451,15 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
         rs->target_page_count += pages;
 
-        /* we want to check in the 1st loop, just in case it was the 1st time
-           and we had to sync the dirty bitmap.
-           qemu_clock_get_ns() is a bit expensive, so we only check each some
-           iterations
-        */
+        /*
+         * we want to check in the 1st loop, just in case it was the 1st
+         * time and we had to sync the dirty bitmap.
+         * qemu_clock_get_ns() is a bit expensive, so we only check each
+         * some iterations
+         */
         if ((i & 63) == 0) {
-            uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 1000000;
+            uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) /
+                1000000;
             if (t1 > MAX_WAIT) {
                 trace_ram_save_iterate_big_wait(t1, i);
                 break;
@@ -3472,7 +3467,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
             }
             i++;
         }
-    rcu_read_unlock();
+    }
 
     /*
      * Must occur before EOS (or any QEMUFile operation)
@@ -3510,8 +3505,7 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
     RAMState *rs = *temp;
     int ret = 0;
 
-    rcu_read_lock();
-
+    WITH_RCU_READ_LOCK_GUARD() {
     if (!migration_in_postcopy()) {
         migration_bitmap_sync_precopy(rs);
     }
@@ -3537,8 +3531,7 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
         flush_compressed_data(rs);
         ram_control_after_iterate(f, RAM_CONTROL_FINISH);
     }
+    }
 
-    rcu_read_unlock();
     multifd_send_sync_main(rs);
     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
@@ -3561,9 +3554,9 @@ static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
     if (!migration_in_postcopy() &&
         remaining_size < max_size) {
         qemu_mutex_lock_iothread();
-        rcu_read_lock();
-        migration_bitmap_sync_precopy(rs);
-        rcu_read_unlock();
+        WITH_RCU_READ_LOCK_GUARD() {
+            migration_bitmap_sync_precopy(rs);
+        }
         qemu_mutex_unlock_iothread();
         remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
     }
@@ -3907,7 +3900,13 @@ int colo_init_ram_cache(void)
             error_report("%s: Can't alloc memory for COLO cache of block %s,"
                          "size 0x" RAM_ADDR_FMT, __func__, block->idstr,
                          block->used_length);
-            goto out_locked;
+            RAMBLOCK_FOREACH_NOT_IGNORED(block) {
+                if (block->colo_cache) {
+                    qemu_anon_ram_free(block->colo_cache, block->used_length);
+                    block->colo_cache = NULL;
+                }
+            }
+            return -errno;
         }
         memcpy(block->colo_cache, block->host, block->used_length);
     }
@@ -3933,18 +3932,6 @@ int colo_init_ram_cache(void)
     memory_global_dirty_log_start();
 
     return 0;
-
-out_locked:
-
-    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
-        if (block->colo_cache) {
-            qemu_anon_ram_free(block->colo_cache, block->used_length);
-            block->colo_cache = NULL;
-        }
-    }
-
-    rcu_read_unlock();
-    return -errno;
 }
 
 /* It is need to hold the global lock to call this helper */
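The colo_init_ram_cache() hunks above are the one place where the conversion reshapes control flow rather than just deleting unlock calls: the shared out_locked label existed largely so the error path could free the partially built cache and drop the RCU read lock in one place. With the unlock automatic, the cleanup loop moves to the point of failure and the label goes away. The same inline-cleanup pattern in miniature (hypothetical code, not QEMU's):

#include <stdlib.h>

/* Allocate n buffers; on failure, free everything allocated so far. */
int cache_all_blocks(void **cache, size_t n, size_t size)
{
    for (size_t i = 0; i < n; i++) {
        cache[i] = malloc(size);
        if (!cache[i]) {
            /* was: goto out_locked;  now: clean up right here */
            for (size_t j = 0; j < i; j++) {
                free(cache[j]);
                cache[j] = NULL;
            }
            return -1;
        }
    }
    return 0;
}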
@@ -3958,16 +3945,14 @@ void colo_release_ram_cache(void)
         block->bmap = NULL;
     }
 
-    rcu_read_lock();
-
+    WITH_RCU_READ_LOCK_GUARD() {
     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
         if (block->colo_cache) {
             qemu_anon_ram_free(block->colo_cache, block->used_length);
             block->colo_cache = NULL;
         }
     }
-
-    rcu_read_unlock();
+    }
 
     qemu_mutex_destroy(&ram_state->bitmap_mutex);
     g_free(ram_state);
     ram_state = NULL;
@@ -4205,14 +4190,14 @@ static void colo_flush_ram_cache(void)
     unsigned long offset = 0;
 
     memory_global_dirty_log_sync();
-    rcu_read_lock();
+    WITH_RCU_READ_LOCK_GUARD() {
     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
         ramblock_sync_dirty_bitmap(ram_state, block);
     }
-    rcu_read_unlock();
+    }
 
     trace_colo_flush_ram_cache_begin(ram_state->migration_dirty_pages);
-    rcu_read_lock();
+    WITH_RCU_READ_LOCK_GUARD() {
     block = QLIST_FIRST_RCU(&ram_list.blocks);
     while (block) {
@@ -4228,8 +4213,7 @@ static void colo_flush_ram_cache(void)
                 memcpy(dst_host, src_host, TARGET_PAGE_SIZE);
             }
         }
+    }
 
-    rcu_read_unlock();
     trace_colo_flush_ram_cache_end();
 }
@@ -4428,8 +4412,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
      * it will be necessary to reduce the granularity of this
      * critical section.
      */
-    rcu_read_lock();
-
+    WITH_RCU_READ_LOCK_GUARD() {
     if (postcopy_running) {
         ret = ram_load_postcopy(f);
     } else {
@@ -4437,7 +4420,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
     }
 
     ret |= wait_for_decompress_done();
-    rcu_read_unlock();
+    }
 
     trace_ram_load_complete(ret, seq_iter);
 
     if (!ret && migration_incoming_in_colo_state()) {