Base patches for MTTCG enablement.

-----BEGIN PGP SIGNATURE-----
 Version: GnuPG v2
 
 iQExBAABCAAbBQJYF07FFBxwYm9uemluaUByZWRoYXQuY29tAAoJEL/70l94x66D
 ppoIAI4AxWocso5WIUH6uEHjOAxw9ZNhZ92nF8VtcbvGtN/eh8Qk4jfRX+W/Jl0q
 D13Rm3m8ynNHqh8YFs+O6i/WSgxHGxKwb75mNr36HDnYnMFluTvRQkvYJUXRyRuL
 CVtNgy8+q8FbbWo+NiJ5I7gfk2Si4BQfZN0uCLqGuCwqvvA/spN13xUcpeBXEKhL
 TeDGZBT/atDnT2bRcve8E8g5/0RKjTL9EB0jwfJjHocT5bs+toPe6js9VnZDRNWN
 ZldcONgEHj3zAj9j7hTkVWFTGPSCx/tt6y6JeORq1oxk0mCCswEk0U9A3hLzLjc/
 94XHsLaEoZ7HNAKtkLc07NYhkQM=
 =+6Sj
 -----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/bonzini/tags/for-upstream-mttcg' into staging

Base patches for MTTCG enablement.

# gpg: Signature made Mon 31 Oct 2016 14:01:41 GMT
# gpg:                using RSA key 0xBFFBD25F78C7AE83
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>"
# gpg:                 aka "Paolo Bonzini <pbonzini@redhat.com>"
# Primary key fingerprint: 46F5 9FBD 57D6 12E7 BFD4  E2F7 7E15 100C CD36 69B1
#      Subkey fingerprint: F133 3857 4B66 2389 866C  7682 BFFB D25F 78C7 AE83

* remotes/bonzini/tags/for-upstream-mttcg:
  tcg: move locking for tb_invalidate_phys_page_range up
  *_run_on_cpu: introduce run_on_cpu_data type
  cpus: re-factor out handle_icount_deadline
  tcg: cpus rm tcg_exec_all()
  tcg: move tcg_exec_all and helpers above thread fn
  target-arm/arm-powerctl: wake up sleeping CPUs
  tcg: protect translation related stuff with tb_lock.
  translate-all: Add assert_(memory|tb)_lock annotations
  linux-user/elfload: ensure mmap_lock() held while setting up
  tcg: comment on which functions have to be called with tb_lock held
  cpu-exec: include cpu_index in CPU_LOG_EXEC messages
  translate-all: add DEBUG_LOCKING asserts
  translate_all: DEBUG_FLUSH -> DEBUG_TB_FLUSH
  cpus: make all_vcpus_paused() return bool

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 6bc56d317f (Peter Maydell, 2016-10-31 15:29:12 +00:00)
25 changed files with 426 additions and 222 deletions

View File

@@ -42,6 +42,11 @@ void mmap_unlock(void)
     }
 }
 
+bool have_mmap_lock(void)
+{
+    return mmap_lock_count > 0 ? true : false;
+}
+
 /* Grab lock to make sure things are in a consistent state after fork().  */
 void mmap_fork_start(void)
 {

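have_mmap_lock() is added here (and to linux-user/mmap.c below) so that later patches in this series can assert that the lock is really held; translate-all.c wraps it in an assert_memory_lock() macro further down. A minimal sketch of the caller/callee contract it enables (the helper name is invented for illustration, not part of the patch):

    /* Illustrative only: demo_update_page_flags() is not part of the patch. */
    static void demo_update_page_flags(unsigned long start, unsigned long end, int flags)
    {
        /* The caller must have taken mmap_lock(); with the new predicate the
         * contract can be checked cheaply in debug builds. */
        g_assert(have_mmap_lock());

        /* ... update guest page tracking for [start, end) ... */
    }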
View File

@@ -143,8 +143,9 @@ static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
     uint8_t *tb_ptr = itb->tc_ptr;
 
     qemu_log_mask_and_addr(CPU_LOG_EXEC, itb->pc,
-                           "Trace %p [" TARGET_FMT_lx "] %s\n",
-                           itb->tc_ptr, itb->pc, lookup_symbol(itb->pc));
+                           "Trace %p [%d: " TARGET_FMT_lx "] %s\n",
+                           itb->tc_ptr, cpu->cpu_index, itb->pc,
+                           lookup_symbol(itb->pc));
 
 #if defined(DEBUG_DISAS)
     if (qemu_loglevel_mask(CPU_LOG_TB_CPU)
@@ -204,15 +205,21 @@ static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
     if (max_cycles > CF_COUNT_MASK)
         max_cycles = CF_COUNT_MASK;
 
+    tb_lock();
     tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                      max_cycles | CF_NOCACHE
                          | (ignore_icount ? CF_IGNORE_ICOUNT : 0));
     tb->orig_tb = orig_tb;
+    tb_unlock();
+
     /* execute the generated code */
     trace_exec_tb_nocache(tb, tb->pc);
     cpu_tb_exec(cpu, tb);
+
+    tb_lock();
     tb_phys_invalidate(tb, -1);
     tb_free(tb);
+    tb_unlock();
 }
 #endif

View File

@@ -109,7 +109,7 @@ void cpu_list_remove(CPUState *cpu)
 struct qemu_work_item {
     struct qemu_work_item *next;
     run_on_cpu_func func;
-    void *data;
+    run_on_cpu_data data;
     bool free, exclusive, done;
 };
 
@@ -129,7 +129,7 @@ static void queue_work_on_cpu(CPUState *cpu, struct qemu_work_item *wi)
     qemu_cpu_kick(cpu);
 }
 
-void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data,
+void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data,
                    QemuMutex *mutex)
 {
     struct qemu_work_item wi;
@@ -154,7 +154,7 @@ void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data,
     }
 }
 
-void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
+void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
 {
     struct qemu_work_item *wi;
 
@@ -296,7 +296,8 @@ void cpu_exec_end(CPUState *cpu)
     }
 }
 
-void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
+void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func,
+                           run_on_cpu_data data)
 {
     struct qemu_work_item *wi;

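With the plumbing above, run_on_cpu() callbacks receive a run_on_cpu_data union instead of a bare void *. A minimal sketch of the resulting calling convention, against the API this series introduces (the DemoArgs payload and the demo_* helper names are invented for illustration):

    /* Illustrative only: DemoArgs and the demo_* helpers are not in the patch. */
    typedef struct DemoArgs {
        int level;
        bool done;
    } DemoArgs;

    /* Callbacks now take run_on_cpu_data instead of void *. */
    static void demo_on_vcpu(CPUState *cs, run_on_cpu_data data)
    {
        DemoArgs *args = data.host_ptr;    /* unpack the union */

        /* ... act on @cs using args->level ... */
        args->done = true;
    }

    static void demo_schedule(CPUState *cs)
    {
        DemoArgs args = { .level = 3 };

        /* Synchronous variant: a stack pointer is safe because run_on_cpu()
         * only returns after demo_on_vcpu() has run on the target vCPU. */
        run_on_cpu(cs, demo_on_vcpu, RUN_ON_CPU_HOST_PTR(&args));
    }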
cpus.c
View File

@@ -69,7 +69,6 @@
 
 #endif /* CONFIG_LINUX */
 
-static CPUState *next_cpu;
 int64_t max_delay;
 int64_t max_advance;
 
@@ -557,7 +556,7 @@ static const VMStateDescription vmstate_timers = {
     }
 };
 
-static void cpu_throttle_thread(CPUState *cpu, void *opaque)
+static void cpu_throttle_thread(CPUState *cpu, run_on_cpu_data opaque)
 {
     double pct;
     double throttle_ratio;
@@ -588,7 +587,8 @@ static void cpu_throttle_timer_tick(void *opaque)
     }
     CPU_FOREACH(cpu) {
         if (!atomic_xchg(&cpu->throttle_thread_scheduled, 1)) {
-            async_run_on_cpu(cpu, cpu_throttle_thread, NULL);
+            async_run_on_cpu(cpu, cpu_throttle_thread,
+                             RUN_ON_CPU_NULL);
         }
     }
 
@@ -915,7 +915,7 @@ void qemu_init_cpu_loop(void)
     qemu_thread_get_self(&io_thread);
 }
 
-void run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
+void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
 {
     do_run_on_cpu(cpu, func, data, &qemu_global_mutex);
 }
@@ -1055,12 +1055,102 @@ static void *qemu_dummy_cpu_thread_fn(void *arg)
 #endif
 }
 
-static void tcg_exec_all(void);
+static int64_t tcg_get_icount_limit(void)
+{
+    int64_t deadline;
+
+    if (replay_mode != REPLAY_MODE_PLAY) {
+        deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
+
+        /* Maintain prior (possibly buggy) behaviour where if no deadline
+         * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than
+         * INT32_MAX nanoseconds ahead, we still use INT32_MAX
+         * nanoseconds.
+         */
+        if ((deadline < 0) || (deadline > INT32_MAX)) {
+            deadline = INT32_MAX;
+        }
+
+        return qemu_icount_round(deadline);
+    } else {
+        return replay_get_instructions();
+    }
+}
+
+static void handle_icount_deadline(void)
+{
+    if (use_icount) {
+        int64_t deadline =
+            qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
+
+        if (deadline == 0) {
+            qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
+        }
+    }
+}
+
+static int tcg_cpu_exec(CPUState *cpu)
+{
+    int ret;
+#ifdef CONFIG_PROFILER
+    int64_t ti;
+#endif
+
+#ifdef CONFIG_PROFILER
+    ti = profile_getclock();
+#endif
+    if (use_icount) {
+        int64_t count;
+        int decr;
+        timers_state.qemu_icount -= (cpu->icount_decr.u16.low
+                                    + cpu->icount_extra);
+        cpu->icount_decr.u16.low = 0;
+        cpu->icount_extra = 0;
+        count = tcg_get_icount_limit();
+        timers_state.qemu_icount += count;
+        decr = (count > 0xffff) ? 0xffff : count;
+        count -= decr;
+        cpu->icount_decr.u16.low = decr;
+        cpu->icount_extra = count;
+    }
+    cpu_exec_start(cpu);
+    ret = cpu_exec(cpu);
+    cpu_exec_end(cpu);
+#ifdef CONFIG_PROFILER
+    tcg_time += profile_getclock() - ti;
+#endif
+    if (use_icount) {
+        /* Fold pending instructions back into the
+           instruction counter, and clear the interrupt flag.  */
+        timers_state.qemu_icount -= (cpu->icount_decr.u16.low
+                                    + cpu->icount_extra);
+        cpu->icount_decr.u32 = 0;
+        cpu->icount_extra = 0;
+        replay_account_executed_instructions();
+    }
+    return ret;
+}
+
+/* Destroy any remaining vCPUs which have been unplugged and have
+ * finished running
+ */
+static void deal_with_unplugged_cpus(void)
+{
+    CPUState *cpu;
+
+    CPU_FOREACH(cpu) {
+        if (cpu->unplug && !cpu_can_run(cpu)) {
+            qemu_tcg_destroy_vcpu(cpu);
+            cpu->created = false;
+            qemu_cond_signal(&qemu_cpu_cond);
+            break;
+        }
+    }
+}
 
 static void *qemu_tcg_cpu_thread_fn(void *arg)
 {
     CPUState *cpu = arg;
-    CPUState *remove_cpu = NULL;
 
     rcu_register_thread();
@@ -1087,29 +1177,44 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
     /* process any pending work */
     atomic_mb_set(&exit_request, 1);
 
+    cpu = first_cpu;
+
     while (1) {
-        tcg_exec_all();
+        /* Account partial waits to QEMU_CLOCK_VIRTUAL.  */
+        qemu_account_warp_timer();
 
-        if (use_icount) {
-            int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
+        if (!cpu) {
+            cpu = first_cpu;
+        }
 
-            if (deadline == 0) {
-                qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
-            }
-        }
-        qemu_tcg_wait_io_event(QTAILQ_FIRST(&cpus));
-        CPU_FOREACH(cpu) {
-            if (cpu->unplug && !cpu_can_run(cpu)) {
-                remove_cpu = cpu;
+        for (; cpu != NULL && !exit_request; cpu = CPU_NEXT(cpu)) {
+
+            qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
+                              (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);
+
+            if (cpu_can_run(cpu)) {
+                int r;
+                r = tcg_cpu_exec(cpu);
+                if (r == EXCP_DEBUG) {
+                    cpu_handle_guest_debug(cpu);
+                    break;
+                }
+            } else if (cpu->stop || cpu->stopped) {
+                if (cpu->unplug) {
+                    cpu = CPU_NEXT(cpu);
+                }
                 break;
             }
-        }
-        if (remove_cpu) {
-            qemu_tcg_destroy_vcpu(remove_cpu);
-            cpu->created = false;
-            qemu_cond_signal(&qemu_cpu_cond);
-            remove_cpu = NULL;
-        }
+
+        } /* for cpu.. */
+
+        /* Pairs with smp_wmb in qemu_cpu_kick.  */
+        atomic_mb_set(&exit_request, 0);
+
+        handle_icount_deadline();
+
+        qemu_tcg_wait_io_event(QTAILQ_FIRST(&cpus));
+
+        deal_with_unplugged_cpus();
     }
 
     return NULL;
@@ -1207,17 +1312,17 @@ void qemu_mutex_unlock_iothread(void)
     qemu_mutex_unlock(&qemu_global_mutex);
 }
 
-static int all_vcpus_paused(void)
+static bool all_vcpus_paused(void)
 {
     CPUState *cpu;
 
     CPU_FOREACH(cpu) {
         if (!cpu->stopped) {
-            return 0;
+            return false;
         }
     }
 
-    return 1;
+    return true;
 }
 
 void pause_all_vcpus(void)
@@ -1412,106 +1517,6 @@ int vm_stop_force_state(RunState state)
     }
 }
 
-static int64_t tcg_get_icount_limit(void)
-{
-    int64_t deadline;
-
-    if (replay_mode != REPLAY_MODE_PLAY) {
-        deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
-
-        /* Maintain prior (possibly buggy) behaviour where if no deadline
-         * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than
-         * INT32_MAX nanoseconds ahead, we still use INT32_MAX
-         * nanoseconds.
-         */
-        if ((deadline < 0) || (deadline > INT32_MAX)) {
-            deadline = INT32_MAX;
-        }
-
-        return qemu_icount_round(deadline);
-    } else {
-        return replay_get_instructions();
-    }
-}
-
-static int tcg_cpu_exec(CPUState *cpu)
-{
-    int ret;
-#ifdef CONFIG_PROFILER
-    int64_t ti;
-#endif
-
-#ifdef CONFIG_PROFILER
-    ti = profile_getclock();
-#endif
-    if (use_icount) {
-        int64_t count;
-        int decr;
-        timers_state.qemu_icount -= (cpu->icount_decr.u16.low
-                                    + cpu->icount_extra);
-        cpu->icount_decr.u16.low = 0;
-        cpu->icount_extra = 0;
-        count = tcg_get_icount_limit();
-        timers_state.qemu_icount += count;
-        decr = (count > 0xffff) ? 0xffff : count;
-        count -= decr;
-        cpu->icount_decr.u16.low = decr;
-        cpu->icount_extra = count;
-    }
-    cpu_exec_start(cpu);
-    ret = cpu_exec(cpu);
-    cpu_exec_end(cpu);
-#ifdef CONFIG_PROFILER
-    tcg_time += profile_getclock() - ti;
-#endif
-    if (use_icount) {
-        /* Fold pending instructions back into the
-           instruction counter, and clear the interrupt flag.  */
-        timers_state.qemu_icount -= (cpu->icount_decr.u16.low
-                        + cpu->icount_extra);
-        cpu->icount_decr.u32 = 0;
-        cpu->icount_extra = 0;
-        replay_account_executed_instructions();
-    }
-    return ret;
-}
-
-static void tcg_exec_all(void)
-{
-    int r;
-
-    /* Account partial waits to QEMU_CLOCK_VIRTUAL.  */
-    qemu_account_warp_timer();
-
-    if (next_cpu == NULL) {
-        next_cpu = first_cpu;
-    }
-    for (; next_cpu != NULL && !exit_request; next_cpu = CPU_NEXT(next_cpu)) {
-        CPUState *cpu = next_cpu;
-
-        qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
-                          (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);
-
-        if (cpu_can_run(cpu)) {
-            r = tcg_cpu_exec(cpu);
-            if (r == EXCP_DEBUG) {
-                cpu_handle_guest_debug(cpu);
-                break;
-            } else if (r == EXCP_ATOMIC) {
-                cpu_exec_step_atomic(cpu);
-            }
-        } else if (cpu->stop || cpu->stopped) {
-            if (cpu->unplug) {
-                next_cpu = CPU_NEXT(cpu);
-            }
-            break;
-        }
-    }
-
-    /* Pairs with smp_wmb in qemu_cpu_kick.  */
-    atomic_mb_set(&exit_request, 0);
-}
-
 void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
 {
     /* XXX: implement xxx_cpu_list for targets that still miss it */

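tcg_cpu_exec() above hands the vCPU an instruction budget by splitting the value from tcg_get_icount_limit() between the 16-bit icount_decr.u16.low field (decremented by generated code) and icount_extra (refilled once the low part drains). A self-contained sketch of just that arithmetic (plain C, illustrative, not QEMU code):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the split in tcg_cpu_exec(): at most 0xffff instructions go
     * into the 16-bit low counter, the remainder is parked in "extra". */
    static void split_icount_budget(int64_t count, uint16_t *low, int64_t *extra)
    {
        int decr = (count > 0xffff) ? 0xffff : count;

        *low = decr;
        *extra = count - decr;
    }

    int main(void)
    {
        uint16_t low;
        int64_t extra;

        split_icount_budget(100000, &low, &extra);
        printf("low=%u extra=%" PRId64 "\n", low, extra);   /* low=65535 extra=34465 */
        return 0;
    }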
exec.c
View File

@@ -687,7 +687,11 @@ void cpu_exec_realizefn(CPUState *cpu, Error **errp)
 #if defined(CONFIG_USER_ONLY)
 static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
 {
+    mmap_lock();
+    tb_lock();
     tb_invalidate_phys_page_range(pc, pc + 1, 0);
+    tb_unlock();
+    mmap_unlock();
 }
 #else
@@ -696,6 +700,7 @@ static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
     hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
     int asidx = cpu_asidx_from_attrs(cpu, attrs);
     if (phys != -1) {
+        /* Locks grabbed by tb_invalidate_phys_addr */
         tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
                                 phys | (pc & ~TARGET_PAGE_MASK));
     }
@@ -1988,7 +1993,11 @@ ram_addr_t qemu_ram_addr_from_host(void *ptr)
 static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                                uint64_t val, unsigned size)
 {
+    bool locked = false;
+
     if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
+        locked = true;
+        tb_lock();
         tb_invalidate_phys_page_fast(ram_addr, size);
     }
     switch (size) {
@@ -2004,6 +2013,11 @@ static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
     default:
         abort();
     }
+
+    if (locked) {
+        tb_unlock();
+    }
+
     /* Set both VGA and migration bits for simplicity and to remove
      * the notdirty callback faster.
      */
@@ -2064,6 +2078,12 @@ static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
                 continue;
             }
             cpu->watchpoint_hit = wp;
+
+            /* The tb_lock will be reset when cpu_loop_exit or
+             * cpu_loop_exit_noexc longjmp back into the cpu_exec
+             * main loop.
+             */
+            tb_lock();
             tb_check_watchpoint(cpu);
             if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                 cpu->exception_index = EXCP_DEBUG;
@@ -2471,7 +2491,9 @@ static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
             cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
     }
     if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
+        tb_lock();
         tb_invalidate_phys_range(addr, addr + length);
+        tb_unlock();
         dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
     }
     cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);

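notdirty_mem_write() above only takes tb_lock on the slow path, when the written page may still hold translated code, and remembers that in a local flag so the matching unlock happens after the store. The shape of that pattern in isolation (stand-alone sketch with stub functions, not QEMU code):

    #include <inttypes.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stubs so the sketch compiles; tb_lock()/tb_unlock() and
     * tb_invalidate_phys_page_fast() play these roles in the real code. */
    static void lock_translations(void)   { puts("lock"); }
    static void unlock_translations(void) { puts("unlock"); }
    static void invalidate_code_at(uint64_t a) { printf("invalidate %#" PRIx64 "\n", a); }
    static void do_store(uint64_t a, uint64_t v) { printf("store %#" PRIx64 " = %" PRIu64 "\n", a, v); }

    static void write_and_maybe_invalidate(uint64_t addr, uint64_t val,
                                           bool page_has_code)
    {
        bool locked = false;

        if (page_has_code) {              /* slow path only */
            locked = true;
            lock_translations();
            invalidate_code_at(addr);
        }

        do_store(addr, val);              /* common case stays lock-free */

        if (locked) {
            unlock_translations();
        }
    }

    int main(void)
    {
        write_and_maybe_invalidate(0x1000, 42, true);
        write_and_maybe_invalidate(0x2000, 7, false);
        return 0;
    }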
View File

@@ -133,9 +133,9 @@ static void kvm_apic_vapic_base_update(APICCommonState *s)
     }
 }
 
-static void kvm_apic_put(CPUState *cs, void *data)
+static void kvm_apic_put(CPUState *cs, run_on_cpu_data data)
 {
-    APICCommonState *s = data;
+    APICCommonState *s = data.host_ptr;
     struct kvm_lapic_state kapic;
     int ret;
 
@@ -151,12 +151,12 @@ static void kvm_apic_put(CPUState *cs, void *data)
 
 static void kvm_apic_post_load(APICCommonState *s)
 {
-    run_on_cpu(CPU(s->cpu), kvm_apic_put, s);
+    run_on_cpu(CPU(s->cpu), kvm_apic_put, RUN_ON_CPU_HOST_PTR(s));
 }
 
-static void do_inject_external_nmi(CPUState *cpu, void *data)
+static void do_inject_external_nmi(CPUState *cpu, run_on_cpu_data data)
 {
-    APICCommonState *s = data;
+    APICCommonState *s = data.host_ptr;
     uint32_t lvt;
     int ret;
 
@@ -174,7 +174,7 @@ static void do_inject_external_nmi(CPUState *cpu, void *data)
 
 static void kvm_apic_external_nmi(APICCommonState *s)
 {
-    run_on_cpu(CPU(s->cpu), do_inject_external_nmi, s);
+    run_on_cpu(CPU(s->cpu), do_inject_external_nmi, RUN_ON_CPU_HOST_PTR(s));
 }
 
 static void kvm_send_msi(MSIMessage *msg)
@@ -213,7 +213,7 @@ static void kvm_apic_reset(APICCommonState *s)
     /* Not used by KVM, which uses the CPU mp_state instead. */
     s->wait_for_sipi = 0;
 
-    run_on_cpu(CPU(s->cpu), kvm_apic_put, s);
+    run_on_cpu(CPU(s->cpu), kvm_apic_put, RUN_ON_CPU_HOST_PTR(s));
 }
 
 static void kvm_apic_realize(DeviceState *dev, Error **errp)

View File

@@ -17,6 +17,7 @@
 #include "sysemu/kvm.h"
 #include "hw/i386/apic_internal.h"
 #include "hw/sysbus.h"
+#include "tcg/tcg.h"
 
 #define VAPIC_IO_PORT           0x7e
 
@@ -449,6 +450,9 @@ static void patch_instruction(VAPICROMState *s, X86CPU *cpu, target_ulong ip)
     resume_all_vcpus();
 
     if (!kvm_enabled()) {
+        /* tb_lock will be reset when cpu_loop_exit_noexc longjmps
+         * back into the cpu_exec loop. */
+        tb_lock();
         tb_gen_code(cs, current_pc, current_cs_base, current_flags, 1);
         cpu_loop_exit_noexc(cs);
     }
@@ -483,10 +487,9 @@ typedef struct VAPICEnableTPRReporting {
     bool enable;
 } VAPICEnableTPRReporting;
 
-static void vapic_do_enable_tpr_reporting(CPUState *cpu, void *data)
+static void vapic_do_enable_tpr_reporting(CPUState *cpu, run_on_cpu_data data)
 {
-    VAPICEnableTPRReporting *info = data;
-
+    VAPICEnableTPRReporting *info = data.host_ptr;
     apic_enable_tpr_access_reporting(info->apic, info->enable);
 }
 
@@ -501,7 +504,7 @@ static void vapic_enable_tpr_reporting(bool enable)
     CPU_FOREACH(cs) {
         cpu = X86_CPU(cs);
         info.apic = cpu->apic_state;
-        run_on_cpu(cs, vapic_do_enable_tpr_reporting, &info);
+        run_on_cpu(cs, vapic_do_enable_tpr_reporting, RUN_ON_CPU_HOST_PTR(&info));
     }
 }
 
@@ -734,9 +737,9 @@ static void vapic_realize(DeviceState *dev, Error **errp)
     nb_option_roms++;
 }
 
-static void do_vapic_enable(CPUState *cs, void *data)
+static void do_vapic_enable(CPUState *cs, run_on_cpu_data data)
 {
-    VAPICROMState *s = data;
+    VAPICROMState *s = data.host_ptr;
     X86CPU *cpu = X86_CPU(cs);
 
     static const uint8_t enabled = 1;
@@ -758,7 +761,7 @@ static void kvmvapic_vm_state_change(void *opaque, int running,
 
     if (s->state == VAPIC_ACTIVE) {
         if (smp_cpus == 1) {
-            run_on_cpu(first_cpu, do_vapic_enable, s);
+            run_on_cpu(first_cpu, do_vapic_enable, RUN_ON_CPU_HOST_PTR(s));
         } else {
             zero = g_malloc0(s->rom_state.vapic_size);
             cpu_physical_memory_write(s->vapic_paddr, zero,

View File

@@ -84,11 +84,11 @@ static void mmubooke_create_initial_mapping(CPUPPCState *env,
     env->tlb_dirty = true;
 }
 
-static void spin_kick(CPUState *cs, void *data)
+static void spin_kick(CPUState *cs, run_on_cpu_data data)
 {
     PowerPCCPU *cpu = POWERPC_CPU(cs);
     CPUPPCState *env = &cpu->env;
-    SpinInfo *curspin = data;
+    SpinInfo *curspin = data.host_ptr;
     hwaddr map_size = 64 * 1024 * 1024;
     hwaddr map_start;
 
@@ -147,7 +147,7 @@ static void spin_write(void *opaque, hwaddr addr, uint64_t value,
 
     if (!(ldq_p(&curspin->addr) & 1)) {
         /* run CPU */
-        run_on_cpu(cpu, spin_kick, curspin);
+        run_on_cpu(cpu, spin_kick, RUN_ON_CPU_HOST_PTR(curspin));
     }
 }

View File

@@ -2186,7 +2186,7 @@ static void spapr_machine_finalizefn(Object *obj)
     g_free(spapr->kvm_type);
 }
 
-static void ppc_cpu_do_nmi_on_cpu(CPUState *cs, void *arg)
+static void ppc_cpu_do_nmi_on_cpu(CPUState *cs, run_on_cpu_data arg)
 {
     cpu_synchronize_state(cs);
     ppc_cpu_do_system_reset(cs);
@@ -2197,7 +2197,7 @@ static void spapr_nmi(NMIState *n, int cpu_index, Error **errp)
     CPUState *cs;
 
     CPU_FOREACH(cs) {
-        async_run_on_cpu(cs, ppc_cpu_do_nmi_on_cpu, NULL);
+        async_run_on_cpu(cs, ppc_cpu_do_nmi_on_cpu, RUN_ON_CPU_NULL);
     }
 }

View File

@@ -19,9 +19,9 @@ struct SPRSyncState {
     target_ulong mask;
 };
 
-static void do_spr_sync(CPUState *cs, void *arg)
+static void do_spr_sync(CPUState *cs, run_on_cpu_data arg)
 {
-    struct SPRSyncState *s = arg;
+    struct SPRSyncState *s = arg.host_ptr;
     PowerPCCPU *cpu = POWERPC_CPU(cs);
     CPUPPCState *env = &cpu->env;
 
@@ -38,7 +38,7 @@ static void set_spr(CPUState *cs, int spr, target_ulong value,
         .value = value,
         .mask = mask
     };
-    run_on_cpu(cs, do_spr_sync, &s);
+    run_on_cpu(cs, do_spr_sync, RUN_ON_CPU_HOST_PTR(&s));
 }
 
 static bool has_spr(PowerPCCPU *cpu, int spr)
@@ -886,10 +886,10 @@ typedef struct {
     Error *err;
 } SetCompatState;
 
-static void do_set_compat(CPUState *cs, void *arg)
+static void do_set_compat(CPUState *cs, run_on_cpu_data arg)
 {
     PowerPCCPU *cpu = POWERPC_CPU(cs);
-    SetCompatState *s = arg;
+    SetCompatState *s = arg.host_ptr;
 
     cpu_synchronize_state(cs);
     ppc_set_compat(cpu, s->cpu_version, &s->err);
@@ -990,7 +990,7 @@ static target_ulong h_client_architecture_support(PowerPCCPU *cpu_,
                 .err = NULL,
             };
 
-            run_on_cpu(cs, do_set_compat, &s);
+            run_on_cpu(cs, do_set_compat, RUN_ON_CPU_HOST_PTR(&s));
 
             if (s.err) {
                 error_report_err(s.err);

View File

@@ -316,6 +316,11 @@ static inline void tb_set_jmp_target(TranslationBlock *tb,
 
 #endif
 
+/* Called with tb_lock held.  */
 static inline void tb_add_jump(TranslationBlock *tb, int n,
                                TranslationBlock *tb_next)
 {
@@ -369,6 +370,7 @@ void tlb_fill(CPUState *cpu, target_ulong addr, MMUAccessType access_type,
 #if defined(CONFIG_USER_ONLY)
 void mmap_lock(void);
 void mmap_unlock(void);
+bool have_mmap_lock(void);
 
 static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
 {

View File

@@ -231,7 +231,25 @@ struct kvm_run;
 #define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
 
 /* work queue */
-typedef void (*run_on_cpu_func)(CPUState *cpu, void *data);
+
+/* The union type allows passing of 64 bit target pointers on 32 bit
+ * hosts in a single parameter
+ */
+typedef union {
+    int           host_int;
+    unsigned long host_ulong;
+    void         *host_ptr;
+    vaddr         target_ptr;
+} run_on_cpu_data;
+
+#define RUN_ON_CPU_HOST_PTR(p)    ((run_on_cpu_data){.host_ptr = (p)})
+#define RUN_ON_CPU_HOST_INT(i)    ((run_on_cpu_data){.host_int = (i)})
+#define RUN_ON_CPU_HOST_ULONG(ul) ((run_on_cpu_data){.host_ulong = (ul)})
+#define RUN_ON_CPU_TARGET_PTR(v)  ((run_on_cpu_data){.target_ptr = (v)})
+#define RUN_ON_CPU_NULL           RUN_ON_CPU_HOST_PTR(NULL)
+
+typedef void (*run_on_cpu_func)(CPUState *cpu, run_on_cpu_data data);
+
 struct qemu_work_item;
 
 /**
@@ -319,7 +337,10 @@ struct CPUState {
     MemoryRegion *memory;
 
     void *env_ptr; /* CPUArchState */
+
+    /* Writes protected by tb_lock, reads not thread-safe  */
     struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE];
+
     struct GDBRegisterState *gdb_regs;
     int gdb_num_regs;
     int gdb_num_g_regs;
@@ -634,7 +655,7 @@ bool cpu_is_stopped(CPUState *cpu);
  *
  * Used internally in the implementation of run_on_cpu.
  */
-void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data,
+void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data,
                    QemuMutex *mutex);
 
 /**
@@ -645,7 +666,7 @@ void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data,
  *
  * Schedules the function @func for execution on the vCPU @cpu.
  */
-void run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data);
+void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);
 
 /**
  * async_run_on_cpu:
@@ -655,7 +676,7 @@ void run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data);
  *
  * Schedules the function @func for execution on the vCPU @cpu asynchronously.
  */
-void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data);
+void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);
 
 /**
  * async_safe_run_on_cpu:
@@ -669,7 +690,7 @@ void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data);
  * Unlike run_on_cpu and async_run_on_cpu, the function is run outside the
  * BQL.
  */
-void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data);
+void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);
 
 /**
  * qemu_get_cpu:

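Because vaddr stays 64 bits even on 32-bit hosts, the union above lets a guest address travel through the work queue without a lossy cast, which a plain void * could not guarantee. A sketch against the API introduced here (the demo_* helpers are invented for illustration; tlb_flush_page() is an existing QEMU call):

    /* Illustrative only: flush a guest page on a vCPU via the work queue. */
    static void demo_flush_page(CPUState *cs, run_on_cpu_data data)
    {
        vaddr guest_addr = data.target_ptr;   /* full 64-bit value, even on ILP32 hosts */

        tlb_flush_page(cs, guest_addr);
    }

    static void demo_request_flush(CPUState *cs, vaddr guest_addr)
    {
        /* Asynchronous: pass-by-value payloads (or heap pointers) only, since
         * the caller does not wait for the work item to run. */
        async_run_on_cpu(cs, demo_flush_page, RUN_ON_CPU_TARGET_PTR(guest_addr));
    }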
View File

@@ -1856,7 +1856,7 @@ void kvm_flush_coalesced_mmio_buffer(void)
     s->coalesced_flush_in_progress = false;
 }
 
-static void do_kvm_cpu_synchronize_state(CPUState *cpu, void *arg)
+static void do_kvm_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
 {
     if (!cpu->kvm_vcpu_dirty) {
         kvm_arch_get_registers(cpu);
@@ -1867,11 +1867,11 @@ static void do_kvm_cpu_synchronize_state(CPUState *cpu, void *arg)
 void kvm_cpu_synchronize_state(CPUState *cpu)
 {
     if (!cpu->kvm_vcpu_dirty) {
-        run_on_cpu(cpu, do_kvm_cpu_synchronize_state, NULL);
+        run_on_cpu(cpu, do_kvm_cpu_synchronize_state, RUN_ON_CPU_NULL);
     }
 }
 
-static void do_kvm_cpu_synchronize_post_reset(CPUState *cpu, void *arg)
+static void do_kvm_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg)
 {
     kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE);
     cpu->kvm_vcpu_dirty = false;
@@ -1879,10 +1879,10 @@ static void do_kvm_cpu_synchronize_post_reset(CPUState *cpu, void *arg)
 
 void kvm_cpu_synchronize_post_reset(CPUState *cpu)
 {
-    run_on_cpu(cpu, do_kvm_cpu_synchronize_post_reset, NULL);
+    run_on_cpu(cpu, do_kvm_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
 }
 
-static void do_kvm_cpu_synchronize_post_init(CPUState *cpu, void *arg)
+static void do_kvm_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
 {
     kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE);
     cpu->kvm_vcpu_dirty = false;
@@ -1890,7 +1890,7 @@ static void do_kvm_cpu_synchronize_post_init(CPUState *cpu, void *arg)
 
 void kvm_cpu_synchronize_post_init(CPUState *cpu)
 {
-    run_on_cpu(cpu, do_kvm_cpu_synchronize_post_init, NULL);
+    run_on_cpu(cpu, do_kvm_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
 }
 
 int kvm_cpu_exec(CPUState *cpu)
@@ -2218,9 +2218,10 @@ struct kvm_set_guest_debug_data {
     int err;
 };
 
-static void kvm_invoke_set_guest_debug(CPUState *cpu, void *data)
+static void kvm_invoke_set_guest_debug(CPUState *cpu, run_on_cpu_data data)
 {
-    struct kvm_set_guest_debug_data *dbg_data = data;
+    struct kvm_set_guest_debug_data *dbg_data =
+        (struct kvm_set_guest_debug_data *) data.host_ptr;
 
     dbg_data->err = kvm_vcpu_ioctl(cpu, KVM_SET_GUEST_DEBUG,
                                    &dbg_data->dbg);
@@ -2237,7 +2238,8 @@ int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
     }
     kvm_arch_update_guest_debug(cpu, &data.dbg);
 
-    run_on_cpu(cpu, kvm_invoke_set_guest_debug, &data);
+    run_on_cpu(cpu, kvm_invoke_set_guest_debug,
+               RUN_ON_CPU_HOST_PTR(&data));
     return data.err;
 }

View File

@@ -1842,6 +1842,8 @@ static void load_elf_image(const char *image_name, int image_fd,
     info->pt_dynamic_addr = 0;
 #endif
 
+    mmap_lock();
+
     /* Find the maximum size of the image and allocate an appropriate
        amount of memory to handle that.  */
     loaddr = -1, hiaddr = 0;
@@ -2002,6 +2004,8 @@ static void load_elf_image(const char *image_name, int image_fd,
         load_symbols(ehdr, image_fd, load_bias);
     }
 
+    mmap_unlock();
+
     close(image_fd);
     return;

View File

@@ -41,6 +41,11 @@ void mmap_unlock(void)
     }
 }
 
+bool have_mmap_lock(void)
+{
+    return mmap_lock_count > 0 ? true : false;
+}
+
 /* Grab lock to make sure things are in a consistent state after fork().  */
 void mmap_fork_start(void)
 {

View File

@@ -9,4 +9,4 @@ obj-y += neon_helper.o iwmmxt_helper.o
 obj-y += gdbstub.o
 obj-$(TARGET_AARCH64) += cpu64.o translate-a64.o helper-a64.o gdbstub64.o
 obj-y += crypto_helper.o
-obj-y += arm-powerctl.o
+obj-$(CONFIG_SOFTMMU) += arm-powerctl.o

View File

@@ -166,6 +166,8 @@ int arm_set_cpu_on(uint64_t cpuid, uint64_t entry, uint64_t context_id,
     /* Start the new CPU at the requested address */
     cpu_set_pc(target_cpu_state, entry);
 
+    qemu_cpu_kick(target_cpu_state);
+
     /* We are good to go */
     return QEMU_ARM_POWERCTL_RET_SUCCESS;
 }

View File

@@ -1121,9 +1121,9 @@ typedef struct MCEInjectionParams {
     int flags;
 } MCEInjectionParams;
 
-static void do_inject_x86_mce(CPUState *cs, void *data)
+static void do_inject_x86_mce(CPUState *cs, run_on_cpu_data data)
 {
-    MCEInjectionParams *params = data;
+    MCEInjectionParams *params = data.host_ptr;
     X86CPU *cpu = X86_CPU(cs);
     CPUX86State *cenv = &cpu->env;
     uint64_t *banks = cenv->mce_banks + 4 * params->bank;
@@ -1230,7 +1230,7 @@ void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
         return;
     }
 
-    run_on_cpu(cs, do_inject_x86_mce, &params);
+    run_on_cpu(cs, do_inject_x86_mce, RUN_ON_CPU_HOST_PTR(&params));
     if (flags & MCE_INJECT_BROADCAST) {
         CPUState *other_cs;
 
@@ -1243,7 +1243,7 @@ void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
             if (other_cs == cs) {
                 continue;
             }
-            run_on_cpu(other_cs, do_inject_x86_mce, &params);
+            run_on_cpu(other_cs, do_inject_x86_mce, RUN_ON_CPU_HOST_PTR(&params));
         }
     }
 }

View File

@@ -183,7 +183,7 @@ static int kvm_get_tsc(CPUState *cs)
     return 0;
 }
 
-static inline void do_kvm_synchronize_tsc(CPUState *cpu, void *arg)
+static inline void do_kvm_synchronize_tsc(CPUState *cpu, run_on_cpu_data arg)
 {
     kvm_get_tsc(cpu);
 }
@@ -194,7 +194,7 @@ void kvm_synchronize_all_tsc(void)
 
     if (kvm_enabled()) {
         CPU_FOREACH(cpu) {
-            run_on_cpu(cpu, do_kvm_synchronize_tsc, NULL);
+            run_on_cpu(cpu, do_kvm_synchronize_tsc, RUN_ON_CPU_NULL);
         }
     }
 }

View File

@@ -164,7 +164,7 @@ static void s390_cpu_machine_reset_cb(void *opaque)
 {
     S390CPU *cpu = opaque;
 
-    run_on_cpu(CPU(cpu), s390_do_cpu_full_reset, NULL);
+    run_on_cpu(CPU(cpu), s390_do_cpu_full_reset, RUN_ON_CPU_NULL);
 }
 #endif
 
@@ -220,7 +220,7 @@ static void s390_cpu_realizefn(DeviceState *dev, Error **errp)
     s390_cpu_gdb_init(cs);
     qemu_init_vcpu(cs);
 #if !defined(CONFIG_USER_ONLY)
-    run_on_cpu(cs, s390_do_cpu_full_reset, NULL);
+    run_on_cpu(cs, s390_do_cpu_full_reset, RUN_ON_CPU_NULL);
 #else
     cpu_reset(cs);
 #endif

View File

@@ -502,13 +502,13 @@ static inline hwaddr decode_basedisp_s(CPUS390XState *env, uint32_t ipb,
 #define decode_basedisp_rs decode_basedisp_s
 
 /* helper functions for run_on_cpu() */
-static inline void s390_do_cpu_reset(CPUState *cs, void *arg)
+static inline void s390_do_cpu_reset(CPUState *cs, run_on_cpu_data arg)
 {
     S390CPUClass *scc = S390_CPU_GET_CLASS(cs);
 
     scc->cpu_reset(cs);
 }
 
-static inline void s390_do_cpu_full_reset(CPUState *cs, void *arg)
+static inline void s390_do_cpu_full_reset(CPUState *cs, run_on_cpu_data arg)
 {
     cpu_reset(cs);
 }

View File

@@ -1607,7 +1607,7 @@ int kvm_s390_cpu_restart(S390CPU *cpu)
 {
     SigpInfo si = {};
 
-    run_on_cpu(CPU(cpu), sigp_restart, &si);
+    run_on_cpu(CPU(cpu), sigp_restart, RUN_ON_CPU_HOST_PTR(&si));
     DPRINTF("DONE: KVM cpu restart: %p\n", &cpu->env);
     return 0;
 }
@@ -1683,31 +1683,31 @@ static int handle_sigp_single_dst(S390CPU *dst_cpu, uint8_t order,
 
     switch (order) {
     case SIGP_START:
-        run_on_cpu(CPU(dst_cpu), sigp_start, &si);
+        run_on_cpu(CPU(dst_cpu), sigp_start, RUN_ON_CPU_HOST_PTR(&si));
         break;
     case SIGP_STOP:
-        run_on_cpu(CPU(dst_cpu), sigp_stop, &si);
+        run_on_cpu(CPU(dst_cpu), sigp_stop, RUN_ON_CPU_HOST_PTR(&si));
         break;
     case SIGP_RESTART:
-        run_on_cpu(CPU(dst_cpu), sigp_restart, &si);
+        run_on_cpu(CPU(dst_cpu), sigp_restart, RUN_ON_CPU_HOST_PTR(&si));
         break;
     case SIGP_STOP_STORE_STATUS:
-        run_on_cpu(CPU(dst_cpu), sigp_stop_and_store_status, &si);
+        run_on_cpu(CPU(dst_cpu), sigp_stop_and_store_status, RUN_ON_CPU_HOST_PTR(&si));
        break;
     case SIGP_STORE_STATUS_ADDR:
-        run_on_cpu(CPU(dst_cpu), sigp_store_status_at_address, &si);
+        run_on_cpu(CPU(dst_cpu), sigp_store_status_at_address, RUN_ON_CPU_HOST_PTR(&si));
        break;
     case SIGP_STORE_ADTL_STATUS:
-        run_on_cpu(CPU(dst_cpu), sigp_store_adtl_status, &si);
+        run_on_cpu(CPU(dst_cpu), sigp_store_adtl_status, RUN_ON_CPU_HOST_PTR(&si));
         break;
     case SIGP_SET_PREFIX:
-        run_on_cpu(CPU(dst_cpu), sigp_set_prefix, &si);
+        run_on_cpu(CPU(dst_cpu), sigp_set_prefix, RUN_ON_CPU_HOST_PTR(&si));
         break;
     case SIGP_INITIAL_CPU_RESET:
-        run_on_cpu(CPU(dst_cpu), sigp_initial_cpu_reset, &si);
+        run_on_cpu(CPU(dst_cpu), sigp_initial_cpu_reset, RUN_ON_CPU_HOST_PTR(&si));
         break;
     case SIGP_CPU_RESET:
-        run_on_cpu(CPU(dst_cpu), sigp_cpu_reset, &si);
+        run_on_cpu(CPU(dst_cpu), sigp_cpu_reset, RUN_ON_CPU_HOST_PTR(&si));
         break;
     default:
         DPRINTF("KVM: unknown SIGP: 0x%x\n", order);

View File

@@ -126,7 +126,7 @@ static int modified_clear_reset(S390CPU *cpu)
     pause_all_vcpus();
     cpu_synchronize_all_states();
     CPU_FOREACH(t) {
-        run_on_cpu(t, s390_do_cpu_full_reset, NULL);
+        run_on_cpu(t, s390_do_cpu_full_reset, RUN_ON_CPU_NULL);
     }
     s390_cmma_reset();
     subsystem_reset();
@@ -145,7 +145,7 @@ static int load_normal_reset(S390CPU *cpu)
     pause_all_vcpus();
     cpu_synchronize_all_states();
     CPU_FOREACH(t) {
-        run_on_cpu(t, s390_do_cpu_reset, NULL);
+        run_on_cpu(t, s390_do_cpu_reset, RUN_ON_CPU_NULL);
     }
     s390_cmma_reset();
     subsystem_reset();

View File

@@ -726,6 +726,7 @@ static inline bool tcg_op_buf_full(void)
 
 /* pool based memory allocation */
 
+/* tb_lock must be held for tcg_malloc_internal. */
 void *tcg_malloc_internal(TCGContext *s, int size);
 void tcg_pool_reset(TCGContext *s);
 
@@ -733,6 +734,7 @@ void tb_lock(void);
 void tb_unlock(void);
 void tb_lock_reset(void);
 
+/* Called with tb_lock held.  */
 static inline void *tcg_malloc(int size)
 {
     TCGContext *s = &tcg_ctx;

View File

@@ -31,6 +31,7 @@
 #include "tcg.h"
 #if defined(CONFIG_USER_ONLY)
 #include "qemu.h"
+#include "exec/exec-all.h"
 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
 #include <sys/param.h>
 #if __FreeBSD_version >= 700104
@@ -56,16 +57,39 @@
 #include "qemu/timer.h"
 #include "exec/log.h"
 
-//#define DEBUG_TB_INVALIDATE
-//#define DEBUG_FLUSH
+/* #define DEBUG_TB_INVALIDATE */
+/* #define DEBUG_TB_FLUSH */
+/* #define DEBUG_LOCKING */
 /* make various TB consistency checks */
-//#define DEBUG_TB_CHECK
+/* #define DEBUG_TB_CHECK */
 
 #if !defined(CONFIG_USER_ONLY)
 /* TB consistency checks only implemented for usermode emulation.  */
 #undef DEBUG_TB_CHECK
 #endif
 
+/* Access to the various translations structures need to be serialised via locks
+ * for consistency. This is automatic for SoftMMU based system
+ * emulation due to its single threaded nature. In user-mode emulation
+ * access to the memory related structures are protected with the
+ * mmap_lock.
+ */
+#ifdef DEBUG_LOCKING
+#define DEBUG_MEM_LOCKS 1
+#else
+#define DEBUG_MEM_LOCKS 0
+#endif
+
+#ifdef CONFIG_SOFTMMU
+#define assert_memory_lock() do { /* nothing */ } while (0)
+#else
+#define assert_memory_lock() do {               \
+        if (DEBUG_MEM_LOCKS) {                  \
+            g_assert(have_mmap_lock());         \
+        }                                       \
+    } while (0)
+#endif
+
 #define SMC_BITMAP_USE_THRESHOLD 10
 
 typedef struct PageDesc {
@@ -173,6 +197,23 @@ void tb_lock_reset(void)
 #endif
 }
 
+#ifdef DEBUG_LOCKING
+#define DEBUG_TB_LOCKS 1
+#else
+#define DEBUG_TB_LOCKS 0
+#endif
+
+#ifdef CONFIG_SOFTMMU
+#define assert_tb_lock() do { /* nothing */ } while (0)
+#else
+#define assert_tb_lock() do {           \
+        if (DEBUG_TB_LOCKS) {           \
+            g_assert(have_tb_lock);     \
+        }                               \
+    } while (0)
+#endif
+
 static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);
 
 void cpu_gen_init(void)
@@ -267,7 +308,9 @@ static int encode_search(TranslationBlock *tb, uint8_t *block)
     return p - block;
 }
 
-/* The cpu state corresponding to 'searched_pc' is restored. */
+/* The cpu state corresponding to 'searched_pc' is restored.
+ * Called with tb_lock held.
+ */
 static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                      uintptr_t searched_pc)
 {
@@ -320,7 +363,9 @@ static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
 bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
 {
     TranslationBlock *tb;
+    bool r = false;
 
+    tb_lock();
     tb = tb_find_pc(retaddr);
     if (tb) {
         cpu_restore_state_from_tb(cpu, tb, retaddr);
@@ -329,9 +374,11 @@ bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
             tb_phys_invalidate(tb, -1);
             tb_free(tb);
         }
-        return true;
+        r = true;
     }
-    return false;
+    tb_unlock();
+
+    return r;
 }
 
 void page_size_init(void)
@@ -421,6 +468,7 @@ static void page_init(void)
 }
 
 /* If alloc=1:
+ * Called with tb_lock held for system emulation.
  * Called with mmap_lock held for user-mode emulation.
  */
 static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
@@ -429,6 +477,10 @@ static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
     void **lp;
     int i;
 
+    if (alloc) {
+        assert_memory_lock();
+    }
+
     /* Level 1.  Always allocated.  */
     lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));
 
@@ -785,12 +837,18 @@ bool tcg_enabled(void)
     return tcg_ctx.code_gen_buffer != NULL;
 }
 
-/* Allocate a new translation block. Flush the translation buffer if
-   too many translation blocks or too much generated code. */
+/*
+ * Allocate a new translation block. Flush the translation buffer if
+ * too many translation blocks or too much generated code.
+ *
+ * Called with tb_lock held.
+ */
 static TranslationBlock *tb_alloc(target_ulong pc)
 {
     TranslationBlock *tb;
 
+    assert_tb_lock();
+
     if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks) {
         return NULL;
     }
@@ -801,8 +859,11 @@ static TranslationBlock *tb_alloc(target_ulong pc)
     return tb;
 }
 
+/* Called with tb_lock held.  */
 void tb_free(TranslationBlock *tb)
 {
+    assert_tb_lock();
+
     /* In practice this is mostly used for single use temporary TB
        Ignore the hard cases and just back up if this TB happens to
        be the last one generated.  */
@@ -856,20 +917,18 @@ static void page_flush_tb(void)
 }
 
 /* flush all the translation blocks */
-static void do_tb_flush(CPUState *cpu, void *data)
+static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
 {
-    unsigned tb_flush_req = (unsigned) (uintptr_t) data;
-
     tb_lock();
 
-    /* If it's already been done on request of another CPU,
+    /* If it is already been done on request of another CPU,
      * just retry.
      */
-    if (tcg_ctx.tb_ctx.tb_flush_count != tb_flush_req) {
+    if (tcg_ctx.tb_ctx.tb_flush_count != tb_flush_count.host_int) {
         goto done;
     }
 
-#if defined(DEBUG_FLUSH)
+#if defined(DEBUG_TB_FLUSH)
     printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
            (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
            tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
@@ -906,8 +965,9 @@ done:
 void tb_flush(CPUState *cpu)
 {
     if (tcg_enabled()) {
-        uintptr_t tb_flush_req = atomic_mb_read(&tcg_ctx.tb_ctx.tb_flush_count);
-        async_safe_run_on_cpu(cpu, do_tb_flush, (void *) tb_flush_req);
+        unsigned tb_flush_count = atomic_mb_read(&tcg_ctx.tb_ctx.tb_flush_count);
+        async_safe_run_on_cpu(cpu, do_tb_flush,
+                              RUN_ON_CPU_HOST_INT(tb_flush_count));
     }
 }
 
@@ -925,6 +985,10 @@ do_tb_invalidate_check(struct qht *ht, void *p, uint32_t hash, void *userp)
     }
 }
 
+/* verify that all the pages have correct rights for code
+ *
+ * Called with tb_lock held.
+ */
 static void tb_invalidate_check(target_ulong address)
 {
     address &= TARGET_PAGE_MASK;
@@ -1029,7 +1093,10 @@ static inline void tb_jmp_unlink(TranslationBlock *tb)
     }
 }
 
-/* invalidate one TB */
+/* invalidate one TB
+ *
+ * Called with tb_lock held.
+ */
 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
 {
     CPUState *cpu;
@@ -1037,6 +1104,8 @@ void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
     uint32_t h;
     tb_page_addr_t phys_pc;
 
+    assert_tb_lock();
+
     atomic_set(&tb->invalid, true);
 
     /* remove the TB from the hash list */
@@ -1117,6 +1186,8 @@ static inline void tb_alloc_page(TranslationBlock *tb,
     bool page_already_protected;
 #endif
 
+    assert_memory_lock();
+
     tb->page_addr[n] = page_addr;
     p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
     tb->page_next[n] = p->first_tb;
@@ -1173,6 +1244,8 @@ static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
 {
     uint32_t h;
 
+    assert_memory_lock();
+
     /* add in the page list */
     tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
     if (phys_page2 != -1) {
@@ -1204,6 +1277,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
 #ifdef CONFIG_PROFILER
     int64_t ti;
 #endif
+    assert_memory_lock();
 
     phys_pc = get_page_addr_code(env, pc);
     if (use_icount && !(cflags & CF_IGNORE_ICOUNT)) {
@@ -1328,9 +1402,10 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
  * access: the virtual CPU will exit the current TB if code is modified inside
  * this TB.
  *
- * Called with mmap_lock held for user-mode emulation
+ * Called with mmap_lock held for user-mode emulation, grabs tb_lock
+ * Called with tb_lock held for system-mode emulation
  */
-void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
+static void tb_invalidate_phys_range_1(tb_page_addr_t start, tb_page_addr_t end)
 {
     while (start < end) {
         tb_invalidate_phys_page_range(start, end, 0);
@@ -1339,6 +1414,21 @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
     }
 }
 
+#ifdef CONFIG_SOFTMMU
+void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
+{
+    assert_tb_lock();
+    tb_invalidate_phys_range_1(start, end);
+}
+#else
+void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
+{
+    assert_memory_lock();
+    tb_lock();
+    tb_invalidate_phys_range_1(start, end);
+    tb_unlock();
+}
+#endif
+
 /*
  * Invalidate all TBs which intersect with the target physical address range
  * [start;end[. NOTE: start and end must refer to the *same* physical page.
@@ -1346,7 +1436,8 @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
  * access: the virtual CPU will exit the current TB if code is modified inside
  * this TB.
  *
- * Called with mmap_lock held for user-mode emulation
+ * Called with tb_lock/mmap_lock held for user-mode emulation
+ * Called with tb_lock held for system-mode emulation
  */
 void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                    int is_cpu_write_access)
@@ -1368,6 +1459,9 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
     uint32_t current_flags = 0;
 #endif /* TARGET_HAS_PRECISE_SMC */
 
+    assert_memory_lock();
+    assert_tb_lock();
+
     p = page_find(start >> TARGET_PAGE_BITS);
     if (!p) {
         return;
@@ -1457,13 +1554,17 @@ void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
                   (intptr_t)cpu_single_env->segs[R_CS].base);
     }
 #endif
+    assert_memory_lock();
+
     p = page_find(start >> TARGET_PAGE_BITS);
     if (!p) {
         return;
     }
     if (!p->code_bitmap &&
         ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
-        /* build code bitmap */
+        /* build code bitmap.  FIXME: writes should be protected by
+         * tb_lock, reads by tb_lock or RCU.
+         */
         build_page_bitmap(p);
     }
     if (p->code_bitmap) {
@@ -1502,11 +1603,15 @@ static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
     uint32_t current_flags = 0;
 #endif
 
+    assert_memory_lock();
+
     addr &= TARGET_PAGE_MASK;
     p = page_find(addr >> TARGET_PAGE_BITS);
     if (!p) {
         return false;
     }
+
+    tb_lock();
     tb = p->first_tb;
 #ifdef TARGET_HAS_PRECISE_SMC
     if (tb && pc != 0) {
@@ -1544,9 +1649,13 @@ static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
            modifying the memory. It will ensure that it cannot modify
            itself */
         tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
+        /* tb_lock will be reset after cpu_loop_exit_noexc longjmps
+         * back into the cpu_exec loop. */
         return true;
     }
 #endif
+
+    tb_unlock();
     return false;
 }
 #endif
@@ -1599,11 +1708,14 @@ void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
         return;
     }
     ram_addr = memory_region_get_ram_addr(mr) + addr;
+    tb_lock();
     tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
+    tb_unlock();
     rcu_read_unlock();
 }
 #endif /* !defined(CONFIG_USER_ONLY) */
 
+/* Called with tb_lock held.  */
 void tb_check_watchpoint(CPUState *cpu)
 {
     TranslationBlock *tb;
@@ -1640,6 +1752,7 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
     target_ulong pc, cs_base;
     uint32_t flags;
 
+    tb_lock();
     tb = tb_find_pc(retaddr);
     if (!tb) {
         cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
@@ -1691,11 +1804,16 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
     /* FIXME: In theory this could raise an exception.  In practice
        we have already translated the block once so it's probably ok.  */
     tb_gen_code(cpu, pc, cs_base, flags, cflags);
+
     /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
-       the first in the TB) then we end up generating a whole new TB and
-       repeating the fault, which is horribly inefficient.
-       Better would be to execute just this insn uncached, or generate a
-       second new TB. */
+     * the first in the TB) then we end up generating a whole new TB and
+     * repeating the fault, which is horribly inefficient.
+     * Better would be to execute just this insn uncached, or generate a
+     * second new TB.
+     *
+     * cpu_loop_exit_noexc will longjmp back to cpu_exec where the
+     * tb_lock gets reset.
+     */
     cpu_loop_exit_noexc(cpu);
 }
 
@@ -1759,6 +1877,8 @@ void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
     TranslationBlock *tb;
     struct qht_stats hst;
 
+    tb_lock();
+
     target_code_size = 0;
     max_target_code_size = 0;
     cross_page = 0;
@@ -1820,6 +1940,8 @@ void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
                 tcg_ctx.tb_ctx.tb_phys_invalidate_count);
     cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
     tcg_dump_info(f, cpu_fprintf);
+
+    tb_unlock();
 }
 
 void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf)
@@ -1972,6 +2094,7 @@ void page_set_flags(target_ulong start, target_ulong end, int flags)
     assert(end < ((target_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
 #endif
     assert(start < end);
+    assert_memory_lock();
 
     start = start & TARGET_PAGE_MASK;
     end = TARGET_PAGE_ALIGN(end);