Mirror of https://github.com/qemu/qemu.git (synced 2025-08-15 22:02:19 +00:00)
Generic CPUs / accelerators patch queue
- Reduce "exec/tb-flush.h" inclusion on linux-user - Consider alignment in bsd-user's mmap_find_vma() - Unify MMAP common user emulation API - Simplify cpu-target.c further - Prefer cached CpuClass over CPU_GET_CLASS() macro - Restrict CPU has_work() handlers to system emulation - Consolidate core exec/vCPU section in MAINTAINERS -----BEGIN PGP SIGNATURE----- iQIzBAABCAAdFiEE+qvnXhKRciHc/Wuy4+MsLN6twN4FAmfN1NgACgkQ4+MsLN6t wN6BghAAr5WBteo7OiNRyTA0Ilg+nOcTf6Re08CgWf/3TIMljEPq8o/tLQOxiFke AMktDlvYTyg4BWa5UdAKLpj7N7eyHcXrqv95A6Lg/xoGdlbqfYIpyX7/5h0pV70f z8WUj2/YMpoyXxvvAjw4YtUqHIWZhSTIBsFqJ6jALl6T7fouo8y++AWn/L+zY4tO /qqC6djJwufphPJWm2NvG+nvf+T60C+4JUc8CkjYQsyL3K3HpoAgzrgb/6VRtHob nKfORPboKEVSE1Z52GnmM9eMsZjbWOz9bkEN69yfRbHHQNuvsicok+V59PnWWDYd JX6cC5ukJUJlgYDKOj2jCg9OouoV4mRYRqYYWPtE8WkGLoeJu4mV1AEkVB7h3lTA BtUu4ohsrk/krfyB89apu8SqDPya6F4TDqJpGmAqlAG2UWJwrECuJV82uTDZql0R MqnCUYb7OQBkdb9CoqFi47jTYlqgdVLKekS8udXLCaqWggki8Nb1GVQ09LFyv3NF JlQVNNQG3D2V7JIDd2aXgr4PmhmV2oPv+HYxW+SPxU2qDHIU93krkKyi0TRk0mSC sWvJYBJcwbXlnMD5clad1bTLZrK5Csl5WkX8I0d0feqeRPSXC2YBTwL2/GgzT8qF n/2dxB3Lf+1LUl6KAv3kT9lONtqic0J9oBBcPUjVog2ikAD7+Vo= =TZua -----END PGP SIGNATURE----- Merge tag 'accel-cpus-20250309' of https://github.com/philmd/qemu into staging Generic CPUs / accelerators patch queue - Reduce "exec/tb-flush.h" inclusion on linux-user - Consider alignment in bsd-user's mmap_find_vma() - Unify MMAP common user emulation API - Simplify cpu-target.c further - Prefer cached CpuClass over CPU_GET_CLASS() macro - Restrict CPU has_work() handlers to system emulation - Consolidate core exec/vCPU section in MAINTAINERS # -----BEGIN PGP SIGNATURE----- # # iQIzBAABCAAdFiEE+qvnXhKRciHc/Wuy4+MsLN6twN4FAmfN1NgACgkQ4+MsLN6t # wN6BghAAr5WBteo7OiNRyTA0Ilg+nOcTf6Re08CgWf/3TIMljEPq8o/tLQOxiFke # AMktDlvYTyg4BWa5UdAKLpj7N7eyHcXrqv95A6Lg/xoGdlbqfYIpyX7/5h0pV70f # z8WUj2/YMpoyXxvvAjw4YtUqHIWZhSTIBsFqJ6jALl6T7fouo8y++AWn/L+zY4tO # /qqC6djJwufphPJWm2NvG+nvf+T60C+4JUc8CkjYQsyL3K3HpoAgzrgb/6VRtHob # nKfORPboKEVSE1Z52GnmM9eMsZjbWOz9bkEN69yfRbHHQNuvsicok+V59PnWWDYd # JX6cC5ukJUJlgYDKOj2jCg9OouoV4mRYRqYYWPtE8WkGLoeJu4mV1AEkVB7h3lTA # BtUu4ohsrk/krfyB89apu8SqDPya6F4TDqJpGmAqlAG2UWJwrECuJV82uTDZql0R # MqnCUYb7OQBkdb9CoqFi47jTYlqgdVLKekS8udXLCaqWggki8Nb1GVQ09LFyv3NF # JlQVNNQG3D2V7JIDd2aXgr4PmhmV2oPv+HYxW+SPxU2qDHIU93krkKyi0TRk0mSC # sWvJYBJcwbXlnMD5clad1bTLZrK5Csl5WkX8I0d0feqeRPSXC2YBTwL2/GgzT8qF # n/2dxB3Lf+1LUl6KAv3kT9lONtqic0J9oBBcPUjVog2ikAD7+Vo= # =TZua # -----END PGP SIGNATURE----- # gpg: Signature made Mon 10 Mar 2025 01:50:16 HKT # gpg: using RSA key FAABE75E12917221DCFD6BB2E3E32C2CDEADC0DE # gpg: Good signature from "Philippe Mathieu-Daudé (F4BUG) <f4bug@amsat.org>" [full] # Primary key fingerprint: FAAB E75E 1291 7221 DCFD 6BB2 E3E3 2C2C DEAD C0DE * tag 'accel-cpus-20250309' of https://github.com/philmd/qemu: (38 commits) MAINTAINERS: Consolidate core exec/vCPU handling section cpus: Remove CPUClass::has_work() handler target/xtensa: Move has_work() from CPUClass to SysemuCPUOps target/tricore: Move has_work() from CPUClass to SysemuCPUOps target/sparc: Move has_work() from CPUClass to SysemuCPUOps target/sh4: Move has_work() from CPUClass to SysemuCPUOps target/s390x: Move has_work() from CPUClass to SysemuCPUOps target/s390x: Restrict I/O handler installers to system emulation target/rx: Move has_work() from CPUClass to SysemuCPUOps target/riscv: Move has_work() from CPUClass to SysemuCPUOps target/ppc: Move has_work() from CPUClass to SysemuCPUOps target/openrisc: Move has_work() from CPUClass to SysemuCPUOps target/mips: Move 
has_work() from CPUClass to SysemuCPUOps target/microblaze: Move has_work() from CPUClass to SysemuCPUOps target/m68k: Move has_work() from CPUClass to SysemuCPUOps target/loongarch: Move has_work() from CPUClass to SysemuCPUOps target/i386: Move has_work() from CPUClass to SysemuCPUOps target/hppa: Move has_work() from CPUClass to SysemuCPUOps target/hexagon: Remove CPUClass:has_work() handler target/avr: Move has_work() from CPUClass to SysemuCPUOps ... Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
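Most of the per-file changes below apply one recurring pattern: drop the dynamic CPU_GET_CLASS() lookup and use the CPUClass pointer that is cached in CPUState::cc at instance-init time. The following standalone sketch uses simplified stand-in types and a hypothetical set_pc hook, not QEMU's actual declarations, just to illustrate the idea:

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

typedef struct CPUState CPUState;

/* Stand-in for QEMU's CPUClass: the per-class virtual method table. */
typedef struct CPUClass {
    void (*set_pc)(CPUState *cpu, uint64_t addr);
} CPUClass;

struct CPUState {
    CPUClass *cc;   /* cached once at instance init -- the hot-path handle */
    uint64_t pc;
};

static void demo_set_pc(CPUState *cpu, uint64_t addr)
{
    cpu->pc = addr;
}

/*
 * Before the series, helpers resolved the class dynamically on every call:
 *     CPUClass *cc = CPU_GET_CLASS(cpu);
 * After it, they simply dereference the cached pointer.
 */
static void cpu_set_pc(CPUState *cpu, uint64_t addr)
{
    cpu->cc->set_pc(cpu, addr);
}

int main(void)
{
    CPUClass cc = { .set_pc = demo_set_pc };
    CPUState cpu = { .cc = &cc, .pc = 0 };

    cpu_set_pc(&cpu, 0x1000);
    printf("pc = 0x%" PRIx64 "\n", cpu.pc);
    return 0;
}

The payoff is on hot paths such as cpu_set_pc() and the gdbstub helpers, where a QOM class lookup becomes a plain pointer dereference.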
commit 5136598e26

 MAINTAINERS | 14 lines changed
@@ -152,10 +152,7 @@ Overall TCG CPUs
M: Richard Henderson <richard.henderson@linaro.org>
R: Paolo Bonzini <pbonzini@redhat.com>
S: Maintained
F: system/cpus.c
F: system/watchpoint.c
F: cpu-common.c
F: cpu-target.c
F: page-vary-target.c
F: page-vary-common.c
F: accel/tcg/
@@ -165,15 +162,11 @@ F: util/cacheflush.c
F: scripts/decodetree.py
F: docs/devel/decodetree.rst
F: docs/devel/tcg*
F: include/exec/cpu*.h
F: include/exec/exec-all.h
F: include/exec/tb-flush.h
F: include/exec/target_long.h
F: include/exec/helper*.h
F: include/exec/helper*.h.inc
F: include/exec/helper-info.c.inc
F: include/exec/page-protection.h
F: include/system/cpus.h
F: include/system/tcg.h
F: include/accel/tcg/cpu-ops.h
F: host/include/*/host/cpuinfo.h
@@ -497,12 +490,19 @@ Overall
M: Richard Henderson <richard.henderson@linaro.org>
R: Paolo Bonzini <pbonzini@redhat.com>
S: Maintained
F: include/exec/cpu*.h
F: include/exec/exec-all.h
F: include/exec/target_long.h
F: include/qemu/accel.h
F: include/system/accel-*.h
F: include/system/cpus.h
F: include/accel/accel-cpu-target.h
F: accel/accel-*.c
F: accel/Makefile.objs
F: accel/stubs/Makefile.objs
F: cpu-common.c
F: cpu-target.c
F: system/cpus.c

Apple Silicon HVF CPUs
M: Alexander Graf <agraf@csgraf.de>
@@ -113,22 +113,20 @@ void accel_init_interfaces(AccelClass *ac)

void accel_cpu_instance_init(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->accel_cpu && cc->accel_cpu->cpu_instance_init) {
        cc->accel_cpu->cpu_instance_init(cpu);
    if (cpu->cc->accel_cpu && cpu->cc->accel_cpu->cpu_instance_init) {
        cpu->cc->accel_cpu->cpu_instance_init(cpu);
    }
}

bool accel_cpu_common_realize(CPUState *cpu, Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    AccelState *accel = current_accel();
    AccelClass *acc = ACCEL_GET_CLASS(accel);

    /* target specific realization */
    if (cc->accel_cpu && cc->accel_cpu->cpu_target_realize
        && !cc->accel_cpu->cpu_target_realize(cpu, errp)) {
    if (cpu->cc->accel_cpu
        && cpu->cc->accel_cpu->cpu_target_realize
        && !cpu->cc->accel_cpu->cpu_target_realize(cpu, errp)) {
        return false;
    }

@@ -121,10 +121,9 @@ static inline int xlat_gdb_type(CPUState *cpu, int gdbtype)
        [GDB_WATCHPOINT_ACCESS] = BP_GDB | BP_MEM_ACCESS,
    };

    CPUClass *cc = CPU_GET_CLASS(cpu);
    int cputype = xlat[gdbtype];

    if (cc->gdb_stop_before_watchpoint) {
    if (cpu->cc->gdb_stop_before_watchpoint) {
        cputype |= BP_STOP_BEFORE_ACCESS;
    }
    return cputype;
@@ -630,7 +630,7 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
     * to account for the re-execution of the branch.
     */
    n = 1;
    cc = CPU_GET_CLASS(cpu);
    cc = cpu->cc;
    if (cc->tcg_ops->io_recompile_replay_branch &&
        cc->tcg_ops->io_recompile_replay_branch(cpu, tb)) {
        cpu->neg.icount_decr.u16.low++;
@@ -68,7 +68,6 @@ int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len)
void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
                          MemTxAttrs attrs, int flags, uintptr_t ra)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    assert(tcg_enabled());
@@ -84,9 +83,9 @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
        return;
    }

    if (cc->tcg_ops->adjust_watchpoint_address) {
    if (cpu->cc->tcg_ops->adjust_watchpoint_address) {
        /* this is currently used only by ARM BE32 */
        addr = cc->tcg_ops->adjust_watchpoint_address(cpu, addr, len);
        addr = cpu->cc->tcg_ops->adjust_watchpoint_address(cpu, addr, len);
    }

    assert((flags & ~BP_MEM_ACCESS) == 0);
@@ -118,8 +117,8 @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
            wp->hitattrs = attrs;

            if (wp->flags & BP_CPU
                && cc->tcg_ops->debug_check_watchpoint
                && !cc->tcg_ops->debug_check_watchpoint(cpu, wp)) {
                && cpu->cc->tcg_ops->debug_check_watchpoint
                && !cpu->cc->tcg_ops->debug_check_watchpoint(cpu, wp)) {
                wp->flags &= ~BP_WATCHPOINT_HIT;
                continue;
            }
@@ -370,9 +370,11 @@ static inline abi_long do_bsd_shmat(int shmid, abi_ulong shmaddr, int shmflg)
    if (shmaddr) {
        host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
    } else {
        abi_ulong alignment;
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
        alignment = 0; /* alignment above page size not required */
        mmap_start = mmap_find_vma(0, shm_info.shm_segsz, alignment);

        if (mmap_start == -1) {
            return -TARGET_ENOMEM;
@@ -275,8 +275,7 @@ static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
 * It must be called with mmap_lock() held.
 * Return -1 if error.
 */
static abi_ulong mmap_find_vma_aligned(abi_ulong start, abi_ulong size,
                                       abi_ulong alignment)
abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong alignment)
{
    void *ptr, *prev;
    abi_ulong addr;
@@ -395,11 +394,6 @@ static abi_ulong mmap_find_vma_aligned(abi_ulong start, abi_ulong size,
    }
}

abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
{
    return mmap_find_vma_aligned(start, size, 0);
}

/* NOTE: all the constants are the HOST ones */
abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
                     int flags, int fd, off_t offset)
@@ -489,13 +483,12 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
     * before we truncate the length for mapping files below.
     */
    if (!(flags & MAP_FIXED)) {
        abi_ulong alignment;

        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);
        if ((flags & MAP_ALIGNMENT_MASK) != 0)
            start = mmap_find_vma_aligned(real_start, host_len,
                (flags & MAP_ALIGNMENT_MASK) >> MAP_ALIGNMENT_SHIFT);
        else
            start = mmap_find_vma(real_start, host_len);
        alignment = (flags & MAP_ALIGNMENT_MASK) >> MAP_ALIGNMENT_SHIFT;
        start = mmap_find_vma(real_start, host_len, alignment);
        if (start == (abi_ulong)-1) {
            errno = ENOMEM;
            goto fail;
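bsd-user's target_mmap() now always derives the requested alignment from the MAP_ALIGNED() bits in flags and passes it straight to the unified mmap_find_vma(). A small standalone sketch of that flag decoding and address rounding follows; the constants and helper names here are illustrative assumptions, not the definitions from the bsd-user headers:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t abi_ulong;

/* Demo values only; the real definitions live in bsd-user's mman headers. */
#define MAP_ALIGNMENT_SHIFT 24
#define MAP_ALIGNMENT_MASK  ((unsigned long)0xff << MAP_ALIGNMENT_SHIFT)
#define PAGE_SIZE_DEMO      0x1000UL

/* BSD-style MAP_ALIGNED(n): log2 of the alignment encoded in the flags. */
static abi_ulong alignment_from_flags(unsigned long flags)
{
    return (flags & MAP_ALIGNMENT_MASK) >> MAP_ALIGNMENT_SHIFT;
}

/* Round a candidate address up to the requested alignment (0 = page only). */
static abi_ulong align_addr(abi_ulong addr, abi_ulong log2_align)
{
    abi_ulong align = log2_align ? (abi_ulong)1 << log2_align : PAGE_SIZE_DEMO;
    return (addr + align - 1) & ~(align - 1);
}

int main(void)
{
    unsigned long flags = 16UL << MAP_ALIGNMENT_SHIFT;   /* MAP_ALIGNED(16) */
    abi_ulong log2_align = alignment_from_flags(flags);

    printf("log2 align = %llu, 0x12345 -> 0x%llx\n",
           (unsigned long long)log2_align,
           (unsigned long long)align_addr(0x12345, log2_align));
    return 0;
}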
@@ -32,6 +32,7 @@
extern char **environ;

#include "user/thunk.h"
#include "user/mmap.h"
#include "target_arch.h"
#include "syscall_defs.h"
#include "target_syscall.h"
@@ -233,19 +234,8 @@ void print_taken_signal(int target_signum, const target_siginfo_t *tinfo);
extern int do_strace;

/* mmap.c */
int target_mprotect(abi_ulong start, abi_ulong len, int prot);
abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
                     int flags, int fd, off_t offset);
int target_munmap(abi_ulong start, abi_ulong len);
abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
                       abi_ulong new_size, unsigned long flags,
                       abi_ulong new_addr);
int target_msync(abi_ulong start, abi_ulong len, int flags);
extern abi_ulong mmap_next_start;
abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size);
void mmap_reserve(abi_ulong start, abi_ulong size);
void TSA_NO_TSA mmap_fork_start(void);
void TSA_NO_TSA mmap_fork_end(int child);

/* main.c */
extern char qemu_proc_pathname[];
@@ -1034,7 +1034,7 @@ void process_pending_signals(CPUArchState *env)
void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr,
                           MMUAccessType access_type, bool maperr, uintptr_t ra)
{
    const TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops;
    const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;

    if (tcg_ops->record_sigsegv) {
        tcg_ops->record_sigsegv(cpu, addr, access_type, maperr, ra);
@@ -1050,7 +1050,7 @@ void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr,
void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr,
                          MMUAccessType access_type, uintptr_t ra)
{
    const TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops;
    const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;

    if (tcg_ops->record_sigbus) {
        tcg_ops->record_sigbus(cpu, addr, access_type, ra);
 cpu-common.c | 10 lines changed
@@ -388,11 +388,10 @@ void process_queued_cpu_work(CPUState *cpu)
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUBreakpoint *bp;

    if (cc->gdb_adjust_breakpoint) {
        pc = cc->gdb_adjust_breakpoint(cpu, pc);
    if (cpu->cc->gdb_adjust_breakpoint) {
        pc = cpu->cc->gdb_adjust_breakpoint(cpu, pc);
    }

    bp = g_malloc(sizeof(*bp));
@@ -418,11 +417,10 @@ int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
/* Remove a specific breakpoint. */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUBreakpoint *bp;

    if (cc->gdb_adjust_breakpoint) {
        pc = cc->gdb_adjust_breakpoint(cpu, pc);
    if (cpu->cc->gdb_adjust_breakpoint) {
        pc = cpu->cc->gdb_adjust_breakpoint(cpu, pc);
    }

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
 cpu-target.c | 146 lines changed
@ -21,159 +21,13 @@
|
||||
#include "qapi/error.h"
|
||||
#include "qemu/error-report.h"
|
||||
#include "qemu/qemu-print.h"
|
||||
#include "migration/vmstate.h"
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
#include "hw/core/sysemu-cpu-ops.h"
|
||||
#endif
|
||||
#include "system/accel-ops.h"
|
||||
#include "system/cpus.h"
|
||||
#include "system/tcg.h"
|
||||
#include "exec/tswap.h"
|
||||
#include "exec/replay-core.h"
|
||||
#include "exec/cpu-common.h"
|
||||
#include "exec/cputlb.h"
|
||||
#include "exec/exec-all.h"
|
||||
#include "exec/tb-flush.h"
|
||||
#include "exec/log.h"
|
||||
#include "accel/accel-cpu-target.h"
|
||||
#include "trace/trace-root.h"
|
||||
#include "qemu/accel.h"
|
||||
#include "hw/core/cpu.h"
|
||||
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
static int cpu_common_post_load(void *opaque, int version_id)
|
||||
{
|
||||
if (tcg_enabled()) {
|
||||
CPUState *cpu = opaque;
|
||||
|
||||
/*
|
||||
* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
|
||||
* version_id is increased.
|
||||
*/
|
||||
cpu->interrupt_request &= ~0x01;
|
||||
|
||||
tlb_flush(cpu);
|
||||
|
||||
/*
|
||||
* loadvm has just updated the content of RAM, bypassing the
|
||||
* usual mechanisms that ensure we flush TBs for writes to
|
||||
* memory we've translated code from. So we must flush all TBs,
|
||||
* which will now be stale.
|
||||
*/
|
||||
tb_flush(cpu);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int cpu_common_pre_load(void *opaque)
|
||||
{
|
||||
CPUState *cpu = opaque;
|
||||
|
||||
cpu->exception_index = -1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool cpu_common_exception_index_needed(void *opaque)
|
||||
{
|
||||
CPUState *cpu = opaque;
|
||||
|
||||
return tcg_enabled() && cpu->exception_index != -1;
|
||||
}
|
||||
|
||||
static const VMStateDescription vmstate_cpu_common_exception_index = {
|
||||
.name = "cpu_common/exception_index",
|
||||
.version_id = 1,
|
||||
.minimum_version_id = 1,
|
||||
.needed = cpu_common_exception_index_needed,
|
||||
.fields = (const VMStateField[]) {
|
||||
VMSTATE_INT32(exception_index, CPUState),
|
||||
VMSTATE_END_OF_LIST()
|
||||
}
|
||||
};
|
||||
|
||||
static bool cpu_common_crash_occurred_needed(void *opaque)
|
||||
{
|
||||
CPUState *cpu = opaque;
|
||||
|
||||
return cpu->crash_occurred;
|
||||
}
|
||||
|
||||
static const VMStateDescription vmstate_cpu_common_crash_occurred = {
|
||||
.name = "cpu_common/crash_occurred",
|
||||
.version_id = 1,
|
||||
.minimum_version_id = 1,
|
||||
.needed = cpu_common_crash_occurred_needed,
|
||||
.fields = (const VMStateField[]) {
|
||||
VMSTATE_BOOL(crash_occurred, CPUState),
|
||||
VMSTATE_END_OF_LIST()
|
||||
}
|
||||
};
|
||||
|
||||
const VMStateDescription vmstate_cpu_common = {
|
||||
.name = "cpu_common",
|
||||
.version_id = 1,
|
||||
.minimum_version_id = 1,
|
||||
.pre_load = cpu_common_pre_load,
|
||||
.post_load = cpu_common_post_load,
|
||||
.fields = (const VMStateField[]) {
|
||||
VMSTATE_UINT32(halted, CPUState),
|
||||
VMSTATE_UINT32(interrupt_request, CPUState),
|
||||
VMSTATE_END_OF_LIST()
|
||||
},
|
||||
.subsections = (const VMStateDescription * const []) {
|
||||
&vmstate_cpu_common_exception_index,
|
||||
&vmstate_cpu_common_crash_occurred,
|
||||
NULL
|
||||
}
|
||||
};
|
||||
#endif
|
||||
|
||||
bool cpu_exec_realizefn(CPUState *cpu, Error **errp)
|
||||
{
|
||||
if (!accel_cpu_common_realize(cpu, errp)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
/* Wait until cpu initialization complete before exposing cpu. */
|
||||
cpu_list_add(cpu);
|
||||
|
||||
#ifdef CONFIG_USER_ONLY
|
||||
assert(qdev_get_vmsd(DEVICE(cpu)) == NULL ||
|
||||
qdev_get_vmsd(DEVICE(cpu))->unmigratable);
|
||||
#else
|
||||
if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
|
||||
vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
|
||||
}
|
||||
if (cpu->cc->sysemu_ops->legacy_vmsd != NULL) {
|
||||
vmstate_register(NULL, cpu->cpu_index, cpu->cc->sysemu_ops->legacy_vmsd, cpu);
|
||||
}
|
||||
#endif /* CONFIG_USER_ONLY */
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void cpu_exec_unrealizefn(CPUState *cpu)
|
||||
{
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
CPUClass *cc = CPU_GET_CLASS(cpu);
|
||||
|
||||
if (cc->sysemu_ops->legacy_vmsd != NULL) {
|
||||
vmstate_unregister(NULL, cc->sysemu_ops->legacy_vmsd, cpu);
|
||||
}
|
||||
if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
|
||||
vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
|
||||
}
|
||||
#endif
|
||||
|
||||
cpu_list_remove(cpu);
|
||||
/*
|
||||
* Now that the vCPU has been removed from the RCU list, we can call
|
||||
* accel_cpu_common_unrealize, which may free fields using call_rcu.
|
||||
*/
|
||||
accel_cpu_common_unrealize(cpu);
|
||||
}
|
||||
|
||||
char *cpu_model_from_type(const char *typename)
|
||||
{
|
||||
|
@ -62,9 +62,8 @@ void disas_initialize_debug_target(CPUDebug *s, CPUState *cpu)
|
||||
s->info.print_address_func = print_address;
|
||||
s->info.endian = BFD_ENDIAN_UNKNOWN;
|
||||
|
||||
CPUClass *cc = CPU_GET_CLASS(cpu);
|
||||
if (cc->disas_set_info) {
|
||||
cc->disas_set_info(cpu, &s->info);
|
||||
if (cpu->cc->disas_set_info) {
|
||||
cpu->cc->disas_set_info(cpu, &s->info);
|
||||
g_assert(s->info.endian != BFD_ENDIAN_UNKNOWN);
|
||||
}
|
||||
}
|
||||
|
@ -354,7 +354,6 @@ static const char *get_feature_xml(const char *p, const char **newp,
|
||||
GDBProcess *process)
|
||||
{
|
||||
CPUState *cpu = gdb_get_first_cpu_in_process(process);
|
||||
CPUClass *cc = CPU_GET_CLASS(cpu);
|
||||
GDBRegisterState *r;
|
||||
size_t len;
|
||||
|
||||
@ -377,11 +376,11 @@ static const char *get_feature_xml(const char *p, const char **newp,
|
||||
"<!DOCTYPE target SYSTEM \"gdb-target.dtd\">"
|
||||
"<target>"));
|
||||
|
||||
if (cc->gdb_arch_name) {
|
||||
if (cpu->cc->gdb_arch_name) {
|
||||
g_ptr_array_add(
|
||||
xml,
|
||||
g_markup_printf_escaped("<architecture>%s</architecture>",
|
||||
cc->gdb_arch_name(cpu)));
|
||||
cpu->cc->gdb_arch_name(cpu)));
|
||||
}
|
||||
for (guint i = 0; i < cpu->gdb_regs->len; i++) {
|
||||
r = &g_array_index(cpu->gdb_regs, GDBRegisterState, i);
|
||||
@ -520,11 +519,10 @@ GArray *gdb_get_register_list(CPUState *cpu)
|
||||
|
||||
int gdb_read_register(CPUState *cpu, GByteArray *buf, int reg)
|
||||
{
|
||||
CPUClass *cc = CPU_GET_CLASS(cpu);
|
||||
GDBRegisterState *r;
|
||||
|
||||
if (reg < cc->gdb_num_core_regs) {
|
||||
return cc->gdb_read_register(cpu, buf, reg);
|
||||
if (reg < cpu->cc->gdb_num_core_regs) {
|
||||
return cpu->cc->gdb_read_register(cpu, buf, reg);
|
||||
}
|
||||
|
||||
for (guint i = 0; i < cpu->gdb_regs->len; i++) {
|
||||
@ -538,11 +536,10 @@ int gdb_read_register(CPUState *cpu, GByteArray *buf, int reg)
|
||||
|
||||
static int gdb_write_register(CPUState *cpu, uint8_t *mem_buf, int reg)
|
||||
{
|
||||
CPUClass *cc = CPU_GET_CLASS(cpu);
|
||||
GDBRegisterState *r;
|
||||
|
||||
if (reg < cc->gdb_num_core_regs) {
|
||||
return cc->gdb_write_register(cpu, mem_buf, reg);
|
||||
if (reg < cpu->cc->gdb_num_core_regs) {
|
||||
return cpu->cc->gdb_write_register(cpu, mem_buf, reg);
|
||||
}
|
||||
|
||||
for (guint i = 0; i < cpu->gdb_regs->len; i++) {
|
||||
@ -570,7 +567,7 @@ static void gdb_register_feature(CPUState *cpu, int base_reg,
|
||||
|
||||
void gdb_init_cpu(CPUState *cpu)
|
||||
{
|
||||
CPUClass *cc = CPU_GET_CLASS(cpu);
|
||||
CPUClass *cc = cpu->cc;
|
||||
const GDBFeature *feature;
|
||||
|
||||
cpu->gdb_regs = g_array_new(false, false, sizeof(GDBRegisterState));
|
||||
@ -1646,11 +1643,8 @@ void gdb_extend_qsupported_features(char *qflags)
|
||||
|
||||
static void handle_query_supported(GArray *params, void *user_ctx)
|
||||
{
|
||||
CPUClass *cc;
|
||||
|
||||
g_string_printf(gdbserver_state.str_buf, "PacketSize=%x", MAX_PACKET_LENGTH);
|
||||
cc = CPU_GET_CLASS(first_cpu);
|
||||
if (cc->gdb_core_xml_file) {
|
||||
if (first_cpu->cc->gdb_core_xml_file) {
|
||||
g_string_append(gdbserver_state.str_buf, ";qXfer:features:read+");
|
||||
}
|
||||
|
||||
@ -1697,7 +1691,6 @@ static void handle_query_supported(GArray *params, void *user_ctx)
|
||||
static void handle_query_xfer_features(GArray *params, void *user_ctx)
|
||||
{
|
||||
GDBProcess *process;
|
||||
CPUClass *cc;
|
||||
unsigned long len, total_len, addr;
|
||||
const char *xml;
|
||||
const char *p;
|
||||
@ -1708,8 +1701,7 @@ static void handle_query_xfer_features(GArray *params, void *user_ctx)
|
||||
}
|
||||
|
||||
process = gdb_get_cpu_process(gdbserver_state.g_cpu);
|
||||
cc = CPU_GET_CLASS(gdbserver_state.g_cpu);
|
||||
if (!cc->gdb_core_xml_file) {
|
||||
if (!gdbserver_state.g_cpu->cc->gdb_core_xml_file) {
|
||||
gdb_put_packet("");
|
||||
return;
|
||||
}
|
||||
|
@ -456,8 +456,6 @@ static int phy_memory_mode;
|
||||
int gdb_target_memory_rw_debug(CPUState *cpu, hwaddr addr,
|
||||
uint8_t *buf, int len, bool is_write)
|
||||
{
|
||||
CPUClass *cc;
|
||||
|
||||
if (phy_memory_mode) {
|
||||
if (is_write) {
|
||||
cpu_physical_memory_write(addr, buf, len);
|
||||
@ -467,9 +465,8 @@ int gdb_target_memory_rw_debug(CPUState *cpu, hwaddr addr,
|
||||
return 0;
|
||||
}
|
||||
|
||||
cc = CPU_GET_CLASS(cpu);
|
||||
if (cc->memory_rw_debug) {
|
||||
return cc->memory_rw_debug(cpu, addr, buf, len, is_write);
|
||||
if (cpu->cc->memory_rw_debug) {
|
||||
return cpu->cc->memory_rw_debug(cpu, addr, buf, len, is_write);
|
||||
}
|
||||
|
||||
return cpu_memory_rw_debug(cpu, addr, buf, len, is_write);
|
||||
|
@ -233,10 +233,8 @@ void gdb_handle_query_offsets(GArray *params, void *user_ctx)
|
||||
static inline int target_memory_rw_debug(CPUState *cpu, target_ulong addr,
|
||||
uint8_t *buf, int len, bool is_write)
|
||||
{
|
||||
CPUClass *cc;
|
||||
cc = CPU_GET_CLASS(cpu);
|
||||
if (cc->memory_rw_debug) {
|
||||
return cc->memory_rw_debug(cpu, addr, buf, len, is_write);
|
||||
if (cpu->cc->memory_rw_debug) {
|
||||
return cpu->cc->memory_rw_debug(cpu, addr, buf, len, is_write);
|
||||
}
|
||||
return cpu_memory_rw_debug(cpu, addr, buf, len, is_write);
|
||||
}
|
||||
|
@ -743,11 +743,8 @@ int gdb_continue_partial(char *newstates)
|
||||
int gdb_target_memory_rw_debug(CPUState *cpu, hwaddr addr,
|
||||
uint8_t *buf, int len, bool is_write)
|
||||
{
|
||||
CPUClass *cc;
|
||||
|
||||
cc = CPU_GET_CLASS(cpu);
|
||||
if (cc->memory_rw_debug) {
|
||||
return cc->memory_rw_debug(cpu, addr, buf, len, is_write);
|
||||
if (cpu->cc->memory_rw_debug) {
|
||||
return cpu->cc->memory_rw_debug(cpu, addr, buf, len, is_write);
|
||||
}
|
||||
return cpu_memory_rw_debug(cpu, addr, buf, len, is_write);
|
||||
}
|
||||
|
@ -235,8 +235,8 @@ void cpu_hotplug_hw_init(MemoryRegion *as, Object *owner,
|
||||
|
||||
static AcpiCpuStatus *get_cpu_status(CPUHotplugState *cpu_st, DeviceState *dev)
|
||||
{
|
||||
CPUClass *k = CPU_GET_CLASS(dev);
|
||||
uint64_t cpu_arch_id = k->get_arch_id(CPU(dev));
|
||||
CPUState *cpu = CPU(dev);
|
||||
uint64_t cpu_arch_id = cpu->cc->get_arch_id(cpu);
|
||||
int i;
|
||||
|
||||
for (i = 0; i < cpu_st->dev_count; i++) {
|
||||
|
@ -62,10 +62,9 @@ static const MemoryRegionOps AcpiCpuHotplug_ops = {
|
||||
static void acpi_set_cpu_present_bit(AcpiCpuHotplug *g, CPUState *cpu,
|
||||
bool *swtchd_to_modern)
|
||||
{
|
||||
CPUClass *k = CPU_GET_CLASS(cpu);
|
||||
int64_t cpu_id;
|
||||
|
||||
cpu_id = k->get_arch_id(cpu);
|
||||
cpu_id = cpu->cc->get_arch_id(cpu);
|
||||
if ((cpu_id / 8) >= ACPI_GPE_PROC_LEN) {
|
||||
object_property_set_bool(g->device, "cpu-hotplug-legacy", false,
|
||||
&error_abort);
|
||||
|
@ -40,9 +40,7 @@ CPUState *cpu_by_arch_id(int64_t id)
|
||||
CPUState *cpu;
|
||||
|
||||
CPU_FOREACH(cpu) {
|
||||
CPUClass *cc = CPU_GET_CLASS(cpu);
|
||||
|
||||
if (cc->get_arch_id(cpu) == id) {
|
||||
if (cpu->cc->get_arch_id(cpu) == id) {
|
||||
return cpu;
|
||||
}
|
||||
}
|
||||
@ -101,11 +99,9 @@ static int cpu_common_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg)
|
||||
|
||||
void cpu_dump_state(CPUState *cpu, FILE *f, int flags)
|
||||
{
|
||||
CPUClass *cc = CPU_GET_CLASS(cpu);
|
||||
|
||||
if (cc->dump_state) {
|
||||
if (cpu->cc->dump_state) {
|
||||
cpu_synchronize_state(cpu);
|
||||
cc->dump_state(cpu, f, flags);
|
||||
cpu->cc->dump_state(cpu, f, flags);
|
||||
}
|
||||
}
|
||||
|
||||
@ -119,11 +115,10 @@ void cpu_reset(CPUState *cpu)
|
||||
static void cpu_common_reset_hold(Object *obj, ResetType type)
|
||||
{
|
||||
CPUState *cpu = CPU(obj);
|
||||
CPUClass *cc = CPU_GET_CLASS(cpu);
|
||||
|
||||
if (qemu_loglevel_mask(CPU_LOG_RESET)) {
|
||||
qemu_log("CPU Reset (CPU %d)\n", cpu->cpu_index);
|
||||
log_cpu_state(cpu, cc->reset_dump_flags);
|
||||
log_cpu_state(cpu, cpu->cc->reset_dump_flags);
|
||||
}
|
||||
|
||||
cpu->interrupt_request = 0;
|
||||
@ -139,11 +134,6 @@ static void cpu_common_reset_hold(Object *obj, ResetType type)
|
||||
cpu_exec_reset_hold(cpu);
|
||||
}
|
||||
|
||||
static bool cpu_common_has_work(CPUState *cs)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model)
|
||||
{
|
||||
ObjectClass *oc;
|
||||
@ -193,6 +183,20 @@ static void cpu_common_parse_features(const char *typename, char *features,
|
||||
}
|
||||
}
|
||||
|
||||
bool cpu_exec_realizefn(CPUState *cpu, Error **errp)
|
||||
{
|
||||
if (!accel_cpu_common_realize(cpu, errp)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
/* Wait until cpu initialization complete before exposing cpu. */
|
||||
cpu_list_add(cpu);
|
||||
|
||||
cpu_vmstate_register(cpu);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static void cpu_common_realizefn(DeviceState *dev, Error **errp)
|
||||
{
|
||||
CPUState *cpu = CPU(dev);
|
||||
@ -234,10 +238,24 @@ static void cpu_common_unrealizefn(DeviceState *dev)
|
||||
cpu_exec_unrealizefn(cpu);
|
||||
}
|
||||
|
||||
void cpu_exec_unrealizefn(CPUState *cpu)
|
||||
{
|
||||
cpu_vmstate_unregister(cpu);
|
||||
|
||||
cpu_list_remove(cpu);
|
||||
/*
|
||||
* Now that the vCPU has been removed from the RCU list, we can call
|
||||
* accel_cpu_common_unrealize, which may free fields using call_rcu.
|
||||
*/
|
||||
accel_cpu_common_unrealize(cpu);
|
||||
}
|
||||
|
||||
static void cpu_common_initfn(Object *obj)
|
||||
{
|
||||
CPUState *cpu = CPU(obj);
|
||||
|
||||
cpu_exec_class_post_init(CPU_GET_CLASS(obj));
|
||||
|
||||
/* cache the cpu class for the hotpath */
|
||||
cpu->cc = CPU_GET_CLASS(cpu);
|
||||
|
||||
@ -310,7 +328,6 @@ static void cpu_common_class_init(ObjectClass *klass, void *data)
|
||||
|
||||
k->parse_features = cpu_common_parse_features;
|
||||
k->get_arch_id = cpu_common_get_arch_id;
|
||||
k->has_work = cpu_common_has_work;
|
||||
k->gdb_read_register = cpu_common_gdb_read_register;
|
||||
k->gdb_write_register = cpu_common_gdb_write_register;
|
||||
set_bit(DEVICE_CATEGORY_CPU, dc->categories);
|
||||
|
@ -21,18 +21,25 @@
|
||||
#include "qemu/osdep.h"
|
||||
#include "qapi/error.h"
|
||||
#include "exec/address-spaces.h"
|
||||
#include "exec/cputlb.h"
|
||||
#include "exec/memory.h"
|
||||
#include "exec/tb-flush.h"
|
||||
#include "exec/tswap.h"
|
||||
#include "hw/qdev-core.h"
|
||||
#include "hw/qdev-properties.h"
|
||||
#include "hw/core/sysemu-cpu-ops.h"
|
||||
#include "migration/vmstate.h"
|
||||
#include "system/tcg.h"
|
||||
|
||||
bool cpu_has_work(CPUState *cpu)
|
||||
{
|
||||
return cpu->cc->sysemu_ops->has_work(cpu);
|
||||
}
|
||||
|
||||
bool cpu_paging_enabled(const CPUState *cpu)
|
||||
{
|
||||
CPUClass *cc = CPU_GET_CLASS(cpu);
|
||||
|
||||
if (cc->sysemu_ops->get_paging_enabled) {
|
||||
return cc->sysemu_ops->get_paging_enabled(cpu);
|
||||
if (cpu->cc->sysemu_ops->get_paging_enabled) {
|
||||
return cpu->cc->sysemu_ops->get_paging_enabled(cpu);
|
||||
}
|
||||
|
||||
return false;
|
||||
@ -41,10 +48,8 @@ bool cpu_paging_enabled(const CPUState *cpu)
|
||||
bool cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
|
||||
Error **errp)
|
||||
{
|
||||
CPUClass *cc = CPU_GET_CLASS(cpu);
|
||||
|
||||
if (cc->sysemu_ops->get_memory_mapping) {
|
||||
return cc->sysemu_ops->get_memory_mapping(cpu, list, errp);
|
||||
if (cpu->cc->sysemu_ops->get_memory_mapping) {
|
||||
return cpu->cc->sysemu_ops->get_memory_mapping(cpu, list, errp);
|
||||
}
|
||||
|
||||
error_setg(errp, "Obtaining memory mappings is unsupported on this CPU.");
|
||||
@ -54,15 +59,15 @@ bool cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
|
||||
hwaddr cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
|
||||
MemTxAttrs *attrs)
|
||||
{
|
||||
CPUClass *cc = CPU_GET_CLASS(cpu);
|
||||
hwaddr paddr;
|
||||
|
||||
if (cc->sysemu_ops->get_phys_page_attrs_debug) {
|
||||
paddr = cc->sysemu_ops->get_phys_page_attrs_debug(cpu, addr, attrs);
|
||||
if (cpu->cc->sysemu_ops->get_phys_page_attrs_debug) {
|
||||
paddr = cpu->cc->sysemu_ops->get_phys_page_attrs_debug(cpu, addr,
|
||||
attrs);
|
||||
} else {
|
||||
/* Fallback for CPUs which don't implement the _attrs_ hook */
|
||||
*attrs = MEMTXATTRS_UNSPECIFIED;
|
||||
paddr = cc->sysemu_ops->get_phys_page_debug(cpu, addr);
|
||||
paddr = cpu->cc->sysemu_ops->get_phys_page_debug(cpu, addr);
|
||||
}
|
||||
/* Indicate that this is a debug access. */
|
||||
attrs->debug = 1;
|
||||
@ -90,64 +95,53 @@ int cpu_asidx_from_attrs(CPUState *cpu, MemTxAttrs attrs)
|
||||
int cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
|
||||
void *opaque)
|
||||
{
|
||||
CPUClass *cc = CPU_GET_CLASS(cpu);
|
||||
|
||||
if (!cc->sysemu_ops->write_elf32_qemunote) {
|
||||
if (!cpu->cc->sysemu_ops->write_elf32_qemunote) {
|
||||
return 0;
|
||||
}
|
||||
return (*cc->sysemu_ops->write_elf32_qemunote)(f, cpu, opaque);
|
||||
return (*cpu->cc->sysemu_ops->write_elf32_qemunote)(f, cpu, opaque);
|
||||
}
|
||||
|
||||
int cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
|
||||
int cpuid, void *opaque)
|
||||
{
|
||||
CPUClass *cc = CPU_GET_CLASS(cpu);
|
||||
|
||||
if (!cc->sysemu_ops->write_elf32_note) {
|
||||
if (!cpu->cc->sysemu_ops->write_elf32_note) {
|
||||
return -1;
|
||||
}
|
||||
return (*cc->sysemu_ops->write_elf32_note)(f, cpu, cpuid, opaque);
|
||||
return (*cpu->cc->sysemu_ops->write_elf32_note)(f, cpu, cpuid, opaque);
|
||||
}
|
||||
|
||||
int cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
|
||||
void *opaque)
|
||||
{
|
||||
CPUClass *cc = CPU_GET_CLASS(cpu);
|
||||
|
||||
if (!cc->sysemu_ops->write_elf64_qemunote) {
|
||||
if (!cpu->cc->sysemu_ops->write_elf64_qemunote) {
|
||||
return 0;
|
||||
}
|
||||
return (*cc->sysemu_ops->write_elf64_qemunote)(f, cpu, opaque);
|
||||
return (*cpu->cc->sysemu_ops->write_elf64_qemunote)(f, cpu, opaque);
|
||||
}
|
||||
|
||||
int cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
|
||||
int cpuid, void *opaque)
|
||||
{
|
||||
CPUClass *cc = CPU_GET_CLASS(cpu);
|
||||
|
||||
if (!cc->sysemu_ops->write_elf64_note) {
|
||||
if (!cpu->cc->sysemu_ops->write_elf64_note) {
|
||||
return -1;
|
||||
}
|
||||
return (*cc->sysemu_ops->write_elf64_note)(f, cpu, cpuid, opaque);
|
||||
return (*cpu->cc->sysemu_ops->write_elf64_note)(f, cpu, cpuid, opaque);
|
||||
}
|
||||
|
||||
bool cpu_virtio_is_big_endian(CPUState *cpu)
|
||||
{
|
||||
CPUClass *cc = CPU_GET_CLASS(cpu);
|
||||
|
||||
if (cc->sysemu_ops->virtio_is_big_endian) {
|
||||
return cc->sysemu_ops->virtio_is_big_endian(cpu);
|
||||
if (cpu->cc->sysemu_ops->virtio_is_big_endian) {
|
||||
return cpu->cc->sysemu_ops->virtio_is_big_endian(cpu);
|
||||
}
|
||||
return target_words_bigendian();
|
||||
}
|
||||
|
||||
GuestPanicInformation *cpu_get_crash_info(CPUState *cpu)
|
||||
{
|
||||
CPUClass *cc = CPU_GET_CLASS(cpu);
|
||||
GuestPanicInformation *res = NULL;
|
||||
|
||||
if (cc->sysemu_ops->get_crash_info) {
|
||||
res = cc->sysemu_ops->get_crash_info(cpu);
|
||||
if (cpu->cc->sysemu_ops->get_crash_info) {
|
||||
res = cpu->cc->sysemu_ops->get_crash_info(cpu);
|
||||
}
|
||||
return res;
|
||||
}
|
||||
@ -189,8 +183,123 @@ void cpu_class_init_props(DeviceClass *dc)
|
||||
device_class_set_props(dc, cpu_system_props);
|
||||
}
|
||||
|
||||
void cpu_exec_class_post_init(CPUClass *cc)
|
||||
{
|
||||
/* Check mandatory SysemuCPUOps handlers */
|
||||
g_assert(cc->sysemu_ops->has_work);
|
||||
}
|
||||
|
||||
void cpu_exec_initfn(CPUState *cpu)
|
||||
{
|
||||
cpu->memory = get_system_memory();
|
||||
object_ref(OBJECT(cpu->memory));
|
||||
}
|
||||
|
||||
static int cpu_common_post_load(void *opaque, int version_id)
|
||||
{
|
||||
if (tcg_enabled()) {
|
||||
CPUState *cpu = opaque;
|
||||
|
||||
/*
|
||||
* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
|
||||
* version_id is increased.
|
||||
*/
|
||||
cpu->interrupt_request &= ~0x01;
|
||||
|
||||
tlb_flush(cpu);
|
||||
|
||||
/*
|
||||
* loadvm has just updated the content of RAM, bypassing the
|
||||
* usual mechanisms that ensure we flush TBs for writes to
|
||||
* memory we've translated code from. So we must flush all TBs,
|
||||
* which will now be stale.
|
||||
*/
|
||||
tb_flush(cpu);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int cpu_common_pre_load(void *opaque)
|
||||
{
|
||||
CPUState *cpu = opaque;
|
||||
|
||||
cpu->exception_index = -1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool cpu_common_exception_index_needed(void *opaque)
|
||||
{
|
||||
CPUState *cpu = opaque;
|
||||
|
||||
return tcg_enabled() && cpu->exception_index != -1;
|
||||
}
|
||||
|
||||
static const VMStateDescription vmstate_cpu_common_exception_index = {
|
||||
.name = "cpu_common/exception_index",
|
||||
.version_id = 1,
|
||||
.minimum_version_id = 1,
|
||||
.needed = cpu_common_exception_index_needed,
|
||||
.fields = (const VMStateField[]) {
|
||||
VMSTATE_INT32(exception_index, CPUState),
|
||||
VMSTATE_END_OF_LIST()
|
||||
}
|
||||
};
|
||||
|
||||
static bool cpu_common_crash_occurred_needed(void *opaque)
|
||||
{
|
||||
CPUState *cpu = opaque;
|
||||
|
||||
return cpu->crash_occurred;
|
||||
}
|
||||
|
||||
static const VMStateDescription vmstate_cpu_common_crash_occurred = {
|
||||
.name = "cpu_common/crash_occurred",
|
||||
.version_id = 1,
|
||||
.minimum_version_id = 1,
|
||||
.needed = cpu_common_crash_occurred_needed,
|
||||
.fields = (const VMStateField[]) {
|
||||
VMSTATE_BOOL(crash_occurred, CPUState),
|
||||
VMSTATE_END_OF_LIST()
|
||||
}
|
||||
};
|
||||
|
||||
const VMStateDescription vmstate_cpu_common = {
|
||||
.name = "cpu_common",
|
||||
.version_id = 1,
|
||||
.minimum_version_id = 1,
|
||||
.pre_load = cpu_common_pre_load,
|
||||
.post_load = cpu_common_post_load,
|
||||
.fields = (const VMStateField[]) {
|
||||
VMSTATE_UINT32(halted, CPUState),
|
||||
VMSTATE_UINT32(interrupt_request, CPUState),
|
||||
VMSTATE_END_OF_LIST()
|
||||
},
|
||||
.subsections = (const VMStateDescription * const []) {
|
||||
&vmstate_cpu_common_exception_index,
|
||||
&vmstate_cpu_common_crash_occurred,
|
||||
NULL
|
||||
}
|
||||
};
|
||||
|
||||
void cpu_vmstate_register(CPUState *cpu)
|
||||
{
|
||||
if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
|
||||
vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
|
||||
}
|
||||
if (cpu->cc->sysemu_ops->legacy_vmsd != NULL) {
|
||||
vmstate_register(NULL, cpu->cpu_index,
|
||||
cpu->cc->sysemu_ops->legacy_vmsd, cpu);
|
||||
}
|
||||
}
|
||||
|
||||
void cpu_vmstate_unregister(CPUState *cpu)
|
||||
{
|
||||
if (cpu->cc->sysemu_ops->legacy_vmsd != NULL) {
|
||||
vmstate_unregister(NULL, cpu->cc->sysemu_ops->legacy_vmsd, cpu);
|
||||
}
|
||||
if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
|
||||
vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
|
||||
}
|
||||
}
|
||||
|
@ -10,6 +10,7 @@
|
||||
#include "hw/qdev-core.h"
|
||||
#include "hw/qdev-properties.h"
|
||||
#include "hw/core/cpu.h"
|
||||
#include "migration/vmstate.h"
|
||||
|
||||
static const Property cpu_user_props[] = {
|
||||
/*
|
||||
@ -26,7 +27,23 @@ void cpu_class_init_props(DeviceClass *dc)
|
||||
device_class_set_props(dc, cpu_user_props);
|
||||
}
|
||||
|
||||
void cpu_exec_class_post_init(CPUClass *cc)
|
||||
{
|
||||
/* nothing to do */
|
||||
}
|
||||
|
||||
void cpu_exec_initfn(CPUState *cpu)
|
||||
{
|
||||
/* nothing to do */
|
||||
}
|
||||
|
||||
void cpu_vmstate_register(CPUState *cpu)
|
||||
{
|
||||
assert(qdev_get_vmsd(DEVICE(cpu)) == NULL ||
|
||||
qdev_get_vmsd(DEVICE(cpu))->unmigratable);
|
||||
}
|
||||
|
||||
void cpu_vmstate_unregister(CPUState *cpu)
|
||||
{
|
||||
/* nothing to do */
|
||||
}
|
||||
|
@ -141,7 +141,7 @@ struct TCGCPUOps {
|
||||
*
|
||||
* This method must be provided. If the target does not need to
|
||||
* do anything special for halt, the same function used for its
|
||||
* CPUClass::has_work method can be used here, as they have the
|
||||
* SysemuCPUOps::has_work method can be used here, as they have the
|
||||
* same function signature.
|
||||
*/
|
||||
bool (*cpu_exec_halt)(CPUState *cpu);
|
||||
|
@ -104,7 +104,6 @@ struct SysemuCPUOps;
|
||||
* instantiatable CPU type.
|
||||
* @parse_features: Callback to parse command line arguments.
|
||||
* @reset_dump_flags: #CPUDumpFlags to use for reset logging.
|
||||
* @has_work: Callback for checking if there is work to do.
|
||||
* @mmu_index: Callback for choosing softmmu mmu index;
|
||||
* may be used internally by memory_rw_debug without TCG.
|
||||
* @memory_rw_debug: Callback for GDB memory access.
|
||||
@ -153,7 +152,6 @@ struct CPUClass {
|
||||
ObjectClass *(*class_by_name)(const char *cpu_model);
|
||||
void (*parse_features)(const char *typename, char *str, Error **errp);
|
||||
|
||||
bool (*has_work)(CPUState *cpu);
|
||||
int (*mmu_index)(CPUState *cpu, bool ifetch);
|
||||
int (*memory_rw_debug)(CPUState *cpu, vaddr addr,
|
||||
uint8_t *buf, int len, bool is_write);
|
||||
@ -750,6 +748,16 @@ int cpu_asidx_from_attrs(CPUState *cpu, MemTxAttrs attrs);
|
||||
*/
|
||||
bool cpu_virtio_is_big_endian(CPUState *cpu);
|
||||
|
||||
/**
|
||||
* cpu_has_work:
|
||||
* @cpu: The vCPU to check.
|
||||
*
|
||||
* Checks whether the CPU has work to do.
|
||||
*
|
||||
* Returns: %true if the CPU has work, %false otherwise.
|
||||
*/
|
||||
bool cpu_has_work(CPUState *cpu);
|
||||
|
||||
#endif /* CONFIG_USER_ONLY */
|
||||
|
||||
/**
|
||||
@ -816,22 +824,6 @@ CPUState *cpu_create(const char *typename);
|
||||
*/
|
||||
const char *parse_cpu_option(const char *cpu_option);
|
||||
|
||||
/**
|
||||
* cpu_has_work:
|
||||
* @cpu: The vCPU to check.
|
||||
*
|
||||
* Checks whether the CPU has work to do.
|
||||
*
|
||||
* Returns: %true if the CPU has work, %false otherwise.
|
||||
*/
|
||||
static inline bool cpu_has_work(CPUState *cpu)
|
||||
{
|
||||
CPUClass *cc = CPU_GET_CLASS(cpu);
|
||||
|
||||
g_assert(cc->has_work);
|
||||
return cc->has_work(cpu);
|
||||
}
|
||||
|
||||
/**
|
||||
* qemu_cpu_is_self:
|
||||
* @cpu: The vCPU to check against.
|
||||
@ -968,9 +960,7 @@ void cpu_interrupt(CPUState *cpu, int mask);
|
||||
*/
|
||||
static inline void cpu_set_pc(CPUState *cpu, vaddr addr)
|
||||
{
|
||||
CPUClass *cc = CPU_GET_CLASS(cpu);
|
||||
|
||||
cc->set_pc(cpu, addr);
|
||||
cpu->cc->set_pc(cpu, addr);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -1164,7 +1154,10 @@ G_NORETURN void cpu_abort(CPUState *cpu, const char *fmt, ...)
|
||||
|
||||
/* $(top_srcdir)/cpu.c */
|
||||
void cpu_class_init_props(DeviceClass *dc);
|
||||
void cpu_exec_class_post_init(CPUClass *cc);
|
||||
void cpu_exec_initfn(CPUState *cpu);
|
||||
void cpu_vmstate_register(CPUState *cpu);
|
||||
void cpu_vmstate_unregister(CPUState *cpu);
|
||||
bool cpu_exec_realizefn(CPUState *cpu, Error **errp);
|
||||
void cpu_exec_unrealizefn(CPUState *cpu);
|
||||
void cpu_exec_reset_hold(CPUState *cpu);
|
||||
|
@@ -16,6 +16,10 @@
 * struct SysemuCPUOps: System operations specific to a CPU class
 */
typedef struct SysemuCPUOps {
    /**
     * @has_work: Callback for checking if there is work to do.
     */
    bool (*has_work)(CPUState *cpu); /* MANDATORY NON-NULL */
    /**
     * @get_memory_mapping: Callback for obtaining the memory mappings.
     */
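With has_work() now a mandatory SysemuCPUOps handler rather than a CPUClass one, each system-emulation target wires it up through its SysemuCPUOps table, and the generic cpu_has_work() dispatches through the cached class pointer. A self-contained sketch of that registration and dispatch pattern, using simplified stand-in types rather than the real QEMU headers:

#include <stdbool.h>
#include <stdio.h>

typedef struct CPUState CPUState;

/* Stand-in for QEMU's SysemuCPUOps: system-emulation-only hooks. */
typedef struct SysemuCPUOps {
    bool (*has_work)(CPUState *cpu);   /* mandatory, checked at class post-init */
} SysemuCPUOps;

typedef struct CPUClass {
    const SysemuCPUOps *sysemu_ops;
} CPUClass;

struct CPUState {
    CPUClass *cc;               /* cached class pointer */
    unsigned interrupt_request;
};

#define DEMO_INTERRUPT_HARD 0x2u

/* Example target handler, in the spirit of m68k_cpu_has_work(). */
static bool demo_cpu_has_work(CPUState *cs)
{
    return cs->interrupt_request & DEMO_INTERRUPT_HARD;
}

static const SysemuCPUOps demo_sysemu_ops = {
    .has_work = demo_cpu_has_work,
};

/* Generic dispatcher, analogous to the new cpu_has_work() in this series. */
static bool cpu_has_work(CPUState *cpu)
{
    return cpu->cc->sysemu_ops->has_work(cpu);
}

int main(void)
{
    CPUClass cc = { .sysemu_ops = &demo_sysemu_ops };
    CPUState cpu = { .cc = &cc, .interrupt_request = DEMO_INTERRUPT_HARD };

    printf("has_work: %d\n", cpu_has_work(&cpu));
    return 0;
}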
 include/user/mmap.h (new file) | 32 lines
@@ -0,0 +1,32 @@
/*
 * MMAP declarations for QEMU user emulation
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#ifndef USER_MMAP_H
#define USER_MMAP_H

#include "user/abitypes.h"

/*
 * mmap_next_start: The base address for the next mmap without hint,
 * increased after each successful map, starting at task_unmapped_base.
 * This is an optimization within QEMU and not part of ADDR_COMPAT_LAYOUT.
 */
extern abi_ulong mmap_next_start;

int target_mprotect(abi_ulong start, abi_ulong len, int prot);

abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
                     int flags, int fd, off_t offset);
int target_munmap(abi_ulong start, abi_ulong len);
abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
                       abi_ulong new_size, unsigned long flags,
                       abi_ulong new_addr);

abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong alignment);

void TSA_NO_TSA mmap_fork_start(void);
void TSA_NO_TSA mmap_fork_end(int child);

#endif
@ -15,7 +15,7 @@ static int open_cpuinfo(CPUArchState *cpu_env, int fd)
|
||||
const char *p, *q;
|
||||
int t;
|
||||
|
||||
p = object_class_get_name(OBJECT_CLASS(CPU_GET_CLASS(env_cpu(cpu_env))));
|
||||
p = object_class_get_name(OBJECT_CLASS(env_cpu(cpu_env)->cc));
|
||||
q = strchr(p, '-');
|
||||
t = q - p;
|
||||
assert(t < sizeof(model));
|
||||
|
@ -21,6 +21,7 @@
|
||||
#include "trace.h"
|
||||
#include "exec/log.h"
|
||||
#include "exec/page-protection.h"
|
||||
#include "exec/tb-flush.h"
|
||||
#include "exec/translation-block.h"
|
||||
#include "qemu.h"
|
||||
#include "user/page-protection.h"
|
||||
|
@ -753,7 +753,7 @@ void force_sigsegv(int oldsig)
|
||||
void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr,
|
||||
MMUAccessType access_type, bool maperr, uintptr_t ra)
|
||||
{
|
||||
const TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops;
|
||||
const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
|
||||
|
||||
if (tcg_ops->record_sigsegv) {
|
||||
tcg_ops->record_sigsegv(cpu, addr, access_type, maperr, ra);
|
||||
@ -769,7 +769,7 @@ void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr,
|
||||
void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr,
|
||||
MMUAccessType access_type, uintptr_t ra)
|
||||
{
|
||||
const TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops;
|
||||
const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
|
||||
|
||||
if (tcg_ops->record_sigbus) {
|
||||
tcg_ops->record_sigbus(cpu, addr, access_type, ra);
|
||||
|
@ -26,6 +26,7 @@
|
||||
#include "tcg/startup.h"
|
||||
#include "target_mman.h"
|
||||
#include "exec/page-protection.h"
|
||||
#include "exec/tb-flush.h"
|
||||
#include "exec/translation-block.h"
|
||||
#include <elf.h>
|
||||
#include <endian.h>
|
||||
|
@ -20,7 +20,6 @@
|
||||
|
||||
#include "user/thunk.h"
|
||||
#include "exec/exec-all.h"
|
||||
#include "exec/tb-flush.h"
|
||||
#include "qemu/log.h"
|
||||
|
||||
extern char *exec_path;
|
||||
|
@ -18,6 +18,8 @@
|
||||
#ifndef LINUX_USER_USER_MMAP_H
|
||||
#define LINUX_USER_USER_MMAP_H
|
||||
|
||||
#include "user/mmap.h"
|
||||
|
||||
/*
|
||||
* Guest parameters for the ADDR_COMPAT_LAYOUT personality
|
||||
* (at present this is the only layout supported by QEMU).
|
||||
@ -39,24 +41,7 @@
|
||||
extern abi_ulong task_unmapped_base;
|
||||
extern abi_ulong elf_et_dyn_base;
|
||||
|
||||
/*
|
||||
* mmap_next_start: The base address for the next mmap without hint,
|
||||
* increased after each successful map, starting at task_unmapped_base.
|
||||
* This is an optimization within QEMU and not part of ADDR_COMPAT_LAYOUT.
|
||||
*/
|
||||
extern abi_ulong mmap_next_start;
|
||||
|
||||
int target_mprotect(abi_ulong start, abi_ulong len, int prot);
|
||||
abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
|
||||
int flags, int fd, off_t offset);
|
||||
int target_munmap(abi_ulong start, abi_ulong len);
|
||||
abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
|
||||
abi_ulong new_size, unsigned long flags,
|
||||
abi_ulong new_addr);
|
||||
abi_long target_madvise(abi_ulong start, abi_ulong len_in, int advice);
|
||||
abi_ulong mmap_find_vma(abi_ulong, abi_ulong, abi_ulong);
|
||||
void mmap_fork_start(void);
|
||||
void mmap_fork_end(int child);
|
||||
|
||||
abi_ulong target_shmat(CPUArchState *cpu_env, int shmid,
|
||||
abi_ulong shmaddr, int shmflg);
|
||||
|
@ -63,6 +63,7 @@ static void alpha_restore_state_to_opc(CPUState *cs,
|
||||
}
|
||||
}
|
||||
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
static bool alpha_cpu_has_work(CPUState *cs)
|
||||
{
|
||||
/* Here we are checking to see if the CPU should wake up from HALT.
|
||||
@ -77,6 +78,7 @@ static bool alpha_cpu_has_work(CPUState *cs)
|
||||
| CPU_INTERRUPT_SMP
|
||||
| CPU_INTERRUPT_MCHK);
|
||||
}
|
||||
#endif /* !CONFIG_USER_ONLY */
|
||||
|
||||
static int alpha_cpu_mmu_index(CPUState *cs, bool ifetch)
|
||||
{
|
||||
@ -224,6 +226,7 @@ static void alpha_cpu_initfn(Object *obj)
|
||||
#include "hw/core/sysemu-cpu-ops.h"
|
||||
|
||||
static const struct SysemuCPUOps alpha_sysemu_ops = {
|
||||
.has_work = alpha_cpu_has_work,
|
||||
.get_phys_page_debug = alpha_cpu_get_phys_page_debug,
|
||||
};
|
||||
#endif
|
||||
@ -259,7 +262,6 @@ static void alpha_cpu_class_init(ObjectClass *oc, void *data)
|
||||
&acc->parent_realize);
|
||||
|
||||
cc->class_by_name = alpha_cpu_class_by_name;
|
||||
cc->has_work = alpha_cpu_has_work;
|
||||
cc->mmu_index = alpha_cpu_mmu_index;
|
||||
cc->dump_state = alpha_cpu_dump_state;
|
||||
cc->set_pc = alpha_cpu_set_pc;
|
||||
|
@ -123,6 +123,7 @@ void arm_restore_state_to_opc(CPUState *cs,
|
||||
}
|
||||
#endif /* CONFIG_TCG */
|
||||
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
/*
|
||||
* With SCTLR_ELx.NMI == 0, IRQ with Superpriority is masked identically with
|
||||
* IRQ without Superpriority. Moreover, if the GIC is configured so that
|
||||
@ -141,6 +142,7 @@ static bool arm_cpu_has_work(CPUState *cs)
|
||||
| CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ | CPU_INTERRUPT_VSERR
|
||||
| CPU_INTERRUPT_EXITTB);
|
||||
}
|
||||
#endif /* !CONFIG_USER_ONLY */
|
||||
|
||||
static int arm_cpu_mmu_index(CPUState *cs, bool ifetch)
|
||||
{
|
||||
@ -830,7 +832,6 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
|
||||
|
||||
static bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
|
||||
{
|
||||
CPUClass *cc = CPU_GET_CLASS(cs);
|
||||
CPUARMState *env = cpu_env(cs);
|
||||
uint32_t cur_el = arm_current_el(env);
|
||||
bool secure = arm_is_secure(env);
|
||||
@ -930,7 +931,7 @@ static bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
|
||||
found:
|
||||
cs->exception_index = excp_idx;
|
||||
env->exception.target_el = target_el;
|
||||
cc->tcg_ops->do_interrupt(cs);
|
||||
cs->cc->tcg_ops->do_interrupt(cs);
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -2656,6 +2657,7 @@ static const gchar *arm_gdb_arch_name(CPUState *cs)
|
||||
#include "hw/core/sysemu-cpu-ops.h"
|
||||
|
||||
static const struct SysemuCPUOps arm_sysemu_ops = {
|
||||
.has_work = arm_cpu_has_work,
|
||||
.get_phys_page_attrs_debug = arm_cpu_get_phys_page_attrs_debug,
|
||||
.asidx_from_attrs = arm_asidx_from_attrs,
|
||||
.write_elf32_note = arm_cpu_write_elf32_note,
|
||||
@ -2706,7 +2708,6 @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
|
||||
&acc->parent_phases);
|
||||
|
||||
cc->class_by_name = arm_cpu_class_by_name;
|
||||
cc->has_work = arm_cpu_has_work;
|
||||
cc->mmu_index = arm_cpu_mmu_index;
|
||||
cc->dump_state = arm_cpu_dump_state;
|
||||
cc->set_pc = arm_cpu_set_pc;
|
||||
|
@ -19,7 +19,6 @@
|
||||
|
||||
static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
|
||||
{
|
||||
CPUClass *cc = CPU_GET_CLASS(cs);
|
||||
ARMCPU *cpu = ARM_CPU(cs);
|
||||
CPUARMState *env = &cpu->env;
|
||||
bool ret = false;
|
||||
@ -35,7 +34,7 @@ static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
|
||||
if (interrupt_request & CPU_INTERRUPT_HARD
|
||||
&& (armv7m_nvic_can_take_pending_exception(env->nvic))) {
|
||||
cs->exception_index = EXCP_IRQ;
|
||||
cc->tcg_ops->do_interrupt(cs);
|
||||
cs->cc->tcg_ops->do_interrupt(cs);
|
||||
ret = true;
|
||||
}
|
||||
return ret;
|
||||
|
@ -201,6 +201,7 @@ static void avr_cpu_dump_state(CPUState *cs, FILE *f, int flags)
|
||||
#include "hw/core/sysemu-cpu-ops.h"
|
||||
|
||||
static const struct SysemuCPUOps avr_sysemu_ops = {
|
||||
.has_work = avr_cpu_has_work,
|
||||
.get_phys_page_debug = avr_cpu_get_phys_page_debug,
|
||||
};
|
||||
|
||||
@ -233,7 +234,6 @@ static void avr_cpu_class_init(ObjectClass *oc, void *data)
|
||||
|
||||
cc->class_by_name = avr_cpu_class_by_name;
|
||||
|
||||
cc->has_work = avr_cpu_has_work;
|
||||
cc->mmu_index = avr_cpu_mmu_index;
|
||||
cc->dump_state = avr_cpu_dump_state;
|
||||
cc->set_pc = avr_cpu_set_pc;
|
||||
|
@ -262,11 +262,6 @@ static void hexagon_cpu_synchronize_from_tb(CPUState *cs,
|
||||
cpu_env(cs)->gpr[HEX_REG_PC] = tb->pc;
|
||||
}
|
||||
|
||||
static bool hexagon_cpu_has_work(CPUState *cs)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
static void hexagon_restore_state_to_opc(CPUState *cs,
|
||||
const TranslationBlock *tb,
|
||||
const uint64_t *data)
|
||||
@ -346,7 +341,6 @@ static void hexagon_cpu_class_init(ObjectClass *c, void *data)
|
||||
&mcc->parent_phases);
|
||||
|
||||
cc->class_by_name = hexagon_cpu_class_by_name;
|
||||
cc->has_work = hexagon_cpu_has_work;
|
||||
cc->dump_state = hexagon_dump_state;
|
||||
cc->set_pc = hexagon_cpu_set_pc;
|
||||
cc->get_pc = hexagon_cpu_get_pc;
|
||||
|
@ -131,10 +131,12 @@ static void hppa_restore_state_to_opc(CPUState *cs,
|
||||
env->psw_n = 0;
|
||||
}
|
||||
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
static bool hppa_cpu_has_work(CPUState *cs)
|
||||
{
|
||||
return cs->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI);
|
||||
}
|
||||
#endif /* !CONFIG_USER_ONLY */
|
||||
|
||||
static int hppa_cpu_mmu_index(CPUState *cs, bool ifetch)
|
||||
{
|
||||
@ -242,6 +244,7 @@ static ObjectClass *hppa_cpu_class_by_name(const char *cpu_model)
|
||||
#include "hw/core/sysemu-cpu-ops.h"
|
||||
|
||||
static const struct SysemuCPUOps hppa_sysemu_ops = {
|
||||
.has_work = hppa_cpu_has_work,
|
||||
.get_phys_page_debug = hppa_cpu_get_phys_page_debug,
|
||||
};
|
||||
#endif
|
||||
@ -278,7 +281,6 @@ static void hppa_cpu_class_init(ObjectClass *oc, void *data)
|
||||
&acc->parent_phases);
|
||||
|
||||
cc->class_by_name = hppa_cpu_class_by_name;
|
||||
cc->has_work = hppa_cpu_has_work;
|
||||
cc->mmu_index = hppa_cpu_mmu_index;
|
||||
cc->dump_state = hppa_cpu_dump_state;
|
||||
cc->set_pc = hppa_cpu_set_pc;
|
||||
|
@ -8604,16 +8604,15 @@ static vaddr x86_cpu_get_pc(CPUState *cs)
|
||||
return cpu->env.eip + cpu->env.segs[R_CS].base;
|
||||
}
|
||||
|
||||
#if !defined(CONFIG_USER_ONLY)
|
||||
int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request)
|
||||
{
|
||||
X86CPU *cpu = X86_CPU(cs);
|
||||
CPUX86State *env = &cpu->env;
|
||||
|
||||
#if !defined(CONFIG_USER_ONLY)
|
||||
if (interrupt_request & CPU_INTERRUPT_POLL) {
|
||||
return CPU_INTERRUPT_POLL;
|
||||
}
|
||||
#endif
|
||||
if (interrupt_request & CPU_INTERRUPT_SIPI) {
|
||||
return CPU_INTERRUPT_SIPI;
|
||||
}
|
||||
@ -8634,14 +8633,12 @@ int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request)
|
||||
(env->eflags & IF_MASK &&
|
||||
!(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
|
||||
return CPU_INTERRUPT_HARD;
|
||||
#if !defined(CONFIG_USER_ONLY)
|
||||
} else if (env->hflags2 & HF2_VGIF_MASK) {
|
||||
if((interrupt_request & CPU_INTERRUPT_VIRQ) &&
|
||||
(env->eflags & IF_MASK) &&
|
||||
!(env->hflags & HF_INHIBIT_IRQ_MASK)) {
|
||||
return CPU_INTERRUPT_VIRQ;
|
||||
}
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
@ -8652,6 +8649,7 @@ static bool x86_cpu_has_work(CPUState *cs)
|
||||
{
|
||||
return x86_cpu_pending_interrupt(cs, cs->interrupt_request) != 0;
|
||||
}
|
||||
#endif /* !CONFIG_USER_ONLY */
|
||||
|
||||
int x86_mmu_index_pl(CPUX86State *env, unsigned pl)
|
||||
{
|
||||
@ -8893,6 +8891,7 @@ static const Property x86_cpu_properties[] = {
|
||||
#include "hw/core/sysemu-cpu-ops.h"
|
||||
|
||||
static const struct SysemuCPUOps i386_sysemu_ops = {
|
||||
.has_work = x86_cpu_has_work,
|
||||
.get_memory_mapping = x86_cpu_get_memory_mapping,
|
||||
.get_paging_enabled = x86_cpu_get_paging_enabled,
|
||||
.get_phys_page_attrs_debug = x86_cpu_get_phys_page_attrs_debug,
|
||||
@ -8926,7 +8925,6 @@ static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
|
||||
|
||||
cc->class_by_name = x86_cpu_class_by_name;
|
||||
cc->parse_features = x86_cpu_parse_featurestr;
|
||||
cc->has_work = x86_cpu_has_work;
|
||||
cc->mmu_index = x86_cpu_mmu_index;
|
||||
cc->dump_state = x86_cpu_dump_state;
|
||||
cc->set_pc = x86_cpu_set_pc;
|
||||
|
@ -2349,8 +2349,6 @@ struct X86CPUClass {
|
||||
extern const VMStateDescription vmstate_x86_cpu;
|
||||
#endif
|
||||
|
||||
int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request);
|
||||
|
||||
int x86_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
|
||||
int cpuid, DumpState *s);
|
||||
int x86_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
|
||||
@ -2373,6 +2371,8 @@ void x86_cpu_list(void);
|
||||
int cpu_x86_support_mca_broadcast(CPUX86State *env);
|
||||
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request);
|
||||
|
||||
hwaddr x86_cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
|
||||
MemTxAttrs *attrs);
|
||||
int cpu_get_pic_interrupt(CPUX86State *s);
|
||||
|
@@ -350,11 +350,9 @@ static void loongarch_restore_state_to_opc(CPUState *cs,
}
#endif /* CONFIG_TCG */

#ifndef CONFIG_USER_ONLY
static bool loongarch_cpu_has_work(CPUState *cs)
{
#ifdef CONFIG_USER_ONLY
    return true;
#else
    bool has_work = false;

    if ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
@@ -363,8 +361,8 @@ static bool loongarch_cpu_has_work(CPUState *cs)
    }

    return has_work;
#endif
}
#endif /* !CONFIG_USER_ONLY */

static int loongarch_cpu_mmu_index(CPUState *cs, bool ifetch)
{
@@ -885,6 +883,7 @@ static const TCGCPUOps loongarch_tcg_ops = {
#include "hw/core/sysemu-cpu-ops.h"

static const struct SysemuCPUOps loongarch_sysemu_ops = {
    .has_work = loongarch_cpu_has_work,
    .write_elf64_note = loongarch_cpu_write_elf64_note,
    .get_phys_page_debug = loongarch_cpu_get_phys_page_debug,
};
@@ -920,7 +919,6 @@ static void loongarch_cpu_class_init(ObjectClass *c, void *data)
                                       &lacc->parent_phases);

    cc->class_by_name = loongarch_cpu_class_by_name;
    cc->has_work = loongarch_cpu_has_work;
    cc->mmu_index = loongarch_cpu_mmu_index;
    cc->dump_state = loongarch_cpu_dump_state;
    cc->set_pc = loongarch_cpu_set_pc;

@@ -51,10 +51,12 @@ static void m68k_restore_state_to_opc(CPUState *cs,
    }
}

#ifndef CONFIG_USER_ONLY
static bool m68k_cpu_has_work(CPUState *cs)
{
    return cs->interrupt_request & CPU_INTERRUPT_HARD;
}
#endif /* !CONFIG_USER_ONLY */

static int m68k_cpu_mmu_index(CPUState *cs, bool ifetch)
{
@@ -579,6 +581,7 @@ static const VMStateDescription vmstate_m68k_cpu = {
#include "hw/core/sysemu-cpu-ops.h"

static const struct SysemuCPUOps m68k_sysemu_ops = {
    .has_work = m68k_cpu_has_work,
    .get_phys_page_debug = m68k_cpu_get_phys_page_debug,
};
#endif /* !CONFIG_USER_ONLY */
@@ -612,7 +615,6 @@ static void m68k_cpu_class_init(ObjectClass *c, void *data)
                                       &mcc->parent_phases);

    cc->class_by_name = m68k_cpu_class_by_name;
    cc->has_work = m68k_cpu_has_work;
    cc->mmu_index = m68k_cpu_mmu_index;
    cc->dump_state = m68k_cpu_dump_state;
    cc->set_pc = m68k_cpu_set_pc;

@@ -115,10 +115,12 @@ static void mb_restore_state_to_opc(CPUState *cs,
    cpu->env.iflags = data[1];
}

#ifndef CONFIG_USER_ONLY
static bool mb_cpu_has_work(CPUState *cs)
{
    return cs->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI);
}
#endif /* !CONFIG_USER_ONLY */

static int mb_cpu_mmu_index(CPUState *cs, bool ifetch)
{
@@ -417,6 +419,7 @@ static ObjectClass *mb_cpu_class_by_name(const char *cpu_model)
#include "hw/core/sysemu-cpu-ops.h"

static const struct SysemuCPUOps mb_sysemu_ops = {
    .has_work = mb_cpu_has_work,
    .get_phys_page_attrs_debug = mb_cpu_get_phys_page_attrs_debug,
};
#endif
@@ -452,7 +455,6 @@ static void mb_cpu_class_init(ObjectClass *oc, void *data)
                                       &mcc->parent_phases);

    cc->class_by_name = mb_cpu_class_by_name;
    cc->has_work = mb_cpu_has_work;
    cc->mmu_index = mb_cpu_mmu_index;
    cc->dump_state = mb_cpu_dump_state;
    cc->set_pc = mb_cpu_set_pc;

@@ -132,6 +132,7 @@ static vaddr mips_cpu_get_pc(CPUState *cs)
    return cpu->env.active_tc.PC;
}

#if !defined(CONFIG_USER_ONLY)
static bool mips_cpu_has_work(CPUState *cs)
{
    CPUMIPSState *env = cpu_env(cs);
@@ -177,6 +178,7 @@ static bool mips_cpu_has_work(CPUState *cs)
    }
    return has_work;
}
#endif /* !CONFIG_USER_ONLY */

static int mips_cpu_mmu_index(CPUState *cs, bool ifunc)
{
@@ -534,6 +536,7 @@ static ObjectClass *mips_cpu_class_by_name(const char *cpu_model)
#include "hw/core/sysemu-cpu-ops.h"

static const struct SysemuCPUOps mips_sysemu_ops = {
    .has_work = mips_cpu_has_work,
    .get_phys_page_debug = mips_cpu_get_phys_page_debug,
    .legacy_vmsd = &vmstate_mips_cpu,
};
@@ -577,7 +580,6 @@ static void mips_cpu_class_init(ObjectClass *c, void *data)
                                       &mcc->parent_phases);

    cc->class_by_name = mips_cpu_class_by_name;
    cc->has_work = mips_cpu_has_work;
    cc->mmu_index = mips_cpu_mmu_index;
    cc->dump_state = mips_cpu_dump_state;
    cc->set_pc = mips_cpu_set_pc;

@@ -162,8 +162,6 @@ void cpu_mips_store_cause(CPUMIPSState *env, target_ulong val);

extern const VMStateDescription vmstate_mips_cpu;

#endif /* !CONFIG_USER_ONLY */

static inline bool cpu_mips_hw_interrupts_enabled(CPUMIPSState *env)
{
    return (env->CP0_Status & (1 << CP0St_IE)) &&
@@ -206,6 +204,8 @@ static inline bool cpu_mips_hw_interrupts_pending(CPUMIPSState *env)
    return r;
}

#endif /* !CONFIG_USER_ONLY */

void msa_reset(CPUMIPSState *env);

/* cp0_timer.c */

@@ -63,11 +63,13 @@ static void openrisc_restore_state_to_opc(CPUState *cs,
    }
}

#ifndef CONFIG_USER_ONLY
static bool openrisc_cpu_has_work(CPUState *cs)
{
    return cs->interrupt_request & (CPU_INTERRUPT_HARD |
                                    CPU_INTERRUPT_TIMER);
}
#endif /* !CONFIG_USER_ONLY */

static int openrisc_cpu_mmu_index(CPUState *cs, bool ifetch)
{
@@ -233,6 +235,7 @@ static void openrisc_any_initfn(Object *obj)
#include "hw/core/sysemu-cpu-ops.h"

static const struct SysemuCPUOps openrisc_sysemu_ops = {
    .has_work = openrisc_cpu_has_work,
    .get_phys_page_debug = openrisc_cpu_get_phys_page_debug,
};
#endif
@@ -266,7 +269,6 @@ static void openrisc_cpu_class_init(ObjectClass *oc, void *data)
                                       &occ->parent_phases);

    cc->class_by_name = openrisc_cpu_class_by_name;
    cc->has_work = openrisc_cpu_has_work;
    cc->mmu_index = openrisc_cpu_mmu_index;
    cc->dump_state = openrisc_cpu_dump_state;
    cc->set_pc = openrisc_cpu_set_pc;

@@ -7177,10 +7177,12 @@ static void ppc_restore_state_to_opc(CPUState *cs,
}
#endif /* CONFIG_TCG */

#ifndef CONFIG_USER_ONLY
static bool ppc_cpu_has_work(CPUState *cs)
{
    return cs->interrupt_request & CPU_INTERRUPT_HARD;
}
#endif /* !CONFIG_USER_ONLY */

static int ppc_cpu_mmu_index(CPUState *cs, bool ifetch)
{
@@ -7423,6 +7425,7 @@ static void ppc_disas_set_info(CPUState *cs, disassemble_info *info)
#include "hw/core/sysemu-cpu-ops.h"

static const struct SysemuCPUOps ppc_sysemu_ops = {
    .has_work = ppc_cpu_has_work,
    .get_phys_page_debug = ppc_cpu_get_phys_page_debug,
    .write_elf32_note = ppc32_cpu_write_elf32_note,
    .write_elf64_note = ppc64_cpu_write_elf64_note,
@@ -7474,7 +7477,6 @@ static void ppc_cpu_class_init(ObjectClass *oc, void *data)
                                       &pcc->parent_phases);

    cc->class_by_name = ppc_cpu_class_by_name;
    cc->has_work = ppc_cpu_has_work;
    cc->mmu_index = ppc_cpu_mmu_index;
    cc->dump_state = ppc_cpu_dump_state;
    cc->set_pc = ppc_cpu_set_pc;

@@ -1006,9 +1006,9 @@ static vaddr riscv_cpu_get_pc(CPUState *cs)
    return env->pc;
}

#ifndef CONFIG_USER_ONLY
bool riscv_cpu_has_work(CPUState *cs)
{
#ifndef CONFIG_USER_ONLY
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    /*
@@ -1018,10 +1018,8 @@ bool riscv_cpu_has_work(CPUState *cs)
    return riscv_cpu_all_pending(env) != 0 ||
        riscv_cpu_sirq_pending(env) != RISCV_EXCP_NONE ||
        riscv_cpu_vsirq_pending(env) != RISCV_EXCP_NONE;
#else
    return true;
#endif
}
#endif /* !CONFIG_USER_ONLY */

static int riscv_cpu_mmu_index(CPUState *cs, bool ifetch)
{
@@ -3029,6 +3027,7 @@ static int64_t riscv_get_arch_id(CPUState *cs)
#include "hw/core/sysemu-cpu-ops.h"

static const struct SysemuCPUOps riscv_sysemu_ops = {
    .has_work = riscv_cpu_has_work,
    .get_phys_page_debug = riscv_cpu_get_phys_page_debug,
    .write_elf64_note = riscv_cpu_write_elf64_note,
    .write_elf32_note = riscv_cpu_write_elf32_note,
@@ -3050,7 +3049,6 @@ static void riscv_cpu_common_class_init(ObjectClass *c, void *data)
                                       &mcc->parent_phases);

    cc->class_by_name = riscv_cpu_class_by_name;
    cc->has_work = riscv_cpu_has_work;
    cc->mmu_index = riscv_cpu_mmu_index;
    cc->dump_state = riscv_cpu_dump_state;
    cc->set_pc = riscv_cpu_set_pc;

@@ -142,8 +142,10 @@ static inline float16 check_nanbox_h(CPURISCVState *env, uint64_t f)
    }
}

/* Our implementation of CPUClass::has_work */
#ifndef CONFIG_USER_ONLY
/* Our implementation of SysemuCPUOps::has_work */
bool riscv_cpu_has_work(CPUState *cs);
#endif

/* Zjpm addr masking routine */
static inline target_ulong adjust_addr_body(CPURISCVState *env,

@@ -196,6 +196,7 @@ static void rx_cpu_init(Object *obj)
#include "hw/core/sysemu-cpu-ops.h"

static const struct SysemuCPUOps rx_sysemu_ops = {
    .has_work = rx_cpu_has_work,
    .get_phys_page_debug = rx_cpu_get_phys_page_debug,
};

@@ -226,7 +227,6 @@ static void rx_cpu_class_init(ObjectClass *klass, void *data)
                                       &rcc->parent_phases);

    cc->class_by_name = rx_cpu_class_by_name;
    cc->has_work = rx_cpu_has_work;
    cc->mmu_index = riscv_cpu_mmu_index;
    cc->dump_state = rx_cpu_dump_state;
    cc->set_pc = rx_cpu_set_pc;

@@ -39,6 +39,23 @@
#include "system/tcg.h"
#include "hw/core/sysemu-cpu-ops.h"

bool s390_cpu_has_work(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);

    /* STOPPED cpus can never wake up */
    if (s390_cpu_get_state(cpu) != S390_CPU_STATE_LOAD &&
        s390_cpu_get_state(cpu) != S390_CPU_STATE_OPERATING) {
        return false;
    }

    if (!(cs->interrupt_request & CPU_INTERRUPT_HARD)) {
        return false;
    }

    return s390_cpu_has_int(cpu);
}

/* S390CPUClass::load_normal() */
static void s390_cpu_load_normal(CPUState *s)
{
@@ -158,6 +175,7 @@ void s390_cpu_finalize(Object *obj)
}

static const struct SysemuCPUOps s390_sysemu_ops = {
    .has_work = s390_cpu_has_work,
    .get_phys_page_debug = s390_cpu_get_phys_page_debug,
    .get_crash_info = s390_cpu_get_crash_info,
    .write_elf64_note = s390_cpu_write_elf64_note,

@@ -126,23 +126,6 @@ static vaddr s390_cpu_get_pc(CPUState *cs)
    return cpu->env.psw.addr;
}

static bool s390_cpu_has_work(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);

    /* STOPPED cpus can never wake up */
    if (s390_cpu_get_state(cpu) != S390_CPU_STATE_LOAD &&
        s390_cpu_get_state(cpu) != S390_CPU_STATE_OPERATING) {
        return false;
    }

    if (!(cs->interrupt_request & CPU_INTERRUPT_HARD)) {
        return false;
    }

    return s390_cpu_has_int(cpu);
}

static int s390x_cpu_mmu_index(CPUState *cs, bool ifetch)
{
    return s390x_env_mmu_index(cpu_env(cs), ifetch);
@@ -395,7 +378,6 @@ static void s390_cpu_class_init(ObjectClass *oc, void *data)
                                       &scc->parent_phases);

    cc->class_by_name = s390_cpu_class_by_name,
    cc->has_work = s390_cpu_has_work;
    cc->mmu_index = s390x_cpu_mmu_index;
    cc->dump_state = s390_cpu_dump_state;
    cc->query_cpu_fast = s390_query_cpu_fast;

@@ -30,6 +30,7 @@ void trigger_pgm_exception(CPUS390XState *env, uint32_t code)
    /* env->int_pgm_ilen is already set, or will be set during unwinding */
}

#if !defined(CONFIG_USER_ONLY)
void s390_program_interrupt(CPUS390XState *env, uint32_t code, uintptr_t ra)
{
    if (kvm_enabled()) {
@@ -41,7 +42,6 @@ void s390_program_interrupt(CPUS390XState *env, uint32_t code, uintptr_t ra)
    }
}

#if !defined(CONFIG_USER_ONLY)
void cpu_inject_clock_comparator(S390CPU *cpu)
{
    CPUS390XState *env = &cpu->env;
@@ -225,11 +225,9 @@ bool s390_cpu_has_stop_int(S390CPU *cpu)

    return env->pending_int & INTERRUPT_STOP;
}
#endif

bool s390_cpu_has_int(S390CPU *cpu)
{
#ifndef CONFIG_USER_ONLY
    if (!tcg_enabled()) {
        return false;
    }
@@ -238,7 +236,5 @@ bool s390_cpu_has_int(S390CPU *cpu)
        s390_cpu_has_io_int(cpu) ||
        s390_cpu_has_restart_int(cpu) ||
        s390_cpu_has_stop_int(cpu);
#else
    return false;
#endif
}
#endif /* !CONFIG_USER_ONLY */

@@ -245,6 +245,7 @@ bool s390_cpu_system_realize(DeviceState *dev, Error **errp);
void s390_cpu_finalize(Object *obj);
void s390_cpu_system_class_init(CPUClass *cc);
void s390_cpu_machine_reset_cb(void *opaque);
bool s390_cpu_has_work(CPUState *cs);

#else
static inline unsigned int s390_cpu_halt(S390CPU *cpu)
@@ -341,6 +342,7 @@ void cpu_unmap_lowcore(LowCore *lowcore);

/* interrupt.c */
void trigger_pgm_exception(CPUS390XState *env, uint32_t code);
#ifndef CONFIG_USER_ONLY
void cpu_inject_clock_comparator(S390CPU *cpu);
void cpu_inject_cpu_timer(S390CPU *cpu);
void cpu_inject_emergency_signal(S390CPU *cpu, uint16_t src_cpu_addr);
@@ -353,9 +355,11 @@ bool s390_cpu_has_restart_int(S390CPU *cpu);
bool s390_cpu_has_stop_int(S390CPU *cpu);
void cpu_inject_restart(S390CPU *cpu);
void cpu_inject_stop(S390CPU *cpu);
#endif /* CONFIG_USER_ONLY */


/* ioinst.c */
#ifndef CONFIG_USER_ONLY
void ioinst_handle_xsch(S390CPU *cpu, uint64_t reg1, uintptr_t ra);
void ioinst_handle_csch(S390CPU *cpu, uint64_t reg1, uintptr_t ra);
void ioinst_handle_hsch(S390CPU *cpu, uint64_t reg1, uintptr_t ra);
@@ -373,6 +377,7 @@ void ioinst_handle_schm(S390CPU *cpu, uint64_t reg1, uint64_t reg2,
void ioinst_handle_rsch(S390CPU *cpu, uint64_t reg1, uintptr_t ra);
void ioinst_handle_rchp(S390CPU *cpu, uint64_t reg1, uintptr_t ra);
void ioinst_handle_sal(S390CPU *cpu, uint64_t reg1, uintptr_t ra);
#endif /* CONFIG_USER_ONLY */


/* mem_helper.c */

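s390x is the one target in this series where the handler also changes translation unit: the function loses its static qualifier, its body lands in the system-emulation-only source file, and its prototype is exported from the header next to the other declarations that only exist without CONFIG_USER_ONLY. A rough sketch of the header side of such a split, using a hypothetical target "foo" (names invented, not part of this diff):

/* target/foo/cpu.h -- hypothetical header, illustration only */
#ifndef CONFIG_USER_ONLY
/* Defined in the target's system-emulation-only source file. */
bool foo_cpu_has_work(CPUState *cs);
#endif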
@@ -82,12 +82,12 @@ static bool superh_io_recompile_replay_branch(CPUState *cs,
    }
    return false;
}
#endif

static bool superh_cpu_has_work(CPUState *cs)
{
    return cs->interrupt_request & CPU_INTERRUPT_HARD;
}
#endif /* !CONFIG_USER_ONLY */

static int sh4_cpu_mmu_index(CPUState *cs, bool ifetch)
{
@@ -254,6 +254,7 @@ static const VMStateDescription vmstate_sh_cpu = {
#include "hw/core/sysemu-cpu-ops.h"

static const struct SysemuCPUOps sh4_sysemu_ops = {
    .has_work = superh_cpu_has_work,
    .get_phys_page_debug = superh_cpu_get_phys_page_debug,
};
#endif
@@ -290,7 +291,6 @@ static void superh_cpu_class_init(ObjectClass *oc, void *data)
                                       &scc->parent_phases);

    cc->class_by_name = superh_cpu_class_by_name;
    cc->has_work = superh_cpu_has_work;
    cc->mmu_index = sh4_cpu_mmu_index;
    cc->dump_state = superh_cpu_dump_state;
    cc->set_pc = superh_cpu_set_pc;

@@ -777,11 +777,13 @@ static void sparc_restore_state_to_opc(CPUState *cs,
    }
}

#ifndef CONFIG_USER_ONLY
static bool sparc_cpu_has_work(CPUState *cs)
{
    return (cs->interrupt_request & CPU_INTERRUPT_HARD) &&
           cpu_interrupts_enabled(cpu_env(cs));
}
#endif /* !CONFIG_USER_ONLY */

static int sparc_cpu_mmu_index(CPUState *cs, bool ifetch)
{
@@ -988,6 +990,7 @@ static const Property sparc_cpu_properties[] = {
#include "hw/core/sysemu-cpu-ops.h"

static const struct SysemuCPUOps sparc_sysemu_ops = {
    .has_work = sparc_cpu_has_work,
    .get_phys_page_debug = sparc_cpu_get_phys_page_debug,
    .legacy_vmsd = &vmstate_sparc_cpu,
};
@@ -1029,7 +1032,6 @@ static void sparc_cpu_class_init(ObjectClass *oc, void *data)

    cc->class_by_name = sparc_cpu_class_by_name;
    cc->parse_features = sparc_cpu_parse_features;
    cc->has_work = sparc_cpu_has_work;
    cc->mmu_index = sparc_cpu_mmu_index;
    cc->dump_state = sparc_cpu_dump_state;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)

@@ -165,6 +165,7 @@ static bool tricore_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
#include "hw/core/sysemu-cpu-ops.h"

static const struct SysemuCPUOps tricore_sysemu_ops = {
    .has_work = tricore_cpu_has_work,
    .get_phys_page_debug = tricore_cpu_get_phys_page_debug,
};

@@ -193,7 +194,6 @@ static void tricore_cpu_class_init(ObjectClass *c, void *data)
    resettable_class_set_parent_phases(rc, NULL, tricore_cpu_reset_hold, NULL,
                                       &mcc->parent_phases);
    cc->class_by_name = tricore_cpu_class_by_name;
    cc->has_work = tricore_cpu_has_work;
    cc->mmu_index = tricore_cpu_mmu_index;

    cc->gdb_read_register = tricore_cpu_gdb_read_register;

@@ -63,16 +63,14 @@ static void xtensa_restore_state_to_opc(CPUState *cs,
    cpu->env.pc = data[0];
}

#ifndef CONFIG_USER_ONLY
static bool xtensa_cpu_has_work(CPUState *cs)
{
#ifndef CONFIG_USER_ONLY
    XtensaCPU *cpu = XTENSA_CPU(cs);
    CPUXtensaState *env = cpu_env(cs);

    return !cpu->env.runstall && cpu->env.pending_irq_level;
#else
    return true;
#endif
    return !env->runstall && env->pending_irq_level;
}
#endif /* !CONFIG_USER_ONLY */

static int xtensa_cpu_mmu_index(CPUState *cs, bool ifetch)
{
@@ -226,6 +224,7 @@ static const VMStateDescription vmstate_xtensa_cpu = {
#include "hw/core/sysemu-cpu-ops.h"

static const struct SysemuCPUOps xtensa_sysemu_ops = {
    .has_work = xtensa_cpu_has_work,
    .get_phys_page_debug = xtensa_cpu_get_phys_page_debug,
};
#endif
@@ -263,7 +262,6 @@ static void xtensa_cpu_class_init(ObjectClass *oc, void *data)
                                       &xcc->parent_phases);

    cc->class_by_name = xtensa_cpu_class_by_name;
    cc->has_work = xtensa_cpu_has_work;
    cc->mmu_index = xtensa_cpu_mmu_index;
    cc->dump_state = xtensa_cpu_dump_state;
    cc->set_pc = xtensa_cpu_set_pc;

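None of the hunks above show the common-code side of the change, where the callback is resolved through SysemuCPUOps once CPUClass no longer carries it. As an assumption about how the core helper ends up looking after this series (an illustrative sketch, not code copied from the diff):

/* Sketch only: dispatching has_work() through SysemuCPUOps in system emulation. */
bool cpu_has_work(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    g_assert(cc->sysemu_ops && cc->sysemu_ops->has_work);
    return cc->sysemu_ops->has_work(cpu);
}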