mirror of
https://github.com/qemu/qemu.git
Bugfixes and making SCSI adapters IOMMU-friendly.
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v2
iQEcBAABCAAGBQJVFAh1AAoJEL/70l94x66D8wEH/jOWcJ04YbRN9yIDXbtrWRtS
XMz9GQcKrpo+QJYB466r8dP+2mRuKajkuSm6++MPXcJs1I0O1AzbSKCDTi5kG01/
XZYOYNLc6lEjMJBZ6AbydjnsSCZjFglrFttQqIicX5l3oBZI6jtvC5FyUTUZbmp2
FXwapzadeqf8n3C66RHSO1jTGuh7ROwW1inENsxhz34ZnQ5WhWrEiFYeTwo5mMqc
sMQk3Fw4d9uRpJSVbfnQ44LrQ3GJX90U4rqBjhfIIvhkzQlziARBSR+3DdUZKVpI
uniOtvNkWmy8ST1OBf3c1ALCHTb/uUS/9bz+FQz6dbjp5COXP2NCpvCShLswBU0=
=TVSA
-----END PGP SIGNATURE-----
Merge remote-tracking branch 'remotes/bonzini/tags/for-upstream' into staging
Bugfixes and making SCSI adapters IOMMU-friendly.
# gpg: Signature made Thu Mar 26 13:24:05 2015 GMT using RSA key ID 78C7AE83
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>"
# gpg: aka "Paolo Bonzini <pbonzini@redhat.com>"
# gpg: WARNING: This key is not certified with sufficiently trusted signatures!
# gpg: It is not certain that the signature belongs to the owner.
# Primary key fingerprint: 46F5 9FBD 57D6 12E7 BFD4 E2F7 7E15 100C CD36 69B1
# Subkey fingerprint: F133 3857 4B66 2389 866C 7682 BFFB D25F 78C7 AE83
* remotes/bonzini/tags/for-upstream:
virtio-scsi-dataplane: fix memory leak for VirtIOSCSIVring
misc: fix typos in copyright declaration
exec: avoid possible overwriting of mmaped area in qemu_ram_remap
sparc: memory: Replace memory_region_init_ram with memory_region_allocate_system_memory
mips: memory: Replace memory_region_init_ram with memory_region_allocate_system_memory
m68k: memory: Replace memory_region_init_ram with memory_region_allocate_system_memory
nbd: Fix up comment after commit e140177
vmw_pvscsi: use PCI DMA APIs
megasas: use PCI DMA APIs
cpus: Don't kick un-realized cpus.
i6300esb: Fix signed integer overflow
i6300esb: Correct endiannness
fw_cfg: factor out initialization of FW_CFG_ID (rev. number)
rcu tests: fix compilation on 32-bit ppc
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit f75d4f8295
@@ -47,8 +47,9 @@ void qmp_nbd_server_start(SocketAddress *addr, Error **errp)
     }
 }
 
-/* Hook into the BlockDriverState notifiers to close the export when
- * the file is closed.
+/*
+ * Hook into the BlockBackend notifiers to close the export when the
+ * backend is closed.
  */
 typedef struct NBDCloseNotifier {
     Notifier n;
@@ -1,7 +1,7 @@
 /*
  * QEMU Boot Device Implement
  *
- * Copyright (c) 2014 HUAWEI TECHNOLOGIES CO.,LTD.
+ * Copyright (c) 2014 HUAWEI TECHNOLOGIES CO., LTD.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a copy
  * of this software and associated documentation files (the "Software"), to deal
cpus.c
@@ -1119,7 +1119,7 @@ bool qemu_in_vcpu_thread(void)
 void qemu_mutex_lock_iothread(void)
 {
     atomic_inc(&iothread_requesting_mutex);
-    if (!tcg_enabled() || !first_cpu) {
+    if (!tcg_enabled() || !first_cpu || !first_cpu->thread) {
         qemu_mutex_lock(&qemu_global_mutex);
         atomic_dec(&iothread_requesting_mutex);
     } else {
exec.c
@@ -1638,7 +1638,6 @@ void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
                 abort();
             } else {
                 flags = MAP_FIXED;
-                munmap(vaddr, length);
                 if (block->fd >= 0) {
                     flags |= (block->flags & RAM_SHARED ?
                               MAP_SHARED : MAP_PRIVATE);
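
The dropped munmap() is the whole fix: mmap() with MAP_FIXED already replaces whatever is mapped at vaddr atomically, while unmapping first opens a window in which another thread's mmap() could claim the hole and then be silently overwritten. A minimal POSIX sketch of the two orderings (illustrative only, not the qemu_ram_remap() code):

#include <stddef.h>
#include <sys/mman.h>

/* Racy: between munmap() and mmap() another thread may map something
 * at vaddr, which the MAP_FIXED mapping below would then clobber. */
static void remap_racy(void *vaddr, size_t length, int fd)
{
    munmap(vaddr, length);
    mmap(vaddr, length, PROT_READ | PROT_WRITE,
         MAP_FIXED | MAP_SHARED, fd, 0);
}

/* Safe: MAP_FIXED atomically replaces any existing mapping at vaddr,
 * so no separate munmap() is needed. */
static void remap_atomic(void *vaddr, size_t length, int fd)
{
    mmap(vaddr, length, PROT_READ | PROT_WRITE,
         MAP_FIXED | MAP_SHARED, fd, 0);
}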
@@ -702,7 +702,6 @@ static FWCfgState *bochs_bios_init(void)
      * the APIC ID, not the "CPU index"
      */
     fw_cfg_add_i16(fw_cfg, FW_CFG_MAX_CPUS, (uint16_t)apic_id_limit);
-    fw_cfg_add_i32(fw_cfg, FW_CFG_ID, 1);
     fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, (uint64_t)ram_size);
     fw_cfg_add_bytes(fw_cfg, FW_CFG_ACPI_TABLES,
                      acpi_tables, acpi_tables_len);
@@ -50,8 +50,7 @@ static void an5206_init(MachineState *machine)
     env->rambar0 = AN5206_RAMBAR_ADDR | 1;
 
     /* DRAM at address zero */
-    memory_region_init_ram(ram, NULL, "an5206.ram", ram_size, &error_abort);
-    vmstate_register_ram_global(ram);
+    memory_region_allocate_system_memory(ram, NULL, "an5206.ram", ram_size);
     memory_region_add_subregion(address_space_mem, 0, ram);
 
     /* Internal SRAM. */
@@ -42,8 +42,8 @@ static void dummy_m68k_init(MachineState *machine)
     env->vbr = 0;
 
     /* RAM at address zero */
-    memory_region_init_ram(ram, NULL, "dummy_m68k.ram", ram_size, &error_abort);
-    vmstate_register_ram_global(ram);
+    memory_region_allocate_system_memory(ram, NULL, "dummy_m68k.ram",
+                                         ram_size);
     memory_region_add_subregion(address_space_mem, 0, ram);
 
     /* Load kernel. */
@@ -218,8 +218,7 @@ static void mcf5208evb_init(MachineState *machine)
     /* TODO: Configure BARs. */
 
     /* DRAM at 0x40000000 */
-    memory_region_init_ram(ram, NULL, "mcf5208.ram", ram_size, &error_abort);
-    vmstate_register_ram_global(ram);
+    memory_region_allocate_system_memory(ram, NULL, "mcf5208.ram", ram_size);
     memory_region_add_subregion(address_space_mem, 0x40000000, ram);
 
     /* Internal SRAM. */
@@ -301,8 +301,7 @@ static void mips_fulong2e_init(MachineState *machine)
     bios_size = 1024 * 1024;
 
     /* allocate RAM */
-    memory_region_init_ram(ram, NULL, "fulong2e.ram", ram_size, &error_abort);
-    vmstate_register_ram_global(ram);
+    memory_region_allocate_system_memory(ram, NULL, "fulong2e.ram", ram_size);
     memory_region_init_ram(bios, NULL, "fulong2e.bios", bios_size,
                            &error_abort);
     vmstate_register_ram_global(bios);
@@ -182,9 +182,8 @@ static void mips_jazz_init(MachineState *machine,
     cc->do_unassigned_access = mips_jazz_do_unassigned_access;
 
     /* allocate RAM */
-    memory_region_init_ram(ram, NULL, "mips_jazz.ram", machine->ram_size,
-                           &error_abort);
-    vmstate_register_ram_global(ram);
+    memory_region_allocate_system_memory(ram, NULL, "mips_jazz.ram",
+                                         machine->ram_size);
     memory_region_add_subregion(address_space, 0, ram);
 
     memory_region_init_ram(bios, NULL, "mips_jazz.bios", MAGNUM_BIOS_SIZE,
@@ -993,9 +993,8 @@ void mips_malta_init(MachineState *machine)
     }
 
     /* register RAM at high address where it is undisturbed by IO */
-    memory_region_init_ram(ram_high, NULL, "mips_malta.ram", ram_size,
-                           &error_abort);
-    vmstate_register_ram_global(ram_high);
+    memory_region_allocate_system_memory(ram_high, NULL, "mips_malta.ram",
+                                         ram_size);
     memory_region_add_subregion(system_memory, 0x80000000, ram_high);
 
     /* alias for pre IO hole access */
@@ -171,9 +171,8 @@ mips_mipssim_init(MachineState *machine)
     qemu_register_reset(main_cpu_reset, reset_info);
 
     /* Allocate RAM. */
-    memory_region_init_ram(ram, NULL, "mips_mipssim.ram", ram_size,
-                           &error_abort);
-    vmstate_register_ram_global(ram);
+    memory_region_allocate_system_memory(ram, NULL, "mips_mipssim.ram",
+                                         ram_size);
     memory_region_init_ram(bios, NULL, "mips_mipssim.bios", BIOS_SIZE,
                            &error_abort);
     vmstate_register_ram_global(bios);
@@ -205,8 +205,7 @@ void mips_r4k_init(MachineState *machine)
                ((unsigned int)ram_size / (1 << 20)));
         exit(1);
     }
-    memory_region_init_ram(ram, NULL, "mips_r4k.ram", ram_size, &error_abort);
-    vmstate_register_ram_global(ram);
+    memory_region_allocate_system_memory(ram, NULL, "mips_r4k.ram", ram_size);
 
     memory_region_add_subregion(address_space_mem, 0, ram);
 
@@ -614,6 +614,7 @@ static void fw_cfg_init1(DeviceState *dev)
     qdev_init_nofail(dev);
 
     fw_cfg_add_bytes(s, FW_CFG_SIGNATURE, (char *)"QEMU", 4);
+    fw_cfg_add_i32(s, FW_CFG_ID, 1);
     fw_cfg_add_bytes(s, FW_CFG_UUID, qemu_uuid, 16);
     fw_cfg_add_i16(s, FW_CFG_NOGRAPHIC, (uint16_t)(display_type == DT_NOGRAPHIC));
     fw_cfg_add_i16(s, FW_CFG_NB_CPUS, (uint16_t)smp_cpus);
|
@ -460,7 +460,6 @@ static void ppc_core99_init(MachineState *machine)
|
|||||||
|
|
||||||
fw_cfg = fw_cfg_init_mem(CFG_ADDR, CFG_ADDR + 2);
|
fw_cfg = fw_cfg_init_mem(CFG_ADDR, CFG_ADDR + 2);
|
||||||
fw_cfg_add_i16(fw_cfg, FW_CFG_MAX_CPUS, (uint16_t)max_cpus);
|
fw_cfg_add_i16(fw_cfg, FW_CFG_MAX_CPUS, (uint16_t)max_cpus);
|
||||||
fw_cfg_add_i32(fw_cfg, FW_CFG_ID, 1);
|
|
||||||
fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, (uint64_t)ram_size);
|
fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, (uint64_t)ram_size);
|
||||||
fw_cfg_add_i16(fw_cfg, FW_CFG_MACHINE_ID, machine_arch);
|
fw_cfg_add_i16(fw_cfg, FW_CFG_MACHINE_ID, machine_arch);
|
||||||
fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ADDR, kernel_base);
|
fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ADDR, kernel_base);
|
||||||
|
@ -315,7 +315,6 @@ static void ppc_heathrow_init(MachineState *machine)
|
|||||||
|
|
||||||
fw_cfg = fw_cfg_init_mem(CFG_ADDR, CFG_ADDR + 2);
|
fw_cfg = fw_cfg_init_mem(CFG_ADDR, CFG_ADDR + 2);
|
||||||
fw_cfg_add_i16(fw_cfg, FW_CFG_MAX_CPUS, (uint16_t)max_cpus);
|
fw_cfg_add_i16(fw_cfg, FW_CFG_MAX_CPUS, (uint16_t)max_cpus);
|
||||||
fw_cfg_add_i32(fw_cfg, FW_CFG_ID, 1);
|
|
||||||
fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, (uint64_t)ram_size);
|
fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, (uint64_t)ram_size);
|
||||||
fw_cfg_add_i16(fw_cfg, FW_CFG_MACHINE_ID, ARCH_HEATHROW);
|
fw_cfg_add_i16(fw_cfg, FW_CFG_MACHINE_ID, ARCH_HEATHROW);
|
||||||
fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ADDR, kernel_base);
|
fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ADDR, kernel_base);
|
||||||
|
@ -171,26 +171,29 @@ static bool megasas_is_jbod(MegasasState *s)
|
|||||||
return s->flags & MEGASAS_MASK_USE_JBOD;
|
return s->flags & MEGASAS_MASK_USE_JBOD;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void megasas_frame_set_cmd_status(unsigned long frame, uint8_t v)
|
static void megasas_frame_set_cmd_status(MegasasState *s,
|
||||||
|
unsigned long frame, uint8_t v)
|
||||||
{
|
{
|
||||||
stb_phys(&address_space_memory,
|
PCIDevice *pci = &s->parent_obj;
|
||||||
frame + offsetof(struct mfi_frame_header, cmd_status), v);
|
stb_pci_dma(pci, frame + offsetof(struct mfi_frame_header, cmd_status), v);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void megasas_frame_set_scsi_status(unsigned long frame, uint8_t v)
|
static void megasas_frame_set_scsi_status(MegasasState *s,
|
||||||
|
unsigned long frame, uint8_t v)
|
||||||
{
|
{
|
||||||
stb_phys(&address_space_memory,
|
PCIDevice *pci = &s->parent_obj;
|
||||||
frame + offsetof(struct mfi_frame_header, scsi_status), v);
|
stb_pci_dma(pci, frame + offsetof(struct mfi_frame_header, scsi_status), v);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Context is considered opaque, but the HBA firmware is running
|
* Context is considered opaque, but the HBA firmware is running
|
||||||
* in little endian mode. So convert it to little endian, too.
|
* in little endian mode. So convert it to little endian, too.
|
||||||
*/
|
*/
|
||||||
static uint64_t megasas_frame_get_context(unsigned long frame)
|
static uint64_t megasas_frame_get_context(MegasasState *s,
|
||||||
|
unsigned long frame)
|
||||||
{
|
{
|
||||||
return ldq_le_phys(&address_space_memory,
|
PCIDevice *pci = &s->parent_obj;
|
||||||
frame + offsetof(struct mfi_frame_header, context));
|
return ldq_le_pci_dma(pci, frame + offsetof(struct mfi_frame_header, context));
|
||||||
}
|
}
|
||||||
|
|
||||||
static bool megasas_frame_is_ieee_sgl(MegasasCmd *cmd)
|
static bool megasas_frame_is_ieee_sgl(MegasasCmd *cmd)
|
||||||
@ -523,8 +526,7 @@ static MegasasCmd *megasas_enqueue_frame(MegasasState *s,
|
|||||||
s->busy++;
|
s->busy++;
|
||||||
|
|
||||||
if (s->consumer_pa) {
|
if (s->consumer_pa) {
|
||||||
s->reply_queue_tail = ldl_le_phys(&address_space_memory,
|
s->reply_queue_tail = ldl_le_pci_dma(pcid, s->consumer_pa);
|
||||||
s->consumer_pa);
|
|
||||||
}
|
}
|
||||||
trace_megasas_qf_enqueue(cmd->index, cmd->count, cmd->context,
|
trace_megasas_qf_enqueue(cmd->index, cmd->count, cmd->context,
|
||||||
s->reply_queue_head, s->reply_queue_tail, s->busy);
|
s->reply_queue_head, s->reply_queue_tail, s->busy);
|
||||||
@ -547,29 +549,24 @@ static void megasas_complete_frame(MegasasState *s, uint64_t context)
|
|||||||
*/
|
*/
|
||||||
if (megasas_use_queue64(s)) {
|
if (megasas_use_queue64(s)) {
|
||||||
queue_offset = s->reply_queue_head * sizeof(uint64_t);
|
queue_offset = s->reply_queue_head * sizeof(uint64_t);
|
||||||
stq_le_phys(&address_space_memory,
|
stq_le_pci_dma(pci_dev, s->reply_queue_pa + queue_offset, context);
|
||||||
s->reply_queue_pa + queue_offset, context);
|
|
||||||
} else {
|
} else {
|
||||||
queue_offset = s->reply_queue_head * sizeof(uint32_t);
|
queue_offset = s->reply_queue_head * sizeof(uint32_t);
|
||||||
stl_le_phys(&address_space_memory,
|
stl_le_pci_dma(pci_dev, s->reply_queue_pa + queue_offset, context);
|
||||||
s->reply_queue_pa + queue_offset, context);
|
|
||||||
}
|
}
|
||||||
s->reply_queue_tail = ldl_le_phys(&address_space_memory,
|
s->reply_queue_tail = ldl_le_pci_dma(pci_dev, s->consumer_pa);
|
||||||
s->consumer_pa);
|
|
||||||
trace_megasas_qf_complete(context, s->reply_queue_head,
|
trace_megasas_qf_complete(context, s->reply_queue_head,
|
||||||
s->reply_queue_tail, s->busy);
|
s->reply_queue_tail, s->busy);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (megasas_intr_enabled(s)) {
|
if (megasas_intr_enabled(s)) {
|
||||||
/* Update reply queue pointer */
|
/* Update reply queue pointer */
|
||||||
s->reply_queue_tail = ldl_le_phys(&address_space_memory,
|
s->reply_queue_tail = ldl_le_pci_dma(pci_dev, s->consumer_pa);
|
||||||
s->consumer_pa);
|
|
||||||
tail = s->reply_queue_head;
|
tail = s->reply_queue_head;
|
||||||
s->reply_queue_head = megasas_next_index(s, tail, s->fw_cmds);
|
s->reply_queue_head = megasas_next_index(s, tail, s->fw_cmds);
|
||||||
trace_megasas_qf_update(s->reply_queue_head, s->reply_queue_tail,
|
trace_megasas_qf_update(s->reply_queue_head, s->reply_queue_tail,
|
||||||
s->busy);
|
s->busy);
|
||||||
stl_le_phys(&address_space_memory,
|
stl_le_pci_dma(pci_dev, s->producer_pa, s->reply_queue_head);
|
||||||
s->producer_pa, s->reply_queue_head);
|
|
||||||
/* Notify HBA */
|
/* Notify HBA */
|
||||||
if (msix_enabled(pci_dev)) {
|
if (msix_enabled(pci_dev)) {
|
||||||
trace_megasas_msix_raise(0);
|
trace_megasas_msix_raise(0);
|
||||||
@ -651,8 +648,8 @@ static int megasas_init_firmware(MegasasState *s, MegasasCmd *cmd)
|
|||||||
pa_lo = le32_to_cpu(initq->pi_addr_lo);
|
pa_lo = le32_to_cpu(initq->pi_addr_lo);
|
||||||
pa_hi = le32_to_cpu(initq->pi_addr_hi);
|
pa_hi = le32_to_cpu(initq->pi_addr_hi);
|
||||||
s->producer_pa = ((uint64_t) pa_hi << 32) | pa_lo;
|
s->producer_pa = ((uint64_t) pa_hi << 32) | pa_lo;
|
||||||
s->reply_queue_head = ldl_le_phys(&address_space_memory, s->producer_pa);
|
s->reply_queue_head = ldl_le_pci_dma(pcid, s->producer_pa);
|
||||||
s->reply_queue_tail = ldl_le_phys(&address_space_memory, s->consumer_pa);
|
s->reply_queue_tail = ldl_le_pci_dma(pcid, s->consumer_pa);
|
||||||
flags = le32_to_cpu(initq->flags);
|
flags = le32_to_cpu(initq->flags);
|
||||||
if (flags & MFI_QUEUE_FLAG_CONTEXT64) {
|
if (flags & MFI_QUEUE_FLAG_CONTEXT64) {
|
||||||
s->flags |= MEGASAS_MASK_USE_QUEUE64;
|
s->flags |= MEGASAS_MASK_USE_QUEUE64;
|
||||||
@ -1951,14 +1948,14 @@ static void megasas_handle_frame(MegasasState *s, uint64_t frame_addr,
|
|||||||
* Always read 64bit context, top bits will be
|
* Always read 64bit context, top bits will be
|
||||||
* masked out if required in megasas_enqueue_frame()
|
* masked out if required in megasas_enqueue_frame()
|
||||||
*/
|
*/
|
||||||
frame_context = megasas_frame_get_context(frame_addr);
|
frame_context = megasas_frame_get_context(s, frame_addr);
|
||||||
|
|
||||||
cmd = megasas_enqueue_frame(s, frame_addr, frame_context, frame_count);
|
cmd = megasas_enqueue_frame(s, frame_addr, frame_context, frame_count);
|
||||||
if (!cmd) {
|
if (!cmd) {
|
||||||
/* reply queue full */
|
/* reply queue full */
|
||||||
trace_megasas_frame_busy(frame_addr);
|
trace_megasas_frame_busy(frame_addr);
|
||||||
megasas_frame_set_scsi_status(frame_addr, BUSY);
|
megasas_frame_set_scsi_status(s, frame_addr, BUSY);
|
||||||
megasas_frame_set_cmd_status(frame_addr, MFI_STAT_SCSI_DONE_WITH_ERROR);
|
megasas_frame_set_cmd_status(s, frame_addr, MFI_STAT_SCSI_DONE_WITH_ERROR);
|
||||||
megasas_complete_frame(s, frame_context);
|
megasas_complete_frame(s, frame_context);
|
||||||
s->event_count++;
|
s->event_count++;
|
||||||
return;
|
return;
|
||||||
@ -1993,7 +1990,7 @@ static void megasas_handle_frame(MegasasState *s, uint64_t frame_addr,
|
|||||||
if (cmd->frame) {
|
if (cmd->frame) {
|
||||||
cmd->frame->header.cmd_status = frame_status;
|
cmd->frame->header.cmd_status = frame_status;
|
||||||
} else {
|
} else {
|
||||||
megasas_frame_set_cmd_status(frame_addr, frame_status);
|
megasas_frame_set_cmd_status(s, frame_addr, frame_status);
|
||||||
}
|
}
|
||||||
megasas_unmap_frame(s, cmd);
|
megasas_unmap_frame(s, cmd);
|
||||||
megasas_complete_frame(s, cmd->context);
|
megasas_complete_frame(s, cmd->context);
|
||||||
|
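
The common thread in the megasas changes above is that every descriptor access now names the PCI device instead of address_space_memory. The ld*_pci_dma()/st*_pci_dma() helpers issue the access through the device's own DMA address space, so a guest IOMMU, when one is configured, gets to translate and police it, whereas ld*_phys()/st*_phys() on address_space_memory bypasses that. A hedged sketch of the pattern with a hypothetical helper (post_status() and status_pa are illustrative, not part of the megasas code):

#include "hw/pci/pci.h"

/* Hypothetical device helper: write a 32-bit completion status to a
 * guest-supplied bus address.  Going through stl_le_pci_dma() means the
 * store is made in the device's DMA address space, so an emulated IOMMU
 * can remap or reject it instead of the device poking guest RAM directly. */
static void post_status(PCIDevice *dev, dma_addr_t status_pa, uint32_t status)
{
    stl_le_pci_dma(dev, status_pa, status);
}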
@@ -182,13 +182,19 @@ static void virtio_scsi_vring_teardown(VirtIOSCSI *s)
 
     if (s->ctrl_vring) {
         vring_teardown(&s->ctrl_vring->vring, vdev, 0);
+        g_slice_free(VirtIOSCSIVring, s->ctrl_vring);
+        s->ctrl_vring = NULL;
     }
     if (s->event_vring) {
         vring_teardown(&s->event_vring->vring, vdev, 1);
+        g_slice_free(VirtIOSCSIVring, s->event_vring);
+        s->event_vring = NULL;
     }
     if (s->cmd_vrings) {
         for (i = 0; i < vs->conf.num_queues && s->cmd_vrings[i]; i++) {
             vring_teardown(&s->cmd_vrings[i]->vring, vdev, 2 + i);
+            g_slice_free(VirtIOSCSIVring, s->cmd_vrings[i]);
+            s->cmd_vrings[i] = NULL;
         }
         free(s->cmd_vrings);
         s->cmd_vrings = NULL;
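
The leak fix above restores allocator symmetry: each VirtIOSCSIVring handed out during dataplane setup with g_slice_new() now gets a matching g_slice_free() in teardown, and the pointer is cleared so a repeated teardown cannot double-free. A generic GLib sketch of that pattern (Ring and ring_teardown() are made-up names, not the virtio-scsi types):

#include <glib.h>

typedef struct Ring {
    int id;
} Ring;

/* Free an object allocated with g_slice_new(Ring) and clear the caller's
 * pointer so a second teardown call is a harmless no-op. */
static void ring_teardown(Ring **rp)
{
    if (*rp != NULL) {
        g_slice_free(Ring, *rp);
        *rp = NULL;
    }
}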
@@ -42,12 +42,12 @@
 #define PVSCSI_MAX_CMD_DATA_WORDS \
     (sizeof(PVSCSICmdDescSetupRings)/sizeof(uint32_t))
 
-#define RS_GET_FIELD(rs_pa, field) \
-    (ldl_le_phys(&address_space_memory, \
-                 rs_pa + offsetof(struct PVSCSIRingsState, field)))
-#define RS_SET_FIELD(rs_pa, field, val) \
-    (stl_le_phys(&address_space_memory, \
-                 rs_pa + offsetof(struct PVSCSIRingsState, field), val))
+#define RS_GET_FIELD(m, field) \
+    (ldl_le_pci_dma(&container_of(m, PVSCSIState, rings)->parent_obj, \
+                 (m)->rs_pa + offsetof(struct PVSCSIRingsState, field)))
+#define RS_SET_FIELD(m, field, val) \
+    (stl_le_pci_dma(&container_of(m, PVSCSIState, rings)->parent_obj, \
+                 (m)->rs_pa + offsetof(struct PVSCSIRingsState, field), val))
 
 #define TYPE_PVSCSI "pvscsi"
 #define PVSCSI(obj) OBJECT_CHECK(PVSCSIState, (obj), TYPE_PVSCSI)

@@ -153,13 +153,13 @@ pvscsi_ring_init_data(PVSCSIRingInfo *m, PVSCSICmdDescSetupRings *ri)
         m->cmp_ring_pages_pa[i] = ri->cmpRingPPNs[i] << VMW_PAGE_SHIFT;
     }
 
-    RS_SET_FIELD(m->rs_pa, reqProdIdx, 0);
-    RS_SET_FIELD(m->rs_pa, reqConsIdx, 0);
-    RS_SET_FIELD(m->rs_pa, reqNumEntriesLog2, txr_len_log2);
+    RS_SET_FIELD(m, reqProdIdx, 0);
+    RS_SET_FIELD(m, reqConsIdx, 0);
+    RS_SET_FIELD(m, reqNumEntriesLog2, txr_len_log2);
 
-    RS_SET_FIELD(m->rs_pa, cmpProdIdx, 0);
-    RS_SET_FIELD(m->rs_pa, cmpConsIdx, 0);
-    RS_SET_FIELD(m->rs_pa, cmpNumEntriesLog2, rxr_len_log2);
+    RS_SET_FIELD(m, cmpProdIdx, 0);
+    RS_SET_FIELD(m, cmpConsIdx, 0);
+    RS_SET_FIELD(m, cmpNumEntriesLog2, rxr_len_log2);
 
     trace_pvscsi_ring_init_data(txr_len_log2, rxr_len_log2);

@@ -185,9 +185,9 @@ pvscsi_ring_init_msg(PVSCSIRingInfo *m, PVSCSICmdDescSetupMsgRing *ri)
         m->msg_ring_pages_pa[i] = ri->ringPPNs[i] << VMW_PAGE_SHIFT;
     }
 
-    RS_SET_FIELD(m->rs_pa, msgProdIdx, 0);
-    RS_SET_FIELD(m->rs_pa, msgConsIdx, 0);
-    RS_SET_FIELD(m->rs_pa, msgNumEntriesLog2, len_log2);
+    RS_SET_FIELD(m, msgProdIdx, 0);
+    RS_SET_FIELD(m, msgConsIdx, 0);
+    RS_SET_FIELD(m, msgNumEntriesLog2, len_log2);
 
     trace_pvscsi_ring_init_msg(len_log2);
 

@@ -213,7 +213,7 @@ pvscsi_ring_cleanup(PVSCSIRingInfo *mgr)
 static hwaddr
 pvscsi_ring_pop_req_descr(PVSCSIRingInfo *mgr)
 {
-    uint32_t ready_ptr = RS_GET_FIELD(mgr->rs_pa, reqProdIdx);
+    uint32_t ready_ptr = RS_GET_FIELD(mgr, reqProdIdx);
 
     if (ready_ptr != mgr->consumed_ptr) {
         uint32_t next_ready_ptr =

@@ -233,7 +233,7 @@ pvscsi_ring_pop_req_descr(PVSCSIRingInfo *mgr)
 static void
 pvscsi_ring_flush_req(PVSCSIRingInfo *mgr)
 {
-    RS_SET_FIELD(mgr->rs_pa, reqConsIdx, mgr->consumed_ptr);
+    RS_SET_FIELD(mgr, reqConsIdx, mgr->consumed_ptr);
 }
 
 static hwaddr

@@ -278,14 +278,14 @@ pvscsi_ring_flush_cmp(PVSCSIRingInfo *mgr)
 
     trace_pvscsi_ring_flush_cmp(mgr->filled_cmp_ptr);
 
-    RS_SET_FIELD(mgr->rs_pa, cmpProdIdx, mgr->filled_cmp_ptr);
+    RS_SET_FIELD(mgr, cmpProdIdx, mgr->filled_cmp_ptr);
 }
 
 static bool
 pvscsi_ring_msg_has_room(PVSCSIRingInfo *mgr)
 {
-    uint32_t prodIdx = RS_GET_FIELD(mgr->rs_pa, msgProdIdx);
-    uint32_t consIdx = RS_GET_FIELD(mgr->rs_pa, msgConsIdx);
+    uint32_t prodIdx = RS_GET_FIELD(mgr, msgProdIdx);
+    uint32_t consIdx = RS_GET_FIELD(mgr, msgConsIdx);
 
     return (prodIdx - consIdx) < (mgr->msg_len_mask + 1);
 }

@@ -298,7 +298,7 @@ pvscsi_ring_flush_msg(PVSCSIRingInfo *mgr)
 
     trace_pvscsi_ring_flush_msg(mgr->filled_msg_ptr);
 
-    RS_SET_FIELD(mgr->rs_pa, msgProdIdx, mgr->filled_msg_ptr);
+    RS_SET_FIELD(mgr, msgProdIdx, mgr->filled_msg_ptr);
 }
 
 static void
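
The reworked RS_GET_FIELD()/RS_SET_FIELD() macros take the PVSCSIRingInfo pointer and use container_of() to walk from the embedded rings member back up to the owning PVSCSIState, whose parent_obj is the PCIDevice the *_pci_dma accessors need. A stand-alone sketch of that member-to-owner step (Outer and its fields are placeholders, not the PVSCSI types):

#include <stddef.h>
#include <stdio.h>

/* container_of() in the style QEMU uses: recover a pointer to the
 * enclosing structure from a pointer to one of its members. */
#define container_of(ptr, type, member) \
    ((type *) ((char *) (ptr) - offsetof(type, member)))

typedef struct Rings { int prod_idx; } Rings;

typedef struct Outer {
    int parent_obj;   /* stands in for the embedded PCIDevice  */
    Rings rings;      /* stands in for the PVSCSIRingInfo rings */
} Outer;

int main(void)
{
    Outer dev = { .parent_obj = 42 };
    Rings *m = &dev.rings;

    /* Same move as the new macros: from &dev.rings back to dev. */
    Outer *owner = container_of(m, Outer, rings);
    printf("parent_obj = %d\n", owner->parent_obj);
    return 0;
}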
|
@ -151,8 +151,7 @@ static void leon3_generic_hw_init(MachineState *machine)
|
|||||||
exit(1);
|
exit(1);
|
||||||
}
|
}
|
||||||
|
|
||||||
memory_region_init_ram(ram, NULL, "leon3.ram", ram_size, &error_abort);
|
memory_region_allocate_system_memory(ram, NULL, "leon3.ram", ram_size);
|
||||||
vmstate_register_ram_global(ram);
|
|
||||||
memory_region_add_subregion(address_space_mem, 0x40000000, ram);
|
memory_region_add_subregion(address_space_mem, 0x40000000, ram);
|
||||||
|
|
||||||
/* Allocate BIOS */
|
/* Allocate BIOS */
|
||||||
|
@@ -805,9 +805,8 @@ static int ram_init1(SysBusDevice *dev)
 {
     RamDevice *d = SUN4M_RAM(dev);
 
-    memory_region_init_ram(&d->ram, OBJECT(d), "sun4m.ram", d->size,
-                           &error_abort);
-    vmstate_register_ram_global(&d->ram);
+    memory_region_allocate_system_memory(&d->ram, OBJECT(d), "sun4m.ram",
+                                         d->size);
     sysbus_init_mmio(dev, &d->ram);
     return 0;
 }
@@ -1088,7 +1087,6 @@ static void sun4m_hw_init(const struct sun4m_hwdef *hwdef,
 
     fw_cfg = fw_cfg_init_mem(CFG_ADDR, CFG_ADDR + 2);
     fw_cfg_add_i16(fw_cfg, FW_CFG_MAX_CPUS, (uint16_t)max_cpus);
-    fw_cfg_add_i32(fw_cfg, FW_CFG_ID, 1);
     fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, (uint64_t)ram_size);
     fw_cfg_add_i16(fw_cfg, FW_CFG_MACHINE_ID, hwdef->machine_id);
     fw_cfg_add_i16(fw_cfg, FW_CFG_SUN4M_DEPTH, graphic_depth);
@@ -895,7 +895,6 @@ static void sun4uv_init(MemoryRegion *address_space_mem,
 
     fw_cfg = fw_cfg_init_io(BIOS_CFG_IOPORT);
     fw_cfg_add_i16(fw_cfg, FW_CFG_MAX_CPUS, (uint16_t)max_cpus);
-    fw_cfg_add_i32(fw_cfg, FW_CFG_ID, 1);
     fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, (uint64_t)ram_size);
     fw_cfg_add_i16(fw_cfg, FW_CFG_MACHINE_ID, hwdef->machine_id);
     fw_cfg_add_i64(fw_cfg, FW_CFG_KERNEL_ADDR, kernel_entry);
@@ -125,8 +125,14 @@ static void i6300esb_restart_timer(I6300State *d, int stage)
     else
         timeout <<= 5;
 
-    /* Get the timeout in units of ticks_per_sec. */
-    timeout = get_ticks_per_sec() * timeout / 33000000;
+    /* Get the timeout in units of ticks_per_sec.
+     *
+     * ticks_per_sec is typically 10^9 == 0x3B9ACA00 (30 bits), with
+     * 20 bits of user supplied preload, and 15 bits of scale, the
+     * multiply here can exceed 64-bits, before we divide by 33MHz, so
+     * we use a higher-precision intermediate result.
+     */
+    timeout = muldiv64(get_ticks_per_sec(), timeout, 33000000);
 
     i6300esb_debug("stage %d, timeout %" PRIi64 "\n", d->stage, timeout);
 
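
The new comment above is easy to check with concrete numbers: get_ticks_per_sec() is about 10^9 (~30 bits) and the watchdog preload shifted by the stage scale can need up to ~35 bits, so the product needs ~65 bits and a plain uint64_t multiply wraps before the division by 33 MHz. muldiv64() keeps a wider intermediate; the sketch below shows the same effect with a compiler-provided 128-bit type (GCC/Clang __int128, illustrative values, not the watchdog code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t ticks_per_sec = 1000000000ULL;   /* ~2^30 */
    uint64_t timeout = 1ULL << 35;            /* 20-bit preload << 15 */

    /* Plain 64-bit product: 2^30 * 2^35 needs ~65 bits and wraps. */
    uint64_t wrapped = ticks_per_sec * timeout / 33000000;

    /* Widened intermediate, the effect muldiv64() provides. */
    uint64_t exact = (uint64_t)((unsigned __int128)ticks_per_sec * timeout
                                / 33000000);

    printf("wrapped = %llu\nexact   = %llu\n",
           (unsigned long long)wrapped, (unsigned long long)exact);
    return 0;
}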
@@ -369,7 +375,7 @@ static const MemoryRegionOps i6300esb_ops = {
             i6300esb_mem_writel,
         },
     },
-    .endianness = DEVICE_NATIVE_ENDIAN,
+    .endianness = DEVICE_LITTLE_ENDIAN,
 };
 
 static const VMStateDescription vmstate_i6300esb = {
@@ -82,6 +82,7 @@ static volatile int goflag = GOFLAG_INIT;
 #define RCU_READ_RUN 1000
 
 #define NR_THREADS 100
+static QemuMutex counts_mutex;
 static QemuThread threads[NR_THREADS];
 static struct rcu_reader_data *data[NR_THREADS];
 static int n_threads;

@@ -130,7 +131,9 @@ static void *rcu_read_perf_test(void *arg)
         }
         n_reads_local += RCU_READ_RUN;
     }
-    atomic_add(&n_reads, n_reads_local);
+    qemu_mutex_lock(&counts_mutex);
+    n_reads += n_reads_local;
+    qemu_mutex_unlock(&counts_mutex);
 
     rcu_unregister_thread();
     return NULL;

@@ -151,7 +154,9 @@ static void *rcu_update_perf_test(void *arg)
         synchronize_rcu();
         n_updates_local++;
     }
-    atomic_add(&n_updates, n_updates_local);
+    qemu_mutex_lock(&counts_mutex);
+    n_updates += n_updates_local;
+    qemu_mutex_unlock(&counts_mutex);
 
     rcu_unregister_thread();
     return NULL;

@@ -241,6 +246,7 @@ static void *rcu_read_stress_test(void *arg)
     struct rcu_stress *p;
     int pc;
     long long n_reads_local = 0;
+    long long rcu_stress_local[RCU_STRESS_PIPE_LEN + 1] = { 0 };
     volatile int garbage = 0;
 
     rcu_register_thread();

@@ -265,13 +271,18 @@ static void *rcu_read_stress_test(void *arg)
         if ((pc > RCU_STRESS_PIPE_LEN) || (pc < 0)) {
             pc = RCU_STRESS_PIPE_LEN;
         }
-        atomic_inc(&rcu_stress_count[pc]);
+        rcu_stress_local[pc]++;
         n_reads_local++;
         if ((++itercnt % 0x1000) == 0) {
             synchronize_rcu();
         }
     }
-    atomic_add(&n_reads, n_reads_local);
+    qemu_mutex_lock(&counts_mutex);
+    n_reads += n_reads_local;
+    for (i = 0; i <= RCU_STRESS_PIPE_LEN; i++) {
+        rcu_stress_count[i] += rcu_stress_local[i];
+    }
+    qemu_mutex_unlock(&counts_mutex);
 
     rcu_unregister_thread();
     return NULL;

@@ -419,6 +430,7 @@ int main(int argc, char *argv[])
     int nreaders = 1;
     int duration = 1;
 
+    qemu_mutex_init(&counts_mutex);
     if (argc >= 2 && argv[1][0] == '-') {
         g_test_init(&argc, &argv, NULL);
         if (g_test_quick()) {
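
The rcutorture changes above trade atomic_add() on long long counters, which 32-bit ppc cannot compile to an atomic operation, for per-thread local tallies that are folded into the shared totals once, under a mutex, when each thread finishes. The same pattern in plain pthreads (a sketch; the QEMU tests use QemuMutex/QemuThread instead):

#include <pthread.h>

static pthread_mutex_t counts_mutex = PTHREAD_MUTEX_INITIALIZER;
static long long n_reads;   /* shared total, protected by counts_mutex */

static void *reader(void *arg)
{
    long long n_reads_local = 0;

    (void)arg;
    for (int i = 0; i < 1000000; i++) {
        /* ... read-side work ... */
        n_reads_local++;    /* no shared-memory traffic inside the loop */
    }

    /* Fold the per-thread tally in once, under the lock; this works
     * even on hosts without 64-bit atomic operations. */
    pthread_mutex_lock(&counts_mutex);
    n_reads += n_reads_local;
    pthread_mutex_unlock(&counts_mutex);
    return NULL;
}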
@@ -35,16 +35,15 @@
  * Test variables.
  */
 
-long long n_reads = 0LL;
-long long n_updates = 0LL;
-long long n_reclaims = 0LL;
-long long n_nodes_removed = 0LL;
-long long n_nodes = 0LL;
-int g_test_in_charge = 0;
+static QemuMutex counts_mutex;
+static long long n_reads = 0LL;
+static long long n_updates = 0LL;
+static long long n_reclaims = 0LL;
+static long long n_nodes_removed = 0LL;
+static long long n_nodes = 0LL;
+static int g_test_in_charge = 0;
 
-int nthreadsrunning;
-
-char argsbuf[64];
+static int nthreadsrunning;
 
 #define GOFLAG_INIT 0
 #define GOFLAG_RUN 1

@@ -92,21 +91,21 @@ static void wait_all_threads(void)
 struct list_element {
     QLIST_ENTRY(list_element) entry;
     struct rcu_head rcu;
-    long long val;
 };
 
 static void reclaim_list_el(struct rcu_head *prcu)
 {
     struct list_element *el = container_of(prcu, struct list_element, rcu);
     g_free(el);
-    atomic_add(&n_reclaims, 1);
+    /* Accessed only from call_rcu thread. */
+    n_reclaims++;
 }
 
 static QLIST_HEAD(q_list_head, list_element) Q_list_head;
 
 static void *rcu_q_reader(void *arg)
 {
-    long long j, n_reads_local = 0;
+    long long n_reads_local = 0;
     struct list_element *el;
 
     *(struct rcu_reader_data **)arg = &rcu_reader;

@@ -118,8 +117,6 @@ static void *rcu_q_reader(void *arg)
     while (goflag == GOFLAG_RUN) {
         rcu_read_lock();
         QLIST_FOREACH_RCU(el, &Q_list_head, entry) {
-            j = atomic_read(&el->val);
-            (void)j;
             n_reads_local++;
             if (goflag == GOFLAG_STOP) {
                 break;

@@ -129,7 +126,9 @@ static void *rcu_q_reader(void *arg)
 
         g_usleep(100);
     }
-    atomic_add(&n_reads, n_reads_local);
+    qemu_mutex_lock(&counts_mutex);
+    n_reads += n_reads_local;
+    qemu_mutex_unlock(&counts_mutex);
     return NULL;
 }
 

@@ -137,6 +136,7 @@ static void *rcu_q_reader(void *arg)
 static void *rcu_q_updater(void *arg)
 {
     int j, target_el;
+    long long n_nodes_local = 0;
     long long n_updates_local = 0;
     long long n_removed_local = 0;
     struct list_element *el, *prev_el;

@@ -170,8 +170,7 @@ static void *rcu_q_updater(void *arg)
             j++;
             if (target_el == j) {
                 prev_el = g_new(struct list_element, 1);
-                atomic_add(&n_nodes, 1);
-                prev_el->val = atomic_read(&n_nodes);
+                n_nodes += n_nodes_local;
                 QLIST_INSERT_BEFORE_RCU(el, prev_el, entry);
                 break;
             }

@@ -181,8 +180,11 @@ static void *rcu_q_updater(void *arg)
         synchronize_rcu();
     }
     synchronize_rcu();
-    atomic_add(&n_updates, n_updates_local);
-    atomic_add(&n_nodes_removed, n_removed_local);
+    qemu_mutex_lock(&counts_mutex);
+    n_nodes += n_nodes_local;
+    n_updates += n_updates_local;
+    n_nodes_removed += n_removed_local;
+    qemu_mutex_unlock(&counts_mutex);
     return NULL;
 }
 

@@ -194,10 +196,11 @@ static void rcu_qtest_init(void)
     srand(time(0));
     for (i = 0; i < RCU_Q_LEN; i++) {
         new_el = g_new(struct list_element, 1);
-        new_el->val = i;
         QLIST_INSERT_HEAD_RCU(&Q_list_head, new_el, entry);
     }
-    atomic_add(&n_nodes, RCU_Q_LEN);
+    qemu_mutex_lock(&counts_mutex);
+    n_nodes += RCU_Q_LEN;
+    qemu_mutex_unlock(&counts_mutex);
 }
 
 static void rcu_qtest_run(int duration, int nreaders)

@@ -233,7 +236,9 @@ static void rcu_qtest(const char *test, int duration, int nreaders)
         call_rcu1(&prev_el->rcu, reclaim_list_el);
         n_removed_local++;
     }
-    atomic_add(&n_nodes_removed, n_removed_local);
+    qemu_mutex_lock(&counts_mutex);
+    n_nodes_removed += n_removed_local;
+    qemu_mutex_unlock(&counts_mutex);
     synchronize_rcu();
     while (n_nodes_removed > n_reclaims) {
         g_usleep(100);

@@ -277,6 +282,7 @@ int main(int argc, char *argv[])
 {
     int duration = 0, readers = 0;
 
+    qemu_mutex_init(&counts_mutex);
     if (argc >= 2) {
         if (argv[1][0] == '-') {
             g_test_init(&argc, &argv, NULL);
@@ -1,7 +1,7 @@
 /*
  * QTest testcase for USB OHCI controller
  *
- * Copyright (c) 2014 HUAWEI TECHNOLOGIES CO.,LTD.
+ * Copyright (c) 2014 HUAWEI TECHNOLOGIES CO., LTD.
  *
  * This work is licensed under the terms of the GNU GPL, version 2 or later.
  * See the COPYING file in the top-level directory.

@@ -1,7 +1,7 @@
 /*
  * QTest testcase for USB UHCI controller
  *
- * Copyright (c) 2014 HUAWEI TECHNOLOGIES CO.,LTD.
+ * Copyright (c) 2014 HUAWEI TECHNOLOGIES CO., LTD.
  *
  * This work is licensed under the terms of the GNU GPL, version 2 or later.
  * See the COPYING file in the top-level directory.

@@ -1,7 +1,7 @@
 /*
  * QTest testcase for USB xHCI controller
  *
- * Copyright (c) 2014 HUAWEI TECHNOLOGIES CO.,LTD.
+ * Copyright (c) 2014 HUAWEI TECHNOLOGIES CO., LTD.
  *
  * This work is licensed under the terms of the GNU GPL, version 2 or later.
  * See the COPYING file in the top-level directory.