Fix cpu_physical_memory_rw() for 64-bit I/O accesses

KVM uses cpu_physical_memory_rw() to access I/O devices. When a
read or write with a length of 8 bytes is requested, it is split into
two 4-byte accesses.

This has been broken since revision 5849: only the first 4 bytes are
actually read from or written to the device, because the target address
is modified in place, so on the next iteration of the loop the remaining
4 bytes are read/written elsewhere (in the graphics card's RAM).
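
As an illustration of the failure mode, here is a minimal sketch of the
splitting loop (not the actual QEMU code: region_offset, do_io_access and
the length handling are simplified placeholders), assuming an 8-byte guest
access to an I/O region:

    /* Minimal sketch, NOT the real cpu_physical_memory_rw():
       shows why translating into 'addr' itself breaks the second half. */
    while (len > 0) {
        l = (len >= 4) ? 4 : len;    /* at most a 32-bit I/O access per pass */

        /* Broken (since r5849): the region-relative offset overwrote 'addr',
           so 'addr += l' below advanced from the wrong base and the second
           4-byte half was read/written somewhere else entirely:

               addr = (addr & ~TARGET_PAGE_MASK) + region_offset;

           The fix keeps the translated offset in a local variable instead: */
        target_phys_addr_t addr1 = (addr & ~TARGET_PAGE_MASK) + region_offset;
        do_io_access(addr1, buf, l); /* placeholder for the io_mem_* dispatch */

        len  -= l;
        buf  += l;
        addr += l;                   /* guest-physical address keeps advancing */
    }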

This patch fixes screen corruption (and most probably data corruption)
with FreeBSD/amd64. Bug #2556746 in the KVM bugzilla.

Signed-off-by: Aurelien Jarno <aurelien@aurel32.net>

git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@6628 c046a42c-6fe2-441c-8c8c-71466251a162
aurel32 2009-02-18 21:37:17 +00:00
parent 978dd63540
commit 6c2934db94

exec.c (26 lines changed)

@@ -2955,25 +2955,26 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
         if (is_write) {
             if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
+                target_phys_addr_t addr1 = addr;
                 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                 if (p)
-                    addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
+                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                 /* XXX: could force cpu_single_env to NULL to avoid
                    potential bugs */
-                if (l >= 4 && ((addr & 3) == 0)) {
+                if (l >= 4 && ((addr1 & 3) == 0)) {
                     /* 32 bit write access */
                     val = ldl_p(buf);
-                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
+                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                     l = 4;
-                } else if (l >= 2 && ((addr & 1) == 0)) {
+                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                     /* 16 bit write access */
                     val = lduw_p(buf);
-                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
+                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                     l = 2;
                 } else {
                     /* 8 bit write access */
                     val = ldub_p(buf);
-                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
+                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                     l = 1;
                 }
             } else {
@@ -2993,23 +2994,24 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
         } else {
             if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                 !(pd & IO_MEM_ROMD)) {
+                target_phys_addr_t addr1 = addr;
                 /* I/O case */
                 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                 if (p)
-                    addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
-                if (l >= 4 && ((addr & 3) == 0)) {
+                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
+                if (l >= 4 && ((addr1 & 3) == 0)) {
                     /* 32 bit read access */
-                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
+                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                     stl_p(buf, val);
                     l = 4;
-                } else if (l >= 2 && ((addr & 1) == 0)) {
+                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                     /* 16 bit read access */
-                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
+                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                     stw_p(buf, val);
                     l = 2;
                 } else {
                     /* 8 bit read access */
-                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
+                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                     stb_p(buf, val);
                     l = 1;
                 }