ppcmmu: Add 64-bit accesses to I/O

Also add an alignment exception for unaligned 64-bit accesses: 64-bit accesses require dword (4-byte) alignment.
joevt 2023-06-14 21:10:34 -07:00
parent 814260f0b6
commit 16123dea45

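In short: a 64-bit access to a memory-mapped I/O page is now performed as two big-endian 32-bit device accesses, high word first, and an address that is not dword (4-byte) aligned raises EXC_ALIGNMENT. A minimal, self-contained sketch of that decomposition (io_space, io_read32 and io_write32 are illustrative stand-ins, not the emulator's device API):

    #include <cstdint>
    #include <cstdio>

    static uint32_t io_space[4];    // pretend I/O registers, one 32-bit word each

    static uint32_t io_read32(uint32_t offset)              { return io_space[offset / 4]; }
    static void     io_write32(uint32_t offset, uint32_t v) { io_space[offset / 4] = v; }

    static uint64_t io_read64(uint32_t offset) {
        if (offset & 3)     // the real code raises Except_Type::EXC_ALIGNMENT here
            return 0;
        // two 32-bit reads, high word first, combined into one 64-bit value
        return ((uint64_t)io_read32(offset) << 32) | io_read32(offset + 4);
    }

    static void io_write64(uint32_t offset, uint64_t v) {
        if (offset & 3)     // the real code raises Except_Type::EXC_ALIGNMENT here
            return;
        io_write32(offset,     (uint32_t)(v >> 32));    // high word first
        io_write32(offset + 4, (uint32_t)v);            // then low word
    }

    int main() {
        io_write64(8, 0x1122334455667788ULL);
        printf("%016llx\n", (unsigned long long)io_read64(8));  // prints 1122334455667788
    }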

@@ -987,9 +987,10 @@ void mmu_print_regs()
 }
 
 // Forward declarations.
-static uint32_t read_unaligned(uint32_t guest_va, uint8_t *host_va, uint32_t size);
-static void write_unaligned(uint32_t guest_va, uint8_t *host_va, uint32_t value,
-                            uint32_t size);
+template <class T>
+static T read_unaligned(uint32_t guest_va, uint8_t *host_va);
+template <class T>
+static void write_unaligned(uint32_t guest_va, uint8_t *host_va, T value);
 
 template <class T>
 inline T mmu_read_vmem(uint32_t guest_va)
@@ -1034,11 +1035,28 @@ inline T mmu_read_vmem(uint32_t guest_va)
 #ifdef MMU_PROFILING
             iomem_reads_total++;
 #endif
-            return (
-                tlb2_entry->reg_desc->devobj->read(tlb2_entry->reg_desc->start,
-                    guest_va - tlb2_entry->reg_desc->start,
-                    sizeof(T))
-            );
+            if (sizeof(T) == 8) {
+                if (guest_va & 3) {
+                    ppc_exception_handler(Except_Type::EXC_ALIGNMENT, 0x0);
+                }
+                {
+                    return (
+                        ((T)tlb2_entry->reg_desc->devobj->read(tlb2_entry->reg_desc->start,
+                            guest_va - tlb2_entry->reg_desc->start,
+                            4) << 32) |
+                        tlb2_entry->reg_desc->devobj->read(tlb2_entry->reg_desc->start,
+                            guest_va + 4 - tlb2_entry->reg_desc->start,
+                            4)
+                    );
+                }
+            }
+            else {
+                return (
+                    tlb2_entry->reg_desc->devobj->read(tlb2_entry->reg_desc->start,
+                        guest_va - tlb2_entry->reg_desc->start,
+                        sizeof(T))
+                );
+            }
         }
     }
 
@@ -1048,7 +1066,7 @@ inline T mmu_read_vmem(uint32_t guest_va)
 
     // handle unaligned memory accesses
     if (sizeof(T) > 1 && (guest_va & (sizeof(T) - 1))) {
-        return read_unaligned(guest_va, host_va, sizeof(T));
+        return read_unaligned<T>(guest_va, host_va);
     }
 
     // handle aligned memory accesses
@@ -1143,9 +1161,24 @@ inline void mmu_write_vmem(uint32_t guest_va, T value)
 #ifdef MMU_PROFILING
             iomem_writes_total++;
 #endif
-            tlb2_entry->reg_desc->devobj->write(tlb2_entry->reg_desc->start,
-                guest_va - tlb2_entry->reg_desc->start,
-                value, sizeof(T));
+            if (sizeof(T) == 8) {
+                if (guest_va & 3) {
+                    ppc_exception_handler(Except_Type::EXC_ALIGNMENT, 0x0);
+                }
+                {
+                    tlb2_entry->reg_desc->devobj->write(tlb2_entry->reg_desc->start,
+                        guest_va - tlb2_entry->reg_desc->start,
+                        value >> 32, 4);
+                    tlb2_entry->reg_desc->devobj->write(tlb2_entry->reg_desc->start,
+                        guest_va + 4 - tlb2_entry->reg_desc->start,
+                        (uint32_t)value, 4);
+                }
+            }
+            else {
+                tlb2_entry->reg_desc->devobj->write(tlb2_entry->reg_desc->start,
+                    guest_va - tlb2_entry->reg_desc->start,
+                    value, sizeof(T));
+            }
             return;
         }
     }
@@ -1156,7 +1189,7 @@ inline void mmu_write_vmem(uint32_t guest_va, T value)
 
     // handle unaligned memory accesses
    if (sizeof(T) > 1 && (guest_va & (sizeof(T) - 1))) {
-        write_unaligned(guest_va, host_va, value, sizeof(T));
+        write_unaligned<T>(guest_va, host_va, value);
         return;
     }
 
@@ -1183,42 +1216,53 @@ template void mmu_write_vmem<uint16_t>(uint32_t guest_va, uint16_t value);
 template void mmu_write_vmem<uint32_t>(uint32_t guest_va, uint32_t value);
 template void mmu_write_vmem<uint64_t>(uint32_t guest_va, uint64_t value);
 
-static uint32_t read_unaligned(uint32_t guest_va, uint8_t *host_va, uint32_t size)
+template <class T>
+static T read_unaligned(uint32_t guest_va, uint8_t *host_va)
 {
-    uint32_t result = 0;
+    T result = 0;
 
     // is it a misaligned cross-page read?
-    if (((guest_va & 0xFFF) + size) > 0x1000) {
+    if (((guest_va & 0xFFF) + sizeof(T)) > 0x1000) {
 #ifdef MMU_PROFILING
         unaligned_crossp_r++;
 #endif
         // Break such a memory access into multiple, bytewise accesses.
         // Because such accesses suffer a performance penalty, they will be
         // presumably very rare so don't waste time optimizing the code below.
-        for (int i = 0; i < size; guest_va++, i++) {
+        for (int i = 0; i < sizeof(T); guest_va++, i++) {
             result = (result << 8) | mmu_read_vmem<uint8_t>(guest_va);
         }
     } else {
 #ifdef MMU_PROFILING
         unaligned_reads++;
 #endif
-        switch(size) {
-        case 1:
-            return *host_va;
+        switch(sizeof(T)) {
         case 2:
             return READ_WORD_BE_U(host_va);
         case 4:
             return READ_DWORD_BE_U(host_va);
-        case 8: // FIXME: should we raise alignment exception here?
+        case 8:
+            if (guest_va & 3) {
+                ppc_exception_handler(Except_Type::EXC_ALIGNMENT, 0x0);
+            }
             return READ_QWORD_BE_U(host_va);
         }
     }
 
     return result;
 }
 
-static void write_unaligned(uint32_t guest_va, uint8_t *host_va, uint32_t value,
-                            uint32_t size)
+// explicitely instantiate all required read_unaligned variants
+template uint16_t read_unaligned<uint16_t>(uint32_t guest_va, uint8_t *host_va);
+template uint32_t read_unaligned<uint32_t>(uint32_t guest_va, uint8_t *host_va);
+template uint64_t read_unaligned<uint64_t>(uint32_t guest_va, uint8_t *host_va);
+
+template <class T>
+static void write_unaligned(uint32_t guest_va, uint8_t *host_va, T value)
 {
     // is it a misaligned cross-page write?
-    if (((guest_va & 0xFFF) + size) > 0x1000) {
+    if (((guest_va & 0xFFF) + sizeof(T)) > 0x1000) {
 #ifdef MMU_PROFILING
         unaligned_crossp_w++;
 #endif
@@ -1226,29 +1270,41 @@ static void write_unaligned(uint32_t guest_va, uint8_t *host_va, uint32_t value,
         // Break such a memory access into multiple, bytewise accesses.
         // Because such accesses suffer a performance penalty, they will be
         // presumably very rare so don't waste time optimizing the code below.
-        uint32_t shift = (size - 1) * 8;
+        uint32_t shift = (sizeof(T) - 1) * 8;
 
-        for (int i = 0; i < size; shift -= 8, guest_va++, i++) {
+        for (int i = 0; i < sizeof(T); shift -= 8, guest_va++, i++) {
             mmu_write_vmem<uint8_t>(guest_va, (value >> shift) & 0xFF);
         }
     } else {
 #ifdef MMU_PROFILING
         unaligned_writes++;
 #endif
-        switch(size) {
-        case 1:
-            *host_va = value;
-            break;
+        switch(sizeof(T)) {
         case 2:
             WRITE_WORD_BE_U(host_va, value);
             break;
         case 4:
             WRITE_DWORD_BE_U(host_va, value);
             break;
-        case 8: // FIXME: should we raise alignment exception here?
+        case 8:
+            if (guest_va & 3) {
+                ppc_exception_handler(Except_Type::EXC_ALIGNMENT, 0x0);
+            }
             WRITE_QWORD_BE_U(host_va, value);
             break;
         }
     }
 }
 
+// explicitely instantiate all required write_unaligned variants
+template void write_unaligned<uint16_t>(uint32_t guest_va, uint8_t *host_va, uint16_t value);
+template void write_unaligned<uint32_t>(uint32_t guest_va, uint8_t *host_va, uint32_t value);
+template void write_unaligned<uint64_t>(uint32_t guest_va, uint8_t *host_va, uint64_t value);
+
 /* MMU profiling. */
 #ifdef MMU_PROFILING
@@ -1877,7 +1933,7 @@ uint8_t* quickinstruction_translate(uint32_t addr) {
 
     return real_addr;
 }
-#endif
+#endif // Old and slow code
 
 uint64_t mem_read_dbg(uint32_t virt_addr, uint32_t size) {
     uint32_t save_dsisr, save_dar;
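A note on the unaligned helpers: for accesses that cross a 4 KiB page boundary, the templated read_unaligned/write_unaligned above fall back to byte-wise big-endian loops. A stand-alone model of those loops, assuming a small fake memory array (mem, read_bytewise and write_bytewise are illustrative names, not identifiers from the source):

    #include <cstdint>
    #include <cstdio>

    // "mem" stands in for guest memory; the real code goes through
    // mmu_read_vmem<uint8_t>/mmu_write_vmem<uint8_t> one byte at a time.
    static uint8_t mem[16];

    template <class T>
    static T read_bytewise(uint32_t va) {
        T result = 0;
        for (unsigned i = 0; i < sizeof(T); va++, i++)
            result = (result << 8) | mem[va];   // accumulate big-endian bytes
        return result;
    }

    template <class T>
    static void write_bytewise(uint32_t va, T value) {
        uint32_t shift = (sizeof(T) - 1) * 8;   // start with the most significant byte
        for (unsigned i = 0; i < sizeof(T); shift -= 8, va++, i++)
            mem[va] = (value >> shift) & 0xFF;
    }

    int main() {
        write_bytewise<uint64_t>(3, 0x1122334455667788ULL);    // deliberately misaligned
        printf("%016llx\n", (unsigned long long)read_bytewise<uint64_t>(3));
    }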