diff --git a/cpu/ppc/ppcmmu.cpp b/cpu/ppc/ppcmmu.cpp index 359d354..4f6924e 100644 --- a/cpu/ppc/ppcmmu.cpp +++ b/cpu/ppc/ppcmmu.cpp @@ -1022,9 +1022,10 @@ void mmu_print_regs() } // Forward declarations. -static uint32_t read_unaligned(uint32_t guest_va, uint8_t *host_va, uint32_t size); -static void write_unaligned(uint32_t guest_va, uint8_t *host_va, uint32_t value, - uint32_t size); +template +static T read_unaligned(uint32_t guest_va, uint8_t *host_va); +template +static void write_unaligned(uint32_t guest_va, uint8_t *host_va, T value); template inline T mmu_read_vmem(uint32_t guest_va) @@ -1069,11 +1070,28 @@ inline T mmu_read_vmem(uint32_t guest_va) #ifdef MMU_PROFILING iomem_reads_total++; #endif - return ( - tlb2_entry->rgn_desc->devobj->read(tlb2_entry->rgn_desc->start, - guest_va - tlb2_entry->dev_base_va, - sizeof(T)) - ); + if (sizeof(T) == 8) { + if (guest_va & 3) { + ppc_exception_handler(Except_Type::EXC_ALIGNMENT, 0x0); + } + { + return ( + ((T)tlb2_entry->rgn_desc->devobj->read(tlb2_entry->rgn_desc->start, + guest_va - tlb2_entry->dev_base_va, + 4) << 32) | + tlb2_entry->rgn_desc->devobj->read(tlb2_entry->rgn_desc->start, + guest_va + 4 - tlb2_entry->dev_base_va, + 4) + ); + } + } + else { + return ( + tlb2_entry->rgn_desc->devobj->read(tlb2_entry->rgn_desc->start, + guest_va - tlb2_entry->dev_base_va, + sizeof(T)) + ); + } } } @@ -1083,7 +1101,7 @@ inline T mmu_read_vmem(uint32_t guest_va) // handle unaligned memory accesses if (sizeof(T) > 1 && (guest_va & (sizeof(T) - 1))) { - return read_unaligned(guest_va, host_va, sizeof(T)); + return read_unaligned(guest_va, host_va); } // handle aligned memory accesses @@ -1178,9 +1196,24 @@ inline void mmu_write_vmem(uint32_t guest_va, T value) #ifdef MMU_PROFILING iomem_writes_total++; #endif - tlb2_entry->rgn_desc->devobj->write(tlb2_entry->rgn_desc->start, - guest_va - tlb2_entry->dev_base_va, - value, sizeof(T)); + if (sizeof(T) == 8) { + if (guest_va & 3) { + 
ppc_exception_handler(Except_Type::EXC_ALIGNMENT, 0x0); + } + { + tlb2_entry->rgn_desc->devobj->write(tlb2_entry->rgn_desc->start, + guest_va - tlb2_entry->dev_base_va, + value >> 32, 4); + tlb2_entry->rgn_desc->devobj->write(tlb2_entry->rgn_desc->start, + guest_va + 4 - tlb2_entry->dev_base_va, + (uint32_t)value, 4); + } + } + else { + tlb2_entry->rgn_desc->devobj->write(tlb2_entry->rgn_desc->start, + guest_va - tlb2_entry->dev_base_va, + value, sizeof(T)); + } return; } } @@ -1191,7 +1224,7 @@ inline void mmu_write_vmem(uint32_t guest_va, T value) // handle unaligned memory accesses if (sizeof(T) > 1 && (guest_va & (sizeof(T) - 1))) { - write_unaligned(guest_va, host_va, value, sizeof(T)); + write_unaligned(guest_va, host_va, value); return; } @@ -1218,42 +1251,53 @@ template void mmu_write_vmem(uint32_t guest_va, uint16_t value); template void mmu_write_vmem(uint32_t guest_va, uint32_t value); template void mmu_write_vmem(uint32_t guest_va, uint64_t value); -static uint32_t read_unaligned(uint32_t guest_va, uint8_t *host_va, uint32_t size) +template +static T read_unaligned(uint32_t guest_va, uint8_t *host_va) { - uint32_t result = 0; + T result = 0; // is it a misaligned cross-page read? - if (((guest_va & 0xFFF) + size) > 0x1000) { + if (((guest_va & 0xFFF) + sizeof(T)) > 0x1000) { #ifdef MMU_PROFILING unaligned_crossp_r++; #endif // Break such a memory access into multiple, bytewise accesses. // Because such accesses suffer a performance penalty, they will be // presumably very rare so don't waste time optimizing the code below. - for (int i = 0; i < size; guest_va++, i++) { + for (int i = 0; i < sizeof(T); guest_va++, i++) { result = (result << 8) | mmu_read_vmem(guest_va); } } else { #ifdef MMU_PROFILING unaligned_reads++; #endif - switch(size) { + switch(sizeof(T)) { + case 1: + return *host_va; case 2: return READ_WORD_BE_U(host_va); case 4: return READ_DWORD_BE_U(host_va); - case 8: // FIXME: should we raise alignment exception here? 
+ case 8: + if (guest_va & 3) { + ppc_exception_handler(Except_Type::EXC_ALIGNMENT, 0x0); + } return READ_QWORD_BE_U(host_va); } } return result; } -static void write_unaligned(uint32_t guest_va, uint8_t *host_va, uint32_t value, - uint32_t size) +// explicitly instantiate all required read_unaligned variants +template uint16_t read_unaligned(uint32_t guest_va, uint8_t *host_va); +template uint32_t read_unaligned(uint32_t guest_va, uint8_t *host_va); +template uint64_t read_unaligned(uint32_t guest_va, uint8_t *host_va); + +template +static void write_unaligned(uint32_t guest_va, uint8_t *host_va, T value) { // is it a misaligned cross-page write? - if (((guest_va & 0xFFF) + size) > 0x1000) { + if (((guest_va & 0xFFF) + sizeof(T)) > 0x1000) { #ifdef MMU_PROFILING unaligned_crossp_w++; #endif @@ -1261,29 +1305,41 @@ static void write_unaligned(uint32_t guest_va, uint8_t *host_va, uint32_t value, // Because such accesses suffer a performance penalty, they will be // presumably very rare so don't waste time optimizing the code below. - uint32_t shift = (size - 1) * 8; + uint32_t shift = (sizeof(T) - 1) * 8; - for (int i = 0; i < size; shift -= 8, guest_va++, i++) { + for (int i = 0; i < sizeof(T); shift -= 8, guest_va++, i++) { mmu_write_vmem(guest_va, (value >> shift) & 0xFF); } } else { #ifdef MMU_PROFILING unaligned_writes++; #endif - switch(size) { + switch(sizeof(T)) { + case 1: + *host_va = value; + break; case 2: WRITE_WORD_BE_U(host_va, value); break; case 4: WRITE_DWORD_BE_U(host_va, value); break; - case 8: // FIXME: should we raise alignment exception here? 
+ case 8: + if (guest_va & 3) { + ppc_exception_handler(Except_Type::EXC_ALIGNMENT, 0x0); + } WRITE_QWORD_BE_U(host_va, value); break; } } } +// explicitly instantiate all required write_unaligned variants +template void write_unaligned(uint32_t guest_va, uint8_t *host_va, uint16_t value); +template void write_unaligned(uint32_t guest_va, uint8_t *host_va, uint32_t value); +template void write_unaligned(uint32_t guest_va, uint8_t *host_va, uint64_t value); + + /* MMU profiling. */ #ifdef MMU_PROFILING @@ -1912,7 +1968,7 @@ uint8_t* quickinstruction_translate(uint32_t addr) { return real_addr; } -#endif +#endif // Old and slow code uint64_t mem_read_dbg(uint32_t virt_addr, uint32_t size) { uint32_t save_dsisr, save_dar;