ppcmmu: Remove old and slow code.

joevt 2024-01-25 23:36:16 -08:00 committed by dingusdev
parent 0f8a464157
commit c9d4cc3321
1 changed file with 0 additions and 506 deletions

@@ -1408,512 +1408,6 @@ public:
};
#endif
//=================== Old and slow code. Kept for reference =================
#if 0
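// read_phys_mem/write_phys_mem cache the most recently used address map
// entry in *mru_rgn and fall back to a full find_range() lookup only when
// the requested access misses that cached range.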
template <class T, const bool is_aligned>
static inline T read_phys_mem(AddressMapEntry *mru_rgn, uint32_t addr)
{
if (addr < mru_rgn->start || (addr + sizeof(T)) > mru_rgn->end) {
AddressMapEntry* entry = mem_ctrl_instance->find_range(addr);
if (entry) {
*mru_rgn = *entry;
} else {
LOG_F(ERROR, "Read from unmapped memory at 0x%08X!", addr);
return (sizeof(T) == 8 ? -1ULL : -1UL);
}
}
if (mru_rgn->type & (RT_ROM | RT_RAM)) {
#ifdef MMU_PROFILING
dmem_reads_total++;
#endif
switch(sizeof(T)) {
case 1:
return *(mru_rgn->mem_ptr + (addr - mru_rgn->start));
case 2:
if (is_aligned) {
return READ_WORD_BE_A(mru_rgn->mem_ptr + (addr - mru_rgn->start));
} else {
return READ_WORD_BE_U(mru_rgn->mem_ptr + (addr - mru_rgn->start));
}
case 4:
if (is_aligned) {
return READ_DWORD_BE_A(mru_rgn->mem_ptr + (addr - mru_rgn->start));
} else {
return READ_DWORD_BE_U(mru_rgn->mem_ptr + (addr - mru_rgn->start));
}
case 8:
if (is_aligned) {
return READ_QWORD_BE_A(mru_rgn->mem_ptr + (addr - mru_rgn->start));
}
default:
LOG_F(ERROR, "READ_PHYS: invalid size %lu passed", sizeof(T));
return (sizeof(T) == 8 ? -1ULL : -1UL);
}
} else if (mru_rgn->type & RT_MMIO) {
#ifdef MMU_PROFILING
iomem_reads_total++;
#endif
return (mru_rgn->devobj->read(mru_rgn->start,
addr - mru_rgn->start, sizeof(T)));
} else {
LOG_F(ERROR, "READ_PHYS: invalid region type!");
return (sizeof(T) == 8 ? -1ULL : -1UL);
}
}
template <class T, const bool is_aligned>
static inline void write_phys_mem(AddressMapEntry *mru_rgn, uint32_t addr, T value)
{
if (addr < mru_rgn->start || (addr + sizeof(T)) > mru_rgn->end) {
AddressMapEntry* entry = mem_ctrl_instance->find_range(addr);
if (entry) {
*mru_rgn = *entry;
} else {
LOG_F(ERROR, "Write to unmapped memory at 0x%08X!", addr);
return;
}
}
if (mru_rgn->type & RT_RAM) {
#ifdef MMU_PROFILING
dmem_writes_total++;
#endif
switch(sizeof(T)) {
case 1:
*(mru_rgn->mem_ptr + (addr - mru_rgn->start)) = value;
break;
case 2:
if (is_aligned) {
WRITE_WORD_BE_A(mru_rgn->mem_ptr + (addr - mru_rgn->start), value);
} else {
WRITE_WORD_BE_U(mru_rgn->mem_ptr + (addr - mru_rgn->start), value);
}
break;
case 4:
if (is_aligned) {
WRITE_DWORD_BE_A(mru_rgn->mem_ptr + (addr - mru_rgn->start), value);
} else {
WRITE_DWORD_BE_U(mru_rgn->mem_ptr + (addr - mru_rgn->start), value);
}
break;
case 8:
if (is_aligned) {
WRITE_QWORD_BE_A(mru_rgn->mem_ptr + (addr - mru_rgn->start), value);
}
break;
default:
LOG_F(ERROR, "WRITE_PHYS: invalid size %lu passed", sizeof(T));
return;
}
} else if (mru_rgn->type & RT_MMIO) {
#ifdef MMU_PROFILING
iomem_writes_total++;
#endif
mru_rgn->devobj->write(mru_rgn->start, addr - mru_rgn->start, value,
sizeof(T));
} else {
LOG_F(ERROR, "WRITE_PHYS: invalid region type!");
}
}
/** PowerPC-style MMU data address translation. */
static uint32_t ppc_mmu_addr_translate(uint32_t la, int is_write)
{
uint32_t pa; /* translated physical address */
bool bat_hit = false;
unsigned msr_pr = !!(ppc_state.msr & MSR::PR);
// Format: %XY
// X - supervisor access bit, Y - problem/user access bit
// Those bits are mutually exclusive
unsigned access_bits = ((msr_pr ^ 1) << 1) | msr_pr;
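// Worked example, derived from the expression above:
// supervisor mode (MSR[PR] = 0): access_bits = ((0 ^ 1) << 1) | 0 = %10
// user mode (MSR[PR] = 1): access_bits = ((1 ^ 1) << 1) | 1 = %01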
for (int bat_index = 0; bat_index < 4; bat_index++) {
PPC_BAT_entry* bat_entry = &dbat_array[bat_index];
if ((bat_entry->access & access_bits) && ((la & bat_entry->hi_mask) == bat_entry->bepi)) {
bat_hit = true;
#ifdef MMU_PROFILING
bat_transl_total++;
#endif
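// prot appears to encode the BAT PP access bits: zero means no access,
// and bit 0 set means read-only, so a write to such a block also faults.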
if (!bat_entry->prot || ((bat_entry->prot & 1) && is_write)) {
ppc_state.spr[SPR::DSISR] = 0x08000000 | (is_write << 25);
ppc_state.spr[SPR::DAR] = la;
mmu_exception_handler(Except_Type::EXC_DSI, 0);
}
// logical to physical translation
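// Illustrative example with assumed values: a 256 MB DBAT with
// bepi = 0x80000000, hi_mask = 0xF0000000 and phys_hi = 0 translates
// la = 0x81234567 to pa = 0 | (0x81234567 & ~0xF0000000) = 0x01234567.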
pa = bat_entry->phys_hi | (la & ~bat_entry->hi_mask);
break;
}
}
/* page address translation */
if (!bat_hit) {
PATResult pat_res = page_address_translation(la, false, msr_pr, is_write);
pa = pat_res.phys;
#ifdef MMU_PROFILING
ptab_transl_total++;
#endif
}
return pa;
}
static void mem_write_unaligned(uint32_t addr, uint32_t value, uint32_t size) {
#ifdef MMU_DEBUG
LOG_F(WARNING, "Attempt to write unaligned %d bytes to 0x%08X", size, addr);
#endif
if (((addr & 0xFFF) + size) > 0x1000) {
// Special case: unaligned cross-page writes
#ifdef MMU_PROFILING
unaligned_crossp_w++;
#endif
uint32_t phys_addr = addr; // defaults to addr when data translation is disabled
uint32_t shift = (size - 1) * 8;
// Break misaligned memory accesses into multiple, bytewise accesses
// and retranslate on page boundary.
// Such accesses are presumably very rare, so this slow bytewise path
// makes no attempt to optimize them.
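// Example with made-up values and data translation enabled: a 4-byte
// write of 0xAABBCCDD at 0x00000FFE stores 0xAA and 0xBB through the
// translation of the first page, then retranslates at the 0x1000
// boundary before storing 0xCC and 0xDD into the next page.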
for (int i = 0; i < size; shift -= 8, addr++, phys_addr++, i++) {
if ((ppc_state.msr & MSR::DR) && (!i || !(addr & 0xFFF))) {
phys_addr = ppc_mmu_addr_translate(addr, 1);
}
write_phys_mem<uint8_t, false>(&last_write_area, phys_addr,
(value >> shift) & 0xFF);
}
} else {
// data address translation if enabled
if (ppc_state.msr & MSR::DR) {
addr = ppc_mmu_addr_translate(addr, 1);
}
if (size == 2) {
write_phys_mem<uint16_t, false>(&last_write_area, addr, value);
} else {
write_phys_mem<uint32_t, false>(&last_write_area, addr, value);
}
#ifdef MMU_PROFILING
unaligned_writes++;
#endif
}
}
static inline uint64_t tlb_translate_addr(uint32_t guest_va)
{
TLBEntry *tlb1_entry, *tlb2_entry;
const uint32_t tag = guest_va & ~0xFFFUL;
// look up address in the primary TLB
tlb1_entry = &pCurDTLB1[(guest_va >> PAGE_SIZE_BITS) & tlb_size_mask];
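// Example, assuming PAGE_SIZE_BITS == 12: for guest_va = 0x12345678 the
// tag is 0x12345000 and the primary TLB index is
// (0x12345678 >> 12) & tlb_size_mask = 0x12345 & tlb_size_mask.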
if (tlb1_entry->tag == tag) { // primary TLB hit -> fast path
return tlb1_entry->host_va_offs_r + guest_va;
} else { // primary TLB miss -> look up address in the secondary TLB
tlb2_entry = &pCurDTLB2[((guest_va >> PAGE_SIZE_BITS) & tlb_size_mask) * TLB2_WAYS];
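// Each way of the 4-way TLB2 set keeps a 2-bit recency value: a hit sets
// the touched way to 0x3 and its neighbour in the same pair to 0x2 while
// clearing bit 1 on the other pair, presumably so that dtlb2_refill (not
// shown here) can evict from the least recently used pair.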
if (tlb2_entry->tag == tag) {
// update LRU bits
tlb2_entry[0].lru_bits = 0x3;
tlb2_entry[1].lru_bits = 0x2;
tlb2_entry[2].lru_bits &= 0x1;
tlb2_entry[3].lru_bits &= 0x1;
} else if (tlb2_entry[1].tag == tag) {
// update LRU bits
tlb2_entry[0].lru_bits = 0x2;
tlb2_entry[1].lru_bits = 0x3;
tlb2_entry[2].lru_bits &= 0x1;
tlb2_entry[3].lru_bits &= 0x1;
tlb2_entry = &tlb2_entry[1];
} else if (tlb2_entry[2].tag == tag) {
// update LRU bits
tlb2_entry[0].lru_bits &= 0x1;
tlb2_entry[1].lru_bits &= 0x1;
tlb2_entry[2].lru_bits = 0x3;
tlb2_entry[3].lru_bits = 0x2;
tlb2_entry = &tlb2_entry[2];
} else if (tlb2_entry[3].tag == tag) {
// update LRU bits
tlb2_entry[0].lru_bits &= 0x1;
tlb2_entry[1].lru_bits &= 0x1;
tlb2_entry[2].lru_bits = 0x2;
tlb2_entry[3].lru_bits = 0x3;
tlb2_entry = &tlb2_entry[3];
} else { // secondary TLB miss ->
// perform full address translation and refill the secondary TLB
tlb2_entry = dtlb2_refill(guest_va, 0);
}
if (tlb2_entry->flags & TLBFlags::PAGE_MEM) { // is it a real memory region?
// refill the primary TLB
tlb1_entry->tag = tag;
tlb1_entry->flags = tlb2_entry->flags;
tlb1_entry->host_va_offs_r = tlb2_entry->host_va_offs_r;
tlb1_entry->phys_tag = tlb2_entry->phys_tag;
return tlb1_entry->host_va_offs_r + guest_va;
} else { // an attempt to access a memory-mapped device
return guest_va - tlb2_entry->dev_base_va;
}
}
}
static uint32_t mem_grab_unaligned(uint32_t addr, uint32_t size) {
uint32_t ret = 0;
#ifdef MMU_DEBUG
LOG_F(WARNING, "Attempt to read unaligned %d bytes from 0x%08X", size, addr);
#endif
if (((addr & 0xFFF) + size) > 0x1000) {
// Special case: misaligned cross-page reads
#ifdef MMU_PROFILING
unaligned_crossp_r++;
#endif
uint32_t phys_addr = addr; // defaults to addr when data translation is disabled
uint32_t res = 0;
// Break misaligned memory accesses into multiple, bytewise accesses
// and retranslate on page boundary.
// Such accesses are presumably very rare, so this slow bytewise path
// makes no attempt to optimize them.
for (int i = 0; i < size; addr++, phys_addr++, i++) {
tlb_translate_addr(addr);
if ((ppc_state.msr & MSR::DR) && (!i || !(addr & 0xFFF))) {
phys_addr = ppc_mmu_addr_translate(addr, 0);
}
res = (res << 8) |
read_phys_mem<uint8_t, false>(&last_read_area, phys_addr);
}
return res;
} else {
/* data address translation if enabled */
if (ppc_state.msr & MSR::DR) {
addr = ppc_mmu_addr_translate(addr, 0);
}
if (size == 2) {
return read_phys_mem<uint16_t, false>(&last_read_area, addr);
} else {
return read_phys_mem<uint32_t, false>(&last_read_area, addr);
}
#ifdef MMU_PROFILING
unaligned_reads++;
#endif
}
return ret;
}
void mem_write_byte(uint32_t addr, uint8_t value) {
mmu_write_vmem<uint8_t>(addr, value);
/* data address translation if enabled */
if (ppc_state.msr & MSR::DR) {
addr = ppc_mmu_addr_translate(addr, 1);
}
write_phys_mem<uint8_t, true>(&last_write_area, addr, value);
}
void mem_write_word(uint32_t addr, uint16_t value) {
mmu_write_vmem<uint16_t>(addr, value);
if (addr & 1) {
mem_write_unaligned(addr, value, 2);
return;
}
/* data address translation if enabled */
if (ppc_state.msr & MSR::DR) {
addr = ppc_mmu_addr_translate(addr, 1);
}
write_phys_mem<uint16_t, true>(&last_write_area, addr, value);
}
void mem_write_dword(uint32_t addr, uint32_t value) {
mmu_write_vmem<uint32_t>(addr, value);
if (addr & 3) {
mem_write_unaligned(addr, value, 4);
return;
}
/* data address translation if enabled */
if (ppc_state.msr & MSR::DR) {
addr = ppc_mmu_addr_translate(addr, 1);
}
write_phys_mem<uint32_t, true>(&last_write_area, addr, value);
}
void mem_write_qword(uint32_t addr, uint64_t value) {
mmu_write_vmem<uint64_t>(addr, value);
if (addr & 7) {
ABORT_F("SOS! Attempt to write unaligned QWORD to 0x%08X\n", addr);
}
/* data address translation if enabled */
if (ppc_state.msr & MSR::DR) {
addr = ppc_mmu_addr_translate(addr, 1);
}
write_phys_mem<uint64_t, true>(&last_write_area, addr, value);
}
/** Grab a value from memory into a register */
uint8_t mem_grab_byte(uint32_t addr) {
tlb_translate_addr(addr);
/* data address translation if enabled */
if (ppc_state.msr & MSR::DR) {
addr = ppc_mmu_addr_translate(addr, 0);
}
return read_phys_mem<uint8_t, true>(&last_read_area, addr);
}
uint16_t mem_grab_word(uint32_t addr) {
tlb_translate_addr(addr);
if (addr & 1) {
return mem_grab_unaligned(addr, 2);
}
/* data address translation if enabled */
if (ppc_state.msr & MSR::DR) {
addr = ppc_mmu_addr_translate(addr, 0);
}
return read_phys_mem<uint16_t, true>(&last_read_area, addr);
}
uint32_t mem_grab_dword(uint32_t addr) {
tlb_translate_addr(addr);
if (addr & 3) {
return mem_grab_unaligned(addr, 4);
}
/* data address translation if enabled */
if (ppc_state.msr & MSR::DR) {
addr = ppc_mmu_addr_translate(addr, 0);
}
return read_phys_mem<uint32_t, true>(&last_read_area, addr);
}
uint64_t mem_grab_qword(uint32_t addr) {
tlb_translate_addr(addr);
if (addr & 7) {
ABORT_F("SOS! Attempt to read unaligned QWORD at 0x%08X\n", addr);
}
/* data address translation if enabled */
if (ppc_state.msr & MSR::DR) {
addr = ppc_mmu_addr_translate(addr, 0);
}
return read_phys_mem<uint64_t, true>(&last_read_area, addr);
}
/** PowerPC-style MMU instruction address translation. */
static uint32_t mmu_instr_translation(uint32_t la)
{
uint32_t pa; /* translated physical address */
bool bat_hit = false;
unsigned msr_pr = !!(ppc_state.msr & MSR::PR);
// Format: %XY
// X - supervisor access bit, Y - problem/user access bit
// Those bits are mutually exclusive
unsigned access_bits = ((msr_pr ^ 1) << 1) | msr_pr;
for (int bat_index = 0; bat_index < 4; bat_index++) {
PPC_BAT_entry* bat_entry = &ibat_array[bat_index];
if ((bat_entry->access & access_bits) && ((la & bat_entry->hi_mask) == bat_entry->bepi)) {
bat_hit = true;
#ifdef MMU_PROFILING
bat_transl_total++;
#endif
if (!bat_entry->prot) {
mmu_exception_handler(Except_Type::EXC_ISI, 0x08000000);
}
// logical to physical translation
pa = bat_entry->phys_hi | (la & ~bat_entry->hi_mask);
break;
}
}
/* page address translation */
if (!bat_hit) {
PATResult pat_res = page_address_translation(la, true, msr_pr, 0);
pa = pat_res.phys;
#ifdef MMU_PROFILING
ptab_transl_total++;
#endif
}
return pa;
}
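/** Fetch the instruction at addr, caching the most recently used
    executable region in last_exec_area to avoid repeated lookups. */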
uint8_t* quickinstruction_translate(uint32_t addr) {
uint8_t* real_addr;
#ifdef MMU_PROFILING
exec_reads_total++;
#endif
/* perform instruction address translation if enabled */
if (ppc_state.msr & MSR::IR) {
addr = mmu_instr_translation(addr);
}
if (addr >= last_exec_area.start && addr <= last_exec_area.end) {
real_addr = last_exec_area.mem_ptr + (addr - last_exec_area.start);
ppc_set_cur_instruction(real_addr);
} else {
AddressMapEntry* entry = mem_ctrl_instance->find_range(addr);
if (entry && entry->type & (RT_ROM | RT_RAM)) {
last_exec_area.start = entry->start;
last_exec_area.end = entry->end;
last_exec_area.mem_ptr = entry->mem_ptr;
real_addr = last_exec_area.mem_ptr + (addr - last_exec_area.start);
ppc_set_cur_instruction(real_addr);
} else {
ABORT_F("Attempt to execute code at %08X!\n", addr);
}
}
return real_addr;
}
#endif // Old and slow code
uint64_t mem_read_dbg(uint32_t virt_addr, uint32_t size) {
uint32_t save_dsisr, save_dar;
uint64_t ret_val;