Fix for 6522 counter underflows falsely delaying when IRQ occurs. (Fixes #1176)

tomcw 2023-02-05 11:02:56 +00:00
parent 6a3a3114c7
commit f03f5e0904
6 changed files with 25 additions and 7 deletions
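
Background, as a hedged summary rather than text from the commit: on a 6502, an IRQ that only becomes pending on the very last cycle of an instruction is not serviced until one further instruction has run, and AppleWin tracks that case with the g_irqOnLastOpcodeCycle flag (alongside g_irqDefer1Opcode) seen in the hunks below. Previously the flag was raised for any synchronous event reaching zero on an opcode boundary, even a 6522 timer whose interrupt was disabled in the IER, so a genuinely pending IRQ could be falsely delayed by one opcode. A toy model of the timing rule itself, using a hypothetical helper name:

// Toy model of the 6502 rule the flag represents (hypothetical helper, not AppleWin code):
// an IRQ first seen on an instruction's final cycle is deferred by one further instruction.
#include <cstdio>

static int ExtraOpcodesBeforeIrq(bool irqFirstSeenOnLastCycle)
{
    return irqFirstSeenOnLastCycle ? 1 : 0;
}

int main()
{
    std::printf("IRQ pending earlier in the instruction: %d extra opcode(s)\n", ExtraOpcodesBeforeIrq(false));
    std::printf("IRQ first seen on the last cycle:       %d extra opcode(s)\n", ExtraOpcodesBeforeIrq(true));
    return 0;
}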


@@ -202,6 +202,10 @@ void SY6522::Write(BYTE nReg, BYTE nValue)
nValue &= 0x7F;
m_regs.IER |= nValue;
}
if (m_syncEvent[0])
m_syncEvent[0]->m_canAssertIRQ = (m_regs.IER & IxR_TIMER1) ? true : false;
if (m_syncEvent[1])
m_syncEvent[1]->m_canAssertIRQ = (m_regs.IER & IxR_TIMER2) ? true : false;
UpdateIFR(0);
break;
case 0x0f: // ORA_NO_HS
@@ -669,12 +673,14 @@ void SY6522::SetTimersActiveFromSnapshot(bool timer1Active, bool timer2Active, U
{
SyncEvent* syncEvent = m_syncEvent[0];
syncEvent->SetCycles(GetRegT1C() + kExtraTimerCycles); // NB. use COUNTER, not LATCH
syncEvent->m_canAssertIRQ = (m_regs.IER & IxR_TIMER1) ? true : false;
g_SynchronousEventMgr.Insert(syncEvent);
}
if (IsTimer2Active())
{
SyncEvent* syncEvent = m_syncEvent[1];
syncEvent->SetCycles(GetRegT2C() + kExtraTimerCycles); // NB. use COUNTER, not LATCH
syncEvent->m_canAssertIRQ = (m_regs.IER & IxR_TIMER2) ? true : false;
g_SynchronousEventMgr.Insert(syncEvent);
}
}
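
The first hunk covers the "set" half of the 6522 IER write convention (bit 7 of the written value selects set versus clear of the enable bits named by bits 0-6); the added lines then copy the resulting TIMER1/TIMER2 enable state onto the pending sync events, so an expiring timer whose interrupt is disabled can no longer flag an IRQ. A small sketch of that register convention, with bit positions taken from the 6522 datasheet and assumed to match AppleWin's IxR_* constants:

// Sketch of a 6522 IER write (not AppleWin's implementation): bit 7 of the value
// selects set vs. clear, bits 0-6 select which interrupt enables are affected.
// Bit positions per the 6522 datasheet; assumed to match AppleWin's IxR_* values.
#include <cstdint>

const uint8_t IxR_TIMER1 = 1 << 6;   // T1 interrupt enable/flag bit
const uint8_t IxR_TIMER2 = 1 << 5;   // T2 interrupt enable/flag bit

uint8_t WriteIER(uint8_t ier, uint8_t value)
{
    if (value & 0x80)
        ier |= (value & 0x7F);       // set the selected enable bits
    else
        ier &= ~(value & 0x7F);      // clear the selected enable bits
    return ier;
}

// The commit then mirrors the result onto the timer sync events, e.g.:
//   canAssertIRQ = (ier & IxR_TIMER1) != 0;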


@@ -139,6 +139,9 @@ static volatile BOOL g_bNmiFlank = FALSE; // Positive going flank on NMI line
static bool g_irqDefer1Opcode = false;
static bool g_interruptInLastExecutionBatch = false; // Last batch of executed cycles included an interrupt (IRQ/NMI)
// NB. No need to save to save-state, as IRQ() follows CheckSynchronousInterruptSources(), and IRQ() always sets it to false.
static bool g_irqOnLastOpcodeCycle = false;
//
static eCpuType g_MainCPU = CPU_65C02;
@@ -204,6 +207,12 @@ bool IsInterruptInLastExecution(void)
return g_interruptInLastExecutionBatch;
}
void SetIrqOnLastOpcodeCycle(void)
{
if (!(regs.ps & AF_INTERRUPT))
g_irqOnLastOpcodeCycle = true;
}
//
#include "CPU/cpu_general.inl"
@@ -406,9 +415,6 @@ static __forceinline void CheckSynchronousInterruptSources(UINT cycles, ULONG uE
g_SynchronousEventMgr.Update(cycles, uExecutedCycles);
}
// NB. No need to save to save-state, as IRQ() follows CheckSynchronousInterruptSources(), and IRQ() always sets it to false.
bool g_irqOnLastOpcodeCycle = false;
static __forceinline bool IRQ(ULONG& uExecutedCycles, BOOL& flagc, BOOL& flagn, BOOL& flagv, BOOL& flagz)
{
bool irqTaken = false;
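
A minimal, self-contained model (hypothetical parameter, not AppleWin code) of the ordering the "No need to save to save-state" comment relies on: the flag can only be raised while synchronous events are scanned, SetIrqOnLastOpcodeCycle() declines to raise it while the CPU's I flag is set, and the IRQ() step that always follows consumes and clears it, so it never needs to survive into a save-state.

// Minimal model of the flag's lifetime (hypothetical signatures, not AppleWin code).
#include <cstdio>

static bool g_irqOnLastOpcodeCycle = false;

static void SetIrqOnLastOpcodeCycle(bool cpuInterruptFlagSet)   // ~ !(regs.ps & AF_INTERRUPT)
{
    if (!cpuInterruptFlagSet)
        g_irqOnLastOpcodeCycle = true;
}

static void CheckSynchronousInterruptSources(bool timerExpiredOnLastCycle, bool cpuInterruptFlagSet)
{
    if (timerExpiredOnLastCycle)
        SetIrqOnLastOpcodeCycle(cpuInterruptFlagSet);
}

static void IRQ()
{
    if (g_irqOnLastOpcodeCycle)
        std::puts("IRQ asserted on the opcode's last cycle");
    g_irqOnLastOpcodeCycle = false;   // always reset here, hence nothing to persist
}

int main()
{
    CheckSynchronousInterruptSources(true, false);   // expiry with interrupts enabled
    IRQ();
    return 0;
}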


@@ -61,3 +61,4 @@ bool IsIrqAsserted(void);
bool Is6502InterruptEnabled(void);
void ResetCyclesExecutedForDebugger(void);
bool IsInterruptInLastExecution(void);
void SetIrqOnLastOpcodeCycle(void);


@@ -42,6 +42,7 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#include "StdAfx.h"
#include "SynchronousEventManager.h"
#include "CPU.h"
void SynchronousEventManager::Insert(SyncEvent* pNewEvent)
{
@@ -135,8 +136,6 @@ bool SynchronousEventManager::Remove(int id)
return false;
}
extern bool g_irqOnLastOpcodeCycle;
void SynchronousEventManager::Update(int cycles, ULONG uExecutedCycles)
{
SyncEvent* pCurrEvent = m_syncEventHead;
@@ -147,8 +146,8 @@ void SynchronousEventManager::Update(int cycles, ULONG uExecutedCycles)
pCurrEvent->m_cyclesRemaining -= cycles;
if (pCurrEvent->m_cyclesRemaining <= 0)
{
if (pCurrEvent->m_cyclesRemaining == 0)
g_irqOnLastOpcodeCycle = true; // IRQ occurs on last cycle of opcode
if (pCurrEvent->m_cyclesRemaining == 0 && pCurrEvent->m_canAssertIRQ)
SetIrqOnLastOpcodeCycle(); // IRQ occurs on last cycle of opcode
int cyclesUnderflowed = -pCurrEvent->m_cyclesRemaining;
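
The hunk above is the core of the fix: the check on m_cyclesRemaining reaching exactly zero is now also gated on m_canAssertIRQ, and the raw assignment is replaced by SetIrqOnLastOpcodeCycle(), which additionally respects the CPU's I flag. A before/after sketch, using a hypothetical event struct rather than the real SyncEvent:

// Before/after sketch of the Update() check (hypothetical types, not AppleWin code).
#include <cstdio>

struct Event
{
    int  cyclesRemaining;
    bool canAssertIRQ;        // mirrors SyncEvent::m_canAssertIRQ, driven by the 6522 IER bits
};

static bool FlagIrqOld(Event& ev, int cycles)
{
    ev.cyclesRemaining -= cycles;
    return ev.cyclesRemaining == 0;                        // flagged even if the timer IRQ is disabled
}

static bool FlagIrqNew(Event& ev, int cycles)
{
    ev.cyclesRemaining -= cycles;
    return ev.cyclesRemaining == 0 && ev.canAssertIRQ;     // only flag when the event can raise an IRQ
}

int main()
{
    Event disabledTimer{ 7, /*canAssertIRQ=*/false };      // e.g. T2 with its IER bit clear
    Event sameTimer = disabledTimer;
    std::printf("old behaviour flags last-cycle IRQ: %d\n", FlagIrqOld(disabledTimer, 7));  // 1 (the bug)
    std::printf("new behaviour flags last-cycle IRQ: %d\n", FlagIrqNew(sameTimer, 7));      // 0
    return 0;
}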


@@ -32,6 +32,7 @@ public:
: m_id(id),
m_cyclesRemaining(initCycles),
m_active(false),
m_canAssertIRQ(true),
m_callback(callback),
m_next(NULL)
{}
@@ -45,6 +46,7 @@ public:
int m_id;
int m_cyclesRemaining;
bool m_active;
bool m_canAssertIRQ;
syncEventCB m_callback;
SyncEvent* m_next;
};


@@ -34,6 +34,10 @@ eCpuType GetActiveCpu(void)
return g_ActiveCPU;
}
void SetIrqOnLastOpcodeCycle(void)
{
}
bool g_bStopOnBRK = false;
static __forceinline int Fetch(BYTE& iOpcode, ULONG uExecutedCycles)