diff --git a/source/6522.cpp b/source/6522.cpp
index 8922773d..e375dfc7 100644
--- a/source/6522.cpp
+++ b/source/6522.cpp
@@ -202,6 +202,10 @@ void SY6522::Write(BYTE nReg, BYTE nValue)
 			nValue &= 0x7F;
 			m_regs.IER |= nValue;
 		}
+		if (m_syncEvent[0])
+			m_syncEvent[0]->m_canAssertIRQ = (m_regs.IER & IxR_TIMER1) ? true : false;
+		if (m_syncEvent[1])
+			m_syncEvent[1]->m_canAssertIRQ = (m_regs.IER & IxR_TIMER2) ? true : false;
 		UpdateIFR(0);
 		break;
 	case 0x0f:	// ORA_NO_HS
@@ -669,12 +673,14 @@ void SY6522::SetTimersActiveFromSnapshot(bool timer1Active, bool timer2Active, U
 	{
 		SyncEvent* syncEvent = m_syncEvent[0];
 		syncEvent->SetCycles(GetRegT1C() + kExtraTimerCycles);	// NB. use COUNTER, not LATCH
+		syncEvent->m_canAssertIRQ = (m_regs.IER & IxR_TIMER1) ? true : false;
 		g_SynchronousEventMgr.Insert(syncEvent);
 	}
 	if (IsTimer2Active())
 	{
 		SyncEvent* syncEvent = m_syncEvent[1];
 		syncEvent->SetCycles(GetRegT2C() + kExtraTimerCycles);	// NB. use COUNTER, not LATCH
+		syncEvent->m_canAssertIRQ = (m_regs.IER & IxR_TIMER2) ? true : false;
 		g_SynchronousEventMgr.Insert(syncEvent);
 	}
 }
diff --git a/source/CPU.cpp b/source/CPU.cpp
index 9b04deca..88621aa5 100644
--- a/source/CPU.cpp
+++ b/source/CPU.cpp
@@ -139,6 +139,9 @@ static volatile BOOL g_bNmiFlank = FALSE; // Positive going flank on NMI line
 static bool g_irqDefer1Opcode = false;
 static bool g_interruptInLastExecutionBatch = false;	// Last batch of executed cycles included an interrupt (IRQ/NMI)
 
+// NB. No need to save to save-state, as IRQ() follows CheckSynchronousInterruptSources(), and IRQ() always sets it to false.
+static bool g_irqOnLastOpcodeCycle = false;
+
 //
 
 static eCpuType g_MainCPU = CPU_65C02;
@@ -204,6 +207,12 @@ bool IsInterruptInLastExecution(void)
 	return g_interruptInLastExecutionBatch;
 }
 
+void SetIrqOnLastOpcodeCycle(void)
+{
+	if (!(regs.ps & AF_INTERRUPT))
+		g_irqOnLastOpcodeCycle = true;
+}
+
 //
 
 #include "CPU/cpu_general.inl"
@@ -406,9 +415,6 @@ static __forceinline void CheckSynchronousInterruptSources(UINT cycles, ULONG uE
 	g_SynchronousEventMgr.Update(cycles, uExecutedCycles);
 }
 
-// NB. No need to save to save-state, as IRQ() follows CheckSynchronousInterruptSources(), and IRQ() always sets it to false.
-bool g_irqOnLastOpcodeCycle = false;
-
 static __forceinline bool IRQ(ULONG& uExecutedCycles, BOOL& flagc, BOOL& flagn, BOOL& flagv, BOOL& flagz)
 {
 	bool irqTaken = false;
diff --git a/source/CPU.h b/source/CPU.h
index 90ee5204..f0bac764 100644
--- a/source/CPU.h
+++ b/source/CPU.h
@@ -61,3 +61,4 @@ bool IsIrqAsserted(void);
 bool Is6502InterruptEnabled(void);
 void ResetCyclesExecutedForDebugger(void);
 bool IsInterruptInLastExecution(void);
+void SetIrqOnLastOpcodeCycle(void);
diff --git a/source/SynchronousEventManager.cpp b/source/SynchronousEventManager.cpp
index 3ad1dfeb..c45b48b8 100644
--- a/source/SynchronousEventManager.cpp
+++ b/source/SynchronousEventManager.cpp
@@ -42,6 +42,7 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 #include "StdAfx.h"
 
 #include "SynchronousEventManager.h"
+#include "CPU.h"
 
 void SynchronousEventManager::Insert(SyncEvent* pNewEvent)
 {
@@ -135,8 +136,6 @@ bool SynchronousEventManager::Remove(int id)
 	return false;
 }
 
-extern bool g_irqOnLastOpcodeCycle;
-
 void SynchronousEventManager::Update(int cycles, ULONG uExecutedCycles)
 {
 	SyncEvent* pCurrEvent = m_syncEventHead;
@@ -147,8 +146,8 @@ void SynchronousEventManager::Update(int cycles, ULONG uExecutedCycles)
 		pCurrEvent->m_cyclesRemaining -= cycles;
 		if (pCurrEvent->m_cyclesRemaining <= 0)
 		{
-			if (pCurrEvent->m_cyclesRemaining == 0)
-				g_irqOnLastOpcodeCycle = true;	// IRQ occurs on last cycle of opcode
+			if (pCurrEvent->m_cyclesRemaining == 0 && pCurrEvent->m_canAssertIRQ)
+				SetIrqOnLastOpcodeCycle();	// IRQ occurs on last cycle of opcode
 
 			int cyclesUnderflowed = -pCurrEvent->m_cyclesRemaining;
 
diff --git a/source/SynchronousEventManager.h b/source/SynchronousEventManager.h
index d0b9f621..9a48d405 100644
--- a/source/SynchronousEventManager.h
+++ b/source/SynchronousEventManager.h
@@ -32,6 +32,7 @@ public:
 		: m_id(id),
 		m_cyclesRemaining(initCycles),
 		m_active(false),
+		m_canAssertIRQ(true),
 		m_callback(callback),
 		m_next(NULL)
 	{}
@@ -45,6 +46,7 @@ public:
 	int m_id;
 	int m_cyclesRemaining;
 	bool m_active;
+	bool m_canAssertIRQ;
 	syncEventCB m_callback;
 	SyncEvent* m_next;
 };
diff --git a/test/TestCPU6502/TestCPU6502.cpp b/test/TestCPU6502/TestCPU6502.cpp
index 4354d900..8afe92c7 100644
--- a/test/TestCPU6502/TestCPU6502.cpp
+++ b/test/TestCPU6502/TestCPU6502.cpp
@@ -34,6 +34,10 @@ eCpuType GetActiveCpu(void)
 	return g_ActiveCPU;
 }
 
+void SetIrqOnLastOpcodeCycle(void)
+{
+}
+
 bool g_bStopOnBRK = false;
 
 static __forceinline int Fetch(BYTE& iOpcode, ULONG uExecutedCycles)