/*
 * Apple // emulator for Linux
 *
 * CPU Timing Support.
 *
 * Mostly this adds support for specifically throttling the emulator speed to
 * match a 1.02MHz Apple //e.
 *
 * Added 2013 by Aaron Culliney
 *
 */
|
|
|
|
#include "timing.h"
|
2013-07-08 03:52:30 +00:00
|
|
|
#include "misc.h"
|
2013-06-28 06:36:25 +00:00
|
|
|
|
|
|
|
#include <stdlib.h>
|
|
|
|
#include <stdio.h>
|
|
|
|
#include <time.h>
|
2013-07-02 08:10:57 +00:00
|
|
|
#include <pthread.h>
|
2013-07-22 00:20:03 +00:00
|
|
|
#include <limits.h>
|
2013-06-28 06:36:25 +00:00
|
|
|
|
2013-07-22 00:20:03 +00:00
|
|
|
// How many times per second the emulated clock is re-calibrated against the
// real (monotonic) clock.
#define CALIBRATE_HZ 120

static unsigned long cpu_target_hz = APPLE2_HZ;                       // target clock speed
static unsigned long calibrate_interval = NANOSECONDS / CALIBRATE_HZ; // calibration interval for drifting
static unsigned long cycle_nanoseconds = NANOSECONDS / APPLE2_HZ;     // nanosecs per cycle
static unsigned int cycle_nanoseconds_count;                          // set at init to cycle_nanoseconds/spinloop_count — presumably nsec per spin iteration; TODO confirm

static struct timespec deltat, t0, ti, tj;  // scratch diff + reference timestamps (t0 = startup, ti/tj = calibration window)

static unsigned long cycle_count=0; // CPU cycle counter
static int spinloop_count=0;        // spin loop counter (spin iterations per emulated cycle, set at init)

static long sleep_adjust=0;         // NOTE(review): not used in this file's visible code
static long sleep_adjust_inc=0;     // NOTE(review): not used in this file's visible code

// shared synchronization primitives defined elsewhere in the emulator
extern pthread_mutex_t mutex;
extern pthread_cond_t cond;
|
|
|
|
|
2013-06-28 06:36:25 +00:00
|
|
|
// -----------------------------------------------------------------------------
|
|
|
|
|
|
|
|
// assuming end > start, returns end - start
|
2013-07-06 04:37:13 +00:00
|
|
|
static inline struct timespec timespec_diff(struct timespec start, struct timespec end) {
|
2013-06-28 06:36:25 +00:00
|
|
|
struct timespec t;
|
|
|
|
|
|
|
|
// assuming time_t is signed ...
|
2013-07-06 04:37:13 +00:00
|
|
|
if (end.tv_nsec < start.tv_nsec)
|
|
|
|
{
|
2013-06-28 06:36:25 +00:00
|
|
|
t.tv_sec = end.tv_sec - start.tv_sec - 1;
|
|
|
|
t.tv_nsec = NANOSECONDS + end.tv_nsec - start.tv_nsec;
|
2013-07-06 04:37:13 +00:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2013-06-28 06:36:25 +00:00
|
|
|
t.tv_sec = end.tv_sec - start.tv_sec;
|
|
|
|
t.tv_nsec = end.tv_nsec - start.tv_nsec;
|
|
|
|
}
|
|
|
|
|
|
|
|
return t;
|
|
|
|
}
|
|
|
|
|
2013-07-06 04:37:13 +00:00
|
|
|
static inline long timespec_nsecs(struct timespec t) {
|
2013-06-28 06:36:25 +00:00
|
|
|
return t.tv_sec*NANOSECONDS + t.tv_nsec;
|
|
|
|
}
|
|
|
|
|
2013-07-22 00:20:03 +00:00
|
|
|
// spin loop to throttle to target CPU Hz
static inline void _spin_loop(unsigned long c)
{
    // volatile accumulator prevents the compiler from eliding the busy-wait
    static volatile unsigned int spinney = 0;

    while (c--) {
        ++spinney;
    }
}
|
|
|
|
|
|
|
|
static void _determine_initial_spinloop_counter()
|
|
|
|
{
|
|
|
|
struct timespec s0, s1;
|
|
|
|
|
|
|
|
// time the spinloop to determine a good starting value for the spin counter
|
|
|
|
|
|
|
|
unsigned long avg_spin_nsecs = 0;
|
|
|
|
unsigned int const samples = 5;
|
|
|
|
unsigned int i=0;
|
|
|
|
spinloop_count = 500000000;
|
|
|
|
do
|
|
|
|
{
|
|
|
|
clock_gettime(CLOCK_MONOTONIC, &s0);
|
|
|
|
_spin_loop(spinloop_count);
|
|
|
|
clock_gettime(CLOCK_MONOTONIC, &s1);
|
|
|
|
deltat = timespec_diff(s0, s1);
|
|
|
|
|
|
|
|
if (deltat.tv_sec > 0)
|
|
|
|
{
|
|
|
|
printf("oops long wait (>= %lu sec) adjusting loop count (%d -> %d)\n", deltat.tv_sec, spinloop_count, spinloop_count>>1);
|
|
|
|
spinloop_count >>= 1;
|
|
|
|
i = 0;
|
|
|
|
avg_spin_nsecs = 0;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
printf("spinloop = %lu nsec\n", deltat.tv_nsec);
|
|
|
|
avg_spin_nsecs += deltat.tv_nsec;
|
|
|
|
++i;
|
|
|
|
} while (i<samples);
|
|
|
|
|
|
|
|
avg_spin_nsecs = (avg_spin_nsecs / samples);
|
|
|
|
printf("average = %lu nsec\n", avg_spin_nsecs);
|
|
|
|
|
|
|
|
spinloop_count = cycle_nanoseconds * spinloop_count / avg_spin_nsecs;
|
|
|
|
|
|
|
|
cycle_nanoseconds_count = cycle_nanoseconds / spinloop_count;
|
|
|
|
|
|
|
|
printf("counter for a single cycle = %d\n", spinloop_count);
|
|
|
|
}
|
|
|
|
|
2013-07-06 04:37:13 +00:00
|
|
|
void timing_initialize() {
|
2013-07-22 00:20:03 +00:00
|
|
|
|
|
|
|
// should do this only on startup
|
|
|
|
_determine_initial_spinloop_counter();
|
|
|
|
|
2013-06-28 06:36:25 +00:00
|
|
|
clock_gettime(CLOCK_MONOTONIC, &t0);
|
|
|
|
ti=t0;
|
|
|
|
}
|
|
|
|
|
2013-07-22 00:20:03 +00:00
|
|
|
// Placeholder for runtime CPU-speed scaling (e.g. 2x, 4x the target Hz).
// Currently unimplemented; kept so callers can already link against it.
void timing_set_cpu_scale(unsigned int scale)
{
    (void)scale; // unused until scaling is implemented — silences -Wunused-parameter
    // ...
}
|
|
|
|
|
|
|
|
/*
 * Throttles 6502 CPU down to the target CPU frequency (default is speed of
 * original Apple //e).
 *
 * This uses an adaptive spin loop to stay closer to the target CPU frequency.
 *
 */
|
2013-07-22 00:20:03 +00:00
|
|
|
void timing_throttle()
{
    // Called once per executed 6502 instruction: burns wall-clock time in a
    // spin loop proportional to the instruction's cycle count, and
    // periodically re-measures real elapsed time to nudge the per-cycle spin
    // count toward the target CPU frequency.

    static unsigned int drift_interval_counter=0; // in nsecs since last
    static unsigned int instruction_interval_counter=0; // instruction count since last
    static unsigned int spin_adjust_interval=INT_MAX; // apply +/-1 sub-adjustment relative to this modulus
    static int8_t spin_adjust_count=0; // +/- 1

    ++instruction_interval_counter;

    // cycles consumed by the opcode just executed (base table + extra cycles)
    unsigned int opcycles = cpu65__opcycles[cpu65_debug.opcode] + cpu65_debug.opcycles;
    if (!opcycles)
    {
        opcycles = 2; // assume 2 cycles for UNK opcodes
    }
    cycle_count += opcycles;

    // NOTE(review): the modulo test applies the +/-1 adjustment on every
    // instruction EXCEPT exact multiples of spin_adjust_interval — verify the
    // intended polarity ("every interval" suggests the opposite).
    int8_t c = instruction_interval_counter%spin_adjust_interval ? spin_adjust_count : 0;
    _spin_loop(opcycles * (spinloop_count + c) );
    // NOTE(review): drift accumulates only c*cycle_nanoseconds, not
    // opcycles*cycle_nanoseconds — if c stays 0 (e.g. right after startup,
    // spin_adjust_count==0) calibration below never triggers; confirm intent.
    drift_interval_counter += c*cycle_nanoseconds;

    // not yet time to recalibrate
    if (drift_interval_counter < calibrate_interval)
    {
        return;
    }

    // -------------------------------------------------------------------------
    // calibrate emulator clock to real clock ...

    clock_gettime(CLOCK_MONOTONIC, &tj);
    deltat = timespec_diff(ti, tj);
    ti=tj;

    // NOTE: these calculations could overflow if emulator speed is severely dampened back...
    unsigned long real_counter = NANOSECONDS * deltat.tv_sec;
    real_counter += deltat.tv_nsec;
    long diff_nsecs = real_counter - drift_interval_counter; // whole +/- nsec diff

    // estimated wall-clock nsec represented by the instructions this interval
    float nsecs_per_oneloop = cycle_nanoseconds/(float)spinloop_count;
    unsigned int instruction_interval_nsecs = instruction_interval_counter * nsecs_per_oneloop;

    // reset
    drift_interval_counter=0;
    instruction_interval_counter=0;

    // calculate spin adjustment
    if (diff_nsecs == 0)
    {
        // nothing to do
    }
    else if (abs(diff_nsecs) > instruction_interval_nsecs)
    {
        // drift exceeds one whole spin-unit per instruction:
        // spin for additional +/- X each instruction
        spinloop_count += diff_nsecs / instruction_interval_nsecs;
        spin_adjust_interval=INT_MAX;
    }
    else
    {
        // sub adjustment : spin for additional +/- 1 every interval
        spin_adjust_count = diff_nsecs < 0 ? -1 : 1;
        spin_adjust_interval = instruction_interval_nsecs / abs(diff_nsecs);
    }
}
|
2013-07-22 00:20:03 +00:00
|
|
|
|