- Turn on runtime detection of loop and jump alignment, as the Aranym people
  reported some improvement with it on larger loops. Small loops remain an
  issue for now, until unrolling is implemented for DBcc.
- Const jumps are identified in readcpu. I don't want to duplicate code
  uselessly. Rather, it's the JIT's job to know whether we are doing block
  inlining and to un-mark those instructions as end-of-block.
This commit is contained in:
gbeauche 2002-10-03 15:05:01 +00:00
parent a60c6da7c3
commit 8de7ad1091
2 changed files with 10 additions and 11 deletions

View File

@ -103,7 +103,7 @@ static bool lazy_flush = true; // Flag: lazy translation cache invalidation
static bool avoid_fpu = true; // Flag: compile FPU instructions ?
static bool have_cmov = false; // target has CMOV instructions ?
static bool have_rat_stall = true; // target has partial register stalls ?
static bool tune_alignment = false; // Tune code alignments for running CPU ? static bool tune_alignment = true; // Tune code alignments for running CPU ?
static int align_loops = 32; // Align the start of loops
static int align_jumps = 32; // Align the start of jumps
static int zero_fd = -1;
@ -5699,17 +5699,13 @@ void build_comp(void)
prop[opcode].cflow = fl_trap; // ILLEGAL instructions do trap
}
#define IS_CONST_JUMP(opc) \
( ((table68k[opc].mnemo == i_Bcc) && (table68k[opc].cc < 2)) \
|| (table68k[opc].mnemo == i_BSR) \
)
for (i = 0; tbl[i].opcode < 65536; i++) {
int cflow = table68k[tbl[i].opcode].cflow;
if (USE_INLINING && IS_CONST_JUMP(tbl[i].opcode)) if (USE_INLINING && ((cflow & fl_const_jump) != 0))
prop[cft_map(tbl[i].opcode)].cflow = fl_const_jump; cflow = fl_const_jump;
else else
prop[cft_map(tbl[i].opcode)].cflow = cflow; cflow &= ~fl_const_jump;
prop[cft_map(tbl[i].opcode)].cflow = cflow;
int uses_fpu = tbl[i].specific & 32;
if (uses_fpu && avoid_fpu)
@ -5718,8 +5714,6 @@ void build_comp(void)
compfunctbl[cft_map(tbl[i].opcode)] = tbl[i].handler;
}
#undef IS_CONST_JUMP
for (i = 0; nftbl[i].opcode < 65536; i++) {
int uses_fpu = tbl[i].specific & 32;
if (uses_fpu && avoid_fpu)

View File

@ -786,6 +786,11 @@ void read_table68k (void)
|| (table68k[opc].mnemo == i_BSR) \
)
// Precise const jumps as such. The JIT compiler will take
// care to actually enable that optimization or not
if (IS_CONST_JUMP(i))
table68k[i].cflow |= fl_const_jump;
// Fix flags used information for Scc, Bcc, TRAPcc, DBcc instructions
int flags_used = table68k[i].flaglive;
if ( (mnemo == i_Scc)