d1b7382983
CopyToReg/CopyFromReg/INLINEASM. These are annoying because they have the same opcode before and after isel. Fix this by setting their NodeID to -1 to indicate that they are selected, just like what automatically happens when selecting things that end up being machine nodes.

With that done, give IsLegalToFold a new flag that causes it to ignore chains. This lets the HandleMergeInputChains routine be the one place that validates chains after a match is successful, enabling the new hotness in chain processing. This smarter chain processing eliminates the need for "PreprocessRMW" in the X86 and MSP430 backends and enables MSP430 to start matching its multiple-mem-operand instructions more aggressively. I currently #if out the dead code in the X86 and MSP430 backends; I'll remove it for real in a follow-on patch.

The testcase changes are:

test/CodeGen/X86/sse3.ll: we generate better code.
test/CodeGen/X86/store_op_load_fold2.ll: PreprocessRMW was miscompiling this before; we now generate correct code. Convert it to FileCheck while I'm at it.
test/CodeGen/MSP430/Inst16mm.ll: add a testcase for mem/mem folding to make anton happy. :)

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@97596 91177308-0d34-0410-b5e6-96231b3b80d8
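As a rough illustration of the two conventions the message relies on (a NodeId of -1 meaning "already selected", and chain validation deferred to a single place after a successful match), here is a minimal self-contained sketch. The types and function names below are invented stand-ins for illustration only, not LLVM's actual SelectionDAG API:

// Minimal model (NOT LLVM's real classes) of the conventions described above.
#include <cassert>
#include <vector>

struct Node {
  int NodeId = 0;                 // set to -1 once the node has been selected
  std::vector<Node*> ChainPreds;  // incoming chain edges (simplified)
};

// Mark a node as selected the same way selected machine nodes are marked.
void markSelected(Node &N) { N.NodeId = -1; }
bool isSelected(const Node &N) { return N.NodeId == -1; }

// Simplified legality check: with IgnoreChains set, chain correctness is not
// checked here; the caller validates all input chains in one place after the
// whole pattern has matched (the role the message assigns to
// HandleMergeInputChains).
bool isLegalToFold(const Node &N, bool IgnoreChains) {
  if (isSelected(N))
    return false;                 // never fold an already-selected node
  if (IgnoreChains)
    return true;                  // defer chain validation to the match epilogue
  return N.ChainPreds.empty();    // stand-in for the real chain-cycle check
}

int main() {
  Node CopyToReg;                 // stands in for a CopyToReg/CopyFromReg/INLINEASM node
  markSelected(CopyToReg);        // its opcode is unchanged by isel, so mark via NodeId
  assert(isSelected(CopyToReg));
  assert(!isLegalToFold(CopyToReg, /*IgnoreChains=*/true));
  return 0;
}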
25 lines
1.4 KiB
LLVM
; RUN: llc < %s -march=x86 -x86-asm-syntax=intel | FileCheck %s

target datalayout = "e-p:32:32"

%struct.Macroblock = type { i32, i32, i32, i32, i32, [8 x i32], %struct.Macroblock*, %struct.Macroblock*, i32, [2 x [4 x [4 x [2 x i32]]]], [16 x i8], [16 x i8], i32, i64, [4 x i32], [4 x i32], i64, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i16, double, i32, i32, i32, i32, i32, i32, i32, i32, i32 }

define internal fastcc i32 @dct_chroma(i32 %uv, i32 %cr_cbp) nounwind {
cond_true2732.preheader:  ; preds = %entry
  %tmp2666 = getelementptr %struct.Macroblock* null, i32 0, i32 13  ; <i64*> [#uses=2]
  %tmp2674 = trunc i32 0 to i8  ; <i8> [#uses=1]
  %tmp2667.us.us = load i64* %tmp2666  ; <i64> [#uses=1]
  %tmp2670.us.us = load i64* null  ; <i64> [#uses=1]
  %shift.upgrd.1 = zext i8 %tmp2674 to i64  ; <i64> [#uses=1]
  %tmp2675.us.us = shl i64 %tmp2670.us.us, %shift.upgrd.1  ; <i64> [#uses=1]
  %tmp2675not.us.us = xor i64 %tmp2675.us.us, -1  ; <i64> [#uses=1]
  %tmp2676.us.us = and i64 %tmp2667.us.us, %tmp2675not.us.us  ; <i64> [#uses=1]
  store i64 %tmp2676.us.us, i64* %tmp2666
  ret i32 0

; CHECK: and {{E..}}, DWORD PTR [360]
; CHECK: and DWORD PTR [356], {{E..}}
; CHECK: mov DWORD PTR [360], {{E..}}
}
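The function above boils down to the read-modify-write pattern the commit targets: the i64 field of the Macroblock is updated as p->field &= ~(x << s). A hypothetical C++ equivalent is sketched below (the struct layout and field name are illustrative, not taken from the original source); the point of the test is that the loads and stores fold into the and instructions' memory operands, as the CHECK lines verify, rather than being emitted as separate load/op/store sequences.

// Hypothetical C++ analogue of the IR above; the struct and field name are
// illustrative stand-ins for field 13 (the i64) of %struct.Macroblock.
#include <cstdint>

struct Macroblock {
  std::uint64_t bits;  // stands in for the i64 field at index 13
};

// mb->bits &= ~(mask << shift): a 64-bit read-modify-write. On x86-32 the
// expectation checked above is that the two 32-bit halves are updated with
// and/mov instructions that use memory operands directly, not with separate
// load, and, store triples.
void clear_bits(Macroblock *mb, std::uint64_t mask, std::uint8_t shift) {
  mb->bits &= ~(mask << shift);
}

int main() {
  Macroblock mb{~0ULL};
  clear_bits(&mb, 0x3, 4);  // clears bits 4 and 5 in place
  return 0;
}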