Mirror of https://github.com/c64scene-ar/llvm-6502.git (synced 2024-12-13 20:32:21 +00:00)
Use default lowering of DYNAMIC_STACKALLOC. As far as I can tell, ARM isel is doing the right thing and codegen looks correct for both Thumb and Thumb2.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@101410 91177308-0d34-0410-b5e6-96231b3b80d8
parent 31d80103d5
commit 3a1588a2e3
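For context on the commit message: with ISD::DYNAMIC_STACKALLOC marked Expand, the generic legalizer lowers the node itself by copying the stack pointer, subtracting the requested size, masking off low bits when extra alignment is requested, and writing the result back, so no ARM-specific selection code is needed. The sketch below models only that arithmetic in plain C++ for a grow-down stack; the function name is made up for illustration and this is not LLVM's actual expansion code.

#include <cassert>
#include <cstdint>

// Illustrative model of what a dynamic stack allocation expands to on a
// grow-down stack: new SP = align_down(old SP - size, align).
// Name and signature are hypothetical; this is not an LLVM API.
uint32_t expandDynAlloc(uint32_t sp, uint32_t size, uint32_t align) {
  sp -= size;                           // reserve 'size' bytes below old SP
  if (align > 1) {
    assert((align & (align - 1)) == 0 && "alignment must be a power of two");
    sp &= ~(align - 1);                 // round down to the requested alignment
  }
  return sp;                            // new SP, also the block's address
}

The removed SelectDYN_ALLOC in the hunks below hand-picked Thumb instructions to do the same job.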
@@ -121,9 +121,6 @@ private:
   SDNode *SelectARMIndexedLoad(SDNode *N);
   SDNode *SelectT2IndexedLoad(SDNode *N);
 
-  /// SelectDYN_ALLOC - Select dynamic alloc for Thumb.
-  SDNode *SelectDYN_ALLOC(SDNode *N);
-
   /// SelectVLD - Select NEON load intrinsics. NumVecs should be
   /// 1, 2, 3 or 4. The opcode arrays specify the instructions used for
   /// loads of D registers and even subregs and odd subregs of Q registers.
@@ -939,59 +936,6 @@ SDNode *ARMDAGToDAGISel::SelectT2IndexedLoad(SDNode *N) {
   return NULL;
 }
 
-SDNode *ARMDAGToDAGISel::SelectDYN_ALLOC(SDNode *N) {
-  DebugLoc dl = N->getDebugLoc();
-  EVT VT = N->getValueType(0);
-  SDValue Chain = N->getOperand(0);
-  SDValue Size = N->getOperand(1);
-  SDValue Align = N->getOperand(2);
-  SDValue SP = CurDAG->getCopyFromReg(Chain, dl, ARM::SP, MVT::i32);
-  int32_t AlignVal = cast<ConstantSDNode>(Align)->getSExtValue();
-  if (AlignVal < 0)
-    // We need to align the stack. Use Thumb1 tAND which is the only thumb
-    // instruction that can read and write SP. This matches to a pseudo
-    // instruction that has a chain to ensure the result is written back to
-    // the stack pointer.
-    SP = SDValue(CurDAG->getMachineNode(ARM::tANDsp, dl, VT, SP, Align), 0);
-
-  bool isC = isa<ConstantSDNode>(Size);
-  uint32_t C = isC ? cast<ConstantSDNode>(Size)->getZExtValue() : ~0UL;
-  // Handle the most common case for both Thumb1 and Thumb2:
-  // tSUBspi - immediate is between 0 ... 508 inclusive.
-  if (C <= 508 && ((C & 3) == 0))
-    // FIXME: tSUBspi encode scale 4 implicitly.
-    return CurDAG->SelectNodeTo(N, ARM::tSUBspi_, VT, MVT::Other, SP,
-                                CurDAG->getTargetConstant(C/4, MVT::i32),
-                                Chain);
-
-  if (Subtarget->isThumb1Only()) {
-    // Use tADDspr since Thumb1 does not have a sub r, sp, r. ARMISelLowering
-    // should have negated the size operand already. FIXME: We can't insert
-    // new target independent node at this stage so we are forced to negate
-    // it earlier. Is there a better solution?
-    return CurDAG->SelectNodeTo(N, ARM::tADDspr_, VT, MVT::Other, SP, Size,
-                                Chain);
-  } else if (Subtarget->isThumb2()) {
-    if (isC && Predicate_t2_so_imm(Size.getNode())) {
-      // t2SUBrSPi
-      SDValue Ops[] = { SP, CurDAG->getTargetConstant(C, MVT::i32), Chain };
-      return CurDAG->SelectNodeTo(N, ARM::t2SUBrSPi_, VT, MVT::Other, Ops, 3);
-    } else if (isC && Predicate_imm0_4095(Size.getNode())) {
-      // t2SUBrSPi12
-      SDValue Ops[] = { SP, CurDAG->getTargetConstant(C, MVT::i32), Chain };
-      return CurDAG->SelectNodeTo(N, ARM::t2SUBrSPi12_, VT, MVT::Other, Ops, 3);
-    } else {
-      // t2SUBrSPs
-      SDValue Ops[] = { SP, Size,
-                        getI32Imm(ARM_AM::getSORegOpc(ARM_AM::lsl,0)), Chain };
-      return CurDAG->SelectNodeTo(N, ARM::t2SUBrSPs_, VT, MVT::Other, Ops, 4);
-    }
-  }
-
-  // FIXME: Add ADD / SUB sp instructions for ARM.
-  return 0;
-}
-
 /// PairDRegs - Insert a pair of double registers into an implicit def to
 /// form a quad register.
 SDNode *ARMDAGToDAGISel::PairDRegs(EVT VT, SDValue V0, SDValue V1) {
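The common case the removed code handled first was the Thumb1 sub sp, #imm form (tSUBspi): its immediate only covers 0 through 508 in steps of 4 and is stored scaled down by 4, which is what the C <= 508 && ((C & 3) == 0) test and the C/4 operand above implement. A stand-alone C++ sketch of that check, with an illustrative name rather than anything from LLVM:

#include <cstdint>
#include <optional>

// Sketch of the tSUBspi immediate test from the removed SelectDYN_ALLOC:
// the encoding only represents sizes 0..508 that are multiples of 4, and
// the field actually stored is size/4.
std::optional<uint32_t> encodeTSubSpImm(uint32_t bytes) {
  if (bytes <= 508 && (bytes & 3) == 0)
    return bytes / 4;        // value that ends up in the instruction
  return std::nullopt;       // too large or misaligned: use another form
}

Larger or register-sized amounts fell through to tADDspr on Thumb1 and to the t2SUBrSP* forms on Thumb2, as the rest of the removed function shows.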
@@ -1571,8 +1515,6 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
       return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
     }
   }
-  case ARMISD::DYN_ALLOC:
-    return SelectDYN_ALLOC(N);
   case ISD::SRL:
     if (SDNode *I = SelectV6T2BitfieldExtractOp(N,
                       Subtarget->isThumb() ? ARM::t2UBFX : ARM::UBFX))
@@ -384,10 +384,7 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
   // FIXME: Shouldn't need this, since no register is used, but the legalizer
   // doesn't yet know how to not do that for SjLj.
   setExceptionSelectorRegister(ARM::R0);
-  if (Subtarget->isThumb())
-    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
-  else
-    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
+  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
   setOperationAction(ISD::MEMBARRIER, MVT::Other, Custom);
 
   if (!Subtarget->hasV6Ops() && !Subtarget->isThumb2()) {
@@ -7,15 +7,15 @@
 define arm_apcscc void @t() nounwind ssp {
 entry:
 ; CHECK: t:
-; CHECK: mvn r0, #7
-; CHECK: ands sp, r0
-; CHECK: mov r1, sp
-; CHECK: mov sp, r1
-; CHECK: mov r0, sp
-; CHECK: bfc r0, #0, #3
-; CHECK: subs r0, #16
-; CHECK: mov sp, r0
+; Yes, this is stupid codegen, but it's correct.
+; CHECK: sub sp, #16
+; CHECK: mov r1, sp
+; CHECK: mov sp, r1
+; CHECK: ands sp, r0
+; CHECK: mov r0, sp
+; CHECK: bfc r0, #0, #3
+; CHECK: subs r0, #16
+; CHECK: mov sp, r0
   %size = mul i32 8, 2
   %vla_a = alloca i8, i32 %size, align 8
   %vla_b = alloca i8, i32 %size, align 8
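The updated CHECK lines describe what the default expansion now produces for each 16-byte, 8-byte-aligned VLA (%size = mul i32 8, 2): copy SP into a scratch register, clear its low three bits with bfc to enforce the alignment, subtract 16, and move the result back into SP. A plain C++ model of that sequence, for illustration only:

#include <cstdint>

// Models the checked sequence for one VLA:
//   mov r0, sp ; bfc r0, #0, #3 ; subs r0, #16 ; mov sp, r0
// bfc r0, #0, #3 clears bits [0,3), i.e. rounds r0 down to a multiple of 8.
uint32_t allocateVla(uint32_t sp) {
  uint32_t r0 = sp;            // mov r0, sp
  r0 &= ~UINT32_C(7);          // bfc r0, #0, #3  (force 8-byte alignment)
  r0 -= 16;                    // subs r0, #16    (8 * 2 bytes)
  return r0;                   // mov sp, r0      (new stack pointer)
}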