mirror of https://github.com/c64scene-ar/llvm-6502.git
ARM: simplify EmitAtomicBinary64
ATOMIC_STORE operations always get here as a lowered ATOMIC_SWAP, so there's no need for any code to handle them specially. There should be no functionality change, so no tests.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@203567 91177308-0d34-0410-b5e6-96231b3b80d8
parent dabc5073b2
commit 90b25eaef2
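As the commit message says, a 64-bit atomic store only reaches this backend code after it has already been rewritten as an ATOMIC_SWAP whose result is unused, which is why the dedicated ATOMIC_STORE handling below can be deleted. A minimal standalone C++ sketch of that equivalence follows; it is illustrative only, not the legalizer code, and the function name atomic_store_as_swap is invented for the example.

#include <atomic>
#include <cstdint>

// Illustrative sketch: an atomic store is equivalent to an atomic exchange
// whose previous value is simply discarded. The same equivalence is what lets
// an ATOMIC_STORE node be lowered to an ATOMIC_SWAP, so the ARM backend only
// ever sees the swap form for 64-bit atomic stores.
void atomic_store_as_swap(std::atomic<uint64_t> &loc, uint64_t val) {
  (void)loc.exchange(val, std::memory_order_seq_cst); // old value ignored
}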
@@ -3323,12 +3323,6 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
     else
       break;
 
-  case ISD::ATOMIC_STORE:
-    if (cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64)
-      return SelectAtomic(N, 0, 0, 0, ARM::ATOMIC_STORE_I64);
-    else
-      break;
-
   case ISD::ATOMIC_LOAD_ADD:
     return SelectAtomic(N,
                         ARM::ATOMIC_LOAD_ADD_I8,
@@ -6517,15 +6517,13 @@ ARMTargetLowering::EmitAtomicBinary64(MachineInstr *MI, MachineBasicBlock *BB,
   MachineFunction::iterator It = BB;
   ++It;
 
-  bool isStore = (MI->getOpcode() == ARM::ATOMIC_STORE_I64);
-  unsigned offset = (isStore ? -2 : 0);
   unsigned destlo = MI->getOperand(0).getReg();
   unsigned desthi = MI->getOperand(1).getReg();
-  unsigned ptr = MI->getOperand(offset+2).getReg();
-  unsigned vallo = MI->getOperand(offset+3).getReg();
-  unsigned valhi = MI->getOperand(offset+4).getReg();
-  unsigned OrdIdx = offset + (IsCmpxchg ? 7 : 5);
-  AtomicOrdering Ord = static_cast<AtomicOrdering>(MI->getOperand(OrdIdx).getImm());
+  unsigned ptr = MI->getOperand(2).getReg();
+  unsigned vallo = MI->getOperand(3).getReg();
+  unsigned valhi = MI->getOperand(4).getReg();
+  AtomicOrdering Ord =
+      static_cast<AtomicOrdering>(MI->getOperand(IsCmpxchg ? 7 : 5).getImm());
   DebugLoc dl = MI->getDebugLoc();
   bool isThumb2 = Subtarget->isThumb2();
 
@@ -6579,23 +6577,22 @@ ARMTargetLowering::EmitAtomicBinary64(MachineInstr *MI, MachineBasicBlock *BB,
   //   fallthrough --> exitMBB
   BB = loopMBB;
 
-  if (!isStore) {
-    // Load
-    if (isThumb2) {
-      AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc))
-                     .addReg(destlo, RegState::Define)
-                     .addReg(desthi, RegState::Define)
-                     .addReg(ptr));
-    } else {
-      unsigned GPRPair0 = MRI.createVirtualRegister(&ARM::GPRPairRegClass);
-      AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc))
-                     .addReg(GPRPair0, RegState::Define).addReg(ptr));
-      // Copy r2/r3 into dest. (This copy will normally be coalesced.)
-      BuildMI(BB, dl, TII->get(TargetOpcode::COPY), destlo)
-        .addReg(GPRPair0, 0, ARM::gsub_0);
-      BuildMI(BB, dl, TII->get(TargetOpcode::COPY), desthi)
-        .addReg(GPRPair0, 0, ARM::gsub_1);
-    }
+  // Load
+  if (isThumb2) {
+    AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc))
+                   .addReg(destlo, RegState::Define)
+                   .addReg(desthi, RegState::Define)
+                   .addReg(ptr));
+  } else {
+    unsigned GPRPair0 = MRI.createVirtualRegister(&ARM::GPRPairRegClass);
+    AddDefaultPred(BuildMI(BB, dl, TII->get(ldrOpc))
+                   .addReg(GPRPair0, RegState::Define)
+                   .addReg(ptr));
+    // Copy r2/r3 into dest. (This copy will normally be coalesced.)
+    BuildMI(BB, dl, TII->get(TargetOpcode::COPY), destlo)
+      .addReg(GPRPair0, 0, ARM::gsub_0);
+    BuildMI(BB, dl, TII->get(TargetOpcode::COPY), desthi)
+      .addReg(GPRPair0, 0, ARM::gsub_1);
   }
 
   unsigned StoreLo, StoreHi;
@@ -7761,7 +7758,6 @@ ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
   case ARM::ATOMIC_LOAD_AND_I64:
     return EmitAtomicBinary64(MI, BB, isThumb2 ? ARM::t2ANDrr : ARM::ANDrr,
                               isThumb2 ? ARM::t2ANDrr : ARM::ANDrr);
-  case ARM::ATOMIC_STORE_I64:
   case ARM::ATOMIC_SWAP_I64:
     return EmitAtomicBinary64(MI, BB, 0, 0, false);
   case ARM::ATOMIC_CMP_SWAP_I64:
@@ -4538,11 +4538,6 @@ let usesCustomInserter = 1, Defs = [CPSR] in {
                                 (outs GPR:$dst1, GPR:$dst2),
                                 (ins GPR:$addr, i32imm:$ordering),
                                 NoItinerary, []>;
-  let mayStore = 1 in
-  def ATOMIC_STORE_I64 : PseudoInst<
-                               (outs GPR:$dst1, GPR:$dst2),
-                               (ins GPR:$addr, GPR:$src1, GPR:$src2, i32imm:$ordering),
-                               NoItinerary, []>;
 }
 
 let usesCustomInserter = 1 in {