Fix the remaining atomic intrinsics to use the right register classes on Thumb2, and add some basic tests for them.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@132235 91177308-0d34-0410-b5e6-96231b3b80d8
parent d652b1368b
commit de64aaf6c8
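For context, the change below makes the pseudo-instruction expansion for the old (pre-LLVM-3.0) atomic intrinsics constrain the incoming dest and ptr virtual registers to ARM::rGPR and choose the scratch registers' class based on isThumb2, because the Thumb2 ldrex/strex encodings may not use SP or PC (rGPR is r0-r12 plus lr). A minimal reproducer in that era's IR syntax would look roughly like the sketch below; the function and value names (@atomic_add_sketch, %p, %v) are illustrative only and are not part of the commit. The real test added by the commit follows the code change.

; Hypothetical reduced example: one atomic add that llc lowers to an
; ldrex/strex loop; on a thumbv7 triple the loop's registers must come
; from the restricted rGPR class rather than the full GPR class.
define i32 @atomic_add_sketch(i32* %p, i32 %v) nounwind {
entry:
  %0 = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %p, i32 %v)
  ret i32 %0
}

declare i32 @llvm.atomic.load.add.i32.p0i32(i32*, i32) nounwind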
lib/Target/ARM/ARMISelLowering.cpp
@@ -4983,8 +4983,14 @@ ARMTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
   unsigned ptr = MI->getOperand(1).getReg();
   unsigned incr = MI->getOperand(2).getReg();
   DebugLoc dl = MI->getDebugLoc();

   bool isThumb2 = Subtarget->isThumb2();

+  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
+  if (isThumb2) {
+    MRI.constrainRegClass(dest, ARM::rGPRRegisterClass);
+    MRI.constrainRegClass(ptr, ARM::rGPRRegisterClass);
+  }
+
   unsigned ldrOpc, strOpc;
   switch (Size) {
   default: llvm_unreachable("unsupported size for AtomicCmpSwap!");
@@ -5013,10 +5019,10 @@ ARMTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
                   BB->end());
   exitMBB->transferSuccessorsAndUpdatePHIs(BB);

-  MachineRegisterInfo &RegInfo = MF->getRegInfo();
-  unsigned scratch = RegInfo.createVirtualRegister(ARM::GPRRegisterClass);
-  unsigned scratch2 = (!BinOpcode) ? incr :
-    RegInfo.createVirtualRegister(ARM::GPRRegisterClass);
+  TargetRegisterClass *TRC =
+    isThumb2 ? ARM::tGPRRegisterClass : ARM::GPRRegisterClass;
+  unsigned scratch = MRI.createVirtualRegister(TRC);
+  unsigned scratch2 = (!BinOpcode) ? incr : MRI.createVirtualRegister(TRC);

   // thisMBB:
   // ...
@@ -5079,8 +5085,14 @@ ARMTargetLowering::EmitAtomicBinaryMinMax(MachineInstr *MI,
   unsigned incr = MI->getOperand(2).getReg();
   unsigned oldval = dest;
   DebugLoc dl = MI->getDebugLoc();

   bool isThumb2 = Subtarget->isThumb2();

+  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
+  if (isThumb2) {
+    MRI.constrainRegClass(dest, ARM::rGPRRegisterClass);
+    MRI.constrainRegClass(ptr, ARM::rGPRRegisterClass);
+  }
+
   unsigned ldrOpc, strOpc, extendOpc;
   switch (Size) {
   default: llvm_unreachable("unsupported size for AtomicCmpSwap!");
@@ -5112,9 +5124,10 @@ ARMTargetLowering::EmitAtomicBinaryMinMax(MachineInstr *MI,
                   BB->end());
   exitMBB->transferSuccessorsAndUpdatePHIs(BB);

-  MachineRegisterInfo &RegInfo = MF->getRegInfo();
-  unsigned scratch = RegInfo.createVirtualRegister(ARM::GPRRegisterClass);
-  unsigned scratch2 = RegInfo.createVirtualRegister(ARM::GPRRegisterClass);
+  TargetRegisterClass *TRC =
+    isThumb2 ? ARM::tGPRRegisterClass : ARM::GPRRegisterClass;
+  unsigned scratch = MRI.createVirtualRegister(TRC);
+  unsigned scratch2 = MRI.createVirtualRegister(TRC);

   // thisMBB:
   // ...
@@ -5135,7 +5148,7 @@ ARMTargetLowering::EmitAtomicBinaryMinMax(MachineInstr *MI,

   // Sign extend the value, if necessary.
   if (signExtend && extendOpc) {
-    oldval = RegInfo.createVirtualRegister(ARM::GPRRegisterClass);
+    oldval = MRI.createVirtualRegister(ARM::GPRRegisterClass);
     AddDefaultPred(BuildMI(BB, dl, TII->get(extendOpc), oldval).addReg(dest));
   }

test/CodeGen/ARM/atomic-op.ll (new file, 103 lines)
@@ -0,0 +1,103 @@
+; RUN: llc < %s -mtriple=armv7-apple-darwin10 | FileCheck %s
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin10 | FileCheck %s
+
+define void @func(i32 %argc, i8** %argv) nounwind {
+entry:
+  %argc.addr = alloca i32        ; <i32*> [#uses=1]
+  %argv.addr = alloca i8**       ; <i8***> [#uses=1]
+  %val1 = alloca i32             ; <i32*> [#uses=2]
+  %val2 = alloca i32             ; <i32*> [#uses=15]
+  %andt = alloca i32             ; <i32*> [#uses=2]
+  %ort = alloca i32              ; <i32*> [#uses=2]
+  %xort = alloca i32             ; <i32*> [#uses=2]
+  %old = alloca i32              ; <i32*> [#uses=18]
+  %temp = alloca i32             ; <i32*> [#uses=2]
+  store i32 %argc, i32* %argc.addr
+  store i8** %argv, i8*** %argv.addr
+  store i32 0, i32* %val1
+  store i32 31, i32* %val2
+  store i32 3855, i32* %andt
+  store i32 3855, i32* %ort
+  store i32 3855, i32* %xort
+  store i32 4, i32* %temp
+  %tmp = load i32* %temp
+  ; CHECK: ldrex
+  ; CHECK: add
+  ; CHECK: strex
+  call i32 @llvm.atomic.load.add.i32.p0i32( i32* %val1, i32 %tmp )   ; <i32>:0 [#uses=1]
+  store i32 %0, i32* %old
+  ; CHECK: ldrex
+  ; CHECK: sub
+  ; CHECK: strex
+  call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %val2, i32 30 )     ; <i32>:1 [#uses=1]
+  store i32 %1, i32* %old
+  ; CHECK: ldrex
+  ; CHECK: add
+  ; CHECK: strex
+  call i32 @llvm.atomic.load.add.i32.p0i32( i32* %val2, i32 1 )      ; <i32>:2 [#uses=1]
+  store i32 %2, i32* %old
+  ; CHECK: ldrex
+  ; CHECK: sub
+  ; CHECK: strex
+  call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %val2, i32 1 )      ; <i32>:3 [#uses=1]
+  store i32 %3, i32* %old
+  ; CHECK: ldrex
+  ; CHECK: and
+  ; CHECK: strex
+  call i32 @llvm.atomic.load.and.i32.p0i32( i32* %andt, i32 4080 )   ; <i32>:4 [#uses=1]
+  store i32 %4, i32* %old
+  ; CHECK: ldrex
+  ; CHECK: or
+  ; CHECK: strex
+  call i32 @llvm.atomic.load.or.i32.p0i32( i32* %ort, i32 4080 )     ; <i32>:5 [#uses=1]
+  store i32 %5, i32* %old
+  ; CHECK: ldrex
+  ; CHECK: eor
+  ; CHECK: strex
+  call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %xort, i32 4080 )   ; <i32>:6 [#uses=1]
+  store i32 %6, i32* %old
+  ; CHECK: ldrex
+  ; CHECK: cmp
+  ; CHECK: strex
+  call i32 @llvm.atomic.load.min.i32.p0i32( i32* %val2, i32 16 )     ; <i32>:7 [#uses=1]
+  store i32 %7, i32* %old
+  %neg = sub i32 0, 1            ; <i32> [#uses=1]
+  ; CHECK: ldrex
+  ; CHECK: cmp
+  ; CHECK: strex
+  call i32 @llvm.atomic.load.min.i32.p0i32( i32* %val2, i32 %neg )   ; <i32>:8 [#uses=1]
+  store i32 %8, i32* %old
+  ; CHECK: ldrex
+  ; CHECK: cmp
+  ; CHECK: strex
+  call i32 @llvm.atomic.load.max.i32.p0i32( i32* %val2, i32 1 )      ; <i32>:9 [#uses=1]
+  store i32 %9, i32* %old
+  ; CHECK: ldrex
+  ; CHECK: cmp
+  ; CHECK: strex
+  call i32 @llvm.atomic.load.max.i32.p0i32( i32* %val2, i32 0 )      ; <i32>:10 [#uses=1]
+  store i32 %10, i32* %old
+  ret void
+}
+
+declare i32 @llvm.atomic.load.add.i32.p0i32(i32*, i32) nounwind
+
+declare i32 @llvm.atomic.load.sub.i32.p0i32(i32*, i32) nounwind
+
+declare i32 @llvm.atomic.load.and.i32.p0i32(i32*, i32) nounwind
+
+declare i32 @llvm.atomic.load.or.i32.p0i32(i32*, i32) nounwind
+
+declare i32 @llvm.atomic.load.xor.i32.p0i32(i32*, i32) nounwind
+
+declare i32 @llvm.atomic.load.min.i32.p0i32(i32*, i32) nounwind
+
+declare i32 @llvm.atomic.load.max.i32.p0i32(i32*, i32) nounwind
+
+declare i32 @llvm.atomic.load.umax.i32.p0i32(i32*, i32) nounwind
+
+declare i32 @llvm.atomic.load.umin.i32.p0i32(i32*, i32) nounwind
+
+declare i32 @llvm.atomic.swap.i32.p0i32(i32*, i32) nounwind
+
+declare i32 @llvm.atomic.cmp.swap.i32.p0i32(i32*, i32, i32) nounwind