mirror of
https://github.com/c64scene-ar/llvm-6502.git
synced 2024-11-01 15:11:24 +00:00
Add an atomic lowering pass
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@110113 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
parent
795e70e431
commit
3bababf880
@ -167,6 +167,7 @@ perl -e '$/ = undef; for (split(/\n/, <>)) { s:^ *///? ?::; print " <p>\n" if !
|
||||
<tr><td><a href="#loop-unroll">-loop-unroll</a></td><td>Unroll loops</td></tr>
|
||||
<tr><td><a href="#loop-unswitch">-loop-unswitch</a></td><td>Unswitch loops</td></tr>
|
||||
<tr><td><a href="#loopsimplify">-loopsimplify</a></td><td>Canonicalize natural loops</td></tr>
|
||||
<tr><td><a href="#loweratomic">-loweratomic</a></td><td>Lower atomic intrinsics</td></tr>
|
||||
<tr><td><a href="#lowerinvoke">-lowerinvoke</a></td><td>Lower invoke and unwind, for unwindless code generators</td></tr>
|
||||
<tr><td><a href="#lowersetjmp">-lowersetjmp</a></td><td>Lower Set Jump</td></tr>
|
||||
<tr><td><a href="#lowerswitch">-lowerswitch</a></td><td>Lower SwitchInst's to branches</td></tr>
|
||||
@ -1547,6 +1548,24 @@ if (X < 3) {</pre>
|
||||
</p>
|
||||
</div>
|
||||
|
||||
<!-------------------------------------------------------------------------- -->
|
||||
<div class="doc_subsection">
|
||||
<a name="loweratomic">-loweratomic: Lower atomic intrinsics</a>
|
||||
</div>
|
||||
<div class="doc_text">
|
||||
<p>
|
||||
This pass lowers atomic intrinsics to non-atomic form for use in a known
|
||||
non-preemptible environment.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
The pass does not verify that the environment is non-preemptible (in
|
||||
general this would require knowledge of the entire call graph of the
|
||||
program including any libraries which may not be available in bitcode form);
|
||||
it simply lowers every atomic intrinsic.
|
||||
</p>
|
||||
</div>
|
||||
|
||||
<!-------------------------------------------------------------------------- -->
|
||||
<div class="doc_subsection">
|
||||
<a name="lowerinvoke">-lowerinvoke: Lower invoke and unwind, for unwindless code generators</a>
|
||||
|
@ -148,6 +148,7 @@ namespace {
|
||||
(void) llvm::createABCDPass();
|
||||
(void) llvm::createLintPass();
|
||||
(void) llvm::createSinkingPass();
|
||||
(void) llvm::createLowerAtomicPass();
|
||||
|
||||
(void)new llvm::IntervalPartition();
|
||||
(void)new llvm::FindUsedTypes();
|
||||
|
@ -338,6 +338,12 @@ FunctionPass *createABCDPass();
|
||||
//
|
||||
FunctionPass *createSinkingPass();
|
||||
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// LowerAtomic - Lower atomic intrinsics to non-atomic form
|
||||
//
|
||||
Pass *createLowerAtomicPass();
|
||||
|
||||
} // End llvm namespace
|
||||
|
||||
#endif
|
||||
|
@ -17,6 +17,7 @@ add_llvm_library(LLVMScalarOpts
|
||||
LoopStrengthReduce.cpp
|
||||
LoopUnrollPass.cpp
|
||||
LoopUnswitch.cpp
|
||||
LowerAtomic.cpp
|
||||
MemCpyOptimizer.cpp
|
||||
Reassociate.cpp
|
||||
Reg2Mem.cpp
|
||||
|
160
lib/Transforms/Scalar/LowerAtomic.cpp
Normal file
160
lib/Transforms/Scalar/LowerAtomic.cpp
Normal file
@ -0,0 +1,160 @@
|
||||
//===- LowerAtomic.cpp - Lower atomic intrinsics --------------------------===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This pass lowers atomic intrinsics to non-atomic form for use in a known
|
||||
// non-preemptible environment.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#define DEBUG_TYPE "loweratomic"
|
||||
#include "llvm/Transforms/Scalar.h"
|
||||
#include "llvm/BasicBlock.h"
|
||||
#include "llvm/Function.h"
|
||||
#include "llvm/Instruction.h"
|
||||
#include "llvm/Instructions.h"
|
||||
#include "llvm/Intrinsics.h"
|
||||
#include "llvm/Pass.h"
|
||||
#include "llvm/Support/IRBuilder.h"
|
||||
|
||||
using namespace llvm;
|
||||
|
||||
namespace {
|
||||
|
||||
// Lower a single call instruction if it is a recognized atomic intrinsic.
// In a known non-preemptible environment the atomic forms degenerate to
// plain loads and stores, so each intrinsic is expanded inline and the
// call is erased.  Returns true if CI was lowered (CI is no longer a
// valid pointer afterwards), false if CI is not an atomic intrinsic.
bool LowerAtomicIntrinsic(CallInst *CI) {
  IRBuilder<> Builder(CI->getParent(), CI);

  Function *Callee = CI->getCalledFunction();
  if (!Callee)
    return false;  // indirect call; cannot be an intrinsic

  unsigned IID = Callee->getIntrinsicID();
  switch (IID) {
  case Intrinsic::memory_barrier:
    // A barrier is a no-op when nothing can preempt us; just delete it.
    break;

  case Intrinsic::atomic_load_add:
  case Intrinsic::atomic_load_sub:
  case Intrinsic::atomic_load_and:
  case Intrinsic::atomic_load_nand:
  case Intrinsic::atomic_load_or:
  case Intrinsic::atomic_load_xor:
  case Intrinsic::atomic_load_max:
  case Intrinsic::atomic_load_min:
  case Intrinsic::atomic_load_umax:
  case Intrinsic::atomic_load_umin: {
    Value *Ptr = CI->getArgOperand(0);
    Value *Delta = CI->getArgOperand(1);

    LoadInst *Orig = Builder.CreateLoad(Ptr);
    // Initialize Res so that an NDEBUG build (where the assert in the
    // default branch below compiles away) cannot hand an uninitialized
    // pointer to CreateStore if an unexpected IID ever reaches the
    // inner switch.
    Value *Res = 0;
    switch (IID) {
    case Intrinsic::atomic_load_add:
      Res = Builder.CreateAdd(Orig, Delta);
      break;
    case Intrinsic::atomic_load_sub:
      Res = Builder.CreateSub(Orig, Delta);
      break;
    case Intrinsic::atomic_load_and:
      Res = Builder.CreateAnd(Orig, Delta);
      break;
    case Intrinsic::atomic_load_nand:
      Res = Builder.CreateNot(Builder.CreateAnd(Orig, Delta));
      break;
    case Intrinsic::atomic_load_or:
      Res = Builder.CreateOr(Orig, Delta);
      break;
    case Intrinsic::atomic_load_xor:
      Res = Builder.CreateXor(Orig, Delta);
      break;
    case Intrinsic::atomic_load_max:
      Res = Builder.CreateSelect(Builder.CreateICmpSLT(Orig, Delta),
                                 Delta,
                                 Orig);
      break;
    case Intrinsic::atomic_load_min:
      Res = Builder.CreateSelect(Builder.CreateICmpSLT(Orig, Delta),
                                 Orig,
                                 Delta);
      break;
    case Intrinsic::atomic_load_umax:
      Res = Builder.CreateSelect(Builder.CreateICmpULT(Orig, Delta),
                                 Delta,
                                 Orig);
      break;
    case Intrinsic::atomic_load_umin:
      Res = Builder.CreateSelect(Builder.CreateICmpULT(Orig, Delta),
                                 Orig,
                                 Delta);
      break;
    default: assert(0 && "Unrecognized atomic modify operation");
    }
    Builder.CreateStore(Res, Ptr);

    // The intrinsic returns the value the memory held *before* the update.
    CI->replaceAllUsesWith(Orig);
    break;
  }

  case Intrinsic::atomic_swap: {
    Value *Ptr = CI->getArgOperand(0);
    Value *Val = CI->getArgOperand(1);

    LoadInst *Orig = Builder.CreateLoad(Ptr);
    Builder.CreateStore(Val, Ptr);

    CI->replaceAllUsesWith(Orig);
    break;
  }

  case Intrinsic::atomic_cmp_swap: {
    Value *Ptr = CI->getArgOperand(0);
    Value *Cmp = CI->getArgOperand(1);
    Value *Val = CI->getArgOperand(2);

    LoadInst *Orig = Builder.CreateLoad(Ptr);
    Value *Equal = Builder.CreateICmpEQ(Orig, Cmp);
    // Store the new value only when the comparison succeeds; otherwise
    // store back the original value (a harmless redundant store).
    Value *Res = Builder.CreateSelect(Equal, Val, Orig);
    Builder.CreateStore(Res, Ptr);

    CI->replaceAllUsesWith(Orig);
    break;
  }

  default:
    return false;  // not an atomic intrinsic; leave it alone
  }

  assert(CI->use_empty() &&
         "Lowering should have eliminated any uses of the intrinsic call!");
  CI->eraseFromParent();

  return true;
}
|
||||
|
||||
struct LowerAtomic : public BasicBlockPass {
|
||||
static char ID;
|
||||
LowerAtomic() : BasicBlockPass(&ID) {}
|
||||
bool runOnBasicBlock(BasicBlock &BB) {
|
||||
bool Changed = false;
|
||||
for (BasicBlock::iterator DI = BB.begin(), DE = BB.end(); DI != DE; ) {
|
||||
Instruction *Inst = DI++;
|
||||
if (CallInst *CI = dyn_cast<CallInst>(Inst))
|
||||
Changed |= LowerAtomicIntrinsic(CI);
|
||||
}
|
||||
return Changed;
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
char LowerAtomic::ID = 0;
|
||||
static RegisterPass<LowerAtomic>
|
||||
X("loweratomic", "Lower atomic intrinsics to non-atomic form");
|
||||
|
||||
Pass *llvm::createLowerAtomicPass() { return new LowerAtomic(); }
|
40
test/Transforms/LowerAtomic/atomic-load.ll
Normal file
40
test/Transforms/LowerAtomic/atomic-load.ll
Normal file
@ -0,0 +1,40 @@
|
||||
; RUN: opt < %s -loweratomic -S | FileCheck %s
; Verify that -loweratomic expands atomic read-modify-write intrinsics
; into plain load / compute / store sequences.

declare i8 @llvm.atomic.load.add.i8.p0i8(i8* %ptr, i8 %delta)
declare i8 @llvm.atomic.load.nand.i8.p0i8(i8* %ptr, i8 %delta)
declare i8 @llvm.atomic.load.min.i8.p0i8(i8* %ptr, i8 %delta)

; atomic add -> load; add; store, returning the pre-update value.
define i8 @add() {
; CHECK: @add
  %ptr = alloca i8
  %old = call i8 @llvm.atomic.load.add.i8.p0i8(i8* %ptr, i8 42)
; CHECK: [[INST:%[a-z0-9]+]] = load
; CHECK-NEXT: add
; CHECK-NEXT: store
  ret i8 %old
; CHECK: ret i8 [[INST]]
}

; atomic nand -> load; and; xor (not); store.
define i8 @nand() {
; CHECK: @nand
  %ptr = alloca i8
  %old = call i8 @llvm.atomic.load.nand.i8.p0i8(i8* %ptr, i8 42)
; CHECK: [[INST:%[a-z0-9]+]] = load
; CHECK-NEXT: and
; CHECK-NEXT: xor
; CHECK-NEXT: store
  ret i8 %old
; CHECK: ret i8 [[INST]]
}

; atomic min -> load; icmp; select; store.
define i8 @min() {
; CHECK: @min
  %ptr = alloca i8
  %old = call i8 @llvm.atomic.load.min.i8.p0i8(i8* %ptr, i8 42)
; CHECK: [[INST:%[a-z0-9]+]] = load
; CHECK-NEXT: icmp
; CHECK-NEXT: select
; CHECK-NEXT: store
  ret i8 %old
; CHECK: ret i8 [[INST]]
}
|
26
test/Transforms/LowerAtomic/atomic-swap.ll
Normal file
26
test/Transforms/LowerAtomic/atomic-swap.ll
Normal file
@ -0,0 +1,26 @@
|
||||
; RUN: opt < %s -loweratomic -S | FileCheck %s
; Verify that -loweratomic expands swap and compare-and-swap intrinsics
; into non-atomic load/store sequences.

declare i8 @llvm.atomic.cmp.swap.i8.p0i8(i8* %ptr, i8 %cmp, i8 %val)
declare i8 @llvm.atomic.swap.i8.p0i8(i8* %ptr, i8 %val)

; cmpxchg -> load; icmp; select; store, returning the pre-update value.
define i8 @cmpswap() {
; CHECK: @cmpswap
  %ptr = alloca i8
  %old = call i8 @llvm.atomic.cmp.swap.i8.p0i8(i8* %ptr, i8 0, i8 42)
; CHECK: [[INST:%[a-z0-9]+]] = load
; CHECK-NEXT: icmp
; CHECK-NEXT: select
; CHECK-NEXT: store
  ret i8 %old
; CHECK: ret i8 [[INST]]
}

; swap -> load; store, returning the pre-update value.
define i8 @swap() {
; CHECK: @swap
  %ptr = alloca i8
  %old = call i8 @llvm.atomic.swap.i8.p0i8(i8* %ptr, i8 42)
; CHECK: [[INST:%[a-z0-9]+]] = load
; CHECK-NEXT: store
  ret i8 %old
; CHECK: ret i8 [[INST]]
}
|
10
test/Transforms/LowerAtomic/barrier.ll
Normal file
10
test/Transforms/LowerAtomic/barrier.ll
Normal file
@ -0,0 +1,10 @@
|
||||
; RUN: opt < %s -loweratomic -S | FileCheck %s
; Verify that -loweratomic deletes memory barriers entirely: in a
; non-preemptible environment a barrier has no effect.

declare void @llvm.memory.barrier(i1 %ll, i1 %ls, i1 %sl, i1 %ss, i1 %device)

define void @barrier() {
; CHECK: @barrier
  call void @llvm.memory.barrier(i1 0, i1 0, i1 0, i1 0, i1 0)
; CHECK-NEXT: ret
  ret void
}
|
3
test/Transforms/LowerAtomic/dg.exp
Normal file
3
test/Transforms/LowerAtomic/dg.exp
Normal file
@ -0,0 +1,3 @@
|
||||
# DejaGNU driver: run every LowerAtomic test in this directory.
load_lib llvm.exp

RunLLVMTests [lsort [glob -nocomplain $srcdir/$subdir/*.{ll,c,cpp}]]
|
Loading…
Reference in New Issue
Block a user