llvm-6502/test/CodeGen/X86/atomic_add.ll
Evan Cheng 37b7387da9 Optimize some common usage patterns of atomic built-ins __sync_add_and_fetch() and __sync_sub_and_fetch().
When the return value is not used (i.e., we only care about the value in memory), x86 does not have to use xadd to implement these. Instead, it can use add, sub, inc, or dec instructions with the "lock" prefix.
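
For example (a self-contained sketch in this file's pre-2.9 intrinsic style; the assembly in the comments is the intended lowering, not verbatim llc output):

declare i32 @llvm.atomic.load.add.i32.p0i32(i32* nocapture, i32) nounwind

; Result dead: a lock-prefixed RMW instruction is enough.
define void @add_unused(i32* %p) nounwind {
entry:
  %0 = tail call i32 @llvm.atomic.load.add.i32.p0i32(i32* %p, i32 5) ; <i32> [#uses=0]
  ret void                      ; lowers to: lock addl $5, (%rdi)
}

; Result used: the old value must come back, so xadd (or a cmpxchg
; loop) is required.
define i32 @add_used(i32* %p) nounwind {
entry:
  %0 = tail call i32 @llvm.atomic.load.add.i32.p0i32(i32* %p, i32 5) ; <i32> [#uses=1]
  ret i32 %0                    ; needs: movl $5, %eax ... lock xaddl %eax, (%rdi)
}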

This is currently implemented with a bit of an instruction selection trick. The issue is that the target-independent pattern produces a value and a chain, while we want to map it to a node that produces only a chain. The current trick is to select it into a merge_values whose first definition is an implicit_def. The proper solution is to add new ISD opcodes for the no-output variants; the DAG combiner could then transform the node before it reaches target node selection.
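
In node shapes (a schematic sketch, not literal SelectionDAG dump output; the LOCK_ADD32mr name is illustrative):

;; Before selection: the pattern defines a value and a chain, but the
;; value is dead.
;   val, ch = AtomicLoadAdd ch, ptr, x
;
;; After selection with the trick: the lock'ed instruction defines only
;; a chain, and merge_values recreates the expected two-result shape.
;   ch'  = LOCK_ADD32mr ch, ptr, x
;   val' = IMPLICIT_DEF
;   val, ch = merge_values val', ch'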

Problem #2 is that we are adding a whole set of x86 atomic instructions that are identical to their non-lock counterparts. We need a way to attach target-specific information to target nodes and have it carried over to machine instructions, so that the asm printer (or JIT) can use it to emit the "lock" prefix.
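
The two forms really are byte-identical except for the one-byte 0xF0 prefix, e.g.:

addl $2, (%rdi)         # 83 07 02
lock addl $2, (%rdi)    # f0 83 07 02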


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@77582 91177308-0d34-0410-b5e6-96231b3b80d8
2009-07-30 08:33:02 +00:00

; RUN: llvm-as < %s | llc -march=x86-64 | FileCheck %s
; rdar://7103704
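; Atomic add/sub whose result is unused should select to lock-prefixed
; add/sub/inc/dec memory operations rather than xadd.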
define void @sub1(i32* nocapture %p, i32 %v) nounwind ssp {
entry:
; CHECK: sub1:
; CHECK: subl
%0 = tail call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %p, i32 %v) ; <i32> [#uses=0]
ret void
}
define void @inc4(i64* nocapture %p) nounwind ssp {
entry:
; CHECK: inc4:
; CHECK: incq
%0 = tail call i64 @llvm.atomic.load.add.i64.p0i64(i64* %p, i64 1) ; <i64> [#uses=0]
ret void
}
declare i64 @llvm.atomic.load.add.i64.p0i64(i64* nocapture, i64) nounwind
define void @add8(i64* nocapture %p) nounwind ssp {
entry:
; CHECK: add8:
; CHECK: addq $2
%0 = tail call i64 @llvm.atomic.load.add.i64.p0i64(i64* %p, i64 2) ; <i64> [#uses=0]
ret void
}
define void @add4(i64* nocapture %p, i32 %v) nounwind ssp {
entry:
; CHECK: add4:
; CHECK: addq
%0 = sext i32 %v to i64 ; <i64> [#uses=1]
%1 = tail call i64 @llvm.atomic.load.add.i64.p0i64(i64* %p, i64 %0) ; <i64> [#uses=0]
ret void
}
define void @inc3(i8* nocapture %p) nounwind ssp {
entry:
; CHECK: inc3:
; CHECK: incb
%0 = tail call i8 @llvm.atomic.load.add.i8.p0i8(i8* %p, i8 1) ; <i8> [#uses=0]
ret void
}
declare i8 @llvm.atomic.load.add.i8.p0i8(i8* nocapture, i8) nounwind
define void @add7(i8* nocapture %p) nounwind ssp {
entry:
; CHECK: add7:
; CHECK: addb $2
%0 = tail call i8 @llvm.atomic.load.add.i8.p0i8(i8* %p, i8 2) ; <i8> [#uses=0]
ret void
}
define void @add3(i8* nocapture %p, i32 %v) nounwind ssp {
entry:
; CHECK: add3:
; CHECK: addb
%0 = trunc i32 %v to i8 ; <i8> [#uses=1]
%1 = tail call i8 @llvm.atomic.load.add.i8.p0i8(i8* %p, i8 %0) ; <i8> [#uses=0]
ret void
}
define void @inc2(i16* nocapture %p) nounwind ssp {
entry:
; CHECK: inc2:
; CHECK: incw
%0 = tail call i16 @llvm.atomic.load.add.i16.p0i16(i16* %p, i16 1) ; <i16> [#uses=0]
ret void
}
declare i16 @llvm.atomic.load.add.i16.p0i16(i16* nocapture, i16) nounwind
define void @add6(i16* nocapture %p) nounwind ssp {
entry:
; CHECK: add6:
; CHECK: addw $2
%0 = tail call i16 @llvm.atomic.load.add.i16.p0i16(i16* %p, i16 2) ; <i16> [#uses=0]
ret void
}
define void @add2(i16* nocapture %p, i32 %v) nounwind ssp {
entry:
; CHECK: add2:
; CHECK: addw
%0 = trunc i32 %v to i16 ; <i16> [#uses=1]
%1 = tail call i16 @llvm.atomic.load.add.i16.p0i16(i16* %p, i16 %0) ; <i16> [#uses=0]
ret void
}
define void @inc1(i32* nocapture %p) nounwind ssp {
entry:
; CHECK: inc1:
; CHECK: incl
%0 = tail call i32 @llvm.atomic.load.add.i32.p0i32(i32* %p, i32 1) ; <i32> [#uses=0]
ret void
}
declare i32 @llvm.atomic.load.add.i32.p0i32(i32* nocapture, i32) nounwind
define void @add5(i32* nocapture %p) nounwind ssp {
entry:
; CHECK: add5:
; CHECK: addl $2
%0 = tail call i32 @llvm.atomic.load.add.i32.p0i32(i32* %p, i32 2) ; <i32> [#uses=0]
ret void
}
define void @add1(i32* nocapture %p, i32 %v) nounwind ssp {
entry:
; CHECK: add1:
; CHECK: addl
%0 = tail call i32 @llvm.atomic.load.add.i32.p0i32(i32* %p, i32 %v) ; <i32> [#uses=0]
ret void
}
define void @dec4(i64* nocapture %p) nounwind ssp {
entry:
; CHECK: dec4:
; CHECK: decq
%0 = tail call i64 @llvm.atomic.load.sub.i64.p0i64(i64* %p, i64 1) ; <i64> [#uses=0]
ret void
}
declare i64 @llvm.atomic.load.sub.i64.p0i64(i64* nocapture, i64) nounwind
define void @sub8(i64* nocapture %p) nounwind ssp {
entry:
; CHECK: sub8:
; CHECK: subq $2
%0 = tail call i64 @llvm.atomic.load.sub.i64.p0i64(i64* %p, i64 2) ; <i64> [#uses=0]
ret void
}
define void @sub4(i64* nocapture %p, i32 %v) nounwind ssp {
entry:
; CHECK: sub4:
; CHECK: subq
%0 = sext i32 %v to i64 ; <i64> [#uses=1]
%1 = tail call i64 @llvm.atomic.load.sub.i64.p0i64(i64* %p, i64 %0) ; <i64> [#uses=0]
ret void
}
define void @dec3(i8* nocapture %p) nounwind ssp {
entry:
; CHECK: dec3:
; CHECK: decb
%0 = tail call i8 @llvm.atomic.load.sub.i8.p0i8(i8* %p, i8 1) ; <i8> [#uses=0]
ret void
}
declare i8 @llvm.atomic.load.sub.i8.p0i8(i8* nocapture, i8) nounwind
define void @sub7(i8* nocapture %p) nounwind ssp {
entry:
; CHECK: sub7:
; CHECK: subb $2
%0 = tail call i8 @llvm.atomic.load.sub.i8.p0i8(i8* %p, i8 2) ; <i8> [#uses=0]
ret void
}
define void @sub3(i8* nocapture %p, i32 %v) nounwind ssp {
entry:
; CHECK: sub3:
; CHECK: subb
%0 = trunc i32 %v to i8 ; <i8> [#uses=1]
%1 = tail call i8 @llvm.atomic.load.sub.i8.p0i8(i8* %p, i8 %0) ; <i8> [#uses=0]
ret void
}
define void @dec2(i16* nocapture %p) nounwind ssp {
entry:
; CHECK: dec2:
; CHECK: decw
%0 = tail call i16 @llvm.atomic.load.sub.i16.p0i16(i16* %p, i16 1) ; <i16> [#uses=0]
ret void
}
declare i16 @llvm.atomic.load.sub.i16.p0i16(i16* nocapture, i16) nounwind
define void @sub6(i16* nocapture %p) nounwind ssp {
entry:
; CHECK: sub6:
; CHECK: subw $2
%0 = tail call i16 @llvm.atomic.load.sub.i16.p0i16(i16* %p, i16 2) ; <i16> [#uses=0]
ret void
}
define void @sub2(i16* nocapture %p, i32 %v) nounwind ssp {
entry:
; CHECK: sub2:
; CHECK: subw
%0 = trunc i32 %v to i16 ; <i16> [#uses=1]
%1 = tail call i16 @llvm.atomic.load.sub.i16.p0i16(i16* %p, i16 %0) ; <i16> [#uses=0]
ret void
}
define void @dec1(i32* nocapture %p) nounwind ssp {
entry:
; CHECK: dec1:
; CHECK: decl
%0 = tail call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %p, i32 1) ; <i32> [#uses=0]
ret void
}
declare i32 @llvm.atomic.load.sub.i32.p0i32(i32* nocapture, i32) nounwind
define void @sub5(i32* nocapture %p) nounwind ssp {
entry:
; CHECK: sub5:
; CHECK: subl $2
%0 = tail call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %p, i32 2) ; <i32> [#uses=0]
ret void
}