; RUN: llc < %s | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-BE
; RUN: llc -fast-isel -fast-isel-abort < %s | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-BE
; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-LE
; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu -fast-isel -fast-isel-abort < %s | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-LE
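; Note (added): the first two RUN lines above use the module's default
; big-endian powerpc64 triple (SelectionDAG, then FastISel); the powerpc64le
; RUN lines repeat both configurations for little-endian.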
target triple = "powerpc64-unknown-linux-gnu"
; Trivial patchpoint codegen
;
define i64 @trivial_patchpoint_codegen(i64 %p1, i64 %p2, i64 %p3, i64 %p4) {
entry:
; CHECK-LABEL: trivial_patchpoint_codegen:

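; The target address materialized below is 0xDEADBEEFCAFE (244837814094590),
; built in r12 in 16-bit pieces: li sign-extends 0xDEAD (-8531), rldic shifts
; it into bits 32-47 and clears the rest, and oris/ori supply 0xBEEF and
; 0xCAFE before the indirect call through CTR. The second sequence targets
; 0xDEADBEEFCAFF, hence the final ori of 51967.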
; CHECK: li 12, -8531
; CHECK-NEXT: rldic 12, 12, 32, 16
; CHECK-NEXT: oris 12, 12, 48879
; CHECK-NEXT: ori 12, 12, 51966
; CHECK-NEXT: mtctr 12
; CHECK-NEXT: bctrl

; CHECK: li 12, -8531
; CHECK-NEXT: rldic 12, 12, 32, 16
; CHECK-NEXT: oris 12, 12, 48879
; CHECK-NEXT: ori 12, 12, 51967
; CHECK-NEXT: mtctr 12
; CHECK-NEXT: bctrl

; CHECK: blr
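; Each patchpoint below reserves 24 bytes, the minimum on PPC64: the six
; instructions checked above (five to materialize the target plus bctrl) are
; four bytes each.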
%resolveCall2 = inttoptr i64 244837814094590 to i8*
%result = tail call i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 2, i32 24, i8* %resolveCall2, i32 4, i64 %p1, i64 %p2, i64 %p3, i64 %p4)
%resolveCall3 = inttoptr i64 244837814094591 to i8*
tail call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 3, i32 24, i8* %resolveCall3, i32 2, i64 %p1, i64 %result)
ret i64 %result
}

; Caller frame metadata with stackmaps. This should not be optimized
; as a leaf function.
;
; CHECK-LABEL: caller_meta_leaf
; CHECK-BE: stdu 1, -80(1)
; CHECK-LE: stdu 1, -64(1)
; CHECK: Ltmp
; CHECK-BE: addi 1, 1, 80
; CHECK-LE: addi 1, 1, 64
; CHECK: blr
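; The frame-size difference above is expected: assuming the ELFv1 (BE) ABI
; reserves a 48-byte linkage area and ELFv2 (LE) a 32-byte one, adding the
; 24 bytes of alloca'd metadata and rounding up to 16-byte stack alignment
; gives 80 and 64 bytes respectively.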
define void @caller_meta_leaf() {
entry:
%metadata = alloca i64, i32 3, align 8
store i64 11, i64* %metadata
store i64 12, i64* %metadata
store i64 13, i64* %metadata
call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 4, i32 0, i64* %metadata)
ret void
}

; Test patchpoints reusing the same TargetConstant.
; <rdar:15390785> Assertion failed: (CI.getNumArgOperands() >= NumArgs + 4)
; There is no way to verify this, since it depends on memory allocation.
; But I think it's useful to include as a working example.
define i64 @testLowerConstant(i64 %arg, i64 %tmp2, i64 %tmp10, i64* %tmp33, i64 %tmp79) {
entry:
%tmp80 = add i64 %tmp79, -16
%tmp81 = inttoptr i64 %tmp80 to i64*
%tmp82 = load i64* %tmp81, align 8
tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 14, i32 8, i64 %arg, i64 %tmp2, i64 %tmp10, i64 %tmp82)
tail call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 15, i32 32, i8* null, i32 3, i64 %arg, i64 %tmp10, i64 %tmp82)
%tmp83 = load i64* %tmp33, align 8
%tmp84 = add i64 %tmp83, -24
%tmp85 = inttoptr i64 %tmp84 to i64*
%tmp86 = load i64* %tmp85, align 8
tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 17, i32 8, i64 %arg, i64 %tmp10, i64 %tmp86)
tail call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 18, i32 32, i8* null, i32 3, i64 %arg, i64 %tmp10, i64 %tmp86)
ret i64 10
}

; Test small patchpoints that don't emit calls.
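; With a null target and a 20-byte patchpoint, the whole region is padded
; with nops: five 4-byte instructions, as checked below.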
define void @small_patchpoint_codegen(i64 %p1, i64 %p2, i64 %p3, i64 %p4) {
entry:
; CHECK-LABEL: small_patchpoint_codegen:
; CHECK: Ltmp
; CHECK: nop
; CHECK-NEXT: nop
; CHECK-NEXT: nop
; CHECK-NEXT: nop
; CHECK-NEXT: nop
; CHECK-NOT: nop
; CHECK: blr
%result = tail call i64 (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.i64(i64 5, i32 20, i8* null, i32 2, i64 %p1, i64 %p2)
ret void
}

declare void @llvm.experimental.stackmap(i64, i32, ...)
declare void @llvm.experimental.patchpoint.void(i64, i32, i8*, i32, ...)
declare i64 @llvm.experimental.patchpoint.i64(i64, i32, i8*, i32, ...)
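; For reference, the leading operands of the intrinsics declared above are:
; stackmap(i64 id, i32 shadow bytes, live values...) and
; patchpoint(i64 id, i32 total bytes, i8* target, i32 number of call
; arguments, call arguments..., live values...).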