[Hexagon] Converting intrinsics combine imm/imm, simple shifts and extends.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@226483 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
Colin LeMahieu
2015-01-19 18:56:19 +00:00
parent 1d72e18caa
commit 3bea6a4959
3 changed files with 135 additions and 0 deletions

View File

@@ -127,6 +127,15 @@ entry:
ret void
}
; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}={{ *}}combine(##-1280{{ *}},{{ *}}#120)
; A2.combineii with two immediate operands must lower to a single
; combine(##imm, #imm) writing a 64-bit register pair (per the CHECK above).
; The store to @c keeps the intrinsic's result observable so it is not
; eliminated before instruction selection.
define void @test25(i32 %a) #0 {
entry:
%0 = tail call i64 @llvm.hexagon.A2.combineii(i32 -1280, i32 120)
store i64 %0, i64* @c, align 4
ret void
}
declare i32 @llvm.hexagon.A2.add(i32, i32) #1
declare i32 @llvm.hexagon.A2.sub(i32, i32) #1
declare i32 @llvm.hexagon.A2.and(i32, i32) #1
@@ -139,3 +148,4 @@ declare i32 @llvm.hexagon.A2.orir(i32, i32) #1
declare i32 @llvm.hexagon.A2.subri(i32, i32)
declare i32 @llvm.hexagon.A2.tfril(i32, i32) #1
declare i32 @llvm.hexagon.A2.tfrih(i32, i32) #1
declare i64 @llvm.hexagon.A2.combineii(i32, i32) #1

View File

@@ -0,0 +1,83 @@
; RUN: llc -march=hexagon < %s | FileCheck %s
; Verify that ALU32 - aslh, asrh, sxth, sxtb, zxth, zxtb intrinsics
; are lowered to the right instructions.
@c = external global i64
; CHECK: r{{[0-9]+}}{{ *}}={{ *}}aslh({{ *}}r{{[0-9]+}}{{ *}})
; The A2.aslh intrinsic must select the aslh instruction (see CHECK above).
; Result is sign-extended to i64 and stored to @c so the call stays live.
define void @test1(i32 %a) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.aslh(i32 %a)
%conv = sext i32 %0 to i64
store i64 %conv, i64* @c, align 8
ret void
}
declare i32 @llvm.hexagon.A2.aslh(i32) #1
; CHECK: r{{[0-9]+}}{{ *}}={{ *}}asrh({{ *}}r{{[0-9]+}}{{ *}})
; The A2.asrh intrinsic must select the asrh instruction (see CHECK above).
; Result is sign-extended to i64 and stored to @c so the call stays live.
define void @test2(i32 %a) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.asrh(i32 %a)
%conv = sext i32 %0 to i64
store i64 %conv, i64* @c, align 8
ret void
}
declare i32 @llvm.hexagon.A2.asrh(i32) #1
; CHECK: r{{[0-9]+}}{{ *}}={{ *}}sxtb({{ *}}r{{[0-9]+}}{{ *}})
; The A2.sxtb intrinsic must select the sxtb instruction (see CHECK above).
; Result is sign-extended to i64 and stored to @c so the call stays live.
define void @test3(i32 %a) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.sxtb(i32 %a)
%conv = sext i32 %0 to i64
store i64 %conv, i64* @c, align 8
ret void
}
declare i32 @llvm.hexagon.A2.sxtb(i32) #1
; CHECK: r{{[0-9]+}}{{ *}}={{ *}}sxth({{ *}}r{{[0-9]+}}{{ *}})
; The A2.sxth intrinsic must select the sxth instruction (see CHECK above).
; Result is sign-extended to i64 and stored to @c so the call stays live.
define void @test4(i32 %a) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.sxth(i32 %a)
%conv = sext i32 %0 to i64
store i64 %conv, i64* @c, align 8
ret void
}
declare i32 @llvm.hexagon.A2.sxth(i32) #1
; CHECK: r{{[0-9]+}}{{ *}}={{ *}}zxtb({{ *}}r{{[0-9]+}}{{ *}})
; The A2.zxtb intrinsic must select the zxtb instruction (see CHECK above).
; NOTE(review): numbering skips test5 in this file; presumably intentional.
; Result is sign-extended to i64 and stored to @c so the call stays live.
define void @test6(i32 %a) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.zxtb(i32 %a)
%conv = sext i32 %0 to i64
store i64 %conv, i64* @c, align 8
ret void
}
declare i32 @llvm.hexagon.A2.zxtb(i32) #1
; CHECK: r{{[0-9]+}}{{ *}}={{ *}}zxth({{ *}}r{{[0-9]+}}{{ *}})
; The A2.zxth intrinsic must select the zxth instruction (see CHECK above).
; Result is sign-extended to i64 and stored to @c so the call stays live.
define void @test7(i32 %a) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.zxth(i32 %a)
%conv = sext i32 %0 to i64
store i64 %conv, i64* @c, align 8
ret void
}
declare i32 @llvm.hexagon.A2.zxth(i32) #1
; CHECK: r{{[0-9]+}}{{ *}}={{ *}}asrh({{ *}}r{{[0-9]+}}{{ *}})
; The SI.to.SXTHI.asrh intrinsic must also lower to the asrh instruction,
; matching A2.asrh above (same CHECK pattern as test2).
; Result is sign-extended to i64 and stored to @c so the call stays live.
define void @test8(i32 %a) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.SI.to.SXTHI.asrh(i32 %a)
%conv = sext i32 %0 to i64
store i64 %conv, i64* @c, align 8
ret void
}
declare i32 @llvm.hexagon.SI.to.SXTHI.asrh(i32) #1