[Hexagon] Converting intrinsics combine imm/imm, simple shifts and extends.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@226483 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
Colin LeMahieu 2015-01-19 18:56:19 +00:00
parent 1d72e18caa
commit 3bea6a4959
3 changed files with 135 additions and 0 deletions

View File

@@ -21,6 +21,10 @@ class T_R_pat <InstHexagon MI, Intrinsic IntID>
: Pat <(IntID I32:$Rs),
(MI I32:$Rs)>;
// T_II_pat: select an intrinsic taking two immediate operands, each
// constrained by its own PatFrag predicate, into instruction MI.
class T_II_pat <InstHexagon MI, Intrinsic IntID, PatFrag Imm1, PatFrag Imm2>
: Pat<(IntID Imm1:$Is, Imm2:$It),
(MI Imm1:$Is, Imm2:$It)>;
// T_RI_pat: select an intrinsic taking a register and an immediate; the
// immediate predicate defaults to accepting any i32 immediate.
class T_RI_pat <InstHexagon MI, Intrinsic IntID, PatLeaf ImmPred = PatLeaf<(i32 imm)>>
: Pat<(IntID I32:$Rs, ImmPred:$It),
(MI I32:$Rs, ImmPred:$It)>;
@@ -33,6 +37,18 @@ class T_RR_pat <InstHexagon MI, Intrinsic IntID>
: Pat <(IntID I32:$Rs, I32:$Rt),
(MI I32:$Rs, I32:$Rt)>;
// T_QII_pat: predicated form — predicate register plus two immediate
// operands, each constrained by its own PatFrag predicate.
class T_QII_pat <InstHexagon MI, Intrinsic IntID, PatFrag Imm1, PatFrag Imm2>
: Pat <(IntID (i32 PredRegs:$Ps), Imm1:$Is, Imm2:$It),
(MI PredRegs:$Ps, Imm1:$Is, Imm2:$It)>;
// T_QRI_pat: predicated form — predicate register, then register, then
// immediate.
class T_QRI_pat <InstHexagon MI, Intrinsic IntID, PatFrag ImmPred>
: Pat <(IntID (i32 PredRegs:$Ps), I32:$Rs, ImmPred:$Is),
(MI PredRegs:$Ps, I32:$Rs, ImmPred:$Is)>;
// T_QIR_pat: predicated form — predicate register, then immediate, then
// register (operand order swapped relative to T_QRI_pat).
class T_QIR_pat <InstHexagon MI, Intrinsic IntID, PatFrag ImmPred>
: Pat <(IntID (i32 PredRegs:$Ps), ImmPred:$Is, I32:$Rs),
(MI PredRegs:$Ps, ImmPred:$Is, I32:$Rs)>;
// T_RRR_pat: intrinsic taking three 32-bit register operands.
class T_RRR_pat <InstHexagon MI, Intrinsic IntID>
: Pat <(IntID I32:$Rs, I32:$Rt, I32:$Ru),
(MI I32:$Rs, I32:$Rt, I32:$Ru)>;
@@ -267,6 +283,32 @@ def : T_I_pat <A2_tfrsi, int_hexagon_A2_tfrsi>;
// Expand the 64-bit register-pair transfer intrinsic into a combine of the
// source pair's high and low 32-bit halves.
def : Pat<(int_hexagon_A2_tfrp DoubleRegs:$src),
(A2_combinew (HiReg DoubleRegs:$src), (LoReg DoubleRegs:$src))>;
/********************************************************************
* ALU32/PERM *
*********************************************************************/
// Combine: merge halfwords (h/l selects high/low half of each source).
def: T_RR_pat<A2_combine_hh, int_hexagon_A2_combine_hh>;
def: T_RR_pat<A2_combine_hl, int_hexagon_A2_combine_hl>;
def: T_RR_pat<A2_combine_lh, int_hexagon_A2_combine_lh>;
def: T_RR_pat<A2_combine_ll, int_hexagon_A2_combine_ll>;
// combineii: the first immediate may be constant-extended (s8ExtPred), the
// second must fit a plain signed 8-bit field (s8ImmPred).
def: T_II_pat<A2_combineii, int_hexagon_A2_combineii, s8ExtPred, s8ImmPred>;
// C2_mux needs a predicate register, but the intrinsic's selector arrives in
// a general register; move it across with C2_tfrrp first.
def: Pat<(i32 (int_hexagon_C2_mux (I32:$Rp), (I32:$Rs),
(I32:$Rt))),
(i32 (C2_mux (C2_tfrrp IntRegs:$Rp), IntRegs:$Rs, IntRegs:$Rt))>;
// Shift halfword
def : T_R_pat<A2_aslh, int_hexagon_A2_aslh>;
def : T_R_pat<A2_asrh, int_hexagon_A2_asrh>;
// SI_to_SXTHI_asrh lowers to the same asrh instruction as A2_asrh.
def : T_R_pat<A2_asrh, int_hexagon_SI_to_SXTHI_asrh>;
// Sign/zero extend
def : T_R_pat<A2_sxth, int_hexagon_A2_sxth>;
def : T_R_pat<A2_sxtb, int_hexagon_A2_sxtb>;
def : T_R_pat<A2_zxth, int_hexagon_A2_zxth>;
def : T_R_pat<A2_zxtb, int_hexagon_A2_zxtb>;
//
// ALU 32 types.
//

View File

@@ -127,6 +127,15 @@ entry:
ret void
}
; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}={{ *}}combine(##-1280{{ *}},{{ *}}#120)
; combineii with mixed immediate sizes: -1280 requires a constant extender
; (##) while 120 fits the plain signed 8-bit field (#).
define void @test25(i32 %a) #0 {
entry:
%0 = tail call i64 @llvm.hexagon.A2.combineii(i32 -1280, i32 120)
store i64 %0, i64* @c, align 4
ret void
}
declare i32 @llvm.hexagon.A2.add(i32, i32) #1
declare i32 @llvm.hexagon.A2.sub(i32, i32) #1
declare i32 @llvm.hexagon.A2.and(i32, i32) #1
@@ -139,3 +148,4 @@ declare i32 @llvm.hexagon.A2.orir(i32, i32) #1
declare i32 @llvm.hexagon.A2.subri(i32, i32)
declare i32 @llvm.hexagon.A2.tfril(i32, i32) #1
declare i32 @llvm.hexagon.A2.tfrih(i32, i32) #1
declare i64 @llvm.hexagon.A2.combineii(i32, i32) #1

View File

@@ -0,0 +1,83 @@
; RUN: llc -march=hexagon < %s | FileCheck %s
; Verify that ALU32 - aslh, asrh, sxth, sxtb, zxth, zxtb intrinsics
; are lowered to the right instructions.
@c = external global i64
; CHECK: r{{[0-9]+}}{{ *}}={{ *}}aslh({{ *}}r{{[0-9]+}}{{ *}})
; llvm.hexagon.A2.aslh must lower to the aslh instruction; the result is
; widened and stored to @c so it stays live.
define void @test1(i32 %a) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.aslh(i32 %a)
%conv = sext i32 %0 to i64
store i64 %conv, i64* @c, align 8
ret void
}
declare i32 @llvm.hexagon.A2.aslh(i32) #1
; CHECK: r{{[0-9]+}}{{ *}}={{ *}}asrh({{ *}}r{{[0-9]+}}{{ *}})
; llvm.hexagon.A2.asrh must lower to the asrh instruction.
define void @test2(i32 %a) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.asrh(i32 %a)
%conv = sext i32 %0 to i64
store i64 %conv, i64* @c, align 8
ret void
}
declare i32 @llvm.hexagon.A2.asrh(i32) #1
; CHECK: r{{[0-9]+}}{{ *}}={{ *}}sxtb({{ *}}r{{[0-9]+}}{{ *}})
; llvm.hexagon.A2.sxtb must lower to the sxtb instruction.
define void @test3(i32 %a) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.sxtb(i32 %a)
%conv = sext i32 %0 to i64
store i64 %conv, i64* @c, align 8
ret void
}
declare i32 @llvm.hexagon.A2.sxtb(i32) #1
; CHECK: r{{[0-9]+}}{{ *}}={{ *}}sxth({{ *}}r{{[0-9]+}}{{ *}})
; llvm.hexagon.A2.sxth must lower to the sxth instruction.
define void @test4(i32 %a) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.sxth(i32 %a)
%conv = sext i32 %0 to i64
store i64 %conv, i64* @c, align 8
ret void
}
declare i32 @llvm.hexagon.A2.sxth(i32) #1
; CHECK: r{{[0-9]+}}{{ *}}={{ *}}zxtb({{ *}}r{{[0-9]+}}{{ *}})
; llvm.hexagon.A2.zxtb must lower to the zxtb instruction.
define void @test6(i32 %a) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.zxtb(i32 %a)
%conv = sext i32 %0 to i64
store i64 %conv, i64* @c, align 8
ret void
}
declare i32 @llvm.hexagon.A2.zxtb(i32) #1
; CHECK: r{{[0-9]+}}{{ *}}={{ *}}zxth({{ *}}r{{[0-9]+}}{{ *}})
; llvm.hexagon.A2.zxth must lower to the zxth instruction.
define void @test7(i32 %a) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.A2.zxth(i32 %a)
%conv = sext i32 %0 to i64
store i64 %conv, i64* @c, align 8
ret void
}
declare i32 @llvm.hexagon.A2.zxth(i32) #1
; CHECK: r{{[0-9]+}}{{ *}}={{ *}}asrh({{ *}}r{{[0-9]+}}{{ *}})
; llvm.hexagon.SI.to.SXTHI.asrh shares the asrh lowering with A2.asrh.
define void @test8(i32 %a) #0 {
entry:
%0 = tail call i32 @llvm.hexagon.SI.to.SXTHI.asrh(i32 %a)
%conv = sext i32 %0 to i64
store i64 %conv, i64* @c, align 8
ret void
}
declare i32 @llvm.hexagon.SI.to.SXTHI.asrh(i32) #1