llvm-6502/test/CodeGen/ARM/intrinsics-crypto.ll
Tim Northover 07786c2f09 AArch64 & ARM: refactor crypto intrinsics to take scalars
Some of the SHA instructions take a scalar i32 as one argument (largely because
they operate on a 160-bit hash state, one 32-bit word of which is passed
separately). This wasn't previously reflected in the IR: the ARM and AArch64
backends carried that value in different vector types (<4 x i32> and <1 x i32>
respectively), which was ugly.

This makes all the affected intrinsics take a uniform "i32", allowing them to
become non-polymorphic at the same time.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@200706 91177308-0d34-0410-b5e6-96231b3b80d8
2014-02-03 17:27:49 +00:00
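
A minimal sketch of the resulting IR shape (separate from the test file below,
but using the same @llvm.arm.neon.sha1h declaration it relies on): the 32-bit
hash fragment is pulled out of a vector lane and handed to the intrinsic as a
plain i32, so the intrinsic itself needs no overload suffix.

; Illustrative sketch only -- not part of intrinsics-crypto.ll.
declare i32 @llvm.arm.neon.sha1h(i32)

define i32 @sha1h_fragment(<4 x i32> %state) {
  %e = extractelement <4 x i32> %state, i32 0    ; 32-bit hash fragment
  %rot = call i32 @llvm.arm.neon.sha1h(i32 %e)   ; i32 -> i32, non-polymorphic
  ret i32 %rot
}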


; RUN: llc < %s -mtriple=armv8 -mattr=+crypto | FileCheck %s

define arm_aapcs_vfpcc <16 x i8> @test_aesde(<16 x i8>* %a, <16 x i8> *%b) {
  %tmp = load <16 x i8>* %a
  %tmp2 = load <16 x i8>* %b
  %tmp3 = call <16 x i8> @llvm.arm.neon.aesd(<16 x i8> %tmp, <16 x i8> %tmp2)
  ; CHECK: aesd.8 q{{[0-9]+}}, q{{[0-9]+}}
  %tmp4 = call <16 x i8> @llvm.arm.neon.aese(<16 x i8> %tmp3, <16 x i8> %tmp2)
  ; CHECK: aese.8 q{{[0-9]+}}, q{{[0-9]+}}
  %tmp5 = call <16 x i8> @llvm.arm.neon.aesimc(<16 x i8> %tmp4)
  ; CHECK: aesimc.8 q{{[0-9]+}}, q{{[0-9]+}}
  %tmp6 = call <16 x i8> @llvm.arm.neon.aesmc(<16 x i8> %tmp5)
  ; CHECK: aesmc.8 q{{[0-9]+}}, q{{[0-9]+}}
  ret <16 x i8> %tmp6
}

define arm_aapcs_vfpcc <4 x i32> @test_sha(<4 x i32> *%a, <4 x i32> *%b, <4 x i32> *%c) {
  %tmp = load <4 x i32>* %a
  %tmp2 = load <4 x i32>* %b
  %tmp3 = load <4 x i32>* %c
  %scalar = extractelement <4 x i32> %tmp, i32 0
  %resscalar = call i32 @llvm.arm.neon.sha1h(i32 %scalar)
  %res1 = insertelement <4 x i32> undef, i32 %resscalar, i32 0
  ; CHECK: sha1h.32 q{{[0-9]+}}, q{{[0-9]+}}
  %res2 = call <4 x i32> @llvm.arm.neon.sha1c(<4 x i32> %tmp2, i32 %scalar, <4 x i32> %res1)
  ; CHECK: sha1c.32 q{{[0-9]+}}, q{{[0-9]+}}, q{{[0-9]+}}
  %res3 = call <4 x i32> @llvm.arm.neon.sha1m(<4 x i32> %res2, i32 %scalar, <4 x i32> %res1)
  ; CHECK: sha1m.32 q{{[0-9]+}}, q{{[0-9]+}}, q{{[0-9]+}}
  %res4 = call <4 x i32> @llvm.arm.neon.sha1p(<4 x i32> %res3, i32 %scalar, <4 x i32> %res1)
  ; CHECK: sha1p.32 q{{[0-9]+}}, q{{[0-9]+}}, q{{[0-9]+}}
  %res5 = call <4 x i32> @llvm.arm.neon.sha1su0(<4 x i32> %res4, <4 x i32> %tmp3, <4 x i32> %res1)
  ; CHECK: sha1su0.32 q{{[0-9]+}}, q{{[0-9]+}}, q{{[0-9]+}}
  %res6 = call <4 x i32> @llvm.arm.neon.sha1su1(<4 x i32> %res5, <4 x i32> %res1)
  ; CHECK: sha1su1.32 q{{[0-9]+}}, q{{[0-9]+}}
  %res7 = call <4 x i32> @llvm.arm.neon.sha256h(<4 x i32> %res6, <4 x i32> %tmp3, <4 x i32> %res1)
  ; CHECK: sha256h.32 q{{[0-9]+}}, q{{[0-9]+}}, q{{[0-9]+}}
  %res8 = call <4 x i32> @llvm.arm.neon.sha256h2(<4 x i32> %res7, <4 x i32> %tmp3, <4 x i32> %res1)
  ; CHECK: sha256h2.32 q{{[0-9]+}}, q{{[0-9]+}}, q{{[0-9]+}}
  %res9 = call <4 x i32> @llvm.arm.neon.sha256su1(<4 x i32> %res8, <4 x i32> %tmp3, <4 x i32> %res1)
  ; CHECK: sha256su1.32 q{{[0-9]+}}, q{{[0-9]+}}, q{{[0-9]+}}
  %res10 = call <4 x i32> @llvm.arm.neon.sha256su0(<4 x i32> %res9, <4 x i32> %tmp3)
  ; CHECK: sha256su0.32 q{{[0-9]+}}, q{{[0-9]+}}
  ret <4 x i32> %res10
}

declare <16 x i8> @llvm.arm.neon.aesd(<16 x i8>, <16 x i8>)
declare <16 x i8> @llvm.arm.neon.aese(<16 x i8>, <16 x i8>)
declare <16 x i8> @llvm.arm.neon.aesimc(<16 x i8>)
declare <16 x i8> @llvm.arm.neon.aesmc(<16 x i8>)
declare i32 @llvm.arm.neon.sha1h(i32)
declare <4 x i32> @llvm.arm.neon.sha1c(<4 x i32>, i32, <4 x i32>)
declare <4 x i32> @llvm.arm.neon.sha1m(<4 x i32>, i32, <4 x i32>)
declare <4 x i32> @llvm.arm.neon.sha1p(<4 x i32>, i32, <4 x i32>)
declare <4 x i32> @llvm.arm.neon.sha1su0(<4 x i32>, <4 x i32>, <4 x i32>)
declare <4 x i32> @llvm.arm.neon.sha256h(<4 x i32>, <4 x i32>, <4 x i32>)
declare <4 x i32> @llvm.arm.neon.sha256h2(<4 x i32>, <4 x i32>, <4 x i32>)
declare <4 x i32> @llvm.arm.neon.sha256su1(<4 x i32>, <4 x i32>, <4 x i32>)
declare <4 x i32> @llvm.arm.neon.sha256su0(<4 x i32>, <4 x i32>)
declare <4 x i32> @llvm.arm.neon.sha1su1(<4 x i32>, <4 x i32>)