From 898a22d7a58a0864224ded9ee3d84405b02fba41 Mon Sep 17 00:00:00 2001
From: Timur Iskhodzhanov
Date: Wed, 17 Dec 2014 17:52:12 +0000
Subject: [PATCH] Fix CR/LF line endings in test case

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@224437 91177308-0d34-0410-b5e6-96231b3b80d8
---
 test/CodeGen/X86/avx1-stack-reload-folding.ll | 166 +++++++++---------
 1 file changed, 83 insertions(+), 83 deletions(-)

diff --git a/test/CodeGen/X86/avx1-stack-reload-folding.ll b/test/CodeGen/X86/avx1-stack-reload-folding.ll
index 480e3dd6306..54c192583d6 100644
--- a/test/CodeGen/X86/avx1-stack-reload-folding.ll
+++ b/test/CodeGen/X86/avx1-stack-reload-folding.ll
@@ -1,83 +1,83 @@
-; RUN: llc -O3 -disable-peephole -mcpu=corei7-avx -mattr=+avx < %s | FileCheck %s
-
-target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
-target triple = "x86_64-unknown-unknown"
-
-; Stack reload folding tests - we use the 'big vectors' pattern to guarantee spilling to stack.
-;
-; Many of these tests are primarily to check memory folding with specific instructions. Using a basic
-; load/cvt/store pattern to test for this would mean that it wouldn't be the memory folding code thats
-; being tested - the load-execute version of the instruction from the tables would be matched instead.
-
-define void @stack_fold_vmulpd(<64 x double>* %a, <64 x double>* %b, <64 x double>* %c) {
-  ;CHECK-LABEL: stack_fold_vmulpd
-  ;CHECK: vmulpd {{[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
-
-  %1 = load <64 x double>* %a
-  %2 = load <64 x double>* %b
-  %3 = fadd <64 x double> %1, %2
-  %4 = fsub <64 x double> %1, %2
-  %5 = fmul <64 x double> %3, %4
-  store <64 x double> %5, <64 x double>* %c
-  ret void
-}
-
-define void @stack_fold_cvtdq2ps(<128 x i32>* %a, <128 x i32>* %b, <128 x float>* %c) {
-  ;CHECK-LABEL: stack_fold_cvtdq2ps
-  ;CHECK: vcvtdq2ps {{[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
-
-  %1 = load <128 x i32>* %a
-  %2 = load <128 x i32>* %b
-  %3 = and <128 x i32> %1, %2
-  %4 = xor <128 x i32> %1, %2
-  %5 = sitofp <128 x i32> %3 to <128 x float>
-  %6 = sitofp <128 x i32> %4 to <128 x float>
-  %7 = fadd <128 x float> %5, %6
-  store <128 x float> %7, <128 x float>* %c
-  ret void
-}
-
-define void @stack_fold_cvtpd2ps(<128 x double>* %a, <128 x double>* %b, <128 x float>* %c) {
-  ;CHECK-LABEL: stack_fold_cvtpd2ps
-  ;CHECK: vcvtpd2psy {{[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
-
-  %1 = load <128 x double>* %a
-  %2 = load <128 x double>* %b
-  %3 = fadd <128 x double> %1, %2
-  %4 = fsub <128 x double> %1, %2
-  %5 = fptrunc <128 x double> %3 to <128 x float>
-  %6 = fptrunc <128 x double> %4 to <128 x float>
-  %7 = fadd <128 x float> %5, %6
-  store <128 x float> %7, <128 x float>* %c
-  ret void
-}
-
-define void @stack_fold_cvttpd2dq(<64 x double>* %a, <64 x double>* %b, <64 x i32>* %c) #0 {
-  ;CHECK-LABEL: stack_fold_cvttpd2dq
-  ;CHECK: vcvttpd2dqy {{[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
-
-  %1 = load <64 x double>* %a
-  %2 = load <64 x double>* %b
-  %3 = fadd <64 x double> %1, %2
-  %4 = fsub <64 x double> %1, %2
-  %5 = fptosi <64 x double> %3 to <64 x i32>
-  %6 = fptosi <64 x double> %4 to <64 x i32>
-  %7 = or <64 x i32> %5, %6
-  store <64 x i32> %7, <64 x i32>* %c
-  ret void
-}
-
-define void @stack_fold_cvttps2dq(<128 x float>* %a, <128 x float>* %b, <128 x i32>* %c) #0 {
-  ;CHECK-LABEL: stack_fold_cvttps2dq
-  ;CHECK: vcvttps2dq {{[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
-
-  %1 = load <128 x float>* %a
-  %2 = load <128 x float>* %b
-  %3 = fadd <128 x float> %1, %2
-  %4 = fsub <128 x float> %1, %2
-  %5 = fptosi <128 x float> %3 to <128 x i32>
-  %6 = fptosi <128 x float> %4 to <128 x i32>
-  %7 = or <128 x i32> %5, %6
-  store <128 x i32> %7, <128 x i32>* %c
-  ret void
-}
+; RUN: llc -O3 -disable-peephole -mcpu=corei7-avx -mattr=+avx < %s | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-unknown"
+
+; Stack reload folding tests - we use the 'big vectors' pattern to guarantee spilling to stack.
+;
+; Many of these tests are primarily to check memory folding with specific instructions. Using a basic
+; load/cvt/store pattern to test for this would mean that it wouldn't be the memory folding code thats
+; being tested - the load-execute version of the instruction from the tables would be matched instead.
+
+define void @stack_fold_vmulpd(<64 x double>* %a, <64 x double>* %b, <64 x double>* %c) {
+  ;CHECK-LABEL: stack_fold_vmulpd
+  ;CHECK: vmulpd {{[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+
+  %1 = load <64 x double>* %a
+  %2 = load <64 x double>* %b
+  %3 = fadd <64 x double> %1, %2
+  %4 = fsub <64 x double> %1, %2
+  %5 = fmul <64 x double> %3, %4
+  store <64 x double> %5, <64 x double>* %c
+  ret void
+}
+
+define void @stack_fold_cvtdq2ps(<128 x i32>* %a, <128 x i32>* %b, <128 x float>* %c) {
+  ;CHECK-LABEL: stack_fold_cvtdq2ps
+  ;CHECK: vcvtdq2ps {{[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+
+  %1 = load <128 x i32>* %a
+  %2 = load <128 x i32>* %b
+  %3 = and <128 x i32> %1, %2
+  %4 = xor <128 x i32> %1, %2
+  %5 = sitofp <128 x i32> %3 to <128 x float>
+  %6 = sitofp <128 x i32> %4 to <128 x float>
+  %7 = fadd <128 x float> %5, %6
+  store <128 x float> %7, <128 x float>* %c
+  ret void
+}
+
+define void @stack_fold_cvtpd2ps(<128 x double>* %a, <128 x double>* %b, <128 x float>* %c) {
+  ;CHECK-LABEL: stack_fold_cvtpd2ps
+  ;CHECK: vcvtpd2psy {{[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+
+  %1 = load <128 x double>* %a
+  %2 = load <128 x double>* %b
+  %3 = fadd <128 x double> %1, %2
+  %4 = fsub <128 x double> %1, %2
+  %5 = fptrunc <128 x double> %3 to <128 x float>
+  %6 = fptrunc <128 x double> %4 to <128 x float>
+  %7 = fadd <128 x float> %5, %6
+  store <128 x float> %7, <128 x float>* %c
+  ret void
+}
+
+define void @stack_fold_cvttpd2dq(<64 x double>* %a, <64 x double>* %b, <64 x i32>* %c) #0 {
+  ;CHECK-LABEL: stack_fold_cvttpd2dq
+  ;CHECK: vcvttpd2dqy {{[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+
+  %1 = load <64 x double>* %a
+  %2 = load <64 x double>* %b
+  %3 = fadd <64 x double> %1, %2
+  %4 = fsub <64 x double> %1, %2
+  %5 = fptosi <64 x double> %3 to <64 x i32>
+  %6 = fptosi <64 x double> %4 to <64 x i32>
+  %7 = or <64 x i32> %5, %6
+  store <64 x i32> %7, <64 x i32>* %c
+  ret void
+}
+
+define void @stack_fold_cvttps2dq(<128 x float>* %a, <128 x float>* %b, <128 x i32>* %c) #0 {
+  ;CHECK-LABEL: stack_fold_cvttps2dq
+  ;CHECK: vcvttps2dq {{[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+
+  %1 = load <128 x float>* %a
+  %2 = load <128 x float>* %b
+  %3 = fadd <128 x float> %1, %2
+  %4 = fsub <128 x float> %1, %2
+  %5 = fptosi <128 x float> %3 to <128 x i32>
+  %6 = fptosi <128 x float> %4 to <128 x i32>
+  %7 = or <128 x i32> %5, %6
+  store <128 x i32> %7, <128 x i32>* %c
+  ret void
+}
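Note on the 'big vectors' comment in the test above: with a single legal-width vector there is no register pressure, nothing is spilled, and instruction selection simply emits the load-execute form of the conversion, so the memory-folding tables are never exercised. A minimal sketch of such a non-test, in the same era's IR syntax (the function name and vector width are illustrative, not part of the patch):

define void @basic_cvtdq2ps(<8 x i32>* %a, <8 x float>* %c) {
  ; One legal-width <8 x i32> load feeding a single convert: ISel can match
  ; this directly as vcvtdq2ps (%rdi), %ymm0, so no spill/reload is created
  ; and the stack-folding path these tests target never fires.
  %1 = load <8 x i32>* %a
  %2 = sitofp <8 x i32> %1 to <8 x float>
  store <8 x float> %2, <8 x float>* %c
  ret void
}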