commit 73f8eff19e

The loop vectorizer preserves wrapping, exact, and fast-math properties of
scalar instructions. This patch adds a convenience method to make that
operation easier because we need to do this in the loop vectorizer, SLP
vectorizer, and possibly other places.

Although this is a 'no functional change' patch, I've added a testcase to
verify that the exact flag is preserved by the loop vectorizer. The wrapping
and fast-math flags are already checked in existing testcases.

Differential Revision: http://reviews.llvm.org/D5138

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@216886 91177308-0d34-0410-b5e6-96231b3b80d8
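For illustration only, here is a minimal C++ sketch of what such a flag-copying
convenience helper might look like. The free function name copyScalarIRFlags and
its signature are placeholders, not necessarily what D5138 adds; the operator
classes (OverflowingBinaryOperator, PossiblyExactOperator, FPMathOperator) and
the Instruction flag accessors used below are existing LLVM APIs.

#include "llvm/IR/Instruction.h"
#include "llvm/IR/Operator.h"

using namespace llvm;

// Hypothetical sketch: copy the wrapping, exact, and fast-math flags from a
// scalar value onto a newly created instruction of the same opcode (e.g. its
// widened, vectorized counterpart).
static void copyScalarIRFlags(Instruction *NewI, const Value *ScalarV) {
  // Wrapping flags (nuw/nsw) on overflowing binary operators such as
  // add, sub, mul, and shl.
  if (const auto *OB = dyn_cast<OverflowingBinaryOperator>(ScalarV)) {
    NewI->setHasNoUnsignedWrap(OB->hasNoUnsignedWrap());
    NewI->setHasNoSignedWrap(OB->hasNoSignedWrap());
  }

  // The exact flag on udiv, sdiv, lshr, and ashr -- the flag exercised by
  // the testcase below.
  if (const auto *PE = dyn_cast<PossiblyExactOperator>(ScalarV))
    NewI->setIsExact(PE->isExact());

  // Fast-math flags on floating-point operations.
  if (const auto *FP = dyn_cast<FPMathOperator>(ScalarV))
    NewI->copyFastMathFlags(FP->getFastMathFlags());
}

A vectorizer would call something like copyScalarIRFlags(WidenedInst, ScalarInst)
right after creating the widened instruction, so that flags such as the 'exact'
on the lshr in the testcase below survive vectorization.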
; RUN: opt < %s -loop-vectorize -force-vector-width=4 -S | FileCheck %s

target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.9.0"

; CHECK-LABEL: @lshr_exact(
; CHECK: lshr exact <4 x i32>
define void @lshr_exact(i32* %x) {
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %arrayidx = getelementptr inbounds i32* %x, i64 %iv
  %0 = load i32* %arrayidx, align 4
  %conv1 = lshr exact i32 %0, 1
  store i32 %conv1, i32* %arrayidx, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 256
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}