Mirror of https://github.com/c64scene-ar/llvm-6502.git, synced 2024-12-13 20:32:21 +00:00
7c9c6ed761
Essentially the same as the GEP change in r230786.

A similar migration script can be used to update test cases, though a few more
test case improvements/changes were required this time around: (r229269-r229278)

import fileinput
import sys
import re

pat = re.compile(r"((?:=|:|^)\s*load (?:atomic )?(?:volatile )?(.*?))(| addrspace\(\d+\) *)\*($| *(?:%|@|null|undef|blockaddress|getelementptr|addrspacecast|bitcast|inttoptr|\[\[[a-zA-Z]|\{\{).*$)")

for line in sys.stdin:
  sys.stdout.write(re.sub(pat, r"\1, \2\3*\4", line))

Reviewers: rafael, dexonsmith, grosser

Differential Revision: http://reviews.llvm.org/D7649

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@230794 91177308-0d34-0410-b5e6-96231b3b80d8
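For illustration only (not part of the original commit message), here is the script's substitution applied to a single pre-migration line; the old-syntax input shown is an assumption modeled on the load in the test below:

import re

pat = re.compile(r"((?:=|:|^)\s*load (?:atomic )?(?:volatile )?(.*?))(| addrspace\(\d+\) *)\*($| *(?:%|@|null|undef|blockaddress|getelementptr|addrspacecast|bitcast|inttoptr|\[\[[a-zA-Z]|\{\{).*$)")

old = "  %a1 = load volatile i32* @a"
print(re.sub(pat, r"\1, \2\3*\4", old))
# prints:   %a1 = load volatile i32, i32* @a

The pointee type is duplicated as the new explicit result type of the load; everything before the matched "load" is copied through unchanged.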
33 lines
725 B
LLVM
; RUN: opt -S -Oz %s | FileCheck %s

@a = global i32 4

define i1 @inner() {
  %a1 = load volatile i32, i32* @a
  %x1 = add i32 %a1, %a1
  %c = icmp eq i32 %x1, 0

; Here are enough instructions to prevent inlining, but because they are used
; only by the @llvm.assume intrinsic, they're free (and, thus, inlining will
; still happen).
  %a2 = mul i32 %a1, %a1
  %a3 = sub i32 %a1, 5
  %a4 = udiv i32 %a3, -13
  %a5 = mul i32 %a4, %a4
  %a6 = add i32 %a5, %x1
  %ca = icmp sgt i32 %a6, -7
  tail call void @llvm.assume(i1 %ca)

  ret i1 %c
}

; @inner() should be inlined for -Oz.
; CHECK-NOT: call i1 @inner
define i1 @outer() optsize {
  %r = call i1 @inner()
  ret i1 %r
}

declare void @llvm.assume(i1) nounwind
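A minimal sketch (an assumption, not part of the commit) of applying the same substitution in place across .ll test files such as this one, with filenames taken from the command line and using the fileinput module that the original script imports:

import fileinput
import re
import sys

pat = re.compile(r"((?:=|:|^)\s*load (?:atomic )?(?:volatile )?(.*?))(| addrspace\(\d+\) *)\*($| *(?:%|@|null|undef|blockaddress|getelementptr|addrspacecast|bitcast|inttoptr|\[\[[a-zA-Z]|\{\{).*$)")

# With inplace=True, standard output is redirected into the file being read,
# so each rewritten line replaces the original one.
with fileinput.input(files=sys.argv[1:], inplace=True) as f:
    for line in f:
        sys.stdout.write(re.sub(pat, r"\1, \2\3*\4", line))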