From 7cfb6d373a8062f3ade6fb42b7890dae5233619c Mon Sep 17 00:00:00 2001
From: Bob Wilson
Date: Tue, 16 Mar 2010 05:33:29 +0000
Subject: [PATCH] Add a testcase for the change in r98586.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@98610 91177308-0d34-0410-b5e6-96231b3b80d8
---
 .../CodeGen/Thumb2/2010-03-15-AsmCCClobber.ll | 63 +++++++++++++++++++
 1 file changed, 63 insertions(+)
 create mode 100644 test/CodeGen/Thumb2/2010-03-15-AsmCCClobber.ll

diff --git a/test/CodeGen/Thumb2/2010-03-15-AsmCCClobber.ll b/test/CodeGen/Thumb2/2010-03-15-AsmCCClobber.ll
new file mode 100644
index 00000000000..71ff68aabeb
--- /dev/null
+++ b/test/CodeGen/Thumb2/2010-03-15-AsmCCClobber.ll
@@ -0,0 +1,63 @@
+; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mcpu=cortex-a8 | FileCheck %s
+; Radar 7459078
+target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:32-n32"
+
+%0 = type { i32, i32 }
+%s1 = type { %s3, i32, %s4, i8*, void (i8*, i8*)*, i8*, i32*, i32*, i32*, i32, i64, [1 x i32] }
+%s2 = type { i32 (...)**, %s4 }
+%s3 = type { %s2, i32, i32, i32*, [4 x i8], float, %s4, i8*, i8* }
+%s4 = type { %s5 }
+%s5 = type { i32 }
+
+; Make sure the cmp is not scheduled before the InlineAsm that clobbers cc.
+; CHECK: InlineAsm End
+; CHECK: cmp
+; CHECK: beq
+define arm_apcscc void @test(%s1* %this, i32 %format, i32 %w, i32 %h, i32 %levels, i32* %s, i8* %data, i32* nocapture %rowbytes, void (i8*, i8*)* %release, i8* %info) nounwind {
+entry:
+  %tmp1 = getelementptr inbounds %s1* %this, i32 0, i32 0, i32 0, i32 1, i32 0, i32 0
+  volatile store i32 1, i32* %tmp1, align 4
+  %tmp12 = getelementptr inbounds %s1* %this, i32 0, i32 1
+  store i32 %levels, i32* %tmp12, align 4
+  %tmp13 = getelementptr inbounds %s1* %this, i32 0, i32 3
+  store i8* %data, i8** %tmp13, align 4
+  %tmp14 = getelementptr inbounds %s1* %this, i32 0, i32 4
+  store void (i8*, i8*)* %release, void (i8*, i8*)** %tmp14, align 4
+  %tmp15 = getelementptr inbounds %s1* %this, i32 0, i32 5
+  store i8* %info, i8** %tmp15, align 4
+  %tmp16 = getelementptr inbounds %s1* %this, i32 0, i32 6
+  store i32* null, i32** %tmp16, align 4
+  %tmp17 = getelementptr inbounds %s1* %this, i32 0, i32 7
+  store i32* null, i32** %tmp17, align 4
+  %tmp19 = getelementptr inbounds %s1* %this, i32 0, i32 10
+  store i64 0, i64* %tmp19, align 4
+  %tmp20 = getelementptr inbounds %s1* %this, i32 0, i32 0
+  tail call arm_apcscc void @f1(%s3* %tmp20, i32* %s) nounwind
+  %tmp21 = shl i32 %format, 6
+  %tmp22 = tail call arm_apcscc zeroext i8 @f2(i32 %format) nounwind
+  %toBoolnot = icmp eq i8 %tmp22, 0
+  %tmp23 = zext i1 %toBoolnot to i32
+  %flags.0 = or i32 %tmp23, %tmp21
+  %tmp24 = shl i32 %flags.0, 16
+  %asmtmp.i.i.i = tail call %0 asm sideeffect "\0A0:\09ldrex $1, [$2]\0A\09orr $1, $1, $3\0A\09strex $0, $1, [$2]\0A\09cmp $0, #0\0A\09bne 0b", "=&r,=&r,r,r,~{memory},~{cc}"(i32* %tmp1, i32 %tmp24) nounwind
+  %tmp25 = getelementptr inbounds %s1* %this, i32 0, i32 2, i32 0, i32 0
+  volatile store i32 1, i32* %tmp25, align 4
+  %tmp26 = icmp eq i32 %levels, 0
+  br i1 %tmp26, label %return, label %bb4
+
+bb4:
+  %l.09 = phi i32 [ %tmp28, %bb4 ], [ 0, %entry ]
+  %scevgep = getelementptr %s1* %this, i32 0, i32 11, i32 %l.09
+  %scevgep10 = getelementptr i32* %rowbytes, i32 %l.09
+  %tmp27 = load i32* %scevgep10, align 4
+  store i32 %tmp27, i32* %scevgep, align 4
+  %tmp28 = add i32 %l.09, 1
+  %exitcond = icmp eq i32 %tmp28, %levels
+  br i1 %exitcond, label %return, label %bb4
+
+return:
+  ret void
+}
+
+declare arm_apcscc void @f1(%s3*, i32*)
+declare arm_apcscc zeroext i8 @f2(i32)
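
Note for context: the inline asm in this test is a GCC-style ldrex/strex read-modify-write loop whose clobber list includes "cc", and the CHECK lines verify that the following cmp/beq stay after "InlineAsm End" rather than being scheduled above the asm, where the asm's own cmp would overwrite the flags. A rough C-level sketch that lowers to a similar shape is below; it is hypothetical (the function and parameter names are illustrative, not taken from the Radar or the patch).

/* Hypothetical reduction: an inline-asm atomic OR that clobbers the
 * condition codes, followed by a compare that must not be scheduled
 * above the asm block. */
static void set_flags_then_copy(volatile int *state, int mask, int levels,
                                const int *rowbytes, int *dst) {
  int tmp, failed;
  __asm__ __volatile__(
      "0:\n\t"
      "ldrex %1, [%2]\n\t"      /* load-exclusive the current word    */
      "orr   %1, %1, %3\n\t"    /* merge in the new flag bits         */
      "strex %0, %1, [%2]\n\t"  /* try to publish the result          */
      "cmp   %0, #0\n\t"        /* did the exclusive store fail?      */
      "bne   0b"                /* if so, retry                       */
      : "=&r"(failed), "=&r"(tmp)
      : "r"(state), "r"(mask)
      : "memory", "cc");        /* flags are clobbered by the asm     */
  /* The zero-trip check on 'levels' compiles to the cmp/beq the test
   * looks for; it must stay below the asm block. */
  for (int l = 0; l < levels; ++l)
    dst[l] = rowbytes[l];
}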