llvm-6502/test/CodeGen/ARM/2012-11-14-subs_carry.ll
Tim Northover d0dbe02fd2 ARM & AArch64: make use of common cmpxchg idioms after expansion
The C and C++ semantics for compare_exchange require it to return a bool
indicating success. This gets mapped to LLVM IR which follows each cmpxchg with
an icmp of the value loaded against the desired value.

When lowered to ldxr/stxr loops, this extra comparison is redundant: its
results are implicit in the control-flow of the function.

This commit makes two changes: it replaces that icmp with appropriate PHI
nodes, and then makes sure earlyCSE is called after expansion to actually make
use of the opportunities revealed.

I've also added -{arm,aarch64}-enable-atomic-tidy options, so that
existing fragile tests aren't perturbed too much by the change. Many
of them either rely on undef/unreachable too pervasively to be
restored to something well-defined (particularly while making sure
they test the same obscure assert from many years ago), or depend on a
particular CFG shape, which is disrupted by SimplifyCFG.

rdar://problem/16227836

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@209883 91177308-0d34-0410-b5e6-96231b3b80d8
2014-05-30 10:09:59 +00:00

32 lines
655 B
LLVM

; RUN: llc < %s -mtriple=thumbv7-apple-ios -arm-atomic-cfg-tidy=0 | FileCheck %s
;CHECK-LABEL: foo:
;CHECK: adds
;CHECK-NEXT: adc
;CHECK-NEXT: bx
;rdar://12028498
define i32 @foo() nounwind ssp {
entry:
; Widen a constant to i64 so every op below is a 64-bit operation that
; must be legalized into lo/hi 32-bit register pairs on Thumb2.
%tmp2 = zext i32 3 to i64
br label %bug_block
bug_block:
; Reduced instruction soup from the original report (rdar://12028498).
; The exact mix/order of and/shl on i64 values reproduces the selection
; DAG shape that exposed the bug; do not simplify or reorder.
%tmp410 = and i64 1031, 1647010
%tmp411 = and i64 %tmp2, -211
%tmp412 = shl i64 %tmp410, %tmp2
%tmp413 = shl i64 %tmp411, %tmp2
%tmp415 = and i64 %tmp413, 1
; NOTE(review): %tmp420 is dead (never used), presumably kept from the
; bugpoint-reduced test case -- removing it could perturb the test.
%tmp420 = xor i64 0, %tmp415
%tmp421 = and i64 %tmp412, %tmp415
%tmp422 = shl i64 %tmp421, 1
br label %finish
finish:
; Return the high 32 bits. The 64-bit shl-by-1 feeding this lshr is
; what lowers to the carry-propagating adds/adc pair that the CHECK
; lines at the top of the file verify.
%tmp423 = lshr i64 %tmp422, 32
%tmp424 = trunc i64 %tmp423 to i32
ret i32 %tmp424
}