Mirror of https://github.com/c64scene-ar/llvm-6502.git (synced 2024-12-13 04:30:23 +00:00).

Commit 9941f7426d (parent 9bc5dce98d): "Optimize alignment of loads and stores."
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40102 91177308-0d34-0410-b5e6-96231b3b80d8
@ -8790,6 +8790,11 @@ static bool isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom) {
|
||||
Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
|
||||
Value *Op = LI.getOperand(0);
|
||||
|
||||
// Attempt to improve the alignment.
|
||||
unsigned KnownAlign = GetKnownAlignment(Op, TD);
|
||||
if (KnownAlign > LI.getAlignment())
|
||||
LI.setAlignment(KnownAlign);
|
||||
|
||||
// load (cast X) --> cast (load X) iff safe
|
||||
if (isa<CastInst>(Op))
|
||||
if (Instruction *Res = InstCombineLoadCast(*this, LI))
|
||||
@ -8985,6 +8990,11 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
|
||||
}
|
||||
}
|
||||
|
||||
// Attempt to improve the alignment.
|
||||
unsigned KnownAlign = GetKnownAlignment(Ptr, TD);
|
||||
if (KnownAlign > SI.getAlignment())
|
||||
SI.setAlignment(KnownAlign);
|
||||
|
||||
// Do really simple DSE, to catch cases where there are several consequtive
|
||||
// stores to the same location, separated by a few arithmetic operations. This
|
||||
// situation often occurs with bitfield accesses.
|
||||
|
New file (66 lines): test/Transforms/InstCombine/loadstore-alignment.ll
@@ -0,0 +1,66 @@
; RUN: llvm-as < %s | opt -instcombine | llvm-dis | grep {, align 16} | wc -l | grep 12
; Twelve lines must carry ", align 16" after instcombine: the two global
; declarations below plus the ten loads/stores in this file, all of which
; start at "align 1" and should be raised to the globals'/alloca's known
; 16-byte alignment.

@x = external global <2 x i64>, align 16
@xx = external global [13 x <2 x i64>], align 16
||||
; Load through a constant GEP off the 16-byte-aligned global @x.
; The explicit "align 1" should be raised to "align 16".
define <2 x i64> @static_hem() {
  %elt = getelementptr <2 x i64>* @x, i32 7
  %val = load <2 x i64>* %elt, align 1
  ret <2 x i64> %val
}
|
||||
|
||||
; Same as @static_hem but with a variable GEP index: since the element
; type is 16 bytes wide, every element of @x is 16-byte aligned and the
; load's "align 1" should still be raised to "align 16".
define <2 x i64> @hem(i32 %i) {
  %elt = getelementptr <2 x i64>* @x, i32 %i
  %val = load <2 x i64>* %elt, align 1
  ret <2 x i64> %val
}
|
||||
|
||||
; Two-dimensional variable GEP into the 16-byte-aligned array global
; @xx; the load's "align 1" should be raised to "align 16".
define <2 x i64> @hem_2d(i32 %i, i32 %j) {
  %elt = getelementptr [13 x <2 x i64>]* @xx, i32 %i, i32 %j
  %val = load <2 x i64>* %elt, align 1
  ret <2 x i64> %val
}
|
||||
|
||||
; Direct load of the global itself: the simplest case of raising
; "align 1" to @x's declared "align 16".
define <2 x i64> @foo() {
  %val = load <2 x i64>* @x, align 1
  ret <2 x i64> %val
}
|
||||
|
||||
; Load from an alloca of <2 x i64>. The call to @kip only passes the
; pointer along; the alloca's natural alignment is still known, so the
; load's "align 1" should be raised to "align 16".
define <2 x i64> @bar() {
  %slot = alloca <2 x i64>
  call void @kip(<2 x i64>* %slot)
  %val = load <2 x i64>* %slot, align 1
  ret <2 x i64> %val
}
|
||||
|
||||
; Store counterpart of @static_hem: constant GEP off 16-byte-aligned @x;
; the store's "align 1" should be raised to "align 16".
define void @static_hem_store(<2 x i64> %y) {
  %elt = getelementptr <2 x i64>* @x, i32 7
  store <2 x i64> %y, <2 x i64>* %elt, align 1
  ret void
}
|
||||
|
||||
; Store counterpart of @hem: variable GEP index into @x; element size is
; 16 bytes, so the store's "align 1" should be raised to "align 16".
define void @hem_store(i32 %i, <2 x i64> %y) {
  %elt = getelementptr <2 x i64>* @x, i32 %i
  store <2 x i64> %y, <2 x i64>* %elt, align 1
  ret void
}
|
||||
|
||||
; Store counterpart of @hem_2d: two-dimensional variable GEP into @xx;
; the store's "align 1" should be raised to "align 16".
define void @hem_2d_store(i32 %i, i32 %j, <2 x i64> %y) {
  %elt = getelementptr [13 x <2 x i64>]* @xx, i32 %i, i32 %j
  store <2 x i64> %y, <2 x i64>* %elt, align 1
  ret void
}
|
||||
|
||||
; Store counterpart of @foo: direct store to @x; "align 1" should be
; raised to the global's declared "align 16".
define void @foo_store(<2 x i64> %y) {
  store <2 x i64> %y, <2 x i64>* @x, align 1
  ret void
}
|
||||
|
||||
; Store counterpart of @bar: store into an escaped-but-known-alignment
; alloca; the store's "align 1" should be raised to "align 16".
define void @bar_store(<2 x i64> %y) {
  %slot = alloca <2 x i64>
  call void @kip(<2 x i64>* %slot)
  store <2 x i64> %y, <2 x i64>* %slot, align 1
  ret void
}
|
||||
|
||||
; External function used only to take the alloca's address in @bar and
; @bar_store, so the pointer escapes without its alignment being changed.
declare void @kip(<2 x i64>* %t)
|
Loading…
Reference in New Issue
Block a user