mirror of
https://github.com/c64scene-ar/llvm-6502.git
synced 2024-12-15 04:30:12 +00:00
24517d023f
The current memory-instruction optimization logic in CGP, which sinks parts of the address computation that can be absorbed by the addressing mode, does this by explicitly converting the relevant part of the address computation into IR-level integer operations (making use of ptrtoint and inttoptr). For most targets this is currently not a problem, but for targets wishing to make use of IR-level aliasing analysis during CodeGen, the use of ptrtoint/inttoptr is a problem for two reasons: 1. BasicAA becomes less powerful in the face of the ptrtoint/inttoptr 2. In cases where type-punning was used, and BasicAA was used to override TBAA, BasicAA may no longer do so. (this had forced us to disable all use of TBAA in CodeGen; something which we can now enable again) This (use of GEPs instead of ptrtoint/inttoptr) is not currently enabled by default (except for those targets that use AA during CodeGen), and so aside from some PowerPC subtargets and SystemZ, there should be no change in behavior. We may be able to switch completely away from the ptrtoint/inttoptr sinking on all targets, but further testing is required. I've doubled-up on a number of existing tests that are sensitive to the address sinking behavior (including some store-merging tests that are sensitive to the order of the resulting ADD operations at the SDAG level). git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@206092 91177308-0d34-0410-b5e6-96231b3b80d8
68 lines
2.2 KiB
LLVM
68 lines
2.2 KiB
LLVM
; Reduced test for PR 1508: llc formerly crashed on this EH-heavy function.
; Run twice: once with the default ptrtoint/inttoptr address sinking and once
; with GEP-based address sinking (-addr-sink-using-gep=1); CFI output must be
; identical either way.
; RUN: llc -mcpu=g5 < %s | FileCheck %s
; RUN: llc -mcpu=g5 -addr-sink-using-gep=1 < %s | FileCheck %s
;; Formerly crashed, see PR 1508

target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f128:64:128"
target triple = "powerpc64-apple-darwin8"

; Two-field range: { start, length } — field 1 (the length) is what @Bork reads.
%struct.Range = type { i64, i64 }

; Verify the Darwin/PPC64 CFI for @Bork: personality + LSDA for the invokes,
; frame setup, and the callee-saved GPR save slots.
; CHECK: .cfi_startproc
; CHECK: .cfi_personality 155, L___gxx_personality_v0$non_lazy_ptr
; CHECK: .cfi_lsda 16, Lexception0
; CHECK: .cfi_def_cfa_offset 176
; CHECK: .cfi_offset r31, -8
; CHECK: .cfi_offset lr, 16
; CHECK: .cfi_def_cfa_register r31
; CHECK: .cfi_offset r27, -16
; CHECK: .cfi_offset r28, -24
; CHECK: .cfi_offset r29, -32
; CHECK: .cfi_offset r30, -40
; CHECK: .cfi_endproc
; Reduced from PR 1508: llc formerly crashed selecting this function.
; Saves the stack pointer, allocates a dynamically-sized array of i8*
; (%size truncated to i32), then loops invoking @Bar until the remaining
; range length (%range_addr.1.0) reaches zero. Both invokes unwind to a
; single landing pad that restores the saved stack before resuming.
; NOTE: uses era-appropriate (pre-3.7) typed-pointer syntax — old-style
; getelementptr/load operands and a landingpad-carried personality — which
; the contemporaneous llc in this tree expects; do not modernize.
define void @Bork(i64 %range.0.0, i64 %range.0.1, i64 %size) {
entry:
  %effectiveRange = alloca %struct.Range, align 8 ; <%struct.Range*> [#uses=2]
  %tmp4 = call i8* @llvm.stacksave() ; <i8*> [#uses=1]
  %size1 = trunc i64 %size to i32 ; <i32> [#uses=1]
  %tmp17 = alloca i8*, i32 %size1 ; <i8**> [#uses=1]
  invoke void @Foo(i8** %tmp17)
      to label %bb30.preheader unwind label %unwind

bb30.preheader: ; preds = %entry
  ; Address of field 1 (the length) of the stack Range, reloaded each
  ; iteration in %invcont23.
  %tmp26 = getelementptr %struct.Range* %effectiveRange, i64 0, i32 1 ; <i64*> [#uses=1]
  br label %bb30

unwind: ; preds = %cond_true, %entry
  ; Common landing pad: undo the VLA allocation before propagating.
  %exn = landingpad {i8*, i32} personality i32 (...)* @__gxx_personality_v0
          catch i8* null
  call void @llvm.stackrestore(i8* %tmp4)
  resume { i8*, i32 } %exn

invcont23: ; preds = %cond_true
  ; Shrink the remaining range by the length @Bar wrote into %effectiveRange.
  %tmp27 = load i64* %tmp26, align 8 ; <i64> [#uses=1]
  %tmp28 = sub i64 %range_addr.1.0, %tmp27 ; <i64> [#uses=1]
  br label %bb30

bb30: ; preds = %invcont23, %bb30.preheader
  ; Remaining length: starts at %range.0.1, decreases each iteration.
  %range_addr.1.0 = phi i64 [ %tmp28, %invcont23 ], [ %range.0.1, %bb30.preheader ] ; <i64> [#uses=2]
  %tmp33 = icmp eq i64 %range_addr.1.0, 0 ; <i1> [#uses=1]
  br i1 %tmp33, label %cleanup, label %cond_true

cond_true: ; preds = %bb30
  invoke void @Bar(i64 %range.0.0, %struct.Range* %effectiveRange)
      to label %invcont23 unwind label %unwind

cleanup: ; preds = %bb30
  ; llvm.stackrestore is intentionally skipped on the normal path in this
  ; reduced test; the function epilogue tears the frame down anyway.
  ret void
}
declare i8* @llvm.stacksave() nounwind

declare void @Foo(i8**)

declare void @Bar(i64, %struct.Range*)

declare void @llvm.stackrestore(i8*) nounwind

declare i32 @__gxx_personality_v0(...)