Mirror of https://github.com/c64scene-ar/llvm-6502.git, synced 2024-10-13 06:25:02 +00:00
cdd8e46bec
On spec/gcc, this caused a codesize improvement of ~1.9% for ARM mode and ~4.9% for Thumb(2) mode. This is codesize including literal pools. The pools themselves doubled in size for ARM mode and quintupled for Thumb mode, leaving suggestion that there is still perhaps redundancy in LLVM's use of constant pools that could be decreased by sharing entries. Fixes PR11087. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@142530 91177308-0d34-0410-b5e6-96231b3b80d8
28 lines
698 B
LLVM
; RUN: llc < %s -mtriple=armv7-apple-darwin | FileCheck %s
; RUN: llc < %s -mtriple=armv7-unknown-linux-eabi | FileCheck %s

; Check that when optimizing for size, a literal pool load is used
; instead of the (potentially faster) movw/movt pair when loading
; a large constant.
; Global holding a large pointer-sized constant (0x12345678) that is not
; cheaply materializable as an immediate, forcing either a movw/movt pair
; or a constant-pool load when its address/value is needed.
@x = global i32* inttoptr (i32 305419888 to i32*), align 4
; With optsize, the backend should prefer the smaller constant-pool load
; (ldr from an LCPI literal-pool entry) over the faster-but-larger
; movw/movt immediate pair to materialize @x's address.
define i32 @f() optsize {
; CHECK: f:
; CHECK: ldr r{{.}}, {{.?}}LCPI{{.}}_{{.}}
; CHECK: ldr r{{.}}, [{{(pc, )?}}r{{.}}]
; CHECK: ldr r{{.}}, [r{{.}}]
  %1 = load i32** @x, align 4
  %2 = load i32* %1
  ret i32 %2
}
; Without optsize, speed wins: the same constant should be materialized
; with a movw/movt pair rather than a literal-pool load.
define i32 @g() {
; CHECK: g:
; CHECK: movw
; CHECK: movt
  %1 = load i32** @x, align 4
  %2 = load i32* %1
  ret i32 %2
}