Mirror of https://github.com/c64scene-ar/llvm-6502.git, synced 2024-12-16 11:30:51 +00:00
41b33299cf
This patch is a follow-up to r211040 & r211052. Rather than bailing out of fast-isel, this patch generates an alternate instruction (movabsq) instead of the leaq. While movabsq always has enough room to handle the 64-bit displacement, it is generally overkill for internal symbols (most displacements will fit within 32 bits); however, we have no way of communicating the code model to the assembler, so this is how we avoid an absolute leal/leaq being flagged as illegal when it uses a symbolic displacement.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@211130 91177308-0d34-0410-b5e6-96231b3b80d8
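As a rough illustration of the trade-off (the symbol is the one from the test below; the destination register is arbitrary): an absolute leaq only has a signed 32-bit displacement field to hold the symbol's address, while movabsq carries a full 64-bit immediate:

    leaq    _NO_MATCH, %rax       # address must fit in a signed 32-bit displacement
    movabsq $_NO_MATCH, %rax      # imm64 operand, room for any 64-bit address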
25 lines
822 B
LLVM
; RUN: llc -mtriple=x86_64-pc-win32-macho -relocation-model=static -O0 < %s | FileCheck %s

; Ensure that we generate a movabsq rather than a leaq for a static relocation
; when compiling for 64 bit.

%struct.MatchInfo = type [64 x i64]

@NO_MATCH = internal constant %struct.MatchInfo zeroinitializer, align 8

define void @setup() {
  %pending = alloca %struct.MatchInfo, align 8
  %t = bitcast %struct.MatchInfo* %pending to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %t, i8* bitcast (%struct.MatchInfo* @NO_MATCH to i8*), i64 512, i32 8, i1 false)
  %u = getelementptr inbounds %struct.MatchInfo* %pending, i32 0, i32 2
  %v = load i64* %u, align 8
  br label %done

done:
  ret void
; CHECK: movabsq $_NO_MATCH, {{.*}}
}

; Function Attrs: nounwind
declare void @llvm.memcpy.p0i8.p0i8.i64(i8*, i8*, i64, i32, i1)
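For reference, a test like this normally runs under llvm-lit, which substitutes %s with the test file's path; the RUN line can also be replayed by hand (the filename here is illustrative):

    llc -mtriple=x86_64-pc-win32-macho -relocation-model=static -O0 < movabs-static.ll | FileCheck movabs-static.ll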