; Mirror of https://github.com/c64scene-ar/llvm-6502.git (synced 2025-11-03 14:21:30 +00:00)
;
; Original commit message:
;   The bad sorting caused a misaligned basic block when building 176.vpr
;   in ARM mode.  <rdar://problem/10594653>
;   git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@146767 91177308-0d34-0410-b5e6-96231b3b80d8
;
; File metadata from the mirror listing: 27 lines, 845 B, LLVM.
; RUN: llc < %s -mcpu=cortex-a8 | FileCheck %s
target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32"
target triple = "thumbv7-apple-ios"

; The double in the constant pool is 8-byte aligned, forcing the function
; alignment.
; CHECK: .align 3
; CHECK: func
;
; Constant pool with 8-byte entry before 4-byte entry:
; CHECK: .align 3
; CHECK: LCPI
; CHECK:	.long	2370821947
; CHECK:	.long	1080815255
; CHECK: LCPI
; CHECK:	.long	1123477881
; void func(float *x, double *y)
;
; Adds one f32 constant to *x and one f64 constant to *y.  The point of the
; test is the constant pool this generates: the f64 entry (2.234560e+02,
; words 2370821947 / 1080815255) is 8-byte aligned and must be sorted before
; the 4-byte f32 entry (0x405EDD2F20000000, word 1123477881), which in turn
; forces .align 3 on the function itself (checked by the CHECK lines above).
define void @func(float* nocapture %x, double* nocapture %y) nounwind ssp {
entry:
  ; *x += 123.456 (f32 constant — becomes the 4-byte constant-pool entry)
  %0 = load float* %x, align 4
  %add = fadd float %0, 0x405EDD2F20000000
  store float %add, float* %x, align 4
  ; *y += 223.456 (f64 constant — becomes the 8-byte-aligned pool entry)
  %1 = load double* %y, align 4
  %add1 = fadd double %1, 2.234560e+02
  store double %add1, double* %y, align 4
  ret void
}