mirror of
https://github.com/c64scene-ar/llvm-6502.git
synced 2025-07-25 13:24:46 +00:00
Fix memory access lowering on SPU, adding
support for the case where alignment < value size. These cases were silently miscompiled before this patch. Now they are overly verbose — especially storing is — and any front-end should still avoid misaligned memory accesses as much as possible. The bit-juggling algorithm added here probably has some room for improvement still. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@118889 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
@@ -14,6 +14,7 @@
 ; RUN: grep iohl %t1.s | count 8
 ; RUN: grep shufb %t1.s | count 15
 ; RUN: grep frds %t1.s | count 1
+; RUN: llc < %s -march=cellspu | FileCheck %s

 ; ModuleID = 'stores.bc'
 target datalayout = "E-p:32:32:128-f64:64:128-f32:32:128-i64:32:128-i32:32:128-i16:16:128-i8:8:128-i1:8:128-a0:0:128-v128:128:128-s0:128:128"
@@ -149,3 +150,15 @@ entry:
   store float %conv, float* %dest
   ret float %conv
 }
+
; Check stores that might span two 16-byte memory blocks: the store
; below uses align 2, smaller than the 4-byte value size, so the SPU
; backend must read-modify-write the surrounding quadwords. We
; therefore expect two lqd loads and two stqd stores in the output
; (verified by the CHECK lines), then the return via bi $lr.
|
||||
define void @store_misaligned( i32 %val, i32* %ptr) {
|
||||
;CHECK: store_misaligned
|
||||
;CHECK: lqd
|
||||
;CHECK: lqd
|
||||
;CHECK: stqd
|
||||
;CHECK: stqd
|
||||
;CHECK: bi $lr
|
||||
store i32 %val, i32*%ptr, align 2
|
||||
ret void
|
||||
}
|
||||
|
Reference in New Issue
Block a user