Mirror of https://github.com/c64scene-ar/llvm-6502.git (synced 2025-09-26 09:18:56 +00:00)
ARM: cortex-m0 doesn't support unaligned memory access.
Unlike other v6+ processors, cortex-m0 never supports unaligned accesses.
From the v6m ARM ARM: "A3.2 Alignment support: ARMv6-M always generates a fault when an unaligned access occurs."

rdar://16491560

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@205452 91177308-0d34-0410-b5e6-96231b3b80d8
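As background (not part of the commit): a minimal C++ sketch of the kind of source pattern that ends up as an under-aligned 32-bit load, which is what the new test below exercises at the IR level. The function name and buffer layout are illustrative only.

```cpp
#include <cstdint>
#include <cstring>

// Read a 32-bit little-endian value that starts at an arbitrary (possibly
// odd) offset inside a byte buffer. The memcpy is how C++ spells a load the
// compiler may not assume is word-aligned; on Cortex-M0 (ARMv6-M) a single
// 32-bit access at such an address would fault, so the backend must lower it
// to narrower loads instead.
uint32_t read_u32_unaligned(const uint8_t *p) {
  uint32_t v;
  std::memcpy(&v, p, sizeof v);
  return v;
}
```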
lib/Target/ARM/ARMSubtarget.cpp
@@ -236,7 +236,7 @@ void ARMSubtarget::resetSubtargetFeatures(StringRef CPU, StringRef FS) {
       //
       // ARMv6 may or may not support unaligned accesses depending on the
       // SCTLR.U bit, which is architecture-specific. We assume ARMv6
-      // Darwin targets support unaligned accesses, and others don't.
+      // Darwin and NetBSD targets support unaligned accesses, and others don't.
       //
       // ARMv7 always has SCTLR.U set to 1, but it has a new SCTLR.A bit
       // which raises an alignment fault on unaligned accesses. Linux
@@ -249,6 +249,11 @@ void ARMSubtarget::resetSubtargetFeatures(StringRef CPU, StringRef FS) {
         (hasV7Ops() && (isTargetLinux() || isTargetNaCl() ||
                         isTargetNetBSD())) ||
         (hasV6Ops() && (isTargetMachO() || isTargetNetBSD()));
+      // The one exception is cortex-m0, which despite being v6, does not
+      // support unaligned accesses. Rather than make the above boolean
+      // expression even more obtuse, just override the value here.
+      if (isThumb1Only() && isMClass())
+        AllowsUnalignedMem = false;
       break;
     case StrictAlign:
       AllowsUnalignedMem = false;
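For readability, a standalone C++ sketch of the predicate the hunk above ends up computing. The parameter names mirror the ARMSubtarget queries of the same names, but this is not LLVM code; it only restates the boolean logic plus the new cortex-m0 override.

```cpp
// Sketch only: how the DefaultAlign case decides AllowsUnalignedMem after
// this change. Each bool stands in for the corresponding ARMSubtarget query.
static bool allowsUnalignedMem(bool hasV6Ops, bool hasV7Ops, bool isThumb1Only,
                               bool isMClass, bool isTargetLinux,
                               bool isTargetNaCl, bool isTargetNetBSD,
                               bool isTargetMachO) {
  bool Allowed =
      (hasV7Ops && (isTargetLinux || isTargetNaCl || isTargetNetBSD)) ||
      (hasV6Ops && (isTargetMachO || isTargetNetBSD));
  // New in this commit: cortex-m0 is v6, but ARMv6-M always faults on
  // unaligned accesses, so Thumb1-only M-class cores are forced off.
  if (isThumb1Only && isMClass)
    Allowed = false;
  return Allowed;
}
```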
test/CodeGen/Thumb/cortex-m0-unaligned-access.ll (new file, 13 lines)
@@ -0,0 +1,13 @@
+; RUN: llc -mtriple=thumbv6m-apple-unknown-macho < %s | FileCheck --check-prefix=V6M %s
+; RUN: llc -mtriple=thumbv7m-apple-unknown-macho < %s | FileCheck --check-prefix=V7M %s
+
+define i32 @split_load(i32* %p) nounwind {
+; V6M-LABEL: split_load
+; V6M: ldrh
+; V6M: ldrh
+; V7M-LABEL: split_load
+; V7M-NOT: ldrh
+; V7M: bx lr
+  %val = load i32* %p, align 2
+  ret i32 %val
+}
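For orientation, a C++ analogue of the lowering the V6M checks expect: with only 2-byte alignment guaranteed, the 32-bit value has to be assembled from two halfword loads, while a v7-M core can use a single word load. Little-endian layout is assumed (as on the triples in the RUN lines), and the function below is illustrative, not taken from the commit.

```cpp
#include <cstdint>

// What the two "ldrh" checks correspond to on a v6-M core: load the low and
// high halfwords separately and combine them, instead of one 32-bit load
// from a pointer that is only 2-byte aligned.
uint32_t split_load(const uint16_t *p) {
  uint32_t lo = p[0];      // first ldrh
  uint32_t hi = p[1];      // second ldrh
  return lo | (hi << 16);  // recombine into the i32 result
}
```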