[mips] Implement NaCl sandboxing of loads, stores and SP changes:

  * Add masking instructions before loads and stores (in MC layer).
  * Add masking instructions after SP changes (in MC layer).
  * Forbid loads, stores and SP changes in delay slots (in MI layer).

Differential Revision: http://llvm-reviews.chandlerc.com/D2904


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@203484 91177308-0d34-0410-b5e6-96231b3b80d8
Sasa Stankovic 2014-03-10 20:34:23 +00:00
parent c32c110b90
commit 754aaee387
5 changed files with 385 additions and 5 deletions

@@ -17,6 +17,10 @@ namespace llvm {
// Log2 of the NaCl MIPS sandbox's instruction bundle size.
static const unsigned MIPS_NACL_BUNDLE_ALIGN = 4u;
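// Illustrative note, not part of this patch: since the value above is a log2,
// the bundle size itself is 1u << MIPS_NACL_BUNDLE_ALIGN = 16 bytes, i.e. four
// 4-byte MIPS instructions per bundle. A hypothetical derived constant would be
//   static const unsigned MIPS_NACL_BUNDLE_SIZE = 1u << MIPS_NACL_BUNDLE_ALIGN;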
bool isBasePlusOffsetMemoryAccess(unsigned Opcode, unsigned *AddrIdx,
bool *IsStore = NULL);
bool baseRegNeedsLoadStoreMask(unsigned Reg);
// This function creates an MCELFStreamer for Mips NaCl.
MCELFStreamer *createMipsNaClELFStreamer(MCContext &Context, MCAsmBackend &TAB,
raw_ostream &OS,

@@ -9,8 +9,10 @@
//
// This file implements MCELFStreamer for Mips NaCl. It emits .o object files
// as required by NaCl's SFI sandbox. It inserts address-masking instructions
-// before dangerous control-flow instructions. It aligns on bundle size all
-// functions and all targets of indirect branches.
+// before dangerous control-flow and memory access instructions. It inserts
+// address-masking instructions after instructions that change the stack
+// pointer. It ensures that the mask and the dangerous instruction are always
+// emitted in the same bundle.
//
//===----------------------------------------------------------------------===//
@@ -25,6 +27,7 @@ using namespace llvm;
namespace {
const unsigned IndirectBranchMaskReg = Mips::T6;
const unsigned LoadStoreStackMaskReg = Mips::T7;
/// Extend the generic MCELFStreamer class so that it can mask dangerous
/// instructions.
@@ -42,6 +45,11 @@ private:
return MI.getOpcode() == Mips::JR || MI.getOpcode() == Mips::RET;
}
bool isStackPointerFirstOperand(const MCInst &MI) {
return (MI.getNumOperands() > 0 && MI.getOperand(0).isReg()
&& MI.getOperand(0).getReg() == Mips::SP);
}
void emitMask(unsigned AddrReg, unsigned MaskReg,
const MCSubtargetInfo &STI) {
MCInst MaskInst;
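// Note: the rest of emitMask falls outside the hunk shown above. A plausible
// sketch of the omitted body (an assumption, not the displayed diff): build an
// "and AddrReg, AddrReg, MaskReg" MCInst and emit it, clamping the address into
// the sandbox before it is used:
//   MaskInst.setOpcode(Mips::AND);
//   MaskInst.addOperand(MCOperand::CreateReg(AddrReg));
//   MaskInst.addOperand(MCOperand::CreateReg(AddrReg));
//   MaskInst.addOperand(MCOperand::CreateReg(MaskReg));
//   MCELFStreamer::EmitInstruction(MaskInst, STI);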
@@ -63,14 +71,57 @@ private:
EmitBundleUnlock();
}
// Sandbox memory access or SP change. Insert mask operation before and/or
// after the instruction.
void sandboxLoadStoreStackChange(const MCInst &MI, unsigned AddrIdx,
const MCSubtargetInfo &STI, bool MaskBefore,
bool MaskAfter) {
EmitBundleLock(false);
if (MaskBefore) {
// Sandbox memory access.
unsigned BaseReg = MI.getOperand(AddrIdx).getReg();
emitMask(BaseReg, LoadStoreStackMaskReg, STI);
}
MCELFStreamer::EmitInstruction(MI, STI);
if (MaskAfter) {
// Sandbox SP change.
unsigned SPReg = MI.getOperand(0).getReg();
assert((Mips::SP == SPReg) && "Unexpected stack-pointer register.");
emitMask(SPReg, LoadStoreStackMaskReg, STI);
}
EmitBundleUnlock();
}
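// Example of the resulting code, taken from the assembly test further below:
// sandboxing "lw $sp, 0($2)" (a load that also changes the stack pointer)
// emits, inside one locked bundle,
//   and $2, $2, $15
//   lw  $sp, 0($2)
//   and $sp, $sp, $15
// with nops inserted beforehand whenever the sequence would otherwise straddle
// a bundle boundary.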
public:
/// This function is the one used to emit instruction data into the ELF
/// streamer. We override it to mask dangerous instructions.
virtual void EmitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI) {
- if (isIndirectJump(Inst))
+ // Sandbox indirect jumps.
+ if (isIndirectJump(Inst)) {
sandboxIndirectJump(Inst, STI);
- else
- MCELFStreamer::EmitInstruction(Inst, STI);
+ return;
+ }
// Sandbox loads, stores and SP changes.
unsigned AddrIdx;
bool IsStore;
bool IsMemAccess = isBasePlusOffsetMemoryAccess(Inst.getOpcode(), &AddrIdx,
&IsStore);
bool IsSPFirstOperand = isStackPointerFirstOperand(Inst);
if (IsMemAccess || IsSPFirstOperand) {
bool MaskBefore = (IsMemAccess
&& baseRegNeedsLoadStoreMask(Inst.getOperand(AddrIdx)
.getReg()));
bool MaskAfter = IsSPFirstOperand && !IsStore;
if (MaskBefore || MaskAfter)
sandboxLoadStoreStackChange(Inst, AddrIdx, STI, MaskBefore, MaskAfter);
else
MCELFStreamer::EmitInstruction(Inst, STI);
return;
}
// None of the sandboxing applies, just emit the instruction.
MCELFStreamer::EmitInstruction(Inst, STI);
}
};
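To make the MaskBefore/MaskAfter decision above concrete, here is a small
standalone sketch (hypothetical, not part of the patch) that replays the same
predicate on the cases exercised by the assembly test further below:

#include <cstdio>

// Each case mirrors an instruction from the .s test; the booleans are what
// isBasePlusOffsetMemoryAccess, isStackPointerFirstOperand and
// baseRegNeedsLoadStoreMask would report for it.
struct Case {
  const char *Asm;
  bool IsMemAccess, IsStore, IsSPFirstOperand, BaseNeedsMask;
};

int main() {
  const Case Cases[] = {
    {"lw $4, 0($1)",       true,  false, false, true },  // mask base before
    {"lw $sp, 0($2)",      true,  false, true,  true },  // mask before and after
    {"lw $sp, 123($sp)",   true,  false, true,  false},  // mask after only
    {"sw $sp, 123($sp)",   true,  true,  true,  false},  // no mask needed
    {"addiu $sp, $sp, 24", false, false, true,  false},  // mask after only
  };
  for (const Case &C : Cases) {
    bool MaskBefore = C.IsMemAccess && C.BaseNeedsMask;
    bool MaskAfter = C.IsSPFirstOperand && !C.IsStore;
    std::printf("%-20s MaskBefore=%d MaskAfter=%d\n", C.Asm, MaskBefore,
                MaskAfter);
  }
  return 0;
}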
@@ -78,6 +129,56 @@ public:
namespace llvm {
bool isBasePlusOffsetMemoryAccess(unsigned Opcode, unsigned *AddrIdx,
bool *IsStore) {
if (IsStore)
*IsStore = false;
switch (Opcode) {
default:
return false;
// Load instructions with base address register in position 1.
case Mips::LB:
case Mips::LBu:
case Mips::LH:
case Mips::LHu:
case Mips::LW:
case Mips::LWC1:
case Mips::LDC1:
case Mips::LL:
case Mips::LWL:
case Mips::LWR:
*AddrIdx = 1;
return true;
// Store instructions with base address register in position 1.
case Mips::SB:
case Mips::SH:
case Mips::SW:
case Mips::SWC1:
case Mips::SDC1:
case Mips::SWL:
case Mips::SWR:
*AddrIdx = 1;
if (IsStore)
*IsStore = true;
return true;
// Store instructions with base address register in position 2.
case Mips::SC:
*AddrIdx = 2;
if (IsStore)
*IsStore = true;
return true;
}
}
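// Hypothetical usage sketch (not in the patch): callers pass an opcode and read
// back the base-address operand index, e.g.
//   unsigned AddrIdx;
//   bool IsStore;
//   if (isBasePlusOffsetMemoryAccess(Mips::SW, &AddrIdx, &IsStore)) {
//     // For SW this sets AddrIdx to 1 and IsStore to true.
//   }
// The streamer above and the delay-slot filler below are the two real callers.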
bool baseRegNeedsLoadStoreMask(unsigned Reg) {
// The contents of SP and thread pointer register do not require masking.
return Reg != Mips::SP && Reg != Mips::T8;
}
MCELFStreamer *createMipsNaClELFStreamer(MCContext &Context, MCAsmBackend &TAB,
raw_ostream &OS,
MCCodeEmitter *Emitter, bool RelaxAll,

@@ -13,6 +13,7 @@
#define DEBUG_TYPE "delay-slot-filler"
#include "MCTargetDesc/MipsMCNaCl.h"
#include "Mips.h"
#include "MipsInstrInfo.h"
#include "MipsTargetMachine.h"
@@ -531,6 +532,18 @@ bool Filler::searchRange(MachineBasicBlock &MBB, IterTy Begin, IterTy End,
if (delayHasHazard(*I, RegDU, IM))
continue;
if (TM.getSubtarget<MipsSubtarget>().isTargetNaCl()) {
// In NaCl, instructions that must be masked are forbidden in delay slots.
// We only check for loads, stores and SP changes. Calls, returns and
// branches are not checked because non-NaCl targets never put them in
// delay slots.
unsigned AddrIdx;
if ((isBasePlusOffsetMemoryAccess(I->getOpcode(), &AddrIdx)
&& baseRegNeedsLoadStoreMask(I->getOperand(AddrIdx).getReg()))
|| I->modifiesRegister(Mips::SP, TM.getRegisterInfo()))
continue;
}
Filler = I;
return true;
}

@@ -0,0 +1,71 @@
; RUN: llc -filetype=asm -mtriple=mipsel-none-linux -relocation-model=static \
; RUN: -O3 < %s | FileCheck %s
; RUN: llc -filetype=asm -mtriple=mipsel-none-nacl -relocation-model=static \
; RUN: -O3 < %s | FileCheck %s -check-prefix=CHECK-NACL
@x = global i32 0, align 4
declare void @f1(i32)
declare void @f2()
define void @test1() {
%1 = load i32* @x, align 4
call void @f1(i32 %1)
ret void
; CHECK-LABEL: test1
; We first make sure that for non-NaCl targets the branch-delay slot contains
; the dangerous instructions.
; Check that branch-delay slot is used to load argument from x before function
; call.
; CHECK: jal
; CHECK-NEXT: lw $4, %lo(x)(${{[0-9]+}})
; Check that branch-delay slot is used for adjusting sp before return.
; CHECK: jr $ra
; CHECK-NEXT: addiu $sp, $sp, {{[0-9]+}}
; For NaCl, check that branch-delay slot doesn't contain dangerous instructions.
; CHECK-NACL: jal
; CHECK-NACL-NEXT: nop
; CHECK-NACL: jr $ra
; CHECK-NACL-NEXT: nop
}
define void @test2() {
store i32 1, i32* @x, align 4
tail call void @f2()
ret void
; CHECK-LABEL: test2
; Check that branch-delay slot is used for storing to x before function call.
; CHECK: jal
; CHECK-NEXT: sw ${{[0-9]+}}, %lo(x)(${{[0-9]+}})
; Check that branch-delay slot is used for adjusting sp before return.
; CHECK: jr $ra
; CHECK-NEXT: addiu $sp, $sp, {{[0-9]+}}
; For NaCl, check that branch-delay slot doesn't contain dangerous instructions.
; CHECK-NACL: jal
; CHECK-NACL-NEXT: nop
; CHECK-NACL: jr $ra
; CHECK-NACL-NEXT: nop
}

@@ -5,6 +5,10 @@
# This test tests that address-masking sandboxing is added when given assembly
# input.
# Test that address-masking sandboxing is added before indirect branches and
# returns.
test1:
.set noreorder
@@ -26,3 +30,190 @@ test1:
# CHECK: and $ra, $ra, $14
# CHECK-NEXT: jr $ra
# Test that address-masking sandboxing is added before load instructions.
test2:
.set noreorder
lb $4, 0($1)
nop
lbu $4, 0($2)
lh $4, 0($3)
lhu $1, 0($4)
lw $4, 0($5)
lwc1 $f0, 0($6)
ldc1 $f2, 0($7)
ll $4, 0($8)
lwl $4, 0($9)
lwr $4, 0($10)
lw $4, 0($sp)
lw $4, 0($t8)
# CHECK-LABEL: test2:
# CHECK: and $1, $1, $15
# CHECK-NEXT: lb $4, 0($1)
# Check that additional nop is inserted, to align mask and load to the next
# bundle.
# CHECK: nop
# CHECK: nop
# CHECK: and $2, $2, $15
# CHECK-NEXT: lbu $4, 0($2)
# CHECK: and $3, $3, $15
# CHECK-NEXT: lh $4, 0($3)
# CHECK: and $4, $4, $15
# CHECK-NEXT: lhu $1, 0($4)
# CHECK: and $5, $5, $15
# CHECK-NEXT: lw $4, 0($5)
# CHECK: and $6, $6, $15
# CHECK-NEXT: lwc1 $f0, 0($6)
# CHECK: and $7, $7, $15
# CHECK-NEXT: ldc1 $f2, 0($7)
# CHECK: and $8, $8, $15
# CHECK-NEXT: ll $4, 0($8)
# CHECK: and $9, $9, $15
# CHECK-NEXT: lwl $4, 0($9)
# CHECK: and $10, $10, $15
# CHECK-NEXT: lwr $4, 0($10)
# Check that loads where base register is $sp or $t8 (thread pointer register)
# are not masked.
# CHECK-NOT: and
# CHECK: lw $4, 0($sp)
# CHECK-NOT: and
# CHECK: lw $4, 0($24)
# Test that address-masking sandboxing is added before store instructions.
test3:
.set noreorder
sb $4, 0($1)
nop
sh $4, 0($2)
sw $4, 0($3)
swc1 $f0, 0($4)
sdc1 $f2, 0($5)
swl $4, 0($6)
swr $4, 0($7)
sc $4, 0($8)
sw $4, 0($sp)
sw $4, 0($t8)
# CHECK-LABEL: test3:
# CHECK: and $1, $1, $15
# CHECK-NEXT: sb $4, 0($1)
# Check that additional nop is inserted, to align mask and store to the next
# bundle.
# CHECK: nop
# CHECK: nop
# CHECK: and $2, $2, $15
# CHECK-NEXT: sh $4, 0($2)
# CHECK: and $3, $3, $15
# CHECK-NEXT: sw $4, 0($3)
# CHECK: and $4, $4, $15
# CHECK-NEXT: swc1 $f0, 0($4)
# CHECK: and $5, $5, $15
# CHECK-NEXT: sdc1 $f2, 0($5)
# CHECK: and $6, $6, $15
# CHECK-NEXT: swl $4, 0($6)
# CHECK: and $7, $7, $15
# CHECK-NEXT: swr $4, 0($7)
# CHECK: and $8, $8, $15
# CHECK-NEXT: sc $4, 0($8)
# Check that stores where base register is $sp or $t8 (thread pointer register)
# are not masked.
# CHECK-NOT: and
# CHECK: sw $4, 0($sp)
# CHECK-NOT: and
# CHECK: sw $4, 0($24)
# Test that address-masking sandboxing is added after instructions that change
# stack pointer.
test4:
.set noreorder
addiu $sp, $sp, 24
nop
addu $sp, $sp, $1
lw $sp, 0($2)
lw $sp, 123($sp)
sw $sp, 123($sp)
# CHECK-LABEL: test4:
# CHECK: addiu $sp, $sp, 24
# CHECK-NEXT: and $sp, $sp, $15
# Check that additional nop is inserted, to align instruction and mask to the
# next bundle.
# CHECK: nop
# CHECK: nop
# CHECK: addu $sp, $sp, $1
# CHECK-NEXT: and $sp, $sp, $15
# Since the sandboxing sequence we check next consists of 3 instructions,
# check that 2 additional nops are inserted to align it to the next bundle.
# CHECK: nop
# CHECK: nop
# Check that for instructions that change stack-pointer and load from memory
# masks are added before and after the instruction.
# CHECK: and $2, $2, $15
# CHECK-NEXT: lw $sp, 0($2)
# CHECK-NEXT: and $sp, $sp, $15
# For loads where $sp is destination and base, check that mask is added after
# but not before.
# CHECK-NOT: and
# CHECK: lw $sp, 123($sp)
# CHECK-NEXT: and $sp, $sp, $15
# For stores where $sp is destination and base, check that mask is added neither
# before nor after.
# CHECK-NOT: and
# CHECK: sw $sp, 123($sp)
# CHECK-NOT: and