diff --git a/lib/Target/MSP430/MSP430InstrInfo.td b/lib/Target/MSP430/MSP430InstrInfo.td
index 3ce0a66edfd..321018e958b 100644
--- a/lib/Target/MSP430/MSP430InstrInfo.td
+++ b/lib/Target/MSP430/MSP430InstrInfo.td
@@ -92,7 +92,9 @@ def addr : ComplexPattern;
 // Pattern Fragments
 def zextloadi16i8 : PatFrag<(ops node:$ptr), (i16 (zextloadi8 node:$ptr))>;
 def extloadi16i8 : PatFrag<(ops node:$ptr), (i16 ( extloadi8 node:$ptr))>;
-
+def and_su : PatFrag<(ops node:$lhs, node:$rhs), (and node:$lhs, node:$rhs), [{
+  return N->hasOneUse();
+}]>;
 
 //===----------------------------------------------------------------------===//
 // Instruction list..
@@ -826,6 +828,65 @@ def CMP16mr : Pseudo<(outs), (ins memsrc:$src1, GR16:$src2),
                 "cmp.w\t{$src1, $src2}",
                 [(MSP430cmp (load addr:$src1), GR16:$src2),
                  (implicit SRW)]>;
+
+// BIT TESTS, just sets condition codes
+// Note that the C condition is set differently than when using CMP.
+let isCommutable = 1 in {
+def BIT8rr  : Pseudo<(outs), (ins GR8:$src1, GR8:$src2),
+                     "bit.b\t{$src2, $src1}",
+                     [(MSP430cmp 0, (and_su GR8:$src1, GR8:$src2)),
+                      (implicit SRW)]>;
+def BIT16rr : Pseudo<(outs), (ins GR16:$src1, GR16:$src2),
+                     "bit.w\t{$src2, $src1}",
+                     [(MSP430cmp 0, (and_su GR16:$src1, GR16:$src2)),
+                      (implicit SRW)]>;
+}
+def BIT8ri  : Pseudo<(outs), (ins GR8:$src1, i8imm:$src2),
+                     "bit.b\t{$src2, $src1}",
+                     [(MSP430cmp 0, (and_su GR8:$src1, imm:$src2)),
+                      (implicit SRW)]>;
+def BIT16ri : Pseudo<(outs), (ins GR16:$src1, i16imm:$src2),
+                     "bit.w\t{$src2, $src1}",
+                     [(MSP430cmp 0, (and_su GR16:$src1, imm:$src2)),
+                      (implicit SRW)]>;
+
+def BIT8rm  : Pseudo<(outs), (ins GR8:$src1, memdst:$src2),
+                     "bit.b\t{$src2, $src1}",
+                     [(MSP430cmp 0, (and_su GR8:$src1, (load addr:$src2))),
+                      (implicit SRW)]>;
+def BIT16rm : Pseudo<(outs), (ins GR16:$src1, memdst:$src2),
+                     "bit.w\t{$src2, $src1}",
+                     [(MSP430cmp 0, (and_su GR16:$src1, (load addr:$src2))),
+                      (implicit SRW)]>;
+
+def BIT8mr  : Pseudo<(outs), (ins memsrc:$src1, GR8:$src2),
+                     "bit.b\t{$src2, $src1}",
+                     [(MSP430cmp 0, (and_su (load addr:$src1), GR8:$src2)),
+                      (implicit SRW)]>;
+def BIT16mr : Pseudo<(outs), (ins memsrc:$src1, GR16:$src2),
+                     "bit.w\t{$src2, $src1}",
+                     [(MSP430cmp 0, (and_su (load addr:$src1), GR16:$src2)),
+                      (implicit SRW)]>;
+
+def BIT8mi  : Pseudo<(outs), (ins memsrc:$src1, i8imm:$src2),
+                     "bit.b\t{$src2, $src1}",
+                     [(MSP430cmp 0, (and_su (load addr:$src1), (i8 imm:$src2))),
+                      (implicit SRW)]>;
+def BIT16mi : Pseudo<(outs), (ins memsrc:$src1, i16imm:$src2),
+                     "bit.w\t{$src2, $src1}",
+                     [(MSP430cmp 0, (and_su (load addr:$src1), (i16 imm:$src2))),
+                      (implicit SRW)]>;
+
+def BIT8mm  : Pseudo<(outs), (ins memsrc:$src1, memsrc:$src2),
+                     "bit.b\t{$src2, $src1}",
+                     [(MSP430cmp 0, (and_su (i8 (load addr:$src1)),
+                                            (load addr:$src2))),
+                      (implicit SRW)]>;
+def BIT16mm : Pseudo<(outs), (ins memsrc:$src1, memsrc:$src2),
+                     "bit.w\t{$src2, $src1}",
+                     [(MSP430cmp 0, (and_su (i16 (load addr:$src1)),
+                                            (load addr:$src2))),
+                      (implicit SRW)]>;
 } // Defs = [SRW]
 
 //===----------------------------------------------------------------------===//
@@ -908,3 +969,6 @@ def : Pat<(store (subc (load addr:$dst), (i8 (load addr:$src))), addr:$dst),
 
 // peephole patterns
 def : Pat<(and GR16:$src, 255), (ZEXT16r GR16:$src)>;
+def : Pat<(MSP430cmp 0, (trunc (and_su GR16:$src1, GR16:$src2))),
+          (BIT8rr (EXTRACT_SUBREG GR16:$src1, subreg_8bit),
+                  (EXTRACT_SUBREG GR16:$src2, subreg_8bit))>;
diff --git a/test/CodeGen/MSP430/bit.ll b/test/CodeGen/MSP430/bit.ll
new file mode 100644
index 00000000000..2c7836661ff
--- /dev/null
+++ b/test/CodeGen/MSP430/bit.ll
@@ -0,0 +1,166 @@
+; RUN: llvm-as < %s | llc -march=msp430 | FileCheck %s
+target datalayout = "e-p:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:16:32"
+target triple = "msp430-generic-generic"
+
+@foo8 = external global i8;
+@bar8 = external global i8;
+
+define i8 @bitbrr(i8 %a, i8 %b) nounwind {
+  %t1 = and i8 %a, %b
+  %t2 = icmp ne i8 %t1, 0
+  %t3 = zext i1 %t2 to i8
+  ret i8 %t3
+}
+; CHECK: bitbrr:
+; CHECK: bit.b r14, r15
+
+define i8 @bitbri(i8 %a) nounwind {
+  %t1 = and i8 %a, 15
+  %t2 = icmp ne i8 %t1, 0
+  %t3 = zext i1 %t2 to i8
+  ret i8 %t3
+}
+; CHECK: bitbri:
+; CHECK: bit.b #15, r15
+
+define i8 @bitbir(i8 %a) nounwind {
+  %t1 = and i8 15, %a
+  %t2 = icmp ne i8 %t1, 0
+  %t3 = zext i1 %t2 to i8
+  ret i8 %t3
+}
+; CHECK: bitbir:
+; CHECK: bit.b #15, r15
+
+define i8 @bitbmi() nounwind {
+  %t1 = load i8* @foo8
+  %t2 = and i8 %t1, 15
+  %t3 = icmp ne i8 %t2, 0
+  %t4 = zext i1 %t3 to i8
+  ret i8 %t4
+}
+; CHECK: bitbmi:
+; CHECK: bit.b #15, &foo8
+
+define i8 @bitbim() nounwind {
+  %t1 = load i8* @foo8
+  %t2 = and i8 15, %t1
+  %t3 = icmp ne i8 %t2, 0
+  %t4 = zext i1 %t3 to i8
+  ret i8 %t4
+}
+; CHECK: bitbim:
+; CHECK: bit.b #15, &foo8
+
+define i8 @bitbrm(i8 %a) nounwind {
+  %t1 = load i8* @foo8
+  %t2 = and i8 %a, %t1
+  %t3 = icmp ne i8 %t2, 0
+  %t4 = zext i1 %t3 to i8
+  ret i8 %t4
+}
+; CHECK: bitbrm:
+; CHECK: bit.b &foo8, r15
+
+define i8 @bitbmr(i8 %a) nounwind {
+  %t1 = load i8* @foo8
+  %t2 = and i8 %t1, %a
+  %t3 = icmp ne i8 %t2, 0
+  %t4 = zext i1 %t3 to i8
+  ret i8 %t4
+}
+; CHECK: bitbmr:
+; CHECK: bit.b r15, &foo8
+
+define i8 @bitbmm() nounwind {
+  %t1 = load i8* @foo8
+  %t2 = load i8* @bar8
+  %t3 = and i8 %t1, %t2
+  %t4 = icmp ne i8 %t3, 0
+  %t5 = zext i1 %t4 to i8
+  ret i8 %t5
+}
+; CHECK: bitbmm:
+; CHECK: bit.b &bar8, &foo8
+
+@foo16 = external global i16;
+@bar16 = external global i16;
+
+define i16 @bitwrr(i16 %a, i16 %b) nounwind {
+  %t1 = and i16 %a, %b
+  %t2 = icmp ne i16 %t1, 0
+  %t3 = zext i1 %t2 to i16
+  ret i16 %t3
+}
+; CHECK: bitwrr:
+; CHECK: bit.w r14, r15
+
+define i16 @bitwri(i16 %a) nounwind {
+  %t1 = and i16 %a, 4080
+  %t2 = icmp ne i16 %t1, 0
+  %t3 = zext i1 %t2 to i16
+  ret i16 %t3
+}
+; CHECK: bitwri:
+; CHECK: bit.w #4080, r15
+
+define i16 @bitwir(i16 %a) nounwind {
+  %t1 = and i16 4080, %a
+  %t2 = icmp ne i16 %t1, 0
+  %t3 = zext i1 %t2 to i16
+  ret i16 %t3
+}
+; CHECK: bitwir:
+; CHECK: bit.w #4080, r15
+
+define i16 @bitwmi() nounwind {
+  %t1 = load i16* @foo16
+  %t2 = and i16 %t1, 4080
+  %t3 = icmp ne i16 %t2, 0
+  %t4 = zext i1 %t3 to i16
+  ret i16 %t4
+}
+; CHECK: bitwmi:
+; CHECK: bit.w #4080, &foo16
+
+define i16 @bitwim() nounwind {
+  %t1 = load i16* @foo16
+  %t2 = and i16 4080, %t1
+  %t3 = icmp ne i16 %t2, 0
+  %t4 = zext i1 %t3 to i16
+  ret i16 %t4
+}
+; CHECK: bitwim:
+; CHECK: bit.w #4080, &foo16
+
+define i16 @bitwrm(i16 %a) nounwind {
+  %t1 = load i16* @foo16
+  %t2 = and i16 %a, %t1
+  %t3 = icmp ne i16 %t2, 0
+  %t4 = zext i1 %t3 to i16
+  ret i16 %t4
+}
+; CHECK: bitwrm:
+; CHECK: bit.w &foo16, r15
+
+define i16 @bitwmr(i16 %a) nounwind {
+  %t1 = load i16* @foo16
+  %t2 = and i16 %t1, %a
+  %t3 = icmp ne i16 %t2, 0
+  %t4 = zext i1 %t3 to i16
+  ret i16 %t4
+}
+; CHECK: bitwmr:
+; CHECK: bit.w r15, &foo16
+
+define i16 @bitwmm() nounwind {
+  %t1 = load i16* @foo16
+  %t2 = load i16* @bar16
+  %t3 = and i16 %t1, %t2
+  %t4 = icmp ne i16 %t3, 0
+  %t5 = zext i1 %t4 to i16
+  ret i16 %t5
+}
+; CHECK: bitwmm:
+; CHECK: bit.w &bar16, &foo16
+