Two changes:

1. Make the AsmMatcher generator ignore ARM pseudo-instructions properly.
2. Make it a hard error for an instruction to have no asm string; such
   instructions should be marked isCodeGenOnly instead.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@117861 91177308-0d34-0410-b5e6-96231b3b80d8
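Both FIXMEs added to the matcher emitter change below make the same point: matching on the target-specific "Form" (X86) and "F" (ARM) fields for "Pseudo" is a stopgap, and the generator should eventually just skip records that set the isCodeGenOnly bit, which is what the .td changes in this commit start marking. A minimal sketch of that check follows, assuming the Record API and utils/TableGen header layout of this era; it is not part of this commit:

// Sketch only (not part of this commit): the direction the FIXMEs point at.
// Header names as they lived in utils/TableGen around this time (assumption).
#include "CodeGenInstruction.h"
#include "Record.h"

// Skip anything the .td files mark "isCodeGenOnly = 1" instead of sniffing
// the target-specific "Form" (X86) / "F" (ARM) fields for "Pseudo".
static bool IsCodeGenOnlyInstruction(const CodeGenInstruction &CGI) {
  const Record *R = CGI.TheDef;
  // Target.td's Instruction class defaults isCodeGenOnly to 0, so the field
  // should normally be present; guard anyway (assumption about every target).
  return R->getValue("isCodeGenOnly") && R->getValueAsBit("isCodeGenOnly");
}

With a check like this in place, both "Pseudo" sniffs in IsAssemblerInstruction could collapse into a single early return.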
Chris Lattner 2010-10-31 19:15:18 +00:00
parent 39ee036f40
commit a4a3a5e3c2
5 changed files with 35 additions and 17 deletions

View File

@@ -205,7 +205,7 @@ def tCPS : T1I<(outs), (ins cps_opt:$opt), NoItinerary, "cps$opt",
         T1Misc<0b0110011>;

 // For both thumb1 and thumb2.
-let isNotDuplicable = 1 in
+let isNotDuplicable = 1, isCodeGenOnly = 1 in
 def tPICADD : TIt<(outs GPR:$dst), (ins GPR:$lhs, pclabel:$cp), IIC_iALUr, "",
               [(set GPR:$dst, (ARMpic_add GPR:$lhs, imm:$cp))]>,
               T1Special<{0,0,?,?}> {
@@ -917,14 +917,14 @@ let isCall = 1,
 // $val is a scratch register for our use.
 let Defs =
   [ R0, R1, R2, R3, R4, R5, R6, R7, R12 ], hasSideEffects = 1,
-  isBarrier = 1 in {
+  isBarrier = 1, isCodeGenOnly = 1 in {
 def tInt_eh_sjlj_setjmp : ThumbXI<(outs),(ins tGPR:$src, tGPR:$val),
                                   AddrModeNone, SizeSpecial, NoItinerary, "", "",
                                   [(set R0, (ARMeh_sjlj_setjmp tGPR:$src, tGPR:$val))]>;
 }

 // FIXME: Non-Darwin version(s)
-let isBarrier = 1, hasSideEffects = 1, isTerminator = 1,
+let isBarrier = 1, hasSideEffects = 1, isTerminator = 1, isCodeGenOnly = 1,
     Defs = [ R7, LR, SP ] in {
 def tInt_eh_sjlj_longjmp : XI<(outs), (ins GPR:$src, GPR:$scratch),
                               AddrModeNone, SizeSpecial, IndexModeNone,

View File

@@ -2405,7 +2405,7 @@ let Defs =
   [ R0, R1, R2, R3, R4, R5, R6, R7, R8, R9, R10, R11, R12, LR, D0,
     D1, D2, D3, D4, D5, D6, D7, D8, D9, D10, D11, D12, D13, D14, D15,
     D16, D17, D18, D19, D20, D21, D22, D23, D24, D25, D26, D27, D28, D29, D30,
-    D31 ], hasSideEffects = 1, isBarrier = 1 in {
+    D31 ], hasSideEffects = 1, isBarrier = 1, isCodeGenOnly = 1 in {
 def t2Int_eh_sjlj_setjmp : Thumb2XI<(outs), (ins tGPR:$src, tGPR:$val),
                                     AddrModeNone, SizeSpecial, NoItinerary, "", "",
                                     [(set R0, (ARMeh_sjlj_setjmp tGPR:$src, tGPR:$val))]>,
@@ -2414,7 +2414,7 @@ let Defs =
 let Defs =
   [ R0, R1, R2, R3, R4, R5, R6, R7, R8, R9, R10, R11, R12, LR ],
-  hasSideEffects = 1, isBarrier = 1 in {
+  hasSideEffects = 1, isBarrier = 1, isCodeGenOnly = 1 in {
 def t2Int_eh_sjlj_setjmp_nofp : Thumb2XI<(outs), (ins tGPR:$src, tGPR:$val),
                                          AddrModeNone, SizeSpecial, NoItinerary, "", "",
                                          [(set R0, (ARMeh_sjlj_setjmp tGPR:$src, tGPR:$val))]>,

View File

@@ -161,7 +161,7 @@ def MOV32r0 : I<0x31, MRMInitReg, (outs GR32:$dst), (ins), "",
 // instruction is lowered to an MCInst.
 // FIXME: AddedComplexity gives this a higher priority than MOV64ri32. Remove
 // when we have a better way to specify isel priority.
-let Defs = [EFLAGS],
+let Defs = [EFLAGS], isCodeGenOnly=1,
     AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1 in
 def MOV64r0 : I<0x31, MRMInitReg, (outs GR64:$dst), (ins), "",
                 [(set GR64:$dst, 0)]>;
@@ -169,11 +169,11 @@ def MOV64r0 : I<0x31, MRMInitReg, (outs GR64:$dst), (ins), "",
 // Materialize i64 constant where top 32-bits are zero. This could theoretically
 // use MOV32ri with a SUBREG_TO_REG to represent the zero-extension, however
 // that would make it more difficult to rematerialize.
-let AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1 in
+let AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1,
+    isCodeGenOnly = 1 in
 def MOV64ri64i32 : Ii32<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64i32imm:$src),
                         "", [(set GR64:$dst, i64immZExt32:$src)]>;

 // Use sbb to materialize carry bit.
 let Uses = [EFLAGS], Defs = [EFLAGS], isCodeGenOnly = 1 in {
 // FIXME: These are pseudo ops that should be replaced with Pat<> patterns.

View File

@@ -45,10 +45,14 @@ def MOVSX16rr8W : I<0xBE, MRMSrcReg, (outs GR16:$dst), (ins GR8:$src),
                     "movs{bw|x}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
 def MOVSX16rm8W : I<0xBE, MRMSrcMem, (outs GR16:$dst), (ins i8mem:$src),
                     "movs{bw|x}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
+
+// FIXME: Use a pat pattern or define a syntax here.
+let isCodeGenOnly=1 in {
 def MOVSX16rr8 : I<0xBE, MRMSrcReg, (outs GR16:$dst), (ins GR8 :$src),
                    "", [(set GR16:$dst, (sext GR8:$src))]>, TB;
 def MOVSX16rm8 : I<0xBE, MRMSrcMem, (outs GR16:$dst), (ins i8mem :$src),
                    "", [(set GR16:$dst, (sextloadi16i8 addr:$src))]>, TB;
+}
 def MOVSX32rr8 : I<0xBE, MRMSrcReg, (outs GR32:$dst), (ins GR8 :$src),
                    "movs{bl|x}\t{$src, $dst|$dst, $src}",
                    [(set GR32:$dst, (sext GR8:$src))]>, TB;
@@ -69,10 +73,13 @@ def MOVZX16rr8W : I<0xB6, MRMSrcReg, (outs GR16:$dst), (ins GR8:$src),
                     "movz{bw|x}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
 def MOVZX16rm8W : I<0xB6, MRMSrcMem, (outs GR16:$dst), (ins i8mem:$src),
                     "movz{bw|x}\t{$src, $dst|$dst, $src}", []>, TB, OpSize;
+// FIXME: Use a pat pattern or define a syntax here.
+let isCodeGenOnly=1 in {
 def MOVZX16rr8 : I<0xB6, MRMSrcReg, (outs GR16:$dst), (ins GR8 :$src),
                    "", [(set GR16:$dst, (zext GR8:$src))]>, TB;
 def MOVZX16rm8 : I<0xB6, MRMSrcMem, (outs GR16:$dst), (ins i8mem :$src),
                    "", [(set GR16:$dst, (zextloadi16i8 addr:$src))]>, TB;
+}
 def MOVZX32rr8 : I<0xB6, MRMSrcReg, (outs GR32:$dst), (ins GR8 :$src),
                    "movz{bl|x}\t{$src, $dst|$dst, $src}",
                    [(set GR32:$dst, (zext GR8:$src))]>, TB;
@@ -132,6 +139,9 @@ def MOVZX64rr16_Q : RI<0xB7, MRMSrcReg, (outs GR64:$dst), (ins GR16:$src),
 def MOVZX64rm16_Q : RI<0xB7, MRMSrcMem, (outs GR64:$dst), (ins i16mem:$src),
                        "movz{wq|x}\t{$src, $dst|$dst, $src}", []>, TB;

+// FIXME: These should be Pat patterns.
+let isCodeGenOnly = 1 in {
 // Use movzbl instead of movzbq when the destination is a register; it's
 // equivalent due to implicit zero-extending, and it has a smaller encoding.
 def MOVZX64rr8 : I<0xB6, MRMSrcReg, (outs GR64:$dst), (ins GR8 :$src),
@@ -158,5 +168,5 @@ def MOVZX64rm32 : I<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i32mem:$src),
                    "", [(set GR64:$dst, (zextloadi64i32 addr:$src))]>;
+}

View File

@@ -225,11 +225,18 @@ static bool IsAssemblerInstruction(StringRef Name,
   // Ignore pseudo ops.
   //
-  // FIXME: This is a hack; can we convert these instructions to set the
-  // "codegen only" bit instead?
+  // FIXME: This is a hack [for X86]; can we convert these instructions to set
+  // the "codegen only" bit instead?
   if (const RecordVal *Form = CGI.TheDef->getValue("Form"))
     if (Form->getValue()->getAsString() == "Pseudo")
       return false;

+  // FIXME: This is a hack [for ARM]; can we convert these instructions to set
+  // the "codegen only" bit instead?
+  if (const RecordVal *Form = CGI.TheDef->getValue("F"))
+    if (Form->getValue()->getAsString() == "Pseudo")
+      return false;
+
   // Ignore "Int_*" and "*_Int" instructions, which are internal aliases.
   //
@@ -240,18 +247,19 @@ static bool IsAssemblerInstruction(StringRef Name,
   // Ignore instructions with no .s string.
   //
   // FIXME: What are these?
-  if (CGI.AsmString.empty())
-    return false;
+  if (CGI.AsmString.empty()) {
+    PrintError(CGI.TheDef->getLoc(),
+               "instruction with empty asm string");
+    throw std::string("ERROR: Invalid instruction for asm matcher");
+  }

   // FIXME: Hack; ignore any instructions with a newline in them.
   if (std::find(CGI.AsmString.begin(),
                 CGI.AsmString.end(), '\n') != CGI.AsmString.end())
     return false;

-  // Ignore instructions with attributes, these are always fake instructions for
-  // simplifying codegen.
-  //
-  // FIXME: Is this true?
+  // Reject instructions with attributes, these aren't something we can handle,
+  // the target should be refactored to use operands instead of modifiers.
   //
   // Also, check for instructions which reference the operand multiple times;
   // this implies a constraint we would not honor.