From b4fe76cbb554e838193375fafc115cdb643d4517 Mon Sep 17 00:00:00 2001
From: Chris Lattner
Date: Fri, 11 Jun 2004 04:31:10 +0000
Subject: [PATCH] Add direct support for the isnan intrinsic, implementing
 test/Regression/CodeGen/X86/isnan.llx testcase

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@14141 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/X86/InstSelectSimple.cpp | 31 ++++++++++++++++++++---------
 lib/Target/X86/X86ISelSimple.cpp    | 31 ++++++++++++++++++++---------
 2 files changed, 44 insertions(+), 18 deletions(-)

diff --git a/lib/Target/X86/InstSelectSimple.cpp b/lib/Target/X86/InstSelectSimple.cpp
index 59d56bc743c..1adcd52c2e3 100644
--- a/lib/Target/X86/InstSelectSimple.cpp
+++ b/lib/Target/X86/InstSelectSimple.cpp
@@ -1628,6 +1628,7 @@ void ISel::LowerUnknownIntrinsicFunctionCalls(Function &F) {
           case Intrinsic::frameaddress:
           case Intrinsic::memcpy:
           case Intrinsic::memset:
+          case Intrinsic::isnan:
           case Intrinsic::readport:
           case Intrinsic::writeport:
             // We directly implement these intrinsics
@@ -1636,19 +1637,19 @@ void ISel::LowerUnknownIntrinsicFunctionCalls(Function &F) {
             // On X86, memory operations are in-order. Lower this intrinsic
             // into a volatile load.
             Instruction *Before = CI->getPrev();
-            LoadInst * LI = new LoadInst (CI->getOperand(1), "", true, CI);
-            CI->replaceAllUsesWith (LI);
-            BB->getInstList().erase (CI);
+            LoadInst * LI = new LoadInst(CI->getOperand(1), "", true, CI);
+            CI->replaceAllUsesWith(LI);
+            BB->getInstList().erase(CI);
             break;
           }
           case Intrinsic::writeio: {
             // On X86, memory operations are in-order. Lower this intrinsic
             // into a volatile store.
             Instruction *Before = CI->getPrev();
-            StoreInst * LI = new StoreInst (CI->getOperand(1),
-                                            CI->getOperand(2), true, CI);
-            CI->replaceAllUsesWith (LI);
-            BB->getInstList().erase (CI);
+            StoreInst *LI = new StoreInst(CI->getOperand(1),
+                                          CI->getOperand(2), true, CI);
+            CI->replaceAllUsesWith(LI);
+            BB->getInstList().erase(CI);
             break;
           }
           default:
@@ -1656,12 +1657,11 @@ void ISel::LowerUnknownIntrinsicFunctionCalls(Function &F) {
             Instruction *Before = CI->getPrev();
             TM.getIntrinsicLowering().LowerIntrinsicCall(CI);
             if (Before) { // Move iterator to instruction after call
-              I = Before;  ++I;
+              I = Before; ++I;
             } else {
               I = BB->begin();
             }
           }
-
 }
 
 void ISel::visitIntrinsicCall(Intrinsic::ID ID, CallInst &CI) {
@@ -1698,6 +1698,19 @@ void ISel::visitIntrinsicCall(Intrinsic::ID ID, CallInst &CI) {
     }
     return;
 
+  case Intrinsic::isnan:
+    TmpReg1 = getReg(CI.getOperand(1));
+    if (0) { // for processors prior to the P6
+      BuildMI(BB, X86::FpUCOM, 2).addReg(TmpReg1).addReg(TmpReg1);
+      BuildMI(BB, X86::FNSTSW8r, 0);
+      BuildMI(BB, X86::SAHF, 1);
+    } else {
+      BuildMI(BB, X86::FpUCOMI, 2).addReg(TmpReg1).addReg(TmpReg1);
+    }
+    TmpReg2 = getReg(CI);
+    BuildMI(BB, X86::SETPr, 0, TmpReg2);
+    return;
+
   case Intrinsic::memcpy: {
     assert(CI.getNumOperands() == 5 && "Illegal llvm.memcpy call!");
     unsigned Align = 1;
diff --git a/lib/Target/X86/X86ISelSimple.cpp b/lib/Target/X86/X86ISelSimple.cpp
index 59d56bc743c..1adcd52c2e3 100644
--- a/lib/Target/X86/X86ISelSimple.cpp
+++ b/lib/Target/X86/X86ISelSimple.cpp
@@ -1628,6 +1628,7 @@ void ISel::LowerUnknownIntrinsicFunctionCalls(Function &F) {
           case Intrinsic::frameaddress:
           case Intrinsic::memcpy:
           case Intrinsic::memset:
+          case Intrinsic::isnan:
           case Intrinsic::readport:
           case Intrinsic::writeport:
             // We directly implement these intrinsics
@@ -1636,19 +1637,19 @@ void ISel::LowerUnknownIntrinsicFunctionCalls(Function &F) {
             // On X86, memory operations are in-order. Lower this intrinsic
             // into a volatile load.
             Instruction *Before = CI->getPrev();
-            LoadInst * LI = new LoadInst (CI->getOperand(1), "", true, CI);
-            CI->replaceAllUsesWith (LI);
-            BB->getInstList().erase (CI);
+            LoadInst * LI = new LoadInst(CI->getOperand(1), "", true, CI);
+            CI->replaceAllUsesWith(LI);
+            BB->getInstList().erase(CI);
             break;
           }
           case Intrinsic::writeio: {
             // On X86, memory operations are in-order. Lower this intrinsic
             // into a volatile store.
             Instruction *Before = CI->getPrev();
-            StoreInst * LI = new StoreInst (CI->getOperand(1),
-                                            CI->getOperand(2), true, CI);
-            CI->replaceAllUsesWith (LI);
-            BB->getInstList().erase (CI);
+            StoreInst *LI = new StoreInst(CI->getOperand(1),
+                                          CI->getOperand(2), true, CI);
+            CI->replaceAllUsesWith(LI);
+            BB->getInstList().erase(CI);
             break;
           }
           default:
@@ -1656,12 +1657,11 @@ void ISel::LowerUnknownIntrinsicFunctionCalls(Function &F) {
             Instruction *Before = CI->getPrev();
             TM.getIntrinsicLowering().LowerIntrinsicCall(CI);
             if (Before) { // Move iterator to instruction after call
-              I = Before;  ++I;
+              I = Before; ++I;
             } else {
               I = BB->begin();
             }
           }
-
 }
 
 void ISel::visitIntrinsicCall(Intrinsic::ID ID, CallInst &CI) {
@@ -1698,6 +1698,19 @@ void ISel::visitIntrinsicCall(Intrinsic::ID ID, CallInst &CI) {
     }
     return;
 
+  case Intrinsic::isnan:
+    TmpReg1 = getReg(CI.getOperand(1));
+    if (0) { // for processors prior to the P6
+      BuildMI(BB, X86::FpUCOM, 2).addReg(TmpReg1).addReg(TmpReg1);
+      BuildMI(BB, X86::FNSTSW8r, 0);
+      BuildMI(BB, X86::SAHF, 1);
+    } else {
+      BuildMI(BB, X86::FpUCOMI, 2).addReg(TmpReg1).addReg(TmpReg1);
+    }
+    TmpReg2 = getReg(CI);
+    BuildMI(BB, X86::SETPr, 0, TmpReg2);
+    return;
+
   case Intrinsic::memcpy: {
     assert(CI.getNumOperands() == 5 && "Illegal llvm.memcpy call!");
     unsigned Align = 1;
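
Note (not part of the patch): the FpUCOMI + SETPr sequence added above works
because comparing a value with itself is unordered exactly when the value is
a NaN; an unordered x87 compare sets the parity flag, and SETP copies that
flag into the result byte register (the FUCOM/FNSTSW/SAHF branch does the
same on processors prior to the P6, which lack FUCOMI). A minimal standalone
C++ sketch of the check being implemented follows; the is_nan helper is
hypothetical and only illustrates the semantics of llvm.isnan.

    // Self-contained sketch: a value is a NaN exactly when it compares
    // unequal to itself, which is the property the unordered compare plus
    // parity-flag check above relies on.
    #include <cstdio>
    #include <limits>

    static bool is_nan(double X) {  // hypothetical helper mirroring llvm.isnan
      return X != X;                // unordered self-compare: true only for NaN
    }

    int main() {
      double QNaN = std::numeric_limits<double>::quiet_NaN();
      std::printf("is_nan(QNaN) = %d\n", is_nan(QNaN)); // prints 1
      std::printf("is_nan(1.0)  = %d\n", is_nan(1.0));  // prints 0
      return 0;
    }

Compiled for a P6-class x86 target, a self-compare like this is the same idea
the new lowering expresses directly: one unordered compare of the operand
against itself, then setp into the result register.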