Simplify all the casting business and get rid of isSigned().

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@32731 91177308-0d34-0410-b5e6-96231b3b80d8
Reid Spencer
2006-12-21 08:28:31 +00:00
parent 06e3f4ee7e
commit eefef64e59


@@ -38,8 +38,7 @@ static Function *EnsureFunctionExists(Module &M, const char *Name,
 /// prototype doesn't match the arguments we expect to pass in.
 template <class ArgIt>
 static CallInst *ReplaceCallWith(const char *NewFn, CallInst *CI,
-                                 ArgIt ArgBegin, ArgIt ArgEnd,
-                                 const unsigned *castOpcodes,
+                                 ArgIt ArgBegin, ArgIt ArgEnd, bool isSigned,
                                  const Type *RetTy, Function *&FCache) {
   if (!FCache) {
     // If we haven't already looked up this function, check to see if the
@@ -62,14 +61,9 @@ static CallInst *ReplaceCallWith(const char *NewFn, CallInst *CI,
   for (ArgIt I = ArgBegin; I != ArgEnd && ArgNo != FT->getNumParams();
        ++I, ++ArgNo) {
     Value *Arg = *I;
-    if (Arg->getType() != FT->getParamType(ArgNo))
-      if (castOpcodes[ArgNo])
-        Arg = CastInst::create(Instruction::CastOps(castOpcodes[ArgNo]),
-          Arg, FT->getParamType(ArgNo), Arg->getName(), CI);
-      else {
-        Instruction::CastOps opcode = CastInst::getCastOpcode(Arg,
-          Arg->getType()->isSigned(), FT->getParamType(ArgNo),
-          FT->getParamType(ArgNo)->isSigned());
+    if (Arg->getType() != FT->getParamType(ArgNo)) {
+      Instruction::CastOps opcode = CastInst::getCastOpcode(Arg, isSigned,
+        FT->getParamType(ArgNo), isSigned);
       Arg = CastInst::create(opcode, Arg, FT->getParamType(ArgNo),
                              Arg->getName(), CI);
     }
@@ -85,9 +79,8 @@ static CallInst *ReplaceCallWith(const char *NewFn, CallInst *CI,
   if (!CI->use_empty()) {
     Value *V = NewCI;
     if (CI->getType() != NewCI->getType()) {
-      Instruction::CastOps opcode = CastInst::getCastOpcode(NewCI,
-        NewCI->getType()->isSigned(), CI->getType(),
-        CI->getType()->isSigned());
+      Instruction::CastOps opcode = CastInst::getCastOpcode(NewCI, isSigned,
+        CI->getType(), isSigned);
       V = CastInst::create(opcode, NewCI, CI->getType(), Name, CI);
     }
     CI->replaceAllUsesWith(V);
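
For reference, the decision CastInst::getCastOpcode makes for an integer operand here can be sketched in isolation: it looks only at the two bit widths and the single isSigned flag. The sketch below is a standalone illustration with assumed names (CastOp, pickIntCastOp), not the LLVM implementation:

  #include <cassert>
  #include <cstdio>

  enum class CastOp { BitCast, Trunc, ZExt, SExt };

  // Pick how to convert an integer of SrcBits to one of DstBits when the
  // caller says the value should be treated as signed or unsigned.
  static CastOp pickIntCastOp(unsigned SrcBits, unsigned DstBits, bool isSigned) {
    assert(SrcBits && DstBits && "widths must be non-zero");
    if (SrcBits == DstBits) return CastOp::BitCast; // same width: no-op cast
    if (SrcBits > DstBits)  return CastOp::Trunc;   // narrowing always truncates
    return isSigned ? CastOp::SExt : CastOp::ZExt;  // widening follows the flag
  }

  int main() {
    // An i64 length operand passed to a 32-bit size_t parameter: Trunc.
    std::printf("%d\n", static_cast<int>(pickIntCastOp(64, 32, /*isSigned=*/false)));
    // An i8 value widened to a 32-bit parameter with isSigned=true: SExt.
    std::printf("%d\n", static_cast<int>(pickIntCastOp(8, 32, /*isSigned=*/true)));
    return 0;
  }

Because the cast now falls out of the callee prototype's parameter width plus one flag, the hand-maintained per-argument opcode tables in the lowering cases below (and their per-target FIXMEs) can be deleted.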
@@ -400,75 +393,38 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
     break;    // Simply strip out debugging intrinsics
   case Intrinsic::memcpy_i32: {
-    // The memcpy intrinsic take an extra alignment argument that the memcpy
-    // libc function does not.
-    static unsigned opcodes[] =
-      { Instruction::BitCast, Instruction::BitCast, Instruction::BitCast };
-    // FIXME:
-    // if (target_is_64_bit) opcodes[2] = Instruction::ZExt;
-    // else opcodes[2] = Instruction::BitCast;
     static Function *MemcpyFCache = 0;
     ReplaceCallWith("memcpy", CI, CI->op_begin()+1, CI->op_end()-1,
-                    opcodes, (*(CI->op_begin()+1))->getType(), MemcpyFCache);
+                    false, (*(CI->op_begin()+1))->getType(), MemcpyFCache);
     break;
   }
   case Intrinsic::memcpy_i64: {
-    static unsigned opcodes[] =
-      { Instruction::BitCast, Instruction::BitCast, Instruction::Trunc };
-    // FIXME:
-    // if (target_is_64_bit) opcodes[2] = Instruction::BitCast;
-    // else opcodes[2] = Instruction::Trunc;
     static Function *MemcpyFCache = 0;
     ReplaceCallWith("memcpy", CI, CI->op_begin()+1, CI->op_end()-1,
-                    opcodes, (*(CI->op_begin()+1))->getType(), MemcpyFCache);
+                    false, (*(CI->op_begin()+1))->getType(), MemcpyFCache);
     break;
   }
   case Intrinsic::memmove_i32: {
-    // The memmove intrinsic take an extra alignment argument that the memmove
-    // libc function does not.
-    static unsigned opcodes[] =
-      { Instruction::BitCast, Instruction::BitCast, Instruction::BitCast };
-    // FIXME:
-    // if (target_is_64_bit) opcodes[2] = Instruction::ZExt;
-    // else opcodes[2] = Instruction::BitCast;
     static Function *MemmoveFCache = 0;
     ReplaceCallWith("memmove", CI, CI->op_begin()+1, CI->op_end()-1,
-                    opcodes, (*(CI->op_begin()+1))->getType(), MemmoveFCache);
+                    false, (*(CI->op_begin()+1))->getType(), MemmoveFCache);
     break;
   }
   case Intrinsic::memmove_i64: {
-    // The memmove intrinsic take an extra alignment argument that the memmove
-    // libc function does not.
-    static const unsigned opcodes[] =
-      { Instruction::BitCast, Instruction::BitCast, Instruction::Trunc };
-    // if (target_is_64_bit) opcodes[2] = Instruction::BitCast;
-    // else opcodes[2] = Instruction::Trunc;
     static Function *MemmoveFCache = 0;
     ReplaceCallWith("memmove", CI, CI->op_begin()+1, CI->op_end()-1,
-                    opcodes, (*(CI->op_begin()+1))->getType(), MemmoveFCache);
+                    false, (*(CI->op_begin()+1))->getType(), MemmoveFCache);
     break;
   }
   case Intrinsic::memset_i32: {
-    // The memset intrinsic take an extra alignment argument that the memset
-    // libc function does not.
-    static const unsigned opcodes[] =
-      { Instruction::BitCast, Instruction::ZExt, Instruction::ZExt, 0 };
-    // if (target_is_64_bit) opcodes[2] = Instruction::BitCast;
-    // else opcodes[2] = Instruction::ZExt;
     static Function *MemsetFCache = 0;
     ReplaceCallWith("memset", CI, CI->op_begin()+1, CI->op_end()-1,
-                    opcodes, (*(CI->op_begin()+1))->getType(), MemsetFCache);
+                    true, (*(CI->op_begin()+1))->getType(), MemsetFCache);
   }
   case Intrinsic::memset_i64: {
-    // The memset intrinsic take an extra alignment argument that the memset
-    // libc function does not.
-    static const unsigned opcodes[] =
-      { Instruction::BitCast, Instruction::ZExt, Instruction::Trunc, 0 };
-    // if (target_is_64_bit) opcodes[2] = Instruction::BitCast;
-    // else opcodes[2] = Instruction::Trunc;
     static Function *MemsetFCache = 0;
     ReplaceCallWith("memset", CI, CI->op_begin()+1, CI->op_end()-1,
-                    opcodes, (*(CI->op_begin()+1))->getType(), MemsetFCache);
+                    true, (*(CI->op_begin()+1))->getType(), MemsetFCache);
     break;
   }
   case Intrinsic::isunordered_f32:
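
The memcpy, memmove, and memset cases above now differ only in the flag they pass: false for memcpy/memmove, whose forwarded operands are pointers and a length, and true for memset, presumably because the libc prototype takes the fill value as a plain int. The flag only affects operands that must be widened; a minimal standalone sketch of that difference (illustrative values, not the lowering code itself):

  #include <cstdint>
  #include <cstdio>

  int main() {
    // An i8 fill value widened to memset's 'int' parameter the two ways the
    // isSigned flag allows; same-width and narrowing casts ignore the flag.
    std::uint8_t fill = 0x80;
    int sext = static_cast<std::int8_t>(fill);  // sign-extend: -128
    int zext = fill;                            // zero-extend:  128
    std::printf("sext=%d zext=%d\n", sext, zext);
    return 0;
  }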
@@ -484,17 +440,15 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
     break;
   }
   case Intrinsic::sqrt_f32: {
-    static const unsigned opcodes[] = { 0 };
     static Function *sqrtfFCache = 0;
     ReplaceCallWith("sqrtf", CI, CI->op_begin()+1, CI->op_end(),
-                    opcodes, Type::FloatTy, sqrtfFCache);
+                    false, Type::FloatTy, sqrtfFCache);
     break;
   }
   case Intrinsic::sqrt_f64: {
-    static const unsigned opcodes[] = { 0 };
     static Function *sqrtFCache = 0;
     ReplaceCallWith("sqrt", CI, CI->op_begin()+1, CI->op_end(),
-                    opcodes, Type::DoubleTy, sqrtFCache);
+                    false, Type::DoubleTy, sqrtFCache);
     break;
   }
   }