Mirror of https://github.com/c64scene-ar/llvm-6502.git (synced 2025-01-12 17:32:19 +00:00)
reapply Sanjiv's patch to genericize memcpy/memset/memmove to take an
arbitrary integer width for the count.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@59823 91177308-0d34-0410-b5e6-96231b3b80d8
parent dc770929cb
commit 824b958e6f
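For orientation before the diff: after this change llvm.memcpy, llvm.memmove, and llvm.memset are overloaded on the type of their count operand, and a caller names the desired width by passing a type list to Intrinsic::getDeclaration, the same form the updated code below uses. Here is a minimal sketch under that assumption; the helper name getMemCpyForWidth is hypothetical and not part of the commit.

// Hypothetical helper (not from this commit): request llvm.memcpy with a
// caller-chosen count type instead of hard-coding memcpy_i32 / memcpy_i64.
#include "llvm/Intrinsics.h"
#include "llvm/Module.h"
#include "llvm/Type.h"

using namespace llvm;

static Function *getMemCpyForWidth(Module *M, const Type *LenTy) {
  // The overload is resolved by the count operand's type; e.g. passing
  // Type::Int16Ty here requests the llvm.memcpy.i16 variant.
  const Type *Tys[1] = { LenTy };
  return Intrinsic::getDeclaration(M, Intrinsic::memcpy, Tys, 1);
}

The BrainF, MemCpyOpt, LibCallOptimization, and InlineFunction hunks below all make exactly this switch: a width-specific intrinsic ID plus a bare getDeclaration call becomes the generic ID plus an explicit type list.
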
@@ -5070,7 +5070,13 @@ for more efficient code generation.
 <div class="doc_text">
 
 <h5>Syntax:</h5>
+<p>This is an overloaded intrinsic. You can use llvm.memcpy on any integer bit
+width. Not all targets support all bit widths however.</p>
 <pre>
+  declare void @llvm.memcpy.i8(i8 * <dest>, i8 * <src>,
+                               i8 <len>, i32 <align>)
+  declare void @llvm.memcpy.i16(i8 * <dest>, i8 * <src>,
+                                i16 <len>, i32 <align>)
   declare void @llvm.memcpy.i32(i8 * <dest>, i8 * <src>,
                                 i32 <len>, i32 <align>)
   declare void @llvm.memcpy.i64(i8 * <dest>, i8 * <src>,
@@ -5124,7 +5130,13 @@ be set to 0 or 1.
 <div class="doc_text">
 
 <h5>Syntax:</h5>
+<p>This is an overloaded intrinsic. You can use llvm.memmove on any integer bit
+width. Not all targets support all bit widths however.</p>
 <pre>
+  declare void @llvm.memmove.i8(i8 * <dest>, i8 * <src>,
+                                i8 <len>, i32 <align>)
+  declare void @llvm.memmove.i16(i8 * <dest>, i8 * <src>,
+                                 i16 <len>, i32 <align>)
   declare void @llvm.memmove.i32(i8 * <dest>, i8 * <src>,
                                  i32 <len>, i32 <align>)
   declare void @llvm.memmove.i64(i8 * <dest>, i8 * <src>,
@@ -5179,7 +5191,13 @@ be set to 0 or 1.
 <div class="doc_text">
 
 <h5>Syntax:</h5>
+<p>This is an overloaded intrinsic. You can use llvm.memset on any integer bit
+width. Not all targets support all bit widths however.</p>
 <pre>
+  declare void @llvm.memset.i8(i8 * <dest>, i8 <val>,
+                               i8 <len>, i32 <align>)
+  declare void @llvm.memset.i16(i8 * <dest>, i8 <val>,
+                                i16 <len>, i32 <align>)
   declare void @llvm.memset.i32(i8 * <dest>, i8 <val>,
                                 i32 <len>, i32 <align>)
   declare void @llvm.memset.i64(i8 * <dest>, i8 <val>,

@@ -53,7 +53,9 @@ void BrainF::header() {
   //Function prototypes
 
   //declare void @llvm.memset.i32(i8 *, i8, i32, i32)
-  Function *memset_func = Intrinsic::getDeclaration(module, Intrinsic::memset_i32);
+  const Type *Tys[] = { Type::Int32Ty };
+  Function *memset_func = Intrinsic::getDeclaration(module, Intrinsic::memset,
+                                                    Tys, 1);
 
   //declare i32 @getchar()
   getchar_func = cast<Function>(module->

@@ -208,12 +208,9 @@ namespace llvm {
     static inline bool classof(const MemIntrinsic *) { return true; }
     static inline bool classof(const IntrinsicInst *I) {
       switch (I->getIntrinsicID()) {
-      case Intrinsic::memcpy_i32:
-      case Intrinsic::memcpy_i64:
-      case Intrinsic::memmove_i32:
-      case Intrinsic::memmove_i64:
-      case Intrinsic::memset_i32:
-      case Intrinsic::memset_i64:
+      case Intrinsic::memcpy:
+      case Intrinsic::memmove:
+      case Intrinsic::memset:
         return true;
       default: return false;
       }
@@ -246,8 +243,7 @@ namespace llvm {
     // Methods for support type inquiry through isa, cast, and dyn_cast:
     static inline bool classof(const MemCpyInst *) { return true; }
     static inline bool classof(const IntrinsicInst *I) {
-      return I->getIntrinsicID() == Intrinsic::memcpy_i32 ||
-             I->getIntrinsicID() == Intrinsic::memcpy_i64;
+      return I->getIntrinsicID() == Intrinsic::memcpy;
     }
     static inline bool classof(const Value *V) {
       return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
@@ -275,8 +271,7 @@ namespace llvm {
     // Methods for support type inquiry through isa, cast, and dyn_cast:
     static inline bool classof(const MemMoveInst *) { return true; }
     static inline bool classof(const IntrinsicInst *I) {
-      return I->getIntrinsicID() == Intrinsic::memmove_i32 ||
-             I->getIntrinsicID() == Intrinsic::memmove_i64;
+      return I->getIntrinsicID() == Intrinsic::memmove;
     }
     static inline bool classof(const Value *V) {
       return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
@@ -299,8 +294,7 @@ namespace llvm {
     // Methods for support type inquiry through isa, cast, and dyn_cast:
     static inline bool classof(const MemSetInst *) { return true; }
     static inline bool classof(const IntrinsicInst *I) {
-      return I->getIntrinsicID() == Intrinsic::memset_i32 ||
-             I->getIntrinsicID() == Intrinsic::memset_i64;
+      return I->getIntrinsicID() == Intrinsic::memset;
     }
     static inline bool classof(const Value *V) {
       return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));

@@ -193,33 +193,15 @@ def int_stackprotector : Intrinsic<[llvm_void_ty],
 //
 
 let Properties = [IntrWriteArgMem] in {
-  def int_memcpy_i16 : Intrinsic<[llvm_void_ty],
-                                 [llvm_ptr_ty, llvm_ptr_ty,
-                                  llvm_i16_ty, llvm_i16_ty]>;
-  def int_memcpy_i32 : Intrinsic<[llvm_void_ty],
-                                 [llvm_ptr_ty, llvm_ptr_ty,
-                                  llvm_i32_ty, llvm_i32_ty]>;
-  def int_memcpy_i64 : Intrinsic<[llvm_void_ty],
-                                 [llvm_ptr_ty, llvm_ptr_ty,
-                                  llvm_i64_ty, llvm_i32_ty]>;
-  def int_memmove_i16 : Intrinsic<[llvm_void_ty],
-                                  [llvm_ptr_ty, llvm_ptr_ty,
-                                   llvm_i16_ty, llvm_i16_ty]>;
-  def int_memmove_i32 : Intrinsic<[llvm_void_ty],
-                                  [llvm_ptr_ty, llvm_ptr_ty,
-                                   llvm_i32_ty, llvm_i32_ty]>;
-  def int_memmove_i64 : Intrinsic<[llvm_void_ty],
-                                  [llvm_ptr_ty, llvm_ptr_ty,
-                                   llvm_i64_ty, llvm_i32_ty]>;
-  def int_memset_i16 : Intrinsic<[llvm_void_ty],
-                                 [llvm_ptr_ty, llvm_i8_ty,
-                                  llvm_i16_ty, llvm_i16_ty]>;
-  def int_memset_i32 : Intrinsic<[llvm_void_ty],
-                                 [llvm_ptr_ty, llvm_i8_ty,
-                                  llvm_i32_ty, llvm_i32_ty]>;
-  def int_memset_i64 : Intrinsic<[llvm_void_ty],
-                                 [llvm_ptr_ty, llvm_i8_ty,
-                                  llvm_i64_ty, llvm_i32_ty]>;
+  def int_memcpy : Intrinsic<[llvm_void_ty],
+                             [llvm_ptr_ty, llvm_ptr_ty,
+                              llvm_anyint_ty, llvm_i32_ty]>;
+  def int_memmove : Intrinsic<[llvm_void_ty],
+                              [llvm_ptr_ty, llvm_ptr_ty,
+                               llvm_anyint_ty, llvm_i32_ty]>;
+  def int_memset : Intrinsic<[llvm_void_ty],
+                             [llvm_ptr_ty, llvm_i8_ty,
+                              llvm_anyint_ty, llvm_i32_ty]>;
 }
 
 // These functions do not actually read memory, but they are sensitive to the

@@ -903,8 +903,7 @@ bool Andersens::AddConstraintsForExternalCall(CallSite CS, Function *F) {
       F->getName() == "atol" || F->getName() == "atoll" ||
       F->getName() == "remove" || F->getName() == "unlink" ||
       F->getName() == "rename" || F->getName() == "memcmp" ||
-      F->getName() == "llvm.memset.i32" ||
-      F->getName() == "llvm.memset.i64" ||
+      F->getName() == "llvm.memset" ||
       F->getName() == "strcmp" || F->getName() == "strncmp" ||
       F->getName() == "execl" || F->getName() == "execlp" ||
       F->getName() == "execle" || F->getName() == "execv" ||
@@ -942,8 +941,8 @@ bool Andersens::AddConstraintsForExternalCall(CallSite CS, Function *F) {
 
 
   // These functions do induce points-to edges.
-  if (F->getName() == "llvm.memcpy.i32" || F->getName() == "llvm.memcpy.i64" ||
-      F->getName() == "llvm.memmove.i32" ||F->getName() == "llvm.memmove.i64" ||
+  if (F->getName() == "llvm.memcpy" ||
+      F->getName() == "llvm.memmove" ||
       F->getName() == "memmove") {
 
     // *Dest = *Src, which requires an artificial graph node to represent the

@@ -98,22 +98,19 @@ void IntrinsicLowering::AddPrototypes(Module &M) {
         EnsureFunctionExists(M, "abort", I->arg_end(), I->arg_end(),
                              Type::VoidTy);
         break;
-      case Intrinsic::memcpy_i32:
-      case Intrinsic::memcpy_i64:
+      case Intrinsic::memcpy:
         M.getOrInsertFunction("memcpy", PointerType::getUnqual(Type::Int8Ty),
                               PointerType::getUnqual(Type::Int8Ty),
                               PointerType::getUnqual(Type::Int8Ty),
                               TD.getIntPtrType(), (Type *)0);
         break;
-      case Intrinsic::memmove_i32:
-      case Intrinsic::memmove_i64:
+      case Intrinsic::memmove:
        M.getOrInsertFunction("memmove", PointerType::getUnqual(Type::Int8Ty),
                              PointerType::getUnqual(Type::Int8Ty),
                              PointerType::getUnqual(Type::Int8Ty),
                              TD.getIntPtrType(), (Type *)0);
        break;
-      case Intrinsic::memset_i32:
-      case Intrinsic::memset_i64:
+      case Intrinsic::memset:
        M.getOrInsertFunction("memset", PointerType::getUnqual(Type::Int8Ty),
                              PointerType::getUnqual(Type::Int8Ty),
                              Type::Int32Ty,
@@ -784,8 +781,7 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
   case Intrinsic::var_annotation:
     break;   // Strip out annotate intrinsic
 
-  case Intrinsic::memcpy_i32:
-  case Intrinsic::memcpy_i64: {
+  case Intrinsic::memcpy: {
     static Constant *MemcpyFCache = 0;
     Value *Size = CI->getOperand(3);
     const Type *IntPtr = TD.getIntPtrType();
@@ -803,8 +799,7 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
                     MemcpyFCache);
     break;
   }
-  case Intrinsic::memmove_i32:
-  case Intrinsic::memmove_i64: {
+  case Intrinsic::memmove: {
     static Constant *MemmoveFCache = 0;
     Value *Size = CI->getOperand(3);
     const Type *IntPtr = TD.getIntPtrType();
@@ -822,8 +817,7 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
                     MemmoveFCache);
     break;
   }
-  case Intrinsic::memset_i32:
-  case Intrinsic::memset_i64: {
+  case Intrinsic::memset: {
     static Constant *MemsetFCache = 0;
     Value *Size = CI->getOperand(3);
     const Type *IntPtr = TD.getIntPtrType();

@@ -3663,8 +3663,7 @@ SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
   case Intrinsic::longjmp:
     return "_longjmp"+!TLI.usesUnderscoreLongJmp();
     break;
-  case Intrinsic::memcpy_i32:
-  case Intrinsic::memcpy_i64: {
+  case Intrinsic::memcpy: {
     SDValue Op1 = getValue(I.getOperand(1));
     SDValue Op2 = getValue(I.getOperand(2));
     SDValue Op3 = getValue(I.getOperand(3));
@@ -3673,8 +3672,7 @@ SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
                               I.getOperand(1), 0, I.getOperand(2), 0));
     return 0;
   }
-  case Intrinsic::memset_i32:
-  case Intrinsic::memset_i64: {
+  case Intrinsic::memset: {
     SDValue Op1 = getValue(I.getOperand(1));
     SDValue Op2 = getValue(I.getOperand(2));
     SDValue Op3 = getValue(I.getOperand(3));
@@ -3683,8 +3681,7 @@ SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
                               I.getOperand(1), 0));
     return 0;
   }
-  case Intrinsic::memmove_i32:
-  case Intrinsic::memmove_i64: {
+  case Intrinsic::memmove: {
     SDValue Op1 = getValue(I.getOperand(1));
     SDValue Op2 = getValue(I.getOperand(2));
     SDValue Op3 = getValue(I.getOperand(3));

@@ -9200,12 +9200,11 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
       if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
         if (GVSrc->isConstant()) {
           Module *M = CI.getParent()->getParent()->getParent();
-          Intrinsic::ID MemCpyID;
-          if (CI.getOperand(3)->getType() == Type::Int32Ty)
-            MemCpyID = Intrinsic::memcpy_i32;
-          else
-            MemCpyID = Intrinsic::memcpy_i64;
-          CI.setOperand(0, Intrinsic::getDeclaration(M, MemCpyID));
+          Intrinsic::ID MemCpyID = Intrinsic::memcpy;
+          const Type *Tys[1];
+          Tys[0] = CI.getOperand(3)->getType();
+          CI.setOperand(0,
+                        Intrinsic::getDeclaration(M, MemCpyID, Tys, 1));
           Changed = true;
         }
 

@@ -427,9 +427,12 @@ bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator& BBI) {
     // instruction needed by the start of the block.
     BasicBlock::iterator InsertPt = BI;
 
-    if (MemSetF == 0)
+    if (MemSetF == 0) {
+      const Type *Tys[] = {Type::Int64Ty};
       MemSetF = Intrinsic::getDeclaration(SI->getParent()->getParent()
-                                          ->getParent(), Intrinsic::memset_i64);
+                                          ->getParent(), Intrinsic::memset,
+                                          Tys, 1);
+    }
 
     // Get the starting pointer of the block.
     StartPtr = Range.StartPtr;
@@ -671,9 +674,11 @@ bool MemCpyOpt::processMemCpy(MemCpyInst* M) {
     return false;
 
   // If all checks passed, then we can transform these memcpy's
+  const Type *Tys[1];
+  Tys[0] = M->getLength()->getType();
   Function* MemCpyFun = Intrinsic::getDeclaration(
                                  M->getParent()->getParent()->getParent(),
-                                 M->getIntrinsicID());
+                                 M->getIntrinsicID(), Tys, 1);
 
   std::vector<Value*> args;
   args.push_back(M->getRawDest());

@@ -130,9 +130,10 @@ Value *LibCallOptimization::EmitStrLen(Value *Ptr, IRBuilder<> &B) {
 Value *LibCallOptimization::EmitMemCpy(Value *Dst, Value *Src, Value *Len,
                                        unsigned Align, IRBuilder<> &B) {
   Module *M = Caller->getParent();
-  Intrinsic::ID IID = Len->getType() == Type::Int32Ty ?
-                      Intrinsic::memcpy_i32 : Intrinsic::memcpy_i64;
-  Value *MemCpy = Intrinsic::getDeclaration(M, IID);
+  Intrinsic::ID IID = Intrinsic::memcpy;
+  const Type *Tys[1];
+  Tys[0] = Len->getType();
+  Value *MemCpy = Intrinsic::getDeclaration(M, IID, Tys, 1);
   return B.CreateCall4(MemCpy, CastToCStr(Dst, B), CastToCStr(Src, B), Len,
                        ConstantInt::get(Type::Int32Ty, Align));
 }

@@ -257,8 +257,10 @@ bool llvm::InlineFunction(CallSite CS, CallGraph *CG, const TargetData *TD) {
       Value *NewAlloca = new AllocaInst(AggTy, 0, Align, I->getName(),
                                         Caller->begin()->begin());
       // Emit a memcpy.
+      const Type *Tys[] = { Type::Int64Ty };
       Function *MemCpyFn = Intrinsic::getDeclaration(Caller->getParent(),
-                                                     Intrinsic::memcpy_i64);
+                                                     Intrinsic::memcpy,
+                                                     Tys, 1);
       Value *DestCast = new BitCastInst(NewAlloca, VoidPtrTy, "tmp", TheCall);
       Value *SrcCast = new BitCastInst(*AI, VoidPtrTy, "tmp", TheCall);
 

@@ -1336,12 +1336,9 @@ void Verifier::visitIntrinsicFunctionCall(Intrinsic::ID ID, CallInst &CI) {
   switch (ID) {
   default:
     break;
-  case Intrinsic::memcpy_i32:
-  case Intrinsic::memcpy_i64:
-  case Intrinsic::memmove_i32:
-  case Intrinsic::memmove_i64:
-  case Intrinsic::memset_i32:
-  case Intrinsic::memset_i64:
+  case Intrinsic::memcpy:
+  case Intrinsic::memmove:
+  case Intrinsic::memset:
     Assert1(isa<ConstantInt>(CI.getOperand(4)),
             "alignment argument of memory intrinsics must be a constant int",
             &CI);