[SROA] Split the alignment computation completely for the memcpy rewriting to work independently for the slice side and the other side.

This allows us to compute the minimum of the two only when we actually rewrite to a memcpy that needs to take the minimum, and to preserve the higher alignment for one side or the other when rewriting to loads and stores. This fix was inspired by seeing the result of some refactoring that makes addrspace handling better.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@202242 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
parent 07f95b3218
commit 38e90e3de1
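As context for the diff below: after this change the slice side of the transfer keeps getSliceAlign() and the other side keeps its own offset-adjusted alignment; only the path that still emits a memcpy takes the minimum of the two. The following is a minimal standalone sketch of that rule in plain C++ (no LLVM headers): minAlign here merely mirrors what llvm::MinAlign computes, and the concrete alignment values are illustrative, chosen to match the test24 scenario added below.

// Minimal standalone sketch of the alignment rule adopted by this commit.
// minAlign mirrors llvm::MinAlign: the largest power of two dividing both.
#include <cstdint>
#include <cstdio>

static uint64_t minAlign(uint64_t A, uint64_t B) {
  // Lowest set bit of (A | B) == greatest power of two dividing both values.
  return (A | B) & (~(A | B) + 1);
}

int main() {
  uint64_t SliceAlign = 16; // alignment known for the rewritten alloca slice
  uint64_t OtherAlign = 1;  // alignment of the other side of the transfer

  // Rewriting to a memcpy: one call constrains both operands, so only here
  // is the conservative minimum of the two alignments required.
  std::printf("memcpy align: %llu\n",
              (unsigned long long)minAlign(SliceAlign, OtherAlign)); // 1

  // Rewriting to a load + store: each instruction keeps its own side's
  // alignment, so the slice side is not pessimized by the other side.
  std::printf("load align:   %llu\n", (unsigned long long)OtherAlign); // 1
  std::printf("store align:  %llu\n", (unsigned long long)SliceAlign); // 16
  return 0;
}

With these values, the old single MinAlignment computation would have clamped both sides to align 1, whereas the split computation lets the store to the 16-byte-aligned slice keep align 16, which is exactly what the new CHECK lines in test24 expect.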
lib/Transforms/Scalar/SROA.cpp

@@ -2494,10 +2494,7 @@ private:
     assert((IsDest && II.getRawDest() == OldPtr) ||
            (!IsDest && II.getRawSource() == OldPtr));
 
-    unsigned MinAlignment = II.getAlignment();
-    if (MinAlignment == 0)
-      MinAlignment = 1; // Fix the '0' alignment used by memcpy and memmove.
-    MinAlignment = MinAlign(MinAlignment, getSliceAlign());
+    unsigned SliceAlign = getSliceAlign();
 
     // For unsplit intrinsics, we simply modify the source and destination
     // pointers in place. This isn't just an optimization, it is a matter of
@@ -2513,9 +2510,10 @@ private:
     else
       II.setSource(AdjustedPtr);
 
-    if (II.getAlignment() > MinAlignment) {
+    if (II.getAlignment() > SliceAlign) {
       Type *CstTy = II.getAlignmentCst()->getType();
-      II.setAlignment(ConstantInt::get(CstTy, MinAlignment));
+      II.setAlignment(
+          ConstantInt::get(CstTy, MinAlign(II.getAlignment(), SliceAlign)));
     }
 
     DEBUG(dbgs() << "          to: " << II << "\n");
@@ -2564,10 +2562,8 @@ private:
     // Compute the relative offset for the other pointer within the transfer.
     unsigned IntPtrWidth = DL.getPointerSizeInBits();
     APInt OtherOffset(IntPtrWidth, NewBeginOffset - BeginOffset);
-
-    // Factor the offset other pointer's alignment into the requinerd minimum.
-    MinAlignment =
-        MinAlign(MinAlignment, OtherOffset.zextOrTrunc(64).getZExtValue());
+    unsigned OtherAlign = MinAlign(II.getAlignment() ? II.getAlignment() : 1,
+                                   OtherOffset.zextOrTrunc(64).getZExtValue());
 
     if (EmitMemCpy) {
       Type *OtherPtrTy = OtherPtr->getType();
@@ -2581,9 +2577,9 @@ private:
       Type *SizeTy = II.getLength()->getType();
       Constant *Size = ConstantInt::get(SizeTy, NewEndOffset - NewBeginOffset);
 
-      CallInst *New = IRB.CreateMemCpy(IsDest ? OurPtr : OtherPtr,
-                                       IsDest ? OtherPtr : OurPtr,
-                                       Size, MinAlignment, II.isVolatile());
+      CallInst *New = IRB.CreateMemCpy(
+          IsDest ? OurPtr : OtherPtr, IsDest ? OtherPtr : OurPtr, Size,
+          MinAlign(SliceAlign, OtherAlign), II.isVolatile());
       (void)New;
       DEBUG(dbgs() << "          to: " << *New << "\n");
       return false;
@@ -2612,9 +2608,13 @@ private:
 
     Value *SrcPtr = getAdjustedPtr(IRB, DL, OtherPtr, OtherOffset, OtherPtrTy,
                                    OtherPtr->getName() + ".");
+    unsigned SrcAlign = OtherAlign;
     Value *DstPtr = &NewAI;
-    if (!IsDest)
+    unsigned DstAlign = SliceAlign;
+    if (!IsDest) {
       std::swap(SrcPtr, DstPtr);
+      std::swap(SrcAlign, DstAlign);
+    }
 
     Value *Src;
     if (VecTy && !IsWholeAlloca && !IsDest) {
@@ -2628,7 +2628,7 @@ private:
       uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
       Src = extractInteger(DL, IRB, Src, SubIntTy, Offset, "extract");
     } else {
-      Src = IRB.CreateAlignedLoad(SrcPtr, MinAlignment, II.isVolatile(),
+      Src = IRB.CreateAlignedLoad(SrcPtr, SrcAlign, II.isVolatile(),
                                   "copyload");
     }
 
@@ -2646,7 +2646,7 @@ private:
     }
 
     StoreInst *Store = cast<StoreInst>(
-        IRB.CreateAlignedStore(Src, DstPtr, MinAlignment, II.isVolatile()));
+        IRB.CreateAlignedStore(Src, DstPtr, DstAlign, II.isVolatile()));
     (void)Store;
     DEBUG(dbgs() << "          to: " << *Store << "\n");
     return !II.isVolatile();
test/Transforms/SROA/basictest.ll

@@ -1405,3 +1405,19 @@ entry:
   ret void
 }
 
+define void @test24(i8* %src, i8* %dst) {
+; CHECK-LABEL: @test24(
+; CHECK: alloca i64, align 16
+; CHECK: load volatile i64* %{{[^,]*}}, align 1
+; CHECK: store volatile i64 %{{[^,]*}}, i64* %{{[^,]*}}, align 16
+; CHECK: load volatile i64* %{{[^,]*}}, align 16
+; CHECK: store volatile i64 %{{[^,]*}}, i64* %{{[^,]*}}, align 1
+
+entry:
+  %a = alloca i64, align 16
+  %ptr = bitcast i64* %a to i8*
+  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %ptr, i8* %src, i32 8, i32 1, i1 true)
+  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %ptr, i32 8, i32 1, i1 true)
+  ret void
+}
+