diff --git a/lib/Transforms/Scalar/SROA.cpp b/lib/Transforms/Scalar/SROA.cpp
index 728ce997a97..af3a880cb90 100644
--- a/lib/Transforms/Scalar/SROA.cpp
+++ b/lib/Transforms/Scalar/SROA.cpp
@@ -582,7 +582,8 @@ private:
     P.Partitions.push_back(New);
   }
 
-  bool handleLoadOrStore(Type *Ty, Instruction &I, int64_t Offset) {
+  bool handleLoadOrStore(Type *Ty, Instruction &I, int64_t Offset,
+                         bool IsVolatile) {
     uint64_t Size = TD.getTypeStoreSize(Ty);
 
     // If this memory access can be shown to *statically* extend outside the
@@ -603,7 +604,14 @@ private:
       return true;
     }
 
-    insertUse(I, Offset, Size);
+    // We allow splitting of loads and stores where the type is an integer type
+    // and which cover the entire alloca. Such integer loads and stores
+    // often require decomposition into fine grained loads and stores.
+    bool IsSplittable = false;
+    if (IntegerType *ITy = dyn_cast<IntegerType>(Ty))
+      IsSplittable = !IsVolatile && ITy->getBitWidth() == AllocSize*8;
+
+    insertUse(I, Offset, Size, IsSplittable);
     return true;
   }
 
@@ -624,7 +632,7 @@ private:
   bool visitLoadInst(LoadInst &LI) {
     assert((!LI.isSimple() || LI.getType()->isSingleValueType()) &&
            "All simple FCA loads should have been pre-split");
-    return handleLoadOrStore(LI.getType(), LI, Offset);
+    return handleLoadOrStore(LI.getType(), LI, Offset, LI.isVolatile());
   }
 
   bool visitStoreInst(StoreInst &SI) {
@@ -634,7 +642,7 @@
 
     assert((!SI.isSimple() || ValOp->getType()->isSingleValueType()) &&
            "All simple FCA stores should have been pre-split");
-    return handleLoadOrStore(ValOp->getType(), SI, Offset);
+    return handleLoadOrStore(ValOp->getType(), SI, Offset, SI.isVolatile());
   }
 
@@ -1173,6 +1181,21 @@ Type *AllocaPartitioning::getCommonType(iterator I) const {
       UserTy = LI->getType();
     } else if (StoreInst *SI = dyn_cast<StoreInst>(UI->U->getUser())) {
       UserTy = SI->getValueOperand()->getType();
+    } else {
+      return 0; // Bail if we have weird uses.
+    }
+
+    if (IntegerType *ITy = dyn_cast<IntegerType>(UserTy)) {
+      // If the type is larger than the partition, skip it. We only encounter
+      // this for split integer operations where we want to use the type of the
+      // entity causing the split.
+      if (ITy->getBitWidth() > (I->EndOffset - I->BeginOffset)*8)
+        continue;
+
+      // If we have found an integer type use covering the alloca, use that
+      // regardless of the other types, as integers are often used for a "bucket
+      // of bits" type.
+      return ITy;
     }
 
     if (Ty && Ty != UserTy)
@@ -2138,6 +2161,14 @@ static bool isIntegerWideningViable(const DataLayout &TD,
   if (SizeInBits != TD.getTypeStoreSizeInBits(AllocaTy))
     return false;
 
+  // We need to ensure that an integer type with the appropriate bitwidth can
+  // be converted to the alloca type, whatever that is. We don't want to force
+  // the alloca itself to have an integer type if there is a more suitable one.
+  Type *IntTy = Type::getIntNTy(AllocaTy->getContext(), SizeInBits);
+  if (!canConvertValue(TD, AllocaTy, IntTy) ||
+      !canConvertValue(TD, IntTy, AllocaTy))
+    return false;
+
   uint64_t Size = TD.getTypeStoreSize(AllocaTy);
 
   // Check the uses to ensure the uses are (likely) promoteable integer uses.
@@ -2461,6 +2492,50 @@ private:
 
     if (VecTy)
       return rewriteVectorizedLoadInst(IRB, LI, OldOp);
+
+    uint64_t Size = EndOffset - BeginOffset;
+    if (Size < TD.getTypeStoreSize(LI.getType())) {
+      assert(!LI.isVolatile());
+      assert(LI.getType()->isIntegerTy() &&
+             "Only integer type loads and stores are split");
+      assert(LI.getType()->getIntegerBitWidth() ==
+             TD.getTypeStoreSizeInBits(LI.getType()) &&
+             "Non-byte-multiple bit width");
+      assert(LI.getType()->getIntegerBitWidth() ==
+             TD.getTypeSizeInBits(OldAI.getAllocatedType()) &&
+             "Only alloca-wide loads can be split and recomposed");
+      IntegerType *NarrowTy = Type::getIntNTy(LI.getContext(), Size * 8);
+      bool IsConvertable = (BeginOffset - NewAllocaBeginOffset == 0) &&
+                           canConvertValue(TD, NewAllocaTy, NarrowTy);
+      Value *V;
+      // Move the insertion point just past the load so that we can refer to it.
+      IRB.SetInsertPoint(llvm::next(BasicBlock::iterator(&LI)));
+      if (IsConvertable)
+        V = convertValue(TD, IRB,
+                         IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
+                                               getName(".load")),
+                         NarrowTy);
+      else
+        V = IRB.CreateAlignedLoad(
+          getAdjustedAllocaPtr(IRB, NarrowTy->getPointerTo()),
+          getPartitionTypeAlign(NarrowTy), getName(".load"));
+      // Create a placeholder value with the same type as LI to use as the
+      // basis for the new value. This allows us to replace the uses of LI with
+      // the computed value, and then replace the placeholder with LI, leaving
+      // LI only used for this computation.
+      Value *Placeholder
+        = IRB.CreateLoad(UndefValue::get(LI.getType()->getPointerTo()));
+      V = insertInteger(TD, IRB, Placeholder, V, BeginOffset,
+                        getName(".insert"));
+      LI.replaceAllUsesWith(V);
+      Placeholder->replaceAllUsesWith(&LI);
+      cast<Instruction>(Placeholder)->eraseFromParent();
+      if (Pass.DeadSplitInsts.insert(&LI))
+        Pass.DeadInsts.push_back(&LI);
+      DEBUG(dbgs() << " to: " << *V << "\n");
+      return IsConvertable;
+    }
+
     if (IntTy && LI.getType()->isIntegerTy())
       return rewriteIntegerLoad(IRB, LI);
 
@@ -2540,6 +2615,39 @@ private:
     if (VecTy)
       return rewriteVectorizedStoreInst(IRB, SI, OldOp);
     Type *ValueTy = SI.getValueOperand()->getType();
+
+    uint64_t Size = EndOffset - BeginOffset;
+    if (Size < TD.getTypeStoreSize(ValueTy)) {
+      assert(!SI.isVolatile());
+      assert(ValueTy->isIntegerTy() &&
+             "Only integer type loads and stores are split");
+      assert(ValueTy->getIntegerBitWidth() ==
+             TD.getTypeStoreSizeInBits(ValueTy) &&
+             "Non-byte-multiple bit width");
+      assert(ValueTy->getIntegerBitWidth() ==
+             TD.getTypeSizeInBits(OldAI.getAllocatedType()) &&
+             "Only alloca-wide stores can be split and recomposed");
+      IntegerType *NarrowTy = Type::getIntNTy(SI.getContext(), Size * 8);
+      Value *V = extractInteger(TD, IRB, SI.getValueOperand(), NarrowTy,
+                                BeginOffset, getName(".extract"));
+      StoreInst *NewSI;
+      bool IsConvertable = (BeginOffset - NewAllocaBeginOffset == 0) &&
+                           canConvertValue(TD, NarrowTy, NewAllocaTy);
+      if (IsConvertable)
+        NewSI = IRB.CreateAlignedStore(convertValue(TD, IRB, V, NewAllocaTy),
+                                       &NewAI, NewAI.getAlignment());
+      else
+        NewSI = IRB.CreateAlignedStore(
+          V, getAdjustedAllocaPtr(IRB, NarrowTy->getPointerTo()),
+          getPartitionTypeAlign(NarrowTy));
+      (void)NewSI;
+      if (Pass.DeadSplitInsts.insert(&SI))
+        Pass.DeadInsts.push_back(&SI);
+
+      DEBUG(dbgs() << " to: " << *NewSI << "\n");
+      return IsConvertable;
+    }
+
     if (IntTy && ValueTy->isIntegerTy())
       return rewriteIntegerStore(IRB, SI);
 
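The split-load and split-store paths above are built on SROA's existing extractInteger and insertInteger helpers, which this patch calls but does not modify. As a rough, illustrative sketch only (the helper name, the simplified signature, and the little-endian shift computation are assumptions of this sketch, not code from the patch; the real helpers also take the DataLayout and handle big-endian targets, which is what the big-endian.ll changes below exercise), inserting a narrow integer value into a wider one at a byte offset reduces to a zext, a shift into position, a mask that clears the destination bits, and an or:

// Illustrative sketch only -- not code from this patch. Assumes a
// little-endian layout; big-endian targets measure the shift from the
// opposite end of the wide integer.
#include "llvm/IR/IRBuilder.h"  // header path varies across LLVM versions

using namespace llvm;

static Value *insertIntegerSketch(IRBuilder<> &IRB, Value *Old, Value *V,
                                  uint64_t ByteOffset) {
  IntegerType *WideTy = cast<IntegerType>(Old->getType());
  IntegerType *NarrowTy = cast<IntegerType>(V->getType());

  // Widen the narrow value to the destination width.
  if (NarrowTy != WideTy)
    V = IRB.CreateZExt(V, WideTy, "ext");

  // Shift the widened value into its byte position.
  uint64_t ShAmt = 8 * ByteOffset;
  if (ShAmt)
    V = IRB.CreateShl(V, ShAmt, "shift");

  // Clear the destination bits in the old wide value, then merge the new
  // bits in. This produces the zext/shl/and/or chains the updated CHECK
  // lines in the tests below expect.
  APInt Mask = ~APInt::getLowBitsSet(WideTy->getBitWidth(),
                                     NarrowTy->getBitWidth()).shl(ShAmt);
  Old = IRB.CreateAnd(Old, Mask, "mask");
  return IRB.CreateOr(Old, V, "insert");
}

extractInteger is the mirror image: shift the wide value right by the same amount and truncate to the narrow type, which is the lshr/trunc pattern the rewritten CHECK lines in basictest.ll and big-endian.ll look for.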
@@ -3173,6 +3281,9 @@ static Type *getTypePartition(const DataLayout &TD, Type *Ty,
                               uint64_t Offset, uint64_t Size) {
   if (Offset == 0 && TD.getTypeAllocSize(Ty) == Size)
     return stripAggregateTypeWrapping(TD, Ty);
+  if (Offset > TD.getTypeAllocSize(Ty) ||
+      (TD.getTypeAllocSize(Ty) - Offset) < Size)
+    return 0;
 
   if (SequentialType *SeqTy = dyn_cast<SequentialType>(Ty)) {
     // We can't partition pointers...
@@ -3464,6 +3575,8 @@ void SROA::deleteDeadInstructions(SmallPtrSet<AllocaInst*, 4> &DeletedAllocas) {
     Instruction *I = DeadInsts.pop_back_val();
     DEBUG(dbgs() << "Deleting dead instruction: " << *I << "\n");
 
+    I->replaceAllUsesWith(UndefValue::get(I->getType()));
+
     for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI)
       if (Instruction *U = dyn_cast<Instruction>(*OI)) {
         // Zero out the operand and see if it becomes trivially dead.
diff --git a/test/Transforms/SROA/basictest.ll b/test/Transforms/SROA/basictest.ll
index 03120f7a327..110950f76a1 100644
--- a/test/Transforms/SROA/basictest.ll
+++ b/test/Transforms/SROA/basictest.ll
@@ -577,9 +577,17 @@ entry:
   %ai = load i24* %aiptr
 ; CHCEK-NOT: store
 ; CHCEK-NOT: load
-; CHECK: %[[mask0:.*]] = and i24 undef, -256
-; CHECK-NEXT: %[[mask1:.*]] = and i24 %[[mask0]], -65281
-; CHECK-NEXT: %[[mask2:.*]] = and i24 %[[mask1]], 65535
+; CHECK: %[[ext2:.*]] = zext i8 0 to i24
+; CHECK-NEXT: %[[shift2:.*]] = shl i24 %[[ext2]], 16
+; CHECK-NEXT: %[[mask2:.*]] = and i24 undef, 65535
+; CHECK-NEXT: %[[insert2:.*]] = or i24 %[[mask2]], %[[shift2]]
+; CHECK-NEXT: %[[ext1:.*]] = zext i8 0 to i24
+; CHECK-NEXT: %[[shift1:.*]] = shl i24 %[[ext1]], 8
+; CHECK-NEXT: %[[mask1:.*]] = and i24 %[[insert2]], -65281
+; CHECK-NEXT: %[[insert1:.*]] = or i24 %[[mask1]], %[[shift1]]
+; CHECK-NEXT: %[[ext0:.*]] = zext i8 0 to i24
+; CHECK-NEXT: %[[mask0:.*]] = and i24 %[[insert1]], -256
+; CHECK-NEXT: %[[insert0:.*]] = or i24 %[[mask0]], %[[ext0]]
 
   %biptr = bitcast [3 x i8]* %b to i24*
   store i24 %ai, i24* %biptr
@@ -591,10 +599,10 @@ entry:
   %b2 = load i8* %b2ptr
 ; CHCEK-NOT: store
 ; CHCEK-NOT: load
-; CHECK: %[[trunc0:.*]] = trunc i24 %[[mask2]] to i8
-; CHECK-NEXT: %[[shift1:.*]] = lshr i24 %[[mask2]], 8
+; CHECK: %[[trunc0:.*]] = trunc i24 %[[insert0]] to i8
+; CHECK-NEXT: %[[shift1:.*]] = lshr i24 %[[insert0]], 8
 ; CHECK-NEXT: %[[trunc1:.*]] = trunc i24 %[[shift1]] to i8
-; CHECK-NEXT: %[[shift2:.*]] = lshr i24 %[[mask2]], 16
+; CHECK-NEXT: %[[shift2:.*]] = lshr i24 %[[insert0]], 16
 ; CHECK-NEXT: %[[trunc2:.*]] = trunc i24 %[[shift2]] to i8
 
   %bsum0 = add i8 %b0, %b1
@@ -1064,6 +1072,49 @@ entry:
   ret void
 }
 
+define i64 @PR14059.2({ float, float }* %phi) {
+; Check that SROA can split up alloca-wide integer loads and stores where the
+; underlying alloca has smaller components that are accessed independently. This
+; shows up particularly with ABI lowering patterns coming out of Clang that rely
+; on the particular register placement of a single large integer return value.
+; CHECK: @PR14059.2
+
+entry:
+  %retval = alloca { float, float }, align 4
+  ; CHECK-NOT: alloca
+
+  %0 = bitcast { float, float }* %retval to i64*
+  store i64 0, i64* %0
+  ; CHECK-NOT: store
+
+  %phi.realp = getelementptr inbounds { float, float }* %phi, i32 0, i32 0
+  %phi.real = load float* %phi.realp
+  %phi.imagp = getelementptr inbounds { float, float }* %phi, i32 0, i32 1
+  %phi.imag = load float* %phi.imagp
+  ; CHECK: %[[realp:.*]] = getelementptr inbounds { float, float }* %phi, i32 0, i32 0
+  ; CHECK-NEXT: %[[real:.*]] = load float* %[[realp]]
+  ; CHECK-NEXT: %[[imagp:.*]] = getelementptr inbounds { float, float }* %phi, i32 0, i32 1
+  ; CHECK-NEXT: %[[imag:.*]] = load float* %[[imagp]]
+
+  %real = getelementptr inbounds { float, float }* %retval, i32 0, i32 0
+  %imag = getelementptr inbounds { float, float }* %retval, i32 0, i32 1
+  store float %phi.real, float* %real
+  store float %phi.imag, float* %imag
+  ; CHECK-NEXT: %[[imag_convert:.*]] = bitcast float %[[imag]] to i32
+  ; CHECK-NEXT: %[[imag_ext:.*]] = zext i32 %[[imag_convert]] to i64
+  ; CHECK-NEXT: %[[imag_shift:.*]] = shl i64 %[[imag_ext]], 32
+  ; CHECK-NEXT: %[[imag_mask:.*]] = and i64 undef, 4294967295
+  ; CHECK-NEXT: %[[imag_insert:.*]] = or i64 %[[imag_mask]], %[[imag_shift]]
+  ; CHECK-NEXT: %[[real_convert:.*]] = bitcast float %[[real]] to i32
+  ; CHECK-NEXT: %[[real_ext:.*]] = zext i32 %[[real_convert]] to i64
+  ; CHECK-NEXT: %[[real_mask:.*]] = and i64 %[[imag_insert]], -4294967296
+  ; CHECK-NEXT: %[[real_insert:.*]] = or i64 %[[real_mask]], %[[real_ext]]
+
+  %1 = load i64* %0, align 1
+  ret i64 %1
+  ; CHECK-NEXT: ret i64 %[[real_insert]]
+}
+
 define void @PR14105({ [16 x i8] }* %ptr) {
 ; Ensure that when rewriting the GEP index '-1' for this alloca we preserve is
 ; sign as negative. We use a volatile memcpy to ensure promotion never actually
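For background on the new PR14059.2 test above: the alloca-wide i64 store and load it checks are characteristic of what Clang emits for a small aggregate return value on ABIs that coerce the aggregate into a single wide integer. A hedged, illustrative C example of that kind of input follows; the function name is made up here and the exact IR depends on the target ABI and Clang version, so this is only the shape of the source, not a reproduction of the PR's test case.

/* Illustrative only. On an ABI that returns this struct as one wide integer,
 * the frontend typically materializes the result in a local alloca, stores
 * the two float fields separately, and then loads the whole slot back as an
 * i64 to return it -- the pattern PR14059.2 verifies SROA can now split. */
typedef struct {
  float re;
  float im;
} cfloat;

cfloat make_cfloat(const cfloat *phi) {
  cfloat retval;
  retval.re = phi->re;
  retval.im = phi->im;
  return retval;
}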
diff --git a/test/Transforms/SROA/big-endian.ll b/test/Transforms/SROA/big-endian.ll
index 532f8690cf9..ce82d1f30b5 100644
--- a/test/Transforms/SROA/big-endian.ll
+++ b/test/Transforms/SROA/big-endian.ll
@@ -26,9 +26,17 @@ entry:
   %ai = load i24* %aiptr
 ; CHCEK-NOT: store
 ; CHCEK-NOT: load
-; CHECK: %[[mask0:.*]] = and i24 undef, 65535
-; CHECK-NEXT: %[[mask1:.*]] = and i24 %[[mask0]], -65281
-; CHECK-NEXT: %[[mask2:.*]] = and i24 %[[mask1]], -256
+; CHECK: %[[ext2:.*]] = zext i8 0 to i24
+; CHECK-NEXT: %[[mask2:.*]] = and i24 undef, -256
+; CHECK-NEXT: %[[insert2:.*]] = or i24 %[[mask2]], %[[ext2]]
+; CHECK-NEXT: %[[ext1:.*]] = zext i8 0 to i24
+; CHECK-NEXT: %[[shift1:.*]] = shl i24 %[[ext1]], 8
+; CHECK-NEXT: %[[mask1:.*]] = and i24 %[[insert2]], -65281
+; CHECK-NEXT: %[[insert1:.*]] = or i24 %[[mask1]], %[[shift1]]
+; CHECK-NEXT: %[[ext0:.*]] = zext i8 0 to i24
+; CHECK-NEXT: %[[shift0:.*]] = shl i24 %[[ext0]], 16
+; CHECK-NEXT: %[[mask0:.*]] = and i24 %[[insert1]], 65535
+; CHECK-NEXT: %[[insert0:.*]] = or i24 %[[mask0]], %[[shift0]]
 
   %biptr = bitcast [3 x i8]* %b to i24*
   store i24 %ai, i24* %biptr
@@ -40,11 +48,11 @@ entry:
   %b2 = load i8* %b2ptr
 ; CHCEK-NOT: store
 ; CHCEK-NOT: load
-; CHECK: %[[shift0:.*]] = lshr i24 %[[mask2]], 16
+; CHECK: %[[shift0:.*]] = lshr i24 %[[insert0]], 16
 ; CHECK-NEXT: %[[trunc0:.*]] = trunc i24 %[[shift0]] to i8
-; CHECK-NEXT: %[[shift1:.*]] = lshr i24 %[[mask2]], 8
+; CHECK-NEXT: %[[shift1:.*]] = lshr i24 %[[insert0]], 8
 ; CHECK-NEXT: %[[trunc1:.*]] = trunc i24 %[[shift1]] to i8
-; CHECK-NEXT: %[[trunc2:.*]] = trunc i24 %[[mask2]] to i8
+; CHECK-NEXT: %[[trunc2:.*]] = trunc i24 %[[insert0]] to i8
 
   %bsum0 = add i8 %b0, %b1
   %bsum1 = add i8 %bsum0, %b2
@@ -74,27 +82,26 @@ entry:
 
   %a0i16ptr = bitcast i8* %a0ptr to i16*
   store i16 1, i16* %a0i16ptr
-; CHECK: %[[mask:.*]] = and i56 undef, 1099511627775
-; CHECK-NEXT: %[[or:.*]] = or i56 %[[mask]], 1099511627776
+; CHECK: %[[mask0:.*]] = and i16 1, -16
 
   %a1i4ptr = bitcast i8* %a1ptr to i4*
   store i4 1, i4* %a1i4ptr
-; CHECK: %[[mask:.*]] = and i56 %[[or]], -16492674416641
-; CHECK-NEXT: %[[or:.*]] = or i56 %[[mask]], 1099511627776
+; CHECK-NEXT: %[[insert0:.*]] = or i16 %[[mask0]], 1
 
   store i8 1, i8* %a2ptr
-; CHECK-NEXT: %[[mask:.*]] = and i56 %[[or]], -1095216660481
-; CHECK-NEXT: %[[or:.*]] = or i56 %[[mask]], 4294967296
+; CHECK-NEXT: %[[mask1:.*]] = and i40 undef, 4294967295
+; CHECK-NEXT: %[[insert1:.*]] = or i40 %[[mask1]], 4294967296
 
   %a3i24ptr = bitcast i8* %a3ptr to i24*
   store i24 1, i24* %a3i24ptr
-; CHECK-NEXT: %[[mask:.*]] = and i56 %[[or]], -4294967041
-; CHECK-NEXT: %[[or:.*]] = or i56 %[[mask]], 256
+; CHECK-NEXT: %[[mask2:.*]] = and i40 %[[insert1]], -4294967041
+; CHECK-NEXT: %[[insert2:.*]] = or i40 %[[mask2]], 256
 
   %a2i40ptr = bitcast i8* %a2ptr to i40*
   store i40 1, i40* %a2i40ptr
-; CHECK-NEXT: %[[mask:.*]] = and i56 %[[or]], -1099511627776
-; CHECK-NEXT: %[[or:.*]] = or i56 %[[mask]], 1
+; CHECK-NEXT: %[[ext3:.*]] = zext i40 1 to i56
+; CHECK-NEXT: %[[mask3:.*]] = and i56 undef, -1099511627776
+; CHECK-NEXT: %[[insert3:.*]] = or i56 %[[mask3]], %[[ext3]]
 
 ; CHCEK-NOT: store
 ; CHCEK-NOT: load
@@ -103,6 +110,10 @@ entry:
   %ai = load i56* %aiptr
   %ret = zext i56 %ai to i64
   ret i64 %ret
-; CHECK: %[[ret:.*]] = zext i56 %[[or]] to i64
+; CHECK-NEXT: %[[ext4:.*]] = zext i16 %[[insert0]] to i56
+; CHECK-NEXT: %[[shift4:.*]] = shl i56 %[[ext4]], 40
+; CHECK-NEXT: %[[mask4:.*]] = and i56 %[[insert3]], 1099511627775
+; CHECK-NEXT: %[[insert4:.*]] = or i56 %[[mask4]], %[[shift4]]
+; CHECK-NEXT: %[[ret:.*]] = zext i56 %[[insert4]] to i64
 ; CHECK-NEXT: ret i64 %[[ret]]
 }
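The big-endian expectations above differ from the little-endian ones in basictest.ll only in where each narrow value lands inside the wide integer: bytes at offset 0 occupy the low bits on a little-endian target but the high bits on a big-endian target. A minimal standalone sketch of that offset-to-shift mapping (the function name and layout are illustrative assumptions, not code from the patch):

#include <cassert>
#include <cstdint>

// Byte offset -> bit shift for placing a narrow integer inside a wider one.
static uint64_t shiftForOffset(bool IsBigEndian, uint64_t WideBytes,
                               uint64_t NarrowBytes, uint64_t ByteOffset) {
  assert(ByteOffset + NarrowBytes <= WideBytes);
  return IsBigEndian ? 8 * (WideBytes - NarrowBytes - ByteOffset)
                     : 8 * ByteOffset;
}

int main() {
  // An i16 at offset 0 of an i56 (7 bytes): no shift on little-endian, but
  // "shl i56 ..., 40" on big-endian -- matching %[[shift4]] above.
  assert(shiftForOffset(false, 7, 2, 0) == 0);
  assert(shiftForOffset(true, 7, 2, 0) == 40);

  // An i8 at offset 2 of an i24 (3 bytes): "shl i24 ..., 16" on little-endian
  // (basictest.ll) but no shift at all on big-endian, where %[[ext2]] above
  // is or'ed in unshifted.
  assert(shiftForOffset(false, 3, 1, 2) == 16);
  assert(shiftForOffset(true, 3, 1, 2) == 0);
  return 0;
}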