Mirror of https://github.com/c64scene-ar/llvm-6502.git, synced 2025-01-14 16:33:28 +00:00
Fix typos in comments
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@215777 91177308-0d34-0410-b5e6-96231b3b80d8
parent cc921d6f41
commit c51ec911e5
@@ -21,7 +21,7 @@ def int_arm_thread_pointer : GCCBuiltin<"__builtin_thread_pointer">,
             Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>;
 
 //===----------------------------------------------------------------------===//
-// Saturating Arithmentic
+// Saturating Arithmetic
 
 def int_arm_qadd : GCCBuiltin<"__builtin_arm_qadd">,
     Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
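Background for the hunk above (illustrative, not part of the commit): int_arm_qadd maps __builtin_arm_qadd onto the ARM QADD instruction, a saturating add. A minimal C++ reference model of what QADD computes (the Q sticky-flag side effect is omitted):

#include <cstdint>
#include <limits>

// QADD adds two 32-bit signed integers and clamps on overflow instead of
// wrapping, which is the behavior the int_arm_qadd intrinsic exposes.
int32_t qadd_reference(int32_t a, int32_t b) {
  int64_t wide = static_cast<int64_t>(a) + static_cast<int64_t>(b);
  if (wide > std::numeric_limits<int32_t>::max())
    return std::numeric_limits<int32_t>::max();   // saturate upward
  if (wide < std::numeric_limits<int32_t>::min())
    return std::numeric_limits<int32_t>::min();   // saturate downward
  return static_cast<int32_t>(wide);              // in range: plain add
}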
@@ -105,7 +105,7 @@ bool AtomicExpandLoadLinked::expandAtomicLoad(LoadInst *LI) {
                               ? Monotonic
                               : LI->getOrdering();
 
-  // The only 64-bit load guaranteed to be single-copy atomic by the ARM ARM is
+  // The only 64-bit load guaranteed to be single-copy atomic by the ARM is
   // an ldrexd (A3.5.3).
   IRBuilder<> Builder(LI);
   Value *Val = TM->getSubtargetImpl()->getTargetLowering()->emitLoadLinked(
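The comment being edited cites section A3.5.3 of the ARM Architecture Reference Manual (the "ARM ARM"), which guarantees single-copy atomicity only for LDREXD among 64-bit loads. A hedged sketch of what the expanded monotonic 64-bit load looks like at the instruction level, assuming ARMv7 and GCC/Clang inline-asm syntax (not code from the patch):

#include <cstdint>

// Read a 64-bit value in one single-copy-atomic access. LDREXD loads the
// register pair atomically; CLREX clears the exclusive monitor because no
// matching STREXD follows. No barrier is emitted: that matches a relaxed
// (Monotonic) load, as in the code above.
uint64_t atomic_load64_monotonic(const uint64_t *p) {
  uint64_t v;
  asm volatile("ldrexd %0, %H0, [%1]\n\t"
               "clrex"
               : "=&r"(v)
               : "r"(p)
               : "memory");
  return v;
}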
@@ -19,7 +19,7 @@
 // a = add nsw i64 f, 3
 // e = getelementptr ..., i64 a
 //
-// This is legal to do so if the computations are markers with either nsw or nuw
+// This is legal to do if the computations are marked with either nsw or nuw
 // markers.
 // Moreover, the current heuristic is simple: it does not create new sext
 // operations, i.e., it gives up when a sext would have forked (e.g., if
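The corrected sentence is the central legality condition of this pass: sext(add(a, b)) may be rewritten as add(sext(a), sext(b)) only when the narrow add cannot wrap (nsw). A small worked example of why, using int8_t as a stand-in for the narrow type (illustrative only):

#include <cassert>
#include <cstdint>

int main() {
  int8_t a = 100, b = 100;
  // Narrow add first, then widen: 100 + 100 wraps to -56 in int8_t,
  // so the sign extension sees -56.
  int32_t sext_of_add = static_cast<int8_t>(a + b);
  // Widen first, then add: no wrap is possible in int32_t; result is 200.
  int32_t add_of_sext = static_cast<int32_t>(a) + static_cast<int32_t>(b);
  // The two disagree exactly because the narrow add overflowed; with an
  // nsw (no-signed-wrap) guarantee they would always be equal.
  assert(sext_of_add != add_of_sext);
  return 0;
}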
@@ -223,7 +223,7 @@ AArch64AddressTypePromotion::shouldConsiderSExt(const Instruction *SExt) const {
 }
 
 // Input:
-// - SExtInsts contains all the sext instructions that are use direclty in
+// - SExtInsts contains all the sext instructions that are used directly in
 //   GetElementPtrInst, i.e., access to memory.
 // Algorithm:
 // - For each sext operation in SExtInsts:
@@ -353,7 +353,7 @@ AArch64AddressTypePromotion::propagateSignExtension(Instructions &SExtInsts) {
 
     // If the use is already of the right type, connect its uses to its argument
     // and delete it.
-    // This can happen for an Instruction which all uses are sign extended.
+    // This can happen for an Instruction all uses of which are sign extended.
     if (!ToRemove.count(SExt) &&
         SExt->getType() == SExt->getOperand(0)->getType()) {
       DEBUG(dbgs() << "Sign extension is useless, attach its use to "
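For the case this comment describes: once every use of an instruction has been sign extended, the promoted sext can end up with identical source and result types, making it an identity cast. A paraphrased sketch of the cleanup using the LLVM C++ API (the real code also updates its ToRemove bookkeeping):

#include "llvm/IR/Instructions.h"
using namespace llvm;

// Drop a sign extension whose source and result types already match:
// rewire every use to the operand, then erase the now-dead identity cast.
static void removeUselessSExt(SExtInst *SExt) {
  if (SExt->getType() == SExt->getOperand(0)->getType()) {
    SExt->replaceAllUsesWith(SExt->getOperand(0));
    SExt->eraseFromParent();
  }
}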
@@ -743,7 +743,7 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
     // On v8, we have particularly efficient implementations of atomic fences
     // if they can be combined with nearby atomic loads and stores.
     if (!Subtarget->hasV8Ops()) {
-      // Automatically insert fences (dmb ist) around ATOMIC_SWAP etc.
+      // Automatically insert fences (dmb ish) around ATOMIC_SWAP etc.
       setInsertFencesForAtomic(true);
     }
   } else {
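What setInsertFencesForAtomic(true) buys on pre-v8 targets, sketched at the source level; the annotated ARMv7 output shape is an assumption for illustration, not taken from the patch:

#include <atomic>

// A sequentially consistent exchange. With fence insertion enabled, the
// backend brackets the ldrex/strex swap loop with full barriers, roughly:
//       dmb ish            ; leading fence
//   1:  ldrex  r2, [r0]
//       strex  r3, r1, [r0]
//       cmp    r3, #0
//       bne    1b
//       dmb ish            ; trailing fence
int swap_seq_cst(std::atomic<int> &a, int v) {
  return a.exchange(v, std::memory_order_seq_cst);
}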
@@ -439,7 +439,7 @@ public:
   // enableAtomicExpandLoadLinked - True if we need to expand our atomics.
   bool enableAtomicExpandLoadLinked() const override;
 
-  /// getInstrItins - Return the instruction itineraies based on subtarget
+  /// getInstrItins - Return the instruction itineraries based on subtarget
   /// selection.
   const InstrItineraryData *getInstrItineraryData() const {
     return &InstrItins;
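Typical use of the accessor documented in this hunk and the next (a sketch; the subtarget object and scheduling-class index are assumed to come from surrounding backend code): scheduling queries the itinerary data for latencies.

#include "llvm/MC/MCInstrItineraries.h"
using namespace llvm;

// Ask a subtarget for its itineraries and, when present, the latency of a
// scheduling class. SubtargetT stands in for ARMSubtarget/HexagonSubtarget.
template <typename SubtargetT>
unsigned latencyFor(const SubtargetT &ST, unsigned SchedClass) {
  const InstrItineraryData *Itin = ST.getInstrItineraryData();
  if (!Itin || Itin->isEmpty())
    return 1; // no itinerary model: assume unit latency
  return Itin->getStageLatency(SchedClass);
}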
@@ -56,7 +56,7 @@ public:
   HexagonSubtarget(StringRef TT, StringRef CPU, StringRef FS,
                    const TargetMachine &TM);
 
-  /// getInstrItins - Return the instruction itineraies based on subtarget
+  /// getInstrItins - Return the instruction itineraries based on subtarget
   /// selection.
   const InstrItineraryData *getInstrItineraryData() const {
     return &InstrItins;