diff --git a/docs/CommandGuide/lit.rst b/docs/CommandGuide/lit.rst
index 1f97bc31dd7..e820eef2faf 100644
--- a/docs/CommandGuide/lit.rst
+++ b/docs/CommandGuide/lit.rst
@@ -161,7 +161,7 @@ ADDITIONAL OPTIONS

 .. option:: --show-tests

-  List all of the the discovered tests and exit.
+  List all of the discovered tests and exit.

 EXIT STATUS
 -----------
diff --git a/docs/LibFuzzer.rst b/docs/LibFuzzer.rst
index 47bdfd3a27d..1ac75a40698 100644
--- a/docs/LibFuzzer.rst
+++ b/docs/LibFuzzer.rst
@@ -112,7 +112,7 @@ Here we show how to use lib/Fuzzer on something real, yet simple: pcre2_::
   (cd pcre; ./autogen.sh; CC="clang -fsanitize=address $COV_FLAGS" ./configure --prefix=`pwd`/../inst && make -j && make install)
   # Build lib/Fuzzer files.
   clang -c -g -O2 -std=c++11 Fuzzer/*.cpp -IFuzzer
-  # Build the the actual function that does something interesting with PCRE2.
+  # Build the actual function that does something interesting with PCRE2.
   cat << EOF > pcre_fuzzer.cc
   #include <string.h>
   #include "pcre2posix.h"
diff --git a/include/llvm/Analysis/BlockFrequencyInfoImpl.h b/include/llvm/Analysis/BlockFrequencyInfoImpl.h
index 56b8f8ff6ad..32d96090f45 100644
--- a/include/llvm/Analysis/BlockFrequencyInfoImpl.h
+++ b/include/llvm/Analysis/BlockFrequencyInfoImpl.h
@@ -873,7 +873,7 @@ template <class BT> class BlockFrequencyInfoImpl : BlockFrequencyInfoImplBase {
   ///
   /// \pre \a computeMassInLoop() has been called for each subloop of \c
   /// OuterLoop.
-  /// \pre \c Insert points at the the last loop successfully processed by \a
+  /// \pre \c Insert points at the last loop successfully processed by \a
   /// computeMassInLoop().
   /// \pre \c OuterLoop has irreducible SCCs.
   void computeIrreducibleMass(LoopData *OuterLoop,
diff --git a/include/llvm/Support/MemoryBuffer.h b/include/llvm/Support/MemoryBuffer.h
index 35a7bdb004a..81616d8ba3a 100644
--- a/include/llvm/Support/MemoryBuffer.h
+++ b/include/llvm/Support/MemoryBuffer.h
@@ -124,7 +124,7 @@ public:
   static ErrorOr<std::unique_ptr<MemoryBuffer>>
   getFileOrSTDIN(const Twine &Filename, int64_t FileSize = -1);

-  /// Map a subrange of the the specified file as a MemoryBuffer.
+  /// Map a subrange of the specified file as a MemoryBuffer.
   static ErrorOr<std::unique_ptr<MemoryBuffer>>
   getFileSlice(const Twine &Filename, uint64_t MapSize, uint64_t Offset);
diff --git a/include/llvm/Target/TargetLowering.h b/include/llvm/Target/TargetLowering.h
index 616edd8c248..710e82d6b3d 100644
--- a/include/llvm/Target/TargetLowering.h
+++ b/include/llvm/Target/TargetLowering.h
@@ -2394,7 +2394,7 @@ public:
   /// outgoing token chain. It calls LowerCall to do the actual lowering.
   std::pair<SDValue, SDValue> LowerCallTo(CallLoweringInfo &CLI) const;

-  /// This hook must be implemented to lower calls into the the specified
+  /// This hook must be implemented to lower calls into the specified
   /// DAG. The outgoing arguments to the call are described by the Outs array,
   /// and the values to be returned by the call are described by the Ins
   /// array. The implementation should fill in the InVals array with legal-type
diff --git a/lib/Analysis/ScalarEvolutionExpander.cpp b/lib/Analysis/ScalarEvolutionExpander.cpp
index f82235d0c26..8274374621c 100644
--- a/lib/Analysis/ScalarEvolutionExpander.cpp
+++ b/lib/Analysis/ScalarEvolutionExpander.cpp
@@ -1702,7 +1702,7 @@ unsigned SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
   unsigned NumElim = 0;
   DenseMap<const SCEV *, PHINode *> ExprToIVMap;
-  // Process phis from wide to narrow. Mapping wide phis to the their truncation
+  // Process phis from wide to narrow. Map wide phis to their truncation
   // so narrow phis can reuse them.
   for (SmallVectorImpl<PHINode *>::const_iterator PIter = Phis.begin(),
          PEnd = Phis.end(); PIter != PEnd; ++PIter) {
diff --git a/lib/CodeGen/AtomicExpandPass.cpp b/lib/CodeGen/AtomicExpandPass.cpp
index fa17108b2a8..530ab46db03 100644
--- a/lib/CodeGen/AtomicExpandPass.cpp
+++ b/lib/CodeGen/AtomicExpandPass.cpp
@@ -464,7 +464,7 @@ bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
   Value *ShouldStore =
       Builder.CreateICmpEQ(Loaded, CI->getCompareOperand(), "should_store");

-  // If the the cmpxchg doesn't actually need any ordering when it fails, we can
+  // If the cmpxchg doesn't actually need any ordering when it fails, we can
   // jump straight past that fence instruction (if it exists).
   Builder.CreateCondBr(ShouldStore, TryStoreBB, FailureBB);
diff --git a/lib/CodeGen/ImplicitNullChecks.cpp b/lib/CodeGen/ImplicitNullChecks.cpp
index 122e23d4a5c..d7644a6676c 100644
--- a/lib/CodeGen/ImplicitNullChecks.cpp
+++ b/lib/CodeGen/ImplicitNullChecks.cpp
@@ -61,10 +61,10 @@ class ImplicitNullChecks : public MachineFunctionPass {
     // The block the check resides in.
     MachineBasicBlock *CheckBlock;

-    // The block branched to if the the pointer is non-null.
+    // The block branched to if the pointer is non-null.
     MachineBasicBlock *NotNullSucc;

-    // The block branched to if the the pointer is null.
+    // The block branched to if the pointer is null.
     MachineBasicBlock *NullSucc;

     NullCheck()
diff --git a/lib/CodeGen/MachineScheduler.cpp b/lib/CodeGen/MachineScheduler.cpp
index 50a9c669ea2..a48e54caf3f 100644
--- a/lib/CodeGen/MachineScheduler.cpp
+++ b/lib/CodeGen/MachineScheduler.cpp
@@ -2150,7 +2150,7 @@ void GenericSchedulerBase::setPolicy(CandPolicy &Policy, bool IsPostRA,
                                      SchedBoundary &CurrZone,
                                      SchedBoundary *OtherZone) {
-  // Apply preemptive heuristics based on the the total latency and resources
+  // Apply preemptive heuristics based on the total latency and resources
   // inside and outside this zone. Potential stalls should be considered before
   // following this policy.
diff --git a/lib/CodeGen/WinEHPrepare.cpp b/lib/CodeGen/WinEHPrepare.cpp
index 7934a4d9da2..d04d93f11e4 100644
--- a/lib/CodeGen/WinEHPrepare.cpp
+++ b/lib/CodeGen/WinEHPrepare.cpp
@@ -2296,7 +2296,7 @@ void WinEHPrepare::findCleanupHandlers(LandingPadActions &Actions,
       // value for this block but the value is a nullptr. This means that
       // we have previously analyzed the block and determined that it did
       // not contain any cleanup code. Based on the earlier analysis, we
-      // know the the block must end in either an unconditional branch, a
+      // know the block must end in either an unconditional branch, a
       // resume or a conditional branch that is predicated on a comparison
       // with a selector. Either the resume or the selector dispatch
       // would terminate the search for cleanup code, so the unconditional
diff --git a/lib/Support/Locale.cpp b/lib/Support/Locale.cpp
index 35ddf7f11bf..d5cb72b5db3 100644
--- a/lib/Support/Locale.cpp
+++ b/lib/Support/Locale.cpp
@@ -15,7 +15,7 @@ int columnWidth(StringRef Text) {

 bool isPrint(int UCS) {
 #if LLVM_ON_WIN32
-  // Restrict characters that we'll try to print to the the lower part of ASCII
+  // Restrict characters that we'll try to print to the lower part of ASCII
   // except for the control characters (0x20 - 0x7E). In general one can not
   // reliably output code points U+0080 and higher using narrow character C/C++
   // output functions in Windows, because the meaning of the upper 128 codes is
diff --git a/lib/Target/AArch64/AArch64ISelLowering.cpp b/lib/Target/AArch64/AArch64ISelLowering.cpp
index 1fa266a87b9..0165ef9c49c 100644
--- a/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1424,7 +1424,7 @@ static SDValue LowerXOR(SDValue Op, SelectionDAG &DAG) {
   ConstantSDNode *CFVal = dyn_cast<ConstantSDNode>(FVal);
   ConstantSDNode *CTVal = dyn_cast<ConstantSDNode>(TVal);

-  // The the values aren't constants, this isn't the pattern we're looking for.
+  // If the values aren't constants, this isn't the pattern we're looking for.
   if (!CFVal || !CTVal)
     return Op;

@@ -3420,7 +3420,7 @@ SDValue AArch64TargetLowering::LowerFCOPYSIGN(SDValue Op,
     EltVT = MVT::i64;
     VecVT = MVT::v2i64;

-    // We want to materialize a mask with the the high bit set, but the AdvSIMD
+    // We want to materialize a mask with the high bit set, but the AdvSIMD
     // immediate moves cannot materialize that in a single instruction for
     // 64-bit elements. Instead, materialize zero and then negate it.
     EltMask = 0;
diff --git a/lib/Target/AArch64/Disassembler/AArch64ExternalSymbolizer.cpp b/lib/Target/AArch64/Disassembler/AArch64ExternalSymbolizer.cpp
index eb05ed915dd..82bc949927c 100644
--- a/lib/Target/AArch64/Disassembler/AArch64ExternalSymbolizer.cpp
+++ b/lib/Target/AArch64/Disassembler/AArch64ExternalSymbolizer.cpp
@@ -52,7 +52,7 @@ getVariant(uint64_t LLVMDisassembler_VariantKind) {
 /// returns zero and isBranch is Success then a symbol look up for
 /// Address + Value is done and if a symbol is found an MCExpr is created with
 /// that, else an MCExpr with Address + Value is created. If GetOpInfo()
-/// returns zero and isBranch is Fail then the the Opcode of the MCInst is
+/// returns zero and isBranch is Fail then the Opcode of the MCInst is
 /// tested and for ADRP an other instructions that help to load of pointers
 /// a symbol look up is done to see it is returns a specific reference type
 /// to add to the comment stream. This function returns Success if it adds
diff --git a/lib/Target/AMDGPU/AMDKernelCodeT.h b/lib/Target/AMDGPU/AMDKernelCodeT.h
index 4d3041ff3db..eaffb854793 100644
--- a/lib/Target/AMDGPU/AMDKernelCodeT.h
+++ b/lib/Target/AMDGPU/AMDKernelCodeT.h
@@ -132,7 +132,7 @@ enum amd_code_property_mask_t {
   /// private memory do not exceed this size. For example, if the
   /// element size is 4 (32-bits or dword) and a 64-bit value must be
   /// loaded, the finalizer will generate two 32-bit loads. This
-  /// ensures that the interleaving will get the the work-item
+  /// ensures that the interleaving will get the work-item
   /// specific dword for both halves of the 64-bit value. If it just
   /// did a 64-bit load then it would get one dword which belonged to
   /// its own work-item, but the second dword would belong to the
diff --git a/lib/Target/AMDGPU/SIInstrInfo.cpp b/lib/Target/AMDGPU/SIInstrInfo.cpp
index 3394573b062..47bc17823b3 100644
--- a/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -1806,7 +1806,7 @@ void SIInstrInfo::legalizeOperands(MachineInstr *MI) const {
   }
   MachineBasicBlock &MBB = *MI->getParent();

-  // Extract the the ptr from the resource descriptor.
+  // Extract the ptr from the resource descriptor.
   // SRsrcPtrLo = srsrc:sub0
   unsigned SRsrcPtrLo = buildExtractSubReg(MI, MRI, *SRsrc,
diff --git a/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
index 8bcbb1159f8..35387d3e6cf 100644
--- a/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
+++ b/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
@@ -5841,7 +5841,7 @@ bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
   // do and don't have a cc_out optional-def operand. With some spot-checks
   // of the operand list, we can figure out which variant we're trying to
   // parse and adjust accordingly before actually matching. We shouldn't ever
-  // try to remove a cc_out operand that was explicitly set on the the
+  // try to remove a cc_out operand that was explicitly set on the
   // mnemonic, of course (CarrySetting == true). Reason number #317 the
   // table driven matcher doesn't fit well with the ARM instruction set.
   if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands))
diff --git a/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp b/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp
index bef77f5c24e..b88578309f0 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp
+++ b/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp
@@ -1065,7 +1065,7 @@ ARMMCCodeEmitter::getHiLo16ImmOpValue(const MCInst &MI, unsigned OpIdx,
     // it's just a plain immediate expression, previously those evaluated to
     // the lower 16 bits of the expression regardless of whether
     // we have a movt or a movw, but that led to misleadingly results.
-    // This is now disallowed in the the AsmParser in validateInstruction()
+    // This is disallowed in the AsmParser in validateInstruction()
     // so this should never happen.
     llvm_unreachable("expression without :upper16: or :lower16:");
   }
diff --git a/lib/Target/X86/MCTargetDesc/X86BaseInfo.h b/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
index 85b00068252..ef197f40c27 100644
--- a/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
+++ b/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
@@ -271,7 +271,7 @@ namespace X86II {
     /// register DI/EDI/ESI.
     RawFrmDst = 9,

-    /// RawFrmSrc - This form is for instructions that use the the source index
+    /// RawFrmSrc - This form is for instructions that use the source index
     /// register SI/ESI/ERI with a possible segment override, and also the
     /// destination index register DI/ESI/RDI.
     RawFrmDstSrc = 10,
diff --git a/lib/Target/X86/X86FixupLEAs.cpp b/lib/Target/X86/X86FixupLEAs.cpp
index b39c5aba30b..5eb4faeedff 100644
--- a/lib/Target/X86/X86FixupLEAs.cpp
+++ b/lib/Target/X86/X86FixupLEAs.cpp
@@ -44,7 +44,7 @@ class FixupLEAPass : public MachineFunctionPass {
   /// \brief Given a machine register, look for the instruction
   /// which writes it in the current basic block. If found,
   /// try to replace it with an equivalent LEA instruction.
-  /// If replacement succeeds, then also process the the newly created
+  /// If replacement succeeds, then also process the newly created
   /// instruction.
   void seekLEAFixup(MachineOperand &p, MachineBasicBlock::iterator &I,
                     MachineFunction::iterator MFI);
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 685ca2c3161..1af4c31505e 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -5446,7 +5446,7 @@ static bool isHorizontalBinOp(const BuildVectorSDNode *N, unsigned Opcode,
 ///
 /// Otherwise, the first horizontal binop dag node takes as input the lower
 /// 128-bit of V0 and the lower 128-bit of V1, and the second horizontal binop
-/// dag node takes the the upper 128-bit of V0 and the upper 128-bit of V1.
+/// dag node takes the upper 128-bit of V0 and the upper 128-bit of V1.
 /// Example:
 ///   HADD V0_LO, V1_LO
 ///   HADD V0_HI, V1_HI
diff --git a/lib/Transforms/ObjCARC/ObjCARCContract.cpp b/lib/Transforms/ObjCARC/ObjCARCContract.cpp
index 528f40eaad1..baca76ba3f2 100644
--- a/lib/Transforms/ObjCARC/ObjCARCContract.cpp
+++ b/lib/Transforms/ObjCARC/ObjCARCContract.cpp
@@ -212,7 +212,7 @@ static StoreInst *findSafeStoreForStoreStrongContraction(LoadInst *Load,
       break;

     // Now we know that we have not seen either the store or the release. If I
-    // is the the release, mark that we saw the release and continue.
+    // is the release, mark that we saw the release and continue.
     Instruction *Inst = &*I;
     if (Inst == Release) {
       SawRelease = true;
diff --git a/lib/Transforms/Scalar/JumpThreading.cpp b/lib/Transforms/Scalar/JumpThreading.cpp
index 1ebc6a5b1aa..1130d228acb 100644
--- a/lib/Transforms/Scalar/JumpThreading.cpp
+++ b/lib/Transforms/Scalar/JumpThreading.cpp
@@ -759,7 +759,7 @@ bool JumpThreading::ProcessBlock(BasicBlock *BB) {

   if (CmpInst *CondCmp = dyn_cast<CmpInst>(CondInst)) {
     // If we're branching on a conditional, LVI might be able to determine
-    // it's value at the the branch instruction. We only handle comparisons
+    // its value at the branch instruction. We only handle comparisons
     // against a constant at this time.
     // TODO: This should be extended to handle switches as well.
     BranchInst *CondBr = dyn_cast<BranchInst>(BB->getTerminator());
diff --git a/lib/Transforms/Scalar/SampleProfile.cpp b/lib/Transforms/Scalar/SampleProfile.cpp
index 3480cd49912..c8dfa54a4aa 100644
--- a/lib/Transforms/Scalar/SampleProfile.cpp
+++ b/lib/Transforms/Scalar/SampleProfile.cpp
@@ -282,7 +282,7 @@ bool SampleProfileLoader::computeBlockWeights(Function &F) {
 /// \brief Find equivalence classes for the given block.
 ///
 /// This finds all the blocks that are guaranteed to execute the same
-/// number of times as \p BB1. To do this, it traverses all the the
+/// number of times as \p BB1. To do this, it traverses all the
 /// descendants of \p BB1 in the dominator or post-dominator tree.
 ///
 /// A block BB2 will be in the same equivalence class as \p BB1 if
diff --git a/lib/Transforms/Utils/SimplifyCFG.cpp b/lib/Transforms/Utils/SimplifyCFG.cpp
index 60ac271bceb..71aaa8808b0 100644
--- a/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -4058,7 +4058,7 @@ static bool SwitchToLookupTable(SwitchInst *SI, IRBuilder<> &Builder,
     return false;

   // Figure out the corresponding result for each case value and phi node in the
-  // common destination, as well as the the min and max case values.
+  // common destination, as well as the min and max case values.
   assert(SI->case_begin() != SI->case_end());
   SwitchInst::CaseIt CI = SI->case_begin();
   ConstantInt *MinCaseVal = CI.getCaseValue();
diff --git a/test/CodeGen/AArch64/arm64-codegen-prepare-extload.ll b/test/CodeGen/AArch64/arm64-codegen-prepare-extload.ll
index f0b8299a66e..c9f668f2c42 100644
--- a/test/CodeGen/AArch64/arm64-codegen-prepare-extload.ll
+++ b/test/CodeGen/AArch64/arm64-codegen-prepare-extload.ll
@@ -24,7 +24,7 @@ false:
 }

 ; Check that we manage to form a zextload is an operation with only one
-; argument to explicitly extend is in the the way.
+; argument to explicitly extend is in the way.
 ; OPTALL-LABEL: @promoteOneArg
 ; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p
 ; OPT-NEXT: [[ZEXT:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i32
@@ -49,7 +49,7 @@ false:
 }

 ; Check that we manage to form a sextload is an operation with only one
-; argument to explicitly extend is in the the way.
+; argument to explicitly extend is in the way.
 ; Version with sext.
 ; OPTALL-LABEL: @promoteOneArgSExt
 ; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p
@@ -74,7 +74,7 @@ false:
 }

 ; Check that we manage to form a zextload is an operation with two
-; arguments to explicitly extend is in the the way.
+; arguments to explicitly extend is in the way.
 ; Extending %add will create two extensions:
 ; 1. One for %b.
 ; 2. One for %t.
@@ -113,7 +113,7 @@ false:
 }

 ; Check that we manage to form a sextload is an operation with two
-; arguments to explicitly extend is in the the way.
+; arguments to explicitly extend is in the way.
 ; Version with sext.
 ; OPTALL-LABEL: @promoteTwoArgSExt
 ; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p
diff --git a/test/CodeGen/AMDGPU/local-memory-two-objects.ll b/test/CodeGen/AMDGPU/local-memory-two-objects.ll
index 06a8b1246e6..f501a7ac627 100644
--- a/test/CodeGen/AMDGPU/local-memory-two-objects.ll
+++ b/test/CodeGen/AMDGPU/local-memory-two-objects.ll
@@ -14,7 +14,7 @@

 ; EG: {{^}}local_memory_two_objects:

-; We would like to check the the lds writes are using different
+; We would like to check that the lds writes are using different
 ; addresses, but due to variations in the scheduler, we can't do
 ; this consistently on evergreen GPUs.
 ; EG: LDS_WRITE
diff --git a/test/CodeGen/ARM/debug-frame-vararg.ll b/test/CodeGen/ARM/debug-frame-vararg.ll
index 19e55fe0235..c1eff0a5bd6 100644
--- a/test/CodeGen/ARM/debug-frame-vararg.ll
+++ b/test/CodeGen/ARM/debug-frame-vararg.ll
@@ -4,7 +4,7 @@
 ; RUN: llc -mtriple thumb-unknown-linux-gnueabi -filetype asm -o - %s -disable-fp-elim | FileCheck %s --check-prefix=CHECK-THUMB-FP-ELIM

 ; Tests that the initial space allocated to the varargs on the stack is
-; taken into account in the the .cfi_ directives.
+; taken into account in the .cfi_ directives.

 ; Generated from the C program:
 ; #include <stdarg.h>
diff --git a/test/CodeGen/Mips/cconv/callee-saved.ll b/test/CodeGen/Mips/cconv/callee-saved.ll
index d0b1e64cdee..0570ab35fd0 100644
--- a/test/CodeGen/Mips/cconv/callee-saved.ll
+++ b/test/CodeGen/Mips/cconv/callee-saved.ll
@@ -18,7 +18,7 @@
 ; RUN: llc -march=mips64 -target-abi n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64-INV %s
 ; RUN: llc -march=mips64el -target-abi n64 < %s | FileCheck --check-prefix=ALL --check-prefix=N64-INV %s

-; Test the the callee-saved registers are callee-saved as specified by section
+; Test that the callee-saved registers are callee-saved as specified by section
 ; 2 of the MIPSpro N32 Handbook and section 3 of the SYSV ABI spec.

 define void @gpr_clobber() nounwind {
diff --git a/test/CodeGen/X86/codegen-prepare-extload.ll b/test/CodeGen/X86/codegen-prepare-extload.ll
index 65502b312b0..c5c761ee63e 100644
--- a/test/CodeGen/X86/codegen-prepare-extload.ll
+++ b/test/CodeGen/X86/codegen-prepare-extload.ll
@@ -30,7 +30,7 @@ false:
 }

 ; Check that we manage to form a zextload is an operation with only one
-; argument to explicitly extend is in the the way.
+; argument to explicitly extend is in the way.
 ; OPTALL-LABEL: @promoteOneArg
 ; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p
 ; OPT-NEXT: [[ZEXT:%[a-zA-Z_0-9-]+]] = zext i8 [[LD]] to i32
@@ -55,7 +55,7 @@ false:
 }

 ; Check that we manage to form a sextload is an operation with only one
-; argument to explicitly extend is in the the way.
+; argument to explicitly extend is in the way.
 ; Version with sext.
 ; OPTALL-LABEL: @promoteOneArgSExt
 ; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p
@@ -80,7 +80,7 @@ false:
 }

 ; Check that we manage to form a zextload is an operation with two
-; arguments to explicitly extend is in the the way.
+; arguments to explicitly extend is in the way.
 ; Extending %add will create two extensions:
 ; 1. One for %b.
 ; 2. One for %t.
@@ -119,7 +119,7 @@ false:
 }

 ; Check that we manage to form a sextload is an operation with two
-; arguments to explicitly extend is in the the way.
+; arguments to explicitly extend is in the way.
 ; Version with sext.
 ; OPTALL-LABEL: @promoteTwoArgSExt
 ; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i8, i8* %p
diff --git a/tools/llvm-objdump/MachODump.cpp b/tools/llvm-objdump/MachODump.cpp
index bf7451eb86d..1730bf3859f 100644
--- a/tools/llvm-objdump/MachODump.cpp
+++ b/tools/llvm-objdump/MachODump.cpp
@@ -788,7 +788,7 @@ static void DumpLiteralPointerSection(MachOObjectFile *O,
   // Set the size of the literal pointer.
   uint32_t lp_size = O->is64Bit() ? 8 : 4;

-  // Collect the external relocation symbols for the the literal pointers.
+  // Collect the external relocation symbols for the literal pointers.
   std::vector<std::pair<uint64_t, SymbolRef>> Relocs;
   for (const RelocationRef &Reloc : Section.relocations()) {
     DataRefImpl Rel;
diff --git a/tools/llvm-size/llvm-size.cpp b/tools/llvm-size/llvm-size.cpp
index c64c1d722d3..9a6e2c1ae4b 100644
--- a/tools/llvm-size/llvm-size.cpp
+++ b/tools/llvm-size/llvm-size.cpp
@@ -97,7 +97,7 @@ static size_t getNumLengthAsString(uint64_t num) {
   return result.size();
 }

-/// @brief Return the the printing format for the Radix.
+/// @brief Return the printing format for the Radix.
 static const char *getRadixFmt(void) {
   switch (Radix) {
   case octal:
diff --git a/unittests/Support/AllocatorTest.cpp b/unittests/Support/AllocatorTest.cpp
index 38c7fcba8af..4b544641e9b 100644
--- a/unittests/Support/AllocatorTest.cpp
+++ b/unittests/Support/AllocatorTest.cpp
@@ -129,7 +129,7 @@ TEST(AllocatorTest, TestAlignmentPastSlab) {

   // Aligning the current slab pointer is likely to move it past the end of the
   // slab, which would confuse any unsigned comparisons with the difference of
-  // the the end pointer and the aligned pointer.
+  // the end pointer and the aligned pointer.

   Alloc.Allocate(1024, 8192);
   EXPECT_EQ(2U, Alloc.GetNumSlabs());