Mirror of https://github.com/c64scene-ar/llvm-6502.git (synced 2024-12-25 14:32:53 +00:00)

Commit 7a2bdde0a0 (parent bcb8c6d09e)

Fix a ton of comment typos found by codespell. Patch by Luis Felipe Strano Moraes!

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@129558 91177308-0d34-0410-b5e6-96231b3b80d8
@@ -154,7 +154,7 @@ else()
 option(LLVM_ENABLE_ASSERTIONS "Enable assertions" ON)
 endif()
 
-# All options refered to from HandleLLVMOptions have to be specified
+# All options referred to from HandleLLVMOptions have to be specified
 # BEFORE this include, otherwise options will not be correctly set on
 # first cmake run
 include(config-ix)
@@ -2024,7 +2024,7 @@ $(DistZip) : $(TopDistDir)/.makedistdir
 $(Verb) cd $(PROJ_OBJ_ROOT) ; $(ZIP) -rq $(DistZip) $(DistName)
 
 dist :: $(DistTarGZip) $(DistTarBZ2) $(DistZip)
-$(Echo) ===== DISTRIBUTION PACKAGING SUCESSFUL =====
+$(Echo) ===== DISTRIBUTION PACKAGING SUCCESSFUL =====
 
 DistCheckDir := $(PROJ_OBJ_ROOT)/_distcheckdir
 
autoconf/m4/libtool.m4 (vendored)

@@ -1118,7 +1118,7 @@ if test -n "$_LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)" || \
 test -n "$_LT_AC_TAGVAR(runpath_var, $1)" || \
 test "X$_LT_AC_TAGVAR(hardcode_automatic, $1)" = "Xyes" ; then
 
-# We can hardcode non-existant directories.
+# We can hardcode non-existent directories.
 if test "$_LT_AC_TAGVAR(hardcode_direct, $1)" != no &&
 # If the only mechanism to avoid hardcoding is shlibpath_var, we
 # have to relink, otherwise we might link with an installed library
@@ -156,7 +156,7 @@ AC_CACHE_CHECK([whether deplibs are loaded by dlopen],
 osf[[1234]]*)
 # dlopen did load deplibs (at least at 4.x), but until the 5.x series,
 # it did *not* use an RPATH in a shared library to find objects the
-# library depends on, so we explictly say `no'.
+# library depends on, so we explicitly say `no'.
 libltdl_cv_sys_dlopen_deplibs=no
 ;;
 osf5.0|osf5.0a|osf5.1)
@@ -135,7 +135,7 @@ function(explicit_map_components_to_libraries out_libs)
 string(TOUPPER "${c}" capitalized)
 list(FIND capitalized_libs LLVM${capitalized} lib_idx)
 if( lib_idx LESS 0 )
-# The component is unkown. Maybe is an ommitted target?
+# The component is unknown. Maybe is an omitted target?
 is_llvm_target_library(${c} iltl_result)
 if( NOT iltl_result )
 message(FATAL_ERROR "Library `${c}' not found in list of llvm libraries.")
configure (vendored)

@@ -11723,7 +11723,7 @@ else
 osf[1234]*)
 # dlopen did load deplibs (at least at 4.x), but until the 5.x series,
 # it did *not* use an RPATH in a shared library to find objects the
-# library depends on, so we explictly say `no'.
+# library depends on, so we explicitly say `no'.
 libltdl_cv_sys_dlopen_deplibs=no
 ;;
 osf5.0|osf5.0a|osf5.1)
@@ -268,7 +268,7 @@ The number of bytes consumed by instructions in the function.
 
 =item B<Average Instruction Size>
 
-The average number of bytes consumed by the instructions in the funtion. This
+The average number of bytes consumed by the instructions in the function. This
 value is computed by dividing Instruction Size by Instructions.
 
 =item B<Bytes Per Instruction>
@@ -308,13 +308,13 @@ separate option groups syntactically.</p>
 <tt class="docutils literal"><span class="pre">-std=c99</span></tt>. It is also allowed to use spaces instead of the equality
 sign: <tt class="docutils literal"><span class="pre">-std</span> <span class="pre">c99</span></tt>. At most one occurrence is allowed.</li>
 <li><tt class="docutils literal"><span class="pre">parameter_list_option</span></tt> - same as the above, but more than one option
-occurence is allowed.</li>
+occurrence is allowed.</li>
 <li><tt class="docutils literal"><span class="pre">prefix_option</span></tt> - same as the parameter_option, but the option name and
 argument do not have to be separated. Example: <tt class="docutils literal"><span class="pre">-ofile</span></tt>. This can be also
 specified as <tt class="docutils literal"><span class="pre">-o</span> <span class="pre">file</span></tt>; however, <tt class="docutils literal"><span class="pre">-o=file</span></tt> will be parsed incorrectly
 (<tt class="docutils literal"><span class="pre">=file</span></tt> will be interpreted as option value). At most one occurrence is
 allowed.</li>
-<li><tt class="docutils literal"><span class="pre">prefix_list_option</span></tt> - same as the above, but more than one occurence of
+<li><tt class="docutils literal"><span class="pre">prefix_list_option</span></tt> - same as the above, but more than one occurrence of
 the option is allowed; example: <tt class="docutils literal"><span class="pre">-lm</span> <span class="pre">-lpthread</span></tt>.</li>
 <li><tt class="docutils literal"><span class="pre">alias_option</span></tt> - a special option type for creating aliases. Unlike other
 option types, aliases are not allowed to have any properties besides the
@@ -507,7 +507,7 @@
 style exception handling. The single parameter is a pointer to a
 buffer populated by <a href="#llvm_eh_sjlj_setjmp">
 <tt>llvm.eh.sjlj.setjmp</tt></a>. The frame pointer and stack pointer
-are restored from the buffer, then control is transfered to the
+are restored from the buffer, then control is transferred to the
 destination address.</p>
 
 </div>
@@ -60,11 +60,11 @@ Understood. :)
 
 Yup, I think that this makes a lot of sense. I am still intrigued,
 however, by the prospect of a minimally allocated VM representation... I
-think that it could have definate advantages for certain applications
+think that it could have definite advantages for certain applications
 (think very small machines, like PDAs). I don't, however, think that our
 initial implementations should focus on this. :)
 
-Here are some other auxilliary goals that I think we should consider:
+Here are some other auxiliary goals that I think we should consider:
 
 1. Primary goal: Support a high performance dynamic compilation
 system. This means that we have an "ideal" division of labor between
@@ -40,7 +40,7 @@ IDEAS TO CONSIDER
 packaged with the bytecodes themselves. As a conceptual implementation
 idea, we could include an immediate dominator number for each basic block
 in the LLVM bytecode program. Basic blocks could be numbered according
-to the order of occurance in the bytecode representation.
+to the order of occurrence in the bytecode representation.
 
 2. Including loop header and body information. This would facilitate
 detection of intervals and natural loops.
@@ -39,7 +39,7 @@ declaration and calling syntax.
 
 Very true. If you're implementing an object oriented language, however,
 remember that you have to do all the pointer to member function stuff
-yourself.... so everytime you invoke a virtual method one is involved
+yourself.... so every time you invoke a virtual method one is involved
 (instead of having C++ hide it for you behind "syntactic sugar").
 
 > And the old array syntax:
@@ -18,7 +18,7 @@ suggested, as specified below:
 
 Very true. We should discuss this more, but my reasoning is more of a
 consistency argument. There are VERY few instructions that can have all
-of the types eliminated, and doing so when available unnecesarily makes
+of the types eliminated, and doing so when available unnecessarily makes
 the language more difficult to handle. Especially when you see 'int
 %this' and 'bool %that' all over the place, I think it would be
 disorienting to see:
@@ -44,7 +44,7 @@ branches).
 
 No. This was something I was debating for a while, and didn't really feel
 strongly about either way. It is common to switch on other types in HLL's
-(for example signed int's are particually common), but in this case, all
+(for example signed int's are particularly common), but in this case, all
 that will be added is an additional 'cast' instruction. I removed that
 from the spec.
 
@@ -160,7 +160,7 @@ that can be trivally translated into a conditional move...
 > I agree that we need a static data space. Otherwise, emulating global
 > data gets unnecessarily complex.
 
-Definately. Also a later item though. :)
+Definitely. Also a later item though. :)
 
 > We once talked about adding a symbolic thread-id field to each
 > ..
@@ -42,7 +42,7 @@ Does using GCC's backend buy us anything?
 > optimization (step 16 in your list). Do you have a breakdown of that?
 
 Not really. The irritating part of GCC is that it mixes it all up and
-doesn't have a clean seperation of concerns. A lot of the "back end
+doesn't have a clean separation of concerns. A lot of the "back end
 optimization" happens right along with other data optimizations (ie, CSE
 of machine specific things).
 
@@ -17,7 +17,7 @@ iterator to an instruction, which, given just an Instruction*, requires a
 linear search of the basic block the instruction is contained in... just
 to insert an instruction before another instruction, or to delete an
 instruction! This complicates algorithms that should be very simple (like
-simple constant propogation), because they aren't actually sparse anymore,
+simple constant propagation), because they aren't actually sparse anymore,
 they have to traverse basic blocks to remove constant propogated
 instructions.
 
@@ -2369,11 +2369,11 @@ b: unreachable
 <a href="#terminators">terminator instruction</a>
 if the terminator instruction has multiple successors and the instruction
 is always executed when control transfers to one of the successors, and
-may not be executed when control is transfered to another.</li>
+may not be executed when control is transferred to another.</li>
 
 <li>Additionally, an instruction also <i>control-depends</i> on a terminator
 instruction if the set of instructions it otherwise depends on would be
-different if the terminator had transfered control to a different
+different if the terminator had transferred control to a different
 successor.</li>
 
 <li>Dependence is transitive.</li>
@@ -1577,7 +1577,7 @@ void throwCppException (int32_t ignoreIt) {
 typedef void (*OurExceptionThrowFunctType) (int32_t typeToThrow);
 
 /// This is a test harness which runs test by executing generated
-/// function with a type info type to throw. Harness wraps the excecution
+/// function with a type info type to throw. Harness wraps the execution
 /// of generated function in a C++ try catch clause.
 /// @param engine execution engine to use for executing generated function.
 /// This demo program expects this to be a JIT instance for demo
@@ -31,7 +31,7 @@ typedef void *LLVMDisasmContextRef;
 * the call back in the DisInfo parameter. The instruction containing operand
 * is at the PC parameter. For some instruction sets, there can be more than
 * one operand with symbolic information. To determine the symbolic operand
-* infomation for each operand, the bytes for the specific operand in the
+* information for each operand, the bytes for the specific operand in the
 * instruction are specified by the Offset parameter and its byte widith is the
 * size parameter. For instructions sets with fixed widths and one symbolic
 * operand per instruction, the Offset parameter will be zero and Size parameter
@@ -109,7 +109,7 @@ extern "C" {
 * Create a disassembler for the TripleName. Symbolic disassembly is supported
 * by passing a block of information in the DisInfo parameter and specifing the
 * TagType and call back functions as described above. These can all be passed
-* as NULL. If successfull this returns a disassembler context if not it
+* as NULL. If successful this returns a disassembler context if not it
 * returns NULL.
 */
 extern LLVMDisasmContextRef
@@ -127,7 +127,7 @@ LLVMDisasmDispose(LLVMDisasmContextRef DC);
 
 /**
 * Disassmble a single instruction using the disassembler context specified in
-* the parameter DC. The bytes of the instuction are specified in the parameter
+* the parameter DC. The bytes of the instruction are specified in the parameter
 * Bytes, and contains at least BytesSize number of bytes. The instruction is
 * at the address specified by the PC parameter. If a valid instruction can be
 * disassembled its string is returned indirectly in OutString which whos size
@@ -72,7 +72,7 @@ lto_get_version(void);
 
 
 /**
-* Returns the last error string or NULL if last operation was sucessful.
+* Returns the last error string or NULL if last operation was successful.
 */
 extern const char*
 lto_get_error_message(void);
@@ -263,7 +263,7 @@ lto_codegen_write_merged_modules(lto_code_gen_t cg, const char* path);
 
 /**
 * Generates code for all added modules into one native object file.
-* On sucess returns a pointer to a generated mach-o/ELF buffer and
+* On success returns a pointer to a generated mach-o/ELF buffer and
 * length set to the buffer size. The buffer is owned by the
 * lto_code_gen_t and will be freed when lto_codegen_dispose()
 * is called, or lto_codegen_compile() is called again.
@@ -153,7 +153,7 @@ void SplitString(StringRef Source,
 SmallVectorImpl<StringRef> &OutFragments,
 StringRef Delimiters = " \t\n\v\f\r");
 
-/// HashString - Hash funtion for strings.
+/// HashString - Hash function for strings.
 ///
 /// This is the Bernstein hash function.
 //
@@ -289,7 +289,7 @@ template<typename NodeTy> struct simplify_type<const ilist_iterator<NodeTy> > {
 //===----------------------------------------------------------------------===//
 //
 /// iplist - The subset of list functionality that can safely be used on nodes
-/// of polymorphic types, i.e. a heterogenous list with a common base class that
+/// of polymorphic types, i.e. a heterogeneous list with a common base class that
 /// holds the next/prev pointers. The only state of the list itself is a single
 /// pointer to the head of the list.
 ///
@@ -43,7 +43,7 @@ namespace llvm {
 /// InlineCost - Represent the cost of inlining a function. This
 /// supports special values for functions which should "always" or
 /// "never" be inlined. Otherwise, the cost represents a unitless
-/// amount; smaller values increase the likelyhood of the function
+/// amount; smaller values increase the likelihood of the function
 /// being inlined.
 class InlineCost {
 enum Kind {
@@ -146,7 +146,7 @@ inline Region* RegionNode::getNodeAs<Region>() const {
 /// two connections to the remaining graph. It can be used to analyze or
 /// optimize parts of the control flow graph.
 ///
-/// A <em> simple Region </em> is connected to the remaing graph by just two
+/// A <em> simple Region </em> is connected to the remaining graph by just two
 /// edges. One edge entering the Region and another one leaving the Region.
 ///
 /// An <em> extended Region </em> (or just Region) is a subgraph that can be
@@ -443,7 +443,7 @@ public:
 
 /// @brief Move all direct child nodes of this Region to another Region.
 ///
-/// @param To The Region the child nodes will be transfered to.
+/// @param To The Region the child nodes will be transferred to.
 void transferChildrenTo(Region *To);
 
 /// @brief Verify if the region is a correct region.
@@ -20,7 +20,7 @@
 
 namespace llvm {
 //===----------------------------------------------------------------------===//
-/// @brief Hierachical RegionNode successor iterator.
+/// @brief Hierarchical RegionNode successor iterator.
 ///
 /// This iterator iterates over all successors of a RegionNode.
 ///
@@ -54,7 +54,7 @@ public:
 /// @brief Get a pass to print the LLVM IR in the region.
 ///
 /// @param O The ouput stream to print the Region.
-/// @param Banner The banner to seperate different printed passes.
+/// @param Banner The banner to separate different printed passes.
 ///
 /// @return The pass to print the LLVM IR in the region.
 Pass *createPrinterPass(raw_ostream &O, const std::string &Banner) const;
@@ -435,7 +435,7 @@ class Archive {
 /// to determine just enough information to create an ArchiveMember object
 /// which is then inserted into the Archive object's ilist at the location
 /// given by \p where.
-/// @returns true if an error occured, false otherwise
+/// @returns true if an error occurred, false otherwise
 /// @brief Add a file to the archive.
 bool addFileBefore(
 const sys::Path& filename, ///< The file to be added
@@ -219,7 +219,7 @@ namespace ISD {
 // RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
 // These nodes take two operands: the normal LHS and RHS to the add. They
 // produce two results: the normal result of the add, and a boolean that
-// indicates if an overflow occured (*not* a flag, because it may be stored
+// indicates if an overflow occurred (*not* a flag, because it may be stored
 // to memory, etc.). If the type of the boolean is not i1 then the high
 // bits conform to getBooleanContents.
 // These nodes are generated from the llvm.[su]add.with.overflow intrinsics.
@@ -36,7 +36,7 @@ class GlobalValue;
 class Function;
 
 /// JITCodeEmitter - This class defines two sorts of methods: those for
-/// emitting the actual bytes of machine code, and those for emitting auxillary
+/// emitting the actual bytes of machine code, and those for emitting auxiliary
 /// structures, such as jump tables, relocations, etc.
 ///
 /// Emission of machine code is complicated by the fact that we don't (in
@@ -34,7 +34,7 @@ class Function;
 class MCSymbol;
 
 /// MachineCodeEmitter - This class defines two sorts of methods: those for
-/// emitting the actual bytes of machine code, and those for emitting auxillary
+/// emitting the actual bytes of machine code, and those for emitting auxiliary
 /// structures, such as jump tables, relocations, etc.
 ///
 /// Emission of machine code is complicated by the fact that we don't (in
@@ -54,7 +54,7 @@ protected:
 /// allocated for this code buffer.
 uint8_t *BufferBegin, *BufferEnd;
 /// CurBufferPtr - Pointer to the next byte of memory to fill when emitting
-/// code. This is guranteed to be in the range [BufferBegin,BufferEnd]. If
+/// code. This is guaranteed to be in the range [BufferBegin,BufferEnd]. If
 /// this pointer is at BufferEnd, it will never move due to code emission, and
 /// all code emission requests will be ignored (this is the buffer overflow
 /// condition).
@@ -692,11 +692,11 @@ namespace llvm {
 /// will create a cycle.
 bool WillCreateCycle(SUnit *SU, SUnit *TargetSU);
 
-/// AddPred - Updates the topological ordering to accomodate an edge
+/// AddPred - Updates the topological ordering to accommodate an edge
 /// to be added from SUnit X to SUnit Y.
 void AddPred(SUnit *Y, SUnit *X);
 
-/// RemovePred - Updates the topological ordering to accomodate an
+/// RemovePred - Updates the topological ordering to accommodate an
 /// an edge to be removed from the specified node N from the predecessors
 /// of the current node M.
 void RemovePred(SUnit *M, SUnit *N);
@@ -829,7 +829,7 @@ public:
 /// These functions only replace all existing uses. It's possible that as
 /// these replacements are being performed, CSE may cause the From node
 /// to be given new uses. These new uses of From are left in place, and
-/// not automatically transfered to To.
+/// not automatically transferred to To.
 ///
 void ReplaceAllUsesWith(SDValue From, SDValue Op,
 DAGUpdateListener *UpdateListener = 0);
@@ -258,7 +258,7 @@ public:
 }
 
 virtual SDValue RunSDNodeXForm(SDValue V, unsigned XFormNo) {
-assert(0 && "Tblgen shoudl generate this!");
+assert(0 && "Tblgen should generate this!");
 return SDValue();
 }
 
@@ -838,7 +838,7 @@ public:
 
 
 /// HandleSDNode - This class is used to form a handle around another node that
-/// is persistant and is updated across invocations of replaceAllUsesWith on its
+/// is persistent and is updated across invocations of replaceAllUsesWith on its
 /// operand. This node should be directly created by end-users and not added to
 /// the AllNodes list.
 class HandleSDNode : public SDNode {
@@ -94,7 +94,7 @@ class TargetLoweringObjectFileMachO : public TargetLoweringObjectFile {
 ///
 const MCSection *TLSBSSSection; // Defaults to ".tbss".
 
-/// TLSTLVSection - Section for thread local structure infomation.
+/// TLSTLVSection - Section for thread local structure information.
 /// Contains the source code name of the variable, visibility and a pointer
 /// to the initial value (.tdata or .tbss).
 const MCSection *TLSTLVSection; // Defaults to ".tlv".
@@ -185,7 +185,7 @@ public:
 /// \param GVsWithCode - Allocating globals with code breaks
 /// freeMachineCodeForFunction and is probably unsafe and bad for performance.
 /// However, we have clients who depend on this behavior, so we must support
-/// it. Eventually, when we're willing to break some backwards compatability,
+/// it. Eventually, when we're willing to break some backwards compatibility,
 /// this flag should be flipped to false, so that by default
 /// freeMachineCodeForFunction works.
 static ExecutionEngine *create(Module *M,
@@ -12,7 +12,7 @@
 //
 // Global variables are constant pointers that refer to hunks of space that are
 // allocated by either the VM, or by the linker in a static compiler. A global
-// variable may have an intial value, which is copied into the executables .data
+// variable may have an initial value, which is copied into the executables .data
 // area. Global Constants are required to have initializers.
 //
 //===----------------------------------------------------------------------===//
@@ -584,7 +584,7 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)
 /// @brief Represent an integer comparison operator.
 class ICmpInst: public CmpInst {
 protected:
-/// @brief Clone an indentical ICmpInst
+/// @brief Clone an identical ICmpInst
 virtual ICmpInst *clone_impl() const;
 public:
 /// @brief Constructor with insert-before-instruction semantics.
@@ -735,7 +735,7 @@ public:
 /// @brief Represents a floating point comparison operator.
 class FCmpInst: public CmpInst {
 protected:
-/// @brief Clone an indentical FCmpInst
+/// @brief Clone an identical FCmpInst
 virtual FCmpInst *clone_impl() const;
 public:
 /// @brief Constructor with insert-before-instruction semantics.
@@ -36,8 +36,8 @@ private:
 /// List of sections in layout order.
 llvm::SmallVector<MCSectionData*, 16> SectionOrder;
 
-/// The last fragment which was layed out, or 0 if nothing has been layed
-/// out. Fragments are always layed out in order, so all fragments with a
+/// The last fragment which was laid out, or 0 if nothing has been laid
+/// out. Fragments are always laid out in order, so all fragments with a
 /// lower ordinal will be up to date.
 mutable DenseMap<const MCSectionData*, MCFragment *> LastValidFragment;
 
@@ -58,7 +58,7 @@ public:
 void Invalidate(MCFragment *F);
 
 /// \brief Perform layout for a single fragment, assuming that the previous
-/// fragment has already been layed out correctly, and the parent section has
+/// fragment has already been laid out correctly, and the parent section has
 /// been initialized.
 void LayoutFragment(MCFragment *Fragment);
 
@@ -706,7 +706,7 @@ private:
 /// \param DF The fragment the fixup is inside.
 /// \param Target [out] On return, the relocatable expression the fixup
 /// evaluates to.
-/// \param Value [out] On return, the value of the fixup as currently layed
+/// \param Value [out] On return, the value of the fixup as currently laid
 /// out.
 /// \return Whether the fixup value was fully resolved. This is true if the
 /// \arg Value result is fixed, otherwise the value may change due to
@@ -745,7 +745,7 @@ private:
 MCFragment &F, const MCFixup &Fixup);
 
 public:
-/// Compute the effective fragment size assuming it is layed out at the given
+/// Compute the effective fragment size assuming it is laid out at the given
 /// \arg SectionAddress and \arg FragmentOffset.
 uint64_t ComputeFragmentSize(const MCAsmLayout &Layout, const MCFragment &F) const;
 
@@ -13,7 +13,7 @@
 // Passes are designed this way so that it is possible to run passes in a cache
 // and organizationally optimal order without having to specify it at the front
 // end. This allows arbitrary passes to be strung together and have them
-// executed as effeciently as possible.
+// executed as efficiently as possible.
 //
 // Passes should extend one of the classes below, depending on the guarantees
 // that it can make about what will be modified as it is run. For example, most
@@ -272,7 +272,7 @@ public:
 const void *DestNodeID, int DestNodePort,
 const std::string &Attrs) {
 if (SrcNodePort > 64) return; // Eminating from truncated part?
-if (DestNodePort > 64) DestNodePort = 64; // Targetting the truncated part?
+if (DestNodePort > 64) DestNodePort = 64; // Targeting the truncated part?
 
 O << "\tNode" << SrcNodeID;
 if (SrcNodePort >= 0)
@@ -20,7 +20,7 @@ namespace llvm {
 class raw_ostream;
 
 /// DisablePrettyStackTrace - Set this to true to disable this module. This
-/// might be neccessary if the host application installs its own signal
+/// might be necessary if the host application installs its own signal
 /// handlers which conflict with the ones installed by this module.
 /// Defaults to false.
 extern bool DisablePrettyStackTrace;
@@ -102,7 +102,7 @@ namespace sys {
 );
 
 /// This function terminates the program.
-/// @returns true if an error occured.
+/// @returns true if an error occurred.
 /// @see Execute
 /// @brief Terminates the program.
 bool Kill
@@ -53,7 +53,7 @@ namespace llvm {
 
 /// matches - Match the regex against a given \arg String.
 ///
-/// \param Matches - If given, on a succesful match this will be filled in
+/// \param Matches - If given, on a successful match this will be filled in
 /// with references to the matched group expressions (inside \arg String),
 /// the first group is always the entire pattern.
 ///
@@ -8,7 +8,7 @@
 //===----------------------------------------------------------------------===//
 //
 // This file defines some helpful functions for dealing with the possibility of
-// unix signals occuring while your program is running.
+// unix signals occurring while your program is running.
 //
 //===----------------------------------------------------------------------===//
 
@@ -35,13 +35,13 @@ namespace sys {
 public:
 
 /// A constant TimeValue representing the smallest time
-/// value permissable by the class. MinTime is some point
+/// value permissible by the class. MinTime is some point
 /// in the distant past, about 300 billion years BCE.
 /// @brief The smallest possible time value.
 static const TimeValue MinTime;
 
 /// A constant TimeValue representing the largest time
-/// value permissable by the class. MaxTime is some point
+/// value permissible by the class. MaxTime is some point
 /// in the distant future, about 300 billion years AD.
 /// @brief The largest possible time value.
 static const TimeValue MaxTime;
@@ -477,7 +477,7 @@ public:
 }
 
 /// shouldScheduleLoadsNear - This is a used by the pre-regalloc scheduler to
-/// determine (in conjuction with areLoadsFromSameBasePtr) if two loads should
+/// determine (in conjunction with areLoadsFromSameBasePtr) if two loads should
 /// be scheduled togther. On some targets if two loads are loading from
 /// addresses in the same cache line, it's better if they are scheduled
 /// together. This function takes two integers that represent the load offsets
@@ -1042,7 +1042,7 @@ protected:
 }
 
 /// JumpIsExpensive - Tells the code generator not to expand sequence of
-/// operations into a seperate sequences that increases the amount of
+/// operations into a separate sequences that increases the amount of
 /// flow control.
 void setJumpIsExpensive(bool isExpensive = true) {
 JumpIsExpensive = isExpensive;
@@ -207,7 +207,7 @@ public:
 ///
 /// Note that this only does one level of inlining. For example, if the
 /// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
-/// exists in the instruction stream. Similiarly this will inline a recursive
+/// exists in the instruction stream. Similarly this will inline a recursive
 /// function by one level.
 ///
 bool InlineFunction(CallInst *C, InlineFunctionInfo &IFI);
@@ -133,7 +133,7 @@ private:
 /// is refined.
 virtual void refineAbstractType(const DerivedType *OldTy, const Type *NewTy);
 
-/// This function markes a type as being concrete (defined).
+/// This function marks a type as being concrete (defined).
 virtual void typeBecameConcrete(const DerivedType *AbsTy);
 
 /// @}
@@ -350,7 +350,7 @@ DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
 Scale *= IndexScale.getSExtValue();
 
 
-// If we already had an occurrance of this index variable, merge this
+// If we already had an occurrence of this index variable, merge this
 // scale into it. For example, we want to handle:
 // A[x][x] -> x*16 + x*4 -> x*20
 // This also ensures that 'x' only appears in the index list once.
@@ -883,7 +883,7 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
 if (GEP1BaseOffset == 0 && GEP1VariableIndices.empty())
 return MustAlias;
 
-// If there is a difference betwen the pointers, but the difference is
+// If there is a difference between the pointers, but the difference is
 // less than the size of the associated memory object, then we know
 // that the objects are partially overlapping.
 if (GEP1BaseOffset != 0 && GEP1VariableIndices.empty()) {
@@ -602,7 +602,7 @@ void GlobalsModRef::addEscapingUse(Use &U) {
 // For the purposes of this analysis, it is conservatively correct to treat
 // a newly escaping value equivalently to a deleted one. We could perhaps
 // be more precise by processing the new use and attempting to update our
-// saved analysis results to accomodate it.
+// saved analysis results to accommodate it.
 deleteValue(U);
 
 AliasAnalysis::addEscapingUse(U);
@@ -501,7 +501,7 @@ InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS,
 return InlineCost::getAlways();
 
 if (CalleeFI->Metrics.usesDynamicAlloca) {
-// Get infomation about the caller.
+// Get information about the caller.
 FunctionInfo &CallerFI = CachedFunctionInfo[Caller];
 
 // If we haven't calculated this information yet, do so now.
@@ -549,7 +549,7 @@ InlineCost InlineCostAnalyzer::getSpecializationCost(Function *Callee,
 
 int Cost = 0;
 
-// Look at the orginal size of the callee. Each instruction counts as 5.
+// Look at the original size of the callee. Each instruction counts as 5.
 Cost += CalleeFI->Metrics.NumInsts * InlineConstants::InstrCost;
 
 // Offset that with the amount of code that can be constant-folded
@@ -286,7 +286,7 @@ void BallLarusDag::calculatePathNumbers() {
 BallLarusEdge* exitEdge = addEdge(node, getExit(), 0);
 exitEdge->setType(BallLarusEdge::SPLITEDGE_PHONY);
 
-// Counters to handle the possibilty of a multi-graph
+// Counters to handle the possibility of a multi-graph
 BasicBlock* oldTarget = 0;
 unsigned duplicateNumber = 0;
 
@@ -124,7 +124,7 @@ bool PathProfileVerifier::runOnModule (Module &M) {
 ProfilePathEdgeVector* pev = currentPath->getPathEdges();
 DEBUG(dbgs () << "path #" << currentPath->getNumber() << ": "
 << currentPath->getCount() << "\n");
-// setup the entry edge (normally path profiling doens't care about this)
+// setup the entry edge (normally path profiling doesn't care about this)
 if (currentPath->getFirstBlockInPath() == &F->getEntryBlock())
 edgeArray[arrayMap[0][currentPath->getFirstBlockInPath()][0]]
 += currentPath->getCount();
@@ -140,7 +140,7 @@ void ProfileEstimatorPass::recurseBasicBlock(BasicBlock *BB) {
 // loop, thus the edge is a backedge, continue and do not check if the
 // value is valid.
 if (BBisHeader && BBLoop->contains(*bbi)) {
-printEdgeError(edge, "but is backedge, continueing");
+printEdgeError(edge, "but is backedge, continuing");
 continue;
 }
 // If the edges value is missing (and this is no loop header, and this is
@@ -309,9 +309,9 @@ void ProfileInfoT<Function,BasicBlock>::
 removeEdge(oldedge);
 }
 
-/// Replaces all occurences of RmBB in the ProfilingInfo with DestBB.
+/// Replaces all occurrences of RmBB in the ProfilingInfo with DestBB.
 /// This checks all edges of the function the blocks reside in and replaces the
-/// occurences of RmBB with DestBB.
+/// occurrences of RmBB with DestBB.
 template<>
 void ProfileInfoT<Function,BasicBlock>::
 replaceAllUses(const BasicBlock *RmBB, const BasicBlock *DestBB) {
@@ -812,7 +812,7 @@ void ProfileInfoT<Function,BasicBlock>::repair(const Function *F) {
 }
 if (iw < 0) continue;
 
-// Check the recieving end of the path if it can handle the flow.
+// Check the receiving end of the path if it can handle the flow.
 double ow = getExecutionCount(Dest);
 Processed.clear();
 for (succ_const_iterator NBB = succ_begin(BB), End = succ_end(BB);
@@ -1882,7 +1882,7 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
 // outer mul and the inner addrec are guaranteed to have no overflow.
 //
 // No self-wrap cannot be guaranteed after changing the step size, but
-// will be infered if either NUW or NSW is true.
+// will be inferred if either NUW or NSW is true.
 Flags = AddRec->getNoWrapFlags(clearFlags(Flags, SCEV::FlagNW));
 const SCEV *NewRec = getAddRecExpr(NewOps, AddRecLoop, Flags);
 
@@ -31,7 +31,7 @@
 //
 // The second field identifies the type's parent node in the tree, or
 // is null or omitted for a root node. A type is considered to alias
-// all of its decendents and all of its ancestors in the tree. Also,
+// all of its descendants and all of its ancestors in the tree. Also,
 // a type is considered to alias all types in other trees, so that
 // bitcode produced from multiple front-ends is handled conservatively.
 //
@@ -1328,7 +1328,7 @@ static Value *BuildSubAggregate(Value *From, Value* To, const Type *IndexedType,
 break;
 }
 }
-// If we succesfully found a value for each of our subaggregates
+// If we successfully found a value for each of our subaggregates
 if (To)
 return To;
 }
@@ -1757,7 +1757,7 @@ llvm::GetUnderlyingObject(Value *V, const TargetData *TD, unsigned MaxLookup) {
 } else {
 // See if InstructionSimplify knows any relevant tricks.
 if (Instruction *I = dyn_cast<Instruction>(V))
-// TODO: Aquire a DominatorTree and use it.
+// TODO: Acquire a DominatorTree and use it.
 if (Value *Simplified = SimplifyInstruction(I, TD, 0)) {
 V = Simplified;
 continue;
@@ -363,7 +363,7 @@ void ValueEnumerator::EnumerateValue(const Value *V) {
 // Initializers for globals are handled explicitly elsewhere.
 } else if (isa<ConstantArray>(C) && cast<ConstantArray>(C)->isString()) {
 // Do not enumerate the initializers for an array of simple characters.
-// The initializers just polute the value table, and we emit the strings
+// The initializers just pollute the value table, and we emit the strings
 // specially.
 } else if (C->getNumOperands()) {
 // If a constant has operands, enumerate them. This makes sure that if a
@ -357,7 +357,7 @@ void AggressiveAntiDepBreaker::PrescanInstruction(MachineInstr *MI,
|
||||
RegRefs = State->GetRegRefs();
|
||||
|
||||
// Handle dead defs by simulating a last-use of the register just
|
||||
// after the def. A dead def can occur because the def is truely
|
||||
// after the def. A dead def can occur because the def is truly
|
||||
// dead, or because only a subregister is live at the def. If we
|
||||
// don't do this the dead def will be incorrectly merged into the
|
||||
// previous def.
|
||||
|
@ -53,7 +53,7 @@ static cl::opt<bool> DisableDebugInfoPrinting("disable-debug-info-print",
|
||||
cl::desc("Disable debug info printing"));
|
||||
|
||||
static cl::opt<bool> UnknownLocations("use-unknown-locations", cl::Hidden,
|
||||
cl::desc("Make an absense of debug location information explicit."),
|
||||
cl::desc("Make an absence of debug location information explicit."),
|
||||
cl::init(false));
|
||||
|
||||
#ifndef NDEBUG
|
||||
@ -1422,7 +1422,7 @@ DwarfDebug::collectVariableInfo(const MachineFunction *MF,
|
||||
|
||||
if (HI + 1 == HE)
|
||||
// If Begin is the last instruction in History then its value is valid
|
||||
// until the end of the funtion.
|
||||
// until the end of the function.
|
||||
SLabel = FunctionEndSym;
|
||||
else {
|
||||
const MachineInstr *End = HI[1];
|
||||
|
@ -254,7 +254,7 @@ bool CodePlacementOpt::MoveDiscontiguousLoopBlocks(MachineFunction &MF,
|
||||
|
||||
// Determine a position to move orphaned loop blocks to. If TopMBB is not
|
||||
// entered via fallthrough and BotMBB is exited via fallthrough, prepend them
|
||||
// to the top of the loop to avoid loosing that fallthrough. Otherwise append
|
||||
// to the top of the loop to avoid losing that fallthrough. Otherwise append
|
||||
// them to the bottom, even if it previously had a fallthrough, on the theory
|
||||
// that it's worth an extra branch to keep the loop contiguous.
|
||||
MachineFunction::iterator InsertPt =
|
||||
|
@ -173,7 +173,7 @@ namespace llvm {
|
||||
unsigned Offset; // sh_offset - Offset from the file start
|
||||
unsigned Size; // sh_size - The section size.
|
||||
unsigned Link; // sh_link - Section header table index link.
|
||||
unsigned Info; // sh_info - Auxillary information.
|
||||
unsigned Info; // sh_info - Auxiliary information.
|
||||
unsigned Align; // sh_addralign - Alignment of section.
|
||||
unsigned EntSize; // sh_entsize - Size of entries in the section e
|
||||
|
||||
|
@ -77,7 +77,7 @@ ELFWriter::ELFWriter(raw_ostream &o, TargetMachine &tm)
|
||||
// Create the object code emitter object for this target.
|
||||
ElfCE = new ELFCodeEmitter(*this);
|
||||
|
||||
// Inital number of sections
|
||||
// Initial number of sections
|
||||
NumSections = 0;
|
||||
}
|
||||
|
||||
|
@ -7,7 +7,7 @@
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// Expand Psuedo-instructions produced by ISel. These are usually to allow
|
||||
// Expand Pseudo-instructions produced by ISel. These are usually to allow
|
||||
// the expansion to contain control flow, such as a conditional move
|
||||
// implemented with a conditional branch and a phi, or an atomic operation
|
||||
// implemented with a loop.
|
||||
|
@ -290,7 +290,7 @@ public:
|
||||
/// mapVirtReg - Map virtual register to an equivalence class.
|
||||
void mapVirtReg(unsigned VirtReg, UserValue *EC);
|
||||
|
||||
/// renameRegister - Replace all references to OldReg wiht NewReg:SubIdx.
|
||||
/// renameRegister - Replace all references to OldReg with NewReg:SubIdx.
|
||||
void renameRegister(unsigned OldReg, unsigned NewReg, unsigned SubIdx);
|
||||
|
||||
/// emitDebugVariables - Recreate DBG_VALUE instruction from data structures.
|
||||
|
@ -1507,7 +1507,7 @@ rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
|
||||
// ...
|
||||
// def = ...
|
||||
// = use
|
||||
// It's better to start a new interval to avoid artifically
|
||||
// It's better to start a new interval to avoid artificially
|
||||
// extend the new interval.
|
||||
if (MI->readsWritesVirtualRegister(li.reg) ==
|
||||
std::make_pair(false,true)) {
|
||||
|
@ -337,7 +337,7 @@ void PEI::insertCSRSpillsAndRestores(MachineFunction &Fn) {
|
||||
--BeforeI;
|
||||
|
||||
// Restore all registers immediately before the return and any
|
||||
// terminators that preceed it.
|
||||
// terminators that precede it.
|
||||
if (!TFI->restoreCalleeSavedRegisters(*MBB, I, CSI, TRI)) {
|
||||
for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
|
||||
unsigned Reg = CSI[i].getReg();
|
||||
@ -437,7 +437,7 @@ void PEI::insertCSRSpillsAndRestores(MachineFunction &Fn) {
|
||||
--BeforeI;
|
||||
|
||||
// Restore all registers immediately before the return and any
|
||||
// terminators that preceed it.
|
||||
// terminators that precede it.
|
||||
for (unsigned i = 0, e = blockCSI.size(); i != e; ++i) {
|
||||
unsigned Reg = blockCSI[i].getReg();
|
||||
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
|
||||
|
@ -26,7 +26,7 @@ and then "merge" mul and mov:
|
||||
sxth r3, r3
|
||||
mla r4, r3, lr, r4
|
||||
|
||||
It also increase the likelyhood the store may become dead.
|
||||
It also increase the likelihood the store may become dead.
|
||||
|
||||
//===---------------------------------------------------------------------===//
|
||||
|
||||
@ -162,7 +162,7 @@ synthesize the various copy insertion/inspection methods in TargetInstrInfo.
|
||||
|
||||
//===---------------------------------------------------------------------===//
|
||||
|
||||
Stack coloring improvments:
|
||||
Stack coloring improvements:
|
||||
|
||||
1. Do proper LiveStackAnalysis on all stack objects including those which are
|
||||
not spill slots.
|
||||
|
@ -792,7 +792,7 @@ void RALinScan::updateSpillWeights(std::vector<float> &Weights,
|
||||
// register class we are trying to allocate. Then add the weight to all
|
||||
// sub-registers of the super-register even if they are not aliases.
|
||||
// e.g. allocating for GR32, bh is not used, updating bl spill weight.
|
||||
// bl should get the same spill weight otherwise it will be choosen
|
||||
// bl should get the same spill weight otherwise it will be chosen
|
||||
// as a spill candidate since spilling bh doesn't make ebx available.
|
||||
for (unsigned i = 0, e = Supers.size(); i != e; ++i) {
|
||||
for (const unsigned *sr = tri_->getSubRegisters(Supers[i]); *sr; ++sr)
|
||||
|
@ -47,7 +47,7 @@ outputFileSuffix("rmf-file-suffix",
|
||||
|
||||
static cl::opt<std::string>
|
||||
machineFuncsToRender("rmf-funcs",
|
||||
cl::desc("Coma seperated list of functions to render"
|
||||
cl::desc("Comma separated list of functions to render"
|
||||
", or \"*\"."),
|
||||
cl::init(""), cl::Hidden);
|
||||
|
||||
|
@ -472,7 +472,7 @@ void ScheduleDAGTopologicalSort::InitDAGTopologicalSorting() {
|
||||
#endif
|
||||
}
|
||||
|
||||
/// AddPred - Updates the topological ordering to accomodate an edge
|
||||
/// AddPred - Updates the topological ordering to accommodate an edge
|
||||
/// to be added from SUnit X to SUnit Y.
|
||||
void ScheduleDAGTopologicalSort::AddPred(SUnit *Y, SUnit *X) {
|
||||
int UpperBound, LowerBound;
|
||||
@ -490,7 +490,7 @@ void ScheduleDAGTopologicalSort::AddPred(SUnit *Y, SUnit *X) {
|
||||
}
|
||||
}
|
||||
|
||||
/// RemovePred - Updates the topological ordering to accomodate an
|
||||
/// RemovePred - Updates the topological ordering to accommodate an
|
||||
/// an edge to be removed from the specified node N from the predecessors
|
||||
/// of the current node M.
|
||||
void ScheduleDAGTopologicalSort::RemovePred(SUnit *M, SUnit *N) {
|
||||
|
@ -371,7 +371,7 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
|
||||
// will be overlapped by work done outside the current
|
||||
// scheduling region.
|
||||
Latency -= std::min(Latency, Count);
|
||||
// Add the artifical edge.
|
||||
// Add the artificial edge.
|
||||
ExitSU.addPred(SDep(SU, SDep::Order, Latency,
|
||||
/*Reg=*/0, /*isNormalMemory=*/false,
|
||||
/*isMustAlias=*/false,
|
||||
|
@ -1239,7 +1239,7 @@ bool FastISel::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
|
||||
// Only handle legal types. Two interesting things to note here. First,
|
||||
// by bailing out early, we may leave behind some dead instructions,
|
||||
// since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
|
||||
// own moves. Second, this check is necessary becuase FastISel doesn't
|
||||
// own moves. Second, this check is necessary because FastISel doesn't
|
||||
// use CreateRegs to create registers, so it always creates
|
||||
// exactly one register for each non-void instruction.
|
||||
EVT VT = TLI.getValueType(PN->getType(), /*AllowUnknown=*/true);
|
||||
|
@ -2878,7 +2878,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
|
||||
}
|
||||
case ISD::FP_ROUND_INREG: {
|
||||
// The only way we can lower this is to turn it into a TRUNCSTORE,
|
||||
// EXTLOAD pair, targetting a temporary location (a stack slot).
|
||||
// EXTLOAD pair, targeting a temporary location (a stack slot).
|
||||
|
||||
// NOTE: there is a choice here between constantly creating new stack
|
||||
// slots and always reusing the same one. We currently always create
|
||||
|
@ -617,7 +617,7 @@ namespace {
|
||||
};
|
||||
}
|
||||
|
||||
/// ProcessSDDbgValues - Process SDDbgValues assoicated with this node.
|
||||
/// ProcessSDDbgValues - Process SDDbgValues associated with this node.
|
||||
static void ProcessSDDbgValues(SDNode *N, SelectionDAG *DAG,
|
||||
InstrEmitter &Emitter,
|
||||
SmallVector<std::pair<unsigned, MachineInstr*>, 32> &Orders,
|
||||
|
@ -6197,7 +6197,7 @@ TargetLowering::LowerCallTo(SDValue Chain, const Type *RetTy,
|
||||
|
||||
// For a function returning void, there is no return value. We can't create
|
||||
// such a node, so we just return a null return value in that case. In
|
||||
// that case, nothing will actualy look at the value.
|
||||
// that case, nothing will actually look at the value.
|
||||
if (ReturnValues.empty())
|
||||
return std::make_pair(SDValue(), Chain);
|
||||
|
||||
@ -6413,7 +6413,7 @@ void SelectionDAGISel::LowerArguments(const BasicBlock *LLVMBB) {
|
||||
SDB->setValue(I, Res);
|
||||
|
||||
// If this argument is live outside of the entry block, insert a copy from
|
||||
// whereever we got it to the vreg that other BB's will reference it as.
|
||||
// wherever we got it to the vreg that other BB's will reference it as.
|
||||
SDB->CopyToExportRegsIfNeeded(I);
|
||||
}
|
||||
}
|
||||
|
@ -1859,12 +1859,11 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
|
||||
case ISD::SETTRUE2: return DAG.getConstant(1, VT);
|
||||
}
|
||||
|
||||
if (isa<ConstantSDNode>(N0.getNode())) {
|
||||
// Ensure that the constant occurs on the RHS, and fold constant
|
||||
// comparisons.
|
||||
// Ensure that the constant occurs on the RHS, and fold constant
|
||||
// comparisons.
|
||||
if (isa<ConstantSDNode>(N0.getNode()))
|
||||
return DAG.getSetCC(dl, VT, N1, N0, ISD::getSetCCSwappedOperands(Cond));
|
||||
}
|
||||
|
||||
|
||||
if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) {
|
||||
const APInt &C1 = N1C->getAPIntValue();
|
||||
|
||||
|
@ -277,7 +277,7 @@ void PEI::calculateAnticAvail(MachineFunction &Fn) {
|
||||
// Initialize data flow sets.
|
||||
clearAnticAvailSets();
|
||||
|
||||
// Calulate Antic{In,Out} and Avail{In,Out} iteratively on the MCFG.
|
||||
// Calculate Antic{In,Out} and Avail{In,Out} iteratively on the MCFG.
|
||||
bool changed = true;
|
||||
unsigned iterations = 0;
|
||||
while (changed) {
|
||||
|
@ -587,7 +587,7 @@ StrongPHIElimination::SplitInterferencesForBasicBlock(
|
||||
}
|
||||
|
||||
// We now walk the PHIs in successor blocks and check for interferences. This
|
||||
// is necesary because the use of a PHI's operands are logically contained in
|
||||
// is necessary because the use of a PHI's operands are logically contained in
|
||||
// the predecessor block. The def of a PHI's destination register is processed
|
||||
// along with the other defs in a basic block.
|
||||
|
||||
|
@ -32,7 +32,7 @@ STATISTIC(NumCommutes, "Number of instructions commuted");
|
||||
STATISTIC(NumDRM , "Number of re-materializable defs elided");
|
||||
STATISTIC(NumStores , "Number of stores added");
|
||||
STATISTIC(NumPSpills , "Number of physical register spills");
|
||||
STATISTIC(NumOmitted , "Number of reloads omited");
|
||||
STATISTIC(NumOmitted , "Number of reloads omitted");
|
||||
STATISTIC(NumAvoided , "Number of reloads deemed unnecessary");
|
||||
STATISTIC(NumCopified, "Number of available reloads turned into copies");
|
||||
STATISTIC(NumReMats , "Number of re-materialization");
|
||||
@ -669,7 +669,7 @@ static void UpdateKills(MachineInstr &MI, const TargetRegisterInfo* TRI,
|
||||
}
|
||||
}
|
||||
|
||||
/// ReMaterialize - Re-materialize definition for Reg targetting DestReg.
|
||||
/// ReMaterialize - Re-materialize definition for Reg targeting DestReg.
|
||||
///
|
||||
static void ReMaterialize(MachineBasicBlock &MBB,
|
||||
MachineBasicBlock::iterator &MII,
|
||||
|
@ -666,7 +666,7 @@ void JIT::jitTheFunction(Function *F, const MutexGuard &locked) {
|
||||
}
|
||||
|
||||
/// getPointerToFunction - This method is used to get the address of the
|
||||
/// specified function, compiling it if neccesary.
|
||||
/// specified function, compiling it if necessary.
|
||||
///
|
||||
void *JIT::getPointerToFunction(Function *F) {
|
||||
|
||||
|
@ -36,7 +36,7 @@ extern "C" {
|
||||
// disassembly is supported by passing a block of information in the DisInfo
|
||||
// parameter and specifing the TagType and call back functions as described in
|
||||
// the header llvm-c/Disassembler.h . The pointer to the block and the
|
||||
// functions can all be passed as NULL. If successfull this returns a
|
||||
// functions can all be passed as NULL. If successful this returns a
|
||||
// disassembler context if not it returns NULL.
|
||||
//
|
||||
LLVMDisasmContextRef LLVMCreateDisasm(const char *TripleName, void *DisInfo,
|
||||
|
@ -389,7 +389,7 @@ static bool EvaluateSymbolicAdd(const MCAssembler *Asm,
|
||||
// (LHS_A - RHS_B),
|
||||
// (RHS_A - LHS_B),
|
||||
// (RHS_A - RHS_B).
|
||||
// Since we are attempting to be as aggresive as possible about folding, we
|
||||
// Since we are attempting to be as aggressive as possible about folding, we
|
||||
// attempt to evaluate each possible alternative.
|
||||
AttemptToFoldSymbolOffsetDifference(Asm, Layout, Addrs, InSet, LHS_A, LHS_B,
|
||||
Result_Cst);
|
||||
|
@ -440,7 +440,7 @@ public:
|
||||
// Compensate for the relocation offset, Darwin x86_64 relocations only
|
||||
// have the addend and appear to have attempted to define it to be the
|
||||
// actual expression addend without the PCrel bias. However, instructions
|
||||
// with data following the relocation are not accomodated for (see comment
|
||||
// with data following the relocation are not accommodated for (see comment
|
||||
// below regarding SIGNED{1,2,4}), so it isn't exactly that either.
|
||||
Value += 1LL << Log2Size;
|
||||
}
|
||||
@ -541,7 +541,7 @@ public:
|
||||
}
|
||||
|
||||
// x86_64 almost always uses external relocations, except when there is no
|
||||
// symbol to use as a base address (a local symbol with no preceeding
|
||||
// symbol to use as a base address (a local symbol with no preceding
|
||||
// non-local symbol).
|
||||
if (Base) {
|
||||
Index = Base->getIndex();
|
||||
|
@ -3564,7 +3564,7 @@ void APFloat::toString(SmallVectorImpl<char> &Str,
|
||||
}
|
||||
|
||||
bool APFloat::getExactInverse(APFloat *inv) const {
|
||||
// We can only guarantee the existance of an exact inverse for IEEE floats.
|
||||
// We can only guarantee the existence of an exact inverse for IEEE floats.
|
||||
if (semantics != &IEEEhalf && semantics != &IEEEsingle &&
|
||||
semantics != &IEEEdouble && semantics != &IEEEquad)
|
||||
return false;
|
||||
|
@ -1518,7 +1518,7 @@ APInt::ms APInt::magic() const {
|
||||
/// Requires that the divisor not be 0. Taken from "Hacker's Delight", Henry
|
||||
/// S. Warren, Jr., chapter 10.
|
||||
/// LeadingZeros can be used to simplify the calculation if the upper bits
|
||||
/// of the devided value are known zero.
|
||||
/// of the divided value are known zero.
|
||||
APInt::mu APInt::magicu(unsigned LeadingZeros) const {
|
||||
const APInt& d = *this;
|
||||
unsigned p;
|
||||
|
@ -198,7 +198,7 @@ int llvm::DiffFilesWithTolerance(const sys::PathWithStatus &FileA,
|
||||
return 1;
|
||||
}
|
||||
|
||||
// Now its safe to mmap the files into memory becasue both files
|
||||
// Now its safe to mmap the files into memory because both files
|
||||
// have a non-zero size.
|
||||
error_code ec;
|
||||
OwningPtr<MemoryBuffer> F1;
|
||||
|
@ -8,7 +8,7 @@
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This file defines some helpful functions for dealing with the possibility of
|
||||
// Unix signals occuring while your program is running.
|
||||
// Unix signals occurring while your program is running.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
|
@ -8,7 +8,7 @@
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This file defines some helpful functions for dealing with the possibility of
|
||||
// Unix signals occuring while your program is running.
|
||||
// Unix signals occurring while your program is running.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
|
@ -131,7 +131,7 @@ unsigned StringRef::edit_distance(llvm::StringRef Other,
|
||||
|
||||
/// find - Search for the first string \arg Str in the string.
|
||||
///
|
||||
/// \return - The index of the first occurence of \arg Str, or npos if not
|
||||
/// \return - The index of the first occurrence of \arg Str, or npos if not
|
||||
/// found.
|
||||
size_t StringRef::find(StringRef Str, size_t From) const {
|
||||
size_t N = Str.size();
|
||||
@ -145,7 +145,7 @@ size_t StringRef::find(StringRef Str, size_t From) const {
|
||||
|
||||
/// rfind - Search for the last string \arg Str in the string.
|
||||
///
|
||||
/// \return - The index of the last occurence of \arg Str, or npos if not
|
||||
/// \return - The index of the last occurrence of \arg Str, or npos if not
|
||||
/// found.
|
||||
size_t StringRef::rfind(StringRef Str) const {
|
||||
size_t N = Str.size();
|
||||
|
@ -236,7 +236,7 @@ Program::Execute(const Path &path, const char **args, const char **envp,
|
||||
// Create a child process.
|
||||
int child = fork();
|
||||
switch (child) {
|
||||
// An error occured: Return to the caller.
|
||||
// An error occurred: Return to the caller.
|
||||
case -1:
|
||||
MakeErrMsg(ErrMsg, "Couldn't fork");
|
||||
return false;
|
||||
|
@ -8,7 +8,7 @@
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This file defines some helpful functions for dealing with the possibility of
|
||||
// Unix signals occuring while your program is running.
|
||||
// Unix signals occurring while your program is running.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
|
@ -1708,7 +1708,7 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) {
|
||||
return;
|
||||
}
|
||||
// Tail jump branches are really just branch instructions with additional
|
||||
// code-gen attributes. Convert them to the cannonical form here.
|
||||
// code-gen attributes. Convert them to the canonical form here.
|
||||
case ARM::TAILJMPd:
|
||||
case ARM::TAILJMPdND: {
|
||||
MCInst TmpInst, TmpInst2;
|
||||
|
@ -1201,7 +1201,7 @@ bool ARMBaseInstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
|
||||
}
|
||||
|
||||
/// shouldScheduleLoadsNear - This is a used by the pre-regalloc scheduler to
|
||||
/// determine (in conjuction with areLoadsFromSameBasePtr) if two loads should
|
||||
/// determine (in conjunction with areLoadsFromSameBasePtr) if two loads should
|
||||
/// be scheduled togther. On some targets if two loads are loading from
|
||||
/// addresses in the same cache line, it's better if they are scheduled
|
||||
/// together. This function takes two integers that represent the load offsets
|
||||
|
@ -291,7 +291,7 @@ public:
|
||||
int64_t &Offset1, int64_t &Offset2)const;
|
||||
|
||||
/// shouldScheduleLoadsNear - This is a used by the pre-regalloc scheduler to
|
||||
/// determine (in conjuction with areLoadsFromSameBasePtr) if two loads should
|
||||
/// determine (in conjunction with areLoadsFromSameBasePtr) if two loads should
|
||||
/// be scheduled togther. On some targets if two loads are loading from
|
||||
/// addresses in the same cache line, it's better if they are scheduled
|
||||
/// together. This function takes two integers that represent the load offsets
|
||||
|
@ -88,7 +88,7 @@ BitVector ARMBaseRegisterInfo::
|
||||
getReservedRegs(const MachineFunction &MF) const {
|
||||
const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
|
||||
|
||||
// FIXME: avoid re-calculating this everytime.
|
||||
// FIXME: avoid re-calculating this every time.
|
||||
BitVector Reserved(getNumRegs());
|
||||
Reserved.set(ARM::SP);
|
||||
Reserved.set(ARM::PC);
|
||||
|
@ -725,7 +725,7 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
|
||||
// pressure of the register class's representative and all of it's super
|
||||
// classes' representatives transitively. We have not implemented this because
|
||||
// of the difficulty prior to coalescing of modeling operand register classes
|
||||
// due to the common occurence of cross class copies and subregister insertions
|
||||
// due to the common occurrence of cross class copies and subregister insertions
|
||||
// and extractions.
|
||||
std::pair<const TargetRegisterClass*, uint8_t>
|
||||
ARMTargetLowering::findRepresentativeClass(EVT VT) const{
|
||||
@ -1323,7 +1323,7 @@ ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
|
||||
// than necessary, because it means that each store effectively depends
|
||||
// on every argument instead of just those arguments it would clobber.
|
||||
|
||||
// Do not flag preceeding copytoreg stuff together with the following stuff.
|
||||
// Do not flag preceding copytoreg stuff together with the following stuff.
|
||||
InFlag = SDValue();
|
||||
for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
|
||||
Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
|
||||
|
Some files were not shown because too many files have changed in this diff.