Add clang

Jeremy Rand 2015-07-18 00:10:17 -05:00
parent 79f01ca4a8
commit 99ce1197a6
8850 changed files with 1375270 additions and 0 deletions

4
tools/clang/.arcconfig Normal file

@@ -0,0 +1,4 @@
{
"project_id" : "clang",
"conduit_uri" : "http://llvm-reviews.chandlerc.com/"
}


@@ -0,0 +1 @@
BasedOnStyle: LLVM

35
tools/clang/.gitignore vendored Normal file

@@ -0,0 +1,35 @@
#==============================================================================#
# This file specifies intentionally untracked files that git should ignore.
# See: http://www.kernel.org/pub/software/scm/git/docs/gitignore.html
#
# This file is intentionally different from the output of `git svn show-ignore`,
# as most of those are useless.
#==============================================================================#
#==============================================================================#
# File extensions to be ignored anywhere in the tree.
#==============================================================================#
# Temp files created by most text editors.
*~
# Merge files created by git.
*.orig
# Byte compiled python modules.
*.pyc
# vim swap files
.*.swp
.sw?
#==============================================================================#
# Explicit files to ignore (only matches one).
#==============================================================================#
cscope.files
cscope.out
#==============================================================================#
# Directories to ignore (do not add trailing '/'s, they skip symlinks).
#==============================================================================#
# Clang extra user tools, which is tracked independently (clang-tools-extra).
tools/extra
# Sphinx build products
docs/_build
docs/analyzer/_build

401
tools/clang/CMakeLists.txt Normal file

@@ -0,0 +1,401 @@
# If we are not building as part of LLVM, build Clang as a
# standalone project, using LLVM as an external library:
if( CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR )
project(Clang)
cmake_minimum_required(VERSION 2.8)
set(CLANG_PATH_TO_LLVM_SOURCE "" CACHE PATH
"Path to LLVM source code. Not necessary if using an installed LLVM.")
set(CLANG_PATH_TO_LLVM_BUILD "" CACHE PATH
"Path to the directory where LLVM was built or installed.")
if( CLANG_PATH_TO_LLVM_SOURCE )
if( NOT EXISTS "${CLANG_PATH_TO_LLVM_SOURCE}/cmake/config-ix.cmake" )
message(FATAL_ERROR "Please set CLANG_PATH_TO_LLVM_SOURCE to the root directory of LLVM source code.")
else()
get_filename_component(LLVM_MAIN_SRC_DIR ${CLANG_PATH_TO_LLVM_SOURCE}
ABSOLUTE)
list(APPEND CMAKE_MODULE_PATH "${LLVM_MAIN_SRC_DIR}/cmake/modules")
endif()
endif()
if (EXISTS "${CLANG_PATH_TO_LLVM_BUILD}/bin/llvm-config${CMAKE_EXECUTABLE_SUFFIX}")
set (PATH_TO_LLVM_CONFIG "${CLANG_PATH_TO_LLVM_BUILD}/bin/llvm-config${CMAKE_EXECUTABLE_SUFFIX}")
elseif (EXISTS "${CLANG_PATH_TO_LLVM_BUILD}/bin/Debug/llvm-config${CMAKE_EXECUTABLE_SUFFIX}")
# Looking for bin/Debug/llvm-config is a complete hack. How can we get
# around this?
set (PATH_TO_LLVM_CONFIG "${CLANG_PATH_TO_LLVM_BUILD}/bin/Debug/llvm-config${CMAKE_EXECUTABLE_SUFFIX}")
else()
message(FATAL_ERROR "Please set CLANG_PATH_TO_LLVM_BUILD to a directory containing a LLVM build.")
endif()
list(APPEND CMAKE_MODULE_PATH "${CLANG_PATH_TO_LLVM_BUILD}/share/llvm/cmake")
get_filename_component(PATH_TO_LLVM_BUILD ${CLANG_PATH_TO_LLVM_BUILD}
ABSOLUTE)
option(LLVM_INSTALL_TOOLCHAIN_ONLY "Only include toolchain files in the 'install' target." OFF)
include(AddLLVM)
include(TableGen)
include("${CLANG_PATH_TO_LLVM_BUILD}/share/llvm/cmake/LLVMConfig.cmake")
include(HandleLLVMOptions)
set(PACKAGE_VERSION "${LLVM_PACKAGE_VERSION}")
set(LLVM_MAIN_INCLUDE_DIR "${LLVM_MAIN_SRC_DIR}/include")
set(LLVM_BINARY_DIR ${CMAKE_BINARY_DIR})
set(CMAKE_INCLUDE_CURRENT_DIR ON)
include_directories("${PATH_TO_LLVM_BUILD}/include" "${LLVM_MAIN_INCLUDE_DIR}")
link_directories("${PATH_TO_LLVM_BUILD}/lib")
exec_program("${PATH_TO_LLVM_CONFIG} --bindir" OUTPUT_VARIABLE LLVM_BINARY_DIR)
set(LLVM_TABLEGEN_EXE "${LLVM_BINARY_DIR}/llvm-tblgen${CMAKE_EXECUTABLE_SUFFIX}")
# Define the default arguments to use with 'lit', and an option for the user
# to override.
set(LIT_ARGS_DEFAULT "-sv")
if (MSVC OR XCODE)
set(LIT_ARGS_DEFAULT "${LIT_ARGS_DEFAULT} --no-progress-bar")
endif()
set(LLVM_LIT_ARGS "${LIT_ARGS_DEFAULT}" CACHE STRING "Default options for lit")
set( CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin )
set( CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib )
set( CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib )
set( CLANG_BUILT_STANDALONE 1 )
find_package(LibXml2)
if (LIBXML2_FOUND)
set(CLANG_HAVE_LIBXML 1)
endif ()
endif()
set(CLANG_RESOURCE_DIR "" CACHE STRING
"Relative directory from the Clang binary to its resource files.")
set(C_INCLUDE_DIRS "" CACHE STRING
"Colon separated list of directories clang will search for headers.")
set(GCC_INSTALL_PREFIX "" CACHE PATH "Directory where gcc is installed." )
set(DEFAULT_SYSROOT "" CACHE PATH
"Default <path> to all compiler invocations for --sysroot=<path>." )
set(CLANG_VENDOR "" CACHE STRING
"Vendor-specific text for showing with version information.")
if( CLANG_VENDOR )
add_definitions( -DCLANG_VENDOR="${CLANG_VENDOR} " )
endif()
set(CLANG_REPOSITORY_STRING "" CACHE STRING
"Vendor-specific text for showing the repository the source is taken from.")
if(CLANG_REPOSITORY_STRING)
add_definitions(-DCLANG_REPOSITORY_STRING="${CLANG_REPOSITORY_STRING}")
endif()
set(CLANG_VENDOR_UTI "org.llvm.clang" CACHE STRING
"Vendor-specific uti.")
set(CLANG_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR})
set(CLANG_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR})
if( CMAKE_SOURCE_DIR STREQUAL CMAKE_BINARY_DIR AND NOT MSVC_IDE )
message(FATAL_ERROR "In-source builds are not allowed. CMake would overwrite "
"the makefiles distributed with LLVM. Please create a directory and run cmake "
"from there, passing the path to this source directory as the last argument. "
"This process created the file `CMakeCache.txt' and the directory "
"`CMakeFiles'. Please delete them.")
endif()
if( NOT CMAKE_SOURCE_DIR STREQUAL CMAKE_BINARY_DIR )
file(GLOB_RECURSE
tablegenned_files_on_include_dir
"${CLANG_SOURCE_DIR}/include/clang/*.inc")
if( tablegenned_files_on_include_dir )
message(FATAL_ERROR "Apparently there is a previous in-source build, "
"probably as the result of running `configure' and `make' on "
"${CLANG_SOURCE_DIR}. This may cause problems. The suspicious files are:\n"
"${tablegenned_files_on_include_dir}\nPlease clean the source directory.")
endif()
endif()
# Compute the Clang version from the LLVM version.
string(REGEX MATCH "[0-9]+\\.[0-9]+(\\.[0-9]+)?" CLANG_VERSION
${PACKAGE_VERSION})
message(STATUS "Clang version: ${CLANG_VERSION}")
string(REGEX REPLACE "([0-9]+)\\.[0-9]+(\\.[0-9]+)?" "\\1" CLANG_VERSION_MAJOR
${CLANG_VERSION})
string(REGEX REPLACE "[0-9]+\\.([0-9]+)(\\.[0-9]+)?" "\\1" CLANG_VERSION_MINOR
${CLANG_VERSION})
if (${CLANG_VERSION} MATCHES "[0-9]+\\.[0-9]+\\.[0-9]+")
set(CLANG_HAS_VERSION_PATCHLEVEL 1)
string(REGEX REPLACE "[0-9]+\\.[0-9]+\\.([0-9]+)" "\\1" CLANG_VERSION_PATCHLEVEL
${CLANG_VERSION})
else()
set(CLANG_HAS_VERSION_PATCHLEVEL 0)
endif()
# Configure the Version.inc file.
configure_file(
${CMAKE_CURRENT_SOURCE_DIR}/include/clang/Basic/Version.inc.in
${CMAKE_CURRENT_BINARY_DIR}/include/clang/Basic/Version.inc)
# Add appropriate flags for GCC
if (LLVM_COMPILER_IS_GCC_COMPATIBLE)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-common -Woverloaded-virtual -Wcast-qual -fno-strict-aliasing")
# Enable -pedantic for Clang even if it's not enabled for LLVM.
if (NOT LLVM_ENABLE_PEDANTIC)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pedantic -Wno-long-long")
endif ()
check_cxx_compiler_flag("-Werror -Wnested-anon-types" CXX_SUPPORTS_NO_NESTED_ANON_TYPES_FLAG)
if( CXX_SUPPORTS_NO_NESTED_ANON_TYPES_FLAG )
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-nested-anon-types" )
endif()
endif ()
if (APPLE)
set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} -Wl,-flat_namespace -Wl,-undefined -Wl,suppress")
endif ()
configure_file(
${CLANG_SOURCE_DIR}/include/clang/Config/config.h.cmake
${CLANG_BINARY_DIR}/include/clang/Config/config.h)
include(LLVMParseArguments)
function(clang_tablegen)
# Syntax:
# clang_tablegen output-file [tablegen-arg ...] SOURCE source-file
# [[TARGET cmake-target-name] [DEPENDS extra-dependency ...]]
#
# Generates a custom command for invoking tblgen as
#
# tblgen source-file -o=output-file tablegen-arg ...
#
# and, if cmake-target-name is provided, creates a custom target for
# executing the custom command depending on output-file. It is
# possible to list more files to depend after DEPENDS.
parse_arguments( CTG "SOURCE;TARGET;DEPENDS" "" ${ARGN} )
if( NOT CTG_SOURCE )
message(FATAL_ERROR "SOURCE source-file required by clang_tablegen")
endif()
set( LLVM_TARGET_DEFINITIONS ${CTG_SOURCE} )
tablegen( CLANG ${CTG_DEFAULT_ARGS} )
list( GET CTG_DEFAULT_ARGS 0 output_file )
if( CTG_TARGET )
add_custom_target( ${CTG_TARGET} DEPENDS ${output_file} ${CTG_DEPENDS} )
set_target_properties( ${CTG_TARGET} PROPERTIES FOLDER "Clang tablegenning")
endif()
endfunction(clang_tablegen)
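# A sketch of a typical call, following the syntax documented above (the
# output name, tblgen flag, .td file, and target name below are illustrative
# only, not files from this listing):
#
#   clang_tablegen(DiagnosticGroups.inc -gen-clang-diag-groups
#     SOURCE Diagnostic.td
#     TARGET ClangDiagnosticGroups)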
# FIXME: Generalize and move to llvm.
function(add_clang_symbol_exports target_name export_file)
# Makefile.rules contains special cases for different platforms.
# We restrict ourselves to Darwin for the time being.
if(${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
add_custom_command(OUTPUT symbol.exports
COMMAND sed -e "s/^/_/" < ${export_file} > symbol.exports
DEPENDS ${export_file}
VERBATIM
COMMENT "Creating export file for ${target_name}")
add_custom_target(${target_name}_exports DEPENDS symbol.exports)
set_property(DIRECTORY APPEND
PROPERTY ADDITIONAL_MAKE_CLEAN_FILES symbol.exports)
get_property(srcs TARGET ${target_name} PROPERTY SOURCES)
foreach(src ${srcs})
get_filename_component(extension ${src} EXT)
if(extension STREQUAL ".cpp")
set(first_source_file ${src})
break()
endif()
endforeach()
# Force re-linking when the exports file changes. Actually, it
# forces recompilation of the source file. The LINK_DEPENDS target
# property only works for makefile-based generators.
set_property(SOURCE ${first_source_file} APPEND PROPERTY
OBJECT_DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/symbol.exports)
set_property(TARGET ${target_name} APPEND_STRING PROPERTY
LINK_FLAGS " -Wl,-exported_symbols_list,${CMAKE_CURRENT_BINARY_DIR}/symbol.exports")
add_dependencies(${target_name} ${target_name}_exports)
endif()
endfunction(add_clang_symbol_exports)
macro(add_clang_library name)
llvm_process_sources(srcs ${ARGN})
if(MSVC_IDE OR XCODE)
# Add public headers
file(RELATIVE_PATH lib_path
${CLANG_SOURCE_DIR}/lib/
${CMAKE_CURRENT_SOURCE_DIR}
)
if(NOT lib_path MATCHES "^[.][.]")
file( GLOB_RECURSE headers
${CLANG_SOURCE_DIR}/include/clang/${lib_path}/*.h
${CLANG_SOURCE_DIR}/include/clang/${lib_path}/*.def
)
set_source_files_properties(${headers} PROPERTIES HEADER_FILE_ONLY ON)
file( GLOB_RECURSE tds
${CLANG_SOURCE_DIR}/include/clang/${lib_path}/*.td
)
source_group("TableGen descriptions" FILES ${tds})
set_source_files_properties(${tds} PROPERTIES HEADER_FILE_ONLY ON)
set(srcs ${srcs} ${headers} ${tds})
endif()
endif(MSVC_IDE OR XCODE)
if (MODULE)
set(libkind MODULE)
elseif (SHARED_LIBRARY)
set(libkind SHARED)
else()
set(libkind)
endif()
add_library( ${name} ${libkind} ${srcs} )
if( LLVM_COMMON_DEPENDS )
add_dependencies( ${name} ${LLVM_COMMON_DEPENDS} )
endif( LLVM_COMMON_DEPENDS )
llvm_config( ${name} ${LLVM_LINK_COMPONENTS} )
target_link_libraries( ${name} ${LLVM_COMMON_LIBS} )
link_system_libs( ${name} )
if (SHARED_LIBRARY AND EXPORTED_SYMBOL_FILE)
add_clang_symbol_exports( ${name} ${EXPORTED_SYMBOL_FILE} )
endif()
if (NOT LLVM_INSTALL_TOOLCHAIN_ONLY OR ${name} STREQUAL "libclang")
install(TARGETS ${name}
LIBRARY DESTINATION lib${LLVM_LIBDIR_SUFFIX}
ARCHIVE DESTINATION lib${LLVM_LIBDIR_SUFFIX}
RUNTIME DESTINATION bin)
endif()
set_target_properties(${name} PROPERTIES FOLDER "Clang libraries")
endmacro(add_clang_library)
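# Illustrative use of the macro above (the library and source names are
# examples, not files from this tree); LLVM_LINK_COMPONENTS is consumed by
# the llvm_config call inside the macro:
#
#   set(LLVM_LINK_COMPONENTS support)
#   add_clang_library(clangExample Example.cpp)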
macro(add_clang_executable name)
add_llvm_executable( ${name} ${ARGN} )
set_target_properties(${name} PROPERTIES FOLDER "Clang executables")
endmacro(add_clang_executable)
include_directories(BEFORE
${CMAKE_CURRENT_BINARY_DIR}/include
${CMAKE_CURRENT_SOURCE_DIR}/include
)
if (NOT LLVM_INSTALL_TOOLCHAIN_ONLY)
install(DIRECTORY include/
DESTINATION include
FILES_MATCHING
PATTERN "*.def"
PATTERN "*.h"
PATTERN "config.h" EXCLUDE
PATTERN ".svn" EXCLUDE
)
install(DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/include/
DESTINATION include
FILES_MATCHING
PATTERN "CMakeFiles" EXCLUDE
PATTERN "*.inc"
)
endif()
install(DIRECTORY include/clang-c
DESTINATION include
FILES_MATCHING
PATTERN "*.h"
PATTERN ".svn" EXCLUDE
)
add_definitions( -D_GNU_SOURCE )
option(CLANG_ENABLE_ARCMT "Build ARCMT." ON)
option(CLANG_ENABLE_REWRITER "Build rewriter." ON)
option(CLANG_ENABLE_STATIC_ANALYZER "Build static analyzer." ON)
if (NOT CLANG_ENABLE_REWRITER AND CLANG_ENABLE_ARCMT)
message(FATAL_ERROR "Cannot disable rewriter while enabling ARCMT")
endif()
if (NOT CLANG_ENABLE_REWRITER AND CLANG_ENABLE_STATIC_ANALYZER)
message(FATAL_ERROR "Cannot disable rewriter while enabling static analyzer")
endif()
if (NOT CLANG_ENABLE_STATIC_ANALYZER AND CLANG_ENABLE_ARCMT)
message(FATAL_ERROR "Cannot disable static analyzer while enabling ARCMT")
endif()
if(CLANG_ENABLE_ARCMT)
add_definitions(-DCLANG_ENABLE_ARCMT)
endif()
if(CLANG_ENABLE_REWRITER)
add_definitions(-DCLANG_ENABLE_REWRITER)
endif()
if(CLANG_ENABLE_STATIC_ANALYZER)
add_definitions(-DCLANG_ENABLE_STATIC_ANALYZER)
endif()
# Clang version information
set(CLANG_EXECUTABLE_VERSION
"${CLANG_VERSION_MAJOR}.${CLANG_VERSION_MINOR}" CACHE STRING
"Version number that will be placed into the clang executable, in the form XX.YY")
set(LIBCLANG_LIBRARY_VERSION
"${CLANG_VERSION_MAJOR}.${CLANG_VERSION_MINOR}" CACHE STRING
"Version number that will be placed into the libclang library , in the form XX.YY")
mark_as_advanced(CLANG_EXECUTABLE_VERSION LIBCLANG_LIBRARY_VERSION)
add_subdirectory(utils/TableGen)
add_subdirectory(include)
add_subdirectory(lib)
add_subdirectory(tools)
add_subdirectory(runtime)
option(CLANG_BUILD_EXAMPLES "Build CLANG example programs by default." OFF)
add_subdirectory(examples)
option(CLANG_INCLUDE_TESTS
"Generate build targets for the Clang unit tests."
${LLVM_INCLUDE_TESTS})
if( CLANG_INCLUDE_TESTS )
add_subdirectory(test)
add_subdirectory(unittests)
endif()
option(CLANG_INCLUDE_DOCS "Generate build targets for the Clang docs."
${LLVM_INCLUDE_DOCS})
if( CLANG_INCLUDE_DOCS )
add_subdirectory(docs)
endif()
# Workaround for MSVS10 to avoid the Dialog Hell
# FIXME: This could be removed with future version of CMake.
if( CLANG_BUILT_STANDALONE AND MSVC_VERSION EQUAL 1600 )
set(CLANG_SLN_FILENAME "${CMAKE_CURRENT_BINARY_DIR}/Clang.sln")
if( EXISTS "${CLANG_SLN_FILENAME}" )
file(APPEND "${CLANG_SLN_FILENAME}" "\n# This should be regenerated!\n")
endif()
endif()
set(BUG_REPORT_URL "http://llvm.org/bugs/" CACHE STRING
"Default URL where bug reports are to be submitted.")
set(CLANG_ORDER_FILE "" CACHE FILEPATH
"Order file to use when compiling clang in order to improve startup time.")


@@ -0,0 +1,40 @@
This file is a list of the people responsible for ensuring that patches for a
particular part of Clang are reviewed, either by themselves or by someone else.
They are also the gatekeepers for their part of Clang, with the final word on
what goes in or not.
The list is sorted by surname and formatted to allow easy grepping and
beautification by scripts. The fields are: name (N), email (E), web-address
(W), PGP key ID and fingerprint (P), description (D), and snail-mail address
(S).
N: Chandler Carruth
E: chandlerc@gmail.com
E: chandlerc@google.com
D: CMake, library layering
N: Eric Christopher
E: echristo@gmail.com
D: Debug Information, autotools/configure/make build, inline assembly
N: Doug Gregor
D: All parts of Clang not covered by someone else
N: Anton Korobeynikov
E: anton@korobeynikov.info
D: Exception handling, Windows codegen, ARM EABI
N: Ted Kremenek
D: Clang Static Analyzer
N: John McCall
E: rjmccall@apple.com
D: Clang LLVM IR generation
N: Chad Rosier
E: mcrosier@codeaurora.org
D: MS-inline asm, and the compiler driver
N: Richard Smith
E: richard@metafoo.co.uk
D: Clang Semantic Analysis (tools/clang/lib/Sema/* tools/clang/include/clang/Sema/*)


@@ -0,0 +1,2 @@
#import <Cocoa/Cocoa.h>


@@ -0,0 +1,86 @@
#include <algorithm>
#include <bitset>
#include <cassert>
#include <cctype>
#include <cerrno>
#include <cfloat>
#include <ciso646>
#include <climits>
#include <clocale>
#include <cmath>
#include <complex>
#include <csetjmp>
#include <csignal>
#include <cstdarg>
#include <cstddef>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <ctime>
#include <cwchar>
#include <cwctype>
#include <deque>
#include <exception>
#include <fstream>
#include <functional>
#include <iomanip>
#include <ios>
#include <iosfwd>
#include <iostream>
#include <istream>
#include <iterator>
#include <limits>
#include <list>
#include <locale>
#include <map>
#include <memory>
#include <new>
#include <numeric>
#include <ostream>
#include <queue>
#include <set>
#include <sstream>
#include <stack>
#include <stdexcept>
#include <streambuf>
#include <string>
#if __has_include(<strstream>)
#include <strstream>
#endif
#include <typeinfo>
#include <utility>
#include <valarray>
#include <vector>
#if __cplusplus >= 201103 || defined(__GXX_EXPERIMENTAL_CXX0X__)
#include <array>
#if __has_include(<atomic>)
#include <atomic>
#endif
#include <chrono>
#if __has_include(<codecvt>)
#include <codecvt>
#endif
#include <condition_variable>
#include <forward_list>
#if __has_include(<future>)
#include <future>
#endif
#include <initializer_list>
#include <mutex>
#include <random>
#include <ratio>
#include <regex>
#if __has_include(<scoped_allocator>)
#include <scoped_allocator>
#endif
#include <system_error>
#include <thread>
#include <tuple>
#include <type_traits>
#if __has_include(<typeindex>)
#include <typeindex>
#endif
#include <unordered_map>
#include <unordered_set>
#endif


@@ -0,0 +1,639 @@
/* Test for integer constant types. */
/* Origin: Joseph Myers <jsm28@cam.ac.uk>. */
/* { dg-do compile } */
/* { dg-options "-std=iso9899:1999 -pedantic-errors" } */
#include <limits.h>
/* Assertion that constant C is of type T. */
#define ASSERT_CONST_TYPE(C, T) \
do { \
typedef T type; \
typedef type **typepp; \
typedef __typeof__((C)) ctype; \
typedef ctype **ctypepp; \
typepp x = 0; \
ctypepp y = 0; \
x = y; \
y = x; \
} while (0)
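/* (Note on the mechanism: the assignments x = y and y = x are valid only if
   `type **' and `ctype **' are compatible pointer types, which at two levels
   of indirection requires T and __typeof__((C)) to be the same type; any
   mismatch is therefore diagnosed as an error under -pedantic-errors.)  */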
/* (T *) if E is zero, (void *) otherwise. */
#define type_if_not(T, E) __typeof__(0 ? (T *)0 : (void *)(E))
/* (T *) if E is nonzero, (void *) otherwise. */
#define type_if(T, E) type_if_not(T, !(E))
/* Combine pointer types, all but one (void *). */
#define type_comb2(T1, T2) __typeof__(0 ? (T1)0 : (T2)0)
#define type_comb3(T1, T2, T3) type_comb2(T1, type_comb2(T2, T3))
#define type_comb4(T1, T2, T3, T4) \
type_comb2(T1, type_comb2(T2, type_comb2(T3, T4)))
#define type_comb6(T1, T2, T3, T4, T5, T6) \
type_comb2(T1, \
type_comb2(T2, \
type_comb2(T3, \
type_comb2(T4, \
type_comb2(T5, T6)))))
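/* (How the selection works: type_if yields (T *) when the condition holds and
   (void *) otherwise.  Because (void *)0 is a null pointer constant, the
   conditional expression in type_comb2 takes the type of the other operand,
   so combining several candidates leaves the single non-(void *) type.)  */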
/* (T1 *) if E1, otherwise (T2 *) if E2. */
#define first_of2p(T1, E1, T2, E2) type_comb2(type_if(T1, (E1)), \
type_if(T2, (!(E1) && (E2))))
/* (T1 *) if E1, otherwise (T2 *) if E2, otherwise (T3 *) if E3. */
#define first_of3p(T1, E1, T2, E2, T3, E3) \
type_comb3(type_if(T1, (E1)), \
type_if(T2, (!(E1) && (E2))), \
type_if(T3, (!(E1) && !(E2) && (E3))))
/* (T1 *) if E1, otherwise (T2 *) if E2, otherwise (T3 *) if E3, otherwise
(T4 *) if E4. */
#define first_of4p(T1, E1, T2, E2, T3, E3, T4, E4) \
type_comb4(type_if(T1, (E1)), \
type_if(T2, (!(E1) && (E2))), \
type_if(T3, (!(E1) && !(E2) && (E3))), \
type_if(T4, (!(E1) && !(E2) && !(E3) && (E4))))
/* (T1 *) if E1, otherwise (T2 *) if E2, otherwise (T3 *) if E3, otherwise
(T4 *) if E4, otherwise (T5 *) if E5, otherwise (T6 *) if E6. */
#define first_of6p(T1, E1, T2, E2, T3, E3, T4, E4, T5, E5, T6, E6) \
type_comb6(type_if(T1, (E1)), \
type_if(T2, (!(E1) && (E2))), \
type_if(T3, (!(E1) && !(E2) && (E3))), \
type_if(T4, (!(E1) && !(E2) && !(E3) && (E4))), \
type_if(T5, (!(E1) && !(E2) && !(E3) && !(E4) && (E5))), \
type_if(T6, (!(E1) && !(E2) && !(E3) \
&& !(E4) && !(E5) && (E6))))
/* Likewise, but return the original type rather than a pointer type. */
#define first_of2(T1, E1, T2, E2) \
__typeof__(*((first_of2p(T1, (E1), T2, (E2)))0))
#define first_of3(T1, E1, T2, E2, T3, E3) \
__typeof__(*((first_of3p(T1, (E1), T2, (E2), T3, (E3)))0))
#define first_of4(T1, E1, T2, E2, T3, E3, T4, E4) \
__typeof__(*((first_of4p(T1, (E1), T2, (E2), T3, (E3), T4, (E4)))0))
#define first_of6(T1, E1, T2, E2, T3, E3, T4, E4, T5, E5, T6, E6) \
__typeof__(*((first_of6p(T1, (E1), T2, (E2), T3, (E3), \
T4, (E4), T5, (E5), T6, (E6)))0))
/* Types of constants according to the C99 rules. */
#define C99_UNSUF_DEC_TYPE(C) \
first_of3(int, (C) <= INT_MAX, \
long int, (C) <= LONG_MAX, \
long long int, (C) <= LLONG_MAX)
#define C99_UNSUF_OCTHEX_TYPE(C) \
first_of6(int, (C) <= INT_MAX, \
unsigned int, (C) <= UINT_MAX, \
long int, (C) <= LONG_MAX, \
unsigned long int, (C) <= ULONG_MAX, \
long long int, (C) <= LLONG_MAX, \
unsigned long long int, (C) <= ULLONG_MAX)
#define C99_SUFu_TYPE(C) \
first_of3(unsigned int, (C) <= UINT_MAX, \
unsigned long int, (C) <= ULONG_MAX, \
unsigned long long int, (C) <= ULLONG_MAX)
#define C99_SUFl_DEC_TYPE(C) \
first_of2(long int, (C) <= LONG_MAX, \
long long int, (C) <= LLONG_MAX)
#define C99_SUFl_OCTHEX_TYPE(C) \
first_of4(long int, (C) <= LONG_MAX, \
unsigned long int, (C) <= ULONG_MAX, \
long long int, (C) <= LLONG_MAX, \
unsigned long long int, (C) <= ULLONG_MAX)
#define C99_SUFul_TYPE(C) \
first_of2(unsigned long int, (C) <= ULONG_MAX, \
unsigned long long int, (C) <= ULLONG_MAX)
#define C99_SUFll_OCTHEX_TYPE(C) \
first_of2(long long int, (C) <= LLONG_MAX, \
unsigned long long int, (C) <= ULLONG_MAX)
/* Checks that constants have correct type. */
#define CHECK_UNSUF_DEC_TYPE(C) ASSERT_CONST_TYPE((C), C99_UNSUF_DEC_TYPE((C)))
#define CHECK_UNSUF_OCTHEX_TYPE(C) \
ASSERT_CONST_TYPE((C), C99_UNSUF_OCTHEX_TYPE((C)))
#define CHECK_SUFu_TYPE(C) ASSERT_CONST_TYPE((C), C99_SUFu_TYPE((C)))
#define CHECK_SUFl_DEC_TYPE(C) ASSERT_CONST_TYPE((C), C99_SUFl_DEC_TYPE((C)))
#define CHECK_SUFl_OCTHEX_TYPE(C) \
ASSERT_CONST_TYPE((C), C99_SUFl_OCTHEX_TYPE((C)))
#define CHECK_SUFul_TYPE(C) ASSERT_CONST_TYPE((C), C99_SUFul_TYPE((C)))
#define CHECK_SUFll_DEC_TYPE(C) ASSERT_CONST_TYPE((C), long long int)
#define CHECK_SUFll_OCTHEX_TYPE(C) \
ASSERT_CONST_TYPE((C), C99_SUFll_OCTHEX_TYPE((C)))
#define CHECK_SUFull_TYPE(C) ASSERT_CONST_TYPE((C), unsigned long long int)
/* Check a decimal value, with all suffixes. */
#define CHECK_DEC_CONST(C) \
CHECK_UNSUF_DEC_TYPE(C); \
CHECK_SUFu_TYPE(C##u); \
CHECK_SUFu_TYPE(C##U); \
CHECK_SUFl_DEC_TYPE(C##l); \
CHECK_SUFl_DEC_TYPE(C##L); \
CHECK_SUFul_TYPE(C##ul); \
CHECK_SUFul_TYPE(C##uL); \
CHECK_SUFul_TYPE(C##Ul); \
CHECK_SUFul_TYPE(C##UL); \
CHECK_SUFll_DEC_TYPE(C##ll); \
CHECK_SUFll_DEC_TYPE(C##LL); \
CHECK_SUFull_TYPE(C##ull); \
CHECK_SUFull_TYPE(C##uLL); \
CHECK_SUFull_TYPE(C##Ull); \
CHECK_SUFull_TYPE(C##ULL);
/* Check an octal or hexadecimal value, with all suffixes. */
#define CHECK_OCTHEX_CONST(C) \
CHECK_UNSUF_OCTHEX_TYPE(C); \
CHECK_SUFu_TYPE(C##u); \
CHECK_SUFu_TYPE(C##U); \
CHECK_SUFl_OCTHEX_TYPE(C##l); \
CHECK_SUFl_OCTHEX_TYPE(C##L); \
CHECK_SUFul_TYPE(C##ul); \
CHECK_SUFul_TYPE(C##uL); \
CHECK_SUFul_TYPE(C##Ul); \
CHECK_SUFul_TYPE(C##UL); \
CHECK_SUFll_OCTHEX_TYPE(C##ll); \
CHECK_SUFll_OCTHEX_TYPE(C##LL); \
CHECK_SUFull_TYPE(C##ull); \
CHECK_SUFull_TYPE(C##uLL); \
CHECK_SUFull_TYPE(C##Ull); \
CHECK_SUFull_TYPE(C##ULL);
#define CHECK_OCT_CONST(C) CHECK_OCTHEX_CONST(C)
#define CHECK_HEX_CONST(C) \
CHECK_OCTHEX_CONST(0x##C); \
CHECK_OCTHEX_CONST(0X##C);
/* True iff "long long" is at least B bits. This presumes that (B-2)/3 is at
most 63. */
#define LLONG_AT_LEAST(B) \
(LLONG_MAX >> ((B)-2)/3 >> ((B)-2)/3 \
>> ((B)-2 - ((B)-2)/3 - ((B)-2)/3))
#define LLONG_HAS_BITS(B) (LLONG_AT_LEAST((B)) && !LLONG_AT_LEAST((B) + 1))
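/* (Worked example: for B == 72, (B-2)/3 == 23, so the three shifts are
   23, 23 and 70 - 23 - 23 == 24, i.e. 70 bits in total; LLONG_MAX >> 70 is
   nonzero exactly when LLONG_MAX >= 2^70, i.e. when long long has at least
   72 bits.  Splitting the shift keeps each individual shift count small.)  */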
void
foo (void)
{
/* Decimal. */
/* Check all 2^n and 2^n - 1 up to 2^71 - 1. */
CHECK_DEC_CONST(1);
CHECK_DEC_CONST(2);
CHECK_DEC_CONST(3);
CHECK_DEC_CONST(4);
CHECK_DEC_CONST(7);
CHECK_DEC_CONST(8);
CHECK_DEC_CONST(15);
CHECK_DEC_CONST(16);
CHECK_DEC_CONST(31);
CHECK_DEC_CONST(32);
CHECK_DEC_CONST(63);
CHECK_DEC_CONST(64);
CHECK_DEC_CONST(127);
CHECK_DEC_CONST(128);
CHECK_DEC_CONST(255);
CHECK_DEC_CONST(256);
CHECK_DEC_CONST(511);
CHECK_DEC_CONST(512);
CHECK_DEC_CONST(1023);
CHECK_DEC_CONST(1024);
CHECK_DEC_CONST(2047);
CHECK_DEC_CONST(2048);
CHECK_DEC_CONST(4095);
CHECK_DEC_CONST(4096);
CHECK_DEC_CONST(8191);
CHECK_DEC_CONST(8192);
CHECK_DEC_CONST(16383);
CHECK_DEC_CONST(16384);
CHECK_DEC_CONST(32767);
CHECK_DEC_CONST(32768);
CHECK_DEC_CONST(65535);
CHECK_DEC_CONST(65536);
CHECK_DEC_CONST(131071);
CHECK_DEC_CONST(131072);
CHECK_DEC_CONST(262143);
CHECK_DEC_CONST(262144);
CHECK_DEC_CONST(524287);
CHECK_DEC_CONST(524288);
CHECK_DEC_CONST(1048575);
CHECK_DEC_CONST(1048576);
CHECK_DEC_CONST(2097151);
CHECK_DEC_CONST(2097152);
CHECK_DEC_CONST(4194303);
CHECK_DEC_CONST(4194304);
CHECK_DEC_CONST(8388607);
CHECK_DEC_CONST(8388608);
CHECK_DEC_CONST(16777215);
CHECK_DEC_CONST(16777216);
CHECK_DEC_CONST(33554431);
CHECK_DEC_CONST(33554432);
CHECK_DEC_CONST(67108863);
CHECK_DEC_CONST(67108864);
CHECK_DEC_CONST(134217727);
CHECK_DEC_CONST(134217728);
CHECK_DEC_CONST(268435455);
CHECK_DEC_CONST(268435456);
CHECK_DEC_CONST(536870911);
CHECK_DEC_CONST(536870912);
CHECK_DEC_CONST(1073741823);
CHECK_DEC_CONST(1073741824);
CHECK_DEC_CONST(2147483647);
CHECK_DEC_CONST(2147483648);
CHECK_DEC_CONST(4294967295);
CHECK_DEC_CONST(4294967296);
CHECK_DEC_CONST(8589934591);
CHECK_DEC_CONST(8589934592);
CHECK_DEC_CONST(17179869183);
CHECK_DEC_CONST(17179869184);
CHECK_DEC_CONST(34359738367);
CHECK_DEC_CONST(34359738368);
CHECK_DEC_CONST(68719476735);
CHECK_DEC_CONST(68719476736);
CHECK_DEC_CONST(137438953471);
CHECK_DEC_CONST(137438953472);
CHECK_DEC_CONST(274877906943);
CHECK_DEC_CONST(274877906944);
CHECK_DEC_CONST(549755813887);
CHECK_DEC_CONST(549755813888);
CHECK_DEC_CONST(1099511627775);
CHECK_DEC_CONST(1099511627776);
CHECK_DEC_CONST(2199023255551);
CHECK_DEC_CONST(2199023255552);
CHECK_DEC_CONST(4398046511103);
CHECK_DEC_CONST(4398046511104);
CHECK_DEC_CONST(8796093022207);
CHECK_DEC_CONST(8796093022208);
CHECK_DEC_CONST(17592186044415);
CHECK_DEC_CONST(17592186044416);
CHECK_DEC_CONST(35184372088831);
CHECK_DEC_CONST(35184372088832);
CHECK_DEC_CONST(70368744177663);
CHECK_DEC_CONST(70368744177664);
CHECK_DEC_CONST(140737488355327);
CHECK_DEC_CONST(140737488355328);
CHECK_DEC_CONST(281474976710655);
CHECK_DEC_CONST(281474976710656);
CHECK_DEC_CONST(562949953421311);
CHECK_DEC_CONST(562949953421312);
CHECK_DEC_CONST(1125899906842623);
CHECK_DEC_CONST(1125899906842624);
CHECK_DEC_CONST(2251799813685247);
CHECK_DEC_CONST(2251799813685248);
CHECK_DEC_CONST(4503599627370495);
CHECK_DEC_CONST(4503599627370496);
CHECK_DEC_CONST(9007199254740991);
CHECK_DEC_CONST(9007199254740992);
CHECK_DEC_CONST(18014398509481983);
CHECK_DEC_CONST(18014398509481984);
CHECK_DEC_CONST(36028797018963967);
CHECK_DEC_CONST(36028797018963968);
CHECK_DEC_CONST(72057594037927935);
CHECK_DEC_CONST(72057594037927936);
CHECK_DEC_CONST(144115188075855871);
CHECK_DEC_CONST(144115188075855872);
CHECK_DEC_CONST(288230376151711743);
CHECK_DEC_CONST(288230376151711744);
CHECK_DEC_CONST(576460752303423487);
CHECK_DEC_CONST(576460752303423488);
CHECK_DEC_CONST(1152921504606846975);
CHECK_DEC_CONST(1152921504606846976);
CHECK_DEC_CONST(2305843009213693951);
CHECK_DEC_CONST(2305843009213693952);
CHECK_DEC_CONST(4611686018427387903);
CHECK_DEC_CONST(4611686018427387904);
CHECK_DEC_CONST(9223372036854775807);
#if LLONG_AT_LEAST(65)
CHECK_DEC_CONST(9223372036854775808);
CHECK_DEC_CONST(18446744073709551615);
#endif
#if LLONG_AT_LEAST(66)
CHECK_DEC_CONST(18446744073709551616);
CHECK_DEC_CONST(36893488147419103231);
#endif
#if LLONG_AT_LEAST(67)
CHECK_DEC_CONST(36893488147419103232);
CHECK_DEC_CONST(73786976294838206463);
#endif
#if LLONG_AT_LEAST(68)
CHECK_DEC_CONST(73786976294838206464);
CHECK_DEC_CONST(147573952589676412927);
#endif
#if LLONG_AT_LEAST(69)
CHECK_DEC_CONST(147573952589676412928);
CHECK_DEC_CONST(295147905179352825855);
#endif
#if LLONG_AT_LEAST(70)
CHECK_DEC_CONST(295147905179352825856);
CHECK_DEC_CONST(590295810358705651711);
#endif
#if LLONG_AT_LEAST(71)
CHECK_DEC_CONST(590295810358705651712);
CHECK_DEC_CONST(1180591620717411303423);
#endif
#if LLONG_AT_LEAST(72)
CHECK_DEC_CONST(1180591620717411303424);
CHECK_DEC_CONST(2361183241434822606847);
#endif
/* Octal and hexadecimal. */
/* Check all 2^n and 2^n - 1 up to 2^72 - 1. */
CHECK_OCT_CONST(0);
CHECK_HEX_CONST(0);
CHECK_OCT_CONST(01);
CHECK_HEX_CONST(1);
CHECK_OCT_CONST(02);
CHECK_HEX_CONST(2);
CHECK_OCT_CONST(03);
CHECK_HEX_CONST(3);
CHECK_OCT_CONST(04);
CHECK_HEX_CONST(4);
CHECK_OCT_CONST(07);
CHECK_HEX_CONST(7);
CHECK_OCT_CONST(010);
CHECK_HEX_CONST(8);
CHECK_OCT_CONST(017);
CHECK_HEX_CONST(f);
CHECK_OCT_CONST(020);
CHECK_HEX_CONST(10);
CHECK_OCT_CONST(037);
CHECK_HEX_CONST(1f);
CHECK_OCT_CONST(040);
CHECK_HEX_CONST(20);
CHECK_OCT_CONST(077);
CHECK_HEX_CONST(3f);
CHECK_OCT_CONST(0100);
CHECK_HEX_CONST(40);
CHECK_OCT_CONST(0177);
CHECK_HEX_CONST(7f);
CHECK_OCT_CONST(0200);
CHECK_HEX_CONST(80);
CHECK_OCT_CONST(0377);
CHECK_HEX_CONST(ff);
CHECK_OCT_CONST(0400);
CHECK_HEX_CONST(100);
CHECK_OCT_CONST(0777);
CHECK_HEX_CONST(1ff);
CHECK_OCT_CONST(01000);
CHECK_HEX_CONST(200);
CHECK_OCT_CONST(01777);
CHECK_HEX_CONST(3ff);
CHECK_OCT_CONST(02000);
CHECK_HEX_CONST(400);
CHECK_OCT_CONST(03777);
CHECK_HEX_CONST(7ff);
CHECK_OCT_CONST(04000);
CHECK_HEX_CONST(800);
CHECK_OCT_CONST(07777);
CHECK_HEX_CONST(fff);
CHECK_OCT_CONST(010000);
CHECK_HEX_CONST(1000);
CHECK_OCT_CONST(017777);
CHECK_HEX_CONST(1fff);
CHECK_OCT_CONST(020000);
CHECK_HEX_CONST(2000);
CHECK_OCT_CONST(037777);
CHECK_HEX_CONST(3fff);
CHECK_OCT_CONST(040000);
CHECK_HEX_CONST(4000);
CHECK_OCT_CONST(077777);
CHECK_HEX_CONST(7fff);
CHECK_OCT_CONST(0100000);
CHECK_HEX_CONST(8000);
CHECK_OCT_CONST(0177777);
CHECK_HEX_CONST(ffff);
CHECK_OCT_CONST(0200000);
CHECK_HEX_CONST(10000);
CHECK_OCT_CONST(0377777);
CHECK_HEX_CONST(1ffff);
CHECK_OCT_CONST(0400000);
CHECK_HEX_CONST(20000);
CHECK_OCT_CONST(0777777);
CHECK_HEX_CONST(3ffff);
CHECK_OCT_CONST(01000000);
CHECK_HEX_CONST(40000);
CHECK_OCT_CONST(01777777);
CHECK_HEX_CONST(7ffff);
CHECK_OCT_CONST(02000000);
CHECK_HEX_CONST(80000);
CHECK_OCT_CONST(03777777);
CHECK_HEX_CONST(fffff);
CHECK_OCT_CONST(04000000);
CHECK_HEX_CONST(100000);
CHECK_OCT_CONST(07777777);
CHECK_HEX_CONST(1fffff);
CHECK_OCT_CONST(010000000);
CHECK_HEX_CONST(200000);
CHECK_OCT_CONST(017777777);
CHECK_HEX_CONST(3fffff);
CHECK_OCT_CONST(020000000);
CHECK_HEX_CONST(400000);
CHECK_OCT_CONST(037777777);
CHECK_HEX_CONST(7fffff);
CHECK_OCT_CONST(040000000);
CHECK_HEX_CONST(800000);
CHECK_OCT_CONST(077777777);
CHECK_HEX_CONST(ffffff);
CHECK_OCT_CONST(0100000000);
CHECK_HEX_CONST(1000000);
CHECK_OCT_CONST(0177777777);
CHECK_HEX_CONST(1ffffff);
CHECK_OCT_CONST(0200000000);
CHECK_HEX_CONST(2000000);
CHECK_OCT_CONST(0377777777);
CHECK_HEX_CONST(3ffffff);
CHECK_OCT_CONST(0400000000);
CHECK_HEX_CONST(4000000);
CHECK_OCT_CONST(0777777777);
CHECK_HEX_CONST(7ffffff);
CHECK_OCT_CONST(01000000000);
CHECK_HEX_CONST(8000000);
CHECK_OCT_CONST(01777777777);
CHECK_HEX_CONST(fffffff);
CHECK_OCT_CONST(02000000000);
CHECK_HEX_CONST(10000000);
CHECK_OCT_CONST(03777777777);
CHECK_HEX_CONST(1fffffff);
CHECK_OCT_CONST(04000000000);
CHECK_HEX_CONST(20000000);
CHECK_OCT_CONST(07777777777);
CHECK_HEX_CONST(3fffffff);
CHECK_OCT_CONST(010000000000);
CHECK_HEX_CONST(40000000);
CHECK_OCT_CONST(017777777777);
CHECK_HEX_CONST(7fffffff);
CHECK_OCT_CONST(020000000000);
CHECK_HEX_CONST(80000000);
CHECK_OCT_CONST(037777777777);
CHECK_HEX_CONST(ffffffff);
CHECK_OCT_CONST(040000000000);
CHECK_HEX_CONST(100000000);
CHECK_OCT_CONST(077777777777);
CHECK_HEX_CONST(1ffffffff);
CHECK_OCT_CONST(0100000000000);
CHECK_HEX_CONST(200000000);
CHECK_OCT_CONST(0177777777777);
CHECK_HEX_CONST(3ffffffff);
CHECK_OCT_CONST(0200000000000);
CHECK_HEX_CONST(400000000);
CHECK_OCT_CONST(0377777777777);
CHECK_HEX_CONST(7ffffffff);
CHECK_OCT_CONST(0400000000000);
CHECK_HEX_CONST(800000000);
CHECK_OCT_CONST(0777777777777);
CHECK_HEX_CONST(fffffffff);
CHECK_OCT_CONST(01000000000000);
CHECK_HEX_CONST(1000000000);
CHECK_OCT_CONST(01777777777777);
CHECK_HEX_CONST(1fffffffff);
CHECK_OCT_CONST(02000000000000);
CHECK_HEX_CONST(2000000000);
CHECK_OCT_CONST(03777777777777);
CHECK_HEX_CONST(3fffffffff);
CHECK_OCT_CONST(04000000000000);
CHECK_HEX_CONST(4000000000);
CHECK_OCT_CONST(07777777777777);
CHECK_HEX_CONST(7fffffffff);
CHECK_OCT_CONST(010000000000000);
CHECK_HEX_CONST(8000000000);
CHECK_OCT_CONST(017777777777777);
CHECK_HEX_CONST(ffffffffff);
CHECK_OCT_CONST(020000000000000);
CHECK_HEX_CONST(10000000000);
CHECK_OCT_CONST(037777777777777);
CHECK_HEX_CONST(1ffffffffff);
CHECK_OCT_CONST(040000000000000);
CHECK_HEX_CONST(20000000000);
CHECK_OCT_CONST(077777777777777);
CHECK_HEX_CONST(3ffffffffff);
CHECK_OCT_CONST(0100000000000000);
CHECK_HEX_CONST(40000000000);
CHECK_OCT_CONST(0177777777777777);
CHECK_HEX_CONST(7ffffffffff);
CHECK_OCT_CONST(0200000000000000);
CHECK_HEX_CONST(80000000000);
CHECK_OCT_CONST(0377777777777777);
CHECK_HEX_CONST(fffffffffff);
CHECK_OCT_CONST(0400000000000000);
CHECK_HEX_CONST(100000000000);
CHECK_OCT_CONST(0777777777777777);
CHECK_HEX_CONST(1fffffffffff);
CHECK_OCT_CONST(01000000000000000);
CHECK_HEX_CONST(200000000000);
CHECK_OCT_CONST(01777777777777777);
CHECK_HEX_CONST(3fffffffffff);
CHECK_OCT_CONST(02000000000000000);
CHECK_HEX_CONST(400000000000);
CHECK_OCT_CONST(03777777777777777);
CHECK_HEX_CONST(7fffffffffff);
CHECK_OCT_CONST(04000000000000000);
CHECK_HEX_CONST(800000000000);
CHECK_OCT_CONST(07777777777777777);
CHECK_HEX_CONST(ffffffffffff);
CHECK_OCT_CONST(010000000000000000);
CHECK_HEX_CONST(1000000000000);
CHECK_OCT_CONST(017777777777777777);
CHECK_HEX_CONST(1ffffffffffff);
CHECK_OCT_CONST(020000000000000000);
CHECK_HEX_CONST(2000000000000);
CHECK_OCT_CONST(037777777777777777);
CHECK_HEX_CONST(3ffffffffffff);
CHECK_OCT_CONST(040000000000000000);
CHECK_HEX_CONST(4000000000000);
CHECK_OCT_CONST(077777777777777777);
CHECK_HEX_CONST(7ffffffffffff);
CHECK_OCT_CONST(0100000000000000000);
CHECK_HEX_CONST(8000000000000);
CHECK_OCT_CONST(0177777777777777777);
CHECK_HEX_CONST(fffffffffffff);
CHECK_OCT_CONST(0200000000000000000);
CHECK_HEX_CONST(10000000000000);
CHECK_OCT_CONST(0377777777777777777);
CHECK_HEX_CONST(1fffffffffffff);
CHECK_OCT_CONST(0400000000000000000);
CHECK_HEX_CONST(20000000000000);
CHECK_OCT_CONST(0777777777777777777);
CHECK_HEX_CONST(3fffffffffffff);
CHECK_OCT_CONST(01000000000000000000);
CHECK_HEX_CONST(40000000000000);
CHECK_OCT_CONST(01777777777777777777);
CHECK_HEX_CONST(7fffffffffffff);
CHECK_OCT_CONST(02000000000000000000);
CHECK_HEX_CONST(80000000000000);
CHECK_OCT_CONST(03777777777777777777);
CHECK_HEX_CONST(ffffffffffffff);
CHECK_OCT_CONST(04000000000000000000);
CHECK_HEX_CONST(100000000000000);
CHECK_OCT_CONST(07777777777777777777);
CHECK_HEX_CONST(1ffffffffffffff);
CHECK_OCT_CONST(010000000000000000000);
CHECK_HEX_CONST(200000000000000);
CHECK_OCT_CONST(017777777777777777777);
CHECK_HEX_CONST(3ffffffffffffff);
CHECK_OCT_CONST(020000000000000000000);
CHECK_HEX_CONST(400000000000000);
CHECK_OCT_CONST(037777777777777777777);
CHECK_HEX_CONST(7ffffffffffffff);
CHECK_OCT_CONST(040000000000000000000);
CHECK_HEX_CONST(800000000000000);
CHECK_OCT_CONST(077777777777777777777);
CHECK_HEX_CONST(fffffffffffffff);
CHECK_OCT_CONST(0100000000000000000000);
CHECK_HEX_CONST(1000000000000000);
CHECK_OCT_CONST(0177777777777777777777);
CHECK_HEX_CONST(1fffffffffffffff);
CHECK_OCT_CONST(0200000000000000000000);
CHECK_HEX_CONST(2000000000000000);
CHECK_OCT_CONST(0377777777777777777777);
CHECK_HEX_CONST(3fffffffffffffff);
CHECK_OCT_CONST(0400000000000000000000);
CHECK_HEX_CONST(4000000000000000);
CHECK_OCT_CONST(0777777777777777777777);
CHECK_HEX_CONST(7fffffffffffffff);
CHECK_OCT_CONST(01000000000000000000000);
CHECK_HEX_CONST(8000000000000000);
CHECK_OCT_CONST(01777777777777777777777);
CHECK_HEX_CONST(ffffffffffffffff);
#if LLONG_AT_LEAST(65)
CHECK_OCT_CONST(02000000000000000000000);
CHECK_HEX_CONST(10000000000000000);
CHECK_OCT_CONST(03777777777777777777777);
CHECK_HEX_CONST(1ffffffffffffffff);
#endif
#if LLONG_AT_LEAST(66)
CHECK_OCT_CONST(04000000000000000000000);
CHECK_HEX_CONST(20000000000000000);
CHECK_OCT_CONST(07777777777777777777777);
CHECK_HEX_CONST(3ffffffffffffffff);
#endif
#if LLONG_AT_LEAST(67)
CHECK_OCT_CONST(010000000000000000000000);
CHECK_HEX_CONST(40000000000000000);
CHECK_OCT_CONST(017777777777777777777777);
CHECK_HEX_CONST(7ffffffffffffffff);
#endif
#if LLONG_AT_LEAST(68)
CHECK_OCT_CONST(020000000000000000000000);
CHECK_HEX_CONST(80000000000000000);
CHECK_OCT_CONST(037777777777777777777777);
CHECK_HEX_CONST(fffffffffffffffff);
#endif
#if LLONG_AT_LEAST(69)
CHECK_OCT_CONST(040000000000000000000000);
CHECK_HEX_CONST(100000000000000000);
CHECK_OCT_CONST(077777777777777777777777);
CHECK_HEX_CONST(1fffffffffffffffff);
#endif
#if LLONG_AT_LEAST(70)
CHECK_OCT_CONST(0100000000000000000000000);
CHECK_HEX_CONST(200000000000000000);
CHECK_OCT_CONST(0177777777777777777777777);
CHECK_HEX_CONST(3fffffffffffffffff);
#endif
#if LLONG_AT_LEAST(71)
CHECK_OCT_CONST(0200000000000000000000000);
CHECK_HEX_CONST(400000000000000000);
CHECK_OCT_CONST(0377777777777777777777777);
CHECK_HEX_CONST(7fffffffffffffffff);
#endif
#if LLONG_AT_LEAST(72)
CHECK_OCT_CONST(0400000000000000000000000);
CHECK_HEX_CONST(800000000000000000);
CHECK_OCT_CONST(0777777777777777777777777);
CHECK_HEX_CONST(ffffffffffffffffff);
#endif
}


@@ -0,0 +1,4 @@
#include <Carbon/Carbon.h>
//#import<vecLib/vecLib.h>


@@ -0,0 +1,27 @@
#define EXPAND_2_CASES(i, x, y) CASE(i, x, y); CASE(i + 1, x, y);
#define EXPAND_4_CASES(i, x, y) EXPAND_2_CASES(i, x, y) EXPAND_2_CASES(i + 2, x, y)
#define EXPAND_8_CASES(i, x, y) EXPAND_4_CASES(i, x, y) EXPAND_4_CASES(i + 4, x, y)
#define EXPAND_16_CASES(i, x, y) EXPAND_8_CASES(i, x, y) EXPAND_8_CASES(i + 8, x, y)
#define EXPAND_32_CASES(i, x, y) EXPAND_16_CASES(i, x, y) EXPAND_16_CASES(i + 16, x, y)
#define EXPAND_64_CASES(i, x, y) EXPAND_32_CASES(i, x, y) EXPAND_32_CASES(i + 32, x, y)
#define EXPAND_128_CASES(i, x, y) EXPAND_64_CASES(i, x, y) EXPAND_64_CASES(i + 64, x, y)
#define EXPAND_256_CASES(i, x, y) EXPAND_128_CASES(i, x, y) EXPAND_128_CASES(i + 128, x, y)
#define EXPAND_512_CASES(i, x, y) EXPAND_256_CASES(i, x, y) EXPAND_256_CASES(i + 256, x, y)
#define EXPAND_1024_CASES(i, x, y) EXPAND_512_CASES(i, x, y) EXPAND_512_CASES(i + 512, x, y)
#define EXPAND_2048_CASES(i, x, y) EXPAND_1024_CASES(i, x, y) EXPAND_1024_CASES(i + 1024, x, y)
#define EXPAND_4096_CASES(i, x, y) EXPAND_2048_CASES(i, x, y) EXPAND_2048_CASES(i + 2048, x, y)
// This has a *monstrous* single fan-out in the CFG, across 8000 blocks inside
// the while loop.
unsigned cfg_big_switch(int x) {
unsigned y = 0;
while (x > 0) {
switch(x) {
#define CASE(i, x, y) \
case i: { int case_var = 3*x + i; y += case_var - 1; break; }
EXPAND_4096_CASES(0, x, y);
}
--x;
}
return y;
}


@@ -0,0 +1,20 @@
#define EXPAND_2_BRANCHES(i, x, y) BRANCH(i, x, y); BRANCH(i + 1, x, y);
#define EXPAND_4_BRANCHES(i, x, y) EXPAND_2_BRANCHES(i, x, y) EXPAND_2_BRANCHES(i + 2, x, y)
#define EXPAND_8_BRANCHES(i, x, y) EXPAND_4_BRANCHES(i, x, y) EXPAND_4_BRANCHES(i + 4, x, y)
#define EXPAND_16_BRANCHES(i, x, y) EXPAND_8_BRANCHES(i, x, y) EXPAND_8_BRANCHES(i + 8, x, y)
#define EXPAND_32_BRANCHES(i, x, y) EXPAND_16_BRANCHES(i, x, y) EXPAND_16_BRANCHES(i + 16, x, y)
#define EXPAND_64_BRANCHES(i, x, y) EXPAND_32_BRANCHES(i, x, y) EXPAND_32_BRANCHES(i + 32, x, y)
#define EXPAND_128_BRANCHES(i, x, y) EXPAND_64_BRANCHES(i, x, y) EXPAND_64_BRANCHES(i + 64, x, y)
#define EXPAND_256_BRANCHES(i, x, y) EXPAND_128_BRANCHES(i, x, y) EXPAND_128_BRANCHES(i + 128, x, y)
#define EXPAND_512_BRANCHES(i, x, y) EXPAND_256_BRANCHES(i, x, y) EXPAND_256_BRANCHES(i + 256, x, y)
#define EXPAND_1024_BRANCHES(i, x, y) EXPAND_512_BRANCHES(i, x, y) EXPAND_512_BRANCHES(i + 512, x, y)
#define EXPAND_2048_BRANCHES(i, x, y) EXPAND_1024_BRANCHES(i, x, y) EXPAND_1024_BRANCHES(i + 1024, x, y)
#define EXPAND_4096_BRANCHES(i, x, y) EXPAND_2048_BRANCHES(i, x, y) EXPAND_2048_BRANCHES(i + 2048, x, y)
unsigned cfg_long_chain_single_exit(unsigned x) {
unsigned y = 0;
#define BRANCH(i, x, y) if ((x % 13171) < i) { int var = x / 13171; y ^= var; }
EXPAND_4096_BRANCHES(1, x, y);
#undef BRANCH
return y;
}


@@ -0,0 +1,20 @@
#define EXPAND_2_BRANCHES(i, x, y) BRANCH(i, x, y); BRANCH(i + 1, x, y);
#define EXPAND_4_BRANCHES(i, x, y) EXPAND_2_BRANCHES(i, x, y) EXPAND_2_BRANCHES(i + 2, x, y)
#define EXPAND_8_BRANCHES(i, x, y) EXPAND_4_BRANCHES(i, x, y) EXPAND_4_BRANCHES(i + 4, x, y)
#define EXPAND_16_BRANCHES(i, x, y) EXPAND_8_BRANCHES(i, x, y) EXPAND_8_BRANCHES(i + 8, x, y)
#define EXPAND_32_BRANCHES(i, x, y) EXPAND_16_BRANCHES(i, x, y) EXPAND_16_BRANCHES(i + 16, x, y)
#define EXPAND_64_BRANCHES(i, x, y) EXPAND_32_BRANCHES(i, x, y) EXPAND_32_BRANCHES(i + 32, x, y)
#define EXPAND_128_BRANCHES(i, x, y) EXPAND_64_BRANCHES(i, x, y) EXPAND_64_BRANCHES(i + 64, x, y)
#define EXPAND_256_BRANCHES(i, x, y) EXPAND_128_BRANCHES(i, x, y) EXPAND_128_BRANCHES(i + 128, x, y)
#define EXPAND_512_BRANCHES(i, x, y) EXPAND_256_BRANCHES(i, x, y) EXPAND_256_BRANCHES(i + 256, x, y)
#define EXPAND_1024_BRANCHES(i, x, y) EXPAND_512_BRANCHES(i, x, y) EXPAND_512_BRANCHES(i + 512, x, y)
#define EXPAND_2048_BRANCHES(i, x, y) EXPAND_1024_BRANCHES(i, x, y) EXPAND_1024_BRANCHES(i + 1024, x, y)
#define EXPAND_4096_BRANCHES(i, x, y) EXPAND_2048_BRANCHES(i, x, y) EXPAND_2048_BRANCHES(i + 2048, x, y)
unsigned cfg_long_chain_multiple_exit(unsigned x) {
unsigned y = 0;
#define BRANCH(i, x, y) if (((x % 13171) + ++y) < i) { int var = x / 13171 + y; return var; }
EXPAND_4096_BRANCHES(1, x, y);
#undef BRANCH
return 42;
}


@@ -0,0 +1,21 @@
#define EXPAND_2_BRANCHES(i, x, y) BRANCH(i, x, y); BRANCH(i + 1, x, y);
#define EXPAND_4_BRANCHES(i, x, y) EXPAND_2_BRANCHES(i, x, y) EXPAND_2_BRANCHES(i + 2, x, y)
#define EXPAND_8_BRANCHES(i, x, y) EXPAND_4_BRANCHES(i, x, y) EXPAND_4_BRANCHES(i + 4, x, y)
#define EXPAND_16_BRANCHES(i, x, y) EXPAND_8_BRANCHES(i, x, y) EXPAND_8_BRANCHES(i + 8, x, y)
#define EXPAND_32_BRANCHES(i, x, y) EXPAND_16_BRANCHES(i, x, y) EXPAND_16_BRANCHES(i + 16, x, y)
#define EXPAND_64_BRANCHES(i, x, y) EXPAND_32_BRANCHES(i, x, y) EXPAND_32_BRANCHES(i + 32, x, y)
#define EXPAND_128_BRANCHES(i, x, y) EXPAND_64_BRANCHES(i, x, y) EXPAND_64_BRANCHES(i + 64, x, y)
#define EXPAND_256_BRANCHES(i, x, y) EXPAND_128_BRANCHES(i, x, y) EXPAND_128_BRANCHES(i + 128, x, y)
#define EXPAND_512_BRANCHES(i, x, y) EXPAND_256_BRANCHES(i, x, y) EXPAND_256_BRANCHES(i + 256, x, y)
#define EXPAND_1024_BRANCHES(i, x, y) EXPAND_512_BRANCHES(i, x, y) EXPAND_512_BRANCHES(i + 512, x, y)
#define EXPAND_2048_BRANCHES(i, x, y) EXPAND_1024_BRANCHES(i, x, y) EXPAND_1024_BRANCHES(i + 1024, x, y)
#define EXPAND_4096_BRANCHES(i, x, y) EXPAND_2048_BRANCHES(i, x, y) EXPAND_2048_BRANCHES(i + 2048, x, y)
unsigned cfg_long_chain_many_preds(unsigned x) {
unsigned y = 0;
#define BRANCH(i, x, y) if ((x % 13171) < i) { int var = x / 13171; y ^= var; } else
EXPAND_4096_BRANCHES(1, x, y);
#undef BRANCH
int var = x / 13171; y ^= var;
return y;
}


@@ -0,0 +1,36 @@
#define EXPAND_2_INNER_CASES(i, x, y) INNER_CASE(i, x, y); INNER_CASE(i + 1, x, y);
#define EXPAND_4_INNER_CASES(i, x, y) EXPAND_2_INNER_CASES(i, x, y) EXPAND_2_INNER_CASES(i + 2, x, y)
#define EXPAND_8_INNER_CASES(i, x, y) EXPAND_4_INNER_CASES(i, x, y) EXPAND_4_INNER_CASES(i + 4, x, y)
#define EXPAND_16_INNER_CASES(i, x, y) EXPAND_8_INNER_CASES(i, x, y) EXPAND_8_INNER_CASES(i + 8, x, y)
#define EXPAND_32_INNER_CASES(i, x, y) EXPAND_16_INNER_CASES(i, x, y) EXPAND_16_INNER_CASES(i + 16, x, y)
#define EXPAND_64_INNER_CASES(i, x, y) EXPAND_32_INNER_CASES(i, x, y) EXPAND_32_INNER_CASES(i + 32, x, y)
#define EXPAND_2_OUTER_CASES(i, x, y) OUTER_CASE(i, x, y); OUTER_CASE(i + 1, x, y);
#define EXPAND_4_OUTER_CASES(i, x, y) EXPAND_2_OUTER_CASES(i, x, y) EXPAND_2_OUTER_CASES(i + 2, x, y)
#define EXPAND_8_OUTER_CASES(i, x, y) EXPAND_4_OUTER_CASES(i, x, y) EXPAND_4_OUTER_CASES(i + 4, x, y)
#define EXPAND_16_OUTER_CASES(i, x, y) EXPAND_8_OUTER_CASES(i, x, y) EXPAND_8_OUTER_CASES(i + 8, x, y)
#define EXPAND_32_OUTER_CASES(i, x, y) EXPAND_16_OUTER_CASES(i, x, y) EXPAND_16_OUTER_CASES(i + 16, x, y)
#define EXPAND_64_OUTER_CASES(i, x, y) EXPAND_32_OUTER_CASES(i, x, y) EXPAND_32_OUTER_CASES(i + 32, x, y)
// Rather than a single monstrous fan-out, this fans out in smaller increments,
// but to a similar size.
unsigned cfg_nested_switch(int x) {
unsigned y = 0;
while (x > 0) {
switch (x) {
#define INNER_CASE(i, x, y) \
case i: { int case_var = 3*x + i; y += case_var - 1; break; }
#define OUTER_CASE(i, x, y) \
case i: { \
int case_var = y >> 8; \
switch (case_var) { \
EXPAND_64_INNER_CASES(0, x, y); \
} \
break; \
}
EXPAND_64_OUTER_CASES(0, x, y);
}
--x;
}
return y;
}


@@ -0,0 +1,59 @@
// Hammer the CFG with large numbers of overlapping variable scopes, which
// have implicit destructors triggered at each edge.
#define EXPAND_BASIC_STRUCT(i) struct X##i { X##i(int); ~X##i(); };
#define EXPAND_NORET_STRUCT(i) struct X##i { X##i(int); ~X##i() __attribute__((noreturn)); };
EXPAND_BASIC_STRUCT(0000); EXPAND_NORET_STRUCT(0001);
EXPAND_BASIC_STRUCT(0010); EXPAND_BASIC_STRUCT(0011);
EXPAND_BASIC_STRUCT(0100); EXPAND_NORET_STRUCT(0101);
EXPAND_NORET_STRUCT(0110); EXPAND_BASIC_STRUCT(0111);
EXPAND_BASIC_STRUCT(1000); EXPAND_NORET_STRUCT(1001);
EXPAND_BASIC_STRUCT(1010); EXPAND_BASIC_STRUCT(1011);
EXPAND_NORET_STRUCT(1100); EXPAND_NORET_STRUCT(1101);
EXPAND_BASIC_STRUCT(1110); EXPAND_BASIC_STRUCT(1111);
#define EXPAND_2_VARS(c, i, x) const X##i var_##c##_##i##0(x), &var_##c##_##i##1 = X##i(x)
#define EXPAND_4_VARS(c, i, x) EXPAND_2_VARS(c, i##0, x); EXPAND_2_VARS(c, i##1, x)
#define EXPAND_8_VARS(c, i, x) EXPAND_4_VARS(c, i##0, x); EXPAND_4_VARS(c, i##1, x)
#define EXPAND_16_VARS(c, i, x) EXPAND_8_VARS(c, i##0, x); EXPAND_8_VARS(c, i##1, x)
#define EXPAND_32_VARS(c, x) EXPAND_16_VARS(c, 0, x); EXPAND_16_VARS(c, 1, x)
#define EXPAND_2_INNER_CASES(i, x, y) INNER_CASE(i, x, y); INNER_CASE(i + 1, x, y);
#define EXPAND_4_INNER_CASES(i, x, y) EXPAND_2_INNER_CASES(i, x, y) EXPAND_2_INNER_CASES(i + 2, x, y)
#define EXPAND_8_INNER_CASES(i, x, y) EXPAND_4_INNER_CASES(i, x, y) EXPAND_4_INNER_CASES(i + 4, x, y)
#define EXPAND_16_INNER_CASES(i, x, y) EXPAND_8_INNER_CASES(i, x, y) EXPAND_8_INNER_CASES(i + 8, x, y)
#define EXPAND_32_INNER_CASES(i, x, y) EXPAND_16_INNER_CASES(i, x, y) EXPAND_16_INNER_CASES(i + 16, x, y)
#define EXPAND_2_OUTER_CASES(i, x, y) OUTER_CASE(i, x, y); OUTER_CASE(i + 1, x, y);
#define EXPAND_4_OUTER_CASES(i, x, y) EXPAND_2_OUTER_CASES(i, x, y) EXPAND_2_OUTER_CASES(i + 2, x, y)
#define EXPAND_8_OUTER_CASES(i, x, y) EXPAND_4_OUTER_CASES(i, x, y) EXPAND_4_OUTER_CASES(i + 4, x, y)
#define EXPAND_16_OUTER_CASES(i, x, y) EXPAND_8_OUTER_CASES(i, x, y) EXPAND_8_OUTER_CASES(i + 8, x, y)
#define EXPAND_32_OUTER_CASES(i, x, y) EXPAND_16_OUTER_CASES(i, x, y) EXPAND_16_OUTER_CASES(i + 16, x, y)
unsigned cfg_nested_vars(int x) {
int y = 0;
while (x > 0) {
EXPAND_32_VARS(a, x);
switch (x) {
#define INNER_CASE(i, x, y) \
case i: { \
int case_var = 3*x + i; \
EXPAND_32_VARS(c, case_var); \
y += case_var - 1; \
break; \
}
#define OUTER_CASE(i, x, y) \
case i: { \
int case_var = y >> 8; \
EXPAND_32_VARS(b, y); \
switch (case_var) { \
EXPAND_32_INNER_CASES(0, x, y); \
} \
break; \
}
EXPAND_32_OUTER_CASES(0, x, y);
}
--x;
}
return y;
}


@@ -0,0 +1,5 @@
// clang -I/usr/include/c++/4.0.0 -I/usr/include/c++/4.0.0/powerpc-apple-darwin8 -I/usr/include/c++/4.0.0/backward INPUTS/iostream.cc -Eonly
#include <iostream>
#include <stdint.h>


@@ -0,0 +1,17 @@
// This pounds on macro expansion for performance reasons. This is currently
// heavily constrained by darwin's malloc.
// Function-like macros.
#define A0(A, B) A B
#define A1(A, B) A0(A,B) A0(A,B) A0(A,B) A0(A,B) A0(A,B) A0(A,B)
#define A2(A, B) A1(A,B) A1(A,B) A1(A,B) A1(A,B) A1(A,B) A1(A,B)
#define A3(A, B) A2(A,B) A2(A,B) A2(A,B) A2(A,B) A2(A,B) A2(A,B)
#define A4(A, B) A3(A,B) A3(A,B) A3(A,B) A3(A,B) A3(A,B) A3(A,B)
#define A5(A, B) A4(A,B) A4(A,B) A4(A,B) A4(A,B) A4(A,B) A4(A,B)
#define A6(A, B) A5(A,B) A5(A,B) A5(A,B) A5(A,B) A5(A,B) A5(A,B)
#define A7(A, B) A6(A,B) A6(A,B) A6(A,B) A6(A,B) A6(A,B) A6(A,B)
#define A8(A, B) A7(A,B) A7(A,B) A7(A,B) A7(A,B) A7(A,B) A7(A,B)
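// Each A<n> level multiplies the expansion by 6, so A8(a, b) below expands
// to 6^8 = 1,679,616 copies of "a b".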
A8(a, b)


@@ -0,0 +1,16 @@
// This pounds on macro expansion for performance reasons. This is currently
// heavily constrained by darwin's malloc.
// Object-like expansions
#define A0 a b
#define A1 A0 A0 A0 A0 A0 A0
#define A2 A1 A1 A1 A1 A1 A1
#define A3 A2 A2 A2 A2 A2 A2
#define A4 A3 A3 A3 A3 A3 A3
#define A5 A4 A4 A4 A4 A4 A4
#define A6 A5 A5 A5 A5 A5 A5
#define A7 A6 A6 A6 A6 A6 A6
#define A8 A7 A7 A7 A7 A7 A7
A8


@@ -0,0 +1,47 @@
#define __extension__
#define __stpcpy(dest, src) (__extension__ (__builtin_constant_p (src) ? (__string2_1bptr_p (src) && strlen (src) + 1 <= 8 ? __stpcpy_small (dest, __stpcpy_args (src), strlen (src) + 1) : ((char *) __mempcpy (dest, src, strlen (src) + 1) - 1)) : __stpcpy (dest, src)))
#define stpcpy(dest, src) __stpcpy (dest, src)
#define __stpcpy_args(src) __extension__ __STRING2_SMALL_GET16 (src, 0), __extension__ __STRING2_SMALL_GET16 (src, 4), __extension__ __STRING2_SMALL_GET32 (src, 0), __extension__ __STRING2_SMALL_GET32 (src, 4)
#define __mempcpy(dest, src, n) (__extension__ (__builtin_constant_p (src) && __builtin_constant_p (n) && __string2_1bptr_p (src) && n <= 8 ? __mempcpy_small (dest, __mempcpy_args (src), n) : __mempcpy (dest, src, n)))
#define mempcpy(dest, src, n) __mempcpy (dest, src, n)
#define __mempcpy_args(src) ((char *) (src))[0], ((char *) (src))[2], ((char *) (src))[4], ((char *) (src))[6], __extension__ __STRING2_SMALL_GET16 (src, 0), __extension__ __STRING2_SMALL_GET16 (src, 4), __extension__ __STRING2_SMALL_GET32 (src, 0), __extension__ __STRING2_SMALL_GET32 (src, 4)
#define __STRING2_SMALL_GET16(src, idx) (((__const unsigned char *) (__const char *) (src))[idx + 1] << 8 | ((__const unsigned char *) (__const char *) (src))[idx])
#define __STRING2_SMALL_GET32(src, idx) (((((__const unsigned char *) (__const char *) (src))[idx + 3] << 8 | ((__const unsigned char *) (__const char *) (src))[idx + 2]) << 8 | ((__const unsigned char *) (__const char *) (src))[idx + 1]) << 8 | ((__const unsigned char *) (__const char *) (src))[idx])
stpcpy (stpcpy (stpcpy (stpcpy (a, b), c), d), e)
stpcpy (stpcpy (stpcpy (stpcpy (a, b), c), d), e)
stpcpy (stpcpy (stpcpy (stpcpy (a, b), c), d), e)
stpcpy (stpcpy (stpcpy (stpcpy (a, b), c), d), e)
stpcpy (stpcpy (stpcpy (stpcpy (a, b), c), d), e)
stpcpy (stpcpy (stpcpy (stpcpy (a, b), c), d), e)
stpcpy (stpcpy (stpcpy (stpcpy (a, b), c), d), e)
stpcpy (stpcpy (stpcpy (stpcpy (a, b), c), d), e)
stpcpy (stpcpy (stpcpy (stpcpy (a, b), c), d), e)
stpcpy (stpcpy (stpcpy (stpcpy (a, b), c), d), e)
stpcpy (stpcpy (stpcpy (stpcpy (a, b), c), d), e)
stpcpy (stpcpy (stpcpy (stpcpy (a, b), c), d), e)
stpcpy (stpcpy (stpcpy (stpcpy (a, b), c), d), e)
stpcpy (stpcpy (stpcpy (stpcpy (a, b), c), d), e)
stpcpy (stpcpy (stpcpy (stpcpy (a, b), c), d), e)
stpcpy (stpcpy (stpcpy (stpcpy (a, b), c), d), e)
stpcpy (stpcpy (stpcpy (stpcpy (a, b), c), d), e)
stpcpy (stpcpy (stpcpy (stpcpy (a, b), c), d), e)
stpcpy (stpcpy (stpcpy (stpcpy (a, b), c), d), e)
stpcpy (stpcpy (stpcpy (stpcpy (a, b), c), d), e)
stpcpy (stpcpy (stpcpy (stpcpy (a, b), c), d), e)
stpcpy (stpcpy (stpcpy (stpcpy (a, b), c), d), e)
stpcpy (stpcpy (stpcpy (stpcpy (a, b), c), d), e)
stpcpy (stpcpy (stpcpy (stpcpy (a, b), c), d), e)
stpcpy (stpcpy (stpcpy (stpcpy (a, b), c), d), e)
stpcpy (stpcpy (stpcpy (stpcpy (a, b), c), d), e)
stpcpy (stpcpy (stpcpy (stpcpy (a, b), c), d), e)
stpcpy (stpcpy (stpcpy (stpcpy (a, b), c), d), e)
stpcpy (stpcpy (stpcpy (stpcpy (a, b), c), d), e)
stpcpy (stpcpy (stpcpy (stpcpy (a, b), c), d), e)
stpcpy (stpcpy (stpcpy (stpcpy (a, b), c), d), e)
stpcpy (stpcpy (stpcpy (stpcpy (a, b), c), d), e)
stpcpy (stpcpy (stpcpy (stpcpy (a, b), c), d), e)

49
tools/clang/INSTALL.txt Normal file

@@ -0,0 +1,49 @@
//===----------------------------------------------------------------------===//
// Clang Installation Instructions
//===----------------------------------------------------------------------===//
These instructions describe how to build and install Clang.
//===----------------------------------------------------------------------===//
// Step 1: Organization
//===----------------------------------------------------------------------===//
Clang is designed to be built as part of an LLVM build. Assuming that the LLVM
source code is located at $LLVM_SRC_ROOT, the clang source code should be
installed as:
$LLVM_SRC_ROOT/tools/clang
The directory is not required to be called clang, but doing so will allow the
LLVM build system to automatically recognize it and build it along with LLVM.
//===----------------------------------------------------------------------===//
// Step 2: Configure and Build LLVM
//===----------------------------------------------------------------------===//
Configure and build your copy of LLVM (see $LLVM_SRC_ROOT/GettingStarted.html
for more information).
If you installed clang at $LLVM_SRC_ROOT/tools/clang, Clang will automatically
be built along with LLVM. Otherwise, run 'make' in the Clang source directory
to build Clang.
//===----------------------------------------------------------------------===//
// Step 3: (Optional) Verify Your Build
//===----------------------------------------------------------------------===//
It is a good idea to run the Clang tests to make sure your build works
correctly. From inside the Clang build directory, run 'make test' to run the
tests.
//===----------------------------------------------------------------------===//
// Step 4: Install Clang
//===----------------------------------------------------------------------===//
From inside the Clang build directory, run 'make install' to install the Clang
compiler and header files into the prefix directory selected when LLVM was
configured.
The Clang compiler is available as 'clang' and 'clang++'. It supports a gcc-like command-line
interface. See the man page for clang (installed into $prefix/share/man/man1)
for more information.

63
tools/clang/LICENSE.TXT Normal file

@@ -0,0 +1,63 @@
==============================================================================
LLVM Release License
==============================================================================
University of Illinois/NCSA
Open Source License
Copyright (c) 2007-2013 University of Illinois at Urbana-Champaign.
All rights reserved.
Developed by:
LLVM Team
University of Illinois at Urbana-Champaign
http://llvm.org
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal with
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimers.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimers in the
documentation and/or other materials provided with the distribution.
* Neither the names of the LLVM Team, University of Illinois at
Urbana-Champaign, nor the names of its contributors may be used to
endorse or promote products derived from this Software without specific
prior written permission.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
SOFTWARE.
==============================================================================
The LLVM software contains code written by third parties. Such software will
have its own individual LICENSE.TXT file in the directory in which it appears.
This file will describe the copyrights, license, and restrictions which apply
to that code.
The disclaimer of warranty in the University of Illinois Open Source License
applies to all code in the LLVM Distribution, and nothing in any of the
other licenses gives permission to use the names of the LLVM Team or the
University of Illinois to endorse or promote products derived from this
Software.
The following pieces of software have additional or alternate copyrights,
licenses, and/or restrictions:
Program Directory
------- ---------
<none yet>

115
tools/clang/Makefile Normal file
View File

@ -0,0 +1,115 @@
##===- Makefile --------------------------------------------*- Makefile -*-===##
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
##===----------------------------------------------------------------------===##
# If CLANG_LEVEL is not set, then we are the top-level Makefile. Otherwise, we
# are being included from a subdirectory makefile.
ifndef CLANG_LEVEL
IS_TOP_LEVEL := 1
CLANG_LEVEL := .
DIRS := utils/TableGen include lib tools runtime docs unittests
PARALLEL_DIRS :=
ifeq ($(BUILD_EXAMPLES),1)
PARALLEL_DIRS += examples
endif
endif
ifeq ($(MAKECMDGOALS),libs-only)
DIRS := $(filter-out tools docs, $(DIRS))
OPTIONAL_DIRS :=
endif
ifeq ($(BUILD_CLANG_ONLY),YES)
DIRS := $(filter-out docs unittests, $(DIRS))
OPTIONAL_DIRS :=
endif
###
# Common Makefile code, shared by all Clang Makefiles.
# Set LLVM source root level.
LEVEL := $(CLANG_LEVEL)/../..
# Include LLVM common makefile.
include $(LEVEL)/Makefile.common
ifneq ($(ENABLE_DOCS),1)
DIRS := $(filter-out docs, $(DIRS))
endif
# Set common Clang build flags.
CPP.Flags += -I$(PROJ_SRC_DIR)/$(CLANG_LEVEL)/include -I$(PROJ_OBJ_DIR)/$(CLANG_LEVEL)/include
ifdef CLANG_VENDOR
CPP.Flags += -DCLANG_VENDOR='"$(CLANG_VENDOR) "'
endif
ifdef CLANG_REPOSITORY_STRING
CPP.Flags += -DCLANG_REPOSITORY_STRING='"$(CLANG_REPOSITORY_STRING)"'
endif
# Disable -fstrict-aliasing. Darwin disables it by default (and LLVM doesn't
# work with it enabled with GCC), Clang/llvm-gcc don't support it yet, and newer
# GCCs emit false-positive warnings with it on Linux (which are a pain to
# fix). For example:
# http://gcc.gnu.org/PR41874
# http://gcc.gnu.org/PR41838
#
# We can revisit this when LLVM/Clang support it.
CXX.Flags += -fno-strict-aliasing
# Set up Clang's tblgen.
ifndef CLANG_TBLGEN
ifeq ($(LLVM_CROSS_COMPILING),1)
CLANG_TBLGEN := $(BuildLLVMToolDir)/clang-tblgen$(BUILD_EXEEXT)
else
CLANG_TBLGEN := $(LLVMToolDir)/clang-tblgen$(EXEEXT)
endif
endif
ClangTableGen = $(CLANG_TBLGEN) $(TableGen.Flags)
###
# Clang Top Level specific stuff.
ifeq ($(IS_TOP_LEVEL),1)
ifneq ($(PROJ_SRC_ROOT),$(PROJ_OBJ_ROOT))
$(RecursiveTargets)::
$(Verb) for dir in test unittests; do \
if [ -f $(PROJ_SRC_DIR)/$${dir}/Makefile ] && [ ! -f $${dir}/Makefile ]; then \
$(MKDIR) $${dir}; \
$(CP) $(PROJ_SRC_DIR)/$${dir}/Makefile $${dir}/Makefile; \
fi \
done
endif
test::
@ $(MAKE) -C test
report::
@ $(MAKE) -C test report
clean::
@ $(MAKE) -C test clean
libs-only: all
tags::
$(Verb) etags `find . -type f -name '*.h' -or -name '*.cpp' | \
grep -v /lib/Headers | grep -v /test/`
cscope.files:
find tools lib include -name '*.cpp' \
-or -name '*.def' \
-or -name '*.td' \
-or -name '*.h' > cscope.files
.PHONY: test report clean cscope.files
endif

View File

@ -0,0 +1,5 @@
# This file provides information for llvm-top
DepModule: llvm
ConfigCmd:
ConfigTest:
BuildCmd:

111
tools/clang/NOTES.txt Normal file
View File

@ -0,0 +1,111 @@
//===---------------------------------------------------------------------===//
// Random Notes
//===---------------------------------------------------------------------===//
//===---------------------------------------------------------------------===//
To time GCC preprocessing speed without output, use:
"time gcc -MM file"
This is similar to -Eonly.
//===---------------------------------------------------------------------===//
Creating and using a PTH file for performance measurement (use a release build).
$ clang -ccc-pch-is-pth -x objective-c-header INPUTS/Cocoa_h.m -o /tmp/tokencache
$ clang -cc1 -token-cache /tmp/tokencache INPUTS/Cocoa_h.m
//===---------------------------------------------------------------------===//
C++ Template Instantiation benchmark:
http://users.rcn.com/abrahams/instantiation_speed/index.html
//===---------------------------------------------------------------------===//
TODO: File Manager Speedup:
We currently do a lot of stat'ing for files that don't exist, particularly
when lots of -I paths exist (e.g. see the <iostream> example, check for
failures in stat in FileManager::getFile). It would be far better to make
the following changes:
1. FileEntry contains a sys::Path instead of a std::string for Name.
2. sys::Path contains timestamp and size, lazily computed. Eliminate from
FileEntry.
3. File UIDs are created on request, not when files are opened.
These changes make it possible to efficiently have FileEntry objects for
files that exist on the file system, but have not been used yet.
Once this is done:
1. DirectoryEntry gets a boolean value "has read entries". When false, not
all entries in the directory are in the file mgr, when true, they are.
2. Instead of stat'ing the file in FileManager::getFile, check to see if
the dir has been read. If so, fail immediately, if not, read the dir,
then retry.
3. Reading the dir uses the getdirentries syscall, creating a FileEntry
for all files found.
//===---------------------------------------------------------------------===//
// Specifying targets: -triple and -arch
//===---------------------------------------------------------------------===//
Clang supports the "-triple" and "-arch" options. At most one -triple and one
-arch option may be specified. Both are optional.
The "selection of target" behavior is defined as follows:
(1) If the user does not specify -triple, we default to the host triple.
(2) If the user specifies -arch, it overrides the architecture component of the
host or specified triple.
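Stated as code, the selection rule amounts to the following illustrative
Python sketch (the helper name and the triple-splitting details are
assumptions for illustration, not Clang's actual driver code):

def select_target(host_triple, triple=None, arch=None):
    # (1) No -triple given: default to the host triple.
    selected = triple if triple is not None else host_triple
    # (2) -arch overrides only the architecture component of that triple.
    if arch is not None:
        rest = selected.split('-', 1)[1] if '-' in selected else ''
        selected = arch + ('-' + rest if rest else '')
    return selected

# e.g. select_target('x86_64-apple-darwin10', arch='i386')
#      -> 'i386-apple-darwin10'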
//===---------------------------------------------------------------------===//
verifyInputConstraint and verifyOutputConstraint should not return bool.
Instead we should return something like:
enum VerifyConstraintResult {
Valid,
// Output only
OutputOperandConstraintLacksEqualsCharacter,
MatchingConstraintNotValidInOutputOperand,
// Input only
InputOperandConstraintContainsEqualsCharacter,
MatchingConstraintReferencesInvalidOperandNumber,
// Both
PercentConstraintUsedWithLastOperand
};
//===---------------------------------------------------------------------===//
Blocks should not capture variables that are only used in dead code.
The rule that we came up with is that blocks are required to capture
variables if they're referenced in evaluated code, even if that code
doesn't actually rely on the value of the captured variable.
For example, this requires a capture:
(void) var;
But this does not:
if (false) puts(var);
Summary of <rdar://problem/9851835>: if we implement this, we should
warn about non-POD variables that are referenced but not captured, but
only if the non-reachability is not due to macro or template
metaprogramming.
//===---------------------------------------------------------------------===//
We can still apply a modified version of the constructor/destructor
delegation optimization in cases of virtual inheritance where:
- there is no function-try-block,
- the constructor signature is not variadic, and
- the parameter variables can safely be copied and repassed
to the base constructor because either
- they have not had their addresses taken by the vbase initializers or
- they were passed indirectly.
//===---------------------------------------------------------------------===//

26
tools/clang/README.txt Normal file
View File

@ -0,0 +1,26 @@
//===----------------------------------------------------------------------===//
// C Language Family Front-end
//===----------------------------------------------------------------------===//
Welcome to Clang. This is a compiler front-end for the C family of languages
(C, C++, Objective-C, and Objective-C++) which is built as part of the LLVM
compiler infrastructure project.
Unlike many other compiler frontends, Clang is useful for a number of things
beyond just compiling code: we intend for Clang to be host to a number of
different source level tools. One example of this is the Clang Static Analyzer.
If you're interested in more (including how to build Clang) it is best to read
the relevant web sites. Here are some pointers:
Information on Clang: http://clang.llvm.org/
Building and using Clang: http://clang.llvm.org/get_started.html
Clang Static Analyzer: http://clang-analyzer.llvm.org/
Information on the LLVM project: http://llvm.org/
If you have questions or comments about Clang, a great place to discuss them is
on the Clang development mailing list:
http://lists.cs.uiuc.edu/mailman/listinfo/cfe-dev
If you find a bug in Clang, please file it in the LLVM bug tracker:
http://llvm.org/bugs/

View File

@ -0,0 +1,17 @@
//===----------------------------------------------------------------------===//
// Clang Python Bindings
//===----------------------------------------------------------------------===//
This directory implements Python bindings for Clang.
You may need to alter LD_LIBRARY_PATH so that the Clang library can be
found. The unit tests are designed to be run with 'nosetests'. For example:
--
$ env PYTHONPATH=$(echo ~/llvm/tools/clang/bindings/python/) \
LD_LIBRARY_PATH=$(llvm-config --libdir) \
nosetests -v
tests.cindex.test_index.test_create ... ok
...
OK
--
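As a quick smoke test of the bindings themselves (outside the nose suite),
something along the following lines should work once PYTHONPATH and
LD_LIBRARY_PATH are set as above; the file name and source text here are
made up for illustration:

from clang.cindex import Index

index = Index.create()
# Parse an in-memory buffer and print diagnostics plus top-level cursors.
tu = index.parse('example.c', unsaved_files=[
    ('example.c', 'int add(int a, int b) { return a + b; }')])
for diag in tu.diagnostics:
    print(diag.spelling)
for cursor in tu.cursor.get_children():
    print('%s %s' % (cursor.kind, cursor.spelling))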

View File

@ -0,0 +1,24 @@
#===- __init__.py - Clang Python Bindings --------------------*- python -*--===#
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===------------------------------------------------------------------------===#
r"""
Clang Library Bindings
======================
This package provides access to the Clang compiler and libraries.
The available modules are:
cindex
Bindings for the Clang indexing library.
"""
__all__ = ['cindex']
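A minimal use of the cindex module, for reference (the file name and source
string below are illustrative only, not part of the bindings):

from clang.cindex import CursorKind, TranslationUnit

# Build a translation unit from an in-memory buffer and find a declaration.
tu = TranslationUnit.from_source(
    'sample.c', unsaved_files=[('sample.c', 'int answer(void);')])
decls = [c for c in tu.cursor.get_children()
         if c.kind == CursorKind.FUNCTION_DECL]
assert decls[0].spelling == 'answer'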

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,34 @@
#===- enumerations.py - Python Enumerations ------------------*- python -*--===#
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===------------------------------------------------------------------------===#
"""
Clang Enumerations
==================
This module provides static definitions of enumerations that exist in libclang.
Enumerations are typically defined as a list of tuples. The exported values are
typically munged into other types or classes at module load time.
All enumerations are centrally defined in this file so they are all grouped
together and easier to audit. And, maybe even one day this file will be
automatically generated by scanning the libclang headers!
"""
# Maps to CXTokenKind. Note that libclang maintains a separate set of token
# enumerations from the C++ API.
TokenKinds = [
('PUNCTUATION', 0),
('KEYWORD', 1),
('IDENTIFIER', 2),
('LITERAL', 3),
('COMMENT', 4),
]
__all__ = ['TokenKinds']
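The "munging" mentioned above is done by the consumer of this module; a
simplified sketch of that registration pattern (the real TokenKind class
lives in cindex and may differ in detail) looks like:

class TokenKind(object):
    # Sketch of an enumeration wrapper built from the tuples above.
    _registry = {}

    def __init__(self, value, name):
        self.value = value
        self.name = name

    @classmethod
    def register(cls, value, name):
        kind = cls(value, name)
        cls._registry[value] = kind
        setattr(cls, name, kind)

    @classmethod
    def from_value(cls, value):
        return cls._registry[value]

for name, value in TokenKinds:
    TokenKind.register(value, name)

assert TokenKind.from_value(4) is TokenKind.COMMENT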

View File

@ -0,0 +1,87 @@
#!/usr/bin/env python
#===- cindex-dump.py - cindex/Python Source Dump -------------*- python -*--===#
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===------------------------------------------------------------------------===#
"""
A simple command line tool for dumping a source file using the Clang Index
Library.
"""
def get_diag_info(diag):
return { 'severity' : diag.severity,
'location' : diag.location,
'spelling' : diag.spelling,
'ranges' : diag.ranges,
'fixits' : diag.fixits }
def get_cursor_id(cursor, cursor_list = []):
if not opts.showIDs:
return None
if cursor is None:
return None
# FIXME: This is really slow. It would be nice if the index API exposed
# something that let us hash cursors.
for i,c in enumerate(cursor_list):
if cursor == c:
return i
cursor_list.append(cursor)
return len(cursor_list) - 1
def get_info(node, depth=0):
if opts.maxDepth is not None and depth >= opts.maxDepth:
children = None
else:
children = [get_info(c, depth+1)
for c in node.get_children()]
return { 'id' : get_cursor_id(node),
'kind' : node.kind,
'usr' : node.get_usr(),
'spelling' : node.spelling,
'location' : node.location,
'extent.start' : node.extent.start,
'extent.end' : node.extent.end,
'is_definition' : node.is_definition(),
'definition id' : get_cursor_id(node.get_definition()),
'children' : children }
def main():
from clang.cindex import Index
from pprint import pprint
from optparse import OptionParser, OptionGroup
global opts
parser = OptionParser("usage: %prog [options] {filename} [clang-args*]")
parser.add_option("", "--show-ids", dest="showIDs",
help="Don't compute cursor IDs (very slow)",
default=False)
parser.add_option("", "--max-depth", dest="maxDepth",
help="Limit cursor expansion to depth N",
metavar="N", type=int, default=None)
parser.disable_interspersed_args()
(opts, args) = parser.parse_args()
if len(args) == 0:
parser.error('invalid number of arguments')
index = Index.create()
tu = index.parse(None, args)
if not tu:
parser.error("unable to load input")
pprint(('diags', map(get_diag_info, tu.diagnostics)))
pprint(('nodes', get_info(tu.cursor)))
if __name__ == '__main__':
main()

View File

@ -0,0 +1,58 @@
#!/usr/bin/env python
#===- cindex-includes.py - cindex/Python Inclusion Graph -----*- python -*--===#
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===------------------------------------------------------------------------===#
"""
A simple command line tool for dumping a Graphviz description (dot) that
describes include dependencies.
"""
def main():
import sys
from clang.cindex import Index
from optparse import OptionParser, OptionGroup
parser = OptionParser("usage: %prog [options] {filename} [clang-args*]")
parser.disable_interspersed_args()
(opts, args) = parser.parse_args()
if len(args) == 0:
parser.error('invalid number of arguments')
# FIXME: Add an output file option
out = sys.stdout
index = Index.create()
tu = index.parse(None, args)
if not tu:
parser.error("unable to load input")
# A helper function for generating the node name.
def name(f):
if f:
return "\"" + f.name + "\""
# Generate the include graph
out.write("digraph G {\n")
for i in tu.get_includes():
line = " ";
if i.is_input_file:
# Always write the input file as a node just in case it doesn't
# actually include anything. This would generate a 1 node graph.
line += name(i.include)
else:
line += '%s->%s' % (name(i.source), name(i.include))
line += "\n";
out.write(line)
out.write("}\n")
if __name__ == '__main__':
main()

View File

@ -0,0 +1,6 @@
#ifndef HEADER1
#define HEADER1
#include "header3.h"
#endif

View File

@ -0,0 +1,6 @@
#ifndef HEADER2
#define HEADER2
#include "header3.h"
#endif

View File

@ -0,0 +1,3 @@
// Not a guarded header!
void f();

View File

@ -0,0 +1,6 @@
#include "stdio.h"
int main(int argc, char* argv[]) {
printf("hello world\n");
return 0;
}

View File

@ -0,0 +1,5 @@
#include "header1.h"
#include "header2.h"
#include "header1.h"
int main() { }

View File

@ -0,0 +1,2 @@
int DECL_ONE = 1;
int DECL_TWO = 2;

View File

@ -0,0 +1,89 @@
from clang.cindex import CompilationDatabase
from clang.cindex import CompilationDatabaseError
from clang.cindex import CompileCommands
from clang.cindex import CompileCommand
import os
import gc
kInputsDir = os.path.join(os.path.dirname(__file__), 'INPUTS')
def test_create_fail():
"""Check we fail loading a database with an assertion"""
path = os.path.dirname(__file__)
try:
cdb = CompilationDatabase.fromDirectory(path)
except CompilationDatabaseError as e:
assert e.cdb_error == CompilationDatabaseError.ERROR_CANNOTLOADDATABASE
else:
assert False
def test_create():
"""Check we can load a compilation database"""
cdb = CompilationDatabase.fromDirectory(kInputsDir)
def test_lookup_fail():
"""Check file lookup failure"""
cdb = CompilationDatabase.fromDirectory(kInputsDir)
assert cdb.getCompileCommands('file_do_not_exist.cpp') == None
def test_lookup_succeed():
"""Check we get some results if the file exists in the db"""
cdb = CompilationDatabase.fromDirectory(kInputsDir)
cmds = cdb.getCompileCommands('/home/john.doe/MyProject/project.cpp')
assert len(cmds) != 0
def test_1_compilecommand():
"""Check file with single compile command"""
cdb = CompilationDatabase.fromDirectory(kInputsDir)
cmds = cdb.getCompileCommands('/home/john.doe/MyProject/project.cpp')
assert len(cmds) == 1
assert cmds[0].directory == '/home/john.doe/MyProject'
expected = [ 'clang++', '-o', 'project.o', '-c',
'/home/john.doe/MyProject/project.cpp']
for arg, exp in zip(cmds[0].arguments, expected):
assert arg == exp
def test_2_compilecommand():
"""Check file with 2 compile commands"""
cdb = CompilationDatabase.fromDirectory(kInputsDir)
cmds = cdb.getCompileCommands('/home/john.doe/MyProject/project2.cpp')
assert len(cmds) == 2
expected = [
{ 'wd': '/home/john.doe/MyProjectA',
'line': ['clang++', '-o', 'project2.o', '-c',
'/home/john.doe/MyProject/project2.cpp']},
{ 'wd': '/home/john.doe/MyProjectB',
'line': ['clang++', '-DFEATURE=1', '-o', 'project2-feature.o', '-c',
'/home/john.doe/MyProject/project2.cpp']}
]
for i in range(len(cmds)):
assert cmds[i].directory == expected[i]['wd']
for arg, exp in zip(cmds[i].arguments, expected[i]['line']):
assert arg == exp
def test_compilecommand_iterator_stops():
"""Check that iterator stops after the correct number of elements"""
cdb = CompilationDatabase.fromDirectory(kInputsDir)
count = 0
for cmd in cdb.getCompileCommands('/home/john.doe/MyProject/project2.cpp'):
count += 1
assert count <= 2
def test_compilationDB_references():
"""Ensure CompilationsCommands are independent of the database"""
cdb = CompilationDatabase.fromDirectory(kInputsDir)
cmds = cdb.getCompileCommands('/home/john.doe/MyProject/project.cpp')
del cdb
gc.collect()
workingdir = cmds[0].directory
def test_compilationCommands_references():
"""Ensure CompilationsCommand keeps a reference to CompilationCommands"""
cdb = CompilationDatabase.fromDirectory(kInputsDir)
cmds = cdb.getCompileCommands('/home/john.doe/MyProject/project.cpp')
del cdb
cmd0 = cmds[0]
del cmds
gc.collect()
workingdir = cmd0.directory

View File

@ -0,0 +1,75 @@
from clang.cindex import TranslationUnit
def check_completion_results(cr, expected):
assert cr is not None
assert len(cr.diagnostics) == 0
completions = [str(c) for c in cr.results]
for c in expected:
assert c in completions
def test_code_complete():
files = [('fake.c', """
/// Aaa.
int test1;
/// Bbb.
void test2(void);
void f() {
}
""")]
tu = TranslationUnit.from_source('fake.c', ['-std=c99'], unsaved_files=files,
options=TranslationUnit.PARSE_INCLUDE_BRIEF_COMMENTS_IN_CODE_COMPLETION)
cr = tu.codeComplete('fake.c', 9, 1, unsaved_files=files, include_brief_comments=True)
expected = [
"{'int', ResultType} | {'test1', TypedText} || Priority: 50 || Availability: Available || Brief comment: Aaa.",
"{'void', ResultType} | {'test2', TypedText} | {'(', LeftParen} | {')', RightParen} || Priority: 50 || Availability: Available || Brief comment: Bbb.",
"{'return', TypedText} || Priority: 40 || Availability: Available || Brief comment: None"
]
check_completion_results(cr, expected)
def test_code_complete_availability():
files = [('fake.cpp', """
class P {
protected:
int member;
};
class Q : public P {
public:
using P::member;
};
void f(P x, Q y) {
x.; // member is inaccessible
y.; // member is accessible
}
""")]
tu = TranslationUnit.from_source('fake.cpp', ['-std=c++98'], unsaved_files=files)
cr = tu.codeComplete('fake.cpp', 12, 5, unsaved_files=files)
expected = [
"{'const', TypedText} || Priority: 40 || Availability: Available || Brief comment: None",
"{'volatile', TypedText} || Priority: 40 || Availability: Available || Brief comment: None",
"{'operator', TypedText} || Priority: 40 || Availability: Available || Brief comment: None",
"{'P', TypedText} | {'::', Text} || Priority: 75 || Availability: Available || Brief comment: None",
"{'Q', TypedText} | {'::', Text} || Priority: 75 || Availability: Available || Brief comment: None"
]
check_completion_results(cr, expected)
cr = tu.codeComplete('fake.cpp', 13, 5, unsaved_files=files)
expected = [
"{'P', TypedText} | {'::', Text} || Priority: 75 || Availability: Available || Brief comment: None",
"{'P &', ResultType} | {'operator=', TypedText} | {'(', LeftParen} | {'const P &', Placeholder} | {')', RightParen} || Priority: 34 || Availability: Available || Brief comment: None",
"{'int', ResultType} | {'member', TypedText} || Priority: 35 || Availability: NotAccessible || Brief comment: None",
"{'void', ResultType} | {'~P', TypedText} | {'(', LeftParen} | {')', RightParen} || Priority: 34 || Availability: Available || Brief comment: None"
]
check_completion_results(cr, expected)

View File

@ -0,0 +1,40 @@
from clang.cindex import TranslationUnit
from tests.cindex.util import get_cursor
def test_comment():
files = [('fake.c', """
/// Aaa.
int test1;
/// Bbb.
/// x
void test2(void);
void f() {
}
""")]
# make a comment-aware TU
tu = TranslationUnit.from_source('fake.c', ['-std=c99'], unsaved_files=files,
options=TranslationUnit.PARSE_INCLUDE_BRIEF_COMMENTS_IN_CODE_COMPLETION)
test1 = get_cursor(tu, 'test1')
assert test1 is not None, "Could not find test1."
assert test1.type.is_pod()
raw = test1.raw_comment
brief = test1.brief_comment
assert raw == """/// Aaa."""
assert brief == """Aaa."""
test2 = get_cursor(tu, 'test2')
raw = test2.raw_comment
brief = test2.brief_comment
assert raw == """/// Bbb.\n/// x"""
assert brief == """Bbb. x"""
f = get_cursor(tu, 'f')
raw = f.raw_comment
brief = f.brief_comment
assert raw is None
assert brief is None

View File

@ -0,0 +1,261 @@
import gc
from clang.cindex import CursorKind
from clang.cindex import TranslationUnit
from clang.cindex import TypeKind
from .util import get_cursor
from .util import get_cursors
from .util import get_tu
kInput = """\
// FIXME: Find nicer way to drop builtins and other cruft.
int start_decl;
struct s0 {
int a;
int b;
};
struct s1;
void f0(int a0, int a1) {
int l0, l1;
if (a0)
return;
for (;;) {
break;
}
}
"""
def test_get_children():
tu = get_tu(kInput)
# Skip until past start_decl.
it = tu.cursor.get_children()
while it.next().spelling != 'start_decl':
pass
tu_nodes = list(it)
assert len(tu_nodes) == 3
for cursor in tu_nodes:
assert cursor.translation_unit is not None
assert tu_nodes[0] != tu_nodes[1]
assert tu_nodes[0].kind == CursorKind.STRUCT_DECL
assert tu_nodes[0].spelling == 's0'
assert tu_nodes[0].is_definition() == True
assert tu_nodes[0].location.file.name == 't.c'
assert tu_nodes[0].location.line == 4
assert tu_nodes[0].location.column == 8
assert tu_nodes[0].hash > 0
assert tu_nodes[0].translation_unit is not None
s0_nodes = list(tu_nodes[0].get_children())
assert len(s0_nodes) == 2
assert s0_nodes[0].kind == CursorKind.FIELD_DECL
assert s0_nodes[0].spelling == 'a'
assert s0_nodes[0].type.kind == TypeKind.INT
assert s0_nodes[1].kind == CursorKind.FIELD_DECL
assert s0_nodes[1].spelling == 'b'
assert s0_nodes[1].type.kind == TypeKind.INT
assert tu_nodes[1].kind == CursorKind.STRUCT_DECL
assert tu_nodes[1].spelling == 's1'
assert tu_nodes[1].displayname == 's1'
assert tu_nodes[1].is_definition() == False
assert tu_nodes[2].kind == CursorKind.FUNCTION_DECL
assert tu_nodes[2].spelling == 'f0'
assert tu_nodes[2].displayname == 'f0(int, int)'
assert tu_nodes[2].is_definition() == True
def test_references():
"""Ensure that references to TranslationUnit are kept."""
tu = get_tu('int x;')
cursors = list(tu.cursor.get_children())
assert len(cursors) > 0
cursor = cursors[0]
assert isinstance(cursor.translation_unit, TranslationUnit)
# Delete reference to TU and perform a full GC.
del tu
gc.collect()
assert isinstance(cursor.translation_unit, TranslationUnit)
# If the TU was destroyed, this should cause a segfault.
parent = cursor.semantic_parent
def test_canonical():
source = 'struct X; struct X; struct X { int member; };'
tu = get_tu(source)
cursors = []
for cursor in tu.cursor.get_children():
if cursor.spelling == 'X':
cursors.append(cursor)
assert len(cursors) == 3
assert cursors[1].canonical == cursors[2].canonical
def test_is_static_method():
"""Ensure Cursor.is_static_method works."""
source = 'class X { static void foo(); void bar(); };'
tu = get_tu(source, lang='cpp')
cls = get_cursor(tu, 'X')
foo = get_cursor(tu, 'foo')
bar = get_cursor(tu, 'bar')
assert cls is not None
assert foo is not None
assert bar is not None
assert foo.is_static_method()
assert not bar.is_static_method()
def test_underlying_type():
tu = get_tu('typedef int foo;')
typedef = get_cursor(tu, 'foo')
assert typedef is not None
assert typedef.kind.is_declaration()
underlying = typedef.underlying_typedef_type
assert underlying.kind == TypeKind.INT
kParentTest = """\
class C {
void f();
}
void C::f() { }
"""
def test_semantic_parent():
tu = get_tu(kParentTest, 'cpp')
curs = get_cursors(tu, 'f')
decl = get_cursor(tu, 'C')
assert(len(curs) == 2)
assert(curs[0].semantic_parent == curs[1].semantic_parent)
assert(curs[0].semantic_parent == decl)
def test_lexical_parent():
tu = get_tu(kParentTest, 'cpp')
curs = get_cursors(tu, 'f')
decl = get_cursor(tu, 'C')
assert(len(curs) == 2)
assert(curs[0].lexical_parent != curs[1].lexical_parent)
assert(curs[0].lexical_parent == decl)
assert(curs[1].lexical_parent == tu.cursor)
def test_enum_type():
tu = get_tu('enum TEST { FOO=1, BAR=2 };')
enum = get_cursor(tu, 'TEST')
assert enum is not None
assert enum.kind == CursorKind.ENUM_DECL
enum_type = enum.enum_type
assert enum_type.kind == TypeKind.UINT
def test_enum_type_cpp():
tu = get_tu('enum TEST : long long { FOO=1, BAR=2 };', lang="cpp")
enum = get_cursor(tu, 'TEST')
assert enum is not None
assert enum.kind == CursorKind.ENUM_DECL
assert enum.enum_type.kind == TypeKind.LONGLONG
def test_objc_type_encoding():
tu = get_tu('int i;', lang='objc')
i = get_cursor(tu, 'i')
assert i is not None
assert i.objc_type_encoding == 'i'
def test_enum_values():
tu = get_tu('enum TEST { SPAM=1, EGG, HAM = EGG * 20};')
enum = get_cursor(tu, 'TEST')
assert enum is not None
assert enum.kind == CursorKind.ENUM_DECL
enum_constants = list(enum.get_children())
assert len(enum_constants) == 3
spam, egg, ham = enum_constants
assert spam.kind == CursorKind.ENUM_CONSTANT_DECL
assert spam.enum_value == 1
assert egg.kind == CursorKind.ENUM_CONSTANT_DECL
assert egg.enum_value == 2
assert ham.kind == CursorKind.ENUM_CONSTANT_DECL
assert ham.enum_value == 40
def test_enum_values_cpp():
tu = get_tu('enum TEST : long long { SPAM = -1, HAM = 0x10000000000};', lang="cpp")
enum = get_cursor(tu, 'TEST')
assert enum is not None
assert enum.kind == CursorKind.ENUM_DECL
enum_constants = list(enum.get_children())
assert len(enum_constants) == 2
spam, ham = enum_constants
assert spam.kind == CursorKind.ENUM_CONSTANT_DECL
assert spam.enum_value == -1
assert ham.kind == CursorKind.ENUM_CONSTANT_DECL
assert ham.enum_value == 0x10000000000
def test_annotation_attribute():
tu = get_tu('int foo (void) __attribute__ ((annotate("here be annotation attribute")));')
foo = get_cursor(tu, 'foo')
assert foo is not None
for c in foo.get_children():
if c.kind == CursorKind.ANNOTATE_ATTR:
assert c.displayname == "here be annotation attribute"
break
else:
assert False, "Couldn't find annotation"
def test_result_type():
tu = get_tu('int foo();')
foo = get_cursor(tu, 'foo')
assert foo is not None
t = foo.result_type
assert t.kind == TypeKind.INT
def test_get_tokens():
"""Ensure we can map cursors back to tokens."""
tu = get_tu('int foo(int i);')
foo = get_cursor(tu, 'foo')
tokens = list(foo.get_tokens())
assert len(tokens) == 7
assert tokens[0].spelling == 'int'
assert tokens[1].spelling == 'foo'
def test_get_arguments():
tu = get_tu('void foo(int i, int j);')
foo = get_cursor(tu, 'foo')
arguments = list(foo.get_arguments())
assert len(arguments) == 2
assert arguments[0].spelling == "i"
assert arguments[1].spelling == "j"
def test_referenced():
tu = get_tu('void foo(); void bar() { foo(); }')
foo = get_cursor(tu, 'foo')
bar = get_cursor(tu, 'bar')
for c in bar.get_children():
if c.kind == CursorKind.CALL_EXPR:
assert c.referenced.spelling == foo.spelling
break

View File

@ -0,0 +1,47 @@
from clang.cindex import CursorKind
def test_name():
assert CursorKind.UNEXPOSED_DECL.name == 'UNEXPOSED_DECL'
def test_get_all_kinds():
kinds = CursorKind.get_all_kinds()
assert CursorKind.UNEXPOSED_DECL in kinds
assert CursorKind.TRANSLATION_UNIT in kinds
assert CursorKind.VARIABLE_REF in kinds
assert CursorKind.LAMBDA_EXPR in kinds
assert CursorKind.OBJ_BOOL_LITERAL_EXPR in kinds
assert CursorKind.OBJ_SELF_EXPR in kinds
assert CursorKind.MS_ASM_STMT in kinds
assert CursorKind.MODULE_IMPORT_DECL in kinds
def test_kind_groups():
"""Check that every kind classifies to exactly one group."""
assert CursorKind.UNEXPOSED_DECL.is_declaration()
assert CursorKind.TYPE_REF.is_reference()
assert CursorKind.DECL_REF_EXPR.is_expression()
assert CursorKind.UNEXPOSED_STMT.is_statement()
assert CursorKind.INVALID_FILE.is_invalid()
assert CursorKind.TRANSLATION_UNIT.is_translation_unit()
assert not CursorKind.TYPE_REF.is_translation_unit()
assert CursorKind.PREPROCESSING_DIRECTIVE.is_preprocessing()
assert not CursorKind.TYPE_REF.is_preprocessing()
assert CursorKind.UNEXPOSED_DECL.is_unexposed()
assert not CursorKind.TYPE_REF.is_unexposed()
for k in CursorKind.get_all_kinds():
group = [n for n in ('is_declaration', 'is_reference', 'is_expression',
'is_statement', 'is_invalid', 'is_attribute')
if getattr(k, n)()]
if k in ( CursorKind.TRANSLATION_UNIT,
CursorKind.MACRO_DEFINITION,
CursorKind.MACRO_INSTANTIATION,
CursorKind.INCLUSION_DIRECTIVE,
CursorKind.PREPROCESSING_DIRECTIVE):
assert len(group) == 0
else:
assert len(group) == 1

View File

@ -0,0 +1,82 @@
from clang.cindex import *
from .util import get_tu
# FIXME: We need support for invalid translation units to test better.
def test_diagnostic_warning():
tu = get_tu('int f0() {}\n')
assert len(tu.diagnostics) == 1
assert tu.diagnostics[0].severity == Diagnostic.Warning
assert tu.diagnostics[0].location.line == 1
assert tu.diagnostics[0].location.column == 11
assert (tu.diagnostics[0].spelling ==
'control reaches end of non-void function')
def test_diagnostic_note():
# FIXME: We aren't getting notes here for some reason.
tu = get_tu('#define A x\nvoid *A = 1;\n')
assert len(tu.diagnostics) == 1
assert tu.diagnostics[0].severity == Diagnostic.Warning
assert tu.diagnostics[0].location.line == 2
assert tu.diagnostics[0].location.column == 7
assert 'incompatible' in tu.diagnostics[0].spelling
# assert tu.diagnostics[1].severity == Diagnostic.Note
# assert tu.diagnostics[1].location.line == 1
# assert tu.diagnostics[1].location.column == 11
# assert tu.diagnostics[1].spelling == 'instantiated from'
def test_diagnostic_fixit():
tu = get_tu('struct { int f0; } x = { f0 : 1 };')
assert len(tu.diagnostics) == 1
assert tu.diagnostics[0].severity == Diagnostic.Warning
assert tu.diagnostics[0].location.line == 1
assert tu.diagnostics[0].location.column == 26
assert tu.diagnostics[0].spelling.startswith('use of GNU old-style')
assert len(tu.diagnostics[0].fixits) == 1
assert tu.diagnostics[0].fixits[0].range.start.line == 1
assert tu.diagnostics[0].fixits[0].range.start.column == 26
assert tu.diagnostics[0].fixits[0].range.end.line == 1
assert tu.diagnostics[0].fixits[0].range.end.column == 30
assert tu.diagnostics[0].fixits[0].value == '.f0 = '
def test_diagnostic_range():
tu = get_tu('void f() { int i = "a" + 1; }')
assert len(tu.diagnostics) == 1
assert tu.diagnostics[0].severity == Diagnostic.Warning
assert tu.diagnostics[0].location.line == 1
assert tu.diagnostics[0].location.column == 16
assert tu.diagnostics[0].spelling.startswith('incompatible pointer to')
assert len(tu.diagnostics[0].fixits) == 0
assert len(tu.diagnostics[0].ranges) == 1
assert tu.diagnostics[0].ranges[0].start.line == 1
assert tu.diagnostics[0].ranges[0].start.column == 20
assert tu.diagnostics[0].ranges[0].end.line == 1
assert tu.diagnostics[0].ranges[0].end.column == 27
try:
tu.diagnostics[0].ranges[1].start.line
except IndexError:
assert True
else:
assert False
def test_diagnostic_category():
"""Ensure that category properties work."""
tu = get_tu('int f(int i) { return 7; }', all_warnings=True)
assert len(tu.diagnostics) == 1
d = tu.diagnostics[0]
assert d.severity == Diagnostic.Warning
assert d.location.line == 1
assert d.location.column == 11
assert d.category_number == 2
assert d.category_name == 'Semantic Issue'
def test_diagnostic_option():
"""Ensure that category option properties work."""
tu = get_tu('int f(int i) { return 7; }', all_warnings=True)
assert len(tu.diagnostics) == 1
d = tu.diagnostics[0]
assert d.option == '-Wunused-parameter'
assert d.disable_option == '-Wno-unused-parameter'

View File

@ -0,0 +1,9 @@
from clang.cindex import Index, File
def test_file():
index = Index.create()
tu = index.parse('t.c', unsaved_files = [('t.c', "")])
file = File.from_name(tu, "t.c")
assert str(file) == "t.c"
assert file.name == "t.c"
assert repr(file) == "<File: t.c>"

View File

@ -0,0 +1,15 @@
from clang.cindex import *
import os
kInputsDir = os.path.join(os.path.dirname(__file__), 'INPUTS')
def test_create():
index = Index.create()
# FIXME: test Index.read
def test_parse():
index = Index.create()
assert isinstance(index, Index)
tu = index.parse(os.path.join(kInputsDir, 'hello.cpp'))
assert isinstance(tu, TranslationUnit)

View File

@ -0,0 +1,95 @@
from clang.cindex import Cursor
from clang.cindex import File
from clang.cindex import SourceLocation
from clang.cindex import SourceRange
from .util import get_cursor
from .util import get_tu
baseInput="int one;\nint two;\n"
def assert_location(loc, line, column, offset):
assert loc.line == line
assert loc.column == column
assert loc.offset == offset
def test_location():
tu = get_tu(baseInput)
one = get_cursor(tu, 'one')
two = get_cursor(tu, 'two')
assert one is not None
assert two is not None
assert_location(one.location,line=1,column=5,offset=4)
assert_location(two.location,line=2,column=5,offset=13)
# adding a linebreak at top should keep columns same
tu = get_tu('\n' + baseInput)
one = get_cursor(tu, 'one')
two = get_cursor(tu, 'two')
assert one is not None
assert two is not None
assert_location(one.location,line=2,column=5,offset=5)
assert_location(two.location,line=3,column=5,offset=14)
# adding a space should affect column on first line only
tu = get_tu(' ' + baseInput)
one = get_cursor(tu, 'one')
two = get_cursor(tu, 'two')
assert_location(one.location,line=1,column=6,offset=5)
assert_location(two.location,line=2,column=5,offset=14)
# define the expected location ourselves and see if it matches
# the returned location
tu = get_tu(baseInput)
file = File.from_name(tu, 't.c')
location = SourceLocation.from_position(tu, file, 1, 5)
cursor = Cursor.from_location(tu, location)
one = get_cursor(tu, 'one')
assert one is not None
assert one == cursor
# Ensure locations referring to the same entity are equivalent.
location2 = SourceLocation.from_position(tu, file, 1, 5)
assert location == location2
location3 = SourceLocation.from_position(tu, file, 1, 4)
assert location2 != location3
offset_location = SourceLocation.from_offset(tu, file, 5)
cursor = Cursor.from_location(tu, offset_location)
verified = False
for n in [n for n in tu.cursor.get_children() if n.spelling == 'one']:
assert n == cursor
verified = True
assert verified
def test_extent():
tu = get_tu(baseInput)
one = get_cursor(tu, 'one')
two = get_cursor(tu, 'two')
assert_location(one.extent.start,line=1,column=1,offset=0)
assert_location(one.extent.end,line=1,column=8,offset=7)
assert baseInput[one.extent.start.offset:one.extent.end.offset] == "int one"
assert_location(two.extent.start,line=2,column=1,offset=9)
assert_location(two.extent.end,line=2,column=8,offset=16)
assert baseInput[two.extent.start.offset:two.extent.end.offset] == "int two"
file = File.from_name(tu, 't.c')
location1 = SourceLocation.from_position(tu, file, 1, 1)
location2 = SourceLocation.from_position(tu, file, 1, 8)
range1 = SourceRange.from_locations(location1, location2)
range2 = SourceRange.from_locations(location1, location2)
assert range1 == range2
location3 = SourceLocation.from_position(tu, file, 1, 6)
range3 = SourceRange.from_locations(location1, location3)
assert range1 != range3

View File

@ -0,0 +1,43 @@
from clang.cindex import TokenKind
from nose.tools import eq_
from nose.tools import ok_
from nose.tools import raises
def test_constructor():
"""Ensure TokenKind constructor works as expected."""
t = TokenKind(5, 'foo')
eq_(t.value, 5)
eq_(t.name, 'foo')
@raises(ValueError)
def test_bad_register():
"""Ensure a duplicate value is rejected for registration."""
TokenKind.register(2, 'foo')
@raises(ValueError)
def test_unknown_value():
"""Ensure trying to fetch an unknown value raises."""
TokenKind.from_value(-1)
def test_registration():
"""Ensure that items registered appear as class attributes."""
ok_(hasattr(TokenKind, 'LITERAL'))
literal = TokenKind.LITERAL
ok_(isinstance(literal, TokenKind))
def test_from_value():
"""Ensure registered values can be obtained from from_value()."""
t = TokenKind.from_value(3)
ok_(isinstance(t, TokenKind))
eq_(t, TokenKind.LITERAL)
def test_repr():
"""Ensure repr() works."""
r = repr(TokenKind.LITERAL)
eq_(r, 'TokenKind.LITERAL')

View File

@ -0,0 +1,52 @@
from clang.cindex import CursorKind
from clang.cindex import Index
from clang.cindex import SourceLocation
from clang.cindex import SourceRange
from clang.cindex import TokenKind
from nose.tools import eq_
from nose.tools import ok_
from .util import get_tu
def test_token_to_cursor():
"""Ensure we can obtain a Cursor from a Token instance."""
tu = get_tu('int i = 5;')
r = tu.get_extent('t.c', (0, 9))
tokens = list(tu.get_tokens(extent=r))
assert len(tokens) == 5
assert tokens[1].spelling == 'i'
assert tokens[1].kind == TokenKind.IDENTIFIER
cursor = tokens[1].cursor
assert cursor.kind == CursorKind.VAR_DECL
assert tokens[1].cursor == tokens[2].cursor
def test_token_location():
"""Ensure Token.location works."""
tu = get_tu('int foo = 10;')
r = tu.get_extent('t.c', (0, 11))
tokens = list(tu.get_tokens(extent=r))
eq_(len(tokens), 4)
loc = tokens[1].location
ok_(isinstance(loc, SourceLocation))
eq_(loc.line, 1)
eq_(loc.column, 5)
eq_(loc.offset, 4)
def test_token_extent():
"""Ensure Token.extent works."""
tu = get_tu('int foo = 10;')
r = tu.get_extent('t.c', (0, 11))
tokens = list(tu.get_tokens(extent=r))
eq_(len(tokens), 4)
extent = tokens[1].extent
ok_(isinstance(extent, SourceRange))
eq_(extent.start.offset, 4)
eq_(extent.end.offset, 7)

View File

@ -0,0 +1,258 @@
import gc
import os
from clang.cindex import CursorKind
from clang.cindex import Cursor
from clang.cindex import File
from clang.cindex import Index
from clang.cindex import SourceLocation
from clang.cindex import SourceRange
from clang.cindex import TranslationUnitSaveError
from clang.cindex import TranslationUnitLoadError
from clang.cindex import TranslationUnit
from .util import get_cursor
from .util import get_tu
kInputsDir = os.path.join(os.path.dirname(__file__), 'INPUTS')
def test_spelling():
path = os.path.join(kInputsDir, 'hello.cpp')
tu = TranslationUnit.from_source(path)
assert tu.spelling == path
def test_cursor():
path = os.path.join(kInputsDir, 'hello.cpp')
tu = get_tu(path)
c = tu.cursor
assert isinstance(c, Cursor)
assert c.kind is CursorKind.TRANSLATION_UNIT
def test_parse_arguments():
path = os.path.join(kInputsDir, 'parse_arguments.c')
tu = TranslationUnit.from_source(path, ['-DDECL_ONE=hello', '-DDECL_TWO=hi'])
spellings = [c.spelling for c in tu.cursor.get_children()]
assert spellings[-2] == 'hello'
assert spellings[-1] == 'hi'
def test_reparse_arguments():
path = os.path.join(kInputsDir, 'parse_arguments.c')
tu = TranslationUnit.from_source(path, ['-DDECL_ONE=hello', '-DDECL_TWO=hi'])
tu.reparse()
spellings = [c.spelling for c in tu.cursor.get_children()]
assert spellings[-2] == 'hello'
assert spellings[-1] == 'hi'
def test_unsaved_files():
tu = TranslationUnit.from_source('fake.c', ['-I./'], unsaved_files = [
('fake.c', """
#include "fake.h"
int x;
int SOME_DEFINE;
"""),
('./fake.h', """
#define SOME_DEFINE y
""")
])
spellings = [c.spelling for c in tu.cursor.get_children()]
assert spellings[-2] == 'x'
assert spellings[-1] == 'y'
def test_unsaved_files_2():
import StringIO
tu = TranslationUnit.from_source('fake.c', unsaved_files = [
('fake.c', StringIO.StringIO('int x;'))])
spellings = [c.spelling for c in tu.cursor.get_children()]
assert spellings[-1] == 'x'
def normpaths_equal(path1, path2):
""" Compares two paths for equality after normalizing them with
os.path.normpath
"""
return os.path.normpath(path1) == os.path.normpath(path2)
def test_includes():
def eq(expected, actual):
if not actual.is_input_file:
return normpaths_equal(expected[0], actual.source.name) and \
normpaths_equal(expected[1], actual.include.name)
else:
return normpaths_equal(expected[1], actual.include.name)
src = os.path.join(kInputsDir, 'include.cpp')
h1 = os.path.join(kInputsDir, "header1.h")
h2 = os.path.join(kInputsDir, "header2.h")
h3 = os.path.join(kInputsDir, "header3.h")
inc = [(src, h1), (h1, h3), (src, h2), (h2, h3)]
tu = TranslationUnit.from_source(src)
for i in zip(inc, tu.get_includes()):
assert eq(i[0], i[1])
def save_tu(tu):
"""Convenience API to save a TranslationUnit to a file.
Returns the filename it was saved to.
"""
# FIXME Generate a temp file path using system APIs.
base = 'TEMP_FOR_TRANSLATIONUNIT_SAVE.c'
path = os.path.join(kInputsDir, base)
# Just in case.
if os.path.exists(path):
os.unlink(path)
tu.save(path)
return path
def test_save():
"""Ensure TranslationUnit.save() works."""
tu = get_tu('int foo();')
path = save_tu(tu)
assert os.path.exists(path)
assert os.path.getsize(path) > 0
os.unlink(path)
def test_save_translation_errors():
"""Ensure that saving to an invalid directory raises."""
tu = get_tu('int foo();')
path = '/does/not/exist/llvm-test.ast'
assert not os.path.exists(os.path.dirname(path))
try:
tu.save(path)
assert False
except TranslationUnitSaveError as ex:
expected = TranslationUnitSaveError.ERROR_UNKNOWN
assert ex.save_error == expected
def test_load():
"""Ensure TranslationUnits can be constructed from saved files."""
tu = get_tu('int foo();')
assert len(tu.diagnostics) == 0
path = save_tu(tu)
assert os.path.exists(path)
assert os.path.getsize(path) > 0
tu2 = TranslationUnit.from_ast_file(filename=path)
assert len(tu2.diagnostics) == 0
foo = get_cursor(tu2, 'foo')
assert foo is not None
# Just in case there is an open file descriptor somewhere.
del tu2
os.unlink(path)
def test_index_parse():
path = os.path.join(kInputsDir, 'hello.cpp')
index = Index.create()
tu = index.parse(path)
assert isinstance(tu, TranslationUnit)
def test_get_file():
"""Ensure tu.get_file() works appropriately."""
tu = get_tu('int foo();')
f = tu.get_file('t.c')
assert isinstance(f, File)
assert f.name == 't.c'
try:
f = tu.get_file('foobar.cpp')
except:
pass
else:
assert False
def test_get_source_location():
"""Ensure tu.get_source_location() works."""
tu = get_tu('int foo();')
location = tu.get_location('t.c', 2)
assert isinstance(location, SourceLocation)
assert location.offset == 2
assert location.file.name == 't.c'
location = tu.get_location('t.c', (1, 3))
assert isinstance(location, SourceLocation)
assert location.line == 1
assert location.column == 3
assert location.file.name == 't.c'
def test_get_source_range():
"""Ensure tu.get_source_range() works."""
tu = get_tu('int foo();')
r = tu.get_extent('t.c', (1,4))
assert isinstance(r, SourceRange)
assert r.start.offset == 1
assert r.end.offset == 4
assert r.start.file.name == 't.c'
assert r.end.file.name == 't.c'
r = tu.get_extent('t.c', ((1,2), (1,3)))
assert isinstance(r, SourceRange)
assert r.start.line == 1
assert r.start.column == 2
assert r.end.line == 1
assert r.end.column == 3
assert r.start.file.name == 't.c'
assert r.end.file.name == 't.c'
start = tu.get_location('t.c', 0)
end = tu.get_location('t.c', 5)
r = tu.get_extent('t.c', (start, end))
assert isinstance(r, SourceRange)
assert r.start.offset == 0
assert r.end.offset == 5
assert r.start.file.name == 't.c'
assert r.end.file.name == 't.c'
def test_get_tokens_gc():
"""Ensures get_tokens() works properly with garbage collection."""
tu = get_tu('int foo();')
r = tu.get_extent('t.c', (0, 10))
tokens = list(tu.get_tokens(extent=r))
assert tokens[0].spelling == 'int'
gc.collect()
assert tokens[0].spelling == 'int'
del tokens[1]
gc.collect()
assert tokens[0].spelling == 'int'
# May trigger segfault if we don't do our job properly.
del tokens
gc.collect()
gc.collect() # Just in case.
def test_fail_from_source():
path = os.path.join(kInputsDir, 'non-existent.cpp')
try:
tu = TranslationUnit.from_source(path)
except TranslationUnitLoadError:
tu = None
assert tu == None
def test_fail_from_ast_file():
path = os.path.join(kInputsDir, 'non-existent.ast')
try:
tu = TranslationUnit.from_ast_file(path)
except TranslationUnitLoadError:
tu = None
assert tu == None

View File

@ -0,0 +1,397 @@
import gc
from clang.cindex import CursorKind
from clang.cindex import TranslationUnit
from clang.cindex import TypeKind
from nose.tools import raises
from .util import get_cursor
from .util import get_tu
kInput = """\
typedef int I;
struct teststruct {
int a;
I b;
long c;
unsigned long d;
signed long e;
const int f;
int *g;
int ***h;
};
"""
def test_a_struct():
tu = get_tu(kInput)
teststruct = get_cursor(tu, 'teststruct')
assert teststruct is not None, "Could not find teststruct."
fields = list(teststruct.get_children())
assert all(x.kind == CursorKind.FIELD_DECL for x in fields)
assert all(x.translation_unit is not None for x in fields)
assert fields[0].spelling == 'a'
assert not fields[0].type.is_const_qualified()
assert fields[0].type.kind == TypeKind.INT
assert fields[0].type.get_canonical().kind == TypeKind.INT
assert fields[1].spelling == 'b'
assert not fields[1].type.is_const_qualified()
assert fields[1].type.kind == TypeKind.TYPEDEF
assert fields[1].type.get_canonical().kind == TypeKind.INT
assert fields[1].type.get_declaration().spelling == 'I'
assert fields[2].spelling == 'c'
assert not fields[2].type.is_const_qualified()
assert fields[2].type.kind == TypeKind.LONG
assert fields[2].type.get_canonical().kind == TypeKind.LONG
assert fields[3].spelling == 'd'
assert not fields[3].type.is_const_qualified()
assert fields[3].type.kind == TypeKind.ULONG
assert fields[3].type.get_canonical().kind == TypeKind.ULONG
assert fields[4].spelling == 'e'
assert not fields[4].type.is_const_qualified()
assert fields[4].type.kind == TypeKind.LONG
assert fields[4].type.get_canonical().kind == TypeKind.LONG
assert fields[5].spelling == 'f'
assert fields[5].type.is_const_qualified()
assert fields[5].type.kind == TypeKind.INT
assert fields[5].type.get_canonical().kind == TypeKind.INT
assert fields[6].spelling == 'g'
assert not fields[6].type.is_const_qualified()
assert fields[6].type.kind == TypeKind.POINTER
assert fields[6].type.get_pointee().kind == TypeKind.INT
assert fields[7].spelling == 'h'
assert not fields[7].type.is_const_qualified()
assert fields[7].type.kind == TypeKind.POINTER
assert fields[7].type.get_pointee().kind == TypeKind.POINTER
assert fields[7].type.get_pointee().get_pointee().kind == TypeKind.POINTER
assert fields[7].type.get_pointee().get_pointee().get_pointee().kind == TypeKind.INT
def test_references():
"""Ensure that a Type maintains a reference to a TranslationUnit."""
tu = get_tu('int x;')
children = list(tu.cursor.get_children())
assert len(children) > 0
cursor = children[0]
t = cursor.type
assert isinstance(t.translation_unit, TranslationUnit)
# Delete main TranslationUnit reference and force a GC.
del tu
gc.collect()
assert isinstance(t.translation_unit, TranslationUnit)
# If the TU was destroyed, this should cause a segfault.
decl = t.get_declaration()
constarrayInput="""
struct teststruct {
void *A[2];
};
"""
def testConstantArray():
tu = get_tu(constarrayInput)
teststruct = get_cursor(tu, 'teststruct')
assert teststruct is not None, "Didn't find teststruct??"
fields = list(teststruct.get_children())
assert fields[0].spelling == 'A'
assert fields[0].type.kind == TypeKind.CONSTANTARRAY
assert fields[0].type.get_array_element_type() is not None
assert fields[0].type.get_array_element_type().kind == TypeKind.POINTER
assert fields[0].type.get_array_size() == 2
def test_equal():
"""Ensure equivalence operators work on Type."""
source = 'int a; int b; void *v;'
tu = get_tu(source)
a = get_cursor(tu, 'a')
b = get_cursor(tu, 'b')
v = get_cursor(tu, 'v')
assert a is not None
assert b is not None
assert v is not None
assert a.type == b.type
assert a.type != v.type
assert a.type != None
assert a.type != 'foo'
def test_type_spelling():
"""Ensure Type.spelling works."""
tu = get_tu('int c[5]; int i[]; int x; int v[x];')
c = get_cursor(tu, 'c')
i = get_cursor(tu, 'i')
x = get_cursor(tu, 'x')
v = get_cursor(tu, 'v')
assert c is not None
assert i is not None
assert x is not None
assert v is not None
assert c.type.spelling == "int [5]"
assert i.type.spelling == "int []"
assert x.type.spelling == "int"
assert v.type.spelling == "int [x]"
def test_typekind_spelling():
"""Ensure TypeKind.spelling works."""
tu = get_tu('int a;')
a = get_cursor(tu, 'a')
assert a is not None
assert a.type.kind.spelling == 'Int'
def test_function_argument_types():
"""Ensure that Type.argument_types() works as expected."""
tu = get_tu('void f(int, int);')
f = get_cursor(tu, 'f')
assert f is not None
args = f.type.argument_types()
assert args is not None
assert len(args) == 2
t0 = args[0]
assert t0 is not None
assert t0.kind == TypeKind.INT
t1 = args[1]
assert t1 is not None
assert t1.kind == TypeKind.INT
args2 = list(args)
assert len(args2) == 2
assert t0 == args2[0]
assert t1 == args2[1]
@raises(TypeError)
def test_argument_types_string_key():
"""Ensure that non-int keys raise a TypeError."""
tu = get_tu('void f(int, int);')
f = get_cursor(tu, 'f')
assert f is not None
args = f.type.argument_types()
assert len(args) == 2
args['foo']
@raises(IndexError)
def test_argument_types_negative_index():
"""Ensure that negative indexes on argument_types Raises an IndexError."""
tu = get_tu('void f(int, int);')
f = get_cursor(tu, 'f')
args = f.type.argument_types()
args[-1]
@raises(IndexError)
def test_argument_types_overflow_index():
"""Ensure that indexes beyond the length of Type.argument_types() raise."""
tu = get_tu('void f(int, int);')
f = get_cursor(tu, 'f')
args = f.type.argument_types()
args[2]
@raises(Exception)
def test_argument_types_invalid_type():
"""Ensure that obtaining argument_types on a Type without them raises."""
tu = get_tu('int i;')
i = get_cursor(tu, 'i')
assert i is not None
i.type.argument_types()
def test_is_pod():
"""Ensure Type.is_pod() works."""
tu = get_tu('int i; void f();')
i = get_cursor(tu, 'i')
f = get_cursor(tu, 'f')
assert i is not None
assert f is not None
assert i.type.is_pod()
assert not f.type.is_pod()
def test_function_variadic():
"""Ensure Type.is_function_variadic works."""
source ="""
#include <stdarg.h>
void foo(int a, ...);
void bar(int a, int b);
"""
tu = get_tu(source)
foo = get_cursor(tu, 'foo')
bar = get_cursor(tu, 'bar')
assert foo is not None
assert bar is not None
assert isinstance(foo.type.is_function_variadic(), bool)
assert foo.type.is_function_variadic()
assert not bar.type.is_function_variadic()
def test_element_type():
"""Ensure Type.element_type works."""
tu = get_tu('int c[5]; int i[]; int x; int v[x];')
c = get_cursor(tu, 'c')
i = get_cursor(tu, 'i')
v = get_cursor(tu, 'v')
assert c is not None
assert i is not None
assert v is not None
assert c.type.kind == TypeKind.CONSTANTARRAY
assert c.type.element_type.kind == TypeKind.INT
assert i.type.kind == TypeKind.INCOMPLETEARRAY
assert i.type.element_type.kind == TypeKind.INT
assert v.type.kind == TypeKind.VARIABLEARRAY
assert v.type.element_type.kind == TypeKind.INT
@raises(Exception)
def test_invalid_element_type():
"""Ensure Type.element_type raises if type doesn't have elements."""
tu = get_tu('int i;')
i = get_cursor(tu, 'i')
assert i is not None
i.element_type
def test_element_count():
"""Ensure Type.element_count works."""
tu = get_tu('int i[5]; int j;')
i = get_cursor(tu, 'i')
j = get_cursor(tu, 'j')
assert i is not None
assert j is not None
assert i.type.element_count == 5
try:
j.type.element_count
assert False
except:
assert True
def test_is_volatile_qualified():
"""Ensure Type.is_volatile_qualified works."""
tu = get_tu('volatile int i = 4; int j = 2;')
i = get_cursor(tu, 'i')
j = get_cursor(tu, 'j')
assert i is not None
assert j is not None
assert isinstance(i.type.is_volatile_qualified(), bool)
assert i.type.is_volatile_qualified()
assert not j.type.is_volatile_qualified()
def test_is_restrict_qualified():
"""Ensure Type.is_restrict_qualified works."""
tu = get_tu('struct s { void * restrict i; void * j; };')
i = get_cursor(tu, 'i')
j = get_cursor(tu, 'j')
assert i is not None
assert j is not None
assert isinstance(i.type.is_restrict_qualified(), bool)
assert i.type.is_restrict_qualified()
assert not j.type.is_restrict_qualified()
def test_record_layout():
"""Ensure Cursor.type.get_size, Cursor.type.get_align and
Cursor.type.get_offset works."""
source ="""
struct a {
long a1;
long a2:3;
long a3:4;
long long a4;
};
"""
tries=[(['-target','i386-linux-gnu'],(4,16,0,32,35,64)),
(['-target','nvptx64-unknown-unknown'],(8,24,0,64,67,128)),
(['-target','i386-pc-win32'],(8,16,0,32,35,64)),
(['-target','msp430-none-none'],(2,14,0,32,35,48))]
for flags, values in tries:
align,total,a1,a2,a3,a4 = values
tu = get_tu(source, flags=flags)
teststruct = get_cursor(tu, 'a')
fields = list(teststruct.get_children())
assert teststruct.type.get_align() == align
assert teststruct.type.get_size() == total
assert teststruct.type.get_offset(fields[0].spelling) == a1
assert teststruct.type.get_offset(fields[1].spelling) == a2
assert teststruct.type.get_offset(fields[2].spelling) == a3
assert teststruct.type.get_offset(fields[3].spelling) == a4
assert fields[0].is_bitfield() == False
assert fields[1].is_bitfield() == True
assert fields[1].get_bitfield_width() == 3
assert fields[2].is_bitfield() == True
assert fields[2].get_bitfield_width() == 4
assert fields[3].is_bitfield() == False
def test_offset():
"""Ensure Cursor.get_record_field_offset works in anonymous records"""
source="""
struct Test {
struct {
int bariton;
union {
int foo;
};
};
int bar;
};"""
tries=[(['-target','i386-linux-gnu'],(4,16,0,32,64)),
(['-target','nvptx64-unknown-unknown'],(8,24,0,32,64)),
(['-target','i386-pc-win32'],(8,16,0,32,64)),
(['-target','msp430-none-none'],(2,14,0,32,64))]
for flags, values in tries:
align,total,bariton,foo,bar = values
tu = get_tu(source)
teststruct = get_cursor(tu, 'Test')
fields = list(teststruct.get_children())
assert teststruct.type.get_offset("bariton") == bariton
assert teststruct.type.get_offset("foo") == foo
assert teststruct.type.get_offset("bar") == bar
def test_decay():
"""Ensure decayed types are handled as the original type"""
tu = get_tu("void foo(int a[]);")
foo = get_cursor(tu, 'foo')
a = foo.type.argument_types()[0]
assert a.kind == TypeKind.INCOMPLETEARRAY
assert a.element_type.kind == TypeKind.INT
assert a.get_canonical().kind == TypeKind.INCOMPLETEARRAY

View File

@ -0,0 +1,93 @@
# This file provides common utility functions for the test suite.
from clang.cindex import Cursor
from clang.cindex import TranslationUnit
def get_tu(source, lang='c', all_warnings=False, flags=[]):
"""Obtain a translation unit from source and language.
The translation unit is created from a source file named "t.<ext>", where
<ext> is the default file extension for the requested language. The language
defaults to C, so the default file name is "t.c".
Supported languages are {c, cpp, objc}.
all_warnings is a convenience argument to enable all compiler warnings.
"""
args = list(flags)
name = 't.c'
if lang == 'cpp':
name = 't.cpp'
args.append('-std=c++11')
elif lang == 'objc':
name = 't.m'
elif lang != 'c':
raise Exception('Unknown language: %s' % lang)
if all_warnings:
args += ['-Wall', '-Wextra']
return TranslationUnit.from_source(name, args, unsaved_files=[(name,
source)])
def get_cursor(source, spelling):
"""Obtain a cursor from a source object.
This provides a convenient search mechanism to find a cursor with specific
spelling within a source. The first argument can be either a
TranslationUnit or Cursor instance.
If the cursor is not found, None is returned.
"""
children = []
if isinstance(source, Cursor):
children = source.get_children()
else:
# Assume TU
children = source.cursor.get_children()
for cursor in children:
if cursor.spelling == spelling:
return cursor
# Recurse into children.
result = get_cursor(cursor, spelling)
if result is not None:
return result
return None
def get_cursors(source, spelling):
"""Obtain all cursors from a source object with a specific spelling.
This provides a convenient search mechanism to find all cursors with specific
spelling within a source. The first argument can be either a
TranslationUnit or Cursor instance.
If no cursors are found, an empty list is returned.
"""
cursors = []
children = []
if isinstance(source, Cursor):
children = source.get_children()
else:
# Assume TU
children = source.cursor.get_children()
for cursor in children:
if cursor.spelling == spelling:
cursors.append(cursor)
# Recurse into children.
cursors.extend(get_cursors(cursor, spelling))
return cursors
__all__ = [
'get_cursor',
'get_cursors',
'get_tu',
]

View File

@ -0,0 +1,592 @@
<?xml version="1.0" encoding="UTF-8"?>
<grammar xmlns="http://relaxng.org/ns/structure/1.0"
datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
<start>
<choice>
<!-- Everything else not explicitly mentioned below. -->
<ref name="Other" />
<ref name="Function" />
<ref name="Class" />
<ref name="Variable" />
<ref name="Namespace" />
<ref name="Typedef" />
<ref name="Enum" />
</choice>
</start>
<define name="Other">
<element name="Other">
<ref name="attrSourceLocation" />
<ref name="Name" />
<optional>
<ref name="USR" />
</optional>
<optional>
<ref name="Headerfile" />
</optional>
<optional>
<ref name="Declaration" />
</optional>
<optional>
<ref name="Abstract" />
</optional>
<optional>
<ref name="TemplateParameters" />
</optional>
<optional>
<ref name="Parameters" />
</optional>
<optional>
<ref name="ResultDiscussion" />
</optional>
<optional>
<ref name="Discussion" />
</optional>
</element>
</define>
<define name="Function">
<element name="Function">
<optional>
<attribute name="templateKind">
<choice>
<value>template</value>
<value>specialization</value>
</choice>
</attribute>
</optional>
<ref name="attrSourceLocation" />
<optional>
<attribute name="isInstanceMethod">
<data type="boolean" />
</attribute>
</optional>
<optional>
<attribute name="isClassMethod">
<data type="boolean" />
</attribute>
</optional>
<ref name="Name" />
<optional>
<ref name="USR" />
</optional>
<optional>
<ref name="Headerfile" />
</optional>
<optional>
<ref name="Declaration" />
</optional>
<optional>
<ref name="Abstract" />
</optional>
<optional>
<ref name="TemplateParameters" />
</optional>
<optional>
<ref name="Parameters" />
</optional>
<optional>
<ref name="Exceptions" />
</optional>
<zeroOrMore>
<ref name="Availability" />
</zeroOrMore>
<zeroOrMore>
<ref name="Deprecated" />
</zeroOrMore>
<zeroOrMore>
<ref name="Unavailable" />
</zeroOrMore>
<optional>
<ref name="ResultDiscussion" />
</optional>
<optional>
<ref name="Discussion" />
</optional>
</element>
</define>
<define name="Class">
<element name="Class">
<optional>
<attribute name="templateKind">
<choice>
<value>template</value>
<value>specialization</value>
<value>partialSpecialization</value>
</choice>
</attribute>
</optional>
<ref name="attrSourceLocation" />
<ref name="Name" />
<optional>
<ref name="USR" />
</optional>
<optional>
<ref name="Headerfile" />
</optional>
<optional>
<ref name="Declaration" />
</optional>
<optional>
<ref name="Abstract" />
</optional>
<optional>
<ref name="TemplateParameters" />
</optional>
<!-- Parameters and results don't make sense for classes, but the user
can specify \param or \returns in a comment anyway. -->
<optional>
<ref name="Parameters" />
</optional>
<optional>
<ref name="ResultDiscussion" />
</optional>
<optional>
<ref name="Discussion" />
</optional>
</element>
</define>
<define name="Variable">
<element name="Variable">
<ref name="attrSourceLocation" />
<ref name="Name" />
<optional>
<ref name="USR" />
</optional>
<optional>
<ref name="Headerfile" />
</optional>
<optional>
<ref name="Declaration" />
</optional>
<optional>
<ref name="Abstract" />
</optional>
<!-- Template parameters, parameters and results don't make sense for
variables, but the user can specify \tparam \param or \returns
in a comment anyway. -->
<optional>
<ref name="TemplateParameters" />
</optional>
<optional>
<ref name="Parameters" />
</optional>
<optional>
<ref name="ResultDiscussion" />
</optional>
<optional>
<ref name="Discussion" />
</optional>
</element>
</define>
<define name="Namespace">
<element name="Namespace">
<ref name="attrSourceLocation" />
<ref name="Name" />
<optional>
<ref name="USR" />
</optional>
<optional>
<ref name="Headerfile" />
</optional>
<optional>
<ref name="Declaration" />
</optional>
<optional>
<ref name="Abstract" />
</optional>
<!-- Template parameters, parameters and results don't make sense for
namespaces, but the user can specify \tparam, \param or \returns
in a comment anyway. -->
<optional>
<ref name="TemplateParameters" />
</optional>
<optional>
<ref name="Parameters" />
</optional>
<optional>
<ref name="ResultDiscussion" />
</optional>
<optional>
<ref name="Discussion" />
</optional>
</element>
</define>
<define name="Typedef">
<element name="Typedef">
<ref name="attrSourceLocation" />
<ref name="Name" />
<optional>
<ref name="USR" />
</optional>
<optional>
<ref name="Headerfile" />
</optional>
<optional>
<ref name="Declaration" />
</optional>
<optional>
<ref name="Abstract" />
</optional>
<optional>
<ref name="TemplateParameters" />
</optional>
<!-- Parameters and results might make sense for typedefs if the type is
a function pointer type. -->
<optional>
<ref name="Parameters" />
</optional>
<optional>
<ref name="ResultDiscussion" />
</optional>
<optional>
<ref name="Discussion" />
</optional>
</element>
</define>
<define name="Enum">
<element name="Enum">
<ref name="attrSourceLocation" />
<ref name="Name" />
<optional>
<ref name="USR" />
</optional>
<optional>
<ref name="Headerfile" />
</optional>
<optional>
<ref name="Declaration" />
</optional>
<optional>
<ref name="Abstract" />
</optional>
<!-- Template parameters, parameters and results don't make sense for
enums, but the user can specify \tparam \param or \returns in a
comment anyway. -->
<optional>
<ref name="TemplateParameters" />
</optional>
<optional>
<ref name="Parameters" />
</optional>
<optional>
<ref name="ResultDiscussion" />
</optional>
<optional>
<ref name="Discussion" />
</optional>
</element>
</define>
<define name="attrSourceLocation">
<optional>
<attribute name="file">
<!-- Non-empty text content. -->
<data type="string">
<param name="pattern">.*\S.*</param>
</data>
</attribute>
</optional>
<optional>
<attribute name="line">
<data type="positiveInteger" />
</attribute>
<attribute name="column">
<data type="positiveInteger" />
</attribute>
</optional>
</define>
<define name="Name">
<element name="Name">
<!-- Non-empty text content. -->
<data type="string">
<param name="pattern">.*\S.*</param>
</data>
</element>
</define>
<define name="USR">
<element name="USR">
<!-- Non-empty text content. -->
<data type="string">
<param name="pattern">.*\S.*</param>
</data>
</element>
</define>
<define name="Abstract">
<element name="Abstract">
<zeroOrMore>
<ref name="TextBlockContent" />
</zeroOrMore>
</element>
</define>
<define name="Declaration">
<element name="Declaration">
<!-- Non-empty text content. -->
<data type="string"/>
</element>
</define>
<define name="Headerfile">
<element name="Headerfile">
<oneOrMore>
<ref name="TextBlockContent" />
</oneOrMore>
</element>
</define>
<define name="Discussion">
<element name="Discussion">
<zeroOrMore>
<ref name="TextBlockContent" />
</zeroOrMore>
</element>
</define>
<define name="TemplateParameters">
<element name="TemplateParameters">
<!-- Parameter elements should be sorted according to position. -->
<oneOrMore>
<element name="Parameter">
<element name="Name">
<!-- Non-empty text content. -->
<data type="string">
<param name="pattern">.*\S.*</param>
</data>
</element>
<optional>
<!-- This is index at depth 0. libclang API can return more
information about position, but we expose only essential
information here, since "Parameter" elements are already
sorted.
"Position" element could be added in future if needed. -->
<element name="Index">
<data type="nonNegativeInteger" />
</element>
</optional>
<!-- In general, template parameters with whitespace discussion
should not be emitted. Schema might be more strict here. -->
<element name="Discussion">
<ref name="TextBlockContent" />
</element>
</element>
</oneOrMore>
</element>
</define>
<define name="Parameters">
<element name="Parameters">
<!-- Parameter elements should be sorted according to index. -->
<oneOrMore>
<element name="Parameter">
<element name="Name">
<!-- Non-empty text content. -->
<data type="string">
<param name="pattern">.*\S.*</param>
</data>
</element>
<optional>
<choice>
<element name="Index">
<data type="nonNegativeInteger" />
</element>
<element name="IsVarArg">
<empty />
</element>
</choice>
</optional>
<element name="Direction">
<attribute name="isExplicit">
<data type="boolean" />
</attribute>
<choice>
<value>in</value>
<value>out</value>
<value>in,out</value>
</choice>
</element>
<!-- In general, template parameters with whitespace discussion
should not be emitted, unless direction is explicitly specified.
Schema might be more strict here. -->
<element name="Discussion">
<ref name="TextBlockContent" />
</element>
</element>
</oneOrMore>
</element>
</define>
<define name="Exceptions">
<element name="Exceptions">
<oneOrMore>
<ref name="TextBlockContent" />
</oneOrMore>
</element>
</define>
<define name="Availability">
<element name="Availability">
<attribute name="distribution">
<data type="string" />
</attribute>
<optional>
<element name="IntroducedInVersion">
<data type="string">
          <param name="pattern">\d+|\d+\.\d+|\d+\.\d+\.\d+</param>
</data>
</element>
</optional>
<optional>
<element name="DeprecatedInVersion">
<data type="string">
          <param name="pattern">\d+|\d+\.\d+|\d+\.\d+\.\d+</param>
</data>
</element>
</optional>
<optional>
<element name="RemovedAfterVersion">
<data type="string">
          <param name="pattern">\d+|\d+\.\d+|\d+\.\d+\.\d+</param>
</data>
</element>
</optional>
<optional>
<element name="DeprecationSummary">
<data type="string" />
</element>
</optional>
<optional>
<ref name="Unavailable" />
</optional>
</element>
</define>
<define name="Deprecated">
<element name="Deprecated">
<optional>
<data type="string" />
</optional>
</element>
</define>
<define name="Unavailable">
<element name="Unavailable">
<optional>
<data type="string" />
</optional>
</element>
</define>
<define name="ResultDiscussion">
<element name="ResultDiscussion">
<zeroOrMore>
<ref name="TextBlockContent" />
</zeroOrMore>
</element>
</define>
<define name="TextBlockContent">
<choice>
<element name="Para">
<optional>
<attribute name="kind">
<choice>
<value>attention</value>
<value>author</value>
<value>authors</value>
<value>bug</value>
<value>copyright</value>
<value>date</value>
<value>invariant</value>
<value>note</value>
<value>post</value>
<value>pre</value>
<value>remark</value>
<value>remarks</value>
<value>sa</value>
<value>see</value>
<value>since</value>
<value>todo</value>
<value>version</value>
<value>warning</value>
</choice>
</attribute>
</optional>
<zeroOrMore>
<ref name="TextInlineContent" />
</zeroOrMore>
</element>
<element name="Verbatim">
<attribute name="xml:space">
<value>preserve</value>
</attribute>
<attribute name="kind">
<!-- TODO: add all Doxygen verbatim kinds -->
<choice>
<value>code</value>
<value>verbatim</value>
</choice>
</attribute>
<text />
</element>
</choice>
</define>
<define name="TextInlineContent">
<choice>
<text />
<element name="bold">
<!-- Non-empty text content. -->
<data type="string">
<param name="pattern">.*\S.*</param>
</data>
</element>
<element name="monospaced">
<!-- Non-empty text content. -->
<data type="string">
<param name="pattern">.*\S.*</param>
</data>
</element>
<element name="emphasized">
<!-- Non-empty text content. -->
<data type="string">
<param name="pattern">.*\S.*</param>
</data>
</element>
<element name="rawHTML">
<!-- Non-empty text content. -->
<data type="string">
<param name="pattern">.*\S.*</param>
</data>
</element>
</choice>
</define>
</grammar>

View File

@ -0,0 +1,199 @@
================
AddressSanitizer
================
.. contents::
:local:
Introduction
============
AddressSanitizer is a fast memory error detector. It consists of a compiler
instrumentation module and a run-time library. The tool can detect the
following types of bugs:
* Out-of-bounds accesses to heap, stack and globals
* Use-after-free
* Use-after-return (to some extent)
* Double-free, invalid free
* Memory leaks (experimental)
Typical slowdown introduced by AddressSanitizer is **2x**.
How to build
============
Follow the `clang build instructions <../get_started.html>`_. CMake build is
supported.
Usage
=====
Simply compile and link your program with the ``-fsanitize=address`` flag. The
AddressSanitizer run-time library should be linked into the final executable, so
make sure to use ``clang`` (not ``ld``) for the final link step. When linking
shared libraries, the AddressSanitizer run-time is not linked, so
``-Wl,-z,defs`` may cause link errors (don't use it with AddressSanitizer). To
get reasonable performance, add ``-O1`` or higher. To get nicer stack traces
in error messages, add ``-fno-omit-frame-pointer``. To get perfect stack traces
you may need to disable inlining (just use ``-O1``) and tail call elimination
(``-fno-optimize-sibling-calls``).
.. code-block:: console
% cat example_UseAfterFree.cc
int main(int argc, char **argv) {
int *array = new int[100];
delete [] array;
return array[argc]; // BOOM
}
# Compile and link
% clang -O1 -g -fsanitize=address -fno-omit-frame-pointer example_UseAfterFree.cc
or:
.. code-block:: console
# Compile
% clang -O1 -g -fsanitize=address -fno-omit-frame-pointer -c example_UseAfterFree.cc
# Link
% clang -g -fsanitize=address example_UseAfterFree.o
If a bug is detected, the program will print an error message to stderr and
exit with a non-zero exit code. Currently, AddressSanitizer does not symbolize
its output, so you may need to use a separate script to symbolize the result
offline (this will be fixed in the future).
.. code-block:: console
% ./a.out 2> log
% projects/compiler-rt/lib/asan/scripts/asan_symbolize.py / < log | c++filt
==9442== ERROR: AddressSanitizer heap-use-after-free on address 0x7f7ddab8c084 at pc 0x403c8c bp 0x7fff87fb82d0 sp 0x7fff87fb82c8
READ of size 4 at 0x7f7ddab8c084 thread T0
#0 0x403c8c in main example_UseAfterFree.cc:4
#1 0x7f7ddabcac4d in __libc_start_main ??:0
0x7f7ddab8c084 is located 4 bytes inside of 400-byte region [0x7f7ddab8c080,0x7f7ddab8c210)
freed by thread T0 here:
#0 0x404704 in operator delete[](void*) ??:0
#1 0x403c53 in main example_UseAfterFree.cc:4
#2 0x7f7ddabcac4d in __libc_start_main ??:0
previously allocated by thread T0 here:
#0 0x404544 in operator new[](unsigned long) ??:0
#1 0x403c43 in main example_UseAfterFree.cc:2
#2 0x7f7ddabcac4d in __libc_start_main ??:0
==9442== ABORTING
AddressSanitizer exits on the first detected error. This is by design.
One reason: it makes the generated code smaller and faster (both by
~5%). Another reason: this makes fixing bugs unavoidable. With Valgrind,
it is often the case that users treat Valgrind warnings as false
positives (which they are not) and don't fix them.
``__has_feature(address_sanitizer)``
------------------------------------
In some cases one may need to execute different code depending on whether
AddressSanitizer is enabled.
:ref:`\_\_has\_feature <langext-__has_feature-__has_extension>` can be used for
this purpose.
.. code-block:: c
#if defined(__has_feature)
# if __has_feature(address_sanitizer)
// code that builds only under AddressSanitizer
# endif
#endif
``__attribute__((no_sanitize_address))``
-----------------------------------------------
Some code should not be instrumented by AddressSanitizer. One may use the
function attribute
:ref:`no_sanitize_address <langext-address_sanitizer>`
(or the deprecated synonym `no_address_safety_analysis`)
to disable instrumentation of a particular function. This attribute may not be
supported by other compilers, so we suggest using it together with
``__has_feature(address_sanitizer)``.
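For example, a guarded macro (the macro name here is illustrative, not part of
the AddressSanitizer interface) keeps the attribute out of builds that are not
using AddressSanitizer:

.. code-block:: c

  // Hypothetical convenience macro: expands to the attribute only when the
  // code is actually being compiled with AddressSanitizer.
  #if defined(__has_feature)
  #  if __has_feature(address_sanitizer)
  #    define ATTRIBUTE_NO_SANITIZE_ADDRESS __attribute__((no_sanitize_address))
  #  endif
  #endif
  #ifndef ATTRIBUTE_NO_SANITIZE_ADDRESS
  #  define ATTRIBUTE_NO_SANITIZE_ADDRESS
  #endif

  ATTRIBUTE_NO_SANITIZE_ADDRESS
  void intentionally_unchecked(char *p, int i) {
    p[i] = 0;  // accesses in this function are not instrumented
  }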
Initialization order checking
-----------------------------
AddressSanitizer can optionally detect dynamic initialization order problems,
when initialization of globals defined in one translation unit uses
globals defined in another translation unit. To enable this check at runtime,
you should set environment variable
``ASAN_OPTIONS=check_initialization_order=1``.
Blacklist
---------
AddressSanitizer supports ``src`` and ``fun`` entity types in
:doc:`SanitizerSpecialCaseList`, which can be used to suppress error reports
in the specified source files or functions. Additionally, AddressSanitizer
introduces ``global`` and ``type`` entity types that can be used to
suppress error reports for out-of-bounds accesses to globals with certain
names and types (you may only specify class or struct types).
You may use an ``init`` category to suppress reports about initialization-order
problems happening in certain source files or with certain global variables.
.. code-block:: bash
# Suppress error reports for code in a file or in a function:
src:bad_file.cpp
# Ignore all functions with names containing MyFooBar:
fun:*MyFooBar*
# Disable out-of-bound checks for global:
global:bad_array
# Disable out-of-bound checks for global instances of a given class ...
type:class.Namespace::BadClassName
# ... or a given struct. Use wildcard to deal with anonymous namespace.
type:struct.Namespace2::*::BadStructName
# Disable initialization-order checks for globals:
global:bad_init_global=init
type:*BadInitClassSubstring*=init
src:bad/init/files/*=init
Memory leak detection
---------------------
For the experimental memory leak detector in AddressSanitizer, see
:doc:`LeakSanitizer`.
Supported Platforms
===================
AddressSanitizer is supported on
* Linux i386/x86\_64 (tested on Ubuntu 10.04 and 12.04);
* MacOS 10.6, 10.7 and 10.8 (i386/x86\_64).
Support for Linux ARM (and Android ARM) is in progress (it may work, but
is not guaranteed to).
Limitations
===========
* AddressSanitizer uses more real memory than a native run. Exact overhead
  depends on the allocation sizes. The smaller the allocations you make, the
  bigger the overhead.
* AddressSanitizer uses more stack memory. We have seen up to 3x increase.
* On 64-bit platforms AddressSanitizer maps (but does not reserve) 16+ terabytes
  of virtual address space. This means that tools like ``ulimit`` may not work as
  expected.
* Static linking is not supported.
Current Status
==============
AddressSanitizer is fully functional on supported platforms starting from LLVM
3.1. The test suite is integrated into the CMake build and can be run with the
``make check-asan`` command.
More Information
================
`http://code.google.com/p/address-sanitizer <http://code.google.com/p/address-sanitizer/>`_

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,935 @@
==================================
Block Implementation Specification
==================================
.. contents::
:local:
History
=======
* 2008/7/14 - created.
* 2008/8/21 - revised, C++.
* 2008/9/24 - add ``NULL`` ``isa`` field to ``__block`` storage.
* 2008/10/1 - revise block layout to use a ``static`` descriptor structure.
* 2008/10/6 - revise block layout to use an unsigned long int flags.
* 2008/10/28 - specify use of ``_Block_object_assign`` and
``_Block_object_dispose`` for all "Object" types in helper functions.
* 2008/10/30 - revise new layout to have invoke function in same place.
* 2008/10/30 - add ``__weak`` support.
* 2010/3/16 - rev for stret return, signature field.
* 2010/4/6 - improved wording.
* 2013/1/6 - improved wording and converted to rst.
This document describes the Apple ABI implementation specification of Blocks.
The first shipping version of this ABI is found in Mac OS X 10.6, and shall be
referred to as 10.6.ABI. As of 2010/3/16, the following describes the ABI
contract with the runtime and the compiler, and, as necessary, will be referred
to as ABI.2010.3.16.
Since the Apple ABI references symbols from other elements of the system, any
attempt to use this ABI on systems prior to Snow Leopard is undefined.
High Level
==========
The ABI of ``Blocks`` consists of their layout and the runtime functions required
by the compiler. A ``Block`` consists of a structure of the following form:
.. code-block:: c
struct Block_literal_1 {
void *isa; // initialized to &_NSConcreteStackBlock or &_NSConcreteGlobalBlock
int flags;
int reserved;
void (*invoke)(void *, ...);
struct Block_descriptor_1 {
unsigned long int reserved; // NULL
unsigned long int size; // sizeof(struct Block_literal_1)
// optional helper functions
void (*copy_helper)(void *dst, void *src); // IFF (1<<25)
void (*dispose_helper)(void *src); // IFF (1<<25)
// required ABI.2010.3.16
const char *signature; // IFF (1<<30)
} *descriptor;
// imported variables
};
The following flag bits are in use for a possible ABI.2010.3.16:
.. code-block:: c
enum {
BLOCK_HAS_COPY_DISPOSE = (1 << 25),
BLOCK_HAS_CTOR = (1 << 26), // helpers have C++ code
BLOCK_IS_GLOBAL = (1 << 28),
BLOCK_HAS_STRET = (1 << 29), // IFF BLOCK_HAS_SIGNATURE
BLOCK_HAS_SIGNATURE = (1 << 30),
};
In 10.6.ABI the (1<<29) was usually set and was always ignored by the runtime -
it had been a transitional marker that did not get deleted after the
transition. This bit is now paired with (1<<30), and represented as the pair
(3<<29), for the following combinations of valid bit settings, and their
meanings:
.. code-block:: c
switch (flags & (3<<29)) {
case (0<<29): 10.6.ABI, no signature field available
case (1<<29): 10.6.ABI, no signature field available
case (2<<29): ABI.2010.3.16, regular calling convention, presence of signature field
case (3<<29): ABI.2010.3.16, stret calling convention, presence of signature field,
}
The signature field is not always populated.
The following discussions are presented as 10.6.ABI otherwise.
``Block`` literals may occur within functions where the structure is created in
stack local memory. They may also appear as initialization expressions for
``Block`` variables of global or ``static`` local variables.
When a ``Block`` literal expression is evaluated the stack based structure is
initialized as follows:
1. A ``static`` descriptor structure is declared and initialized as follows:
a. The ``invoke`` function pointer is set to a function that takes the
``Block`` structure as its first argument and the rest of the arguments (if
any) to the ``Block`` and executes the ``Block`` compound statement.
b. The ``size`` field is set to the size of the following ``Block`` literal
structure.
c. The ``copy_helper`` and ``dispose_helper`` function pointers are set to
respective helper functions if they are required by the ``Block`` literal.
2. A stack (or global) ``Block`` literal data structure is created and
initialized as follows:
a. The ``isa`` field is set to the address of the external
``_NSConcreteStackBlock``, which is a block of uninitialized memory supplied
in ``libSystem``, or ``_NSConcreteGlobalBlock`` if this is a static or file
level ``Block`` literal.
b. The ``flags`` field is set to zero unless there are variables imported
into the ``Block`` that need helper functions for program level
``Block_copy()`` and ``Block_release()`` operations, in which case the
(1<<25) flags bit is set.
As an example, the ``Block`` literal expression:
.. code-block:: c
^ { printf("hello world\n"); }
would cause the following to be created on a 32-bit system:
.. code-block:: c
struct __block_literal_1 {
void *isa;
int flags;
int reserved;
void (*invoke)(struct __block_literal_1 *);
struct __block_descriptor_1 *descriptor;
};
void __block_invoke_1(struct __block_literal_1 *_block) {
printf("hello world\n");
}
static struct __block_descriptor_1 {
unsigned long int reserved;
unsigned long int Block_size;
    } __block_descriptor_1 = { 0, sizeof(struct __block_literal_1) };
and where the ``Block`` literal itself appears:
.. code-block:: c
struct __block_literal_1 _block_literal = {
&_NSConcreteStackBlock,
(1<<29), <uninitialized>,
__block_invoke_1,
&__block_descriptor_1
};
A ``Block`` imports other ``Block`` references, ``const`` copies of other
variables, and variables marked ``__block``. In Objective-C, variables may
additionally be objects.
When a ``Block`` literal expression is used as the initial value of a global
or ``static`` local variable, it is initialized as follows:
.. code-block:: c
struct __block_literal_1 __block_literal_1 = {
&_NSConcreteGlobalBlock,
(1<<28)|(1<<29), <uninitialized>,
__block_invoke_1,
&__block_descriptor_1
};
that is, a different address is provided as the first value and a particular
(1<<28) bit is set in the ``flags`` field, and otherwise it is the same as for
stack based ``Block`` literals. This is an optimization that can be used for
any ``Block`` literal that imports no ``const`` or ``__block`` storage
variables.
Imported Variables
==================
Variables of ``auto`` storage class are imported as ``const`` copies. Variables
of ``__block`` storage class are imported as a pointer to an enclosing data
structure. Global variables are simply referenced and not considered as
imported.
Imported ``const`` copy variables
---------------------------------
Automatic storage variables not marked with ``__block`` are imported as
``const`` copies.
The simplest example is that of importing a variable of type ``int``:
.. code-block:: c
int x = 10;
void (^vv)(void) = ^{ printf("x is %d\n", x); }
x = 11;
vv();
which would be compiled to:
.. code-block:: c
struct __block_literal_2 {
void *isa;
int flags;
int reserved;
void (*invoke)(struct __block_literal_2 *);
struct __block_descriptor_2 *descriptor;
const int x;
};
void __block_invoke_2(struct __block_literal_2 *_block) {
printf("x is %d\n", _block->x);
}
static struct __block_descriptor_2 {
unsigned long int reserved;
unsigned long int Block_size;
} __block_descriptor_2 = { 0, sizeof(struct __block_literal_2) };
and:
.. code-block:: c
struct __block_literal_2 __block_literal_2 = {
&_NSConcreteStackBlock,
(1<<29), <uninitialized>,
__block_invoke_2,
&__block_descriptor_2,
x
};
In summary, scalars, structures, unions, and function pointers are generally
imported as ``const`` copies with no need for helper functions.
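As an additional illustration (user-level code, not the compiler-synthesized
structures shown above; compile with ``-fblocks``), a structure is captured by
value just like the scalar ``x``:

.. code-block:: c

    #include <stdio.h>

    struct point { int x, y; };

    int main(void) {
        struct point p = { 1, 2 };
        void (^show)(void) = ^{ printf("(%d, %d)\n", p.x, p.y); };
        p.x = 99;   // mutating the original does not affect the captured const copy
        show();     // prints "(1, 2)"
        return 0;
    }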
Imported ``const`` copy of ``Block`` reference
----------------------------------------------
The first case where copy and dispose helper functions are required is for the
case of when a ``Block`` itself is imported. In this case both a
``copy_helper`` function and a ``dispose_helper`` function are needed. The
``copy_helper`` function is passed both the existing stack based pointer and the
pointer to the new heap version and should call back into the runtime to
actually do the copy operation on the imported fields within the ``Block``. The
runtime functions are all described in :ref:`RuntimeHelperFunctions`.
A quick example:
.. code-block:: c
void (^existingBlock)(void) = ...;
void (^vv)(void) = ^{ existingBlock(); }
vv();
struct __block_literal_3 {
...; // existing block
};
struct __block_literal_4 {
void *isa;
int flags;
int reserved;
void (*invoke)(struct __block_literal_4 *);
struct __block_literal_3 *const existingBlock;
};
    void __block_invoke_4(struct __block_literal_4 *_block) {
        _block->existingBlock->invoke(_block->existingBlock);
}
void __block_copy_4(struct __block_literal_4 *dst, struct __block_literal_4 *src) {
//_Block_copy_assign(&dst->existingBlock, src->existingBlock, 0);
_Block_object_assign(&dst->existingBlock, src->existingBlock, BLOCK_FIELD_IS_BLOCK);
}
void __block_dispose_4(struct __block_literal_4 *src) {
// was _Block_destroy
_Block_object_dispose(src->existingBlock, BLOCK_FIELD_IS_BLOCK);
}
static struct __block_descriptor_4 {
unsigned long int reserved;
unsigned long int Block_size;
void (*copy_helper)(struct __block_literal_4 *dst, struct __block_literal_4 *src);
void (*dispose_helper)(struct __block_literal_4 *);
} __block_descriptor_4 = {
0,
sizeof(struct __block_literal_4),
__block_copy_4,
__block_dispose_4,
};
and where said ``Block`` is used:
.. code-block:: c
struct __block_literal_4 _block_literal = {
&_NSConcreteStackBlock,
          (1<<25)|(1<<29), <uninitialized>,
          __block_invoke_4,
          &__block_descriptor_4,
          existingBlock,
};
Importing ``__attribute__((NSObject))`` variables
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
GCC introduces ``__attribute__((NSObject))`` on structure pointers to mean "this
is an object". This is useful because many low level data structures are
declared as opaque structure pointers, e.g. ``CFStringRef``, ``CFArrayRef``,
etc. When used from C, however, these are still really objects and are the
second case that requires copy and dispose helper functions to be
generated. The copy helper functions generated by the compiler should use the
``_Block_object_assign`` runtime helper function and in the dispose helper the
``_Block_object_dispose`` runtime helper function should be called.
For example, ``Block`` foo in the following:
.. code-block:: c
struct Opaque *__attribute__((NSObject)) objectPointer = ...;
...
void (^foo)(void) = ^{ CFPrint(objectPointer); };
would have the following helper functions generated:
.. code-block:: c
void __block_copy_foo(struct __block_literal_5 *dst, struct __block_literal_5 *src) {
_Block_object_assign(&dst->objectPointer, src-> objectPointer, BLOCK_FIELD_IS_OBJECT);
}
void __block_dispose_foo(struct __block_literal_5 *src) {
_Block_object_dispose(src->objectPointer, BLOCK_FIELD_IS_OBJECT);
}
Imported ``__block`` marked variables
-------------------------------------
Layout of ``__block`` marked variables
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The compiler must embed variables that are marked ``__block`` in a specialized
structure of the form:
.. code-block:: c
struct _block_byref_foo {
void *isa;
struct Block_byref *forwarding;
int flags; //refcount;
int size;
typeof(marked_variable) marked_variable;
};
Variables of certain types require helper functions for when ``Block_copy()``
and ``Block_release()`` are performed upon a referencing ``Block``. At the "C"
level only variables that are of type ``Block`` or ones that have
``__attribute__((NSObject))`` marked require helper functions. In Objective-C
objects require helper functions and in C++ stack based objects require helper
functions. Variables that require helper functions use the form:
.. code-block:: c
struct _block_byref_foo {
void *isa;
struct _block_byref_foo *forwarding;
int flags; //refcount;
int size;
// helper functions called via Block_copy() and Block_release()
void (*byref_keep)(void *dst, void *src);
void (*byref_dispose)(void *);
typeof(marked_variable) marked_variable;
};
The structure is initialized such that:
a. The ``forwarding`` pointer is set to the beginning of its enclosing
structure.
b. The ``size`` field is initialized to the total size of the enclosing
structure.
c. The ``flags`` field is set to either 0 if no helper functions are needed
or (1<<25) if they are.
d. The helper functions are initialized (if present).
e. The variable itself is set to its initial value.
f. The ``isa`` field is set to ``NULL``.
Access to ``__block`` variables from within its lexical scope
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
In order to "move" the variable to the heap upon a ``copy_helper`` operation the
compiler must rewrite access to such a variable to be indirect through the
structures ``forwarding`` pointer. For example:
.. code-block:: c
int __block i = 10;
i = 11;
would be rewritten to be:
.. code-block:: c
struct _block_byref_i {
void *isa;
struct _block_byref_i *forwarding;
int flags; //refcount;
int size;
int captured_i;
} i = { NULL, &i, 0, sizeof(struct _block_byref_i), 10 };
i.forwarding->captured_i = 11;
In the case of a ``Block`` reference variable being marked ``__block`` the
helper code generated must use the ``_Block_object_assign`` and
``_Block_object_dispose`` routines supplied by the runtime to make the
copies. For example:
.. code-block:: c
    __block void (^voidBlock)(void) = blockA;
voidBlock = blockB;
would translate into:
.. code-block:: c
struct _block_byref_voidBlock {
void *isa;
struct _block_byref_voidBlock *forwarding;
int flags; //refcount;
int size;
void (*byref_keep)(struct _block_byref_voidBlock *dst, struct _block_byref_voidBlock *src);
void (*byref_dispose)(struct _block_byref_voidBlock *);
void (^captured_voidBlock)(void);
};
void _block_byref_keep_helper(struct _block_byref_voidBlock *dst, struct _block_byref_voidBlock *src) {
//_Block_copy_assign(&dst->captured_voidBlock, src->captured_voidBlock, 0);
_Block_object_assign(&dst->captured_voidBlock, src->captured_voidBlock, BLOCK_FIELD_IS_BLOCK | BLOCK_BYREF_CALLER);
}
void _block_byref_dispose_helper(struct _block_byref_voidBlock *param) {
//_Block_destroy(param->captured_voidBlock, 0);
        _Block_object_dispose(param->captured_voidBlock, BLOCK_FIELD_IS_BLOCK | BLOCK_BYREF_CALLER);
    }
and:
.. code-block:: c
    struct _block_byref_voidBlock voidBlock = {( .forwarding=&voidBlock, .flags=(1<<25), .size=sizeof(struct _block_byref_voidBlock),
.byref_keep=_block_byref_keep_helper, .byref_dispose=_block_byref_dispose_helper,
.captured_voidBlock=blockA )};
voidBlock.forwarding->captured_voidBlock = blockB;
Importing ``__block`` variables into ``Blocks``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
A ``Block`` that uses a ``__block`` variable in its compound statement body must
import the variable and emit ``copy_helper`` and ``dispose_helper`` helper
functions that, in turn, call back into the runtime to actually copy or release
the ``byref`` data block using the functions ``_Block_object_assign`` and
``_Block_object_dispose``.
For example:
.. code-block:: c
int __block i = 2;
functioncall(^{ i = 10; });
would translate to:
.. code-block:: c
struct _block_byref_i {
void *isa; // set to NULL
        struct _block_byref_i *forwarding;
int flags; //refcount;
int size;
void (*byref_keep)(struct _block_byref_i *dst, struct _block_byref_i *src);
void (*byref_dispose)(struct _block_byref_i *);
int captured_i;
};
struct __block_literal_5 {
void *isa;
int flags;
int reserved;
void (*invoke)(struct __block_literal_5 *);
struct __block_descriptor_5 *descriptor;
struct _block_byref_i *i_holder;
};
void __block_invoke_5(struct __block_literal_5 *_block) {
        _block->i_holder->forwarding->captured_i = 10;
}
void __block_copy_5(struct __block_literal_5 *dst, struct __block_literal_5 *src) {
//_Block_byref_assign_copy(&dst->captured_i, src->captured_i);
_Block_object_assign(&dst->captured_i, src->captured_i, BLOCK_FIELD_IS_BYREF | BLOCK_BYREF_CALLER);
}
void __block_dispose_5(struct __block_literal_5 *src) {
//_Block_byref_release(src->captured_i);
_Block_object_dispose(src->captured_i, BLOCK_FIELD_IS_BYREF | BLOCK_BYREF_CALLER);
}
static struct __block_descriptor_5 {
unsigned long int reserved;
unsigned long int Block_size;
void (*copy_helper)(struct __block_literal_5 *dst, struct __block_literal_5 *src);
void (*dispose_helper)(struct __block_literal_5 *);
    } __block_descriptor_5 = { 0, sizeof(struct __block_literal_5), __block_copy_5, __block_dispose_5 };
and:
.. code-block:: c
    struct _block_byref_i i = {( .forwarding=&i, .flags=0, .size=sizeof(struct _block_byref_i), .captured_i=2 )};
struct __block_literal_5 _block_literal = {
&_NSConcreteStackBlock,
(1<<25)|(1<<29), <uninitialized>,
__block_invoke_5,
&__block_descriptor_5,
          &i,  // a reference to the on-stack structure containing "captured_i"
};
Importing ``__attribute__((NSObject))`` ``__block`` variables
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
A ``__block`` variable that is also marked ``__attribute__((NSObject))`` should
have ``byref_keep`` and ``byref_dispose`` helper functions that use
``_Block_object_assign`` and ``_Block_object_dispose``.
``__block`` escapes
^^^^^^^^^^^^^^^^^^^
Because ``Blocks`` referencing ``__block`` variables may have ``Block_copy()``
performed upon them the underlying storage for the variables may move to the
heap. In Objective-C Garbage Collection Only compilation environments the heap
used is the garbage collected one and no further action is required. Otherwise
the compiler must issue a call to potentially release any heap storage for
``__block`` variables at all escapes or terminations of their scope. The call
should be:
.. code-block:: c
_Block_object_dispose(&_block_byref_foo, BLOCK_FIELD_IS_BYREF);
Nesting
^^^^^^^
``Blocks`` may contain ``Block`` literal expressions. Any variables used within
inner blocks are imported into all enclosing ``Block`` scopes even if the
variables are not used. This includes ``const`` imports as well as ``__block``
variables.
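As an illustration (user-level code, not compiler output; compile with
``-fblocks`` and, outside of Darwin, a Blocks runtime):

.. code-block:: c

    #include <stdio.h>

    int main(void) {
        int x = 10;
        void (^outer)(void) = ^{
            // Only the inner Block mentions x, yet x is imported into the
            // enclosing outer Block as well, as a const copy.
            void (^inner)(void) = ^{ printf("x is %d\n", x); };
            inner();
        };
        outer();
        return 0;
    }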
Objective C Extensions to ``Blocks``
====================================
Importing Objects
-----------------
Objects should be treated as ``__attribute__((NSObject))`` variables; all
``copy_helper``, ``dispose_helper``, ``byref_keep``, and ``byref_dispose``
helper functions should use ``_Block_object_assign`` and
``_Block_object_dispose``. There should be no code generated that uses
``*-retain`` or ``*-release`` methods.
``Blocks`` as Objects
---------------------
The compiler will treat ``Blocks`` as objects when synthesizing property setters
and getters, will characterize them as objects when generating garbage
collection strong and weak layout information in the same manner as objects, and
will issue strong and weak write-barrier assignments in the same manner as
objects.
``__weak __block`` Support
--------------------------
Objective-C (and Objective-C++) support the ``__weak`` attribute on ``__block``
variables. Under normal circumstances the compiler uses the Objective-C runtime
helper support functions ``objc_assign_weak`` and ``objc_read_weak``. Both
should continue to be used for all reads and writes of ``__weak __block``
variables:
.. code-block:: c
objc_read_weak(&block->byref_i->forwarding->i)
The ``__weak`` variable is stored in a ``_block_byref_foo`` structure and the
``Block`` has copy and dispose helpers for this structure that call:
.. code-block:: c
_Block_object_assign(&dest->_block_byref_i, src-> _block_byref_i, BLOCK_FIELD_IS_WEAK | BLOCK_FIELD_IS_BYREF);
and:
.. code-block:: c
_Block_object_dispose(src->_block_byref_i, BLOCK_FIELD_IS_WEAK | BLOCK_FIELD_IS_BYREF);
In turn, the ``block_byref`` copy support helpers distinguish between whether
the ``__block`` variable is a ``Block`` or not and should either call:
.. code-block:: c
_Block_object_assign(&dest->_block_byref_i, src->_block_byref_i, BLOCK_FIELD_IS_WEAK | BLOCK_FIELD_IS_OBJECT | BLOCK_BYREF_CALLER);
for something declared as an object or:
.. code-block:: c
_Block_object_assign(&dest->_block_byref_i, src->_block_byref_i, BLOCK_FIELD_IS_WEAK | BLOCK_FIELD_IS_BLOCK | BLOCK_BYREF_CALLER);
for something declared as a ``Block``.
A full example follows:
.. code-block:: c
__block __weak id obj = <initialization expression>;
functioncall(^{ [obj somemessage]; });
would translate to:
.. code-block:: c
struct _block_byref_obj {
void *isa; // uninitialized
struct _block_byref_obj *forwarding;
int flags; //refcount;
int size;
        void (*byref_keep)(struct _block_byref_obj *dst, struct _block_byref_obj *src);
        void (*byref_dispose)(struct _block_byref_obj *);
id captured_obj;
};
    void _block_byref_obj_keep(struct _block_byref_obj *dst, struct _block_byref_obj *src) {
//_Block_copy_assign(&dst->captured_obj, src->captured_obj, 0);
_Block_object_assign(&dst->captured_obj, src->captured_obj, BLOCK_FIELD_IS_OBJECT | BLOCK_FIELD_IS_WEAK | BLOCK_BYREF_CALLER);
}
    void _block_byref_obj_dispose(struct _block_byref_obj *param) {
//_Block_destroy(param->captured_obj, 0);
_Block_object_dispose(param->captured_obj, BLOCK_FIELD_IS_OBJECT | BLOCK_FIELD_IS_WEAK | BLOCK_BYREF_CALLER);
};
for the block ``byref`` part and:
.. code-block:: c
struct __block_literal_5 {
void *isa;
int flags;
int reserved;
void (*invoke)(struct __block_literal_5 *);
struct __block_descriptor_5 *descriptor;
struct _block_byref_obj *byref_obj;
};
void __block_invoke_5(struct __block_literal_5 *_block) {
[objc_read_weak(&_block->byref_obj->forwarding->captured_obj) somemessage];
}
void __block_copy_5(struct __block_literal_5 *dst, struct __block_literal_5 *src) {
//_Block_byref_assign_copy(&dst->byref_obj, src->byref_obj);
_Block_object_assign(&dst->byref_obj, src->byref_obj, BLOCK_FIELD_IS_BYREF | BLOCK_FIELD_IS_WEAK);
}
void __block_dispose_5(struct __block_literal_5 *src) {
//_Block_byref_release(src->byref_obj);
_Block_object_dispose(src->byref_obj, BLOCK_FIELD_IS_BYREF | BLOCK_FIELD_IS_WEAK);
}
static struct __block_descriptor_5 {
unsigned long int reserved;
unsigned long int Block_size;
void (*copy_helper)(struct __block_literal_5 *dst, struct __block_literal_5 *src);
void (*dispose_helper)(struct __block_literal_5 *);
} __block_descriptor_5 = { 0, sizeof(struct __block_literal_5), __block_copy_5, __block_dispose_5 };
and within the compound statement:
.. code-block:: c
    struct _block_byref_obj obj = {( .forwarding=&obj, .flags=(1<<25), .size=sizeof(struct _block_byref_obj),
.byref_keep=_block_byref_obj_keep, .byref_dispose=_block_byref_obj_dispose,
.captured_obj = <initialization expression> )};
    struct __block_literal_5 _block_literal = {
&_NSConcreteStackBlock,
(1<<25)|(1<<29), <uninitialized>,
__block_invoke_5,
&__block_descriptor_5,
&obj, // a reference to the on-stack structure containing "captured_obj"
};
    functioncall(&_block_literal);
C++ Support
===========
Within a block, stack-based C++ objects are copied into ``const`` copies using
the copy constructor. It is an error if a stack based C++ object is used within
a block if it does not have a copy constructor. In addition both copy and
destroy helper routines must be synthesized for the block to support the
``Block_copy()`` operation, and the flags word is marked with the (1<<26) bit in
addition to the (1<<25) bit. The copy helper should call the constructor using
appropriate offsets of the variable within the supplied stack based block source
and heap based destination for all ``const`` constructed copies, and similarly
should call the destructor in the destroy routine.
As an example, suppose a C++ class ``FOO`` existed with a copy constructor.
Within a code block a stack version of a ``FOO`` object is declared and used
within a ``Block`` literal expression:
.. code-block:: c++
{
FOO foo;
void (^block)(void) = ^{ printf("%d\n", foo.value()); };
}
The compiler would synthesize:
.. code-block:: c++
struct __block_literal_10 {
void *isa;
int flags;
int reserved;
void (*invoke)(struct __block_literal_10 *);
struct __block_descriptor_10 *descriptor;
const FOO foo;
};
void __block_invoke_10(struct __block_literal_10 *_block) {
printf("%d\n", _block->foo.value());
}
    void __block_copy_10(struct __block_literal_10 *dst, struct __block_literal_10 *src) {
FOO_ctor(&dst->foo, &src->foo);
}
void __block_dispose_10(struct __block_literal_10 *src) {
FOO_dtor(&src->foo);
}
static struct __block_descriptor_10 {
unsigned long int reserved;
unsigned long int Block_size;
void (*copy_helper)(struct __block_literal_10 *dst, struct __block_literal_10 *src);
void (*dispose_helper)(struct __block_literal_10 *);
} __block_descriptor_10 = { 0, sizeof(struct __block_literal_10), __block_copy_10, __block_dispose_10 };
and the code would be:
.. code-block:: c++
{
FOO foo;
comp_ctor(&foo); // default constructor
struct __block_literal_10 _block_literal = {
&_NSConcreteStackBlock,
(1<<25)|(1<<26)|(1<<29), <uninitialized>,
__block_invoke_10,
&__block_descriptor_10,
};
comp_ctor(&_block_literal->foo, &foo); // const copy into stack version
        struct __block_literal_10 *block = &_block_literal; // assign literal to block variable
block->invoke(block); // invoke block
comp_dtor(&_block_literal->foo); // destroy stack version of const block copy
comp_dtor(&foo); // destroy original version
}
C++ objects stored in ``__block`` storage start out on the stack in a
``block_byref`` data structure as do other variables. Such objects (if not
``const`` objects) must support a regular copy constructor. The ``block_byref``
data structure will have copy and destroy helper routines synthesized by the
compiler. The copy helper will have code created to perform the copy
constructor based on the initial stack ``block_byref`` data structure, and will
also set the (1<<26) bit in addition to the (1<<25) bit. The destroy helper
will have code to do the destructor on the object stored within the supplied
``block_byref`` heap data structure. For example,
.. code-block:: c++
__block FOO blockStorageFoo;
requires the normal constructor for the embedded ``blockStorageFoo`` object:
.. code-block:: c++
FOO_ctor(& _block_byref_blockStorageFoo->blockStorageFoo);
and at scope termination the destructor:
.. code-block:: c++
FOO_dtor(& _block_byref_blockStorageFoo->blockStorageFoo);
Note that the forwarding indirection is *NOT* used.
The compiler would need to generate (if used from a block literal) the following
copy/dispose helpers:
.. code-block:: c++
void _block_byref_obj_keep(struct _block_byref_blockStorageFoo *dst, struct _block_byref_blockStorageFoo *src) {
FOO_ctor(&dst->blockStorageFoo, &src->blockStorageFoo);
}
void _block_byref_obj_dispose(struct _block_byref_blockStorageFoo *src) {
FOO_dtor(&src->blockStorageFoo);
}
for the appropriately named constructor and destructor for the class/struct
``FOO``.
To support member variable and function access the compiler will synthesize a
``const`` pointer to a block version of the ``this`` pointer.
.. _RuntimeHelperFunctions:
Runtime Helper Functions
========================
The runtime helper functions are described in
``/usr/local/include/Block_private.h``. To summarize their use, a ``Block``
requires copy/dispose helpers if it imports any block variables, ``__block``
storage variables, ``__attribute__((NSObject))`` variables, or C++ ``const``
copied objects with constructor/destructors. The (1<<26) bit is set and
functions are generated.
The block copy helper function should, for each of the variables of the type
mentioned above, call:
.. code-block:: c
_Block_object_assign(&dst->target, src->target, BLOCK_FIELD_<appropo>);
in the copy helper and:
.. code-block:: c
    _Block_object_dispose(src->target, BLOCK_FIELD_<appropo>);
in the dispose helper where ``<appropo>`` is:
.. code-block:: c
enum {
BLOCK_FIELD_IS_OBJECT = 3, // id, NSObject, __attribute__((NSObject)), block, ...
BLOCK_FIELD_IS_BLOCK = 7, // a block variable
BLOCK_FIELD_IS_BYREF = 8, // the on stack structure holding the __block variable
BLOCK_FIELD_IS_WEAK = 16, // declared __weak
BLOCK_BYREF_CALLER = 128, // called from byref copy/dispose helpers
};
and of course the constructors/destructors for ``const`` copied C++ objects.
The ``block_byref`` data structure similarly requires copy/dispose helpers for
block variables, ``__attribute__((NSObject))`` variables, or C++ ``const``
copied objects with constructor/destructors, and again the (1<<26) bit is set
and functions are generated in the same manner.
Under ObjC we allow ``__weak`` as an attribute on ``__block`` variables, and
this causes ``BLOCK_FIELD_IS_WEAK`` to be ORed onto the
``BLOCK_FIELD_IS_BYREF`` flag when copying the ``block_byref`` structure in the
``Block`` copy helper, and onto the ``BLOCK_FIELD_<appropo>`` field within the
``block_byref`` copy/dispose helper calls.
The prototypes, and summary, of the helper functions are:
.. code-block:: c
/* Certain field types require runtime assistance when being copied to the
heap. The following function is used to copy fields of types: blocks,
pointers to byref structures, and objects (including
__attribute__((NSObject)) pointers. BLOCK_FIELD_IS_WEAK is orthogonal to
the other choices which are mutually exclusive. Only in a Block copy
helper will one see BLOCK_FIELD_IS_BYREF.
*/
void _Block_object_assign(void *destAddr, const void *object, const int flags);
/* Similarly a compiler generated dispose helper needs to call back for each
field of the byref data structure. (Currently the implementation only
packs one field into the byref structure but in principle there could be
more). The same flags used in the copy helper should be used for each
call generated to this function:
*/
void _Block_object_dispose(const void *object, const int flags);
Copyright
=========
Copyright 2008-2010 Apple, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

View File

@ -0,0 +1 @@
*NOTE* This document has moved to http://clang.llvm.org/docs/Block-ABI-Apple.html.

View File

@ -0,0 +1,361 @@
.. role:: block-term
=================================
Language Specification for Blocks
=================================
.. contents::
:local:
Revisions
=========
- 2008/2/25 --- created
- 2008/7/28 --- revised, ``__block`` syntax
- 2008/8/13 --- revised, Block globals
- 2008/8/21 --- revised, C++ elaboration
- 2008/11/1 --- revised, ``__weak`` support
- 2009/1/12 --- revised, explicit return types
- 2009/2/10 --- revised, ``__block`` objects need retain
Overview
========
A new derived type is introduced to C and, by extension, Objective-C,
C++, and Objective-C++.
The Block Type
==============
Like a function type, the :block-term:`Block type` is a pair consisting of a
result value type and a list of parameter types. Blocks are intended to be
used much like functions, with the key distinction being that in addition to
executable code they also contain various variable bindings to automatic
(stack) or managed (heap) memory.
The abstract declarator,
.. code-block:: c
int (^)(char, float)
describes a reference to a Block that, when invoked, takes two
parameters, the first of type char and the second of type float, and
returns a value of type int. The referenced Block is opaque data that may
reside in automatic (stack) memory, global memory, or heap memory.
Block Variable Declarations
===========================
A :block-term:`variable with Block type` is declared using function
pointer style notation substituting ``^`` for ``*``. The following are
valid Block variable declarations:
.. code-block:: c
void (^blockReturningVoidWithVoidArgument)(void);
int (^blockReturningIntWithIntAndCharArguments)(int, char);
void (^arrayOfTenBlocksReturningVoidWithIntArgument[10])(int);
Variadic ``...`` arguments are supported. [variadic.c] A Block that
takes no arguments must specify void in the argument list [voidarg.c].
An empty parameter list does not represent, as K&R provide, an
unspecified argument list. Note: both gcc and clang support K&R style
as a convenience.
A Block reference may be cast to a pointer of arbitrary type and vice
versa. [cast.c] A Block reference may not be dereferenced via the
pointer dereference operator ``*``, and thus a Block's size may not be
computed at compile time. [sizeof.c]
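As an illustrative sketch of both points (assuming ``-fblocks`` and a Blocks
runtime):

.. code-block:: c

    #include <stdarg.h>
    #include <stdio.h>

    int main(void) {
        // A variadic Block: the parameter list ends with ... as for functions.
        int (^sum)(int, ...) = ^ int (int count, ...) {
            va_list ap;
            va_start(ap, count);
            int total = 0;
            for (int i = 0; i < count; ++i)
                total += va_arg(ap, int);
            va_end(ap);
            return total;
        };
        // A Block reference may be cast to a pointer of arbitrary type and back,
        // but it may not be dereferenced with *.
        void *erased = (void *)sum;
        int (^restored)(int, ...) = (int (^)(int, ...))erased;
        printf("%d\n", restored(3, 1, 2, 3));   // prints 9
        return 0;
    }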
Block Literal Expressions
=========================
A :block-term:`Block literal expression` produces a reference to a
Block. It is introduced by the use of the ``^`` token as a unary
operator.
.. code-block:: c
Block_literal_expression ::= ^ block_decl compound_statement_body
block_decl ::=
block_decl ::= parameter_list
block_decl ::= type_expression
where type expression is extended to allow ``^`` as a Block reference
(pointer) where ``*`` is allowed as a function reference (pointer).
The following Block literal:
.. code-block:: c
^ void (void) { printf("hello world\n"); }
produces a reference to a Block with no arguments and no return value.
The return type is optional and is inferred from the return
statements. If the return statements return a value, they all must
return a value of the same type. If there is no value returned the
inferred type of the Block is void; otherwise it is the type of the
return statement value.
If the return type is omitted and the argument list is ``( void )``,
the ``( void )`` argument list may also be omitted.
So:
.. code-block:: c
^ ( void ) { printf("hello world\n"); }
and:
.. code-block:: c
^ { printf("hello world\n"); }
are exactly equivalent constructs for the same expression.
The type_expression extends C expression parsing to accommodate Block
reference declarations as it accommodates function pointer
declarations.
Given:
.. code-block:: c
typedef int (*pointerToFunctionThatReturnsIntWithCharArg)(char);
pointerToFunctionThatReturnsIntWithCharArg functionPointer;
^ pointerToFunctionThatReturnsIntWithCharArg (float x) { return functionPointer; }
and:
.. code-block:: c
^ int ((*)(float x))(char) { return functionPointer; }
are equivalent expressions, as is:
.. code-block:: c
^(float x) { return functionPointer; }
[returnfunctionptr.c]
The compound statement body establishes a new lexical scope within
that of its parent. Variables used within the scope of the compound
statement are bound to the Block in the normal manner with the
exception of those in automatic (stack) storage. Thus one may access
functions and global variables as one would expect, as well as static
local variables. [testme]
Local automatic (stack) variables referenced within the compound
statement of a Block are imported and captured by the Block as const
copies. The capture (binding) is performed at the time of the Block
literal expression evaluation.
The compiler is not required to capture a variable if it can prove
that no references to the variable will actually be evaluated.
Programmers can force a variable to be captured by referencing it in a
statement at the beginning of the Block, like so:
.. code-block:: c
(void) foo;
This matters when capturing the variable has side-effects, as it can
in Objective-C or C++.
The lifetime of variables declared in a Block is that of a function;
each activation frame contains a new copy of variables declared within
the local scope of the Block. Such variable declarations should be
allowed anywhere [testme] rather than only when C99 parsing is
requested, including for statements. [testme]
Block literal expressions may occur within Block literal expressions
(nest) and all variables captured by any nested blocks are implicitly
also captured in the scopes of their enclosing Blocks.
A Block literal expression may be used as the initialization value for
Block variables at global or local static scope.
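For example (an illustrative sketch, assuming ``-fblocks``):

.. code-block:: c

    #include <stdio.h>

    // A literal that captures no automatic variables may initialize a Block
    // variable at global scope.
    void (^sayHello)(void) = ^{ printf("hello world\n"); };

    int main(void) {
        sayHello();
        return 0;
    }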
The Invoke Operator
===================
Blocks are :block-term:`invoked` using function call syntax with a
list of expression parameters of types corresponding to the
declaration and returning a result type also according to the
declaration. Given:
.. code-block:: c
int (^x)(char);
void (^z)(void);
int (^(*y))(char) = &x;
the following are all legal Block invocations:
.. code-block:: c
x('a');
(*y)('a');
(true ? x : *y)('a')
The Copy and Release Operations
===============================
The compiler and runtime provide :block-term:`copy` and
:block-term:`release` operations for Block references that create and,
in matched use, release allocated storage for referenced Blocks.
The copy operation ``Block_copy()`` is styled as a function that takes
an arbitrary Block reference and returns a Block reference of the same
type. The release operation, ``Block_release()``, is styled as a
function that takes an arbitrary Block reference and, if dynamically
matched to a Block copy operation, allows recovery of the referenced
allocated memory.
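A minimal usage sketch (assuming ``-fblocks`` and the ``<Block.h>`` runtime
header; the function name is illustrative):

.. code-block:: c

    #include <Block.h>
    #include <stdio.h>

    // Returns a heap Block that outlives this stack frame.
    void (^make_printer(int start))(void) {
        void (^printer)(void) = ^{ printf("start is %d\n", start); };
        // Block_copy moves the Block (and its const captures) to the heap.
        return Block_copy(printer);
    }

    int main(void) {
        void (^p)(void) = make_printer(42);
        p();
        Block_release(p);   // matched release recovers the allocated storage
        return 0;
    }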
The ``__block`` Storage Qualifier
=================================
In addition to the new Block type we also introduce a new storage
qualifier, :block-term:`__block`, for local variables. [testme: a
__block declaration within a block literal] The ``__block`` storage
qualifier is mutually exclusive to the existing local storage
qualifiers auto, register, and static. [testme] Variables qualified by
``__block`` act as if they were in allocated storage and this storage
is automatically recovered after last use of said variable. An
implementation may choose an optimization where the storage is
initially automatic and only "moved" to allocated (heap) storage upon
a Block_copy of a referencing Block. Such variables may be mutated as
normal variables are.
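A short sketch (hypothetical names) of a mutable ``__block`` variable shared between the enclosing scope and a Block:
.. code-block:: c++
void counter_demo(void) {
  __block int counter = 0;                   // shared, mutable storage
  void (^increment)(void) = ^{ ++counter; };
  increment();
  increment();
  // counter == 2 here: the Block mutated the variable itself, not a copy
}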
In the case where a ``__block`` variable is a Block one must assume
that the ``__block`` variable resides in allocated storage and as such
is assumed to reference a Block that is also in allocated storage
(that it is the result of a ``Block_copy`` operation). Despite this
there is no provision to do a ``Block_copy`` or a ``Block_release`` if
an implementation provides initial automatic storage for Blocks. This
is due to the inherent race condition of potentially several threads
trying to update the shared variable and the need for synchronization
around disposing of older values and copying new ones. Such
synchronization is beyond the scope of this language specification.
Control Flow
============
The compound statement of a Block is treated much like a function body
with respect to control flow in that goto, break, and continue do not
escape the Block. Exceptions are treated *normally* in that when
thrown they pop stack frames until a catch clause is found.
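For example (an illustrative sketch, not normative text), ``break`` and ``return`` apply to the Block's own body:
.. code-block:: c++
void control_flow_demo(void) {
  void (^scan)(void) = ^{
    for (int i = 0; i < 10; ++i) {
      if (i == 3)
        break;  // OK: stays within the Block's own loop
    }
    return;     // returns from the Block, not from control_flow_demo
  };
  scan();
}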
Objective-C Extensions
======================
Objective-C extends the definition of a Block reference type to be
that also of id. A variable or expression of Block type may be
messaged or used as a parameter wherever an id may be. The converse is
also true. Block references may thus appear as properties and are
subject to the assign, retain, and copy attribute logic that is
reserved for objects.
All Blocks are constructed to be Objective-C objects regardless of
whether the Objective-C runtime is operational in the program or
not. Blocks using automatic (stack) memory are objects and may be
messaged, although they may not be assigned into ``__weak`` locations
if garbage collection is enabled.
Within a Block literal expression within a method definition
references to instance variables are also imported into the lexical
scope of the compound statement. These variables are implicitly
qualified as references from self, and so self is imported as a const
copy. The net effect is that instance variables can be mutated.
The :block-term:`Block_copy` operator retains all objects held in
variables of automatic storage referenced within the Block expression
(or form strong references if running under garbage collection).
Object variables of ``__block`` storage type are assumed to hold
normal pointers with no provision for retain and release messages.
Foundation defines (and supplies) ``-copy`` and ``-release`` methods for
Blocks.
In the Objective-C and Objective-C++ languages, we allow the
``__weak`` specifier for ``__block`` variables of object type. If
garbage collection is not enabled, this qualifier causes these
variables to be kept without retain messages being sent. This
knowingly leads to dangling pointers if the Block (or a copy) outlives
the lifetime of this object.
In garbage collected environments, the ``__weak`` variable is set to
nil when the object it references is collected, as long as the
``__block`` variable resides in the heap (either by default or via
``Block_copy()``). The initial Apple implementation does in fact
start ``__block`` variables on the stack and migrate them to the heap
only as a result of a ``Block_copy()`` operation.
It is a runtime error to attempt to assign a reference to a
stack-based Block into any storage marked ``__weak``, including
``__weak`` ``__block`` variables.
C++ Extensions
==============
Block literal expressions within functions are extended to allow const
use of C++ objects, pointers, or references held in automatic storage.
As usual, within the block, references to captured variables become
const-qualified, as if they were references to members of a const
object. Note that this does not change the type of a variable of
reference type.
For example, given a class Foo:
.. code-block:: c
Foo foo;
Foo &fooRef = foo;
Foo *fooPtr = &foo;
A Block that referenced these variables would import the variables as
const variations:
.. code-block:: c
const Foo block_foo = foo;
Foo &block_fooRef = fooRef;
Foo *const block_fooPtr = fooPtr;
Captured variables are copied into the Block at the instant of
evaluating the Block literal expression. They are also copied when
calling ``Block_copy()`` on a Block allocated on the stack. In both
cases, they are copied as if the variable were const-qualified, and
it's an error if there's no such constructor.
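Continuing the ``Foo`` example, a rough sketch of what does and does not compile (the member functions shown are hypothetical):
.. code-block:: c++
struct Foo {
  int value;
  explicit Foo(int v) : value(v) {}
  Foo(const Foo &other) : value(other.value) {} // used when the Block captures 'foo'
  int get() const { return value; }
  void set(int v) { value = v; }
};
void capture_copy_demo(void) {
  Foo foo(0);
  int (^read)(void) = ^{
    // foo.set(1);     // error: the captured copy is const-qualified
    return foo.get();  // OK: const member function on the const copy
  };
  (void)read();
}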
Captured variables in Blocks on the stack are destroyed when control
leaves the compound statement that contains the Block literal
expression. Captured variables in Blocks on the heap are destroyed
when the reference count of the Block drops to zero.
Variables declared as residing in ``__block`` storage may be initially
allocated in the heap or may first appear on the stack and be copied
to the heap as a result of a ``Block_copy()`` operation. When copied
from the stack, ``__block`` variables are copied using their normal
qualification (i.e. without adding const). In C++11, ``__block``
variables are copied as x-values if that is possible, then as l-values
if not; if both fail, it's an error. The destructor for any initial
stack-based version is called at the variable's normal end of scope.
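A sketch of the stack-to-heap migration for a C++ ``__block`` variable (illustrative names; assumes ``<Block.h>`` and a Blocks runtime):
.. code-block:: c++
#include <Block.h>
#include <string>
void block_move_demo(void) {
  __block std::string log = "start";            // initially in the stack-based slot
  void (^append)(void) = ^{ log += "!"; };      // refers to the shared __block variable
  void (^heapCopy)(void) = Block_copy(append);  // 'log' is moved to the heap here (as an xvalue in C++11)
  heapCopy();
  Block_release(heapCopy);
}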
References to ``this``, as well as references to non-static members of
any enclosing class, are evaluated by capturing ``this`` just like a
normal variable of C pointer type.
Member variables that are Blocks may not be overloaded by the types of
their arguments.

@ -0,0 +1,51 @@
if (DOXYGEN_FOUND)
  if (LLVM_ENABLE_DOXYGEN)
    set(abs_srcdir ${CMAKE_CURRENT_SOURCE_DIR})
    set(abs_builddir ${CMAKE_CURRENT_BINARY_DIR})

    if (HAVE_DOT)
      set(DOT ${LLVM_PATH_DOT})
    endif()

    if (LLVM_DOXYGEN_EXTERNAL_SEARCH)
      set(enable_searchengine "YES")
      set(searchengine_url "${LLVM_DOXYGEN_SEARCHENGINE_URL}")
      set(enable_server_based_search "YES")
      set(enable_external_search "YES")
      set(extra_search_mappings "${LLVM_DOXYGEN_SEARCH_MAPPINGS}")
    else()
      set(enable_searchengine "NO")
      set(searchengine_url "")
      set(enable_server_based_search "NO")
      set(enable_external_search "NO")
      set(extra_search_mappings "")
    endif()

    configure_file(${CMAKE_CURRENT_SOURCE_DIR}/doxygen.cfg.in
                   ${CMAKE_CURRENT_BINARY_DIR}/doxygen.cfg @ONLY)

    set(abs_top_srcdir)
    set(abs_top_builddir)
    set(DOT)
    set(enable_searchengine)
    set(searchengine_url)
    set(enable_server_based_search)
    set(enable_external_search)
    set(extra_search_mappings)

    add_custom_target(doxygen-clang
      COMMAND ${DOXYGEN_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/doxygen.cfg
      WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
      COMMENT "Generating clang doxygen documentation." VERBATIM)

    if (LLVM_BUILD_DOCS)
      add_dependencies(doxygen doxygen-clang)
    endif()

    if (NOT LLVM_INSTALL_TOOLCHAIN_ONLY)
      install(DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/doxygen/html
              DESTINATION docs/html)
    endif()
  endif()
endif()

@ -0,0 +1,36 @@
==========
ClangCheck
==========
`ClangCheck` is a small wrapper around :doc:`LibTooling` which can be used to
do basic error checking and AST dumping.
.. code-block:: console
$ cat <<EOF > snippet.cc
> void f() {
> int a = 0
> }
> EOF
$ ~/clang/build/bin/clang-check snippet.cc -ast-dump --
Processing: /Users/danieljasper/clang/llvm/tools/clang/docs/snippet.cc.
/Users/danieljasper/clang/llvm/tools/clang/docs/snippet.cc:2:12: error: expected ';' at end of
declaration
int a = 0
^
;
(TranslationUnitDecl 0x7ff3a3029ed0 <<invalid sloc>>
(TypedefDecl 0x7ff3a302a410 <<invalid sloc>> __int128_t '__int128')
(TypedefDecl 0x7ff3a302a470 <<invalid sloc>> __uint128_t 'unsigned __int128')
(TypedefDecl 0x7ff3a302a830 <<invalid sloc>> __builtin_va_list '__va_list_tag [1]')
(FunctionDecl 0x7ff3a302a8d0 </Users/danieljasper/clang/llvm/tools/clang/docs/snippet.cc:1:1, line:3:1> f 'void (void)'
(CompoundStmt 0x7ff3a302aa10 <line:1:10, line:3:1>
(DeclStmt 0x7ff3a302a9f8 <line:2:3, line:3:1>
(VarDecl 0x7ff3a302a980 <line:2:3, col:11> a 'int'
(IntegerLiteral 0x7ff3a302a9d8 <col:11> 'int' 0))))))
1 error generated.
Error while processing snippet.cc.
The '--' at the end is important as it prevents `clang-check` from searching
for a compilation database. For more information on how to set up and use
`clang-check` in a project, see :doc:`HowToSetupToolingForLLVM`.

@ -0,0 +1,178 @@
===========
ClangFormat
===========
`ClangFormat` describes a set of tools that are built on top of
:doc:`LibFormat`. It can support your workflow in a variety of ways including a
standalone tool and editor integrations.
Standalone Tool
===============
:program:`clang-format` is located in `clang/tools/clang-format` and can be used
to format C/C++/Obj-C code.
.. code-block:: console
$ clang-format -help
OVERVIEW: A tool to format C/C++/Obj-C code.
If no arguments are specified, it formats the code from standard input
and writes the result to the standard output.
If <file>s are given, it reformats the files. If -i is specified
together with <file>s, the files are edited in-place. Otherwise, the
result is written to the standard output.
USAGE: clang-format [options] [<file> ...]
OPTIONS:
Clang-format options:
-cursor=<uint> - The position of the cursor when invoking
clang-format from an editor integration
-dump-config - Dump configuration options to stdout and exit.
Can be used with -style option.
-i - Inplace edit <file>s, if specified.
-length=<uint> - Format a range of this length (in bytes).
Multiple ranges can be formatted by specifying
several -offset and -length pairs.
When only a single -offset is specified without
-length, clang-format will format up to the end
of the file.
Can only be used with one input file.
-lines=<string> - <start line>:<end line> - format a range of
lines (both 1-based).
Multiple ranges can be formatted by specifying
several -lines arguments.
Can't be used with -offset and -length.
Can only be used with one input file.
-offset=<uint> - Format a range starting at this byte offset.
Multiple ranges can be formatted by specifying
several -offset and -length pairs.
Can only be used with one input file.
-output-replacements-xml - Output replacements as XML.
-style=<string> - Coding style, currently supports:
LLVM, Google, Chromium, Mozilla, WebKit.
Use -style=file to load style configuration from
.clang-format file located in one of the parent
directories of the source file (or current
directory for stdin).
Use -style="{key: value, ...}" to set specific
parameters, e.g.:
-style="{BasedOnStyle: llvm, IndentWidth: 8}"
General options:
-help - Display available options (-help-hidden for more)
-help-list - Display list of available options (-help-list-hidden for more)
-version - Display the version of this program
When the desired code formatting style is different from the available options,
the style can be customized using the ``-style="{key: value, ...}"`` option or
by putting your style configuration in the ``.clang-format`` or ``_clang-format``
file in your project's directory and using ``clang-format -style=file``.
An easy way to create the ``.clang-format`` file is:
.. code-block:: console
clang-format -style=llvm -dump-config > .clang-format
Available style options are described in :doc:`ClangFormatStyleOptions`.
Vim Integration
===============
There is an integration for :program:`vim` which lets you run the
:program:`clang-format` standalone tool on your current buffer, optionally
selecting regions to reformat. The integration has the form of a `python`-file
which can be found under `clang/tools/clang-format/clang-format.py`.
This can be integrated by adding the following to your `.vimrc`:
.. code-block:: vim
map <C-K> :pyf <path-to-this-file>/clang-format.py<CR>
imap <C-K> <ESC>:pyf <path-to-this-file>/clang-format.py<CR>i
The first line enables :program:`clang-format` for NORMAL and VISUAL mode, the
second line adds support for INSERT mode. Change "C-K" to another binding if
you need :program:`clang-format` on a different key (C-K stands for Ctrl+k).
With this integration you can press the bound key and clang-format will
format the current line in NORMAL and INSERT mode or the selected region in
VISUAL mode. The line or region is extended to the next bigger syntactic
entity.
It operates on the current, potentially unsaved buffer and does not create
or save any files. To revert a formatting, just undo.
Emacs Integration
=================
Similar to the integration for :program:`vim`, there is an integration for
:program:`emacs`. It can be found at `clang/tools/clang-format/clang-format.el`
and used by adding this to your `.emacs`:
.. code-block:: common-lisp
(load "<path-to-clang>/tools/clang-format/clang-format.el")
(global-set-key [C-M-tab] 'clang-format-region)
This binds the function `clang-format-region` to C-M-tab, which then formats the
current line or selected region.
BBEdit Integration
==================
:program:`clang-format` cannot be used as a text filter with BBEdit, but works
well via a script. The AppleScript to do this integration can be found at
`clang/tools/clang-format/clang-format-bbedit.applescript`; place a copy in
`~/Library/Application Support/BBEdit/Scripts`, and edit the path within it to
point to your local copy of :program:`clang-format`.
With this integration you can select the script from the Script menu and
:program:`clang-format` will format the selection. Note that you can rename the
menu item by renaming the script, and can assign the menu item a keyboard
shortcut in the BBEdit preferences, under Menus & Shortcuts.
Visual Studio Integration
=========================
Download the latest Visual Studio plugin from the `alpha build site
<http://llvm.org/builds/>`_. The default key-binding is Ctrl-R,Ctrl-F.
Script for patch reformatting
=============================
The python script `clang/tools/clang-format-diff.py` parses the output of
a unified diff and reformats all contained lines with :program:`clang-format`.
.. code-block:: console
usage: clang-format-diff.py [-h] [-p P] [-style STYLE]
Reformat changed lines in diff.
optional arguments:
-h, --help show this help message and exit
-p P strip the smallest prefix containing P slashes
-style STYLE formatting style to apply (LLVM, Google, Chromium, Mozilla,
WebKit)
So to reformat all the lines in the latest :program:`git` commit, just do:
.. code-block:: console
git diff -U0 HEAD^ | clang-format-diff.py -p1
The :option:`-U0` will create a diff without context lines (the script would format
those as well).

@ -0,0 +1,391 @@
==========================
Clang-Format Style Options
==========================
:doc:`ClangFormatStyleOptions` describes configurable formatting style options
supported by :doc:`LibFormat` and :doc:`ClangFormat`.
When using the :program:`clang-format` command line utility or the
``clang::format::reformat(...)`` functions from code, one can either use one of
the predefined styles (LLVM, Google, Chromium, Mozilla, WebKit) or create a
custom style by configuring specific style options.
Configuring Style with clang-format
===================================
:program:`clang-format` supports two ways to provide custom style options:
directly specify style configuration in the ``-style=`` command line option or
use ``-style=file`` and put style configuration in the ``.clang-format`` or
``_clang-format`` file in the project directory.
When using ``-style=file``, :program:`clang-format` for each input file will
try to find the ``.clang-format`` file located in the closest parent directory
of the input file. When the standard input is used, the search is started from
the current directory.
The ``.clang-format`` file uses YAML format:
.. code-block:: yaml
key1: value1
key2: value2
# A comment.
...
An easy way to get a valid ``.clang-format`` file containing all configuration
options of a certain predefined style is:
.. code-block:: console
clang-format -style=llvm -dump-config > .clang-format
When specifying configuration in the ``-style=`` option, the same configuration
is applied for all input files. The format of the configuration is:
.. code-block:: console
-style='{key1: value1, key2: value2, ...}'
Configuring Style in Code
=========================
When using ``clang::format::reformat(...)`` functions, the format is specified
by supplying the `clang::format::FormatStyle
<http://clang.llvm.org/doxygen/structclang_1_1format_1_1FormatStyle.html>`_
structure.
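A rough sketch of such a call follows; the header locations and exact signatures have shifted between Clang releases, so treat the names below as assumptions rather than a stable API reference:
.. code-block:: c++
// Illustrative only: reformat an entire buffer with a tweaked LLVM style.
#include "clang/Format/Format.h"
#include "clang/Tooling/Core/Replacement.h"
#include "llvm/ADT/StringRef.h"
#include <string>
#include <vector>
std::string reformatWholeBuffer(llvm::StringRef Code) {
  clang::format::FormatStyle Style = clang::format::getLLVMStyle();
  Style.IndentWidth = 2;  // style options are plain fields on the struct
  // One range covering the whole buffer.
  std::vector<clang::tooling::Range> Ranges(1, clang::tooling::Range(0, Code.size()));
  clang::tooling::Replacements Replaces = clang::format::reformat(Style, Code, Ranges);
  return clang::tooling::applyAllReplacements(Code, Replaces);
}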
Configurable Format Style Options
=================================
This section lists the supported style options. Value type is specified for
each option. For enumeration types possible values are specified both as a C++
enumeration member (with a prefix, e.g. ``LS_Auto``), and as a value usable in
the configuration (without a prefix: ``Auto``).
**BasedOnStyle** (``string``)
The style used for all options not specifically set in the configuration.
This option is supported only in the :program:`clang-format` configuration
(both within ``-style='{...}'`` and the ``.clang-format`` file).
Possible values:
* ``LLVM``
A style complying with the `LLVM coding standards
<http://llvm.org/docs/CodingStandards.html>`_
* ``Google``
A style complying with `Google's C++ style guide
<http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml>`_
* ``Chromium``
A style complying with `Chromium's style guide
<http://www.chromium.org/developers/coding-style>`_
* ``Mozilla``
A style complying with `Mozilla's style guide
<https://developer.mozilla.org/en-US/docs/Developer_Guide/Coding_Style>`_
* ``WebKit``
A style complying with `WebKit's style guide
<http://www.webkit.org/coding/coding-style.html>`_
.. START_FORMAT_STYLE_OPTIONS
**AccessModifierOffset** (``int``)
The extra indent or outdent of access modifiers, e.g. ``public:``.
**AlignEscapedNewlinesLeft** (``bool``)
If ``true``, aligns escaped newlines as far left as possible.
Otherwise puts them into the right-most column.
**AlignTrailingComments** (``bool``)
If ``true``, aligns trailing comments.
**AllowAllParametersOfDeclarationOnNextLine** (``bool``)
Allow putting all parameters of a function declaration onto
the next line even if ``BinPackParameters`` is ``false``.
**AllowShortIfStatementsOnASingleLine** (``bool``)
If ``true``, ``if (a) return;`` can be put on a single
line.
**AllowShortLoopsOnASingleLine** (``bool``)
If ``true``, ``while (true) continue;`` can be put on a
single line.
**AlwaysBreakBeforeMultilineStrings** (``bool``)
If ``true``, always break before multiline string literals.
**AlwaysBreakTemplateDeclarations** (``bool``)
If ``true``, always break after the ``template<...>`` of a
template declaration.
**BinPackParameters** (``bool``)
If ``false``, a function call's or function definition's parameters
will either all be on the same line or will have one line each.
**BreakBeforeBinaryOperators** (``bool``)
If ``true``, binary operators will be placed after line breaks.
**BreakBeforeBraces** (``BraceBreakingStyle``)
The brace breaking style to use.
Possible values:
* ``BS_Attach`` (in configuration: ``Attach``)
Always attach braces to surrounding context.
* ``BS_Linux`` (in configuration: ``Linux``)
Like ``Attach``, but break before braces on function, namespace and
class definitions.
* ``BS_Stroustrup`` (in configuration: ``Stroustrup``)
Like ``Attach``, but break before function definitions.
* ``BS_Allman`` (in configuration: ``Allman``)
Always break before braces.
**BreakConstructorInitializersBeforeComma** (``bool``)
Always break constructor initializers before commas and align
the commas with the colon.
**ColumnLimit** (``unsigned``)
The column limit.
A column limit of ``0`` means that there is no column limit. In this case,
clang-format will respect the input's line breaking decisions within
statements.
**ConstructorInitializerAllOnOneLineOrOnePerLine** (``bool``)
If the constructor initializers don't fit on a line, put each
initializer on its own line.
**ConstructorInitializerIndentWidth** (``unsigned``)
The number of characters to use for indentation of constructor
initializer lists.
**Cpp11BracedListStyle** (``bool``)
If ``true``, format braced lists as best suited for C++11 braced
lists.
Important differences:
- No spaces inside the braced list.
- No line break before the closing brace.
- Indentation with the continuation indent, not with the block indent.
Fundamentally, C++11 braced lists are formatted exactly like function
calls would be formatted in their place. If the braced list follows a name
(e.g. a type or variable name), clang-format formats as if the ``{}`` were
the parentheses of a function call with that name. If there is no name,
a zero-length name is assumed.
**DerivePointerBinding** (``bool``)
If ``true``, analyze the formatted file for the most common binding.
**ExperimentalAutoDetectBinPacking** (``bool``)
If ``true``, clang-format detects whether function calls and
definitions are formatted with one parameter per line.
Each call can be bin-packed, one-per-line or inconclusive. If it is
inconclusive, e.g. completely on one line, but a decision needs to be
made, clang-format analyzes whether there are other bin-packed cases in
the input file and acts accordingly.
NOTE: This is an experimental flag, that might go away or be renamed. Do
not use this in config files, etc. Use at your own risk.
**IndentCaseLabels** (``bool``)
Indent case labels one level from the switch statement.
When ``false``, use the same indentation level as for the switch statement.
Switch statement body is always indented one level more than case labels.
**IndentFunctionDeclarationAfterType** (``bool``)
If ``true``, indent when breaking function declarations which
are not also definitions after the type.
**IndentWidth** (``unsigned``)
The number of columns to use for indentation.
**MaxEmptyLinesToKeep** (``unsigned``)
The maximum number of consecutive empty lines to keep.
**NamespaceIndentation** (``NamespaceIndentationKind``)
The indentation used for namespaces.
Possible values:
* ``NI_None`` (in configuration: ``None``)
Don't indent in namespaces.
* ``NI_Inner`` (in configuration: ``Inner``)
Indent only in inner namespaces (nested in other namespaces).
* ``NI_All`` (in configuration: ``All``)
Indent in all namespaces.
**ObjCSpaceBeforeProtocolList** (``bool``)
Add a space in front of an Objective-C protocol list, i.e. use
``Foo <Protocol>`` instead of ``Foo<Protocol>``.
**PenaltyBreakComment** (``unsigned``)
The penalty for each line break introduced inside a comment.
**PenaltyBreakFirstLessLess** (``unsigned``)
The penalty for breaking before the first ``<<``.
**PenaltyBreakString** (``unsigned``)
The penalty for each line break introduced inside a string literal.
**PenaltyExcessCharacter** (``unsigned``)
The penalty for each character outside of the column limit.
**PenaltyReturnTypeOnItsOwnLine** (``unsigned``)
Penalty for putting the return type of a function onto its own
line.
**PointerBindsToType** (``bool``)
Set whether & and * bind to the type as opposed to the variable.
**SpaceAfterControlStatementKeyword** (``bool``)
If ``true``, spaces will be inserted between 'for'/'if'/'while'/...
and '('.
**SpaceBeforeAssignmentOperators** (``bool``)
If ``false``, spaces will be removed before assignment operators.
**SpaceInEmptyParentheses** (``bool``)
If ``false``, spaces may be inserted into '()'.
**SpacesBeforeTrailingComments** (``unsigned``)
The number of spaces before trailing line comments.
**SpacesInCStyleCastParentheses** (``bool``)
If ``false``, spaces may be inserted into C style casts.
**SpacesInParentheses** (``bool``)
If ``true``, spaces will be inserted after every '(' and before
every ')'.
**Standard** (``LanguageStandard``)
Format compatible with this standard, e.g. use
``A<A<int> >`` instead of ``A<A<int>>`` for LS_Cpp03.
Possible values:
* ``LS_Cpp03`` (in configuration: ``Cpp03``)
Use C++03-compatible syntax.
* ``LS_Cpp11`` (in configuration: ``Cpp11``)
Use features of C++11 (e.g. ``A<A<int>>`` instead of
``A<A<int> >``).
* ``LS_Auto`` (in configuration: ``Auto``)
Automatic detection based on the input.
**TabWidth** (``unsigned``)
The number of columns used for tab stops.
**UseTab** (``UseTabStyle``)
The way to use tab characters in the resulting file.
Possible values:
* ``UT_Never`` (in configuration: ``Never``)
Never use tab.
* ``UT_ForIndentation`` (in configuration: ``ForIndentation``)
Use tabs only for indentation.
* ``UT_Always`` (in configuration: ``Always``)
Use tabs whenever we need to fill whitespace that spans at least from
one tab stop to the next one.
.. END_FORMAT_STYLE_OPTIONS
Examples
========
A style similar to the `Linux Kernel style
<https://www.kernel.org/doc/Documentation/CodingStyle>`_:
.. code-block:: yaml
BasedOnStyle: LLVM
IndentWidth: 8
UseTab: Always
BreakBeforeBraces: Linux
AllowShortIfStatementsOnASingleLine: false
IndentCaseLabels: false
The result is (imagine that tabs are used for indentation here):
.. code-block:: c++
void test()
{
        switch (x) {
        case 0:
        case 1:
                do_something();
                break;
        case 2:
                do_something_else();
                break;
        default:
                break;
        }
        if (condition)
                do_something_completely_different();
        if (x == y) {
                q();
        } else if (x > y) {
                w();
        } else {
                r();
        }
}
A style similar to the default Visual Studio formatting style:
.. code-block:: yaml
UseTab: Never
IndentWidth: 4
BreakBeforeBraces: Allman
AllowShortIfStatementsOnASingleLine: false
IndentCaseLabels: false
ColumnLimit: 0
The result is:
.. code-block:: c++
void test()
{
    switch (suffix)
    {
    case 0:
    case 1:
        do_something();
        break;
    case 2:
        do_something_else();
        break;
    default:
        break;
    }
    if (condition)
        do_something_completely_different();
    if (x == y)
    {
        q();
    }
    else if (x > y)
    {
        w();
    }
    else
    {
        r();
    }
}

@ -0,0 +1,150 @@
=============
Clang Plugins
=============
Clang Plugins make it possible to run extra user-defined actions during a
compilation. This document will provide a basic walkthrough of how to write and
run a Clang Plugin.
Introduction
============
Clang Plugins run FrontendActions over code. See the :doc:`FrontendAction
tutorial <RAVFrontendAction>` on how to write a ``FrontendAction`` using the
``RecursiveASTVisitor``. In this tutorial, we'll demonstrate how to write a
simple clang plugin.
Writing a ``PluginASTAction``
=============================
The main difference from writing normal ``FrontendActions`` is that you can
handle plugin command line options. The ``PluginASTAction`` base class declares
a ``ParseArgs`` method which you have to implement in your plugin.
.. code-block:: c++
bool ParseArgs(const CompilerInstance &CI,
               const std::vector<std::string>& args) {
  for (unsigned i = 0, e = args.size(); i != e; ++i) {
    if (args[i] == "-some-arg") {
      // Handle the command line argument.
    }
  }
  return true;
}
Registering a plugin
====================
A plugin is loaded from a dynamic library at runtime by the compiler. To
register a plugin in a library, use ``FrontendPluginRegistry::Add<>``:
.. code-block:: c++
static FrontendPluginRegistry::Add<MyPlugin> X("my-plugin-name", "my plugin description");
Putting it all together
=======================
Let's look at an example plugin that prints top-level function names. This
example is also checked into the clang repository; please also take a look at
the latest `checked in version of PrintFunctionNames.cpp
<http://llvm.org/viewvc/llvm-project/cfe/trunk/examples/PrintFunctionNames/PrintFunctionNames.cpp?view=markup>`_.
.. code-block:: c++
#include "clang/Frontend/FrontendPluginRegistry.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/AST.h"
#include "clang/Frontend/CompilerInstance.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
namespace {
class PrintFunctionsConsumer : public ASTConsumer {
public:
virtual bool HandleTopLevelDecl(DeclGroupRef DG) {
for (DeclGroupRef::iterator i = DG.begin(), e = DG.end(); i != e; ++i) {
const Decl *D = *i;
if (const NamedDecl *ND = dyn_cast<NamedDecl>(D))
llvm::errs() << "top-level-decl: \"" << ND->getNameAsString() << "\"\n";
}
return true;
}
};
class PrintFunctionNamesAction : public PluginASTAction {
protected:
ASTConsumer *CreateASTConsumer(CompilerInstance &CI, llvm::StringRef) {
return new PrintFunctionsConsumer();
}
bool ParseArgs(const CompilerInstance &CI,
const std::vector<std::string>& args) {
for (unsigned i = 0, e = args.size(); i != e; ++i) {
llvm::errs() << "PrintFunctionNames arg = " << args[i] << "\n";
// Example error handling.
if (args[i] == "-an-error") {
DiagnosticsEngine &D = CI.getDiagnostics();
unsigned DiagID = D.getCustomDiagID(
DiagnosticsEngine::Error, "invalid argument '" + args[i] + "'");
D.Report(DiagID);
return false;
}
}
if (args.size() && args[0] == "help")
PrintHelp(llvm::errs());
return true;
}
void PrintHelp(llvm::raw_ostream& ros) {
ros << "Help for PrintFunctionNames plugin goes here\n";
}
};
}
static FrontendPluginRegistry::Add<PrintFunctionNamesAction>
X("print-fns", "print function names");
Running the plugin
==================
To run a plugin, the dynamic library containing the plugin registry must be
loaded via the :option:`-load` command line option. This will load all plugins
that are registered, and you can select the plugins to run by specifying the
:option:`-plugin` option. Additional parameters for the plugins can be passed with
:option:`-plugin-arg-<plugin-name>`.
Note that those options must reach clang's cc1 process. There are two
ways to do so:
* Directly call the parsing process by using the :option:`-cc1` option; this
has the downside of not configuring the default header search paths, so
you'll need to specify the full system path configuration on the command
line.
* Use clang as usual, but prefix all arguments to the cc1 process with
:option:`-Xclang`.
For example, to run the ``print-function-names`` plugin over a source file in
clang, first build the plugin, and then call clang with the plugin from the
source tree:
.. code-block:: console
$ export BD=/path/to/build/directory
$ (cd $BD && make PrintFunctionNames )
$ clang++ -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS \
-D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS -D_GNU_SOURCE \
-I$BD/tools/clang/include -Itools/clang/include -I$BD/include -Iinclude \
tools/clang/tools/clang-check/ClangCheck.cpp -fsyntax-only \
-Xclang -load -Xclang $BD/lib/PrintFunctionNames.so -Xclang \
-plugin -Xclang print-fns
Also see the print-function-name plugin example's
`README <http://llvm.org/viewvc/llvm-project/cfe/trunk/examples/PrintFunctionNames/README.txt?view=markup>`_.

@ -0,0 +1,191 @@
========
Overview
========
Clang Tools are standalone command line (and potentially GUI) tools
designed for use by C++ developers who are already using and enjoying
Clang as their compiler. These tools provide developer-oriented
functionality such as fast syntax checking, automatic formatting,
refactoring, etc.
Only a couple of the most basic and fundamental tools are kept in the
primary Clang Subversion project. The rest of the tools are kept in a
side-project so that developers who don't want or need to build them
don't. If you want to get access to the extra Clang Tools repository,
simply check it out into the tools tree of your Clang checkout and
follow the usual process for building and working with a combined
LLVM/Clang checkout:
- With Subversion:
- ``cd llvm/tools/clang/tools``
- ``svn co http://llvm.org/svn/llvm-project/clang-tools-extra/trunk extra``
- Or with Git:
- ``cd llvm/tools/clang/tools``
- ``git clone http://llvm.org/git/clang-tools-extra.git extra``
This document describes a high-level overview of the organization of
Clang Tools within the project as well as giving an introduction to some
of the more important tools. However, it should be noted that this
document is currently focused on Clang and Clang Tool developers, not on
end users of these tools.
Clang Tools Organization
========================
Clang Tools are CLI or GUI programs that are intended to be directly
used by C++ developers. That is, they are *not* primarily for use by
Clang developers, although they are hopefully useful to C++ developers
who happen to work on Clang, and we try to actively dogfood their
functionality. They are developed in three components: the underlying
infrastructure for building a standalone tool based on Clang, core
shared logic used by many different tools in the form of refactoring and
rewriting libraries, and the tools themselves.
The underlying infrastructure for Clang Tools is the
:doc:`LibTooling <LibTooling>` platform. See its documentation for much
more detailed information about how this infrastructure works. The
common refactoring and rewriting toolkit-style library is also part of
LibTooling organizationally.
A few Clang Tools are developed alongside the core Clang libraries as
examples and test cases of fundamental functionality. However, most of
the tools are developed in a side repository to provide easy separation
from the core libraries. We intentionally do not support public
libraries in the side repository, as we want to carefully review and
find good APIs for libraries as they are lifted out of a few tools and
into the core Clang library set.
Regardless of which repository Clang Tools' code resides in, the
development process and practices for all Clang Tools are exactly those
of Clang itself. They are entirely within the Clang *project*,
regardless of the version control scheme.
Core Clang Tools
================
The core set of Clang tools that live in the main repository are
tools that specifically complement, and allow use and testing of,
*Clang*-specific functionality.
``clang-check``
---------------
:doc:`ClangCheck` combines the LibTooling framework for running a
Clang tool with the basic Clang diagnostics by syntax checking specific files
in a fast command-line interface. It can also accept flags to re-display the
diagnostics in different formats, suitable for use driving
an IDE or editor. Furthermore, it can be used in fixit-mode to directly apply
fixit-hints offered by clang. See :doc:`HowToSetupToolingForLLVM` for
instructions on how to set up and use `clang-check`.
``clang-format``
----------------
Clang-format is both a :doc:`library <LibFormat>` and a :doc:`stand-alone tool
<ClangFormat>` with the goal of automatically reformatting C++ sources files
according to configurable style guides. To do so, clang-format uses Clang's
``Lexer`` to transform an input file into a token stream and then changes all
the whitespace around those tokens. The goal is for clang-format to serve both
as a user tool (ideally with powerful IDE integrations) and as part of other
refactoring tools, e.g. to do a reformatting of all the lines changed during a
renaming.
``cpp11-migrate``
-----------------
``cpp11-migrate`` migrates C++ code to use C++11 features where appropriate.
Currently it can:
* convert loops to range-based for loops (see the sketch after this list);
* convert null pointer constants (like ``NULL`` or ``0``) to C++11 ``nullptr``;
* replace the type specifier in variable declarations with the ``auto`` type specifier;
* add the ``override`` specifier to applicable member functions.
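A hand-written sketch (not actual tool output) of roughly what the first two transformations do:
.. code-block:: c++
#include <vector>
// Before: index-based loop and 0 used as a null pointer constant.
void before(std::vector<int> &v, int *p) {
  for (std::vector<int>::size_type i = 0; i < v.size(); ++i)
    v[i] += 1;
  if (p == 0)
    return;
}
// After (approximately): range-based for loop and nullptr.
void after(std::vector<int> &v, int *p) {
  for (auto &elem : v)
    elem += 1;
  if (p == nullptr)
    return;
}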
Extra Clang Tools
=================
As various categories of Clang Tools are added to the extra repository,
they'll be tracked here. The focus of this documentation is on the scope
and features of the tools for other tool developers; each tool should
provide its own user-focused documentation.
Ideas for new Tools
===================
* C++ cast conversion tool. Will convert C-style casts (``(type) value``) to
appropriate C++ cast (``static_cast``, ``const_cast`` or
``reinterpret_cast``).
* Non-member ``begin()`` and ``end()`` conversion tool. Will convert
``foo.begin()`` into ``begin(foo)`` and similarly for ``end()``, where
``foo`` is a standard container. We could also detect similar patterns for
arrays.
* ``make_shared`` / ``make_unique`` conversion. Part of this transformation
can be incorporated into the ``auto`` transformation. Will convert
.. code-block:: c++
std::shared_ptr<Foo> sp(new Foo);
std::unique_ptr<Foo> up(new Foo);
func(std::shared_ptr<Foo>(new Foo), bar());
into:
.. code-block:: c++
auto sp = std::make_shared<Foo>();
auto up = std::make_unique<Foo>(); // In C++14 mode.
// This also affects correctness. For the cases where bar() throws,
// make_shared() is safe and the original code may leak.
func(std::make_shared<Foo>(), bar());
* ``tr1`` removal tool. Will migrate source code from using TR1 library
features to C++11 library. For example:
.. code-block:: c++
#include <iostream>
#include <tr1/unordered_map>
int main()
{
  std::tr1::unordered_map <int, int> ma;
  std::cout << ma.size () << std::endl;
  return 0;
}
should be rewritten to:
.. code-block:: c++
#include <iostream>
#include <unordered_map>
int main()
{
  std::unordered_map <int, int> ma;
  std::cout << ma.size () << std::endl;
  return 0;
}
* A tool to remove ``auto``. Will convert ``auto`` to an explicit type or add
comments with deduced types. The motivation is that there are developers
that don't want to use ``auto`` because they are afraid that they might lose
control over their code.
* C++14: less verbose operator function objects (`N3421
<http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2012/n3421.htm>`_).
For example:
.. code-block:: c++
sort(v.begin(), v.end(), greater<ValueType>());
should be rewritten to:
.. code-block:: c++
sort(v.begin(), v.end(), greater<>());

@ -0,0 +1,204 @@
===================================================================
Cross-compilation using Clang
===================================================================
Introduction
============
This document will guide you in choosing the right Clang options
for cross-compiling your code to a different architecture. It assumes you
already know how to compile the code in question for the host architecture,
and that you know how to choose additional include and library paths.
However, this document is *not* a "how to" and won't help you set up your
build system or Makefiles, nor choose the right CMake options, etc.
Also, it does not cover all the possible options, nor does it contain
specific examples for specific architectures. For a concrete example, the
`instructions for cross-compiling LLVM itself
<http://llvm.org/docs/HowToCrossCompileLLVM.html>`_ may be of interest.
After reading this document, you should be familiar with the main issues
related to cross-compilation, and what main compiler options Clang provides
for performing cross-compilation.
Cross compilation issues
========================
In the GCC world, every host/target combination has its own set of binaries,
headers, libraries, etc. So, it's usually simple to download a package
containing all these files, unzip it into a directory, and point the build
system at that compiler, which will know about its own location and find
everything it needs when compiling your code.
On the other hand, Clang/LLVM is natively a cross-compiler, meaning that
one set of programs can compile to all targets by setting the ``-target``
option. That makes it a lot easier for programmers wishing to compile to
different platforms and architectures, for compiler developers who
only have to maintain one build system, and for OS distributions, which
need only one set of main packages.
But, as is true of any cross-compiler, and given the complexity of
different architectures, OSs, and options, it's not always easy to find
the headers, libraries, or binutils needed to generate target-specific code.
So you'll need special options to help Clang understand what target
you're compiling to, where your tools are, etc.
Another problem is that compilers come with standard libraries only (like
``compiler-rt``, ``libcxx``, ``libgcc``, ``libm``, etc.), so you'll have to
find, and make available to the build system, every other target-specific
library required to build your software. It's not enough to
have your host's libraries installed.
Finally, not all toolchains are the same, and consequently, not every Clang
option will work magically. Some options, like ``--sysroot`` (which
effectively changes the logical root for headers and libraries), assume
all your binaries and libraries are in the same directory, which may not
be true when your cross-compiler was installed by the distribution's package
manager. So, for each specific case, you may need more than one
option, and in most cases you'll end up setting include paths (``-I``) and
library paths (``-L``) manually.
To sum up, different toolchains can:
* be host/target specific or more flexible
* be in a single directory, or spread out across your system
* have different sets of libraries and headers by default
* need special options, which your build system won't be able to figure
out by itself
General Cross-Compilation Options in Clang
==========================================
Target Triple
-------------
The basic option is to define the target architecture. For that, use
``-target <triple>``. If you don't specify the target, CPU names won't
match (since Clang assumes the host triple), and the compilation will
go ahead, creating code for the host platform, which will break later
on when assembling or linking.
The triple has the general format ``<arch><sub>-<vendor>-<sys>-<abi>``, where:
* ``arch`` = ``x86``, ``arm``, ``thumb``, ``mips``, etc.
* ``sub`` = for ex. on ARM: ``v5``, ``v6m``, ``v7a``, ``v7m``, etc.
* ``vendor`` = ``pc``, ``apple``, ``nvidia``, ``ibm``, etc.
* ``sys`` = ``none``, ``linux``, ``win32``, ``darwin``, ``cuda``, etc.
* ``abi`` = ``eabi``, ``gnu``, ``android``, ``macho``, ``elf``, etc.
The sub-architecture options are available for their own architectures,
of course, so "x86v7a" doesn't make sense. The vendor needs to be
specified only if there's a relevant change, for instance between PC
and Apple. Most of the time it can be omitted (and ``unknown`` will be
assumed), which sets the defaults for the specified architecture.
The system name is generally the OS (linux, darwin), but could be special
like the bare-metal "none".
When a parameter is not important, it can be omitted, or you can
choose ``unknown`` and the defaults will be used. If you choose a parameter
that Clang doesn't know, like ``blerg``, it'll ignore it and assume
``unknown``, which is not always desired, so be careful.
Finally, the ABI option is something that will pick default CPU/FPU,
define the specific behaviour of your code (PCS, extensions),
and also choose the correct library calls, etc.
CPU, FPU, ABI
-------------
Once your target is specified, it's time to pick the hardware you'll
be compiling to. For every architecture, a default set of CPU/FPU/ABI
will be chosen, so you'll almost always have to change it via flags.
Typical flags include:
* ``-mcpu=<cpu-name>``, like x86-64, swift, cortex-a15
* ``-mfpu=<fpu-name>``, like SSE3, NEON, controlling the FP unit available
* ``-mfloat-abi=<fabi>``, like soft, hard, controlling which registers
to use for floating-point
The default is normally the common denominator, so that Clang doesn't
generate code that breaks. But that also means you won't get the best
code for your specific hardware, which may run orders of magnitude
slower than you expect.
For example, if your target is ``arm-none-eabi``, the default CPU will
be ``arm7tdmi`` using soft float, which is extremely slow on modern cores,
whereas if your triple is ``armv7a-none-eabi``, it'll be Cortex-A8 with
NEON, but still using soft-float, which is much better, but still not
great.
Toolchain Options
-----------------
There are three main options to control access to your cross-compiler:
``--sysroot``, ``-I``, and ``-L``. The two last ones are well known,
but they're particularly important for additional libraries
and headers that are specific to your target.
There are two main ways to have a cross-compiler:
#. When you have extracted your cross-compiler from a zip file into
a directory, you have to use ``--sysroot=<path>``. The path is the
root directory where you have unpacked your file, and Clang will
look for the directories ``bin``, ``lib``, ``include`` in there.
In this case, your setup should be pretty much done (if no
additional headers or libraries are needed), as Clang will find
all binaries it needs (assembler, linker, etc) in there.
#. When you have installed via a package manager (modern Linux
distributions have cross-compiler packages available), make
sure the target triple you set is *also* the prefix of your
cross-compiler toolchain.
In this case, Clang will find the other binaries (assembler,
linker), but not always where the target headers and libraries
are. People often add system-specific clues to Clang, but as
things change, it's more likely that it won't find them than the
other way around.
So, here, you'll be a lot safer if you specify the include/library
directories manually (via ``-I`` and ``-L``).
Target-Specific Libraries
=========================
All libraries that you compile as part of your build will be
cross-compiled to your target, and your build system will probably
find them in the right place. But all dependencies that are
normally checked against (like ``libxml`` or ``libz`` etc) will match
against the host platform, not the target.
So, if the build system is not aware that you want to cross-compile
your code, it will get every dependency wrong, and your compilation
will fail during build time, not configure time.
Also, finding the libraries for your target is not as easy
as for your host machine. There aren't many cross-libraries available
as packages to most OS's, so you'll have to either cross-compile them
from source, or download the package for your target platform,
extract the libraries and headers, put them in specific directories
and add ``-I`` and ``-L`` pointing to them.
Also, some libraries have different dependencies on different targets,
so configuration tools that find dependencies on the host can get the
list wrong for the target platform. This means that the configuration
of your build can get things wrong when setting its own library
paths, and you'll have to augment it via additional flags (configure,
Make, CMake, etc.).
Multilibs
---------
When you want to cross-compile to more than one configuration, for
example hard-float-ARM and soft-float-ARM, you'll have to have multiple
copies of your libraries and (possibly) headers.
Some Linux distributions have support for Multilib, which handles that
for you in an easier way, but if you're not careful and, for instance,
forget to specify ``-ccc-gcc-name armv7l-linux-gnueabihf-gcc`` (which
uses hard-float), Clang will pick the ``armv7l-linux-gnueabi-ld``
(which uses soft-float) and linker errors will happen.
The same is true if you're compiling for different ABIs, like ``gnueabi``
and ``androideabi``; the build might even link and run, but produce run-time
errors, which are much harder to track down and fix.

@ -0,0 +1,158 @@
=================
DataFlowSanitizer
=================
.. toctree::
:hidden:
DataFlowSanitizerDesign
.. contents::
:local:
Introduction
============
DataFlowSanitizer is a generalised dynamic data flow analysis.
Unlike other Sanitizer tools, this tool is not designed to detect a
specific class of bugs on its own. Instead, it provides a generic
dynamic data flow analysis framework to be used by clients to help
detect application-specific issues within their own code.
Usage
=====
With no program changes, applying DataFlowSanitizer to a program
will not alter its behavior. To use DataFlowSanitizer, the program
uses API functions to apply tags to data to cause it to be tracked, and to
check the tag of a specific data item. DataFlowSanitizer manages
the propagation of tags through the program according to its data flow.
The APIs are defined in the header file ``sanitizer/dfsan_interface.h``.
For further information about each function, please refer to the header
file.
ABI List
--------
DataFlowSanitizer uses a list of functions known as an ABI list to decide
whether a call to a specific function should use the operating system's native
ABI or whether it should use a variant of this ABI that also propagates labels
through function parameters and return values. The ABI list file also controls
how labels are propagated in the former case. DataFlowSanitizer comes with a
default ABI list which is intended to eventually cover the glibc library on
Linux but it may become necessary for users to extend the ABI list in cases
where a particular library or function cannot be instrumented (e.g. because
it is implemented in assembly or another language which DataFlowSanitizer does
not support) or a function is called from a library or function which cannot
be instrumented.
DataFlowSanitizer's ABI list file is a :doc:`SanitizerSpecialCaseList`.
The pass treats every function in the ``uninstrumented`` category in the
ABI list file as conforming to the native ABI. Unless the ABI list contains
additional categories for those functions, a call to one of those functions
will produce a warning message, as the labelling behavior of the function
is unknown. The other supported categories are ``discard``, ``functional``
and ``custom``.
* ``discard`` -- To the extent that this function writes to (user-accessible)
memory, it also updates labels in shadow memory (this condition is trivially
satisfied for functions which do not write to user-accessible memory). Its
return value is unlabelled.
* ``functional`` -- Like ``discard``, except that the label of its return value
is the union of the label of its arguments.
* ``custom`` -- Instead of calling the function, a custom wrapper ``__dfsw_F``
is called, where ``F`` is the name of the function. This function may wrap
the original function or provide its own implementation. This category is
generally used for uninstrumentable functions which write to user-accessible
memory or which have more complex label propagation behavior. The signature
of ``__dfsw_F`` is based on that of ``F`` with each argument having a
label of type ``dfsan_label`` appended to the argument list. If ``F``
is of non-void return type a final argument of type ``dfsan_label *``
is appended to which the custom function can store the label for the
return value. For example:
.. code-block:: c++
void f(int x);
void __dfsw_f(int x, dfsan_label x_label);
void *memcpy(void *dest, const void *src, size_t n);
void *__dfsw_memcpy(void *dest, const void *src, size_t n,
                    dfsan_label dest_label, dfsan_label src_label,
                    dfsan_label n_label, dfsan_label *ret_label);
If a function defined in the translation unit being compiled belongs to the
``uninstrumented`` category, it will be compiled so as to conform to the
native ABI. Its arguments will be assumed to be unlabelled, but it will
propagate labels in shadow memory.
For example:
.. code-block:: none
# main is called by the C runtime using the native ABI.
fun:main=uninstrumented
fun:main=discard
# malloc only writes to its internal data structures, not user-accessible memory.
fun:malloc=uninstrumented
fun:malloc=discard
# tolower is a pure function.
fun:tolower=uninstrumented
fun:tolower=functional
# memcpy needs to copy the shadow from the source to the destination region.
# This is done in a custom function.
fun:memcpy=uninstrumented
fun:memcpy=custom
Example
=======
The following program demonstrates label propagation by checking that
the correct labels are propagated.
.. code-block:: c++
#include <sanitizer/dfsan_interface.h>
#include <assert.h>
int main(void) {
  int i = 1;
  dfsan_label i_label = dfsan_create_label("i", 0);
  dfsan_set_label(i_label, &i, sizeof(i));

  int j = 2;
  dfsan_label j_label = dfsan_create_label("j", 0);
  dfsan_set_label(j_label, &j, sizeof(j));

  int k = 3;
  dfsan_label k_label = dfsan_create_label("k", 0);
  dfsan_set_label(k_label, &k, sizeof(k));

  dfsan_label ij_label = dfsan_get_label(i + j);
  assert(dfsan_has_label(ij_label, i_label));
  assert(dfsan_has_label(ij_label, j_label));
  assert(!dfsan_has_label(ij_label, k_label));

  dfsan_label ijk_label = dfsan_get_label(i + j + k);
  assert(dfsan_has_label(ijk_label, i_label));
  assert(dfsan_has_label(ijk_label, j_label));
  assert(dfsan_has_label(ijk_label, k_label));

  return 0;
}
Current status
==============
DataFlowSanitizer is a work in progress, currently under development for
x86\_64 Linux.
Design
======
Please refer to the :doc:`design document<DataFlowSanitizerDesign>`.

@ -0,0 +1,220 @@
DataFlowSanitizer Design Document
=================================
This document sets out the design for DataFlowSanitizer, a general
dynamic data flow analysis. Unlike other Sanitizer tools, this tool is
not designed to detect a specific class of bugs on its own. Instead,
it provides a generic dynamic data flow analysis framework to be used
by clients to help detect application-specific issues within their
own code.
DataFlowSanitizer is a program instrumentation which can associate
a number of taint labels with any data stored in any memory region
accessible by the program. The analysis is dynamic, which means that
it operates on a running program, and tracks how the labels propagate
through that program. The tool shall support a large (>100) number
of labels, such that programs which operate on large numbers of data
items may be analysed with each data item being tracked separately.
Use Cases
---------
This instrumentation can be used as a tool to help monitor how data
flows from a program's inputs (sources) to its outputs (sinks).
This has applications from a privacy/security perspective in that
one can audit how a sensitive data item is used within a program and
ensure it isn't exiting the program anywhere it shouldn't be.
Interface
---------
A number of functions are provided which will create taint labels,
attach labels to memory regions and extract the set of labels
associated with a specific memory region. These functions are declared
in the header file ``sanitizer/dfsan_interface.h``.
.. code-block:: c
/// Creates and returns a base label with the given description and user data.
dfsan_label dfsan_create_label(const char *desc, void *userdata);
/// Sets the label for each address in [addr,addr+size) to \c label.
void dfsan_set_label(dfsan_label label, void *addr, size_t size);
/// Sets the label for each address in [addr,addr+size) to the union of the
/// current label for that address and \c label.
void dfsan_add_label(dfsan_label label, void *addr, size_t size);
/// Retrieves the label associated with the given data.
///
/// The type of 'data' is arbitrary. The function accepts a value of any type,
/// which can be truncated or extended (implicitly or explicitly) as necessary.
/// The truncation/extension operations will preserve the label of the original
/// value.
dfsan_label dfsan_get_label(long data);
/// Retrieves a pointer to the dfsan_label_info struct for the given label.
const struct dfsan_label_info *dfsan_get_label_info(dfsan_label label);
/// Returns whether the given label label contains the label elem.
int dfsan_has_label(dfsan_label label, dfsan_label elem);
/// If the given label label contains a label with the description desc, returns
/// that label, else returns 0.
dfsan_label dfsan_has_label_with_desc(dfsan_label label, const char *desc);
Taint label representation
--------------------------
As stated above, the tool must track a large number of taint
labels. This poses an implementation challenge, as most multiple-label
tainting systems assign one label per bit to shadow storage, and
union taint labels using a bitwise or operation. This will not scale
to clients which use hundreds or thousands of taint labels, as the
label union operation becomes O(n) in the number of supported labels,
and data associated with it will quickly dominate the live variable
set, causing register spills and hampering performance.
Instead, a low overhead approach is proposed which is best-case O(log\
:sub:`2` n) during execution. The underlying assumption is that
the required space of label unions is sparse, which is a reasonable
assumption to make given that we are optimizing for the case where
applications mostly copy data from one place to another, without often
invoking the need for an actual union operation. The representation
of a taint label is a 16-bit integer, and new labels are allocated
sequentially from a pool. The label identifier 0 is special, and means
that the data item is unlabelled.
When a label union operation is requested at a join point (any
arithmetic or logical operation with two or more operands, such as
addition), the code checks whether a union is required, whether the
same union has been requested before, and whether one union label
subsumes the other. If so, it returns the previously allocated union
label. If not, it allocates a new union label from the same pool used
for new labels.
Specifically, the instrumentation pass will insert code like this
to decide the union label ``lu`` for a pair of labels ``l1``
and ``l2``:
.. code-block:: c
if (l1 == l2)
  lu = l1;
else
  lu = __dfsan_union(l1, l2);
The equality comparison is emitted inline, rather than inside
``__dfsan_union``, to provide an early exit in the common cases where the
program is processing unlabelled data, or where the two data items have the
same label. ``__dfsan_union`` is a runtime library function which performs
all other union computation.
Further optimizations are possible, for example if ``l1`` is known
at compile time to be zero (e.g. it is derived from a constant),
``l2`` can be used for ``lu``, and vice versa.
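The following is a conceptual sketch, in C++-style pseudocode, of the
decision procedure just described; the helper functions are placeholders
invented for this document, not symbols exported by the runtime:

.. code-block:: c++

   #include <sanitizer/dfsan_interface.h>

   // Placeholders standing in for the runtime's internal machinery.
   bool subsumes(dfsan_label a, dfsan_label b);              // does a already contain b?
   dfsan_label lookup_union(dfsan_label a, dfsan_label b);   // previously allocated union, or 0
   dfsan_label allocate_union(dfsan_label a, dfsan_label b); // fresh label from the pool

   dfsan_label union_labels(dfsan_label l1, dfsan_label l2) {
     if (l1 == 0) return l2;            // one operand is unlabelled
     if (l2 == 0) return l1;
     if (subsumes(l1, l2)) return l1;   // one label already subsumes the other
     if (subsumes(l2, l1)) return l2;
     if (dfsan_label cached = lookup_union(l1, l2))
       return cached;                   // this union has been requested before
     return allocate_union(l1, l2);     // allocate a new union label
   }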
Memory layout and label management
----------------------------------
The following is the current memory layout for Linux/x86\_64:
+----------------+----------------+--------------------+
|     Start      |      End       |        Use         |
+================+================+====================+
| 0x700000008000 | 0x800000000000 | application memory |
+----------------+----------------+--------------------+
| 0x200200000000 | 0x700000008000 | unused             |
+----------------+----------------+--------------------+
| 0x200000000000 | 0x200200000000 | union table        |
+----------------+----------------+--------------------+
| 0x000000010000 | 0x200000000000 | shadow memory      |
+----------------+----------------+--------------------+
| 0x000000000000 | 0x000000010000 | reserved by kernel |
+----------------+----------------+--------------------+
Each byte of application memory corresponds to two bytes of shadow
memory, which are used to store its taint label. As for LLVM SSA
registers, we have not found it necessary to associate a label with
each byte or bit of data, as some other tools do. Instead, labels are
associated directly with registers. Loads will result in a union of
all shadow labels corresponding to bytes loaded (which most of the
time will be short circuited by the initial comparison) and stores will
result in a copy of the label to the shadow of all bytes stored to.
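From the table above, the application-to-shadow address mapping can be
reconstructed as follows; this is our reading of the layout, not code quoted
from the runtime. Clearing the bit that places an address in the application
region and doubling the result (two shadow bytes per application byte) lands
inside the shadow region, e.g. 0x700000008000 maps to 0x000000010000:

.. code-block:: c++

   #include <sanitizer/dfsan_interface.h>
   #include <stdint.h>

   // Reconstructed for illustration from the layout table above.
   static inline dfsan_label *shadow_for(const void *addr) {
     uintptr_t a = (uintptr_t)addr;
     return (dfsan_label *)((a & ~0x700000000000ULL) << 1);
   }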
Propagating labels through arguments
------------------------------------
In order to propagate labels through function arguments and return values,
DataFlowSanitizer changes the ABI of each function in the translation unit.
There are currently two supported ABIs:
* Args -- Argument and return value labels are passed through additional
arguments and by modifying the return type.
* TLS -- Argument and return value labels are passed through TLS variables
``__dfsan_arg_tls`` and ``__dfsan_retval_tls``.
The main advantage of the TLS ABI is that it is more tolerant of ABI mismatches
(TLS storage is not shared with any other form of storage, whereas extra
arguments may be stored in registers which under the native ABI are not used
for parameter passing and thus could contain arbitrary values). On the other
hand the args ABI is more efficient and allows ABI mismatches to be more easily
identified by checking for nonzero labels in nominally unlabelled programs.
Implementing the ABI list
-------------------------
The `ABI list <DataFlowSanitizer.html#abi-list>`_ provides a list of functions
which conform to the native ABI, each of which is callable from an instrumented
program. This is implemented by replacing each reference to a native ABI
function with a reference to a function which uses the instrumented ABI.
Such functions are automatically-generated wrappers for the native functions.
For example, given the ABI list example provided in the user manual, the
following wrappers will be generated under the args ABI:
.. code-block:: llvm
define linkonce_odr { i8*, i16 } @"dfsw$malloc"(i64 %0, i16 %1) {
entry:
%2 = call i8* @malloc(i64 %0)
%3 = insertvalue { i8*, i16 } undef, i8* %2, 0
%4 = insertvalue { i8*, i16 } %3, i16 0, 1
ret { i8*, i16 } %4
}
define linkonce_odr { i32, i16 } @"dfsw$tolower"(i32 %0, i16 %1) {
entry:
%2 = call i32 @tolower(i32 %0)
%3 = insertvalue { i32, i16 } undef, i32 %2, 0
%4 = insertvalue { i32, i16 } %3, i16 %1, 1
ret { i32, i16 } %4
}
define linkonce_odr { i8*, i16 } @"dfsw$memcpy"(i8* %0, i8* %1, i64 %2, i16 %3, i16 %4, i16 %5) {
entry:
%labelreturn = alloca i16
%6 = call i8* @__dfsw_memcpy(i8* %0, i8* %1, i64 %2, i16 %3, i16 %4, i16 %5, i16* %labelreturn)
%7 = load i16* %labelreturn
%8 = insertvalue { i8*, i16 } undef, i8* %6, 0
%9 = insertvalue { i8*, i16 } %8, i16 %7, 1
ret { i8*, i16 } %9
}
As an optimization, direct calls to native ABI functions will call the
native ABI function directly and the pass will compute the appropriate label
internally. This has the advantage of reducing the number of union operations
required when the return value label is known to be zero (i.e. ``discard``
functions, or ``functional`` functions with known unlabelled arguments).
Checking ABI Consistency
------------------------
DFSan changes the ABI of each function in the module. This makes it possible
for a function with the native ABI to be called with the instrumented ABI,
or vice versa, thus possibly invoking undefined behavior. A simple way
of statically detecting instances of this problem is to prepend the prefix
"dfs$" to the name of each instrumented-ABI function.
This will not catch every such problem; in particular function pointers passed
across the instrumented-native barrier cannot be used on the other side.
These problems could potentially be caught dynamically.
Binary file not shown (image, 71 KiB).
View File
@ -0,0 +1,400 @@
=========================
Driver Design & Internals
=========================
.. contents::
:local:
Introduction
============
This document describes the Clang driver. The purpose of this document
is to describe both the motivation and design goals for the driver, as
well as details of the internal implementation.
Features and Goals
==================
The Clang driver is intended to be a production quality compiler driver
providing access to the Clang compiler and tools, with a command line
interface which is compatible with the gcc driver.
Although the driver is part of and driven by the Clang project, it is
logically a separate tool which shares many of the same goals as Clang:
.. contents:: Features
:local:
GCC Compatibility
-----------------
The number one goal of the driver is to ease the adoption of Clang by
allowing users to drop Clang into a build system which was designed to
call GCC. Although this makes the driver much more complicated than
might otherwise be necessary, we decided that being very compatible with
the gcc command line interface was worth it in order to allow users to
quickly test clang on their projects.
Flexible
--------
The driver was designed to be flexible and easily accommodate new uses
as we grow the clang and LLVM infrastructure. As one example, the driver
can easily support the introduction of tools which have an integrated
assembler; something we hope to add to LLVM in the future.
Similarly, most of the driver functionality is kept in a library which
can be used to build other tools which want to implement or accept a gcc
like interface.
Low Overhead
------------
The driver should have as little overhead as possible. In practice, we
found that the gcc driver by itself incurred a small but meaningful
overhead when compiling many small files. The driver doesn't do much
work compared to a compilation, but we have tried to keep it as
efficient as possible by following a few simple principles:
- Avoid memory allocation and string copying when possible.
- Don't parse arguments more than once.
- Provide a few simple interfaces for efficiently searching arguments.
Simple
------
Finally, the driver was designed to be "as simple as possible", given
the other goals. Notably, trying to be completely compatible with the
gcc driver adds a significant amount of complexity. However, the design
of the driver attempts to mitigate this complexity by dividing the
process into a number of independent stages instead of a single
monolithic task.
Internal Design and Implementation
==================================
.. contents::
:local:
:depth: 1
Internals Introduction
----------------------
In order to satisfy the stated goals, the driver was designed to
completely subsume the functionality of the gcc executable; that is, the
driver should not need to delegate to gcc to perform subtasks. On
Darwin, this implies that the Clang driver also subsumes the gcc
driver-driver, which is used to implement support for building universal
images (binaries and object files). This also implies that the driver
should be able to call the language specific compilers (e.g. cc1)
directly, which means that it must have enough information to forward
command line arguments to child processes correctly.
Design Overview
---------------
The diagram below shows the significant components of the driver
architecture and how they relate to one another. The orange components
represent concrete data structures built by the driver, the green
components indicate conceptually distinct stages which manipulate these
data structures, and the blue components are important helper classes.
.. image:: DriverArchitecture.png
:align: center
:alt: Driver Architecture Diagram
Driver Stages
-------------
The driver functionality is conceptually divided into five stages:
#. **Parse: Option Parsing**
The command line argument strings are decomposed into arguments
(``Arg`` instances). The driver expects to understand all available
options, although there is some facility for just passing certain
classes of options through (like ``-Wl,``).
Each argument corresponds to exactly one abstract ``Option``
definition, which describes how the option is parsed along with some
additional metadata. The Arg instances themselves are lightweight and
merely contain enough information for clients to determine which
option they correspond to and their values (if they have additional
parameters).
For example, a command line like "-Ifoo -I foo" would parse to two
Arg instances (a JoinedArg and a SeparateArg instance), but each
would refer to the same Option.
Options are lazily created in order to avoid populating all Option
classes when the driver is loaded. Most of the driver code only needs
to deal with options by their unique ID (e.g., ``options::OPT_I``),
Arg instances themselves do not generally store the values of
parameters. In many cases, this would simply result in creating
unnecessary string copies. Instead, Arg instances are always embedded
inside an ArgList structure, which contains the original vector of
argument strings. Each Arg itself only needs to contain an index into
this vector instead of storing its values directly.
The clang driver can dump the results of this stage using the
``-ccc-print-options`` flag (which must precede any actual command
line arguments). For example:
.. code-block:: console
$ clang -ccc-print-options -Xarch_i386 -fomit-frame-pointer -Wa,-fast -Ifoo -I foo t.c
Option 0 - Name: "-Xarch_", Values: {"i386", "-fomit-frame-pointer"}
Option 1 - Name: "-Wa,", Values: {"-fast"}
Option 2 - Name: "-I", Values: {"foo"}
Option 3 - Name: "-I", Values: {"foo"}
Option 4 - Name: "<input>", Values: {"t.c"}
After this stage is complete the command line should be broken down
into well defined option objects with their appropriate parameters.
Subsequent stages should rarely, if ever, need to do any string
processing.
#. **Pipeline: Compilation Job Construction**
Once the arguments are parsed, the tree of subprocess jobs needed for
the desired compilation sequence are constructed. This involves
determining the input files and their types, what work is to be done
on them (preprocess, compile, assemble, link, etc.), and constructing
a list of Action instances for each task. The result is a list of one
or more top-level actions, each of which generally corresponds to a
single output (for example, an object or linked executable).
The majority of Actions correspond to actual tasks; however, there are
two special Actions. The first is InputAction, which simply serves to
adapt an input argument for use as an input to other Actions. The
second is BindArchAction, which conceptually alters the architecture
to be used for all of its input Actions.
The clang driver can dump the results of this stage using the
``-ccc-print-phases`` flag. For example:
.. code-block:: console
$ clang -ccc-print-phases -x c t.c -x assembler t.s
0: input, "t.c", c
1: preprocessor, {0}, cpp-output
2: compiler, {1}, assembler
3: assembler, {2}, object
4: input, "t.s", assembler
5: assembler, {4}, object
6: linker, {3, 5}, image
Here the driver is constructing seven distinct actions, four to
compile the "t.c" input into an object file, two to assemble the
"t.s" input, and one to link them together.
A rather different compilation pipeline is shown here; in this
example there are two top level actions to compile the input files
into two separate object files, where each object file is built using
``lipo`` to merge results built for two separate architectures.
.. code-block:: console
$ clang -ccc-print-phases -c -arch i386 -arch x86_64 t0.c t1.c
0: input, "t0.c", c
1: preprocessor, {0}, cpp-output
2: compiler, {1}, assembler
3: assembler, {2}, object
4: bind-arch, "i386", {3}, object
5: bind-arch, "x86_64", {3}, object
6: lipo, {4, 5}, object
7: input, "t1.c", c
8: preprocessor, {7}, cpp-output
9: compiler, {8}, assembler
10: assembler, {9}, object
11: bind-arch, "i386", {10}, object
12: bind-arch, "x86_64", {10}, object
13: lipo, {11, 12}, object
After this stage is complete the compilation process is divided into
a simple set of actions which need to be performed to produce
intermediate or final outputs (in some cases, like ``-fsyntax-only``,
there is no "real" final output). Phases are well known compilation
steps, such as "preprocess", "compile", "assemble", "link", etc.
#. **Bind: Tool & Filename Selection**
This stage (in conjunction with the Translate stage) turns the tree
of Actions into a list of actual subprocesses to run. Conceptually, the
driver performs a top down matching to assign Action(s) to Tools. The
ToolChain is responsible for selecting the tool to perform a
particular action; once selected the driver interacts with the tool
to see if it can match additional actions (for example, by having an
integrated preprocessor).
Once Tools have been selected for all actions, the driver determines
how the tools should be connected (for example, using an in-process
module, pipes, temporary files, or user provided filenames). If an
output file is required, the driver also computes the appropriate
file name (the suffix and file location depend on the input types and
options such as ``-save-temps``).
The driver interacts with a ToolChain to perform the Tool bindings.
Each ToolChain contains information about all the tools needed for
compilation for a particular architecture, platform, and operating
system. A single driver invocation may query multiple ToolChains
during one compilation in order to interact with tools for separate
architectures.
The results of this stage are not computed directly, but the driver
can print the results via the ``-ccc-print-bindings`` option. For
example:
.. code-block:: console
$ clang -ccc-print-bindings -arch i386 -arch ppc t0.c
# "i386-apple-darwin9" - "clang", inputs: ["t0.c"], output: "/tmp/cc-Sn4RKF.s"
# "i386-apple-darwin9" - "darwin::Assemble", inputs: ["/tmp/cc-Sn4RKF.s"], output: "/tmp/cc-gvSnbS.o"
# "i386-apple-darwin9" - "darwin::Link", inputs: ["/tmp/cc-gvSnbS.o"], output: "/tmp/cc-jgHQxi.out"
# "ppc-apple-darwin9" - "gcc::Compile", inputs: ["t0.c"], output: "/tmp/cc-Q0bTox.s"
# "ppc-apple-darwin9" - "gcc::Assemble", inputs: ["/tmp/cc-Q0bTox.s"], output: "/tmp/cc-WCdicw.o"
# "ppc-apple-darwin9" - "gcc::Link", inputs: ["/tmp/cc-WCdicw.o"], output: "/tmp/cc-HHBEBh.out"
# "i386-apple-darwin9" - "darwin::Lipo", inputs: ["/tmp/cc-jgHQxi.out", "/tmp/cc-HHBEBh.out"], output: "a.out"
This shows the tool chain, tool, inputs and outputs which have been
bound for this compilation sequence. Here clang is being used to
compile t0.c on the i386 architecture and darwin specific versions of
the tools are being used to assemble and link the result, but generic
gcc versions of the tools are being used on PowerPC.
#. **Translate: Tool Specific Argument Translation**
Once a Tool has been selected to perform a particular Action, the
Tool must construct concrete Jobs which will be executed during
compilation. The main work is in translating from the gcc style
command line options to whatever options the subprocess expects.
Some tools, such as the assembler, only interact with a handful of
arguments and just determine the path of the executable to call and
pass on their input and output arguments. Others, like the compiler
or the linker, may translate a large number of additional arguments.
The ArgList class provides a number of simple helper methods to
assist with translating arguments; for example, to pass on only the
last argument corresponding to some option, or all arguments for an
option (a sketch of this kind of translation follows after this list).
The result of this stage is a list of Jobs (executable paths and
argument strings) to execute.
#. **Execute**
Finally, the compilation pipeline is executed. This is mostly
straightforward, although there is some interaction with options like
``-pipe``, ``-pass-exit-codes`` and ``-time``.
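To make the Translate stage concrete, here is a hedged sketch of the kind of
helper-based translation a Tool might perform. It uses the ``ArgList``
helpers and the ``options::OPT_I`` identifier mentioned above, but the
function itself and the extra subprocess flag are illustrative, not taken
from a real Tool:

.. code-block:: c++

   #include "clang/Driver/Options.h"
   #include "llvm/Option/ArgList.h"

   using namespace clang::driver;
   using namespace llvm::opt;

   // Illustrative only: forward include paths and the last -O level from the
   // parsed ArgList onto the subprocess command line.
   static void buildToolArgs(const ArgList &Args, ArgStringList &CmdArgs) {
     // Pass through every -I argument, in the order the user gave them.
     Args.AddAllArgs(CmdArgs, options::OPT_I);
     // Only the last optimization level matters, so forward just that one.
     Args.AddLastArg(CmdArgs, options::OPT_O);
     // A flag the (hypothetical) subprocess always expects.
     CmdArgs.push_back("-some-subprocess-flag");
   }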
Additional Notes
----------------
The Compilation Object
^^^^^^^^^^^^^^^^^^^^^^
The driver constructs a Compilation object for each set of command line
arguments. The Driver itself is intended to be invariant during
construction of a Compilation; an IDE should be able to construct a
single long lived driver instance to use for an entire build, for
example.
The Compilation object holds information that is particular to each
compilation sequence. For example, the list of used temporary files
(which must be removed once compilation is finished) and result files
(which should be removed if compilation fails).
Unified Parsing & Pipelining
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Parsing and pipelining both occur without reference to a Compilation
instance. This is by design; the driver expects that both of these
phases are platform neutral, with a few very well defined exceptions
such as whether the platform uses a driver driver.
ToolChain Argument Translation
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
In order to match gcc very closely, the clang driver currently allows
tool chains to perform their own translation of the argument list (into
a new ArgList data structure). Although this allows the clang driver to
match gcc easily, it also makes the driver operation much harder to
understand (since the Tools stop seeing some arguments the user
provided, and see new ones instead).
For example, on Darwin ``-gfull`` gets translated into two separate
arguments, ``-g`` and ``-fno-eliminate-unused-debug-symbols``. Trying to
write Tool logic to do something with ``-gfull`` will not work, because the
tool chain translation has already replaced it by the time the Tool sees
the argument list.
A long term goal is to remove this tool chain specific translation, and
instead force each tool to change its own logic to do the right thing on
the untranslated original arguments.
Unused Argument Warnings
^^^^^^^^^^^^^^^^^^^^^^^^
The driver operates by parsing all arguments but giving Tools the
opportunity to choose which arguments to pass on. One downside of this
infrastructure is that if the user misspells some option, or is confused
about which options to use, some command line arguments the user really
cared about may go unused. This problem is particularly important when
using clang as a compiler, since the clang compiler does not support
anywhere near all the options that gcc does, and we want to make sure
users know which ones are being used.
To support this, the driver maintains a bit associated with each
argument of whether it has been used (at all) during the compilation.
This bit usually doesn't need to be set by hand, as the key ArgList
accessors will set it automatically.
When a compilation is successful (there are no errors), the driver
checks the bit and emits an "unused argument" warning for any arguments
which were never accessed. This is conservative (the argument may not
have been used to do what the user wanted) but still catches the most
obvious cases.
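As a hedged sketch of how the claiming scheme interacts with Tool code (the
option choices and the function itself are illustrative; only the ``ArgList``
accessors are real):

.. code-block:: c++

   #include "clang/Driver/Options.h"
   #include "llvm/Option/ArgList.h"

   using namespace clang::driver;
   using namespace llvm::opt;

   static void handleMiscArgs(const ArgList &Args, ArgStringList &CmdArgs) {
     // hasArg() answers the query and marks any matching argument as used,
     // so a plain -v never produces an "unused argument" warning.
     if (Args.hasArg(options::OPT_v))
       CmdArgs.push_back("-v");
     // An option this Tool deliberately ignores can be claimed explicitly so
     // that the driver stays quiet about it.
     Args.ClaimAllArgs(options::OPT_pipe);
   }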
Relation to GCC Driver Concepts
-------------------------------
For those familiar with the gcc driver, this section provides a brief
overview of how things from the gcc driver map to the clang driver.
- **Driver Driver**
The driver driver is fully integrated into the clang driver. The
driver simply constructs additional Actions to bind the architecture
during the *Pipeline* phase. The tool chain specific argument
translation is responsible for handling ``-Xarch_``.
The one caveat is that this approach requires ``-Xarch_`` not be used
to alter the compilation itself (for example, one cannot provide
``-S`` as an ``-Xarch_`` argument). The driver attempts to reject
such invocations, and overall there isn't a good reason to abuse
``-Xarch_`` to that end in practice.
The upside is that the clang driver is more efficient and does little
extra work to support universal builds. It also provides better error
reporting and UI consistency.
- **Specs**
The clang driver has no direct correspondent for "specs". The
majority of the functionality that is embedded in specs is in the
Tool specific argument translation routines. The parts of specs which
control the compilation pipeline are generally part of the *Pipeline*
stage.
- **Toolchains**
The gcc driver has no direct understanding of tool chains. Each gcc
binary roughly corresponds to the information which is embedded
inside a single ToolChain.
The clang driver is intended to be portable and support complex
compilation environments. All platform and tool chain specific code
should be protected behind either abstract or well defined interfaces
(such as whether the platform supports use as a driver driver).
View File
@ -0,0 +1,80 @@
=======================
External Clang Examples
=======================
Introduction
============
This page provides some examples of the kinds of things that people have
done with Clang that might serve as useful guides (or starting points) from
which to develop your own tools. They may be helpful even for something as
banal (but necessary) as how to set up your build to integrate Clang.
Clang's library-based design is deliberately aimed at facilitating use by
external projects, and we are always interested in improving Clang to
better serve our external users. Some typical categories of applications
where Clang is used are:
- Static analysis.
- Documentation/cross-reference generation.
If you know of (or wrote!) a tool or project using Clang, please send an
email to Clang's `development discussion mailing list
<http://lists.cs.uiuc.edu/mailman/listinfo/cfe-dev>`_ to have it added.
(or if you are already a Clang contributor, feel free to directly commit
additions). Since the primary purpose of this page is to provide examples
that can help developers, generally they must have code available.
List of projects and tools
==========================
`<https://github.com/Andersbakken/rtags/>`_
"RTags is a client/server application that indexes c/c++ code and keeps
a persistent in-memory database of references, symbolnames, completions
etc."
`<http://rprichard.github.com/sourceweb/>`_
"A C/C++ source code indexer and navigator"
`<https://github.com/etaoins/qconnectlint>`_
"qconnectlint is a Clang tool for statically verifying the consistency
of signal and slot connections made with Qt's ``QObject::connect``."
`<https://github.com/woboq/woboq_codebrowser>`_
"The Woboq Code Browser is a web-based code browser for C/C++ projects.
Check out `<http://code.woboq.org/>`_ for an example!"
`<https://github.com/mozilla/dxr>`_
"DXR is a source code cross-reference tool that uses static analysis
data collected by instrumented compilers."
`<https://github.com/eschulte/clang-mutate>`_
"This tool performs a number of operations on C-language source files."
`<https://github.com/gmarpons/Crisp>`_
"A coding rule validation add-on for LLVM/clang. Crisp rules are written
in Prolog. A high-level declarative DSL to easily write new rules is under
development. It will be called CRISP, an acronym for *Coding Rules in
Sugared Prolog*."
`<https://github.com/drothlis/clang-ctags>`_
"Generate tag file for C++ source code."
`<https://github.com/exclipy/clang_indexer>`_
"This is an indexer for C and C++ based on the libclang library."
`<https://github.com/holtgrewe/linty>`_
"Linty - C/C++ Style Checking with Python & libclang."
`<https://github.com/axw/cmonster>`_
"cmonster is a Python wrapper for the Clang C++ parser."
`<https://github.com/rizsotto/Constantine>`_
"Constantine is a toy project to learn how to write clang plugin.
Implements pseudo const analysis. Generates warnings about variables,
which were declared without const qualifier."
`<https://github.com/jessevdk/cldoc>`_
"cldoc is a Clang based documentation generator for C and C++.
cldoc tries to solve the issue of writing C/C++ software documentation
with a modern, non-intrusive and robust approach."
64
tools/clang/docs/FAQ.rst Normal file
View File
@ -0,0 +1,64 @@
================================
Frequently Asked Questions (FAQ)
================================
.. contents::
:local:
Driver
======
I run ``clang -cc1 ...`` and get weird errors about missing headers
-------------------------------------------------------------------
Given this source file:
.. code-block:: c
#include <stdio.h>
int main() {
printf("Hello world\n");
}
If you run:
.. code-block:: console
$ clang -cc1 hello.c
hello.c:1:10: fatal error: 'stdio.h' file not found
#include <stdio.h>
^
1 error generated.
``clang -cc1`` is the frontend, ``clang`` is the :doc:`driver
<DriverInternals>`. The driver invokes the frontend with options appropriate
for your system. To see these options, run:
.. code-block:: console
$ clang -### -c hello.c
Some clang command line options are driver-only options, some are frontend-only
options. Frontend-only options are intended to be used only by clang developers.
Users should not run ``clang -cc1`` directly, because ``-cc1`` options are not
guaranteed to be stable.
If you want to use a frontend-only option ("a ``-cc1`` option"), for example
``-ast-dump``, then you need to take the ``clang -cc1`` line generated by the
driver and add the option you need. Alternatively, you can run
``clang -Xclang <option> ...`` to force the driver pass ``<option>`` to
``clang -cc1``.
I get errors about some headers being missing (``stddef.h``, ``stdarg.h``)
--------------------------------------------------------------------------
Some header files (``stddef.h``, ``stdarg.h``, and others) are shipped with
Clang --- these are called builtin includes. Clang searches for them in a
directory relative to the location of the ``clang`` binary. If you moved the
``clang`` binary, you need to move the builtin headers, too.
More information can be found in the :ref:`libtooling_builtin_includes`
section.
View File
@ -0,0 +1,199 @@
===================================
How To Setup Clang Tooling For LLVM
===================================
Clang Tooling provides infrastructure to write tools that need syntactic
and semantic information about a program. This term also relates to a set
of specific tools using this infrastructure (e.g. ``clang-check``). This
document provides information on how to set up and use Clang Tooling for
the LLVM source code.
Introduction
============
Clang Tooling needs a compilation database to figure out specific build
options for each file. Currently it can create a compilation database
from the ``compile_commands.json`` file, generated by CMake. When
invoking clang tools, you can either specify a path to a build directory
using a command line parameter ``-p`` or let Clang Tooling find this
file in your source tree. In either case you need to configure your
build using CMake to use clang tools.
Setup Clang Tooling Using CMake and Make
========================================
If you intend to use make to build LLVM, you should have CMake 2.8.6 or
later installed (can be found `here <http://cmake.org>`_).
First, you need to generate Makefiles for LLVM with CMake. You need to
make a build directory and run CMake from it:
.. code-block:: console
$ mkdir your/build/directory
$ cd your/build/directory
$ cmake -DCMAKE_EXPORT_COMPILE_COMMANDS=ON path/to/llvm/sources
If you want to use clang instead of GCC, you can add
``-DCMAKE_C_COMPILER=/path/to/clang -DCMAKE_CXX_COMPILER=/path/to/clang++``.
You can also use ``ccmake``, which provides a curses interface to configure
CMake variables for lazy people.
As a result, the new ``compile_commands.json`` file should appear in the
current directory. You should link it to the LLVM source tree so that
Clang Tooling is able to use it:
.. code-block:: console
$ ln -s $PWD/compile_commands.json path/to/llvm/source/
Now you are ready to build and test LLVM using make:
.. code-block:: console
$ make check-all
Using Clang Tools
=================
After you completed the previous steps, you are ready to run clang tools. If
you have a recent clang installed, you should have ``clang-check`` in
``$PATH``. Try to run it on any ``.cpp`` file inside the LLVM source tree:
.. code-block:: console
$ clang-check tools/clang/lib/Tooling/CompilationDatabase.cpp
If you're using vim, it's convenient to have clang-check integrated. Put
this into your ``.vimrc``:
::
function! ClangCheckImpl(cmd)
if &autowrite | wall | endif
echo "Running " . a:cmd . " ..."
let l:output = system(a:cmd)
cexpr l:output
cwindow
let w:quickfix_title = a:cmd
if v:shell_error != 0
cc
endif
let g:clang_check_last_cmd = a:cmd
endfunction
function! ClangCheck()
let l:filename = expand('%')
if l:filename =~ '\.\(cpp\|cxx\|cc\|c\)$'
call ClangCheckImpl("clang-check " . l:filename)
elseif exists("g:clang_check_last_cmd")
call ClangCheckImpl(g:clang_check_last_cmd)
else
echo "Can't detect file's compilation arguments and no previous clang-check invocation!"
endif
endfunction
nmap <silent> <F5> :call ClangCheck()<CR><CR>
When editing a .cpp/.cxx/.cc/.c file, hit F5 to reparse the file. In
case the current file has a different extension (for example, .h), F5
will re-run the last clang-check invocation made from this vim instance
(if any). The output will go into the error window, which is opened
automatically when clang-check finds errors, and can be re-opened with
``:cope``.
Other ``clang-check`` options that can be useful when working with clang
AST:
* ``-ast-print`` --- Build ASTs and then pretty-print them.
* ``-ast-dump`` --- Build ASTs and then debug dump them.
* ``-ast-dump-filter=<string>`` --- Use with ``-ast-dump`` or ``-ast-print`` to
dump/print only AST declaration nodes having a certain substring in a
qualified name. Use ``-ast-list`` to list all filterable declaration node
names.
* ``-ast-list`` --- Build ASTs and print the list of declaration node qualified
names.
Examples:
.. code-block:: console
$ clang-check tools/clang/tools/clang-check/ClangCheck.cpp -ast-dump -ast-dump-filter ActionFactory::newASTConsumer
Processing: tools/clang/tools/clang-check/ClangCheck.cpp.
Dumping ::ActionFactory::newASTConsumer:
clang::ASTConsumer *newASTConsumer() (CompoundStmt 0x44da290 </home/alexfh/local/llvm/tools/clang/tools/clang-check/ClangCheck.cpp:64:40, line:72:3>
(IfStmt 0x44d97c8 <line:65:5, line:66:45>
<<<NULL>>>
(ImplicitCastExpr 0x44d96d0 <line:65:9> '_Bool':'_Bool' <UserDefinedConversion>
...
$ clang-check tools/clang/tools/clang-check/ClangCheck.cpp -ast-print -ast-dump-filter ActionFactory::newASTConsumer
Processing: tools/clang/tools/clang-check/ClangCheck.cpp.
Printing <anonymous namespace>::ActionFactory::newASTConsumer:
clang::ASTConsumer *newASTConsumer() {
if (this->ASTList.operator _Bool())
return clang::CreateASTDeclNodeLister();
if (this->ASTDump.operator _Bool())
return clang::CreateASTDumper(this->ASTDumpFilter);
if (this->ASTPrint.operator _Bool())
return clang::CreateASTPrinter(&llvm::outs(), this->ASTDumpFilter);
return new clang::ASTConsumer();
}
(Experimental) Using Ninja Build System
=======================================
Optionally you can use the `Ninja <https://github.com/martine/ninja>`_
build system instead of make. It is aimed at making your builds faster.
Currently this step will require building Ninja from sources.
To take advantage of using Clang Tools along with Ninja build you need
at least CMake 2.8.9.
Clone the Ninja git repository and build Ninja from sources:
.. code-block:: console
$ git clone git://github.com/martine/ninja.git
$ cd ninja/
$ ./bootstrap.py
This will result in a single binary ``ninja`` in the current directory.
It doesn't require installation and can just be copied to any location
inside ``$PATH``, say ``/usr/local/bin/``:
.. code-block:: console
$ sudo cp ninja /usr/local/bin/
$ sudo chmod a+rx /usr/local/bin/ninja
After doing all of this, you'll need to generate Ninja build files for
LLVM with CMake. You need to make a build directory and run CMake from
it:
.. code-block:: console
$ mkdir your/build/directory
$ cd your/build/directory
$ cmake -G Ninja -DCMAKE_EXPORT_COMPILE_COMMANDS=ON path/to/llvm/sources
If you want to use clang instead of GCC, you can add
``-DCMAKE_C_COMPILER=/path/to/clang -DCMAKE_CXX_COMPILER=/path/to/clang++``.
You can also use ``ccmake``, which provides a curses interface to configure
CMake variables in an interactive manner.
As a result, the new ``compile_commands.json`` file should appear in the
current directory. You should link it to the LLVM source tree so that
Clang Tooling is able to use it:
.. code-block:: console
$ ln -s $PWD/compile_commands.json path/to/llvm/source/
Now you are ready to build and test LLVM using Ninja:
.. code-block:: console
$ ninja check-all
Other target names can be used in the same way as with make.
File diff suppressed because it is too large.
View File
@ -0,0 +1,126 @@
=============================
Introduction to the Clang AST
=============================
This document gives a gentle introduction to the mysteries of the Clang
AST. It is targeted at developers who either want to contribute to
Clang, or use tools that work based on Clang's AST, like the AST
matchers.
.. raw:: html
<center><iframe width="560" height="315" src="http://www.youtube.com/embed/VqCkCDFLSsc?vq=hd720" frameborder="0" allowfullscreen></iframe></center>
`Slides <http://llvm.org/devmtg/2013-04/klimek-slides.pdf>`_
Introduction
============
Clang's AST is different from ASTs produced by some other compilers in
that it closely resembles both the written C++ code and the C++
standard. For example, parenthesis expressions and compile time
constants are available in an unreduced form in the AST. This makes
Clang's AST a good fit for refactoring tools.
Documentation for all Clang AST nodes is available via the generated
`Doxygen <http://clang.llvm.org/doxygen>`_. The doxygen online
documentation is also indexed by your favorite search engine, which will
make a search for clang and the AST node's class name usually turn up
the doxygen of the class you're looking for (for example, search for:
clang ParenExpr).
Examining the AST
=================
A good way to familiarize yourself with the Clang AST is to actually look
at it on some simple example code. Clang has a builtin AST-dump mode,
which can be enabled with the flag ``-ast-dump``.
Let's look at a simple example AST:
::
$ cat test.cc
int f(int x) {
int result = (x / 42);
return result;
}
# Clang by default is a frontend for many tools; -Xclang is used to pass
# options directly to the C++ frontend.
$ clang -Xclang -ast-dump -fsyntax-only test.cc
TranslationUnitDecl 0x5aea0d0 <<invalid sloc>>
... cutting out internal declarations of clang ...
`-FunctionDecl 0x5aeab50 <test.cc:1:1, line:4:1> f 'int (int)'
|-ParmVarDecl 0x5aeaa90 <line:1:7, col:11> x 'int'
`-CompoundStmt 0x5aead88 <col:14, line:4:1>
|-DeclStmt 0x5aead10 <line:2:3, col:24>
| `-VarDecl 0x5aeac10 <col:3, col:23> result 'int'
| `-ParenExpr 0x5aeacf0 <col:16, col:23> 'int'
| `-BinaryOperator 0x5aeacc8 <col:17, col:21> 'int' '/'
| |-ImplicitCastExpr 0x5aeacb0 <col:17> 'int' <LValueToRValue>
| | `-DeclRefExpr 0x5aeac68 <col:17> 'int' lvalue ParmVar 0x5aeaa90 'x' 'int'
| `-IntegerLiteral 0x5aeac90 <col:21> 'int' 42
`-ReturnStmt 0x5aead68 <line:3:3, col:10>
`-ImplicitCastExpr 0x5aead50 <col:10> 'int' <LValueToRValue>
`-DeclRefExpr 0x5aead28 <col:10> 'int' lvalue Var 0x5aeac10 'result' 'int'
The toplevel declaration in
a translation unit is always the `translation unit
declaration <http://clang.llvm.org/doxygen/classclang_1_1TranslationUnitDecl.html>`_.
In this example, our first user written declaration is the `function
declaration <http://clang.llvm.org/doxygen/classclang_1_1FunctionDecl.html>`_
of "``f``". The body of "``f``" is a `compound
statement <http://clang.llvm.org/doxygen/classclang_1_1CompoundStmt.html>`_,
whose child nodes are a `declaration
statement <http://clang.llvm.org/doxygen/classclang_1_1DeclStmt.html>`_
that declares our result variable, and the `return
statement <http://clang.llvm.org/doxygen/classclang_1_1ReturnStmt.html>`_.
AST Context
===========
All information about the AST for a translation unit is bundled up in
the class
`ASTContext <http://clang.llvm.org/doxygen/classclang_1_1ASTContext.html>`_.
It allows traversal of the whole translation unit starting from
`getTranslationUnitDecl <http://clang.llvm.org/doxygen/classclang_1_1ASTContext.html#abd909fb01ef10cfd0244832a67b1dd64>`_,
or to access Clang's `table of
identifiers <http://clang.llvm.org/doxygen/classclang_1_1ASTContext.html#a4f95adb9958e22fbe55212ae6482feb4>`_
for the parsed translation unit.
AST Nodes
=========
Clang's AST nodes are modeled on a class hierarchy that does not have a
common ancestor. Instead, there are multiple larger hierarchies for
basic node types like
`Decl <http://clang.llvm.org/doxygen/classclang_1_1Decl.html>`_ and
`Stmt <http://clang.llvm.org/doxygen/classclang_1_1Stmt.html>`_. Many
important AST nodes derive from
`Type <http://clang.llvm.org/doxygen/classclang_1_1Type.html>`_,
`Decl <http://clang.llvm.org/doxygen/classclang_1_1Decl.html>`_,
`DeclContext <http://clang.llvm.org/doxygen/classclang_1_1DeclContext.html>`_
or `Stmt <http://clang.llvm.org/doxygen/classclang_1_1Stmt.html>`_, with
some classes deriving from both Decl and DeclContext.
There are also a multitude of nodes in the AST that are not part of a
larger hierarchy, and are only reachable from specific other nodes, like
`CXXBaseSpecifier <http://clang.llvm.org/doxygen/classclang_1_1CXXBaseSpecifier.html>`_.
Thus, to traverse the full AST, one starts from the
`TranslationUnitDecl <http://clang.llvm.org/doxygen/classclang_1_1TranslationUnitDecl.html>`_
and then recursively traverses everything that can be reached from that
node - this information has to be encoded for each specific node type.
This algorithm is encoded in the
`RecursiveASTVisitor <http://clang.llvm.org/doxygen/classclang_1_1RecursiveASTVisitor.html>`_.
See the `RecursiveASTVisitor
tutorial <http://clang.llvm.org/docs/RAVFrontendAction.html>`_.
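As a brief, hedged illustration of that traversal model (the class and
function names here are invented for the example), a visitor that lists
every function declaration reachable from the translation unit could look
like this:

.. code-block:: c++

   #include "clang/AST/ASTContext.h"
   #include "clang/AST/RecursiveASTVisitor.h"
   #include "llvm/Support/raw_ostream.h"

   class FunctionLister
       : public clang::RecursiveASTVisitor<FunctionLister> {
   public:
     // Called for every FunctionDecl encountered during the traversal.
     bool VisitFunctionDecl(clang::FunctionDecl *FD) {
       llvm::errs() << "function: " << FD->getNameAsString() << "\n";
       return true; // continue visiting the rest of the AST
     }
   };

   void listFunctions(clang::ASTContext &Context) {
     FunctionLister Lister;
     // Start at the TranslationUnitDecl and recursively visit everything
     // reachable from it, exactly as described above.
     Lister.TraverseDecl(Context.getTranslationUnitDecl());
   }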
The two most basic nodes in the Clang AST are statements
(`Stmt <http://clang.llvm.org/doxygen/classclang_1_1Stmt.html>`_) and
declarations
(`Decl <http://clang.llvm.org/doxygen/classclang_1_1Decl.html>`_). Note
that expressions
(`Expr <http://clang.llvm.org/doxygen/classclang_1_1Expr.html>`_) are
also statements in Clang's AST.
View File
@ -0,0 +1,88 @@
==============================================
JSON Compilation Database Format Specification
==============================================
This document describes a format for specifying how to replay single
compilations independently of the build system.
Background
==========
Tools based on the C++ Abstract Syntax Tree need full information about how to
parse a translation unit. Usually this information is implicitly
available in the build system, but running tools as part of the build
system is not necessarily the best solution:
- Build systems are inherently change driven, so running multiple tools
over the same code base without changing the code does not fit into
the architecture of many build systems.
- Figuring out whether things have changed is often an IO bound
process; this makes it hard to build low latency end user tools based
on the build system.
- Build systems are inherently sequential in the build graph, for
example due to generated source code. While tools that run
independently of the build still need the generated source code to
exist, running tools multiple times over unchanging source does not
require serialization of the runs according to the build dependency
graph.
Supported Systems
=================
Currently `CMake <http://cmake.org>`_ (since 2.8.5) supports generation
of compilation databases for Unix Makefile builds (Ninja builds in the
works) with the option ``CMAKE_EXPORT_COMPILE_COMMANDS``.
For projects on Linux, there is an alternative to intercept compiler
calls with a tool called `Bear <https://github.com/rizsotto/Bear>`_.
Clang's tooling interface supports reading compilation databases; see
the :doc:`LibTooling documentation <LibTooling>`. libclang and its
python bindings also support this (since clang 3.2); see
`CXCompilationDatabase.h </doxygen/group__COMPILATIONDB.html>`_.
Format
======
A compilation database is a JSON file, which consists of an array of
"command objects", where each command object specifies one way a
translation unit is compiled in the project.
Each command object contains the translation unit's main file, the
working directory of the compile run and the actual compile command.
Example:
::
[
{ "directory": "/home/user/llvm/build",
"command": "/usr/bin/clang++ -Irelative -DSOMEDEF=\"With spaces, quotes and \\-es.\" -c -o file.o file.cc",
"file": "file.cc" },
...
]
The contracts for each field in the command object are:
- **directory:** The working directory of the compilation. All paths
specified in the **command** or **file** fields must be either
absolute or relative to this directory.
- **file:** The main translation unit source processed by this
compilation step. This is used by tools as the key into the
compilation database. There can be multiple command objects for the
same file, for example if the same source file is compiled with
different configurations.
- **command:** The compile command executed. After JSON unescaping,
this must be a valid command to rerun the exact compilation step for
the translation unit in the environment the build system uses.
Parameters use shell quoting and shell escaping of quotes, with '``"``'
and '``\``' being the only special characters. Shell expansion is not
supported.
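For example, a tool built on Clang's tooling library can load such a
database and look up the command objects for one file roughly as follows;
the exact return type of ``loadFromDirectory`` has varied between releases,
so treat the details as a sketch rather than a definitive API reference:

.. code-block:: c++

   #include "clang/Tooling/CompilationDatabase.h"
   #include "llvm/Support/raw_ostream.h"
   #include <string>
   #include <vector>

   void dumpCommandsFor(const char *BuildDir, const char *File) {
     std::string Error;
     // Looks for compile_commands.json in BuildDir.
     auto DB =
         clang::tooling::CompilationDatabase::loadFromDirectory(BuildDir, Error);
     if (!DB) {
       llvm::errs() << "could not load compilation database: " << Error << "\n";
       return;
     }
     // Each CompileCommand carries the working directory and the full command
     // line for one compilation of 'File'.
     std::vector<clang::tooling::CompileCommand> Commands =
         DB->getCompileCommands(File);
     for (const clang::tooling::CompileCommand &CC : Commands) {
       for (const std::string &Arg : CC.CommandLine)
         llvm::errs() << Arg << " ";
       llvm::errs() << "\n";
     }
   }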
Build System Integration
========================
The convention is to name the file compile\_commands.json and put it at
the top of the build directory. Clang tools are pointed to the top of
the build directory to detect the file and use the compilation database
to parse C++ code in the source tree.
File diff suppressed because it is too large.
View File
@ -0,0 +1,32 @@
================
LeakSanitizer
================
.. contents::
:local:
Introduction
============
LeakSanitizer is a run-time memory leak detector. It can be combined with
:doc:`AddressSanitizer` to get both memory error and leak detection.
LeakSanitizer does not introduce any additional slowdown when used in this mode.
The LeakSanitizer runtime can also be linked in separately to get leak detection
only, at a minimal performance cost.
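For example, the trivially leaky program below is reported when the binary is
built with ``-fsanitize=address`` (which enables leak detection in the
combined mode) or, for leak checking only, with ``-fsanitize=leak``; the file
name and the invocation in the comment are only an illustration:

.. code-block:: c++

   // Assumed invocation: clang++ -g -fsanitize=address leaky.cc && ./a.out
   #include <cstdlib>

   int main() {
     // Never freed and unreachable once main returns, so the leak detector
     // reports the allocation together with its allocation stack trace.
     void *p = std::malloc(7);
     (void)p;
     return 0;
   }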
Current status
==============
LeakSanitizer is experimental and supported only on x86\_64 Linux.
The combined mode has been tested on fairly large software projects. The
stand-alone mode has received much less testing.
There are plans to support LeakSanitizer in :doc:`MemorySanitizer` builds.
More Information
================
`https://code.google.com/p/address-sanitizer/wiki/LeakSanitizer
<https://code.google.com/p/address-sanitizer/wiki/LeakSanitizer>`_
View File
@ -0,0 +1,134 @@
======================
Matching the Clang AST
======================
This document explains how to use Clang's LibASTMatchers to match interesting
nodes of the AST and execute code that uses the matched nodes. Combined with
:doc:`LibTooling`, LibASTMatchers helps to write code-to-code transformation
tools or query tools.
We assume basic knowledge about the Clang AST. See the :doc:`Introduction
to the Clang AST <IntroductionToTheClangAST>` if you want to learn more
about how the AST is structured.
.. FIXME: create tutorial and link to the tutorial
Introduction
------------
LibASTMatchers provides a domain specific language to create predicates on
Clang's AST. This DSL is written in and can be used from C++, allowing users
to write a single program to both match AST nodes and access the node's C++
interface to extract attributes, source locations, or any other information
provided on the AST level.
AST matchers are predicates on nodes in the AST. Matchers are created by
calling creator functions that allow building up a tree of matchers, where
inner matchers are used to make the match more specific.
For example, to create a matcher that matches all class or union declarations
in the AST of a translation unit, you can call `recordDecl()
<LibASTMatchersReference.html#recordDecl0Anchor>`_. To narrow the match down,
for example to find all class or union declarations with the name "``Foo``",
insert a `hasName <LibASTMatchersReference.html#hasName0Anchor>`_ matcher: the
call ``recordDecl(hasName("Foo"))`` returns a matcher that matches classes or
unions that are named "``Foo``", in any namespace. By default, matchers that
accept multiple inner matchers use an implicit `allOf()
<LibASTMatchersReference.html#allOf0Anchor>`_. This allows further narrowing
down the match, for example to match all classes that are derived from
"``Bar``": ``recordDecl(hasName("Foo"), isDerivedFrom("Bar"))``.
How to create a matcher
-----------------------
With more than a thousand classes in the Clang AST, one can quickly get lost
when trying to figure out how to create a matcher for a specific pattern. This
section will teach you how to use a rigorous step-by-step pattern to build the
matcher you are interested in. Note that there will always be matchers missing
for some part of the AST. See the section about :ref:`how to write your own
AST matchers <astmatchers-writing>` later in this document.
.. FIXME: why is it linking back to the same section?!
The precondition to using the matchers is to understand how the AST for what you
want to match looks like. The
:doc:`Introduction to the Clang AST <IntroductionToTheClangAST>` teaches you
how to dump a translation unit's AST into a human readable format.
.. FIXME: Introduce link to ASTMatchersTutorial.html
.. FIXME: Introduce link to ASTMatchersCookbook.html
In general, the strategy to create the right matchers is:
#. Find the outermost class in Clang's AST you want to match.
#. Look at the `AST Matcher Reference <LibASTMatchersReference.html>`_ for
matchers that either match the node you're interested in or narrow down
attributes on the node.
#. Create your outer match expression. Verify that it works as expected.
#. Examine the matchers for what the next inner node you want to match is.
#. Repeat until the matcher is finished.
.. _astmatchers-bind:
Binding nodes in match expressions
----------------------------------
Matcher expressions allow you to specify which parts of the AST are interesting
for a certain task. Often you will want to then do something with the nodes
that were matched, like building source code transformations.
To that end, matchers that match specific AST nodes (so called node matchers)
are bindable; for example, ``recordDecl(hasName("MyClass")).bind("id")`` will
bind the matched ``recordDecl`` node to the string "``id``", to be later
retrieved in the `match callback
<http://clang.llvm.org/doxygen/classclang_1_1ast__matchers_1_1MatchFinder_1_1MatchCallback.html>`_.
.. FIXME: Introduce link to ASTMatchersTutorial.html
.. FIXME: Introduce link to ASTMatchersCookbook.html
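For illustration, a minimal callback that retrieves the node bound to
"``id``" might look like the following sketch (the class name is ours; the
full ``MatchFinder`` setup is shown in the tutorial documents):

.. code-block:: c++

   #include "clang/ASTMatchers/ASTMatchFinder.h"

   class IdDumper : public clang::ast_matchers::MatchFinder::MatchCallback {
   public:
     virtual void run(
         const clang::ast_matchers::MatchFinder::MatchResult &Result) {
       // Retrieve the node bound by recordDecl(hasName("MyClass")).bind("id").
       if (const clang::CXXRecordDecl *D =
               Result.Nodes.getNodeAs<clang::CXXRecordDecl>("id"))
         D->dump();
     }
   };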
Writing your own matchers
-------------------------
There are multiple different ways to define a matcher, depending on its type
and flexibility.
``VariadicDynCastAllOfMatcher<Base, Derived>``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Those match all nodes of type *Base* if they can be dynamically cast to
*Derived*. The names of those matchers are nouns, which closely resemble
*Derived*. ``VariadicDynCastAllOfMatchers`` are the backbone of the matcher
hierarchy. Most often, your match expression will start with one of them, and
you can :ref:`bind <astmatchers-bind>` the node they represent to ids for later
processing.
``VariadicDynCastAllOfMatchers`` are callable classes that model variadic
template functions in C++03. They take an arbitrary number of
``Matcher<Derived>`` and return a ``Matcher<Base>``.
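For example, mirroring the style in which ``ASTMatchers.h`` declares its own
node matchers (the name ``myRecordDecl`` is invented here to avoid clashing
with the real ``recordDecl``), such a matcher is declared like this:

.. code-block:: c++

   #include "clang/ASTMatchers/ASTMatchers.h"

   namespace clang {
   namespace ast_matchers {

   // Matches Decl nodes that can be dynamically cast to CXXRecordDecl, and
   // accepts any number of inner Matcher<CXXRecordDecl> arguments.
   const internal::VariadicDynCastAllOfMatcher<Decl, CXXRecordDecl>
       myRecordDecl;

   } // namespace ast_matchers
   } // namespace clang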
``AST_MATCHER_P(Type, Name, ParamType, Param)``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Most matcher definitions use the matcher creation macros. Those define both
the matcher of type ``Matcher<Type>`` itself, and a matcher-creation function
named *Name* that takes a parameter of type *ParamType* and returns the
corresponding matcher.
There are multiple matcher definition macros that deal with polymorphic return
values and different parameter counts. See `ASTMatchersMacros.h
<http://clang.llvm.org/doxygen/ASTMatchersMacros_8h.html>`_.
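As a hedged illustration (the matcher defined here is invented for this
document, not part of the library), a parameterized matcher for function
declarations with a given number of parameters could be written as:

.. code-block:: c++

   #include "clang/ASTMatchers/ASTMatchers.h"
   #include "clang/ASTMatchers/ASTMatchersMacros.h"

   namespace clang {
   namespace ast_matchers {

   // Hypothetical matcher: matches a FunctionDecl with exactly N parameters.
   // Inside the macro body, 'Node' is the FunctionDecl under consideration.
   AST_MATCHER_P(FunctionDecl, hasParamCount, unsigned, N) {
     return Node.getNumParams() == N;
   }

   } // namespace ast_matchers
   } // namespace clang

It can then be composed like any other matcher, for example
``functionDecl(hasParamCount(2))``.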
.. _astmatchers-writing:
Matcher creation functions
^^^^^^^^^^^^^^^^^^^^^^^^^^
Matchers are generated by nesting calls to matcher creation functions. Most of
the time those functions are either created by using
``VariadicDynCastAllOfMatcher`` or the matcher creation macros (see below).
The free-standing functions are an indication that this matcher is just a
combination of other matchers, as is for example the case with `callee
<LibASTMatchersReference.html#callee1Anchor>`_.
.. FIXME: "... macros (see below)" --- there isn't anything below
File diff suppressed because it is too large.
View File
@ -0,0 +1,556 @@
===============================================================
Tutorial for building tools using LibTooling and LibASTMatchers
===============================================================
This document is intended to show how to build a useful source-to-source
translation tool based on Clang's `LibTooling <LibTooling.html>`_. It is
explicitly aimed at people who are new to Clang, so all you should need
is a working knowledge of C++ and the command line.
In order to work on the compiler, you need some basic knowledge of the
abstract syntax tree (AST). To this end, the reader is encouraged to
skim the :doc:`Introduction to the Clang
AST <IntroductionToTheClangAST>`.
Step 0: Obtaining Clang
=======================
As Clang is part of the LLVM project, you'll need to download LLVM's
source code first. Both Clang and LLVM are maintained as Subversion
repositories, but we'll be accessing them through the git mirror. For
further information, see the `getting started
guide <http://llvm.org/docs/GettingStarted.html>`_.
.. code-block:: console
mkdir ~/clang-llvm && cd ~/clang-llvm
git clone http://llvm.org/git/llvm.git
cd llvm/tools
git clone http://llvm.org/git/clang.git
cd clang/tools
git clone http://llvm.org/git/clang-tools-extra.git extra
Next you need to obtain the CMake build system and Ninja build tool. You
may already have CMake installed, but current binary versions of CMake
aren't built with Ninja support.
.. code-block:: console
cd ~/clang-llvm
git clone https://github.com/martine/ninja.git
cd ninja
git checkout release
./bootstrap.py
sudo cp ninja /usr/bin/
cd ~/clang-llvm
git clone git://cmake.org/stage/cmake.git
cd cmake
git checkout next
./bootstrap
make
sudo make install
Okay. Now we'll build Clang!
.. code-block:: console
cd ~/clang-llvm
mkdir build && cd build
cmake -G Ninja ../llvm -DLLVM_BUILD_TESTS=ON # Enable tests; default is off.
ninja
ninja check # Test LLVM only.
ninja clang-test # Test Clang only.
ninja install
And we're live.
All of the tests should pass, though there is a (very) small chance that
you can catch LLVM and Clang out of sync. Running ``'git svn rebase'``
in both the llvm and clang directories should fix any problems.
Finally, we want to set Clang as its own compiler.
.. code-block:: console
cd ~/clang-llvm/build
ccmake ../llvm
The second command will bring up a GUI for configuring Clang. You need
to set the entry for ``CMAKE_CXX_COMPILER``. Press ``'t'`` to turn on
advanced mode. Scroll down to ``CMAKE_CXX_COMPILER``, and set it to
``/usr/bin/clang++``, or wherever you installed it. Press ``'c'`` to
configure, then ``'g'`` to generate CMake's files.
Finally, run ninja one last time, and you're done.
Step 1: Create a ClangTool
==========================
Now that we have enough background knowledge, it's time to create the
simplest productive ClangTool in existence: a syntax checker. While this
already exists as ``clang-check``, it's important to understand what's
going on.
First, we'll need to create a new directory for our tool and tell CMake
that it exists. As this is not going to be a core clang tool, it will
live in the ``tools/extra`` repository.
.. code-block:: console
cd ~/clang-llvm/llvm/tools/clang
mkdir tools/extra/loop-convert
echo 'add_subdirectory(loop-convert)' >> tools/extra/CMakeLists.txt
vim tools/extra/loop-convert/CMakeLists.txt
CMakeLists.txt should have the following contents:
::
set(LLVM_LINK_COMPONENTS support)
set(LLVM_USED_LIBS clangTooling clangBasic clangAST)
add_clang_executable(loop-convert
LoopConvert.cpp
)
target_link_libraries(loop-convert
clangTooling
clangBasic
clangASTMatchers
)
With that done, Ninja will be able to compile our tool. Let's give it
something to compile! Put the following into
``tools/extra/loop-convert/LoopConvert.cpp``. A detailed explanation of
why the different parts are needed can be found in the `LibTooling
documentation <LibTooling.html>`_.
.. code-block:: c++
// Declares clang::SyntaxOnlyAction.
#include "clang/Frontend/FrontendActions.h"
#include "clang/Tooling/CommonOptionsParser.h"
#include "clang/Tooling/Tooling.h"
// Declares llvm::cl::extrahelp.
#include "llvm/Support/CommandLine.h"
using namespace clang::tooling;
using namespace llvm;
// CommonOptionsParser declares HelpMessage with a description of the common
// command-line options related to the compilation database and input files.
// It's nice to have this help message in all tools.
static cl::extrahelp CommonHelp(CommonOptionsParser::HelpMessage);
// A help message for this specific tool can be added afterwards.
static cl::extrahelp MoreHelp("\nMore help text...");
int main(int argc, const char **argv) {
CommonOptionsParser OptionsParser(argc, argv);
ClangTool Tool(OptionsParser.getCompilations(),
OptionsParser.getSourcePathList());
return Tool.run(newFrontendActionFactory<clang::SyntaxOnlyAction>());
}
And that's it! You can compile our new tool by running ninja from the
``build`` directory.
.. code-block:: console
cd ~/clang-llvm/build
ninja
You should now be able to run the syntax checker, which is located in
``~/clang-llvm/build/bin``, on any source file. Try it!
.. code-block:: console
cat "int main() { return 0; }" > test.cpp
bin/loop-convert test.cpp --
Note the two dashes after we specify the source file. The additional
options for the compiler are passed after the dashes rather than loading
them from a compilation database - there just aren't any options needed
right now.
Intermezzo: Learn AST matcher basics
====================================
Clang recently introduced the :doc:`ASTMatcher
library <LibASTMatchers>` to provide a simple, powerful, and
concise way to describe specific patterns in the AST. Implemented as a
DSL powered by macros and templates (see
`ASTMatchers.h <../doxygen/ASTMatchers_8h_source.html>`_ if you're
curious), matchers offer the feel of algebraic data types common to
functional programming languages.
For example, suppose you wanted to examine only binary operators. There
is a matcher to do exactly that, conveniently named ``binaryOperator``.
I'll give you one guess what this matcher does:
.. code-block:: c++
binaryOperator(hasOperatorName("+"), hasLHS(integerLiteral(equals(0))))
Shockingly, it will match against addition expressions whose left hand
side is exactly the literal 0. It will not match against other forms of
0, such as ``'\0'`` or ``NULL``, but it will match against macros that
expand to 0. The matcher will also not match against calls to the
overloaded operator ``'+'``, as there is a separate ``operatorCallExpr``
matcher to handle overloaded operators.
There are AST matchers to match all the different nodes of the AST,
narrowing matchers to only match AST nodes fulfilling specific criteria,
and traversal matchers to get from one kind of AST node to another. For
a complete list of AST matchers, take a look at the `AST Matcher
References <LibASTMatchersReference.html>`_
All matchers that are nouns describe entities in the AST and can be
bound, so that they can be referred to whenever a match is found. To do
so, simply call the method ``bind`` on these matchers, e.g.:
.. code-block:: c++
variable(hasType(isInteger())).bind("intvar")
Step 2: Using AST matchers
==========================
Okay, on to using matchers for real. Let's start by defining a matcher
which will capture all ``for`` statements that define a new variable
initialized to zero. Let's start with matching all ``for`` loops:
.. code-block:: c++
forStmt()
Next, we want to specify that a single variable is declared in the first
portion of the loop, so we can extend the matcher to
.. code-block:: c++
forStmt(hasLoopInit(declStmt(hasSingleDecl(varDecl()))))
Finally, we can add the condition that the variable is initialized to
zero.
.. code-block:: c++
forStmt(hasLoopInit(declStmt(hasSingleDecl(varDecl(
hasInitializer(integerLiteral(equals(0))))))))
It is fairly easy to read and understand the matcher definition ("match
loops whose init portion declares a single variable which is initialized
to the integer literal 0"), but deciding that every piece is necessary
is more difficult. Note that this matcher will not match loops whose
variables are initialized to ``'\0'``, ``0.0``, ``NULL``, or any form of
zero besides the integer 0.
The last step is giving the matcher a name and binding the ``ForStmt``
as we will want to do something with it:
.. code-block:: c++
StatementMatcher LoopMatcher =
forStmt(hasLoopInit(declStmt(hasSingleDecl(varDecl(
hasInitializer(integerLiteral(equals(0)))))))).bind("forLoop");
Once you have defined your matchers, you will need to add a little more
scaffolding in order to run them. Matchers are paired with a
``MatchCallback`` and registered with a ``MatchFinder`` object, then run
from a ``ClangTool``. More code!
Add the following to ``LoopConvert.cpp``:
.. code-block:: c++
#include "clang/ASTMatchers/ASTMatchers.h"
#include "clang/ASTMatchers/ASTMatchFinder.h"
using namespace clang;
using namespace clang::ast_matchers;
StatementMatcher LoopMatcher =
forStmt(hasLoopInit(declStmt(hasSingleDecl(varDecl(
hasInitializer(integerLiteral(equals(0)))))))).bind("forLoop");
class LoopPrinter : public MatchFinder::MatchCallback {
public:
virtual void run(const MatchFinder::MatchResult &Result) {
if (const ForStmt *FS = Result.Nodes.getNodeAs<clang::ForStmt>("forLoop"))
FS->dump();
}
};
And change ``main()`` to:
.. code-block:: c++
int main(int argc, const char **argv) {
CommonOptionsParser OptionsParser(argc, argv);
ClangTool Tool(OptionsParser.getCompilations(),
OptionsParser.getSourcePathList());
LoopPrinter Printer;
MatchFinder Finder;
Finder.addMatcher(LoopMatcher, &Printer);
return Tool.run(newFrontendActionFactory(&Finder));
}
Now, you should be able to recompile and run the code to discover for
loops. Create a new file with a few examples, and test out our new
handiwork:
.. code-block:: console
cd ~/clang-llvm/build
ninja loop-convert
vim ~/test-files/simple-loops.cc
bin/loop-convert ~/test-files/simple-loops.cc
Step 3.5: More Complicated Matchers
===================================
Our simple matcher is capable of discovering for loops, but we would
still need to filter out many more ourselves. We can do a good portion
of the remaining work with some cleverly chosen matchers, but first we
need to decide exactly which properties we want to allow.
How can we characterize for loops over arrays which would be eligible
for translation to range-based syntax? We want loops over arrays of
size ``N`` that:
1. start at index ``0``
2. iterate consecutively
3. end at index ``N-1``
We already check for (1), so all we need to add is a check to the loop's
condition to ensure that the loop's index variable is compared against
``N`` and another check to ensure that the increment step just
increments this same variable. The matcher for (2) is straightforward:
require a pre- or post-increment of the same variable declared in the
init portion.
Unfortunately, such a matcher is impossible to write. Matchers contain
no logic for comparing two arbitrary AST nodes and determining whether
or not they are equal, so the best we can do is matching more than we
would like to allow, and punting extra comparisons to the callback.
In any case, we can start building this sub-matcher. We can require that
the increment step be a unary increment like this:
.. code-block:: c++
hasIncrement(unaryOperator(hasOperatorName("++")))
Specifying what is incremented introduces another quirk of Clang's AST:
Usages of variables are represented as ``DeclRefExpr``'s ("declaration
reference expressions") because they are expressions which refer to
variable declarations. To find a ``unaryOperator`` that refers to a
specific declaration, we can simply add a second condition to it:
.. code-block:: c++
hasIncrement(unaryOperator(
hasOperatorName("++"),
hasUnaryOperand(declRefExpr())))
Furthermore, we can restrict our matcher to only match if the
incremented variable is an integer:
.. code-block:: c++
hasIncrement(unaryOperator(
hasOperatorName("++"),
hasUnaryOperand(declRefExpr(to(varDecl(hasType(isInteger())))))))
And the last step will be to attach an identifier to this variable, so
that we can retrieve it in the callback:
.. code-block:: c++
hasIncrement(unaryOperator(
hasOperatorName("++"),
hasUnaryOperand(declRefExpr(to(
varDecl(hasType(isInteger())).bind("incrementVariable"))))))
We can add this code to the definition of ``LoopMatcher`` and make sure
that our program, outfitted with the new matcher, only prints out loops
that declare a single variable initialized to zero and have an increment
step consisting of a unary increment of some variable.
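For reference, one possible shape of the combined matcher at this point is
shown below; the final version, which also checks the loop condition and uses
different bind names, appears in Step 4:
.. code-block:: c++
StatementMatcher LoopMatcher =
    forStmt(hasLoopInit(declStmt(hasSingleDecl(varDecl(
                hasInitializer(integerLiteral(equals(0))))))),
            hasIncrement(unaryOperator(
                hasOperatorName("++"),
                hasUnaryOperand(declRefExpr(to(varDecl(
                    hasType(isInteger())).bind("incrementVariable")))))))
        .bind("forLoop");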
Now, we just need to add a matcher to check if the condition part of the
``for`` loop compares a variable against the size of the array. There is
only one problem - we don't know which array we're iterating over
without looking at the body of the loop! We are again restricted to
approximating the result we want with matchers, filling in the details
in the callback. So we start with:
.. code-block:: c++
hasCondition(binaryOperator(hasOperatorName("<")))
It makes sense to ensure that the left-hand side is a reference to a
variable, and that the right-hand side has integer type.
.. code-block:: c++
hasCondition(binaryOperator(
hasOperatorName("<"),
hasLHS(declRefExpr(to(varDecl(hasType(isInteger()))))),
hasRHS(expr(hasType(isInteger())))))
Why is this not enough? Because it doesn't work. Of the three loops provided
in ``test-files/simple.cpp``, zero of them have a matching condition. A
quick look at the AST dump of the first for loop, produced by the
previous iteration of loop-convert, shows us the answer:
::
(ForStmt 0x173b240
(DeclStmt 0x173afc8
0x173af50 "int i =
(IntegerLiteral 0x173afa8 'int' 0)")
<<<NULL>>>
(BinaryOperator 0x173b060 '_Bool' '<'
(ImplicitCastExpr 0x173b030 'int'
(DeclRefExpr 0x173afe0 'int' lvalue Var 0x173af50 'i' 'int'))
(ImplicitCastExpr 0x173b048 'int'
(DeclRefExpr 0x173b008 'const int' lvalue Var 0x170fa80 'N' 'const int')))
(UnaryOperator 0x173b0b0 'int' lvalue prefix '++'
(DeclRefExpr 0x173b088 'int' lvalue Var 0x173af50 'i' 'int'))
(CompoundStatement ...
We already know that the declaration and increments both match, or this
loop wouldn't have been dumped. The culprit lies in the implicit cast
applied to the first operand (i.e. the LHS) of the less-than operator,
an L-value to R-value conversion applied to the expression referencing
``i``. Thankfully, the matcher library offers a solution to this problem
in the form of ``ignoringParenImpCasts``, which instructs the matcher to
ignore implicit casts and parentheses before continuing to match.
Adjusting the condition operator will restore the desired match.
.. code-block:: c++
hasCondition(binaryOperator(
hasOperatorName("<"),
hasLHS(ignoringParenImpCasts(declRefExpr(
to(varDecl(hasType(isInteger())))))),
hasRHS(expr(hasType(isInteger())))))
After adding binds to the expressions we wished to capture and
extracting the identifier strings into variables, we have array-step-2
completed.
Step 4: Retrieving Matched Nodes
================================
So far, the matcher callback isn't very interesting: it just dumps the
loop's AST. At some point, we will need to make changes to the input
source code. Next, we'll work on using the nodes we bound in the
previous step.
The ``MatchFinder::MatchCallback::run()`` callback takes a
``const MatchFinder::MatchResult &`` as its parameter. We're most interested in
its ``Context`` and ``Nodes`` members. Clang uses the ``ASTContext``
class to represent contextual information about the AST, as the name
implies, though the most functionally important detail is that several
operations require an ``ASTContext*`` parameter. More immediately useful
is the set of matched nodes, and how we retrieve them.
Since we bind three variables (as ``condVarName``, ``initVarName``, and
``incVarName``), we can obtain the matched nodes by
using the ``getNodeAs()`` member function.
In ``LoopConvert.cpp`` add
.. code-block:: c++
#include "clang/AST/ASTContext.h"
Change ``LoopMatcher`` to
.. code-block:: c++
StatementMatcher LoopMatcher =
forStmt(hasLoopInit(declStmt(
hasSingleDecl(varDecl(hasInitializer(integerLiteral(equals(0))))
.bind("initVarName")))),
hasIncrement(unaryOperator(
hasOperatorName("++"),
hasUnaryOperand(declRefExpr(
to(varDecl(hasType(isInteger())).bind("incVarName")))))),
hasCondition(binaryOperator(
hasOperatorName("<"),
hasLHS(ignoringParenImpCasts(declRefExpr(
to(varDecl(hasType(isInteger())).bind("condVarName"))))),
hasRHS(expr(hasType(isInteger())))))).bind("forLoop");
And change ``LoopPrinter::run`` to
.. code-block:: c++
void LoopPrinter::run(const MatchFinder::MatchResult &Result) {
ASTContext *Context = Result.Context;
const ForStmt *FS = Result.Nodes.getStmtAs<ForStmt>("forLoop");
// We do not want to convert header files!
if (!FS || !Context->getSourceManager().isFromMainFile(FS->getForLoc()))
return;
const VarDecl *IncVar = Result.Nodes.getNodeAs<VarDecl>("incVarName");
const VarDecl *CondVar = Result.Nodes.getNodeAs<VarDecl>("condVarName");
const VarDecl *InitVar = Result.Nodes.getNodeAs<VarDecl>("initVarName");
if (!areSameVariable(IncVar, CondVar) || !areSameVariable(IncVar, InitVar))
return;
llvm::outs() << "Potential array-based loop discovered.\n";
}
Clang associates a ``VarDecl`` with each variable to represent the variable's
declaration. Since the "canonical" form of each declaration is unique by
address, all we need to do is make sure neither ``ValueDecl`` (base class of
``VarDecl``) is ``NULL`` and compare the canonical Decls.
.. code-block:: c++
static bool areSameVariable(const ValueDecl *First, const ValueDecl *Second) {
return First && Second &&
First->getCanonicalDecl() == Second->getCanonicalDecl();
}
If execution reaches the end of ``LoopPrinter::run()``, we know that the
matched loop has a shell that looks like
.. code-block:: c++
for (int i = 0; i < expr(); ++i) { ... }
For now, we will just print a message explaining that we found a loop.
The next section will deal with recursively traversing the AST to
discover all changes needed.
As a side note, it's not as trivial to test if two expressions are the same,
though Clang has already done the hard work for us by providing a way to
canonicalize expressions:
.. code-block:: c++
static bool areSameExpr(ASTContext *Context, const Expr *First,
const Expr *Second) {
if (!First || !Second)
return false;
llvm::FoldingSetNodeID FirstID, SecondID;
First->Profile(FirstID, *Context, true);
Second->Profile(SecondID, *Context, true);
return FirstID == SecondID;
}
This code relies on the comparison between two
``llvm::FoldingSetNodeID`` objects. As the documentation for
``Stmt::Profile()`` indicates, the ``Profile()`` member function builds
a description of a node in the AST, based on its properties, along with
those of its children. ``FoldingSetNodeID`` then serves as a hash we can
use to compare expressions. We will need ``areSameExpr`` later. Before
you run the new code on the additional loops added to
test-files/simple.cpp, try to figure out which ones will be considered
potentially convertible.
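As a sketch of the distinctions the matcher now draws, consider a hypothetical
test file along these lines (the file contents below are invented for
illustration):
.. code-block:: c++
// Hypothetical contents of a test file with a few candidate loops.
const int N = 10;
int Arr[N];
void CandidateLoops() {
  for (int i = 0; i < N; ++i)    // matched: starts at 0, unary ++, and the
    Arr[i] = i;                  // same integer variable is used throughout
  for (int i = 0; i < N; i += 2) // not matched: the increment is not a unary ++
    Arr[i] = 0;
  for (int j = 1; j < N; ++j)    // not matched: not initialized to the literal 0
    Arr[j] = j;
}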
@ -0,0 +1,56 @@
=========
LibFormat
=========
LibFormat is a library that implements automatic source code formatting based
on Clang. This document describes the LibFormat interface and design as well
as some basic style discussions.
If you just want to use `clang-format` as a tool or integrated into an editor,
check out :doc:`ClangFormat`.
Design
------
FIXME: Write up design.
Interface
---------
The core routine of LibFormat is ``reformat()``:
.. code-block:: c++
tooling::Replacements reformat(const FormatStyle &Style, Lexer &Lex,
SourceManager &SourceMgr,
std::vector<CharSourceRange> Ranges);
This reads a token stream out of the lexer ``Lex`` and reformats all the code
ranges in ``Ranges``. The ``FormatStyle`` controls basic decisions made during
formatting. A list of options can be found under :ref:`style-options`.
.. _style-options:
Style Options
-------------
The style options describe specific formatting options that can be used in
order to make `ClangFormat` comply with different style guides. Currently,
two style guides are hard-coded:
.. code-block:: c++
/// \brief Returns a format style complying with the LLVM coding standards:
/// http://llvm.org/docs/CodingStandards.html.
FormatStyle getLLVMStyle();
/// \brief Returns a format style complying with Google's C++ style guide:
/// http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml.
FormatStyle getGoogleStyle();
These options are also exposed in the :doc:`standalone tools <ClangFormat>`
through the `-style` option.
In the future, we plan on making this configurable.
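As a purely illustrative sketch, a tool might map a style name onto these
factory functions roughly as follows; the ``styleForName`` helper and the
sample ``main()`` are assumptions made for this example, not part of
LibFormat:
.. code-block:: c++
#include "clang/Format/Format.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/raw_ostream.h"
// Hypothetical helper mirroring what a -style option might do.
static clang::format::FormatStyle styleForName(llvm::StringRef Name) {
  if (Name == "Google" || Name == "google")
    return clang::format::getGoogleStyle();
  return clang::format::getLLVMStyle(); // fall back to the LLVM style
}
int main() {
  clang::format::FormatStyle Style = styleForName("LLVM");
  llvm::outs() << "Column limit: " << Style.ColumnLimit << "\n";
  return 0;
}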
@ -0,0 +1,192 @@
==========
LibTooling
==========
LibTooling is a library to support writing standalone tools based on Clang.
This document will provide a basic walkthrough of how to write a tool using
LibTooling.
For information on how to set up Clang Tooling for LLVM, see
:doc:`HowToSetupToolingForLLVM`
Introduction
------------
Tools built with LibTooling, like Clang Plugins, run ``FrontendActions`` over
code.
.. See FIXME for a tutorial on how to write FrontendActions.
In this tutorial, we'll demonstrate the different ways of running Clang's
``SyntaxOnlyAction``, which runs a quick syntax check, over a bunch of code.
Parsing a code snippet in memory
--------------------------------
If you ever wanted to run a ``FrontendAction`` over some sample code, for
example to unit test parts of the Clang AST, ``runToolOnCode`` is what you
are looking for. Let me give you an example:
.. code-block:: c++
#include "clang/Tooling/Tooling.h"
TEST(runToolOnCode, CanSyntaxCheckCode) {
// runToolOnCode returns whether the action was correctly run over the
// given code.
EXPECT_TRUE(runToolOnCode(new clang::SyntaxOnlyAction, "class X {};"));
}
Writing a standalone tool
-------------------------
Once you have unit tested your ``FrontendAction`` to the point where it cannot
possibly break, it's time to create a standalone tool. For a standalone tool
to run Clang, it first needs to figure out what command-line arguments to use
for a specified file. To that end we create a ``CompilationDatabase``. There
are different ways to create a compilation database, and we need to support all
of them depending on command-line options. The ``CommonOptionsParser`` class
takes responsibility for parsing command-line parameters related to
compilation databases and inputs, so that all tools share the implementation.
Parsing common tools options
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
``CompilationDatabase`` can be read from a build directory or the command line.
Using ``CommonOptionsParser`` allows for explicit specification of a compile
command line, specification of build path using the ``-p`` command-line option,
and automatic location of the compilation database using source files paths.
.. code-block:: c++
#include "clang/Tooling/CommonOptionsParser.h"
using namespace clang::tooling;
int main(int argc, const char **argv) {
// CommonOptionsParser constructor will parse arguments and create a
// CompilationDatabase. In case of error it will terminate the program.
CommonOptionsParser OptionsParser(argc, argv);
// Use OptionsParser.getCompilations() and OptionsParser.getSourcePathList()
// to retrieve CompilationDatabase and the list of input file paths.
}
Creating and running a ClangTool
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Once we have a ``CompilationDatabase``, we can create a ``ClangTool`` and run
our ``FrontendAction`` over some code. For example, to run the
``SyntaxOnlyAction`` over the files "a.cc" and "b.cc" one would write:
.. code-block:: c++
// A clang tool can run over a number of sources in the same process...
std::vector<std::string> Sources;
Sources.push_back("a.cc");
Sources.push_back("b.cc");
// We hand the CompilationDatabase we created and the sources to run over into
// the tool constructor.
ClangTool Tool(OptionsParser.getCompilations(), Sources);
// The ClangTool needs a new FrontendAction for each translation unit we run
// on. Thus, it takes a FrontendActionFactory as parameter. To create a
// FrontendActionFactory from a given FrontendAction type, we call
// newFrontendActionFactory<clang::SyntaxOnlyAction>().
int result = Tool.run(newFrontendActionFactory<clang::SyntaxOnlyAction>());
Putting it together --- the first tool
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Now we combine the two previous steps into our first real tool. This example
tool is also checked into the clang tree at
``tools/clang-check/ClangCheck.cpp``.
.. code-block:: c++
// Declares clang::SyntaxOnlyAction.
#include "clang/Frontend/FrontendActions.h"
#include "clang/Tooling/CommonOptionsParser.h"
#include "clang/Tooling/Tooling.h"
// Declares llvm::cl::extrahelp.
#include "llvm/Support/CommandLine.h"
using namespace clang::tooling;
using namespace llvm;
// CommonOptionsParser declares HelpMessage with a description of the common
// command-line options related to the compilation database and input files.
// It's nice to have this help message in all tools.
static cl::extrahelp CommonHelp(CommonOptionsParser::HelpMessage);
// A help message for this specific tool can be added afterwards.
static cl::extrahelp MoreHelp("\nMore help text...");
int main(int argc, const char **argv) {
CommonOptionsParser OptionsParser(argc, argv);
ClangTool Tool(OptionsParser.getCompilations(),
OptionsParser.getSourcePathList());
return Tool.run(newFrontendActionFactory<clang::SyntaxOnlyAction>());
}
Running the tool on some code
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
When you check out and build clang, clang-check is already built and available
to you in bin/clang-check inside your build directory.
You can run clang-check on a file in the llvm repository by specifying all the
needed parameters after a "``--``" separator:
.. code-block:: bash
$ cd /path/to/source/llvm
$ export BD=/path/to/build/llvm
$ $BD/bin/clang-check tools/clang/tools/clang-check/ClangCheck.cpp -- \
clang++ -D__STDC_CONSTANT_MACROS -D__STDC_LIMIT_MACROS \
-Itools/clang/include -I$BD/include -Iinclude \
-Itools/clang/lib/Headers -c
As an alternative, you can also configure cmake to output a compile command
database into its build directory:
.. code-block:: bash
# Alternatively to calling cmake, use ccmake, toggle to advanced mode and
# set the parameter CMAKE_EXPORT_COMPILE_COMMANDS from the UI.
$ cmake -DCMAKE_EXPORT_COMPILE_COMMANDS=ON .
This creates a file called ``compile_commands.json`` in the build directory.
Now you can run :program:`clang-check` over files in the project by specifying
the build path as first argument and some source files as further positional
arguments:
.. code-block:: bash
$ cd /path/to/source/llvm
$ export BD=/path/to/build/llvm
$ $BD/bin/clang-check -p $BD tools/clang/tools/clang-check/ClangCheck.cpp
.. _libtooling_builtin_includes:
Builtin includes
^^^^^^^^^^^^^^^^
Clang tools need their builtin headers and search for them the same way Clang
does. Thus, the default location to look for builtin headers is in a path
``$(dirname /path/to/tool)/../lib/clang/3.4/include`` relative to the tool
binary. This works out-of-the-box for tools running from llvm's toplevel
binary directory after building clang-headers, or if the tool is running from
the binary directory of a clang install next to the clang binary.
Tip: if your tool fails to find ``stddef.h`` or similar headers, call the tool
with ``-v`` and look at the search paths it looks through.
Linking
^^^^^^^
For a list of libraries to link, look at one of the tools' Makefiles (for
example `clang-check/Makefile
<http://llvm.org/viewvc/llvm-project/cfe/trunk/tools/clang-check/Makefile?view=markup>`_).
tools/clang/docs/Makefile Normal file
@ -0,0 +1,102 @@
##===- docs/Makefile ---------------------------------------*- Makefile -*-===##
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
##===----------------------------------------------------------------------===##
CLANG_LEVEL := ..
DIRS := tools
ifdef BUILD_FOR_WEBSITE
PROJ_OBJ_DIR = .
DOXYGEN = doxygen
$(PROJ_OBJ_DIR)/doxygen.cfg: doxygen.cfg.in
cat $< | sed \
-e 's/@abs_srcdir@/./g' \
-e 's/@DOT@/dot/g' \
-e 's/@PACKAGE_VERSION@/mainline/' \
-e 's/@abs_builddir@/./g' \
-e 's/@enable_searchengine@/NO/g' \
-e 's/@searchengine_url@//g' \
-e 's/@enable_server_based_search@/NO/g' \
-e 's/@enable_external_search@/NO/g' \
-e 's/@extra_search_mappings@//g' > $@
endif
include $(CLANG_LEVEL)/Makefile
HTML := $(wildcard $(PROJ_SRC_DIR)/*.html) \
$(wildcard $(PROJ_SRC_DIR)/*.css)
#IMAGES := $(wildcard $(PROJ_SRC_DIR)/img/*.*)
DOXYFILES := doxygen.cfg.in doxygen.css doxygen.footer doxygen.header \
doxygen.intro
EXTRA_DIST := $(HTML) $(DOXYFILES) llvm.css CommandGuide img
.PHONY: install-html install-doxygen doxygen generated
install_targets :=
ifndef ONLY_MAN_DOCS
install_targets += install-html
endif
ifeq ($(ENABLE_DOXYGEN),1)
install_targets += install-doxygen
endif
install-local:: $(install_targets)
# Live documentation is generated for the web site using this target:
# 'make generated BUILD_FOR_WEBSITE=1'
generated:: doxygen
install-html: $(PROJ_OBJ_DIR)/html.tar.gz
$(Echo) Installing HTML documentation
$(Verb) $(MKDIR) $(DESTDIR)$(PROJ_docsdir)/html
$(Verb) $(MKDIR) $(DESTDIR)$(PROJ_docsdir)/html/img
$(Verb) $(DataInstall) $(HTML) $(DESTDIR)$(PROJ_docsdir)/html
# $(Verb) $(DataInstall) $(IMAGES) $(DESTDIR)$(PROJ_docsdir)/html/img
$(Verb) $(DataInstall) $(PROJ_OBJ_DIR)/html.tar.gz $(DESTDIR)$(PROJ_docsdir)
$(PROJ_OBJ_DIR)/html.tar.gz: $(HTML)
$(Echo) Packaging HTML documentation
$(Verb) $(RM) -rf $@ $(PROJ_OBJ_DIR)/html.tar
$(Verb) cd $(PROJ_SRC_DIR) && \
$(TAR) cf $(PROJ_OBJ_DIR)/html.tar *.html
$(Verb) $(GZIPBIN) $(PROJ_OBJ_DIR)/html.tar
install-doxygen: doxygen
$(Echo) Installing doxygen documentation
$(Verb) $(MKDIR) $(DESTDIR)$(PROJ_docsdir)/html/doxygen
$(Verb) $(DataInstall) $(PROJ_OBJ_DIR)/doxygen.tar.gz $(DESTDIR)$(PROJ_docsdir)
$(Verb) cd $(PROJ_OBJ_DIR)/doxygen && \
$(FIND) . -type f -exec \
$(DataInstall) {} $(DESTDIR)$(PROJ_docsdir)/html/doxygen \;
doxygen: regendoc $(PROJ_OBJ_DIR)/doxygen.tar.gz
regendoc:
$(Echo) Building doxygen documentation
$(Verb) $(RM) -rf $(PROJ_OBJ_DIR)/doxygen
$(Verb) $(DOXYGEN) $(PROJ_OBJ_DIR)/doxygen.cfg
$(Verb) sed -i "s/[$$]LatestRev[$$]/`svnversion $(PROJ_SRC_DIR)`/g" \
$(PROJ_OBJ_DIR)/doxygen/html/*.html
$(PROJ_OBJ_DIR)/doxygen.tar.gz: $(DOXYFILES) $(PROJ_OBJ_DIR)/doxygen.cfg
$(Echo) Packaging doxygen documentation
$(Verb) $(RM) -rf $@ $(PROJ_OBJ_DIR)/doxygen.tar
$(Verb) $(TAR) cf $(PROJ_OBJ_DIR)/doxygen.tar doxygen
$(Verb) $(GZIPBIN) $(PROJ_OBJ_DIR)/doxygen.tar
$(Verb) $(CP) $(PROJ_OBJ_DIR)/doxygen.tar.gz $(PROJ_OBJ_DIR)/doxygen/html/
userloc: $(LLVM_SRC_ROOT)/docs/userloc.html
$(LLVM_SRC_ROOT)/docs/userloc.html:
$(Echo) Making User LOC Table
$(Verb) cd $(LLVM_SRC_ROOT) ; ./utils/userloc.pl -details -recurse \
-html lib include tools runtime utils examples autoconf test > docs/userloc.html
uninstall-local::
$(Echo) Uninstalling Documentation
$(Verb) $(RM) -rf $(DESTDIR)$(PROJ_docsdir)
@ -0,0 +1,163 @@
# Makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
PAPER =
BUILDDIR = _build
# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext default
default: html
help:
@echo "Please use \`make <target>' where <target> is one of"
@echo " html to make standalone HTML files"
@echo " dirhtml to make HTML files named index.html in directories"
@echo " singlehtml to make a single large HTML file"
@echo " pickle to make pickle files"
@echo " json to make JSON files"
@echo " htmlhelp to make HTML files and a HTML help project"
@echo " qthelp to make HTML files and a qthelp project"
@echo " devhelp to make HTML files and a Devhelp project"
@echo " epub to make an epub"
@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
@echo " latexpdf to make LaTeX files and run them through pdflatex"
@echo " text to make text files"
@echo " man to make manual pages"
@echo " texinfo to make Texinfo files"
@echo " info to make Texinfo files and run them through makeinfo"
@echo " gettext to make PO message catalogs"
@echo " changes to make an overview of all changed/added/deprecated items"
@echo " linkcheck to check all external links for integrity"
@echo " doctest to run all doctests embedded in the documentation (if enabled)"
clean:
-rm -rf $(BUILDDIR)/*
html:
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
@echo
@# FIXME: Remove this `cp` once HTML->Sphinx transition is completed.
@# Kind of a hack, but HTML-formatted docs are on the way out anyway.
@echo "Copying legacy HTML-formatted docs into $(BUILDDIR)/html"
@cp -a *.html $(BUILDDIR)/html
@# FIXME: What we really need is a way to specify redirects, so that
@# we can just redirect to a reST'ified version of this document.
@# PR14714 is tracking the issue of redirects.
@cp -a Block-ABI-Apple.txt $(BUILDDIR)/html
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
dirhtml:
$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
singlehtml:
$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
@echo
@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
pickle:
$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
@echo
@echo "Build finished; now you can process the pickle files."
json:
$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
@echo
@echo "Build finished; now you can process the JSON files."
htmlhelp:
$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
@echo
@echo "Build finished; now you can run HTML Help Workshop with the" \
".hhp project file in $(BUILDDIR)/htmlhelp."
qthelp:
$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
@echo
@echo "Build finished; now you can run "qcollectiongenerator" with the" \
".qhcp project file in $(BUILDDIR)/qthelp, like this:"
@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Clang.qhcp"
@echo "To view the help file:"
@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Clang.qhc"
devhelp:
$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
@echo
@echo "Build finished."
@echo "To view the help file:"
@echo "# mkdir -p $$HOME/.local/share/devhelp/Clang"
@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Clang"
@echo "# devhelp"
epub:
$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
@echo
@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
latex:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo
@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
@echo "Run \`make' in that directory to run these through (pdf)latex" \
"(use \`make latexpdf' here to do that automatically)."
latexpdf:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo "Running LaTeX files through pdflatex..."
$(MAKE) -C $(BUILDDIR)/latex all-pdf
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
text:
$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
@echo
@echo "Build finished. The text files are in $(BUILDDIR)/text."
man:
$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
@echo
@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
texinfo:
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
@echo
@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
@echo "Run \`make' in that directory to run these through makeinfo" \
"(use \`make info' here to do that automatically)."
info:
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
@echo "Running Texinfo files through makeinfo..."
make -C $(BUILDDIR)/texinfo info
@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
gettext:
$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
@echo
@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
changes:
$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
@echo
@echo "The overview file is in $(BUILDDIR)/changes."
linkcheck:
$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
@echo
@echo "Link check complete; look for any errors in the above output " \
"or in $(BUILDDIR)/linkcheck/output.txt."
doctest:
$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
@echo "Testing of doctests in the sources finished, look at the " \
"results in $(BUILDDIR)/doctest/output.txt."
@ -0,0 +1,189 @@
================
MemorySanitizer
================
.. contents::
:local:
Introduction
============
MemorySanitizer is a detector of uninitialized reads. It consists of a
compiler instrumentation module and a run-time library.
Typical slowdown introduced by MemorySanitizer is **3x**.
How to build
============
Follow the `clang build instructions <../get_started.html>`_. CMake
build is supported.
Usage
=====
Simply compile and link your program with ``-fsanitize=memory`` flag.
The MemorySanitizer run-time library should be linked to the final
executable, so make sure to use ``clang`` (not ``ld``) for the final
link step. When linking shared libraries, the MemorySanitizer run-time
is not linked, so ``-Wl,-z,defs`` may cause link errors (don't use it
with MemorySanitizer). To get a reasonable performance add ``-O1`` or
higher. To get meaningful stack traces in error messages add
``-fno-omit-frame-pointer``. To get perfect stack traces you may need
to disable inlining (just use ``-O1``) and tail call elimination
(``-fno-optimize-sibling-calls``).
.. code-block:: console
% cat umr.cc
#include <stdio.h>
int main(int argc, char** argv) {
int* a = new int[10];
a[5] = 0;
if (a[argc])
printf("xx\n");
return 0;
}
% clang -fsanitize=memory -fno-omit-frame-pointer -g -O2 umr.cc
If a bug is detected, the program will print an error message to
stderr and exit with a non-zero exit code. Currently, MemorySanitizer
does not symbolize its output by default, so you may need to use a
separate script to symbolize the result offline (this will be fixed in
future).
.. code-block:: console
% ./a.out 2>log
% projects/compiler-rt/lib/asan/scripts/asan_symbolize.py / < log | c++filt
==30106== WARNING: MemorySanitizer: UMR (uninitialized-memory-read)
#0 0x7f45944b418a in main umr.cc:6
#1 0x7f45938b676c in __libc_start_main libc-start.c:226
Exiting
By default, MemorySanitizer exits on the first detected error.
``__has_feature(memory_sanitizer)``
------------------------------------
In some cases one may need to execute different code depending on
whether MemorySanitizer is enabled. :ref:`\_\_has\_feature
<langext-__has_feature-__has_extension>` can be used for this purpose.
.. code-block:: c
#if defined(__has_feature)
# if __has_feature(memory_sanitizer)
// code that builds only under MemorySanitizer
# endif
#endif
``__attribute__((no_sanitize_memory))``
-----------------------------------------------
Some code should not be checked by MemorySanitizer.
One may use the function attribute
:ref:`no_sanitize_memory <langext-memory_sanitizer>`
to disable uninitialized checks in a particular function.
MemorySanitizer may still instrument such functions to avoid false positives.
This attribute may not be
supported by other compilers, so we suggest using it together with
``__has_feature(memory_sanitizer)``.
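For example, a guarded attribute macro might look like the following sketch;
the ``NO_SANITIZE_MEMORY`` macro name is just an illustrative choice, not
something provided by the runtime:
.. code-block:: c++
// Define the attribute only when building under MemorySanitizer.
#if defined(__has_feature)
# if __has_feature(memory_sanitizer)
#  define NO_SANITIZE_MEMORY __attribute__((no_sanitize_memory))
# endif
#endif
#ifndef NO_SANITIZE_MEMORY
# define NO_SANITIZE_MEMORY
#endif
// Deliberately reads memory that may be uninitialized; with the attribute
// applied, MemorySanitizer will not report the read inside this function.
NO_SANITIZE_MEMORY
int ReadUnchecked(const int *p) {
  return *p;
}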
Blacklist
---------
MemorySanitizer supports ``src`` and ``fun`` entity types in
:doc:`SanitizerSpecialCaseList`, which can be used to relax MemorySanitizer
checks for certain source files and functions. All "Use of uninitialized value"
warnings will be suppressed and all values loaded from memory will be
considered fully initialized.
Origin Tracking
===============
MemorySanitizer can track origins of uninitialized values, similar to
Valgrind's ``--track-origins`` option. This feature is enabled by the
``-fsanitize-memory-track-origins`` Clang option. With the code from
the example above,
.. code-block:: console
% clang -fsanitize=memory -fsanitize-memory-track-origins -fno-omit-frame-pointer -g -O2 umr.cc
% ./a.out 2>log
% projects/compiler-rt/lib/asan/scripts/asan_symbolize.py / < log | c++filt
==14425== WARNING: MemorySanitizer: UMR (uninitialized-memory-read)
==14425== WARNING: Trying to symbolize code, but external symbolizer is not initialized!
#0 0x7f8bdda3824b in main umr.cc:6
#1 0x7f8bdce3a76c in __libc_start_main libc-start.c:226
raw origin id: 2030043137
ORIGIN: heap allocation:
#0 0x7f8bdda4034b in operator new[](unsigned long) msan_new_delete.cc:39
#1 0x7f8bdda3814d in main umr.cc:4
#2 0x7f8bdce3a76c in __libc_start_main libc-start.c:226
Exiting
Origin tracking has proved to be very useful for debugging UMR
reports. It slows down program execution by a factor of 1.5x-2x on top
of the usual MemorySanitizer slowdown.
Handling external code
============================
MemorySanitizer requires that all program code is instrumented. This
also includes any libraries that the program depends on, even libc.
Failing to achieve this may result in false UMR reports.
Full MemorySanitizer instrumentation is very difficult to achieve. To
make it easier, the MemorySanitizer runtime library includes 70+
interceptors for the most common libc functions. They make it possible
to run MemorySanitizer-instrumented programs linked with
uninstrumented libc. For example, the authors were able to bootstrap
a MemorySanitizer-instrumented Clang compiler by linking it with
self-built instrumented libcxx (as a replacement for libstdc++).
In cases where rebuilding all program dependencies with
MemorySanitizer is problematic, an experimental MSanDR tool can be
used. It is a DynamoRIO-based tool that uses dynamic instrumentation
to avoid false positives due to uninstrumented code. The tool simply
marks memory from instrumented libraries as fully initialized. See
`http://code.google.com/p/memory-sanitizer/wiki/Running#Running_with_the_dynamic_tool`
for more information.
Supported Platforms
===================
MemorySanitizer is supported on
* Linux x86\_64 (tested on Ubuntu 10.04 and 12.04);
Limitations
===========
* MemorySanitizer uses 2x more real memory than a native run, 3x with
origin tracking.
* MemorySanitizer maps (but does not reserve) 64 Terabytes of virtual
address space. This means that tools like ``ulimit`` may not work as
expected.
* Static linking is not supported.
* Non-position-independent executables are not supported. Therefore, the
``-fsanitize=memory`` flag will cause Clang to act as though the ``-fPIE``
flag had been supplied if compiling without ``-fPIC``, and as though the
``-pie`` flag had been supplied if linking an executable.
* Depending on the version of the Linux kernel, running without ASLR may
not be supported. Note that GDB disables ASLR by default. To debug
instrumented programs, use "set disable-randomization off".
Current Status
==============
MemorySanitizer is an experimental tool. It is known to work on large
real-world programs, like Clang/LLVM itself.
More Information
================
`http://code.google.com/p/memory-sanitizer <http://code.google.com/p/memory-sanitizer/>`_
@ -0,0 +1,769 @@
=======
Modules
=======
.. warning::
The functionality described on this page is supported for C and
Objective-C. C++ support is experimental.
.. contents::
:local:
Introduction
============
Most software is built using a number of software libraries, including libraries supplied by the platform, internal libraries built as part of the software itself to provide structure, and third-party libraries. For each library, one needs to access both its interface (API) and its implementation. In the C family of languages, the interface to a library is accessed by including the appropriate header file(s):
.. code-block:: c
#include <SomeLib.h>
The implementation is handled separately by linking against the appropriate library, for example by passing ``-lSomeLib`` to the linker.
Modules provide an alternative, simpler way to use software libraries that provides better compile-time scalability and eliminates many of the problems inherent to using the C preprocessor to access the API of a library.
Problems with the current model
-------------------------------
The ``#include`` mechanism provided by the C preprocessor is a very poor way to access the API of a library, for a number of reasons:
* **Compile-time scalability**: Each time a header is included, the
compiler must preprocess and parse the text in that header and every
header it includes, transitively. This process must be repeated for
every translation unit in the application, which involves a huge
amount of redundant work. In a project with *N* translation units
and *M* headers included in each translation unit, the compiler is
performing *M x N* work even though most of the *M* headers are
shared among multiple translation units. C++ is particularly bad,
because the compilation model for templates forces a huge amount of
code into headers.
* **Fragility**: ``#include`` directives are treated as textual
inclusion by the preprocessor, and are therefore subject to any
active macro definitions at the time of inclusion. If any of the
active macro definitions happens to collide with a name in the
library, it can break the library API or cause compilation failures
in the library header itself. For an extreme example,
``#define std "The C++ Standard"`` and then include a standard
library header: the result is a horrific cascade of failures in the
C++ Standard Library's implementation. More subtle real-world
problems occur when the headers for two different libraries interact
due to macro collisions, and users are forced to reorder
``#include`` directives or introduce ``#undef`` directives to break
the (unintended) dependency.
* **Conventional workarounds**: C programmers have
adopted a number of conventions to work around the fragility of the
C preprocessor model. Include guards, for example, are required for
the vast majority of headers to ensure that multiple inclusion
doesn't break the compile. Macro names are written with
``LONG_PREFIXED_UPPERCASE_IDENTIFIERS`` to avoid collisions, and some
library/framework developers even use ``__underscored`` names
in headers to avoid collisions with "normal" names that (by
convention) shouldn't even be macros. These conventions are a
barrier to entry for developers coming from non-C languages, are
boilerplate for more experienced developers, and make our headers
far uglier than they should be.
* **Tool confusion**: In a C-based language, it is hard to build tools
that work well with software libraries, because the boundaries of
the libraries are not clear. Which headers belong to a particular
library, and in what order should those headers be included to
guarantee that they compile correctly? Are the headers C, C++,
Objective-C++, or one of the variants of these languages? What
declarations in those headers are actually meant to be part of the
API, and what declarations are present only because they had to be
written as part of the header file?
Semantic import
---------------
Modules improve access to the API of software libraries by replacing the textual preprocessor inclusion model with a more robust, more efficient semantic model. From the user's perspective, the code looks only slightly different, because one uses an ``import`` declaration rather than a ``#include`` preprocessor directive:
.. code-block:: c
import std.io; // pseudo-code; see below for syntax discussion
However, this module import behaves quite differently from the corresponding ``#include <stdio.h>``: when the compiler sees the module import above, it loads a binary representation of the ``std.io`` module and makes its API available to the application directly. Preprocessor definitions that precede the import declaration have no impact on the API provided by ``std.io``, because the module itself was compiled as a separate, standalone module. Additionally, any linker flags required to use the ``std.io`` module will automatically be provided when the module is imported [#]_
This semantic import model addresses many of the problems of the preprocessor inclusion model:
* **Compile-time scalability**: The ``std.io`` module is only compiled once, and importing the module into a translation unit is a constant-time operation (independent of module system). Thus, the API of each software library is only parsed once, reducing the *M x N* compilation problem to an *M + N* problem.
* **Fragility**: Each module is parsed as a standalone entity, so it has a consistent preprocessor environment. This completely eliminates the need for ``__underscored`` names and similarly defensive tricks. Moreover, the current preprocessor definitions when an import declaration is encountered are ignored, so one software library can not affect how another software library is compiled, eliminating include-order dependencies.
* **Tool confusion**: Modules describe the API of software libraries, and tools can reason about and present a module as a representation of that API. Because modules can only be built standalone, tools can rely on the module definition to ensure that they get the complete API for the library. Moreover, modules can specify which languages they work with, so, e.g., one can not accidentally attempt to load a C++ module into a C program.
Problems modules do not solve
-----------------------------
Many programming languages have a module or package system, and because of the variety of features provided by these languages it is important to define what modules do *not* do. In particular, all of the following are considered out-of-scope for modules:
* **Rewrite the world's code**: It is not realistic to require applications or software libraries to make drastic or non-backward-compatible changes, nor is it feasible to completely eliminate headers. Modules must interoperate with existing software libraries and allow a gradual transition.
* **Versioning**: Modules have no notion of version information. Programmers must still rely on the existing versioning mechanisms of the underlying language (if any exist) to version software libraries.
* **Namespaces**: Unlike in some languages, modules do not imply any notion of namespaces. Thus, a struct declared in one module will still conflict with a struct of the same name declared in a different module, just as they would if declared in two different headers. This aspect is important for backward compatibility, because (for example) the mangled names of entities in software libraries must not change when introducing modules.
* **Binary distribution of modules**: Headers (particularly C++ headers) expose the full complexity of the language. Maintaining a stable binary module format across architectures, compiler versions, and compiler vendors is technically infeasible.
Using Modules
=============
To enable modules, pass the command-line flag ``-fmodules`` [#]_. This will make any modules-enabled software libraries available as modules as well as introducing any modules-specific syntax. Additional `command-line parameters`_ are described in a separate section later.
Objective-C Import declaration
------------------------------
Objective-C provides syntax for importing a module via an *@import declaration*, which imports the named module:
.. parsed-literal::
@import std;
The @import declaration above imports the entire contents of the ``std`` module (which would contain, e.g., the entire C or C++ standard library) and makes its API available within the current translation unit. To import only part of a module, one may use dot syntax to specify a particular submodule, e.g.,
.. parsed-literal::
@import std.io;
Redundant import declarations are ignored, and one is free to import modules at any point within the translation unit, so long as the import declaration is at global scope.
At present, there is no C or C++ syntax for import declarations. Clang
will track the modules proposal in the C++ committee. See the section
`Includes as imports`_ to see how modules get imported today.
Includes as imports
-------------------
The primary user-level feature of modules is the import operation, which provides access to the API of software libraries. However, today's programs make extensive use of ``#include``, and it is unrealistic to assume that all of this code will change overnight. Instead, modules automatically translate ``#include`` directives into the corresponding module import. For example, the include directive
.. code-block:: c
#include <stdio.h>
will be automatically mapped to an import of the module ``std.io``. Even with specific ``import`` syntax in the language, this particular feature is important for both adoption and backward compatibility: automatic translation of ``#include`` to ``import`` allows an application to get the benefits of modules (for all modules-enabled libraries) without any changes to the application itself. Thus, users can easily use modules with one compiler while falling back to the preprocessor-inclusion mechanism with other compilers.
.. note::
The automatic mapping of ``#include`` to ``import`` also solves an implementation problem: importing a module with a definition of some entity (say, a ``struct Point``) and then parsing a header containing another definition of ``struct Point`` would cause a redefinition error, even if it is the same ``struct Point``. By mapping ``#include`` to ``import``, the compiler can guarantee that it always sees just the already-parsed definition from the module.
Module maps
-----------
The crucial link between modules and headers is described by a *module map*, which describes how a collection of existing headers maps on to the (logical) structure of a module. For example, one could imagine a module ``std`` covering the C standard library. Each of the C standard library headers (``<stdio.h>``, ``<stdlib.h>``, ``<math.h>``, etc.) would contribute to the ``std`` module, by placing their respective APIs into the corresponding submodule (``std.io``, ``std.lib``, ``std.math``, etc.). Having a list of the headers that are part of the ``std`` module allows the compiler to build the ``std`` module as a standalone entity, and having the mapping from header names to (sub)modules allows the automatic translation of ``#include`` directives to module imports.
Module maps are specified as separate files (each named ``module.map``) alongside the headers they describe, which allows them to be added to existing software libraries without having to change the library headers themselves (in most cases [#]_). The actual `Module map language`_ is described in a later section.
.. note::
To actually see any benefits from modules, one first has to introduce module maps for the underlying C standard library and the libraries and headers on which it depends. The section `Modularizing a Platform`_ describes the steps one must take to write these module maps.
One can use module maps without modules to check the integrity of the use of header files. To do this, use the ``-fmodule-maps`` option instead of the ``-fmodules`` option.
Compilation model
-----------------
The binary representation of modules is automatically generated by the compiler on an as-needed basis. When a module is imported (e.g., by an ``#include`` of one of the module's headers), the compiler will spawn a second instance of itself [#]_, with a fresh preprocessing context [#]_, to parse just the headers in that module. The resulting Abstract Syntax Tree (AST) is then persisted into the binary representation of the module that is then loaded into the translation unit where the module import was encountered.
The binary representation of modules is persisted in the *module cache*. Imports of a module will first query the module cache and, if a binary representation of the required module is already available, will load that representation directly. Thus, a module's headers will only be parsed once per language configuration, rather than once per translation unit that uses the module.
Modules maintain references to each of the headers that were part of the module build. If any of those headers changes, or if any of the modules on which a module depends change, then the module will be (automatically) recompiled. The process should never require any user intervention.
Command-line parameters
-----------------------
``-fmodules``
Enable the modules feature (EXPERIMENTAL).
``-fcxx-modules``
Enable the modules feature for C++ (EXPERIMENTAL and VERY BROKEN).
``-fmodule-maps``
Enable interpretation of module maps (EXPERIMENTAL). This option is implied by ``-fmodules``.
``-fmodules-cache-path=<directory>``
Specify the path to the modules cache. If not provided, Clang will select a system-appropriate default.
``-fno-autolink``
Disable automatic linking against the libraries associated with imported modules.
``-fmodules-ignore-macro=macroname``
Instruct modules to ignore the named macro when selecting an appropriate module variant. Use this for macros defined on the command line that don't affect how modules are built, to improve sharing of compiled module files.
``-fmodules-prune-interval=seconds``
Specify the minimum delay (in seconds) between attempts to prune the module cache. Module cache pruning attempts to clear out old, unused module files so that the module cache itself does not grow without bound. The default delay is large (604,800 seconds, or 7 days) because this is an expensive operation. Set this value to 0 to turn off pruning.
``-fmodules-prune-after=seconds``
Specify the minimum time (in seconds) for which a file in the module cache must be unused (according to access time) before module pruning will remove it. The default delay is large (2,678,400 seconds, or 31 days) to avoid excessive module rebuilding.
``-module-file-info <module file name>``
Debugging aid that prints information about a given module file (with a ``.pcm`` extension), including the language and preprocessor options that particular module variant was built with.
``-fmodules-decluse``
Enable checking of module ``use`` declarations.
``-fmodule-name=module-id``
Consider a source file as a part of the given module.
``-fmodule-map-file=<file>``
Load the given module map file if a header from its directory or one of its subdirectories is loaded.
Module Map Language
===================
The module map language describes the mapping from header files to the
logical structure of modules. To enable support for using a library as
a module, one must write a ``module.map`` file for that library. The
``module.map`` file is placed alongside the header files themselves,
and is written in the module map language described below.
As an example, the module map file for the C standard library might look a bit like this:
.. parsed-literal::
module std [system] {
module complex {
header "complex.h"
export *
}
module ctype {
header "ctype.h"
export *
}
module errno {
header "errno.h"
header "sys/errno.h"
export *
}
module fenv {
header "fenv.h"
export *
}
// ...more headers follow...
}
Here, the top-level module ``std`` encompasses the whole C standard library. It has a number of submodules containing different parts of the standard library: ``complex`` for complex numbers, ``ctype`` for character types, etc. Each submodule lists one or more headers that provide the contents for that submodule. Finally, the ``export *`` command specifies that anything included by that submodule will be automatically re-exported.
Lexical structure
-----------------
Module map files use a simplified form of the C99 lexer, with the same rules for identifiers, tokens, string literals, ``/* */`` and ``//`` comments. The module map language has the following reserved words; all other C identifiers are valid identifiers.
.. parsed-literal::
``config_macros`` ``export`` ``module``
``conflict`` ``framework`` ``requires``
``exclude`` ``header`` ``private``
``explicit`` ``link`` ``umbrella``
``extern`` ``use``
Module map file
---------------
A module map file consists of a series of module declarations:
.. parsed-literal::
*module-map-file*:
*module-declaration**
Within a module map file, modules are referred to by a *module-id*, which uses periods to separate each part of a module's name:
.. parsed-literal::
*module-id*:
*identifier* ('.' *identifier*)*
Module declaration
------------------
A module declaration describes a module, including the headers that contribute to that module, its submodules, and other aspects of the module.
.. parsed-literal::
*module-declaration*:
``explicit``:sub:`opt` ``framework``:sub:`opt` ``module`` *module-id* *attributes*:sub:`opt` '{' *module-member** '}'
``extern`` ``module`` *module-id* *string-literal*
The *module-id* should consist of only a single *identifier*, which provides the name of the module being defined. Each module shall have a single definition.
The ``explicit`` qualifier can only be applied to a submodule, i.e., a module that is nested within another module. The contents of explicit submodules are only made available when the submodule itself was explicitly named in an import declaration or was re-exported from an imported module.
The ``framework`` qualifier specifies that this module corresponds to a Darwin-style framework. A Darwin-style framework (used primarily on Mac OS X and iOS) is contained entirely in directory ``Name.framework``, where ``Name`` is the name of the framework (and, therefore, the name of the module). That directory has the following layout:
.. parsed-literal::
Name.framework/
module.map Module map for the framework
Headers/ Subdirectory containing framework headers
Frameworks/ Subdirectory containing embedded frameworks
Resources/ Subdirectory containing additional resources
Name Symbolic link to the shared library for the framework
The ``system`` attribute specifies that the module is a system module. When a system module is rebuilt, all of the module's headers will be considered system headers, which suppresses warnings. This is equivalent to placing ``#pragma GCC system_header`` in each of the module's headers. The form of attributes is described in the section Attributes_, below.
Modules can have a number of different kinds of members, each of which is described below:
.. parsed-literal::
*module-member*:
*requires-declaration*
*header-declaration*
*umbrella-dir-declaration*
*submodule-declaration*
*export-declaration*
*use-declaration*
*link-declaration*
*config-macros-declaration*
*conflict-declaration*
An extern module references a module defined by the *module-id* in a file given by the *string-literal*. The file can be referenced either by an absolute path or by a path relative to the current map file.
Requires declaration
~~~~~~~~~~~~~~~~~~~~
A *requires-declaration* specifies the requirements that an importing translation unit must satisfy to use the module.
.. parsed-literal::
*requires-declaration*:
``requires`` *feature-list*
*feature-list*:
*feature* (',' *feature*)*
*feature*:
``!``:sub:`opt` *identifier*
The requirements clause allows specific modules or submodules to specify that they are only accessible with certain language dialects or on certain platforms. The feature list is a set of identifiers, defined below. If any of the features is not available in a given translation unit, that translation unit shall not import the module. The optional ``!`` indicates that a feature is incompatible with the module.
The following features are defined:
altivec
The target supports AltiVec.
blocks
The "blocks" language feature is available.
cplusplus
C++ support is available.
cplusplus11
C++11 support is available.
objc
Objective-C support is available.
objc_arc
Objective-C Automatic Reference Counting (ARC) is available.
opencl
OpenCL is available.
tls
Thread local storage is available.
*target feature*
A specific target feature (e.g., ``sse4``, ``avx``, ``neon``) is available.
**Example**: The ``std`` module can be extended to also include C++ and C++11 headers using a *requires-declaration*:
.. parsed-literal::
module std {
// C standard library...
module vector {
requires cplusplus
header "vector"
}
module type_traits {
requires cplusplus11
header "type_traits"
}
}
Header declaration
~~~~~~~~~~~~~~~~~~
A header declaration specifies that a particular header is associated with the enclosing module.
.. parsed-literal::
*header-declaration*:
``umbrella``:sub:`opt` ``header`` *string-literal*
``private`` ``header`` *string-literal*
``exclude`` ``header`` *string-literal*
A header declaration that does not contain ``exclude`` specifies a header that contributes to the enclosing module. Specifically, when the module is built, the named header will be parsed and its declarations will be (logically) placed into the enclosing submodule.
A header with the ``umbrella`` specifier is called an umbrella header. An umbrella header includes all of the headers within its directory (and any subdirectories), and is typically used (in the ``#include`` world) to easily access the full API provided by a particular library. With modules, an umbrella header is a convenient shortcut that eliminates the need to write out ``header`` declarations for every library header. A given directory can only contain a single umbrella header.
.. note::
Any headers not included by the umbrella header should have
explicit ``header`` declarations. Use the
``-Wincomplete-umbrella`` warning option to ask Clang to complain
about headers not covered by the umbrella header or the module map.
A header with the ``private`` specifier may not be included from outside the module itself.
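For example, a module map might expose a public header while keeping an implementation-detail header private (a sketch; the header names are hypothetical):
.. parsed-literal::
  module MyLib {
    header "MyLib.h"
    private header "MyLibInternal.h"
  }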
A header with the ``exclude`` specifier is excluded from the module. It will not be included when the module is built, nor will it be considered to be part of the module.
**Example**: The C header ``assert.h`` is an excellent candidate for an excluded header, because it is meant to be included multiple times (possibly with different ``NDEBUG`` settings).
.. parsed-literal::
module std [system] {
exclude header "assert.h"
}
A given header shall not be referenced by more than one *header-declaration*.
Umbrella directory declaration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
An umbrella directory declaration specifies that all of the headers in the specified directory should be included within the module.
.. parsed-literal::
*umbrella-dir-declaration*:
``umbrella`` *string-literal*
The *string-literal* refers to a directory. When the module is built, all of the header files in that directory (and its subdirectories) are included in the module.
An *umbrella-dir-declaration* shall not refer to the same directory as the location of an umbrella *header-declaration*. In other words, only a single kind of umbrella can be specified for a given directory.
.. note::
Umbrella directories are useful for libraries that have a large number of headers but do not have an umbrella header.
Submodule declaration
~~~~~~~~~~~~~~~~~~~~~
Submodule declarations describe modules that are nested within their enclosing module.
.. parsed-literal::
*submodule-declaration*:
*module-declaration*
*inferred-submodule-declaration*
A *submodule-declaration* that is a *module-declaration* is a nested module. If the *module-declaration* has a ``framework`` specifier, the enclosing module shall have a ``framework`` specifier; the submodule's contents shall be contained within the subdirectory ``Frameworks/SubName.framework``, where ``SubName`` is the name of the submodule.
A *submodule-declaration* that is an *inferred-submodule-declaration* describes a set of submodules that correspond to any headers that are part of the module but are not explicitly described by a *header-declaration*.
.. parsed-literal::
*inferred-submodule-declaration*:
``explicit``:sub:`opt` ``framework``:sub:`opt` ``module`` '*' *attributes*:sub:`opt` '{' *inferred-submodule-member** '}'
*inferred-submodule-member*:
``export`` '*'
A module containing an *inferred-submodule-declaration* shall have either an umbrella header or an umbrella directory. The headers to which the *inferred-submodule-declaration* applies are exactly those headers included by the umbrella header (transitively) or included in the module because they reside within the umbrella directory (or its subdirectories).
For each header included by the umbrella header or in the umbrella directory that is not named by a *header-declaration*, a module declaration is implicitly generated from the *inferred-submodule-declaration*. The module will:
* Have the same name as the header (without the file extension)
* Have the ``explicit`` specifier, if the *inferred-submodule-declaration* has the ``explicit`` specifier
* Have the ``framework`` specifier, if the
*inferred-submodule-declaration* has the ``framework`` specifier
* Have the attributes specified by the \ *inferred-submodule-declaration*
* Contain a single *header-declaration* naming that header
* Contain a single *export-declaration* ``export *``, if the \ *inferred-submodule-declaration* contains the \ *inferred-submodule-member* ``export *``
**Example**: If the subdirectory "MyLib" contains the headers ``A.h`` and ``B.h``, then the following module map:
.. parsed-literal::
module MyLib {
umbrella "MyLib"
explicit module * {
export *
}
}
is equivalent to the (more verbose) module map:
.. parsed-literal::
module MyLib {
explicit module A {
header "A.h"
export *
}
explicit module B {
header "B.h"
export *
}
}
Export declaration
~~~~~~~~~~~~~~~~~~
An *export-declaration* specifies which imported modules will automatically be re-exported as part of a given module's API.
.. parsed-literal::
*export-declaration*:
``export`` *wildcard-module-id*
*wildcard-module-id*:
*identifier*
'*'
*identifier* '.' *wildcard-module-id*
The *export-declaration* names a module or a set of modules that will be re-exported to any translation unit that imports the enclosing module. Each imported module that matches the *wildcard-module-id* up to, but not including, the first ``*`` will be re-exported.
**Example**: In the following example, importing ``MyLib.Derived`` also provides the API for ``MyLib.Base``:
.. parsed-literal::
module MyLib {
module Base {
header "Base.h"
}
module Derived {
header "Derived.h"
export Base
}
}
Note that, if ``Derived.h`` includes ``Base.h``, one can simply use a wildcard export to re-export everything ``Derived.h`` includes:
.. parsed-literal::
module MyLib {
module Base {
header "Base.h"
}
module Derived {
header "Derived.h"
export *
}
}
.. note::
The wildcard export syntax ``export *`` re-exports all of the
modules that were imported in the actual header file. Because
``#include`` directives are automatically mapped to module imports,
``export *`` provides the same transitive-inclusion behavior
provided by the C preprocessor, e.g., importing a given module
implicitly imports all of the modules on which it depends.
Therefore, liberal use of ``export *`` provides excellent backward
compatibility for programs that rely on transitive inclusion (i.e.,
all of them).
Use declaration
~~~~~~~~~~~~~~~
A *use-declaration* specifies one of the other modules that the module is allowed to use. An import or include that does not match one of these is rejected when the option ``-fmodules-decluse`` is enabled.
.. parsed-literal::
*use-declaration*:
``use`` *module-id*
**Example**: In the following example, the use of A from C is not declared, so it will trigger a warning.
.. parsed-literal::
module A {
header "a.h"
}
module B {
header "b.h"
}
module C {
header "c.h"
use B
}
When compiling a source file that implements a module, use the option ``-fmodule-name=module-id`` to indicate that the source file is logically part of that module. At present, the compiler only applies these restrictions to the module directly being built.
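For instance, building the source file that implements module C from the example above might use a command line like the following (a sketch; the file name is hypothetical):
.. parsed-literal::
  clang -fmodules -fmodules-decluse -fmodule-name=C -c c_impl.c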
Link declaration
~~~~~~~~~~~~~~~~
A *link-declaration* specifies a library or framework against which a program should be linked if the enclosing module is imported in any translation unit in that program.
.. parsed-literal::
*link-declaration*:
``link`` ``framework``:sub:`opt` *string-literal*
The *string-literal* specifies the name of the library or framework against which the program should be linked. For example, specifying "clangBasic" would instruct the linker to link with ``-lclangBasic`` for a Unix-style linker.
A *link-declaration* with the ``framework`` specifier indicates that the linker should link against the named framework, e.g., with ``-framework MyFramework``.
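**Example**: A module map might request linking against both a library and a framework (a sketch; the names are illustrative):
.. parsed-literal::
  module MyDatabase {
    header "MyDatabase.h"
    link "sqlite3"
    link framework "CoreFoundation"
  }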
.. note::
Automatic linking with the ``link`` directive is not yet widely
implemented, because it requires support from both the object file
format and the linker. The notion is similar to Microsoft Visual
Studio's ``#pragma comment(lib...)``.
Configuration macros declaration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The *config-macros-declaration* specifies the set of configuration macros that have an effect on the API of the enclosing module.
.. parsed-literal::
*config-macros-declaration*:
``config_macros`` *attributes*:sub:`opt` *config-macro-list*:sub:`opt`
*config-macro-list*:
*identifier* (',' *identifier*)*
Each *identifier* in the *config-macro-list* specifies the name of a macro. The compiler is required to maintain different variants of the given module for differing definitions of any of the named macros.
A *config-macros-declaration* shall only be present on a top-level module, i.e., a module that is not nested within an enclosing module.
The ``exhaustive`` attribute specifies that the list of macros in the *config-macros-declaration* is exhaustive, meaning that no other macro definition is intended to have an effect on the API of that module.
.. note::
The ``exhaustive`` attribute implies that any macro definitions
for macros not listed as configuration macros should be ignored
completely when building the module. As an optimization, the
compiler could reduce the number of unique module variants by not
considering these non-configuration macros. This optimization is not
yet implemented in Clang.
A translation unit shall not import the same module under different definitions of the configuration macros.
.. note::
Clang implements a weak form of this requirement: the definitions
used for configuration macros are fixed based on the definitions
provided by the command line. If an import occurs and the definition
of any configuration macro has changed, the compiler will produce a
warning (under the control of ``-Wconfig-macros``).
**Example:** A logging library might provide different API (e.g., in the form of different definitions for a logging macro) based on the ``NDEBUG`` macro setting:
.. parsed-literal::
module MyLogger {
umbrella header "MyLogger.h"
config_macros [exhaustive] NDEBUG
}
Conflict declarations
~~~~~~~~~~~~~~~~~~~~~
A *conflict-declaration* describes a case where the presence of two different modules in the same translation unit is likely to cause a problem. For example, two modules may provide similar-but-incompatible functionality.
.. parsed-literal::
*conflict-declaration*:
``conflict`` *module-id* ',' *string-literal*
The *module-id* of the *conflict-declaration* specifies the module with which the enclosing module conflicts. The specified module shall not have been imported in the translation unit when the enclosing module is imported.
The *string-literal* provides a message to be provided as part of the compiler diagnostic when two modules conflict.
.. note::
Clang emits a warning (under the control of ``-Wmodule-conflict``)
when a module conflict is discovered.
**Example:**
.. parsed-literal::
module Conflicts {
explicit module A {
header "conflict_a.h"
conflict B, "we just don't like B"
}
module B {
header "conflict_b.h"
}
}
Attributes
----------
Attributes are used in a number of places in the grammar to describe specific behavior of other declarations. The format of attributes is fairly simple.
.. parsed-literal::
*attributes*:
*attribute* *attributes*:sub:`opt`
*attribute*:
'[' *identifier* ']'
Any *identifier* can be used as an attribute, and each declaration specifies what attributes can be applied to it.
Modularizing a Platform
=======================
To get any benefit out of modules, one needs to introduce module maps for software libraries starting at the bottom of the stack. This typically means introducing a module map covering the operating system's headers and the C standard library headers (in ``/usr/include``, for a Unix system).
The module maps will be written using the `module map language`_, which provides the tools necessary to describe the mapping between headers and modules. Because the set of headers differs from one system to the next, the module map will likely have to be somewhat customized for, e.g., a particular distribution and version of the operating system. Moreover, the system headers themselves may require some modification, if they exhibit any anti-patterns that break modules. Such common patterns are described below.
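As a rough sketch, such a module map might begin by grouping a few C library headers into submodules of a single top-level system module (the headers shown are illustrative; a real map must match the target system):
.. parsed-literal::
  module std [system] {
    module stdio {
      header "stdio.h"
      export *
    }
    module stdlib {
      header "stdlib.h"
      export *
    }
  }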
**Macro-guarded copy-and-pasted definitions**
System headers vend core types such as ``size_t`` for users. These types are often needed in a number of system headers, and are almost trivial to write. Hence, it is fairly common to see a definition such as the following copy-and-pasted throughout the headers:
.. parsed-literal::
#ifndef _SIZE_T
#define _SIZE_T
typedef __SIZE_TYPE__ size_t;
#endif
Unfortunately, when the modules build compiles all of the C library headers together into a single module, only the first actual type definition of ``size_t`` will be visible, and then only in the submodule corresponding to the lucky first header. Any other headers that have copy-and-pasted versions of this pattern will *not* have a definition of ``size_t``. Importing the submodule corresponding to one of those headers will therefore not yield ``size_t`` as part of the API, because it wasn't there when the header was parsed. The fix for this problem is either to pull the copied declarations into a common header that gets included everywhere ``size_t`` is part of the API, or to eliminate the ``#ifndef`` and redefine the ``size_t`` type. The latter works for C++ headers and C11, but will cause an error for non-modules C90/C99, where redefinition of a ``typedef`` is not permitted.
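A minimal sketch of the first fix, assuming a hypothetical shared header named ``__size_t.h``:
.. parsed-literal::
  /* __size_t.h: the single shared definition. */
  #ifndef _SIZE_T
  #define _SIZE_T
  typedef __SIZE_TYPE__ size_t;
  #endif
  /* stdio.h, stddef.h, and any other header that vends size_t then contain: */
  #include "__size_t.h"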
**Conflicting definitions**
Different system headers may provide conflicting definitions for various macros, functions, or types. These conflicting definitions don't tend to cause problems in a pre-modules world unless someone happens to include both headers in one translation unit. Since the fix is often simply "don't do that", such problems persist. Modules requires that the conflicting definitions be eliminated or that they be placed in separate modules (the former is generally the better answer).
**Missing includes**
Headers are often missing ``#include`` directives for headers that they actually depend on. As with the problem of conflicting definitions, this only affects unlucky users who don't happen to include headers in the right order. With modules, the headers of a particular module will be parsed in isolation, so the module may fail to build if there are missing includes.
**Headers that vend multiple APIs at different times**
Some systems have headers that contain a number of different kinds of API definitions, only some of which are made available with a given include. For example, the header may vend ``size_t`` only when the macro ``__need_size_t`` is defined before that header is included, and also vend ``wchar_t`` only when the macro ``__need_wchar_t`` is defined. Such headers are often included many times in a single translation unit, and will have no include guards. There is no sane way to map this header to a submodule. One can either eliminate the header (e.g., by splitting it into separate headers, one per actual API) or simply ``exclude`` it in the module map.
To detect and help address some of these problems, the ``clang-tools-extra`` repository contains a ``modularize`` tool that parses a set of given headers and attempts to detect these problems and produce a report. See the tool's in-source documentation for information on how to check your system or library headers.
Future Directions
=================
Modules is an experimental feature, and there is much work left to do to make it both real and useful. Here are a few ideas:
**Detect unused module imports**
Unlike with ``#include`` directives, it should be fairly simple to track whether a directly-imported module has ever been used. By doing so, Clang can emit ``unused import`` or ``unused #include`` diagnostics, including Fix-Its to remove the useless imports/includes.
**Fix-Its for missing imports**
It's fairly common for one to make use of some API while writing code, only to get a compiler error about "unknown type" or "no function named" because the corresponding header has not been included. Clang should detect such cases and auto-import the required module (with a Fix-It!).
**Improve modularize**
The modularize tool is both extremely important (for deployment) and extremely crude. It needs better UI, better detection of problems (especially for C++), and perhaps an assistant mode to help write module maps for you.
**C++ Support**
Modules clearly has to work for C++, or we'll never get to use it for the Clang code base.
Where To Learn More About Modules
=================================
The Clang source code provides additional information about modules:
``clang/lib/Headers/module.map``
Module map for Clang's compiler-specific header files.
``clang/test/Modules/``
Tests specifically related to modules functionality.
``clang/include/clang/Basic/Module.h``
The ``Module`` class in this header describes a module, and is used throughout the compiler to implement modules.
``clang/include/clang/Lex/ModuleMap.h``
The ``ModuleMap`` class in this header describes the full module map, consisting of all of the module map files that have been parsed, and providing facilities for looking up module maps and mapping between modules and headers (in both directions).
PCHInternals_
Information about the serialized AST format used for precompiled headers and modules. The actual implementation is in the ``clangSerialization`` library.
.. [#] Automatic linking against the libraries of modules requires specific linker support, which is not widely available.
.. [#] Modules are only available in C and Objective-C; a separate flag ``-fcxx-modules`` enables modules support for C++, which is even more experimental and broken.
.. [#] There are certain anti-patterns that occur in headers, particularly system headers, that cause problems for modules. The section `Modularizing a Platform`_ describes some of them.
.. [#] The second instance is actually a new thread within the current process, not a separate process. However, the original compiler instance is blocked on the execution of this thread.
.. [#] The preprocessing context in which the modules are parsed is actually dependent on the command-line options provided to the compiler, including the language dialect and any ``-D`` options. However, the compiled modules for different command-line options are kept distinct, and any preprocessor directives that occur within the translation unit are ignored. See the section on the `Configuration macros declaration`_ for more information.
.. _PCHInternals: PCHInternals.html
View File
@ -0,0 +1,548 @@
====================
Objective-C Literals
====================
Introduction
============
Three new features were introduced into clang at the same time:
*NSNumber Literals* provide a syntax for creating ``NSNumber`` from
scalar literal expressions; *Collection Literals* provide a short-hand
for creating arrays and dictionaries; *Object Subscripting* provides a
way to use subscripting with Objective-C objects. Users of Apple
compiler releases can use these features starting with the Apple LLVM
Compiler 4.0. Users of open-source LLVM.org compiler releases can use
these features starting with clang v3.1.
These language additions simplify common Objective-C programming
patterns, make programs more concise, and improve the safety of
container creation.
This document describes how the features are implemented in clang, and
how to use them in your own programs.
NSNumber Literals
=================
The framework class ``NSNumber`` is used to wrap scalar values inside
objects: signed and unsigned integers (``char``, ``short``, ``int``,
``long``, ``long long``), floating point numbers (``float``,
``double``), and boolean values (``BOOL``, C++ ``bool``). Scalar values
wrapped in objects are also known as *boxed* values.
In Objective-C, any character, numeric or boolean literal prefixed with
the ``'@'`` character will evaluate to a pointer to an ``NSNumber``
object initialized with that value. C's type suffixes may be used to
control the size of numeric literals.
Examples
--------
The following program illustrates the rules for ``NSNumber`` literals:
.. code-block:: objc
int main(int argc, const char *argv[]) {
// character literals.
NSNumber *theLetterZ = @'Z'; // equivalent to [NSNumber numberWithChar:'Z']
// integral literals.
NSNumber *fortyTwo = @42; // equivalent to [NSNumber numberWithInt:42]
NSNumber *fortyTwoUnsigned = @42U; // equivalent to [NSNumber numberWithUnsignedInt:42U]
NSNumber *fortyTwoLong = @42L; // equivalent to [NSNumber numberWithLong:42L]
NSNumber *fortyTwoLongLong = @42LL; // equivalent to [NSNumber numberWithLongLong:42LL]
// floating point literals.
NSNumber *piFloat = @3.141592654F; // equivalent to [NSNumber numberWithFloat:3.141592654F]
NSNumber *piDouble = @3.1415926535; // equivalent to [NSNumber numberWithDouble:3.1415926535]
// BOOL literals.
NSNumber *yesNumber = @YES; // equivalent to [NSNumber numberWithBool:YES]
NSNumber *noNumber = @NO; // equivalent to [NSNumber numberWithBool:NO]
#ifdef __cplusplus
NSNumber *trueNumber = @true; // equivalent to [NSNumber numberWithBool:(BOOL)true]
NSNumber *falseNumber = @false; // equivalent to [NSNumber numberWithBool:(BOOL)false]
#endif
}
Discussion
----------
NSNumber literals only support literal scalar values after the ``'@'``.
Consequently, ``@INT_MAX`` works, but ``@INT_MIN`` does not, because
they are defined like this:
.. code-block:: objc
#define INT_MAX 2147483647 /* max value for an int */
#define INT_MIN (-2147483647-1) /* min value for an int */
The definition of ``INT_MIN`` is not a simple literal, but a
parenthesized expression. Parenthesized expressions are supported using
the `boxed expression <#objc_boxed_expressions>`_ syntax, which is
described in the next section.
Because ``NSNumber`` does not currently support wrapping ``long double``
values, the use of a ``long double NSNumber`` literal (e.g.
``@123.23L``) will be rejected by the compiler.
Previously, the ``BOOL`` type was simply a typedef for ``signed char``,
and ``YES`` and ``NO`` were macros that expand to ``(BOOL)1`` and
``(BOOL)0`` respectively. To support ``@YES`` and ``@NO`` expressions,
these macros are now defined using new language keywords in
``<objc/objc.h>``:
.. code-block:: objc
#if __has_feature(objc_bool)
#define YES __objc_yes
#define NO __objc_no
#else
#define YES ((BOOL)1)
#define NO ((BOOL)0)
#endif
The compiler implicitly converts ``__objc_yes`` and ``__objc_no`` to
``(BOOL)1`` and ``(BOOL)0``. The keywords are used to disambiguate
``BOOL`` and integer literals.
Objective-C++ also supports ``@true`` and ``@false`` expressions, which
are equivalent to ``@YES`` and ``@NO``.
Boxed Expressions
=================
Objective-C provides a new syntax for boxing C expressions:
.. code-block:: objc
@( <expression> )
Expressions of scalar (numeric, enumerated, BOOL) and C string pointer
types are supported:
.. code-block:: objc
// numbers.
NSNumber *smallestInt = @(-INT_MAX - 1); // [NSNumber numberWithInt:(-INT_MAX - 1)]
NSNumber *piOverTwo = @(M_PI / 2); // [NSNumber numberWithDouble:(M_PI / 2)]
// enumerated types.
typedef enum { Red, Green, Blue } Color;
NSNumber *favoriteColor = @(Green); // [NSNumber numberWithInt:((int)Green)]
// strings.
NSString *path = @(getenv("PATH")); // [NSString stringWithUTF8String:(getenv("PATH"))]
NSArray *pathComponents = [path componentsSeparatedByString:@":"];
Boxed Enums
-----------
Cocoa frameworks frequently define constant values using *enums.*
Although enum values are integral, they may not be used directly as
boxed literals (this avoids conflicts with future ``'@'``-prefixed
Objective-C keywords). Instead, an enum value must be placed inside a
boxed expression. The following example demonstrates configuring an
``AVAudioRecorder`` using a dictionary that contains a boxed enumeration
value:
.. code-block:: objc
enum {
AVAudioQualityMin = 0,
AVAudioQualityLow = 0x20,
AVAudioQualityMedium = 0x40,
AVAudioQualityHigh = 0x60,
AVAudioQualityMax = 0x7F
};
- (AVAudioRecorder *)recordToFile:(NSURL *)fileURL {
NSDictionary *settings = @{ AVEncoderAudioQualityKey : @(AVAudioQualityMax) };
return [[AVAudioRecorder alloc] initWithURL:fileURL settings:settings error:NULL];
}
The expression ``@(AVAudioQualityMax)`` converts ``AVAudioQualityMax``
to an integer type, and boxes the value accordingly. If the enum has a
:ref:`fixed underlying type <objc-fixed-enum>` as in:
.. code-block:: objc
typedef enum : unsigned char { Red, Green, Blue } Color;
NSNumber *red = @(Red), *green = @(Green), *blue = @(Blue); // => [NSNumber numberWithUnsignedChar:]
then the fixed underlying type will be used to select the correct
``NSNumber`` creation method.
Boxing a value of enum type will result in a ``NSNumber`` pointer with a
creation method according to the underlying type of the enum, which can
be a :ref:`fixed underlying type <objc-fixed-enum>`
or a compiler-defined integer type capable of representing the values of
all the members of the enumeration:
.. code-block:: objc
typedef enum : unsigned char { Red, Green, Blue } Color;
Color col = Red;
NSNumber *nsCol = @(col); // => [NSNumber numberWithUnsignedChar:]
Boxed C Strings
---------------
A C string literal prefixed by the ``'@'`` token denotes an ``NSString``
literal in the same way a numeric literal prefixed by the ``'@'`` token
denotes an ``NSNumber`` literal. When the type of the parenthesized
expression is ``(char *)`` or ``(const char *)``, the result of the
boxed expression is a pointer to an ``NSString`` object containing
equivalent character data, which is assumed to be '\\0'-terminated and
UTF-8 encoded. The following example converts C-style command line
arguments into ``NSString`` objects.
.. code-block:: objc
// Partition command line arguments into positional and option arguments.
NSMutableArray *args = [NSMutableArray new];
NSMutableDictionary *options = [NSMutableDictionary new];
while (--argc) {
const char *arg = *++argv;
if (strncmp(arg, "--", 2) == 0) {
options[@(arg + 2)] = @(*++argv); // --key value
} else {
[args addObject:@(arg)]; // positional argument
}
}
As with all C pointers, character pointer expressions can involve
arbitrary pointer arithmetic, therefore programmers must ensure that the
character data is valid. Passing ``NULL`` as the character pointer will
raise an exception at runtime. When possible, the compiler will reject
``NULL`` character pointers used in boxed expressions.
Container Literals
==================
Objective-C now supports a new expression syntax for creating immutable
array and dictionary container objects.
Examples
--------
Immutable array expression:
.. code-block:: objc
NSArray *array = @[ @"Hello", NSApp, [NSNumber numberWithInt:42] ];
This creates an ``NSArray`` with 3 elements. The comma-separated
sub-expressions of an array literal can be any Objective-C object
pointer typed expression.
Immutable dictionary expression:
.. code-block:: objc
NSDictionary *dictionary = @{
@"name" : NSUserName(),
@"date" : [NSDate date],
@"processInfo" : [NSProcessInfo processInfo]
};
This creates an ``NSDictionary`` with 3 key/value pairs. Value
sub-expressions of a dictionary literal must be Objective-C object
pointer typed, as in array literals. Key sub-expressions must be of an
Objective-C object pointer type that implements the
``<NSCopying>`` protocol.
Discussion
----------
Neither keys nor values can have the value ``nil`` in containers. If the
compiler can prove that a key or value is ``nil`` at compile time, then
a warning will be emitted. Otherwise, a runtime error will occur.
Using array and dictionary literals is safer than the variadic creation
forms commonly in use today. Array literal expressions expand to calls
to ``+[NSArray arrayWithObjects:count:]``, which validates that all
objects are non-``nil``. The variadic form,
``+[NSArray arrayWithObjects:]`` uses ``nil`` as an argument list
terminator, which can lead to malformed array objects. Dictionary
literals are similarly created with
``+[NSDictionary dictionaryWithObjects:forKeys:count:]`` which validates
all objects and keys, unlike
``+[NSDictionary dictionaryWithObjectsAndKeys:]`` which also uses a
``nil`` parameter as an argument list terminator.
Object Subscripting
===================
Objective-C object pointer values can now be used with C's subscripting
operator.
Examples
--------
The following code demonstrates the use of object subscripting syntax
with ``NSMutableArray`` and ``NSMutableDictionary`` objects:
.. code-block:: objc
NSMutableArray *array = ...;
NSUInteger idx = ...;
id newObject = ...;
id oldObject = array[idx];
array[idx] = newObject; // replace oldObject with newObject
NSMutableDictionary *dictionary = ...;
NSString *key = ...;
oldObject = dictionary[key];
dictionary[key] = newObject; // replace oldObject with newObject
The next section explains how subscripting expressions map to accessor
methods.
Subscripting Methods
--------------------
Objective-C supports two kinds of subscript expressions: *array-style*
subscript expressions use integer typed subscripts; *dictionary-style*
subscript expressions use Objective-C object pointer typed subscripts.
Each type of subscript expression is mapped to a message send using a
predefined selector. The advantage of this design is flexibility: class
designers are free to introduce subscripting by declaring methods or by
adopting protocols. Moreover, because the method names are selected by
the type of the subscript, an object can be subscripted using both array
and dictionary styles.
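For example, a hypothetical collection class can adopt array-style subscripting simply by declaring the two methods described below (a sketch; ``Deck`` is not a real Foundation class):
.. code-block:: objc
  @interface Deck : NSObject
  - (id)objectAtIndexedSubscript:(NSUInteger)idx;
  - (void)setObject:(id)card atIndexedSubscript:(NSUInteger)idx;
  @end
  // With these declarations visible, deck[0] reads the top card and
  // deck[0] = card replaces it.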
Array-Style Subscripting
^^^^^^^^^^^^^^^^^^^^^^^^
When the subscript operand has an integral type, the expression is
rewritten to use one of two different selectors, depending on whether
the element is being read or written. When an expression reads an
element using an integral index, as in the following example:
.. code-block:: objc
NSUInteger idx = ...;
id value = object[idx];
it is translated into a call to ``objectAtIndexedSubscript:``
.. code-block:: objc
id value = [object objectAtIndexedSubscript:idx];
When an expression writes an element using an integral index:
.. code-block:: objc
object[idx] = newValue;
it is translated to a call to ``setObject:atIndexedSubscript:``
.. code-block:: objc
[object setObject:newValue atIndexedSubscript:idx];
These message sends are then type-checked and performed just like
explicit message sends. The method used for ``objectAtIndexedSubscript:``
must be declared with an argument of integral type and a return value of
some Objective-C object pointer type. The method used for
``setObject:atIndexedSubscript:`` must be declared with its first argument
having some Objective-C pointer type and its second argument having
integral type.
The meaning of indexes is left up to the declaring class. The compiler
will coerce the index to the appropriate argument type of the method it
uses for type-checking. For an instance of ``NSArray``, reading an
element using an index outside the range ``[0, array.count)`` will raise
an exception. For an instance of ``NSMutableArray``, assigning to an
element using an index within this range will replace that element, but
assigning to an element using an index outside this range will raise an
exception; no syntax is provided for inserting, appending, or removing
elements for mutable arrays.
A class need not declare both methods in order to take advantage of this
language feature. For example, the class ``NSArray`` declares only
``objectAtIndexedSubscript:``, so that assignments to elements will fail
to type-check; moreover, its subclass ``NSMutableArray`` declares
``setObject:atIndexedSubscript:``.
Dictionary-Style Subscripting
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
When the subscript operand has an Objective-C object pointer type, the
expression is rewritten to use one of two different selectors, depending
on whether the element is being read from or written to. When an
expression reads an element using an Objective-C object pointer
subscript operand, as in the following example:
.. code-block:: objc
id key = ...;
id value = object[key];
it is translated into a call to the ``objectForKeyedSubscript:`` method:
.. code-block:: objc
id value = [object objectForKeyedSubscript:key];
When an expression writes an element using an Objective-C object pointer
subscript:
.. code-block:: objc
object[key] = newValue;
it is translated to a call to ``setObject:forKeyedSubscript:``
.. code-block:: objc
[object setObject:newValue forKeyedSubscript:key];
The behavior of ``setObject:forKeyedSubscript:`` is class-specific; but
in general it should replace an existing value if one is already
associated with a key, otherwise it should add a new value for the key.
No syntax is provided for removing elements from mutable dictionaries.
Discussion
----------
An Objective-C subscript expression occurs when the base operand of the
C subscript operator has an Objective-C object pointer type. Since this
potentially collides with pointer arithmetic on the value, these
expressions are only supported under the modern Objective-C runtime,
which categorically forbids such arithmetic.
Currently, only subscripts of integral or Objective-C object pointer
type are supported. In C++, a class type can be used if it has a single
conversion function to an integral or Objective-C pointer type, in which
case that conversion is applied and analysis continues as appropriate.
Otherwise, the expression is ill-formed.
An Objective-C object subscript expression is always an l-value. If the
expression appears on the left-hand side of a simple assignment operator
(=), the element is written as described below. If the expression
appears on the left-hand side of a compound assignment operator (e.g.
+=), the program is ill-formed, because the result of reading an element
is always an Objective-C object pointer and no binary operators are
legal on such pointers. If the expression appears in any other position,
the element is read as described below. It is an error to take the
address of a subscript expression, or (in C++) to bind a reference to
it.
Programs can use object subscripting with Objective-C object pointers of
type ``id``. Normal dynamic message send rules apply; the compiler must
see *some* declaration of the subscripting methods, and will pick the
declaration seen first.
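For example (a sketch; it assumes the Foundation declarations of the dictionary-style subscripting methods are visible):
.. code-block:: objc
  id cache = [NSMutableDictionary dictionary];
  cache[@"answer"] = @42;              // setObject:forKeyedSubscript:
  NSNumber *answer = cache[@"answer"]; // objectForKeyedSubscript: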
Caveats
=======
Objects created using the literal or boxed expression syntax are not
guaranteed to be uniqued by the runtime, but nor are they guaranteed to
be newly-allocated. As such, the result of performing direct comparisons
against the location of an object literal (using ``==``, ``!=``, ``<``,
``<=``, ``>``, or ``>=``) is not well-defined. This is usually a simple
mistake in code that intended to call the ``isEqual:`` method (or the
``compare:`` method).
This caveat applies to compile-time string literals as well.
Historically, string literals (using the ``@"..."`` syntax) have been
uniqued across translation units during linking. This is an
implementation detail of the compiler and should not be relied upon. If
you are using such code, please use global string constants instead
(``NSString * const MyConst = @"..."``) or use ``isEqual:``.
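For example (a sketch):
.. code-block:: objc
  NSNumber *a = @42, *b = @42;
  BOOL sameObject = (a == b);      // not well-defined: compares object identity
  BOOL sameValue  = [a isEqual:b]; // correct: compares the boxed values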
Grammar Additions
=================
To support the new syntax described above, the Objective-C
``@``-expression grammar has the following new productions:
::
objc-at-expression : '@' (string-literal | encode-literal | selector-literal | protocol-literal | object-literal)
;
object-literal : ('+' | '-')? numeric-constant
| character-constant
| boolean-constant
| array-literal
| dictionary-literal
;
boolean-constant : '__objc_yes' | '__objc_no' | 'true' | 'false' /* boolean keywords. */
;
array-literal : '[' assignment-expression-list ']'
;
assignment-expression-list : assignment-expression (',' assignment-expression-list)?
| /* empty */
;
dictionary-literal : '{' key-value-list '}'
;
key-value-list : key-value-pair (',' key-value-list)?
| /* empty */
;
key-value-pair : assignment-expression ':' assignment-expression
;
Note: ``@true`` and ``@false`` are only supported in Objective-C++.
Availability Checks
===================
Programs test for the new features by using clang's ``__has_feature``
checks. Here are examples of their use:
.. code-block:: objc
#if __has_feature(objc_array_literals)
// new way.
NSArray *elements = @[ @"H", @"He", @"O", @"C" ];
#else
// old way (equivalent).
id objects[] = { @"H", @"He", @"O", @"C" };
NSArray *elements = [NSArray arrayWithObjects:objects count:4];
#endif
#if __has_feature(objc_dictionary_literals)
// new way.
NSDictionary *masses = @{ @"H" : @1.0078, @"He" : @4.0026, @"O" : @15.9990, @"C" : @12.0096 };
#else
// old way (equivalent).
id keys[] = { @"H", @"He", @"O", @"C" };
id values[] = { [NSNumber numberWithDouble:1.0078], [NSNumber numberWithDouble:4.0026],
[NSNumber numberWithDouble:15.9990], [NSNumber numberWithDouble:12.0096] };
NSDictionary *masses = [NSDictionary dictionaryWithObjects:values forKeys:keys count:4];
#endif
#if __has_feature(objc_subscripting)
NSUInteger i, count = elements.count;
for (i = 0; i < count; ++i) {
NSString *element = elements[i];
NSNumber *mass = masses[element];
NSLog(@"the mass of %@ is %@", element, mass);
}
#else
NSUInteger i, count = [elements count];
for (i = 0; i < count; ++i) {
NSString *element = [elements objectAtIndex:i];
NSNumber *mass = [masses objectForKey:element];
NSLog(@"the mass of %@ is %@", element, mass);
}
#endif
Code can also use ``__has_feature(objc_bool)`` to check for the
availability of numeric literal support. This checks for the new
``__objc_yes / __objc_no`` keywords, which enable the use of
``@YES / @NO`` literals.
To check whether boxed expressions are supported, use
the ``__has_feature(objc_boxed_expressions)`` feature macro.
View File
@ -0,0 +1,561 @@
========================================
Precompiled Header and Modules Internals
========================================
.. contents::
:local:
This document describes the design and implementation of Clang's precompiled
headers (PCH) and modules. If you are interested in the end-user view, please
see the :ref:`User's Manual <usersmanual-precompiled-headers>`.
Using Precompiled Headers with ``clang``
----------------------------------------
The Clang compiler frontend, ``clang -cc1``, supports two command line options
for generating and using PCH files.
To generate PCH files using ``clang -cc1``, use the option :option:`-emit-pch`:
.. code-block:: bash
$ clang -cc1 test.h -emit-pch -o test.h.pch
This option is transparently used by ``clang`` when generating PCH files. The
resulting PCH file contains the serialized form of the compiler's internal
representation after it has completed parsing and semantic analysis. The PCH
file can then be used as a prefix header with the :option:`-include-pch`
option:
.. code-block:: bash
$ clang -cc1 -include-pch test.h.pch test.c -o test.s
Design Philosophy
-----------------
Precompiled headers are meant to improve overall compile times for projects, so
the design of precompiled headers is entirely driven by performance concerns.
The use case for precompiled headers is relatively simple: when there is a
common set of headers that is included in nearly every source file in the
project, we *precompile* that bundle of headers into a single precompiled
header (PCH file). Then, when compiling the source files in the project, we
load the PCH file first (as a prefix header), which acts as a stand-in for that
bundle of headers.
A precompiled header implementation improves performance when:
* Loading the PCH file is significantly faster than re-parsing the bundle of
headers stored within the PCH file. Thus, a precompiled header design
attempts to minimize the cost of reading the PCH file. Ideally, this cost
should not vary with the size of the precompiled header file.
* The cost of generating the PCH file initially is not so large that it
counters the per-source-file performance improvement due to eliminating the
need to parse the bundled headers in the first place. This is particularly
important on multi-core systems, because PCH file generation serializes the
build when all compilations require the PCH file to be up-to-date.
Modules, as implemented in Clang, use the same mechanisms as precompiled
headers to save a serialized AST file (one per module) and use those AST
modules. From an implementation standpoint, modules are a generalization of
precompiled headers, lifting a number of restrictions placed on precompiled
headers. In particular, there can only be one precompiled header and it must
be included at the beginning of the translation unit. The extensions to the
AST file format required for modules are discussed in the section on
:ref:`modules <pchinternals-modules>`.
Clang's AST files are designed with a compact on-disk representation, which
minimizes both creation time and the time required to initially load the AST
file. The AST file itself contains a serialized representation of Clang's
abstract syntax trees and supporting data structures, stored using the same
compressed bitstream as `LLVM's bitcode file format
<http://llvm.org/docs/BitCodeFormat.html>`_.
Clang's AST files are loaded "lazily" from disk. When an AST file is initially
loaded, Clang reads only a small amount of data from the AST file to establish
where certain important data structures are stored. The amount of data read in
this initial load is independent of the size of the AST file, such that a
larger AST file does not lead to longer AST load times. The actual header data
in the AST file --- macros, functions, variables, types, etc. --- is loaded
only when it is referenced from the user's code, at which point only that
entity (and those entities it depends on) are deserialized from the AST file.
With this approach, the cost of using an AST file for a translation unit is
proportional to the amount of code actually used from the AST file, rather than
being proportional to the size of the AST file itself.
When given the :option:`-print-stats` option, Clang produces statistics
describing how much of the AST file was actually loaded from disk. For a
simple "Hello, World!" program that includes the Apple ``Cocoa.h`` header
(which is built as a precompiled header), this option illustrates how little of
the actual precompiled header is required:
.. code-block:: none
*** AST File Statistics:
895/39981 source location entries read (2.238563%)
19/15315 types read (0.124061%)
20/82685 declarations read (0.024188%)
154/58070 identifiers read (0.265197%)
0/7260 selectors read (0.000000%)
0/30842 statements read (0.000000%)
4/8400 macros read (0.047619%)
1/4995 lexical declcontexts read (0.020020%)
0/4413 visible declcontexts read (0.000000%)
0/7230 method pool entries read (0.000000%)
0 method pool misses
For this small program, only a tiny fraction of the source locations, types,
declarations, identifiers, and macros were actually deserialized from the
precompiled header. These statistics can be useful to determine whether the
AST file implementation can be improved by making more of the implementation
lazy.
Precompiled headers can be chained. When you create a PCH while including an
existing PCH, Clang can create the new PCH by referencing the original file and
only writing the new data to the new file. For example, you could create a PCH
out of all the headers that are very commonly used throughout your project, and
then create a PCH for every single source file in the project that includes the
code that is specific to that file, so that recompiling the file itself is very
fast, without duplicating the data from the common headers for every file. The
mechanisms behind chained precompiled headers are discussed in a :ref:`later
section <pchinternals-chained>`.
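One plausible way to build such a chain with ``clang -cc1`` (a sketch; the header names are hypothetical and the exact options may differ between releases):
.. code-block:: bash
  $ clang -cc1 common.h -emit-pch -o common.h.pch
  $ clang -cc1 project.h -emit-pch -include-pch common.h.pch -o project.h.pch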
AST File Contents
-----------------
Clang's AST files are organized into several different blocks, each of which
contains the serialized representation of a part of Clang's internal
representation. Each of the blocks corresponds to either a block or a record
within `LLVM's bitstream format <http://llvm.org/docs/BitCodeFormat.html>`_.
The contents of each of these logical blocks are described below.
.. image:: PCHLayout.png
For a given AST file, the `llvm-bcanalyzer
<http://llvm.org/docs/CommandGuide/llvm-bcanalyzer.html>`_ utility can be used
to examine the actual structure of the bitstream for the AST file. This
information can be used both to help understand the structure of the AST file
and to isolate areas where AST files can still be optimized, e.g., through the
introduction of abbreviations.
Metadata Block
^^^^^^^^^^^^^^
The metadata block contains several records that provide information about how
the AST file was built. This metadata is primarily used to validate the use of
an AST file. For example, a precompiled header built for a 32-bit x86 target
cannot be used when compiling for a 64-bit x86 target. The metadata block
contains information about:
Language options
Describes the particular language dialect used to compile the AST file,
including major options (e.g., Objective-C support) and more minor options
(e.g., support for "``//``" comments). The contents of this record correspond to
the ``LangOptions`` class.
Target architecture
The target triple that describes the architecture, platform, and ABI for
which the AST file was generated, e.g., ``i386-apple-darwin9``.
AST version
The major and minor version numbers of the AST file format. Changes in the
minor version number should not affect backward compatibility, while changes
in the major version number imply that a newer compiler cannot read an older
precompiled header (and vice-versa).
Original file name
The full path of the header that was used to generate the AST file.
Predefines buffer
Although not explicitly stored as part of the metadata, the predefines buffer
is used in the validation of the AST file. The predefines buffer itself
contains code generated by the compiler to initialize the preprocessor state
according to the current target, platform, and command-line options. For
example, the predefines buffer will contain "``#define __STDC__ 1``" when we
are compiling C without Microsoft extensions. The predefines buffer itself
is stored within the :ref:`pchinternals-sourcemgr`, but its contents are
verified along with the rest of the metadata.
A chained PCH file (that is, one that references another PCH) and a module
(which may import other modules) have additional metadata containing the list
of all AST files that this AST file depends on. Each of those files will be
loaded along with this AST file.
For chained precompiled headers, the language options, target architecture and
predefines buffer data is taken from the end of the chain, since they have to
match anyway.
.. _pchinternals-sourcemgr:
Source Manager Block
^^^^^^^^^^^^^^^^^^^^
The source manager block contains the serialized representation of Clang's
:ref:`SourceManager <SourceManager>` class, which handles the mapping from
source locations (as represented in Clang's abstract syntax tree) into actual
column/line positions within a source file or macro instantiation. The AST
file's representation of the source manager also includes information about all
of the headers that were (transitively) included when building the AST file.
The bulk of the source manager block is dedicated to information about the
various files, buffers, and macro instantiations into which a source location
can refer. Each of these is referenced by a numeric "file ID", which is a
unique number (allocated starting at 1) stored in the source location. Clang
serializes the information for each kind of file ID, along with an index that
maps file IDs to the position within the AST file where the information about
that file ID is stored. The data associated with a file ID is loaded only when
required by the front end, e.g., to emit a diagnostic that includes a macro
instantiation history inside the header itself.
The source manager block also contains information about all of the headers
that were included when building the AST file. This includes information about
the controlling macro for the header (e.g., when the preprocessor identified
that the contents of the header depend on a macro like
``LLVM_CLANG_SOURCEMANAGER_H``).
.. _pchinternals-preprocessor:
Preprocessor Block
^^^^^^^^^^^^^^^^^^
The preprocessor block contains the serialized representation of the
preprocessor. Specifically, it contains all of the macros that have been
defined by the end of the header used to build the AST file, along with the
token sequences that comprise each macro. The macro definitions are only read
from the AST file when the name of the macro first occurs in the program. This
lazy loading of macro definitions is triggered by lookups into the
:ref:`identifier table <pchinternals-ident-table>`.
.. _pchinternals-types:
Types Block
^^^^^^^^^^^
The types block contains the serialized representation of all of the types
referenced in the translation unit. Each Clang type node (``PointerType``,
``FunctionProtoType``, etc.) has a corresponding record type in the AST file.
When types are deserialized from the AST file, the data within the record is
used to reconstruct the appropriate type node using the AST context.
Each type has a unique type ID, which is an integer that uniquely identifies
that type. Type ID 0 represents the NULL type, type IDs less than
``NUM_PREDEF_TYPE_IDS`` represent predefined types (``void``, ``float``, etc.),
while other "user-defined" type IDs are assigned consecutively from
``NUM_PREDEF_TYPE_IDS`` upward as the types are encountered. The AST file has
an associated mapping from each user-defined type ID to the location within
the types block where the serialized representation of that type resides,
enabling lazy deserialization of types. When a type is referenced from within
the AST file, that reference is encoded using the type ID shifted left by 3
bits. The lower three bits are used to represent the ``const``, ``volatile``,
and ``restrict`` qualifiers, as in Clang's :ref:`QualType <QualType>` class.
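Schematically, a reader could split such a reference as follows (an illustration of the encoding only, not Clang's actual serialization code):
.. code-block:: c++
  #include <cstdint>
  // Split a serialized type reference into its type ID and qualifier bits.
  struct DecodedTypeRef { std::uint64_t TypeID; unsigned Quals; };
  DecodedTypeRef decodeTypeRef(std::uint64_t Ref) {
    return {Ref >> 3,                          // index of the type in the types block
            static_cast<unsigned>(Ref & 0x7)}; // const/volatile/restrict bits
  }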
.. _pchinternals-decls:
Declarations Block
^^^^^^^^^^^^^^^^^^
The declarations block contains the serialized representation of all of the
declarations referenced in the translation unit. Each Clang declaration node
(``VarDecl``, ``FunctionDecl``, etc.) has a corresponding record type in the
AST file. When declarations are deserialized from the AST file, the data
within the record is used to build and populate a new instance of the
corresponding ``Decl`` node. As with types, each declaration node has a
numeric ID that is used to refer to that declaration within the AST file. In
addition, a lookup table provides a mapping from that numeric ID to the offset
within the precompiled header where that declaration is described.
Declarations in Clang's abstract syntax trees are stored hierarchically. At
the top of the hierarchy is the translation unit (``TranslationUnitDecl``),
which contains all of the declarations in the translation unit but is not
actually written as a specific declaration node. Its child declarations (such
as functions or struct types) may also contain other declarations inside them,
and so on. Within Clang, each declaration is stored within a :ref:`declaration
context <DeclContext>`, as represented by the ``DeclContext`` class.
Declaration contexts provide the mechanism to perform name lookup within a
given declaration (e.g., find the member named ``x`` in a structure) and
iterate over the declarations stored within a context (e.g., iterate over all
of the fields of a structure for structure layout).
In Clang's AST file format, deserializing a declaration that is a
``DeclContext`` is a separate operation from deserializing all of the
declarations stored within that declaration context. Therefore, Clang will
deserialize the translation unit declaration without deserializing the
declarations within that translation unit. When required, the declarations
stored within a declaration context will be deserialized. There are two
representations of the declarations within a declaration context, which
correspond to the name-lookup and iteration behavior described above:
* When the front end performs name lookup to find a name ``x`` within a given
declaration context (for example, during semantic analysis of the expression
``p->x``, where ``p``'s type is defined in the precompiled header), Clang
refers to an on-disk hash table that maps from the names within that
declaration context to the declaration IDs that represent each visible
declaration with that name. The actual declarations will then be
deserialized to provide the results of name lookup.
* When the front end performs iteration over all of the declarations within a
declaration context, all of those declarations are immediately
de-serialized. For large declaration contexts (e.g., the translation unit),
this operation is expensive; however, large declaration contexts are not
traversed in normal compilation, since such a traversal is unnecessary.
However, it is common for the code generator and semantic analysis to
traverse declaration contexts for structs, classes, unions, and
enumerations, although those contexts contain relatively few declarations in
the common case.
Statements and Expressions
^^^^^^^^^^^^^^^^^^^^^^^^^^
Statements and expressions are stored in the AST file in both the :ref:`types
<pchinternals-types>` and the :ref:`declarations <pchinternals-decls>` blocks,
because every statement or expression will be associated with either a type or
declaration. The actual statement and expression records are stored
immediately following the declaration or type that owns the statement or
expression. For example, the statement representing the body of a function
will be stored directly following the declaration of the function.
As with types and declarations, each statement and expression kind in Clang's
abstract syntax tree (``ForStmt``, ``CallExpr``, etc.) has a corresponding
record type in the AST file, which contains the serialized representation of
that statement or expression. Each substatement or subexpression within an
expression is stored as a separate record (which keeps most records to a fixed
size). Within the AST file, the subexpressions of an expression are stored, in
reverse order, prior to the expression that owns those expressions, using a form
of `Reverse Polish Notation
<http://en.wikipedia.org/wiki/Reverse_Polish_notation>`_. For example, an
expression ``3 - 4 + 5`` would be represented as follows:
+-----------------------+
| ``IntegerLiteral(5)`` |
+-----------------------+
| ``IntegerLiteral(4)`` |
+-----------------------+
| ``IntegerLiteral(3)`` |
+-----------------------+
| ``BinaryOperator(-)`` |
+-----------------------+
| ``BinaryOperator(+)`` |
+-----------------------+
| ``STOP``              |
+-----------------------+
When reading this representation, Clang evaluates each expression record it
encounters, builds the appropriate abstract syntax tree node, and then pushes
that expression on to a stack. When a record contains *N* subexpressions ---
``BinaryOperator`` has two of them --- those expressions are popped from the
top of the stack. The special STOP code indicates that we have reached the end
of a serialized expression or statement; other expression or statement records
may follow, but they are part of a different expression.
.. _pchinternals-ident-table:
Identifier Table Block
^^^^^^^^^^^^^^^^^^^^^^
The identifier table block contains an on-disk hash table that maps each
identifier mentioned within the AST file to the serialized representation of
the identifier's information (e.g., the ``IdentifierInfo`` structure). The
serialized representation contains:
* The actual identifier string.
* Flags that describe whether this identifier is the name of a built-in, a
poisoned identifier, an extension token, or a macro.
* If the identifier names a macro, the offset of the macro definition within
the :ref:`pchinternals-preprocessor`.
* If the identifier names one or more declarations visible from translation
unit scope, the :ref:`declaration IDs <pchinternals-decls>` of these
declarations.
When an AST file is loaded, the AST file reader mechanism introduces itself
into the identifier table as an external lookup source. Thus, when the user
program refers to an identifier that has not yet been seen, Clang will perform
a lookup into the identifier table. If an identifier is found, its contents
(macro definitions, flags, top-level declarations, etc.) will be deserialized,
at which point the corresponding ``IdentifierInfo`` structure will have the
same contents it would have after parsing the headers in the AST file.
Within the AST file, the identifiers used to name declarations are represented
with an integral value. A separate table provides a mapping from this integral
value (the identifier ID) to the location within the on-disk hash table where
that identifier is stored. This mapping is used when deserializing the name of
a declaration, the identifier of a token, or any other construct in the AST
file that refers to a name.
.. _pchinternals-method-pool:
Method Pool Block
^^^^^^^^^^^^^^^^^
The method pool block is represented as an on-disk hash table that serves two
purposes: it provides a mapping from the names of Objective-C selectors to the
set of Objective-C instance and class methods that have that particular
selector (which is required for semantic analysis in Objective-C) and also
stores all of the selectors used by entities within the AST file. The design
of the method pool is similar to that of the :ref:`identifier table
<pchinternals-ident-table>`: the first time a particular selector is formed
during the compilation of the program, Clang will search in the on-disk hash
table of selectors; if found, Clang will read the Objective-C methods
associated with that selector into the appropriate front-end data structure
(``Sema::InstanceMethodPool`` and ``Sema::FactoryMethodPool`` for instance and
class methods, respectively).
As with identifiers, selectors are represented by numeric values within the AST
file. A separate index maps these numeric selector values to the offset of the
selector within the on-disk hash table, and will be used when de-serializing an
Objective-C method declaration (or other Objective-C construct) that refers to
the selector.
AST Reader Integration Points
-----------------------------
The "lazy" deserialization behavior of AST files requires their integration
into several completely different submodules of Clang. For example, lazily
deserializing the declarations during name lookup requires that the name-lookup
routines be able to query the AST file to find entities stored there.
For each Clang data structure that requires direct interaction with the AST
reader logic, there is an abstract class that provides the interface between
the two modules. The ``ASTReader`` class, which handles the loading of an AST
file, inherits from all of these abstract classes to provide lazy
deserialization of Clang's data structures. ``ASTReader`` implements the
following abstract classes:
``ExternalSLocEntrySource``
This abstract interface is associated with the ``SourceManager`` class, and
is used whenever the :ref:`source manager <pchinternals-sourcemgr>` needs to
load the details of a file, buffer, or macro instantiation.
``IdentifierInfoLookup``
This abstract interface is associated with the ``IdentifierTable`` class, and
is used whenever the program source refers to an identifier that has not yet
been seen. In this case, the AST reader searches for this identifier within
its :ref:`identifier table <pchinternals-ident-table>` to load any top-level
declarations or macros associated with that identifier.
``ExternalASTSource``
This abstract interface is associated with the ``ASTContext`` class, and is
used whenever the abstract syntax tree nodes need to be loaded from the AST
file. It provides the ability to de-serialize declarations and types
identified by their numeric values, read the bodies of functions when
required, and read the declarations stored within a declaration context
(either for iteration or for name lookup).
``ExternalSemaSource``
This abstract interface is associated with the ``Sema`` class, and is used
whenever semantic analysis needs to read information from the :ref:`global
method pool <pchinternals-method-pool>`.
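Schematically, this integration pattern can be sketched as follows. The
interface and method names here (``ExternalIdentifierSource``, ``loadDecl``,
and so on) are simplified stand-ins rather than Clang's real classes; the
point is only that one reader object implements every abstract source
interface and is registered with each client data structure.

.. code-block:: c++

  #include <string>

  // Hypothetical, simplified stand-ins for the abstract interfaces above.
  class ExternalIdentifierSource {
  public:
    virtual ~ExternalIdentifierSource() {}
    // Called the first time an identifier is encountered; returns true if
    // the AST file had information (macros, declarations) for it.
    virtual bool lookupIdentifier(const std::string &Name) = 0;
  };

  class ExternalDeclSource {
  public:
    virtual ~ExternalDeclSource() {}
    // Called when a declaration with the given numeric ID must be
    // materialized from the AST file.
    virtual void loadDecl(unsigned DeclID) = 0;
  };

  // One reader object inherits every interface; the identifier table, the
  // AST context, Sema, etc. each hold a pointer to it as their external
  // source and call back into it lazily.
  class ToyASTReader : public ExternalIdentifierSource,
                       public ExternalDeclSource {
  public:
    bool lookupIdentifier(const std::string &Name) override {
      // ... probe the on-disk hash table and deserialize on a hit ...
      return false;
    }
    void loadDecl(unsigned DeclID) override {
      // ... map the ID to a file offset and deserialize the declaration ...
    }
  };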
.. _pchinternals-chained:
Chained precompiled headers
---------------------------
Chained precompiled headers were initially intended to improve the performance
of IDE-centric operations such as syntax highlighting and code completion while
a particular source file is being edited by the user. To minimize the amount
of reparsing required after a change to the file, a form of precompiled header
--- called a precompiled *preamble* --- is automatically generated by parsing
all of the headers in the source file, up to and including the last
``#include``. When only the source file changes (and none of the headers it
depends on), reparsing of that source file can use the precompiled preamble and
start parsing after the ``#include``\ s, so parsing time is proportional to the
size of the source file (rather than all of its includes). However, the
compilation of that translation unit may already use a precompiled header: in
this case, Clang will create the precompiled preamble as a chained precompiled
header that refers to the original precompiled header. This drastically
reduces the time needed to serialize the precompiled preamble for use in
reparsing.
Chained precompiled headers get their name because each precompiled header can
depend on one other precompiled header, forming a chain of dependencies. A
translation unit will then include the precompiled header that starts the chain
(i.e., nothing depends on it). This linearity of dependencies is important for
the semantic model of chained precompiled headers, because the most-recent
precompiled header can provide information that overrides the information
provided by the precompiled headers it depends on, just like a header file
``B.h`` that includes another header ``A.h`` can modify the state produced by
parsing ``A.h``, e.g., by ``#undef``'ing a macro defined in ``A.h``.
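For instance (the file names and the macro below are purely illustrative):

.. code-block:: c++

  // A.h: compiled into the base precompiled header.
  #define HAVE_FAST_ALLOCATOR 1

  // B.h: compiled into a chained precompiled header that depends on A.h's.
  #include "A.h"
  #undef HAVE_FAST_ALLOCATOR  // overrides the state recorded for A.h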
There are several ways in which chained precompiled headers generalize the AST
file model:
Numbering of IDs
Many different kinds of entities --- identifiers, declarations, types, etc.
--- have ID numbers that start at 1 or some other predefined constant and
grow upward. Each precompiled header records the maximum ID number it has
assigned in each category. Then, when a new precompiled header is generated
that depends on (chains to) another precompiled header, it will start
counting at the next available ID number. This way, one can determine, given
an ID number, which AST file actually contains the entity.
Name lookup
When writing a chained precompiled header, Clang attempts to write only
information that has changed from the precompiled header on which it is
based. This changes the lookup algorithm for the various tables, such as the
:ref:`identifier table <pchinternals-ident-table>`: the search starts at the
most-recent precompiled header. If no entry is found, lookup then proceeds
to the identifier table in the precompiled header it depends on, and so on.
Once a lookup succeeds, that result is considered definitive, overriding any
results from earlier precompiled headers (a short sketch of this chained
lookup follows the list below).
Update records
There are various ways in which a later precompiled header can modify the
entities described in an earlier precompiled header. For example, later
precompiled headers can add entries into the various name-lookup tables for
the translation unit or namespaces, or add new categories to an Objective-C
class. Each of these updates is captured in an "update record" that is
stored in the chained precompiled header file and will be loaded along with
the original entity.
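The lookup order described under "Name lookup" amounts to a front-to-back
search over the chain, as in the following sketch. The ``SerializedFile``
type and its in-memory table are invented for illustration; the real tables
live on disk.

.. code-block:: c++

  #include <map>
  #include <string>
  #include <vector>

  // Stand-in for one AST file's identifier table (hypothetical).
  struct SerializedFile {
    std::map<std::string, unsigned> Identifiers;
    bool lookup(const std::string &Name, unsigned &Result) const {
      auto It = Identifiers.find(Name);
      if (It == Identifiers.end())
        return false;
      Result = It->second;
      return true;
    }
  };

  // Search from the most recent chained PCH back to the start of the chain;
  // the first hit is definitive and overrides any earlier file.
  bool lookupChained(const std::vector<const SerializedFile *> &MostRecentFirst,
                     const std::string &Name, unsigned &Result) {
    for (const SerializedFile *File : MostRecentFirst)
      if (File->lookup(Name, Result))
        return true;
    return false;
  }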
.. _pchinternals-modules:
Modules
-------
Modules generalize the chained precompiled header model yet further, from a
linear chain of precompiled headers to an arbitrary directed acyclic graph
(DAG) of AST files. All of the same techniques used to make chained
precompiled headers work --- ID numbering, name lookup, update records --- are
shared with modules. However, the DAG nature of modules introduces a number of
additional complications to the model:
Numbering of IDs
The simple, linear numbering scheme used in chained precompiled headers falls
apart with the module DAG, because different modules may end up with
different numbering schemes for entities they imported from common shared
modules. To account for this, each module file provides information about
which modules it depends on and which ID numbers it assigned to the entities
in those modules, as well as which ID numbers it took for its own new
entities. The AST reader then maps these "local" ID numbers into a "global"
ID number space for the current translation unit, providing a 1-1 mapping
between entities (in whatever AST file they inhabit) and global ID numbers.
If that translation unit is then serialized into an AST file, this mapping
will be stored for use when the AST file is imported (a minimal sketch of this
local-to-global remapping appears at the end of this section).
Declaration merging
It is possible for a given entity (from the language's perspective) to be
declared multiple times in different places. For example, two different
headers can have the declaration of ``printf`` or could forward-declare
``struct stat``. If each of those headers is included in a module, and some
third party imports both of those modules, there is a potentially serious
problem: name lookup for ``printf`` or ``struct stat`` will find both
declarations, but the AST nodes are unrelated. This would result in a
compilation error, due to an ambiguity in name lookup. Therefore, the AST
reader performs declaration merging according to the appropriate language
semantics, ensuring that the two disjoint declarations are merged into a
single redeclaration chain (with a common canonical declaration), so that it
is as if one of the headers had been included before the other.
Name Visibility
Modules allow certain names that occur during module creation to be "hidden",
so that they are not part of the public interface of the module and are not
visible to its clients. The AST reader maintains a "visible" bit on various
AST nodes (declarations, macros, etc.) to indicate whether that particular
AST node is currently visible; the various name lookup mechanisms in Clang
inspect the visible bit to determine whether that entity, which is still in
the AST (because other, visible AST nodes may depend on it), can actually be
found by name lookup. When a new (sub)module is imported, it may make
existing, non-visible, already-deserialized AST nodes visible; it is the
responsibility of the AST reader to find and update these AST nodes when it
is notified of the import.
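To make the "Numbering of IDs" remapping above concrete, the following sketch
shows one way local IDs could be translated into a global ID space. The data
layout is invented for illustration and is much simpler than the AST reader's
real remapping tables.

.. code-block:: c++

  #include <cstdint>
  #include <map>

  // Hypothetical remapping table for one AST file being read: each entry
  // says "local IDs starting at <key> correspond to entities that begin at
  // <value> in the global numbering".
  class LocalToGlobalIDMap {
    std::map<uint32_t, uint32_t> RangeStartToGlobalBase;

  public:
    void addRange(uint32_t LocalStart, uint32_t GlobalBase) {
      RangeStartToGlobalBase[LocalStart] = GlobalBase;
    }

    // Translate a local ID found in this AST file into the translation
    // unit's global ID space; assumes the ID falls in a registered range.
    uint32_t toGlobal(uint32_t LocalID) const {
      auto It = RangeStartToGlobalBase.upper_bound(LocalID);
      --It;
      return It->second + (LocalID - It->first);
    }
  };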

File diff suppressed because it is too large.
Binary file not shown (image, 31 KiB).

View File

@ -0,0 +1,163 @@
==========================
Pretokenized Headers (PTH)
==========================
This document first describes the low-level interface for using PTH and
then briefly elaborates on its design and implementation. If you are
interested in the end-user view, please see the :ref:`User's Manual
<usersmanual-precompiled-headers>`.
Using Pretokenized Headers with ``clang`` (Low-level Interface)
===============================================================
The Clang compiler frontend, ``clang -cc1``, supports three command line
options for generating and using PTH files.
To generate PTH files using ``clang -cc1``, use the option ``-emit-pth``:
.. code-block:: console
$ clang -cc1 test.h -emit-pth -o test.h.pth
This option is transparently used by ``clang`` when generating PTH
files. Similarly, PTH files can be used as prefix headers using the
``-include-pth`` option:
.. code-block:: console
$ clang -cc1 -include-pth test.h.pth test.c -o test.s
Alternatively, Clang's PTH files can be used as a raw "token-cache" (or
"content" cache) of the source included by the original header file.
This means that the contents of the PTH file are searched as substitutes
for *any* source files that are used by ``clang -cc1`` to process a
source file. This is done by specifying the ``-token-cache`` option:
.. code-block:: console
$ cat test.h
#include <stdio.h>
$ clang -cc1 -emit-pth test.h -o test.h.pth
$ cat test.c
#include "test.h"
$ clang -cc1 test.c -o test -token-cache test.h.pth
In this example the contents of ``stdio.h`` (and the files it includes)
will be retrieved from ``test.h.pth``, as the PTH file is being used in
this case as a raw cache of the contents of ``test.h``. This is a
low-level interface used to both implement the high-level PTH interface
as well as to provide alternative means to use PTH-style caching.
PTH Design and Implementation
=============================
Unlike GCC's precompiled headers, which cache the full ASTs and
preprocessor state of a header file, Clang's pretokenized header files
mainly cache the raw lexer *tokens* that are needed to segment the
stream of characters in a source file into keywords, identifiers, and
operators. Consequently, PTH mainly serves to speed up the lexing and
preprocessing of a source file, while parsing and type-checking must be
completely redone every time a PTH file is used.
Basic Design Tradeoffs
----------------------
In the long term there are plans to provide an alternate PCH
implementation for Clang that also caches the work for parsing and type
checking the contents of header files. The current implementation of PCH
in Clang as pretokenized header files was motivated by the following
factors:
**Language independence**
PTH files work with any language that
Clang's lexer can handle, including C, Objective-C, and (in the early
stages) C++. This means development on language features at the
parsing level or above (which is basically almost all interesting
pieces) does not require PTH to be modified.
**Simple design**
Relatively speaking, PTH has a simple design and
implementation, making it easy to test. Further, because the
machinery for PTH resides at the lower-levels of the Clang library
stack it is fairly straightforward to profile and optimize.
Further, compared to GCC's PCH implementation (which is the dominant
precompiled header file implementation that Clang can be directly
compared against) the PTH design in Clang yields several attractive
features:
**Architecture independence**
In contrast to GCC's PCH files (and
those of several other compilers), Clang's PTH files are architecture
independent, requiring only a single PTH file when building a
program for multiple architectures.
For example, on Mac OS X one may wish to compile a "universal binary"
that runs on PowerPC, 32-bit Intel (i386), and 64-bit Intel
architectures. In contrast, GCC requires a PCH file for each
architecture, as the definitions of types in the AST are
architecture-specific. Since a Clang PTH file essentially represents
a lexical cache of header files, a single PTH file can be safely used
when compiling for multiple architectures. This can also reduce
compile times because only a single PTH file needs to be generated
during a build instead of several.
**Reduced memory pressure**
Similar to GCC, Clang reads PTH files
via the use of memory mapping (i.e., ``mmap``). Clang, however,
memory maps PTH files as read-only, meaning that multiple invocations
of ``clang -cc1`` can share the same pages in memory from a
memory-mapped PTH file. In comparison, GCC also memory maps its PCH
files but also modifies those pages in memory, incurring the
copy-on-write costs. The read-only nature of PTH can greatly reduce
memory pressure for builds involving multiple cores, thus improving
overall scalability.
**Fast generation**
PTH files can be generated in a small fraction
of the time needed to generate GCC's PCH files. Since PTH/PCH
generation is a serial operation that typically blocks progress
during a build, faster generation time leads to improved processor
utilization with parallel builds on multicore machines.
Despite these strengths, PTH's simple design suffers some algorithmic
handicaps compared to other PCH strategies such as those used by GCC.
While PTH can greatly speed up the processing time of a header file, the
amount of work required to process a header file is still roughly linear
in the size of the header file. In contrast, the amount of work done by
GCC to process a precompiled header is (theoretically) constant (the
ASTs for the header are literally memory mapped into the compiler). This
means that only the pieces of the header file that are referenced by the
source file including the header need to be processed by the compiler
during actual compilation.
of PCH mitigates some of these algorithmic strengths via the use of
copy-on-write pages, the approach itself can fundamentally dominate at
an algorithmic level, especially when one considers header files of
arbitrary size.
There are plans to potentially implement a complementary PCH
implementation for Clang based on the lazy deserialization of ASTs. This
approach would theoretically have the same constant-time algorithmic
advantages just mentioned but would also retain some of the strengths of
PTH such as reduced memory pressure (ideal for multi-core builds).
Internal PTH Optimizations
--------------------------
While the main optimization employed by PTH is to reduce lexing time of
header files by caching pre-lexed tokens, PTH also employs several other
optimizations to speed up the processing of header files:
- ``stat`` caching: PTH files cache information obtained via calls to
``stat`` that ``clang -cc1`` uses to resolve which files are included
by ``#include`` directives. This greatly reduces the overhead
involved in context-switching to the kernel to resolve included
files (a simplified sketch of such a cache follows this list).
- Fast skipping of ``#ifdef`` ... ``#endif`` chains: PTH files
record the basic structure of nested preprocessor blocks. When the
condition of the preprocessor block is false, all of its tokens are
immediately skipped instead of requiring them to be handled by
Clang's preprocessor.
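As a rough illustration of the ``stat`` caching idea mentioned above (this is
not Clang's implementation; the real cache is serialized into the PTH file,
and the sketch assumes a POSIX ``stat``):

.. code-block:: c++

  #include <string>
  #include <sys/stat.h>
  #include <unordered_map>

  // Memoize stat() results by path so that repeatedly resolving the same
  // #include target consults the cache instead of the kernel.
  class StatCache {
    struct Entry { bool Exists; struct stat Info; };
    std::unordered_map<std::string, Entry> Cache;

  public:
    bool get(const std::string &Path, struct stat &Out) {
      auto It = Cache.find(Path);
      if (It == Cache.end()) {
        Entry E;
        E.Exists = (::stat(Path.c_str(), &E.Info) == 0);
        It = Cache.emplace(Path, E).first;
      }
      if (It->second.Exists)
        Out = It->second.Info;
      return It->second.Exists;
    }
  };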

View File

@ -0,0 +1,216 @@
==========================================================
How to write RecursiveASTVisitor based ASTFrontendActions.
==========================================================
Introduction
============
In this tutorial you will learn how to create a FrontendAction that uses
a RecursiveASTVisitor to find CXXRecordDecl AST nodes with a specified
name.
Creating a FrontendAction
=========================
When writing a clang based tool like a Clang Plugin or a standalone tool
based on LibTooling, the common entry point is the FrontendAction.
FrontendAction is an interface that allows execution of user specific
actions as part of the compilation. To run tools over the AST clang
provides the convenience interface ASTFrontendAction, which takes care
of executing the action. The only part left is to implement the
CreateASTConsumer method that returns an ASTConsumer per translation
unit.
::
class FindNamedClassAction : public clang::ASTFrontendAction {
public:
virtual clang::ASTConsumer *CreateASTConsumer(
clang::CompilerInstance &Compiler, llvm::StringRef InFile) {
return new FindNamedClassConsumer;
}
};
Creating an ASTConsumer
=======================
ASTConsumer is an interface used to write generic actions on an AST,
regardless of how the AST was produced. ASTConsumer provides many
different entry points, but for our use case the only one needed is
HandleTranslationUnit, which is called with the ASTContext for the
translation unit.
::
class FindNamedClassConsumer : public clang::ASTConsumer {
public:
virtual void HandleTranslationUnit(clang::ASTContext &Context) {
// Traversing the translation unit decl via a RecursiveASTVisitor
// will visit all nodes in the AST.
Visitor.TraverseDecl(Context.getTranslationUnitDecl());
}
private:
// A RecursiveASTVisitor implementation.
FindNamedClassVisitor Visitor;
};
Using the RecursiveASTVisitor
=============================
Now that everything is hooked up, the next step is to implement a
RecursiveASTVisitor to extract the relevant information from the AST.
The RecursiveASTVisitor provides hooks of the form bool
VisitNodeType(NodeType \*) for most AST nodes; the exception is TypeLoc
nodes, which are passed by value. We only need to implement the methods
for the relevant node types.
Let's start by writing a RecursiveASTVisitor that visits all
CXXRecordDecl's.
::
class FindNamedClassVisitor
: public RecursiveASTVisitor<FindNamedClassVisitor> {
public:
bool VisitCXXRecordDecl(CXXRecordDecl *Declaration) {
// For debugging, dumping the AST nodes will show which nodes are already
// being visited.
Declaration->dump();
// The return value indicates whether we want the visitation to proceed.
// Return false to stop the traversal of the AST.
return true;
}
};
In the methods of our RecursiveASTVisitor we can now use the full power
of the Clang AST to drill through to the parts that are interesting for
us. For example, to find all class declarations with a certain name, we
can check for a specific qualified name:
::
bool VisitCXXRecordDecl(CXXRecordDecl *Declaration) {
if (Declaration->getQualifiedNameAsString() == "n::m::C")
Declaration->dump();
return true;
}
Accessing the SourceManager and ASTContext
==========================================
Some of the information about the AST, like source locations and global
identifier information, are not stored in the AST nodes themselves, but
in the ASTContext and its associated source manager. To retrieve them we
need to hand the ASTContext into our RecursiveASTVisitor implementation.
The ASTContext is available from the CompilerInstance during the call to
CreateASTConsumer. We can thus extract it there and hand it into our
freshly created FindNamedClassConsumer:
::
virtual clang::ASTConsumer *CreateASTConsumer(
clang::CompilerInstance &Compiler, llvm::StringRef InFile) {
return new FindNamedClassConsumer(&Compiler.getASTContext());
}
Now that the ASTContext is available in the RecursiveASTVisitor, we can
do more interesting things with AST nodes, like looking up their source
locations:
::
bool VisitCXXRecordDecl(CXXRecordDecl *Declaration) {
if (Declaration->getQualifiedNameAsString() == "n::m::C") {
// getFullLoc uses the ASTContext's SourceManager to resolve the source
// location and break it up into its line and column parts.
FullSourceLoc FullLocation = Context->getFullLoc(Declaration->getLocStart());
if (FullLocation.isValid())
llvm::outs() << "Found declaration at "
<< FullLocation.getSpellingLineNumber() << ":"
<< FullLocation.getSpellingColumnNumber() << "\n";
}
return true;
}
Putting it all together
=======================
Now we can combine all of the above into a small example program:
::
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/FrontendAction.h"
#include "clang/Tooling/Tooling.h"
using namespace clang;
class FindNamedClassVisitor
: public RecursiveASTVisitor<FindNamedClassVisitor> {
public:
explicit FindNamedClassVisitor(ASTContext *Context)
: Context(Context) {}
bool VisitCXXRecordDecl(CXXRecordDecl *Declaration) {
if (Declaration->getQualifiedNameAsString() == "n::m::C") {
FullSourceLoc FullLocation = Context->getFullLoc(Declaration->getLocStart());
if (FullLocation.isValid())
llvm::outs() << "Found declaration at "
<< FullLocation.getSpellingLineNumber() << ":"
<< FullLocation.getSpellingColumnNumber() << "\n";
}
return true;
}
private:
ASTContext *Context;
};
class FindNamedClassConsumer : public clang::ASTConsumer {
public:
explicit FindNamedClassConsumer(ASTContext *Context)
: Visitor(Context) {}
virtual void HandleTranslationUnit(clang::ASTContext &Context) {
Visitor.TraverseDecl(Context.getTranslationUnitDecl());
}
private:
FindNamedClassVisitor Visitor;
};
class FindNamedClassAction : public clang::ASTFrontendAction {
public:
virtual clang::ASTConsumer *CreateASTConsumer(
clang::CompilerInstance &Compiler, llvm::StringRef InFile) {
return new FindNamedClassConsumer(&Compiler.getASTContext());
}
};
int main(int argc, char **argv) {
if (argc > 1) {
clang::tooling::runToolOnCode(new FindNamedClassAction, argv[1]);
}
}
We store this into a file called FindClassDecls.cpp and create the
following CMakeLists.txt to link it:
::
set(LLVM_USED_LIBS clangTooling)
add_clang_executable(find-class-decls FindClassDecls.cpp)
When running this tool over a small code snippet it will output all
declarations of a class n::m::C it found:
::
$ ./bin/find-class-decls "namespace n { namespace m { class C {}; } }"
Found declaration at 1:29

View File

@ -0,0 +1 @@
See llvm/docs/README.txt

View File

@ -0,0 +1,282 @@
=======================
Clang 3.4 Release Notes
=======================
.. contents::
:local:
:depth: 2
Introduction
============
This document contains the release notes for the Clang C/C++/Objective-C
frontend, part of the LLVM Compiler Infrastructure, release 3.4. Here we
describe the status of Clang in some detail, including major
improvements from the previous release and new feature work. For the
general LLVM release notes, see `the LLVM
documentation <http://llvm.org/docs/ReleaseNotes.html>`_. All LLVM
releases may be downloaded from the `LLVM releases web
site <http://llvm.org/releases/>`_.
For more information about Clang or LLVM, including information about the
latest release, please check out the main `Clang Web Site
<http://clang.llvm.org>`_ or the `LLVM Web Site <http://llvm.org>`_.
Note that if you are reading this file from a Subversion checkout or the main
Clang web page, this document applies to the *next* release, not the current
one. To see the release notes for a specific release, please see the `releases
page <http://llvm.org/releases/>`_.
What's New in Clang 3.4?
========================
Some of the major new features and improvements to Clang are listed here.
Generic improvements to Clang as a whole or to its underlying infrastructure
are described first, followed by language-specific sections with improvements
to Clang's support for those languages.
Last release which will build as C++98
--------------------------------------
This is expected to be the last release of Clang which compiles using a C++98
toolchain. We expect to start using some C++11 features in Clang starting after
this release. That said, we are committed to supporting a reasonable set of
modern C++ toolchains as the host compiler on all of the platforms. This will
at least include Visual Studio 2012 on Windows, and Clang 3.1 or GCC 4.7.x on
Mac and Linux. The final set of compilers (and the C++11 features they support)
is not set in stone, but we wanted users of Clang to have a heads up that the
next release will involve a substantial change in the host toolchain
requirements.
Note that this change is part of a change for the entire LLVM project, not just
Clang.
Major New Features
------------------
Improvements to Clang's diagnostics
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Clang's diagnostics are constantly being improved to catch more issues, explain
them more clearly, and provide more accurate source information about them. The
improvements since the 3.3 release include:
- -Wheader-guard warns on mismatches between the #ifndef and #define lines
in a header guard.
.. code-block:: c
#ifndef multiple
#define multi
#endif
returns
`warning: 'multiple' is used as a header guard here, followed by #define of a different macro [-Wheader-guard]`
- -Wlogical-not-parentheses warns when a logical not ('!') only applies to the
left-hand side of a comparison. This warning is part of -Wparentheses.
.. code-block:: c++
int i1 = 0, i2 = 1;
bool ret;
ret = !i1 == i2;
returns
`warning: logical not is only applied to the left hand side of this comparison [-Wlogical-not-parentheses]`
- Boolean increment, a deprecated feature, has own warning flag
-Wdeprecated-increment-bool, and is still part of -Wdeprecated.
- Clang errors on builtin enum increments and decrements in C++.
.. code-block:: c++
enum A { A1, A2 };
void test() {
A a;
a++;
}
returns
`error: cannot increment expression of enum type 'A'`
- -Wloop-analysis now warns on for-loops which have the same increment or
decrement in the loop header as the last statement in the loop.
.. code-block:: c
void foo(char *a, char *b, unsigned c) {
for (unsigned i = 0; i < c; ++i) {
a[i] = b[i];
++i;
}
}
returns
`warning: variable 'i' is incremented both in the loop header and in the loop body [-Wloop-analysis]`
- -Wuninitialized now performs checking across field initializers to detect
when one field is used uninitialized in another field's initialization.
.. code-block:: c++
class A {
int x;
int y;
A() : x(y) {}
};
returns
`warning: field 'y' is uninitialized when used here [-Wuninitialized]`
- Clang can detect initializer list use inside a macro and suggest parentheses
if possible to fix.
- Many improvements to Clang's typo correction facilities, such as:
+ Adding global namespace qualifiers so that corrections can refer to shadowed
or otherwise ambiguous or unreachable namespaces.
+ Including accessible class members in the set of typo correction candidates,
so that corrections requiring a class name in the name specifier are now
possible.
+ Allowing typo corrections that involve removing a name specifier.
+ In some situations, correcting function names when a function was given the
wrong number of arguments, including situations where the original function
name was correct but was shadowed by a lexically closer function with the
same name yet took a different number of arguments.
+ Offering typo suggestions for 'using' declarations.
+ Providing better diagnostics and fixit suggestions in more situations when
a '->' was used instead of '.' or vice versa.
+ Providing more relevant suggestions for typos followed by '.' or '='.
+ Various performance improvements when searching for typo correction
candidates.
- `LeakSanitizer <LeakSanitizer.html>`_ is an experimental memory leak detector
which can be combined with AddressSanitizer.
New Compiler Flags
------------------
- Clang no longer special cases -O4 to enable LTO. Explicitly pass -flto to
enable it.
- Clang no longer fails on >= -O5. These flags are mapped to -O3 instead.
- Command line "clang -O3 -flto a.c -c" and "clang -emit-llvm a.c -c"
are no longer equivalent.
- Clang now errors on unknown -m flags (``-munknown-to-clang``),
unknown -f flags (``-funknown-to-clang``) and unknown
options (``-what-is-this``).
C Language Changes in Clang
---------------------------
- Added new checked arithmetic builtins for security critical applications.
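The notes do not name the builtins here; assuming they are the
``__builtin_sadd_overflow`` family described in Clang's language extensions
documentation, usage looks roughly like this:

.. code-block:: c++

  #include <climits>
  #include <cstdio>

  int main() {
    int Sum;
    // Returns true if the signed addition overflowed; Sum receives the
    // (possibly wrapped) result either way.
    if (__builtin_sadd_overflow(INT_MAX, 1, &Sum))
      std::printf("overflow detected\n");
    else
      std::printf("sum = %d\n", Sum);
    return 0;
  }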
C++ Language Changes in Clang
-----------------------------
- Fixed an ABI regression, introduced in Clang 3.2, which affected
member offsets for classes inheriting from certain classes with tail padding.
See `PR16537 <http://llvm.org/PR16537>`_.
- Clang 3.4 supports the 2013-08-28 draft of the ISO WG21 SG10 feature test
macro recommendations. These aim to provide a portable method to determine
whether a compiler supports a language feature, much like Clang's
|has_feature macro|_.
.. |has_feature macro| replace:: ``__has_feature`` macro
.. _has_feature macro: LanguageExtensions.html#has-feature-and-has-extension
C++1y Feature Support
^^^^^^^^^^^^^^^^^^^^^
Clang 3.4 supports all the features in the current working draft of the
upcoming C++ standard, provisionally named C++1y. Support for the following
major new features has been added since Clang 3.3:
- Generic lambdas and initialized lambda captures.
- Deduced function return types (``auto f() { return 0; }``).
- Generalized ``constexpr`` support (variable mutation and loops).
- Variable templates and static data member templates.
- Use of ``'`` as a digit separator in numeric literals.
- Support for sized ``::operator delete`` functions.
In addition, ``[[deprecated]]`` is now accepted as a synonym for Clang's
existing ``deprecated`` attribute.
Use ``-std=c++1y`` to enable C++1y mode.
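For example, the following small program compiles with ``-std=c++1y`` and
exercises a few of the features listed above (deduced return types, generic
lambdas, initialized lambda captures, and digit separators):

.. code-block:: c++

  // Deduced function return type.
  auto add(int a, int b) { return a + b; }

  int main() {
    auto Scale = 1'000;                      // digit separator
    auto Times = [Factor = Scale](auto X) {  // init-capture + generic lambda
      return X * Factor;
    };
    return add(Times(1), Times(2)) == 3'000 ? 0 : 1;  // exits with 0
  }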
OpenCL C Language Changes in Clang
----------------------------------
- OpenCL C "long" now always has a size of 64 bit, and all OpenCL C
types are aligned as specified in the OpenCL C standard. Also,
"char" is now always signed.
Internal API Changes
--------------------
These are major API changes that have happened since the 3.3 release of
Clang. If upgrading an external codebase that uses Clang as a library,
this section should help get you past the largest hurdles of upgrading.
Wide Character Types
^^^^^^^^^^^^^^^^^^^^
The ASTContext class now keeps track of two different types for wide character
types: WCharTy and WideCharTy. WCharTy represents the built-in wchar_t type
available in C++. WideCharTy is the type used for wide character literals; in
C++ it is the same as WCharTy, but in C99, where wchar_t is a typedef, it is an
integer type.
Static Analyzer
---------------
The static analyzer has been greatly improved. This impacts the overall analyzer quality and reduces a number of false positives.
In particular, this release provides enhanced C++ support, reasoning about initializer lists, zeroing constructors, noreturn destructors and modeling of destructor calls on calls to delete.
Clang Format
------------
Clang now includes a new tool ``clang-format`` which can be used to
automatically format C, C++ and Objective-C source code. ``clang-format``
automatically chooses linebreaks and indentation and can be easily integrated
into editors, IDEs and version control systems. It supports several pre-defined
styles as well as precise style control using a multitude of formatting
options. ``clang-format`` itself is just a thin wrapper around a library which
can also be used directly from code refactoring and code translation tools.
More information can be found on `Clang Format's
site <http://clang.llvm.org/docs/ClangFormat.html>`_.
Windows Support
---------------
- `clang-cl <UsersManual.html#clang-cl>`_ provides a new driver mode that is
designed for compatibility with Visual Studio's compiler, cl.exe. This driver
mode makes Clang accept the same kind of command-line options as cl.exe. The
installer will attempt to expose clang-cl in any Visual Studio installations
on the system as a Platform Toolset, e.g. "LLVM-vs2012". clang-cl targets the
Microsoft ABI by default. Please note that this driver mode and compatibility
with the MS ABI is highly experimental.
Python Binding Changes
----------------------
The following methods have been added:
Significant Known Problems
==========================
Additional Information
======================
A wide variety of additional information is available on the `Clang web
page <http://clang.llvm.org/>`_. The web page contains versions of the
API documentation which are up-to-date with the Subversion revision of
the source code. You can access versions of these documents specific to
this release by going into the "``clang/docs/``" directory in the Clang
tree.
If you have any questions or comments about Clang, please feel free to
contact us via the `mailing
list <http://lists.cs.uiuc.edu/mailman/listinfo/cfe-dev>`_.

View File

@ -0,0 +1,79 @@
===========================
Sanitizer special case list
===========================
.. contents::
:local:
Introduction
============
This document describes the way to disable or alter the behavior of
sanitizer tools for certain source-level entities by providing a special
file at compile-time.
Goal and usage
==============
Users of sanitizer tools, such as :doc:`AddressSanitizer`, :doc:`ThreadSanitizer`
or :doc:`MemorySanitizer`, may want to disable or alter some checks for
certain source-level entities to:
* speed up a hot function which is known to be correct;
* ignore a function that does some low-level magic (e.g. walks through the
thread stack, bypassing the frame boundaries);
* ignore a known problem.
To achieve this, the user may create a file listing the entities they want to
ignore, and pass it to Clang at compile time using the
``-fsanitize-blacklist`` flag. See :doc:`UsersManual` for details.
Example
=======
.. code-block:: bash
$ cat foo.c
#include <stdlib.h>
void bad_foo() {
int *a = (int*)malloc(40);
a[10] = 1;
}
int main() { bad_foo(); }
$ cat blacklist.txt
# Ignore reports from bad_foo function.
fun:bad_foo
$ clang -fsanitize=address foo.c ; ./a.out
# AddressSanitizer prints an error report.
$ clang -fsanitize=address -fsanitize-blacklist=blacklist.txt foo.c ; ./a.out
# No error report here.
Format
======
Each line contains an entity type, followed by a colon and a regular
expression, specifying the names of the entities, optionally followed by
an equals sign and a tool-specific category. Empty lines and lines starting
with "#" are ignored. The meanining of ``*`` in regular expression for entity
names is different - it is treated as in shell wildcarding. Two generic
entity types are ``src`` and ``fun``, which allow user to add, respectively,
source files and functions to special case list. Some sanitizer tools may
introduce custom entity types - refer to tool-specific docs.
.. code-block:: bash
# Lines starting with # are ignored.
# Turn off checks for the source file (use absolute path or path relative
# to the current working directory):
src:/path/to/source/file.c
# Turn off checks for particular functions (use mangled names):
fun:MyFooBar
fun:_Z8MyFooBarv
# Extended regular expressions are supported:
fun:bad_(foo|bar)
src:bad_source[1-9].c
# Shell like usage of * is supported (* is treated as .*):
src:bad/sources/*
fun:*BadFunction*
# Specific sanitizer tools may introduce categories.
src:/special/path/*=special_sources

View File

@ -0,0 +1,139 @@
ThreadSanitizer
===============
Introduction
------------
ThreadSanitizer is a tool that detects data races. It consists of a compiler
instrumentation module and a run-time library. Typical slowdown introduced by
ThreadSanitizer is about **5x-15x**. Typical memory overhead introduced by
ThreadSanitizer is about **5x-10x**.
How to build
------------
Follow the `Clang build instructions <../get_started.html>`_. CMake build is
supported.
Supported Platforms
-------------------
ThreadSanitizer is supported on Linux x86_64 (tested on Ubuntu 10.04 and 12.04).
Support for MacOS 10.7 (64-bit only) is planned for 2013. Support for 32-bit
platforms is problematic and not yet planned.
Usage
-----
Simply compile and link your program with ``-fsanitize=thread``. To get a
reasonable performance add ``-O1`` or higher. Use ``-g`` to get file names
and line numbers in the warning messages.
Example:
.. code-block:: c++
% cat projects/compiler-rt/lib/tsan/lit_tests/tiny_race.c
#include <pthread.h>
int Global;
void *Thread1(void *x) {
Global = 42;
return x;
}
int main() {
pthread_t t;
pthread_create(&t, NULL, Thread1, NULL);
Global = 43;
pthread_join(t, NULL);
return Global;
}
$ clang -fsanitize=thread -g -O1 tiny_race.c
If a bug is detected, the program will print an error message to stderr.
Currently, ThreadSanitizer symbolizes its output using an external
``addr2line`` process (this will be fixed in the future).
.. code-block:: bash
% ./a.out
WARNING: ThreadSanitizer: data race (pid=19219)
Write of size 4 at 0x7fcf47b21bc0 by thread T1:
#0 Thread1 tiny_race.c:4 (exe+0x00000000a360)
Previous write of size 4 at 0x7fcf47b21bc0 by main thread:
#0 main tiny_race.c:10 (exe+0x00000000a3b4)
Thread T1 (running) created at:
#0 pthread_create tsan_interceptors.cc:705 (exe+0x00000000c790)
#1 main tiny_race.c:9 (exe+0x00000000a3a4)
``__has_feature(thread_sanitizer)``
------------------------------------
In some cases one may need to execute different code depending on whether
ThreadSanitizer is enabled.
:ref:`\_\_has\_feature <langext-__has_feature-__has_extension>` can be used for
this purpose.
.. code-block:: c
#if defined(__has_feature)
# if __has_feature(thread_sanitizer)
// code that builds only under ThreadSanitizer
# endif
#endif
``__attribute__((no_sanitize_thread))``
-----------------------------------------------
Some code should not be instrumented by ThreadSanitizer.
One may use the function attribute
:ref:`no_sanitize_thread <langext-thread_sanitizer>`
to disable instrumentation of plain (non-atomic) loads/stores in a particular function.
ThreadSanitizer still instruments such functions to avoid false positives and
provide meaningful stack traces.
This attribute may not be
supported by other compilers, so we suggest using it together with
``__has_feature(thread_sanitizer)``.
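For example, a portable wrapper macro can expand to the attribute only when
ThreadSanitizer is enabled (the ``NO_SANITIZE_THREAD`` name below is our own,
not something Clang defines):

.. code-block:: c++

  #if defined(__has_feature)
  # if __has_feature(thread_sanitizer)
  #  define NO_SANITIZE_THREAD __attribute__((no_sanitize_thread))
  # endif
  #endif
  #ifndef NO_SANITIZE_THREAD
  # define NO_SANITIZE_THREAD  // expands to nothing elsewhere
  #endif

  // Plain loads/stores in this function will not be instrumented when
  // building with -fsanitize=thread; the macro is a no-op otherwise.
  NO_SANITIZE_THREAD void set_flag(int *Flag) { *Flag = 1; }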
Blacklist
---------
ThreadSanitizer supports ``src`` and ``fun`` entity types in
:doc:`SanitizerSpecialCaseList`, that can be used to suppress data race reports in
the specified source files or functions. Unlike functions marked with
:ref:`no_sanitize_thread <langext-thread_sanitizer>` attribute,
blacklisted functions are not instrumented at all. This can lead to false positives
due to missed synchronization via atomic operations and missed stack frames in reports.
Limitations
-----------
* ThreadSanitizer uses more real memory than a native run. At the default
settings the memory overhead is 5x plus 1 MB per thread. Settings with 3x
(less accurate analysis) and 9x (more accurate analysis) overhead are also
available.
* ThreadSanitizer maps (but does not reserve) a lot of virtual address space.
This means that tools like ``ulimit`` may not work as usually expected.
* Libc/libstdc++ static linking is not supported.
* Non-position-independent executables are not supported. Therefore, the
``-fsanitize=thread`` flag will cause Clang to act as though the ``-fPIE``
flag had been supplied if compiling without ``-fPIC``, and as though the
``-pie`` flag had been supplied if linking an executable.
Current Status
--------------
ThreadSanitizer is in beta stage. It is known to work on large C++ programs
using pthreads, but we do not promise anything (yet). C++11 threading is
supported with llvm libc++. The test suite is integrated into CMake build
and can be run with ``make check-tsan`` command.
We are actively working on enhancing the tool --- stay tuned. Any help,
especially in the form of minimized standalone tests is more than welcome.
More Information
----------------
`http://code.google.com/p/thread-sanitizer <http://code.google.com/p/thread-sanitizer/>`_.

View File

@ -0,0 +1,97 @@
=================================================
Choosing the Right Interface for Your Application
=================================================
Clang provides infrastructure to write tools that need syntactic and semantic
information about a program. This document will give a short introduction of
the different ways to write clang tools, and their pros and cons.
LibClang
--------
`LibClang <http://clang.llvm.org/doxygen/group__CINDEX.html>`_ is a stable high
level C interface to clang. When in doubt LibClang is probably the interface
you want to use. Consider the other interfaces only when you have a good
reason not to use LibClang.
Canonical examples of when to use LibClang:
* Xcode
* Clang Python Bindings
Use LibClang when you...:
* want to interface with clang from other languages than C++
* need a stable interface that takes care to be backwards compatible
* want powerful high-level abstractions, like iterating through an AST with a
cursor, and don't want to learn all the nitty gritty details of Clang's AST.
Do not use LibClang when you...:
* want full control over the Clang AST
Clang Plugins
-------------
:doc:`Clang Plugins <ClangPlugins>` allow you to run additional actions on the
AST as part of a compilation. Plugins are dynamic libraries that are loaded at
runtime by the compiler, and they're easy to integrate into your build
environment.
Canonical examples of when to use Clang Plugins:
* special lint-style warnings or errors for your project
* creating additional build artifacts from a single compile step
Use Clang Plugins when you...:
* need your tool to rerun if any of the dependencies change
* want your tool to make or break a build
* need full control over the Clang AST
Do not use Clang Plugins when you...:
* want to run tools outside of your build environment
* want full control on how Clang is set up, including mapping of in-memory
virtual files
* need to run over a specific subset of files in your project which is not
necessarily related to any changes which would trigger rebuilds
LibTooling
----------
:doc:`LibTooling <LibTooling>` is a C++ interface aimed at writing standalone
tools, as well as integrating into services that run clang tools. Canonical
examples of when to use LibTooling:
* a simple syntax checker
* refactoring tools
Use LibTooling when you...:
* want to run tools over a single file, or a specific subset of files,
independently of the build system
* want full control over the Clang AST
* want to share code with Clang Plugins
Do not use LibTooling when you...:
* want to run as part of the build triggered by dependency changes
* want a stable interface so you don't need to change your code when the AST API
changes
* want high level abstractions like cursors and code completion out of the box
* do not want to write your tools in C++
:doc:`Clang tools <ClangTools>` are a collection of specific developer tools
built on top of the LibTooling infrastructure as part of the Clang project.
They are targeted at automating and improving core development activities of
C/C++ developers.
Examples of tools we are building or planning as part of the Clang project:
* Syntax checking (:program:`clang-check`)
* Automatic fixing of compile errors (:program:`clang-fixit`)
* Automatic code formatting (:program:`clang-format`)
* Migration tools for new features in new language standards
* Core refactoring tools

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,151 @@
============
Debug Checks
============
.. contents::
:local:
The analyzer contains a number of checkers which can aid in debugging. Enable
them by using the "-analyzer-checker=" flag, followed by the name of the
checker.
General Analysis Dumpers
========================
These checkers are used to dump the results of various infrastructural analyses
to stderr. Some checkers also have "view" variants, which will display a graph
using a 'dot' format viewer (such as Graphviz on OS X) instead.
- debug.DumpCallGraph, debug.ViewCallGraph: Show the call graph generated for
the current translation unit. This is used to determine the order in which to
analyze functions when inlining is enabled.
- debug.DumpCFG, debug.ViewCFG: Show the CFG generated for each top-level
function being analyzed.
- debug.DumpDominators: Shows the dominance tree for the CFG of each top-level
function.
- debug.DumpLiveVars: Show the results of live variable analysis for each
top-level function being analyzed.
- debug.ViewExplodedGraph: Show the Exploded Graphs generated for the
analysis of different functions in the input translation unit. When there
are several functions analyzed, display one graph per function. Beware
that these graphs may grow very large, even for small functions.
Path Tracking
=============
These checkers print information about the path taken by the analyzer engine.
- debug.DumpCalls: Prints out every function or method call encountered during a
path traversal. This is indented to show the call stack, but does NOT do any
special handling of branches, meaning different paths could end up
interleaved.
- debug.DumpTraversal: Prints the name of each branch statement encountered
during a path traversal ("IfStmt", "WhileStmt", etc). Currently used to check
whether the analysis engine is doing BFS or DFS.
State Checking
==============
These checkers will print out information about the analyzer state in the form
of analysis warnings. They are intended for use with the -verify functionality
in regression tests.
- debug.TaintTest: Prints out the word "tainted" for every expression that
carries taint. At the time of this writing, taint was only introduced by the
checks under experimental.security.taint.TaintPropagation; this checker may
eventually move to the security.taint package.
- debug.ExprInspection: Responds to certain function calls, which are modeled
after builtins. These function calls should affect the program state other
than the evaluation of their arguments; to use them, you will need to declare
them within your test file. The available functions are described below.
(FIXME: debug.ExprInspection should probably be renamed, since it no longer only
inspects expressions.)
ExprInspection checks
---------------------
- void clang_analyzer_eval(bool);
Prints TRUE if the argument is known to have a non-zero value, FALSE if the
argument is known to have a zero or null value, and UNKNOWN if the argument
isn't sufficiently constrained on this path. You can use this to test other
values by using expressions like "x == 5". Note that this functionality is
currently DISABLED in inlined functions, since different calls to the same
inlined function could provide different information, making it difficult to
write proper -verify directives.
In C, the argument can be typed as 'int' or as '_Bool'.
Example usage::
clang_analyzer_eval(x); // expected-warning{{UNKNOWN}}
if (!x) return;
clang_analyzer_eval(x); // expected-warning{{TRUE}}
- void clang_analyzer_checkInlined(bool);
If a call occurs within an inlined function, prints TRUE or FALSE according to
the value of its argument. If a call occurs outside an inlined function,
nothing is printed.
The intended use of this checker is to assert that a function is inlined at
least once (by passing 'true' and expecting a warning), or to assert that a
function is never inlined (by passing 'false' and expecting no warning). The
argument is technically unnecessary but is intended to clarify intent.
You might wonder why we can't print TRUE if a function is ever inlined and
FALSE if it is not. The problem is that any inlined function could conceivably
also be analyzed as a top-level function (in which case both TRUE and FALSE
would be printed), depending on the value of the -analyzer-inlining option.
In C, the argument can be typed as 'int' or as '_Bool'.
Example usage::
int inlined() {
clang_analyzer_checkInlined(true); // expected-warning{{TRUE}}
return 42;
}
void topLevel() {
clang_analyzer_checkInlined(false); // no-warning (not inlined)
int value = inlined();
// This assertion will not be valid if the previous call was not inlined.
clang_analyzer_eval(value == 42); // expected-warning{{TRUE}}
}
- void clang_analyzer_warnIfReached();
Generate a warning if this line of code gets reached by the analyzer.
Example usage::
if (true) {
clang_analyzer_warnIfReached(); // expected-warning{{REACHABLE}}
}
else {
clang_analyzer_warnIfReached(); // no-warning
}
Statistics
==========
The debug.Stats checker collects various information about the analysis of each
function, such as how many blocks were reached and if the analyzer timed out.
There is also an additional -analyzer-stats flag, which enables various
statistics within the analyzer engine. Note the Stats checker (which produces at
least one bug report per function) may actually change the values reported by
-analyzer-stats.

Some files were not shown because too many files have changed in this diff.