From d24f1f342a4637756297117ead63e2e966999126 Mon Sep 17 00:00:00 2001
From: Daniel Dunbar <daniel@zuster.org>
Date: Sat, 26 Dec 2009 22:58:23 +0000
Subject: [PATCH] lit: Sink code into a 'lit' package.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@92168 91177308-0d34-0410-b5e6-96231b3b80d8
---
 utils/lit/lit.py | 575 +----
 .../ExampleTests.ObjDir/lit.site.cfg | 0
 .../ExampleTests/Clang/fsyntax-only.c | 0
 .../lit/{ => lit}/ExampleTests/Clang/lit.cfg | 0
 .../LLVM.InTree/test/Bar/bar-test.ll | 0
 .../ExampleTests/LLVM.InTree/test/Bar/dg.exp | 0
 .../ExampleTests/LLVM.InTree/test/lit.cfg | 0
 .../LLVM.InTree/test/lit.site.cfg | 0
 .../ExampleTests/LLVM.InTree/test/site.exp | 0
 .../ExampleTests/LLVM.OutOfTree/lit.local.cfg | 0
 .../LLVM.OutOfTree/obj/test/Foo/lit.local.cfg | 0
 .../LLVM.OutOfTree/obj/test/lit.site.cfg | 0
 .../LLVM.OutOfTree/obj/test/site.exp | 0
 .../LLVM.OutOfTree/src/test/Foo/data.txt | 0
 .../LLVM.OutOfTree/src/test/Foo/dg.exp | 0
 .../LLVM.OutOfTree/src/test/Foo/pct-S.ll | 0
 .../LLVM.OutOfTree/src/test/lit.cfg | 0
 .../ExampleTests/ShExternal/lit.local.cfg | 0
 .../ExampleTests/ShInternal/lit.local.cfg | 0
 .../ExampleTests/TclTest/lit.local.cfg | 0
 .../ExampleTests/TclTest/stderr-pipe.ll | 0
 .../ExampleTests/TclTest/tcl-redir-1.ll | 0
 utils/lit/{ => lit}/ExampleTests/fail.c | 0
 utils/lit/{ => lit}/ExampleTests/lit.cfg | 0
 utils/lit/{ => lit}/ExampleTests/pass.c | 0
 utils/lit/{ => lit}/ExampleTests/xfail.c | 0
 utils/lit/{ => lit}/ExampleTests/xpass.c | 0
 utils/lit/{ => lit}/LitConfig.py | 0
 utils/lit/{ => lit}/LitFormats.py | 0
 utils/lit/{ => lit}/ProgressBar.py | 0
 utils/lit/{ => lit}/ShCommands.py | 0
 utils/lit/{ => lit}/ShUtil.py | 0
 utils/lit/{ => lit}/TclUtil.py | 0
 utils/lit/{ => lit}/Test.py | 0
 utils/lit/{ => lit}/TestFormats.py | 0
 utils/lit/{ => lit}/TestRunner.py | 0
 utils/lit/{ => lit}/TestingConfig.py | 0
 utils/lit/{ => lit}/Util.py | 0
 utils/lit/lit/__init__.py | 10 +
 utils/lit/lit/lit.py | 579 ++++++++++++++++++
 40 files changed, 591 insertions(+), 573 deletions(-)
 rename utils/lit/{ => lit}/ExampleTests.ObjDir/lit.site.cfg (100%)
 rename utils/lit/{ => lit}/ExampleTests/Clang/fsyntax-only.c (100%)
 rename utils/lit/{ => lit}/ExampleTests/Clang/lit.cfg (100%)
 rename utils/lit/{ => lit}/ExampleTests/LLVM.InTree/test/Bar/bar-test.ll (100%)
 rename utils/lit/{ => lit}/ExampleTests/LLVM.InTree/test/Bar/dg.exp (100%)
 rename utils/lit/{ => lit}/ExampleTests/LLVM.InTree/test/lit.cfg (100%)
 rename utils/lit/{ => lit}/ExampleTests/LLVM.InTree/test/lit.site.cfg (100%)
 rename utils/lit/{ => lit}/ExampleTests/LLVM.InTree/test/site.exp (100%)
 rename utils/lit/{ => lit}/ExampleTests/LLVM.OutOfTree/lit.local.cfg (100%)
 rename utils/lit/{ => lit}/ExampleTests/LLVM.OutOfTree/obj/test/Foo/lit.local.cfg (100%)
 rename utils/lit/{ => lit}/ExampleTests/LLVM.OutOfTree/obj/test/lit.site.cfg (100%)
 rename utils/lit/{ => lit}/ExampleTests/LLVM.OutOfTree/obj/test/site.exp (100%)
 rename utils/lit/{ => lit}/ExampleTests/LLVM.OutOfTree/src/test/Foo/data.txt (100%)
 rename utils/lit/{ => lit}/ExampleTests/LLVM.OutOfTree/src/test/Foo/dg.exp (100%)
 rename utils/lit/{ => lit}/ExampleTests/LLVM.OutOfTree/src/test/Foo/pct-S.ll (100%)
 rename utils/lit/{ => lit}/ExampleTests/LLVM.OutOfTree/src/test/lit.cfg (100%)
 rename utils/lit/{ => lit}/ExampleTests/ShExternal/lit.local.cfg (100%)
 rename utils/lit/{ => lit}/ExampleTests/ShInternal/lit.local.cfg (100%)
 rename utils/lit/{ => lit}/ExampleTests/TclTest/lit.local.cfg (100%)
 rename utils/lit/{ => lit}/ExampleTests/TclTest/stderr-pipe.ll (100%)
 rename utils/lit/{ 
=> lit}/ExampleTests/TclTest/tcl-redir-1.ll (100%) rename utils/lit/{ => lit}/ExampleTests/fail.c (100%) rename utils/lit/{ => lit}/ExampleTests/lit.cfg (100%) rename utils/lit/{ => lit}/ExampleTests/pass.c (100%) rename utils/lit/{ => lit}/ExampleTests/xfail.c (100%) rename utils/lit/{ => lit}/ExampleTests/xpass.c (100%) rename utils/lit/{ => lit}/LitConfig.py (100%) rename utils/lit/{ => lit}/LitFormats.py (100%) rename utils/lit/{ => lit}/ProgressBar.py (100%) rename utils/lit/{ => lit}/ShCommands.py (100%) rename utils/lit/{ => lit}/ShUtil.py (100%) rename utils/lit/{ => lit}/TclUtil.py (100%) rename utils/lit/{ => lit}/Test.py (100%) rename utils/lit/{ => lit}/TestFormats.py (100%) rename utils/lit/{ => lit}/TestRunner.py (100%) rename utils/lit/{ => lit}/TestingConfig.py (100%) rename utils/lit/{ => lit}/Util.py (100%) create mode 100644 utils/lit/lit/__init__.py create mode 100755 utils/lit/lit/lit.py diff --git a/utils/lit/lit.py b/utils/lit/lit.py index 293976fd309..851063b3bd1 100755 --- a/utils/lit/lit.py +++ b/utils/lit/lit.py @@ -1,576 +1,5 @@ #!/usr/bin/env python -""" -lit - LLVM Integrated Tester. - -See lit.pod for more information. -""" - -import math, os, platform, random, re, sys, time, threading, traceback - -import ProgressBar -import TestRunner -import Util - -from TestingConfig import TestingConfig -import LitConfig -import Test - -# Configuration files to look for when discovering test suites. These can be -# overridden with --config-prefix. -# -# FIXME: Rename to 'config.lit', 'site.lit', and 'local.lit' ? -gConfigName = 'lit.cfg' -gSiteConfigName = 'lit.site.cfg' - -kLocalConfigName = 'lit.local.cfg' - -class TestingProgressDisplay: - def __init__(self, opts, numTests, progressBar=None): - self.opts = opts - self.numTests = numTests - self.current = None - self.lock = threading.Lock() - self.progressBar = progressBar - self.completed = 0 - - def update(self, test): - # Avoid locking overhead in quiet mode - if self.opts.quiet and not test.result.isFailure: - self.completed += 1 - return - - # Output lock. - self.lock.acquire() - try: - self.handleUpdate(test) - finally: - self.lock.release() - - def finish(self): - if self.progressBar: - self.progressBar.clear() - elif self.opts.quiet: - pass - elif self.opts.succinct: - sys.stdout.write('\n') - - def handleUpdate(self, test): - self.completed += 1 - if self.progressBar: - self.progressBar.update(float(self.completed)/self.numTests, - test.getFullName()) - - if self.opts.succinct and not test.result.isFailure: - return - - if self.progressBar: - self.progressBar.clear() - - print '%s: %s (%d of %d)' % (test.result.name, test.getFullName(), - self.completed, self.numTests) - - if test.result.isFailure and self.opts.showOutput: - print "%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(), - '*'*20) - print test.output - print "*" * 20 - - sys.stdout.flush() - -class TestProvider: - def __init__(self, tests, maxTime): - self.maxTime = maxTime - self.iter = iter(tests) - self.lock = threading.Lock() - self.startTime = time.time() - - def get(self): - # Check if we have run out of time. - if self.maxTime is not None: - if time.time() - self.startTime > self.maxTime: - return None - - # Otherwise take the next test. 
- self.lock.acquire() - try: - item = self.iter.next() - except StopIteration: - item = None - self.lock.release() - return item - -class Tester(threading.Thread): - def __init__(self, litConfig, provider, display): - threading.Thread.__init__(self) - self.litConfig = litConfig - self.provider = provider - self.display = display - - def run(self): - while 1: - item = self.provider.get() - if item is None: - break - self.runTest(item) - - def runTest(self, test): - result = None - startTime = time.time() - try: - result, output = test.config.test_format.execute(test, - self.litConfig) - except KeyboardInterrupt: - # This is a sad hack. Unfortunately subprocess goes - # bonkers with ctrl-c and we start forking merrily. - print '\nCtrl-C detected, goodbye.' - os.kill(0,9) - except: - if self.litConfig.debug: - raise - result = Test.UNRESOLVED - output = 'Exception during script execution:\n' - output += traceback.format_exc() - output += '\n' - elapsed = time.time() - startTime - - test.setResult(result, output, elapsed) - self.display.update(test) - -def dirContainsTestSuite(path): - cfgpath = os.path.join(path, gSiteConfigName) - if os.path.exists(cfgpath): - return cfgpath - cfgpath = os.path.join(path, gConfigName) - if os.path.exists(cfgpath): - return cfgpath - -def getTestSuite(item, litConfig, cache): - """getTestSuite(item, litConfig, cache) -> (suite, relative_path) - - Find the test suite containing @arg item. - - @retval (None, ...) - Indicates no test suite contains @arg item. - @retval (suite, relative_path) - The suite that @arg item is in, and its - relative path inside that suite. - """ - def search1(path): - # Check for a site config or a lit config. - cfgpath = dirContainsTestSuite(path) - - # If we didn't find a config file, keep looking. - if not cfgpath: - parent,base = os.path.split(path) - if parent == path: - return (None, ()) - - ts, relative = search(parent) - return (ts, relative + (base,)) - - # We found a config file, load it. - if litConfig.debug: - litConfig.note('loading suite config %r' % cfgpath) - - cfg = TestingConfig.frompath(cfgpath, None, litConfig, mustExist = True) - source_root = os.path.realpath(cfg.test_source_root or path) - exec_root = os.path.realpath(cfg.test_exec_root or path) - return Test.TestSuite(cfg.name, source_root, exec_root, cfg), () - - def search(path): - # Check for an already instantiated test suite. - res = cache.get(path) - if res is None: - cache[path] = res = search1(path) - return res - - # Canonicalize the path. - item = os.path.realpath(item) - - # Skip files and virtual components. - components = [] - while not os.path.isdir(item): - parent,base = os.path.split(item) - if parent == item: - return (None, ()) - components.append(base) - item = parent - components.reverse() - - ts, relative = search(item) - return ts, tuple(relative + tuple(components)) - -def getLocalConfig(ts, path_in_suite, litConfig, cache): - def search1(path_in_suite): - # Get the parent config. - if not path_in_suite: - parent = ts.config - else: - parent = search(path_in_suite[:-1]) - - # Load the local configuration. 
- source_path = ts.getSourcePath(path_in_suite) - cfgpath = os.path.join(source_path, kLocalConfigName) - if litConfig.debug: - litConfig.note('loading local config %r' % cfgpath) - return TestingConfig.frompath(cfgpath, parent, litConfig, - mustExist = False, - config = parent.clone(cfgpath)) - - def search(path_in_suite): - key = (ts, path_in_suite) - res = cache.get(key) - if res is None: - cache[key] = res = search1(path_in_suite) - return res - - return search(path_in_suite) - -def getTests(path, litConfig, testSuiteCache, localConfigCache): - # Find the test suite for this input and its relative path. - ts,path_in_suite = getTestSuite(path, litConfig, testSuiteCache) - if ts is None: - litConfig.warning('unable to find test suite for %r' % path) - return (),() - - if litConfig.debug: - litConfig.note('resolved input %r to %r::%r' % (path, ts.name, - path_in_suite)) - - return ts, getTestsInSuite(ts, path_in_suite, litConfig, - testSuiteCache, localConfigCache) - -def getTestsInSuite(ts, path_in_suite, litConfig, - testSuiteCache, localConfigCache): - # Check that the source path exists (errors here are reported by the - # caller). - source_path = ts.getSourcePath(path_in_suite) - if not os.path.exists(source_path): - return - - # Check if the user named a test directly. - if not os.path.isdir(source_path): - lc = getLocalConfig(ts, path_in_suite[:-1], litConfig, localConfigCache) - yield Test.Test(ts, path_in_suite, lc) - return - - # Otherwise we have a directory to search for tests, start by getting the - # local configuration. - lc = getLocalConfig(ts, path_in_suite, litConfig, localConfigCache) - - # Search for tests. - for res in lc.test_format.getTestsInDirectory(ts, path_in_suite, - litConfig, lc): - yield res - - # Search subdirectories. - for filename in os.listdir(source_path): - # FIXME: This doesn't belong here? - if filename in ('Output', '.svn') or filename in lc.excludes: - continue - - # Ignore non-directories. - file_sourcepath = os.path.join(source_path, filename) - if not os.path.isdir(file_sourcepath): - continue - - # Check for nested test suites, first in the execpath in case there is a - # site configuration and then in the source path. - file_execpath = ts.getExecPath(path_in_suite + (filename,)) - if dirContainsTestSuite(file_execpath): - sub_ts, subiter = getTests(file_execpath, litConfig, - testSuiteCache, localConfigCache) - elif dirContainsTestSuite(file_sourcepath): - sub_ts, subiter = getTests(file_sourcepath, litConfig, - testSuiteCache, localConfigCache) - else: - # Otherwise, continue loading from inside this test suite. - subiter = getTestsInSuite(ts, path_in_suite + (filename,), - litConfig, testSuiteCache, - localConfigCache) - sub_ts = None - - N = 0 - for res in subiter: - N += 1 - yield res - if sub_ts and not N: - litConfig.warning('test suite %r contained no tests' % sub_ts.name) - -def runTests(numThreads, litConfig, provider, display): - # If only using one testing thread, don't use threads at all; this lets us - # profile, among other things. - if numThreads == 1: - t = Tester(litConfig, provider, display) - t.run() - return - - # Otherwise spin up the testing threads and wait for them to finish. 
- testers = [Tester(litConfig, provider, display) - for i in range(numThreads)] - for t in testers: - t.start() - try: - for t in testers: - t.join() - except KeyboardInterrupt: - sys.exit(2) - -def main(): - global options - from optparse import OptionParser, OptionGroup - parser = OptionParser("usage: %prog [options] {file-or-path}") - - parser.add_option("-j", "--threads", dest="numThreads", metavar="N", - help="Number of testing threads", - type=int, action="store", default=None) - parser.add_option("", "--config-prefix", dest="configPrefix", - metavar="NAME", help="Prefix for 'lit' config files", - action="store", default=None) - parser.add_option("", "--param", dest="userParameters", - metavar="NAME=VAL", - help="Add 'NAME' = 'VAL' to the user defined parameters", - type=str, action="append", default=[]) - - group = OptionGroup(parser, "Output Format") - # FIXME: I find these names very confusing, although I like the - # functionality. - group.add_option("-q", "--quiet", dest="quiet", - help="Suppress no error output", - action="store_true", default=False) - group.add_option("-s", "--succinct", dest="succinct", - help="Reduce amount of output", - action="store_true", default=False) - group.add_option("-v", "--verbose", dest="showOutput", - help="Show all test output", - action="store_true", default=False) - group.add_option("", "--no-progress-bar", dest="useProgressBar", - help="Do not use curses based progress bar", - action="store_false", default=True) - parser.add_option_group(group) - - group = OptionGroup(parser, "Test Execution") - group.add_option("", "--path", dest="path", - help="Additional paths to add to testing environment", - action="append", type=str, default=[]) - group.add_option("", "--vg", dest="useValgrind", - help="Run tests under valgrind", - action="store_true", default=False) - group.add_option("", "--vg-arg", dest="valgrindArgs", metavar="ARG", - help="Specify an extra argument for valgrind", - type=str, action="append", default=[]) - group.add_option("", "--time-tests", dest="timeTests", - help="Track elapsed wall time for each test", - action="store_true", default=False) - group.add_option("", "--no-execute", dest="noExecute", - help="Don't execute any tests (assume PASS)", - action="store_true", default=False) - parser.add_option_group(group) - - group = OptionGroup(parser, "Test Selection") - group.add_option("", "--max-tests", dest="maxTests", metavar="N", - help="Maximum number of tests to run", - action="store", type=int, default=None) - group.add_option("", "--max-time", dest="maxTime", metavar="N", - help="Maximum time to spend testing (in seconds)", - action="store", type=float, default=None) - group.add_option("", "--shuffle", dest="shuffle", - help="Run tests in random order", - action="store_true", default=False) - parser.add_option_group(group) - - group = OptionGroup(parser, "Debug and Experimental Options") - group.add_option("", "--debug", dest="debug", - help="Enable debugging (for 'lit' development)", - action="store_true", default=False) - group.add_option("", "--show-suites", dest="showSuites", - help="Show discovered test suites", - action="store_true", default=False) - group.add_option("", "--no-tcl-as-sh", dest="useTclAsSh", - help="Don't run Tcl scripts using 'sh'", - action="store_false", default=True) - group.add_option("", "--repeat", dest="repeatTests", metavar="N", - help="Repeat tests N times (for timing)", - action="store", default=None, type=int) - parser.add_option_group(group) - - (opts, args) = parser.parse_args() - - if not 
args: - parser.error('No inputs specified') - - if opts.configPrefix is not None: - global gConfigName, gSiteConfigName - gConfigName = '%s.cfg' % opts.configPrefix - gSiteConfigName = '%s.site.cfg' % opts.configPrefix - - if opts.numThreads is None: - opts.numThreads = Util.detectCPUs() - - inputs = args - - # Create the user defined parameters. - userParams = {} - for entry in opts.userParameters: - if '=' not in entry: - name,val = entry,'' - else: - name,val = entry.split('=', 1) - userParams[name] = val - - # Create the global config object. - litConfig = LitConfig.LitConfig(progname = os.path.basename(sys.argv[0]), - path = opts.path, - quiet = opts.quiet, - useValgrind = opts.useValgrind, - valgrindArgs = opts.valgrindArgs, - useTclAsSh = opts.useTclAsSh, - noExecute = opts.noExecute, - debug = opts.debug, - isWindows = (platform.system()=='Windows'), - params = userParams) - - # Load the tests from the inputs. - tests = [] - testSuiteCache = {} - localConfigCache = {} - for input in inputs: - prev = len(tests) - tests.extend(getTests(input, litConfig, - testSuiteCache, localConfigCache)[1]) - if prev == len(tests): - litConfig.warning('input %r contained no tests' % input) - - # If there were any errors during test discovery, exit now. - if litConfig.numErrors: - print >>sys.stderr, '%d errors, exiting.' % litConfig.numErrors - sys.exit(2) - - if opts.showSuites: - suitesAndTests = dict([(ts,[]) - for ts,_ in testSuiteCache.values() - if ts]) - for t in tests: - suitesAndTests[t.suite].append(t) - - print '-- Test Suites --' - suitesAndTests = suitesAndTests.items() - suitesAndTests.sort(key = lambda (ts,_): ts.name) - for ts,ts_tests in suitesAndTests: - print ' %s - %d tests' %(ts.name, len(ts_tests)) - print ' Source Root: %s' % ts.source_root - print ' Exec Root : %s' % ts.exec_root - - # Select and order the tests. - numTotalTests = len(tests) - if opts.shuffle: - random.shuffle(tests) - else: - tests.sort(key = lambda t: t.getFullName()) - if opts.maxTests is not None: - tests = tests[:opts.maxTests] - - extra = '' - if len(tests) != numTotalTests: - extra = ' of %d' % numTotalTests - header = '-- Testing: %d%s tests, %d threads --'%(len(tests),extra, - opts.numThreads) - - if opts.repeatTests: - tests = [t.copyWithIndex(i) - for t in tests - for i in range(opts.repeatTests)] - - progressBar = None - if not opts.quiet: - if opts.succinct and opts.useProgressBar: - try: - tc = ProgressBar.TerminalController() - progressBar = ProgressBar.ProgressBar(tc, header) - except ValueError: - print header - progressBar = ProgressBar.SimpleProgressBar('Testing: ') - else: - print header - - # Don't create more threads than tests. - opts.numThreads = min(len(tests), opts.numThreads) - - startTime = time.time() - display = TestingProgressDisplay(opts, len(tests), progressBar) - provider = TestProvider(tests, opts.maxTime) - runTests(opts.numThreads, litConfig, provider, display) - display.finish() - - if not opts.quiet: - print 'Testing Time: %.2fs'%(time.time() - startTime) - - # Update results for any tests which weren't run. - for t in tests: - if t.result is None: - t.setResult(Test.UNRESOLVED, '', 0.0) - - # List test results organized by kind. - hasFailures = False - byCode = {} - for t in tests: - if t.result not in byCode: - byCode[t.result] = [] - byCode[t.result].append(t) - if t.result.isFailure: - hasFailures = True - - # FIXME: Show unresolved and (optionally) unsupported tests. 
- for title,code in (('Unexpected Passing Tests', Test.XPASS), - ('Failing Tests', Test.FAIL)): - elts = byCode.get(code) - if not elts: - continue - print '*'*20 - print '%s (%d):' % (title, len(elts)) - for t in elts: - print ' %s' % t.getFullName() - print - - if opts.timeTests: - # Collate, in case we repeated tests. - times = {} - for t in tests: - key = t.getFullName() - times[key] = times.get(key, 0.) + t.elapsed - - byTime = list(times.items()) - byTime.sort(key = lambda (name,elapsed): elapsed) - if byTime: - Util.printHistogram(byTime, title='Tests') - - for name,code in (('Expected Passes ', Test.PASS), - ('Expected Failures ', Test.XFAIL), - ('Unsupported Tests ', Test.UNSUPPORTED), - ('Unresolved Tests ', Test.UNRESOLVED), - ('Unexpected Passes ', Test.XPASS), - ('Unexpected Failures', Test.FAIL),): - if opts.quiet and not code.isFailure: - continue - N = len(byCode.get(code,[])) - if N: - print ' %s: %d' % (name,N) - - # If we encountered any additional errors, exit abnormally. - if litConfig.numErrors: - print >>sys.stderr, '\n%d error(s), exiting.' % litConfig.numErrors - sys.exit(2) - - # Warn about warnings. - if litConfig.numWarnings: - print >>sys.stderr, '\n%d warning(s) in tests.' % litConfig.numWarnings - - if hasFailures: - sys.exit(1) - sys.exit(0) - if __name__=='__main__': - # Bump the GIL check interval, its more important to get any one thread to a - # blocking operation (hopefully exec) than to try and unblock other threads. - import sys - sys.setcheckinterval(1000) - main() + import lit + lit.main() diff --git a/utils/lit/ExampleTests.ObjDir/lit.site.cfg b/utils/lit/lit/ExampleTests.ObjDir/lit.site.cfg similarity index 100% rename from utils/lit/ExampleTests.ObjDir/lit.site.cfg rename to utils/lit/lit/ExampleTests.ObjDir/lit.site.cfg diff --git a/utils/lit/ExampleTests/Clang/fsyntax-only.c b/utils/lit/lit/ExampleTests/Clang/fsyntax-only.c similarity index 100% rename from utils/lit/ExampleTests/Clang/fsyntax-only.c rename to utils/lit/lit/ExampleTests/Clang/fsyntax-only.c diff --git a/utils/lit/ExampleTests/Clang/lit.cfg b/utils/lit/lit/ExampleTests/Clang/lit.cfg similarity index 100% rename from utils/lit/ExampleTests/Clang/lit.cfg rename to utils/lit/lit/ExampleTests/Clang/lit.cfg diff --git a/utils/lit/ExampleTests/LLVM.InTree/test/Bar/bar-test.ll b/utils/lit/lit/ExampleTests/LLVM.InTree/test/Bar/bar-test.ll similarity index 100% rename from utils/lit/ExampleTests/LLVM.InTree/test/Bar/bar-test.ll rename to utils/lit/lit/ExampleTests/LLVM.InTree/test/Bar/bar-test.ll diff --git a/utils/lit/ExampleTests/LLVM.InTree/test/Bar/dg.exp b/utils/lit/lit/ExampleTests/LLVM.InTree/test/Bar/dg.exp similarity index 100% rename from utils/lit/ExampleTests/LLVM.InTree/test/Bar/dg.exp rename to utils/lit/lit/ExampleTests/LLVM.InTree/test/Bar/dg.exp diff --git a/utils/lit/ExampleTests/LLVM.InTree/test/lit.cfg b/utils/lit/lit/ExampleTests/LLVM.InTree/test/lit.cfg similarity index 100% rename from utils/lit/ExampleTests/LLVM.InTree/test/lit.cfg rename to utils/lit/lit/ExampleTests/LLVM.InTree/test/lit.cfg diff --git a/utils/lit/ExampleTests/LLVM.InTree/test/lit.site.cfg b/utils/lit/lit/ExampleTests/LLVM.InTree/test/lit.site.cfg similarity index 100% rename from utils/lit/ExampleTests/LLVM.InTree/test/lit.site.cfg rename to utils/lit/lit/ExampleTests/LLVM.InTree/test/lit.site.cfg diff --git a/utils/lit/ExampleTests/LLVM.InTree/test/site.exp b/utils/lit/lit/ExampleTests/LLVM.InTree/test/site.exp similarity index 100% rename from utils/lit/ExampleTests/LLVM.InTree/test/site.exp 
rename to utils/lit/lit/ExampleTests/LLVM.InTree/test/site.exp diff --git a/utils/lit/ExampleTests/LLVM.OutOfTree/lit.local.cfg b/utils/lit/lit/ExampleTests/LLVM.OutOfTree/lit.local.cfg similarity index 100% rename from utils/lit/ExampleTests/LLVM.OutOfTree/lit.local.cfg rename to utils/lit/lit/ExampleTests/LLVM.OutOfTree/lit.local.cfg diff --git a/utils/lit/ExampleTests/LLVM.OutOfTree/obj/test/Foo/lit.local.cfg b/utils/lit/lit/ExampleTests/LLVM.OutOfTree/obj/test/Foo/lit.local.cfg similarity index 100% rename from utils/lit/ExampleTests/LLVM.OutOfTree/obj/test/Foo/lit.local.cfg rename to utils/lit/lit/ExampleTests/LLVM.OutOfTree/obj/test/Foo/lit.local.cfg diff --git a/utils/lit/ExampleTests/LLVM.OutOfTree/obj/test/lit.site.cfg b/utils/lit/lit/ExampleTests/LLVM.OutOfTree/obj/test/lit.site.cfg similarity index 100% rename from utils/lit/ExampleTests/LLVM.OutOfTree/obj/test/lit.site.cfg rename to utils/lit/lit/ExampleTests/LLVM.OutOfTree/obj/test/lit.site.cfg diff --git a/utils/lit/ExampleTests/LLVM.OutOfTree/obj/test/site.exp b/utils/lit/lit/ExampleTests/LLVM.OutOfTree/obj/test/site.exp similarity index 100% rename from utils/lit/ExampleTests/LLVM.OutOfTree/obj/test/site.exp rename to utils/lit/lit/ExampleTests/LLVM.OutOfTree/obj/test/site.exp diff --git a/utils/lit/ExampleTests/LLVM.OutOfTree/src/test/Foo/data.txt b/utils/lit/lit/ExampleTests/LLVM.OutOfTree/src/test/Foo/data.txt similarity index 100% rename from utils/lit/ExampleTests/LLVM.OutOfTree/src/test/Foo/data.txt rename to utils/lit/lit/ExampleTests/LLVM.OutOfTree/src/test/Foo/data.txt diff --git a/utils/lit/ExampleTests/LLVM.OutOfTree/src/test/Foo/dg.exp b/utils/lit/lit/ExampleTests/LLVM.OutOfTree/src/test/Foo/dg.exp similarity index 100% rename from utils/lit/ExampleTests/LLVM.OutOfTree/src/test/Foo/dg.exp rename to utils/lit/lit/ExampleTests/LLVM.OutOfTree/src/test/Foo/dg.exp diff --git a/utils/lit/ExampleTests/LLVM.OutOfTree/src/test/Foo/pct-S.ll b/utils/lit/lit/ExampleTests/LLVM.OutOfTree/src/test/Foo/pct-S.ll similarity index 100% rename from utils/lit/ExampleTests/LLVM.OutOfTree/src/test/Foo/pct-S.ll rename to utils/lit/lit/ExampleTests/LLVM.OutOfTree/src/test/Foo/pct-S.ll diff --git a/utils/lit/ExampleTests/LLVM.OutOfTree/src/test/lit.cfg b/utils/lit/lit/ExampleTests/LLVM.OutOfTree/src/test/lit.cfg similarity index 100% rename from utils/lit/ExampleTests/LLVM.OutOfTree/src/test/lit.cfg rename to utils/lit/lit/ExampleTests/LLVM.OutOfTree/src/test/lit.cfg diff --git a/utils/lit/ExampleTests/ShExternal/lit.local.cfg b/utils/lit/lit/ExampleTests/ShExternal/lit.local.cfg similarity index 100% rename from utils/lit/ExampleTests/ShExternal/lit.local.cfg rename to utils/lit/lit/ExampleTests/ShExternal/lit.local.cfg diff --git a/utils/lit/ExampleTests/ShInternal/lit.local.cfg b/utils/lit/lit/ExampleTests/ShInternal/lit.local.cfg similarity index 100% rename from utils/lit/ExampleTests/ShInternal/lit.local.cfg rename to utils/lit/lit/ExampleTests/ShInternal/lit.local.cfg diff --git a/utils/lit/ExampleTests/TclTest/lit.local.cfg b/utils/lit/lit/ExampleTests/TclTest/lit.local.cfg similarity index 100% rename from utils/lit/ExampleTests/TclTest/lit.local.cfg rename to utils/lit/lit/ExampleTests/TclTest/lit.local.cfg diff --git a/utils/lit/ExampleTests/TclTest/stderr-pipe.ll b/utils/lit/lit/ExampleTests/TclTest/stderr-pipe.ll similarity index 100% rename from utils/lit/ExampleTests/TclTest/stderr-pipe.ll rename to utils/lit/lit/ExampleTests/TclTest/stderr-pipe.ll diff --git a/utils/lit/ExampleTests/TclTest/tcl-redir-1.ll 
b/utils/lit/lit/ExampleTests/TclTest/tcl-redir-1.ll similarity index 100% rename from utils/lit/ExampleTests/TclTest/tcl-redir-1.ll rename to utils/lit/lit/ExampleTests/TclTest/tcl-redir-1.ll diff --git a/utils/lit/ExampleTests/fail.c b/utils/lit/lit/ExampleTests/fail.c similarity index 100% rename from utils/lit/ExampleTests/fail.c rename to utils/lit/lit/ExampleTests/fail.c diff --git a/utils/lit/ExampleTests/lit.cfg b/utils/lit/lit/ExampleTests/lit.cfg similarity index 100% rename from utils/lit/ExampleTests/lit.cfg rename to utils/lit/lit/ExampleTests/lit.cfg diff --git a/utils/lit/ExampleTests/pass.c b/utils/lit/lit/ExampleTests/pass.c similarity index 100% rename from utils/lit/ExampleTests/pass.c rename to utils/lit/lit/ExampleTests/pass.c diff --git a/utils/lit/ExampleTests/xfail.c b/utils/lit/lit/ExampleTests/xfail.c similarity index 100% rename from utils/lit/ExampleTests/xfail.c rename to utils/lit/lit/ExampleTests/xfail.c diff --git a/utils/lit/ExampleTests/xpass.c b/utils/lit/lit/ExampleTests/xpass.c similarity index 100% rename from utils/lit/ExampleTests/xpass.c rename to utils/lit/lit/ExampleTests/xpass.c diff --git a/utils/lit/LitConfig.py b/utils/lit/lit/LitConfig.py similarity index 100% rename from utils/lit/LitConfig.py rename to utils/lit/lit/LitConfig.py diff --git a/utils/lit/LitFormats.py b/utils/lit/lit/LitFormats.py similarity index 100% rename from utils/lit/LitFormats.py rename to utils/lit/lit/LitFormats.py diff --git a/utils/lit/ProgressBar.py b/utils/lit/lit/ProgressBar.py similarity index 100% rename from utils/lit/ProgressBar.py rename to utils/lit/lit/ProgressBar.py diff --git a/utils/lit/ShCommands.py b/utils/lit/lit/ShCommands.py similarity index 100% rename from utils/lit/ShCommands.py rename to utils/lit/lit/ShCommands.py diff --git a/utils/lit/ShUtil.py b/utils/lit/lit/ShUtil.py similarity index 100% rename from utils/lit/ShUtil.py rename to utils/lit/lit/ShUtil.py diff --git a/utils/lit/TclUtil.py b/utils/lit/lit/TclUtil.py similarity index 100% rename from utils/lit/TclUtil.py rename to utils/lit/lit/TclUtil.py diff --git a/utils/lit/Test.py b/utils/lit/lit/Test.py similarity index 100% rename from utils/lit/Test.py rename to utils/lit/lit/Test.py diff --git a/utils/lit/TestFormats.py b/utils/lit/lit/TestFormats.py similarity index 100% rename from utils/lit/TestFormats.py rename to utils/lit/lit/TestFormats.py diff --git a/utils/lit/TestRunner.py b/utils/lit/lit/TestRunner.py similarity index 100% rename from utils/lit/TestRunner.py rename to utils/lit/lit/TestRunner.py diff --git a/utils/lit/TestingConfig.py b/utils/lit/lit/TestingConfig.py similarity index 100% rename from utils/lit/TestingConfig.py rename to utils/lit/lit/TestingConfig.py diff --git a/utils/lit/Util.py b/utils/lit/lit/Util.py similarity index 100% rename from utils/lit/Util.py rename to utils/lit/lit/Util.py diff --git a/utils/lit/lit/__init__.py b/utils/lit/lit/__init__.py new file mode 100644 index 00000000000..01026023d29 --- /dev/null +++ b/utils/lit/lit/__init__.py @@ -0,0 +1,10 @@ +"""'lit' Testing Tool""" + +from lit import main + +__author__ = 'Daniel Dunbar' +__email__ = 'daniel@zuster.org' +__versioninfo__ = (0, 1, 0) +__version__ = '.'.join(map(str, __versioninfo__)) + +__all__ = [] diff --git a/utils/lit/lit/lit.py b/utils/lit/lit/lit.py new file mode 100755 index 00000000000..f1f19c4ddae --- /dev/null +++ b/utils/lit/lit/lit.py @@ -0,0 +1,579 @@ +#!/usr/bin/env python + +""" +lit - LLVM Integrated Tester. + +See lit.pod for more information. 
+""" + +import math, os, platform, random, re, sys, time, threading, traceback + +import ProgressBar +import TestRunner +import Util + +from TestingConfig import TestingConfig +import LitConfig +import Test + +# Configuration files to look for when discovering test suites. These can be +# overridden with --config-prefix. +# +# FIXME: Rename to 'config.lit', 'site.lit', and 'local.lit' ? +gConfigName = 'lit.cfg' +gSiteConfigName = 'lit.site.cfg' + +kLocalConfigName = 'lit.local.cfg' + +class TestingProgressDisplay: + def __init__(self, opts, numTests, progressBar=None): + self.opts = opts + self.numTests = numTests + self.current = None + self.lock = threading.Lock() + self.progressBar = progressBar + self.completed = 0 + + def update(self, test): + # Avoid locking overhead in quiet mode + if self.opts.quiet and not test.result.isFailure: + self.completed += 1 + return + + # Output lock. + self.lock.acquire() + try: + self.handleUpdate(test) + finally: + self.lock.release() + + def finish(self): + if self.progressBar: + self.progressBar.clear() + elif self.opts.quiet: + pass + elif self.opts.succinct: + sys.stdout.write('\n') + + def handleUpdate(self, test): + self.completed += 1 + if self.progressBar: + self.progressBar.update(float(self.completed)/self.numTests, + test.getFullName()) + + if self.opts.succinct and not test.result.isFailure: + return + + if self.progressBar: + self.progressBar.clear() + + print '%s: %s (%d of %d)' % (test.result.name, test.getFullName(), + self.completed, self.numTests) + + if test.result.isFailure and self.opts.showOutput: + print "%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(), + '*'*20) + print test.output + print "*" * 20 + + sys.stdout.flush() + +class TestProvider: + def __init__(self, tests, maxTime): + self.maxTime = maxTime + self.iter = iter(tests) + self.lock = threading.Lock() + self.startTime = time.time() + + def get(self): + # Check if we have run out of time. + if self.maxTime is not None: + if time.time() - self.startTime > self.maxTime: + return None + + # Otherwise take the next test. + self.lock.acquire() + try: + item = self.iter.next() + except StopIteration: + item = None + self.lock.release() + return item + +class Tester(threading.Thread): + def __init__(self, litConfig, provider, display): + threading.Thread.__init__(self) + self.litConfig = litConfig + self.provider = provider + self.display = display + + def run(self): + while 1: + item = self.provider.get() + if item is None: + break + self.runTest(item) + + def runTest(self, test): + result = None + startTime = time.time() + try: + result, output = test.config.test_format.execute(test, + self.litConfig) + except KeyboardInterrupt: + # This is a sad hack. Unfortunately subprocess goes + # bonkers with ctrl-c and we start forking merrily. + print '\nCtrl-C detected, goodbye.' + os.kill(0,9) + except: + if self.litConfig.debug: + raise + result = Test.UNRESOLVED + output = 'Exception during script execution:\n' + output += traceback.format_exc() + output += '\n' + elapsed = time.time() - startTime + + test.setResult(result, output, elapsed) + self.display.update(test) + +def dirContainsTestSuite(path): + cfgpath = os.path.join(path, gSiteConfigName) + if os.path.exists(cfgpath): + return cfgpath + cfgpath = os.path.join(path, gConfigName) + if os.path.exists(cfgpath): + return cfgpath + +def getTestSuite(item, litConfig, cache): + """getTestSuite(item, litConfig, cache) -> (suite, relative_path) + + Find the test suite containing @arg item. + + @retval (None, ...) 
- Indicates no test suite contains @arg item. + @retval (suite, relative_path) - The suite that @arg item is in, and its + relative path inside that suite. + """ + def search1(path): + # Check for a site config or a lit config. + cfgpath = dirContainsTestSuite(path) + + # If we didn't find a config file, keep looking. + if not cfgpath: + parent,base = os.path.split(path) + if parent == path: + return (None, ()) + + ts, relative = search(parent) + return (ts, relative + (base,)) + + # We found a config file, load it. + if litConfig.debug: + litConfig.note('loading suite config %r' % cfgpath) + + cfg = TestingConfig.frompath(cfgpath, None, litConfig, mustExist = True) + source_root = os.path.realpath(cfg.test_source_root or path) + exec_root = os.path.realpath(cfg.test_exec_root or path) + return Test.TestSuite(cfg.name, source_root, exec_root, cfg), () + + def search(path): + # Check for an already instantiated test suite. + res = cache.get(path) + if res is None: + cache[path] = res = search1(path) + return res + + # Canonicalize the path. + item = os.path.realpath(item) + + # Skip files and virtual components. + components = [] + while not os.path.isdir(item): + parent,base = os.path.split(item) + if parent == item: + return (None, ()) + components.append(base) + item = parent + components.reverse() + + ts, relative = search(item) + return ts, tuple(relative + tuple(components)) + +def getLocalConfig(ts, path_in_suite, litConfig, cache): + def search1(path_in_suite): + # Get the parent config. + if not path_in_suite: + parent = ts.config + else: + parent = search(path_in_suite[:-1]) + + # Load the local configuration. + source_path = ts.getSourcePath(path_in_suite) + cfgpath = os.path.join(source_path, kLocalConfigName) + if litConfig.debug: + litConfig.note('loading local config %r' % cfgpath) + return TestingConfig.frompath(cfgpath, parent, litConfig, + mustExist = False, + config = parent.clone(cfgpath)) + + def search(path_in_suite): + key = (ts, path_in_suite) + res = cache.get(key) + if res is None: + cache[key] = res = search1(path_in_suite) + return res + + return search(path_in_suite) + +def getTests(path, litConfig, testSuiteCache, localConfigCache): + # Find the test suite for this input and its relative path. + ts,path_in_suite = getTestSuite(path, litConfig, testSuiteCache) + if ts is None: + litConfig.warning('unable to find test suite for %r' % path) + return (),() + + if litConfig.debug: + litConfig.note('resolved input %r to %r::%r' % (path, ts.name, + path_in_suite)) + + return ts, getTestsInSuite(ts, path_in_suite, litConfig, + testSuiteCache, localConfigCache) + +def getTestsInSuite(ts, path_in_suite, litConfig, + testSuiteCache, localConfigCache): + # Check that the source path exists (errors here are reported by the + # caller). + source_path = ts.getSourcePath(path_in_suite) + if not os.path.exists(source_path): + return + + # Check if the user named a test directly. + if not os.path.isdir(source_path): + lc = getLocalConfig(ts, path_in_suite[:-1], litConfig, localConfigCache) + yield Test.Test(ts, path_in_suite, lc) + return + + # Otherwise we have a directory to search for tests, start by getting the + # local configuration. + lc = getLocalConfig(ts, path_in_suite, litConfig, localConfigCache) + + # Search for tests. + for res in lc.test_format.getTestsInDirectory(ts, path_in_suite, + litConfig, lc): + yield res + + # Search subdirectories. + for filename in os.listdir(source_path): + # FIXME: This doesn't belong here? 
+ if filename in ('Output', '.svn') or filename in lc.excludes: + continue + + # Ignore non-directories. + file_sourcepath = os.path.join(source_path, filename) + if not os.path.isdir(file_sourcepath): + continue + + # Check for nested test suites, first in the execpath in case there is a + # site configuration and then in the source path. + file_execpath = ts.getExecPath(path_in_suite + (filename,)) + if dirContainsTestSuite(file_execpath): + sub_ts, subiter = getTests(file_execpath, litConfig, + testSuiteCache, localConfigCache) + elif dirContainsTestSuite(file_sourcepath): + sub_ts, subiter = getTests(file_sourcepath, litConfig, + testSuiteCache, localConfigCache) + else: + # Otherwise, continue loading from inside this test suite. + subiter = getTestsInSuite(ts, path_in_suite + (filename,), + litConfig, testSuiteCache, + localConfigCache) + sub_ts = None + + N = 0 + for res in subiter: + N += 1 + yield res + if sub_ts and not N: + litConfig.warning('test suite %r contained no tests' % sub_ts.name) + +def runTests(numThreads, litConfig, provider, display): + # If only using one testing thread, don't use threads at all; this lets us + # profile, among other things. + if numThreads == 1: + t = Tester(litConfig, provider, display) + t.run() + return + + # Otherwise spin up the testing threads and wait for them to finish. + testers = [Tester(litConfig, provider, display) + for i in range(numThreads)] + for t in testers: + t.start() + try: + for t in testers: + t.join() + except KeyboardInterrupt: + sys.exit(2) + +def main(): + # Bump the GIL check interval, its more important to get any one thread to a + # blocking operation (hopefully exec) than to try and unblock other threads. + # + # FIXME: This is a hack. + import sys + sys.setcheckinterval(1000) + + global options + from optparse import OptionParser, OptionGroup + parser = OptionParser("usage: %prog [options] {file-or-path}") + + parser.add_option("-j", "--threads", dest="numThreads", metavar="N", + help="Number of testing threads", + type=int, action="store", default=None) + parser.add_option("", "--config-prefix", dest="configPrefix", + metavar="NAME", help="Prefix for 'lit' config files", + action="store", default=None) + parser.add_option("", "--param", dest="userParameters", + metavar="NAME=VAL", + help="Add 'NAME' = 'VAL' to the user defined parameters", + type=str, action="append", default=[]) + + group = OptionGroup(parser, "Output Format") + # FIXME: I find these names very confusing, although I like the + # functionality. 
+ group.add_option("-q", "--quiet", dest="quiet", + help="Suppress no error output", + action="store_true", default=False) + group.add_option("-s", "--succinct", dest="succinct", + help="Reduce amount of output", + action="store_true", default=False) + group.add_option("-v", "--verbose", dest="showOutput", + help="Show all test output", + action="store_true", default=False) + group.add_option("", "--no-progress-bar", dest="useProgressBar", + help="Do not use curses based progress bar", + action="store_false", default=True) + parser.add_option_group(group) + + group = OptionGroup(parser, "Test Execution") + group.add_option("", "--path", dest="path", + help="Additional paths to add to testing environment", + action="append", type=str, default=[]) + group.add_option("", "--vg", dest="useValgrind", + help="Run tests under valgrind", + action="store_true", default=False) + group.add_option("", "--vg-arg", dest="valgrindArgs", metavar="ARG", + help="Specify an extra argument for valgrind", + type=str, action="append", default=[]) + group.add_option("", "--time-tests", dest="timeTests", + help="Track elapsed wall time for each test", + action="store_true", default=False) + group.add_option("", "--no-execute", dest="noExecute", + help="Don't execute any tests (assume PASS)", + action="store_true", default=False) + parser.add_option_group(group) + + group = OptionGroup(parser, "Test Selection") + group.add_option("", "--max-tests", dest="maxTests", metavar="N", + help="Maximum number of tests to run", + action="store", type=int, default=None) + group.add_option("", "--max-time", dest="maxTime", metavar="N", + help="Maximum time to spend testing (in seconds)", + action="store", type=float, default=None) + group.add_option("", "--shuffle", dest="shuffle", + help="Run tests in random order", + action="store_true", default=False) + parser.add_option_group(group) + + group = OptionGroup(parser, "Debug and Experimental Options") + group.add_option("", "--debug", dest="debug", + help="Enable debugging (for 'lit' development)", + action="store_true", default=False) + group.add_option("", "--show-suites", dest="showSuites", + help="Show discovered test suites", + action="store_true", default=False) + group.add_option("", "--no-tcl-as-sh", dest="useTclAsSh", + help="Don't run Tcl scripts using 'sh'", + action="store_false", default=True) + group.add_option("", "--repeat", dest="repeatTests", metavar="N", + help="Repeat tests N times (for timing)", + action="store", default=None, type=int) + parser.add_option_group(group) + + (opts, args) = parser.parse_args() + + if not args: + parser.error('No inputs specified') + + if opts.configPrefix is not None: + global gConfigName, gSiteConfigName + gConfigName = '%s.cfg' % opts.configPrefix + gSiteConfigName = '%s.site.cfg' % opts.configPrefix + + if opts.numThreads is None: + opts.numThreads = Util.detectCPUs() + + inputs = args + + # Create the user defined parameters. + userParams = {} + for entry in opts.userParameters: + if '=' not in entry: + name,val = entry,'' + else: + name,val = entry.split('=', 1) + userParams[name] = val + + # Create the global config object. + litConfig = LitConfig.LitConfig(progname = os.path.basename(sys.argv[0]), + path = opts.path, + quiet = opts.quiet, + useValgrind = opts.useValgrind, + valgrindArgs = opts.valgrindArgs, + useTclAsSh = opts.useTclAsSh, + noExecute = opts.noExecute, + debug = opts.debug, + isWindows = (platform.system()=='Windows'), + params = userParams) + + # Load the tests from the inputs. 
+ tests = [] + testSuiteCache = {} + localConfigCache = {} + for input in inputs: + prev = len(tests) + tests.extend(getTests(input, litConfig, + testSuiteCache, localConfigCache)[1]) + if prev == len(tests): + litConfig.warning('input %r contained no tests' % input) + + # If there were any errors during test discovery, exit now. + if litConfig.numErrors: + print >>sys.stderr, '%d errors, exiting.' % litConfig.numErrors + sys.exit(2) + + if opts.showSuites: + suitesAndTests = dict([(ts,[]) + for ts,_ in testSuiteCache.values() + if ts]) + for t in tests: + suitesAndTests[t.suite].append(t) + + print '-- Test Suites --' + suitesAndTests = suitesAndTests.items() + suitesAndTests.sort(key = lambda (ts,_): ts.name) + for ts,ts_tests in suitesAndTests: + print ' %s - %d tests' %(ts.name, len(ts_tests)) + print ' Source Root: %s' % ts.source_root + print ' Exec Root : %s' % ts.exec_root + + # Select and order the tests. + numTotalTests = len(tests) + if opts.shuffle: + random.shuffle(tests) + else: + tests.sort(key = lambda t: t.getFullName()) + if opts.maxTests is not None: + tests = tests[:opts.maxTests] + + extra = '' + if len(tests) != numTotalTests: + extra = ' of %d' % numTotalTests + header = '-- Testing: %d%s tests, %d threads --'%(len(tests),extra, + opts.numThreads) + + if opts.repeatTests: + tests = [t.copyWithIndex(i) + for t in tests + for i in range(opts.repeatTests)] + + progressBar = None + if not opts.quiet: + if opts.succinct and opts.useProgressBar: + try: + tc = ProgressBar.TerminalController() + progressBar = ProgressBar.ProgressBar(tc, header) + except ValueError: + print header + progressBar = ProgressBar.SimpleProgressBar('Testing: ') + else: + print header + + # Don't create more threads than tests. + opts.numThreads = min(len(tests), opts.numThreads) + + startTime = time.time() + display = TestingProgressDisplay(opts, len(tests), progressBar) + provider = TestProvider(tests, opts.maxTime) + runTests(opts.numThreads, litConfig, provider, display) + display.finish() + + if not opts.quiet: + print 'Testing Time: %.2fs'%(time.time() - startTime) + + # Update results for any tests which weren't run. + for t in tests: + if t.result is None: + t.setResult(Test.UNRESOLVED, '', 0.0) + + # List test results organized by kind. + hasFailures = False + byCode = {} + for t in tests: + if t.result not in byCode: + byCode[t.result] = [] + byCode[t.result].append(t) + if t.result.isFailure: + hasFailures = True + + # FIXME: Show unresolved and (optionally) unsupported tests. + for title,code in (('Unexpected Passing Tests', Test.XPASS), + ('Failing Tests', Test.FAIL)): + elts = byCode.get(code) + if not elts: + continue + print '*'*20 + print '%s (%d):' % (title, len(elts)) + for t in elts: + print ' %s' % t.getFullName() + print + + if opts.timeTests: + # Collate, in case we repeated tests. + times = {} + for t in tests: + key = t.getFullName() + times[key] = times.get(key, 0.) + t.elapsed + + byTime = list(times.items()) + byTime.sort(key = lambda (name,elapsed): elapsed) + if byTime: + Util.printHistogram(byTime, title='Tests') + + for name,code in (('Expected Passes ', Test.PASS), + ('Expected Failures ', Test.XFAIL), + ('Unsupported Tests ', Test.UNSUPPORTED), + ('Unresolved Tests ', Test.UNRESOLVED), + ('Unexpected Passes ', Test.XPASS), + ('Unexpected Failures', Test.FAIL),): + if opts.quiet and not code.isFailure: + continue + N = len(byCode.get(code,[])) + if N: + print ' %s: %d' % (name,N) + + # If we encountered any additional errors, exit abnormally. 
+ if litConfig.numErrors: + print >>sys.stderr, '\n%d error(s), exiting.' % litConfig.numErrors + sys.exit(2) + + # Warn about warnings. + if litConfig.numWarnings: + print >>sys.stderr, '\n%d warning(s) in tests.' % litConfig.numWarnings + + if hasFailures: + sys.exit(1) + sys.exit(0) + +if __name__=='__main__': + main()
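For context on the layout this patch creates: the implementation moves to utils/lit/lit/lit.py, the new utils/lit/lit/__init__.py re-exports its entry point via "from lit import main" (which, under Python 2's implicit relative import rules, resolves to the sibling module lit/lit.py), and the old top-level utils/lit/lit.py is reduced to a thin wrapper (the hunk header shows it shrinking from 576 lines to 5). The sketch below is not part of the patch; it is a minimal, hedged illustration of an equivalent driver for this Python 2-era code, assuming the utils/lit directory (the one containing the 'lit' package) is importable.

```python
#!/usr/bin/env python
# Sketch of a driver equivalent to the post-patch utils/lit/lit.py wrapper.
# Assumes utils/lit (the directory holding the 'lit' package) is on sys.path,
# e.g. because this script sits next to the package. Python 2 semantics:
# importing the package runs lit/__init__.py, whose "from lit import main"
# picks up main() from the relocated lit/lit.py.

import lit  # resolves to utils/lit/lit/__init__.py, which re-exports main()

if __name__ == '__main__':
    lit.main()  # dispatches into lit/lit.py:main(), the full test driver
```

Invoked the same way as the old monolithic script (for example "python lit.py -sv path/to/tests"), it behaves identically, since option parsing, test discovery, and execution all live in the relocated lit/lit.py.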