[lit] Add an --output option for writing results in a machine-readable form.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@190738 91177308-0d34-0410-b5e6-96231b3b80d8
Daniel Dunbar 2013-09-14 01:19:17 +00:00
parent e94e0984df
commit 2849503ab2
5 changed files with 96 additions and 3 deletions
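
For illustration (not part of the commit itself): after this change, a machine-readable results file can be requested directly on the lit command line, e.g.

    lit -j 1 -v ./tests --output results.json

and the file contains a single JSON object of the shape checked by the new test below (the version tuple and timing values here are placeholders):

    {
      "__version__": [0, 3, 0],
      "elapsed": 1.25,
      "tests": [
        {
          "code": "PASS",
          "elapsed": 0.01,
          "metrics": {
            "value0": 1,
            "value1": 2.3456
          },
          "name": "test-data :: metrics.ini",
          "output": "Test passed."
        }
      ]
    }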

utils/lit/lit/Test.py

@@ -35,6 +35,21 @@ UNSUPPORTED = ResultCode('UNSUPPORTED', False)

class MetricValue(object):
    def format(self):
        """
        format() -> str

        Convert this metric to a string suitable for displaying as part of the
        console output.
        """
        raise RuntimeError("abstract method")

    def todata(self):
        """
        todata() -> json-serializable data

        Convert this metric to content suitable for serializing in the JSON test
        output.
        """
        raise RuntimeError("abstract method")

class IntMetricValue(MetricValue):
@@ -44,6 +59,9 @@ class IntMetricValue(MetricValue):
    def format(self):
        return str(self.value)

    def todata(self):
        return self.value

class RealMetricValue(MetricValue):
    def __init__(self, value):
        self.value = value
@@ -51,6 +69,9 @@ class RealMetricValue(MetricValue):
    def format(self):
        return '%.4f' % self.value

    def todata(self):
        return self.value

# Test results.
class Result(object):
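
The format()/todata() pair above is the entire contract for a metric type. A hypothetical subclass (illustrative only, not part of this commit) just needs to render itself for the console and reduce itself to a JSON-serializable value:

    # Illustrative sketch: a string-valued metric following the same pattern.
    class TextMetricValue(MetricValue):
        def __init__(self, value):
            self.value = value

        def format(self):
            # Shown verbatim in console output.
            return self.value

        def todata(self):
            # Strings are already JSON-serializable.
            return self.value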

utils/lit/lit/main.py

@@ -69,6 +69,45 @@ class TestingProgressDisplay(object):
        # Ensure the output is flushed.
        sys.stdout.flush()

def write_test_results(run, lit_config, testing_time, output_path):
    try:
        import json
    except ImportError:
        lit_config.fatal('test output unsupported with Python 2.5')

    # Construct the data we will write.
    data = {}

    # Encode the current lit version as a schema version.
    data['__version__'] = lit.__versioninfo__
    data['elapsed'] = testing_time
    # FIXME: Record some information on the lit configuration used?
    # FIXME: Record information from the individual test suites?

    # Encode the tests.
    data['tests'] = tests_data = []
    for test in run.tests:
        test_data = {
            'name' : test.getFullName(),
            'code' : test.result.code.name,
            'output' : test.result.output,
            'elapsed' : test.result.elapsed }

        # Add test metrics, if present.
        if test.result.metrics:
            test_data['metrics'] = metrics_data = {}
            for key, value in test.result.metrics.items():
                metrics_data[key] = value.todata()

        tests_data.append(test_data)

    # Write the output.
    f = open(output_path, 'w')
    try:
        json.dump(data, f, indent=2, sort_keys=True)
        f.write('\n')
    finally:
        f.close()

def main(builtinParameters = {}):
    # Bump the GIL check interval; it's more important to get any one thread to
    # a blocking operation (hopefully exec) than to try and unblock other threads.
@@ -103,6 +142,9 @@ def main(builtinParameters = {}):
    group.add_option("-v", "--verbose", dest="showOutput",
                     help="Show all test output",
                     action="store_true", default=False)
    group.add_option("-o", "--output", dest="output_path",
                     help="Write test results to the provided path",
                     action="store", type=str, metavar="PATH")
    group.add_option("", "--no-progress-bar", dest="useProgressBar",
                     help="Do not use curses based progress bar",
                     action="store_false", default=True)
@@ -289,8 +331,13 @@ def main(builtinParameters = {}):
            sys.exit(2)

    display.finish()

    testing_time = time.time() - startTime
    if not opts.quiet:
-        print('Testing Time: %.2fs'%(time.time() - startTime))
+        print('Testing Time: %.2fs' % (testing_time,))

    # Write out the test data, if requested.
    if opts.output_path is not None:
        write_test_results(run, litConfig, testing_time, opts.output_path)

    # List test results organized by kind.
    hasFailures = False
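
Since write_test_results() emits plain JSON via json.dump, the file can be consumed without any lit dependency. A minimal consumer sketch (illustrative; 'results.json' is an assumed output path):

    import json

    # Load the results file written by `lit --output results.json`.
    with open('results.json') as f:
        data = json.load(f)

    # Report every test whose result code is not PASS.
    for test in data['tests']:
        if test['code'] != 'PASS':
            print('%s: %s' % (test['code'], test['name']))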

utils/lit/tests/Inputs/test-data/metrics.ini

@@ -1,6 +1,6 @@
[global]
result_code = PASS
-result_output = 'Test passed.'
+result_output = Test passed.

[results]
value0 = 1

utils/lit/tests/lit.cfg

@@ -23,7 +23,7 @@ config.excludes = ['Inputs']
config.test_source_root = os.path.dirname(__file__)
config.test_exec_root = config.test_source_root
-config.target_triple = None
+config.target_triple = '(unused)'

src_root = os.path.join(config.test_source_root, '..')
config.environment['PYTHONPATH'] = src_root
@@ -39,3 +39,7 @@ config.substitutions.append(('%{python}', sys.executable))
if lit_config.params.get('check-coverage', None):
    config.environment['COVERAGE_PROCESS_START'] = os.path.join(
        os.path.dirname(__file__), ".coveragerc")

# Add a feature to detect the Python version.
config.available_features.add("python%d.%d" % (sys.version_info[0],
                                               sys.version_info[1]))
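
The feature added above is what the new test's XFAIL line matches against. A quick check of the string it produces (illustrative, run outside of lit):

    import sys

    # Under CPython 2.5 this prints 'python2.5', which matches the
    # '# XFAIL: python2.5' line in the new test below.
    print("python%d.%d" % (sys.version_info[0], sys.version_info[1]))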

utils/lit/tests/test-output.py

@@ -0,0 +1,21 @@
# XFAIL: python2.5
# RUN: %{lit} -j 1 -v %{inputs}/test-data --output %t.results.out > %t.out
# RUN: FileCheck < %t.results.out %s
# CHECK: {
# CHECK: "__version__"
# CHECK: "elapsed"
# CHECK-NEXT: "tests": [
# CHECK-NEXT:   {
# CHECK-NEXT:     "code": "PASS",
# CHECK-NEXT:     "elapsed": {{[0-9.]+}},
# CHECK-NEXT:     "metrics": {
# CHECK-NEXT:       "value0": 1,
# CHECK-NEXT:       "value1": 2.3456
# CHECK-NEXT:     }
# CHECK-NEXT:     "name": "test-data :: metrics.ini",
# CHECK-NEXT:     "output": "Test passed."
# CHECK-NEXT:   }
# CHECK-NEXT: ]
# CHECK-NEXT: }
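
The CHECK-NEXT lines work because json.dump(..., indent=2, sort_keys=True) produces a deterministic layout: keys appear in alphabetical order (code, elapsed, metrics, name, output), one per line. A minimal reproduction of that ordering (illustrative values):

    import json

    # sort_keys=True guarantees 'code' < 'elapsed' < 'name', line by line.
    print(json.dumps({'name': 'x', 'code': 'PASS', 'elapsed': 0.1},
                     indent=2, sort_keys=True))
    # {
    #   "code": "PASS",
    #   "elapsed": 0.1,
    #   "name": "x"
    # }

The test is XFAIL'd on Python 2.5 because the json module only entered the standard library in Python 2.6; that is exactly the ImportError case write_test_results() turns into a fatal error.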