mirror of
https://github.com/c64scene-ar/llvm-6502.git
synced 2025-07-29 10:25:12 +00:00
[lit] Add support for attaching arbitrary metrics to test results.
- This is a work-in-progress and all details are subject to change, but I am trying to build up support for allowing lit to be used as a driver for performance tests (or other tests which might want to record information beyond simple PASS/FAIL). git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@190535 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
44
utils/lit/tests/Inputs/test-data/lit.cfg
Normal file
44
utils/lit/tests/Inputs/test-data/lit.cfg
Normal file
@@ -0,0 +1,44 @@
|
||||
import os
|
||||
try:
|
||||
import ConfigParser
|
||||
except ImportError:
|
||||
import configparser as ConfigParser
|
||||
|
||||
import lit.formats
|
||||
import lit.Test
|
||||
|
||||
class DummyFormat(lit.formats.FileBasedTest):
    """Test format whose 'tests' are .ini dumps of the result to report.

    Each test file names a result code, a result output string, and an
    optional set of metrics; execute() simply reads them back and builds
    the corresponding lit.Test.Result.
    """

    def execute(self, test, lit_config):
        """Load and return the lit.Test.Result described by *test*'s file.

        Raises RuntimeError if a metric value is not an int or a float.
        """
        # Parse metric values as Python literals instead of eval()'ing
        # them, so a test input file cannot execute arbitrary code.
        import ast

        # In this dummy format, expect that each test file is actually just a
        # .ini format dump of the results to report.
        source_path = test.getSourcePath()

        cfg = ConfigParser.ConfigParser()
        cfg.read(source_path)

        # Create the basic test result.
        result_code = cfg.get('global', 'result_code')
        result_output = cfg.get('global', 'result_output')
        result = lit.Test.Result(getattr(lit.Test, result_code),
                                 result_output)

        # Load additional metrics.
        for key, value_str in cfg.items('results'):
            # Only int and float metrics are supported; anything else in
            # the file is a hard error so bad fixtures fail loudly.
            value = ast.literal_eval(value_str)
            if isinstance(value, int):
                metric = lit.Test.IntMetricValue(value)
            elif isinstance(value, float):
                metric = lit.Test.RealMetricValue(value)
            else:
                raise RuntimeError("unsupported result type")
            result.addMetric(key, metric)

        return result
|
||||
|
||||
# Wire this suite into lit: `config` is the lit configuration object
# injected into lit.cfg's execution namespace by lit itself.
config.name = 'test-data'
# Every .ini file under the suite directory is treated as one test.
config.suffixes = ['.ini']
config.test_format = DummyFormat()
# No separate source/exec trees; lit defaults both to this directory.
config.test_source_root = None
config.test_exec_root = None
config.target_triple = None
|
7
utils/lit/tests/Inputs/test-data/metrics.ini
Normal file
7
utils/lit/tests/Inputs/test-data/metrics.ini
Normal file
@@ -0,0 +1,7 @@
|
||||
[global]
|
||||
result_code = PASS
|
||||
result_output = 'Test passed.'
|
||||
|
||||
[results]
|
||||
value0 = 1
|
||||
value1 = 2.3456
|
12
utils/lit/tests/test-data.py
Normal file
12
utils/lit/tests/test-data.py
Normal file
@@ -0,0 +1,12 @@
|
||||
# Test features related to formats which support reporting additional test data.

# Run the dummy 'test-data' suite verbosely and verify its output below.
# RUN: %{lit} -j 1 -v %{inputs}/test-data > %t.out
# RUN: FileCheck < %t.out %s

# CHECK: -- Testing:

# The CHECK-NEXT lines pin the exact metric report block emitted for the
# single metrics.ini test: a header, one line per metric, and a footer.
# CHECK: PASS: test-data :: metrics.ini
# CHECK-NEXT: *** TEST 'test-data :: metrics.ini' RESULTS ***
# CHECK-NEXT: value0: 1
# CHECK-NEXT: value1: 2.3456
# CHECK-NEXT: ***
|
Reference in New Issue
Block a user