improve regression testing framework
parent 2b5892289f
commit 6ac8b9e006

@@ -1,9 +1,29 @@
# all features are built in these directories
test_dirs:
- examples
- tools
- test

features:
- variant=release asserts=production debug-symbols=on
- encryption=openssl statistics=on logging=verbose disk-stats=on dht=logging request-log=on allocator=debug debug-iterators=on

# all build_features are built in these directories
build_dirs:
- tools
- examples
- bindings/python

build_features:
- ipv6=off
- deprecated-functions=off
- logging=verbose
- statistics=on
- asserts=off
- asserts=production debug
- asserts=on release
- asserts=off invariant-checks=off
- invariant-checks=on release
- ipv6=off dht=off extensions=off logging=none deprecated-functions=off invariant-checks=off

project: libtorrent

branch: trunk
@@ -13,8 +33,3 @@ clean:

time_limit: 180

features:
- variant=release asserts=production invariant-checks=off debug-iterators=off debug-symbols=on
- encryption=openssl statistics=on logging=verbose disk-stats=on dht=logging request-log=on allocator=debug
- ipv6=off dht=off extensions=off logging=none deprecated-functions=off
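
The new layout of .regression.yml above separates plain test configurations (every directory in test_dirs is built and run against every entry in features) from build-only configurations (every directory in build_dirs is additionally built against every entry in build_features). A minimal sketch of how a runner could expand such a file into (directory, feature-string) jobs; it assumes PyYAML is installed and a .regression.yml in the working directory, and the function name is illustrative, not part of the framework:

# Sketch: expand the .regression.yml matrix into (directory, feature-string) jobs.
# 'expand_matrix' is a hypothetical helper, not a function from the framework.
import yaml

def expand_matrix(cfg):
    jobs = []
    # every test directory is built and run with every 'features' entry
    for d in cfg.get('test_dirs', []):
        for f in cfg.get('features', ['']):
            jobs.append((d, f))
    # build-only directories are additionally built with every 'build_features' entry
    for d in cfg.get('build_dirs', []):
        for f in cfg.get('build_features', ['']):
            jobs.append((d, f))
    return jobs

if __name__ == '__main__':
    with open('.regression.yml') as f:
        cfg = yaml.safe_load(f)
    for directory, features in expand_matrix(cfg):
        print(directory, '->', features)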
@@ -171,6 +171,6 @@ int main()
    }
#endif

    return ret;
    return ret ? 333 : 0;
}
@@ -38,7 +38,7 @@ import glob
import json

# TODO: different parsers could be run on output from different actions
# if we would use the xml output in stead of stdout/stderr
# if we would use the xml output instead of stdout/stderr
def style_output(logfile, outfile):
    subtle = False
    for l in logfile.split('\n'):
@@ -59,9 +59,14 @@ def style_output(logfile, outfile):
            print >>outfile, '<span class="test-error">%s</span>' % l
        elif '**passed**' in l:
            print >>outfile, '<span class="test-pass">%s</span>' % l
        elif ': error: ' in l or ': fatal error: ' in l or ' : fatal error ' in l or \
            'failed to write output file' in l or ') : error C' in l or \
            ' : error LNK' in l or ': undefined reference to ' in l:
        elif ': error: ' in l or \
            ';1;31merror: ' in l or \
            ': fatal error: ' in l or \
            ' : fatal error ' in l or \
            'failed to write output file' in l or \
            ') : error C' in l or \
            ' : error LNK' in l or \
            ': undefined reference to ' in l:
            print >>outfile, '<span class="compile-error">%s</span>' % l
        elif ': warning: ' in l or ') : warning C' in l or \
            'Uninitialised value was created by a' in l or \
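
The longer elif chain above replaces the old three-line compile-error test with an expanded pattern list, notably adding ';1;31merror: ', which matches the tail of the ANSI colour escape that gcc/clang wrap around "error:" when colourised diagnostics end up in the captured log. A simplified, self-contained re-expression of that classification; the pattern list mirrors the diff, but the function name and return values are not from the script:

# Simplified sketch of the per-line classification applied to build logs.
COMPILE_ERROR_PATTERNS = [
    ': error: ',                  # gcc/clang plain diagnostics
    ';1;31merror: ',              # gcc/clang coloured diagnostics (tail of an ANSI escape such as ESC[0;1;31m)
    ': fatal error: ',
    ' : fatal error ',            # MSVC
    'failed to write output file',
    ') : error C',                # MSVC compile errors
    ' : error LNK',               # MSVC linker errors
    ': undefined reference to ',  # GNU ld
]

def classify_line(line):
    if any(p in line for p in COMPILE_ERROR_PATTERNS):
        return 'compile-error'
    if '**passed**' in line:
        return 'test-pass'
    return None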
@@ -221,6 +226,10 @@ for branch_name in revs:
    print >>html, '''<html><head><title>regression tests, %s</title><style type="text/css">
.passed { display: block; width: 6px; height: 1em; background-color: #6f8 }
.failed { display: block; width: 6px; height: 1em; background-color: #f68 }
.crash { display: block; width: 6px; height: 1em; background-color: #f08 }
.compile-failed { display: block; width: 6px; height: 1em; background-color: #000 }
.timeout { display: block; width: 6px; height: 1em; background-color: #86f }
.valgrind-error { display: block; width: 6px; height: 1em; background-color: #f80 }
table { border: 0; border-collapse: collapse; }
h1 { font-size: 15pt; }
th { font-size: 8pt; }

@@ -243,8 +252,13 @@ for branch_name in revs:

    print >>html, '<tr><th colspan="2" style="border:0;">revision %d</th>' % r

    for f in tests:
        print >>html, '<th colspan="%d" style="width: %dpx;">%s</th>' % (len(tests[f]), len(tests[f])*6 - 5, f)
    features = tests.keys()
    features = sorted(features, key=lambda x: len(tests[x]))

    for f in features:
        title = f
        if len(tests[f]) < 10: title = '#'
        print >>html, '<th colspan="%d" style="width: %dpx;">%s</th>' % (len(tests[f]), len(tests[f])*6 - 5, title)
    print >>html, '</tr>'

    for p in platforms:
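
In the summary table each feature configuration becomes a group of 6-pixel-wide cells, one per test. The change above sorts the column groups by the number of tests in each configuration and abbreviates the header to '#' when a configuration has fewer than 10 tests, so that long feature strings no longer dwarf narrow column groups. A rough, self-contained re-expression of that header logic; the shape of the tests dict (feature string mapped to a list of test names) is an assumption based on the surrounding code:

# Sketch: build the header row of the summary table.
def header_row(tests):
    features = sorted(tests.keys(), key=lambda x: len(tests[x]))
    cells = []
    for f in features:
        title = f if len(tests[f]) >= 10 else '#'   # abbreviate narrow column groups
        cells.append('<th colspan="%d" style="width: %dpx;">%s</th>'
                     % (len(tests[f]), len(tests[f]) * 6 - 5, title))
    return '<tr>' + ''.join(cells) + '</tr>'

print(header_row({'asserts=off': ['t1', 't2'],
                  'ipv6=off dht=off': ['t%d' % i for i in range(12)]}))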
@@ -261,26 +275,46 @@ for branch_name in revs:
    print >>details_file, '''<html><head><title>%s %s [%s]</title><style type="text/css">
.passed { background-color: #6f8 }
.failed { background-color: #f68 }
.crash { background-color: #f08 }
.compile-failed { background-color: #000 }
.timeout { background-color: #86f }
.valgrind-error { background-color: #f80 }
table { border: 0; border-collapse: collapse; display: inline-block; }
th { font-size: 15pt; width: 18em; }
td { border: 0; border-spacing: 0px; padding: 1px 0px 0px 1px; }
</style>
</head><body>''' % (p, toolset, branch_name)
    print >>html, '<th class="left-head"><a href="%s">%s</a></th>' % (details_name, toolset)
    for f in platforms[p][toolset]:
        print >>details_file, '<table><tr><th>%s</th></tr>' % f

    for f in features:
        title = f
        if len(tests[f]) < 10: title = '#'

        print >>details_file, '<table><tr><th>%s</th></tr>' % title
        for t in platforms[p][toolset][f]:
            details = platforms[p][toolset][f][t]
            exitcode = details['status']
            if exitcode == 0: c = 'passed'
            else: c = 'failed'
            error_state = '%d' % exitcode
            if exitcode == 222:

            if exitcode == 0:
                error_state = 'passed'
                c = 'passed'
            elif exitcode == 222:
                error_state = 'valgrind error'
            elif exitcode == 139:
                c = 'valgrind-error'
            elif exitcode == 139 or \
                exitcode == 138:
                error_state = 'crash'
                c = 'crash'
            elif exitcode == -1073740777:
                error_state = 'timeout'
                c = 'timeout'
            elif exitcode == 333:
                error_code = 'test-failed'
                c = 'failed'
            else:
                error_state = 'compile-failed (%d)' % exitcode
                c = 'compile-failed'

            log_name = os.path.join('logs-%s-%d' % (branch_name, r), p + '~' + toolset + '~' + t + '~' + f.replace(' ', '.') + '.html')
            print >>html, '<td title="%s %s"><a class="%s" href="%s"></a></td>' % (t, f, c, log_name)
            print >>details_file, '<tr><td class="%s"><a href="%s">%s [%s]</a></td></tr>' % (c, os.path.split(log_name)[1], t, error_state)
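
With test binaries now returning 333 on failure (see the int main() change above), the report generator can tell a failing test apart from a crash, a valgrind error, a timeout, or a build that never produced a binary. A condensed sketch of that mapping, following the same exit-code conventions as the diff; the function name and the label strings are illustrative:

# Sketch: map a job's exit code to the CSS class and label used in the report.
def classify_exit(exitcode):
    if exitcode == 0:
        return 'passed', 'passed'
    if exitcode == 222:
        return 'valgrind-error', 'valgrind error'
    if exitcode in (138, 139):
        return 'crash', 'crash'
    if exitcode == -1073740777:          # Windows status code treated as a timeout by the report
        return 'timeout', 'timeout'
    if exitcode == 333:
        return 'failed', 'test failed'
    return 'compile-failed', 'compile-failed (%d)' % exitcode

assert classify_exit(333) == ('failed', 'test failed')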
@@ -104,7 +104,8 @@ def run_tests(toolset, tests, features, options, test_dir, time_limit):
    # works for actual unit tests
    if 'launcher=valgrind' in options_copy:
        options_copy.remove('launcher=valgrind')
    cmdline = ['bjam', '--out-xml=%s' % xml_file, '-l%d' % time_limit, '-q', '--abbreviate-paths', toolset] + options_copy + feature_list
    cmdline = ['bjam', '--out-xml=%s' % xml_file, '-l%d' % time_limit, \
        '-q', '--abbreviate-paths', toolset] + options_copy + feature_list

    # print ' '.join(cmdline)
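
The command line itself only changes cosmetically here (it is wrapped across two lines); the flags stay the same: --out-xml for machine-readable build results, -l so bjam kills any single action that exceeds the time limit, -q to stop on the first failure, and --abbreviate-paths to keep target paths short. A hedged sketch of how such a command might be launched and its exit status captured; the file name, toolset and feature strings below are placeholders, not values from the script:

# Sketch: run a bjam command like the one built above and capture its exit status.
import subprocess

xml_file = 'bjam_build.out.xml'
time_limit = 180
cmdline = ['bjam', '--out-xml=%s' % xml_file, '-l%d' % time_limit,
           '-q', '--abbreviate-paths', 'gcc', 'variant=release', 'asserts=off']

p = subprocess.Popen(cmdline, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
output, _ = p.communicate()
print('bjam exited with', p.returncode)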
@@ -170,10 +171,11 @@ def main(argv):

    toolsets = []

    num_processes = 4
    num_processes = 2
    incremental = False

    test_dirs = []
    build_dirs = []
    configs = []
    options = ['boost=source', 'preserve-test-targets=on']
    time_limit = 1200
@@ -217,8 +219,14 @@ def main(argv):
    if 'test_dirs' in cfg:
        for d in cfg['test_dirs']:
            test_dirs.append(os.path.abspath(d))
    else:
        print 'no test directory specified by .regression.yml'

    if 'build_dirs' in cfg:
        for d in cfg['build_dirs']:
            build_dirs.append(os.path.abspath(d))
            test_dirs.append(os.path.abspath(d))

    if len(build_dirs) == 0 and len(test_dirs) == 0:
        print 'no test or build directory specified by .regression.yml'
        sys.exit(1)

    configs = []
@@ -228,6 +236,11 @@ def main(argv):
    else:
        configs = ['']

    build_configs = []
    if 'build_features' in cfg:
        for d in cfg['build_features']:
            build_configs.append(d)

    clean_files = []
    if 'clean' in cfg:
        clean_files = cfg['clean']
@@ -241,7 +254,7 @@ def main(argv):

    # it takes a bit longer to run in valgrind
    if 'launcher=valgrind' in options:
        time_limit *= 6
        time_limit *= 7

    architecture = platform.machine()
    build_platform = platform.system() + '-' + platform.release()
@@ -250,7 +263,7 @@ def main(argv):

    timestamp = datetime.now()

    # tester_pool = Pool(processes=num_processes)
    tester_pool = Pool(processes=num_processes)

    print '%s-%d - %s - %s' % (branch_name, revision, author, timestamp)
@@ -292,18 +305,22 @@ def main(argv):
        if not toolset in results: results[toolset] = {}
        toolset_found = False

        # futures = []
        # for features in configs:
        # futures.append(tester_pool.apply_async(run_tests, [toolset, tests, features, options, test_dir, time_limit]))
        additional_configs = []
        if test_dir in build_dirs:
            additional_configs = build_configs

        # for future in futures:
        # (compiler, r) = future.get()
        # results[toolset].update(r)
        futures = []
        for features in configs + additional_configs:
            futures.append(tester_pool.apply_async(run_tests, [toolset, tests, features, options, test_dir, time_limit]))

        for features in configs:
            (compiler, r) = run_tests(toolset, tests, features, options, test_dir, time_limit)
        for future in futures:
            (compiler, r) = future.get()
            results[toolset].update(r)

        # for features in configs + additional_configs:
        # (compiler, r) = run_tests(toolset, tests, features, options, test_dir, time_limit)
        # results[toolset].update(r)

        print ''

    if len(clean_files) > 0:
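
The functional change in this last hunk is that the per-configuration builds are no longer run sequentially in-process: each feature configuration, including the extra build_features configurations for directories listed in build_dirs, is dispatched to a multiprocessing.Pool worker via apply_async, and the results are collected afterwards with get(). A stripped-down sketch of that dispatch/collect pattern, with a stand-in workload instead of the real run_tests():

# Sketch of the dispatch/collect pattern used above, with a dummy workload.
from multiprocessing import Pool

def run_one(features):
    # stand-in for run_tests(toolset, tests, features, options, test_dir, time_limit)
    return features, {'%s/dummy-test' % features: {'status': 0}}

if __name__ == '__main__':
    configs = ['asserts=on release', 'asserts=off invariant-checks=off']
    additional_configs = ['ipv6=off deprecated-functions=off']

    pool = Pool(processes=2)
    futures = [pool.apply_async(run_one, [f]) for f in configs + additional_configs]

    results = {}
    for future in futures:
        features, r = future.get()   # blocks until that configuration has finished
        results.update(r)
    pool.close()
    pool.join()
    print(results)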