improve regression testing framework

Arvid Norberg 2014-03-16 23:55:18 +00:00
parent 2b5892289f
commit 6ac8b9e006
4 changed files with 101 additions and 35 deletions

View File

@@ -1,9 +1,29 @@
+# all features are built in these directories
 test_dirs:
-  - examples
-  - tools
   - test
+
+features:
+  - variant=release asserts=production debug-symbols=on
+  - encryption=openssl statistics=on logging=verbose disk-stats=on dht=logging request-log=on allocator=debug debug-iterators=on
+
+# all build_features are built in these directories
+build_dirs:
+  - tools
+  - examples
   - bindings/python
+
+build_features:
+  - ipv6=off
+  - deprecated-functions=off
+  - logging=verbose
+  - statistics=on
+  - asserts=off
+  - asserts=production debug
+  - asserts=on release
+  - asserts=off invariant-checks=off
+  - invariant-checks=on release
+  - ipv6=off dht=off extensions=off logging=none deprecated-functions=off invariant-checks=off
 
 project: libtorrent
 branch: trunk

@@ -13,8 +33,3 @@ clean:
 time_limit: 180
-
-features:
-  - variant=release asserts=production invariant-checks=off debug-iterators=off debug-symbols=on
-  - encryption=openssl statistics=on logging=verbose disk-stats=on dht=logging request-log=on allocator=debug
-  - ipv6=off dht=off extensions=off logging=none deprecated-functions=off
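The new layout splits the configuration in two: directories under test_dirs are run through the full features matrix, while directories under build_dirs (tools, examples and the python bindings) are only built, against the lighter build_features list. A minimal sketch of how the two pairs of keys can be consumed, assuming PyYAML; the variable names mirror run_tests.py further down, but the snippet itself is illustrative rather than the project's code:

    import os
    import yaml  # assumption: PyYAML is available to parse .regression.yml

    cfg = yaml.safe_load(open('.regression.yml'))

    # directories whose tests are run with every entry in 'features'
    test_dirs = [os.path.abspath(d) for d in cfg.get('test_dirs', [])]
    # directories that are only built, once per entry in 'build_features'
    build_dirs = [os.path.abspath(d) for d in cfg.get('build_dirs', [])]
    test_dirs += build_dirs  # build-only directories are still visited

    configs = cfg.get('features', [''])
    build_configs = cfg.get('build_features', [])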

View File

@@ -171,6 +171,6 @@ int main()
     }
 #endif
-    return ret;
+    return ret ? 333 : 0;
 }
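Returning 333 instead of the raw ret value gives the test binary a dedicated "tests ran and failed" exit code, which the report generator below uses to tell genuine test failures apart from crashes, valgrind errors, timeouts and build failures. A minimal sketch of the resulting convention, with the values taken from this commit's diffs (the helper name is made up for illustration):

    def classify_exit_code(exitcode):
        # exit-code convention used by the regression scripts in this commit
        if exitcode == 0: return 'passed'
        if exitcode == 333: return 'test-failed'     # the patched main() above
        if exitcode == 222: return 'valgrind-error'
        if exitcode in (138, 139): return 'crash'    # 128 + terminating signal
        if exitcode == -1073740777: return 'timeout'
        return 'compile-failed'                      # the test binary never ran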

View File

@@ -38,7 +38,7 @@ import glob
 import json
 
 # TODO: different parsers could be run on output from different actions
-# if we would use the xml output in stead of stdout/stderr
+# if we would use the xml output instead of stdout/stderr
 def style_output(logfile, outfile):
     subtle = False
     for l in logfile.split('\n'):

@@ -59,9 +59,14 @@ def style_output(logfile, outfile):
             print >>outfile, '<span class="test-error">%s</span>' % l
         elif '**passed**' in l:
             print >>outfile, '<span class="test-pass">%s</span>' % l
-        elif ': error: ' in l or ': fatal error: ' in l or ' : fatal error ' in l or \
-            'failed to write output file' in l or ') : error C' in l or \
-            ' : error LNK' in l or ': undefined reference to ' in l:
+        elif ': error: ' in l or \
+            ';1;31merror: ' in l or \
+            ': fatal error: ' in l or \
+            ' : fatal error ' in l or \
+            'failed to write output file' in l or \
+            ') : error C' in l or \
+            ' : error LNK' in l or \
+            ': undefined reference to ' in l:
             print >>outfile, '<span class="compile-error">%s</span>' % l
         elif ': warning: ' in l or ') : warning C' in l or \
             'Uninitialised value was created by a' in l or \

@@ -221,6 +226,10 @@ for branch_name in revs:
     print >>html, '''<html><head><title>regression tests, %s</title><style type="text/css">
     .passed { display: block; width: 6px; height: 1em; background-color: #6f8 }
     .failed { display: block; width: 6px; height: 1em; background-color: #f68 }
+    .crash { display: block; width: 6px; height: 1em; background-color: #f08 }
+    .compile-failed { display: block; width: 6px; height: 1em; background-color: #000 }
+    .timeout { display: block; width: 6px; height: 1em; background-color: #86f }
+    .valgrind-error { display: block; width: 6px; height: 1em; background-color: #f80 }
     table { border: 0; border-collapse: collapse; }
     h1 { font-size: 15pt; }
     th { font-size: 8pt; }

@@ -243,8 +252,13 @@ for branch_name in revs:
         print >>html, '<tr><th colspan="2" style="border:0;">revision %d</th>' % r
 
-        for f in tests:
-            print >>html, '<th colspan="%d" style="width: %dpx;">%s</th>' % (len(tests[f]), len(tests[f])*6 - 5, f)
+        features = tests.keys()
+        features = sorted(features, key=lambda x: len(tests[x]))
+
+        for f in features:
+            title = f
+            if len(tests[f]) < 10: title = '#'
+            print >>html, '<th colspan="%d" style="width: %dpx;">%s</th>' % (len(tests[f]), len(tests[f])*6 - 5, title)
         print >>html, '</tr>'
 
         for p in platforms:

@@ -261,26 +275,46 @@ for branch_name in revs:
                 print >>details_file, '''<html><head><title>%s %s [%s]</title><style type="text/css">
                 .passed { background-color: #6f8 }
                 .failed { background-color: #f68 }
+                .crash { background-color: #f08 }
+                .compile-failed { background-color: #000 }
+                .timeout { background-color: #86f }
+                .valgrind-error { background-color: #f80 }
                 table { border: 0; border-collapse: collapse; display: inline-block; }
                 th { font-size: 15pt; width: 18em; }
                 td { border: 0; border-spacing: 0px; padding: 1px 0px 0px 1px; }
                 </style>
                 </head><body>''' % (p, toolset, branch_name)
                 print >>html, '<th class="left-head"><a href="%s">%s</a></th>' % (details_name, toolset)
-                for f in platforms[p][toolset]:
-                    print >>details_file, '<table><tr><th>%s</th></tr>' % f
+
+                for f in features:
+                    title = f
+                    if len(tests[f]) < 10: title = '#'
+                    print >>details_file, '<table><tr><th>%s</th></tr>' % title
                     for t in platforms[p][toolset][f]:
                         details = platforms[p][toolset][f][t]
                         exitcode = details['status']
-                        if exitcode == 0: c = 'passed'
-                        else: c = 'failed'
-                        error_state = '%d' % exitcode
-                        if exitcode == 222:
+
+                        if exitcode == 0:
+                            error_state = 'passed'
+                            c = 'passed'
+                        elif exitcode == 222:
                             error_state = 'valgrind error'
-                        elif exitcode == 139:
+                            c = 'valgrind-error'
+                        elif exitcode == 139 or \
+                            exitcode == 138:
                             error_state = 'crash'
+                            c = 'crash'
                         elif exitcode == -1073740777:
                             error_state = 'timeout'
+                            c = 'timeout'
+                        elif exitcode == 333:
+                            error_code = 'test-failed'
+                            c = 'failed'
+                        else:
+                            error_state = 'compile-failed (%d)' % exitcode
+                            c = 'compile-failed'
                         log_name = os.path.join('logs-%s-%d' % (branch_name, r), p + '~' + toolset + '~' + t + '~' + f.replace(' ', '.') + '.html')
                         print >>html, '<td title="%s %s"><a class="%s" href="%s"></a></td>' % (t, f, c, log_name)
                         print >>details_file, '<tr><td class="%s"><a href="%s">%s [%s]</a></td></tr>' % (c, os.path.split(log_name)[1], t, error_state)
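The new ';1;31merror: ' pattern is there because compilers that colorize their diagnostics emit an ANSI escape sequence ending in ';1;31m' (bold red) directly in front of the word "error:", so the plain ': error: ' check can miss such lines. A small illustration, assuming a clang-style colored diagnostic (the sample line is made up):

    line = 'test.cpp:12:3: \x1b[0;1;31merror: \x1b[0mexpected expression'
    print ': error: ' in line       # False - the escape sits between ': ' and 'error:'
    print ';1;31merror: ' in line   # True  - matches the colorized form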

View File

@@ -104,7 +104,8 @@ def run_tests(toolset, tests, features, options, test_dir, time_limit):
         # works for actual unit tests
         if 'launcher=valgrind' in options_copy:
             options_copy.remove('launcher=valgrind')
-        cmdline = ['bjam', '--out-xml=%s' % xml_file, '-l%d' % time_limit, '-q', '--abbreviate-paths', toolset] + options_copy + feature_list
+        cmdline = ['bjam', '--out-xml=%s' % xml_file, '-l%d' % time_limit, \
+            '-q', '--abbreviate-paths', toolset] + options_copy + feature_list
 
         # print ' '.join(cmdline)

@@ -170,10 +171,11 @@ def main(argv):
     toolsets = []
-    num_processes = 4
+    num_processes = 2
     incremental = False
     test_dirs = []
+    build_dirs = []
     configs = []
     options = ['boost=source', 'preserve-test-targets=on']
     time_limit = 1200

@@ -217,8 +219,14 @@ def main(argv):
     if 'test_dirs' in cfg:
         for d in cfg['test_dirs']:
             test_dirs.append(os.path.abspath(d))
-    else:
-        print 'no test directory specified by .regression.yml'
+
+    if 'build_dirs' in cfg:
+        for d in cfg['build_dirs']:
+            build_dirs.append(os.path.abspath(d))
+            test_dirs.append(os.path.abspath(d))
+
+    if len(build_dirs) == 0 and len(test_dirs) == 0:
+        print 'no test or build directory specified by .regression.yml'
         sys.exit(1)
 
     configs = []

@@ -228,6 +236,11 @@ def main(argv):
     else:
         configs = ['']
 
+    build_configs = []
+    if 'build_features' in cfg:
+        for d in cfg['build_features']:
+            build_configs.append(d)
+
     clean_files = []
     if 'clean' in cfg:
         clean_files = cfg['clean']

@@ -241,7 +254,7 @@ def main(argv):
     # it takes a bit longer to run in valgrind
     if 'launcher=valgrind' in options:
-        time_limit *= 6
+        time_limit *= 7
 
     architecture = platform.machine()
     build_platform = platform.system() + '-' + platform.release()

@@ -250,7 +263,7 @@ def main(argv):
     timestamp = datetime.now()
-    # tester_pool = Pool(processes=num_processes)
+    tester_pool = Pool(processes=num_processes)
 
     print '%s-%d - %s - %s' % (branch_name, revision, author, timestamp)

@@ -292,18 +305,22 @@ def main(argv):
             if not toolset in results: results[toolset] = {}
             toolset_found = False
 
-            # futures = []
-            # for features in configs:
-            #     futures.append(tester_pool.apply_async(run_tests, [toolset, tests, features, options, test_dir, time_limit]))
-            # for future in futures:
-            #     (compiler, r) = future.get()
-            #     results[toolset].update(r)
+            additional_configs = []
+            if test_dir in build_dirs:
+                additional_configs = build_configs
 
-            for features in configs:
-                (compiler, r) = run_tests(toolset, tests, features, options, test_dir, time_limit)
+            futures = []
+            for features in configs + additional_configs:
+                futures.append(tester_pool.apply_async(run_tests, [toolset, tests, features, options, test_dir, time_limit]))
+            for future in futures:
+                (compiler, r) = future.get()
                 results[toolset].update(r)
 
+            # for features in configs + additional_configs:
+            #     (compiler, r) = run_tests(toolset, tests, features, options, test_dir, time_limit)
+            #     results[toolset].update(r)
+
             print ''
 
             if len(clean_files) > 0:
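The scheduling change is the core of this file: the multiprocessing pool that was previously commented out is switched back on, and directories listed under build_dirs get the build_features configs appended to their run list. A self-contained sketch of the apply_async / get pattern used above, with a stand-in worker in place of run_tests():

    from multiprocessing import Pool

    def run_one(features):
        # stand-in for run_tests(toolset, tests, features, options, test_dir, time_limit)
        return 'gcc', {features: {'status': 0}}

    if __name__ == '__main__':
        tester_pool = Pool(processes=2)      # matches the new num_processes default
        futures = []
        for features in ['asserts=on release', 'ipv6=off']:
            futures.append(tester_pool.apply_async(run_one, [features]))
        results = {}
        for future in futures:
            (compiler, r) = future.get()     # blocks until that feature set is done
            results.update(r)
        tester_pool.close()
        tester_pool.join()
        print results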