diff --git a/.regression.yml b/.regression.yml
index 70c9fbe77..1520a1c54 100644
--- a/.regression.yml
+++ b/.regression.yml
@@ -1,9 +1,29 @@
+# all "features" configurations are built and run in these directories
test_dirs:
- - examples
- - tools
- test
+
+features:
+ - variant=release asserts=production debug-symbols=on
+ - encryption=openssl statistics=on logging=verbose disk-stats=on dht=logging request-log=on allocator=debug debug-iterators=on
+
+# all "build_features" configurations are built in these directories
+build_dirs:
+ - tools
+ - examples
- bindings/python
+build_features:
+ - ipv6=off
+ - deprecated-functions=off
+ - logging=verbose
+ - statistics=on
+ - asserts=off
+ - asserts=production debug
+ - asserts=on release
+ - asserts=off invariant-checks=off
+ - invariant-checks=on release
+ - ipv6=off dht=off extensions=off logging=none deprecated-functions=off invariant-checks=off
+
project: libtorrent
branch: trunk
@@ -13,8 +33,3 @@ clean:
time_limit: 180
-features:
- - variant=release asserts=production invariant-checks=off debug-iterators=off debug-symbols=on
- - encryption=openssl statistics=on logging=verbose disk-stats=on dht=logging request-log=on allocator=debug
- - ipv6=off dht=off extensions=off logging=none deprecated-functions=off
-
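The net effect of the config change: directories in `test_dirs` get the full `features` matrix (built and run), while `build_dirs` entries (tools, examples, python bindings) are also compiled against each lighter `build_features` line, which only has to build. Each feature line is a space-separated set of bjam properties; a minimal sketch of how run_tests.py (further down) expands one such line, with the 'gcc' toolset name as an assumption:

    # a minimal sketch, assuming toolset 'gcc': each space-separated token
    # in a features line becomes its own bjam command-line property
    features = 'asserts=production debug'
    feature_list = features.split(' ')
    cmdline = ['bjam', '-q', '--abbreviate-paths', 'gcc'] + feature_list
    # -> ['bjam', '-q', '--abbreviate-paths', 'gcc', 'asserts=production', 'debug']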
diff --git a/test/main.cpp b/test/main.cpp
index c619d5567..9d66ead31 100644
--- a/test/main.cpp
+++ b/test/main.cpp
@@ -171,6 +171,6 @@ int main()
}
#endif
- return ret;
+ return ret ? 333 : 0;
}
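Returning the fixed sentinel 333 instead of the raw failure count lets the reporting script distinguish "the binary ran to completion but some test failed" from crashes (128+signal exit codes) and build failures. A sketch of the consuming side under that convention; './test_binary' is a placeholder path, not part of the patch:

    # hypothetical consumer of the exit-code convention set up above
    import subprocess

    ret = subprocess.call(['./test_binary'])  # placeholder binary path
    if ret == 0:
        outcome = 'passed'
    elif ret == 333:
        outcome = 'test-failed'  # ran to completion, at least one test failed
    else:
        outcome = 'crash, timeout or build problem (%d)' % ret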
diff --git a/tools/parse_test_results.py b/tools/parse_test_results.py
index 2134f94df..eddbb6ee5 100755
--- a/tools/parse_test_results.py
+++ b/tools/parse_test_results.py
@@ -38,7 +38,7 @@ import glob
import json
# TODO: different parsers could be run on output from different actions
-# if we would use the xml output in stead of stdout/stderr
+# if we would use the xml output instead of stdout/stderr
def style_output(logfile, outfile):
subtle = False
for l in logfile.split('\n'):
@@ -59,9 +59,14 @@ def style_output(logfile, outfile):
print >>outfile, '%s' % l
elif '**passed**' in l:
print >>outfile, '%s' % l
- elif ': error: ' in l or ': fatal error: ' in l or ' : fatal error ' in l or \
- 'failed to write output file' in l or ') : error C' in l or \
- ' : error LNK' in l or ': undefined reference to ' in l:
+ elif ': error: ' in l or \
+ ';1;31merror: ' in l or \
+ ': fatal error: ' in l or \
+ ' : fatal error ' in l or \
+ 'failed to write output file' in l or \
+ ') : error C' in l or \
+ ' : error LNK' in l or \
+ ': undefined reference to ' in l:
print >>outfile, '%s' % l
elif ': warning: ' in l or ') : warning C' in l or \
'Uninitialised value was created by a' in l or \
@@ -221,6 +226,10 @@ for branch_name in revs:
print >>html, '''<h3>%s - %s</h3>
regression tests, %s
''' % (p, toolset, branch_name)
print >>html, '<th><a href="%s">%s</a></th>' % (details_name, toolset)
- for f in platforms[p][toolset]:
- print >>details_file, '<th>%s</th>' % f
+
+ for f in features:
+ title = f
+ if len(tests[f]) < 10: title = '#'
+
+ print >>details_file, '<th>%s</th>' % title
for t in platforms[p][toolset][f]:
details = platforms[p][toolset][f][t]
exitcode = details['status']
- if exitcode == 0: c = 'passed'
- else: c = 'failed'
- error_state = '%d' % exitcode
- if exitcode == 222:
+
+ if exitcode == 0:
+ error_state = 'passed'
+ c = 'passed'
+ elif exitcode == 222:
error_state = 'valgrind error'
- elif exitcode == 139:
+ c = 'valgrind-error'
+ elif exitcode == 139 or \
+ exitcode == 138:
error_state = 'crash'
+ c = 'crash'
elif exitcode == -1073740777:
error_state = 'timeout'
+ c = 'timeout'
+ elif exitcode == 333:
+ error_state = 'test-failed'
+ c = 'failed'
+ else:
+ error_state = 'compile-failed (%d)' % exitcode
+ c = 'compile-failed'
+
log_name = os.path.join('logs-%s-%d' % (branch_name, r), p + '~' + toolset + '~' + t + '~' + f.replace(' ', '.') + '.html')
print >>html, '<td title="%s %s"><a class="%s" href="%s"></a></td>' % (t, f, c, log_name)
print >>details_file, '<td class="%s"><a href="%s">%s [%s]</a></td>' % (c, os.path.split(log_name)[1], t, error_state)
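The branch chain above amounts to a lookup from exit code to a (label, CSS class) pair. A condensed restatement with the same behavior; 222 and 333 are sentinels produced by the harness itself, 138/139 follow the shell's 128+signal convention for killed processes, and the negative value is a Windows exit status the script treats as a timeout:

    # condensed restatement of the exit-code classification above
    KNOWN = {
        0:           ('passed',         'passed'),
        222:         ('valgrind error', 'valgrind-error'),
        138:         ('crash',          'crash'),
        139:         ('crash',          'crash'),
        -1073740777: ('timeout',        'timeout'),
        333:         ('test-failed',    'failed'),
    }

    def classify(exitcode):
        if exitcode in KNOWN:
            return KNOWN[exitcode]
        return ('compile-failed (%d)' % exitcode, 'compile-failed')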
diff --git a/tools/run_tests.py b/tools/run_tests.py
index 172075e54..4aafe7943 100755
--- a/tools/run_tests.py
+++ b/tools/run_tests.py
@@ -104,7 +104,8 @@ def run_tests(toolset, tests, features, options, test_dir, time_limit):
# works for actual unit tests
if 'launcher=valgrind' in options_copy:
options_copy.remove('launcher=valgrind')
- cmdline = ['bjam', '--out-xml=%s' % xml_file, '-l%d' % time_limit, '-q', '--abbreviate-paths', toolset] + options_copy + feature_list
+ cmdline = ['bjam', '--out-xml=%s' % xml_file, '-l%d' % time_limit, \
+ '-q', '--abbreviate-paths', toolset] + options_copy + feature_list
# print ' '.join(cmdline)
@@ -170,10 +171,11 @@ def main(argv):
toolsets = []
- num_processes = 4
+ num_processes = 2
incremental = False
test_dirs = []
+ build_dirs = []
configs = []
options = ['boost=source', 'preserve-test-targets=on']
time_limit = 1200
@@ -217,8 +219,14 @@ def main(argv):
if 'test_dirs' in cfg:
for d in cfg['test_dirs']:
test_dirs.append(os.path.abspath(d))
- else:
- print 'no test directory specified by .regression.yml'
+
+ if 'build_dirs' in cfg:
+ for d in cfg['build_dirs']:
+ build_dirs.append(os.path.abspath(d))
+ test_dirs.append(os.path.abspath(d))
+
+ if len(build_dirs) == 0 and len(test_dirs) == 0:
+ print 'no test or build directory specified by .regression.yml'
sys.exit(1)
configs = []
@@ -228,6 +236,11 @@ def main(argv):
else:
configs = ['']
+ build_configs = []
+ if 'build_features' in cfg:
+ for d in cfg['build_features']:
+ build_configs.append(d)
+
clean_files = []
if 'clean' in cfg:
clean_files = cfg['clean']
@@ -241,7 +254,7 @@ def main(argv):
# it takes a bit longer to run in valgrind
if 'launcher=valgrind' in options:
- time_limit *= 6
+ time_limit *= 7
architecture = platform.machine()
build_platform = platform.system() + '-' + platform.release()
@@ -250,7 +263,7 @@ def main(argv):
timestamp = datetime.now()
-# tester_pool = Pool(processes=num_processes)
+ tester_pool = Pool(processes=num_processes)
print '%s-%d - %s - %s' % (branch_name, revision, author, timestamp)
@@ -292,18 +305,22 @@ def main(argv):
if not toolset in results: results[toolset] = {}
toolset_found = False
-# futures = []
-# for features in configs:
-# futures.append(tester_pool.apply_async(run_tests, [toolset, tests, features, options, test_dir, time_limit]))
+ additional_configs = []
+ if test_dir in build_dirs:
+ additional_configs = build_configs
-# for future in futures:
-# (compiler, r) = future.get()
-# results[toolset].update(r)
+ futures = []
+ for features in configs + additional_configs:
+ futures.append(tester_pool.apply_async(run_tests, [toolset, tests, features, options, test_dir, time_limit]))
- for features in configs:
- (compiler, r) = run_tests(toolset, tests, features, options, test_dir, time_limit)
+ for future in futures:
+ (compiler, r) = future.get()
results[toolset].update(r)
+# for features in configs + additional_configs:
+# (compiler, r) = run_tests(toolset, tests, features, options, test_dir, time_limit)
+# results[toolset].update(r)
+
print ''
if len(clean_files) > 0:
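With the Pool re-enabled, main() now fans each feature configuration out to worker processes (num_processes lowered from 4 to 2, since each job itself spawns parallel bjam builds) and joins the results afterwards. The shape of that fan-out/fan-in pattern, with run_one() standing in for run_tests() and the feature strings as examples:

    # minimal sketch of the apply_async fan-out now used in main()
    from multiprocessing import Pool

    def run_one(features):
        # stands in for run_tests(); returns (compiler, results) the same way
        return 'gcc', {features: 'ok'}

    if __name__ == '__main__':
        pool = Pool(processes=2)
        futures = [pool.apply_async(run_one, [f])
                   for f in ['asserts=off', 'logging=verbose']]
        results = {}
        for future in futures:
            compiler, r = future.get()
            results.update(r)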