From 4590d2c13c7a9f18392f84cd2a6cf90bd815f010 Mon Sep 17 00:00:00 2001 From: arvidn Date: Sun, 20 Dec 2015 12:39:30 -0500 Subject: [PATCH] remove out-dated python script --- tools/Makefile.am | 5 +- tools/parse_test_results.py | 370 --------------------------------- tools/run_regression_tests.py | 152 -------------- tools/run_tests.py | 378 ---------------------------------- 4 files changed, 1 insertion(+), 904 deletions(-) delete mode 100755 tools/parse_test_results.py delete mode 100755 tools/run_regression_tests.py delete mode 100755 tools/run_tests.py diff --git a/tools/Makefile.am b/tools/Makefile.am index 9e134a542..6d12e2f5d 100644 --- a/tools/Makefile.am +++ b/tools/Makefile.am @@ -19,10 +19,7 @@ EXTRA_DIST = Jamfile \ parse_peer_log.py \ parse_sample.py \ parse_session_stats.py \ - parse_test_results.py \ - parse_utp_log.py \ - run_regression_tests.py\ - run_tests.py + parse_utp_log.py fuzz_torrent_SOURCES = fuzz_torrent.cpp diff --git a/tools/parse_test_results.py b/tools/parse_test_results.py deleted file mode 100755 index 2d19880c3..000000000 --- a/tools/parse_test_results.py +++ /dev/null @@ -1,370 +0,0 @@ -#!/usr/bin/env python - -# Copyright (c) 2013, Arvid Norberg -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the distribution. -# * Neither the name of the author nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. - -# This is meant to be run from the root directory of the repo. 
It will
-# look for the .regression.yml file and expect a regression_tests directory
-# with results from test runs previously produced by run_tests.py
-
-import os
-import sys
-import glob
-import json
-
-# TODO: different parsers could be run on output from different actions
-# if we would use the xml output instead of stdout/stderr
-def style_output(logfile, outfile):
-    subtle = False
-    for l in logfile.split('\n'):
-        l = l.encode('utf-8')
-        l = l.replace('<', '&lt;')
-        l = l.replace('>', '&gt;')
-        if 'TEST_CHECK' in l or \
-            'TEST_EQUAL_ERROR' in l or \
-            '"ERROR: "' in l or \
-            l.startswith('EXIT STATUS: ') or \
-            ' second time limit exceeded' in l or l.startswith('signal: SIG') or \
-            'jump or move depends on uninitialised value(s)' in l or \
-            'Invalid read of size' in l or \
-            'Invalid write of size' in l or \
-            'Use of uninitialised value of size' in l or \
-            'Uninitialised byte(s) found during' in l or \
-            'Terminated with exception: ' in l or \
-            'TEST(S) FAILED' in l or \
-            'points to uninitialised byte(s)' in l:
-            print >>outfile, '<span class="test-error">%s</span>' % l
-        elif '**passed**' in l:
-            print >>outfile, '<span class="test-pass">%s</span>' % l
-        elif ': error: ' in l or \
-            ';1;31merror: ' in l or \
-            ': fatal error: ' in l or \
-            ' : fatal error ' in l or \
-            'failed to write output file' in l or \
-            ') : error C' in l or \
-            ' : error LNK' in l or \
-            ': undefined reference to ' in l:
-            print >>outfile, '<span class="compile-error">%s</span>' % l
-        elif ': warning: ' in l or \
-            ') : warning C' in l or \
-            '0;1;35mwarning: ' in l or \
-            'Uninitialised value was created by a' in l or \
-            'bytes after a block of size' in l or \
-            'bytes inside a block of size' in l:
-            print >>outfile, '<span class="compile-warning">%s</span>' % l.strip()
-        elif l == '====== END OUTPUT ======' and not subtle:
-            print >>outfile, '<span class="subtle">%s' % l
-            subtle = True
-        else:
-            print >>outfile, '%s' % l
-    if subtle: print >>outfile, '</span>'
-
-def modification_time(file):
-    mtime = 0
-    try:
-        mtime = os.stat(file).st_mtime
-    except: pass
-    return mtime
-
-def save_log_file(log_name, project_name, branch_name, test_name, timestamp, data):
-
-    if not os.path.exists(os.path.split(log_name)[0]):
-        os.mkdir(os.path.split(log_name)[0])
-
-    try:
-        # if the log file already exists, and it's newer than
-        # the source, no need to re-parse it
-        mtime = os.stat(log_name).st_mtime
-        if mtime >= timestamp: return
-    except: pass
-
-    html = open(log_name, 'w+')
-    print >>html, '''<html><head><title>%s - %s</title>
-    <style type="text/css">.test-error { color: #f13; font-weight: bold; }
-    .test-pass { color: #1c2; } .compile-error { color: #f13; font-weight: bold; }
-    .compile-warning { color: #cb0; } .subtle { color: #ccc; }</style>
-    </head><body><h1>%s - %s</h1>
-    ''' % (project_name, branch_name, project_name, branch_name)
-    print >>html, '<h3>%s</h3><pre>' % test_name.encode('utf-8')
-    style_output(data, html)
-
-    print >>html, '</pre></body></html>'
-    html.close()
-    sys.stdout.write('.')
-    sys.stdout.flush()
-
-def parse_tests(rev_dir):
-
-    # this maps platforms to the next layer of
-    # dictionaries. that layer maps toolsets to
-    # the next layer, which in turn maps
-    # feature-sets to the final layer. the final
-    # layer maps test names to information about
-    # those tests, such as whether the test passed
-    # and the output from the command
-    # example:
-
-    # {
-    #   darwin: {
-    #     clang-4.2.1: {
-    #       ipv6=off: {
-    #         test_primitives: {
-    #           output: ...
-    #           status: 1
-    #         }
-    #       }
-    #     }
-    #   }
-    # }
-
-    platforms = {}
-
-    tests = {}
-
-    for f in glob.glob(os.path.join(rev_dir, '*.json')):
-        platform_toolset = os.path.split(f)[1].split('.json')[0].split('#')
-        try:
-            j = json.loads(open(f, 'rb').read())
-            timestamp = os.stat(f).st_mtime
-        except Exception, e:
-            print '\nFAILED TO LOAD "%s": %s\n' % (f, e)
-            continue
-
-        platform = platform_toolset[0]
-        toolset = platform_toolset[1]
-
-        for cfg in j:
-            test_name = cfg.split('|')[0]
-            features = cfg.split('|')[1]
-
-            if not features in tests:
-                tests[features] = set()
-
-            tests[features].add(test_name)
-
-            if not platform in platforms:
-                platforms[platform] = {}
-
-            if not toolset in platforms[platform]:
-                platforms[platform][toolset] = {}
-
-            if not features in platforms[platform][toolset]:
-                platforms[platform][toolset][features] = {}
-
-            platforms[platform][toolset][features][test_name] = j[cfg]
-            platforms[platform][toolset][features][test_name]['timestamp'] = timestamp
-
-    return (platforms, tests)
-
-
-# TODO: remove this dependency by encoding it in the output files
-# this script should work from outside of the repo, just having
-# access to the shared folder
-project_name = 'libtorrent'
-
-# maps branch name to latest rev
-revs = {}
-
-input_dir = os.path.abspath('regression_tests')
-
-for rev in os.listdir(input_dir):
-    try:
-        branch = rev.split('-')[0]
-        if branch == 'logs': continue
-        r = int(rev.split('-')[1])
-        if not branch in revs:
-            revs[branch] = r
-        else:
-            if r > revs[branch]:
-                revs[branch] = r
-    except:
-        print 'ignoring %s' % rev
-
-if revs == {}:
-    print 'no test files found'
-    sys.exit(1)
-
-print 'latest versions'
-for b in revs:
-    print '%s\t%d' % (b, revs[b])
-
-try: os.mkdir('regression_test_report')
-except: pass
-
-os.chdir('regression_test_report')
-
-for branch_name in revs:
-
-    latest_rev = revs[branch_name]
-
-    html_file = '%s.html' % branch_name
-
-    html = open(html_file, 'w+')
-
-    print >>html, '''<html><head><title>regression tests, %s</title>
-    <style type="text/css">.passed { display: block; width: 6px; height: 1em; background-color: #6f8 }
-    .failed { background-color: #f68 } .crash { background-color: #f08 }
-    .timeout { background-color: #86f } .valgrind-error { background-color: #fb0 }
-    .compile-failed { background-color: #333 }</style>
-    </head><body><h1>%s - %s</h1>
-    ''' % (project_name, project_name, branch_name)
-
-    print >>html, '<table border="1" style="border-collapse: collapse">'
-
-    num_printed_revs = 0
-    for r in range(latest_rev, latest_rev - 40, -1):
-        sys.stdout.write('.')
-        sys.stdout.flush()
-
-        rev_dir = os.path.join(input_dir, '%s-%d' % (branch_name, r))
-        (platforms, tests) = parse_tests(rev_dir)
-
-        if len(tests) + len(platforms) == 0: continue
-
-        print >>html, '<tr><th colspan="2">revision %d</th>' % r
-
-        features = tests.keys()
-        features = sorted(features, key=lambda x: len(tests[x]))
-
-        for f in features:
-            title = f
-            if len(tests[f]) < 10: title = '#'
-            print >>html, '<th colspan="%d" style="width: %dpx">%s</th>' % (len(tests[f]), len(tests[f])*6 - 5, title)
-        print >>html, '</tr>'
-
-        for p in platforms:
-            print >>html, '<tr><th rowspan="%d">%s</th>' % (len(platforms[p]), p)
-            idx = 0
-            for toolset in platforms[p]:
-                if idx > 0: print >>html, '<tr>'
-                log_dir = 'logs-%s-%d' % (branch_name, r)
-                if not os.path.exists(log_dir):
-                    os.mkdir(log_dir)
-                details_name = os.path.join(log_dir, '%s-%s.html' % (p, toolset))
-                details_file = open(details_name, 'w+')
-
-                print >>details_file, '''<html><head><title>%s %s [%s]</title></head><body>
-                ''' % (p, toolset, branch_name)
-                print >>html, '<th><a href="%s">%s</a></th>' % (details_name, toolset)
-
-                deferred_end_table = False
-                for f in features:
-                    title = f
-                    if len(tests[f]) < 10: title = '#'
-
-                    if title != '#':
-                        if deferred_end_table:
-                            print >>details_file, '</table>'
-                            print >>details_file, '<h2>%s</h2><table>' % title
-                            deferred_end_table = False
-                        else:
-                            print >>details_file, '<br/>'
-                            print >>details_file, '<h2>%s</h2><table>' % title
-                    elif not deferred_end_table:
-                        print >>details_file, '<br/>'
-                        print >>details_file, '<h2>%s</h2><table>' % title
-
-                    if not f in platforms[p][toolset]:
-                        for i in range(len(tests[f])):
-                            print >>html, '<td title="%s"></td>' % (f)
-                        continue
-
-                    for t in platforms[p][toolset][f]:
-                        details = platforms[p][toolset][f][t]
-                        exitcode = details['status']
-
-                        if exitcode == 0:
-                            error_state = 'passed'
-                            c = 'passed'
-                        elif exitcode == 222:
-                            error_state = 'valgrind error'
-                            c = 'valgrind-error'
-                        elif exitcode == 139 or \
-                            exitcode == 138:
-                            error_state = 'crash'
-                            c = 'crash'
-                        elif exitcode == -1073740777:
-                            error_state = 'timeout'
-                            c = 'timeout'
-                        elif exitcode == 333 or \
-                            exitcode == 77:
-                            error_state = 'test-failed'
-                            c = 'failed'
-                        else:
-                            error_state = 'compile-failed (%d)' % exitcode
-                            c = 'compile-failed'
-
-                        log_name = os.path.join('logs-%s-%d' % (branch_name, r), p + '~' + toolset + '~' + t + '~' + f.replace(' ', '.') + '.html')
-                        print >>html, '<td title="%s [%s]"><a class="%s" href="%s"></a></td>' % (t, f, c, log_name)
-                        print >>details_file, '<tr class="%s"><td><a href="%s">%s</a></td><td>%s</td></tr>' % (c, os.path.split(log_name)[1], t, error_state)
-                        save_log_file(log_name, project_name, branch_name, '%s - %s' % (t, f), int(details['timestamp']), details['output'])
-                    if title != '#':
-                        print >>details_file, '</table>'
-                        deferred_end_table = False
-                    else:
-                        deferred_end_table = True
-
-                if deferred_end_table:
-                    print >>details_file, '</table>'
-
-                print >>html, '</tr>'
-                idx += 1
-                print >>details_file, '</body></html>'
-                details_file.close()
-        num_printed_revs += 1
-        if num_printed_revs >= 20: break
-
-    print >>html, '</table></body></html>'
-    html.close()
-
-print ''
-
diff --git a/tools/run_regression_tests.py b/tools/run_regression_tests.py
deleted file mode 100755
index e3f0007f0..000000000
--- a/tools/run_regression_tests.py
+++ /dev/null
@@ -1,152 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (c) 2013, Arvid Norberg
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the distribution.
-# * Neither the name of the author nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
-
-import run_tests
-import os
-import time
-import subprocess
-import sys
-
-def indent(s):
-    s = s.split('\n')
-    s = [(3 * ' ') + line.lstrip() for line in s]
-    s = '\n'.join(s)
-    return s
-
-# returns a list of new revisions
-def svn_fetch(last_rev):
-
-    if os.system('svn up') != 0:
-        print 'svn up failed'
-        return []
-
-# log command and output
-# $ svn log -l10 --incremental -q
-# ------------------------------------------------------------------------
-# r9073 | arvidn | 2013-10-04 21:49:00 -0700 (Fri, 04 Oct 2013)
-# ------------------------------------------------------------------------
-# r9072 | arvidn | 2013-10-04 21:18:24 -0700 (Fri, 04 Oct 2013)
-# ------------------------------------------------------------------------
-# r9068 | arvidn | 2013-10-04 08:51:32 -0700 (Fri, 04 Oct 2013)
-# ------------------------------------------------------------------------
-# r9067 | arvidn | 2013-10-04 08:45:47 -0700 (Fri, 04 Oct 2013)
-# ------------------------------------------------------------------------
-
-    p = subprocess.Popen(['svn', 'log', '-l10', '--incremental', '-q'], stdout=subprocess.PIPE)
-
-    revision = -1
-
-    output = ''
-    ret = []
-    for l in p.stdout:
-        if not l.startswith('r'): continue
-        rev = int(l.split(' ')[0][1:])
-        if rev == last_rev: break
-        ret.append(rev)
-
-    print 'svn up: ',
-    for r in ret: print '%d ' % r,
-    print ''
-    return ret
-
-def svn_up(revision):
-    os.system('svn up -r %d' % revision)
-
-def print_usage():
-    print '''usage: run_regression_tests.py [options] toolset [toolset...]
-
-toolset are bjam toolsets. For instance clang, gcc, darwin, msvc etc.
-The path "./regression_tests" is expected to be a shared folder
-between all testers.

-options:
-
-   -j<n>        use <n> parallel processes for running tests
-   -i           build incrementally (i.e. don't clean between checkouts)
-   -valgrind    run tests with valgrind (requires valgrind to be installed)
-   -s           skip. always run tests on the latest version
-'''
-
-
-def loop():
-
-    if len(sys.argv) < 2:
-        print_usage()
-        sys.exit(1)
-
-    skip = '-s' in sys.argv
-
-    rev_file = os.path.join(os.getcwd(), '.rev')
-    if skip:
-        sys.argv.remove('-s')
-        print 'restoring last state from "%s"' % rev_file
-
-    try:
-        last_rev = int(open(rev_file, 'r').read())
-    except:
-        last_rev = run_tests.svn_info()[0] - 1
-        open(rev_file, 'w+').write('%d' % last_rev)
-
-    revs = []
-
-    while True:
-        new_revs = svn_fetch(last_rev)
-
-        if len(new_revs) > 0:
-            revs = new_revs + revs
-
-        # in skip mode, only ever run the latest version
-        if skip and len(revs): revs = revs[:1]
-
-        if revs == []:
-            time.sleep(300)
-            continue
-
-        print 'revs: ',
-        for r in revs: print '%d ' % r,
-        print ''
-
-        r = revs[0]
-        print '\n\nREVISION %d ===\n' % r
-        svn_up(r)
-
-        try:
-            run_tests.main(sys.argv[1:])
-            last_rev = r
-
-            # pop the revision we just completed
-            revs = revs[1:]
-
-            open(rev_file, 'w+').write('%d' % last_rev)
-        except Exception, e:
-            print e
-
-if __name__ == "__main__":
-    loop()
diff --git a/tools/run_tests.py b/tools/run_tests.py
deleted file mode 100755
index 9e6a1994f..000000000
--- a/tools/run_tests.py
+++ /dev/null
@@ -1,378 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (c) 2013, Arvid Norberg
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the distribution.
-# * Neither the name of the author nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
-
-# this is meant to be run from the root of the repository
-# the arguments are the boost-build toolsets to use.
-# these will vary between testers and operating systems
-# common ones are: clang, darwin, gcc, msvc, icc
-
-import random
-import os
-import platform
-import subprocess
-import xml.etree.ElementTree as et
-from datetime import datetime
-import json
-import sys
-import yaml
-import glob
-import shutil
-import traceback
-import clean
-
-# the .regression.yml configuration file format looks like this (it's yaml):

-# test_dirs:
-# - <path to directory to run tests in>
-# - ...
-#
-# features:
-# - <list of boost-build features>
-# - ...
-# - <another list of features>
-
-def svn_info():
-    # figure out which revision this is
-    p = subprocess.Popen(['svn', 'info'], stdout=subprocess.PIPE)
-
-    revision = -1
-    author = ''
-
-    for l in p.stdout:
-        if 'Last Changed Rev' in l:
-            revision = int(l.split(':')[1].strip())
-        if 'Last Changed Author' in l:
-            author = l.split(':')[1].strip()
-
-    if revision == -1:
-        print 'Failed to extract subversion revision'
-        sys.exit(1)
-
-    if author == '':
-        print 'Failed to extract subversion author'
-        sys.exit(1)
-
-    return (revision, author)
-
-def run_tests(toolset, tests, features, options, test_dir, time_limit):
-    assert(type(features) == str)
-
-    xml_file = 'bjam_build.%d.xml' % random.randint(0, 100000)
-    try:
-
-        results = {}
-
-        feature_list = features.split(' ')
-        os.chdir(test_dir)
-
-        c = 0
-        for t in tests:
-            c = c + 1
-
-            options_copy = options[:]
-            if t != '': options_copy.append(t)
-            if t == '':
-                t = os.path.split(os.getcwd())[1]
-                # we can't pass in a launcher when just building, that only
-                # works for actual unit tests
-                if 'launcher=valgrind' in options_copy:
-                    options_copy.remove('launcher=valgrind')
-            cmdline = ['bjam', '--out-xml=%s' % xml_file, '-l%d' % time_limit, \
-                '-q', '--abbreviate-paths', toolset] + options_copy + feature_list
-#            print 'calling ', cmdline
-
-            p = subprocess.Popen(cmdline, stdout=subprocess.PIPE, cwd=test_dir)
-            output = ''
-            for l in p.stdout:
-                if 'launcher=valgrind' in options_copy and l.startswith('chase_cuOff'):
-                    continue
-                output += l.decode('latin-1')
-                sys.stdout.write('.')
-                sys.stdout.flush()
-            p.wait()
-
-            # parse out the toolset version from the xml file
-            compiler = ''
-            compiler_version = ''
-            command = ''
-
-            # make this parse the actual test to pick up the time
-            # spent running the test
-            try:
-                dom = et.parse(xml_file)
-
-                command = dom.find('./command').text
-
-                prop = dom.findall('./action/properties/property')
-                for a in prop:
-                    name = a.attrib['name']
-                    if name == 'toolset':
-                        compiler = a.text
-                        if compiler_version != '': break
-                    if name.startswith('toolset-') and name.endswith(':version'):
-                        compiler_version = a.text
-                        if compiler != '': break
-
-                if compiler != '' and compiler_version != '':
-                    toolset = compiler + '-' + compiler_version
-            except: pass
-
-            r = { 'status': p.returncode, 'output': output, 'command': command }
-            results[t + '|' + features] = r
-
-            if p.returncode != 0:
-                # if the build or test failed, print out the
-                # important parts
-                sys.stdout.write('\n')
-                print command
-                for l in output.split('\n'):
-                    if 'error: ' in l or \
-                        ': fatal error: ' in l or \
-                        'failed to write output file' in l or \
-                        ': error C' in l or \
-                        'undefined reference to ' in l or \
-                        ' error LNK' in l or \
-                        'TEST_CHECK' in l or \
-                        'TEST_EQUAL_ERROR' in l or \
-                        '"ERROR: "' in l or \
-                        l.startswith('EXIT STATUS: ') or \
-                        ' second time limit exceeded' in l or \
-                        l.startswith('signal: SIG') or \
-                        'jump or move depends on uninitialised value(s)' in l or \
-                        'Invalid read of size' in l or \
-                        'Invalid write of size' in l or \
-                        'Use of uninitialised value of size' in l or \
-                        'Uninitialised byte(s) found during' in l or \
-                        'points to uninitialised byte(s)' in l:
-                        print l
-
-            print '\n%s - %d / %d' % (toolset, c, len(tests))
-
-    except Exception, e:
-        # need this to make child processes exit
-        print 'exiting test process: ', traceback.format_exc()
-        sys.exit(1)
-    finally:
-        try: os.unlink(xml_file)
-        except: pass
-
-    return (toolset, results)
-
-def print_usage():
-    print '''usage: run_tests.py [options] bjam-toolset [bjam-toolset...] [bjam-option...]
-options:
--j<n>        use <n> parallel processes
--h           prints this message and exits
--i           build incrementally (i.e. don't clean between checkouts)
--valgrind    run tests with valgrind (requires valgrind to be installed)
-'''
-
-def main(argv):
-
-    toolsets = []
-
-    incremental = False
-
-    test_dirs = []
-    build_dirs = []
-    configs = []
-    options = ['preserve-test-targets=on']
-    time_limit = 1200
-
-    for arg in argv:
-        if arg[0] == '-':
-            if arg[1] == 'j':
-                num_processes = int(arg[2:])
-                options.append('-j%d' % num_processes)
-            elif arg[1] == 'h':
-                print_usage()
-                sys.exit(1)
-            elif arg[1] == 'i':
-                incremental = True
-            elif arg[1:] == 'valgrind':
-                options.append('launcher=valgrind')
-            else:
-                print 'unknown option: %s' % arg
-                print_usage()
-                sys.exit(1)
-        elif '=' in arg:
-            options.append(arg)
-        else:
-            toolsets.append(arg)
-
-    if toolsets == []:
-        print_usage()
-        sys.exit(1)
-
-    if not incremental:
-        print 'cleaning repo'
-        clean.clean()
-
-    try:
-        cfg = open('.regression.yml', 'r')
-    except:
-        print '.regression.yml not found in current directory'
-        sys.exit(1)
-
-    cfg = yaml.load(cfg.read())
-
-    if 'test_dirs' in cfg:
-        for d in cfg['test_dirs']:
-            test_dirs.append(os.path.abspath(d))
-
-    if 'build_dirs' in cfg:
-        for d in cfg['build_dirs']:
-            build_dirs.append(os.path.abspath(d))
-            test_dirs.append(os.path.abspath(d))
-
-    if len(build_dirs) == 0 and len(test_dirs) == 0:
-        print 'no test or build directory specified by .regression.yml'
-        sys.exit(1)
-
-    configs = []
-    if 'features' in cfg:
-        for d in cfg['features']:
-            configs.append(d)
-    else:
-        configs = ['']
-
-    build_configs = []
-    if 'build_features' in cfg:
-        for d in cfg['build_features']:
-            build_configs.append(d)
-
-    clean_files = []
-    if 'clean' in cfg:
-        clean_files = cfg['clean']
-
-    branch_name = 'trunk'
-    if 'branch' in cfg:
-        branch_name = cfg['branch']
-
-    if 'time_limit' in cfg:
-        time_limit = int(cfg['time_limit'])
-
-    # it takes a bit longer to run in valgrind
-    if 'launcher=valgrind' in options:
-        time_limit *= 7
-
-    architecture = platform.machine()
-    build_platform = platform.system() + '-' + platform.release()
-
-    revision, author = svn_info()
-
-    timestamp = datetime.now()
-
-    print '%s-%d - %s - %s' % (branch_name, revision, author, timestamp)
-
-    print 'toolsets: %s' % ' '.join(toolsets)
-#    print 'configs: %s' % '|'.join(configs)
-
-    current_dir = os.getcwd()
-
-    try:
-        rev_dir = os.path.join(current_dir, 'regression_tests')
-        try: os.mkdir(rev_dir)
-        except: pass
-        rev_dir = os.path.join(rev_dir, '%s-%d' % (branch_name, revision))
-        try: os.mkdir(rev_dir)
-        except: pass
-
-        for toolset in toolsets:
-            results = {}
-            for test_dir in test_dirs:
-                print 'running tests from "%s" in %s' % (test_dir, branch_name)
-                os.chdir(test_dir)
-                test_dir = os.getcwd()
-
-                # figure out which tests are exported by this Jamfile
-                p = subprocess.Popen(['bjam', '--dump-tests', 'non-existing-target'], stdout=subprocess.PIPE, cwd=test_dir)
-
-                tests = []
-
-                output = ''
-                for l in p.stdout:
-                    output += l
-                    if not 'boost-test(RUN)' in l: continue
-                    test_name = os.path.split(l.split(' ')[1][1:-1])[1]
-                    tests.append(test_name)
-                print 'found %d tests' % len(tests)
-                if len(tests) == 0:
-                    tests = ['']
-
-                additional_configs = []
-                if test_dir in build_dirs:
-                    additional_configs = build_configs
-
-                futures = []
-                for features in configs + additional_configs:
-                    (compiler, r) = run_tests(toolset, tests, features, options, test_dir, time_limit)
-                    results.update(r)
-
-                print ''
-
-                if len(clean_files) > 0:
-                    print 'deleting ',
-                    for filt in clean_files:
-                        for f in glob.glob(os.path.join(test_dir, filt)):
-                            # a precaution to make sure a malicious repo
-                            # won't clean things outside of the test directory
-                            if not os.path.abspath(f).startswith(test_dir): continue
-                            print '%s ' % f,
-                            try: shutil.rmtree(f)
-                            except: pass
-                    print ''
-
-            # each file contains a full set of tests for one specific toolset and platform
-            try:
-                f = open(os.path.join(rev_dir, build_platform + '#' + toolset + '.json'), 'w+')
-            except IOError, e:
-                print e
-                rev_dir = os.path.join(current_dir, 'regression_tests')
-                try: os.mkdir(rev_dir)
-                except: pass
-                rev_dir = os.path.join(rev_dir, '%s-%d' % (branch_name, revision))
-                try: os.mkdir(rev_dir)
-                except: pass
-                f = open(os.path.join(rev_dir, build_platform + '#' + toolset + '.json'), 'w+')
-
-            print >>f, json.dumps(results)
-            f.close()
-
-
-    finally:
-        # always restore current directory
-        try:
-            os.chdir(current_dir)
-        except: pass
-
-if __name__ == "__main__":
-    main(sys.argv[1:])
-
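The result files these scripts exchanged outlive the scripts themselves: each
regression_tests/<branch>-<rev>/<platform>#<toolset>.json maps a
'test_name|feature-set' key to the bjam exit status, captured output and build
command, as documented in parse_tests() above. For anyone who still has such
directories around, here is a minimal standalone summarizer sketch (Python 3,
not part of this patch). It assumes only that schema plus the exit-code mapping
from the deleted parse_test_results.py; the example path in the comment is
hypothetical.

#!/usr/bin/env python3
import json
import sys
from collections import Counter

# exit-code -> state mapping taken from the deleted parse_test_results.py
STATES = {0: 'passed', 222: 'valgrind-error', 138: 'crash', 139: 'crash',
          -1073740777: 'timeout', 77: 'test-failed', 333: 'test-failed'}

def summarize(path):
    # a results file maps 'test_name|feature-set' to a dict holding the
    # bjam exit status, the captured output and the build command
    with open(path) as f:
        results = json.load(f)

    by_state = Counter()
    for key, r in results.items():
        test_name, _, features = key.partition('|')
        state = STATES.get(r['status'], 'compile-failed')
        by_state[state] += 1
        if state != 'passed':
            print('%-15s %s [%s]' % (state, test_name, features))

    for state, n in by_state.most_common():
        print('%5d %s' % (n, state))

if __name__ == '__main__':
    # e.g. regression_tests/trunk-9073/Linux-3.2.0#gcc.json (hypothetical)
    summarize(sys.argv[1])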