remove out-dated python script

arvidn 2015-12-20 12:39:30 -05:00
parent 03e90d45d8
commit 4590d2c13c
4 changed files with 1 addition and 904 deletions

Makefile.am  View File

@@ -19,10 +19,7 @@ EXTRA_DIST = Jamfile \
 	parse_peer_log.py \
 	parse_sample.py \
 	parse_session_stats.py \
-	parse_test_results.py \
-	parse_utp_log.py \
-	run_regression_tests.py\
-	run_tests.py
+	parse_utp_log.py
 
 fuzz_torrent_SOURCES = fuzz_torrent.cpp

parse_test_results.py  View File

@@ -1,370 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2013, Arvid Norberg
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the distribution.
# * Neither the name of the author nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# This is meant to be run from the root directory of the repo. It will
# look for the .regression.yml file and expect a regression_tests directory
# with results from test runs previously produced by run_tests.py
import os
import sys
import glob
import json
# TODO: different parsers could be run on output from different actions
# if we would use the xml output instead of stdout/stderr
def style_output(logfile, outfile):
    subtle = False
    for l in logfile.split('\n'):
        l = l.encode('utf-8')
        l = l.replace('<', '&lt;')
        l = l.replace('>', '&gt;')
        if 'TEST_CHECK' in l or \
            'TEST_EQUAL_ERROR' in l or \
            '"ERROR: "' in l or \
            l.startswith('EXIT STATUS: ') or \
            ' second time limit exceeded' in l or \
            l.startswith('signal: SIG') or \
            'jump or move depends on uninitialised value(s)' in l or \
            'Invalid read of size' in l or \
            'Invalid write of size' in l or \
            'Use of uninitialised value of size' in l or \
            'Uninitialised byte(s) found during' in l or \
            'Terminated with exception: ' in l or \
            'TEST(S) FAILED' in l or \
            'points to uninitialised byte(s)' in l:
            print >>outfile, '<span class="test-error">%s</span>' % l
        elif '**passed**' in l:
            print >>outfile, '<span class="test-pass">%s</span>' % l
        elif ': error: ' in l or \
            ';1;31merror: ' in l or \
            ': fatal error: ' in l or \
            ' : fatal error ' in l or \
            'failed to write output file' in l or \
            ') : error C' in l or \
            ' : error LNK' in l or \
            ': undefined reference to ' in l:
            print >>outfile, '<span class="compile-error">%s</span>' % l
        elif ': warning: ' in l or \
            ') : warning C' in l or \
            '0;1;35mwarning: ' in l or \
            'Uninitialised value was created by a' in l or \
            'bytes after a block of size' in l or \
            'bytes inside a block of size' in l:
            print >>outfile, '<span class="compile-warning">%s</span>' % l.strip()
        elif l == '====== END OUTPUT ======' and not subtle:
            print >>outfile, '<span class="subtle">%s' % l
            subtle = True
        else:
            print >>outfile, '%s' % l
    if subtle: print >>outfile, '</span>'

def modification_time(file):
    mtime = 0
    try:
        mtime = os.stat(file).st_mtime
    except: pass
    return mtime

def save_log_file(log_name, project_name, branch_name, test_name, timestamp, data):
    if not os.path.exists(os.path.split(log_name)[0]):
        os.mkdir(os.path.split(log_name)[0])

    try:
        # if the log file already exists, and it's newer than
        # the source, no need to re-parse it
        mtime = os.stat(log_name).st_mtime
        if mtime >= timestamp: return
    except: pass

    html = open(log_name, 'w+')
    print >>html, '''<html><head><title>%s - %s</title><style type="text/css">
.compile-error { color: #f13; font-weight: bold; }
.compile-warning { font-weight: bold; color: black; }
.test-error { color: #f13; font-weight: bold; }
.test-pass { color: #1c2; font-weight: bold; }
.subtle { color: #ddd; }
pre { color: #999; white-space: pre-wrap; word-wrap: break-word; }
</style>
</head><body><h1>%s - %s</h1>''' % (project_name, branch_name, project_name, branch_name)
    print >>html, '<h3>%s</h3><pre>' % test_name.encode('utf-8')
    style_output(data, html)
    print >>html, '</pre></body></html>'
    html.close()

    sys.stdout.write('.')
    sys.stdout.flush()

def parse_tests(rev_dir):
    # this returns a dictionary of dictionaries, laid out as
    # follows: it maps platforms to dictionaries, which map
    # toolsets to dictionaries, which map feature-sets to
    # dictionaries, which map test names to information about
    # those tests, such as whether the test passed and the
    # output from the command
    # example:
    # {
    #   darwin: {
    #     clang-4.2.1: {
    #       ipv6=off: {
    #         test_primitives: {
    #           output: ...
    #           status: 1
    #         }
    #       }
    #     }
    #   }
    # }

    platforms = {}
    tests = {}

    for f in glob.glob(os.path.join(rev_dir, '*.json')):
        platform_toolset = os.path.split(f)[1].split('.json')[0].split('#')

        try:
            j = json.loads(open(f, 'rb').read())
            timestamp = os.stat(f).st_mtime
        except Exception, e:
            print '\nFAILED TO LOAD "%s": %s\n' % (f, e)
            continue

        platform = platform_toolset[0]
        toolset = platform_toolset[1]

        for cfg in j:
            test_name = cfg.split('|')[0]
            features = cfg.split('|')[1]

            if not features in tests:
                tests[features] = set()
            tests[features].add(test_name)

            if not platform in platforms:
                platforms[platform] = {}
            if not toolset in platforms[platform]:
                platforms[platform][toolset] = {}
            if not features in platforms[platform][toolset]:
                platforms[platform][toolset][features] = {}

            platforms[platform][toolset][features][test_name] = j[cfg]
            platforms[platform][toolset][features][test_name]['timestamp'] = timestamp

    return (platforms, tests)

# TODO: remove this dependency by encoding it in the output files
# this script should work from outside of the repo, just having
# access to the shared folder
project_name = 'libtorrent'

# maps branch name to latest rev
revs = {}

input_dir = os.path.abspath('regression_tests')

for rev in os.listdir(input_dir):
    try:
        branch = rev.split('-')[0]
        if branch == 'logs': continue
        r = int(rev.split('-')[1])
        if not branch in revs:
            revs[branch] = r
        else:
            if r > revs[branch]:
                revs[branch] = r
    except:
        print 'ignoring %s' % rev

if revs == {}:
    print 'no test files found'
    sys.exit(1)

print 'latest versions'
for b in revs:
    print '%s\t%d' % (b, revs[b])

try: os.mkdir('regression_test_report')
except: pass

os.chdir('regression_test_report')

for branch_name in revs:
    latest_rev = revs[branch_name]

    html_file = '%s.html' % branch_name
    html = open(html_file, 'w+')

    print >>html, '''<html><head><title>regression tests, %s</title><style type="text/css">
.passed { display: block; width: 6px; height: 1em; background-color: #6f8 }
.failed { display: block; width: 6px; height: 1em; background-color: #f68 }
.crash { display: block; width: 6px; height: 1em; background-color: #f08 }
.compile-failed { display: block; width: 6px; height: 1em; background-color: #000 }
.timeout { display: block; width: 6px; height: 1em; background-color: #86f }
.valgrind-error { display: block; width: 6px; height: 1em; background-color: #f80 }
table { border: 0; border-collapse: collapse; }
h1 { font-size: 15pt; }
th { font-size: 8pt; }
td { border: 0; border-spacing: 0px; padding: 0px 0px 0px 0px; }
.left-head { white-space: nowrap; }
</style>
</head><body><h1>%s - %s</h1>''' % (project_name, project_name, branch_name)

    print >>html, '<table border="1">'

    num_printed_revs = 0

    for r in range(latest_rev, latest_rev - 40, -1):
        sys.stdout.write('.')
        sys.stdout.flush()

        rev_dir = os.path.join(input_dir, '%s-%d' % (branch_name, r))
        (platforms, tests) = parse_tests(rev_dir)
        if len(tests) + len(platforms) == 0: continue

        print >>html, '<tr><th colspan="2" style="border:0;">revision %d</th>' % r

        features = tests.keys()
        features = sorted(features, key=lambda x: len(tests[x]))

        for f in features:
            title = f
            if len(tests[f]) < 10: title = '#'
            print >>html, '<th colspan="%d" style="width: %dpx;">%s</th>' % (len(tests[f]), len(tests[f])*6 - 5, title)
        print >>html, '</tr>'

        for p in platforms:
            print >>html, '<tr><th class="left-head" rowspan="%d">%s</th>' % (len(platforms[p]), p)
            idx = 0
            for toolset in platforms[p]:
                if idx > 0: print >>html, '<tr>'
                log_dir = 'logs-%s-%d' % (branch_name, r)
                if not os.path.exists(log_dir):
                    os.mkdir(log_dir)
                details_name = os.path.join(log_dir, '%s-%s.html' % (p, toolset))
                details_file = open(details_name, 'w+')
                print >>details_file, '''<html><head><title>%s %s [%s]</title><style type="text/css">
.passed { background-color: #6f8 }
.failed { background-color: #f68 }
.missing { background-color: #fff }
.crash { background-color: #f08 }
.compile-failed { background-color: #000 }
.timeout { background-color: #86f }
.valgrind-error { background-color: #f80 }
table { border: 0; border-collapse: collapse; display: inline-block; }
th { font-size: 15pt; width: 18em; }
td { border: 0; border-spacing: 0px; padding: 1px 0px 0px 1px; }
</style>
</head><body>''' % (p, toolset, branch_name)
                print >>html, '<th class="left-head"><a href="%s">%s</a></th>' % (details_name, toolset)
                deferred_end_table = False
                for f in features:
                    title = f
                    if len(tests[f]) < 10: title = '#'

                    if title != '#':
                        if deferred_end_table:
                            print >>details_file, '</table><table>'
                            print >>details_file, '<tr><th>%s</th></tr>' % title
                            deferred_end_table = False
                        else:
                            print >>details_file, '<table>'
                            print >>details_file, '<tr><th>%s</th></tr>' % title
                    elif not deferred_end_table:
                        print >>details_file, '<table>'
                        print >>details_file, '<tr><th>%s</th></tr>' % title

                    if not f in platforms[p][toolset]:
                        for i in range(len(tests[f])):
                            print >>html, '<td title="%s"><a class="missing"></a></td>' % (f)
                        continue

                    for t in platforms[p][toolset][f]:
                        details = platforms[p][toolset][f][t]

                        exitcode = details['status']
                        if exitcode == 0:
                            error_state = 'passed'
                            c = 'passed'
                        elif exitcode == 222:
                            error_state = 'valgrind error'
                            c = 'valgrind-error'
                        elif exitcode == 139 or exitcode == 138:
                            error_state = 'crash'
                            c = 'crash'
                        elif exitcode == -1073740777:
                            error_state = 'timeout'
                            c = 'timeout'
                        elif exitcode == 333 or exitcode == 77:
                            error_state = 'test-failed'
                            c = 'failed'
                        else:
                            error_state = 'compile-failed (%d)' % exitcode
                            c = 'compile-failed'

                        log_name = os.path.join('logs-%s-%d' % (branch_name, r), p + '~' + toolset + '~' + t + '~' + f.replace(' ', '.') + '.html')
                        print >>html, '<td title="%s %s"><a class="%s" href="%s"></a></td>' % (t, f, c, log_name)
                        print >>details_file, '<tr><td class="%s"><a href="%s">%s [%s]</a></td></tr>' % (c, os.path.split(log_name)[1], t, error_state)
                        save_log_file(log_name, project_name, branch_name, '%s - %s' % (t, f), int(details['timestamp']), details['output'])

                    if title != '#':
                        print >>details_file, '</table>'
                        deferred_end_table = False
                    else:
                        deferred_end_table = True

                if deferred_end_table:
                    print >>details_file, '</table>'
                print >>html, '</tr>'
                idx += 1
                print >>details_file, '</body></html>'
                details_file.close()

        num_printed_revs += 1
        if num_printed_revs >= 20: break

    print >>html, '</table></body></html>'
    html.close()

print ''
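
The results files that parse_tests() reads are plain JSON, so they are easy to inspect outside of this script. For reference, here is a minimal Python 3 sketch of walking one "&lt;platform&gt;#&lt;toolset&gt;.json" file; the file naming and the "&lt;test-name&gt;|&lt;feature-set&gt;" key layout come from the scripts in this commit, while the load_results() helper itself is only illustrative:

#!/usr/bin/env python3
import json
import os

def load_results(path):
    # illustrative helper: the file name encodes platform and toolset,
    # e.g. "Darwin-13.0.0#clang.json" (see run_tests.py below)
    platform, toolset = os.path.split(path)[1].rsplit('.json', 1)[0].split('#')
    with open(path, 'r') as f:
        results = json.load(f)
    for cfg, details in results.items():
        # keys are "<test-name>|<feature-set>"; values carry status,
        # output and the bjam command line
        test_name, _, features = cfg.partition('|')
        yield platform, toolset, features, test_name, details['status']

# example: list failing tests in one (hypothetical) results file
# for plat, ts, feat, test, status in load_results('regression_tests/trunk-9073/Darwin-13.0.0#clang.json'):
#     if status != 0:
#         print(plat, ts, feat, test, 'exit code', status)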

run_regression_tests.py  View File

@@ -1,152 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2013, Arvid Norberg
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the distribution.
# * Neither the name of the author nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.

import run_tests
import os
import time
import subprocess
import sys

def indent(s):
    s = s.split('\n')
    s = [(3 * ' ') + line.lstrip() for line in s]
    s = '\n'.join(s)
    return s

# returns a list of new revisions
def svn_fetch(last_rev):

    if os.system('svn up') != 0:
        print 'svn up failed'
        return []

    # log command and output
    # $ svn log -l10 --incremental -q
    # ------------------------------------------------------------------------
    # r9073 | arvidn | 2013-10-04 21:49:00 -0700 (Fri, 04 Oct 2013)
    # ------------------------------------------------------------------------
    # r9072 | arvidn | 2013-10-04 21:18:24 -0700 (Fri, 04 Oct 2013)
    # ------------------------------------------------------------------------
    # r9068 | arvidn | 2013-10-04 08:51:32 -0700 (Fri, 04 Oct 2013)
    # ------------------------------------------------------------------------
    # r9067 | arvidn | 2013-10-04 08:45:47 -0700 (Fri, 04 Oct 2013)
    # ------------------------------------------------------------------------

    p = subprocess.Popen(['svn', 'log', '-l10', '--incremental', '-q'], stdout=subprocess.PIPE)
    ret = []
    for l in p.stdout:
        if not l.startswith('r'): continue
        rev = int(l.split(' ')[0][1:])
        if rev == last_rev: break
        ret.append(rev)

    print 'svn up: ',
    for r in ret: print '%d ' % r,
    print ''

    return ret

def svn_up(revision):
    os.system('svn up -r %d' % revision)

def print_usage():
    print '''usage: run_regression_tests.py [options] toolset [toolset...]

toolset are bjam toolsets. For instance clang, gcc, darwin, msvc etc.
The path "./regression_tests" is expected to be a shared folder
between all testers.

options:

   -j<n>      use n parallel processes for running tests
   -i         build incrementally (i.e. don't clean between checkouts)
   -valgrind  run tests with valgrind (requires valgrind to be installed)
   -s         skip. always run tests on the latest version
'''

def loop():
    if len(sys.argv) < 2:
        print_usage()
        sys.exit(1)

    skip = '-s' in sys.argv
    rev_file = os.path.join(os.getcwd(), '.rev')
    if skip:
        sys.argv.remove('-s')
        print 'restoring last state from "%s"' % rev_file

    try:
        last_rev = int(open(rev_file, 'r').read())
    except:
        last_rev = run_tests.svn_info()[0] - 1
        open(rev_file, 'w+').write('%d' % last_rev)

    revs = []

    while True:
        new_revs = svn_fetch(last_rev)
        if len(new_revs) > 0:
            revs = new_revs + revs

        # in skip mode, only ever run the latest version
        if skip and len(revs): revs = revs[:1]

        if revs == []:
            time.sleep(300)
            continue

        print 'revs: ',
        for r in revs: print '%d ' % r,
        print ''

        r = revs[0]
        print '\n\nREVISION %d ===\n' % r
        svn_up(r)

        try:
            run_tests.main(sys.argv[1:])
            last_rev = r

            # pop the revision we just completed
            revs = revs[1:]
            open(rev_file, 'w+').write('%d' % last_rev)
        except Exception, e:
            print e

if __name__ == "__main__":
    loop()
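
The "svn log -l10 --incremental -q" output documented in svn_fetch() parses the same way in modern Python. A minimal Python 3 sketch, assuming the same svn invocation; the new_revisions() helper is illustrative, not part of the removed script:

import subprocess

def new_revisions(last_rev):
    # revision lines look like: "r9073 | arvidn | 2013-10-04 21:49:00 -0700 ..."
    out = subprocess.run(['svn', 'log', '-l10', '--incremental', '-q'],
                         capture_output=True, text=True, check=True).stdout
    revs = []
    for line in out.splitlines():
        if not line.startswith('r'):
            continue
        rev = int(line.split(' ')[0][1:])
        if rev == last_rev:
            break  # everything from here back has already been tested
        revs.append(rev)
    return revs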

run_tests.py  View File

@@ -1,378 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2013, Arvid Norberg
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the distribution.
# * Neither the name of the author nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# this is meant to be run from the root of the repository
# the arguments are the boost-build toolsets to use.
# these will vary between testers and operating systems
# common ones are: clang, darwin, gcc, msvc, icc

import random
import os
import platform
import subprocess
import xml.etree.ElementTree as et
from datetime import datetime
import json
import sys
import yaml
import glob
import shutil
import traceback
import clean

# the .regression.yml configuration file format looks like this (it's yaml):
#
# test_dirs:
#   - <path-to-test-folder>
#   - ...
#
# features:
#   - <list of boost-build features>
#   - ...
#

def svn_info():
    # figure out which revision this is
    p = subprocess.Popen(['svn', 'info'], stdout=subprocess.PIPE)

    revision = -1
    author = ''

    for l in p.stdout:
        if 'Last Changed Rev' in l:
            revision = int(l.split(':')[1].strip())
        if 'Last Changed Author' in l:
            author = l.split(':')[1].strip()

    if revision == -1:
        print 'Failed to extract subversion revision'
        sys.exit(1)

    if author == '':
        print 'Failed to extract subversion author'
        sys.exit(1)

    return (revision, author)

def run_tests(toolset, tests, features, options, test_dir, time_limit):
    assert(type(features) == str)

    xml_file = 'bjam_build.%d.xml' % random.randint(0, 100000)
    try:
        results = {}
        feature_list = features.split(' ')
        os.chdir(test_dir)

        c = 0
        for t in tests:
            c = c + 1

            options_copy = options[:]
            if t != '': options_copy.append(t)
            if t == '':
                t = os.path.split(os.getcwd())[1]
                # we can't pass in a launcher when just building, that only
                # works for actual unit tests
                if 'launcher=valgrind' in options_copy:
                    options_copy.remove('launcher=valgrind')

            cmdline = ['bjam', '--out-xml=%s' % xml_file, '-l%d' % time_limit, \
                '-q', '--abbreviate-paths', toolset] + options_copy + feature_list
            # print 'calling ', cmdline

            p = subprocess.Popen(cmdline, stdout=subprocess.PIPE, cwd=test_dir)

            output = ''
            for l in p.stdout:
                if 'launcher=valgrind' in options_copy and l.startswith('chase_cuOff'):
                    continue
                output += l.decode('latin-1')
                sys.stdout.write('.')
                sys.stdout.flush()
            p.wait()

            # parse out the toolset version from the xml file
            compiler = ''
            compiler_version = ''
            command = ''

            # make this parse the actual test to pick up the time
            # spent running the test
            try:
                dom = et.parse(xml_file)

                command = dom.find('./command').text

                prop = dom.findall('./action/properties/property')
                for a in prop:
                    name = a.attrib['name']
                    if name == 'toolset':
                        compiler = a.text
                        if compiler_version != '': break
                    if name.startswith('toolset-') and name.endswith(':version'):
                        compiler_version = a.text
                        if compiler != '': break

                if compiler != '' and compiler_version != '':
                    toolset = compiler + '-' + compiler_version
            except: pass

            r = { 'status': p.returncode, 'output': output, 'command': command }
            results[t + '|' + features] = r

            if p.returncode != 0:
                # if the build or test failed, print out the
                # important parts
                sys.stdout.write('\n')
                print command
                for l in output.split('\n'):
                    if 'error: ' in l or \
                        ': fatal error: ' in l or \
                        'failed to write output file' in l or \
                        ': error C' in l or \
                        'undefined reference to ' in l or \
                        ' error LNK' in l or \
                        'TEST_CHECK' in l or \
                        'TEST_EQUAL_ERROR' in l or \
                        '"ERROR: "' in l or \
                        l.startswith('EXIT STATUS: ') or \
                        ' second time limit exceeded' in l or \
                        l.startswith('signal: SIG') or \
                        'jump or move depends on uninitialised value(s)' in l or \
                        'Invalid read of size' in l or \
                        'Invalid write of size' in l or \
                        'Use of uninitialised value of size' in l or \
                        'Uninitialised byte(s) found during' in l or \
                        'points to uninitialised byte(s)' in l:
                        print l

            print '\n%s - %d / %d' % (toolset, c, len(tests))
    except Exception, e:
        # need this to make child processes exit
        print 'exiting test process: ', traceback.format_exc()
        sys.exit(1)
    finally:
        try: os.unlink(xml_file)
        except: pass

    return (toolset, results)

def print_usage():
    print '''usage: run_tests.py [options] bjam-toolset [bjam-toolset...] [bjam-option...]

options:

   -j<n>      use n parallel processes
   -h         prints this message and exits
   -i         build incrementally (i.e. don't clean between checkouts)
   -valgrind  run tests with valgrind (requires valgrind to be installed)
'''

def main(argv):
    toolsets = []
    incremental = False

    test_dirs = []
    build_dirs = []
    configs = []
    options = ['preserve-test-targets=on']
    time_limit = 1200

    for arg in argv:
        if arg[0] == '-':
            if arg[1] == 'j':
                num_processes = int(arg[2:])
                options.append('-j%d' % num_processes)
            elif arg[1] == 'h':
                print_usage()
                sys.exit(1)
            elif arg[1] == 'i':
                incremental = True
            elif arg[1:] == 'valgrind':
                options.append('launcher=valgrind')
            else:
                print 'unknown option: %s' % arg
                print_usage()
                sys.exit(1)
        elif '=' in arg:
            options.append(arg)
        else:
            toolsets.append(arg)

    if toolsets == []:
        print_usage()
        sys.exit(1)

    if not incremental:
        print 'cleaning repo'
        clean.clean()

    try:
        cfg = open('.regression.yml', 'r')
    except:
        print '.regression.yml not found in current directory'
        sys.exit(1)
    cfg = yaml.load(cfg.read())

    if 'test_dirs' in cfg:
        for d in cfg['test_dirs']:
            test_dirs.append(os.path.abspath(d))

    if 'build_dirs' in cfg:
        for d in cfg['build_dirs']:
            build_dirs.append(os.path.abspath(d))
            test_dirs.append(os.path.abspath(d))

    if len(build_dirs) == 0 and len(test_dirs) == 0:
        print 'no test or build directory specified by .regression.yml'
        sys.exit(1)

    configs = []
    if 'features' in cfg:
        for d in cfg['features']:
            configs.append(d)
    else:
        configs = ['']

    build_configs = []
    if 'build_features' in cfg:
        for d in cfg['build_features']:
            build_configs.append(d)

    clean_files = []
    if 'clean' in cfg:
        clean_files = cfg['clean']

    branch_name = 'trunk'
    if 'branch' in cfg:
        branch_name = cfg['branch']

    if 'time_limit' in cfg:
        time_limit = int(cfg['time_limit'])

    # it takes a bit longer to run in valgrind
    if 'launcher=valgrind' in options:
        time_limit *= 7

    architecture = platform.machine()
    build_platform = platform.system() + '-' + platform.release()

    revision, author = svn_info()
    timestamp = datetime.now()

    print '%s-%d - %s - %s' % (branch_name, revision, author, timestamp)
    print 'toolsets: %s' % ' '.join(toolsets)
    # print 'configs: %s' % '|'.join(configs)

    current_dir = os.getcwd()

    try:
        rev_dir = os.path.join(current_dir, 'regression_tests')
        try: os.mkdir(rev_dir)
        except: pass
        rev_dir = os.path.join(rev_dir, '%s-%d' % (branch_name, revision))
        try: os.mkdir(rev_dir)
        except: pass

        for toolset in toolsets:
            results = {}
            for test_dir in test_dirs:
                print 'running tests from "%s" in %s' % (test_dir, branch_name)
                os.chdir(test_dir)
                test_dir = os.getcwd()

                # figure out which tests are exported by this Jamfile
                p = subprocess.Popen(['bjam', '--dump-tests', 'non-existing-target'], stdout=subprocess.PIPE, cwd=test_dir)

                tests = []
                output = ''
                for l in p.stdout:
                    output += l
                    if not 'boost-test(RUN)' in l: continue
                    test_name = os.path.split(l.split(' ')[1][1:-1])[1]
                    tests.append(test_name)
                print 'found %d tests' % len(tests)
                if len(tests) == 0:
                    tests = ['']

                additional_configs = []
                if test_dir in build_dirs:
                    additional_configs = build_configs

                futures = []
                for features in configs + additional_configs:
                    (compiler, r) = run_tests(toolset, tests, features, options, test_dir, time_limit)
                    results.update(r)
                print ''

                if len(clean_files) > 0:
                    print 'deleting ',
                    for filt in clean_files:
                        for f in glob.glob(os.path.join(test_dir, filt)):
                            # a precaution to make sure a malicious repo
                            # won't clean things outside of the test directory
                            if not os.path.abspath(f).startswith(test_dir): continue
                            print '%s ' % f,
                            try: shutil.rmtree(f)
                            except: pass
                    print ''

            # each file contains a full set of tests for one specific toolset and platform
            try:
                f = open(os.path.join(rev_dir, build_platform + '#' + toolset + '.json'), 'w+')
            except IOError, e:
                print e
                rev_dir = os.path.join(current_dir, 'regression_tests')
                try: os.mkdir(rev_dir)
                except: pass
                rev_dir = os.path.join(rev_dir, '%s-%d' % (branch_name, revision))
                try: os.mkdir(rev_dir)
                except: pass
                f = open(os.path.join(rev_dir, build_platform + '#' + toolset + '.json'), 'w+')

            print >>f, json.dumps(results)
            f.close()
    finally:
        # always restore current directory
        try:
            os.chdir(current_dir)
        except: pass

if __name__ == "__main__":
    main(sys.argv[1:])
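
The .regression.yml keys this script consumes (test_dirs, build_dirs, features, build_features, clean, branch, time_limit) map naturally onto a small loader. A minimal Python 3 sketch; load_config() is illustrative, and yaml.safe_load stands in for the bare yaml.load call above, which is the safer modern default:

import os
import yaml

def load_config(path='.regression.yml'):
    # illustrative helper: reads the same keys run_tests.py consumed
    with open(path, 'r') as f:
        cfg = yaml.safe_load(f.read()) or {}
    # defaults mirror the removed script: trunk branch, 20 minute time limit
    return {
        'test_dirs': [os.path.abspath(d) for d in cfg.get('test_dirs', [])],
        'build_dirs': [os.path.abspath(d) for d in cfg.get('build_dirs', [])],
        'features': cfg.get('features', ['']),
        'build_features': cfg.get('build_features', []),
        'clean': cfg.get('clean', []),
        'branch': cfg.get('branch', 'trunk'),
        'time_limit': int(cfg.get('time_limit', 1200)),
    }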