some helper scripts for running unit tests and collecting a summary report; can be expanded to automatically run regression tests

Arvid Norberg 2013-06-15 23:25:14 +00:00
parent 9803b6a15f
commit 70e3617cd6
3 changed files with 454 additions and 0 deletions

.regression.yml Normal file

@@ -0,0 +1,18 @@
test_dirs:
- test
project: libtorrent
features:
- variant=release
- asserts=production
- encryption=gcrypt
- statistics=on logging=verbose disk-stats=on bandwidth-limit-logging=on
- ipv6=off
- deprecated-functions=off
- address-model=32
- dht-support=off
- invariant-checks=off
- extensions=off

tools/parse_test_results.py Executable file

@@ -0,0 +1,232 @@
#!/usr/bin/env python
# Copyright (c) 2013, Arvid Norberg
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the distribution.
# * Neither the name of the author nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# This is meant to be run from the root directory of the repo. It will
# look for the .regression.yml file and expect a regression_tests directory
# with results from test runs previously produced by run_tests.py
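# the expected layout (illustrative, matching what run_tests.py writes) is:
#
#   regression_tests/
#      <revision>/
#         <platform>#<toolset>.json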
import os
import sys
import glob
import json
import yaml
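# wraps notable lines of a test's output (test failures, compiler
# errors and warnings, passes) in <span> tags whose classes match the
# CSS emitted into index.html below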
def style_output(o):
ret = ''
subtle = False
for l in o.split('\n'):
if 'TEST_CHECK' in l or 'TEST_EQUAL_ERROR' in l or l.startswith('EXIT STATUS: '):
ret += '<span class="test-error">%s</span>\n' % l
elif '**passed**' in l:
ret += '<span class="test-pass">%s</span>\n' % l
elif ': error: ' in l:
ret += '<span class="compile-error">%s</span>\n' % l
elif ': warning: ' in l:
ret += '<span class="compile-warning">%s</span>\n' % l
elif l == '====== END OUTPUT ======' and not subtle:
ret += '<span class="subtle">%s\n' % l
subtle = True
else:
ret += '%s\n' % l
if subtle: ret += '</span>'
return ret
project_name = ''
try:
cfg = open('.regression.yml', 'r')
except:
print '.regression.yml not found in current directory'
sys.exit(1)
cfg = yaml.load(cfg.read())
if 'project' in cfg:
project_name = cfg['project']
os.chdir('regression_tests')
def modification_time(file):
mtime = 0
try:
st = os.stat(file)
mtime = st.st_mtime
except Exception, e:
print e
return mtime
index_mtime = modification_time('index.html')
print 'index mtime: %d' % index_mtime
latest_rev = 0
for rev in os.listdir('.'):
try:
r = int(rev)
if r > latest_rev: latest_rev = r
except: pass
if latest_rev == 0:
print 'no test files found'
sys.exit(1)
rev_dir = '%d' % latest_rev
need_refresh = False
for f in glob.glob(os.path.join(rev_dir, '*.json')):
mtime = modification_time(f)
if mtime > index_mtime:
need_refresh = True
break
if not need_refresh:
print 'all up to date'
sys.exit(0)
# nested dictionaries: platform -> toolset -> feature-set -> test name
# -> information about the test run, such as whether it passed and the
# output from the command
# example:
# {
# darwin: {
# clang-4.2.1: {
# ipv6=off: {
# test_primitives: {
# output: ...
# status: 1
# warnings: 21
# }
# }
# }
# }
# }
platforms = {}
tests = {}
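# each .json file holds the results of one full test run; run_tests.py
# names them <platform>#<toolset>.json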
for f in glob.glob(os.path.join(rev_dir, '*.json')):
platform_toolset = os.path.split(f)[1].split('.json')[0].split('#')
j = json.loads(open(f, 'rb').read())
platform = platform_toolset[0]
toolset = platform_toolset[1]
if not platform in platforms:
platforms[platform] = {}
if not toolset in platforms[platform]:
platforms[platform][toolset] = {}
for cfg in j:
test_name = cfg.split('|')[0]
features = cfg.split('|')[1]
if not features in tests:
tests[features] = set()
tests[features].add(test_name)
if not features in platforms[platform][toolset]:
platforms[platform][toolset][features] = {}
platforms[platform][toolset][features][test_name] = j[cfg]
html = open('index.html', 'w')
print >>html, '''<html><head><title>regression tests, %s revision %d</title><style type="text/css">
.passed { display: block; width: 8px; height: 1em; background-color: #6f8 }
.failed { display: block; width: 8px; height: 1em; background-color: #f68 }
table { border: 0; }
td { border: 0; border-spacing: 0px; padding: 0px 0px 0px 0px; }
.compile-error { color: #f13; font-weight: bold; }
.compile-warning { color: #cb0; }
.test-error { color: #f13; font-weight: bold; }
.test-pass { color: #1c2; font-weight: bold; }
.subtle { color: #ccc; }
pre { color: #999; }
</style><script type="text/javascript">
var expanded = -1;
function toggle(i) {
if (expanded != -1) document.getElementById(expanded).style.display = 'none';
expanded = i;
document.getElementById(i).style.display = 'block';
}
</script></head><body>''' % (project_name, latest_rev)
print >>html, '<h1>%s revision %d</h1>' % (project_name, latest_rev)
print >>html, '<table border="1"><tr><th colspan="2" style="border:0;"></th>'
for f in tests:
print >>html, '<th colspan="%d">%s</th>' % (len(tests[f]), f)
print >>html, '</tr>'
details_id = 0
details = []
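# emit one table row per (platform, toolset) pair; the platform cell
# spans all of its toolsets, and every test/feature-set combination
# becomes one colored cell linking to its detailed output below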
for p in platforms:
print >>html, '<tr><th rowspan="%d">%s</th>' % (len(platforms[p]), p)
idx = 0
for toolset in platforms[p]:
if idx > 0: print >>html, '<tr>'
print >>html, '<th>%s</th>' % toolset
for f in platforms[p][toolset]:
for t in platforms[p][toolset][f]:
if platforms[p][toolset][f][t][u'status'] == 0: c = 'passed'
else: c = 'failed'
print >>html, '<td title="%s"><a class="%s" href="javascript:toggle(%d)"></a></td>' % ('%s %s' % (t, f), c, details_id)
platforms[p][toolset][f][t]['name'] = t
platforms[p][toolset][f][t]['features'] = f
details.append(platforms[p][toolset][f][t])
details_id += 1
print >>html, '</tr>'
idx += 1
print >>html, '</table>'
details_id = 0
for d in details:
print >>html, '<div style="display: none" id="%d"><h3>%s - %s</h3><pre>%s</pre></div>' % \
(details_id, d['name'].encode('utf8'), d['features'].encode('utf-8'), style_output(d['output']).encode('utf-8'))
details_id += 1
print >>html, '</body></html>'
html.close()

tools/run_tests.py Executable file

@@ -0,0 +1,204 @@
#!/usr/bin/env python
# Copyright (c) 2013, Arvid Norberg
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the distribution.
# * Neither the name of the author nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# this is meant to be run from the root of the repository
# the arguments are the boost-build toolsets to use.
# these will vary between testers and operating systems
# common ones are: clang, darwin, gcc, msvc, icc
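# an invocation might look like this (toolset names are illustrative):
#   python tools/run_tests.py gcc clang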
import random
import os
import platform
import subprocess
import xml.etree.ElementTree as et
from datetime import datetime
import json
import sys
import yaml
# the .regression.yml configuration file format looks like this (it's yaml):
# test_dirs:
# - <path-to-test-folder>
# - ...
#
# features:
# - <list of boost-build features>
# - ...
#
toolsets = sys.argv[1:]
try:
cfg = open('.regression.yml', 'r')
except:
	print '.regression.yml not found in current directory'
sys.exit(1)
cfg = yaml.load(cfg.read())
test_dirs = []
configs = []
options = ['boost=source']
if 'test_dirs' in cfg:
for d in cfg['test_dirs']:
test_dirs.append(d)
else:
	print 'no test directory specified in .regression.yml'
sys.exit(1)
if 'features' in cfg:
for d in cfg['features']:
configs.append(d.split(' '))
else:
configs = [['']]
architecture = platform.machine()
build_platform = platform.system() + '-' + platform.release()
fail_color = '\033[31;1m'
pass_color = '\033[32;1m'
end_seq = '\033[0m'
if platform.system() == 'Windows':
	fail_color = ''
	pass_color = ''
	end_seq = ''
# figure out which revision this is
p = subprocess.Popen(['svn', 'info'], stdout=subprocess.PIPE)
revision = -1
author = ''
timestamp = datetime.now()
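# the fields of interest in the 'svn info' output look like:
#   Last Changed Author: <author>
#   Last Changed Rev: <revision>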
for l in p.stdout:
if 'Last Changed Rev' in l:
revision = int(l.split(':')[1].strip())
if 'Last Changed Author' in l:
author = l.split(':')[1].strip()
if revision == -1:
print 'Failed to extract subversion revision'
sys.exit(1)
if author == '':
print 'Failed to extract subversion author'
sys.exit(1)
print '%d - %s - %s' % (revision, author, timestamp)
print 'toolsets: ', toolsets
print 'configs: ', configs
xml_file = 'bjam_build.%d.xml' % random.randint(0, 100000)
rev_dir = os.path.join(os.getcwd(), 'regression_tests')
try: os.mkdir(rev_dir)
except: pass
rev_dir = os.path.join(rev_dir, '%d' % revision)
try: os.mkdir(rev_dir)
except: pass
for test_dir in test_dirs:
print 'running tests from %s' % test_dir
os.chdir(test_dir)
# figure out which tests are exported by this Jamfile
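	# each 'boost-test(RUN)' line carries the quoted path of a test as
	# its second whitespace-separated token; its basename is the test name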
p = subprocess.Popen(['bjam', '--dump-tests', 'non-existing-target'], stdout=subprocess.PIPE)
tests = []
for l in p.stdout:
if not 'boost-test(RUN)' in l: continue
test_name = os.path.split(l.split(' ')[1][1:-1])[1]
tests.append(test_name)
print 'found %d tests' % len(tests)
for toolset in toolsets:
print 'toolset %s' % toolset
results = {}
toolset_found = False
# TODO: run tests in parallel
for t in tests:
print t
for features in configs:
print 'running %s [%s] [%s]' % (t, toolset, ' '.join(features)),
sys.stdout.flush()
p = subprocess.Popen(['bjam', '--out-xml=%s' % xml_file, toolset, t] + options + features, stdout=subprocess.PIPE)
output = ''
warnings = 0
for l in p.stdout:
if 'warning: ' in l: warnings += 1
output += l
p.wait()
				# parse out the toolset version from the xml file
				compiler = ''
				compiler_version = ''
				command = ''
				# TODO: parse the actual test results to pick up the
				# time spent running the test
if not toolset_found:
try:
dom = et.parse(xml_file)
command = dom.find('./command').text
prop = dom.findall('./action/properties/property')
for a in prop:
name = a.attrib['name']
if name == 'toolset':
compiler = a.text
if compiler_version != '': break
if name.startswith('toolset-') and name.endswith(':version'):
compiler_version = a.text
if compiler != '': break
if compiler != '' and compiler_version != '':
toolset = compiler + '-' + compiler_version
toolset_found = True
except: pass
try: os.unlink(xml_file)
except: pass
r = { 'status': p.returncode, 'output': output, 'warnings': warnings, 'command': command }
results[t + '|' + '|'.join(features)] = r
if p.returncode == 0: print pass_color + 'PASSED' + end_seq
else: print fail_color + 'FAILED' + end_seq
		# each file contains a full set of tests for one specific toolset and platform
f = open(os.path.join(rev_dir, build_platform + '#' + toolset + '.json'), 'w+')
print >>f, json.dumps(results)
f.close()
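
Taken together, the two scripts are meant to be chained: run_tests.py produces the per-toolset .json result files under regression_tests/<revision>/, and parse_test_results.py folds the latest revision's files into an HTML report. A minimal driver sketch, assuming the scripts live in tools/ and the current directory is the root of an svn checkout (run_tests.py shells out to svn and bjam):

    import subprocess

    # run the full test matrix once per bjam toolset; the toolset names
    # here are illustrative and depend on the tester's setup
    subprocess.check_call(['python', 'tools/run_tests.py', 'gcc', 'clang'])

    # regenerate regression_tests/index.html from the .json result files
    subprocess.check_call(['python', 'tools/parse_test_results.py'])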