improve regression test output
commit 476c36b9c8
parent 1734beda07
@@ -43,7 +43,6 @@ from datetime import datetime
 import json
 import sys
 import yaml
-from multiprocessing import Pool
 import glob
 import shutil
 import traceback
@@ -90,12 +89,14 @@ def run_tests(toolset, tests, features, options, test_dir, time_limit):
     try:
 
         results = {}
-        toolset_found = False
+
         feature_list = features.split(' ')
         os.chdir(test_dir)
 
+        c = 0
         for t in tests:
+            c = c + 1
 
             options_copy = options[:]
             if t != '': options_copy.append(t)
             if t == '':
@@ -113,6 +114,8 @@ def run_tests(toolset, tests, features, options, test_dir, time_limit):
             output = ''
             for l in p.stdout:
                 output += l.decode('latin-1')
+                sys.stdout.write('.')
+                sys.stdout.flush()
             p.wait()
 
             # parse out the toolset version from the xml file
@@ -144,11 +147,11 @@ def run_tests(toolset, tests, features, options, test_dir, time_limit):
             r = { 'status': p.returncode, 'output': output, 'command': command }
             results[t + '|' + features] = r
 
-            if p.returncode == 0:
-                sys.stdout.write('.')
-            else:
+            if p.returncode != 0:
+                sys.stdout.write('\n')
                 sys.stdout.write(output)
-                sys.stdout.flush()
+
+            print '\n%s - %d / %d' % (toolset, c, len(tests))
 
     except Exception, e:
         # need this to make child processes exit
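Aside, not part of the commit: the run_tests hunks above add a per-test counter (c), emit a dot for every line of captured build output, and print the full output only for failing runs, followed by a per-test "toolset - current / total" progress line. Below is a minimal standalone Python 3 sketch of that progress-reporting pattern; run_with_progress, label and commands are illustrative names that do not appear in the script.

import subprocess
import sys

def run_with_progress(label, commands):
    # Run each command, show a dot per line of output, and dump the full
    # captured output only for failing commands (mirrors the pattern in the
    # diff above, under the naming assumptions stated in the lead-in).
    for i, cmd in enumerate(commands, 1):
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
        output = ''
        for line in p.stdout:
            output += line.decode('latin-1')
            sys.stdout.write('.')   # quiet progress while output is captured
            sys.stdout.flush()      # dots carry no newline, so flush explicitly
        p.wait()
        if p.returncode != 0:
            sys.stdout.write('\n')
            sys.stdout.write(output)   # only failures get the full log
        print('\n%s - %d / %d' % (label, i, len(commands)))

if __name__ == '__main__':
    run_with_progress('example', [
        [sys.executable, '-c', 'print("ok")'],
        [sys.executable, '-c', 'import sys; sys.exit(1)'],
    ])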
@@ -173,7 +176,6 @@ def main(argv):
 
     toolsets = []
 
-    num_processes = 2
     incremental = False
 
     test_dirs = []
@@ -186,6 +188,7 @@ def main(argv):
         if arg[0] == '-':
             if arg[1] == 'j':
                 num_processes = int(arg[2:])
+                options.append('-j%d' % num_processes)
             elif arg[1] == 'h':
                 print_usage()
                 sys.exit(1)
@@ -265,8 +268,6 @@ def main(argv):
 
     timestamp = datetime.now()
 
-    tester_pool = Pool(processes=num_processes)
-
     print '%s-%d - %s - %s' % (branch_name, revision, author, timestamp)
 
     print 'toolsets: %s' % ' '.join(toolsets)
@@ -282,30 +283,27 @@ def main(argv):
     try: os.mkdir(rev_dir)
     except: pass
 
-    results = {}
-    for test_dir in test_dirs:
-        print 'running tests from "%s" in %s' % (test_dir, branch_name)
-        os.chdir(test_dir)
-        test_dir = os.getcwd()
+    for toolset in toolsets:
+        results = {}
+        for test_dir in test_dirs:
+            print 'running tests from "%s" in %s' % (test_dir, branch_name)
+            os.chdir(test_dir)
+            test_dir = os.getcwd()
 
-        # figure out which tests are exported by this Jamfile
-        p = subprocess.Popen(['bjam', '--dump-tests', 'non-existing-target'], stdout=subprocess.PIPE, cwd=test_dir)
+            # figure out which tests are exported by this Jamfile
+            p = subprocess.Popen(['bjam', '--dump-tests', 'non-existing-target'], stdout=subprocess.PIPE, cwd=test_dir)
 
-        tests = []
+            tests = []
 
-        output = ''
-        for l in p.stdout:
-            output += l
-            if not 'boost-test(RUN)' in l: continue
-            test_name = os.path.split(l.split(' ')[1][1:-1])[1]
-            tests.append(test_name)
-        print 'found %d tests' % len(tests)
-        if len(tests) == 0:
-            tests = ['']
+            output = ''
+            for l in p.stdout:
+                output += l
+                if not 'boost-test(RUN)' in l: continue
+                test_name = os.path.split(l.split(' ')[1][1:-1])[1]
+                tests.append(test_name)
+            print 'found %d tests' % len(tests)
+            if len(tests) == 0:
+                tests = ['']
 
-        for toolset in toolsets:
-            if not toolset in results: results[toolset] = {}
-            toolset_found = False
-
             additional_configs = []
             if test_dir in build_dirs:
@@ -313,22 +311,15 @@ def main(argv):
 
             futures = []
             for features in configs + additional_configs:
-                futures.append(tester_pool.apply_async(run_tests, [toolset, tests, features, options, test_dir, time_limit]))
-
-            for future in futures:
-                (compiler, r) = future.get()
-                results[toolset].update(r)
-
-#            for features in configs + additional_configs:
-#                (compiler, r) = run_tests(toolset, tests, features, options, test_dir, time_limit)
-#                results[toolset].update(r)
+                (compiler, r) = run_tests(toolset, tests, features, options, test_dir, time_limit)
+                results.update(r)
 
     print ''
 
     if len(clean_files) > 0:
         print 'deleting ',
         for filt in clean_files:
-            for f in glob.glob(filt):
+            for f in glob.glob(os.path.join(test_dir, filt)):
                 # a precaution to make sure a malicious repo
                 # won't clean things outside of the test directory
                 if not os.path.abspath(f).startswith(test_dir): continue
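Aside, not part of the commit: the hunk above drops the multiprocessing.Pool dispatch (apply_async plus future.get()) in favour of calling run_tests directly and merging each returned dict into a single results dict. Below is a minimal standalone Python 3 sketch contrasting the two dispatch styles; run_parallel, run_sequential and the stub run_tests are illustrative names, not code from the script.

from multiprocessing import Pool

def run_tests(toolset, features):
    # stand-in for the real test runner: returns (compiler, results-dict)
    return (toolset, {toolset + '|' + features: 'passed'})

def run_parallel(toolset, configs, num_processes=2):
    # pool style: farm each configuration out to a worker process
    pool = Pool(processes=num_processes)
    futures = [pool.apply_async(run_tests, [toolset, f]) for f in configs]
    results = {}
    for future in futures:
        (compiler, r) = future.get()   # blocks until that worker is done
        results.update(r)
    pool.close()
    pool.join()
    return results

def run_sequential(toolset, configs):
    # direct style: run configurations one after another in this process
    results = {}
    for f in configs:
        (compiler, r) = run_tests(toolset, f)
        results.update(r)
    return results

if __name__ == '__main__':   # guard needed for multiprocessing on spawn platforms
    print(run_parallel('gcc', ['debug', 'release']))
    print(run_sequential('gcc', ['debug', 'release']))

Sequential calls give up build parallelism, but they keep the dot and counter progress output from the earlier hunks in order, since it is no longer interleaved across worker processes.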
@@ -337,7 +328,6 @@ def main(argv):
                 except: pass
     print ''
 
-    for toolset in toolsets:
         # each file contains a full set of tests for one speific toolset and platform
         try:
             f = open(os.path.join(rev_dir, build_platform + '#' + toolset + '.json'), 'w+')
@@ -350,7 +340,7 @@ def main(argv):
         except: pass
         f = open(os.path.join(rev_dir, build_platform + '#' + toolset + '.json'), 'w+')
 
-        print >>f, json.dumps(results[toolset])
+        print >>f, json.dumps(results)
         f.close()
 
 