first stab at script to automatically run unit tests for new check-ins
This commit is contained in:
parent
0feaf488ae
commit
9bec8d67c9
|
@ -46,9 +46,10 @@ def style_output(o):
|
|||
ret += '<span class="test-error">%s</span>\n' % l
|
||||
elif '**passed**' in l:
|
||||
ret += '<span class="test-pass">%s</span>\n' % l
|
||||
elif ': error: ' in l or ': fatal error: ' in l:
|
||||
elif ': error: ' in l or ': fatal error: ' in l or ' : fatal error ' in l or \
|
||||
'failed to write output file' in l:
|
||||
ret += '<span class="compile-error">%s</span>\n' % l
|
||||
elif ': warning: ' in l:
|
||||
elif ': warning: ' in l or ') : warning C' in l:
|
||||
ret += '<span class="compile-warning">%s</span>\n' % l
|
||||
elif l == '====== END OUTPUT ======' and not subtle:
|
||||
ret += '<span class="subtle">%s\n' % l
|
||||
|
@ -127,7 +128,6 @@ if not need_refresh:
|
|||
# test_primitives: {
|
||||
# output: ...
|
||||
# status: 1
|
||||
# warnings: 21
|
||||
# }
|
||||
# }
|
||||
# }
|
||||
|
|
|
@ -0,0 +1,69 @@
|
|||
#!/bin/python
|
||||
|
||||
import os
import subprocess
import sys
import time

import run_tests
|
||||
|
||||
# returns a list of new revisions
|
||||
def svn_fetch():
|
||||
|
||||
current_version = run_tests.svn_info()[0]
|
||||
|
||||
p = subprocess.Popen(['svn', 'up'], stdout=subprocess.PIPE)
|
||||
|
||||
revision = -1
|
||||
|
||||
output = ''
|
||||
for l in p.stdout:
|
||||
if 'At revision ' in l:
|
||||
revision = int(l.split(':')[1].strip())
|
||||
output += l
|
||||
|
||||
if revision == -1:
|
||||
print '\n\nsvn update failed\n\n%s' % ouput
|
||||
sys.exit(1)
|
||||
|
||||
return range(current_version + 1, revision + 1)
|
||||
|
||||
def svn_up(revision):
    # update the working copy to the given revision number; the '-r' flag
    # is required — without it svn treats the number as a path argument
    os.system('svn up -r %d' % revision)
|
||||
|
||||
def print_usage():
|
||||
print '''usage: run_regression_tests.py remote-path [options] toolset [toolset...]
|
||||
|
||||
toolset are bjam toolsets. For instance clang, gcc, darwin, msvc etc.
|
||||
remote-path is an scp path where the results are copied. This path has
|
||||
the form: user@hostname:/path
|
||||
|
||||
options:
|
||||
|
||||
-j<n> use n parallel processes for running tests
|
||||
'''
|
||||
|
||||
|
||||
def loop():
|
||||
while True:
|
||||
revs = svn_fetch()
|
||||
# reverse the list to always run the tests for the
|
||||
# latest version first, then fill in with the history
|
||||
revs.reverse()
|
||||
|
||||
if len(sys.argv) < 3:
|
||||
print_usage()
|
||||
sys.exit(1)
|
||||
|
||||
remote_path = sys.argv[1]
|
||||
root_path = os.path.join(os.getcwd(), 'regression_tests')
|
||||
|
||||
for r in revs:
|
||||
print '\n\nREVISION %d ===\n' % r
|
||||
svn_up(r)
|
||||
|
||||
run_tests.main(sys.argv[2:])
|
||||
|
||||
os.system('scp -r %s %s' % (os.path.join(root_path, '%d' % r), remote_path))
|
||||
|
||||
time.sleep(120)
|
||||
|
||||
if __name__ == "__main__":
    # when executed as a script, run the polling loop forever
    loop()
|
|
@ -89,9 +89,7 @@ def run_tests(toolset, tests, features, options, test_dir):
|
|||
for t in tests:
|
||||
p = subprocess.Popen(['bjam', '--out-xml=%s' % xml_file, toolset, t] + options + features.split(' '), stdout=subprocess.PIPE)
|
||||
output = ''
|
||||
warnings = 0
|
||||
for l in p.stdout:
|
||||
if 'warning: ' in l: warnings += 1
|
||||
output += l
|
||||
p.wait()
|
||||
|
||||
|
@ -124,7 +122,7 @@ def run_tests(toolset, tests, features, options, test_dir):
|
|||
try: os.unlink(xml_file)
|
||||
except: pass
|
||||
|
||||
r = { 'status': p.returncode, 'output': output, 'warnings': warnings, 'command': command }
|
||||
r = { 'status': p.returncode, 'output': output, 'command': command }
|
||||
results[t + '|' + features] = r
|
||||
|
||||
fail_color = '\033[31;1m'
|
||||
|
@ -136,9 +134,9 @@ def run_tests(toolset, tests, features, options, test_dir):
|
|||
pass_color == ''
|
||||
end_seq = ''
|
||||
|
||||
print '%s [%s] [%s]' % (t, toolset, features),
|
||||
if p.returncode == 0: print pass_color + 'PASSED' + end_seq
|
||||
else: print fail_color + 'FAILED' + end_seq
|
||||
if p.returncode == 0: sys.stdout.write('.')
|
||||
else: sys.stdout.write('X')
|
||||
sys.stdout.flush()
|
||||
|
||||
return (toolset, results)
|
||||
|
||||
|
@ -149,13 +147,13 @@ options:
|
|||
-h prints this message and exits
|
||||
'''
|
||||
|
||||
def main():
|
||||
def main(argv):
|
||||
|
||||
toolsets = []
|
||||
|
||||
num_processes = 4
|
||||
|
||||
for arg in sys.argv[1:]:
|
||||
for arg in argv:
|
||||
if arg[0] == '-':
|
||||
if arg[1] == 'j':
|
||||
num_processes = int(arg[2:])
|
||||
|
@ -209,8 +207,8 @@ def main():
|
|||
|
||||
print '%d - %s - %s' % (revision, author, timestamp)
|
||||
|
||||
print 'toolsets: ', toolsets
|
||||
print 'configs: ', configs
|
||||
print 'toolsets: %s' % ' '.join(toolsets)
|
||||
# print 'configs: %s' % '|'.join(configs)
|
||||
|
||||
rev_dir = os.path.join(os.getcwd(), 'regression_tests')
|
||||
try: os.mkdir(rev_dir)
|
||||
|
@ -236,7 +234,6 @@ def main():
|
|||
print 'found %d tests' % len(tests)
|
||||
|
||||
for toolset in toolsets:
|
||||
print 'toolset %s' % toolset
|
||||
results = {}
|
||||
toolset_found = False
|
||||
|
||||
|
@ -258,5 +255,5 @@ def main():
|
|||
f.close()
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
main(sys.argv[1:])
|
||||
|
||||
|
|
Loading…
Reference in New Issue