Add/fix shebangs in Python scripts, add execute permission (Linux), remove trailing whitespace/tabs

Chocobo1 2015-07-11 13:51:30 +08:00
parent bbe51268d3
commit a59d1d3240
34 changed files with 207 additions and 182 deletions
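
The cleanup has three parts: shebang lines are normalized to #!/usr/bin/env python so the interpreter is resolved through PATH rather than a hard-coded location, trailing whitespace and tabs are stripped, and the scripts are marked executable (the "Normal file → Executable file" mode change on each entry below). Most of the hunks in this listing, including nearly all of tools/gprof2dot.py, only strip trailing whitespace, so their before and after text looks identical here. A minimal sketch of the shebang and whitespace part of the cleanup, not part of the commit; the helper name and regex are illustrative only:

    import re

    ENV_SHEBANG = '#!/usr/bin/env python\n'
    # shebang variants seen in the hunks below:
    # "#!/bin/python", "#!/usr/bin/python", "#! /usr/bin/env python"
    OLD_SHEBANG = re.compile(r'^#!\s*(?:/bin/python|/usr/bin/python|/usr/bin/env python)\s*$')

    def fix_script(path):
        with open(path) as f:
            lines = f.readlines()
        if lines and OLD_SHEBANG.match(lines[0]):
            lines[0] = ENV_SHEBANG              # normalize an existing shebang
        elif lines and not lines[0].startswith('#!'):
            lines.insert(0, ENV_SHEBANG)        # or add a missing one
        # strip trailing spaces/tabs, keep the line break
        lines = [line.rstrip(' \t\r\n') + '\n' for line in lines]
        with open(path, 'w') as f:
            f.writelines(lines)

Running something like this over the 34 files, plus a chmod +x, would produce roughly the hunks shown below.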

bindings/python/make_torrent.py (2 changed lines, Normal file → Executable file)

@@ -1,4 +1,4 @@
-#!/bin/python
+#!/usr/bin/env python
 import sys
 import os

bindings/python/rss_reader.py (2 changed lines, Normal file → Executable file)

@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python
 import sys
 import libtorrent as lt

docs/gen_reference_doc.py (12 changed lines, Normal file → Executable file)

@@ -1,3 +1,5 @@
+#!/usr/bin/env python
 import glob
 import os
 import sys
@@ -371,7 +373,7 @@ def parse_class(lno, lines, filename):
 context = ''
 if verbose: print '?? %s' % l
 if len(name) > 0:
 print '\x1b[31mFAILED TO PARSE CLASS\x1b[0m %s\nfile: %s:%d' % (name, filename, lno)
 return [None, lno]
@@ -416,7 +418,7 @@ def parse_enum(lno, lines, filename):
 start_brace += l.count('{')
 end_brace += l.count('}')
 if '{' in l:
 l = l.split('{')[1]
 l = l.split('}')[0]
@@ -1019,7 +1021,7 @@ for cat in categories:
 print >>out, '%s\n' % block.replace('\n', '\n\t')
 f['desc'] = linkify_symbols(f['desc'])
 print >>out, '%s' % f['desc']
 print >>out, dump_link_targets()
 render_enums(out, c['enums'], False, '.')
@@ -1060,7 +1062,7 @@ for cat in categories:
 print >>out, linkify_symbols(f['desc'])
 print >>out, dump_link_targets()
 render_enums(out, enums, True, '-')
 print >>out, dump_link_targets()
@@ -1084,5 +1086,5 @@ for i,o in preprocess_rst.items():
 out.close()
 f.close()

docs/gen_settings_doc.py (2 changed lines, Normal file → Executable file)

@@ -1,3 +1,5 @@
+#!/usr/bin/env python
 f = open('../include/libtorrent/settings_pack.hpp')
 out = open('settings.rst', 'w+')

docs/gen_stats_doc.py (4 changed lines, Normal file → Executable file)

@@ -1,3 +1,5 @@
+#!/usr/bin/env python
 counter_types = {}
 f = open('../include/libtorrent/performance_counters.hpp')
@@ -28,7 +30,7 @@ for l in f:
 if '=' in l: l = l[:l.index('=')].strip()
 counter_types[l] = counter_type
 f.close()
 f = open('../src/session_stats.cpp')

docs/gen_todo.py (4 changed lines, Normal file → Executable file)

@@ -1,3 +1,5 @@
+#!/usr/bin/env python
 import glob
 import os
@@ -54,7 +56,7 @@ for f in files:
 prio = items[-1]['priority']
 if prio >= 0 and prio <= 4: priority_count[prio] += 1
 continue
 if state == '':
 context.append(html_sanitize(l))
 if len(context) > 20: context.pop(0)

examples/run_benchmarks.py (6 changed lines, Normal file → Executable file)

@@ -1,3 +1,5 @@
+#!/usr/bin/env python
 import sys
 import os
 import resource
@@ -200,7 +202,7 @@ def prefix_len(text, prefix):
 def device_name(path):
 mount = subprocess.Popen('mount', stdout=subprocess.PIPE)
 max_match_len = 0
 match_device = ''
 path = os.path.abspath(path)
@@ -310,7 +312,7 @@ def run_test(config):
 tester_output = open('session_stats/tester.output', 'w+')
 tester = subprocess.Popen(shlex.split(cmdline), stdout=tester_output)
 print 'OK'
 time.sleep(2)
 print '\n'

setup.py (2 changed lines, Normal file → Executable file)

@@ -1,3 +1,5 @@
+#!/usr/bin/env python
 import os
 os.chdir('bindings/python')
 execfile('setup.py')

test/http.py (0 changed lines, Normal file → Executable file; mode change only)

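test/http.py above is the one file with no content hunk; only its mode flips from Normal file to Executable file. For reference, a hedged sketch of that permission change done from Python, comparable to chmod +x; the path is simply this file as the example:

    import os

    path = 'test/http.py'  # example path from this commit
    mode = os.stat(path).st_mode
    # copy the read bits down to the matching execute bits (u+x, g+x, o+x)
    os.chmod(path, mode | ((mode & 0o444) >> 2))
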
test/socks.py (2 changed lines, Normal file → Executable file)

@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python
 """Minimal non-feature complete socks proxy"""

test/web_server.py (2 changed lines, Normal file → Executable file)

@@ -1,3 +1,5 @@
+#!/usr/bin/env python
 import BaseHTTPServer
 import SimpleHTTPServer
 import sys

tools/clean.py (10 changed lines, Normal file → Executable file)

@@ -1,3 +1,5 @@
+#!/usr/bin/env python
 import os
 import shutil
 import glob
@@ -31,7 +33,7 @@ def clean():
 'Jamfile.rej',
 'Jamfile.orig',
 ]
 directories = [
 'examples',
 'test',
@@ -47,7 +49,7 @@ def clean():
 os.path.join('bindings', 'c'),
 os.path.join('bindings', os.path.join('c', 'src'))
 ]
 for d in directories:
 for f in to_delete:
 path = os.path.join(d, f)
@@ -62,7 +64,7 @@ def clean():
 print p
 except Exception, e:
 print p, e
 if __name__ == "__main__":
 clean()

(file name not shown in this view)

@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#!/usr/bin/env python
 import socket
 import sys

tools/gprof2dot.py (240 changed lines, Normal file → Executable file; the hunks below differ only in trailing whitespace)

@@ -98,7 +98,7 @@ def ratio(numerator, denominator):
 class UndefinedEvent(Exception):
 """Raised when attempting to get an event which is undefined."""
 def __init__(self, event):
 Exception.__init__(self)
 self.event = event
@@ -130,7 +130,7 @@ class Event(object):
 assert val1 is not None
 assert val2 is not None
 return self._aggregator(val1, val2)
 def format(self, val):
 """Format an event value."""
 assert val is not None
@@ -176,13 +176,13 @@ class Object(object):
 def __contains__(self, event):
 return event in self.events
 def __getitem__(self, event):
 try:
 return self.events[event]
 except KeyError:
 raise UndefinedEvent(event)
 def __setitem__(self, event, value):
 if value is None:
 if event in self.events:
@@ -193,7 +193,7 @@ class Object(object):
 class Call(Object):
 """A call between functions.
 There should be at most one call object for every pair of functions.
 """
@@ -217,7 +217,7 @@ class Function(Object):
 self.called = None
 self.weight = None
 self.cycle = None
 def add_call(self, call):
 if call.callee_id in self.calls:
 sys.stderr.write('warning: overwriting call from function %s to %s\n' % (str(self.id), str(call.callee_id)))
@@ -374,7 +374,7 @@ class Profile(Object):
 if self.functions[f].name == funcName:
 return f
 return False
 def _tarjan(self, function, order, stack, orders, lowlinks, visited):
 """Tarjan's strongly connected components algorithm.
@@ -466,7 +466,7 @@ class Profile(Object):
 if call.callee_id != function.id:
 assert call.ratio is not None
 # Aggregate the input for each cycle
 for cycle in self.cycles:
 total = inevent.null()
 for function in compat_itervalues(self.functions):
@@ -491,7 +491,7 @@ class Profile(Object):
 total += self._integrate_call(call, outevent, inevent)
 function[outevent] = total
 return function[outevent]
 def _integrate_call(self, call, outevent, inevent):
 assert outevent not in call
 assert call.ratio is not None
@@ -513,7 +513,7 @@ class Profile(Object):
 subtotal += self._integrate_call(call, outevent, inevent)
 total += subtotal
 cycle[outevent] = total
 # Compute the time propagated to callers of this cycle
 callees = {}
 for function in compat_itervalues(self.functions):
@@ -525,7 +525,7 @@ class Profile(Object):
 callees[callee] += call.ratio
 except KeyError:
 callees[callee] = call.ratio
 for member in cycle.functions:
 member[outevent] = outevent.null()
@@ -626,11 +626,11 @@ class Profile(Object):
 if TOTAL_TIME_RATIO in call:
 # handle exact cases first
 call.weight = call[TOTAL_TIME_RATIO]
 else:
 try:
 # make a safe estimate
 call.weight = min(function[TOTAL_TIME_RATIO], callee[TOTAL_TIME_RATIO])
 except UndefinedEvent:
 pass
@@ -647,7 +647,7 @@ class Profile(Object):
 call = function.calls[callee_id]
 if callee_id not in self.functions or call.weight is not None and call.weight < edge_thres:
 del function.calls[callee_id]
 def dump(self):
 for function in compat_itervalues(self.functions):
 sys.stderr.write('Function %s:\n' % (function.name,))
@@ -674,7 +674,7 @@ class Struct:
 if attrs is None:
 attrs = {}
 self.__dict__['_attrs'] = attrs
 def __getattr__(self, name):
 try:
 return self._attrs[name]
@@ -689,7 +689,7 @@ class Struct:
 def __repr__(self):
 return repr(self._attrs)
 class ParseError(Exception):
 """Raised when parsing to signal mismatches."""
@@ -715,7 +715,7 @@ class Parser:
 def parse(self):
 raise NotImplementedError
 class LineParser(Parser):
 """Base class for parsers that read line-based formats."""
@@ -790,21 +790,21 @@ class XmlTokenizer:
 self.index = 0
 self.final = False
 self.skip_ws = skip_ws
 self.character_pos = 0, 0
 self.character_data = ''
 self.parser = xml.parsers.expat.ParserCreate()
 self.parser.StartElementHandler = self.handle_element_start
 self.parser.EndElementHandler = self.handle_element_end
 self.parser.CharacterDataHandler = self.handle_character_data
 def handle_element_start(self, name, attributes):
 self.finish_character_data()
 line, column = self.pos()
 token = XmlToken(XML_ELEMENT_START, name, attributes, line, column)
 self.tokens.append(token)
 def handle_element_end(self, name):
 self.finish_character_data()
 line, column = self.pos()
@@ -815,15 +815,15 @@ class XmlTokenizer:
 if not self.character_data:
 self.character_pos = self.pos()
 self.character_data += data
 def finish_character_data(self):
 if self.character_data:
 if not self.skip_ws or not self.character_data.isspace():
 line, column = self.character_pos
 token = XmlToken(XML_CHARACTER_DATA, self.character_data, None, line, column)
 self.tokens.append(token)
 self.character_data = ''
 def next(self):
 size = 16*1024
 while self.index >= len(self.tokens) and not self.final:
@@ -868,13 +868,13 @@ class XmlParser(Parser):
 Parser.__init__(self)
 self.tokenizer = XmlTokenizer(fp)
 self.consume()
 def consume(self):
 self.token = self.tokenizer.next()
 def match_element_start(self, name):
 return self.token.type == XML_ELEMENT_START and self.token.name_or_data == name
 def match_element_end(self, name):
 return self.token.type == XML_ELEMENT_END and self.token.name_or_data == name
@@ -888,7 +888,7 @@ class XmlParser(Parser):
 attrs = self.token.attrs
 self.consume()
 return attrs
 def element_end(self, name):
 while self.token.type == XML_CHARACTER_DATA:
 self.consume()
@@ -966,20 +966,20 @@ class GprofParser(Parser):
 )
 _cg_primary_re = re.compile(
 r'^\[(?P<index>\d+)\]?' +
 r'\s+(?P<percentage_time>\d+\.\d+)' +
 r'\s+(?P<self>\d+\.\d+)' +
 r'\s+(?P<descendants>\d+\.\d+)' +
 r'\s+(?:(?P<called>\d+)(?:\+(?P<called_self>\d+))?)?' +
 r'\s+(?P<name>\S.*?)' +
 r'(?:\s+<cycle\s(?P<cycle>\d+)>)?' +
 r'\s\[(\d+)\]$'
 )
 _cg_parent_re = re.compile(
 r'^\s+(?P<self>\d+\.\d+)?' +
 r'\s+(?P<descendants>\d+\.\d+)?' +
 r'\s+(?P<called>\d+)(?:/(?P<called_total>\d+))?' +
 r'\s+(?P<name>\S.*?)' +
 r'(?:\s+<cycle\s(?P<cycle>\d+)>)?' +
 r'\s\[(?P<index>\d+)\]$'
@@ -988,19 +988,19 @@ class GprofParser(Parser):
 _cg_child_re = _cg_parent_re
 _cg_cycle_header_re = re.compile(
 r'^\[(?P<index>\d+)\]?' +
 r'\s+(?P<percentage_time>\d+\.\d+)' +
 r'\s+(?P<self>\d+\.\d+)' +
 r'\s+(?P<descendants>\d+\.\d+)' +
 r'\s+(?:(?P<called>\d+)(?:\+(?P<called_self>\d+))?)?' +
 r'\s+<cycle\s(?P<cycle>\d+)\sas\sa\swhole>' +
 r'\s\[(\d+)\]$'
 )
 _cg_cycle_member_re = re.compile(
 r'^\s+(?P<self>\d+\.\d+)?' +
 r'\s+(?P<descendants>\d+\.\d+)?' +
 r'\s+(?P<called>\d+)(?:\+(?P<called_self>\d+))?' +
 r'\s+(?P<name>\S.*?)' +
 r'(?:\s+<cycle\s(?P<cycle>\d+)>)?' +
 r'\s\[(?P<index>\d+)\]$'
@@ -1018,7 +1018,7 @@ class GprofParser(Parser):
 line = lines.pop(0)
 if line.startswith('['):
 break
 # read function parent line
 mo = self._cg_parent_re.match(line)
 if not mo:
@@ -1039,7 +1039,7 @@ class GprofParser(Parser):
 while lines:
 line = lines.pop(0)
 # read function subroutine line
 mo = self._cg_child_re.match(line)
 if not mo:
@@ -1049,7 +1049,7 @@ class GprofParser(Parser):
 else:
 child = self.translate(mo)
 children.append(child)
 function.parents = parents
 function.children = children
@@ -1074,7 +1074,7 @@ class GprofParser(Parser):
 continue
 call = self.translate(mo)
 cycle.functions.append(call)
 self.cycles[cycle.cycle] = cycle
 def parse_cg_entry(self, lines):
@@ -1101,16 +1101,16 @@ class GprofParser(Parser):
 self.parse_cg_entry(entry_lines)
 entry_lines = []
 else:
 entry_lines.append(line)
 line = self.readline()
 def parse(self):
 self.parse_cg()
 self.fp.close()
 profile = Profile()
 profile[TIME] = 0.0
 cycles = {}
 for index in self.cycles:
 cycles[index] = Cycle()
@@ -1125,16 +1125,16 @@ class GprofParser(Parser):
 call = Call(entry.index)
 call[CALLS] = entry.called_self
 function.called += entry.called_self
 # populate the function calls
 for child in entry.children:
 call = Call(child.index)
 assert child.called is not None
 call[CALLS] = child.called
 if child.index not in self.functions:
 # NOTE: functions that were never called but were discovered by gprof's
 # static call graph analysis dont have a call graph entry so we need
 # to add them here
 missing = Function(child.index, child.name)
@@ -1150,7 +1150,7 @@ class GprofParser(Parser):
 try:
 cycle = cycles[entry.cycle]
 except KeyError:
 sys.stderr.write('warning: <cycle %u as a whole> entry missing\n' % entry.cycle)
 cycle = Cycle()
 cycles[entry.cycle] = cycle
 cycle.add_function(function)
@@ -1230,18 +1230,18 @@ class AXEParser(Parser):
 _cg_footer_re = re.compile('^Index\s+Function\s*$')
 _cg_primary_re = re.compile(
 r'^\[(?P<index>\d+)\]?' +
 r'\s+(?P<percentage_time>\d+\.\d+)' +
 r'\s+(?P<self>\d+\.\d+)' +
 r'\s+(?P<descendants>\d+\.\d+)' +
 r'\s+(?P<name>\S.*?)' +
 r'(?:\s+<cycle\s(?P<cycle>\d+)>)?' +
 r'\s+\[(\d+)\]$'
 )
 _cg_parent_re = re.compile(
 r'^\s+(?P<self>\d+\.\d+)?' +
 r'\s+(?P<descendants>\d+\.\d+)?' +
 r'\s+(?P<name>\S.*?)' +
 r'(?:\s+<cycle\s(?P<cycle>\d+)>)?' +
 r'\s+\[(?P<index>\d+)\]$'
@@ -1250,17 +1250,17 @@ class AXEParser(Parser):
 _cg_child_re = _cg_parent_re
 _cg_cycle_header_re = re.compile(
 r'^\[(?P<index>\d+)\]?' +
 r'\s+(?P<percentage_time>\d+\.\d+)' +
 r'\s+(?P<self>\d+\.\d+)' +
 r'\s+(?P<descendants>\d+\.\d+)' +
 r'\s+<cycle\s(?P<cycle>\d+)\sas\sa\swhole>' +
 r'\s+\[(\d+)\]$'
 )
 _cg_cycle_member_re = re.compile(
 r'^\s+(?P<self>\d+\.\d+)?' +
 r'\s+(?P<descendants>\d+\.\d+)?' +
 r'\s+(?P<name>\S.*?)' +
 r'(?:\s+<cycle\s(?P<cycle>\d+)>)?' +
 r'\s+\[(?P<index>\d+)\]$'
@@ -1277,7 +1277,7 @@ class AXEParser(Parser):
 line = lines.pop(0)
 if line.startswith('['):
 break
 # read function parent line
 mo = self._cg_parent_re.match(line)
 if not mo:
@@ -1297,7 +1297,7 @@ class AXEParser(Parser):
 while lines:
 line = lines.pop(0)
 # read function subroutine line
 mo = self._cg_child_re.match(line)
 if not mo:
@@ -1348,7 +1348,7 @@ class AXEParser(Parser):
 continue
 call = self.translate(mo)
 cycle.functions.append(call)
 cycle.parents = parents
 self.cycles[cycle.cycle] = cycle
@@ -1374,7 +1374,7 @@ class AXEParser(Parser):
 self.parse_cg_entry(entry_lines)
 entry_lines = []
 else:
 entry_lines.append(line)
 line = self.readline()
 def parse(self):
@@ -1384,7 +1384,7 @@ class AXEParser(Parser):
 profile = Profile()
 profile[TIME] = 0.0
 cycles = {}
 for index in self.cycles:
 cycles[index] = Cycle()
@@ -1394,7 +1394,7 @@ class AXEParser(Parser):
 function = Function(entry.index, entry.name)
 function[TIME] = entry.self
 function[TOTAL_TIME_RATIO] = entry.percentage_time / 100.0
 # populate the function calls
 for child in entry.children:
 call = Call(child.index)
@@ -1403,7 +1403,7 @@ class AXEParser(Parser):
 call[TOTAL_TIME_RATIO] = function[TOTAL_TIME_RATIO]
 if child.index not in self.functions:
 # NOTE: functions that were never called but were discovered by gprof's
 # static call graph analysis dont have a call graph entry so we need
 # to add them here
 # FIXME: Is this applicable?
@@ -1419,7 +1419,7 @@ class AXEParser(Parser):
 try:
 cycle = cycles[entry.cycle]
 except KeyError:
 sys.stderr.write('warning: <cycle %u as a whole> entry missing\n' % entry.cycle)
 cycle = Cycle()
 cycles[entry.cycle] = cycle
 cycle.add_function(function)
@@ -1447,7 +1447,7 @@ class AXEParser(Parser):
 class CallgrindParser(LineParser):
 """Parser for valgrind's callgrind tool.
 See also:
 - http://valgrind.org/docs/manual/cl-format.html
 """
@@ -1559,7 +1559,7 @@ class CallgrindParser(LineParser):
 self.parse_association_spec()
 __subpos_re = r'(0x[0-9a-fA-F]+|\d+|\+\d+|-\d+|\*)'
 _cost_re = re.compile(r'^' +
 __subpos_re + r'( +' + __subpos_re + r')*' +
 r'( +\d+)*' +
 '$')
@@ -1603,12 +1603,12 @@ class CallgrindParser(LineParser):
 events = [float(event) for event in events]
 if calls is None:
 function[SAMPLES] += events[0]
 self.profile[SAMPLES] += events[0]
 else:
 callee = self.get_callee()
 callee.called += calls
 try:
 call = function.calls[callee.id]
 except KeyError:
@@ -1670,7 +1670,7 @@ class CallgrindParser(LineParser):
 def parse_position_spec(self):
 line = self.lookahead()
 if line.startswith('jump=') or line.startswith('jcnd='):
 self.consume()
 return True
@@ -1755,14 +1755,14 @@ class CallgrindParser(LineParser):
 def get_function(self):
 module = self.positions.get('ob', '')
 filename = self.positions.get('fl', '')
 function = self.positions.get('fn', '')
 return self.make_function(module, filename, function)
 def get_callee(self):
 module = self.positions.get('cob', '')
 filename = self.positions.get('cfi', '')
 function = self.positions.get('cfn', '')
 return self.make_function(module, filename, function)
@@ -1892,7 +1892,7 @@ class PerfParser(LineParser):
 class OprofileParser(LineParser):
 """Parser for oprofile callgraph output.
 See also:
 - http://oprofile.sourceforge.net/doc/opreport.html#opreport-callgraph
 """
@@ -1921,7 +1921,7 @@ class OprofileParser(LineParser):
 self.update_subentries_dict(callers_total, callers)
 function_total.samples += function.samples
 self.update_subentries_dict(callees_total, callees)
 def update_subentries_dict(self, totals, partials):
 for partial in compat_itervalues(partials):
 try:
@@ -1930,7 +1930,7 @@ class OprofileParser(LineParser):
 totals[partial.id] = partial
 else:
 total.samples += partial.samples
 def parse(self):
 # read lookahead
 self.readline()
@@ -1942,7 +1942,7 @@ class OprofileParser(LineParser):
 profile = Profile()
 reverse_call_samples = {}
 # populate the profile
 profile[SAMPLES] = 0
 for _callers, _function, _callees in compat_itervalues(self.entries):
@@ -1965,7 +1965,7 @@ class OprofileParser(LineParser):
 call = Call(_callee.id)
 call[SAMPLES2] = _callee.samples
 function.add_call(call)
 # compute derived data
 profile.validate()
 profile.find_cycles()
@@ -2051,7 +2051,7 @@ class OprofileParser(LineParser):
 def match_primary(self):
 line = self.lookahead()
 return not line[:1].isspace()
 def match_secondary(self):
 line = self.lookahead()
 return line[:1].isspace()
@@ -2059,7 +2059,7 @@ class HProfParser(LineParser):
 class HProfParser(LineParser):
 """Parser for java hprof output
 See also:
 - http://java.sun.com/developer/technicalArticles/Programming/HPROF.html
 """
@@ -2220,7 +2220,7 @@ class SysprofParser(XmlParser):
 def build_profile(self, objects, nodes):
 profile = Profile()
 profile[SAMPLES] = 0
 for id, object in compat_iteritems(objects):
 # Ignore fake objects (process names, modules, "Everything", "kernel", etc.)
@@ -2289,7 +2289,7 @@ class XPerfParser(Parser):
 def parse(self):
 import csv
 reader = csv.reader(
 self.stream,
 delimiter = ',',
 quotechar = None,
 escapechar = None,
@@ -2304,7 +2304,7 @@ class XPerfParser(Parser):
 header = False
 else:
 self.parse_row(row)
 # compute derived data
 self.profile.validate()
 self.profile.find_cycles()
@@ -2332,7 +2332,7 @@ class XPerfParser(Parser):
 else:
 break
 fields[name] = value
 process = fields['Process Name']
 symbol = fields['Module'] + '!' + fields['Function']
 weight = fields['Weight']
@@ -2403,12 +2403,12 @@ class SleepyParser(Parser):
 self.calls = {}
 self.profile = Profile()
 _symbol_re = re.compile(
 r'^(?P<id>\w+)' +
 r'\s+"(?P<module>[^"]*)"' +
 r'\s+"(?P<procname>[^"]*)"' +
 r'\s+"(?P<sourcefile>[^"]*)"' +
 r'\s+(?P<sourceline>\d+)$'
 )
@@ -2428,7 +2428,7 @@ class SleepyParser(Parser):
 mo = self._symbol_re.match(line)
 if mo:
 symbol_id, module, procname, sourcefile, sourceline = mo.groups()
 function_id = ':'.join([module, procname])
 try:
@@ -2455,7 +2455,7 @@ class SleepyParser(Parser):
 callee[SAMPLES] += samples
 self.profile[SAMPLES] += samples
 for caller in callstack[1:]:
 try:
 call = caller.calls[callee.id]
@@ -2523,7 +2523,7 @@ class AQtimeParser(XmlParser):
 self.parse_headers()
 results = self.parse_results()
 self.element_end('AQtime_Results')
 return self.build_profile(results)
 def parse_headers(self):
 self.element_start('HEADERS')
@@ -2627,7 +2627,7 @@ class AQtimeParser(XmlParser):
 profile[TOTAL_TIME] = profile[TIME]
 profile.ratio(TOTAL_TIME_RATIO, TOTAL_TIME)
 return profile
 def build_function(self, fields):
 function = Function(self.build_id(fields), self.build_name(fields))
 function[TIME] = fields['Time']
@@ -2733,7 +2733,7 @@ class PstatsParser:
 class Theme:
 def __init__(self,
 bgcolor = (0.0, 0.0, 1.0),
 mincolor = (0.0, 0.0, 0.0),
 maxcolor = (0.0, 0.0, 1.0),
@@ -2803,10 +2803,10 @@ class Theme:
 def color(self, weight):
 weight = min(max(weight, 0.0), 1.0)
 hmin, smin, lmin = self.mincolor
 hmax, smax, lmax = self.maxcolor
 if self.skew < 0:
 raise ValueError("Skew must be greater than 0")
 elif self.skew == 1.0:
@@ -2973,10 +2973,10 @@ class DotWriter:
 weight = 0.0
 label = '\n'.join(labels)
 self.node(function.id,
 label = label,
 color = self.color(theme.node_bgcolor(weight)),
 fontcolor = self.color(theme.node_fgcolor(weight)),
 fontsize = "%.2f" % theme.node_fontsize(weight),
 )
@@ -2998,13 +2998,13 @@ class DotWriter:
 label = '\n'.join(labels)
 self.edge(function.id, call.callee_id,
 label = label,
 color = self.color(theme.edge_color(weight)),
 fontcolor = self.color(theme.edge_color(weight)),
 fontsize = "%.2f" % theme.edge_fontsize(weight),
 penwidth = "%.2f" % theme.edge_penwidth(weight),
 labeldistance = "%.2f" % theme.edge_penwidth(weight),
 arrowsize = "%.2f" % theme.edge_arrowsize(weight),
 )
@@ -3197,11 +3197,11 @@ class Main:
 self.theme = self.themes[self.options.theme]
 except KeyError:
 optparser.error('invalid colormap \'%s\'' % self.options.theme)
 # set skew on the theme now that it has been picked.
 if self.options.theme_skew:
 self.theme.skew = self.options.theme_skew
 totalMethod = self.options.totalMethod
 try:
@@ -3225,7 +3225,7 @@ class Main:
 parser = Format(self.args[0])
 self.profile = parser.parse()
 if self.options.output is None:
 self.output = sys.stdout
 else:
@@ -3245,7 +3245,7 @@ class Main:
 profile = self.profile
 profile.prune(self.options.node_thres/100.0, self.options.edge_thres/100.0)
 if self.options.root:
 rootId = profile.getFunctionId(self.options.root)
 if not rootId:

(file name not shown in this view)

@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#!/usr/bin/env python
 import os, sys, time
 keys = [['upload rate', 'x1y1', 6], ['history entries', 'x1y2', 10], ['queue', 'x1y2', 4]]

(file name not shown in this view)

@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#!/usr/bin/env python
 # Copyright Arvid Norberg 2008. Use, modification and distribution is
 # subject to the Boost Software License, Version 1.0. (See accompanying
 # file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)

(file name not shown in this view)

@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#!/usr/bin/env python
 import sys
 import os
 import time

(file name not shown in this view)

@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#!/usr/bin/env python
 import sys
 import os

tools/parse_dht_stats.py (2 changed lines, Normal file → Executable file)

@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#!/usr/bin/env python
 import sys
 import os

(file name not shown in this view)

@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#!/usr/bin/env python
 import os, sys, time

(file name not shown in this view)

@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#!/usr/bin/env python
 import os, sys, time
@@ -65,7 +65,7 @@ for l in lines:
 for i in keys:
 print '%s: avg: %f' % (i, field_sum[i] / last_t)
 print
 out.close()
 out = open('disk_buffer.gnuplot', 'wb')

(file name not shown in this view)

@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#!/usr/bin/env python
 # Copyright Arvid Norberg 2008. Use, modification and distribution is
 # subject to the Boost Software License, Version 1.0. (See accompanying
 # file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)

tools/parse_lookup_log.py (5 changed lines, Normal file → Executable file)

@@ -1,3 +1,4 @@
+#!/usr/bin/env python
 # this is meant to parse the dht_lookups.log generated by parse_dht_log.py
 import os
@@ -82,14 +83,14 @@ for l in f:
 dst = '0.0.0.0'
 if not dst in nodes:
 nodes[dst] = { 'conns': set(), 'p': p, 'c': 'blue', 's': 'circle'}
 p = calculate_pos(l[2], 25)
 dst = '255.255.255.255'
 if not dst in nodes:
 nodes[dst] = { 'conns': set(), 'p': p, 'c': 'yellow', 's': 'circle'}
 elif kind == '->':
 dst = l[3]
 if not dst in nodes:
 src = get_origin(dst)
 p = calculate_pos(l[2], int(l[1]))

(file name not shown in this view)

@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#!/usr/bin/env python
 import os, sys, time
 # usage: memory.log memory_index.log
@@ -92,7 +92,7 @@ for l in lines:
 print >>out
 cur_line = [-1] * allocation_points_to_print
 last_time = time
 size = int(l[5])
 ap = int(l[0])
 if ap in hot_ap:

tools/parse_peer_log.py (1 changed line, Normal file → Executable file)

@@ -1,4 +1,5 @@
 #!/usr/bin/env python
 import glob
 import os
 import sys

tools/parse_sample.py (4 changed lines, Normal file → Executable file)

@@ -1,3 +1,5 @@
+#!/usr/bin/env python
 import sys
 # to use this script, first run 'sample' to sample your libtorrent based process
@@ -95,7 +97,7 @@ for l in f:
 if 'BN_CTX_free' == fun: fold = indentation
 if 'cerror' == fun: fold = indentation
 if '0xffffffff' == fun: fold = indentation
 list = []
 for k in fun_samples:
 list.append((fun_samples[k], k))

(file name not shown in this view)

@@ -102,7 +102,7 @@ def gen_report(name, unit, lines, short_unit, generation, log_file, options):
 return None
 except: pass
 script = os.path.join(output_dir, '%s_%04d.gnuplot' % (name, generation))
 out = open(script, 'wb')
 print >>out, "set term png size 1200,700"
@@ -153,7 +153,7 @@ def gen_report(name, unit, lines, short_unit, generation, log_file, options):
 print >>out, 'set ylabel "%s"' % unit
 print >>out, 'set xlabel "time (s)"'
 print >>out, 'set format y "%%.1s%%c%s";' % short_unit
 print >>out, 'set style fill solid 1.0 noborder'
 print >>out, 'plot',
 column = 2
 first = True

(file name not shown in this view)

@@ -1,4 +1,4 @@
-#!/bin/python
+#!/usr/bin/env python
 # Copyright (c) 2013, Arvid Norberg
 # All rights reserved.
@@ -122,7 +122,7 @@ def save_log_file(log_name, project_name, branch_name, test_name, timestamp, dat
 sys.stdout.flush()
 def parse_tests(rev_dir):
 # this contains mappings from platforms to
 # the next layer of dictionaries. The next
 # layer contains a mapping of toolsets to
@@ -134,7 +134,7 @@ def parse_tests(rev_dir):
 # as whether it passed and the output from the
 # command
 # example:
 # {
 # darwin: {
 # clang-4.2.1: {
@@ -160,25 +160,25 @@ def parse_tests(rev_dir):
 except Exception, e:
 print '\nFAILED TO LOAD "%s": %s\n' % (f, e)
 continue
 platform = platform_toolset[0]
 toolset = platform_toolset[1]
 for cfg in j:
 test_name = cfg.split('|')[0]
 features = cfg.split('|')[1]
 if not features in tests:
 tests[features] = set()
 tests[features].add(test_name)
 if not platform in platforms:
 platforms[platform] = {}
 if not toolset in platforms[platform]:
 platforms[platform][toolset] = {}
 if not features in platforms[platform][toolset]:
 platforms[platform][toolset][features] = {}

tools/parse_utp_log.py (8 changed lines, Normal file → Executable file)

@@ -1,3 +1,5 @@
+#!/usr/bin/env python
 import os, sys, time
 # usage: parse_log.py log-file [socket-index to focus on]
@@ -15,7 +17,7 @@ if socket_filter == None:
 for l in file:
 if not 'our_delay' in l: continue
 try:
 a = l.strip().split(" ")
 socket_index = a[1][:-1]
@@ -40,7 +42,7 @@ if socket_filter == None:
 print '%s: %d' % (i[0], i[1])
 count += 1
 if count > 5: break
 file.close()
 socket_filter = items[0][0]
 print '\nfocusing on socket %s' % socket_filter
@@ -119,7 +121,7 @@ for l in file:
 continue
 # if socket_index[:2] != '0x':
 # continue
 if socket_filter != None and socket_index != socket_filter:
 continue

tools/run_benchmark.py (2 changed lines, Normal file → Executable file)

@@ -1,3 +1,5 @@
+#!/usr/bin/env python
 import os
 import time
 import shutil

tools/run_regression_tests.py (4 changed lines, Normal file → Executable file)

@@ -1,4 +1,4 @@
-#!/bin/python
+#!/usr/bin/env python
 # Copyright (c) 2013, Arvid Norberg
 # All rights reserved.
@@ -136,7 +136,7 @@ def loop():
 r = revs[0]
 print '\n\nREVISION %d ===\n' % r
 svn_up(r)
 try:
 run_tests.main(sys.argv[1:])
 last_rev = r;

(file name not shown in this view)

@@ -1,5 +1,4 @@
-#!/bin/python
+#!/usr/bin/env python
 # Copyright (c) 2013, Arvid Norberg
 # All rights reserved.
@@ -89,10 +88,10 @@ def run_tests(toolset, tests, features, options, test_dir, time_limit):
 try:
 results = {}
 feature_list = features.split(' ')
 os.chdir(test_dir)
 c = 0
 for t in tests:
 c = c + 1
@@ -118,19 +117,19 @@ def run_tests(toolset, tests, features, options, test_dir, time_limit):
 sys.stdout.write('.')
 sys.stdout.flush()
 p.wait()
 # parse out the toolset version from the xml file
 compiler = ''
 compiler_version = ''
 command = ''
 # make this parse the actual test to pick up the time
 # spent runnin the test
 try:
 dom = et.parse(xml_file)
 command = dom.find('./command').text
 prop = dom.findall('./action/properties/property')
 for a in prop:
 name = a.attrib['name']
@@ -140,14 +139,14 @@ def run_tests(toolset, tests, features, options, test_dir, time_limit):
 if name.startswith('toolset-') and name.endswith(':version'):
 compiler_version = a.text
 if compiler != '': break
 if compiler != '' and compiler_version != '':
 toolset = compiler + '-' + compiler_version
 except: pass
 r = { 'status': p.returncode, 'output': output, 'command': command }
 results[t + '|' + features] = r
 if p.returncode != 0:
 # if the build or test failed, print out the
 # important parts
@@ -367,7 +366,7 @@ def main(argv):
 print >>f, json.dumps(results)
 f.close()
 finally:
 # always restore current directory
 try:

tools/set_version.py (3 changed lines, Normal file → Executable file)

@@ -1,4 +1,5 @@
-#! /usr/bin/env python
+#!/usr/bin/env python
 import os
 import sys
 import glob

tools/update_copyright.py (3 changed lines, Normal file → Executable file)

@@ -1,4 +1,5 @@
-#! /usr/bin/env python
+#!/usr/bin/env python
 import os
 import sys
 import glob