diff --git a/bindings/python/make_torrent.py b/bindings/python/make_torrent.py
old mode 100644
new mode 100755
index 4b68ae81e..0bf96b241
--- a/bindings/python/make_torrent.py
+++ b/bindings/python/make_torrent.py
@@ -1,4 +1,4 @@
-#!/bin/python
+#!/usr/bin/env python
 import sys
 import os
diff --git a/bindings/python/rss_reader.py b/bindings/python/rss_reader.py
old mode 100644
new mode 100755
index 17a2c59fe..a8e4b72ca
--- a/bindings/python/rss_reader.py
+++ b/bindings/python/rss_reader.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python
 import sys
 import libtorrent as lt
diff --git a/docs/gen_reference_doc.py b/docs/gen_reference_doc.py
old mode 100644
new mode 100755
index 6998271f9..d7004c02b
--- a/docs/gen_reference_doc.py
+++ b/docs/gen_reference_doc.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env python
+
 import glob
 import os
 import sys
@@ -371,7 +373,7 @@ def parse_class(lno, lines, filename):
             context = ''
             if verbose: print '?? %s' % l
-    
+
     if len(name) > 0:
         print '\x1b[31mFAILED TO PARSE CLASS\x1b[0m %s\nfile: %s:%d' % (name, filename, lno)
         return [None, lno]
@@ -416,7 +418,7 @@ def parse_enum(lno, lines, filename):
         start_brace += l.count('{')
         end_brace += l.count('}')
 
-        if '{' in l: 
+        if '{' in l:
             l = l.split('{')[1]
         l = l.split('}')[0]
@@ -1019,7 +1021,7 @@ for cat in categories:
                 print >>out, '%s\n' % block.replace('\n', '\n\t')
             f['desc'] = linkify_symbols(f['desc'])
             print >>out, '%s' % f['desc']
-    
+
             print >>out, dump_link_targets()
 
     render_enums(out, c['enums'], False, '.')
@@ -1060,7 +1062,7 @@ for cat in categories:
         print >>out, linkify_symbols(f['desc'])
 
         print >>out, dump_link_targets()
-    
+
     render_enums(out, enums, True, '-')
 
     print >>out, dump_link_targets()
@@ -1084,5 +1086,5 @@ for i,o in preprocess_rst.items():
 
     out.close()
     f.close()
-    
+
diff --git a/docs/gen_settings_doc.py b/docs/gen_settings_doc.py
old mode 100644
new mode 100755
index 17a18897c..6d0a3deb1
--- a/docs/gen_settings_doc.py
+++ b/docs/gen_settings_doc.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env python
+
 f = open('../include/libtorrent/settings_pack.hpp')
 out = open('settings.rst', 'w+')
diff --git a/docs/gen_stats_doc.py b/docs/gen_stats_doc.py
old mode 100644
new mode 100755
index 39f8611a7..664ecd781
--- a/docs/gen_stats_doc.py
+++ b/docs/gen_stats_doc.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env python
+
 counter_types = {}
 
 f = open('../include/libtorrent/performance_counters.hpp')
@@ -28,7 +30,7 @@ for l in f:
     if '=' in l: l = l[:l.index('=')].strip()
     counter_types[l] = counter_type
-    
+
 f.close()
 
 f = open('../src/session_stats.cpp')
diff --git a/docs/gen_todo.py b/docs/gen_todo.py
old mode 100644
new mode 100755
index 51660d3bc..c36fb5b27
--- a/docs/gen_todo.py
+++ b/docs/gen_todo.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env python
+
 import glob
 import os
@@ -54,7 +56,7 @@ for f in files:
             prio = items[-1]['priority']
             if prio >= 0 and prio <= 4: priority_count[prio] += 1
             continue
-    
+
         if state == '':
             context.append(html_sanitize(l))
             if len(context) > 20: context.pop(0)
diff --git a/examples/run_benchmarks.py b/examples/run_benchmarks.py
old mode 100644
new mode 100755
index 65e972b69..8b8141b8e
--- a/examples/run_benchmarks.py
+++ b/examples/run_benchmarks.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env python
+
 import sys
 import os
 import resource
@@ -200,7 +202,7 @@ def prefix_len(text, prefix):
 
 def device_name(path):
     mount = subprocess.Popen('mount', stdout=subprocess.PIPE)
-    
+
     max_match_len = 0
     match_device = ''
     path = os.path.abspath(path)
@@ -310,7 +312,7 @@ def run_test(config):
     tester_output = open('session_stats/tester.output', 'w+')
     tester = subprocess.Popen(shlex.split(cmdline), stdout=tester_output)
     print 'OK'
-    
+
     time.sleep(2)
     print '\n'
diff --git a/setup.py b/setup.py
old mode 100644
new mode 100755
index 479375590..06f12e01a
--- a/setup.py
+++ b/setup.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env python
+
 import os
 os.chdir('bindings/python')
 execfile('setup.py')
diff --git a/test/http.py b/test/http.py
old mode 100644
new mode 100755
diff --git a/test/socks.py b/test/socks.py
old mode 100644
new mode 100755
index 2c6127300..2ce316d4d
--- a/test/socks.py
+++ b/test/socks.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python
 
 """Minimal non-feature complete socks proxy"""
 
diff --git a/test/web_server.py b/test/web_server.py
old mode 100644
new mode 100755
index 38e7579c9..fd1d76208
--- a/test/web_server.py
+++ b/test/web_server.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env python
+
 import BaseHTTPServer
 import SimpleHTTPServer
 import sys
diff --git a/tools/clean.py b/tools/clean.py
old mode 100644
new mode 100755
index 5a85831e0..7565978fc
--- a/tools/clean.py
+++ b/tools/clean.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env python
+
 import os
 import shutil
 import glob
@@ -31,7 +33,7 @@ def clean():
         'Jamfile.rej',
         'Jamfile.orig',
     ]
-    
+
     directories = [
         'examples',
         'test',
@@ -47,7 +49,7 @@ def clean():
         os.path.join('bindings', 'c'),
         os.path.join('bindings', os.path.join('c', 'src'))
     ]
-    
+
     for d in directories:
         for f in to_delete:
             path = os.path.join(d, f)
@@ -62,7 +64,7 @@ def clean():
                 print p
             except Exception, e:
                 print p, e
-    
+
 if __name__ == "__main__":
     clean()
-    
+
diff --git a/tools/dht_flood.py b/tools/dht_flood.py
index b9f6b98da..9e3336cd3 100755
--- a/tools/dht_flood.py
+++ b/tools/dht_flood.py
@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#!/usr/bin/env python
 import socket
 import sys
diff --git a/tools/gprof2dot.py b/tools/gprof2dot.py
old mode 100644
new mode 100755
index e33bde44c..303495afd
--- a/tools/gprof2dot.py
+++ b/tools/gprof2dot.py
@@ -98,7 +98,7 @@ def ratio(numerator, denominator):
 
 class UndefinedEvent(Exception):
     """Raised when attempting to get an event which is undefined."""
-    
+
     def __init__(self, event):
         Exception.__init__(self)
         self.event = event
@@ -130,7 +130,7 @@ class Event(object):
         assert val1 is not None
         assert val2 is not None
         return self._aggregator(val1, val2)
-    
+
     def format(self, val):
         """Format an event value."""
         assert val is not None
@@ -176,13 +176,13 @@ class Object(object):
 
     def __contains__(self, event):
         return event in self.events
-    
+
     def __getitem__(self, event):
         try:
             return self.events[event]
         except KeyError:
             raise UndefinedEvent(event)
-    
+
     def __setitem__(self, event, value):
         if value is None:
             if event in self.events:
@@ -193,7 +193,7 @@ class Object(object):
 
 class Call(Object):
     """A call between functions.
-    
+
     There should be at most one call object for every pair of functions.
     """
 
@@ -217,7 +217,7 @@ class Function(Object):
         self.called = None
         self.weight = None
         self.cycle = None
-    
+
     def add_call(self, call):
         if call.callee_id in self.calls:
             sys.stderr.write('warning: overwriting call from function %s to %s\n' % (str(self.id), str(call.callee_id)))
@@ -374,7 +374,7 @@ class Profile(Object):
                 if self.functions[f].name == funcName:
                     return f
         return False
-    
+
     def _tarjan(self, function, order, stack, orders, lowlinks, visited):
         """Tarjan's strongly connected components algorithm.
@@ -466,7 +466,7 @@ class Profile(Object):
                     if call.callee_id != function.id:
                         assert call.ratio is not None
 
-        # Aggregate the input for each cycle 
+        # Aggregate the input for each cycle
         for cycle in self.cycles:
             total = inevent.null()
             for function in compat_itervalues(self.functions):
@@ -491,7 +491,7 @@ class Profile(Object):
                     total += self._integrate_call(call, outevent, inevent)
             function[outevent] = total
         return function[outevent]
-    
+
     def _integrate_call(self, call, outevent, inevent):
         assert outevent not in call
         assert call.ratio is not None
@@ -513,7 +513,7 @@ class Profile(Object):
                         subtotal += self._integrate_call(call, outevent, inevent)
                 total += subtotal
             cycle[outevent] = total
-        
+
         # Compute the time propagated to callers of this cycle
         callees = {}
         for function in compat_itervalues(self.functions):
@@ -525,7 +525,7 @@ class Profile(Object):
                         callees[callee] += call.ratio
                     except KeyError:
                         callees[callee] = call.ratio
-        
+
         for member in cycle.functions:
             member[outevent] = outevent.null()
@@ -626,11 +626,11 @@ class Profile(Object):
 
                 if TOTAL_TIME_RATIO in call:
                     # handle exact cases first
-                    call.weight = call[TOTAL_TIME_RATIO] 
+                    call.weight = call[TOTAL_TIME_RATIO]
                 else:
                     try:
                         # make a safe estimate
-                        call.weight = min(function[TOTAL_TIME_RATIO], callee[TOTAL_TIME_RATIO]) 
+                        call.weight = min(function[TOTAL_TIME_RATIO], callee[TOTAL_TIME_RATIO])
                     except UndefinedEvent:
                         pass
@@ -647,7 +647,7 @@ class Profile(Object):
                 call = function.calls[callee_id]
                 if callee_id not in self.functions or call.weight is not None and call.weight < edge_thres:
                     del function.calls[callee_id]
-    
+
     def dump(self):
         for function in compat_itervalues(self.functions):
             sys.stderr.write('Function %s:\n' % (function.name,))
@@ -674,7 +674,7 @@ class Struct:
         if attrs is None:
             attrs = {}
         self.__dict__['_attrs'] = attrs
-    
+
     def __getattr__(self, name):
         try:
             return self._attrs[name]
@@ -689,7 +689,7 @@ class Struct:
 
     def __repr__(self):
         return repr(self._attrs)
-    
+
 
 class ParseError(Exception):
     """Raised when parsing to signal mismatches."""
@@ -715,7 +715,7 @@ class Parser:
 
     def parse(self):
         raise NotImplementedError
-    
+
 
 class LineParser(Parser):
     """Base class for parsers that read line-based formats."""
@@ -790,21 +790,21 @@ class XmlTokenizer:
         self.index = 0
         self.final = False
         self.skip_ws = skip_ws
-        
+
         self.character_pos = 0, 0
         self.character_data = ''
-        
+
         self.parser = xml.parsers.expat.ParserCreate()
         self.parser.StartElementHandler = self.handle_element_start
         self.parser.EndElementHandler = self.handle_element_end
         self.parser.CharacterDataHandler = self.handle_character_data
-    
+
     def handle_element_start(self, name, attributes):
         self.finish_character_data()
         line, column = self.pos()
         token = XmlToken(XML_ELEMENT_START, name, attributes, line, column)
         self.tokens.append(token)
-    
+
     def handle_element_end(self, name):
         self.finish_character_data()
         line, column = self.pos()
@@ -815,15 +815,15 @@ class XmlTokenizer:
         if not self.character_data:
             self.character_pos = self.pos()
         self.character_data += data
-    
+
     def finish_character_data(self):
         if self.character_data:
-            if not self.skip_ws or not self.character_data.isspace(): 
+            if not self.skip_ws or not self.character_data.isspace():
                 line, column = self.character_pos
                 token = XmlToken(XML_CHARACTER_DATA, self.character_data, None, line, column)
                 self.tokens.append(token)
             self.character_data = ''
-    
+
     def next(self):
         size = 16*1024
         while self.index >= len(self.tokens) and not self.final:
@@ -868,13 +868,13 @@ class XmlParser(Parser):
         Parser.__init__(self)
         self.tokenizer = XmlTokenizer(fp)
         self.consume()
-    
+
     def consume(self):
         self.token = self.tokenizer.next()
 
     def match_element_start(self, name):
         return self.token.type == XML_ELEMENT_START and self.token.name_or_data == name
-    
+
     def match_element_end(self, name):
         return self.token.type == XML_ELEMENT_END and self.token.name_or_data == name
@@ -888,7 +888,7 @@ class XmlParser(Parser):
             attrs = self.token.attrs
         self.consume()
         return attrs
-    
+
     def element_end(self, name):
         while self.token.type == XML_CHARACTER_DATA:
             self.consume()
@@ -966,20 +966,20 @@ class GprofParser(Parser):
     )
 
     _cg_primary_re = re.compile(
-        r'^\[(?P\d+)\]?' + 
-        r'\s+(?P\d+\.\d+)' + 
-        r'\s+(?P\d+\.\d+)' + 
-        r'\s+(?P\d+\.\d+)' + 
-        r'\s+(?:(?P\d+)(?:\+(?P\d+))?)?' + 
+        r'^\[(?P\d+)\]?' +
+        r'\s+(?P\d+\.\d+)' +
+        r'\s+(?P\d+\.\d+)' +
+        r'\s+(?P\d+\.\d+)' +
+        r'\s+(?:(?P\d+)(?:\+(?P\d+))?)?' +
         r'\s+(?P\S.*?)' +
         r'(?:\s+\d+)>)?' +
         r'\s\[(\d+)\]$'
     )
 
     _cg_parent_re = re.compile(
-        r'^\s+(?P\d+\.\d+)?' + 
-        r'\s+(?P\d+\.\d+)?' + 
-        r'\s+(?P\d+)(?:/(?P\d+))?' + 
+        r'^\s+(?P\d+\.\d+)?' +
+        r'\s+(?P\d+\.\d+)?' +
+        r'\s+(?P\d+)(?:/(?P\d+))?' +
         r'\s+(?P\S.*?)' +
         r'(?:\s+\d+)>)?' +
         r'\s\[(?P\d+)\]$'
     )
@@ -988,19 +988,19 @@ class GprofParser(Parser):
     _cg_child_re = _cg_parent_re
 
     _cg_cycle_header_re = re.compile(
-        r'^\[(?P\d+)\]?' + 
-        r'\s+(?P\d+\.\d+)' + 
-        r'\s+(?P\d+\.\d+)' + 
-        r'\s+(?P\d+\.\d+)' + 
-        r'\s+(?:(?P\d+)(?:\+(?P\d+))?)?' + 
+        r'^\[(?P\d+)\]?' +
+        r'\s+(?P\d+\.\d+)' +
+        r'\s+(?P\d+\.\d+)' +
+        r'\s+(?P\d+\.\d+)' +
+        r'\s+(?:(?P\d+)(?:\+(?P\d+))?)?' +
         r'\s+\d+)\sas\sa\swhole>' +
         r'\s\[(\d+)\]$'
     )
 
     _cg_cycle_member_re = re.compile(
-        r'^\s+(?P\d+\.\d+)?' + 
-        r'\s+(?P\d+\.\d+)?' + 
-        r'\s+(?P\d+)(?:\+(?P\d+))?' + 
+        r'^\s+(?P\d+\.\d+)?' +
+        r'\s+(?P\d+\.\d+)?' +
+        r'\s+(?P\d+)(?:\+(?P\d+))?' +
         r'\s+(?P\S.*?)' +
         r'(?:\s+\d+)>)?' +
         r'\s\[(?P\d+)\]$'
@@ -1018,7 +1018,7 @@ class GprofParser(Parser):
             line = lines.pop(0)
             if line.startswith('['):
                 break
-        
+
             # read function parent line
             mo = self._cg_parent_re.match(line)
             if not mo:
@@ -1039,7 +1039,7 @@ class GprofParser(Parser):
 
         while lines:
             line = lines.pop(0)
-            
+
             # read function subroutine line
             mo = self._cg_child_re.match(line)
             if not mo:
@@ -1049,7 +1049,7 @@ class GprofParser(Parser):
             else:
                 child = self.translate(mo)
                 children.append(child)
-        
+
         function.parents = parents
         function.children = children
@@ -1074,7 +1074,7 @@ class GprofParser(Parser):
                 continue
             call = self.translate(mo)
             cycle.functions.append(call)
-        
+
         self.cycles[cycle.cycle] = cycle
 
     def parse_cg_entry(self, lines):
@@ -1101,16 +1101,16 @@ class GprofParser(Parser):
                 self.parse_cg_entry(entry_lines)
                 entry_lines = []
             else:
-                entry_lines.append(line) 
+                entry_lines.append(line)
             line = self.readline()
-    
+
     def parse(self):
         self.parse_cg()
         self.fp.close()
 
         profile = Profile()
         profile[TIME] = 0.0
-        
+
         cycles = {}
         for index in self.cycles:
             cycles[index] = Cycle()
@@ -1125,16 +1125,16 @@ class GprofParser(Parser):
                 call = Call(entry.index)
                 call[CALLS] = entry.called_self
                 function.called += entry.called_self
-            
+
             # populate the function calls
             for child in entry.children:
                 call = Call(child.index)
-                
+
                 assert child.called is not None
                 call[CALLS] = child.called
 
                 if child.index not in self.functions:
-                    # NOTE: functions that were never called but were discovered by gprof's 
+                    # NOTE: functions that were never called but were discovered by gprof's
                     # static call graph analysis dont have a call graph entry so we need
                     # to add them here
                     missing = Function(child.index, child.name)
@@ -1150,7 +1150,7 @@ class GprofParser(Parser):
             try:
                 cycle = cycles[entry.cycle]
             except KeyError:
-                sys.stderr.write('warning: entry missing\n' % entry.cycle) 
+                sys.stderr.write('warning: entry missing\n' % entry.cycle)
                 cycle = Cycle()
                 cycles[entry.cycle] = cycle
             cycle.add_function(function)
@@ -1230,18 +1230,18 @@ class AXEParser(Parser):
     _cg_footer_re = re.compile('^Index\s+Function\s*$')
 
     _cg_primary_re = re.compile(
-        r'^\[(?P\d+)\]?' + 
-        r'\s+(?P\d+\.\d+)' + 
-        r'\s+(?P\d+\.\d+)' + 
-        r'\s+(?P\d+\.\d+)' + 
+        r'^\[(?P\d+)\]?' +
+        r'\s+(?P\d+\.\d+)' +
+        r'\s+(?P\d+\.\d+)' +
+        r'\s+(?P\d+\.\d+)' +
         r'\s+(?P\S.*?)' +
         r'(?:\s+\d+)>)?' +
         r'\s+\[(\d+)\]$'
     )
 
     _cg_parent_re = re.compile(
-        r'^\s+(?P\d+\.\d+)?' + 
-        r'\s+(?P\d+\.\d+)?' + 
+        r'^\s+(?P\d+\.\d+)?' +
+        r'\s+(?P\d+\.\d+)?' +
         r'\s+(?P\S.*?)' +
         r'(?:\s+\d+)>)?' +
         r'\s+\[(?P\d+)\]$'
@@ -1250,17 +1250,17 @@ class AXEParser(Parser):
     _cg_child_re = _cg_parent_re
 
     _cg_cycle_header_re = re.compile(
-        r'^\[(?P\d+)\]?' + 
-        r'\s+(?P\d+\.\d+)' + 
-        r'\s+(?P\d+\.\d+)' + 
-        r'\s+(?P\d+\.\d+)' + 
+        r'^\[(?P\d+)\]?' +
+        r'\s+(?P\d+\.\d+)' +
+        r'\s+(?P\d+\.\d+)' +
+        r'\s+(?P\d+\.\d+)' +
         r'\s+\d+)\sas\sa\swhole>' +
         r'\s+\[(\d+)\]$'
     )
 
     _cg_cycle_member_re = re.compile(
-        r'^\s+(?P\d+\.\d+)?' + 
-        r'\s+(?P\d+\.\d+)?' + 
+        r'^\s+(?P\d+\.\d+)?' +
+        r'\s+(?P\d+\.\d+)?' +
         r'\s+(?P\S.*?)' +
         r'(?:\s+\d+)>)?' +
         r'\s+\[(?P\d+)\]$'
@@ -1277,7 +1277,7 @@ class AXEParser(Parser):
             line = lines.pop(0)
             if line.startswith('['):
                 break
-        
+
             # read function parent line
             mo = self._cg_parent_re.match(line)
             if not mo:
@@ -1297,7 +1297,7 @@ class AXEParser(Parser):
 
         while lines:
             line = lines.pop(0)
-            
+
             # read function subroutine line
             mo = self._cg_child_re.match(line)
             if not mo:
@@ -1348,7 +1348,7 @@ class AXEParser(Parser):
                 continue
             call = self.translate(mo)
             cycle.functions.append(call)
-        
+
         cycle.parents = parents
         self.cycles[cycle.cycle] = cycle
@@ -1374,7 +1374,7 @@ class AXEParser(Parser):
                 self.parse_cg_entry(entry_lines)
                 entry_lines = []
             else:
-                entry_lines.append(line) 
+                entry_lines.append(line)
             line = self.readline()
 
     def parse(self):
@@ -1384,7 +1384,7 @@ class AXEParser(Parser):
 
         profile = Profile()
         profile[TIME] = 0.0
-        
+
         cycles = {}
         for index in self.cycles:
             cycles[index] = Cycle()
@@ -1394,7 +1394,7 @@ class AXEParser(Parser):
             function = Function(entry.index, entry.name)
             function[TIME] = entry.self
             function[TOTAL_TIME_RATIO] = entry.percentage_time / 100.0
-            
+
             # populate the function calls
             for child in entry.children:
                 call = Call(child.index)
@@ -1403,7 +1403,7 @@ class AXEParser(Parser):
                 call[TOTAL_TIME_RATIO] = function[TOTAL_TIME_RATIO]
 
                 if child.index not in self.functions:
-                    # NOTE: functions that were never called but were discovered by gprof's 
+                    # NOTE: functions that were never called but were discovered by gprof's
                     # static call graph analysis dont have a call graph entry so we need
                     # to add them here
                     # FIXME: Is this applicable?
@@ -1419,7 +1419,7 @@ class AXEParser(Parser):
             try:
                 cycle = cycles[entry.cycle]
             except KeyError:
-                sys.stderr.write('warning: entry missing\n' % entry.cycle) 
+                sys.stderr.write('warning: entry missing\n' % entry.cycle)
                 cycle = Cycle()
                 cycles[entry.cycle] = cycle
             cycle.add_function(function)
@@ -1447,7 +1447,7 @@ class AXEParser(Parser):
 
 class CallgrindParser(LineParser):
     """Parser for valgrind's callgrind tool.
-    
+
     See also:
     - http://valgrind.org/docs/manual/cl-format.html
     """
@@ -1559,7 +1559,7 @@ class CallgrindParser(LineParser):
             self.parse_association_spec()
 
     __subpos_re = r'(0x[0-9a-fA-F]+|\d+|\+\d+|-\d+|\*)'
-    _cost_re = re.compile(r'^' + 
+    _cost_re = re.compile(r'^' +
         __subpos_re + r'( +' + __subpos_re + r')*' +
         r'( +\d+)*' +
         '$')
@@ -1603,12 +1603,12 @@ class CallgrindParser(LineParser):
         events = [float(event) for event in events]
 
         if calls is None:
-            function[SAMPLES] += events[0] 
+            function[SAMPLES] += events[0]
             self.profile[SAMPLES] += events[0]
         else:
             callee = self.get_callee()
             callee.called += calls
-    
+
             try:
                 call = function.calls[callee.id]
             except KeyError:
@@ -1670,7 +1670,7 @@ class CallgrindParser(LineParser):
 
     def parse_position_spec(self):
         line = self.lookahead()
-        
+
         if line.startswith('jump=') or line.startswith('jcnd='):
             self.consume()
             return True
@@ -1755,14 +1755,14 @@ class CallgrindParser(LineParser):
 
     def get_function(self):
         module = self.positions.get('ob', '')
-        filename = self.positions.get('fl', '') 
-        function = self.positions.get('fn', '') 
+        filename = self.positions.get('fl', '')
+        function = self.positions.get('fn', '')
         return self.make_function(module, filename, function)
 
     def get_callee(self):
         module = self.positions.get('cob', '')
-        filename = self.positions.get('cfi', '') 
-        function = self.positions.get('cfn', '') 
+        filename = self.positions.get('cfi', '')
+        function = self.positions.get('cfn', '')
         return self.make_function(module, filename, function)
 
@@ -1892,7 +1892,7 @@ class PerfParser(LineParser):
 
 class OprofileParser(LineParser):
     """Parser for oprofile callgraph output.
-    
+
     See also:
     - http://oprofile.sourceforge.net/doc/opreport.html#opreport-callgraph
     """
@@ -1921,7 +1921,7 @@ class OprofileParser(LineParser):
         self.update_subentries_dict(callers_total, callers)
         function_total.samples += function.samples
         self.update_subentries_dict(callees_total, callees)
-    
+
     def update_subentries_dict(self, totals, partials):
         for partial in compat_itervalues(partials):
             try:
@@ -1930,7 +1930,7 @@ class OprofileParser(LineParser):
                 totals[partial.id] = partial
             else:
                 total.samples += partial.samples
-        
+
     def parse(self):
         # read lookahead
         self.readline()
@@ -1942,7 +1942,7 @@ class OprofileParser(LineParser):
 
         profile = Profile()
         reverse_call_samples = {}
-        
+
         # populate the profile
         profile[SAMPLES] = 0
         for _callers, _function, _callees in compat_itervalues(self.entries):
@@ -1965,7 +1965,7 @@ class OprofileParser(LineParser):
                     call = Call(_callee.id)
                     call[SAMPLES2] = _callee.samples
                     function.add_call(call)
-                
+
         # compute derived data
         profile.validate()
         profile.find_cycles()
@@ -2051,7 +2051,7 @@ class OprofileParser(LineParser):
     def match_primary(self):
         line = self.lookahead()
        return not line[:1].isspace()
-    
+
     def match_secondary(self):
         line = self.lookahead()
         return line[:1].isspace()
@@ -2059,7 +2059,7 @@ class OprofileParser(LineParser):
 
 class HProfParser(LineParser):
     """Parser for java hprof output
-    
+
     See also:
     - http://java.sun.com/developer/technicalArticles/Programming/HPROF.html
     """
@@ -2220,7 +2220,7 @@ class SysprofParser(XmlParser):
 
     def build_profile(self, objects, nodes):
         profile = Profile()
-        
+
         profile[SAMPLES] = 0
         for id, object in compat_iteritems(objects):
             # Ignore fake objects (process names, modules, "Everything", "kernel", etc.)
@@ -2289,7 +2289,7 @@ class XPerfParser(Parser):
     def parse(self):
         import csv
         reader = csv.reader(
-            self.stream, 
+            self.stream,
             delimiter = ',',
             quotechar = None,
             escapechar = None,
@@ -2304,7 +2304,7 @@ class XPerfParser(Parser):
                 header = False
             else:
                 self.parse_row(row)
-                
+
         # compute derived data
         self.profile.validate()
         self.profile.find_cycles()
@@ -2332,7 +2332,7 @@ class XPerfParser(Parser):
                 else:
                     break
             fields[name] = value
-            
+
         process = fields['Process Name']
         symbol = fields['Module'] + '!' + fields['Function']
         weight = fields['Weight']
@@ -2403,12 +2403,12 @@ class SleepyParser(Parser):
         self.calls = {}
 
         self.profile = Profile()
-    
+
     _symbol_re = re.compile(
-        r'^(?P\w+)' + 
-        r'\s+"(?P[^"]*)"' + 
-        r'\s+"(?P[^"]*)"' + 
-        r'\s+"(?P[^"]*)"' + 
+        r'^(?P\w+)' +
+        r'\s+"(?P[^"]*)"' +
+        r'\s+"(?P[^"]*)"' +
+        r'\s+"(?P[^"]*)"' +
         r'\s+(?P\d+)$'
     )
@@ -2428,7 +2428,7 @@ class SleepyParser(Parser):
             mo = self._symbol_re.match(line)
             if mo:
                 symbol_id, module, procname, sourcefile, sourceline = mo.groups()
-                    
+
                 function_id = ':'.join([module, procname])
 
                 try:
@@ -2455,7 +2455,7 @@ class SleepyParser(Parser):
 
             callee[SAMPLES] += samples
             self.profile[SAMPLES] += samples
-            
+
             for caller in callstack[1:]:
                 try:
                     call = caller.calls[callee.id]
@@ -2523,7 +2523,7 @@ class AQtimeParser(XmlParser):
         self.parse_headers()
         results = self.parse_results()
         self.element_end('AQtime_Results')
-        return self.build_profile(results) 
+        return self.build_profile(results)
 
     def parse_headers(self):
         self.element_start('HEADERS')
@@ -2627,7 +2627,7 @@ class AQtimeParser(XmlParser):
         profile[TOTAL_TIME] = profile[TIME]
         profile.ratio(TOTAL_TIME_RATIO, TOTAL_TIME)
         return profile
-    
+
     def build_function(self, fields):
         function = Function(self.build_id(fields), self.build_name(fields))
         function[TIME] = fields['Time']
@@ -2733,7 +2733,7 @@ class PstatsParser:
 
 class Theme:
 
-    def __init__(self, 
+    def __init__(self,
             bgcolor = (0.0, 0.0, 1.0),
             mincolor = (0.0, 0.0, 0.0),
             maxcolor = (0.0, 0.0, 1.0),
@@ -2803,10 +2803,10 @@ class Theme:
 
     def color(self, weight):
         weight = min(max(weight, 0.0), 1.0)
-    
+
         hmin, smin, lmin = self.mincolor
         hmax, smax, lmax = self.maxcolor
-        
+
         if self.skew < 0:
             raise ValueError("Skew must be greater than 0")
         elif self.skew == 1.0:
@@ -2973,10 +2973,10 @@ class DotWriter:
             weight = 0.0
 
         label = '\n'.join(labels)
-        self.node(function.id, 
-            label = label, 
-            color = self.color(theme.node_bgcolor(weight)), 
-            fontcolor = self.color(theme.node_fgcolor(weight)), 
+        self.node(function.id,
+            label = label,
+            color = self.color(theme.node_bgcolor(weight)),
+            fontcolor = self.color(theme.node_fgcolor(weight)),
             fontsize = "%.2f" % theme.node_fontsize(weight),
         )
@@ -2998,13 +2998,13 @@ class DotWriter:
 
             label = '\n'.join(labels)
-            self.edge(function.id, call.callee_id, 
-                label = label, 
-                color = self.color(theme.edge_color(weight)), 
+            self.edge(function.id, call.callee_id,
+                label = label,
+                color = self.color(theme.edge_color(weight)),
                 fontcolor = self.color(theme.edge_color(weight)),
-                fontsize = "%.2f" % theme.edge_fontsize(weight), 
-                penwidth = "%.2f" % theme.edge_penwidth(weight), 
-                labeldistance = "%.2f" % theme.edge_penwidth(weight), 
+                fontsize = "%.2f" % theme.edge_fontsize(weight),
+                penwidth = "%.2f" % theme.edge_penwidth(weight),
+                labeldistance = "%.2f" % theme.edge_penwidth(weight),
                 arrowsize = "%.2f" % theme.edge_arrowsize(weight),
             )
@@ -3197,11 +3197,11 @@ class Main:
             self.theme = self.themes[self.options.theme]
         except KeyError:
             optparser.error('invalid colormap \'%s\'' % self.options.theme)
-        
+
         # set skew on the theme now that it has been picked.
         if self.options.theme_skew:
             self.theme.skew = self.options.theme_skew
-        
+
         totalMethod = self.options.totalMethod
 
         try:
@@ -3225,7 +3225,7 @@ class Main:
 
         parser = Format(self.args[0])
         self.profile = parser.parse()
-        
+
         if self.options.output is None:
             self.output = sys.stdout
         else:
@@ -3245,7 +3245,7 @@ class Main:
         profile = self.profile
         profile.prune(self.options.node_thres/100.0, self.options.edge_thres/100.0)
-    
+
         if self.options.root:
             rootId = profile.getFunctionId(self.options.root)
             if not rootId:
diff --git a/tools/parse_bandwidth_log.py b/tools/parse_bandwidth_log.py
index e2403290e..2e144dea0 100755
--- a/tools/parse_bandwidth_log.py
+++ b/tools/parse_bandwidth_log.py
@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#!/usr/bin/env python
 import os, sys, time
 
 keys = [['upload rate', 'x1y1', 6], ['history entries', 'x1y2', 10], ['queue', 'x1y2', 4]]
diff --git a/tools/parse_buffer_log.py b/tools/parse_buffer_log.py
index 451d873b3..9d3257d38 100755
--- a/tools/parse_buffer_log.py
+++ b/tools/parse_buffer_log.py
@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#!/usr/bin/env python
 # Copyright Arvid Norberg 2008. Use, modification and distribution is
 # subject to the Boost Software License, Version 1.0. (See accompanying
 # file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
diff --git a/tools/parse_dht_log.py b/tools/parse_dht_log.py
index a3124f81f..83717373d 100755
--- a/tools/parse_dht_log.py
+++ b/tools/parse_dht_log.py
@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#!/usr/bin/env python
 import sys
 import os
 import time
diff --git a/tools/parse_dht_rtt.py b/tools/parse_dht_rtt.py
index 454f30ccd..0548f1152 100755
--- a/tools/parse_dht_rtt.py
+++ b/tools/parse_dht_rtt.py
@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#!/usr/bin/env python
 import sys
 import os
diff --git a/tools/parse_dht_stats.py b/tools/parse_dht_stats.py
old mode 100644
new mode 100755
index 388350710..15edd0863
--- a/tools/parse_dht_stats.py
+++ b/tools/parse_dht_stats.py
@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#!/usr/bin/env python
 import sys
 import os
diff --git a/tools/parse_disk_access.py b/tools/parse_disk_access.py
index f4e3155f4..e88946425 100755
--- a/tools/parse_disk_access.py
+++ b/tools/parse_disk_access.py
@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#!/usr/bin/env python
 import os, sys, time
 
diff --git a/tools/parse_disk_buffer_log.py b/tools/parse_disk_buffer_log.py
index 509703fa1..e4640348f 100755
--- a/tools/parse_disk_buffer_log.py
+++ b/tools/parse_disk_buffer_log.py
@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#!/usr/bin/env python
 import os, sys, time
 
@@ -65,7 +65,7 @@ for l in lines:
 for i in keys:
     print '%s: avg: %f' % (i, field_sum[i] / last_t)
 print
-    
+
 out.close()
 
 out = open('disk_buffer.gnuplot', 'wb')
diff --git a/tools/parse_disk_log.py b/tools/parse_disk_log.py
index 1dc5d5b53..f0987467c 100755
--- a/tools/parse_disk_log.py
+++ b/tools/parse_disk_log.py
@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#!/usr/bin/env python
 # Copyright Arvid Norberg 2008. Use, modification and distribution is
 # subject to the Boost Software License, Version 1.0. (See accompanying
 # file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
diff --git a/tools/parse_lookup_log.py b/tools/parse_lookup_log.py
old mode 100644
new mode 100755
index 4db5e1de5..ed44c928d
--- a/tools/parse_lookup_log.py
+++ b/tools/parse_lookup_log.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python
 # this is meant to parse the dht_lookups.log generated by parse_dht_log.py
 
 import os
@@ -82,14 +83,14 @@ for l in f:
         dst = '0.0.0.0'
         if not dst in nodes:
             nodes[dst] = { 'conns': set(), 'p': p, 'c': 'blue', 's': 'circle'}
-    
+
         p = calculate_pos(l[2], 25)
         dst = '255.255.255.255'
         if not dst in nodes:
             nodes[dst] = { 'conns': set(), 'p': p, 'c': 'yellow', 's': 'circle'}
     elif kind == '->':
         dst = l[3]
-    
+
         if not dst in nodes:
             src = get_origin(dst)
             p = calculate_pos(l[2], int(l[1]))
diff --git a/tools/parse_memory_log.py b/tools/parse_memory_log.py
index f7a619458..17749d49d 100755
--- a/tools/parse_memory_log.py
+++ b/tools/parse_memory_log.py
@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#!/usr/bin/env python
 import os, sys, time
 
 # usage: memory.log memory_index.log
@@ -92,7 +92,7 @@ for l in lines:
         print >>out
         cur_line = [-1] * allocation_points_to_print
         last_time = time
-    
+
     size = int(l[5])
     ap = int(l[0])
     if ap in hot_ap:
diff --git a/tools/parse_peer_log.py b/tools/parse_peer_log.py
old mode 100644
new mode 100755
index c087fdc4a..45dc1a6e6
--- a/tools/parse_peer_log.py
+++ b/tools/parse_peer_log.py
@@ -1,4 +1,5 @@
 #!/usr/bin/env python
+
 import glob
 import os
 import sys
diff --git a/tools/parse_sample.py b/tools/parse_sample.py
old mode 100644
new mode 100755
index afb254a78..3961914f0
--- a/tools/parse_sample.py
+++ b/tools/parse_sample.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env python
+
 import sys
 
 # to use this script, first run 'sample' to sample your libtorrent based process
@@ -95,7 +97,7 @@ for l in f:
     if 'BN_CTX_free' == fun: fold = indentation
     if 'cerror' == fun: fold = indentation
     if '0xffffffff' == fun: fold = indentation
-    
+
 list = []
 for k in fun_samples:
     list.append((fun_samples[k], k))
diff --git a/tools/parse_session_stats.py b/tools/parse_session_stats.py
index 7f759e306..b07ca776f 100755
--- a/tools/parse_session_stats.py
+++ b/tools/parse_session_stats.py
@@ -102,7 +102,7 @@ def gen_report(name, unit, lines, short_unit, generation, log_file, options):
             return None
     except: pass
-    
+
     script = os.path.join(output_dir, '%s_%04d.gnuplot' % (name, generation))
     out = open(script, 'wb')
     print >>out, "set term png size 1200,700"
@@ -153,7 +153,7 @@ def gen_report(name, unit, lines, short_unit, generation, log_file, options):
         print >>out, 'set ylabel "%s"' % unit
         print >>out, 'set xlabel "time (s)"'
         print >>out, 'set format y "%%.1s%%c%s";' % short_unit
-        print >>out, 'set style fill solid 1.0 noborder' 
+        print >>out, 'set style fill solid 1.0 noborder'
         print >>out, 'plot',
         column = 2
         first = True
diff --git a/tools/parse_test_results.py b/tools/parse_test_results.py
index 3cce5a07c..2d19880c3 100755
--- a/tools/parse_test_results.py
+++ b/tools/parse_test_results.py
@@ -1,4 +1,4 @@
-#!/bin/python
+#!/usr/bin/env python
 
 # Copyright (c) 2013, Arvid Norberg
 # All rights reserved.
@@ -122,7 +122,7 @@ def save_log_file(log_name, project_name, branch_name, test_name, timestamp, dat
     sys.stdout.flush()
 
 def parse_tests(rev_dir):
-    
+
     # this contains mappings from platforms to
     # the next layer of dictionaries. The next
     # layer contains a mapping of toolsets to
@@ -134,7 +134,7 @@ def parse_tests(rev_dir):
     # as whether it passed and the output from the
     # command
     # example:
-    
+
     # {
     #   darwin: {
     #     clang-4.2.1: {
@@ -160,25 +160,25 @@ def parse_tests(rev_dir):
         except Exception, e:
             print '\nFAILED TO LOAD "%s": %s\n' % (f, e)
             continue
-    
+
         platform = platform_toolset[0]
         toolset = platform_toolset[1]
-    
+
         for cfg in j:
             test_name = cfg.split('|')[0]
             features = cfg.split('|')[1]
-    
+
             if not features in tests:
                 tests[features] = set()
-    
+
             tests[features].add(test_name)
-    
+
             if not platform in platforms:
                 platforms[platform] = {}
-    
+
             if not toolset in platforms[platform]:
                 platforms[platform][toolset] = {}
-    
+
             if not features in platforms[platform][toolset]:
                 platforms[platform][toolset][features] = {}
diff --git a/tools/parse_utp_log.py b/tools/parse_utp_log.py
old mode 100644
new mode 100755
index aaeaa222e..788ddc261
--- a/tools/parse_utp_log.py
+++ b/tools/parse_utp_log.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env python
+
 import os, sys, time
 
 # usage: parse_log.py log-file [socket-index to focus on]
@@ -15,7 +17,7 @@ if socket_filter == None:
     for l in file:
         if not 'our_delay' in l:
             continue
-    
+
         try:
             a = l.strip().split(" ")
             socket_index = a[1][:-1]
@@ -40,7 +42,7 @@ if socket_filter == None:
         print '%s: %d' % (i[0], i[1])
         count += 1
         if count > 5: break
-    
+
     file.close()
     socket_filter = items[0][0]
     print '\nfocusing on socket %s' % socket_filter
@@ -119,7 +121,7 @@ for l in file:
         continue
 
 #   if socket_index[:2] != '0x':
 #       continue
-    
+
     if socket_filter != None and socket_index != socket_filter:
         continue
diff --git a/tools/run_benchmark.py b/tools/run_benchmark.py
old mode 100644
new mode 100755
index 2e1bebce5..2b68e2109
--- a/tools/run_benchmark.py
+++ b/tools/run_benchmark.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env python
+
 import os
 import time
 import shutil
diff --git a/tools/run_regression_tests.py b/tools/run_regression_tests.py
old mode 100644
new mode 100755
index 98a1461ef..e3f0007f0
--- a/tools/run_regression_tests.py
+++ b/tools/run_regression_tests.py
@@ -1,4 +1,4 @@
-#!/bin/python
+#!/usr/bin/env python
 
 # Copyright (c) 2013, Arvid Norberg
 # All rights reserved.
@@ -136,7 +136,7 @@ def loop():
         r = revs[0]
 
         print '\n\nREVISION %d ===\n' % r
         svn_up(r)
-    
+
         try:
             run_tests.main(sys.argv[1:])
             last_rev = r;
diff --git a/tools/run_tests.py b/tools/run_tests.py
index 6277c3c6a..e6eae4b6c 100755
--- a/tools/run_tests.py
+++ b/tools/run_tests.py
@@ -1,5 +1,4 @@
-
-#!/bin/python
+#!/usr/bin/env python
 
 # Copyright (c) 2013, Arvid Norberg
 # All rights reserved.
@@ -89,10 +88,10 @@ def run_tests(toolset, tests, features, options, test_dir, time_limit):
 
     try:
         results = {}
-    
+
         feature_list = features.split(' ')
         os.chdir(test_dir)
-    
+
         c = 0
         for t in tests:
             c = c + 1
@@ -118,19 +117,19 @@ def run_tests(toolset, tests, features, options, test_dir, time_limit):
                 sys.stdout.write('.')
                 sys.stdout.flush()
             p.wait()
-    
+
             # parse out the toolset version from the xml file
             compiler = ''
             compiler_version = ''
             command = ''
-    
+
             # make this parse the actual test to pick up the time
             # spent runnin the test
             try:
                 dom = et.parse(xml_file)
-    
+
                 command = dom.find('./command').text
-    
+
                 prop = dom.findall('./action/properties/property')
                 for a in prop:
                     name = a.attrib['name']
@@ -140,14 +139,14 @@ def run_tests(toolset, tests, features, options, test_dir, time_limit):
                     if name.startswith('toolset-') and name.endswith(':version'):
                         compiler_version = a.text
                     if compiler != '': break
-    
+
                 if compiler != '' and compiler_version != '':
                     toolset = compiler + '-' + compiler_version
             except: pass
-    
+
             r = { 'status': p.returncode, 'output': output, 'command': command }
             results[t + '|' + features] = r
-    
+
             if p.returncode != 0:
                 # if the build or test failed, print out the
                 # important parts
@@ -367,7 +366,7 @@ def main(argv):
             print >>f, json.dumps(results)
             f.close()
-    
+
     finally:
         # always restore current directory
         try:
diff --git a/tools/set_version.py b/tools/set_version.py
old mode 100644
new mode 100755
index 1153fa6a1..88cc10a4d
--- a/tools/set_version.py
+++ b/tools/set_version.py
@@ -1,4 +1,5 @@
-#! /usr/bin/env python
+#!/usr/bin/env python
+
 import os
 import sys
 import glob
diff --git a/tools/update_copyright.py b/tools/update_copyright.py
old mode 100644
new mode 100755
index 2a8bdea03..f3b7e3f93
--- a/tools/update_copyright.py
+++ b/tools/update_copyright.py
@@ -1,4 +1,5 @@
-#! /usr/bin/env python
+#!/usr/bin/env python
+
 import os
 import sys
 import glob