diff --git a/.travis.yml b/.travis.yml index 890b4b3ef..4073f92d9 100644 --- a/.travis.yml +++ b/.travis.yml @@ -5,7 +5,7 @@ matrix: fast_finish: true include: - if: repo = arvidn/libtorrent - env: variant=release sonar_scan=1 toolset=gcc + env: variant=release sonar_scan=1 toolset=gcc pylint=1 - env: variant=test_debug lint=1 tests=1 toolset=gcc-sanitizer - env: variant=test_debug sim=1 crypto=openssl toolset=gcc-sanitizer - env: variant=test_release coverage=1 tests=1 toolset=gcc-coverage python=1 @@ -46,6 +46,7 @@ addons: - python2.7-dev - g++-5 - [cmake3, ninja-build] + - python3-pip before_install: @@ -79,7 +80,6 @@ before_install: - ulimit -a install: - - touch ~/user-config.jam - 'if [[ $toolset == "gcc" ]]; then g++-5 --version; @@ -130,7 +130,12 @@ install: - 'echo "using python : 2.7 ;" >> ~/user-config.jam' - if [ "$docs" == "1" ]; then rst2html.py --version; fi - 'if [ "$lint" == "1" ]; then curl "https://raw.githubusercontent.com/google/styleguide/71ec7f1e524969c19ce33cfc72e8e023f2b98ee2/cpplint/cpplint.py" >~/cpplint.py; fi' - + - 'if [ "$pylint" == "1" ]; then + sudo pip install flake8; + flake8 --version; + sudo pip3 install flake8; + python3 -m flake8 --version; + fi' - 'if [ $sonar_scan == "1" ]; then wget https://sonarsource.bintray.com/Distribution/sonar-scanner-cli/sonar-scanner-2.6.1.zip; wget https://sonarqube.com/static/cpp/build-wrapper-linux-x86.zip; @@ -183,7 +188,10 @@ script: - 'if [ "$lint" == "1" ]; then python ~/cpplint.py --extensions=cpp --headers=hpp --filter=-,+runtime/explicit,+whitespace/end_of_line --linelength=90 test/*.{cpp,hpp} src/*.cpp include/libtorrent/*.hpp include/libtorrent/kademlia/*.hpp src/kademlia/*.cpp include/libtorrent/aux_/*.hpp include/libtorrent/extensions/*.hpp simulation/*.{cpp,hpp} tools/*.{cpp,hpp} examples/*.{cpp,hpp}; fi' - + - 'if [ "$pylint" == "1" ]; then + flake8 --max-line-length=120; + python3 -m flake8 --max-line-length=120; + fi' - 'if [ "$sonar_scan" == "1" ]; then build-wrapper-linux-x86-64 
--out-dir bw-output bjam -a -j3 optimization=off crypto=$crypto deprecated-functions=off $toolset variant=$variant -l300 && sonar-scanner -D sonar.login=$SONAR_TOKEN; diff --git a/bindings/python/client.py b/bindings/python/client.py index 4613bb9c6..26f6d36f8 100755 --- a/bindings/python/client.py +++ b/bindings/python/client.py @@ -3,7 +3,7 @@ # Copyright Daniel Wallin 2006. Use, modification and distribution is # subject to the Boost Software License, Version 1.0. (See accompanying # file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) -from __future__ import print_function + import sys import atexit @@ -153,6 +153,7 @@ def print_download_queue(console, download_queue): write_line(console, out) + def add_torrent(ses, filename, options): atp = lt.add_torrent_params() if filename.startswith('magnet:'): @@ -160,8 +161,8 @@ def add_torrent(ses, filename, options): else: atp.ti = lt.torrent_info(filename) try: - at.resume_data = open(os.path.join(options.save_path, info.name() + '.fastresume'), 'rb').read() - except: + atp.resume_data = open(os.path.join(options.save_path, atp.info.name() + '.fastresume'), 'rb').read() + except BaseException: pass atp.save_path = options.save_path @@ -171,6 +172,7 @@ def add_torrent(ses, filename, options): | lt.torrent_flags.duplicate_is_error ses.async_add_torrent(atp) + def main(): from optparse import OptionParser @@ -225,12 +227,13 @@ def main(): if options.max_download_rate <= 0: options.max_download_rate = -1 - settings = { 'user_agent': 'python_client/' + lt.__version__, + settings = { + 'user_agent': 'python_client/' + lt.__version__, 'listen_interfaces': '%s:%d' % (options.listen_interface, options.port), 'download_rate_limit': int(options.max_download_rate), 'upload_rate_limit': int(options.max_upload_rate), 'alert_mask': lt.alert.category_t.all_categories, - 'outgoing_interfaces' : options.outgoing_interface + 'outgoing_interfaces': options.outgoing_interface, } if options.proxy_host != '': @@ -258,16 
+261,16 @@ def main(): out = '' - for h,t in torrents.items(): + for h, t in torrents.items(): out += 'name: %-40s\n' % t.name[:40] if t.state != lt.torrent_status.seeding: - state_str = ['queued', 'checking', 'downloading metadata', \ - 'downloading', 'finished', 'seeding', \ + state_str = ['queued', 'checking', 'downloading metadata', + 'downloading', 'finished', 'seeding', 'allocating', 'checking fastresume'] out += state_str[t.state] + ' ' - out += '%5.4f%% ' % (t.progress*100) + out += '%5.4f%% ' % (t.progress * 100) out += progress_bar(t.progress, 49) out += '\n' @@ -300,7 +303,7 @@ def main(): out += progress_bar(p / float(f.size), 20) out += ' ' + f.path + '\n' write_line(console, out) - except: + except BaseException: pass write_line(console, 76 * '-' + '\n') @@ -312,7 +315,7 @@ def main(): alerts_log.append(a.message()) # add new torrents to our list of torrent_status - if type(a) == lt.add_torrent_alert: + if isinstance(a, lt.add_torrent_alert): h = a.handle h.set_max_connections(60) h.set_max_uploads(-1) @@ -320,7 +323,7 @@ def main(): # update our torrent_status array for torrents that have # changed some of their state - if type(a) == lt.state_update_alert: + if isinstance(a, lt.state_update_alert): for s in a.status: torrents[s.handle] = s @@ -333,19 +336,23 @@ def main(): c = console.sleep_and_input(0.5) ses.post_torrent_updates() - if not c: continue + if not c: + continue if c == 'r': - for h in torrents.keys(): h.force_reannounce() + for h in torrents: + h.force_reannounce() elif c == 'q': alive = False elif c == 'p': - for h in torrents.keys(): h.pause() + for h in torrents: + h.pause() elif c == 'u': - for h in torrents.keys(): h.resume() + for h in torrents: + h.resume() ses.pause() - for h,t in torrents.items(): + for h, t in torrents.items(): if not h.is_valid() or not t.has_metadata: continue h.save_resume_data() @@ -353,7 +360,7 @@ def main(): while len(torrents) > 0: alerts = ses.pop_alerts() for a in alerts: - if type(a) == 
lt.save_resume_data_alert: + if isinstance(a, lt.save_resume_data_alert): print(a) data = lt.write_resume_data_buf(a.params) h = a.handle @@ -361,11 +368,12 @@ def main(): open(os.path.join(options.save_path, torrents[h].name + '.fastresume'), 'wb').write(data) del torrents[h] - if type(a) == lt.save_resume_data_failed_alert: + if isinstance(a, lt.save_resume_data_failed_alert): h = a.handle if h in torrents: print('failed to save resume data for ', torrents[h].name) del torrents[h] time.sleep(0.5) + main() diff --git a/bindings/python/make_torrent.py b/bindings/python/make_torrent.py index e1cb6e99e..a5c9137c3 100755 --- a/bindings/python/make_torrent.py +++ b/bindings/python/make_torrent.py @@ -1,5 +1,5 @@ #!/usr/bin/env python -from __future__ import print_function + import sys import os @@ -22,8 +22,8 @@ parent_input = os.path.split(input)[0] # if we have a single file, use it because os.walk does not work on a single files if os.path.isfile(input): - size = os.path.getsize(input) - fs.add_file(input, size) + size = os.path.getsize(input) + fs.add_file(input, size) for root, dirs, files in os.walk(input): # skip directories starting with . 
@@ -39,7 +39,7 @@ for root, dirs, files in os.walk(input): if f == 'Thumbs.db': continue - fname = os.path.join(root[len(parent_input)+1:], f) + fname = os.path.join(root[len(parent_input) + 1:], f) size = os.path.getsize(os.path.join(parent_input, fname)) print('%10d kiB %s' % (size / 1024, fname)) fs.add_file(fname, size) diff --git a/bindings/python/setup.py b/bindings/python/setup.py index 5a0816800..aadf4cd46 100644 --- a/bindings/python/setup.py +++ b/bindings/python/setup.py @@ -1,5 +1,5 @@ #!/usr/bin/env python -from __future__ import print_function + from distutils.core import setup, Extension from distutils.sysconfig import get_config_vars @@ -55,13 +55,13 @@ def target_specific(): try: with open('compile_flags') as _file: extra_cmd = _file.read() -except: +except BaseException: extra_cmd = None try: with open('link_flags') as _file: ldflags = _file.read() -except: +except BaseException: ldflags = None # this is to pull out compiler arguments from the CXX flags set up by the @@ -75,7 +75,7 @@ try: while len(cmd) > 0 and not cmd[0].startswith('-'): cmd = cmd[1:] extra_cmd += ' '.join(cmd) -except: +except BaseException: pass ext = None @@ -85,7 +85,7 @@ if '--bjam' in sys.argv: del sys.argv[sys.argv.index('--bjam')] if '--help' not in sys.argv \ - and '--help-commands' not in sys.argv: + and '--help-commands' not in sys.argv: toolset = '' file_ext = '.so' @@ -129,14 +129,22 @@ if '--bjam' in sys.argv: print('build failed') sys.exit(1) - try: os.mkdir('build') - except: pass - try: shutil.rmtree('build/lib') - except: pass - try: os.mkdir('build/lib') - except: pass - try: os.mkdir('libtorrent') - except: pass + try: + os.mkdir('build') + except BaseException: + pass + try: + shutil.rmtree('build/lib') + except BaseException: + pass + try: + os.mkdir('build/lib') + except BaseException: + pass + try: + os.mkdir('libtorrent') + except BaseException: + pass shutil.copyfile('libtorrent' + file_ext, 'build/lib/libtorrent' + file_ext) @@ -145,13 +153,12 @@ if 
'--bjam' in sys.argv: else: # Remove '-Wstrict-prototypes' compiler option, which isn't valid for C++. cfg_vars = get_config_vars() - for key, value in cfg_vars.items(): + for key, value in list(cfg_vars.items()): if isinstance(value, str): cfg_vars[key] = value.replace('-Wstrict-prototypes', '') - source_list = os.listdir(os.path.join(os.path.dirname(__file__), "src")) - source_list = [os.path.abspath(os.path.join(os.path.dirname(__file__), - "src", s)) for s in source_list if s.endswith(".cpp")] + src_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "src")) + source_list = [os.path.join(src_dir, s) for s in os.listdir(src_dir) if s.endswith(".cpp")] if extra_cmd: flags = flags_parser() diff --git a/bindings/python/simple_client.py b/bindings/python/simple_client.py index 9b0854d19..fb6fdea48 100755 --- a/bindings/python/simple_client.py +++ b/bindings/python/simple_client.py @@ -8,7 +8,7 @@ import libtorrent as lt import time import sys -ses = lt.session({'listen_interfaces':'0.0.0.0:6881'}) +ses = lt.session({'listen_interfaces': '0.0.0.0:6881'}) info = lt.torrent_info(sys.argv[1]) h = ses.add_torrent({'ti': info, 'save_path': '.'}) diff --git a/bindings/python/test.py b/bindings/python/test.py index 2ce13d55b..1487f48e1 100644 --- a/bindings/python/test.py +++ b/bindings/python/test.py @@ -1,7 +1,6 @@ #!/usr/bin/env python # vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4 -from __future__ import print_function import libtorrent as lt @@ -13,7 +12,6 @@ import shutil import binascii import subprocess as sub import sys -import inspect import pickle import threading @@ -29,6 +27,7 @@ settings = { 'enable_dht': False, 'enable_lsd': False, 'enable_natpmp': False, 'enable_upnp': False, 'listen_interfaces': '0.0.0.0:0', 'file_pool_size': 1} + class test_create_torrent(unittest.TestCase): def test_from_torrent_info(self): @@ -47,10 +46,10 @@ class test_create_torrent(unittest.TestCase): class test_session_stats(unittest.TestCase): def test_unique(self): 
- l = lt.session_stats_metrics() - self.assertTrue(len(l) > 40) + metrics = lt.session_stats_metrics() + self.assertTrue(len(metrics) > 40) idx = set() - for m in l: + for m in metrics: self.assertTrue(m.value_index not in idx) idx.add(m.value_index) @@ -118,7 +117,6 @@ class test_torrent_handle(unittest.TestCase): self.assertEqual(len(torrents), 1) self.assertEqual(torrents[self.h], 'bar') - def test_replace_trackers(self): self.setup() trackers = [] @@ -141,20 +139,18 @@ class test_torrent_handle(unittest.TestCase): tracker.fail_limit = 1 trackers = [tracker] self.h.replace_trackers(trackers) - tracker_list = [tracker for tracker in self.h.trackers()] # wait a bit until the endpoints list gets populated - while len(tracker_list[0]['endpoints']) == 0: + while len(self.h.trackers()[0]['endpoints']) == 0: time.sleep(0.1) - tracker_list = [tracker for tracker in self.h.trackers()] - pickled_trackers = pickle.dumps(tracker_list) + pickled_trackers = pickle.dumps(self.h.trackers()) unpickled_trackers = pickle.loads(pickled_trackers) self.assertEqual(unpickled_trackers[0]['url'], 'udp://tracker1.com') self.assertEqual(unpickled_trackers[0]['endpoints'][0]['last_error']['value'], 0) def test_file_status(self): self.setup() - l = self.h.file_status() - print(l) + status = self.h.file_status() + print(status) def test_piece_deadlines(self): self.setup() @@ -165,11 +161,10 @@ class test_torrent_handle(unittest.TestCase): # time, wait for next full second to prevent second increment time.sleep(1 - datetime.datetime.now().microsecond / 1000000.0) - sessionStart = datetime.datetime.now().replace(microsecond=0) self.setup() st = self.h.status() for attr in dir(st): - print('%s: %s' % (attr, getattr(st, attr))) + print('%s: %s' % (attr, getattr(st, attr))) # last upload and download times are at session start time self.assertEqual(st.last_upload, None) self.assertEqual(st.last_download, None) @@ -177,7 +172,7 @@ class test_torrent_handle(unittest.TestCase): def 
test_serialize_trackers(self): """Test to ensure the dict contains only python built-in types""" self.setup() - self.h.add_tracker({'url':'udp://tracker1.com'}) + self.h.add_tracker({'url': 'udp://tracker1.com'}) tr = self.h.trackers()[0] # wait a bit until the endpoints list gets populated while len(tr['endpoints']) == 0: @@ -215,7 +210,7 @@ class test_torrent_handle(unittest.TestCase): ses = lt.session(settings) h = ses.add_torrent(tp) for attr in dir(tp): - print('%s: %s' % (attr, getattr(tp, attr))) + print('%s: %s' % (attr, getattr(tp, attr))) h.connect_peer(('3.3.3.3', 3)) @@ -246,33 +241,34 @@ class test_torrent_handle(unittest.TestCase): def test_torrent_parameter(self): self.ses = lt.session(settings) - self.ti = lt.torrent_info('url_seed_multi.torrent'); + self.ti = lt.torrent_info('url_seed_multi.torrent') self.h = self.ses.add_torrent({ 'ti': self.ti, 'save_path': os.getcwd(), 'trackers': ['http://test.com/announce'], 'dht_nodes': [('1.2.3.4', 6881), ('4.3.2.1', 6881)], - 'file_priorities': [1,1], + 'file_priorities': [1, 1], 'http_seeds': ['http://test.com/file3'], 'url_seeds': ['http://test.com/announce-url'], 'peers': [('5.6.7.8', 6881)], 'banned_peers': [('8.7.6.5', 6881)], - 'renamed_files': { 0: 'test.txt', 2: 'test.txt' } - }) + 'renamed_files': {0: 'test.txt', 2: 'test.txt'} + }) self.st = self.h.status() self.assertEqual(self.st.save_path, os.getcwd()) - trackers = self.h.trackers(); + trackers = self.h.trackers() self.assertEqual(len(trackers), 1) self.assertEqual(trackers[0].get('url'), 'http://test.com/announce') self.assertEqual(trackers[0].get('tier'), 0) - self.assertEqual(self.h.get_file_priorities(), [1,1]) - self.assertEqual(self.h.http_seeds(),['http://test.com/file3']) + self.assertEqual(self.h.get_file_priorities(), [1, 1]) + self.assertEqual(self.h.http_seeds(), ['http://test.com/file3']) # url_seeds was already set, test that it did not got overwritten self.assertEqual(self.h.url_seeds(), - ['http://test.com/announce-url/', 
'http://test.com/file/']) - self.assertEqual(self.h.get_piece_priorities(),[4]) - self.assertEqual(self.ti.merkle_tree(),[]) - self.assertEqual(self.st.verified_pieces,[]) + ['http://test.com/announce-url/', 'http://test.com/file/']) + self.assertEqual(self.h.get_piece_priorities(), [4]) + self.assertEqual(self.ti.merkle_tree(), []) + self.assertEqual(self.st.verified_pieces, []) + class test_torrent_info(unittest.TestCase): @@ -328,13 +324,13 @@ class test_torrent_info(unittest.TestCase): os.path.join('temp', 'foo')) idx += 1 - def test_announce_entry(self): ae = lt.announce_entry('test') - self.assertEquals(ae.url, 'test') - self.assertEquals(ae.tier, 0) - self.assertEquals(ae.verified, False) - self.assertEquals(ae.source, 0) + self.assertEqual(ae.url, 'test') + self.assertEqual(ae.tier, 0) + self.assertEqual(ae.verified, False) + self.assertEqual(ae.source, 0) + class test_alerts(unittest.TestCase): @@ -350,7 +346,7 @@ class test_alerts(unittest.TestCase): alerts = ses.pop_alerts() for a in alerts: if a.what() == 'add_torrent_alert': - self.assertEquals(a.torrent_name, 'temp') + self.assertEqual(a.torrent_name, 'temp') print(a.message()) for field_name in dir(a): if field_name.startswith('__'): @@ -429,10 +425,11 @@ class test_bencoder(unittest.TestCase): class test_sha1hash(unittest.TestCase): def test_sha1hash(self): - h = 'a0'*20 + h = 'a0' * 20 s = lt.sha1_hash(binascii.unhexlify(h)) self.assertEqual(h, str(s)) + class test_magnet_link(unittest.TestCase): def test_parse_magnet_uri(self): @@ -453,70 +450,72 @@ class test_magnet_link(unittest.TestCase): h = ses.add_torrent(p) self.assertEqual(str(h.info_hash()), '178882f042c0c33426a6d81e0333ece346e68a68') + class test_peer_class(unittest.TestCase): - def test_peer_class_ids(self): - s = lt.session(settings) + def test_peer_class_ids(self): + s = lt.session(settings) - print('global_peer_class_id:', lt.session.global_peer_class_id) - print('tcp_peer_class_id:', lt.session.tcp_peer_class_id) - 
print('local_peer_class_id:', lt.session.local_peer_class_id) + print('global_peer_class_id:', lt.session.global_peer_class_id) + print('tcp_peer_class_id:', lt.session.tcp_peer_class_id) + print('local_peer_class_id:', lt.session.local_peer_class_id) - print('global: ', s.get_peer_class(s.global_peer_class_id)) - print('tcp: ', s.get_peer_class(s.local_peer_class_id)) - print('local: ', s.get_peer_class(s.local_peer_class_id)) + print('global: ', s.get_peer_class(s.global_peer_class_id)) + print('tcp: ', s.get_peer_class(s.local_peer_class_id)) + print('local: ', s.get_peer_class(s.local_peer_class_id)) - def test_peer_class(self): - s = lt.session(settings) + def test_peer_class(self): + s = lt.session(settings) - c = s.create_peer_class('test class') - print('new class: ', s.get_peer_class(c)) + c = s.create_peer_class('test class') + print('new class: ', s.get_peer_class(c)) - nfo = s.get_peer_class(c) - self.assertEqual(nfo['download_limit'], 0) - self.assertEqual(nfo['upload_limit'], 0) - self.assertEqual(nfo['ignore_unchoke_slots'], False) - self.assertEqual(nfo['connection_limit_factor'], 100) - self.assertEqual(nfo['download_priority'], 1) - self.assertEqual(nfo['upload_priority'], 1) - self.assertEqual(nfo['label'], 'test class') + nfo = s.get_peer_class(c) + self.assertEqual(nfo['download_limit'], 0) + self.assertEqual(nfo['upload_limit'], 0) + self.assertEqual(nfo['ignore_unchoke_slots'], False) + self.assertEqual(nfo['connection_limit_factor'], 100) + self.assertEqual(nfo['download_priority'], 1) + self.assertEqual(nfo['upload_priority'], 1) + self.assertEqual(nfo['label'], 'test class') - nfo['download_limit'] = 1337 - nfo['upload_limit'] = 1338 - nfo['ignore_unchoke_slots'] = True - nfo['connection_limit_factor'] = 42 - nfo['download_priority'] = 2 - nfo['upload_priority'] = 3 + nfo['download_limit'] = 1337 + nfo['upload_limit'] = 1338 + nfo['ignore_unchoke_slots'] = True + nfo['connection_limit_factor'] = 42 + nfo['download_priority'] = 2 + 
nfo['upload_priority'] = 3 - s.set_peer_class(c, nfo) + s.set_peer_class(c, nfo) - nfo2 = s.get_peer_class(c) - self.assertEqual(nfo, nfo2) + nfo2 = s.get_peer_class(c) + self.assertEqual(nfo, nfo2) - def test_peer_class_filter(self): - filt = lt.peer_class_type_filter() - filt.add(lt.peer_class_type_filter.tcp_socket, lt.session.global_peer_class_id); - filt.remove(lt.peer_class_type_filter.utp_socket, lt.session.local_peer_class_id); + def test_peer_class_filter(self): + filt = lt.peer_class_type_filter() + filt.add(lt.peer_class_type_filter.tcp_socket, lt.session.global_peer_class_id) + filt.remove(lt.peer_class_type_filter.utp_socket, lt.session.local_peer_class_id) - filt.disallow(lt.peer_class_type_filter.tcp_socket, lt.session.global_peer_class_id); - filt.allow(lt.peer_class_type_filter.utp_socket, lt.session.local_peer_class_id); + filt.disallow(lt.peer_class_type_filter.tcp_socket, lt.session.global_peer_class_id) + filt.allow(lt.peer_class_type_filter.utp_socket, lt.session.local_peer_class_id) + + def test_peer_class_ip_filter(self): + s = lt.session(settings) + s.set_peer_class_type_filter(lt.peer_class_type_filter()) + s.set_peer_class_filter(lt.ip_filter()) - def test_peer_class_ip_filter(self): - s = lt.session(settings) - s.set_peer_class_type_filter(lt.peer_class_type_filter()) - s.set_peer_class_filter(lt.ip_filter()) class test_session(unittest.TestCase): def test_add_torrent(self): s = lt.session(settings) - h = s.add_torrent({'ti': lt.torrent_info('base.torrent'), - 'save_path': '.', - 'dht_nodes': [('1.2.3.4', 6881), ('4.3.2.1', 6881)], - 'http_seeds': ['http://test.com/seed'], - 'peers': [('5.6.7.8', 6881)], - 'banned_peers': [('8.7.6.5', 6881)], - 'file_priorities': [1,1,1,2,0]}) + s.add_torrent({'ti': lt.torrent_info('base.torrent'), + 'save_path': '.', + 'dht_nodes': [('1.2.3.4', 6881), ('4.3.2.1', 6881)], + 'http_seeds': ['http://test.com/seed'], + 'peers': [('5.6.7.8', 6881)], + 'banned_peers': [('8.7.6.5', 6881)], + 'file_priorities': 
[1, 1, 1, 2, 0]}) def test_apply_settings(self): @@ -565,10 +564,9 @@ class test_session(unittest.TestCase): self.assertTrue(isinstance(a.active_requests, list)) self.assertTrue(isinstance(a.routing_table, list)) - def test_unknown_settings(self): try: - s = lt.session({'unexpected-key-name': 42}) + lt.session({'unexpected-key-name': 42}) self.assertFalse('should have thrown an exception') except KeyError as e: print(e) @@ -606,52 +604,52 @@ class test_example_client(unittest.TestCase): my_stdin = slave_fd process = sub.Popen( - [sys.executable,"client.py","url_seed_multi.torrent"], + [sys.executable, "client.py", "url_seed_multi.torrent"], stdin=my_stdin, stdout=sub.PIPE, stderr=sub.PIPE) # python2 has no Popen.wait() timeout time.sleep(5) returncode = process.poll() - if returncode == None: + if returncode is None: # this is an expected use-case process.kill() err = process.stderr.read().decode("utf-8") self.assertEqual('', err, 'process throw errors: \n' + err) # check error code if process did unexpected end - if returncode != None: + if returncode is not None: # in case of error return: output stdout if nothing was on stderr if returncode != 0: print("stdout:\n" + process.stdout.read().decode("utf-8")) self.assertEqual(returncode, 0, "returncode: " + str(returncode) + "\n" - + "stderr: empty\n" - + "some configuration does not output errors like missing module members," - + "try to call it manually to get the error message\n") + + "stderr: empty\n" + + "some configuration does not output errors like missing module members," + + "try to call it manually to get the error message\n") def test_execute_simple_client(self): process = sub.Popen( - [sys.executable,"simple_client.py","url_seed_multi.torrent"], + [sys.executable, "simple_client.py", "url_seed_multi.torrent"], stdout=sub.PIPE, stderr=sub.PIPE) # python2 has no Popen.wait() timeout time.sleep(5) returncode = process.poll() - if returncode == None: + if returncode is None: # this is an expected use-case 
process.kill() err = process.stderr.read().decode("utf-8") self.assertEqual('', err, 'process throw errors: \n' + err) # check error code if process did unexpected end - if returncode != None: + if returncode is not None: # in case of error return: output stdout if nothing was on stderr if returncode != 0: print("stdout:\n" + process.stdout.read().decode("utf-8")) self.assertEqual(returncode, 0, "returncode: " + str(returncode) + "\n" - + "stderr: empty\n" - + "some configuration does not output errors like missing module members," - + "try to call it manually to get the error message\n") + + "stderr: empty\n" + + "some configuration does not output errors like missing module members," + + "try to call it manually to get the error message\n") def test_execute_make_torrent(self): process = sub.Popen( - [sys.executable,"make_torrent.py","url_seed_multi.torrent", - "http://test.com/test"], stdout=sub.PIPE, stderr=sub.PIPE) + [sys.executable, "make_torrent.py", "url_seed_multi.torrent", + "http://test.com/test"], stdout=sub.PIPE, stderr=sub.PIPE) returncode = process.wait() # python2 has no Popen.wait() timeout err = process.stderr.read().decode("utf-8") @@ -660,15 +658,16 @@ class test_example_client(unittest.TestCase): if returncode != 0: print("stdout:\n" + process.stdout.read().decode("utf-8")) self.assertEqual(returncode, 0, "returncode: " + str(returncode) + "\n" - + "stderr: empty\n" - + "some configuration does not output errors like missing module members," - + "try to call it manually to get the error message\n") + + "stderr: empty\n" + + "some configuration does not output errors like missing module members," + + "try to call it manually to get the error message\n") def test_default_settings(self): default = lt.default_settings() print(default) + class test_operation_t(unittest.TestCase): def test_enum(self): @@ -678,6 +677,7 @@ class test_operation_t(unittest.TestCase): self.assertEqual(lt.operation_name(lt.operation_t.partfile_write), "partfile_write") 
self.assertEqual(lt.operation_name(lt.operation_t.hostname_lookup), "hostname_lookup") + if __name__ == '__main__': print(lt.__version__) shutil.copy(os.path.join('..', '..', 'test', 'test_torrents', diff --git a/docs/gen_reference_doc.py b/docs/gen_reference_doc.py index db83ea8f7..84665c0b8 100644 --- a/docs/gen_reference_doc.py +++ b/docs/gen_reference_doc.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +from __future__ import print_function import glob import os @@ -9,18 +10,18 @@ dump = '--dump' in sys.argv internal = '--internal' in sys.argv plain_output = '--plain-output' in sys.argv if plain_output: - plain_file = open('plain_text_out.txt', 'w+') + plain_file = open('plain_text_out.txt', 'w+') in_code = None paths = ['include/libtorrent/*.hpp', 'include/libtorrent/kademlia/*.hpp', 'include/libtorrent/extensions/*.hpp'] if internal: - paths.append('include/libtorrent/aux_/*.hpp') + paths.append('include/libtorrent/aux_/*.hpp') files = [] for p in paths: - files.extend(glob.glob(os.path.join('..', p))) + files.extend(glob.glob(os.path.join('..', p))) functions = [] classes = [] @@ -35,690 +36,784 @@ symbols = {} # some files that need pre-processing to turn symbols into # links into the reference documentation preprocess_rst = \ -{ - 'manual.rst':'manual-ref.rst', - 'upgrade_to_1.2.rst':'upgrade_to_1.2-ref.rst', - 'settings.rst':'settings-ref.rst' -} + { + 'manual.rst': 'manual-ref.rst', + 'upgrade_to_1.2.rst': 'upgrade_to_1.2-ref.rst', + 'settings.rst': 'settings-ref.rst' + } # some pre-defined sections from the main manual symbols = \ -{ - "queuing_": "manual-ref.html#queuing", - "fast-resume_": "manual-ref.html#fast-resume", - "storage-allocation_": "manual-ref.html#storage-allocation", - "alerts_": "manual-ref.html#alerts", - "upnp-and-nat-pmp_": "manual-ref.html#upnp-and-nat-pmp", - "http-seeding_": "manual-ref.html#http-seeding", - "metadata-from-peers_": "manual-ref.html#metadata-from-peers", - "magnet-links_": "manual-ref.html#magnet-links", - 
"ssl-torrents_": "manual-ref.html#ssl-torrents", - "dynamic-loading-of-torrent-files_": "manual-ref.html#dynamic-loading-of-torrent-files", - "session-statistics_": "manual-ref.html#session-statistics", - "peer-classes_": "manual-ref.html#peer-classes" -} + { + "queuing_": "manual-ref.html#queuing", + "fast-resume_": "manual-ref.html#fast-resume", + "storage-allocation_": "manual-ref.html#storage-allocation", + "alerts_": "manual-ref.html#alerts", + "upnp-and-nat-pmp_": "manual-ref.html#upnp-and-nat-pmp", + "http-seeding_": "manual-ref.html#http-seeding", + "metadata-from-peers_": "manual-ref.html#metadata-from-peers", + "magnet-links_": "manual-ref.html#magnet-links", + "ssl-torrents_": "manual-ref.html#ssl-torrents", + "dynamic-loading-of-torrent-files_": "manual-ref.html#dynamic-loading-of-torrent-files", + "session-statistics_": "manual-ref.html#session-statistics", + "peer-classes_": "manual-ref.html#peer-classes" + } static_links = \ -{ - ".. _`BEP 3`: https://bittorrent.org/beps/bep_0003.html", - ".. _`BEP 17`: https://bittorrent.org/beps/bep_0017.html", - ".. _`BEP 19`: https://bittorrent.org/beps/bep_0019.html" -} + { + ".. _`BEP 3`: https://bittorrent.org/beps/bep_0003.html", + ".. _`BEP 17`: https://bittorrent.org/beps/bep_0017.html", + ".. 
_`BEP 19`: https://bittorrent.org/beps/bep_0019.html" + } anon_index = 0 category_mapping = { - 'ed25519.hpp': 'ed25519', - 'session.hpp': 'Core', - 'add_torrent_params.hpp': 'Core', - 'session_status.hpp': 'Core', - 'error_code.hpp': 'Error Codes', - 'storage.hpp': 'Custom Storage', - 'storage_defs.hpp': 'Storage', - 'file_storage.hpp': 'Storage', - 'file_pool.hpp': 'Custom Storage', - 'extensions.hpp': 'Plugins', - 'ut_metadata.hpp': 'Plugins', - 'ut_pex.hpp': 'Plugins', - 'ut_trackers.hpp': 'Plugins', - 'smart_ban.hpp': 'Plugins', - 'create_torrent.hpp': 'Create Torrents', - 'alert.hpp': 'Alerts', - 'alert_types.hpp': 'Alerts', - 'bencode.hpp': 'Bencoding', - 'lazy_entry.hpp': 'Bencoding', - 'bdecode.hpp': 'Bdecoding', - 'entry.hpp': 'Bencoding', - 'time.hpp': 'Time', - 'escape_string.hpp': 'Utility', - 'enum_net.hpp': 'Network', - 'broadcast_socket.hpp': 'Network', - 'socket.hpp': 'Network', - 'socket_io.hpp': 'Network', - 'bitfield.hpp': 'Utility', - 'sha1_hash.hpp': 'Utility', - 'hasher.hpp': 'Utility', - 'hasher512.hpp': 'Utility', - 'identify_client.hpp': 'Utility', - 'ip_filter.hpp': 'Filter', - 'session_settings.hpp': 'Settings', - 'settings_pack.hpp': 'Settings', - 'operations.hpp': 'Alerts', - 'disk_buffer_holder.hpp': 'Custom Storage', - 'alert_dispatcher.hpp': 'Alerts', + 'ed25519.hpp': 'ed25519', + 'session.hpp': 'Core', + 'add_torrent_params.hpp': 'Core', + 'session_status.hpp': 'Core', + 'error_code.hpp': 'Error Codes', + 'storage.hpp': 'Custom Storage', + 'storage_defs.hpp': 'Storage', + 'file_storage.hpp': 'Storage', + 'file_pool.hpp': 'Custom Storage', + 'extensions.hpp': 'Plugins', + 'ut_metadata.hpp': 'Plugins', + 'ut_pex.hpp': 'Plugins', + 'ut_trackers.hpp': 'Plugins', + 'smart_ban.hpp': 'Plugins', + 'create_torrent.hpp': 'Create Torrents', + 'alert.hpp': 'Alerts', + 'alert_types.hpp': 'Alerts', + 'bencode.hpp': 'Bencoding', + 'lazy_entry.hpp': 'Bencoding', + 'bdecode.hpp': 'Bdecoding', + 'entry.hpp': 'Bencoding', + 'time.hpp': 'Time', + 
'escape_string.hpp': 'Utility', + 'enum_net.hpp': 'Network', + 'broadcast_socket.hpp': 'Network', + 'socket.hpp': 'Network', + 'socket_io.hpp': 'Network', + 'bitfield.hpp': 'Utility', + 'sha1_hash.hpp': 'Utility', + 'hasher.hpp': 'Utility', + 'hasher512.hpp': 'Utility', + 'identify_client.hpp': 'Utility', + 'ip_filter.hpp': 'Filter', + 'session_settings.hpp': 'Settings', + 'settings_pack.hpp': 'Settings', + 'operations.hpp': 'Alerts', + 'disk_buffer_holder.hpp': 'Custom Storage', + 'alert_dispatcher.hpp': 'Alerts', } category_fun_mapping = { - 'min_memory_usage()': 'Settings', - 'high_performance_seed()': 'Settings', - 'cache_status': 'Core', + 'min_memory_usage()': 'Settings', + 'high_performance_seed()': 'Settings', + 'cache_status': 'Core', } + def categorize_symbol(name, filename): - f = os.path.split(filename)[1] + f = os.path.split(filename)[1] - if name.endswith('_category()') \ - or name.endswith('_error_code') \ - or name.endswith('error_code_enum'): - return 'Error Codes' + if name.endswith('_category()') \ + or name.endswith('_error_code') \ + or name.endswith('error_code_enum'): + return 'Error Codes' - if name in category_fun_mapping: - return category_fun_mapping[name] + if name in category_fun_mapping: + return category_fun_mapping[name] - if f in category_mapping: - return category_mapping[f] + if f in category_mapping: + return category_mapping[f] + + return 'Core' - return 'Core' def suppress_warning(filename, name): - f = os.path.split(filename)[1] - if f != 'alert_types.hpp': return False + f = os.path.split(filename)[1] + if f != 'alert_types.hpp': + return False -# if name.endswith('_alert') or name == 'message()': - return True + # if name.endswith('_alert') or name == 'message()': + return True + + # return False -# return False def first_item(itr): - for i in itr: - return i - return None + for i in itr: + return i + return None + def is_visible(desc): - if desc.strip().startswith('hidden'): return False - if internal: return True - if 
desc.strip().startswith('internal'): return False - return True + if desc.strip().startswith('hidden'): + return False + if internal: + return True + if desc.strip().startswith('internal'): + return False + return True + def highlight_signature(s): - name = s.split('(', 1) - name2 = name[0].split(' ') - if len(name2[-1]) == 0: return s + name = s.split('(', 1) + name2 = name[0].split(' ') + if len(name2[-1]) == 0: + return s - # make the name of the function bold - name2[-1] = '**' + name2[-1] + '** ' + # make the name of the function bold + name2[-1] = '**' + name2[-1] + '** ' - # if there is a return value, make sure we preserve pointer types - if len(name2) > 1: - name2[0] = name2[0].replace('*', '\\*') - name[0] = ' '.join(name2) + # if there is a return value, make sure we preserve pointer types + if len(name2) > 1: + name2[0] = name2[0].replace('*', '\\*') + name[0] = ' '.join(name2) - # we have to escape asterisks, since this is rendered into - # a parsed literal in rst - name[1] = name[1].replace('*', '\\*') + # we have to escape asterisks, since this is rendered into + # a parsed literal in rst + name[1] = name[1].replace('*', '\\*') - # we also have to escape colons - name[1] = name[1].replace(':', '\\:') + # we also have to escape colons + name[1] = name[1].replace(':', '\\:') - # escape trailing underscores - name[1] = name[1].replace('_', '\\_') + # escape trailing underscores + name[1] = name[1].replace('_', '\\_') + + # comments in signatures are italic + name[1] = name[1].replace('/\\*', '*/\\*') + name[1] = name[1].replace('\\*/', '\\*/*') + return '('.join(name) - # comments in signatures are italic - name[1] = name[1].replace('/\\*', '*/\\*') - name[1] = name[1].replace('\\*/', '\\*/*') - return '('.join(name) def html_sanitize(s): - ret = '' - for i in s: - if i == '<': ret += '&lt;' - elif i == '>': ret += '&gt;' - elif i == '&': ret += '&amp;' - else: ret += i - return ret + ret = '' + for i in s: + if i == '<': + ret += '&lt;' + elif i == '>': + ret += 
'&gt;' + elif i == '&': + ret += '&amp;' + else: + ret += i + return ret + def looks_like_namespace(line): - line = line.strip() - if line.startswith('namespace'): return True - return False + line = line.strip() + if line.startswith('namespace'): + return True + return False + def looks_like_blank(line): - line = line.split('//')[0] - line = line.replace('{', '') - line = line.replace('}', '') - line = line.replace('[', '') - line = line.replace(']', '') - line = line.replace(';', '') - line = line.strip() - return len(line) == 0 + line = line.split('//')[0] + line = line.replace('{', '') + line = line.replace('}', '') + line = line.replace('[', '') + line = line.replace(']', '') + line = line.replace(';', '') + line = line.strip() + return len(line) == 0 + def looks_like_variable(line): - line = line.split('//')[0] - line = line.strip() - if not ' ' in line and not '\t' in line: return False - if line.startswith('friend '): return False - if line.startswith('enum '): return False - if line.startswith(','): return False - if line.startswith(':'): return False - if line.startswith('typedef'): return False - if line.startswith('using'): return False - if ' = ' in line: return True - if line.endswith(';'): return True - return False + line = line.split('//')[0] + line = line.strip() + if ' ' not in line and '\t' not in line: + return False + if line.startswith('friend '): + return False + if line.startswith('enum '): + return False + if line.startswith(','): + return False + if line.startswith(':'): + return False + if line.startswith('typedef'): + return False + if line.startswith('using'): + return False + if ' = ' in line: + return True + if line.endswith(';'): + return True + return False + def looks_like_forward_decl(line): - line = line.split('//')[0] - line = line.strip() - if not line.endswith(';'): return False - if '{' in line: return False - if '}' in line: return False - if line.startswith('friend '): return True - if line.startswith('struct '): return True - if 
line.startswith('class '): return True - return False + line = line.split('//')[0] + line = line.strip() + if not line.endswith(';'): + return False + if '{' in line: + return False + if '}' in line: + return False + if line.startswith('friend '): + return True + if line.startswith('struct '): + return True + if line.startswith('class '): + return True + return False + def looks_like_function(line): - if line.startswith('friend'): return False - if '::' in line.split('(')[0].split(' ')[-1]: return False - if line.startswith(','): return False - if line.startswith(':'): return False - return '(' in line; + if line.startswith('friend'): + return False + if '::' in line.split('(')[0].split(' ')[-1]: + return False + if line.startswith(','): + return False + if line.startswith(':'): + return False + return '(' in line + def parse_function(lno, lines, filename): - current_fun = {} - start_paren = 0 - end_paren = 0 - signature = '' + start_paren = 0 + end_paren = 0 + signature = '' - while lno < len(lines): - l = lines[lno].strip() - lno += 1 - if l.startswith('//'): continue + while lno < len(lines): + line = lines[lno].strip() + lno += 1 + if line.startswith('//'): + continue - start_paren += l.count('(') - end_paren += l.count(')') + start_paren += line.count('(') + end_paren += line.count(')') - sig_line = l.replace('TORRENT_EXPORT ', '').replace('TORRENT_EXTRA_EXPORT','').strip() - if signature != '': sig_line = '\n ' + sig_line - signature += sig_line - if verbose: print 'fun %s' % l + sig_line = line.replace('TORRENT_EXPORT ', '').replace('TORRENT_EXTRA_EXPORT', '').strip() + if signature != '': + sig_line = '\n ' + sig_line + signature += sig_line + if verbose: + print('fun %s' % line) - if start_paren > 0 and start_paren == end_paren: - if signature[-1] != ';': - # we also need to consume the function body - start_paren = 0 - end_paren = 0 - for i in range(len(signature)): - if signature[i] == '(': start_paren += 1 - elif signature[i] == ')': end_paren += 1 + if 
start_paren > 0 and start_paren == end_paren: + if signature[-1] != ';': + # we also need to consume the function body + start_paren = 0 + end_paren = 0 + for i in range(len(signature)): + if signature[i] == '(': + start_paren += 1 + elif signature[i] == ')': + end_paren += 1 - if start_paren > 0 and start_paren == end_paren: - for k in range(i, len(signature)): - if signature[k] == ':' or signature[k] == '{': - signature = signature[0:k].strip() - break - break + if start_paren > 0 and start_paren == end_paren: + for k in range(i, len(signature)): + if signature[k] == ':' or signature[k] == '{': + signature = signature[0:k].strip() + break + break + + lno = consume_block(lno - 1, lines) + signature += ';' + ret = [{'file': filename[11:], 'signatures': set([signature]), 'names': set( + [signature.split('(')[0].split(' ')[-1].strip() + '()'])}, lno] + if first_item(ret[0]['names']) == '()': + return [None, lno] + return ret + if len(signature) > 0: + print('\x1b[31mFAILED TO PARSE FUNCTION\x1b[0m %s\nline: %d\nfile: %s' % (signature, lno, filename)) + return [None, lno] - lno = consume_block(lno - 1, lines) - signature += ';' - ret = [{ 'file': filename[11:], 'signatures': set([ signature ]), 'names': set([ signature.split('(')[0].split(' ')[-1].strip() + '()'])}, lno] - if first_item(ret[0]['names']) == '()': return [None, lno] - return ret - if len(signature) > 0: - print '\x1b[31mFAILED TO PARSE FUNCTION\x1b[0m %s\nline: %d\nfile: %s' % (signature, lno, filename) - return [None, lno] def parse_class(lno, lines, filename): - start_brace = 0 - end_brace = 0 + start_brace = 0 + end_brace = 0 - name = '' - funs = [] - fields = [] - enums = [] - state = 'public' - context = '' - class_type = 'struct' - blanks = 0 - decl = '' + name = '' + funs = [] + fields = [] + enums = [] + state = 'public' + context = '' + class_type = 'struct' + blanks = 0 + decl = '' - while lno < len(lines): - l = lines[lno].strip() - decl += lines[lno].replace('TORRENT_EXPORT ', 
'').replace('TORRENT_EXTRA_EXPORT', '').split('{')[0].strip() - if '{' in l: break - if verbose: print 'class %s' % l - lno += 1 + while lno < len(lines): + line = lines[lno].strip() + decl += lines[lno].replace('TORRENT_EXPORT ', '').replace('TORRENT_EXTRA_EXPORT', '').split('{')[0].strip() + if '{' in line: + break + if verbose: + print('class %s' % line) + lno += 1 - if decl.startswith('class'): - state = 'private' - class_type = 'class' + if decl.startswith('class'): + state = 'private' + class_type = 'class' - name = decl.split(':')[0].replace('class ', '').replace('struct ', '').replace('final', '').strip() + name = decl.split(':')[0].replace('class ', '').replace('struct ', '').replace('final', '').strip() + while lno < len(lines): + line = lines[lno].strip() + lno += 1 - while lno < len(lines): - l = lines[lno].strip() - lno += 1 + if line == '': + blanks += 1 + context = '' + continue - if l == '': - blanks += 1 - context = '' - continue + if line.startswith('/*'): + lno = consume_comment(lno - 1, lines) + continue - if l.startswith('/*'): - lno = consume_comment(lno - 1, lines) - continue + if line.startswith('#'): + lno = consume_ifdef(lno - 1, lines, True) + continue - if l.startswith('#'): - lno = consume_ifdef(lno - 1, lines, True) - continue + if 'TORRENT_DEFINE_ALERT' in line: + if verbose: + print('xx %s' % line) + blanks += 1 + continue + if 'TORRENT_DEPRECATED' in line: + if verbose: + print('xx %s' % line) + blanks += 1 + continue - if 'TORRENT_DEFINE_ALERT' in l: - if verbose: print 'xx %s' % l - blanks += 1 - continue - if 'TORRENT_DEPRECATED' in l: - if verbose: print 'xx %s' % l - blanks += 1 - continue + if line.startswith('//'): + if verbose: + print('desc %s' % line) - if l.startswith('//'): - if verbose: print 'desc %s' % l + # plain output prints just descriptions and filters out c++ code. 
+ # it's used to run spell checker over + if plain_output: + s = line.split('//')[1] + # if the first character is a space, strip it + if len(s) > 0 and s[0] == ' ': + s = s[1:] + global in_code + if in_code is not None and not s.startswith(in_code) and len(s) > 1: + in_code = None - # plain output prints just descriptions and filters out c++ code. - # it's used to run spell checker over - if plain_output: - line = l.split('//')[1] - # if the first character is a space, strip it - if len(line) > 0 and line[0] == ' ': line = line[1:] - global in_code - if in_code != None and not line.startswith(in_code) and len(line) > 1: - in_code = None + if s.strip().startswith('.. code::'): + in_code = s.split('.. code::')[0] + '\t' - if line.strip().startswith('.. code::'): - in_code = line.split('.. code::')[0] + '\t' + # strip out C++ code from the plain text output since it's meant for + # running spell checking over + if not s.strip().startswith('.. ') and in_code is None: + plain_file.write(s + '\n') + line = line[2:] + if len(line) and line[0] == ' ': + line = line[1:] + context += line + '\n' + continue - # strip out C++ code from the plain text output since it's meant for - # running spell checking over - if not line.strip().startswith('.. 
') and in_code == None: - plain_file.write(line + '\n') - l = l[2:] - if len(l) and l[0] == ' ': l = l[1:] - context += l + '\n' - continue + start_brace += line.count('{') + end_brace += line.count('}') - start_brace += l.count('{') - end_brace += l.count('}') + if line == 'private:': + state = 'private' + elif line == 'protected:': + state = 'protected' + elif line == 'public:': + state = 'public' - if l == 'private:': state = 'private' - elif l == 'protected:': state = 'protected' - elif l == 'public:': state = 'public' + if start_brace > 0 and start_brace == end_brace: + return [{'file': filename[11:], 'enums': enums, 'fields':fields, + 'type': class_type, 'name': name, 'decl': decl, 'fun': funs}, lno] - if start_brace > 0 and start_brace == end_brace: - return [{ 'file': filename[11:], 'enums': enums, 'fields':fields, 'type': class_type, 'name': name, 'decl': decl, 'fun': funs}, lno] + if state != 'public' and not internal: + if verbose: + print('private %s' % line) + blanks += 1 + continue - if state != 'public' and not internal: - if verbose: print 'private %s' % l - blanks += 1 - continue + if start_brace - end_brace > 1: + if verbose: + print('scope %s' % line) + blanks += 1 + continue - if start_brace - end_brace > 1: - if verbose: print 'scope %s' % l - blanks += 1 - continue; + if looks_like_function(line): + current_fun, lno = parse_function(lno - 1, lines, filename) + if current_fun is not None and is_visible(context): + if context == '' and blanks == 0 and len(funs): + funs[-1]['signatures'].update(current_fun['signatures']) + funs[-1]['names'].update(current_fun['names']) + else: + current_fun['desc'] = context + if context == '' and not suppress_warning(filename, first_item(current_fun['names'])): + print('WARNING: member function "%s" is not documented: \x1b[34m%s:%d\x1b[0m' + % (name + '::' + first_item(current_fun['names']), filename, lno)) + funs.append(current_fun) + context = '' + blanks = 0 + continue - if looks_like_function(l): - 
current_fun, lno = parse_function(lno - 1, lines, filename) - if current_fun != None and is_visible(context): - if context == '' and blanks == 0 and len(funs): - funs[-1]['signatures'].update(current_fun['signatures']) - funs[-1]['names'].update(current_fun['names']) - else: - current_fun['desc'] = context - if context == '' and not suppress_warning(filename, first_item(current_fun['names'])): - print 'WARNING: member function "%s" is not documented: \x1b[34m%s:%d\x1b[0m' \ - % (name + '::' + first_item(current_fun['names']), filename, lno) - funs.append(current_fun) - context = '' - blanks = 0 - continue + if looks_like_variable(line): + if verbose: + print('var %s' % line) + if not is_visible(context): + continue + line = line.split('//')[0].strip() + # the name may look like this: + # std::uint8_t fails : 7; + # int scrape_downloaded = -1; + # static constexpr peer_flags_t interesting{0x1}; + n = line.split('=')[0].split('{')[0].strip().split(' : ')[0].split(' ')[-1].split(':')[0].split(';')[0] + if context == '' and blanks == 0 and len(fields): + fields[-1]['names'].append(n) + fields[-1]['signatures'].append(line) + else: + if context == '' and not suppress_warning(filename, n): + print('WARNING: field "%s" is not documented: \x1b[34m%s:%d\x1b[0m' + % (name + '::' + n, filename, lno)) + fields.append({'signatures': [line], 'names': [n], 'desc': context}) + context = '' + blanks = 0 + continue - if looks_like_variable(l): - if verbose: print 'var %s' % l - if not is_visible(context): - continue - l = l.split('//')[0].strip() - # the name may look like this: - # std::uint8_t fails : 7; - # int scrape_downloaded = -1; - # static constexpr peer_flags_t interesting{0x1}; - n = l.split('=')[0].split('{')[0].strip().split(' : ')[0].split(' ')[-1].split(':')[0].split(';')[0] - if context == '' and blanks == 0 and len(fields): - fields[-1]['names'].append(n) - fields[-1]['signatures'].append(l) - else: - if context == '' and not suppress_warning(filename, n): - print 
'WARNING: field "%s" is not documented: \x1b[34m%s:%d\x1b[0m' \ - % (name + '::' + n, filename, lno) - fields.append({'signatures': [l], 'names': [n], 'desc': context}) - context = '' - blanks = 0 - continue + if line.startswith('enum '): + if verbose: + print('enum %s' % line) + if not is_visible(context): + consume_block(lno - 1, lines) + else: + enum, lno = parse_enum(lno - 1, lines, filename) + if enum is not None: + enum['desc'] = context + if context == '' and not suppress_warning(filename, enum['name']): + print('WARNING: enum "%s" is not documented: \x1b[34m%s:%d\x1b[0m' + % (name + '::' + enum['name'], filename, lno)) + enums.append(enum) + context = '' + continue - if l.startswith('enum '): - if verbose: print 'enum %s' % l - if not is_visible(context): - consume_block(lno - 1, lines) - else: - enum, lno = parse_enum(lno - 1, lines, filename) - if enum != None: - enum['desc'] = context - if context == '' and not suppress_warning(filename, enum['name']): - print 'WARNING: enum "%s" is not documented: \x1b[34m%s:%d\x1b[0m' \ - % (name + '::' + enum['name'], filename, lno) - enums.append(enum) - context = '' - continue + context = '' - context = '' + if verbose: + if looks_like_forward_decl(line) \ + or looks_like_blank(line) \ + or looks_like_namespace(line): + print('-- %s' % line) + else: + print('?? %s' % line) - if verbose: - if looks_like_forward_decl(l) \ - or looks_like_blank(l) \ - or looks_like_namespace(l): - print '-- %s' % l - else: - print '?? 
%s' % l + if len(name) > 0: + print('\x1b[31mFAILED TO PARSE CLASS\x1b[0m %s\nfile: %s:%d' % (name, filename, lno)) + return [None, lno] - if len(name) > 0: - print '\x1b[31mFAILED TO PARSE CLASS\x1b[0m %s\nfile: %s:%d' % (name, filename, lno) - return [None, lno] def parse_enum(lno, lines, filename): - start_brace = 0 - end_brace = 0 - global anon_index + start_brace = 0 + end_brace = 0 + global anon_index - l = lines[lno].strip() - name = l.replace('enum ', '').split('{')[0].strip() - if len(name) == 0: - if not internal: - print 'WARNING: anonymous enum at: \x1b[34m%s:%d\x1b[0m' % (filename, lno) - lno = consume_block(lno - 1, lines) - return [None, lno] - name = 'anonymous_enum_%d' % anon_index - anon_index += 1 + line = lines[lno].strip() + name = line.replace('enum ', '').split('{')[0].strip() + if len(name) == 0: + if not internal: + print('WARNING: anonymous enum at: \x1b[34m%s:%d\x1b[0m' % (filename, lno)) + lno = consume_block(lno - 1, lines) + return [None, lno] + name = 'anonymous_enum_%d' % anon_index + anon_index += 1 - values = [] - context = '' - if not '{' in l: - if verbose: print 'enum %s' % lines[lno] - lno += 1 + values = [] + context = '' + if '{' not in line: + if verbose: + print('enum %s' % lines[lno]) + lno += 1 - val = 0 - while lno < len(lines): - l = lines[lno].strip() - lno += 1 + val = 0 + while lno < len(lines): + line = lines[lno].strip() + lno += 1 - if l.startswith('//'): - if verbose: print 'desc %s' % l - l = l[2:] - if len(l) and l[0] == ' ': l = l[1:] - context += l + '\n' - continue + if line.startswith('//'): + if verbose: + print('desc %s' % line) + line = line[2:] + if len(line) and line[0] == ' ': + line = line[1:] + context += line + '\n' + continue - if l.startswith('#'): - lno = consume_ifdef(lno - 1, lines) - continue + if line.startswith('#'): + lno = consume_ifdef(lno - 1, lines) + continue - start_brace += l.count('{') - end_brace += l.count('}') + start_brace += line.count('{') + end_brace += line.count('}') - if 
'{' in l: - l = l.split('{')[1] - l = l.split('}')[0] + if '{' in line: + line = line.split('{')[1] + line = line.split('}')[0] - if len(l): - if verbose: print 'enumv %s' % lines[lno-1] - for v in l.split(','): - v = v.strip(); - if v.startswith('//'): break - if v == '': continue - valstr = '' - try: - if '=' in v: val = int(v.split('=')[1].strip(), 0) - valstr = str(val) - except: pass + if len(line): + if verbose: + print('enumv %s' % lines[lno - 1]) + for v in line.split(','): + v = v.strip() + if v.startswith('//'): + break + if v == '': + continue + valstr = '' + try: + if '=' in v: + val = int(v.split('=')[1].strip(), 0) + valstr = str(val) + except BaseException: + pass - if '=' in v: v = v.split('=')[0].strip() - if is_visible(context): - values.append({'name': v.strip(), 'desc': context, 'val': valstr}) - if verbose: print 'enumv %s' % valstr - context = '' - val += 1 - else: - if verbose: print '?? %s' % lines[lno-1] + if '=' in v: + v = v.split('=')[0].strip() + if is_visible(context): + values.append({'name': v.strip(), 'desc': context, 'val': valstr}) + if verbose: + print('enumv %s' % valstr) + context = '' + val += 1 + else: + if verbose: + print('?? 
%s' % lines[lno - 1]) - if start_brace > 0 and start_brace == end_brace: - return [{'file': filename[11:], 'name': name, 'values': values}, lno] + if start_brace > 0 and start_brace == end_brace: + return [{'file': filename[11:], 'name': name, 'values': values}, lno] + + if len(name) > 0: + print('\x1b[31mFAILED TO PARSE ENUM\x1b[0m %s\nline: %d\nfile: %s' % (name, lno, filename)) + return [None, lno] - if len(name) > 0: - print '\x1b[31mFAILED TO PARSE ENUM\x1b[0m %s\nline: %d\nfile: %s' % (name, lno, filename) - return [None, lno] def consume_block(lno, lines): - start_brace = 0 - end_brace = 0 + start_brace = 0 + end_brace = 0 - while lno < len(lines): - l = lines[lno].strip() - if verbose: print 'xx %s' % l - lno += 1 + while lno < len(lines): + line = lines[lno].strip() + if verbose: + print('xx %s' % line) + lno += 1 - start_brace += l.count('{') - end_brace += l.count('}') + start_brace += line.count('{') + end_brace += line.count('}') + + if start_brace > 0 and start_brace == end_brace: + break + return lno - if start_brace > 0 and start_brace == end_brace: - break - return lno def consume_comment(lno, lines): - while lno < len(lines): - l = lines[lno].strip() - if verbose: print 'xx %s' % l - lno += 1 - if '*/' in l: break + while lno < len(lines): + line = lines[lno].strip() + if verbose: + print('xx %s' % line) + lno += 1 + if '*/' in line: + break - return lno + return lno -def trim_define(l): - return l.replace('#ifndef', '').replace('#ifdef', '') \ - .replace('#if', '').replace('defined', '') \ - .replace('TORRENT_USE_IPV6', '').replace('TORRENT_ABI_VERSION == 1', '') \ - .replace('||', '').replace('&&', '').replace('(', '').replace(')','') \ - .replace('!', '').replace('\\', '').strip() -def consume_ifdef(lno, lines, warn_on_ifdefs = False): - l = lines[lno].strip() - lno += 1 +def trim_define(line): + return line.replace('#ifndef', '').replace('#ifdef', '') \ + .replace('#if', '').replace('defined', '') \ + .replace('TORRENT_USE_IPV6', 
'').replace('TORRENT_ABI_VERSION == 1', '') \ + .replace('||', '').replace('&&', '').replace('(', '').replace(')', '') \ + .replace('!', '').replace('\\', '').strip() - start_if = 1 - end_if = 0 - if verbose: print 'prep %s' % l +def consume_ifdef(lno, lines, warn_on_ifdefs=False): + line = lines[lno].strip() + lno += 1 - if warn_on_ifdefs and l.strip().startswith('#if'): - while l.endswith('\\'): - lno += 1 - l += lines[lno].strip() - if verbose: print 'prep %s' % lines[lno].trim() - define = trim_define(l) - if 'TORRENT_' in define and not 'TORRENT_ABI_VERSION' in define: - print '\x1b[31mWARNING: possible ABI breakage in public struct! "%s" \x1b[34m %s:%d\x1b[0m' % \ - (define, filename, lno) - # we've already warned once, no need to do it twice - warn_on_ifdefs = False - elif define != '': - print '\x1b[33msensitive define in public struct: "%s"\x1b[34m %s:%d\x1b[0m' % (define, filename, lno) + start_if = 1 + end_if = 0 - if (l.startswith('#if') and ( - ' TORRENT_USE_ASSERTS' in l or - ' TORRENT_USE_INVARIANT_CHECKS' in l or - ' TORRENT_ASIO_DEBUGGING' in l) or - l == '#if TORRENT_ABI_VERSION == 1' - ): - while lno < len(lines): - l = lines[lno].strip() - lno += 1 - if verbose: print 'prep %s' % l - if l.startswith('#endif'): end_if += 1 - if l.startswith('#if'): start_if += 1 - if l == '#else' and start_if - end_if == 1: break - if start_if - end_if == 0: break - return lno - else: - while l.endswith('\\') and lno < len(lines): - l = lines[lno].strip() - lno += 1 - if verbose: print 'prep %s' % l + if verbose: + print('prep %s' % line) + + if warn_on_ifdefs and line.strip().startswith('#if'): + while line.endswith('\\'): + lno += 1 + line += lines[lno].strip() + if verbose: + print('prep %s' % lines[lno].trim()) + define = trim_define(line) + if 'TORRENT_' in define and 'TORRENT_ABI_VERSION' not in define: + print('\x1b[31mWARNING: possible ABI breakage in public struct! 
"%s" \x1b[34m %s:%d\x1b[0m' % + (define, filename, lno)) + # we've already warned once, no need to do it twice + warn_on_ifdefs = False + elif define != '': + print('\x1b[33msensitive define in public struct: "%s"\x1b[34m %s:%d\x1b[0m' % (define, filename, lno)) + + if (line.startswith('#if') and ( + ' TORRENT_USE_ASSERTS' in line or + ' TORRENT_USE_INVARIANT_CHECKS' in line or + ' TORRENT_ASIO_DEBUGGING' in line) or + line == '#if TORRENT_ABI_VERSION == 1'): + while lno < len(lines): + line = lines[lno].strip() + lno += 1 + if verbose: + print('prep %s' % line) + if line.startswith('#endif'): + end_if += 1 + if line.startswith('#if'): + start_if += 1 + if line == '#else' and start_if - end_if == 1: + break + if start_if - end_if == 0: + break + return lno + else: + while line.endswith('\\') and lno < len(lines): + line = lines[lno].strip() + lno += 1 + if verbose: + print('prep %s' % line) + + return lno - return lno for filename in files: - h = open(filename) - lines = h.read().split('\n') + h = open(filename) + lines = h.read().split('\n') - if verbose: print '\n=== %s ===\n' % filename + if verbose: + print('\n=== %s ===\n' % filename) - blanks = 0 - lno = 0 - while lno < len(lines): - l = lines[lno].strip() - lno += 1 + blanks = 0 + lno = 0 + while lno < len(lines): + line = lines[lno].strip() + lno += 1 - if l == '': - blanks += 1 - context = '' - continue + if line == '': + blanks += 1 + context = '' + continue - if l.startswith('//') and l[2:].strip() == 'OVERVIEW': - # this is a section overview - current_overview = '' - while lno < len(lines): - l = lines[lno].strip() - lno += 1 - if not l.startswith('//'): - # end of overview - overviews[filename[11:]] = current_overview - current_overview = '' - break - l = l[2:] - if l.startswith(' '): l = l[1:] - current_overview += l + '\n' + if line.startswith('//') and line[2:].strip() == 'OVERVIEW': + # this is a section overview + current_overview = '' + while lno < len(lines): + line = lines[lno].strip() + lno 
+= 1 + if not line.startswith('//'): + # end of overview + overviews[filename[11:]] = current_overview + current_overview = '' + break + line = line[2:] + if line.startswith(' '): + line = line[1:] + current_overview += line + '\n' - if l.startswith('//'): - if verbose: print 'desc %s' % l - l = l[2:] - if len(l) and l[0] == ' ': l = l[1:] - context += l + '\n' - continue + if line.startswith('//'): + if verbose: + print('desc %s' % line) + line = line[2:] + if len(line) and line[0] == ' ': + line = line[1:] + context += line + '\n' + continue - if l.startswith('/*'): - lno = consume_comment(lno - 1, lines) - continue + if line.startswith('/*'): + lno = consume_comment(lno - 1, lines) + continue - if l.startswith('#'): - lno = consume_ifdef(lno - 1, lines) - continue + if line.startswith('#'): + lno = consume_ifdef(lno - 1, lines) + continue - if (l == 'namespace detail' or \ - l == 'namespace impl' or \ - l == 'namespace aux') \ - and not internal: - lno = consume_block(lno, lines) - continue + if (line == 'namespace detail' or + line == 'namespace impl' or + line == 'namespace aux') \ + and not internal: + lno = consume_block(lno, lines) + continue - if 'TORRENT_DEPRECATED' in l: - if ('class ' in l or 'struct ' in l) and not ';' in l: - lno = consume_block(lno - 1, lines) - context = '' - blanks += 1 - if verbose: print 'xx %s' % l - continue + if 'TORRENT_DEPRECATED' in line: + if ('class ' in line or 'struct ' in line) and ';' not in line: + lno = consume_block(lno - 1, lines) + context = '' + blanks += 1 + if verbose: + print('xx %s' % line) + continue - if 'TORRENT_EXPORT ' in l or l.startswith('inline ') or l.startswith('template') or internal: - if l.startswith('class ') or l.startswith('struct '): - if not l.endswith(';'): - current_class, lno = parse_class(lno -1, lines, filename) - if current_class != None and is_visible(context): - current_class['desc'] = context - if context == '': - print 'WARNING: class "%s" is not documented: \x1b[34m%s:%d\x1b[0m' 
\ - % (current_class['name'], filename, lno) - classes.append(current_class) - context = '' - blanks += 1 - continue + if 'TORRENT_EXPORT ' in line or line.startswith('inline ') or line.startswith('template') or internal: + if line.startswith('class ') or line.startswith('struct '): + if not line.endswith(';'): + current_class, lno = parse_class(lno - 1, lines, filename) + if current_class is not None and is_visible(context): + current_class['desc'] = context + if context == '': + print('WARNING: class "%s" is not documented: \x1b[34m%s:%d\x1b[0m' + % (current_class['name'], filename, lno)) + classes.append(current_class) + context = '' + blanks += 1 + continue - if looks_like_function(l): - current_fun, lno = parse_function(lno - 1, lines, filename) - if current_fun != None and is_visible(context): - if context == '' and blanks == 0 and len(functions): - functions[-1]['signatures'].update(current_fun['signatures']) - functions[-1]['names'].update(current_fun['names']) - else: - current_fun['desc'] = context - if context == '': - print 'WARNING: function "%s" is not documented: \x1b[34m%s:%d\x1b[0m' \ - % (first_item(current_fun['names']), filename, lno) - functions.append(current_fun) - context = '' - blanks = 0 - continue + if looks_like_function(line): + current_fun, lno = parse_function(lno - 1, lines, filename) + if current_fun is not None and is_visible(context): + if context == '' and blanks == 0 and len(functions): + functions[-1]['signatures'].update(current_fun['signatures']) + functions[-1]['names'].update(current_fun['names']) + else: + current_fun['desc'] = context + if context == '': + print('WARNING: function "%s" is not documented: \x1b[34m%s:%d\x1b[0m' + % (first_item(current_fun['names']), filename, lno)) + functions.append(current_fun) + context = '' + blanks = 0 + continue - if ('class ' in l or 'struct ' in l) and not ';' in l: - lno = consume_block(lno - 1, lines) - context = '' - blanks += 1 - continue + if ('class ' in line or 'struct ' in 
line) and ';' not in line: + lno = consume_block(lno - 1, lines) + context = '' + blanks += 1 + continue - if l.startswith('enum '): - if not is_visible(context): - consume_block(lno - 1, lines) - else: - current_enum, lno = parse_enum(lno - 1, lines, filename) - if current_enum != None and is_visible(context): - current_enum['desc'] = context - if context == '': - print 'WARNING: enum "%s" is not documented: \x1b[34m%s:%d\x1b[0m' \ - % (current_enum['name'], filename, lno) - enums.append(current_enum) - context = '' - blanks += 1 - continue + if line.startswith('enum '): + if not is_visible(context): + consume_block(lno - 1, lines) + else: + current_enum, lno = parse_enum(lno - 1, lines, filename) + if current_enum is not None and is_visible(context): + current_enum['desc'] = context + if context == '': + print('WARNING: enum "%s" is not documented: \x1b[34m%s:%d\x1b[0m' + % (current_enum['name'], filename, lno)) + enums.append(current_enum) + context = '' + blanks += 1 + continue - blanks += 1 - if verbose: - if looks_like_forward_decl(l) \ - or looks_like_blank(l) \ - or looks_like_namespace(l): - print '-- %s' % l - else: - print '?? %s' % l + blanks += 1 + if verbose: + if looks_like_forward_decl(line) \ + or looks_like_blank(line) \ + or looks_like_namespace(line): + print('-- %s' % line) + else: + print('?? 
%s' % line) - context = '' - h.close() + context = '' + h.close() # ==================================================================== # @@ -729,266 +824,291 @@ for filename in files: if dump: - if verbose: print '\n===============================\n' + if verbose: + print('\n===============================\n') - for c in classes: - print '\x1b[4m%s\x1b[0m %s\n{' % (c['type'], c['name']) - for f in c['fun']: - for s in f['signatures']: - print ' %s' % s.replace('\n', '\n ') + for c in classes: + print('\x1b[4m%s\x1b[0m %s\n{' % (c['type'], c['name'])) + for f in c['fun']: + for s in f['signatures']: + print(' %s' % s.replace('\n', '\n ')) - if len(c['fun']) > 0 and len(c['fields']) > 0: print '' + if len(c['fun']) > 0 and len(c['fields']) > 0: + print('') - for f in c['fields']: - for s in f['signatures']: - print ' %s' % s + for f in c['fields']: + for s in f['signatures']: + print(' %s' % s) - if len(c['fields']) > 0 and len(c['enums']) > 0: print '' + if len(c['fields']) > 0 and len(c['enums']) > 0: + print('') - for e in c['enums']: - print ' \x1b[4menum\x1b[0m %s\n {' % e['name'] - for v in e['values']: - print ' %s' % v['name'] - print ' };' - print '};\n' + for e in c['enums']: + print(' \x1b[4menum\x1b[0m %s\n {' % e['name']) + for v in e['values']: + print(' %s' % v['name']) + print(' };') + print('};\n') - for f in functions: - print '%s' % f['signature'] + for f in functions: + print('%s' % f['signature']) - for e in enums: - print '\x1b[4menum\x1b[0m %s\n{' % e['name'] - for v in e['values']: - print ' %s' % v['name'] - print '};' + for e in enums: + print('\x1b[4menum\x1b[0m %s\n{' % e['name']) + for v in e['values']: + print(' %s' % v['name']) + print('};') categories = {} for c in classes: - cat = categorize_symbol(c['name'], c['file']) - if not cat in categories: - categories[cat] = { 'classes': [], 'functions': [], 'enums': [], 'filename': 'reference-%s.rst' % cat.replace(' ', '_')} + cat = categorize_symbol(c['name'], c['file']) + if cat not in 
categories: + categories[cat] = {'classes': [], 'functions': [], 'enums': [], + 'filename': 'reference-%s.rst' % cat.replace(' ', '_')} - if c['file'] in overviews: - categories[cat]['overview'] = overviews[c['file']] + if c['file'] in overviews: + categories[cat]['overview'] = overviews[c['file']] - filename = categories[cat]['filename'].replace('.rst', '.html') + '#' - categories[cat]['classes'].append(c) - symbols[c['name']] = filename + c['name'] - for f in c['fun']: - for n in f['names']: - symbols[n] = filename + n - symbols[c['name'] + '::' + n] = filename + n + filename = categories[cat]['filename'].replace('.rst', '.html') + '#' + categories[cat]['classes'].append(c) + symbols[c['name']] = filename + c['name'] + for f in c['fun']: + for n in f['names']: + symbols[n] = filename + n + symbols[c['name'] + '::' + n] = filename + n - for f in c['fields']: - for n in f['names']: - symbols[c['name'] + '::' + n] = filename + n + for f in c['fields']: + for n in f['names']: + symbols[c['name'] + '::' + n] = filename + n - for e in c['enums']: - symbols[e['name']] = filename + e['name'] - symbols[c['name'] + '::' + e['name']] = filename + e['name'] - for v in e['values']: -# symbols[v['name']] = filename + v['name'] - symbols[e['name'] + '::' + v['name']] = filename + v['name'] - symbols[c['name'] + '::' + v['name']] = filename + v['name'] + for e in c['enums']: + symbols[e['name']] = filename + e['name'] + symbols[c['name'] + '::' + e['name']] = filename + e['name'] + for v in e['values']: + # symbols[v['name']] = filename + v['name'] + symbols[e['name'] + '::' + v['name']] = filename + v['name'] + symbols[c['name'] + '::' + v['name']] = filename + v['name'] for f in functions: - cat = categorize_symbol(first_item(f['names']), f['file']) - if not cat in categories: - categories[cat] = { 'classes': [], 'functions': [], 'enums': [], 'filename': 'reference-%s.rst' % cat.replace(' ', '_')} + cat = categorize_symbol(first_item(f['names']), f['file']) + if cat not in 
categories: + categories[cat] = {'classes': [], 'functions': [], 'enums': [], + 'filename': 'reference-%s.rst' % cat.replace(' ', '_')} - if f['file'] in overviews: - categories[cat]['overview'] = overviews[f['file']] + if f['file'] in overviews: + categories[cat]['overview'] = overviews[f['file']] - for n in f['names']: - symbols[n] = categories[cat]['filename'].replace('.rst', '.html') + '#' + n - categories[cat]['functions'].append(f) + for n in f['names']: + symbols[n] = categories[cat]['filename'].replace('.rst', '.html') + '#' + n + categories[cat]['functions'].append(f) for e in enums: - cat = categorize_symbol(e['name'], e['file']) - if not cat in categories: - categories[cat] = { 'classes': [], 'functions': [], 'enums': [], 'filename': 'reference-%s.rst' % cat.replace(' ', '_')} - categories[cat]['enums'].append(e) - filename = categories[cat]['filename'].replace('.rst', '.html') + '#' - symbols[e['name']] = filename + e['name'] - for v in e['values']: - symbols[e['name'] + '::' + v['name']] = filename + v['name'] + cat = categorize_symbol(e['name'], e['file']) + if cat not in categories: + categories[cat] = {'classes': [], 'functions': [], 'enums': [], + 'filename': 'reference-%s.rst' % cat.replace(' ', '_')} + categories[cat]['enums'].append(e) + filename = categories[cat]['filename'].replace('.rst', '.html') + '#' + symbols[e['name']] = filename + e['name'] + for v in e['values']: + symbols[e['name'] + '::' + v['name']] = filename + v['name'] + def print_declared_in(out, o): - out.write('Declared in "%s"\n\n' % print_link(o['file'], '../include/%s' % o['file'])) - print >>out, dump_link_targets() + out.write('Declared in "%s"\n\n' % print_link(o['file'], '../include/%s' % o['file'])) + print(dump_link_targets(), file=out) # returns RST marked up string + + def linkify_symbols(string): - lines = string.split('\n') - ret = [] - in_literal = False - lno = 0 - for l in lines: - lno += 1 - # don't touch headlines, i.e. 
lines whose - # next line entirely contains one of =, - or . - if (lno < len(lines)-1): next_line = lines[lno] - else: next_line = '' + lines = string.split('\n') + ret = [] + in_literal = False + lno = 0 + for line in lines: + lno += 1 + # don't touch headlines, i.e. lines whose + # next line entirely contains one of =, - or . + if (lno < len(lines) - 1): + next_line = lines[lno] + else: + next_line = '' - if len(next_line) > 0 and lines[lno].replace('=',''). \ - replace('-','').replace('.', '') == '': - ret.append(l) - continue + if len(next_line) > 0 and lines[lno].replace('=', ''). \ + replace('-', '').replace('.', '') == '': + ret.append(line) + continue - if l.startswith('|'): - ret.append(l) - continue - if in_literal and not l.startswith('\t') and not l == '': -# print ' end literal: "%s"' % l - in_literal = False - if in_literal: -# print ' literal: "%s"' % l - ret.append(l) - continue - if l.strip() == '.. parsed-literal::' or \ - l.strip().startswith('.. code::') or \ - (not l.strip().startswith('..') and l.endswith('::')): -# print ' start literal: "%s"' % l - in_literal = True - words = l.split(' ') + if line.startswith('|'): + ret.append(line) + continue + if in_literal and not line.startswith('\t') and not line == '': + # print(' end literal: "%s"' % line) + in_literal = False + if in_literal: + # print(' literal: "%s"' % line) + ret.append(line) + continue + if line.strip() == '.. parsed-literal::' or \ + line.strip().startswith('.. 
code::') or \ + (not line.strip().startswith('..') and line.endswith('::')): + # print(' start literal: "%s"' % line) + in_literal = True + words = line.split(' ') - for i in range(len(words)): - # it's important to preserve leading - # tabs, since that's relevant for - # rst markup + for i in range(len(words)): + # it's important to preserve leading + # tabs, since that's relevant for + # rst markup - leading = '' - w = words[i] + leading = '' + w = words[i] - if len(w) == 0: continue + if len(w) == 0: + continue - while len(w) > 0 and \ - w[0] in ['\t', ' ', '(', '[', '{']: - leading += w[0] - w = w[1:] + while len(w) > 0 and \ + w[0] in ['\t', ' ', '(', '[', '{']: + leading += w[0] + w = w[1:] - # preserve commas and dots at the end - w = w.strip() - trailing = '' + # preserve commas and dots at the end + w = w.strip() + trailing = '' - if len(w) == 0: continue + if len(w) == 0: + continue - while len(w) > 1 and w[-1] in ['.', ',', ')'] and w[-2:] != '()': - trailing = w[-1] + trailing - w = w[:-1] + while len(w) > 1 and w[-1] in ['.', ',', ')'] and w[-2:] != '()': + trailing = w[-1] + trailing + w = w[:-1] - link_name = w; + link_name = w -# print w + # print(w) - if len(w) == 0: continue + if len(w) == 0: + continue - if link_name[-1] == '_': link_name = link_name[:-1] + if link_name[-1] == '_': + link_name = link_name[:-1] + + if w in symbols: + link_name = link_name.replace('-', ' ') + # print(' found %s -> %s' % (w, link_name)) + words[i] = leading + print_link(link_name, symbols[w]) + trailing + ret.append(' '.join(words)) + return '\n'.join(ret) - if w in symbols: - link_name = link_name.replace('-', ' ') -# print ' found %s -> %s' % (w, link_name) - words[i] = leading + print_link(link_name, symbols[w]) + trailing - ret.append(' '.join(words)) - return '\n'.join(ret) link_targets = [] + def print_link(name, target): - global link_targets - link_targets.append(target) - return "`%s`__" % name + global link_targets + link_targets.append(target) + return 
"`%s`__" % name -def dump_link_targets(indent = ''): - global link_targets - ret = '\n' - for l in link_targets: - ret += '%s__ %s\n' % (indent, l) - link_targets = [] - return ret -def heading(string, c, indent = ''): - string = string.strip() - return '\n' + indent + string + '\n' + indent + (c * len(string)) + '\n' +def dump_link_targets(indent=''): + global link_targets + ret = '\n' + for l in link_targets: + ret += '%s__ %s\n' % (indent, l) + link_targets = [] + return ret + + +def heading(string, c, indent=''): + string = string.strip() + return '\n' + indent + string + '\n' + indent + (c * len(string)) + '\n' + def render_enums(out, enums, print_declared_reference, header_level): - for e in enums: - print >>out, '.. raw:: html\n' - print >>out, '\t' % e['name'] - print >>out, '' - print >>out, heading('enum %s' % e['name'], header_level) + for e in enums: + print('.. raw:: html\n', file=out) + print('\t' % e['name'], file=out) + print('', file=out) + print(heading('enum %s' % e['name'], header_level), file=out) - print_declared_in(out, e) + print_declared_in(out, e) - width = [len('name'), len('value'), len('description')] + width = [len('name'), len('value'), len('description')] - for i in range(len(e['values'])): - e['values'][i]['desc'] = linkify_symbols(e['values'][i]['desc']) + for i in range(len(e['values'])): + e['values'][i]['desc'] = linkify_symbols(e['values'][i]['desc']) - for v in e['values']: - width[0] = max(width[0], len(v['name'])) - width[1] = max(width[1], len(v['val'])) - for d in v['desc'].split('\n'): - width[2] = max(width[2], len(d)) + for v in e['values']: + width[0] = max(width[0], len(v['name'])) + width[1] = max(width[1], len(v['val'])) + for d in v['desc'].split('\n'): + width[2] = max(width[2], len(d)) - print >>out, '+-' + ('-' * width[0]) + '-+-' + ('-' * width[1]) + '-+-' + ('-' * width[2]) + '-+' - print >>out, '| ' + 'name'.ljust(width[0]) + ' | ' + 'value'.ljust(width[1]) + ' | ' + 'description'.ljust(width[2]) + ' |' - 
print >>out, '+=' + ('=' * width[0]) + '=+=' + ('=' * width[1]) + '=+=' + ('=' * width[2]) + '=+' - for v in e['values']: - d = v['desc'].split('\n') - if len(d) == 0: d = [''] - print >>out, '| ' + v['name'].ljust(width[0]) + ' | ' + v['val'].ljust(width[1]) + ' | ' + d[0].ljust(width[2]) + ' |' - for s in d[1:]: - print >>out, '| ' + (' ' * width[0]) + ' | ' + (' ' * width[1]) + ' | ' + s.ljust(width[2]) + ' |' - print >>out, '+-' + ('-' * width[0]) + '-+-' + ('-' * width[1]) + '-+-' + ('-' * width[2]) + '-+' - print >>out, '' + print('+-' + ('-' * width[0]) + '-+-' + ('-' * width[1]) + '-+-' + ('-' * width[2]) + '-+', file=out) + print(('| ' + 'name'.ljust(width[0]) + ' | ' + 'value'.ljust(width[1]) + ' | ' + 'description'.ljust(width[2]) + ' |'), file=out) + print('+=' + ('=' * width[0]) + '=+=' + ('=' * width[1]) + '=+=' + ('=' * width[2]) + '=+', file=out) + for v in e['values']: + d = v['desc'].split('\n') + if len(d) == 0: + d = [''] + print(('| ' + v['name'].ljust(width[0]) + ' | ' + v['val'].ljust(width[1]) + ' | ' + + d[0].ljust(width[2]) + ' |'), file=out) + for s in d[1:]: + print('| ' + (' ' * width[0]) + ' | ' + (' ' * width[1]) + ' | ' + s.ljust(width[2]) + ' |', file=out) + print('+-' + ('-' * width[0]) + '-+-' + ('-' * width[1]) + '-+-' + ('-' * width[2]) + '-+', file=out) + print('', file=out) + + print(dump_link_targets(), file=out) - print >>out, dump_link_targets() sections = \ -{ - 'Core': 0, - 'Session': 0, - 'Settings': 0, + { + 'Core': 0, + 'Session': 0, + 'Settings': 0, - 'Bencoding': 1, - 'Bdecoding': 1, - 'Filter': 1, - 'Error Codes': 1, - 'Create Torrents': 1, + 'Bencoding': 1, + 'Bdecoding': 1, + 'Filter': 1, + 'Error Codes': 1, + 'Create Torrents': 1, - 'ed25519': 2, - 'Utility': 2, - 'Storage': 2, - 'Custom Storage': 2, - 'Plugins': 2, + 'ed25519': 2, + 'Utility': 2, + 'Storage': 2, + 'Custom Storage': 2, + 'Plugins': 2, + + 'Alerts': 3 + } - 'Alerts': 3 -} def print_toc(out, categories, s): - for cat in categories: - if (s != 2 and 
cat not in sections) or \ - (cat in sections and sections[cat] != s): continue + for cat in categories: + if (s != 2 and cat not in sections) or \ + (cat in sections and sections[cat] != s): + continue - print >>out, '\t.. rubric:: %s\n' % cat + print('\t.. rubric:: %s\n' % cat, file=out) - if 'overview' in categories[cat]: - print >>out, '\t| overview__' + if 'overview' in categories[cat]: + print('\t| overview__', file=out) - category_filename = categories[cat]['filename'].replace('.rst', '.html') - for c in categories[cat]['classes']: - print >>out, '\t| ' + print_link(c['name'], symbols[c['name']]) - for f in categories[cat]['functions']: - for n in f['names']: - print >>out, '\t| ' + print_link(n, symbols[n]) - for e in categories[cat]['enums']: - print >>out, '\t| ' + print_link(e['name'], symbols[e['name']]) - print >>out, '' + for c in categories[cat]['classes']: + print('\t| ' + print_link(c['name'], symbols[c['name']]), file=out) + for f in categories[cat]['functions']: + for n in f['names']: + print('\t| ' + print_link(n, symbols[n]), file=out) + for e in categories[cat]['enums']: + print('\t| ' + print_link(e['name'], symbols[e['name']]), file=out) + print('', file=out) - if 'overview' in categories[cat]: - print >>out, '\t__ %s#overview' % categories[cat]['filename'].replace('.rst', '.html') - print >>out, dump_link_targets('\t') + if 'overview' in categories[cat]: + print('\t__ %s#overview' % categories[cat]['filename'].replace('.rst', '.html'), file=out) + print(dump_link_targets('\t'), file=out) out = open('reference.rst', 'w+') @@ -1002,19 +1122,19 @@ out.write('`single-page version`__\n\n__ single-page-ref.html\n\n') for i in range(4): - out.write('.. container:: main-toc\n\n') - print_toc(out, categories, i) + out.write('.. 
container:: main-toc\n\n') + print_toc(out, categories, i) out.close() for cat in categories: - out = open(categories[cat]['filename'], 'w+') + out = open(categories[cat]['filename'], 'w+') - classes = categories[cat]['classes'] - functions = categories[cat]['functions'] - enums = categories[cat]['enums'] + classes = categories[cat]['classes'] + functions = categories[cat]['functions'] + enums = categories[cat]['enums'] - out.write(''' + out.write(''' :Author: Arvid Norberg, arvid@libtorrent.org :Version: 1.2.0 @@ -1030,132 +1150,133 @@ __ reference.html ''' % heading(cat, '=')) - if 'overview' in categories[cat]: - out.write('%s\n' % linkify_symbols(categories[cat]['overview'])) + if 'overview' in categories[cat]: + out.write('%s\n' % linkify_symbols(categories[cat]['overview'])) - for c in classes: + for c in classes: - print >>out, '.. raw:: html\n' - print >>out, '\t' % c['name'] - print >>out, '' + print('.. raw:: html\n', file=out) + print('\t' % c['name'], file=out) + print('', file=out) - out.write('%s\n' % heading(c['name'], '-')) - print_declared_in(out, c) - c['desc'] = linkify_symbols(c['desc']) - out.write('%s\n' % c['desc']) - print >>out, dump_link_targets() + out.write('%s\n' % heading(c['name'], '-')) + print_declared_in(out, c) + c['desc'] = linkify_symbols(c['desc']) + out.write('%s\n' % c['desc']) + print(dump_link_targets(), file=out) - print >>out,'\n.. parsed-literal::\n\t' + print('\n.. 
parsed-literal::\n\t', file=out) - block = '\n%s\n{\n' % c['decl'] - for f in c['fun']: - for s in f['signatures']: - block += ' %s\n' % highlight_signature(s.replace('\n', '\n ')) + block = '\n%s\n{\n' % c['decl'] + for f in c['fun']: + for s in f['signatures']: + block += ' %s\n' % highlight_signature(s.replace('\n', '\n ')) - if len(c['fun']) > 0 and len(c['enums']) > 0: block += '\n' + if len(c['fun']) > 0 and len(c['enums']) > 0: + block += '\n' - first = True - for e in c['enums']: - if not first: - block += '\n' - first = False - block += ' enum %s\n {\n' % e['name'] - for v in e['values']: - block += ' %s,\n' % v['name'] - block += ' };\n' + first = True + for e in c['enums']: + if not first: + block += '\n' + first = False + block += ' enum %s\n {\n' % e['name'] + for v in e['values']: + block += ' %s,\n' % v['name'] + block += ' };\n' - if len(c['fun']) + len(c['enums']) > 0 and len(c['fields']): block += '\n' + if len(c['fun']) + len(c['enums']) > 0 and len(c['fields']): + block += '\n' - for f in c['fields']: - for s in f['signatures']: - block += ' %s\n' % s + for f in c['fields']: + for s in f['signatures']: + block += ' %s\n' % s - block += '};' + block += '};' - print >>out, block.replace('\n', '\n\t') + '\n' + print(block.replace('\n', '\n\t') + '\n', file=out) - for f in c['fun']: - if f['desc'] == '': continue - title = '' - print >>out, '.. raw:: html\n' - for n in f['names']: - print >>out, '\t' % n - print >>out, '' - for n in f['names']: - title += '%s ' % n - print >>out, heading(title.strip(), '.') + for f in c['fun']: + if f['desc'] == '': + continue + title = '' + print('.. raw:: html\n', file=out) + for n in f['names']: + print('\t' % n, file=out) + print('', file=out) + for n in f['names']: + title += '%s ' % n + print(heading(title.strip(), '.'), file=out) - block = '.. parsed-literal::\n\n' + block = '.. 
parsed-literal::\n\n' - for s in f['signatures']: - block += highlight_signature(s.replace('\n', '\n ')) + '\n' - print >>out, '%s\n' % block.replace('\n', '\n\t') - f['desc'] = linkify_symbols(f['desc']) - print >>out, '%s' % f['desc'] + for s in f['signatures']: + block += highlight_signature(s.replace('\n', '\n ')) + '\n' + print('%s\n' % block.replace('\n', '\n\t'), file=out) + f['desc'] = linkify_symbols(f['desc']) + print('%s' % f['desc'], file=out) - print >>out, dump_link_targets() + print(dump_link_targets(), file=out) - render_enums(out, c['enums'], False, '.') + render_enums(out, c['enums'], False, '.') - for f in c['fields']: - if f['desc'] == '': continue + for f in c['fields']: + if f['desc'] == '': + continue - print >>out, '.. raw:: html\n' - for n in f['names']: - print >>out, '\t' % n - print >>out, '' + print('.. raw:: html\n', file=out) + for n in f['names']: + print('\t' % n, file=out) + print('', file=out) - for n in f['names']: - print >>out, '%s ' % n, - print >>out, '' - f['desc'] = linkify_symbols(f['desc']) - print >>out, '\t%s' % f['desc'].replace('\n', '\n\t') + for n in f['names']: + print('%s ' % n, end=' ', file=out) + print('', file=out) + f['desc'] = linkify_symbols(f['desc']) + print('\t%s' % f['desc'].replace('\n', '\n\t'), file=out) - print >>out, dump_link_targets() + print(dump_link_targets(), file=out) + for f in functions: + h = '' + print('.. raw:: html\n', file=out) + for n in f['names']: + print('\t' % n, file=out) + print('', file=out) + for n in f['names']: + h += '%s ' % n + print(heading(h, '-'), file=out) + print_declared_in(out, f) - for f in functions: - h = '' - print >>out, '.. raw:: html\n' - for n in f['names']: - print >>out, '\t' % n - print >>out, '' - for n in f['names']: - h += '%s ' % n - print >>out, heading(h, '-') - print_declared_in(out, f) + block = '.. parsed-literal::\n\n' + for s in f['signatures']: + block += highlight_signature(s) + '\n' - block = '.. 
parsed-literal::\n\n' - for s in f['signatures']: - block += highlight_signature(s) + '\n' + print('%s\n' % block.replace('\n', '\n\t'), file=out) + print(linkify_symbols(f['desc']), file=out) - print >>out, '%s\n' % block.replace('\n', '\n\t') - print >>out, linkify_symbols(f['desc']) + print(dump_link_targets(), file=out) - print >>out, dump_link_targets() + render_enums(out, enums, True, '-') - render_enums(out, enums, True, '-') + print(dump_link_targets(), file=out) - print >>out, dump_link_targets() + for i in static_links: + print(i, file=out) - for i in static_links: - print >>out, i + out.close() - out.close() +# for s in symbols: +# print(s) -#for s in symbols: -# print s - -for i,o in preprocess_rst.items(): - f = open(i, 'r') - out = open(o, 'w+') - print 'processing %s -> %s' % (i, o) - l = linkify_symbols(f.read()) - print >>out, l, - - print >>out, dump_link_targets() - - out.close() - f.close() +for i, o in list(preprocess_rst.items()): + f = open(i, 'r') + out = open(o, 'w+') + print('processing %s -> %s' % (i, o)) + link = linkify_symbols(f.read()) + print(link, end=' ', file=out) + print(dump_link_targets(), file=out) + out.close() + f.close() diff --git a/docs/gen_settings_doc.py b/docs/gen_settings_doc.py index 5329cf0aa..5f5090af4 100755 --- a/docs/gen_settings_doc.py +++ b/docs/gen_settings_doc.py @@ -1,98 +1,123 @@ #!/usr/bin/env python +from __future__ import print_function f = open('../include/libtorrent/settings_pack.hpp') out = open('settings.rst', 'w+') + def print_field(str, width): - return '%s%s' % (str, ' ' * (width - len(str))) + return '%s%s' % (str, ' ' * (width - len(str))) + def render_section(names, description, type, default_values): - max_name_len = max(len(max(names, key=len)), len('name')) - max_type_len = max(len(type), len('type')) - max_val_len = max(len(max(default_values, key=len)), len('default')) + max_name_len = max(len(max(names, key=len)), len('name')) + max_type_len = max(len(type), len('type')) + max_val_len = 
max(len(max(default_values, key=len)), len('default')) - # add link targets for the rest of the manual to reference - for n in names: - print >>out, '.. _%s:\n' % n + # add link targets for the rest of the manual to reference + for n in names: + print('.. _%s:\n' % n, file=out) - if len(names) > 0: - print >>out, '.. raw:: html\n' - for n in names: - print >>out, '\t' % n - print >>out, '' + if len(names) > 0: + print('.. raw:: html\n', file=out) + for n in names: + print('\t' % n, file=out) + print('', file=out) - separator = '+-' + ('-' * max_name_len) + '-+-' + ('-' * max_type_len) + '-+-' + ('-' * max_val_len) + '-+' + separator = '+-' + ('-' * max_name_len) + '-+-' + ('-' * max_type_len) + '-+-' + ('-' * max_val_len) + '-+' + + # build a table for the settings, their type and default value + print(separator, file=out) + print( + '| %s | %s | %s |' % + (print_field( + 'name', max_name_len), print_field( + 'type', max_type_len), print_field( + 'default', max_val_len)), file=out) + print(separator.replace('-', '='), file=out) + for i in range(len(names)): + print( + '| %s | %s | %s |' % + (print_field( + names[i], max_name_len), print_field( + type, max_type_len), print_field( + default_values[i], max_val_len)), file=out) + print(separator, file=out) + print(file=out) + print(description, file=out) - # build a table for the settings, their type and default value - print >>out, separator - print >>out, '| %s | %s | %s |' % (print_field('name', max_name_len), print_field('type', max_type_len), print_field('default', max_val_len)) - print >>out, separator.replace('-', '=') - for i in range(len(names)): - print >>out, '| %s | %s | %s |' % (print_field(names[i], max_name_len), print_field(type, max_type_len), print_field(default_values[i], max_val_len)) - print >>out, separator - print >>out - print >>out, description mode = '' # parse out default values for settings f2 = open('../src/settings_pack.cpp') def_map = {} -for l in f2: - l = l.strip() - if not 
l.startswith('SET(') \ - and not l.startswith('SET_NOPREV(') \ - and not l.startswith('DEPRECATED_SET('): continue +for line in f2: + line = line.strip() + if not line.startswith('SET(') \ + and not line.startswith('SET_NOPREV(') \ + and not line.startswith('DEPRECATED_SET('): + continue - l = l.split('(')[1].split(',') - def_map[l[0]] = l[1].strip() - print '%s = %s' % (l[0], l[1].strip()) + line = line.split('(')[1].split(',') + def_map[line[0]] = line[1].strip() + print('%s = %s' % (line[0], line[1].strip())) description = '' names = [] -for l in f: - if 'enum string_types' in l: mode = 'string' - if 'enum bool_types' in l: mode = 'bool' - if 'enum int_types' in l: mode = 'int' - if '#if TORRENT_ABI_VERSION == 1' in l: mode += 'skip' - if '#endif' in l: mode = mode[0:-4] +for line in f: + if 'enum string_types' in line: + mode = 'string' + if 'enum bool_types' in line: + mode = 'bool' + if 'enum int_types' in line: + mode = 'int' + if '#if TORRENT_ABI_VERSION == 1' in line: + mode += 'skip' + if '#endif' in line: + mode = mode[0:-4] - if mode == '': continue - if mode[-4:] == 'skip': continue + if mode == '': + continue + if mode[-4:] == 'skip': + continue - l = l.lstrip() + line = line.lstrip() - if l == '' and len(names) > 0: - if description == '': - for n in names: - print 'WARNING: no description for "%s"' % n - else: - default_values = [] - for n in names: - default_values.append(def_map[n]) - render_section(names, description, mode, default_values) - description = '' - names = [] + if line == '' and len(names) > 0: + if description == '': + for n in names: + print('WARNING: no description for "%s"' % n) + else: + default_values = [] + for n in names: + default_values.append(def_map[n]) + render_section(names, description, mode, default_values) + description = '' + names = [] - if l.startswith('};'): - mode = '' - continue + if line.startswith('};'): + mode = '' + continue - if l.startswith('//'): - if l[2] == ' ': description += l[3:] - else: description 
+= l[2:] - continue + if line.startswith('//'): + if line[2] == ' ': + description += line[3:] + else: + description += line[2:] + continue - l = l.strip() - if l.endswith(','): - l = l[:-1] # strip trailing comma - if '=' in l: l = l.split('=')[0].strip() - if l.endswith('_internal'): continue + line = line.strip() + if line.endswith(','): + line = line[:-1] # strip trailing comma + if '=' in line: + line = line.split('=')[0].strip() + if line.endswith('_internal'): + continue - names.append(l) + names.append(line) out.close() f.close() - diff --git a/docs/gen_stats_doc.py b/docs/gen_stats_doc.py index 5bba99812..dcd6649ed 100755 --- a/docs/gen_stats_doc.py +++ b/docs/gen_stats_doc.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +from __future__ import print_function counter_types = {} @@ -6,33 +7,40 @@ f = open('../include/libtorrent/performance_counters.hpp') counter_type = '' -for l in f: +for line in f: - # ignore anything after // - if '//' in l: l = l.split('//')[0] + # ignore anything after // + if '//' in line: + line = line.split('//')[0] - l = l.strip() + line = line.strip() - if l.startswith('#'): continue - if l == '': continue + if line.startswith('#'): + continue + if line == '': + continue - if 'enum stats_counter_t' in l: - counter_type = 'counter' - continue + if 'enum stats_counter_t' in line: + counter_type = 'counter' + continue - if 'enum stats_gauge_t' in l: - counter_type = 'gauge' - continue + if 'enum stats_gauge_t' in line: + counter_type = 'gauge' + continue - if '{' in l or '}' in l or 'struct' in l or 'namespace' in l: continue - if counter_type == '': continue - if not l.endswith(','): continue + if '{' in line or '}' in line or 'struct' in line or 'namespace' in line: + continue + if counter_type == '': + continue + if not line.endswith(','): + continue - # strip off trailing comma - l = l[:-1] - if '=' in l: l = l[:l.index('=')].strip() + # strip off trailing comma + line = line[:-1] + if '=' in line: + line = 
line[:line.index('=')].strip() - counter_types[l] = counter_type + counter_types[line] = counter_type f.close() @@ -40,39 +48,42 @@ f = open('../src/session_stats.cpp') out = open('stats_counters.rst', 'w+') + def print_field(str, width): - return '%s%s' % (str, ' ' * (width - len(str))) + return '%s%s' % (str, ' ' * (width - len(str))) + def render_section(names, description, types): - max_name_len = max(len(max(names, key=len)), len('name')) - max_type_len = max(len(max(types, key=len)), len('type')) + max_name_len = max(len(max(names, key=len)), len('name')) + max_type_len = max(len(max(types, key=len)), len('type')) - if description == '': - for n in names: - print 'WARNING: no description for "%s"' % n + if description == '': + for n in names: + print('WARNING: no description for "%s"' % n) - # add link targets for the rest of the manual to reference - for n in names: - print >>out, '.. _%s:\n' % n + # add link targets for the rest of the manual to reference + for n in names: + print('.. _%s:\n' % n, file=out) - if len(names) > 0: - print >>out, '.. raw:: html\n' - for n in names: - print >>out, '\t' % n - print >>out, '' + if len(names) > 0: + print('.. 
raw:: html\n', file=out) + for n in names: + print('\t' % n, file=out) + print('', file=out) - separator = '+-' + ('-' * max_name_len) + '-+-' + ('-' * max_type_len) + '-+' + separator = '+-' + ('-' * max_name_len) + '-+-' + ('-' * max_type_len) + '-+' + + # build a table for the settings, their type and default value + print(separator, file=out) + print('| %s | %s |' % (print_field('name', max_name_len), print_field('type', max_type_len)), file=out) + print(separator.replace('-', '='), file=out) + for i in range(len(names)): + print('| %s | %s |' % (print_field(names[i], max_name_len), print_field(types[i], max_type_len)), file=out) + print(separator, file=out) + print(file=out) + print(description, file=out) + print('', file=out) - # build a table for the settings, their type and default value - print >>out, separator - print >>out, '| %s | %s |' % (print_field('name', max_name_len), print_field('type', max_type_len)) - print >>out, separator.replace('-', '=') - for i in range(len(names)): - print >>out, '| %s | %s |' % (print_field(names[i], max_name_len), print_field(types[i], max_type_len)) - print >>out, separator - print >>out - print >>out, description - print >>out, '' mode = '' @@ -80,42 +91,43 @@ description = '' names = [] types = [] -for l in f: - description_line = l.lstrip().startswith('//') +for line in f: + description_line = line.lstrip().startswith('//') - l = l.strip() + line = line.strip() - if mode == 'ignore': - if '#endif' in l: mode = '' - continue + if mode == 'ignore': + if '#endif' in line: + mode = '' + continue - if 'TORRENT_ABI_VERSION == 1' in l: - mode = 'ignore' - continue + if 'TORRENT_ABI_VERSION == 1' in line: + mode = 'ignore' + continue - if description_line == True: - if len(names) > 0: - render_section(names, description, types) - description = '' - names = [] - types = [] + if description_line: + if len(names) > 0: + render_section(names, description, types) + description = '' + names = [] + types = [] - description += '\n' 
+ l[3:] + description += '\n' + line[3:] - if '#define' in l: continue + if '#define' in line: + continue - if 'METRIC(' in l: - args = l.split('(')[1].split(')')[0].split(',') + if 'METRIC(' in line: + args = line.split('(')[1].split(')')[0].split(',') - # args: category, name, type + # args: category, name, type - args[1] = args[1].strip() - names.append(args[0].strip() + '.' + args[1].strip()) - types.append(counter_types[args[1]]) + args[1] = args[1].strip() + names.append(args[0].strip() + '.' + args[1].strip()) + types.append(counter_types[args[1]]) if len(names) > 0: - render_section(names, description, types) + render_section(names, description, types) out.close() f.close() - diff --git a/docs/gen_todo.py b/docs/gen_todo.py index c36fb5b27..773d7baae 100755 --- a/docs/gen_todo.py +++ b/docs/gen_todo.py @@ -2,15 +2,23 @@ import glob import os +import sys -paths = ['test/*.cpp', 'src/*.cpp', 'src/kademlia/*.cpp', 'include/libtorrent/*.hpp', 'include/libtorrent/kademlia/*.hpp', 'include/libtorrent/aux_/*.hpp', 'include/libtorrent/extensions/*.hpp'] +paths = [ + 'test/*.cpp', + 'src/*.cpp', + 'src/kademlia/*.cpp', + 'include/libtorrent/*.hpp', + 'include/libtorrent/kademlia/*.hpp', + 'include/libtorrent/aux_/*.hpp', + 'include/libtorrent/extensions/*.hpp'] os.system('(cd .. 
; ctags %s 2>/dev/null)' % ' '.join(paths)) files = [] for p in paths: - files.extend(glob.glob(os.path.join('..', p))) + files.extend(glob.glob(os.path.join('..', p))) items = [] @@ -20,79 +28,91 @@ context = [] priority_count = [0, 0, 0, 0, 0] + def html_sanitize(s): - ret = '' - for i in s: - if i == '<': ret += '<' - elif i == '>': ret += '>' - elif i == '&': ret += '&' - else: ret += i - return ret + ret = '' + for i in s: + if i == '<': + ret += '<' + elif i == '>': + ret += '>' + elif i == '&': + ret += '&' + else: + ret += i + return ret + for f in files: - h = open(f) + h = open(f) - state = '' - line_no = 0 - context_lines = 0 + state = '' + line_no = 0 + context_lines = 0 - for l in h: - line_no += 1 - line = l.strip() - if 'TODO:' in line and line.startswith('//'): - line = line.split('TODO:')[1].strip() - state = 'todo' - items.append({}) - items[-1]['location'] = '%s:%d' % (f, line_no) - items[-1]['priority'] = 0 - if line[0] in '0123456789': - items[-1]['priority'] = int(line[0]) - if int(line[0]) > 5: - print 'priority too high: ' + line - sys.exit(1) + for l in h: + line_no += 1 + line = l.strip() + if 'TODO:' in line and line.startswith('//'): + line = line.split('TODO:')[1].strip() + state = 'todo' + items.append({}) + items[-1]['location'] = '%s:%d' % (f, line_no) + items[-1]['priority'] = 0 + if line[0] in '0123456789': + items[-1]['priority'] = int(line[0]) + if int(line[0]) > 5: + print('priority too high: ' + line) + sys.exit(1) - line = line[1:].strip() - items[-1]['todo'] = line - prio = items[-1]['priority'] - if prio >= 0 and prio <= 4: priority_count[prio] += 1 - continue + line = line[1:].strip() + items[-1]['todo'] = line + prio = items[-1]['priority'] + if prio >= 0 and prio <= 4: + priority_count[prio] += 1 + continue - if state == '': - context.append(html_sanitize(l)) - if len(context) > 20: context.pop(0) - continue + if state == '': + context.append(html_sanitize(l)) + if len(context) > 20: + context.pop(0) + continue - if state 
== 'todo': - if line.strip().startswith('//'): - items[-1]['todo'] += '\n' - items[-1]['todo'] += line[2:].strip() - else: - state = 'context' - items[-1]['context'] = ''.join(context) + '
relevance %d | %s | %s |
relevance %d | ' + '%s | %s |