add gen_todo.py script. include todo.html and mark up some todos in the code with priority
parent 360af45e63
commit f1b8582a95

@@ -9,6 +9,11 @@ libtorrent hacking
:depth: 2
:backlinks: none

This describes some of the internals of libtorrent. If you're looking for
something to contribute, please take a look at the `todo list`_.

.. _`todo list`: todo.html

terminology
===========

File diff suppressed because it is too large
@@ -0,0 +1,110 @@
import glob
import os

paths = ['src/*.cpp', 'src/kademlia/*.cpp', 'include/libtorrent/*.hpp', 'include/libtorrent/kademlia/*.hpp', 'include/libtorrent/aux_/*.hpp', 'include/libtorrent/extensions/*.hpp']

os.system('ctags %s 2>/dev/null' % ' '.join(paths))

files = []

for p in paths:
	files.extend(glob.glob(p))

items = []

# keeps 20 non-comment lines, used to get more context around
# todo-items
context = []

for f in files:
	h = open(f)

	state = ''
	line_no = 0
	context_lines = 0

	for l in h:
		line_no += 1
		line = l.strip()
		if 'TODO:' in line and line.startswith('//'):
			line = line.split('TODO:')[1].strip()
			state = 'todo'
			items.append({})
			items[-1]['location'] = '%s:%d' % (f, line_no)
			items[-1]['priority'] = 0
			if line[0] in '0123456789':
				items[-1]['priority'] = int(line[0])
				line = line[1:].strip()
			items[-1]['todo'] = line
			continue

		if state == '':
			context.append(l)
			if len(context) > 20: context.pop(0)
			continue

		if state == 'todo':
			if line.strip().startswith('//'):
				items[-1]['todo'] += '\n'
				items[-1]['todo'] += line[2:].strip()
			else:
				state = 'context'
				items[-1]['context'] = ''.join(context) + '<div style="background: #ffff00" width="100%">' + l + '</div>';
				context_lines = 1
			continue

		if state == 'context':
			items[-1]['context'] += l
			context_lines += 1
			if context_lines > 30: state = ''

	h.close()

items.sort(key = lambda x: x['priority'], reverse = True)

#for i in items:
#	print '\n\n', i['todo'], '\n'
#	print i['location'], '\n'
#	print 'prio: ', i['priority'], '\n'
#	if 'context' in i:
#		print i['context'], '\n'

out = open('docs/todo.html', 'w+')
out.write('''<html><head>
<script type="text/javascript">
/* <![CDATA[ */
var expanded = -1
function expand(id) {
	if (expanded != -1) {
		var ctx = document.getElementById(expanded);
		ctx.style.display = "none";
		// if we're expanding the field that's already
		// expanded, just collapse it
		var no_expand = id == expanded;
		expanded = -1;
		if (no_expand) return;
	}
	var ctx = document.getElementById(id);
	ctx.style.display = "table-row";
	expanded = id;
}
/* ]]> */
</script>

</head><body>
<h1>libtorrent todo-list</h1>
<table width="100%" border="1" style="border-collapse: collapse;">''')

index = 0
for i in items:
	if not 'context' in i: i['context'] = ''
	out.write('<tr><td>relevance %d</td><td><a href="javascript:expand(%d)">%s</a></td><td>%s</td></tr>' \
		% (i['priority'], index, i['location'], i['todo'].replace('\n', ' ')))

	out.write('<tr id="%d" style="display: none;" colspan="3"><td colspan="3"><h2>%s</h2><h4>%s</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">%s</pre></td></tr>' \
		% (index, i['todo'], i['location'], i['context']))
	index += 1

out.write('</table></body></html>')
out.close()

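The script above keys off a simple convention in the C++ comments: a "// TODO:" comment may carry a single leading digit, which gen_todo.py records as the item's priority (rendered as "relevance" in the generated table) and sorts on in descending order. A minimal, hypothetical sketch of just that parsing step (the helper name parse_todo is made up for illustration and is not part of the commit):

def parse_todo(comment_line):
	"""Return (priority, text) for a '// TODO:' comment, or None otherwise."""
	line = comment_line.strip()
	if not line.startswith('//') or 'TODO:' not in line:
		return None
	text = line.split('TODO:', 1)[1].strip()
	priority = 0
	# an optional leading digit encodes the priority; no digit means 0
	if text and text[0] in '0123456789':
		priority = int(text[0])
		text = text[1:].strip()
	return priority, text

print(parse_todo('// TODO: 3 use binary search to find the file entry'))
# -> (3, 'use binary search to find the file entry')

The script itself is presumably run from the repository root (its glob patterns are relative to it), invokes ctags if it is installed (errors are discarded), and rewrites docs/todo.html, the page the documentation change above now links to.
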
@@ -57,7 +57,8 @@ class node_impl;

// -------- find data -----------

//TODO: rename this to find_peers
//TODO: 3 rename this class to find_peers, since that's what it does
// find_data is an unnecessarily generic name
class find_data : public traversal_algorithm
{
public:

@@ -89,7 +89,9 @@ struct node_entry
else rtt = int(rtt) / 3 + int(new_rtt) * 2 / 3;
}

// TODO: replace with a union of address_v4 and address_v6
// TODO: 2 replace with a union of address_v4 and address_v6
// to not waste space. This struct is instantiated hundreds of times
// for the routing table
address addr;
boost::uint16_t port;
// the number of times this node has failed to

@@ -1035,7 +1035,10 @@ namespace libtorrent
};

// this list is sorted by time_critical_piece::deadline
// TODO: this should be a deque
// TODO: 2 this should be a deque, since time critical
// pieces are expected to be popped in the same order
// as they are sorted. The expectation is that new items
// are pushed back and items are popped from the front
std::list<time_critical_piece> m_time_critical_pieces;

std::string m_trackerid;

@@ -371,10 +371,6 @@ namespace libtorrent
// when it was added.
std::string name() const;

// TODO: add a feature where the user can tell the torrent
// to finish all pieces currently in the pipeline, and then
// abort the torrent.

void set_upload_limit(int limit) const;
int upload_limit() const;
void set_download_limit(int limit) const;

@@ -361,14 +361,16 @@ namespace libtorrent {
// system_time end = get_system_time()
// + boost::posix_time::microseconds(total_microseconds(max_wait));

// apparently this call can be interrupted
// prematurely if there are other signals
// this call can be interrupted prematurely by other signals
// while (m_condition.timed_wait(lock, end))
// if (!m_alerts.empty()) return m_alerts.front();

ptime start = time_now_hires();

// TODO: change this to use an asio timer instead
// TODO: 3 change this to use a timed wait on a condition variable
// problem is, that's not necessarily portable. But it should be used
// where available. This implementation can be left the way it is for
// more primitive platforms
while (m_alerts.empty())
{
lock.unlock();

@@ -338,7 +338,6 @@ namespace libtorrent

// rely on default umask to filter x and w permissions
// for group and others
// TODO: copy the mode from the source file
int permissions = S_IRUSR | S_IWUSR
| S_IRGRP | S_IWGRP
| S_IROTH | S_IWOTH;

@@ -117,7 +117,7 @@ namespace libtorrent
else
{
int receive_buffer_size = receive_buffer().left() - m_parser.body_start();
// TODO: in chunked encoding mode, this assert won't hold
// TODO: 1 in chunked encoding mode, this assert won't hold.
// the chunk headers should be subtracted from the receive_buffer_size
TORRENT_ASSERT(receive_buffer_size <= t->block_size());
ret.bytes_downloaded = t->block_size() - receive_buffer_size;

@@ -96,7 +96,7 @@ namespace libtorrent

void http_tracker_connection::start()
{
// TODO: authentication
// TODO: 0 support authentication (i.e. user name and password) in the URL
std::string url = tracker_req().url;

if (tracker_req().kind == tracker_request::scrape_request)

@@ -33,7 +33,8 @@ POSSIBILITY OF SUCH DAMAGE.
#include "libtorrent/pch.hpp"
#include "libtorrent/socket.hpp"

// TODO: it would be nice to not have this dependency here
// TODO: 3 remove this dependency by having the dht observer
// have its own flags
#include "libtorrent/aux_/session_impl.hpp"

#include <boost/bind.hpp>

@@ -2485,7 +2485,7 @@ namespace libtorrent
TORRENT_ASSERT(m_ses.is_network_thread());

// flush send buffer at the end of this scope
// TODO: peers should really be corked/uncorked outside of
// TODO: 1 peers should really be corked/uncorked outside of
// all completed disk operations
cork _c(*this);

@@ -4082,7 +4082,6 @@ retry:
if (m_next_dht_torrent == m_torrents.end())
m_next_dht_torrent = m_torrents.begin();
m_next_dht_torrent->second->dht_announce();
// TODO: make a list for torrents that want to be announced on the DHT
++m_next_dht_torrent;
if (m_next_dht_torrent == m_torrents.end())
m_next_dht_torrent = m_torrents.begin();

@@ -4265,7 +4264,7 @@ retry:
bool handled_by_extension = false;

#ifndef TORRENT_DISABLE_EXTENSIONS
// TODO: allow extensions to sort torrents for queuing
// TODO: 0 allow extensions to sort torrents for queuing
#endif

if (!handled_by_extension)

@@ -6133,7 +6132,9 @@ retry:
// since we have a new external IP now, we need to
// restart the DHT with a new node ID
#ifndef TORRENT_DISABLE_DHT
// TODO: we only need to do this if our global IPv4 address has changed
// TODO: 1 we only need to do this if our global IPv4 address has changed
// since the DHT (currently) only supports IPv4. Since restarting the DHT
// is kind of expensive, it would be nice to not do it unnecessarily
if (m_dht)
{
entry s = m_dht->state();

@@ -986,7 +986,7 @@ ret:
size_type file_offset = start;
file_storage::iterator file_iter;

// TODO: use binary search!
// TODO: 3 use binary search to find the file entry
for (file_iter = files().begin();;)
{
if (file_offset < file_iter->size)

@@ -1092,7 +1092,7 @@ ret:
size_type file_offset = start;
file_storage::iterator file_iter;

// TODO: use binary search!
// TODO: 3 use binary search to find the file entry
for (file_iter = files().begin();;)
{
if (file_offset < file_iter->size)

@@ -1206,7 +1206,8 @@ ret:
// a specific alignment for writes. Make sure to truncate the size

// TODO: what if file_base is used to merge several virtual files
// into a single physical file?
// into a single physical file? We should probably disable this
// if file_base is used. This is not a widely used feature though
file_handle->set_size(file_iter->size, ec);
}
}

@@ -4780,13 +4780,13 @@ namespace libtorrent
if (web->type == web_seed_entry::url_seed)
{
c = new (std::nothrow) web_peer_connection(
m_ses, shared_from_this(), s, a, web->url, &web->peer_info, // TODO: pass in web
m_ses, shared_from_this(), s, a, web->url, &web->peer_info,
web->auth, web->extra_headers);
}
else if (web->type == web_seed_entry::http_seed)
{
c = new (std::nothrow) http_seed_connection(
m_ses, shared_from_this(), s, a, web->url, &web->peer_info, // TODO: pass in web
m_ses, shared_from_this(), s, a, web->url, &web->peer_info,
web->auth, web->extra_headers);
}
if (!c) return;

@@ -5291,7 +5291,8 @@ namespace libtorrent
, end(m_trackers.end()); i != end; ++i)
{
// don't save trackers we can't trust
// TODO: save the send_stats state instead
// TODO: 1 save the send_stats state instead of throwing them away
// it may pose an issue when downgrading though
if (i->send_stats == false) continue;
if (i->tier == tier)
{

@@ -5786,7 +5787,7 @@ namespace libtorrent
// failed to parse it. Pause the torrent
if (alerts().should_post<metadata_failed_alert>())
{
// TODO: pass in ec along with the alert
// TODO: 2 pass in ec along with the alert
alerts().post_alert(metadata_failed_alert(get_handle()));
}
set_error(errors::invalid_swarm_metadata, "");

@@ -545,9 +545,9 @@ namespace libtorrent
std::vector<peer_entry> peer_list;
for (int i = 0; i < num_peers; ++i)
{
// TODO: don't use a string here. The problem is that
// some trackers will respond with actual strings.
// Especially i2p trackers
// TODO: it would be more efficient to not use a string here.
// however, the problem is that some trackers will respond
// with actual strings. For example i2p trackers
peer_entry e;
char ip_string[100];
unsigned int a = detail::read_uint8(buf);

@@ -409,9 +409,10 @@ struct utp_socket_impl
// timers when we should trigger the read and
// write callbacks (unless the buffers fill up
// before)
// TODO: 3 remove the read timeout concept. This should not be necessary
ptime m_read_timeout;

// TODO: remove the write timeout concept, and maybe even the read timeout
// TODO: 3 remove the write timeout concept. This should not be necessary
ptime m_write_timeout;

// the time when the last packet we sent times out. Including re-sends.

@@ -613,7 +614,7 @@ struct utp_socket_impl
bool m_attached:1;

// this is true if nagle is enabled (which it is by default)
// TODO: support the option to turn it off
// TODO: 2 support the option to turn it off
bool m_nagle:1;

// this is true while the socket is in slow start mode. It's
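
To tie the two halves of the commit together: each marked-up comment above becomes one row in the generated docs/todo.html, via the two out.write format strings in gen_todo.py. A hypothetical illustration (the values in the item dict are made up for the example; the real ones come from scanning the sources):

item = {
	'priority': 3,
	'location': 'include/libtorrent/kademlia/find_data.hpp:60',
	'todo': "rename this class to find_peers, since that's what it does\nfind_data is an unnecessarily generic name",
}

# the summary row, with newlines in the description flattened to spaces
row = '<tr><td>relevance %d</td><td><a href="javascript:expand(%d)">%s</a></td><td>%s</td></tr>' \
	% (item['priority'], 0, item['location'], item['todo'].replace('\n', ' '))
print(row)

Clicking the location link calls the expand() JavaScript function embedded in the page header, which toggles the hidden companion row holding the surrounding source context.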