<html><head>
<script type="text/javascript">
/* <![CDATA[ */
var expanded = -1
function expand(id) {
if (expanded != -1) {
var ctx = document.getElementById(expanded);
ctx.style.display = "none";
// if we're expanding the field that's already
// expanded, just collapse it
var no_expand = id == expanded;
expanded = -1;
if (no_expand) return;
}
var ctx = document.getElementById(id);
ctx.style.display = "table-row";
expanded = id;
}
/* ]]> */
</script>
</head><body>
<h1>libtorrent todo-list</h1>
<span style="color: #f00">3 urgent</span>
<span style="color: #f77">15 important</span>
<span style="color: #3c3">20 relevant</span>
<span style="color: #77f">9 feasible</span>
<span style="color: #999">88 notes</span>
<table width="100%" border="1" style="border-collapse: collapse;"><tr style="background: #f44"><td>relevance 4</td><td><a href="javascript:expand(0)">../src/session_impl.cpp:480</a></td><td>in order to support SSL over uTP, the utp_socket manager either needs to be able to receive packets on multiple ports, or we need to peek into the first few bytes of the payload stream of a socket to determine whether or not it's an SSL connection. (The former is simpler but won't do as well with NATs)</td></tr><tr id="0" style="display: none;" colspan="3"><td colspan="3"><h2>in order to support SSL over uTP, the utp_socket manager either
needs to be able to receive packets on multiple ports, or we need to
peek into the first few bytes of the payload stream of a socket to determine
whether or not it's an SSL connection. (The former is simpler but won't
do as well with NATs)</h2><h4>../src/session_impl.cpp:480</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> , m_optimistic_unchoke_time_scaler(0)
|
|
, m_disconnect_time_scaler(90)
|
|
, m_auto_scrape_time_scaler(180)
|
|
, m_next_explicit_cache_torrent(0)
|
|
, m_cache_rotation_timer(0)
|
|
, m_next_suggest_torrent(0)
|
|
, m_suggest_timer(0)
|
|
, m_peak_up_rate(0)
|
|
, m_peak_down_rate(0)
|
|
, m_created(time_now_hires())
|
|
, m_last_tick(m_created)
|
|
, m_last_second_tick(m_created - milliseconds(900))
|
|
, m_last_choke(m_created)
|
|
, m_next_rss_update(min_time())
|
|
#ifndef TORRENT_DISABLE_DHT
|
|
, m_dht_announce_timer(m_io_service)
|
|
, m_dht_interval_update_torrents(0)
|
|
#endif
|
|
, m_external_udp_port(0)
|
|
, m_udp_socket(m_io_service, m_half_open)
|
|
<div style="background: #ffff00" width="100%"> , m_utp_socket_manager(m_settings, m_udp_socket, m_stats_counters
|
|
</div> , boost::bind(&session_impl::incoming_connection, this, _1))
|
|
, m_boost_connections(0)
|
|
, m_timer(m_io_service)
|
|
, m_lsd_announce_timer(m_io_service)
|
|
, m_host_resolver(m_io_service)
|
|
, m_download_connect_attempts(0)
|
|
, m_tick_residual(0)
|
|
#if defined TORRENT_VERBOSE_LOGGING || defined TORRENT_LOGGING || defined TORRENT_ERROR_LOGGING
|
|
, m_logpath(".")
|
|
#endif
|
|
#ifndef TORRENT_DISABLE_GEO_IP
|
|
, m_asnum_db(0)
|
|
, m_country_db(0)
|
|
#endif
|
|
, m_deferred_submit_disk_jobs(false)
|
|
, m_pending_auto_manage(false)
|
|
, m_need_auto_manage(false)
|
|
, m_abort(false)
|
|
, m_paused(false)
|
|
#if TORRENT_USE_ASSERTS && defined BOOST_HAS_PTHREADS
|
|
, m_network_thread(0)
|
|
#endif
|
|
{
|
|
#if TORRENT_USE_ASSERTS
|
|
m_posting_torrent_updates = false;
|
|
#endif
|
|
m_udp_socket.set_rate_limit(m_settings.get_int(settings_pack::dht_upload_rate_limit));
|
|
|
|
m_udp_socket.subscribe(&m_tracker_manager);
|
|
m_udp_socket.subscribe(&m_utp_socket_manager);
|
|
</pre></td></tr><tr style="background: #f44"><td>relevance 4</td><td><a href="javascript:expand(1)">../src/torrent.cpp:9550</a></td><td>this logic doesn't work for seeding torrents that are not ticked</td></tr><tr id="1" style="display: none;" colspan="3"><td colspan="3"><h2>this logic doesn't work for seeding torrents that are not ticked</h2><h4>../src/torrent.cpp:9550</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> {
|
|
i = m_connections.begin() + idx;
|
|
--idx;
|
|
}
|
|
}
|
|
if (m_ses.alerts().should_post<stats_alert>())
|
|
m_ses.alerts().post_alert(stats_alert(get_handle(), tick_interval_ms, m_stat));
|
|
|
|
m_total_uploaded += m_stat.last_payload_uploaded();
|
|
m_total_downloaded += m_stat.last_payload_downloaded();
|
|
m_stat.second_tick(tick_interval_ms);
|
|
|
|
// these counters are saved in the resume data, since they updated
|
|
// we need to save the resume data too
|
|
m_need_save_resume_data = true;
|
|
|
|
// if the rate is 0, there's no update because of network transfers
|
|
if (m_stat.low_pass_upload_rate() > 0 || m_stat.low_pass_download_rate() > 0)
|
|
state_updated();
|
|
|
|
<div style="background: #ffff00" width="100%">
|
|
</div> // this section determines whether the torrent is active or not. When it
|
|
// changes state, it may also trigger the auto-manage logic to reconsider
|
|
// which torrents should be queued and started. There is a low pass
|
|
// filter in order to avoid flapping (auto_manage_startup).
|
|
bool is_inactive = false;
|
|
if (is_finished())
|
|
is_inactive = m_stat.upload_payload_rate() < m_ses.settings().get_int(settings_pack::inactive_up_rate);
|
|
else
|
|
is_inactive = m_stat.download_payload_rate() < m_ses.settings().get_int(settings_pack::inactive_down_rate);
|
|
|
|
if (is_inactive)
|
|
{
|
|
if (m_inactive_counter < 0) m_inactive_counter = 0;
|
|
if (m_inactive_counter < INT16_MAX)
|
|
{
|
|
++m_inactive_counter;
|
|
|
|
// if this torrent was just considered inactive, we may want
|
|
// to dequeue some other torrent
|
|
if (m_inactive == false
|
|
&& m_inactive_counter >= m_ses.settings().get_int(settings_pack::auto_manage_startup))
|
|
{
|
|
m_inactive = true;
|
|
if (m_ses.settings().get_bool(settings_pack::dont_count_slow_torrents))
|
|
m_ses.trigger_auto_manage();
|
|
}
|
|
}
|
|
}
|
|
else
|
|
{
|
|
</pre></td></tr><tr style="background: #f44"><td>relevance 4</td><td><a href="javascript:expand(2)">../src/kademlia/refresh.cpp:93</a></td><td>when bootstrapping against our own IP completes, continue to issue another bootstrap against the deepest, non-full bucket. when it completes, issue a bootstrap against one bucket above it, and so on until the bootstrap lookup against the top level bucket (bucket 0) completes. That's when the bootstrap is done</td></tr><tr id="2" style="display: none;" colspan="3"><td colspan="3"><h2>when bootstrapping against our own IP completes,
|
|
continue to issue another bootstrap against the deepest,
|
|
non-full bucket. when it completes, issue a bootstrap against
|
|
one bucket above it, and so on until the bootstrap lookup
|
|
against the top level bucket (bucket 0) completes. That's
|
|
when the bootstrap is done</h2><h4>../src/kademlia/refresh.cpp:93</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> e["y"] = "q";
|
|
e["q"] = "find_node";
|
|
entry& a = e["a"];
|
|
a["target"] = target().to_string();
|
|
m_node.stats_counters().inc_stats_counter(counters::dht_find_node_out);
|
|
return m_node.m_rpc.invoke(e, o->target_ep(), o);
|
|
}
|
|
|
|
bootstrap::bootstrap(
|
|
node_impl& node
|
|
, node_id target
|
|
, done_callback const& callback)
|
|
: refresh(node, target, callback)
|
|
{
|
|
}
|
|
|
|
char const* bootstrap::name() const { return "bootstrap"; }
|
|
|
|
void bootstrap::done()
|
|
{
|
|
<div style="background: #ffff00" width="100%">
|
|
</div>#ifdef TORRENT_DHT_VERBOSE_LOGGING
|
|
TORRENT_LOG(traversal) << "[" << this << "]"
|
|
<< " bootstrap done, pinging remaining nodes";
|
|
#endif
|
|
|
|
for (std::vector<observer_ptr>::iterator i = m_results.begin()
|
|
, end(m_results.end()); i != end; ++i)
|
|
{
|
|
if ((*i)->flags & observer::flag_queried) continue;
|
|
// this will send a ping
|
|
m_node.add_node((*i)->target_ep());
|
|
}
|
|
refresh::done();
|
|
}
|
|
|
|
} } // namespace libtorrent::dht
|
|
|
|
</pre></td></tr><tr style="background: #fcc"><td>relevance 3</td><td><a href="javascript:expand(3)">../src/disk_io_thread.cpp:242</a></td><td>it would be nice to have the number of threads be set dynamically</td></tr><tr id="3" style="display: none;" colspan="3"><td colspan="3"><h2>it would be nice to have the number of threads be set dynamically</h2><h4>../src/disk_io_thread.cpp:242</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> std::pair<block_cache::iterator, block_cache::iterator> pieces
|
|
= m_disk_cache.all_pieces();
|
|
TORRENT_ASSERT(pieces.first == pieces.second);
|
|
#endif
|
|
|
|
#ifdef TORRENT_DISK_STATS
|
|
if (g_access_log)
|
|
{
|
|
FILE* f = g_access_log;
|
|
g_access_log = NULL;
|
|
fclose(f);
|
|
}
|
|
#endif
|
|
|
|
TORRENT_ASSERT(m_magic == 0x1337);
|
|
#if TORRENT_USE_ASSERTS
|
|
m_magic = 0xdead;
|
|
#endif
|
|
}
|
|
|
|
<div style="background: #ffff00" width="100%"> void disk_io_thread::set_num_threads(int i, bool wait)
|
|
</div> {
|
|
TORRENT_ASSERT(m_magic == 0x1337);
|
|
if (i == m_num_threads) return;
|
|
|
|
if (i > m_num_threads)
|
|
{
|
|
while (m_num_threads < i)
|
|
{
|
|
int thread_id = (++m_num_threads) - 1;
|
|
thread_type_t type = generic_thread;
|
|
|
|
// the magic number 3 is also used in add_job()
|
|
// every 4:th thread is a hasher thread
|
|
if ((thread_id & 0x3) == 3) type = hasher_thread;
|
|
m_threads.push_back(boost::shared_ptr<thread>(
|
|
new thread(boost::bind(&disk_io_thread::thread_fun, this, thread_id, type))));
|
|
}
|
|
}
|
|
else
|
|
{
|
|
while (m_num_threads > i) { --m_num_threads; }
|
|
mutex::scoped_lock l(m_job_mutex);
|
|
m_job_cond.notify_all();
|
|
m_hash_job_cond.notify_all();
|
|
l.unlock();
|
|
if (wait) for (int i = m_num_threads; i < m_threads.size(); ++i) m_threads[i]->join();
|
|
// this will detach the threads
|
|
m_threads.resize(m_num_threads);
|
|
}
|
|
}
|
|
</pre></td></tr><tr style="background: #fcc"><td>relevance 3</td><td><a href="javascript:expand(4)">../src/peer_connection.cpp:1729</a></td><td>we should probably use ses.m_allowed_upload_slots here instead to work with auto-unchoke logic</td></tr><tr id="4" style="display: none;" colspan="3"><td colspan="3"><h2>we should probably use ses.m_allowed_upload_slots here instead
|
|
to work with auto-unchoke logic</h2><h4>../src/peer_connection.cpp:1729</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">
|
|
if (t->graceful_pause())
|
|
{
|
|
#if defined TORRENT_VERBOSE_LOGGING
|
|
peer_log("DID NOT UNCHOKE [ graceful pause mode ]");
|
|
#endif
|
|
return;
|
|
}
|
|
|
|
if (is_choked())
|
|
{
|
|
if (ignore_unchoke_slots())
|
|
{
|
|
#ifdef TORRENT_VERBOSE_LOGGING
|
|
peer_log("ABOUT TO UNCHOKE [ peer ignores unchoke slots ]");
|
|
#endif
|
|
// if this peer is exempted from the choker
|
|
// just unchoke it immediately
|
|
send_unchoke();
|
|
}
|
|
<div style="background: #ffff00" width="100%"> else if (m_ses.num_uploads() < m_settings.get_int(settings_pack::unchoke_slots_limit)
|
|
</div> || m_settings.get_int(settings_pack::unchoke_slots_limit) < 0)
|
|
{
|
|
// if the peer is choked and we have upload slots left,
|
|
// then unchoke it. Another condition that has to be met
|
|
// is that the torrent doesn't keep track of the individual
|
|
// up/down ratio for each peer (ratio == 0) or (if it does
|
|
// keep track) this particular connection isn't a leecher.
|
|
// If the peer was choked because it was leeching, don't
|
|
// unchoke it again.
|
|
// The exception to this last condition is if we're a seed.
|
|
// In that case we don't care if people are leeching, they
|
|
// can't pay for their downloads anyway.
|
|
m_ses.unchoke_peer(*this);
|
|
}
|
|
#if defined TORRENT_VERBOSE_LOGGING
|
|
else
|
|
{
|
|
peer_log("DID NOT UNCHOKE [ the number of uploads (%d) "
|
|
"is more than or equal to the limit (%d) ]"
|
|
, m_ses.num_uploads(), m_settings.get_int(settings_pack::unchoke_slots_limit));
|
|
}
|
|
#endif
|
|
}
|
|
else
|
|
{
|
|
// the reason to send an extra unchoke message here is that
|
|
// because of the handshake-round-trip optimization, we may
|
|
// end up sending an unchoke before the other end sends us
|
|
// an interested message. This may confuse clients, not reacting
|
|
// to the first unchoke, and then not check whether it's unchoked
|
|
</pre></td></tr><tr style="background: #fcc"><td>relevance 3</td><td><a href="javascript:expand(5)">../src/peer_connection.cpp:3059</a></td><td>since we throw away the queue entry once we issue the disk job, this may happen. Instead, we should keep the queue entry around, mark it as having been requested from disk and once the disk job comes back, discard it if it has been cancelled. Maybe even be able to cancel disk jobs?</td></tr><tr id="5" style="display: none;" colspan="3"><td colspan="3"><h2>since we throw away the queue entry once we issue
|
|
the disk job, this may happen. Instead, we should keep the
|
|
queue entry around, mark it as having been requested from
|
|
disk and once the disk job comes back, discard it if it has
|
|
been cancelled. Maybe even be able to cancel disk jobs?</h2><h4>../src/peer_connection.cpp:3059</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">
|
|
std::vector<peer_request>::iterator i
|
|
= std::find(m_requests.begin(), m_requests.end(), r);
|
|
|
|
if (i != m_requests.end())
|
|
{
|
|
m_counters.inc_stats_counter(counters::cancelled_piece_requests);
|
|
m_requests.erase(i);
|
|
|
|
if (m_requests.empty())
|
|
m_counters.inc_stats_counter(counters::num_peers_up_requests, -1);
|
|
|
|
#ifdef TORRENT_VERBOSE_LOGGING
|
|
peer_log("==> REJECT_PIECE [ piece: %d s: %x l: %x ] cancelled"
|
|
, r.piece , r.start , r.length);
|
|
#endif
|
|
write_reject_request(r);
|
|
}
|
|
else
|
|
{
|
|
<div style="background: #ffff00" width="100%">#if defined TORRENT_VERBOSE_LOGGING || defined TORRENT_ERROR_LOGGING
|
|
</div> peer_log("*** GOT CANCEL NOT IN THE QUEUE");
|
|
#endif
|
|
}
|
|
}
|
|
|
|
// -----------------------------
|
|
// --------- DHT PORT ----------
|
|
// -----------------------------
|
|
|
|
void peer_connection::incoming_dht_port(int listen_port)
|
|
{
|
|
INVARIANT_CHECK;
|
|
|
|
#ifdef TORRENT_VERBOSE_LOGGING
|
|
peer_log("<== DHT_PORT [ p: %d ]", listen_port);
|
|
#endif
|
|
#ifndef TORRENT_DISABLE_DHT
|
|
m_ses.add_dht_node(udp::endpoint(
|
|
m_remote.address(), listen_port));
|
|
#endif
|
|
}
|
|
|
|
// -----------------------------
|
|
// --------- HAVE ALL ----------
|
|
// -----------------------------
|
|
|
|
void peer_connection::incoming_have_all()
|
|
{
|
|
INVARIANT_CHECK;
|
|
|
|
</pre></td></tr><tr style="background: #fcc"><td>relevance 3</td><td><a href="javascript:expand(6)">../src/peer_connection.cpp:4756</a></td><td>instead of using settings_pack::request_timeout, use m_rtt.mean() + m_rtt.avg_deviation() * 2 or something like that. the configuration option could hopefully be removed</td></tr><tr id="6" style="display: none;" colspan="3"><td colspan="3"><h2>instead of using settings_pack::request_timeout, use
|
|
m_rtt.mean() + m_rtt.avg_deviation() * 2 or something like that.
|
|
the configuration option could hopefully be removed</h2><h4>../src/peer_connection.cpp:4756</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> // don't bother disconnect peers we haven't been interested
|
|
// in (and that hasn't been interested in us) for a while
|
|
// unless we have used up all our connection slots
|
|
if (may_timeout
|
|
&& !m_interesting
|
|
&& !m_peer_interested
|
|
&& d1 > time_limit
|
|
&& d2 > time_limit
|
|
&& (m_ses.num_connections() >= m_settings.get_int(settings_pack::connections_limit)
|
|
|| (t && t->num_peers() >= t->max_connections()))
|
|
&& can_disconnect(error_code(errors::timed_out_no_interest, get_libtorrent_category())))
|
|
{
|
|
#if defined TORRENT_VERBOSE_LOGGING || defined TORRENT_ERROR_LOGGING
|
|
peer_log("*** MUTUAL NO INTEREST [ t1: %d t2: %d ]"
|
|
, total_seconds(d1), total_seconds(d2));
|
|
#endif
|
|
disconnect(errors::timed_out_no_interest, op_bittorrent);
|
|
return;
|
|
}
|
|
|
|
<div style="background: #ffff00" width="100%"> if (may_timeout
|
|
</div> && !m_download_queue.empty()
|
|
&& m_quota[download_channel] > 0
|
|
&& now > m_requested + seconds(m_settings.get_int(settings_pack::request_timeout)
|
|
+ m_timeout_extend))
|
|
{
|
|
snub_peer();
|
|
}
|
|
|
|
// if we haven't sent something in too long, send a keep-alive
|
|
keep_alive();
|
|
|
|
m_statistics.second_tick(tick_interval_ms);
|
|
|
|
if (m_statistics.upload_payload_rate() > m_upload_rate_peak)
|
|
{
|
|
m_upload_rate_peak = m_statistics.upload_payload_rate();
|
|
}
|
|
if (m_statistics.download_payload_rate() > m_download_rate_peak)
|
|
{
|
|
m_download_rate_peak = m_statistics.download_payload_rate();
|
|
#ifndef TORRENT_DISABLE_GEO_IP
|
|
if (peer_info_struct())
|
|
{
|
|
std::pair<const int, int>* as_stats = peer_info_struct()->inet_as;
|
|
if (as_stats && as_stats->second < m_download_rate_peak)
|
|
as_stats->second = m_download_rate_peak;
|
|
}
|
|
#endif
|
|
}
|
|
if (is_disconnecting()) return;
|
|
</pre></td></tr><tr style="background: #fcc"><td>relevance 3</td><td><a href="javascript:expand(7)">../src/piece_picker.cpp:3166</a></td><td>it would be nice if this could be folded into lock_piece() the main distinction is that this also maintains the m_num_passed counter and the passed_hash_check member</td></tr><tr id="7" style="display: none;" colspan="3"><td colspan="3"><h2>it would be nice if this could be folded into lock_piece()
|
|
the main distinction is that this also maintains the m_num_passed
|
|
counter and the passed_hash_check member</h2><h4>../src/piece_picker.cpp:3166</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> int state = m_piece_map[piece].state;
|
|
if (state == piece_pos::piece_open) return;
|
|
std::vector<downloading_piece>::iterator i = find_dl_piece(state - 1, piece);
|
|
if (i == m_downloads[state - 1].end()) return;
|
|
|
|
TORRENT_ASSERT(i->passed_hash_check == false);
|
|
if (i->passed_hash_check)
|
|
{
|
|
// it's not clear why this would happen,
|
|
// but it seems reasonable to not break the
|
|
// accounting over it.
|
|
i->passed_hash_check = false;
|
|
TORRENT_ASSERT(m_num_passed > 0);
|
|
--m_num_passed;
|
|
}
|
|
|
|
// prevent this piece from being picked until it's restored
|
|
i->locked = true;
|
|
}
|
|
|
|
<div style="background: #ffff00" width="100%"> void piece_picker::write_failed(piece_block block)
|
|
</div> {
|
|
TORRENT_PIECE_PICKER_INVARIANT_CHECK;
|
|
|
|
#if TORRENT_USE_INVARIANT_CHECKS
|
|
check_piece_state();
|
|
#endif
|
|
|
|
#ifdef TORRENT_PICKER_LOG
|
|
std::cerr << "[" << this << "] " << "write_failed( {" << block.piece_index << ", " << block.block_index << "} )" << std::endl;
|
|
#endif
|
|
|
|
int state = m_piece_map[block.piece_index].state;
|
|
if (state == piece_pos::piece_open) return;
|
|
std::vector<downloading_piece>::iterator i = find_dl_piece(state - 1, block.piece_index);
|
|
if (i == m_downloads[state - 1].end()) return;
|
|
|
|
block_info& info = i->info[block.block_index];
|
|
TORRENT_ASSERT(&info >= &m_block_info[0]);
|
|
TORRENT_ASSERT(&info < &m_block_info[0] + m_block_info.size());
|
|
TORRENT_ASSERT(info.piece_index == block.piece_index);
|
|
TORRENT_ASSERT(info.state == block_info::state_writing);
|
|
TORRENT_ASSERT(info.num_peers == 0);
|
|
|
|
TORRENT_ASSERT(i->writing > 0);
|
|
TORRENT_ASSERT(info.state == block_info::state_writing);
|
|
|
|
if (info.state == block_info::state_finished) return;
|
|
if (info.state == block_info::state_writing) --i->writing;
|
|
|
|
info.peer = 0;
|
|
</pre></td></tr><tr style="background: #fcc"><td>relevance 3</td><td><a href="javascript:expand(8)">../src/resolver.cpp:39</a></td><td>the first places to use this resolver is the http_connection/http_tracker_connection and udp_tracker_connection. make sure to prefer cache on shutdown</td></tr><tr id="8" style="display: none;" colspan="3"><td colspan="3"><h2>the first places to use this resolver is the
|
|
http_connection/http_tracker_connection and udp_tracker_connection.
|
|
make sure to prefer cache on shutdown</h2><h4>../src/resolver.cpp:39</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
|
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
|
|
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
|
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
|
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
|
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
|
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
|
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
|
POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
*/
|
|
|
|
#include "libtorrent/resolver.hpp"
|
|
#include <boost/bind.hpp>
|
|
#include "libtorrent/debug.hpp"
|
|
|
|
namespace libtorrent
|
|
{
|
|
<div style="background: #ffff00" width="100%"> resolver::resolver(io_service& ios)
|
|
</div> : m_ios(ios)
|
|
, m_resolver(ios)
|
|
, m_max_size(700)
|
|
, m_timeout(seconds(1200))
|
|
{}
|
|
|
|
void resolver::on_lookup(error_code const& ec, tcp::resolver::iterator i
|
|
, resolver_interface::callback_t h, std::string hostname)
|
|
{
|
|
#if defined TORRENT_ASIO_DEBUGGING
|
|
complete_async("resolver::on_lookup");
|
|
#endif
|
|
if (ec)
|
|
{
|
|
std::vector<address> empty;
|
|
h(ec, empty);
|
|
return;
|
|
}
|
|
|
|
dns_cache_entry& ce = m_cache[hostname];
|
|
ptime now = time_now();
|
|
ce.last_seen = now;
|
|
ce.addresses.clear();
|
|
while (i != tcp::resolver::iterator())
|
|
{
|
|
ce.addresses.push_back(i->endpoint().address());
|
|
++i;
|
|
}
|
|
|
|
h(ec, ce.addresses);
|
|
</pre></td></tr><tr style="background: #fcc"><td>relevance 3</td><td><a href="javascript:expand(9)">../src/session_impl.cpp:5697</a></td><td>it would be really nice to update these counters as they are incremented. This depends on the session being ticked, which has a fairly coarse grained resolution</td></tr><tr id="9" style="display: none;" colspan="3"><td colspan="3"><h2>it would be really nice to update these counters
|
|
as they are incremented. This depends on the session
|
|
being ticked, which has a fairly coarse grained resolution</h2><h4>../src/session_impl.cpp:5697</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> t->status(&alert->status.back(), ~torrent_handle::query_accurate_download_counters);
|
|
t->clear_in_state_update();
|
|
}
|
|
state_updates.clear();
|
|
|
|
#if TORRENT_USE_ASSERTS
|
|
m_posting_torrent_updates = false;
|
|
#endif
|
|
|
|
m_alerts.post_alert_ptr(alert.release());
|
|
}
|
|
|
|
void session_impl::post_session_stats()
|
|
{
|
|
std::auto_ptr<session_stats_alert> alert(new session_stats_alert());
|
|
std::vector<boost::uint64_t>& values = alert->values;
|
|
values.resize(counters::num_counters, 0);
|
|
|
|
m_disk_thread.update_stats_counters(m_stats_counters);
|
|
|
|
<div style="background: #ffff00" width="100%"> m_stats_counters.set_value(counters::sent_bytes
|
|
</div> , m_stat.total_upload());
|
|
m_stats_counters.set_value(counters::sent_payload_bytes
|
|
, m_stat.total_transfer(stat::upload_payload));
|
|
m_stats_counters.set_value(counters::sent_ip_overhead_bytes
|
|
, m_stat.total_transfer(stat::upload_ip_protocol));
|
|
m_stats_counters.set_value(counters::sent_tracker_bytes
|
|
, m_stat.total_transfer(stat::upload_tracker_protocol));
|
|
|
|
m_stats_counters.set_value(counters::recv_bytes
|
|
, m_stat.total_download());
|
|
m_stats_counters.set_value(counters::recv_payload_bytes
|
|
, m_stat.total_transfer(stat::download_payload));
|
|
m_stats_counters.set_value(counters::recv_ip_overhead_bytes
|
|
, m_stat.total_transfer(stat::download_ip_protocol));
|
|
m_stats_counters.set_value(counters::recv_tracker_bytes
|
|
, m_stat.total_transfer(stat::download_tracker_protocol));
|
|
|
|
m_stats_counters.set_value(counters::limiter_up_queue
|
|
, m_upload_rate.queue_size());
|
|
m_stats_counters.set_value(counters::limiter_down_queue
|
|
, m_download_rate.queue_size());
|
|
|
|
m_stats_counters.set_value(counters::limiter_up_bytes
|
|
, m_upload_rate.queued_bytes());
|
|
m_stats_counters.set_value(counters::limiter_down_bytes
|
|
, m_download_rate.queued_bytes());
|
|
|
|
for (int i = 0; i < counters::num_counters; ++i)
|
|
values[i] = m_stats_counters[i];
|
|
|
|
</pre></td></tr><tr style="background: #fcc"><td>relevance 3</td><td><a href="javascript:expand(10)">../src/session_impl.cpp:7156</a></td><td>If socket jobs could be higher level, to include RC4 encryption and decryption, we would offload the main thread even more</td></tr><tr id="10" style="display: none;" colspan="3"><td colspan="3"><h2>If socket jobs could be higher level, to include RC4 encryption and decryption,
|
|
we would offload the main thread even more</h2><h4>../src/session_impl.cpp:7156</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> {
|
|
int num_threads = m_settings.get_int(settings_pack::network_threads);
|
|
int num_pools = num_threads > 0 ? num_threads : 1;
|
|
while (num_pools > m_net_thread_pool.size())
|
|
{
|
|
m_net_thread_pool.push_back(boost::make_shared<network_thread_pool>());
|
|
m_net_thread_pool.back()->set_num_threads(1);
|
|
}
|
|
|
|
while (num_pools < m_net_thread_pool.size())
|
|
{
|
|
m_net_thread_pool.erase(m_net_thread_pool.end() - 1);
|
|
}
|
|
|
|
if (num_threads == 0 && m_net_thread_pool.size() > 0)
|
|
{
|
|
m_net_thread_pool[0]->set_num_threads(0);
|
|
}
|
|
}
|
|
|
|
<div style="background: #ffff00" width="100%"> void session_impl::post_socket_job(socket_job& j)
|
|
</div> {
|
|
uintptr_t idx = 0;
|
|
if (m_net_thread_pool.size() > 1)
|
|
{
|
|
// each peer needs to be pinned to a specific thread
|
|
// since reading and writing simultaneously on the same
|
|
// socket from different threads is not supported by asio.
|
|
// as long as a specific socket is consistently used from
|
|
// the same thread, it's safe
|
|
idx = uintptr_t(j.peer.get());
|
|
idx ^= idx >> 8;
|
|
idx %= m_net_thread_pool.size();
|
|
}
|
|
m_net_thread_pool[idx]->post_job(j);
|
|
}
|
|
|
|
void session_impl::update_cache_buffer_chunk_size()
|
|
{
|
|
if (m_settings.get_int(settings_pack::cache_buffer_chunk_size) <= 0)
|
|
m_settings.set_int(settings_pack::cache_buffer_chunk_size, 1);
|
|
}
|
|
|
|
void session_impl::update_report_web_seed_downloads()
|
|
{
|
|
// if this flag changed, update all web seed connections
|
|
bool report = m_settings.get_bool(settings_pack::report_web_seed_downloads);
|
|
for (connection_map::iterator i = m_connections.begin()
|
|
, end(m_connections.end()); i != end; ++i)
|
|
{
|
|
int type = (*i)->type();
|
|
</pre></td></tr><tr style="background: #fcc"><td>relevance 3</td><td><a href="javascript:expand(11)">../src/torrent.cpp:1083</a></td><td>if any other peer has a busy request to this block, we need to cancel it too</td></tr><tr id="11" style="display: none;" colspan="3"><td colspan="3"><h2>if any other peer has a busy request to this block, we need to cancel it too</h2><h4>../src/torrent.cpp:1083</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">#endif
|
|
|
|
TORRENT_ASSERT(j->piece >= 0);
|
|
|
|
if (j->action == disk_io_job::write)
|
|
{
|
|
piece_block block_finished(j->piece, j->d.io.offset / block_size());
|
|
|
|
// we failed to write j->piece to disk tell the piece picker
|
|
if (j->piece >= 0)
|
|
{
|
|
// this will block any other peer from issuing requests
|
|
// to this piece, until we've cleared it.
|
|
if (j->error.ec == asio::error::operation_aborted)
|
|
{
|
|
if (has_picker())
|
|
picker().mark_as_canceled(block_finished, NULL);
|
|
}
|
|
else
|
|
{
|
|
<div style="background: #ffff00" width="100%"> if (has_picker())
|
|
</div> picker().write_failed(block_finished);
|
|
|
|
if (m_storage)
|
|
{
|
|
// when this returns, all outstanding jobs to the
|
|
// piece are done, and we can restore it, allowing
|
|
// new requests to it
|
|
m_ses.disk_thread().async_clear_piece(m_storage.get(), j->piece
|
|
, boost::bind(&torrent::on_piece_fail_sync, shared_from_this(), _1, block_finished));
|
|
}
|
|
else
|
|
{
|
|
// is m_abort true? if so, we should probably just
|
|
// exit this function early, no need to keep the picker
|
|
// state up-to-date, right?
|
|
disk_io_job sj;
|
|
sj.piece = j->piece;
|
|
on_piece_fail_sync(&sj, block_finished);
|
|
}
|
|
}
|
|
update_gauge();
|
|
}
|
|
}
|
|
|
|
if (j->error.ec == error_code(boost::system::errc::not_enough_memory, generic_category()))
|
|
{
|
|
if (alerts().should_post<file_error_alert>())
|
|
alerts().post_alert(file_error_alert(j->error.ec
|
|
, resolve_filename(j->error.file), j->error.operation_str(), get_handle()));
|
|
if (c) c->disconnect(errors::no_memory, peer_connection_interface::op_file);
|
|
</pre></td></tr><tr style="background: #fcc"><td>relevance 3</td><td><a href="javascript:expand(12)">../src/torrent.cpp:7594</a></td><td>if peer is a really good peer, maybe we shouldn't disconnect it</td></tr><tr id="12" style="display: none;" colspan="3"><td colspan="3"><h2>if peer is a really good peer, maybe we shouldn't disconnect it</h2><h4>../src/torrent.cpp:7594</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">#if defined TORRENT_LOGGING || defined TORRENT_ERROR_LOGGING
|
|
debug_log("incoming peer (%d)", int(m_connections.size()));
|
|
#endif
|
|
|
|
#ifdef TORRENT_DEBUG
|
|
error_code ec;
|
|
TORRENT_ASSERT(p->remote() == p->get_socket()->remote_endpoint(ec) || ec);
|
|
#endif
|
|
|
|
TORRENT_ASSERT(p->peer_info_struct() != NULL);
|
|
|
|
// we need to do this after we've added the peer to the policy
|
|
// since that's when the peer is assigned its peer_info object,
|
|
// which holds the rank
|
|
if (maybe_replace_peer)
|
|
{
|
|
// now, find the lowest rank peer and disconnect that
|
|
// if it's lower rank than the incoming connection
|
|
peer_connection* peer = find_lowest_ranking_peer();
|
|
|
|
<div style="background: #ffff00" width="100%"> if (peer && peer->peer_rank() < p->peer_rank())
|
|
</div> {
|
|
peer->disconnect(errors::too_many_connections, peer_connection_interface::op_bittorrent);
|
|
p->peer_disconnected_other();
|
|
}
|
|
else
|
|
{
|
|
p->disconnect(errors::too_many_connections, peer_connection_interface::op_bittorrent);
|
|
// we have to do this here because from the peer's point of
|
|
// it wasn't really attached to the torrent, but we do need
|
|
// to let policy know we're removing it
|
|
remove_peer(p);
|
|
return false;
|
|
}
|
|
}
|
|
|
|
#if TORRENT_USE_INVARIANT_CHECKS
|
|
if (m_policy) m_policy->check_invariant();
|
|
#endif
|
|
|
|
if (m_share_mode)
|
|
recalc_share_mode();
|
|
|
|
return true;
|
|
}
|
|
|
|
bool torrent::want_tick() const
|
|
{
|
|
if (m_abort) return false;
|
|
|
|
if (!m_connections.empty()) return true;
|
|
</pre></td></tr><tr style="background: #fcc"><td>relevance 3</td><td><a href="javascript:expand(13)">../src/web_peer_connection.cpp:586</a></td><td>just make this peer not have the pieces associated with the file we just requested. Only when it doesn't have any of the file do the following</td></tr><tr id="13" style="display: none;" colspan="3"><td colspan="3"><h2>just make this peer not have the pieces
|
|
associated with the file we just requested. Only
|
|
when it doesn't have any of the file do the following</h2><h4>../src/web_peer_connection.cpp:586</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> {
|
|
++m_num_responses;
|
|
|
|
if (m_parser.connection_close())
|
|
{
|
|
incoming_choke();
|
|
if (m_num_responses == 1)
|
|
m_web->supports_keepalive = false;
|
|
}
|
|
|
|
#ifdef TORRENT_VERBOSE_LOGGING
|
|
peer_log("*** STATUS: %d %s", m_parser.status_code(), m_parser.message().c_str());
|
|
std::multimap<std::string, std::string> const& headers = m_parser.headers();
|
|
for (std::multimap<std::string, std::string>::const_iterator i = headers.begin()
|
|
, end(headers.end()); i != end; ++i)
|
|
peer_log(" %s: %s", i->first.c_str(), i->second.c_str());
|
|
#endif
|
|
// if the status code is not one of the accepted ones, abort
|
|
if (!is_ok_status(m_parser.status_code()))
|
|
{
|
|
<div style="background: #ffff00" width="100%"> int retry_time = atoi(m_parser.header("retry-after").c_str());
|
|
</div> if (retry_time <= 0) retry_time = m_settings.get_int(settings_pack::urlseed_wait_retry);
|
|
// temporarily unavailable, retry later
|
|
t->retry_web_seed(this, retry_time);
|
|
std::string error_msg = to_string(m_parser.status_code()).elems
|
|
+ (" " + m_parser.message());
|
|
if (t->alerts().should_post<url_seed_alert>())
|
|
{
|
|
t->alerts().post_alert(url_seed_alert(t->get_handle(), m_url
|
|
, error_msg));
|
|
}
|
|
received_bytes(0, bytes_transferred);
|
|
disconnect(error_code(m_parser.status_code(), get_http_category()), op_bittorrent, 1);
|
|
#ifdef TORRENT_DEBUG
|
|
TORRENT_ASSERT(statistics().last_payload_downloaded()
|
|
+ statistics().last_protocol_downloaded()
|
|
== dl_target);
|
|
#endif
|
|
return;
|
|
}
|
|
if (is_redirect(m_parser.status_code()))
|
|
{
|
|
// this means we got a redirection request
|
|
// look for the location header
|
|
std::string location = m_parser.header("location");
|
|
received_bytes(0, bytes_transferred);
|
|
|
|
if (location.empty())
|
|
{
|
|
// we should not try this server again.
|
|
t->remove_web_seed(this, errors::missing_location, op_bittorrent, 2);
|
|
</pre></td></tr><tr style="background: #fcc"><td>relevance 3</td><td><a href="javascript:expand(14)">../include/libtorrent/block_cache.hpp:212</a></td><td>could this be a scoped_array instead? does cached_piece_entry really need to be copyable? cached_piece_entry does need to be copyable since it's part of a container, but it's possible it could be a raw pointer or boost::unique_ptr perhaps</td></tr><tr id="14" style="display: none;" colspan="3"><td colspan="3"><h2>could this be a scoped_array instead? does cached_piece_entry really need to be copyable?
|
|
cached_piece_entry does need to be copyable since it's part of a container, but it's possible
|
|
it could be a raw pointer or boost::unique_ptr perhaps</h2><h4>../include/libtorrent/block_cache.hpp:212</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> // on this piece to complete. These are executed at that point.
|
|
tailqueue read_jobs;
|
|
|
|
int get_piece() const { return piece; }
|
|
void* get_storage() const { return storage.get(); }
|
|
|
|
bool operator==(cached_piece_entry const& rhs) const
|
|
{ return storage.get() == rhs.storage.get() && piece == rhs.piece; }
|
|
|
|
// if this is set, we'll be calculating the hash
|
|
// for this piece. This member stores the interim
|
|
// state while we're calculating the hash.
|
|
partial_hash* hash;
|
|
|
|
// set to a unique identifier of a peer that last
|
|
// requested from this piece.
|
|
void* last_requester;
|
|
|
|
// the pointers to the block data. If this is a ghost
|
|
// cache entry, there won't be any data here
|
|
<div style="background: #ffff00" width="100%"> boost::shared_array<cached_block_entry> blocks;
|
|
</div>
|
|
// the last time a block was written to this piece
|
|
// plus the minimum amount of time the block is guaranteed
|
|
// to stay in the cache
|
|
</pre></td></tr><tr style="background: #fcc"><td>relevance 3</td><td><a href="javascript:expand(15)">../include/libtorrent/disk_io_thread.hpp:537</a></td><td>turn these counters and gauges into session_stats counters (which also would need to be thread safe)</td></tr><tr id="15" style="display: none;" colspan="3"><td colspan="3"><h2>turn these counters and gauges into session_stats
|
|
counters (which also would need to be thread safe)</h2><h4>../include/libtorrent/disk_io_thread.hpp:537</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> void* m_userdata;
|
|
|
|
// the last time we expired write blocks from the cache
|
|
ptime m_last_cache_expiry;
|
|
|
|
ptime m_last_file_check;
|
|
|
|
// LRU cache of open files
|
|
file_pool m_file_pool;
|
|
|
|
// disk cache
|
|
mutable mutex m_cache_mutex;
|
|
block_cache m_disk_cache;
|
|
|
|
// total number of blocks in use by both the read
|
|
// and the write cache. This is not supposed to
|
|
// exceed m_cache_size
|
|
|
|
counters& m_stats_counters;
|
|
|
|
<div style="background: #ffff00" width="100%"> cache_status m_cache_stats;
|
|
</div>
|
|
// average read time for cache misses (in microseconds)
|
|
average_accumulator m_read_time;
|
|
|
|
// average write time (in microseconds)
|
|
average_accumulator m_write_time;
|
|
|
|
// average hash time (in microseconds)
|
|
average_accumulator m_hash_time;
|
|
|
|
// average time to serve a job (any job) in microseconds
|
|
average_accumulator m_job_time;
|
|
|
|
// the total number of outstanding jobs. This is used to
|
|
// limit the number of jobs issued in parallel. It also creates
|
|
// an opportunity to sort the jobs by physical offset before
|
|
// issued to the AIO subsystem
|
|
boost::atomic<int> m_outstanding_jobs;
|
|
|
|
// this is the main thread io_service. Callbacks are
|
|
// posted on this in order to have them execute in
|
|
// the main thread.
|
|
io_service& m_ios;
|
|
|
|
// the number of jobs that have been blocked by a fence. These
|
|
// jobs are queued up in their respective storage, waiting for
|
|
// the fence to be lowered. This counter is just used to know
|
|
// when it's OK to exit the main loop of the disk thread
|
|
boost::atomic<int> m_num_blocked_jobs;
|
|
|
|
</pre></td></tr><tr style="background: #fcc"><td>relevance 3</td><td><a href="javascript:expand(16)">../include/libtorrent/policy.hpp:104</a></td><td>this class should be renamed peer_list</td></tr><tr id="16" style="display: none;" colspan="3"><td colspan="3"><h2>this class should be renamed peer_list</h2><h4>../include/libtorrent/policy.hpp:104</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> int min_reconnect_time;
|
|
|
|
// the number of iterations over the peer list for this operation
|
|
int loop_counter;
|
|
|
|
// these are used only by find_connect_candidates in order
|
|
// to implement peer ranking. See:
|
|
// http://blog.libtorrent.org/2012/12/swarm-connectivity/
|
|
external_ip const* ip;
|
|
int port;
|
|
|
|
// this must be set to a torrent_peer allocator
|
|
torrent_peer_allocator_interface* peer_allocator;
|
|
|
|
// if any peer were removed during this call, they are returned in
|
|
// this vector. The caller would want to make sure there are no
|
|
// references to these torrent_peers anywhere
|
|
std::vector<torrent_peer*> erased;
|
|
};
|
|
|
|
<div style="background: #ffff00" width="100%"> class TORRENT_EXTRA_EXPORT policy : single_threaded
|
|
</div> {
|
|
public:
|
|
|
|
policy();
|
|
|
|
#if TORRENT_USE_I2P
|
|
torrent_peer* add_i2p_peer(char const* destination, int src, char flags
|
|
, torrent_state* state);
|
|
#endif
|
|
|
|
enum
|
|
{
|
|
// these flags match the flags passed in ut_pex
|
|
// messages
|
|
flag_encryption = 0x1,
|
|
flag_seed = 0x2,
|
|
flag_utp = 0x4,
|
|
flag_holepunch = 0x8,
|
|
};
|
|
|
|
// this is called once for every torrent_peer we get from
|
|
// the tracker, pex, lsd or dht.
|
|
torrent_peer* add_peer(const tcp::endpoint& remote
|
|
, int source, char flags, torrent_state* state);
|
|
|
|
// false means duplicate connection
|
|
bool update_peer_port(int port, torrent_peer* p, int src, torrent_state* state);
|
|
|
|
// called when an incoming connection is accepted
|
|
// false means the connection was refused or failed
|
|
</pre></td></tr><tr style="background: #fcc"><td>relevance 3</td><td><a href="javascript:expand(17)">../include/libtorrent/session.hpp:210</a></td><td>could the fingerprint be a setting as well? And should the settings_pack be optional?</td></tr><tr id="17" style="display: none;" colspan="3"><td colspan="3"><h2>could the fingerprint be a setting as well? And should the
|
|
settings_pack be optional?</h2><h4>../include/libtorrent/session.hpp:210</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> //
|
|
// see apply_settings().
|
|
class TORRENT_EXPORT session: public boost::noncopyable
|
|
{
|
|
public:
|
|
|
|
// If the fingerprint in the first overload is omitted, the client will
|
|
// get a default fingerprint stating the version of libtorrent. The
|
|
// fingerprint is a short string that will be used in the peer-id to
|
|
// identify the client and the client's version. For more details see the
|
|
// fingerprint class.
|
|
//
|
|
// The flags parameter can be used to start default features (upnp &
|
|
// nat-pmp) and default plugins (ut_metadata, ut_pex and smart_ban). The
|
|
// default is to start those features. If you do not want them to start,
|
|
// pass 0 as the flags parameter.
|
|
//
|
|
// The ``alert_mask`` is the same mask that you would send to
|
|
// set_alert_mask().
|
|
|
|
<div style="background: #ffff00" width="100%"> session(settings_pack const& pack
|
|
</div> , fingerprint const& print = fingerprint("LT"
|
|
, LIBTORRENT_VERSION_MAJOR, LIBTORRENT_VERSION_MINOR, 0, 0)
|
|
, int flags = start_default_features | add_default_plugins)
|
|
{
|
|
TORRENT_CFG();
|
|
init(print);
|
|
start(flags, pack);
|
|
}
|
|
session(fingerprint const& print = fingerprint("LT"
|
|
, LIBTORRENT_VERSION_MAJOR, LIBTORRENT_VERSION_MINOR, 0, 0)
|
|
, int flags = start_default_features | add_default_plugins
|
|
, boost::uint32_t alert_mask = alert::error_notification
|
|
TORRENT_LOGPATH_ARG_DEFAULT)
|
|
{
|
|
TORRENT_CFG();
|
|
settings_pack pack;
|
|
pack.set_int(settings_pack::alert_mask, alert_mask);
|
|
if ((flags & start_default_features) == 0)
|
|
{
|
|
pack.set_bool(settings_pack::enable_upnp, false);
|
|
pack.set_bool(settings_pack::enable_natpmp, false);
|
|
pack.set_bool(settings_pack::enable_lsd, false);
|
|
pack.set_bool(settings_pack::enable_dht, false);
|
|
}
|
|
|
|
init(print);
|
|
#if defined TORRENT_VERBOSE_LOGGING || defined TORRENT_LOGGING || defined TORRENT_ERROR_LOGGING
|
|
set_log_path(logpath);
|
|
#endif
|
|
start(flags, pack);
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(18)">../src/disk_io_thread.cpp:844</a></td><td>should this be allocated on the stack?</td></tr><tr id="18" style="display: none;" colspan="3"><td colspan="3"><h2>should this be allocated on the stack?</h2><h4>../src/disk_io_thread.cpp:844</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> // if we're also flushing the read cache, this piece
|
|
// should be removed as soon as all write jobs finishes
|
|
// otherwise it will turn into a read piece
|
|
}
|
|
|
|
// mark_for_deletion may erase the piece from the cache, that's
|
|
// why we don't have the 'i' iterator referencing it at this point
|
|
if (flags & (flush_read_cache | flush_delete_cache))
|
|
{
|
|
fail_jobs_impl(storage_error(boost::asio::error::operation_aborted), pe->jobs, completed_jobs);
|
|
m_disk_cache.mark_for_deletion(pe);
|
|
}
|
|
}
|
|
|
|
void disk_io_thread::flush_cache(piece_manager* storage, boost::uint32_t flags
|
|
, tailqueue& completed_jobs, mutex::scoped_lock& l)
|
|
{
|
|
if (storage)
|
|
{
|
|
boost::unordered_set<cached_piece_entry*> const& pieces = storage->cached_pieces();
|
|
<div style="background: #ffff00" width="100%"> std::vector<int> piece_index;
|
|
</div> piece_index.reserve(pieces.size());
|
|
for (boost::unordered_set<cached_piece_entry*>::const_iterator i = pieces.begin()
|
|
, end(pieces.end()); i != end; ++i)
|
|
{
|
|
piece_index.push_back((*i)->piece);
|
|
}
|
|
|
|
for (std::vector<int>::iterator i = piece_index.begin()
|
|
, end(piece_index.end()); i != end; ++i)
|
|
{
|
|
cached_piece_entry* pe = m_disk_cache.find_piece(storage, *i);
|
|
if (pe == NULL) continue;
|
|
TORRENT_PIECE_ASSERT(pe->storage.get() == storage, pe);
|
|
flush_piece(pe, flags, completed_jobs, l);
|
|
}
|
|
#if TORRENT_USE_ASSERTS
|
|
TORRENT_ASSERT(l.locked());
|
|
// if the user asked to delete the cache for this storage
|
|
// we really should not have any pieces left. This is only called
|
|
// from disk_io_thread::do_delete, which is a fence job and should
|
|
// have any other jobs active, i.e. there should not be any references
|
|
// keeping pieces or blocks alive
|
|
if ((flags & flush_delete_cache) && (flags & flush_expect_clear))
|
|
{
|
|
boost::unordered_set<cached_piece_entry*> const& storage_pieces = storage->cached_pieces();
|
|
for (boost::unordered_set<cached_piece_entry*>::const_iterator i = storage_pieces.begin()
|
|
, end(storage_pieces.end()); i != end; ++i)
|
|
{
|
|
cached_piece_entry* pe = m_disk_cache.find_piece(storage, (*i)->piece);
|
|
TORRENT_PIECE_ASSERT(pe->num_dirty == 0, pe);
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(19)">../src/disk_io_thread.cpp:885</a></td><td>we're not flushing the read cache at all?</td></tr><tr id="19" style="display: none;" colspan="3"><td colspan="3"><h2>we're not flushing the read cache at all?</h2><h4>../src/disk_io_thread.cpp:885</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> // from disk_io_thread::do_delete, which is a fence job and should
|
|
// have any other jobs active, i.e. there should not be any references
|
|
// keeping pieces or blocks alive
|
|
if ((flags & flush_delete_cache) && (flags & flush_expect_clear))
|
|
{
|
|
boost::unordered_set<cached_piece_entry*> const& storage_pieces = storage->cached_pieces();
|
|
for (boost::unordered_set<cached_piece_entry*>::const_iterator i = storage_pieces.begin()
|
|
, end(storage_pieces.end()); i != end; ++i)
|
|
{
|
|
cached_piece_entry* pe = m_disk_cache.find_piece(storage, (*i)->piece);
|
|
TORRENT_PIECE_ASSERT(pe->num_dirty == 0, pe);
|
|
}
|
|
}
|
|
#endif
|
|
}
|
|
else
|
|
{
|
|
std::pair<block_cache::iterator, block_cache::iterator> range = m_disk_cache.all_pieces();
|
|
while (range.first != range.second)
|
|
{
|
|
<div style="background: #ffff00" width="100%"> while (range.first->num_dirty == 0)
|
|
</div> {
|
|
++range.first;
|
|
if (range.first == range.second) return;
|
|
}
|
|
cached_piece_entry* pe = const_cast<cached_piece_entry*>(&*range.first);
|
|
flush_piece(pe, flags, completed_jobs, l);
|
|
range = m_disk_cache.all_pieces();
|
|
}
|
|
}
|
|
}
|
|
|
|
// this is called if we're exceeding (or about to exceed) the cache
|
|
// size limit. This means we should not restrict ourselves to contiguous
|
|
// blocks of write cache line size, but try to flush all old blocks
|
|
// this is why we pass in 1 as cont_block to the flushing functions
|
|
void disk_io_thread::try_flush_write_blocks(int num, tailqueue& completed_jobs
|
|
, mutex::scoped_lock& l)
|
|
{
|
|
DLOG("try_flush_write_blocks: %d\n", num);
|
|
|
|
list_iterator range = m_disk_cache.write_lru_pieces();
|
|
std::vector<std::pair<piece_manager*, int> > pieces;
|
|
pieces.reserve(m_disk_cache.num_write_lru_pieces());
|
|
|
|
for (list_iterator p = range; p.get() && num > 0; p.next())
|
|
{
|
|
cached_piece_entry* e = (cached_piece_entry*)p.get();
|
|
if (e->num_dirty == 0) continue;
|
|
pieces.push_back(std::make_pair(e->storage.get(), int(e->piece)));
|
|
}
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(20)">../src/file.cpp:1491</a></td><td>use vm_copy here, if available, and if buffers are aligned</td></tr><tr id="20" style="display: none;" colspan="3"><td colspan="3"><h2>use vm_copy here, if available, and if buffers are aligned</h2><h4>../src/file.cpp:1491</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> CloseHandle(native_handle());
|
|
m_path.clear();
|
|
#else
|
|
if (m_file_handle != INVALID_HANDLE_VALUE)
|
|
::close(m_file_handle);
|
|
#endif
|
|
|
|
m_file_handle = INVALID_HANDLE_VALUE;
|
|
|
|
m_open_mode = 0;
|
|
}
|
|
|
|
// defined in storage.cpp
|
|
int bufs_size(file::iovec_t const* bufs, int num_bufs);
|
|
|
|
void gather_copy(file::iovec_t const* bufs, int num_bufs, char* dst)
|
|
{
|
|
int offset = 0;
|
|
for (int i = 0; i < num_bufs; ++i)
|
|
{
|
|
<div style="background: #ffff00" width="100%"> memcpy(dst + offset, bufs[i].iov_base, bufs[i].iov_len);
|
|
</div> offset += bufs[i].iov_len;
|
|
}
|
|
}
|
|
|
|
void scatter_copy(file::iovec_t const* bufs, int num_bufs, char const* src)
|
|
{
|
|
int offset = 0;
|
|
for (int i = 0; i < num_bufs; ++i)
|
|
{
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(21)">../src/file.cpp:1502</a></td><td>use vm_copy here, if available, and if buffers are aligned</td></tr><tr id="21" style="display: none;" colspan="3"><td colspan="3"><h2>use vm_copy here, if available, and if buffers are aligned</h2><h4>../src/file.cpp:1502</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> }
|
|
|
|
// defined in storage.cpp
|
|
int bufs_size(file::iovec_t const* bufs, int num_bufs);
|
|
|
|
void gather_copy(file::iovec_t const* bufs, int num_bufs, char* dst)
|
|
{
|
|
int offset = 0;
|
|
for (int i = 0; i < num_bufs; ++i)
|
|
{
|
|
memcpy(dst + offset, bufs[i].iov_base, bufs[i].iov_len);
|
|
offset += bufs[i].iov_len;
|
|
}
|
|
}
|
|
|
|
void scatter_copy(file::iovec_t const* bufs, int num_bufs, char const* src)
|
|
{
|
|
int offset = 0;
|
|
for (int i = 0; i < num_bufs; ++i)
|
|
{
|
|
<div style="background: #ffff00" width="100%"> memcpy(bufs[i].iov_base, src + offset, bufs[i].iov_len);
|
|
</div> offset += bufs[i].iov_len;
|
|
}
|
|
}
|
|
|
|
bool coalesce_read_buffers(file::iovec_t const*& bufs, int& num_bufs, file::iovec_t* tmp)
|
|
{
|
|
int buf_size = bufs_size(bufs, num_bufs);
|
|
// this is page aligned since it's used in APIs which
|
|
// are likely to require that (depending on OS)
|
|
char* buf = (char*)page_aligned_allocator::malloc(buf_size);
|
|
if (!buf) return false;
|
|
tmp->iov_base = buf;
|
|
tmp->iov_len = buf_size;
|
|
bufs = tmp;
|
|
num_bufs = 1;
|
|
return true;
|
|
}
|
|
|
|
void coalesce_read_buffers_end(file::iovec_t const* bufs, int num_bufs, char* buf, bool copy)
|
|
{
|
|
if (copy) scatter_copy(bufs, num_bufs, buf);
|
|
page_aligned_allocator::free(buf);
|
|
}
|
|
|
|
bool coalesce_write_buffers(file::iovec_t const*& bufs, int& num_bufs, file::iovec_t* tmp)
|
|
{
|
|
// coalesce buffers means allocate a temporary buffer and
|
|
// issue a single write operation instead of using a vector
|
|
// operation
|
|
int buf_size = 0;
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(22)">../src/session_impl.cpp:2568</a></td><td>use bind_to_device in udp_socket</td></tr><tr id="22" style="display: none;" colspan="3"><td colspan="3"><h2>use bind_to_device in udp_socket</h2><h4>../src/session_impl.cpp:2568</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> if (m_listen_sockets.empty() && ec)
|
|
{
|
|
#if defined TORRENT_VERBOSE_LOGGING || defined TORRENT_LOGGING || defined TORRENT_ERROR_LOGGING
|
|
char msg[200];
|
|
snprintf(msg, sizeof(msg), "cannot bind TCP listen socket to interface \"%s\": %s"
|
|
, print_endpoint(m_listen_interface).c_str(), ec.message().c_str());
|
|
(*m_logger) << msg << "\n";
|
|
#endif
|
|
if (m_listen_port_retries > 0)
|
|
{
|
|
m_listen_interface.port(m_listen_interface.port() + 1);
|
|
--m_listen_port_retries;
|
|
goto retry;
|
|
}
|
|
if (m_alerts.should_post<listen_failed_alert>())
|
|
m_alerts.post_alert(listen_failed_alert(print_endpoint(m_listen_interface)
|
|
, listen_failed_alert::bind, ec, listen_failed_alert::udp));
|
|
return;
|
|
}
|
|
|
|
<div style="background: #ffff00" width="100%"> m_udp_socket.bind(udp::endpoint(m_listen_interface.address(), m_listen_interface.port()), ec);
|
|
</div> if (ec)
|
|
{
|
|
#if defined TORRENT_VERBOSE_LOGGING || defined TORRENT_LOGGING || defined TORRENT_ERROR_LOGGING
|
|
session_log("cannot bind to UDP interface \"%s\": %s"
|
|
, print_endpoint(m_listen_interface).c_str(), ec.message().c_str());
|
|
#endif
|
|
if (m_listen_port_retries > 0)
|
|
{
|
|
m_listen_interface.port(m_listen_interface.port() + 1);
|
|
--m_listen_port_retries;
|
|
goto retry;
|
|
}
|
|
if (m_alerts.should_post<listen_failed_alert>())
|
|
{
|
|
error_code err;
|
|
m_alerts.post_alert(listen_failed_alert(print_endpoint(m_listen_interface)
|
|
, listen_failed_alert::bind, ec, listen_failed_alert::udp));
|
|
}
|
|
return;
|
|
}
|
|
else
|
|
{
|
|
m_external_udp_port = m_udp_socket.local_port();
|
|
maybe_update_udp_mapping(0, m_listen_interface.port(), m_listen_interface.port());
|
|
maybe_update_udp_mapping(1, m_listen_interface.port(), m_listen_interface.port());
|
|
if (m_alerts.should_post<listen_succeeded_alert>())
|
|
m_alerts.post_alert(listen_succeeded_alert(m_listen_interface, listen_succeeded_alert::udp));
|
|
}
|
|
|
|
if (m_settings.get_int(settings_pack::peer_tos) != 0) {
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(23)">../src/session_impl.cpp:4596</a></td><td>make a list for torrents that want to be announced on the DHT so we don't have to loop over all torrents, just to find the ones that want to announce</td></tr><tr id="23" style="display: none;" colspan="3"><td colspan="3"><h2>make a list for torrents that want to be announced on the DHT so we
|
|
don't have to loop over all torrents, just to find the ones that want to announce</h2><h4>../src/session_impl.cpp:4596</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> if (!m_dht_torrents.empty())
|
|
{
|
|
boost::shared_ptr<torrent> t;
|
|
do
|
|
{
|
|
t = m_dht_torrents.front().lock();
|
|
m_dht_torrents.pop_front();
|
|
} while (!t && !m_dht_torrents.empty());
|
|
|
|
if (t)
|
|
{
|
|
t->dht_announce();
|
|
return;
|
|
}
|
|
}
|
|
if (m_torrents.empty()) return;
|
|
|
|
if (m_next_dht_torrent == m_torrents.end())
|
|
m_next_dht_torrent = m_torrents.begin();
|
|
m_next_dht_torrent->second->dht_announce();
|
|
<div style="background: #ffff00" width="100%"> ++m_next_dht_torrent;
|
|
</div> if (m_next_dht_torrent == m_torrents.end())
|
|
m_next_dht_torrent = m_torrents.begin();
|
|
}
|
|
#endif
|
|
|
|
void session_impl::on_lsd_announce(error_code const& e)
|
|
{
|
|
#if defined TORRENT_ASIO_DEBUGGING
|
|
complete_async("session_impl::on_lsd_announce");
|
|
#endif
|
|
m_stats_counters.inc_stats_counter(counters::on_lsd_counter);
|
|
TORRENT_ASSERT(is_single_thread());
|
|
if (e) return;
|
|
|
|
if (m_abort) return;
|
|
|
|
#if defined TORRENT_ASIO_DEBUGGING
|
|
add_outstanding_async("session_impl::on_lsd_announce");
|
|
#endif
|
|
// announce on local network every 5 minutes
|
|
int delay = (std::max)(m_settings.get_int(settings_pack::local_service_announce_interval)
|
|
/ (std::max)(int(m_torrents.size()), 1), 1);
|
|
error_code ec;
|
|
m_lsd_announce_timer.expires_from_now(seconds(delay), ec);
|
|
m_lsd_announce_timer.async_wait(
|
|
bind(&session_impl::on_lsd_announce, this, _1));
|
|
|
|
if (m_torrents.empty()) return;
|
|
|
|
if (m_next_lsd_torrent == m_torrents.end())
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(24)">../src/torrent.cpp:701</a></td><td>post alert</td></tr><tr id="24" style="display: none;" colspan="3"><td colspan="3"><h2>post alert</h2><h4>../src/torrent.cpp:701</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> state_updated();
|
|
|
|
set_state(torrent_status::downloading);
|
|
|
|
m_override_resume_data = true;
|
|
init();
|
|
}
|
|
|
|
#endif // if 0
|
|
|
|
void torrent::leave_seed_mode(bool seed)
|
|
{
|
|
if (!m_seed_mode) return;
|
|
|
|
if (!seed)
|
|
{
|
|
// this means the user promised we had all the
|
|
// files, but it turned out we didn't. This is
|
|
// an error.
|
|
|
|
<div style="background: #ffff00" width="100%">
|
|
</div>#if defined TORRENT_ERROR_LOGGING
|
|
debug_log("*** FAILED SEED MODE, rechecking");
|
|
#endif
|
|
}
|
|
|
|
#if defined TORRENT_VERBOSE_LOGGING || defined TORRENT_LOGGING
|
|
debug_log("*** LEAVING SEED MODE (%s)", seed ? "as seed" : "as non-seed");
|
|
#endif
|
|
m_seed_mode = false;
|
|
// seed is false if we turned out not
|
|
// to be a seed after all
|
|
if (!seed)
|
|
{
|
|
m_have_all = false;
|
|
set_state(torrent_status::downloading);
|
|
force_recheck();
|
|
}
|
|
m_num_verified = 0;
|
|
m_verified.clear();
|
|
m_verifying.clear();
|
|
|
|
m_need_save_resume_data = true;
|
|
}
|
|
|
|
void torrent::verified(int piece)
|
|
{
|
|
TORRENT_ASSERT(piece < int(m_verified.size()));
|
|
TORRENT_ASSERT(piece >= 0);
|
|
TORRENT_ASSERT(m_verified.get_bit(piece) == false);
|
|
++m_num_verified;
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(25)">../src/torrent.cpp:4648</a></td><td>abort lookups this torrent has made via the session host resolver interface</td></tr><tr id="25" style="display: none;" colspan="3"><td colspan="3"><h2>abort lookups this torrent has made via the
|
|
session host resolver interface</h2><h4>../src/torrent.cpp:4648</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> // files belonging to the torrents
|
|
disconnect_all(errors::torrent_aborted, peer_connection_interface::op_bittorrent);
|
|
|
|
// post a message to the main thread to destruct
|
|
// the torrent object from there
|
|
if (m_storage.get())
|
|
{
|
|
inc_refcount("release_files");
|
|
m_ses.disk_thread().async_stop_torrent(m_storage.get()
|
|
, boost::bind(&torrent::on_cache_flushed, shared_from_this(), _1));
|
|
}
|
|
else
|
|
{
|
|
TORRENT_ASSERT(m_abort);
|
|
if (alerts().should_post<cache_flushed_alert>())
|
|
alerts().post_alert(cache_flushed_alert(get_handle()));
|
|
}
|
|
|
|
m_storage.reset();
|
|
m_host_resolver.cancel();
|
|
<div style="background: #ffff00" width="100%">
|
|
</div> if (!m_apply_ip_filter)
|
|
{
|
|
inc_stats_counter(counters::non_filter_torrents, -1);
|
|
m_apply_ip_filter = true;
|
|
}
|
|
|
|
m_allow_peers = false;
|
|
m_auto_managed = false;
|
|
for (int i = 0; i < aux::session_interface::num_torrent_lists; ++i)
|
|
{
|
|
if (!m_links[i].in_list()) continue;
|
|
m_links[i].unlink(m_ses.torrent_list(i), i);
|
|
}
|
|
// don't re-add this torrent to the state-update list
|
|
m_state_subscription = false;
|
|
}
|
|
|
|
void torrent::super_seeding(bool on)
|
|
{
|
|
if (on == m_super_seeding) return;
|
|
|
|
m_super_seeding = on;
|
|
m_need_save_resume_data = true;
|
|
|
|
if (m_super_seeding) return;
|
|
|
|
// disable super seeding for all peers
|
|
for (peer_iterator i = begin(); i != end(); ++i)
|
|
{
|
|
(*i)->superseed_piece(-1, -1);
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(26)">../src/udp_tracker_connection.cpp:64</a></td><td>it would be nice to not have a dependency on session_impl here</td></tr><tr id="26" style="display: none;" colspan="3"><td colspan="3"><h2>it would be nice to not have a dependency on session_impl here</h2><h4>../src/udp_tracker_connection.cpp:64</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">#pragma warning(pop)
|
|
#endif
|
|
|
|
#include "libtorrent/tracker_manager.hpp"
|
|
#include "libtorrent/parse_url.hpp"
|
|
#include "libtorrent/udp_tracker_connection.hpp"
|
|
#include "libtorrent/io.hpp"
|
|
#include "libtorrent/aux_/session_impl.hpp"
|
|
#include "libtorrent/escape_string.hpp"
|
|
#include "libtorrent/broadcast_socket.hpp" // for is_any
|
|
#include "libtorrent/random.hpp"
|
|
|
|
namespace libtorrent
|
|
{
|
|
|
|
std::map<address, udp_tracker_connection::connection_cache_entry>
|
|
udp_tracker_connection::m_connection_cache;
|
|
|
|
mutex udp_tracker_connection::m_cache_mutex;
|
|
|
|
<div style="background: #ffff00" width="100%"> udp_tracker_connection::udp_tracker_connection(
|
|
</div> io_service& ios
|
|
, connection_queue& cc
|
|
, tracker_manager& man
|
|
, tracker_request const& req
|
|
, boost::weak_ptr<request_callback> c
|
|
, aux::session_impl& ses
|
|
, proxy_settings const& proxy)
|
|
: tracker_connection(man, req, ios, c)
|
|
, m_ses(ses)
|
|
, m_proxy(proxy)
|
|
, m_transaction_id(0)
|
|
, m_attempts(0)
|
|
, m_state(action_error)
|
|
, m_abort(false)
|
|
{
|
|
}
|
|
|
|
void udp_tracker_connection::start()
|
|
{
|
|
std::string hostname;
|
|
std::string protocol;
|
|
int port;
|
|
error_code ec;
|
|
|
|
using boost::tuples::ignore;
|
|
boost::tie(protocol, ignore, hostname, port, ignore)
|
|
= parse_url_components(tracker_req().url, ec);
|
|
if (port == -1) port = protocol == "http" ? 80 : 443;
|
|
|
|
if (ec)
|
|
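// ----------------------------------------------------------------------
// sketch, not part of the original file: one way to remove the session_impl
// dependency mentioned above is to hand the tracker connection a narrow
// interface exposing only what it actually uses. The interface below is
// hypothetical; the exact members would be derived from what
// udp_tracker_connection calls on m_ses today.
struct udp_tracker_host
{
	virtual void send_udp_packet(udp::endpoint const& to
		, char const* buf, int len, error_code& ec) = 0;
	virtual unsigned short listen_port() const = 0;
protected:
	~udp_tracker_host() {}
};
// session_impl would implement this interface, and the constructor above
// would take a udp_tracker_host& instead of aux::session_impl&.
// ----------------------------------------------------------------------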
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(27)">../src/web_peer_connection.cpp:645</a></td><td>create a mapping of file-index to redirection URLs. Use that to form URLs instead. Support reconnecting to a new server without destroying this peer_connection</td></tr><tr id="27" style="display: none;" colspan="3"><td colspan="3"><h2>create a mapping of file-index to redirection URLs. Use that to form
URLs instead. Support reconnecting to a new server without destroying this
|
|
peer_connection</h2><h4>../src/web_peer_connection.cpp:645</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> == dl_target);
|
|
#endif
|
|
return;
|
|
}
|
|
|
|
bool single_file_request = false;
|
|
if (!m_path.empty() && m_path[m_path.size() - 1] != '/')
|
|
single_file_request = true;
|
|
|
|
// add the redirected url and remove the current one
|
|
if (!single_file_request)
|
|
{
|
|
TORRENT_ASSERT(!m_file_requests.empty());
|
|
int file_index = m_file_requests.front();
|
|
|
|
if (!t->need_loaded())
|
|
{
|
|
disconnect(errors::torrent_aborted, op_bittorrent);
|
|
return;
|
|
}
|
|
<div style="background: #ffff00" width="100%"> torrent_info const& info = t->torrent_file();
|
|
</div> std::string path = info.orig_files().file_path(file_index);
|
|
#ifdef TORRENT_WINDOWS
|
|
convert_path_to_posix(path);
|
|
#endif
|
|
path = escape_path(path.c_str(), path.length());
|
|
size_t i = location.rfind(path);
|
|
if (i == std::string::npos)
|
|
{
|
|
t->remove_web_seed(this, errors::invalid_redirection, op_bittorrent, 2);
|
|
m_web = NULL;
|
|
TORRENT_ASSERT(is_disconnecting());
|
|
#ifdef TORRENT_DEBUG
|
|
TORRENT_ASSERT(statistics().last_payload_downloaded()
|
|
+ statistics().last_protocol_downloaded()
|
|
== dl_target);
|
|
#endif
|
|
return;
|
|
}
|
|
location.resize(i);
|
|
}
|
|
else
|
|
{
|
|
location = resolve_redirect_location(m_url, location);
|
|
}
|
|
|
|
#ifdef TORRENT_VERBOSE_LOGGING
|
|
peer_log("*** LOCATION: %s", location.c_str());
|
|
#endif
|
|
t->add_web_seed(location, web_seed_entry::url_seed, m_external_auth, m_extra_headers);
|
|
t->remove_web_seed(this, errors::redirecting, op_bittorrent, 2);
|
|
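// ----------------------------------------------------------------------
// sketch, not part of the original file: the TODO above suggests keeping a
// per-file redirect map instead of tearing down the connection. A minimal
// helper (requires <map> and <string>); 'redirects' and 'default_url' are
// state the caller would own, and the names are hypothetical.
inline std::string base_url_for_file(
	std::map<int, std::string> const& redirects
	, int file_index, std::string const& default_url)
{
	std::map<int, std::string>::const_iterator i = redirects.find(file_index);
	return i != redirects.end() ? i->second : default_url;
}
// on a redirect, the connection would record
//   redirects[file_index] = location;  // with the file path stripped off
// and keep going, rather than removing and re-adding the web seed.
// ----------------------------------------------------------------------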
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(28)">../src/kademlia/dos_blocker.cpp:68</a></td><td>make these limits configurable</td></tr><tr id="28" style="display: none;" colspan="3"><td colspan="3"><h2>make these limits configurable</h2><h4>../src/kademlia/dos_blocker.cpp:68</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> bool dos_blocker::incoming(address addr, ptime now)
|
|
{
|
|
node_ban_entry* match = 0;
|
|
node_ban_entry* min = m_ban_nodes;
|
|
for (node_ban_entry* i = m_ban_nodes; i < m_ban_nodes + num_ban_nodes; ++i)
|
|
{
|
|
if (i->src == addr)
|
|
{
|
|
match = i;
|
|
break;
|
|
}
|
|
if (i->count < min->count) min = i;
|
|
else if (i->count == min->count
|
|
&& i->limit < min->limit) min = i;
|
|
}
|
|
|
|
if (match)
|
|
{
|
|
++match->count;
|
|
|
|
<div style="background: #ffff00" width="100%"> if (match->count >= 50)
|
|
</div> {
|
|
if (now < match->limit)
|
|
{
|
|
if (match->count == 50)
|
|
{
|
|
#ifdef TORRENT_DHT_VERBOSE_LOGGING
|
|
TORRENT_LOG(dht_tracker) << " BANNING PEER [ ip: "
|
|
<< addr << " time: " << total_milliseconds((now - match->limit) + seconds(10)) / 1000.f
|
|
<< " count: " << match->count << " ]";
|
|
#endif
|
|
// we've received 50 messages in less than 10 seconds from
|
|
// this node. Ignore it until it's silent for 5 minutes
|
|
match->limit = now + minutes(5);
|
|
}
|
|
|
|
return false;
|
|
}
|
|
|
|
// we got 50 messages from this peer, but it was in
|
|
// more than 10 seconds. Reset the counter and the timer
|
|
match->count = 0;
|
|
match->limit = now + seconds(10);
|
|
}
|
|
}
|
|
else
|
|
{
|
|
min->count = 1;
|
|
min->limit = now + seconds(10);
|
|
min->src = addr;
|
|
}
|
|
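// ----------------------------------------------------------------------
// sketch, not part of the original file: the hard-coded 50 messages per 10
// seconds and the 5 minute ban above could be lifted into a small settings
// struct passed to dos_blocker. All names below are hypothetical.
struct dos_blocker_settings
{
	dos_blocker_settings()
		: message_limit(50)   // messages tolerated per window
		, window_seconds(10)  // length of the measurement window
		, ban_minutes(5)      // how long to ignore an offending node
	{}
	int message_limit;
	int window_seconds;
	int ban_minutes;
};
// incoming() would then test "match->count >= m_sett.message_limit" and use
// seconds(m_sett.window_seconds) / minutes(m_sett.ban_minutes) in place of
// the literals.
// ----------------------------------------------------------------------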
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(29)">../src/kademlia/node.cpp:67</a></td><td>make this configurable in dht_settings</td></tr><tr id="29" style="display: none;" colspan="3"><td colspan="3"><h2>make this configurable in dht_settings</h2><h4>../src/kademlia/node.cpp:67</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">#include "libtorrent/kademlia/routing_table.hpp"
|
|
#include "libtorrent/kademlia/node.hpp"
|
|
#include "libtorrent/kademlia/dht_observer.hpp"
|
|
|
|
#include "libtorrent/kademlia/refresh.hpp"
|
|
#include "libtorrent/kademlia/get_peers.hpp"
|
|
#include "libtorrent/kademlia/get_item.hpp"
|
|
#include "libtorrent/performance_counters.hpp" // for counters
|
|
|
|
#ifdef TORRENT_USE_VALGRIND
|
|
#include <valgrind/memcheck.h>
|
|
#endif
|
|
|
|
namespace libtorrent { namespace dht
|
|
{
|
|
|
|
void incoming_error(entry& e, char const* msg, int error_code = 203);
|
|
|
|
using detail::write_endpoint;
|
|
|
|
<div style="background: #ffff00" width="100%">enum { announce_interval = 30 };
|
|
</div>
|
|
#ifdef TORRENT_DHT_VERBOSE_LOGGING
|
|
TORRENT_DEFINE_LOG(node)
|
|
|
|
extern int g_failed_announces;
|
|
extern int g_announces;
|
|
|
|
#endif
|
|
|
|
// remove peers that have timed out
|
|
void purge_peers(std::set<peer_entry>& peers)
|
|
{
|
|
for (std::set<peer_entry>::iterator i = peers.begin()
|
|
, end(peers.end()); i != end;)
|
|
{
|
|
// the peer has timed out
|
|
if (i->added + minutes(int(announce_interval * 1.5f)) < time_now())
|
|
{
|
|
#ifdef TORRENT_DHT_VERBOSE_LOGGING
|
|
TORRENT_LOG(node) << "peer timed out at: " << i->addr;
|
|
#endif
|
|
peers.erase(i++);
|
|
}
|
|
else
|
|
++i;
|
|
}
|
|
}
|
|
|
|
void nop() {}
|
|
|
|
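// ----------------------------------------------------------------------
// sketch, not part of the original file: the fixed
// "enum { announce_interval = 30 }" above could be replaced by a field in
// dht_settings (the struct itself exists in libtorrent, the field name is
// hypothetical):
//
//   // in dht_settings:
//   int announce_interval_minutes;   // defaulting to 30
//
//   // purge_peers() would then use the configured value, e.g.
//   //   i->added + minutes(sett.announce_interval_minutes * 3 / 2) < time_now()
// ----------------------------------------------------------------------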
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(30)">../src/kademlia/node_id.cpp:133</a></td><td>this could be optimized if SSE 4.2 is available. It could also be optimized given that we have a fixed length</td></tr><tr id="30" style="display: none;" colspan="3"><td colspan="3"><h2>this could be optimized if SSE 4.2 is
|
|
available. It could also be optimized given
|
|
that we have a fixed length</h2><h4>../src/kademlia/node_id.cpp:133</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> b6 = ip_.to_v6().to_bytes();
|
|
ip = &b6[0];
|
|
num_octets = 8;
|
|
mask = v6mask;
|
|
}
|
|
else
|
|
#endif
|
|
{
|
|
b4 = ip_.to_v4().to_bytes();
|
|
ip = &b4[0];
|
|
num_octets = 4;
|
|
mask = v4mask;
|
|
}
|
|
|
|
for (int i = 0; i < num_octets; ++i)
|
|
ip[i] &= mask[i];
|
|
|
|
ip[0] |= (r & 0x7) << 5;
|
|
|
|
// this is the crc32c (Castagnoli) polynomial
|
|
<div style="background: #ffff00" width="100%"> boost::crc_optimal<32, 0x1EDC6F41, 0xFFFFFFFF, 0xFFFFFFFF, true, true> crc;
|
|
</div> crc.process_block(ip, ip + num_octets);
|
|
boost::uint32_t c = crc.checksum();
|
|
node_id id;
|
|
|
|
id[0] = (c >> 24) & 0xff;
|
|
id[1] = (c >> 16) & 0xff;
|
|
id[2] = ((c >> 8) & 0xf8) | (random() & 0x7);
|
|
|
|
for (int i = 3; i < 19; ++i) id[i] = random() & 0xff;
|
|
id[19] = r & 0xff;
|
|
|
|
return id;
|
|
}
|
|
|
|
node_id generate_random_id()
|
|
{
|
|
char r[20];
|
|
for (int i = 0; i < 20; ++i) r[i] = random() & 0xff;
|
|
return hasher(r, 20).final();
|
|
}
|
|
|
|
// verifies whether a node-id matches the IP it's used from
|
|
// returns true if the node-id is OK coming from this source
|
|
// and false otherwise.
|
|
bool verify_id(node_id const& nid, address const& source_ip)
|
|
{
|
|
// no need to verify local IPs, they would be incorrect anyway
|
|
if (is_local(source_ip)) return true;
|
|
|
|
node_id h = generate_id_impl(source_ip, nid[19]);
|
|
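// ----------------------------------------------------------------------
// sketch, not part of the original file: with SSE 4.2 the CRC32C above can
// use the dedicated crc32 instruction. The instruction implements the same
// reflected Castagnoli polynomial; seeding with 0xffffffff and inverting the
// result matches boost::crc_optimal<32, 0x1EDC6F41, 0xFFFFFFFF, 0xFFFFFFFF,
// true, true>. Only compiled when the compiler advertises SSE 4.2.
#if defined __SSE4_2__
#include <nmmintrin.h>
inline boost::uint32_t crc32c_sse42(unsigned char const* buf, int len)
{
	boost::uint32_t crc = 0xffffffffu;
	for (int i = 0; i < len; ++i)
		crc = _mm_crc32_u8(crc, buf[i]);
	return crc ^ 0xffffffffu;
}
#endif
// ----------------------------------------------------------------------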
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(31)">../include/libtorrent/enum_net.hpp:137</a></td><td>this could be done more efficiently by just looking up the interface with the given name, maybe even with if_nametoindex()</td></tr><tr id="31" style="display: none;" colspan="3"><td colspan="3"><h2>this could be done more efficiently by just looking up
|
|
the interface with the given name, maybe even with if_nametoindex()</h2><h4>../include/libtorrent/enum_net.hpp:137</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">
|
|
address ip = address::from_string(device_name, ec);
|
|
if (!ec)
|
|
{
|
|
bind_ep.address(ip);
|
|
// it appears to be an IP. Just bind to that address
|
|
sock.bind(bind_ep, ec);
|
|
return bind_ep.address();
|
|
}
|
|
|
|
ec.clear();
|
|
|
|
#ifdef SO_BINDTODEVICE
|
|
// try to use SO_BINDTODEVICE here, if that exists. If it fails,
|
|
// fall back to the mechanism we have below
|
|
sock.set_option(bind_to_device_opt(device_name), ec);
|
|
if (ec)
|
|
#endif
|
|
{
|
|
ec.clear();
|
|
<div style="background: #ffff00" width="100%"> std::vector<ip_interface> ifs = enum_net_interfaces(ios, ec);
|
|
</div> if (ec) return bind_ep.address();
|
|
|
|
bool found = false;
|
|
|
|
for (int i = 0; i < int(ifs.size()); ++i)
|
|
{
|
|
// we're looking for a specific interface, and its address
|
|
// (which must be of the same family as the address we're
|
|
// connecting to)
|
|
if (strcmp(ifs[i].name, device_name) != 0) continue;
|
|
if (ifs[i].interface_address.is_v4() != ipv4)
|
|
continue;
|
|
|
|
bind_ep.address(ifs[i].interface_address);
|
|
found = true;
|
|
break;
|
|
}
|
|
|
|
if (!found)
|
|
{
|
|
ec = error_code(boost::system::errc::no_such_device, generic_category());
|
|
return bind_ep.address();
|
|
}
|
|
}
|
|
sock.bind(bind_ep, ec);
|
|
return bind_ep.address();
|
|
}
|
|
|
|
// returns true if the given device exists
|
|
TORRENT_EXTRA_EXPORT bool has_interface(char const* name, io_service& ios
|
|
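// ----------------------------------------------------------------------
// sketch, not part of the original file: on POSIX systems the address of a
// single named interface can be looked up directly with getifaddrs() rather
// than enumerating everything through enum_net_interfaces(). IPv4 only, for
// brevity.
#include <ifaddrs.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <cstring>
inline bool lookup_v4_address_by_name(char const* device_name, in_addr& out)
{
	ifaddrs* ifs = 0;
	if (getifaddrs(&ifs) != 0) return false;
	bool found = false;
	for (ifaddrs* i = ifs; i != 0; i = i->ifa_next)
	{
		if (i->ifa_addr == 0 || i->ifa_addr->sa_family != AF_INET) continue;
		if (std::strcmp(i->ifa_name, device_name) != 0) continue;
		out = reinterpret_cast<sockaddr_in const*>(i->ifa_addr)->sin_addr;
		found = true;
		break;
	}
	freeifaddrs(ifs);
	return found;
}
// ----------------------------------------------------------------------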
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(32)">../include/libtorrent/intrusive_ptr_base.hpp:44</a></td><td>remove this class and transition over to using shared_ptr and make_shared instead</td></tr><tr id="32" style="display: none;" colspan="3"><td colspan="3"><h2>remove this class and transition over to using shared_ptr and
|
|
make_shared instead</h2><h4>../include/libtorrent/intrusive_ptr_base.hpp:44</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
|
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
|
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
|
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
|
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
|
POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
*/
|
|
|
|
#ifndef TORRENT_INTRUSIVE_PTR_BASE
|
|
#define TORRENT_INTRUSIVE_PTR_BASE
|
|
|
|
#include <boost/checked_delete.hpp>
|
|
#include <boost/intrusive_ptr.hpp>
|
|
#include "libtorrent/config.hpp"
|
|
#include "libtorrent/assert.hpp"
|
|
#include <boost/atomic.hpp>
|
|
|
|
namespace libtorrent
|
|
{
|
|
<div style="background: #ffff00" width="100%"> template<class T>
|
|
</div> struct intrusive_ptr_base
|
|
{
|
|
intrusive_ptr_base(intrusive_ptr_base<T> const&)
|
|
: m_refs(0) {}
|
|
|
|
friend void intrusive_ptr_add_ref(intrusive_ptr_base<T> const* s)
|
|
{
|
|
TORRENT_ASSERT(s != 0);
|
|
TORRENT_ASSERT(s->m_refs >= 0);
|
|
++s->m_refs;
|
|
}
|
|
|
|
friend void intrusive_ptr_release(intrusive_ptr_base<T> const* s)
|
|
{
|
|
TORRENT_ASSERT(s != 0);
|
|
TORRENT_ASSERT(s->m_refs > 0);
|
|
if (--s->m_refs == 0)
|
|
boost::checked_delete(static_cast<T const*>(s));
|
|
}
|
|
|
|
boost::intrusive_ptr<T> self()
|
|
{ return boost::intrusive_ptr<T>((T*)this); }
|
|
|
|
boost::intrusive_ptr<const T> self() const
|
|
{ return boost::intrusive_ptr<const T>((T const*)this); }
|
|
|
|
int refcount() const { return m_refs; }
|
|
|
|
intrusive_ptr_base(): m_refs(0) {}
|
|
|
|
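// ----------------------------------------------------------------------
// sketch, not part of the original file: the migration suggested above swaps
// the intrusive refcount for boost::enable_shared_from_this, so objects are
// created with make_shared and self() becomes shared_from_this(). Requires
// <boost/enable_shared_from_this.hpp> and <boost/make_shared.hpp>.
struct example_object : boost::enable_shared_from_this<example_object>
{
	boost::shared_ptr<example_object> self() { return shared_from_this(); }
};
// usage:
//   boost::shared_ptr<example_object> o = boost::make_shared<example_object>();
//   boost::shared_ptr<example_object> p = o->self();
// ----------------------------------------------------------------------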
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(33)">../include/libtorrent/settings_pack.hpp:70</a></td><td>add an API to query a settings_pack as well</td></tr><tr id="33" style="display: none;" colspan="3"><td colspan="3"><h2>add an API to query a settings_pack as well</h2><h4>../include/libtorrent/settings_pack.hpp:70</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"></pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(34)">../include/libtorrent/settings_pack.hpp:71</a></td><td>maybe convert all bool types into int-types as well</td></tr><tr id="34" style="display: none;" colspan="3"><td colspan="3"><h2>maybe convert all bool types into int-types as well</h2><h4>../include/libtorrent/settings_pack.hpp:71</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">{
|
|
namespace aux { struct session_impl; struct session_settings; }
|
|
|
|
struct settings_pack;
|
|
struct lazy_entry;
|
|
|
|
TORRENT_EXTRA_EXPORT settings_pack* load_pack_from_dict(lazy_entry const* settings);
|
|
TORRENT_EXTRA_EXPORT void save_settings_to_dict(aux::session_settings const& s, entry::dictionary_type& sett);
|
|
TORRENT_EXPORT void initialize_default_settings(aux::session_settings& s);
|
|
TORRENT_EXTRA_EXPORT void apply_pack(settings_pack const* pack, aux::session_settings& sett, aux::session_impl* ses = 0);
|
|
|
|
TORRENT_EXPORT int setting_by_name(std::string const& name);
|
|
TORRENT_EXPORT char const* name_for_setting(int s);
|
|
|
|
#ifndef TORRENT_NO_DEPRECATE
|
|
struct session_settings;
|
|
settings_pack* load_pack_from_struct(aux::session_settings const& current, session_settings const& s);
|
|
void load_struct_from_settings(aux::session_settings const& current, session_settings& ret);
|
|
#endif
|
|
|
|
<div style="background: #ffff00" width="100%">
|
|
</div> // The ``settings_pack`` struct, contains the names of all settings as
|
|
// enum values. These values are passed in to the ``set_str()``,
|
|
// ``set_int()``, ``set_bool()`` functions, to specify the setting to
|
|
// change.
|
|
//
|
|
// These are the available settings:
|
|
//
|
|
// .. include:: settings-ref.rst
|
|
//
|
|
struct TORRENT_EXPORT settings_pack
|
|
{
|
|
friend struct disk_io_thread;
|
|
friend void apply_pack(settings_pack const* pack, aux::session_settings& sett, aux::session_impl* ses);
|
|
|
|
void set_str(int name, std::string val);
|
|
void set_int(int name, int val);
|
|
void set_bool(int name, bool val);
|
|
bool has_val(int name) const;
|
|
void clear();
|
|
|
|
std::string get_str(int name) const;
|
|
int get_int(int name) const;
|
|
bool get_bool(int name) const;
|
|
|
|
// setting names (indices) are 16 bits. The two most significant
|
|
// bits indicate what type the setting has. (string, int, bool)
|
|
enum type_bases
|
|
{
|
|
string_type_base = 0x0000,
|
|
int_type_base = 0x4000,
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(35)">../include/libtorrent/torrent.hpp:1188</a></td><td>replace all usage of this with m_ses.get_resolver()</td></tr><tr id="35" style="display: none;" colspan="3"><td colspan="3"><h2>replace all usage of this with m_ses.get_resolver()</h2><h4>../include/libtorrent/torrent.hpp:1188</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">
|
|
// The list of web seeds in this torrent. Seeds
|
|
// with fatal errors are removed from the set
|
|
std::list<web_seed_entry> m_web_seeds;
|
|
|
|
#ifndef TORRENT_DISABLE_EXTENSIONS
|
|
typedef std::list<boost::shared_ptr<torrent_plugin> > extension_list_t;
|
|
extension_list_t m_extensions;
|
|
#endif
|
|
|
|
// used for tracker announces
|
|
deadline_timer m_tracker_timer;
|
|
|
|
// this is the upload and download statistics for the whole torrent.
|
|
// it's updated from all its peers once every second.
|
|
libtorrent::stat m_stat;
|
|
|
|
// -----------------------------
|
|
|
|
// used to resolve hostnames for web seeds
|
|
<div style="background: #ffff00" width="100%"> mutable tcp::resolver m_host_resolver;
|
|
</div>
|
|
// this vector is allocated lazily. If no file priorities are
|
|
// ever changed, this remains empty. Any unallocated slot
|
|
// implicitly means the file has priority 1.
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(36)">../include/libtorrent/torrent_info.hpp:303</a></td><td>there may be some opportunities to optimize the size of torrent_info. specifically to turn some std::string and std::vector into pointers</td></tr><tr id="36" style="display: none;" colspan="3"><td colspan="3"><h2>there may be some opportunities to optimize the size of torrent_info.
|
|
specifically to turn some std::string and std::vector into pointers</h2><h4>../include/libtorrent/torrent_info.hpp:303</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> bool resolving;
|
|
|
|
// if the user wanted to remove this while
|
|
// we were resolving it. In this case, we set
|
|
// the removed flag to true, to make the resolver
|
|
// callback remove it
|
|
bool removed;
|
|
|
|
// if the web server doesn't support keepalive or a block request was
|
|
// interrupted, the block received so far is kept here for the next
|
|
// connection to pick up
|
|
peer_request restart_request;
|
|
std::vector<char> restart_piece;
|
|
};
|
|
|
|
#ifndef BOOST_NO_EXCEPTIONS
|
|
// for backwards compatibility with 0.14
|
|
typedef libtorrent_exception invalid_torrent_file;
|
|
#endif
|
|
|
|
<div style="background: #ffff00" width="100%"> class TORRENT_EXPORT torrent_info
|
|
</div> {
|
|
public:
|
|
|
|
// The constructor that takes an info-hash will initialize the info-hash to the given value,
|
|
// but leave all other fields empty. This is used internally when downloading torrents without
|
|
// the metadata. The metadata will be created by libtorrent as soon as it has been downloaded
|
|
// from the swarm.
|
|
//
|
|
// The constructor that takes a lazy_entry will create a torrent_info object from the
|
|
// information found in the given torrent_file. The lazy_entry represents a tree node in
|
|
// an bencoded file. To load an ordinary .torrent file
|
|
// into a lazy_entry, use lazy_bdecode().
|
|
//
|
|
// The version that takes a buffer pointer and a size will decode it as a .torrent file and
|
|
// initialize the torrent_info object for you.
|
|
//
|
|
// The version that takes a filename will simply load the torrent file and decode it inside
|
|
// the constructor, for convenience. This might not be the most suitable for applications that
|
|
// want to be able to report detailed errors on what might go wrong.
|
|
//
|
|
// The overloads that takes an ``error_code const&`` never throws if an error occur, they
|
|
// will simply set the error code to describe what went wrong and not fully initialize the
|
|
// torrent_info object. The overloads that do not take the extra error_code parameter will
|
|
// always throw if an error occurs. These overloads are not available when building without
|
|
// exception support.
|
|
//
|
|
// The ``flags`` argument is currently unused.
|
|
#ifndef BOOST_NO_EXCEPTIONS
|
|
torrent_info(lazy_entry const& torrent_file, int flags = 0);
|
|
torrent_info(char const* buffer, int size, int flags = 0);
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(37)">../include/libtorrent/aux_/session_interface.hpp:108</a></td><td>the IP voting mechanism should be factored out to its own class, not part of the session</td></tr><tr id="37" style="display: none;" colspan="3"><td colspan="3"><h2>the IP voting mechanism should be factored out
|
|
to its own class, not part of the session</h2><h4>../include/libtorrent/aux_/session_interface.hpp:108</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> class port_filter;
|
|
struct settings_pack;
|
|
struct torrent_peer_allocator_interface;
|
|
struct counters;
|
|
struct resolver_interface;
|
|
|
|
#ifndef TORRENT_DISABLE_DHT
|
|
namespace dht
|
|
{
|
|
struct dht_tracker;
|
|
}
|
|
#endif
|
|
}
|
|
|
|
namespace libtorrent { namespace aux
|
|
{
|
|
	// TODO: make this interface a lot smaller
|
|
struct session_interface
|
|
: buffer_allocator_interface
|
|
{
|
|
<div style="background: #ffff00" width="100%"> enum
|
|
</div> {
|
|
source_dht = 1,
|
|
source_peer = 2,
|
|
source_tracker = 4,
|
|
source_router = 8
|
|
};
|
|
|
|
virtual void set_external_address(address const& ip
|
|
, int source_type, address const& source) = 0;
|
|
virtual external_ip const& external_address() const = 0;
|
|
|
|
virtual disk_interface& disk_thread() = 0;
|
|
|
|
virtual alert_manager& alerts() = 0;
|
|
|
|
virtual torrent_peer_allocator_interface* get_peer_allocator() = 0;
|
|
virtual io_service& get_io_service() = 0;
|
|
virtual resolver_interface& get_resolver() = 0;
|
|
|
|
virtual bool has_connection(peer_connection* p) const = 0;
|
|
virtual void insert_peer(boost::shared_ptr<peer_connection> const& c) = 0;
|
|
|
|
virtual void queue_async_resume_data(boost::shared_ptr<torrent> const& t) = 0;
|
|
virtual void done_async_resume() = 0;
|
|
virtual void evict_torrent(torrent* t) = 0;
|
|
|
|
virtual void remove_torrent(torrent_handle const& h, int options = 0) = 0;
|
|
virtual void remove_torrent_impl(boost::shared_ptr<torrent> tptr, int options) = 0;
|
|
|
|
// ip and port filter
|
|
</pre></td></tr><tr style="background: #ccf"><td>relevance 1</td><td><a href="javascript:expand(38)">../src/http_seed_connection.cpp:111</a></td><td>in chunked encoding mode, this assert won't hold. the chunk headers should be subtracted from the receive_buffer_size</td></tr><tr id="38" style="display: none;" colspan="3"><td colspan="3"><h2>in chunked encoding mode, this assert won't hold.
|
|
the chunk headers should be subtracted from the receive_buffer_size</h2><h4>../src/http_seed_connection.cpp:111</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> boost::optional<piece_block_progress>
|
|
http_seed_connection::downloading_piece_progress() const
|
|
{
|
|
if (m_requests.empty())
|
|
return boost::optional<piece_block_progress>();
|
|
|
|
boost::shared_ptr<torrent> t = associated_torrent().lock();
|
|
TORRENT_ASSERT(t);
|
|
|
|
piece_block_progress ret;
|
|
|
|
peer_request const& pr = m_requests.front();
|
|
ret.piece_index = pr.piece;
|
|
if (!m_parser.header_finished())
|
|
{
|
|
ret.bytes_downloaded = 0;
|
|
}
|
|
else
|
|
{
|
|
int receive_buffer_size = receive_buffer().left() - m_parser.body_start();
|
|
<div style="background: #ffff00" width="100%"> TORRENT_ASSERT_VAL(receive_buffer_size <= t->block_size(), receive_buffer_size);
|
|
</div> ret.bytes_downloaded = t->block_size() - receive_buffer_size;
|
|
}
|
|
// this is used to make sure that the block_index stays within
|
|
// bounds. If the entire piece is downloaded, the block_index
|
|
// would otherwise point to one past the end
|
|
int correction = ret.bytes_downloaded ? -1 : 0;
|
|
ret.block_index = (pr.start + ret.bytes_downloaded + correction) / t->block_size();
|
|
ret.full_block_bytes = t->block_size();
|
|
const int last_piece = t->torrent_file().num_pieces() - 1;
|
|
if (ret.piece_index == last_piece && ret.block_index
|
|
== t->torrent_file().piece_size(last_piece) / t->block_size())
|
|
ret.full_block_bytes = t->torrent_file().piece_size(last_piece) % t->block_size();
|
|
return ret;
|
|
}
|
|
|
|
void http_seed_connection::write_request(peer_request const& r)
|
|
{
|
|
INVARIANT_CHECK;
|
|
|
|
boost::shared_ptr<torrent> t = associated_torrent().lock();
|
|
TORRENT_ASSERT(t);
|
|
|
|
TORRENT_ASSERT(t->valid_metadata());
|
|
// http_seeds don't support requesting more than one piece
|
|
// at a time
|
|
TORRENT_ASSERT(r.length <= t->torrent_file().piece_size(r.piece));
|
|
|
|
std::string request;
|
|
request.reserve(400);
|
|
|
|
</pre></td></tr><tr style="background: #ccf"><td>relevance 1</td><td><a href="javascript:expand(39)">../src/session_impl.cpp:6508</a></td><td>report the proper address of the router as the source IP of this understanding of our external address, instead of the empty address</td></tr><tr id="39" style="display: none;" colspan="3"><td colspan="3"><h2>report the proper address of the router as the source IP of
|
|
this understanding of our external address, instead of the empty address</h2><h4>../src/session_impl.cpp:6508</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> void session_impl::on_port_mapping(int mapping, address const& ip, int port
|
|
, error_code const& ec, int map_transport)
|
|
{
|
|
TORRENT_ASSERT(is_single_thread());
|
|
|
|
TORRENT_ASSERT(map_transport >= 0 && map_transport <= 1);
|
|
|
|
if (mapping == m_udp_mapping[map_transport] && port != 0)
|
|
{
|
|
m_external_udp_port = port;
|
|
if (m_alerts.should_post<portmap_alert>())
|
|
m_alerts.post_alert(portmap_alert(mapping, port
|
|
, map_transport));
|
|
return;
|
|
}
|
|
|
|
if (mapping == m_tcp_mapping[map_transport] && port != 0)
|
|
{
|
|
if (ip != address())
|
|
{
|
|
<div style="background: #ffff00" width="100%"> set_external_address(ip, source_router, address());
|
|
</div> }
|
|
|
|
if (!m_listen_sockets.empty()) {
|
|
m_listen_sockets.front().external_address = ip;
|
|
m_listen_sockets.front().external_port = port;
|
|
}
|
|
if (m_alerts.should_post<portmap_alert>())
|
|
m_alerts.post_alert(portmap_alert(mapping, port
|
|
, map_transport));
|
|
return;
|
|
}
|
|
|
|
if (ec)
|
|
{
|
|
if (m_alerts.should_post<portmap_error_alert>())
|
|
m_alerts.post_alert(portmap_error_alert(mapping
|
|
, map_transport, ec));
|
|
}
|
|
else
|
|
{
|
|
if (m_alerts.should_post<portmap_alert>())
|
|
m_alerts.post_alert(portmap_alert(mapping, port
|
|
, map_transport));
|
|
}
|
|
}
|
|
|
|
session_status session_impl::status() const
|
|
{
|
|
// INVARIANT_CHECK;
|
|
TORRENT_ASSERT(is_single_thread());
|
|
</pre></td></tr><tr style="background: #ccf"><td>relevance 1</td><td><a href="javascript:expand(40)">../src/session_impl.cpp:7668</a></td><td>we only need to do this if our global IPv4 address has changed since the DHT (currently) only supports IPv4. Since restarting the DHT is kind of expensive, it would be nice to not do it unnecessarily</td></tr><tr id="40" style="display: none;" colspan="3"><td colspan="3"><h2>we only need to do this if our global IPv4 address has changed
|
|
since the DHT (currently) only supports IPv4. Since restarting the DHT
|
|
is kind of expensive, it would be nice to not do it unnecessarily</h2><h4>../src/session_impl.cpp:7668</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">#endif
|
|
|
|
if (!m_external_ip.cast_vote(ip, source_type, source)) return;
|
|
|
|
#if defined TORRENT_VERBOSE_LOGGING
|
|
session_log(" external IP updated");
|
|
#endif
|
|
|
|
if (m_alerts.should_post<external_ip_alert>())
|
|
m_alerts.post_alert(external_ip_alert(ip));
|
|
|
|
for (torrent_map::iterator i = m_torrents.begin()
|
|
, end(m_torrents.end()); i != end; ++i)
|
|
{
|
|
i->second->new_external_ip();
|
|
}
|
|
|
|
// since we have a new external IP now, we need to
|
|
// restart the DHT with a new node ID
|
|
#ifndef TORRENT_DISABLE_DHT
|
|
<div style="background: #ffff00" width="100%"> if (m_dht)
|
|
</div> {
|
|
entry s = m_dht->state();
|
|
int cur_state = 0;
|
|
int prev_state = 0;
|
|
entry* nodes1 = s.find_key("nodes");
|
|
if (nodes1 && nodes1->type() == entry::list_t) cur_state = nodes1->list().size();
|
|
entry* nodes2 = m_dht_state.find_key("nodes");
|
|
if (nodes2 && nodes2->type() == entry::list_t) prev_state = nodes2->list().size();
|
|
if (cur_state > prev_state) m_dht_state = s;
|
|
start_dht(m_dht_state);
|
|
}
|
|
#endif
|
|
}
|
|
|
|
// decrement the refcount of the block in the disk cache
|
|
// since the network thread doesn't need it anymore
|
|
void session_impl::reclaim_block(block_cache_reference ref)
|
|
{
|
|
m_disk_thread.reclaim_block(ref);
|
|
}
|
|
|
|
char* session_impl::allocate_disk_buffer(char const* category)
|
|
{
|
|
return m_disk_thread.allocate_disk_buffer(category);
|
|
}
|
|
|
|
char* session_impl::async_allocate_disk_buffer(char const* category
|
|
, boost::function<void(char*)> const& handler)
|
|
{
|
|
return m_disk_thread.async_allocate_disk_buffer(category, handler);
|
|
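// ----------------------------------------------------------------------
// sketch, not part of the original file: the TODO above can be handled by
// remembering which IPv4 address the DHT was last started with and only
// restarting when that address actually changes. The member name
// m_last_dht_external_v4 is hypothetical.
//
//   address m_last_dht_external_v4;
//
//   address v4 = m_external_ip.external_address(address_v4());
//   if (m_dht && v4 != m_last_dht_external_v4)
//   {
//       m_last_dht_external_v4 = v4;
//       // merge the node state and start_dht() as above
//   }
// ----------------------------------------------------------------------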
</pre></td></tr><tr style="background: #ccf"><td>relevance 1</td><td><a href="javascript:expand(41)">../src/torrent.cpp:1142</a></td><td>make this depend on the error and on the filesystem the files are being downloaded to. If the error is no_space_left_on_device and the filesystem doesn't support sparse files, only zero the priorities of the pieces that are at the tails of all files, leaving everything up to the highest written piece in each file</td></tr><tr id="41" style="display: none;" colspan="3"><td colspan="3"><h2>make this depend on the error and on the filesystem the
|
|
files are being downloaded to. If the error is no_space_left_on_device
|
|
and the filesystem doesn't support sparse files, only zero the priorities
|
|
of the pieces that are at the tails of all files, leaving everything
|
|
up to the highest written piece in each file</h2><h4>../src/torrent.cpp:1142</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> alerts().post_alert(file_error_alert(j->error.ec
|
|
, resolve_filename(j->error.file), j->error.operation_str(), get_handle()));
|
|
|
|
// put the torrent in an error-state
|
|
set_error(j->error.ec, j->error.file);
|
|
|
|
// if a write operation failed, and future writes are likely to
|
|
// fail, while reads may succeed, just set the torrent to upload mode
|
|
// if we make an incorrect assumption here, it's not the end of the
|
|
// world, if we ever issue a read request and it fails as well, we
|
|
// won't get in here and we'll actually end up pausing the torrent
|
|
if (j->action == disk_io_job::write
|
|
&& (j->error.ec == boost::system::errc::read_only_file_system
|
|
|| j->error.ec == boost::system::errc::permission_denied
|
|
|| j->error.ec == boost::system::errc::operation_not_permitted
|
|
|| j->error.ec == boost::system::errc::no_space_on_device
|
|
|| j->error.ec == boost::system::errc::file_too_large))
|
|
{
|
|
// if we failed to write, stop downloading and just
|
|
// keep seeding.
|
|
<div style="background: #ffff00" width="100%"> set_upload_mode(true);
|
|
</div> return;
|
|
}
|
|
|
|
// if the error appears to be more serious than a full disk, just pause the torrent
|
|
pause();
|
|
}
|
|
|
|
void torrent::on_piece_fail_sync(disk_io_job const* j, piece_block b)
|
|
{
|
|
update_gauge();
|
|
		// some peers that previously were no longer interesting may
|
|
// now have become interesting, since we lack this one piece now.
|
|
for (peer_iterator i = begin(); i != end();)
|
|
{
|
|
peer_connection* p = *i;
|
|
// update_interest may disconnect the peer and
|
|
// invalidate the iterator
|
|
++i;
|
|
// no need to do anything with peers that
|
|
// already are interested. Gaining a piece may
|
|
// only make uninteresting peers interesting again.
|
|
if (p->is_interesting()) continue;
|
|
p->update_interest();
|
|
if (!m_abort)
|
|
{
|
|
if (request_a_block(*this, *p))
|
|
inc_stats_counter(counters::hash_fail_piece_picks);
|
|
p->send_block_requests();
|
|
}
|
|
}
|
|
</pre></td></tr><tr style="background: #ccf"><td>relevance 1</td><td><a href="javascript:expand(42)">../src/torrent.cpp:6774</a></td><td>save the send_stats state instead of throwing it away; it may pose an issue when downgrading, though</td></tr><tr id="42" style="display: none;" colspan="3"><td colspan="3"><h2>save the send_stats state instead of throwing it away;
it may pose an issue when downgrading, though</h2><h4>../src/torrent.cpp:6774</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> for (int k = 0; k < bits; ++k)
|
|
v |= (i->info[j*8+k].state == piece_picker::block_info::state_finished)
|
|
? (1 << k) : 0;
|
|
bitmask.append(1, v);
|
|
TORRENT_ASSERT(bits == 8 || j == num_bitmask_bytes - 1);
|
|
}
|
|
piece_struct["bitmask"] = bitmask;
|
|
// push the struct onto the unfinished-piece list
|
|
up.push_back(piece_struct);
|
|
}
|
|
}
|
|
|
|
// save trackers
|
|
entry::list_type& tr_list = ret["trackers"].list();
|
|
tr_list.push_back(entry::list_type());
|
|
int tier = 0;
|
|
for (std::vector<announce_entry>::const_iterator i = m_trackers.begin()
|
|
, end(m_trackers.end()); i != end; ++i)
|
|
{
|
|
// don't save trackers we can't trust
|
|
<div style="background: #ffff00" width="100%"> if (i->send_stats == false) continue;
|
|
</div> if (i->tier == tier)
|
|
{
|
|
tr_list.back().list().push_back(i->url);
|
|
}
|
|
else
|
|
{
|
|
tr_list.push_back(entry::list_t);
|
|
tr_list.back().list().push_back(i->url);
|
|
tier = i->tier;
|
|
}
|
|
}
|
|
|
|
// save web seeds
|
|
if (!m_web_seeds.empty())
|
|
{
|
|
entry::list_type& url_list = ret["url-list"].list();
|
|
entry::list_type& httpseed_list = ret["httpseeds"].list();
|
|
for (std::list<web_seed_entry>::const_iterator i = m_web_seeds.begin()
|
|
, end(m_web_seeds.end()); i != end; ++i)
|
|
{
|
|
if (i->type == web_seed_entry::url_seed)
|
|
url_list.push_back(i->url);
|
|
else if (i->type == web_seed_entry::http_seed)
|
|
httpseed_list.push_back(i->url);
|
|
}
|
|
}
|
|
|
|
// write have bitmask
|
|
// the pieces string has one byte per piece. Each
|
|
// byte is a bitmask representing different properties
|
|
</pre></td></tr><tr style="background: #ccf"><td>relevance 1</td><td><a href="javascript:expand(43)">../src/torrent.cpp:7842</a></td><td>should disconnect all peers that have the pieces we have, not just seeds. It would be pretty expensive to check all pieces for all peers though</td></tr><tr id="43" style="display: none;" colspan="3"><td colspan="3"><h2>should disconnect all peers that have the pieces we have,
|
|
not just seeds. It would be pretty expensive to check all pieces
|
|
for all peers though</h2><h4>../src/torrent.cpp:7842</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> set_state(torrent_status::finished);
|
|
set_queue_position(-1);
|
|
|
|
m_became_finished = m_ses.session_time();
|
|
|
|
// we have to call completed() before we start
|
|
// disconnecting peers, since there's an assert
|
|
// to make sure we're cleared the piece picker
|
|
if (is_seed()) completed();
|
|
|
|
send_upload_only();
|
|
|
|
state_updated();
|
|
|
|
if (m_completed_time == 0)
|
|
m_completed_time = time(0);
|
|
|
|
// disconnect all seeds
|
|
if (settings().get_bool(settings_pack::close_redundant_connections))
|
|
{
|
|
<div style="background: #ffff00" width="100%"> std::vector<peer_connection*> seeds;
|
|
</div> for (peer_iterator i = m_connections.begin();
|
|
i != m_connections.end(); ++i)
|
|
{
|
|
peer_connection* p = *i;
|
|
TORRENT_ASSERT(p->associated_torrent().lock().get() == this);
|
|
if (p->upload_only())
|
|
{
|
|
#if defined TORRENT_VERBOSE_LOGGING || defined TORRENT_ERROR_LOGGING
|
|
p->peer_log("*** SEED, CLOSING CONNECTION");
|
|
#endif
|
|
seeds.push_back(p);
|
|
}
|
|
}
|
|
std::for_each(seeds.begin(), seeds.end()
|
|
, boost::bind(&peer_connection::disconnect, _1, errors::torrent_finished
|
|
, peer_connection_interface::op_bittorrent, 0));
|
|
}
|
|
|
|
if (m_abort) return;
|
|
|
|
update_want_peers();
|
|
|
|
TORRENT_ASSERT(m_storage);
|
|
|
|
// we need to keep the object alive during this operation
|
|
inc_refcount("release_files");
|
|
m_ses.disk_thread().async_release_files(m_storage.get()
|
|
, boost::bind(&torrent::on_cache_flushed, shared_from_this(), _1));
|
|
|
|
// this torrent just completed downloads, which means it will fall
|
|
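// ----------------------------------------------------------------------
// sketch, not part of the original file: the stricter check suggested by the
// TODO above is whether a peer already has every piece we have, i.e. the
// connection is redundant once we are finished. Expressed on plain bitfields
// here (requires <vector>); the real code would use our have-bitfield and the
// peer's bitfield.
inline bool peer_is_redundant(std::vector<bool> const& we_have
	, std::vector<bool> const& peer_has)
{
	for (std::size_t i = 0; i < we_have.size(); ++i)
	{
		// the peer still lacks a piece we could upload to it
		if (we_have[i] && (i >= peer_has.size() || !peer_has[i]))
			return false;
	}
	return true;
}
// ----------------------------------------------------------------------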
</pre></td></tr><tr style="background: #ccf"><td>relevance 1</td><td><a href="javascript:expand(44)">../src/kademlia/node.cpp:827</a></td><td>find_node should write directly to the response entry</td></tr><tr id="44" style="display: none;" colspan="3"><td colspan="3"><h2>find_node should write directly to the response entry</h2><h4>../src/kademlia/node.cpp:827</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> TORRENT_LOG(node) << " values: " << reply["values"].list().size();
|
|
}
|
|
#endif
|
|
}
|
|
else if (strcmp(query, "find_node") == 0)
|
|
{
|
|
key_desc_t msg_desc[] = {
|
|
{"target", lazy_entry::string_t, 20, 0},
|
|
};
|
|
|
|
lazy_entry const* msg_keys[1];
|
|
if (!verify_message(arg_ent, msg_desc, msg_keys, 1, error_string, sizeof(error_string)))
|
|
{
|
|
incoming_error(e, error_string);
|
|
return;
|
|
}
|
|
|
|
m_counters.inc_stats_counter(counters::dht_find_node_in);
|
|
sha1_hash target(msg_keys[0]->string_ptr());
|
|
|
|
<div style="background: #ffff00" width="100%"> nodes_t n;
|
|
</div> m_table.find_node(target, n, 0);
|
|
write_nodes_entry(reply, n);
|
|
}
|
|
else if (strcmp(query, "announce_peer") == 0)
|
|
{
|
|
key_desc_t msg_desc[] = {
|
|
{"info_hash", lazy_entry::string_t, 20, 0},
|
|
{"port", lazy_entry::int_t, 0, 0},
|
|
{"token", lazy_entry::string_t, 0, 0},
|
|
{"n", lazy_entry::string_t, 0, key_desc_t::optional},
|
|
{"seed", lazy_entry::int_t, 0, key_desc_t::optional},
|
|
{"implied_port", lazy_entry::int_t, 0, key_desc_t::optional},
|
|
};
|
|
|
|
lazy_entry const* msg_keys[6];
|
|
if (!verify_message(arg_ent, msg_desc, msg_keys, 6, error_string, sizeof(error_string)))
|
|
{
|
|
#ifdef TORRENT_DHT_VERBOSE_LOGGING
|
|
++g_failed_announces;
|
|
#endif
|
|
incoming_error(e, error_string);
|
|
return;
|
|
}
|
|
|
|
int port = int(msg_keys[1]->int_value());
|
|
|
|
// is the announcer asking to ignore the explicit
|
|
// listen port and instead use the source port of the packet?
|
|
if (msg_keys[5] && msg_keys[5]->int_value() != 0)
|
|
port = m.addr.port();
|
|
</pre></td></tr><tr style="background: #ccf"><td>relevance 1</td><td><a href="javascript:expand(45)">../include/libtorrent/ip_voter.hpp:122</a></td><td>instead, have one instance per possible subnet, global IPv4, global IPv6, loopback, 192.168.x.x, 10.x.x.x, etc.</td></tr><tr id="45" style="display: none;" colspan="3"><td colspan="3"><h2>instead, have one instance per possible subnet, global IPv4, global IPv6, loopback, 192.168.x.x, 10.x.x.x, etc.</h2><h4>../include/libtorrent/ip_voter.hpp:122</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> // away all the votes and started from scratch, in case
|
|
// our IP has changed
|
|
ptime m_last_rotate;
|
|
};
|
|
|
|
// this keeps track of multiple external IPs (for now, just IPv6 and IPv4, but
|
|
// it could be extended to deal with loopback and local network addresses as well)
|
|
struct TORRENT_EXTRA_EXPORT external_ip
|
|
{
|
|
// returns true if a different IP is the top vote now
|
|
// i.e. we changed our idea of what our external IP is
|
|
bool cast_vote(address const& ip, int source_type, address const& source);
|
|
|
|
// the external IP as it would be observed from `ip`
|
|
address external_address(address const& ip) const;
|
|
|
|
private:
|
|
|
|
// for now, assume one external IPv4 and one external IPv6 address
|
|
// 0 = IPv4 1 = IPv6
|
|
<div style="background: #ffff00" width="100%"> ip_voter m_vote_group[2];
|
|
</div> };
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
</pre></td></tr><tr style="background: #ccf"><td>relevance 1</td><td><a href="javascript:expand(46)">../include/libtorrent/web_peer_connection.hpp:121</a></td><td>if we make this a disk_buffer_holder instead, we would sometimes save a copy. Use allocate_disk_receive_buffer and release_disk_receive_buffer</td></tr><tr id="46" style="display: none;" colspan="3"><td colspan="3"><h2>if we make this a disk_buffer_holder instead,
we would sometimes save a copy.
Use allocate_disk_receive_buffer and release_disk_receive_buffer</h2><h4>../include/libtorrent/web_peer_connection.hpp:121</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">
|
|
// returns the block currently being
|
|
// downloaded. And the progress of that
|
|
// block. If the peer isn't downloading
|
|
// a piece for the moment, the boost::optional
|
|
// will be invalid.
|
|
boost::optional<piece_block_progress> downloading_piece_progress() const;
|
|
|
|
void handle_padfile(buffer::const_interval& recv_buffer);
|
|
|
|
// this has one entry per http-request
|
|
// (might be more than the bt requests)
|
|
std::deque<int> m_file_requests;
|
|
|
|
std::string m_url;
|
|
|
|
web_seed_entry* m_web;
|
|
|
|
// this is used for intermediate storage of pieces
|
|
// that are received in more than one HTTP response
|
|
<div style="background: #ffff00" width="100%"> std::vector<char> m_piece;
|
|
</div>
|
|
// the number of bytes received in the current HTTP
|
|
// response. used to know where in the buffer the
|
|
// next response starts
|
|
size_type m_received_body;
|
|
|
|
// position in the current range response
|
|
size_type m_range_pos;
|
|
|
|
// this is the offset inside the current receive
|
|
// buffer where the next chunk header will be.
|
|
// this is updated for each chunk header that's
|
|
// parsed. It does not necessarily point to a valid
|
|
// offset in the receive buffer, if we haven't received
|
|
// it yet. This offset never includes the HTTP header
|
|
size_type m_chunk_pos;
|
|
|
|
// the position in the current block
|
|
int m_block_pos;
|
|
|
|
// this is the number of bytes we've already received
|
|
// from the next chunk header we're waiting for
|
|
int m_partial_chunk_header;
|
|
|
|
// the number of responses we've received so far on
|
|
// this connection
|
|
int m_num_responses;
|
|
};
|
|
}
|
|
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(47)">../src/block_cache.cpp:884</a></td><td>it's somewhat expensive to iterate over this linked list. Presumably because of the random access of memory. It would be nice if pieces with no evictable blocks weren't in this list</td></tr><tr id="47" style="display: none;" colspan="3"><td colspan="3"><h2>it's somewhat expensive
|
|
to iterate over this linked list. Presumably because of the random
|
|
access of memory. It would be nice if pieces with no evictable blocks
|
|
weren't in this list</h2><h4>../src/block_cache.cpp:884</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> }
|
|
else if (m_last_cache_op == ghost_hit_lru1)
|
|
{
|
|
// when we insert new items or move things from L1 to L2
|
|
// evict blocks from L2
|
|
lru_list[1] = &m_lru[cached_piece_entry::read_lru2];
|
|
lru_list[2] = &m_lru[cached_piece_entry::read_lru1];
|
|
}
|
|
else
|
|
{
|
|
// when we get cache hits in L2 evict from L1
|
|
lru_list[1] = &m_lru[cached_piece_entry::read_lru1];
|
|
lru_list[2] = &m_lru[cached_piece_entry::read_lru2];
|
|
}
|
|
|
|
// end refers to which end of the ARC cache we're evicting
|
|
// from. The LFU or the LRU end
|
|
for (int end = 0; num > 0 && end < 3; ++end)
|
|
{
|
|
// iterate over all blocks in order of last being used (oldest first) and
|
|
<div style="background: #ffff00" width="100%"> for (list_iterator i = lru_list[end]->iterate(); i.get() && num > 0;)
|
|
</div> {
|
|
cached_piece_entry* pe = reinterpret_cast<cached_piece_entry*>(i.get());
|
|
TORRENT_PIECE_ASSERT(pe->in_use, pe);
|
|
i.next();
|
|
|
|
if (pe == ignore)
|
|
continue;
|
|
|
|
if (pe->ok_to_evict())
|
|
{
|
|
#ifdef TORRENT_DEBUG
|
|
for (int j = 0; j < pe->blocks_in_piece; ++j)
|
|
TORRENT_PIECE_ASSERT(pe->blocks[j].buf == 0, pe);
|
|
#endif
|
|
TORRENT_PIECE_ASSERT(pe->refcount == 0, pe);
|
|
move_to_ghost(pe);
|
|
continue;
|
|
}
|
|
|
|
TORRENT_PIECE_ASSERT(pe->num_dirty == 0, pe);
|
|
|
|
// all blocks are pinned in this piece, skip it
|
|
if (pe->num_blocks <= pe->pinned) continue;
|
|
|
|
// go through the blocks and evict the ones that are not dirty and not
|
|
// referenced
|
|
for (int j = 0; j < pe->blocks_in_piece && num > 0; ++j)
|
|
{
|
|
cached_block_entry& b = pe->blocks[j];
|
|
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(48)">../src/block_cache.cpp:948</a></td><td>this should probably only be done every nth time</td></tr><tr id="48" style="display: none;" colspan="3"><td colspan="3"><h2>this should probably only be done every nth time</h2><h4>../src/block_cache.cpp:948</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> }
|
|
|
|
if (pe->ok_to_evict())
|
|
{
|
|
#ifdef TORRENT_DEBUG
|
|
for (int j = 0; j < pe->blocks_in_piece; ++j)
|
|
TORRENT_PIECE_ASSERT(pe->blocks[j].buf == 0, pe);
|
|
#endif
|
|
move_to_ghost(pe);
|
|
}
|
|
}
|
|
}
|
|
|
|
// if we can't evict enough blocks from the read cache, also look at write
|
|
// cache pieces for blocks that have already been written to disk and can be
|
|
// evicted the first pass, we only evict blocks that have been hashed, the
|
|
// second pass we flush anything this is potentially a very expensive
|
|
// operation, since we're likely to have iterate every single block in the
|
|
// cache, and we might not get to evict anything.
|
|
|
|
<div style="background: #ffff00" width="100%"> if (num > 0 && m_read_cache_size > m_pinned_blocks)
|
|
</div> {
|
|
for (int pass = 0; pass < 2 && num > 0; ++pass)
|
|
{
|
|
for (list_iterator i = m_lru[cached_piece_entry::write_lru].iterate(); i.get() && num > 0;)
|
|
{
|
|
cached_piece_entry* pe = reinterpret_cast<cached_piece_entry*>(i.get());
|
|
TORRENT_PIECE_ASSERT(pe->in_use, pe);
|
|
|
|
i.next();
|
|
|
|
if (pe == ignore)
|
|
continue;
|
|
|
|
if (pe->ok_to_evict())
|
|
{
|
|
#ifdef TORRENT_DEBUG
|
|
for (int j = 0; j < pe->blocks_in_piece; ++j)
|
|
TORRENT_PIECE_ASSERT(pe->blocks[j].buf == 0, pe);
|
|
#endif
|
|
TORRENT_PIECE_ASSERT(pe->refcount == 0, pe);
|
|
erase_piece(pe);
|
|
continue;
|
|
}
|
|
|
|
// all blocks in this piece are dirty
|
|
if (pe->num_dirty == pe->num_blocks)
|
|
continue;
|
|
|
|
int end = pe->blocks_in_piece;
|
|
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(49)">../src/block_cache.cpp:1714</a></td><td>create a holder for refcounts that automatically decrement</td></tr><tr id="49" style="display: none;" colspan="3"><td colspan="3"><h2>create a holder for refcounts that automatically decrement</h2><h4>../src/block_cache.cpp:1714</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">
|
|
j->buffer = allocate_buffer("send buffer");
|
|
if (j->buffer == 0) return -2;
|
|
|
|
while (size > 0)
|
|
{
|
|
TORRENT_PIECE_ASSERT(pe->blocks[block].buf, pe);
|
|
int to_copy = (std::min)(block_size()
|
|
- block_offset, size);
|
|
std::memcpy(j->buffer + buffer_offset
|
|
, pe->blocks[block].buf + block_offset
|
|
, to_copy);
|
|
++pe->blocks[block].hitcount;
|
|
size -= to_copy;
|
|
block_offset = 0;
|
|
buffer_offset += to_copy;
|
|
++block;
|
|
}
|
|
// we incremented the refcount for both of these blocks.
|
|
// now decrement it.
|
|
<div style="background: #ffff00" width="100%"> dec_block_refcount(pe, start_block, ref_reading);
|
|
</div> if (blocks_to_read == 2) dec_block_refcount(pe, start_block + 1, ref_reading);
|
|
return j->d.io.buffer_size;
|
|
}
|
|
|
|
void block_cache::reclaim_block(block_cache_reference const& ref)
|
|
{
|
|
cached_piece_entry* pe = find_piece(ref);
|
|
TORRENT_ASSERT(pe);
|
|
if (pe == NULL) return;
|
|
|
|
TORRENT_PIECE_ASSERT(pe->in_use, pe);
|
|
|
|
TORRENT_PIECE_ASSERT(pe->blocks[ref.block].buf, pe);
|
|
dec_block_refcount(pe, ref.block, block_cache::ref_reading);
|
|
|
|
TORRENT_PIECE_ASSERT(m_send_buffer_blocks > 0, pe);
|
|
--m_send_buffer_blocks;
|
|
|
|
maybe_free_piece(pe);
|
|
}
|
|
|
|
bool block_cache::maybe_free_piece(cached_piece_entry* pe)
|
|
{
|
|
if (!pe->ok_to_evict()
|
|
|| !pe->marked_for_deletion
|
|
|| !pe->jobs.empty())
|
|
return false;
|
|
|
|
boost::shared_ptr<piece_manager> s = pe->storage;
|
|
|
|
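// ----------------------------------------------------------------------
// sketch, not part of the original file: the holder asked for in the TODO
// above can be a small scope guard that pairs with dec_block_refcount(). It
// assumes dec_block_refcount() is callable from the guard (it may need to be
// public or a friend). block_cache, cached_piece_entry and
// block_cache::ref_reading are the existing types/values used above.
struct scoped_block_ref
{
	scoped_block_ref(block_cache& c, cached_piece_entry* pe, int block)
		: m_cache(c), m_pe(pe), m_block(block) {}
	~scoped_block_ref()
	{ m_cache.dec_block_refcount(m_pe, m_block, block_cache::ref_reading); }
private:
	scoped_block_ref(scoped_block_ref const&);            // non-copyable
	scoped_block_ref& operator=(scoped_block_ref const&); // non-assignable
	block_cache& m_cache;
	cached_piece_entry* m_pe;
	int m_block;
};
// the copy loop above could then declare one guard per referenced block and
// drop the manual dec_block_refcount() calls, even on early returns.
// ----------------------------------------------------------------------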
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(50)">../src/bt_peer_connection.cpp:646</a></td><td>this could be optimized using Knuth-Morris-Pratt</td></tr><tr id="50" style="display: none;" colspan="3"><td colspan="3"><h2>this could be optimized using Knuth-Morris-Pratt</h2><h4>../src/bt_peer_connection.cpp:646</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> {
|
|
disconnect(errors::no_memory, op_encryption);
|
|
return;
|
|
}
|
|
|
|
#ifdef TORRENT_VERBOSE_LOGGING
|
|
peer_log(" computed RC4 keys");
|
|
#endif
|
|
}
|
|
|
|
int bt_peer_connection::get_syncoffset(char const* src, int src_size,
|
|
char const* target, int target_size) const
|
|
{
|
|
TORRENT_ASSERT(target_size >= src_size);
|
|
TORRENT_ASSERT(src_size > 0);
|
|
TORRENT_ASSERT(src);
|
|
TORRENT_ASSERT(target);
|
|
|
|
int traverse_limit = target_size - src_size;
|
|
|
|
<div style="background: #ffff00" width="100%"> for (int i = 0; i < traverse_limit; ++i)
|
|
</div> {
|
|
char const* target_ptr = target + i;
|
|
if (std::equal(src, src+src_size, target_ptr))
|
|
return i;
|
|
}
|
|
|
|
// // Partial sync
|
|
// for (int i = 0; i < target_size; ++i)
|
|
// {
|
|
// // first is iterator in src[] at which mismatch occurs
|
|
// // second is iterator in target[] at which mismatch occurs
|
|
// std::pair<const char*, const char*> ret;
|
|
// int src_sync_size;
|
|
// if (i > traverse_limit) // partial sync test
|
|
// {
|
|
// ret = std::mismatch(src, src + src_size - (i - traverse_limit), &target[i]);
|
|
// src_sync_size = ret.first - src;
|
|
// if (src_sync_size == (src_size - (i - traverse_limit)))
|
|
// return i;
|
|
// }
|
|
// else // complete sync test
|
|
// {
|
|
// ret = std::mismatch(src, src + src_size, &target[i]);
|
|
// src_sync_size = ret.first - src;
|
|
// if (src_sync_size == src_size)
|
|
// return i;
|
|
// }
|
|
// }
|
|
|
|
// no complete sync
|
|
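// ----------------------------------------------------------------------
// sketch, not part of the original file: the same search written with the
// Knuth-Morris-Pratt failure function, which never re-scans target bytes on a
// mismatch. Self-contained (requires <vector>); returns the offset of the
// first occurrence of pat in text, or -1 if there is none. get_syncoffset()
// would additionally reject offsets past its traverse_limit.
inline int kmp_find(char const* pat, int pat_size
	, char const* text, int text_size)
{
	if (pat_size <= 0 || pat_size > text_size) return -1;
	std::vector<int> fail(pat_size, 0);
	for (int i = 1, k = 0; i < pat_size; ++i)
	{
		while (k > 0 && pat[i] != pat[k]) k = fail[k - 1];
		if (pat[i] == pat[k]) ++k;
		fail[i] = k;
	}
	for (int i = 0, k = 0; i < text_size; ++i)
	{
		while (k > 0 && text[i] != pat[k]) k = fail[k - 1];
		if (text[i] == pat[k]) ++k;
		if (k == pat_size) return i - pat_size + 1;
	}
	return -1;
}
// ----------------------------------------------------------------------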
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(51)">../src/bt_peer_connection.cpp:2213</a></td><td>if we're finished, send upload_only message</td></tr><tr id="51" style="display: none;" colspan="3"><td colspan="3"><h2>if we're finished, send upload_only message</h2><h4>../src/bt_peer_connection.cpp:2213</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> if (msg[5 + k / 8] & (0x80 >> (k % 8))) bitfield_string[k] = '1';
|
|
else bitfield_string[k] = '0';
|
|
}
|
|
peer_log("==> BITFIELD [ %s ]", bitfield_string.c_str());
|
|
#endif
|
|
m_sent_bitfield = true;
|
|
|
|
send_buffer(msg, packet_size);
|
|
|
|
stats_counters().inc_stats_counter(counters::num_outgoing_bitfield);
|
|
|
|
if (num_lazy_pieces > 0)
|
|
{
|
|
for (int i = 0; i < num_lazy_pieces; ++i)
|
|
{
|
|
#ifdef TORRENT_VERBOSE_LOGGING
|
|
peer_log("==> HAVE [ piece: %d ]", lazy_pieces[i]);
|
|
#endif
|
|
write_have(lazy_pieces[i]);
|
|
}
|
|
<div style="background: #ffff00" width="100%"> }
|
|
</div>
|
|
if (m_supports_fast)
|
|
send_allowed_set();
|
|
}
|
|
|
|
#ifndef TORRENT_DISABLE_EXTENSIONS
|
|
void bt_peer_connection::write_extensions()
|
|
{
|
|
INVARIANT_CHECK;
|
|
|
|
TORRENT_ASSERT(m_supports_extensions);
|
|
TORRENT_ASSERT(m_sent_handshake);
|
|
|
|
entry handshake;
|
|
entry::dictionary_type& m = handshake["m"].dict();
|
|
|
|
// if we're using a proxy, our listen port won't be useful
|
|
// anyway.
|
|
if (!m_settings.get_bool(settings_pack::force_proxy) && is_outgoing())
|
|
handshake["p"] = m_ses.listen_port();
|
|
|
|
		// only send the port in case we made the connection
|
|
// on incoming connections the other end already knows
|
|
// our listen port
|
|
if (!m_settings.get_bool(settings_pack::anonymous_mode))
|
|
{
|
|
handshake["v"] = m_settings.get_str(settings_pack::handshake_client_version).empty()
|
|
? m_settings.get_str(settings_pack::user_agent)
|
|
: m_settings.get_str(settings_pack::handshake_client_version);
|
|
}
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(52)">../src/disk_io_thread.cpp:921</a></td><td>instead of doing a lookup each time through the loop, save cached_piece_entry pointers with piece_refcount incremented to pin them</td></tr><tr id="52" style="display: none;" colspan="3"><td colspan="3"><h2>instead of doing a lookup each time through the loop, save
|
|
cached_piece_entry pointers with piece_refcount incremented to pin them</h2><h4>../src/disk_io_thread.cpp:921</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> // this is why we pass in 1 as cont_block to the flushing functions
|
|
void disk_io_thread::try_flush_write_blocks(int num, tailqueue& completed_jobs
|
|
, mutex::scoped_lock& l)
|
|
{
|
|
DLOG("try_flush_write_blocks: %d\n", num);
|
|
|
|
list_iterator range = m_disk_cache.write_lru_pieces();
|
|
std::vector<std::pair<piece_manager*, int> > pieces;
|
|
pieces.reserve(m_disk_cache.num_write_lru_pieces());
|
|
|
|
for (list_iterator p = range; p.get() && num > 0; p.next())
|
|
{
|
|
cached_piece_entry* e = (cached_piece_entry*)p.get();
|
|
if (e->num_dirty == 0) continue;
|
|
pieces.push_back(std::make_pair(e->storage.get(), int(e->piece)));
|
|
}
|
|
|
|
for (std::vector<std::pair<piece_manager*, int> >::iterator i = pieces.begin()
|
|
, end(pieces.end()); i != end; ++i)
|
|
{
|
|
<div style="background: #ffff00" width="100%"> cached_piece_entry* pe = m_disk_cache.find_piece(i->first, i->second);
|
|
</div> if (pe == NULL) continue;
|
|
|
|
// another thread may flush this piece while we're looping and
|
|
// evict it into a read piece and then also evict it to ghost
|
|
if (pe->cache_state != cached_piece_entry::write_lru) continue;
|
|
|
|
#if TORRENT_USE_ASSERTS
|
|
pe->piece_log.push_back(piece_log_t(piece_log_t::try_flush_write_blocks, -1));
|
|
#endif
|
|
++pe->piece_refcount;
|
|
kick_hasher(pe, l);
|
|
num -= try_flush_hashed(pe, 1, completed_jobs, l);
|
|
--pe->piece_refcount;
|
|
}
|
|
|
|
// when the write cache is under high pressure, it is likely
|
|
// counter productive to actually do this, since a piece may
|
|
// not have had its flush_hashed job run on it
|
|
// so only do it if no other thread is currently flushing
|
|
|
|
if (num == 0 || m_stats_counters[counters::num_writing_threads] > 0) return;
|
|
|
|
// if we still need to flush blocks, start over and flush
|
|
// everything in LRU order (degrade to lru cache eviction)
|
|
for (std::vector<std::pair<piece_manager*, int> >::iterator i = pieces.begin()
|
|
, end(pieces.end()); i != end; ++i)
|
|
{
|
|
cached_piece_entry* pe = m_disk_cache.find_piece(i->first, i->second);
|
|
if (pe == NULL) continue;
|
|
if (pe->num_dirty == 0) continue;
|
|
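	// ---- illustrative sketch, not from ../src/disk_io_thread.cpp ----
	// one way to do what the TODO above describes: pin the entries during
	// the first pass by bumping piece_refcount and keep the pointers, so
	// the second pass doesn't need another find_piece() lookup. Assumes
	// holding a piece_refcount is enough to keep the entry valid while
	// the cache mutex is released.
	//
	//   std::vector<cached_piece_entry*> pinned;
	//   pinned.reserve(m_disk_cache.num_write_lru_pieces());
	//   for (list_iterator p = range; p.get(); p.next())
	//   {
	//      cached_piece_entry* e = (cached_piece_entry*)p.get();
	//      if (e->num_dirty == 0) continue;
	//      ++e->piece_refcount; // pin
	//      pinned.push_back(e);
	//   }
	//   // ... flush from 'pinned' directly, then --piece_refcount and
	//   // m_disk_cache.maybe_free_piece(e) on each entry when done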
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(53)">../src/disk_io_thread.cpp:1132</a></td><td>instead of doing this, pass in the settings to each storage_interface call. Each disk thread could hold its most recent understanding of the settings in a shared_ptr, and update it every time it wakes up from a job. That way each access to the settings won't require a mutex to be held.</td></tr><tr id="53" style="display: none;" colspan="3"><td colspan="3"><h2>instead of doing this, pass in the settings to each storage_interface
call. Each disk thread could hold its most recent understanding of the settings
in a shared_ptr, and update it every time it wakes up from a job. That way
each access to the settings won't require a mutex to be held.</h2><h4>../src/disk_io_thread.cpp:1132</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">	{
|
|
INVARIANT_CHECK;
|
|
TORRENT_ASSERT(j->next == 0);
|
|
TORRENT_ASSERT((j->flags & disk_io_job::in_progress) || !j->storage);
|
|
|
|
mutex::scoped_lock l(m_cache_mutex);
|
|
|
|
check_cache_level(l, completed_jobs);
|
|
|
|
DLOG("perform_job job: %s ( %s%s) piece: %d offset: %d outstanding: %d\n"
|
|
, job_action_name[j->action]
|
|
, (j->flags & disk_io_job::fence) ? "fence ": ""
|
|
, (j->flags & disk_io_job::force_copy) ? "force_copy ": ""
|
|
, j->piece, j->d.io.offset
|
|
, j->storage ? j->storage->num_outstanding_jobs() : -1);
|
|
|
|
l.unlock();
|
|
|
|
boost::shared_ptr<piece_manager> storage = j->storage;
|
|
|
|
<div style="background: #ffff00" width="100%"> if (storage && storage->get_storage_impl()->m_settings == 0)
|
|
</div> storage->get_storage_impl()->m_settings = &m_settings;
|
|
|
|
TORRENT_ASSERT(j->action < sizeof(job_functions)/sizeof(job_functions[0]));
|
|
|
|
ptime start_time = time_now_hires();
|
|
|
|
++m_outstanding_jobs;
|
|
|
|
// call disk function
|
|
int ret = (this->*(job_functions[j->action]))(j, completed_jobs);
|
|
|
|
--m_outstanding_jobs;
|
|
|
|
if (ret == retry_job)
|
|
{
|
|
mutex::scoped_lock l(m_job_mutex);
|
|
// to avoid busy looping here, give up
|
|
// our quanta in case there aren't any other
|
|
// jobs to run in between
|
|
|
|
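	// ---- illustrative sketch, not from ../src/disk_io_thread.cpp ----
	// the idea in the TODO above: rather than pointing the storage at the
	// shared m_settings, each disk thread could keep its own snapshot and
	// refresh it when it wakes up for a job, so settings reads need no
	// mutex. The shared_ptr member shown here is hypothetical.
	//
	//   // per-thread state:
	//   boost::shared_ptr<aux::session_settings const> m_settings_snapshot;
	//
	//   // when waking up for a job (briefly under the settings mutex):
	//   m_settings_snapshot.reset(new aux::session_settings(m_settings));
	//
	//   // then pass *m_settings_snapshot into each storage_interface
	//   // call instead of stashing a raw pointer in the storage object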
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(54)">../src/disk_io_thread.cpp:1157</a></td><td>a potentially more efficient solution would be to have a special queue for retry jobs, that's only ever run when a job completes, in any thread. It would only work if m_outstanding_jobs > 0</td></tr><tr id="54" style="display: none;" colspan="3"><td colspan="3"><h2>a potentially more efficient solution would be to have a special
|
|
queue for retry jobs, that's only ever run when a job completes, in
|
|
any thread. It would only work if m_outstanding_jobs > 0</h2><h4>../src/disk_io_thread.cpp:1157</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> storage->get_storage_impl()->m_settings = &m_settings;
|
|
|
|
TORRENT_ASSERT(j->action < sizeof(job_functions)/sizeof(job_functions[0]));
|
|
|
|
ptime start_time = time_now_hires();
|
|
|
|
++m_outstanding_jobs;
|
|
|
|
// call disk function
|
|
int ret = (this->*(job_functions[j->action]))(j, completed_jobs);
|
|
|
|
--m_outstanding_jobs;
|
|
|
|
if (ret == retry_job)
|
|
{
|
|
mutex::scoped_lock l(m_job_mutex);
|
|
// to avoid busy looping here, give up
|
|
// our quanta in case there aren't any other
|
|
// jobs to run in between
|
|
|
|
<div style="background: #ffff00" width="100%">
|
|
</div> TORRENT_ASSERT((j->flags & disk_io_job::in_progress) || !j->storage);
|
|
|
|
bool need_sleep = m_queued_jobs.empty();
|
|
m_queued_jobs.push_back(j);
|
|
l.unlock();
|
|
if (need_sleep) sleep(0);
|
|
return;
|
|
}
|
|
|
|
#if TORRENT_USE_ASSERT
|
|
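	// ---- illustrative sketch, not from ../src/disk_io_thread.cpp ----
	// the TODO above: park retried jobs on their own queue and only
	// re-queue them when some other job completes (which is the event
	// that can make the retry succeed), instead of re-posting them to
	// m_queued_jobs and yielding. The m_retry_jobs member is hypothetical.
	//
	//   // instead of push_back + sleep(0):
	//   m_retry_jobs.push_back(j);
	//
	//   // and when any job completes (i.e. m_outstanding_jobs was > 0):
	//   while (m_retry_jobs.size())
	//      m_queued_jobs.push_back((disk_io_job*)m_retry_jobs.pop_front());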
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(55)">../src/disk_io_thread.cpp:1171</a></td><td>it should clear the hash state even when there's an error, right?</td></tr><tr id="55" style="display: none;" colspan="3"><td colspan="3"><h2>it should clear the hash state even when there's an error, right?</h2><h4>../src/disk_io_thread.cpp:1171</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> --m_outstanding_jobs;
|
|
|
|
if (ret == retry_job)
|
|
{
|
|
mutex::scoped_lock l(m_job_mutex);
|
|
// to avoid busy looping here, give up
|
|
// our quanta in case there aren't any other
|
|
// jobs to run in between
|
|
|
|
|
|
TORRENT_ASSERT((j->flags & disk_io_job::in_progress) || !j->storage);
|
|
|
|
bool need_sleep = m_queued_jobs.empty();
|
|
m_queued_jobs.push_back(j);
|
|
l.unlock();
|
|
if (need_sleep) sleep(0);
|
|
return;
|
|
}
|
|
|
|
#if TORRENT_USE_ASSERT
|
|
<div style="background: #ffff00" width="100%"> if (j->action == disk_io_job::hash && !j->error.ec)
|
|
</div> {
|
|
// a hash job should never return without clearing pe->hash
|
|
l.lock();
|
|
cached_piece_entry* pe = m_disk_cache.find_piece(j);
|
|
if (pe != NULL)
|
|
{
|
|
TORRENT_PIECE_ASSERT(pe->hash == NULL, pe);
|
|
}
|
|
l.unlock();
|
|
}
|
|
#endif
|
|
|
|
if (ret == defer_handler) return;
|
|
|
|
j->ret = ret;
|
|
|
|
ptime now = time_now_hires();
|
|
m_job_time.add_sample(total_microseconds(now - start_time));
|
|
completed_jobs.push_back(j);
|
|
}
|
|
|
|
int disk_io_thread::do_uncached_read(disk_io_job* j)
|
|
{
|
|
j->buffer = m_disk_cache.allocate_buffer("send buffer");
|
|
if (j->buffer == 0)
|
|
{
|
|
j->error.ec = error::no_memory;
|
|
j->error.operation = storage_error::alloc_cache_piece;
|
|
return -1;
|
|
}
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(56)">../src/disk_io_thread.cpp:1866</a></td><td>maybe the tailqueue_iterator should contain a pointer-pointer instead and have an unlink function</td></tr><tr id="56" style="display: none;" colspan="3"><td colspan="3"><h2>maybe the tailqueue_iterator should contain a pointer-pointer
|
|
instead and have an unlink function</h2><h4>../src/disk_io_thread.cpp:1866</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> j->callback = handler;
|
|
|
|
add_fence_job(storage, j);
|
|
}
|
|
|
|
void disk_io_thread::async_delete_files(piece_manager* storage
|
|
, boost::function<void(disk_io_job const*)> const& handler)
|
|
{
|
|
#ifdef TORRENT_DEBUG
|
|
// the caller must increment the torrent refcount before
|
|
// issuing an async disk request
|
|
storage->assert_torrent_refcount();
|
|
#endif
|
|
|
|
// remove cache blocks belonging to this torrent
|
|
tailqueue completed_jobs;
|
|
|
|
// remove outstanding jobs belonging to this torrent
|
|
mutex::scoped_lock l2(m_job_mutex);
|
|
|
|
<div style="background: #ffff00" width="100%"> disk_io_job* qj = (disk_io_job*)m_queued_jobs.get_all();
|
|
</div> tailqueue to_abort;
|
|
|
|
while (qj)
|
|
{
|
|
disk_io_job* next = (disk_io_job*)qj->next;
|
|
#if TORRENT_USE_ASSERTS
|
|
qj->next = NULL;
|
|
#endif
|
|
if (qj->storage.get() == storage)
|
|
to_abort.push_back(qj);
|
|
else
|
|
m_queued_jobs.push_back(qj);
|
|
qj = next;
|
|
}
|
|
l2.unlock();
|
|
|
|
mutex::scoped_lock l(m_cache_mutex);
|
|
flush_cache(storage, flush_delete_cache, completed_jobs, l);
|
|
l.unlock();
|
|
|
|
disk_io_job* j = allocate_job(disk_io_job::delete_files);
|
|
j->storage = storage->shared_from_this();
|
|
j->callback = handler;
|
|
add_fence_job(storage, j);
|
|
|
|
fail_jobs_impl(storage_error(boost::asio::error::operation_aborted), to_abort, completed_jobs);
|
|
|
|
if (completed_jobs.size())
|
|
add_completed_jobs(completed_jobs);
|
|
}
|
|
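	// ---- illustrative sketch, not from ../src/disk_io_thread.cpp ----
	// shape of the iterator the TODO above suggests: it carries the
	// address of the link that points at the current node, so unlink()
	// can splice a node out in O(1) while iterating (the loop above could
	// then filter m_queued_jobs in place instead of get_all()/push_back).
	// Names are illustrative; a real version would also have to patch up
	// the queue's tail pointer and size.
	struct unlinking_iterator
	{
		explicit unlinking_iterator(tailqueue_node** link) : m_link(link) {}
		tailqueue_node* get() const { return *m_link; }
		void next() { m_link = &(*m_link)->next; }
		tailqueue_node* unlink()
		{
			tailqueue_node* n = *m_link;
			*m_link = n->next; // splice the node out of the list
			n->next = 0;
			return n;
		}
	private:
		tailqueue_node** m_link;
	};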
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(57)">../src/disk_io_thread.cpp:2121</a></td><td>this is potentially very expensive. One way to solve it would be to have a fence for just this one piece.</td></tr><tr id="57" style="display: none;" colspan="3"><td colspan="3"><h2>this is potentially very expensive. One way to solve
|
|
it would be to have a fence for just this one piece.</h2><h4>../src/disk_io_thread.cpp:2121</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> }
|
|
|
|
void disk_io_thread::async_clear_piece(piece_manager* storage, int index
|
|
, boost::function<void(disk_io_job const*)> const& handler)
|
|
{
|
|
#ifdef TORRENT_DEBUG
|
|
// the caller must increment the torrent refcount before
|
|
// issuing an async disk request
|
|
storage->assert_torrent_refcount();
|
|
#endif
|
|
|
|
disk_io_job* j = allocate_job(disk_io_job::clear_piece);
|
|
j->storage = storage->shared_from_this();
|
|
j->piece = index;
|
|
j->callback = handler;
|
|
|
|
// regular jobs are not guaranteed to be executed in-order
|
|
// since clear piece must guarantee that all write jobs that
|
|
// have been issued finish before the clear piece job completes
|
|
|
|
<div style="background: #ffff00" width="100%"> add_fence_job(storage, j);
|
|
</div> }
|
|
|
|
void disk_io_thread::clear_piece(piece_manager* storage, int index)
|
|
{
|
|
mutex::scoped_lock l(m_cache_mutex);
|
|
|
|
cached_piece_entry* pe = m_disk_cache.find_piece(storage, index);
|
|
if (pe == 0) return;
|
|
TORRENT_PIECE_ASSERT(pe->hashing == false, pe);
|
|
pe->hashing_done = 0;
|
|
delete pe->hash;
|
|
pe->hash = NULL;
|
|
|
|
// evict_piece returns true if the piece was in fact
|
|
// evicted. A piece may fail to be evicted if there
|
|
// are still outstanding operations on it, which should
|
|
// never be the case when this function is used
|
|
// in fact, no jobs should really be hung on this piece
|
|
// at this point
|
|
tailqueue jobs;
|
|
bool ok = m_disk_cache.evict_piece(pe, jobs);
|
|
TORRENT_PIECE_ASSERT(ok, pe);
|
|
fail_jobs(storage_error(boost::asio::error::operation_aborted), jobs);
|
|
}
|
|
|
|
void disk_io_thread::kick_hasher(cached_piece_entry* pe, mutex::scoped_lock& l)
|
|
{
|
|
if (!pe->hash) return;
|
|
if (pe->hashing) return;
|
|
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(58)">../src/disk_io_thread.cpp:2382</a></td><td>we should probably just hang the job on the piece and make sure the hasher gets kicked</td></tr><tr id="58" style="display: none;" colspan="3"><td colspan="3"><h2>we should probably just hang the job on the piece and make sure the hasher gets kicked</h2><h4>../src/disk_io_thread.cpp:2382</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> if (pe == NULL)
|
|
{
|
|
int cache_state = (j->flags & disk_io_job::volatile_read)
|
|
? cached_piece_entry::volatile_read_lru
|
|
: cached_piece_entry::read_lru1;
|
|
pe = m_disk_cache.allocate_piece(j, cache_state);
|
|
}
|
|
if (pe == NULL)
|
|
{
|
|
j->error.ec = error::no_memory;
|
|
j->error.operation = storage_error::alloc_cache_piece;
|
|
return -1;
|
|
}
|
|
|
|
if (pe->hashing)
|
|
{
|
|
TORRENT_PIECE_ASSERT(pe->hash, pe);
|
|
// another thread is hashing this piece right now
|
|
// try again in a little bit
|
|
DLOG("do_hash: retry\n");
|
|
<div style="background: #ffff00" width="100%"> return retry_job;
|
|
</div> }
|
|
|
|
pe->hashing = 1;
|
|
|
|
TORRENT_PIECE_ASSERT(pe->cache_state <= cached_piece_entry::read_lru1
|
|
|| pe->cache_state == cached_piece_entry::read_lru2, pe);
|
|
++pe->piece_refcount;
|
|
|
|
if (pe->hash == NULL)
|
|
{
|
|
pe->hashing_done = 0;
|
|
pe->hash = new partial_hash;
|
|
}
|
|
partial_hash* ph = pe->hash;
|
|
|
|
int block_size = m_disk_cache.block_size();
|
|
int blocks_in_piece = (piece_size + block_size - 1) / block_size;
|
|
|
|
file::iovec_t iov;
|
|
int ret = 0;
|
|
|
|
// keep track of which blocks we have locked by incrementing
|
|
// their refcounts. This is used to decrement only these blocks
|
|
// later.
|
|
int* locked_blocks = TORRENT_ALLOCA(int, blocks_in_piece);
|
|
memset(locked_blocks, 0, blocks_in_piece * sizeof(int));
|
|
int num_locked_blocks = 0;
|
|
|
|
// increment the refcounts of all
|
|
// blocks up front, and then hash them without holding the lock
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(59)">../src/disk_io_thread.cpp:2452</a></td><td>introduce a holder class that automatically increments and decrements the piece_refcount</td></tr><tr id="59" style="display: none;" colspan="3"><td colspan="3"><h2>introduce a holder class that automatically increments
|
|
and decrements the piece_refcount</h2><h4>../src/disk_io_thread.cpp:2452</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> for (int i = ph->offset / block_size; i < blocks_in_piece; ++i)
|
|
{
|
|
iov.iov_len = (std::min)(block_size, piece_size - ph->offset);
|
|
|
|
if (next_locked_block < num_locked_blocks
|
|
&& locked_blocks[next_locked_block] == i)
|
|
{
|
|
++next_locked_block;
|
|
TORRENT_PIECE_ASSERT(pe->blocks[i].buf, pe);
|
|
TORRENT_PIECE_ASSERT(ph->offset == i * block_size, pe);
|
|
ph->offset += iov.iov_len;
|
|
ph->h.update(pe->blocks[i].buf, iov.iov_len);
|
|
}
|
|
else
|
|
{
|
|
iov.iov_base = m_disk_cache.allocate_buffer("hashing");
|
|
|
|
if (iov.iov_base == NULL)
|
|
{
|
|
l.lock();
|
|
<div style="background: #ffff00" width="100%">
|
|
</div> // decrement the refcounts of the blocks we just hashed
|
|
for (int i = 0; i < num_locked_blocks; ++i)
|
|
m_disk_cache.dec_block_refcount(pe, locked_blocks[i], block_cache::ref_hashing);
|
|
|
|
--pe->piece_refcount;
|
|
pe->hashing = false;
|
|
delete pe->hash;
|
|
pe->hash = NULL;
|
|
|
|
m_disk_cache.maybe_free_piece(pe);
|
|
|
|
j->error.ec = errors::no_memory;
|
|
j->error.operation = storage_error::alloc_cache_piece;
|
|
return -1;
|
|
}
|
|
|
|
DLOG("do_hash: reading (piece: %d block: %d)\n", int(pe->piece), i);
|
|
|
|
ptime start_time = time_now_hires();
|
|
|
|
TORRENT_PIECE_ASSERT(ph->offset == i * block_size, pe);
|
|
ret = j->storage->get_storage_impl()->readv(&iov, 1, j->piece
|
|
, ph->offset, file_flags, j->error);
|
|
|
|
if (ret < 0)
|
|
{
|
|
m_disk_cache.free_buffer((char*)iov.iov_base);
|
|
l.lock();
|
|
break;
|
|
}
|
|
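	// ---- illustrative sketch, not from ../src/disk_io_thread.cpp ----
	// a minimal RAII holder along the lines of the TODO above: increment
	// piece_refcount on construction and decrement on destruction, so an
	// early return (like the error path above) can't leak a reference.
	// C++03 style to match the surrounding code; maybe_free_piece() would
	// still have to be called explicitly where appropriate.
	struct piece_refcount_holder
	{
		explicit piece_refcount_holder(cached_piece_entry* p) : m_pe(p)
		{ ++m_pe->piece_refcount; }
		~piece_refcount_holder() { --m_pe->piece_refcount; }
	private:
		cached_piece_entry* m_pe;
		// non-copyable
		piece_refcount_holder(piece_refcount_holder const&);
		piece_refcount_holder& operator=(piece_refcount_holder const&);
	};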
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(60)">../src/disk_io_thread.cpp:2692</a></td><td>it would be nice to not have to lock the mutex every turn through this loop</td></tr><tr id="60" style="display: none;" colspan="3"><td colspan="3"><h2>it would be nice to not have to lock the mutex every
|
|
turn through this loop</h2><h4>../src/disk_io_thread.cpp:2692</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> {
|
|
j->error.ec = error::no_memory;
|
|
j->error.operation = storage_error::alloc_cache_piece;
|
|
return -1;
|
|
}
|
|
|
|
#if TORRENT_USE_ASSERTS
|
|
pe->piece_log.push_back(piece_log_t(j->action));
|
|
#endif
|
|
TORRENT_PIECE_ASSERT(pe->cache_state <= cached_piece_entry::read_lru1 || pe->cache_state == cached_piece_entry::read_lru2, pe);
|
|
++pe->piece_refcount;
|
|
|
|
int block_size = m_disk_cache.block_size();
|
|
int piece_size = j->storage->files()->piece_size(j->piece);
|
|
int blocks_in_piece = (piece_size + block_size - 1) / block_size;
|
|
|
|
file::iovec_t iov;
|
|
int ret = 0;
|
|
int offset = 0;
|
|
|
|
<div style="background: #ffff00" width="100%"> for (int i = 0; i < blocks_in_piece; ++i)
|
|
</div> {
|
|
iov.iov_len = (std::min)(block_size, piece_size - offset);
|
|
|
|
// is the block already in the cache?
|
|
if (pe->blocks[i].buf) continue;
|
|
l.unlock();
|
|
|
|
iov.iov_base = m_disk_cache.allocate_buffer("read cache");
|
|
|
|
if (iov.iov_base == NULL)
|
|
{
|
|
//#error introduce a holder class that automatically increments and decrements the piece_refcount
|
|
--pe->piece_refcount;
|
|
m_disk_cache.maybe_free_piece(pe);
|
|
j->error.ec = errors::no_memory;
|
|
j->error.operation = storage_error::alloc_cache_piece;
|
|
return -1;
|
|
}
|
|
|
|
DLOG("do_cache_piece: reading (piece: %d block: %d)\n"
|
|
, int(pe->piece), i);
|
|
|
|
ptime start_time = time_now_hires();
|
|
|
|
ret = j->storage->get_storage_impl()->readv(&iov, 1, j->piece
|
|
, offset, file_flags, j->error);
|
|
|
|
if (ret < 0)
|
|
{
|
|
l.lock();
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(61)">../src/http_tracker_connection.cpp:96</a></td><td>support authentication (i.e. user name and password) in the URL</td></tr><tr id="61" style="display: none;" colspan="3"><td colspan="3"><h2>support authentication (i.e. user name and password) in the URL</h2><h4>../src/http_tracker_connection.cpp:96</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> , tracker_request const& req
|
|
, boost::weak_ptr<request_callback> c
|
|
, aux::session_impl& ses
|
|
, std::string const& auth
|
|
#if TORRENT_USE_I2P
|
|
, i2p_connection* i2p_conn
|
|
#endif
|
|
)
|
|
: tracker_connection(man, req, ios, c)
|
|
, m_man(man)
|
|
, m_ses(ses)
|
|
, m_cc(cc)
|
|
, m_ios(ios)
|
|
#if TORRENT_USE_I2P
|
|
, m_i2p_conn(i2p_conn)
|
|
#endif
|
|
{}
|
|
|
|
void http_tracker_connection::start()
|
|
{
|
|
<div style="background: #ffff00" width="100%"> std::string url = tracker_req().url;
|
|
</div>
|
|
if (tracker_req().kind == tracker_request::scrape_request)
|
|
{
|
|
// find and replace "announce" with "scrape"
|
|
// in request
|
|
|
|
std::size_t pos = url.find("announce");
|
|
if (pos == std::string::npos)
|
|
{
|
|
tracker_connection::fail(error_code(errors::scrape_not_available));
|
|
return;
|
|
}
|
|
url.replace(pos, 8, "scrape");
|
|
}
|
|
|
|
#if TORRENT_USE_I2P
|
|
bool i2p = is_i2p_url(url);
|
|
#else
|
|
static const bool i2p = false;
|
|
#endif
|
|
|
|
aux::session_settings const& settings = m_ses.settings();
|
|
|
|
// if request-string already contains
|
|
// some parameters, append an ampersand instead
|
|
// of a question mark
|
|
size_t arguments_start = url.find('?');
|
|
if (arguments_start != std::string::npos)
|
|
url += "&";
|
|
else
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(62)">../src/metadata_transfer.cpp:359</a></td><td>this is not safe. The torrent could be unloaded while we're still sending the metadata</td></tr><tr id="62" style="display: none;" colspan="3"><td colspan="3"><h2>this is not safe. The torrent could be unloaded while
|
|
we're still sending the metadata</h2><h4>../src/metadata_transfer.cpp:359</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> std::pair<int, int> offset
|
|
= req_to_offset(req, (int)m_tp.metadata().left());
|
|
|
|
char msg[15];
|
|
char* ptr = msg;
|
|
|
|
#ifdef TORRENT_VERBOSE_LOGGING
|
|
m_pc.peer_log("==> METADATA [ start: %d | total_size: %d | offset: %d | data_size: %d ]"
|
|
, req.first, req.second, offset.first, offset.second);
|
|
#endif
|
|
// yes, we have metadata, send it
|
|
detail::write_uint32(11 + offset.second, ptr);
|
|
detail::write_uint8(bt_peer_connection::msg_extended, ptr);
|
|
detail::write_uint8(m_message_index, ptr);
|
|
// means 'data packet'
|
|
detail::write_uint8(1, ptr);
|
|
detail::write_uint32((int)m_tp.metadata().left(), ptr);
|
|
detail::write_uint32(offset.first, ptr);
|
|
m_pc.send_buffer(msg, sizeof(msg));
|
|
|
|
<div style="background: #ffff00" width="100%"> char const* metadata = m_tp.metadata().begin;
|
|
</div> m_pc.append_const_send_buffer(metadata + offset.first, offset.second);
|
|
}
|
|
else
|
|
{
|
|
#ifdef TORRENT_VERBOSE_LOGGING
|
|
m_pc.peer_log("==> DONT HAVE METADATA\n");
|
|
#endif
|
|
char msg[4+3];
|
|
char* ptr = msg;
|
|
|
|
// we don't have the metadata, reply with
|
|
// don't have-message
|
|
detail::write_uint32(1 + 2, ptr);
|
|
detail::write_uint8(bt_peer_connection::msg_extended, ptr);
|
|
detail::write_uint8(m_message_index, ptr);
|
|
// means 'have no data'
|
|
detail::write_uint8(2, ptr);
|
|
m_pc.send_buffer(msg, sizeof(msg));
|
|
}
|
|
m_pc.setup_send();
|
|
}
|
|
|
|
virtual bool on_extended(int length
|
|
, int msg, buffer::const_interval body)
|
|
{
|
|
if (msg != 14) return false;
|
|
if (m_message_index == 0) return false;
|
|
|
|
if (length > 500 * 1024)
|
|
{
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(63)">../src/packet_buffer.cpp:176</a></td><td>use compare_less_wrap for this comparison as well</td></tr><tr id="63" style="display: none;" colspan="3"><td colspan="3"><h2>use compare_less_wrap for this comparison as well</h2><h4>../src/packet_buffer.cpp:176</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> while (new_size < size)
|
|
new_size <<= 1;
|
|
|
|
void** new_storage = (void**)malloc(sizeof(void*) * new_size);
|
|
|
|
for (index_type i = 0; i < new_size; ++i)
|
|
new_storage[i] = 0;
|
|
|
|
for (index_type i = m_first; i < (m_first + m_capacity); ++i)
|
|
new_storage[i & (new_size - 1)] = m_storage[i & (m_capacity - 1)];
|
|
|
|
free(m_storage);
|
|
|
|
m_storage = new_storage;
|
|
m_capacity = new_size;
|
|
}
|
|
|
|
void* packet_buffer::remove(index_type idx)
|
|
{
|
|
INVARIANT_CHECK;
|
|
<div style="background: #ffff00" width="100%"> if (idx >= m_first + m_capacity)
|
|
</div> return 0;
|
|
|
|
if (compare_less_wrap(idx, m_first, 0xffff))
|
|
return 0;
|
|
|
|
const int mask = (m_capacity - 1);
|
|
void* old_value = m_storage[idx & mask];
|
|
m_storage[idx & mask] = 0;
|
|
|
|
if (old_value)
|
|
{
|
|
--m_size;
|
|
if (m_size == 0) m_last = m_first;
|
|
}
|
|
|
|
if (idx == m_first && m_size != 0)
|
|
{
|
|
++m_first;
|
|
for (boost::uint32_t i = 0; i < m_capacity; ++i, ++m_first)
|
|
if (m_storage[m_first & mask]) break;
|
|
m_first &= 0xffff;
|
|
}
|
|
|
|
if (((idx + 1) & 0xffff) == m_last && m_size != 0)
|
|
{
|
|
--m_last;
|
|
for (boost::uint32_t i = 0; i < m_capacity; ++i, --m_last)
|
|
if (m_storage[m_last & mask]) break;
|
|
++m_last;
|
|
m_last &= 0xffff;
|
|
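	// ---- illustrative sketch, not from ../src/packet_buffer.cpp ----
	// what the TODO above asks for: make the upper-bound check in
	// remove() wrap-aware as well, e.g.
	//
	//   if (!compare_less_wrap(idx, (m_first + m_capacity) & 0xffff, 0xffff))
	//      return 0;
	//
	// i.e. bail out unless idx falls inside the [m_first, m_first + m_capacity)
	// window, using the same 16-bit wrapping as the lower-bound check.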
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(64)">../src/part_file.cpp:252</a></td><td>what do we do if someone is currently reading this piece from disk? does it matter? Since we won't actively erase the data from disk, but it may be overwritten soon, it's probably not that big of a deal</td></tr><tr id="64" style="display: none;" colspan="3"><td colspan="3"><h2>what do we do if someone is currently reading this piece from
disk? does it matter? Since we won't actively erase the
data from disk, but it may be overwritten soon, it's probably not that
big of a deal</h2><h4>../src/part_file.cpp:252</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">	if (((mode & file::rw_mask) != file::read_only)
|
|
&& ec == boost::system::errc::no_such_file_or_directory)
|
|
{
|
|
// this means the directory the file is in doesn't exist.
|
|
// so create it
|
|
ec.clear();
|
|
create_directories(m_path, ec);
|
|
|
|
if (ec) return;
|
|
m_file.open(fn, mode, ec);
|
|
}
|
|
}
|
|
|
|
void part_file::free_piece(int piece, error_code& ec)
|
|
{
|
|
mutex::scoped_lock l(m_mutex);
|
|
|
|
boost::unordered_map<int, int>::iterator i = m_piece_map.find(piece);
|
|
if (i == m_piece_map.end()) return;
|
|
|
|
<div style="background: #ffff00" width="100%">
|
|
</div> m_free_slots.push_back(i->second);
|
|
m_piece_map.erase(i);
|
|
m_dirty_metadata = true;
|
|
}
|
|
|
|
void part_file::move_partfile(std::string const& path, error_code& ec)
|
|
{
|
|
mutex::scoped_lock l(m_mutex);
|
|
|
|
flush_metadata_impl(ec);
|
|
if (ec) return;
|
|
|
|
m_file.close();
|
|
|
|
if (!m_piece_map.empty())
|
|
{
|
|
std::string old_path = combine_path(m_path, m_name);
|
|
std::string new_path = combine_path(path, m_name);
|
|
|
|
rename(old_path, new_path, ec);
|
|
if (ec == boost::system::errc::no_such_file_or_directory)
|
|
ec.clear();
|
|
|
|
if (ec)
|
|
{
|
|
copy_file(old_path, new_path, ec);
|
|
if (ec) return;
|
|
remove(old_path, ec);
|
|
}
|
|
}
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(65)">../src/part_file.cpp:344</a></td><td>instead of rebuilding the whole file header and flushing it, update the slot entries as we go</td></tr><tr id="65" style="display: none;" colspan="3"><td colspan="3"><h2>instead of rebuilding the whole file header
|
|
and flushing it, update the slot entries as we go</h2><h4>../src/part_file.cpp:344</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> if (block_to_copy == m_piece_size)
|
|
{
|
|
m_free_slots.push_back(i->second);
|
|
m_piece_map.erase(i);
|
|
m_dirty_metadata = true;
|
|
}
|
|
}
|
|
file_offset += block_to_copy;
|
|
piece_offset = 0;
|
|
size -= block_to_copy;
|
|
}
|
|
}
|
|
|
|
void part_file::flush_metadata(error_code& ec)
|
|
{
|
|
mutex::scoped_lock l(m_mutex);
|
|
|
|
flush_metadata_impl(ec);
|
|
}
|
|
|
|
<div style="background: #ffff00" width="100%"> void part_file::flush_metadata_impl(error_code& ec)
|
|
</div> {
|
|
// do we need to flush the metadata?
|
|
if (m_dirty_metadata == false) return;
|
|
|
|
if (m_piece_map.empty())
|
|
{
|
|
// if we don't have any pieces left in the
|
|
// part file, remove it
|
|
std::string p = combine_path(m_path, m_name);
|
|
remove(p, ec);
|
|
|
|
if (ec == boost::system::errc::no_such_file_or_directory)
|
|
ec.clear();
|
|
return;
|
|
}
|
|
|
|
open_file(file::read_write, ec);
|
|
if (ec) return;
|
|
|
|
boost::scoped_array<boost::uint32_t> header(new boost::uint32_t[m_header_size]);
|
|
|
|
using namespace libtorrent::detail;
|
|
|
|
char* ptr = (char*)header.get();
|
|
|
|
write_uint32(m_max_pieces, ptr);
|
|
write_uint32(m_piece_size, ptr);
|
|
|
|
for (int piece = 0; piece < m_max_pieces; ++piece)
|
|
{
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(66)">../src/peer_connection.cpp:1115</a></td><td>this should be the global download rate</td></tr><tr id="66" style="display: none;" colspan="3"><td colspan="3"><h2>this should be the global download rate</h2><h4>../src/peer_connection.cpp:1115</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">
|
|
int rate = 0;
|
|
|
|
// if we haven't received any data recently, the current download rate
|
|
// is not representative
|
|
if (time_now() - m_last_piece > seconds(30) && m_download_rate_peak > 0)
|
|
{
|
|
rate = m_download_rate_peak;
|
|
}
|
|
else if (time_now() - m_last_unchoked < seconds(5)
|
|
&& m_statistics.total_payload_upload() < 2 * 0x4000)
|
|
{
|
|
// if we're have only been unchoked for a short period of time,
|
|
// we don't know what rate we can get from this peer. Instead of assuming
|
|
// the lowest possible rate, assume the average.
|
|
|
|
int peers_with_requests = stats_counters()[counters::num_peers_down_requests];
|
|
// avoid division by 0
|
|
if (peers_with_requests == 0) peers_with_requests = 1;
|
|
|
|
<div style="background: #ffff00" width="100%"> rate = t->statistics().transfer_rate(stat::download_payload) / peers_with_requests;
|
|
</div> }
|
|
else
|
|
{
|
|
// current download rate in bytes per seconds
|
|
rate = m_statistics.transfer_rate(stat::download_payload);
|
|
}
|
|
|
|
// avoid division by zero
|
|
if (rate < 50) rate = 50;
|
|
|
|
// average of current rate and peak
|
|
// rate = (rate + m_download_rate_peak) / 2;
|
|
|
|
return milliseconds((m_outstanding_bytes
|
|
+ m_queued_time_critical * t->block_size() * 1000) / rate);
|
|
}
|
|
|
|
void peer_connection::add_stat(size_type downloaded, size_type uploaded)
|
|
{
|
|
m_statistics.add_stat(downloaded, uploaded);
|
|
}
|
|
|
|
void peer_connection::received_bytes(int bytes_payload, int bytes_protocol)
|
|
{
|
|
m_statistics.received_bytes(bytes_payload, bytes_protocol);
|
|
if (m_ignore_stats) return;
|
|
boost::shared_ptr<torrent> t = m_torrent.lock();
|
|
if (!t) return;
|
|
t->received_bytes(bytes_payload, bytes_protocol);
|
|
}
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(67)">../src/peer_connection.cpp:3294</a></td><td>sort the allowed fast set in priority order</td></tr><tr id="67" style="display: none;" colspan="3"><td colspan="3"><h2>sort the allowed fast set in priority order</h2><h4>../src/peer_connection.cpp:3294</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> m_allowed_fast.push_back(index);
|
|
|
|
// if the peer has the piece and we want
|
|
// to download it, request it
|
|
if (int(m_have_piece.size()) > index
|
|
&& m_have_piece[index]
|
|
&& !t->has_piece_passed(index)
|
|
&& t->valid_metadata()
|
|
&& t->has_picker()
|
|
&& t->picker().piece_priority(index) > 0)
|
|
{
|
|
t->peer_is_interesting(*this);
|
|
}
|
|
}
|
|
|
|
std::vector<int> const& peer_connection::allowed_fast()
|
|
{
|
|
boost::shared_ptr<torrent> t = m_torrent.lock();
|
|
TORRENT_ASSERT(t);
|
|
|
|
<div style="background: #ffff00" width="100%"> return m_allowed_fast;
|
|
</div> }
|
|
|
|
bool peer_connection::can_request_time_critical() const
|
|
{
|
|
if (has_peer_choked() || !is_interesting()) return false;
|
|
if ((int)m_download_queue.size() + (int)m_request_queue.size()
|
|
> m_desired_queue_size * 2) return false;
|
|
if (on_parole()) return false;
|
|
if (m_disconnecting) return false;
|
|
boost::shared_ptr<torrent> t = m_torrent.lock();
|
|
TORRENT_ASSERT(t);
|
|
if (t->upload_mode()) return false;
|
|
|
|
// ignore snubbed peers, since they're not likely to return pieces in a
|
|
// timely manner anyway
|
|
if (m_snubbed) return false;
|
|
return true;
|
|
}
|
|
|
|
bool peer_connection::make_time_critical(piece_block const& block)
|
|
{
|
|
std::vector<pending_block>::iterator rit = std::find_if(m_request_queue.begin()
|
|
, m_request_queue.end(), has_block(block));
|
|
if (rit == m_request_queue.end()) return false;
|
|
#if TORRENT_USE_ASSERTS
|
|
boost::shared_ptr<torrent> t = m_torrent.lock();
|
|
TORRENT_ASSERT(t);
|
|
TORRENT_ASSERT(t->has_picker());
|
|
TORRENT_ASSERT(t->picker().is_requested(block));
|
|
#endif
|
|
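	// ---- illustrative sketch, not from ../src/peer_connection.cpp ----
	// the TODO in allowed_fast() above: hand the set back sorted by piece
	// priority so higher-priority pieces get requested first. One simple
	// (not necessarily cheapest) way, assuming the torrent has a picker
	// at this point:
	//
	//   std::sort(m_allowed_fast.begin(), m_allowed_fast.end()
	//      , boost::bind(&piece_picker::piece_priority, &t->picker(), _1)
	//      > boost::bind(&piece_picker::piece_priority, &t->picker(), _2));
	//
	//   return m_allowed_fast;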
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(68)">../src/piece_picker.cpp:2407</a></td><td>when expanding pieces for cache stripe reasons, the !downloading condition doesn't make much sense</td></tr><tr id="68" style="display: none;" colspan="3"><td colspan="3"><h2>when expanding pieces for cache stripe reasons,
|
|
the !downloading condition doesn't make much sense</h2><h4>../src/piece_picker.cpp:2407</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> TORRENT_ASSERT(index < (int)m_piece_map.size() || m_piece_map.empty());
|
|
if (index+1 == (int)m_piece_map.size())
|
|
return m_blocks_in_last_piece;
|
|
else
|
|
return m_blocks_per_piece;
|
|
}
|
|
|
|
bool piece_picker::is_piece_free(int piece, bitfield const& bitmask) const
|
|
{
|
|
TORRENT_ASSERT(piece >= 0 && piece < int(m_piece_map.size()));
|
|
return bitmask[piece]
|
|
&& !m_piece_map[piece].have()
|
|
&& !m_piece_map[piece].filtered();
|
|
}
|
|
|
|
bool piece_picker::can_pick(int piece, bitfield const& bitmask) const
|
|
{
|
|
TORRENT_ASSERT(piece >= 0 && piece < int(m_piece_map.size()));
|
|
return bitmask[piece]
|
|
&& !m_piece_map[piece].have()
|
|
<div style="background: #ffff00" width="100%"> && !m_piece_map[piece].downloading()
|
|
</div> && !m_piece_map[piece].filtered();
|
|
}
|
|
|
|
#if TORRENT_USE_INVARIANT_CHECKS
|
|
void piece_picker::check_peers()
|
|
{
|
|
for (std::vector<block_info>::iterator i = m_block_info.begin()
|
|
, end(m_block_info.end()); i != end; ++i)
|
|
{
|
|
TORRENT_ASSERT(i->peer == 0 || static_cast<torrent_peer*>(i->peer)->in_use);
|
|
}
|
|
}
|
|
#endif
|
|
|
|
void piece_picker::clear_peer(void* peer)
|
|
{
|
|
for (std::vector<block_info>::iterator i = m_block_info.begin()
|
|
, end(m_block_info.end()); i != end; ++i)
|
|
{
|
|
if (i->peer == peer) i->peer = 0;
|
|
}
|
|
}
|
|
|
|
namespace
|
|
{
|
|
// the first bool is true if this is the only peer that has requested and downloaded
|
|
// blocks from this piece.
|
|
// the second bool is true if this is the only active peer that is requesting
|
|
// and downloading blocks from this piece. Active means having a connection.
|
|
boost::tuple<bool, bool> requested_from(piece_picker::downloading_piece const& p
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(69)">../src/session_impl.cpp:678</a></td><td>there's no rule here to make uTP connections not have the global or local rate limits apply to it. This used to be the default.</td></tr><tr id="69" style="display: none;" colspan="3"><td colspan="3"><h2>there's no rule here to make uTP connections not have the global or
|
|
local rate limits apply to it. This used to be the default.</h2><h4>../src/session_impl.cpp:678</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> m_global_class = m_classes.new_peer_class("global");
|
|
m_tcp_peer_class = m_classes.new_peer_class("tcp");
|
|
m_local_peer_class = m_classes.new_peer_class("local");
|
|
// local peers are always unchoked
|
|
m_classes.at(m_local_peer_class)->ignore_unchoke_slots = true;
|
|
// local peers are allowed to exceed the normal connection
|
|
// limit by 50%
|
|
m_classes.at(m_local_peer_class)->connection_limit_factor = 150;
|
|
|
|
TORRENT_ASSERT(m_global_class == session::global_peer_class_id);
|
|
TORRENT_ASSERT(m_tcp_peer_class == session::tcp_peer_class_id);
|
|
TORRENT_ASSERT(m_local_peer_class == session::local_peer_class_id);
|
|
|
|
init_peer_class_filter(true);
|
|
|
|
// TCP, SSL/TCP and I2P connections should be assigned the TCP peer class
|
|
m_peer_class_type_filter.add(peer_class_type_filter::tcp_socket, m_tcp_peer_class);
|
|
m_peer_class_type_filter.add(peer_class_type_filter::ssl_tcp_socket, m_tcp_peer_class);
|
|
m_peer_class_type_filter.add(peer_class_type_filter::i2p_socket, m_tcp_peer_class);
|
|
|
|
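	// ---- illustrative sketch, not from ../src/session_impl.cpp ----
	// regarding the TODO marked just below: restoring the old behaviour
	// would mean taking uTP sockets out of the rate-limited peer classes,
	// roughly (assuming peer_class_type_filter::remove() mirrors add()):
	//
	//   m_peer_class_type_filter.remove(peer_class_type_filter::utp_socket
	//      , m_global_class);
	//   m_peer_class_type_filter.remove(peer_class_type_filter::ssl_utp_socket
	//      , m_global_class);
	//   // and similarly for m_local_peer_class if local rate limits
	//   // should not apply to uTP either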
<div style="background: #ffff00" width="100%">
|
|
</div>#ifdef TORRENT_UPNP_LOGGING
|
|
m_upnp_log.open("upnp.log", std::ios::in | std::ios::out | std::ios::trunc);
|
|
#endif
|
|
|
|
#if defined TORRENT_VERBOSE_LOGGING || defined TORRENT_LOGGING || defined TORRENT_ERROR_LOGGING
|
|
|
|
char tmp[300];
|
|
snprintf(tmp, sizeof(tmp), "libtorrent configuration: %s\n"
|
|
"libtorrent version: %s\n"
|
|
"libtorrent revision: %s\n\n"
|
|
, TORRENT_CFG_STRING
|
|
, LIBTORRENT_VERSION
|
|
, LIBTORRENT_REVISION);
|
|
(*m_logger) << tmp;
|
|
|
|
#endif // TORRENT_VERBOSE_LOGGING
|
|
|
|
#ifdef TORRENT_STATS
|
|
|
|
m_stats_logger = 0;
|
|
m_log_seq = 0;
|
|
m_stats_logging_enabled = true;
|
|
|
|
memset(&m_last_cache_status, 0, sizeof(m_last_cache_status));
|
|
vm_statistics_data_t vst;
|
|
get_vm_stats(&vst, ec);
|
|
if (!ec) m_last_vm_stat = vst;
|
|
|
|
get_thread_cpu_usage(&m_network_thread_cpu_usage);
|
|
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(70)">../src/session_impl.cpp:2384</a></td><td>instead of having a special case for this, just make the default listen interfaces be "0.0.0.0:6881,[::1]:6881" and use the generic path. That would even allow for not listening at all.</td></tr><tr id="70" style="display: none;" colspan="3"><td colspan="3"><h2>instead of having a special case for this, just make the
|
|
default listen interfaces be "0.0.0.0:6881,[::1]:6881" and use
|
|
the generic path. That would even allow for not listening at all.</h2><h4>../src/session_impl.cpp:2384</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">
|
|
// reset the retry counter
|
|
m_listen_port_retries = m_settings.get_int(settings_pack::max_retry_port_bind);
|
|
|
|
retry:
|
|
|
|
// close the open listen sockets
|
|
// close the listen sockets
|
|
for (std::list<listen_socket_t>::iterator i = m_listen_sockets.begin()
|
|
, end(m_listen_sockets.end()); i != end; ++i)
|
|
i->sock->close(ec);
|
|
m_listen_sockets.clear();
|
|
m_stats_counters.set_value(counters::has_incoming_connections, 0);
|
|
ec.clear();
|
|
|
|
if (m_abort) return;
|
|
|
|
m_ipv6_interface = tcp::endpoint();
|
|
m_ipv4_interface = tcp::endpoint();
|
|
|
|
<div style="background: #ffff00" width="100%"> if (m_listen_interfaces.empty())
|
|
</div> {
|
|
// this means we should open two listen sockets
|
|
// one for IPv4 and one for IPv6
|
|
|
|
listen_socket_t s;
|
|
setup_listener(&s, "0.0.0.0", true, m_listen_interface.port()
|
|
, m_listen_port_retries, flags, ec);
|
|
|
|
if (s.sock)
|
|
{
|
|
// update the listen_interface member with the
|
|
// actual port we ended up listening on, so that the other
|
|
// sockets can be bound to the same one
|
|
m_listen_interface.port(s.external_port);
|
|
|
|
TORRENT_ASSERT(!m_abort);
|
|
m_listen_sockets.push_back(s);
|
|
}
|
|
|
|
#ifdef TORRENT_USE_OPENSSL
|
|
if (m_settings.get_int(settings_pack::ssl_listen))
|
|
{
|
|
listen_socket_t s;
|
|
s.ssl = true;
|
|
int retries = 10;
|
|
setup_listener(&s, "0.0.0.0", true, m_settings.get_int(settings_pack::ssl_listen)
|
|
, retries, flags, ec);
|
|
|
|
if (s.sock)
|
|
{
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(71)">../src/session_impl.cpp:3225</a></td><td>should this function take a shared_ptr instead?</td></tr><tr id="71" style="display: none;" colspan="3"><td colspan="3"><h2>should this function take a shared_ptr instead?</h2><h4>../src/session_impl.cpp:3225</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> {
|
|
#if defined TORRENT_ASIO_DEBUGGING
|
|
complete_async("session_impl::on_socks_accept");
|
|
#endif
|
|
m_socks_listen_socket.reset();
|
|
if (e == asio::error::operation_aborted) return;
|
|
if (e)
|
|
{
|
|
if (m_alerts.should_post<listen_failed_alert>())
|
|
m_alerts.post_alert(listen_failed_alert("socks5", listen_failed_alert::accept, e
|
|
, listen_failed_alert::socks5));
|
|
return;
|
|
}
|
|
open_new_incoming_socks_connection();
|
|
incoming_connection(s);
|
|
}
|
|
|
|
// if cancel_with_cq is set, the peer connection is
|
|
// currently expected to be scheduled for a connection
|
|
// with the connection queue, and should be cancelled
|
|
<div style="background: #ffff00" width="100%"> void session_impl::close_connection(peer_connection* p
|
|
</div> , error_code const& ec, bool cancel_with_cq)
|
|
{
|
|
TORRENT_ASSERT(is_single_thread());
|
|
boost::shared_ptr<peer_connection> sp(p->self());
|
|
|
|
if (cancel_with_cq) m_half_open.cancel(p);
|
|
|
|
// someone else is holding a reference, it's important that
|
|
// it's destructed from the network thread. Make sure the
|
|
// last reference is held by the network thread.
|
|
if (!sp.unique())
|
|
m_undead_peers.push_back(sp);
|
|
|
|
// too expensive
|
|
// INVARIANT_CHECK;
|
|
|
|
#ifdef TORRENT_DEBUG
|
|
// for (aux::session_impl::torrent_map::const_iterator i = m_torrents.begin()
|
|
// , end(m_torrents.end()); i != end; ++i)
|
|
// TORRENT_ASSERT(!i->second->has_peer((peer_connection*)p));
|
|
#endif
|
|
|
|
#if defined(TORRENT_LOGGING)
|
|
session_log(" CLOSING CONNECTION %s : %s"
|
|
, print_endpoint(p->remote()).c_str(), ec.message().c_str());
|
|
#endif
|
|
|
|
TORRENT_ASSERT(p->is_disconnecting());
|
|
|
|
if (!p->is_choked() && !p->ignore_unchoke_slots()) --m_num_unchoked;
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(72)">../src/session_impl.cpp:3600</a></td><td>have a separate list for these connections, instead of having to loop through all of them</td></tr><tr id="72" style="display: none;" colspan="3"><td colspan="3"><h2>have a separate list for these connections, instead of having to loop through all of them</h2><h4>../src/session_impl.cpp:3600</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> if (m_auto_manage_time_scaler < 0)
|
|
{
|
|
INVARIANT_CHECK;
|
|
m_auto_manage_time_scaler = settings().get_int(settings_pack::auto_manage_interval);
|
|
recalculate_auto_managed_torrents();
|
|
}
|
|
|
|
// --------------------------------------------------------------
|
|
// check for incoming connections that might have timed out
|
|
// --------------------------------------------------------------
|
|
|
|
for (connection_map::iterator i = m_connections.begin();
|
|
i != m_connections.end();)
|
|
{
|
|
peer_connection* p = (*i).get();
|
|
++i;
|
|
// ignore connections that already have a torrent, since they
|
|
// are ticked through the torrents' second_tick
|
|
if (!p->associated_torrent().expired()) continue;
|
|
|
|
<div style="background: #ffff00" width="100%"> if (m_last_tick - p->connected_time()
|
|
</div> > seconds(m_settings.get_int(settings_pack::handshake_timeout)))
|
|
p->disconnect(errors::timed_out, peer_connection::op_bittorrent);
|
|
}
|
|
|
|
// --------------------------------------------------------------
|
|
// second_tick every torrent (that wants it)
|
|
// --------------------------------------------------------------
|
|
|
|
#if TORRENT_DEBUG_STREAMING > 0
|
|
printf("\033[2J\033[0;0H");
|
|
#endif
|
|
|
|
std::vector<torrent*>& want_tick = m_torrent_lists[torrent_want_tick];
|
|
for (int i = 0; i < int(want_tick.size()); ++i)
|
|
{
|
|
torrent& t = *want_tick[i];
|
|
TORRENT_ASSERT(t.want_tick());
|
|
TORRENT_ASSERT(!t.is_aborted());
|
|
|
|
t.second_tick(tick_interval_ms, m_tick_residual / 1000);
|
|
|
|
// if the call to second_tick caused the torrent
|
|
// to no longer want to be ticked (i.e. it was
|
|
// removed from the list) we need to back up the counter
|
|
// to not miss the torrent after it
|
|
if (!t.want_tick()) --i;
|
|
}
|
|
|
|
#ifndef TORRENT_DISABLE_DHT
|
|
if (m_dht)
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(73)">../src/session_impl.cpp:3641</a></td><td>this should apply to all bandwidth channels</td></tr><tr id="73" style="display: none;" colspan="3"><td colspan="3"><h2>this should apply to all bandwidth channels</h2><h4>../src/session_impl.cpp:3641</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> t.second_tick(tick_interval_ms, m_tick_residual / 1000);
|
|
|
|
// if the call to second_tick caused the torrent
|
|
// to no longer want to be ticked (i.e. it was
|
|
// removed from the list) we need to back up the counter
|
|
// to not miss the torrent after it
|
|
if (!t.want_tick()) --i;
|
|
}
|
|
|
|
#ifndef TORRENT_DISABLE_DHT
|
|
if (m_dht)
|
|
{
|
|
int dht_down;
|
|
int dht_up;
|
|
m_dht->network_stats(dht_up, dht_down);
|
|
m_stat.sent_dht_bytes(dht_up);
|
|
m_stat.received_dht_bytes(dht_down);
|
|
}
|
|
#endif
|
|
|
|
<div style="background: #ffff00" width="100%"> if (m_settings.get_bool(settings_pack::rate_limit_ip_overhead))
|
|
</div> {
|
|
peer_class* gpc = m_classes.at(m_global_class);
|
|
|
|
gpc->channel[peer_connection::download_channel].use_quota(
|
|
#ifndef TORRENT_DISABLE_DHT
|
|
m_stat.download_dht() +
|
|
#endif
|
|
m_stat.download_tracker());
|
|
|
|
gpc->channel[peer_connection::upload_channel].use_quota(
|
|
#ifndef TORRENT_DISABLE_DHT
|
|
m_stat.upload_dht() +
|
|
#endif
|
|
m_stat.upload_tracker());
|
|
|
|
int up_limit = upload_rate_limit(m_global_class);
|
|
int down_limit = download_rate_limit(m_global_class);
|
|
|
|
if (down_limit > 0
|
|
&& m_stat.download_ip_overhead() >= down_limit
|
|
&& m_alerts.should_post<performance_alert>())
|
|
{
|
|
m_alerts.post_alert(performance_alert(torrent_handle()
|
|
, performance_alert::download_limit_too_low));
|
|
}
|
|
|
|
if (up_limit > 0
|
|
&& m_stat.upload_ip_overhead() >= up_limit
|
|
&& m_alerts.should_post<performance_alert>())
|
|
{
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(74)">../src/session_impl.cpp:4704</a></td><td>these vectors could be copied from m_torrent_lists, if we would maintain them. That way the first pass over all torrents could be avoided. It would be especially efficient if most torrents are not auto-managed: whenever we receive a scrape response (or anything that may change the rank of a torrent), that one torrent could re-sort itself in a list that's kept sorted at all times. That way, this pass over all torrents could be avoided altogether.</td></tr><tr id="74" style="display: none;" colspan="3"><td colspan="3"><h2>these vectors could be copied from m_torrent_lists,
if we would maintain them. That way the first pass over
all torrents could be avoided. It would be especially
efficient if most torrents are not auto-managed:
whenever we receive a scrape response (or anything
that may change the rank of a torrent), that one torrent
could re-sort itself in a list that's kept sorted at all
times. That way, this pass over all torrents could be
avoided altogether.</h2><h4>../src/session_impl.cpp:4704</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">#if defined TORRENT_VERBOSE_LOGGING || defined TORRENT_LOGGING
|
|
if (t->allows_peers())
|
|
t->log_to_all_peers("AUTO MANAGER PAUSING TORRENT");
|
|
#endif
|
|
// use graceful pause for auto-managed torrents
|
|
t->set_allow_peers(false, true);
|
|
}
|
|
}
|
|
}
|
|
|
|
void session_impl::recalculate_auto_managed_torrents()
|
|
{
|
|
INVARIANT_CHECK;
|
|
|
|
m_need_auto_manage = false;
|
|
|
|
if (is_paused()) return;
|
|
|
|
// these vectors are filled with auto managed torrents
|
|
|
|
<div style="background: #ffff00" width="100%"> std::vector<torrent*> checking;
|
|
</div> std::vector<torrent*> downloaders;
|
|
downloaders.reserve(m_torrents.size());
|
|
std::vector<torrent*> seeds;
|
|
seeds.reserve(m_torrents.size());
|
|
|
|
// these counters are set to the number of torrents
|
|
// of each kind we're allowed to have active
|
|
int num_downloaders = settings().get_int(settings_pack::active_downloads);
|
|
int num_seeds = settings().get_int(settings_pack::active_seeds);
|
|
int checking_limit = 1;
|
|
int dht_limit = settings().get_int(settings_pack::active_dht_limit);
|
|
int tracker_limit = settings().get_int(settings_pack::active_tracker_limit);
|
|
int lsd_limit = settings().get_int(settings_pack::active_lsd_limit);
|
|
int hard_limit = settings().get_int(settings_pack::active_limit);
|
|
|
|
if (num_downloaders == -1)
|
|
num_downloaders = (std::numeric_limits<int>::max)();
|
|
if (num_seeds == -1)
|
|
num_seeds = (std::numeric_limits<int>::max)();
|
|
if (hard_limit == -1)
|
|
hard_limit = (std::numeric_limits<int>::max)();
|
|
if (dht_limit == -1)
|
|
dht_limit = (std::numeric_limits<int>::max)();
|
|
if (lsd_limit == -1)
|
|
lsd_limit = (std::numeric_limits<int>::max)();
|
|
if (tracker_limit == -1)
|
|
tracker_limit = (std::numeric_limits<int>::max)();
|
|
|
|
for (torrent_map::iterator i = m_torrents.begin()
|
|
, end(m_torrents.end()); i != end; ++i)
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(75)">../src/session_impl.cpp:4779</a></td><td>allow extensions to sort torrents for queuing</td></tr><tr id="75" style="display: none;" colspan="3"><td colspan="3"><h2>allow extensions to sort torrents for queuing</h2><h4>../src/session_impl.cpp:4779</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> if (t->is_finished())
|
|
seeds.push_back(t);
|
|
else
|
|
downloaders.push_back(t);
|
|
}
|
|
else if (!t->is_paused())
|
|
{
|
|
if (t->state() == torrent_status::checking_files)
|
|
{
|
|
if (checking_limit > 0) --checking_limit;
|
|
continue;
|
|
}
|
|
TORRENT_ASSERT(t->m_resume_data_loaded || !t->valid_metadata());
|
|
--hard_limit;
|
|
}
|
|
}
|
|
|
|
bool handled_by_extension = false;
|
|
|
|
#ifndef TORRENT_DISABLE_EXTENSIONS
|
|
<div style="background: #ffff00" width="100%">#endif
|
|
</div>
|
|
if (!handled_by_extension)
|
|
{
|
|
std::sort(checking.begin(), checking.end()
|
|
, boost::bind(&torrent::sequence_number, _1) < boost::bind(&torrent::sequence_number, _2));
|
|
|
|
std::sort(downloaders.begin(), downloaders.end()
|
|
, boost::bind(&torrent::sequence_number, _1) < boost::bind(&torrent::sequence_number, _2));
|
|
|
|
std::sort(seeds.begin(), seeds.end()
|
|
, boost::bind(&torrent::seed_rank, _1, boost::ref(m_settings))
|
|
> boost::bind(&torrent::seed_rank, _2, boost::ref(m_settings)));
|
|
}
|
|
|
|
auto_manage_torrents(checking, checking_limit, dht_limit, tracker_limit, lsd_limit
|
|
, hard_limit, num_downloaders);
|
|
|
|
if (settings().get_bool(settings_pack::auto_manage_prefer_seeds))
|
|
{
|
|
auto_manage_torrents(seeds, checking_limit, dht_limit, tracker_limit, lsd_limit
|
|
, hard_limit, num_seeds);
|
|
auto_manage_torrents(downloaders, checking_limit, dht_limit, tracker_limit, lsd_limit
|
|
, hard_limit, num_downloaders);
|
|
}
|
|
else
|
|
{
|
|
auto_manage_torrents(downloaders, checking_limit, dht_limit, tracker_limit, lsd_limit
|
|
, hard_limit, num_downloaders);
|
|
auto_manage_torrents(seeds, checking_limit, dht_limit, tracker_limit, lsd_limit
|
|
, hard_limit, num_seeds);
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(76)">../src/session_impl.cpp:4957</a></td><td>use a lower limit than m_settings.connections_limit to reserve 10% or so of the connection slots for incoming connections</td></tr><tr id="76" style="display: none;" colspan="3"><td colspan="3"><h2>use a lower limit than m_settings.connections_limit
to reserve 10% or so of the connection slots for incoming
connections</h2><h4>../src/session_impl.cpp:4957</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">	// if we don't have any free slots, return
|
|
if (free_slots <= -m_half_open.limit()) return;
|
|
|
|
// boost connections are connections made by torrent connection
|
|
// boost, which are done immediately on a tracker response. These
|
|
// connections needs to be deducted from this second
|
|
if (m_boost_connections > 0)
|
|
{
|
|
if (m_boost_connections > max_connections)
|
|
{
|
|
m_boost_connections -= max_connections;
|
|
max_connections = 0;
|
|
}
|
|
else
|
|
{
|
|
max_connections -= m_boost_connections;
|
|
m_boost_connections = 0;
|
|
}
|
|
}
|
|
|
|
<div style="background: #ffff00" width="100%"> int limit = (std::min)(m_settings.get_int(settings_pack::connections_limit)
|
|
</div> - num_connections(), free_slots);
|
|
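	// ---- illustrative sketch, not from ../src/session_impl.cpp ----
	// the TODO above: don't let outgoing connection attempts consume the
	// whole connections_limit; keep roughly 10% of the slots free for
	// incoming connections. The 90/100 split below is illustrative.
	//
	//   int const outgoing_cap
	//      = m_settings.get_int(settings_pack::connections_limit) * 90 / 100;
	//   int limit = (std::min)(outgoing_cap - num_connections(), free_slots);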
|
|
// this logic is here to smooth out the number of new connection
|
|
// attempts over time, to prevent connecting a large number of
|
|
// sockets, wait 10 seconds, and then try again
|
|
if (m_settings.get_bool(settings_pack::smooth_connects) && max_connections > (limit+1) / 2)
|
|
max_connections = (limit+1) / 2;
|
|
|
|
std::vector<torrent*>& want_peers_download = m_torrent_lists[torrent_want_peers_download];
|
|
std::vector<torrent*>& want_peers_finished = m_torrent_lists[torrent_want_peers_finished];
|
|
|
|
// if no torrent want any peers, just return
|
|
if (want_peers_download.empty() && want_peers_finished.empty()) return;
|
|
|
|
// if we don't have any connection attempt quota, return
|
|
if (max_connections <= 0) return;
|
|
|
|
INVARIANT_CHECK;
|
|
|
|
int steps_since_last_connect = 0;
|
|
int num_torrents = int(want_peers_finished.size() + want_peers_download.size());
|
|
for (;;)
|
|
{
|
|
if (m_next_downloading_connect_torrent >= int(want_peers_download.size()))
|
|
m_next_downloading_connect_torrent = 0;
|
|
|
|
if (m_next_finished_connect_torrent >= int(want_peers_finished.size()))
|
|
m_next_finished_connect_torrent = 0;
|
|
|
|
torrent* t = NULL;
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(77)">../src/session_impl.cpp:5119</a></td><td>post a message to have this happen immediately instead of waiting for the next tick</td></tr><tr id="77" style="display: none;" colspan="3"><td colspan="3"><h2>post a message to have this happen
|
|
immediately instead of waiting for the next tick</h2><h4>../src/session_impl.cpp:5119</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> {
|
|
// we've unchoked this peer, and it hasn't reciprocated
|
|
// we may want to increase our estimated reciprocation rate
|
|
p->increase_est_reciprocation_rate();
|
|
}
|
|
}
|
|
}
|
|
|
|
if (!p->is_peer_interested()
|
|
|| p->is_disconnecting()
|
|
|| p->is_connecting())
|
|
{
|
|
// this peer is not unchokable. So, if it's unchoked
|
|
// already, make sure to choke it.
|
|
if (p->is_choked()) continue;
|
|
if (pi && pi->optimistically_unchoked)
|
|
{
|
|
pi->optimistically_unchoked = false;
|
|
// force a new optimistic unchoke
|
|
m_optimistic_unchoke_time_scaler = 0;
|
|
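	// ---- illustrative sketch, not from ../src/session_impl.cpp ----
	// the TODO here: besides zeroing the scaler, queue the re-pick right
	// away instead of waiting for the next tick, e.g.
	//
	//   m_io_service.post(boost::bind(
	//      &session_impl::recalculate_optimistic_unchoke_slots, this));
	//
	// (the exact member function to invoke is an assumption)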
<div style="background: #ffff00" width="100%"> }
|
|
</div> t->choke_peer(*p);
|
|
continue;
|
|
}
|
|
peers.push_back(p.get());
|
|
}
|
|
|
|
if (m_settings.get_int(settings_pack::choking_algorithm) == settings_pack::rate_based_choker)
|
|
{
|
|
m_allowed_upload_slots = 0;
|
|
std::sort(peers.begin(), peers.end()
|
|
, boost::bind(&peer_connection::upload_rate_compare, _1, _2));
|
|
|
|
#ifdef TORRENT_DEBUG
|
|
for (std::vector<peer_connection*>::const_iterator i = peers.begin()
|
|
, end(peers.end()), prev(peers.end()); i != end; ++i)
|
|
{
|
|
if (prev != end)
|
|
{
|
|
boost::shared_ptr<torrent> t1 = (*prev)->associated_torrent().lock();
|
|
TORRENT_ASSERT(t1);
|
|
boost::shared_ptr<torrent> t2 = (*i)->associated_torrent().lock();
|
|
TORRENT_ASSERT(t2);
|
|
TORRENT_ASSERT((*prev)->uploaded_in_last_round() * 1000
|
|
* (1 + t1->priority()) / total_milliseconds(unchoke_interval)
|
|
>= (*i)->uploaded_in_last_round() * 1000
|
|
* (1 + t2->priority()) / total_milliseconds(unchoke_interval));
|
|
}
|
|
prev = i;
|
|
}
|
|
#endif
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(78)">../src/session_impl.cpp:5153</a></td><td>make configurable</td></tr><tr id="78" style="display: none;" colspan="3"><td colspan="3"><h2>make configurable</h2><h4>../src/session_impl.cpp:5153</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">
|
|
#ifdef TORRENT_DEBUG
|
|
for (std::vector<peer_connection*>::const_iterator i = peers.begin()
|
|
, end(peers.end()), prev(peers.end()); i != end; ++i)
|
|
{
|
|
if (prev != end)
|
|
{
|
|
boost::shared_ptr<torrent> t1 = (*prev)->associated_torrent().lock();
|
|
TORRENT_ASSERT(t1);
|
|
boost::shared_ptr<torrent> t2 = (*i)->associated_torrent().lock();
|
|
TORRENT_ASSERT(t2);
|
|
TORRENT_ASSERT((*prev)->uploaded_in_last_round() * 1000
|
|
* (1 + t1->priority()) / total_milliseconds(unchoke_interval)
|
|
>= (*i)->uploaded_in_last_round() * 1000
|
|
* (1 + t2->priority()) / total_milliseconds(unchoke_interval));
|
|
}
|
|
prev = i;
|
|
}
|
|
#endif
|
|
|
|
<div style="background: #ffff00" width="100%"> int rate_threshold = 1024;
|
|
</div>
|
|
for (std::vector<peer_connection*>::const_iterator i = peers.begin()
|
|
, end(peers.end()); i != end; ++i)
|
|
{
|
|
peer_connection const& p = **i;
|
|
int rate = int(p.uploaded_in_last_round()
|
|
* 1000 / total_milliseconds(unchoke_interval));
|
|
|
|
if (rate < rate_threshold) break;
|
|
|
|
++m_allowed_upload_slots;
|
|
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(79)">../src/session_impl.cpp:5167</a></td><td>make configurable</td></tr><tr id="79" style="display: none;" colspan="3"><td colspan="3"><h2>make configurable</h2><h4>../src/session_impl.cpp:5167</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> >= (*i)->uploaded_in_last_round() * 1000
|
|
* (1 + t2->priority()) / total_milliseconds(unchoke_interval));
|
|
}
|
|
prev = i;
|
|
}
|
|
#endif
|
|
|
|
int rate_threshold = 1024;
|
|
|
|
for (std::vector<peer_connection*>::const_iterator i = peers.begin()
|
|
, end(peers.end()); i != end; ++i)
|
|
{
|
|
peer_connection const& p = **i;
|
|
int rate = int(p.uploaded_in_last_round()
|
|
* 1000 / total_milliseconds(unchoke_interval));
|
|
|
|
if (rate < rate_threshold) break;
|
|
|
|
++m_allowed_upload_slots;
|
|
|
|
<div style="background: #ffff00" width="100%"> rate_threshold += 1024;
|
|
</div> }
|
|
// allow one optimistic unchoke
|
|
++m_allowed_upload_slots;
|
|
}
|
|
|
|
if (m_settings.get_int(settings_pack::choking_algorithm) == settings_pack::bittyrant_choker)
|
|
{
|
|
// if we're using the bittyrant choker, sort peers by their return
|
|
// on investment. i.e. download rate / upload rate
|
|
std::sort(peers.begin(), peers.end()
|
|
, boost::bind(&peer_connection::bittyrant_unchoke_compare, _1, _2));
|
|
}
|
|
else
|
|
{
|
|
// sorts the peers that are eligible for unchoke by download rate and secondary
|
|
// by total upload. The reason for this is, if all torrents are being seeded,
|
|
// the download rate will be 0, and the peers we have sent the least to should
|
|
// be unchoked
|
|
std::sort(peers.begin(), peers.end()
|
|
, boost::bind(&peer_connection::unchoke_compare, _1, _2));
|
|
}
|
|
|
|
// auto unchoke
|
|
peer_class* gpc = m_classes.at(m_global_class);
|
|
int upload_limit = gpc->channel[peer_connection::upload_channel].throttle();
|
|
if (m_settings.get_int(settings_pack::choking_algorithm) == settings_pack::auto_expand_choker
|
|
&& upload_limit > 0)
|
|
{
|
|
// if our current upload rate is less than 90% of our
|
|
// limit
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(80)">../src/session_impl.cpp:5246</a></td><td>this should be called for all peers!</td></tr><tr id="80" style="display: none;" colspan="3"><td colspan="3"><h2>this should be called for all peers!</h2><h4>../src/session_impl.cpp:5246</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> // measurement of the peak, use that + 10kB/s, otherwise
|
|
// assume 20 kB/s
|
|
upload_capacity_left = (std::max)(20000, m_peak_up_rate + 10000);
|
|
if (m_alerts.should_post<performance_alert>())
|
|
m_alerts.post_alert(performance_alert(torrent_handle()
|
|
, performance_alert::bittyrant_with_no_uplimit));
|
|
}
|
|
}
|
|
|
|
m_num_unchoked = 0;
|
|
// go through all the peers and unchoke the first ones and choke
|
|
// all the other ones.
|
|
for (std::vector<peer_connection*>::iterator i = peers.begin()
|
|
, end(peers.end()); i != end; ++i)
|
|
{
|
|
peer_connection* p = *i;
|
|
TORRENT_ASSERT(p);
|
|
TORRENT_ASSERT(!p->ignore_unchoke_slots());
|
|
|
|
// this will update the m_uploaded_at_last_unchoke
|
|
<div style="background: #ffff00" width="100%"> p->reset_choke_counters();
|
|
</div>
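// Sketch of the "this should be called for all peers" note (not part of
// the original source): the reset could instead be applied to every
// connection in a separate pass, so peers excluded from this loop are
// covered too. "connection_map" stands in for session_impl's container of
// all peer connections; the exact typedef is assumed:
//
// for (connection_map::iterator j = m_connections.begin()
//     , jend(m_connections.end()); j != jend; ++j)
//     (*j)->reset_choke_counters();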
|
|
torrent* t = p->associated_torrent().lock().get();
|
|
TORRENT_ASSERT(t);
|
|
|
|
// if this peer should be unchoked depends on different things
|
|
// in different unchoked schemes
|
|
bool unchoke = false;
|
|
if (m_settings.get_int(settings_pack::choking_algorithm) == settings_pack::bittyrant_choker)
|
|
{
|
|
unchoke = p->est_reciprocation_rate() <= upload_capacity_left;
|
|
}
|
|
else
|
|
{
|
|
unchoke = unchoke_set_size > 0;
|
|
}
|
|
|
|
if (unchoke)
|
|
{
|
|
upload_capacity_left -= p->est_reciprocation_rate();
|
|
|
|
// yes, this peer should be unchoked
|
|
if (p->is_choked())
|
|
{
|
|
if (!t->unchoke_peer(*p))
|
|
continue;
|
|
}
|
|
|
|
--unchoke_set_size;
|
|
++m_num_unchoked;
|
|
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(81)">../src/session_impl.cpp:5663</a></td><td>it might be a nice feature here to limit the number of torrents to send in a single update. By just posting the first n torrents, they would nicely be round-robined because the torrent lists are always pushed back</td></tr><tr id="81" style="display: none;" colspan="3"><td colspan="3"><h2>it might be a nice feature here to limit the number of torrents
|
|
to send in a single update. By just posting the first n torrents, they
|
|
would nicely be round-robined because the torrent lists are always
|
|
pushed back</h2><h4>../src/session_impl.cpp:5663</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> t->status(&*i, flags);
|
|
}
|
|
}
|
|
|
|
void session_impl::post_torrent_updates()
|
|
{
|
|
INVARIANT_CHECK;
|
|
|
|
TORRENT_ASSERT(is_single_thread());
|
|
|
|
std::auto_ptr<state_update_alert> alert(new state_update_alert());
|
|
std::vector<torrent*>& state_updates
|
|
= m_torrent_lists[aux::session_impl::torrent_state_updates];
|
|
|
|
alert->status.reserve(state_updates.size());
|
|
|
|
#if TORRENT_USE_ASSERTS
|
|
m_posting_torrent_updates = true;
|
|
#endif
|
|
|
|
<div style="background: #ffff00" width="100%"> for (std::vector<torrent*>::iterator i = state_updates.begin()
|
|
</div> , end(state_updates.end()); i != end; ++i)
|
|
{
|
|
torrent* t = *i;
|
|
TORRENT_ASSERT(t->m_links[aux::session_impl::torrent_state_updates].in_list());
|
|
alert->status.push_back(torrent_status());
|
|
// querying accurate download counters may require
|
|
// the torrent to be loaded. Loading a torrent, and evicting another
|
|
// one will lead to calling state_updated(), which screws with
|
|
// this list while we're working on it, and break things
|
|
t->status(&alert->status.back(), ~torrent_handle::query_accurate_download_counters);
|
|
t->clear_in_state_update();
|
|
}
|
|
state_updates.clear();
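// Sketch of the round-robin idea from the note above (not part of the
// original source): cap how many torrents are posted per call and erase
// only the prefix that was actually posted, so the rest is picked up by
// the next call. "max_updates" is a hypothetical limit:
//
// int const max_updates = 500;
// int const n = (std::min)(int(state_updates.size()), max_updates);
// // ... build the alert from state_updates[0..n) as above ...
// state_updates.erase(state_updates.begin(), state_updates.begin() + n);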
|
|
|
|
#if TORRENT_USE_ASSERTS
|
|
m_posting_torrent_updates = false;
|
|
#endif
|
|
|
|
m_alerts.post_alert_ptr(alert.release());
|
|
}
|
|
|
|
void session_impl::post_session_stats()
|
|
{
|
|
std::auto_ptr<session_stats_alert> alert(new session_stats_alert());
|
|
std::vector<boost::uint64_t>& values = alert->values;
|
|
values.resize(counters::num_counters, 0);
|
|
|
|
m_disk_thread.update_stats_counters(m_stats_counters);
|
|
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(82)">../src/storage.cpp:710</a></td><td>make this more generic to not just work if files have been renamed, but also if they have been merged into a single file for instance maybe use the same format as .torrent files and reuse some code from torrent_info</td></tr><tr id="82" style="display: none;" colspan="3"><td colspan="3"><h2>make this more generic to not just work if files have been
|
|
renamed, but also if they have been merged into a single file, for instance;
|
|
maybe use the same format as .torrent files and reuse some code from torrent_info</h2><h4>../src/storage.cpp:710</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> for (;;)
|
|
{
|
|
if (file_offset < files().file_size(file_index))
|
|
break;
|
|
|
|
file_offset -= files().file_size(file_index);
|
|
++file_index;
|
|
TORRENT_ASSERT(file_index != files().num_files());
|
|
}
|
|
|
|
error_code ec;
|
|
file_handle handle = open_file(file_index, file::read_only, ec);
|
|
if (!handle || ec) return slot;
|
|
|
|
size_type data_start = handle->sparse_end(file_offset);
|
|
return int((data_start + files().piece_length() - 1) / files().piece_length());
|
|
}
|
|
|
|
bool default_storage::verify_resume_data(lazy_entry const& rd, storage_error& ec)
|
|
{
|
|
<div style="background: #ffff00" width="100%"> lazy_entry const* mapped_files = rd.dict_find_list("mapped_files");
|
|
</div> if (mapped_files && mapped_files->list_size() == m_files.num_files())
|
|
{
|
|
m_mapped_files.reset(new file_storage(m_files));
|
|
for (int i = 0; i < m_files.num_files(); ++i)
|
|
{
|
|
std::string new_filename = mapped_files->list_string_value_at(i);
|
|
if (new_filename.empty()) continue;
|
|
m_mapped_files->rename_file(i, new_filename);
|
|
}
|
|
}
|
|
|
|
lazy_entry const* file_priority = rd.dict_find_list("file_priority");
|
|
if (file_priority && file_priority->list_size()
|
|
== files().num_files())
|
|
{
|
|
m_file_priority.resize(file_priority->list_size());
|
|
for (int i = 0; i < file_priority->list_size(); ++i)
|
|
m_file_priority[i] = boost::uint8_t(file_priority->list_int_value_at(i, 1));
|
|
}
|
|
|
|
lazy_entry const* file_sizes_ent = rd.dict_find_list("file sizes");
|
|
if (file_sizes_ent == 0)
|
|
{
|
|
ec.ec = errors::missing_file_sizes;
|
|
return false;
|
|
}
|
|
|
|
if (file_sizes_ent->list_size() == 0)
|
|
{
|
|
ec.ec = errors::no_files_in_resume_data;
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(83)">../src/storage.cpp:1006</a></td><td>if everything moves OK, except for the partfile we currently won't update the save path, which breaks things. it would probably make more sense to give up on the partfile</td></tr><tr id="83" style="display: none;" colspan="3"><td colspan="3"><h2>if everything moves OK, except for the partfile
|
|
we currently won't update the save path, which breaks things.
|
|
It would probably make more sense to give up on the partfile</h2><h4>../src/storage.cpp:1006</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> if (ec)
|
|
{
|
|
ec.file = i->second;
|
|
ec.operation = storage_error::copy;
|
|
}
|
|
else
|
|
{
|
|
// ignore errors when removing
|
|
error_code e;
|
|
remove_all(old_path, e);
|
|
}
|
|
break;
|
|
}
|
|
}
|
|
}
|
|
|
|
if (!ec)
|
|
{
|
|
if (m_part_file)
|
|
{
|
|
<div style="background: #ffff00" width="100%"> m_part_file->move_partfile(save_path, ec.ec);
|
|
</div> if (ec)
|
|
{
|
|
ec.file = -1;
|
|
ec.operation = storage_error::partfile_move;
|
|
return piece_manager::fatal_disk_error;
|
|
}
|
|
}
|
|
|
|
m_save_path = save_path;
|
|
}
|
|
return ret;
|
|
}
|
|
|
|
int default_storage::readv(file::iovec_t const* bufs, int num_bufs
|
|
, int slot, int offset, int flags, storage_error& ec)
|
|
{
|
|
fileop op = { &file::readv
|
|
, file::read_only | flags };
|
|
#ifdef TORRENT_SIMULATE_SLOW_READ
|
|
boost::thread::sleep(boost::get_system_time()
|
|
+ boost::posix_time::milliseconds(1000));
|
|
#endif
|
|
return readwritev(bufs, slot, offset, num_bufs, op, ec);
|
|
}
|
|
|
|
int default_storage::writev(file::iovec_t const* bufs, int num_bufs
|
|
, int slot, int offset, int flags, storage_error& ec)
|
|
{
|
|
fileop op = { &file::writev
|
|
, file::read_write | flags };
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(84)">../src/torrent.cpp:491</a></td><td>if the existing torrent doesn't have metadata, insert the metadata we just downloaded into it.</td></tr><tr id="84" style="display: none;" colspan="3"><td colspan="3"><h2>if the existing torrent doesn't have metadata, insert
|
|
the metadata we just downloaded into it.</h2><h4>../src/torrent.cpp:491</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">
|
|
m_torrent_file = tf;
|
|
|
|
// now, we might already have this torrent in the session.
|
|
boost::shared_ptr<torrent> t = m_ses.find_torrent(m_torrent_file->info_hash()).lock();
|
|
if (t)
|
|
{
|
|
if (!m_uuid.empty() && t->uuid().empty())
|
|
t->set_uuid(m_uuid);
|
|
if (!m_url.empty() && t->url().empty())
|
|
t->set_url(m_url);
|
|
if (!m_source_feed_url.empty() && t->source_feed_url().empty())
|
|
t->set_source_feed_url(m_source_feed_url);
|
|
|
|
// insert this torrent in the uuid index
|
|
if (!m_uuid.empty() || !m_url.empty())
|
|
{
|
|
m_ses.insert_uuid_torrent(m_uuid.empty() ? m_url : m_uuid, t);
|
|
}
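// Sketch of the todo note above (not part of the original source): before
// bailing out as a duplicate, the freshly downloaded metadata could be
// handed to the existing torrent if it has none yet, roughly:
//
// if (!t->valid_metadata() && m_torrent_file->is_valid())
//     t->set_metadata(m_torrent_file->metadata().get()
//         , m_torrent_file->metadata_size());
//
// (assuming torrent::set_metadata with this signature, as used by the
// ut_metadata extension)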
|
|
|
|
<div style="background: #ffff00" width="100%">
|
|
</div> set_error(error_code(errors::duplicate_torrent, get_libtorrent_category()), error_file_url);
|
|
abort();
|
|
return;
|
|
}
|
|
|
|
m_ses.insert_torrent(m_torrent_file->info_hash(), me, m_uuid);
|
|
|
|
TORRENT_ASSERT(num_torrents == int(m_ses.m_torrents.size()));
|
|
|
|
// if the user added any trackers while downloading the
|
|
// .torrent file, merge them into the new tracker list
|
|
std::vector<announce_entry> new_trackers = m_torrent_file->trackers();
|
|
for (std::vector<announce_entry>::iterator i = m_trackers.begin()
|
|
, end(m_trackers.end()); i != end; ++i)
|
|
{
|
|
// if we already have this tracker, ignore it
|
|
if (std::find_if(new_trackers.begin(), new_trackers.end()
|
|
, boost::bind(&announce_entry::url, _1) == i->url) != new_trackers.end())
|
|
continue;
|
|
|
|
// insert the tracker ordered by tier
|
|
new_trackers.insert(std::find_if(new_trackers.begin(), new_trackers.end()
|
|
, boost::bind(&announce_entry::tier, _1) >= i->tier), *i);
|
|
}
|
|
m_trackers.swap(new_trackers);
|
|
|
|
#ifndef TORRENT_DISABLE_ENCRYPTION
|
|
hasher h;
|
|
h.update("req2", 4);
|
|
h.update((char*)&m_torrent_file->info_hash()[0], 20);
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(85)">../src/torrent.cpp:641</a></td><td>if the existing torrent doesn't have metadata, insert the metadata we just downloaded into it.</td></tr><tr id="85" style="display: none;" colspan="3"><td colspan="3"><h2>if the existing torrent doesn't have metadata, insert
|
|
the metadata we just downloaded into it.</h2><h4>../src/torrent.cpp:641</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">
|
|
m_torrent_file = tf;
|
|
|
|
// now, we might already have this torrent in the session.
|
|
boost::shared_ptr<torrent> t = m_ses.find_torrent(m_torrent_file->info_hash()).lock();
|
|
if (t)
|
|
{
|
|
if (!m_uuid.empty() && t->uuid().empty())
|
|
t->set_uuid(m_uuid);
|
|
if (!m_url.empty() && t->url().empty())
|
|
t->set_url(m_url);
|
|
if (!m_source_feed_url.empty() && t->source_feed_url().empty())
|
|
t->set_source_feed_url(m_source_feed_url);
|
|
|
|
// insert this torrent in the uuid index
|
|
if (!m_uuid.empty() || !m_url.empty())
|
|
{
|
|
m_ses.insert_uuid_torrent(m_uuid.empty() ? m_url : m_uuid, t);
|
|
}
|
|
|
|
<div style="background: #ffff00" width="100%">
|
|
</div> set_error(error_code(errors::duplicate_torrent, get_libtorrent_category()), error_file_url);
|
|
abort();
|
|
return;
|
|
}
|
|
|
|
m_ses.insert_torrent(m_torrent_file->info_hash(), me, m_uuid);
|
|
|
|
// if the user added any trackers while downloading the
|
|
// .torrent file, merge them into the new tracker list
|
|
std::vector<announce_entry> new_trackers = m_torrent_file->trackers();
|
|
for (std::vector<announce_entry>::iterator i = m_trackers.begin()
|
|
, end(m_trackers.end()); i != end; ++i)
|
|
{
|
|
// if we already have this tracker, ignore it
|
|
if (std::find_if(new_trackers.begin(), new_trackers.end()
|
|
, boost::bind(&announce_entry::url, _1) == i->url) != new_trackers.end())
|
|
continue;
|
|
|
|
// insert the tracker ordered by tier
|
|
new_trackers.insert(std::find_if(new_trackers.begin(), new_trackers.end()
|
|
, boost::bind(&announce_entry::tier, _1) >= i->tier), *i);
|
|
}
|
|
m_trackers.swap(new_trackers);
|
|
|
|
#ifndef TORRENT_DISABLE_ENCRYPTION
|
|
hasher h;
|
|
h.update("req2", 4);
|
|
h.update((char*)&m_torrent_file->info_hash()[0], 20);
|
|
m_ses.add_obfuscated_hash(h.final(), shared_from_this());
|
|
#endif
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(86)">../src/torrent.cpp:1446</a></td><td>is verify_peer_cert called once per certificate in the chain, and this function just tells us which depth we're at right now? If so, the comment makes sense. any certificate that isn't the leaf (i.e. the one presented by the peer) should be accepted automatically, given preverified is true. The leaf certificate need to be verified to make sure its DN matches the info-hash</td></tr><tr id="86" style="display: none;" colspan="3"><td colspan="3"><h2>is verify_peer_cert called once per certificate in the chain, and
|
|
this function just tells us which depth we're at right now? If so, the comment
|
|
makes sense.
|
|
Any certificate that isn't the leaf (i.e. the one presented by the peer)
|
|
should be accepted automatically, given preverified is true. The leaf certificate
|
|
needs to be verified to make sure its DN matches the info-hash</h2><h4>../src/torrent.cpp:1446</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> if (pp) p->add_extension(pp);
|
|
}
|
|
|
|
// if files are checked for this torrent, call the extension
|
|
// to let it initialize itself
|
|
if (m_connections_initialized)
|
|
tp->on_files_checked();
|
|
}
|
|
|
|
#endif
|
|
|
|
#ifdef TORRENT_USE_OPENSSL
|
|
|
|
#if BOOST_VERSION >= 104700
|
|
bool torrent::verify_peer_cert(bool preverified, boost::asio::ssl::verify_context& ctx)
|
|
{
|
|
// if the cert wasn't signed by the correct CA, fail the verification
|
|
if (!preverified) return false;
|
|
|
|
// we're only interested in checking the certificate at the end of the chain.
|
|
<div style="background: #ffff00" width="100%"> int depth = X509_STORE_CTX_get_error_depth(ctx.native_handle());
|
|
</div> if (depth > 0) return true;
|
|
|
|
X509* cert = X509_STORE_CTX_get_current_cert(ctx.native_handle());
|
|
|
|
// Go through the alternate names in the certificate looking for matching DNS entries
|
|
GENERAL_NAMES* gens = static_cast<GENERAL_NAMES*>(
|
|
X509_get_ext_d2i(cert, NID_subject_alt_name, 0, 0));
|
|
|
|
#if defined(TORRENT_VERBOSE_LOGGING) || defined(TORRENT_LOGGING)
|
|
std::string names;
|
|
bool match = false;
|
|
#endif
|
|
for (int i = 0; i < sk_GENERAL_NAME_num(gens); ++i)
|
|
{
|
|
GENERAL_NAME* gen = sk_GENERAL_NAME_value(gens, i);
|
|
if (gen->type != GEN_DNS) continue;
|
|
ASN1_IA5STRING* domain = gen->d.dNSName;
|
|
if (domain->type != V_ASN1_IA5STRING || !domain->data || !domain->length) continue;
|
|
const char* torrent_name = reinterpret_cast<const char*>(domain->data);
|
|
std::size_t name_length = domain->length;
|
|
|
|
#if defined(TORRENT_VERBOSE_LOGGING) || defined(TORRENT_LOGGING)
|
|
if (i > 1) names += " | n: ";
|
|
names.append(torrent_name, name_length);
|
|
#endif
|
|
if (strncmp(torrent_name, "*", name_length) == 0
|
|
|| strncmp(torrent_name, m_torrent_file->name().c_str(), name_length) == 0)
|
|
{
|
|
#if defined(TORRENT_VERBOSE_LOGGING) || defined(TORRENT_LOGGING)
|
|
match = true;
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(87)">../src/torrent.cpp:1838</a></td><td>instead of creating the picker up front here, maybe this whole section should move to need_picker()</td></tr><tr id="87" style="display: none;" colspan="3"><td colspan="3"><h2>instead of creating the picker up front here,
|
|
maybe this whole section should move to need_picker()</h2><h4>../src/torrent.cpp:1838</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> else
|
|
{
|
|
read_resume_data(m_resume_data->entry);
|
|
}
|
|
}
|
|
|
|
#if TORRENT_USE_ASSERTS
|
|
m_resume_data_loaded = true;
|
|
#endif
|
|
|
|
int num_pad_files = 0;
|
|
TORRENT_ASSERT(block_size() > 0);
|
|
file_storage const& fs = m_torrent_file->files();
|
|
for (int i = 0; i < fs.num_files(); ++i)
|
|
{
|
|
if (fs.pad_file_at(i)) ++num_pad_files;
|
|
|
|
if (!fs.pad_file_at(i) || fs.file_size(i) == 0) continue;
|
|
m_padding += boost::uint32_t(fs.file_size(i));
|
|
|
|
<div style="background: #ffff00" width="100%"> need_picker();
|
|
</div>
|
|
peer_request pr = m_torrent_file->map_file(i, 0, fs.file_size(i));
|
|
int off = pr.start & (block_size()-1);
|
|
if (off != 0) { pr.length -= block_size() - off; pr.start += block_size() - off; }
|
|
TORRENT_ASSERT((pr.start & (block_size()-1)) == 0);
|
|
|
|
int block = block_size();
|
|
int blocks_per_piece = m_torrent_file->piece_length() / block;
|
|
piece_block pb(pr.piece, pr.start / block);
|
|
for (; pr.length >= block; pr.length -= block, ++pb.block_index)
|
|
{
|
|
if (int(pb.block_index) == blocks_per_piece) { pb.block_index = 0; ++pb.piece_index; }
|
|
m_picker->mark_as_finished(pb, 0);
|
|
}
|
|
// ugly edge case where padfiles are not used the way they're
|
|
// supposed to be. i.e. added back-to-back or at the end
|
|
if (int(pb.block_index) == blocks_per_piece) { pb.block_index = 0; ++pb.piece_index; }
|
|
if (pr.length > 0 && ((i+1 != fs.num_files() && fs.pad_file_at(i+1))
|
|
|| i + 1 == fs.num_files()))
|
|
{
|
|
m_picker->mark_as_finished(pb, 0);
|
|
}
|
|
}
|
|
|
|
if (m_padding > 0)
|
|
{
|
|
// if we marked an entire piece as finished, we actually
|
|
// need to consider it finished
|
|
|
|
std::vector<piece_picker::downloading_piece> dq
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(88)">../src/torrent.cpp:2034</a></td><td>there may be peer extensions relying on the torrent extension still being alive. Only do this if there are no peers. And when the last peer is disconnected, if the torrent is unloaded, clear the extensions m_extensions.clear();</td></tr><tr id="88" style="display: none;" colspan="3"><td colspan="3"><h2>there may be peer extensions relying on the torrent extension
|
|
still being alive. Only do this if there are no peers. And when the last peer
|
|
is disconnected, if the torrent is unloaded, clear the extensions
|
|
m_extensions.clear();</h2><h4>../src/torrent.cpp:2034</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> // pinned torrents are not allowed to be swapped out
|
|
TORRENT_ASSERT(!m_pinned);
|
|
|
|
m_should_be_loaded = false;
|
|
|
|
// make sure it's not unloaded in the middle of some operation that uses it
|
|
if (m_refcount > 0) return;
|
|
|
|
// call on_unload() on extensions
|
|
#ifndef TORRENT_DISABLE_EXTENSIONS
|
|
for (extension_list_t::iterator i = m_extensions.begin()
|
|
, end(m_extensions.end()); i != end; ++i)
|
|
{
|
|
TORRENT_TRY {
|
|
(*i)->on_unload();
|
|
} TORRENT_CATCH (std::exception&) {}
|
|
}
|
|
|
|
// also remove extensions and re-instantiate them when the torrent is loaded again
|
|
// they end up using a significant amount of memory
|
|
<div style="background: #ffff00" width="100%">#endif
|
|
</div>
|
|
// someone else holds a reference to the torrent_info
|
|
// make the torrent release its reference to it,
|
|
// after making a copy and then unloading that version
|
|
// as soon as the user is done with its copy of torrent_info
|
|
// it will be freed, and we'll have the unloaded version left
|
|
if (!m_torrent_file.unique())
|
|
m_torrent_file = boost::make_shared<torrent_info>(*m_torrent_file);
|
|
|
|
m_torrent_file->unload();
|
|
inc_stats_counter(counters::num_loaded_torrents, -1);
|
|
|
|
m_storage.reset();
|
|
|
|
state_updated();
|
|
}
|
|
|
|
bt_peer_connection* torrent::find_introducer(tcp::endpoint const& ep) const
|
|
{
|
|
#ifndef TORRENT_DISABLE_EXTENSIONS
|
|
for (const_peer_iterator i = m_connections.begin(); i != m_connections.end(); ++i)
|
|
{
|
|
if ((*i)->type() != peer_connection::bittorrent_connection) continue;
|
|
bt_peer_connection* p = (bt_peer_connection*)(*i);
|
|
if (!p->supports_holepunch()) continue;
|
|
peer_plugin const* pp = p->find_plugin("ut_pex");
|
|
if (!pp) continue;
|
|
if (was_introduced_by(pp, ep)) return (bt_peer_connection*)p;
|
|
}
|
|
#endif
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(89)">../src/torrent.cpp:2707</a></td><td>this pattern is repeated in a few places. Factor this into a function and generalize the concept of a torrent having a dedicated listen port</td></tr><tr id="89" style="display: none;" colspan="3"><td colspan="3"><h2>this pattern is repeated in a few places. Factor this into
|
|
a function and generalize the concept of a torrent having a
|
|
dedicated listen port</h2><h4>../src/torrent.cpp:2707</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> // if the files haven't been checked yet, we're
|
|
// not ready for peers. Except, if we don't have metadata,
|
|
// we need peers to download from
|
|
if (!m_files_checked && valid_metadata()) return;
|
|
|
|
if (!m_announce_to_lsd) return;
|
|
|
|
// private torrents are never announced on LSD
|
|
if (m_torrent_file->is_valid() && m_torrent_file->priv()) return;
|
|
|
|
// i2p torrents are also never announced on LSD
|
|
// unless we allow mixed swarms
|
|
if (m_torrent_file->is_valid()
|
|
&& (torrent_file().is_i2p() && !settings().get_bool(settings_pack::allow_i2p_mixed)))
|
|
return;
|
|
|
|
if (is_paused()) return;
|
|
|
|
if (!m_ses.has_lsd()) return;
|
|
|
|
<div style="background: #ffff00" width="100%">#ifdef TORRENT_USE_OPENSSL
|
|
</div> int port = is_ssl_torrent() ? m_ses.ssl_listen_port() : m_ses.listen_port();
|
|
#else
|
|
int port = m_ses.listen_port();
|
|
#endif
|
|
|
|
// announce with the local discovery service
|
|
m_ses.announce_lsd(m_torrent_file->info_hash(), port
|
|
, m_ses.settings().get_bool(settings_pack::broadcast_lsd) && m_lsd_seq == 0);
|
|
++m_lsd_seq;
|
|
}
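// Sketch of the factoring suggested above (not part of the original
// source): a single helper owning the "which port does this torrent
// announce" decision, so the #ifdef pattern isn't repeated at every call
// site. "announce_port" is a hypothetical name:
//
// int torrent::announce_port() const
// {
// #ifdef TORRENT_USE_OPENSSL
//     if (is_ssl_torrent()) return m_ses.ssl_listen_port();
// #endif
//     return m_ses.listen_port();
// }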
|
|
|
|
#ifndef TORRENT_DISABLE_DHT
|
|
|
|
void torrent::dht_announce()
|
|
{
|
|
TORRENT_ASSERT(m_ses.is_single_thread());
|
|
if (!m_ses.dht())
|
|
{
|
|
#if defined TORRENT_VERBOSE_LOGGING || defined TORRENT_LOGGING || defined TORRENT_ERROR_LOGGING
|
|
debug_log("DHT: no dht initialized");
|
|
#endif
|
|
return;
|
|
}
|
|
if (!should_announce_dht())
|
|
{
|
|
#if defined TORRENT_VERBOSE_LOGGING || defined TORRENT_LOGGING || defined TORRENT_ERROR_LOGGING
|
|
if (!m_ses.announce_dht())
|
|
debug_log("DHT: no listen sockets");
|
|
|
|
if (m_torrent_file->is_valid() && !m_files_checked)
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(90)">../src/torrent.cpp:3238</a></td><td>instead, borrow host resolvers from a pool in session_impl. That would make the torrent object smaller</td></tr><tr id="90" style="display: none;" colspan="3"><td colspan="3"><h2>instead, borrow host resolvers from a pool in session_impl. That
|
|
would make the torrent object smaller</h2><h4>../src/torrent.cpp:3238</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> , boost::bind(&torrent::on_i2p_resolve
|
|
, shared_from_this(), _1));
|
|
*/
|
|
// it seems like you're not supposed to do a name lookup
|
|
// on the peers returned from the tracker, but just strip
|
|
// the .i2p and use it as a destination
|
|
i->ip.resize(i->ip.size() - 4);
|
|
torrent_state st = get_policy_state();
|
|
need_policy();
|
|
if (m_policy->add_i2p_peer(i->ip.c_str(), peer_info::tracker, 0, &st))
|
|
state_updated();
|
|
peers_erased(st.erased);
|
|
}
|
|
else
|
|
#endif
|
|
{
|
|
#if defined TORRENT_ASIO_DEBUGGING
|
|
add_outstanding_async("torrent::on_peer_name_lookup");
|
|
#endif
|
|
tcp::resolver::query q(i->ip, to_string(i->port).elems);
|
|
<div style="background: #ffff00" width="100%"> m_ses.get_resolver().async_resolve(i->ip, 0
|
|
</div> , boost::bind(&torrent::on_peer_name_lookup
|
|
, shared_from_this(), _1, _2, i->port));
|
|
}
|
|
}
|
|
else
|
|
{
|
|
// ignore local addresses from the tracker (unless the tracker is local too)
|
|
// there are 2 reasons to allow this:
|
|
// 1. retrackers are popular in russia, where an ISP runs a tracker within
|
|
// the AS (but not on the local network) giving out peers only from the
|
|
// local network
|
|
// 2. it might make sense to have a tracker extension in the future where
|
|
// trackers record a peer's internal and external IP, and match up
|
|
// peers on the same local network
|
|
// if (is_local(a.address()) && !is_local(tracker_ip)) continue;
|
|
if (add_peer(a, peer_info::tracker))
|
|
state_updated();
|
|
}
|
|
}
|
|
update_want_peers();
|
|
|
|
if (m_ses.alerts().should_post<tracker_reply_alert>())
|
|
{
|
|
m_ses.alerts().post_alert(tracker_reply_alert(
|
|
get_handle(), peer_list.size(), r.url));
|
|
}
|
|
m_got_tracker_response = true;
|
|
|
|
// we're listening on an interface type that was not used
|
|
// when talking to the tracker. If there is a matching interface
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(91)">../src/torrent.cpp:4429</a></td><td>update suggest_piece?</td></tr><tr id="91" style="display: none;" colspan="3"><td colspan="3"><h2>update suggest_piece?</h2><h4>../src/torrent.cpp:4429</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">
|
|
void torrent::peer_has_all(peer_connection const* peer)
|
|
{
|
|
if (has_picker())
|
|
{
|
|
m_picker->inc_refcount_all(peer);
|
|
}
|
|
#ifdef TORRENT_DEBUG
|
|
else
|
|
{
|
|
TORRENT_ASSERT(is_seed() || !m_have_all);
|
|
}
|
|
#endif
|
|
}
|
|
|
|
void torrent::peer_lost(bitfield const& bits, peer_connection const* peer)
|
|
{
|
|
if (has_picker())
|
|
{
|
|
m_picker->dec_refcount(bits, peer);
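// Sketch of the "update suggest_piece?" note (not part of the original
// source): mirroring peer_lost(int, ...) below, each piece set in the
// bitfield could refresh the suggest cache:
//
// for (int k = 0; k < int(bits.size()); ++k)
//     if (bits[k]) update_suggest_piece(k, -1);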
|
|
<div style="background: #ffff00" width="100%"> }
|
|
</div>#ifdef TORRENT_DEBUG
|
|
else
|
|
{
|
|
TORRENT_ASSERT(is_seed() || !m_have_all);
|
|
}
|
|
#endif
|
|
}
|
|
|
|
void torrent::peer_lost(int index, peer_connection const* peer)
|
|
{
|
|
if (m_picker.get())
|
|
{
|
|
m_picker->dec_refcount(index, peer);
|
|
update_suggest_piece(index, -1);
|
|
}
|
|
#ifdef TORRENT_DEBUG
|
|
else
|
|
{
|
|
TORRENT_ASSERT(is_seed() || !m_have_all);
|
|
}
|
|
#endif
|
|
}
|
|
|
|
void torrent::add_suggest_piece(int index)
|
|
{
|
|
// it would be nice if we would keep track of piece
|
|
// availability even when we're a seed, for
|
|
// the suggest piece feature
|
|
if (!has_picker()) return;
|
|
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(92)">../src/torrent.cpp:4572</a></td><td>really, we should just keep the picker around in this case to maintain the availability counters</td></tr><tr id="92" style="display: none;" colspan="3"><td colspan="3"><h2>really, we should just keep the picker around
|
|
in this case to maintain the availability counters</h2><h4>../src/torrent.cpp:4572</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> pieces.reserve(cs.pieces.size());
|
|
|
|
// sort in ascending order, to get most recently used first
|
|
std::sort(cs.pieces.begin(), cs.pieces.end()
|
|
, boost::bind(&cached_piece_info::last_use, _1)
|
|
> boost::bind(&cached_piece_info::last_use, _2));
|
|
|
|
for (std::vector<cached_piece_info>::iterator i = cs.pieces.begin()
|
|
, end(cs.pieces.end()); i != end; ++i)
|
|
{
|
|
TORRENT_ASSERT(i->storage == m_storage.get());
|
|
if (!has_piece_passed(i->piece)) continue;
|
|
suggest_piece_t p;
|
|
p.piece_index = i->piece;
|
|
if (has_picker())
|
|
{
|
|
p.num_peers = m_picker->get_availability(i->piece);
|
|
}
|
|
else
|
|
{
|
|
<div style="background: #ffff00" width="100%"> p.num_peers = 0;
|
|
</div> for (const_peer_iterator i = m_connections.begin()
|
|
, end(m_connections.end()); i != end; ++i)
|
|
{
|
|
peer_connection* peer = *i;
|
|
if (peer->has_piece(p.piece_index)) ++p.num_peers;
|
|
}
|
|
}
|
|
pieces.push_back(p);
|
|
}
|
|
|
|
// sort by rarity (stable, to maintain sort
|
|
// by last use)
|
|
std::stable_sort(pieces.begin(), pieces.end());
|
|
|
|
// only suggest half of the pieces
|
|
pieces.resize(pieces.size() / 2);
|
|
|
|
// send new suggests to peers
|
|
// the peers will filter out pieces we've
|
|
// already suggested to them
|
|
for (std::vector<suggest_piece_t>::iterator i = pieces.begin()
|
|
, end(pieces.end()); i != end; ++i)
|
|
{
|
|
for (peer_iterator p = m_connections.begin();
|
|
p != m_connections.end(); ++p)
|
|
(*p)->send_suggest(i->piece_index);
|
|
}
|
|
}
|
|
|
|
void torrent::abort()
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(93)">../src/torrent.cpp:6478</a></td><td>make this more generic to not just work if files have been renamed, but also if they have been merged into a single file for instance maybe use the same format as .torrent files and reuse some code from torrent_info The mapped_files needs to be read both in the network thread and in the disk thread, since they both have their own mapped files structures which are kept in sync</td></tr><tr id="93" style="display: none;" colspan="3"><td colspan="3"><h2>make this more generic to not just work if files have been
|
|
renamed, but also if they have been merged into a single file, for instance;
|
|
maybe use the same format as .torrent files and reuse some code from torrent_info
|
|
The mapped_files needs to be read both in the network thread
|
|
and in the disk thread, since they both have their own mapped files structures
|
|
which are kept in sync</h2><h4>../src/torrent.cpp:6478</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> super_seeding(rd.dict_find_int_value("super_seeding", 0));
|
|
|
|
if (!m_use_resume_save_path)
|
|
{
|
|
std::string p = rd.dict_find_string_value("save_path");
|
|
if (!p.empty()) m_save_path = p;
|
|
}
|
|
|
|
m_url = rd.dict_find_string_value("url");
|
|
m_uuid = rd.dict_find_string_value("uuid");
|
|
m_source_feed_url = rd.dict_find_string_value("feed");
|
|
|
|
if (!m_uuid.empty() || !m_url.empty())
|
|
{
|
|
boost::shared_ptr<torrent> me(shared_from_this());
|
|
|
|
// insert this torrent in the uuid index
|
|
m_ses.insert_uuid_torrent(m_uuid.empty() ? m_url : m_uuid, me);
|
|
}
|
|
|
|
<div style="background: #ffff00" width="100%"> lazy_entry const* mapped_files = rd.dict_find_list("mapped_files");
|
|
</div> if (mapped_files && mapped_files->list_size() == m_torrent_file->num_files())
|
|
{
|
|
for (int i = 0; i < m_torrent_file->num_files(); ++i)
|
|
{
|
|
std::string new_filename = mapped_files->list_string_value_at(i);
|
|
if (new_filename.empty()) continue;
|
|
m_torrent_file->rename_file(i, new_filename);
|
|
}
|
|
}
|
|
|
|
m_added_time = rd.dict_find_int_value("added_time", m_added_time);
|
|
m_completed_time = rd.dict_find_int_value("completed_time", m_completed_time);
|
|
if (m_completed_time != 0 && m_completed_time < m_added_time)
|
|
m_completed_time = m_added_time;
|
|
|
|
lazy_entry const* file_priority = rd.dict_find_list("file_priority");
|
|
if (file_priority && file_priority->list_size()
|
|
== m_torrent_file->num_files())
|
|
{
|
|
int num_files = m_torrent_file->num_files();
|
|
m_file_priority.resize(num_files);
|
|
for (int i = 0; i < num_files; ++i)
|
|
m_file_priority[i] = file_priority->list_int_value_at(i, 1);
|
|
// unallocated slots are assumed to be priority 1, so cut off any
|
|
// trailing ones
|
|
int end_range = num_files - 1;
|
|
for (; end_range >= 0; --end_range) if (m_file_priority[end_range] != 1) break;
|
|
m_file_priority.resize(end_range + 1);
|
|
|
|
// initialize pad files to priority 0
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(94)">../src/torrent.cpp:6641</a></td><td>if this is a merkle torrent and we can't restore the tree, we need to wipe all the bits in the have array, but not necessarily we might want to do a full check to see if we have all the pieces. This is low priority since almost no one uses merkle torrents</td></tr><tr id="94" style="display: none;" colspan="3"><td colspan="3"><h2>if this is a merkle torrent and we can't
|
|
restore the tree, we need to wipe all the
|
|
bits in the have array, but not necessarily;
|
|
we might want to do a full check to see if we have
|
|
all the pieces. This is low priority since almost
|
|
no one uses merkle torrents</h2><h4>../src/torrent.cpp:6641</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> add_web_seed(url, web_seed_entry::http_seed);
|
|
}
|
|
}
|
|
|
|
if (m_torrent_file->is_merkle_torrent())
|
|
{
|
|
lazy_entry const* mt = rd.dict_find_string("merkle tree");
|
|
if (mt)
|
|
{
|
|
std::vector<sha1_hash> tree;
|
|
tree.resize(m_torrent_file->merkle_tree().size());
|
|
std::memcpy(&tree[0], mt->string_ptr()
|
|
, (std::min)(mt->string_length(), int(tree.size()) * 20));
|
|
if (mt->string_length() < int(tree.size()) * 20)
|
|
std::memset(&tree[0] + mt->string_length() / 20, 0
|
|
, tree.size() - mt->string_length() / 20);
|
|
m_torrent_file->set_merkle_tree(tree);
|
|
}
|
|
else
|
|
{
|
|
<div style="background: #ffff00" width="100%"> TORRENT_ASSERT(false);
|
|
</div> }
|
|
}
|
|
|
|
// updating some of the torrent state may have set need_save_resume_data.
|
|
// clear it here since we've just restored the resume data we already
|
|
// have. Nothing has changed from that state yet.
|
|
m_need_save_resume_data = false;
|
|
}
|
|
|
|
boost::shared_ptr<const torrent_info> torrent::get_torrent_copy()
|
|
{
|
|
if (!m_torrent_file->is_valid()) return boost::shared_ptr<const torrent_info>();
|
|
if (!need_loaded()) return boost::shared_ptr<const torrent_info>();
|
|
|
|
return m_torrent_file;
|
|
}
|
|
|
|
void torrent::write_resume_data(entry& ret) const
|
|
{
|
|
using namespace libtorrent::detail; // for write_*_endpoint()
|
|
ret["file-format"] = "libtorrent resume file";
|
|
ret["file-version"] = 1;
|
|
ret["libtorrent-version"] = LIBTORRENT_VERSION;
|
|
|
|
ret["total_uploaded"] = m_total_uploaded;
|
|
ret["total_downloaded"] = m_total_downloaded;
|
|
|
|
ret["active_time"] = active_time();
|
|
ret["finished_time"] = finished_time();
|
|
ret["seeding_time"] = seeding_time();
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(95)">../src/torrent.cpp:6831</a></td><td>make this more generic to not just work if files have been renamed, but also if they have been merged into a single file for instance. using file_base</td></tr><tr id="95" style="display: none;" colspan="3"><td colspan="3"><h2>make this more generic to not just work if files have been
|
|
renamed, but also if they have been merged into a single file, for instance,
|
|
using file_base</h2><h4>../src/torrent.cpp:6831</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> pieces.resize(m_torrent_file->num_pieces());
|
|
if (!has_picker())
|
|
{
|
|
std::memset(&pieces[0], m_have_all, pieces.size());
|
|
}
|
|
else
|
|
{
|
|
for (int i = 0, end(pieces.size()); i < end; ++i)
|
|
pieces[i] = m_picker->have_piece(i) ? 1 : 0;
|
|
}
|
|
|
|
if (m_seed_mode)
|
|
{
|
|
TORRENT_ASSERT(m_verified.size() == pieces.size());
|
|
TORRENT_ASSERT(m_verifying.size() == pieces.size());
|
|
for (int i = 0, end(pieces.size()); i < end; ++i)
|
|
pieces[i] |= m_verified[i] ? 2 : 0;
|
|
}
|
|
|
|
// write renamed files
|
|
<div style="background: #ffff00" width="100%"> if (&m_torrent_file->files() != &m_torrent_file->orig_files()
|
|
</div> && m_torrent_file->files().num_files() == m_torrent_file->orig_files().num_files())
|
|
{
|
|
entry::list_type& fl = ret["mapped_files"].list();
|
|
file_storage const& fs = m_torrent_file->files();
|
|
for (int i = 0; i < fs.num_files(); ++i)
|
|
{
|
|
fl.push_back(fs.file_path(i));
|
|
}
|
|
}
|
|
|
|
// write local peers
|
|
|
|
std::back_insert_iterator<entry::string_type> peers(ret["peers"].string());
|
|
std::back_insert_iterator<entry::string_type> banned_peers(ret["banned_peers"].string());
|
|
#if TORRENT_USE_IPV6
|
|
std::back_insert_iterator<entry::string_type> peers6(ret["peers6"].string());
|
|
std::back_insert_iterator<entry::string_type> banned_peers6(ret["banned_peers6"].string());
|
|
#endif
|
|
|
|
// failcount is a 5 bit value
|
|
int max_failcount = (std::min)(settings().get_int(settings_pack::max_failcount), 31);
|
|
|
|
int num_saved_peers = 0;
|
|
|
|
if (m_policy)
|
|
{
|
|
for (policy::const_iterator i = m_policy->begin_peer()
|
|
, end(m_policy->end_peer()); i != end; ++i)
|
|
{
|
|
error_code ec;
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(96)">../src/torrent.cpp:8790</a></td><td>add a flag to ignore stats, and only care about resume data for content. For unchanged files, don't trigger a load of the metadata just to save an empty resume data file</td></tr><tr id="96" style="display: none;" colspan="3"><td colspan="3"><h2>add a flag to ignore stats, and only care about resume data for
|
|
content. For unchanged files, don't trigger a load of the metadata
|
|
just to save an empty resume data file</h2><h4>../src/torrent.cpp:8790</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> if (m_complete != 0xffffff) seeds = m_complete;
|
|
else seeds = m_policy ? m_policy->num_seeds() : 0;
|
|
|
|
if (m_incomplete != 0xffffff) downloaders = m_incomplete;
|
|
else downloaders = m_policy ? m_policy->num_peers() - m_policy->num_seeds() : 0;
|
|
|
|
if (seeds == 0)
|
|
{
|
|
ret |= no_seeds;
|
|
ret |= downloaders & prio_mask;
|
|
}
|
|
else
|
|
{
|
|
ret |= ((1 + downloaders) * scale / seeds) & prio_mask;
|
|
}
|
|
|
|
return ret;
|
|
}
|
|
|
|
// this is an async operation triggered by the client
|
|
<div style="background: #ffff00" width="100%"> void torrent::save_resume_data(int flags)
|
|
</div> {
|
|
TORRENT_ASSERT(m_ses.is_single_thread());
|
|
INVARIANT_CHECK;
|
|
|
|
if (!valid_metadata())
|
|
{
|
|
alerts().post_alert(save_resume_data_failed_alert(get_handle()
|
|
, errors::no_metadata));
|
|
return;
|
|
}
|
|
|
|
if (!m_storage.get())
|
|
{
|
|
alerts().post_alert(save_resume_data_failed_alert(get_handle()
|
|
, errors::destructing_torrent));
|
|
return;
|
|
}
|
|
|
|
if ((flags & torrent_handle::only_if_modified) && !m_need_save_resume_data)
|
|
{
|
|
alerts().post_alert(save_resume_data_failed_alert(get_handle()
|
|
, errors::resume_data_not_modified));
|
|
return;
|
|
}
|
|
|
|
m_need_save_resume_data = false;
|
|
m_last_saved_resume = m_ses.session_time();
|
|
m_save_resume_flags = boost::uint8_t(flags);
|
|
state_updated();
|
|
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(97)">../src/torrent.cpp:9758</a></td><td>go through the pieces we have and count the total number of downloaders we have. Only count peers that are interested in us since some peers might not send have messages for pieces we have it num_interested == 0, we need to pick a new piece</td></tr><tr id="97" style="display: none;" colspan="3"><td colspan="3"><h2>go through the pieces we have and count the total number
|
|
of downloaders we have. Only count peers that are interested in us
|
|
since some peers might not send have messages for pieces we have.
|
|
If num_interested == 0, we need to pick a new piece</h2><h4>../src/torrent.cpp:9758</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> }
|
|
|
|
rarest_pieces.clear();
|
|
rarest_rarity = pp.peer_count;
|
|
rarest_pieces.push_back(i);
|
|
}
|
|
|
|
update_gauge();
|
|
update_want_peers();
|
|
|
|
// now, rarest_pieces is a list of all pieces that are the rarest ones.
|
|
// and rarest_rarity is the number of peers that have the rarest pieces
|
|
|
|
// if there's only a single peer that doesn't have the rarest piece
|
|
// it's impossible for us to download one piece and upload it
|
|
// twice. i.e. we cannot get a positive share ratio
|
|
if (num_peers - rarest_rarity < settings().get_int(settings_pack::share_mode_target)) return;
|
|
|
|
// we might be able to do better than a share ratio of 2 if there are
|
|
// enough downloaders of the pieces we already have.
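// Sketch of the counting step described in the note above (not part of
// the original source): tally peers that are interested in us, since only
// those can actually be downloading the pieces we hold:
//
// int num_interested = 0;
// for (const_peer_iterator p = m_connections.begin()
//     , pend(m_connections.end()); p != pend; ++p)
//     if ((*p)->is_peer_interested()) ++num_interested;
// // if num_interested == 0, fall through and pick a new piece below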
|
|
<div style="background: #ffff00" width="100%">
|
|
</div> // now, pick one of the rarest pieces to download
|
|
int pick = random() % rarest_pieces.size();
|
|
bool was_finished = is_finished();
|
|
m_picker->set_piece_priority(rarest_pieces[pick], 1);
|
|
update_gauge();
|
|
update_peer_interest(was_finished);
|
|
|
|
update_want_peers();
|
|
}
|
|
|
|
void torrent::refresh_explicit_cache(int cache_size)
|
|
{
|
|
TORRENT_ASSERT(m_ses.is_single_thread());
|
|
if (!ready_for_connections()) return;
|
|
|
|
if (m_abort) return;
|
|
TORRENT_ASSERT(m_storage);
|
|
|
|
if (!is_loaded()) return;
|
|
|
|
// rotate the cached pieces
|
|
cache_status status;
|
|
m_ses.disk_thread().get_cache_info(&status, false, m_storage.get());
|
|
|
|
// add blocks_per_piece / 2 in order to round to closest whole piece
|
|
int blocks_per_piece = m_torrent_file->piece_length() / block_size();
|
|
int num_cache_pieces = (cache_size + blocks_per_piece / 2) / blocks_per_piece;
|
|
if (num_cache_pieces > m_torrent_file->num_pieces())
|
|
num_cache_pieces = m_torrent_file->num_pieces();
|
|
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(98)">../src/torrent.cpp:10404</a></td><td>instead of resorting the whole list, insert the peers directly into the right place</td></tr><tr id="98" style="display: none;" colspan="3"><td colspan="3"><h2>instead of resorting the whole list, insert the peers
|
|
directly into the right place</h2><h4>../src/torrent.cpp:10404</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> printf("timed out [average-piece-time: %d ms ]\n"
|
|
, m_average_piece_time);
|
|
#endif
|
|
}
|
|
|
|
// pick all blocks for this piece. the peers list is kept up to date
|
|
// and sorted. when we issue a request to a peer, its download queue
|
|
// time will increase and it may need to be bumped in the peers list,
|
|
// since it's ordered by download queue time
|
|
pick_time_critical_block(peers, ignore_peers
|
|
, peers_with_requests
|
|
, pi, &*i, m_picker.get()
|
|
, blocks_in_piece, timed_out);
|
|
|
|
// put back the peers we ignored into the peer list for the next piece
|
|
if (!ignore_peers.empty())
|
|
{
|
|
peers.insert(peers.begin(), ignore_peers.begin(), ignore_peers.end());
|
|
ignore_peers.clear();
|
|
|
|
<div style="background: #ffff00" width="100%"> std::sort(peers.begin(), peers.end()
|
|
</div> , boost::bind(&peer_connection::download_queue_time, _1, 16*1024)
|
|
< boost::bind(&peer_connection::download_queue_time, _2, 16*1024));
|
|
}
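// Sketch of the note above (not part of the original source): rather than
// prepending and re-sorting the whole vector, each ignored peer could be
// placed directly at its sorted position:
//
// for (std::vector<peer_connection*>::iterator k = ignore_peers.begin()
//     , kend(ignore_peers.end()); k != kend; ++k)
// {
//     peers.insert(std::lower_bound(peers.begin(), peers.end(), *k
//         , boost::bind(&peer_connection::download_queue_time, _1, 16*1024)
//         < boost::bind(&peer_connection::download_queue_time, _2, 16*1024))
//         , *k);
// }
// ignore_peers.clear();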
|
|
|
|
// if this peer's download time exceeds 2 seconds, we're done.
|
|
// We don't want to build unreasonably long request queues
|
|
if (!peers.empty() && peers[0]->download_queue_time() > milliseconds(2000))
|
|
break;
|
|
}
|
|
|
|
// commit all the time critical requests
|
|
for (std::set<peer_connection*>::iterator i = peers_with_requests.begin()
|
|
, end(peers_with_requests.end()); i != end; ++i)
|
|
{
|
|
(*i)->send_block_requests();
|
|
}
|
|
}
|
|
|
|
std::set<std::string> torrent::web_seeds(web_seed_entry::type_t type) const
|
|
{
|
|
TORRENT_ASSERT(m_ses.is_single_thread());
|
|
std::set<std::string> ret;
|
|
for (std::list<web_seed_entry>::const_iterator i = m_web_seeds.begin()
|
|
, end(m_web_seeds.end()); i != end; ++i)
|
|
{
|
|
if (i->peer_info.banned) continue;
|
|
if (i->type != type) continue;
|
|
ret.insert(i->url);
|
|
}
|
|
return ret;
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(99)">../src/torrent_peer.cpp:179</a></td><td>how do we deal with our external address changing?</td></tr><tr id="99" style="display: none;" colspan="3"><td colspan="3"><h2>how do we deal with our external address changing?</h2><h4>../src/torrent_peer.cpp:179</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> , is_v6_addr(false)
|
|
#endif
|
|
#if TORRENT_USE_I2P
|
|
, is_i2p_addr(false)
|
|
#endif
|
|
, on_parole(false)
|
|
, banned(false)
|
|
, supports_utp(true) // assume peers support utp
|
|
, confirmed_supports_utp(false)
|
|
, supports_holepunch(false)
|
|
, web_seed(false)
|
|
#if TORRENT_USE_ASSERTS
|
|
, in_use(false)
|
|
#endif
|
|
{
|
|
TORRENT_ASSERT((src & 0xff) == src);
|
|
}
|
|
|
|
boost::uint32_t torrent_peer::rank(external_ip const& external, int external_port) const
|
|
{
|
|
<div style="background: #ffff00" width="100%"> if (peer_rank == 0)
|
|
</div> peer_rank = peer_priority(
|
|
tcp::endpoint(external.external_address(this->address()), external_port)
|
|
, tcp::endpoint(this->address(), this->port));
|
|
return peer_rank;
|
|
}
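// Sketch for the "external address changing" question above (not part of
// the original source): peer_rank is a lazily computed cache (0 means "not
// computed yet"), so one answer is to clear it whenever the session learns
// a new external address and let it be recomputed on the next call.
// "clear_rank_cache" is a hypothetical helper:
//
// void torrent_peer::clear_rank_cache() { peer_rank = 0; }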
|
|
|
|
boost::uint64_t torrent_peer::total_download() const
|
|
{
|
|
if (connection != 0)
|
|
{
|
|
TORRENT_ASSERT(prev_amount_download == 0);
|
|
return connection->statistics().total_payload_download();
|
|
}
|
|
else
|
|
{
|
|
return boost::uint64_t(prev_amount_download) << 10;
|
|
}
|
|
}
|
|
|
|
boost::uint64_t torrent_peer::total_upload() const
|
|
{
|
|
if (connection != 0)
|
|
{
|
|
TORRENT_ASSERT(prev_amount_upload == 0);
|
|
return connection->statistics().total_payload_upload();
|
|
}
|
|
else
|
|
{
|
|
return boost::uint64_t(prev_amount_upload) << 10;
|
|
}
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(100)">../src/udp_socket.cpp:290</a></td><td>it would be nice to detect this on posix systems also</td></tr><tr id="100" style="display: none;" colspan="3"><td colspan="3"><h2>it would be nice to detect this on posix systems also</h2><h4>../src/udp_socket.cpp:290</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> --m_v6_outstanding;
|
|
}
|
|
else
|
|
#endif
|
|
{
|
|
TORRENT_ASSERT(m_v4_outstanding > 0);
|
|
--m_v4_outstanding;
|
|
}
|
|
|
|
if (ec == asio::error::operation_aborted) return;
|
|
if (m_abort) return;
|
|
|
|
CHECK_MAGIC;
|
|
|
|
for (;;)
|
|
{
|
|
error_code ec;
|
|
udp::endpoint ep;
|
|
size_t bytes_transferred = s->receive_from(asio::buffer(m_buf, m_buf_size), ep, 0, ec);
|
|
|
|
<div style="background: #ffff00" width="100%">#ifdef TORRENT_WINDOWS
|
|
</div> if ((ec == error_code(ERROR_MORE_DATA, system_category())
|
|
|| ec == error_code(WSAEMSGSIZE, system_category()))
|
|
&& m_buf_size < 65536)
|
|
{
|
|
// if this function fails to allocate memory, m_buf_size
|
|
// is set to 0. In that case, don't issue the async_read().
|
|
set_buf_size(m_buf_size * 2);
|
|
if (m_buf_size == 0) return;
|
|
continue;
|
|
}
|
|
#endif
|
|
|
|
if (ec == asio::error::would_block || ec == asio::error::try_again) break;
|
|
on_read_impl(s, ep, ec, bytes_transferred);
|
|
}
|
|
call_drained_handler();
|
|
setup_read(s);
|
|
}
|
|
|
|
void udp_socket::call_handler(error_code const& ec, udp::endpoint const& ep, char const* buf, int size)
|
|
{
|
|
m_observers_locked = true;
|
|
for (std::vector<udp_socket_observer*>::iterator i = m_observers.begin();
|
|
i != m_observers.end();)
|
|
{
|
|
bool ret = false;
|
|
TORRENT_TRY {
|
|
ret = (*i)->incoming_packet(ec, ep, buf, size);
|
|
} TORRENT_CATCH (std::exception&) {}
|
|
if (*i == NULL) i = m_observers.erase(i);
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(101)">../src/udp_tracker_connection.cpp:554</a></td><td>it would be more efficient to not use a string here. however, the problem is that some trackers will respond with actual strings. For example i2p trackers</td></tr><tr id="101" style="display: none;" colspan="3"><td colspan="3"><h2>it would be more efficient to not use a string here.
|
|
however, the problem is that some trackers will respond
|
|
with actual strings. For example i2p trackers</h2><h4>../src/udp_tracker_connection.cpp:554</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> }
|
|
|
|
boost::shared_ptr<request_callback> cb = requester();
|
|
#if defined TORRENT_VERBOSE_LOGGING || defined TORRENT_LOGGING
|
|
if (cb)
|
|
{
|
|
boost::shared_ptr<request_callback> cb = requester();
|
|
cb->debug_log("<== UDP_TRACKER_RESPONSE [ url: %s ]", tracker_req().url.c_str());
|
|
}
|
|
#endif
|
|
|
|
if (!cb)
|
|
{
|
|
close();
|
|
return true;
|
|
}
|
|
|
|
std::vector<peer_entry> peer_list;
|
|
for (int i = 0; i < num_peers; ++i)
|
|
{
|
|
<div style="background: #ffff00" width="100%"> peer_entry e;
|
|
</div> char ip_string[100];
|
|
unsigned int a = detail::read_uint8(buf);
|
|
unsigned int b = detail::read_uint8(buf);
|
|
unsigned int c = detail::read_uint8(buf);
|
|
unsigned int d = detail::read_uint8(buf);
|
|
snprintf(ip_string, 100, "%u.%u.%u.%u", a, b, c, d);
|
|
e.ip = ip_string;
|
|
e.port = detail::read_uint16(buf);
|
|
e.pid.clear();
|
|
peer_list.push_back(e);
|
|
}
|
|
|
|
std::list<address> ip_list;
|
|
for (std::list<tcp::endpoint>::const_iterator i = m_endpoints.begin()
|
|
, end(m_endpoints.end()); i != end; ++i)
|
|
{
|
|
ip_list.push_back(i->address());
|
|
}
|
|
|
|
cb->tracker_response(tracker_req(), m_target.address(), ip_list
|
|
, peer_list, interval, min_interval, complete, incomplete, 0, address(), "" /*trackerid*/);
|
|
|
|
close();
|
|
return true;
|
|
}
|
|
|
|
bool udp_tracker_connection::on_scrape_response(char const* buf, int size)
|
|
{
|
|
restart_read_timeout();
|
|
int action = detail::read_int32(buf);
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(102)">../src/upnp.cpp:72</a></td><td>listen_interface is not used. It's meant to bind the broadcast socket</td></tr><tr id="102" style="display: none;" colspan="3"><td colspan="3"><h2>listen_interface is not used. It's meant to bind the broadcast socket</h2><h4>../src/upnp.cpp:72</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">#include <asio/ip/multicast.hpp>
|
|
#else
|
|
#include <boost/asio/ip/host_name.hpp>
|
|
#include <boost/asio/ip/multicast.hpp>
|
|
#endif
|
|
#include <cstdlib>
|
|
|
|
namespace libtorrent {
|
|
|
|
namespace upnp_errors
|
|
{
|
|
boost::system::error_code make_error_code(error_code_enum e)
|
|
{
|
|
return error_code(e, get_upnp_category());
|
|
}
|
|
|
|
} // upnp_errors namespace
|
|
|
|
static error_code ec;
|
|
|
|
<div style="background: #ffff00" width="100%">upnp::upnp(io_service& ios, connection_queue& cc
|
|
</div> , address const& listen_interface, std::string const& user_agent
|
|
, portmap_callback_t const& cb, log_callback_t const& lcb
|
|
, bool ignore_nonrouters, void* state)
|
|
: m_user_agent(user_agent)
|
|
, m_callback(cb)
|
|
, m_log_callback(lcb)
|
|
, m_retry_count(0)
|
|
, m_io_service(ios)
|
|
, m_resolver(ios)
|
|
, m_socket(udp::endpoint(address_v4::from_string("239.255.255.250", ec), 1900)
|
|
, boost::bind(&upnp::on_reply, self(), _1, _2, _3))
|
|
, m_broadcast_timer(ios)
|
|
, m_refresh_timer(ios)
|
|
, m_map_timer(ios)
|
|
, m_disabled(false)
|
|
, m_closing(false)
|
|
, m_ignore_non_routers(ignore_nonrouters)
|
|
, m_cc(cc)
|
|
, m_last_if_update(min_time())
|
|
{
|
|
TORRENT_ASSERT(cb);
|
|
|
|
error_code ec;
|
|
m_socket.open(ios, ec);
|
|
|
|
if (state)
|
|
{
|
|
upnp_state_t* s = (upnp_state_t*)state;
|
|
m_devices.swap(s->devices);
|
|
m_mappings.swap(s->mappings);
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(103)">../src/ut_metadata.cpp:320</a></td><td>we really need to increment the refcounter on the torrent while this buffer is still in the peer's send buffer</td></tr><tr id="103" style="display: none;" colspan="3"><td colspan="3"><h2>we really need to increment the refcounter on the torrent
|
|
while this buffer is still in the peer's send buffer</h2><h4>../src/ut_metadata.cpp:320</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> if (!m_tp.need_loaded()) return;
|
|
metadata = m_tp.metadata().begin + offset;
|
|
metadata_piece_size = (std::min)(
|
|
int(m_tp.get_metadata_size() - offset), 16 * 1024);
|
|
TORRENT_ASSERT(metadata_piece_size > 0);
|
|
TORRENT_ASSERT(offset >= 0);
|
|
TORRENT_ASSERT(offset + metadata_piece_size <= int(m_tp.get_metadata_size()));
|
|
}
|
|
|
|
char msg[200];
|
|
char* header = msg;
|
|
char* p = &msg[6];
|
|
int len = bencode(p, e);
|
|
int total_size = 2 + len + metadata_piece_size;
|
|
namespace io = detail;
|
|
io::write_uint32(total_size, header);
|
|
io::write_uint8(bt_peer_connection::msg_extended, header);
|
|
io::write_uint8(m_message_index, header);
|
|
|
|
m_pc.send_buffer(msg, len + 6);
|
|
<div style="background: #ffff00" width="100%"> if (metadata_piece_size) m_pc.append_const_send_buffer(
|
|
</div> metadata, metadata_piece_size);
|
|
|
|
m_pc.stats_counters().inc_stats_counter(counters::num_outgoing_extended);
|
|
m_pc.stats_counters().inc_stats_counter(counters::num_outgoing_metadata);
|
|
}
|
|
|
|
virtual bool on_extended(int length
|
|
, int extended_msg, buffer::const_interval body)
|
|
{
|
|
if (extended_msg != 2) return false;
|
|
if (m_message_index == 0) return false;
|
|
|
|
if (length > 17 * 1024)
|
|
{
|
|
#ifdef TORRENT_VERBOSE_LOGGING
|
|
m_pc.peer_log("<== UT_METADATA [ packet too big %d ]", length);
|
|
#endif
|
|
m_pc.disconnect(errors::invalid_metadata_message, peer_connection_interface::op_bittorrent, 2);
|
|
return true;
|
|
}
|
|
|
|
if (!m_pc.packet_finished()) return true;
|
|
|
|
int len;
|
|
entry msg = bdecode(body.begin, body.end, len);
|
|
if (msg.type() != entry::dictionary_t)
|
|
{
|
|
#ifdef TORRENT_VERBOSE_LOGGING
|
|
m_pc.peer_log("<== UT_METADATA [ not a dictionary ]");
|
|
#endif
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(104)">../src/utp_stream.cpp:1627</a></td><td>this loop may not be very efficient</td></tr><tr id="104" style="display: none;" colspan="3"><td colspan="3"><h2>this loop may not be very efficient</h2><h4>../src/utp_stream.cpp:1627</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">
|
|
char* m_buf;
|
|
};
|
|
|
|
// sends a packet, pulls data from the write buffer (if there's any)
|
|
// if ack is true, we need to send a packet regardless of if there's
|
|
// any data. Returns true if we could send more data (i.e. call
|
|
// send_pkt() again)
|
|
// returns true if there is more space for payload in our
|
|
// congestion window, false if there is no more space.
|
|
bool utp_socket_impl::send_pkt(int flags)
|
|
{
|
|
INVARIANT_CHECK;
|
|
|
|
bool force = (flags & pkt_ack) || (flags & pkt_fin);
|
|
|
|
// TORRENT_ASSERT(m_state != UTP_STATE_FIN_SENT || (flags & pkt_ack));
|
|
|
|
// first see if we need to resend any packets
|
|
|
|
<div style="background: #ffff00" width="100%"> for (int i = (m_acked_seq_nr + 1) & ACK_MASK; i != m_seq_nr; i = (i + 1) & ACK_MASK)
|
|
</div> {
|
|
packet* p = (packet*)m_outbuf.at(i);
|
|
if (!p) continue;
|
|
if (!p->need_resend) continue;
|
|
if (!resend_packet(p))
|
|
{
|
|
// we couldn't resend the packet. It probably doesn't
|
|
// fit in our cwnd. If force is set, we need to continue
|
|
// to send our packet anyway, if we don't have force set,
|
|
// we might as well return
|
|
if (!force) return false;
|
|
// resend_packet might have failed
|
|
if (m_state == UTP_STATE_ERROR_WAIT || m_state == UTP_STATE_DELETE) return false;
|
|
break;
|
|
}
|
|
|
|
// don't fast-resend this packet
|
|
if (m_fast_resend_seq_nr == i)
|
|
m_fast_resend_seq_nr = (m_fast_resend_seq_nr + 1) & ACK_MASK;
|
|
}
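// Sketch of the note above (not part of the original source): the scan
// could terminate early if the socket kept a count of packets currently
// flagged need_resend ("m_resend_count" is hypothetical, maintained
// wherever need_resend is set and cleared):
//
// int remaining = m_resend_count;
// for (int i = (m_acked_seq_nr + 1) & ACK_MASK;
//     remaining > 0 && i != m_seq_nr; i = (i + 1) & ACK_MASK)
// {
//     packet* p = (packet*)m_outbuf.at(i);
//     if (!p || !p->need_resend) continue;
//     --remaining;
//     // ... resend as above ...
// }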
|
|
|
|
int sack = 0;
|
|
if (m_inbuf.size())
|
|
{
|
|
// the SACK bitfield should ideally fit all
|
|
// the pieces we have successfully received
|
|
sack = (m_inbuf.span() + 7) / 8;
|
|
if (sack > 32) sack = 32;
|
|
}
|
|
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(105)">../src/web_connection_base.cpp:71</a></td><td>introduce a web-seed default class which has a low download priority</td></tr><tr id="105" style="display: none;" colspan="3"><td colspan="3"><h2>introduce a web-seed default class which has a low download priority</h2><h4>../src/web_connection_base.cpp:71</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">
|
|
namespace libtorrent
|
|
{
|
|
web_connection_base::web_connection_base(
|
|
peer_connection_args const& pack
|
|
, web_seed_entry& web)
|
|
: peer_connection(pack)
|
|
, m_first_request(true)
|
|
, m_ssl(false)
|
|
, m_external_auth(web.auth)
|
|
, m_extra_headers(web.extra_headers)
|
|
, m_parser(http_parser::dont_parse_chunks)
|
|
, m_body_start(0)
|
|
{
|
|
TORRENT_ASSERT(&web.peer_info == pack.peerinfo);
|
|
TORRENT_ASSERT(web.endpoint == *pack.endp);
|
|
|
|
INVARIANT_CHECK;
|
|
|
|
// we only want left-over bandwidth
|
|
<div style="background: #ffff00" width="100%">
|
|
</div> std::string protocol;
|
|
error_code ec;
|
|
boost::tie(protocol, m_basic_auth, m_host, m_port, m_path)
|
|
= parse_url_components(web.url, ec);
|
|
TORRENT_ASSERT(!ec);
|
|
|
|
if (m_port == -1 && protocol == "http")
|
|
m_port = 80;
|
|
|
|
#ifdef TORRENT_USE_OPENSSL
|
|
if (protocol == "https")
|
|
{
|
|
m_ssl = true;
|
|
if (m_port == -1) m_port = 443;
|
|
}
|
|
#endif
|
|
|
|
if (!m_basic_auth.empty())
|
|
m_basic_auth = base64encode(m_basic_auth);
|
|
|
|
m_server_string = "URL seed @ ";
|
|
m_server_string += m_host;
|
|
}
|
|
|
|
int web_connection_base::timeout() const
|
|
{
|
|
// since this is a web seed, change the timeout
|
|
// according to the settings.
|
|
return m_settings.get_int(settings_pack::urlseed_timeout);
|
|
}
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(106)">../src/kademlia/dht_tracker.cpp:428</a></td><td>ideally this function would be called when the put completes</td></tr><tr id="106" style="display: none;" colspan="3"><td colspan="3"><h2>ideally this function would be called when the
|
|
put completes</h2><h4>../src/kademlia/dht_tracker.cpp:428</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> // since it controls whether we re-put the content
|
|
TORRENT_ASSERT(!it.is_mutable());
|
|
f(it);
|
|
return false;
|
|
}
|
|
|
|
bool get_mutable_item_callback(item& it, boost::function<void(item const&)> f)
|
|
{
|
|
// the reason to wrap here is to control the return value
|
|
// since it controls whether we re-put the content
|
|
TORRENT_ASSERT(it.is_mutable());
|
|
f(it);
|
|
return false;
|
|
}
|
|
|
|
bool put_immutable_item_callback(item& it, boost::function<void()> f
|
|
, entry data)
|
|
{
|
|
TORRENT_ASSERT(!it.is_mutable());
|
|
it.assign(data);
|
|
<div style="background: #ffff00" width="100%"> f();
|
|
</div> return true;
|
|
}
|
|
|
|
bool put_mutable_item_callback(item& it, boost::function<void(item&)> cb)
|
|
{
|
|
cb(it);
|
|
return true;
|
|
}
|
|
|
|
void dht_tracker::get_item(sha1_hash const& target
|
|
, boost::function<void(item const&)> cb)
|
|
{
|
|
m_dht.get_item(target, boost::bind(&get_immutable_item_callback, _1, cb));
|
|
}
|
|
|
|
// key is a 32-byte binary string, the public key to look up.
|
|
// the salt is optional
|
|
void dht_tracker::get_item(char const* key
|
|
, boost::function<void(item const&)> cb
|
|
, std::string salt)
|
|
{
|
|
m_dht.get_item(key, salt, boost::bind(&get_mutable_item_callback, _1, cb));
|
|
}
|
|
|
|
void dht_tracker::put_item(entry data
|
|
, boost::function<void()> cb)
|
|
{
|
|
std::string flat_data;
|
|
bencode(std::back_inserter(flat_data), data);
|
|
sha1_hash target = item_target_id(
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(107)">../src/kademlia/routing_table.cpp:308</a></td><td>instead of refreshing a bucket by using find_nodes, ping each node periodically</td></tr><tr id="107" style="display: none;" colspan="3"><td colspan="3"><h2>instead of refreshing a bucket by using find_nodes,

|
|
ping each node periodically</h2><h4>../src/kademlia/routing_table.cpp:308</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> os << "]\n";
|
|
}
|
|
}
|
|
|
|
#endif
|
|
|
|
void routing_table::touch_bucket(node_id const& target)
|
|
{
|
|
table_t::iterator i = find_bucket(target);
|
|
i->last_active = time_now();
|
|
}
|
|
|
|
// returns true if lhs is in more need of a refresh than rhs
|
|
bool compare_bucket_refresh(routing_table_node const& lhs, routing_table_node const& rhs)
|
|
{
|
|
// add the number of nodes to prioritize buckets with few nodes in them
|
|
return lhs.last_active + seconds(lhs.live_nodes.size() * 5)
|
|
< rhs.last_active + seconds(rhs.live_nodes.size() * 5);
|
|
}
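// Sketch of the ping-based refresh this TODO proposes, instead of issuing a
// find_node for a random target in a stale bucket. "last_pinged" is a
// hypothetical per-node timestamp that node_entry would need to gain; the rest
// uses members that already exist here (m_buckets, live_nodes, time_now()).
//
//   bool routing_table::need_ping(node_entry*& out)
//   {
//       ptime const now = time_now();
//       for (table_t::iterator i = m_buckets.begin(); i != m_buckets.end(); ++i)
//           for (bucket_t::iterator j = i->live_nodes.begin()
//               , end(i->live_nodes.end()); j != end; ++j)
//               if (now - j->last_pinged > minutes(15))
//               { out = &*j; return true; }
//       return false;
//   }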
|
|
|
|
<div style="background: #ffff00" width="100%">bool routing_table::need_refresh(node_id& target) const
|
|
</div>{
|
|
INVARIANT_CHECK;
|
|
|
|
ptime now = time_now();
|
|
|
|
// refresh our own bucket once every 15 minutes
|
|
if (now - minutes(15) > m_last_self_refresh)
|
|
{
|
|
m_last_self_refresh = now;
|
|
target = m_id;
|
|
#ifdef TORRENT_DHT_VERBOSE_LOGGING
|
|
TORRENT_LOG(table) << "need_refresh [ bucket: self target: " << target << " ]";
|
|
#endif
|
|
return true;
|
|
}
|
|
|
|
if (m_buckets.empty()) return false;
|
|
|
|
table_t::const_iterator i = std::min_element(m_buckets.begin(), m_buckets.end()
|
|
, &compare_bucket_refresh);
|
|
|
|
if (now - minutes(15) < i->last_active) return false;
|
|
if (now - seconds(45) < m_last_refresh) return false;
|
|
|
|
// generate a random node_id within the given bucket
|
|
target = generate_random_id();
|
|
int num_bits = std::distance(m_buckets.begin(), i) + 1;
|
|
node_id mask = generate_prefix_mask(num_bits);
|
|
|
|
// target = (target & ~mask) | (root & mask)
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(108)">../include/libtorrent/bitfield.hpp:158</a></td><td>rename to data() ?</td></tr><tr id="108" style="display: none;" colspan="3"><td colspan="3"><h2>rename to data() ?</h2><h4>../include/libtorrent/bitfield.hpp:158</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> if (m_buf[i] != 0) return false;
|
|
}
|
|
return true;
|
|
}
|
|
|
|
// returns the size of the bitfield in bits.
|
|
int size() const
|
|
{
|
|
return m_buf == NULL ? 0 : m_buf[-1];
|
|
}
|
|
|
|
int num_words() const
|
|
{
|
|
return (size() + 31) / 32;
|
|
}
|
|
|
|
// returns true if the bitfield has zero size.
|
|
bool empty() const { return m_buf == NULL ? true : m_buf[-1] == 0; }
|
|
|
|
// returns a pointer to the internal buffer of the bitfield.
|
|
<div style="background: #ffff00" width="100%"> char const* bytes() const { return (char const*)m_buf; }
|
|
</div>
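		// a minimal sketch of the rename this comment asks about (not existing
		// API): expose the buffer as data(), matching standard containers, and
		// keep bytes() as an alias until callers have migrated.
		char const* data() const { return reinterpret_cast<char const*>(m_buf); }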
|
|
// copy operator
|
|
bitfield& operator=(bitfield const& rhs)
|
|
{
|
|
assign(rhs.bytes(), rhs.size());
|
|
return *this;
|
|
}
|
|
|
|
// count the number of bits in the bitfield that are set to 1.
|
|
int count() const
|
|
{
|
|
int ret = 0;
|
|
const int words = num_words();
|
|
#if TORRENT_HAS_SSE
|
|
unsigned int cpui[4];
|
|
cpuid(cpui, 1);
|
|
if (cpui[2] & (1 << 23))
|
|
{
|
|
for (int i = 0; i < words; ++i)
|
|
{
|
|
#ifdef __GNUC__
|
|
ret += __builtin_popcount(m_buf[i]);
|
|
#else
|
|
ret += _mm_popcnt_u32(m_buf[i]);
|
|
#endif
|
|
}
|
|
|
|
return ret;
|
|
}
|
|
#endif // TORRENT_HAS_SSE
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(109)">../include/libtorrent/block_cache.hpp:220</a></td><td>make this 32 bits and to count seconds since the block cache was created</td></tr><tr id="109" style="display: none;" colspan="3"><td colspan="3"><h2>make this 32 bits and to count seconds since the block cache was created</h2><h4>../include/libtorrent/block_cache.hpp:220</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">
|
|
bool operator==(cached_piece_entry const& rhs) const
|
|
{ return storage.get() == rhs.storage.get() && piece == rhs.piece; }
|
|
|
|
// if this is set, we'll be calculating the hash
|
|
// for this piece. This member stores the interim
|
|
	// state while we're calculating the hash.
|
|
partial_hash* hash;
|
|
|
|
// set to a unique identifier of a peer that last
|
|
// requested from this piece.
|
|
void* last_requester;
|
|
|
|
// the pointers to the block data. If this is a ghost
|
|
// cache entry, there won't be any data here
|
|
boost::shared_array<cached_block_entry> blocks;
|
|
|
|
// the last time a block was written to this piece
|
|
// plus the minimum amount of time the block is guaranteed
|
|
// to stay in the cache
|
|
<div style="background: #ffff00" width="100%"> ptime expire;
|
|
</div>
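		// what the TODO above suggests would look roughly like this (sketch
		// only; it would replace the ptime member, with the base time kept by
		// the block_cache itself):
		//
		//   boost::uint32_t expire; // seconds since the block_cache was created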
|
|
boost::uint64_t piece:22;
|
|
|
|
// the number of dirty blocks in this piece
|
|
boost::uint64_t num_dirty:14;
|
|
|
|
// the number of blocks in the cache for this piece
|
|
boost::uint64_t num_blocks:14;
|
|
|
|
// the total number of blocks in this piece (and the number
|
|
// of elements in the blocks array)
|
|
boost::uint64_t blocks_in_piece:14;
|
|
|
|
// ---- 64 bit boundary ----
|
|
|
|
// while we have an outstanding async hash operation
|
|
// working on this piece, 'hashing' is set to 1
|
|
// When the operation returns, this is set to 0.
|
|
boost::uint32_t hashing:1;
|
|
|
|
// if we've completed at least one hash job on this
|
|
// piece, and returned it. This is set to one
|
|
boost::uint32_t hashing_done:1;
|
|
|
|
// if this is true, whenever refcount hits 0,
|
|
// this piece should be deleted
|
|
boost::uint32_t marked_for_deletion:1;
|
|
|
|
// this is set to true once we flush blocks past
|
|
// the hash cursor. Once this happens, there's
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(110)">../include/libtorrent/config.hpp:339</a></td><td>Make this count Unicode characters instead of bytes on windows</td></tr><tr id="110" style="display: none;" colspan="3"><td colspan="3"><h2>Make this count Unicode characters instead of bytes on windows</h2><h4>../include/libtorrent/config.hpp:339</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">#define TORRENT_USE_WRITEV 0
|
|
#define TORRENT_USE_READV 0
|
|
|
|
#else
|
|
#warning unknown OS, assuming BSD
|
|
#define TORRENT_BSD
|
|
#endif
|
|
|
|
#if defined __GNUC__ && !(defined TORRENT_USE_OSATOMIC \
|
|
|| defined TORRENT_USE_INTERLOCKED_ATOMIC \
|
|
|| defined TORRENT_USE_BEOS_ATOMIC \
|
|
|| defined TORRENT_USE_SOLARIS_ATOMIC)
|
|
// atomic operations in GCC were introduced in 4.1.1
|
|
# if (__GNUC__ >= 4 && __GNUC_MINOR__ >= 1 && __GNUC_PATCHLEVEL__ >= 1) || __GNUC__ > 4
|
|
# define TORRENT_USE_GCC_ATOMIC 1
|
|
# endif
|
|
#endif
|
|
|
|
// on windows, NAME_MAX refers to Unicode characters
|
|
// on linux it refers to bytes (utf-8 encoded)
|
|
<div style="background: #ffff00" width="100%">
|
|
</div>// windows
|
|
#if defined FILENAME_MAX
|
|
#define TORRENT_MAX_PATH FILENAME_MAX
|
|
|
|
// beos
|
|
#elif defined B_PATH_NAME_LENGTH
|
|
#define TORRENT_MAX_PATH B_PATH_NAME_LENGTH
|
|
|
|
// solaris
|
|
#elif defined MAXPATH
|
|
#define TORRENT_MAX_PATH MAXPATH
|
|
|
|
// posix
|
|
#elif defined NAME_MAX
|
|
#define TORRENT_MAX_PATH NAME_MAX
|
|
|
|
// none of the above
|
|
#else
|
|
// this is the maximum number of characters in a
|
|
// path element / filename on windows
|
|
#define TORRENT_MAX_PATH 255
|
|
#warning unknown platform, assuming the longest path is 255
|
|
|
|
#endif
|
|
|
|
#if defined TORRENT_WINDOWS && !defined TORRENT_MINGW
|
|
|
|
#include <stdarg.h>
|
|
|
|
// internal
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(111)">../include/libtorrent/debug.hpp:212</a></td><td>rewrite this class to use FILE* instead and have a printf-like interface</td></tr><tr id="111" style="display: none;" colspan="3"><td colspan="3"><h2>rewrite this class to use FILE* instead and
|
|
have a printf-like interface</h2><h4>../include/libtorrent/debug.hpp:212</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">#endif
|
|
}
|
|
|
|
#if defined TORRENT_VERBOSE_LOGGING || defined TORRENT_LOGGING || defined TORRENT_ERROR_LOGGING
|
|
|
|
#include <cstring>
|
|
#include "libtorrent/config.hpp"
|
|
#include "libtorrent/file.hpp"
|
|
#include "libtorrent/thread.hpp"
|
|
|
|
#if TORRENT_USE_IOSTREAM
|
|
#include <string>
|
|
#include <fstream>
|
|
#include <iostream>
|
|
#endif
|
|
|
|
namespace libtorrent
|
|
{
|
|
// DEBUG API
|
|
|
|
<div style="background: #ffff00" width="100%"> struct logger
|
|
</div> {
|
|
#if TORRENT_USE_IOSTREAM
|
|
// all log streams share a single file descriptor
|
|
// and re-opens the file for each log line
|
|
// these members are defined in session_impl.cpp
|
|
static std::ofstream log_file;
|
|
static std::string open_filename;
|
|
static mutex file_mutex;
|
|
#endif
|
|
|
|
~logger()
|
|
{
|
|
mutex::scoped_lock l(file_mutex);
|
|
log_file.close();
|
|
open_filename.clear();
|
|
}
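		// sketch of the FILE*-based, printf-like interface this TODO asks for.
		// "m_file" and "logf" are hypothetical names and this would need
		// <cstdio>/<cstdarg>; shown only to illustrate the direction.
		FILE* m_file;
		void logf(char const* fmt, ...)
		{
			va_list v;
			va_start(v, fmt);
			vfprintf(m_file, fmt, v);
			fflush(m_file);
			va_end(v);
		}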
|
|
|
|
logger(std::string const& logpath, std::string const& filename
|
|
, int instance, bool append)
|
|
{
|
|
char log_name[512];
|
|
snprintf(log_name, sizeof(log_name), "libtorrent_logs%d", instance);
|
|
std::string dir(complete(combine_path(combine_path(logpath, log_name), filename)) + ".log");
|
|
error_code ec;
|
|
if (!exists(parent_path(dir)))
|
|
create_directories(parent_path(dir), ec);
|
|
m_filename = dir;
|
|
|
|
mutex::scoped_lock l(file_mutex);
|
|
open(!append);
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(112)">../include/libtorrent/disk_buffer_pool.hpp:133</a></td><td>try to remove the observers, only using the async_allocate handlers</td></tr><tr id="112" style="display: none;" colspan="3"><td colspan="3"><h2>try to remove the observers, only using the async_allocate handlers</h2><h4>../include/libtorrent/disk_buffer_pool.hpp:133</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">
|
|
// number of bytes per block. The BitTorrent
|
|
// protocol defines the block size to 16 KiB.
|
|
const int m_block_size;
|
|
|
|
// number of disk buffers currently allocated
|
|
int m_in_use;
|
|
|
|
// cache size limit
|
|
int m_max_use;
|
|
|
|
// if we have exceeded the limit, we won't start
|
|
// allowing allocations again until we drop below
|
|
// this low watermark
|
|
int m_low_watermark;
|
|
|
|
// if we exceed the max number of buffers, we start
|
|
// adding up callbacks to this queue. Once the number
|
|
// of buffers in use drops below the low watermark,
|
|
// we start calling these functions back
|
|
<div style="background: #ffff00" width="100%"> std::vector<boost::shared_ptr<disk_observer> > m_observers;
|
|
</div>
|
|
// these handlers are executed when a new buffer is available
|
|
std::vector<handler_t> m_handlers;
|
|
|
|
// callback used to tell the cache it needs to free up some blocks
|
|
boost::function<void()> m_trigger_cache_trim;
|
|
|
|
// set to true to throttle more allocations
|
|
bool m_exceeded_max_size;
|
|
|
|
// this is the main thread io_service. Callbacks are
|
|
// posted on this in order to have them execute in
|
|
// the main thread.
|
|
io_service& m_ios;
|
|
|
|
private:
|
|
|
|
void check_buffer_level(mutex::scoped_lock& l);
|
|
|
|
mutable mutex m_pool_mutex;
|
|
|
|
int m_cache_buffer_chunk_size;
|
|
bool m_lock_disk_cache;
|
|
|
|
#if TORRENT_HAVE_MMAP
|
|
// the file descriptor of the cache mmap file
|
|
int m_cache_fd;
|
|
// the pointer to the block of virtual address space
|
|
// making up the mmapped cache space
|
|
char* m_cache_pool;
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(113)">../include/libtorrent/peer_connection.hpp:217</a></td><td>make this a raw pointer (to save size in the first cache line) and make the constructor take a raw pointer. torrent objects should always outlive their peers</td></tr><tr id="113" style="display: none;" colspan="3"><td colspan="3"><h2>make this a raw pointer (to save size in
|
|
the first cache line) and make the constructor
|
|
take a raw pointer. torrent objects should always
|
|
outlive their peers</h2><h4>../include/libtorrent/peer_connection.hpp:217</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> , m_snubbed(false)
|
|
, m_interesting(false)
|
|
, m_choked(true)
|
|
, m_corked(false)
|
|
, m_ignore_stats(false)
|
|
, m_recv_pos(0)
|
|
, m_packet_size(0)
|
|
{}
|
|
|
|
protected:
|
|
|
|
// the pieces the other end have
|
|
bitfield m_have_piece;
|
|
|
|
// this is the torrent this connection is
|
|
// associated with. If the connection is an
|
|
// incoming connection, this is set to zero
|
|
// until the info_hash is received. Then it's
|
|
// set to the torrent it belongs to.
|
|
|
|
<div style="background: #ffff00" width="100%"> boost::weak_ptr<torrent> m_torrent;
|
|
</div>
|
|
public:
|
|
|
|
// a back reference to the session
|
|
// the peer belongs to.
|
|
aux::session_interface& m_ses;
|
|
|
|
// settings that apply to this peer
|
|
aux::session_settings const& m_settings;
|
|
|
|
protected:
|
|
|
|
// this is true if this connection has been added
|
|
// to the list of connections that will be closed.
|
|
bool m_disconnecting:1;
|
|
|
|
// this is true until this socket has become
|
|
// writable for the first time (i.e. the
|
|
// connection completed). While connecting
|
|
// the timeout will not be triggered. This is
|
|
// because windows XP SP2 may delay connection
|
|
// attempts, which means that the connection
|
|
// may not even have been attempted when the
|
|
// time out is reached.
|
|
bool m_connecting:1;
|
|
|
|
// this is set to true if the last time we tried to
|
|
// pick a piece to download, we could only find
|
|
// blocks that were already requested from other
|
|
// peers. In this case, we should not try to pick
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(114)">../include/libtorrent/peer_connection.hpp:1141</a></td><td>factor this out into its own class with a virtual interface torrent and session should implement this interface</td></tr><tr id="114" style="display: none;" colspan="3"><td colspan="3"><h2>factor this out into its own class with a virtual interface
|
|
torrent and session should implement this interface</h2><h4>../include/libtorrent/peer_connection.hpp:1141</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">
|
|
// the local endpoint for this peer, i.e. our address
|
|
// and our port. If this is set for outgoing connections
|
|
// before the connection completes, it means we want to
|
|
// force the connection to be bound to the specified interface.
|
|
// if it ends up being bound to a different local IP, the connection
|
|
// is closed.
|
|
tcp::endpoint m_local;
|
|
|
|
// remote peer's id
|
|
peer_id m_peer_id;
|
|
|
|
// the bandwidth channels, upload and download
|
|
// keeps track of the current quotas
|
|
bandwidth_channel m_bandwidth_channel[num_channels];
|
|
|
|
private:
|
|
// statistics about upload and download speeds
|
|
// and total amount of uploads and downloads for
|
|
// this peer
|
|
<div style="background: #ffff00" width="100%"> stat m_statistics;
|
|
</div> protected:
|
|
|
|
// if the timeout is extended for the outstanding
|
|
// requests, this is the number of seconds it was
|
|
// extended.
|
|
int m_timeout_extend;
|
|
|
|
// the number of outstanding bytes expected
|
|
// to be received by extensions
|
|
int m_extension_outstanding_bytes;
|
|
|
|
// the number of time critical requests
|
|
// queued up in the m_request_queue that
|
|
// soon will be committed to the download
|
|
// queue. This is included in download_queue_time()
|
|
// so that it can be used while adding more
|
|
// requests and take the previous requests
|
|
// into account without submitting it all
|
|
// immediately
|
|
int m_queued_time_critical;
|
|
|
|
// the number of valid, received bytes in m_recv_buffer
|
|
int m_recv_end:24;
|
|
|
|
//#error 1 byte
|
|
|
|
// recv_buf.begin (start of actual receive buffer)
|
|
// |
|
|
// | m_recv_start (logical start of current
|
|
// | | receive buffer, as perceived by upper layers)
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(115)">../include/libtorrent/peer_connection_interface.hpp:45</a></td><td>make this interface smaller!</td></tr><tr id="115" style="display: none;" colspan="3"><td colspan="3"><h2>make this interface smaller!</h2><h4>../include/libtorrent/peer_connection_interface.hpp:45</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
|
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
|
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
|
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
|
POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
*/
|
|
|
|
#ifndef TORRENT_PEER_CONNECTION_INTERFACE_HPP
|
|
#define TORRENT_PEER_CONNECTION_INTERFACE_HPP
|
|
|
|
#include "libtorrent/socket.hpp"
|
|
#include "libtorrent/error_code.hpp"
|
|
|
|
namespace libtorrent
|
|
{
|
|
struct torrent_peer;
|
|
class stat;
|
|
struct peer_info;
|
|
|
|
<div style="background: #ffff00" width="100%"> struct peer_connection_interface
|
|
</div> {
|
|
// these constants are used to identify the operation
|
|
// that failed, causing a peer to disconnect
|
|
enum operation_t
|
|
{
|
|
// this is used when the bittorrent logic
|
|
// determines to disconnect
|
|
op_bittorrent = 0,
|
|
op_iocontrol,
|
|
op_getpeername,
|
|
op_getname,
|
|
op_alloc_recvbuf,
|
|
op_alloc_sndbuf,
|
|
op_file_write,
|
|
op_file_read,
|
|
op_file,
|
|
op_sock_write,
|
|
op_sock_read,
|
|
op_sock_open,
|
|
op_sock_bind,
|
|
op_available,
|
|
op_encryption,
|
|
op_connect,
|
|
op_ssl_handshake,
|
|
op_get_interface,
|
|
};
|
|
|
|
virtual tcp::endpoint const& remote() const = 0;
|
|
virtual tcp::endpoint local_endpoint() const = 0;
|
|
virtual void disconnect(error_code const& ec, operation_t op, int error = 0) = 0;
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(116)">../include/libtorrent/performance_counters.hpp:132</a></td><td>should keepalives be in here too? how about dont-have, share-mode, upload-only</td></tr><tr id="116" style="display: none;" colspan="3"><td colspan="3"><h2>should keepalives be in here too?
|
|
how about dont-have, share-mode, upload-only</h2><h4>../include/libtorrent/performance_counters.hpp:132</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> // a connect candidate
|
|
connection_attempt_loops,
|
|
// successful incoming connections (not rejected for any reason)
|
|
incoming_connections,
|
|
|
|
// counts events where the network
|
|
// thread wakes up
|
|
on_read_counter,
|
|
on_write_counter,
|
|
on_tick_counter,
|
|
on_lsd_counter,
|
|
on_lsd_peer_counter,
|
|
on_udp_counter,
|
|
on_accept_counter,
|
|
on_disk_queue_counter,
|
|
on_disk_counter,
|
|
|
|
torrent_evicted_counter,
|
|
|
|
// bittorrent message counters
|
|
<div style="background: #ffff00" width="100%"> num_incoming_choke,
|
|
</div> num_incoming_unchoke,
|
|
num_incoming_interested,
|
|
num_incoming_not_interested,
|
|
num_incoming_have,
|
|
num_incoming_bitfield,
|
|
num_incoming_request,
|
|
num_incoming_piece,
|
|
num_incoming_cancel,
|
|
num_incoming_dht_port,
|
|
num_incoming_suggest,
|
|
num_incoming_have_all,
|
|
num_incoming_have_none,
|
|
num_incoming_reject,
|
|
num_incoming_allowed_fast,
|
|
num_incoming_ext_handshake,
|
|
num_incoming_pex,
|
|
num_incoming_metadata,
|
|
num_incoming_extended,
|
|
|
|
num_outgoing_choke,
|
|
num_outgoing_unchoke,
|
|
num_outgoing_interested,
|
|
num_outgoing_not_interested,
|
|
num_outgoing_have,
|
|
num_outgoing_bitfield,
|
|
num_outgoing_request,
|
|
num_outgoing_piece,
|
|
num_outgoing_cancel,
|
|
num_outgoing_dht_port,
|
|
num_outgoing_suggest,
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(117)">../include/libtorrent/performance_counters.hpp:404</a></td><td>some space could be saved here by making gauges 32 bits</td></tr><tr id="117" style="display: none;" colspan="3"><td colspan="3"><h2>some space could be saved here by making gauges 32 bits</h2><h4>../include/libtorrent/performance_counters.hpp:404</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> limiter_down_bytes,
|
|
|
|
num_counters,
|
|
num_gauge_counters = num_counters - num_stats_counters
|
|
};
|
|
|
|
counters();
|
|
|
|
counters(counters const&);
|
|
counters& operator=(counters const&);
|
|
|
|
// returns the new value
|
|
boost::int64_t inc_stats_counter(int c, boost::int64_t value = 1);
|
|
boost::int64_t operator[](int i) const;
|
|
|
|
void set_value(int c, boost::int64_t value);
|
|
void blend_stats_counter(int c, boost::int64_t value, int ratio);
|
|
|
|
private:
|
|
|
|
<div style="background: #ffff00" width="100%">#if BOOST_ATOMIC_LLONG_LOCK_FREE == 2
|
|
</div> boost::atomic<boost::int64_t> m_stats_counter[num_counters];
|
|
#else
|
|
	// if the atomic type isn't lock-free, use a single lock instead, for
|
|
// the whole array
|
|
mutex m_mutex;
|
|
boost::int64_t m_stats_counter[num_counters];
|
|
#endif
|
|
};
|
|
}
|
|
|
|
#endif
|
|
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(118)">../include/libtorrent/piece_picker.hpp:669</a></td><td>should this be allocated lazily?</td></tr><tr id="118" style="display: none;" colspan="3"><td colspan="3"><h2>should this be allocated lazily?</h2><h4>../include/libtorrent/piece_picker.hpp:669</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> std::vector<downloading_piece>::const_iterator find_dl_piece(int queue, int index) const;
|
|
std::vector<downloading_piece>::iterator find_dl_piece(int queue, int index);
|
|
|
|
// returns an iterator to the downloading piece, whichever
|
|
// download list it may live in now
|
|
std::vector<downloading_piece>::iterator update_piece_state(std::vector<downloading_piece>::iterator dp);
|
|
|
|
// some compilers (e.g. gcc 2.95, does not inherit access
|
|
// privileges to nested classes)
|
|
private:
|
|
|
|
// the following vectors are mutable because they sometimes may
|
|
// be updated lazily, triggered by const functions
|
|
|
|
// this maps indices to number of peers that has this piece and
|
|
// index into the m_piece_info vectors.
|
|
// piece_pos::we_have_index means that we have the piece, so it
|
|
// doesn't exist in the piece_info buckets
|
|
// pieces with the filtered flag set doesn't have entries in
|
|
// the m_piece_info buckets either
|
|
<div style="background: #ffff00" width="100%"> mutable std::vector<piece_pos> m_piece_map;
|
|
</div>
|
|
// the number of seeds. These are not added to
|
|
// the availability counters of the pieces
|
|
int m_seeds;
|
|
|
|
// the number of pieces that have passed the hash check
|
|
int m_num_passed;
|
|
|
|
// this vector contains all piece indices that are pickable
|
|
		// sorted by priority. Pieces are in random order
|
|
// among pieces with the same priority
|
|
mutable std::vector<int> m_pieces;
|
|
|
|
// these are indices to the priority boundries inside
|
|
// the m_pieces vector. priority 0 always start at
|
|
// 0, priority 1 starts at m_priority_boundries[0] etc.
|
|
mutable std::vector<int> m_priority_boundries;
|
|
|
|
// each piece that's currently being downloaded
|
|
// has an entry in this list with block allocations.
|
|
		// i.e. it says which parts of the piece that
|
|
// is being downloaded. This list is ordered
|
|
// by piece index to make lookups efficient
|
|
// there are 3 buckets of downloading pieces, each
|
|
// is individually sorted by piece index.
|
|
// 0: downloading pieces with unrequested blocks
|
|
// 1: downloading pieces where every block is busy
|
|
// and some are still in the requested state
|
|
// 2: downloading pieces where every block is
|
|
// finished or writing
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(119)">../include/libtorrent/proxy_base.hpp:166</a></td><td>it would be nice to remember the bind port and bind once we know where the proxy is m_sock.bind(endpoint, ec);</td></tr><tr id="119" style="display: none;" colspan="3"><td colspan="3"><h2>it would be nice to remember the bind port and bind once we know where the proxy is
|
|
m_sock.bind(endpoint, ec);</h2><h4>../include/libtorrent/proxy_base.hpp:166</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> {
|
|
return m_sock.get_option(opt, ec);
|
|
}
|
|
|
|
#ifndef BOOST_NO_EXCEPTIONS
|
|
void bind(endpoint_type const& /* endpoint */)
|
|
{
|
|
// m_sock.bind(endpoint);
|
|
}
|
|
#endif
|
|
|
|
void bind(endpoint_type const& /* endpoint */, error_code& /* ec */)
|
|
{
|
|
// the reason why we ignore binds here is because we don't
|
|
// (necessarily) yet know what address family the proxy
|
|
// will resolve to, and binding to the wrong one would
|
|
// break our connection attempt later. The caller here
|
|
// doesn't necessarily know that we're proxying, so this
|
|
// bind address is based on the final endpoint, not the
|
|
// proxy.
|
|
<div style="background: #ffff00" width="100%"> }
|
|
</div>
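	// a sketch of the deferred bind this TODO describes (m_bind_endpoint is a
	// hypothetical member, not in the current class): stash the requested
	// endpoint here and call m_sock.bind() later, once the proxy has resolved
	// and the socket's address family is known.
	//
	//   void bind(endpoint_type const& endpoint, error_code&)
	//   { m_bind_endpoint = endpoint; }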
|
|
#ifndef BOOST_NO_EXCEPTIONS
|
|
void open(protocol_type const&)
|
|
{
|
|
// m_sock.open(p);
|
|
}
|
|
#endif
|
|
|
|
void open(protocol_type const&, error_code&)
|
|
{
|
|
// we need to ignore this for the same reason as stated
|
|
// for ignoring bind()
|
|
// m_sock.open(p, ec);
|
|
}
|
|
|
|
#ifndef BOOST_NO_EXCEPTIONS
|
|
void close()
|
|
{
|
|
m_remote_endpoint = endpoint_type();
|
|
m_sock.close();
|
|
m_resolver.cancel();
|
|
}
|
|
#endif
|
|
|
|
void close(error_code& ec)
|
|
{
|
|
m_remote_endpoint = endpoint_type();
|
|
m_sock.close(ec);
|
|
m_resolver.cancel();
|
|
}
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(120)">../include/libtorrent/session.hpp:856</a></td><td>add get_peer_class_type_filter() as well</td></tr><tr id="120" style="display: none;" colspan="3"><td colspan="3"><h2>add get_peer_class_type_filter() as well</h2><h4>../include/libtorrent/session.hpp:856</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> //
|
|
// The ``peer_class`` argument cannot be greater than 31. The bitmasks
|
|
// representing peer classes in the ``peer_class_filter`` are 32 bits.
|
|
//
|
|
// For more information, see peer-classes_.
|
|
void set_peer_class_filter(ip_filter const& f);
|
|
|
|
		// Sets and gets the *peer class type filter*. This controls automatic
|
|
// peer class assignments to peers based on what kind of socket it is.
|
|
//
|
|
// It does not only support assigning peer classes, it also supports
|
|
// removing peer classes based on socket type.
|
|
//
|
|
// The order of these rules being applied are:
|
|
//
|
|
// 1. peer-class IP filter
|
|
// 2. peer-class type filter, removing classes
|
|
// 3. peer-class type filter, adding classes
|
|
//
|
|
// For more information, see peer-classes_.
|
|
<div style="background: #ffff00" width="100%"> void set_peer_class_type_filter(peer_class_type_filter const& f);
|
|
</div>
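		// sketch of the matching getter this TODO asks for (not currently part
		// of the public API); aux::session_impl already has a
		// get_peer_class_type_filter() this could forward to.
		peer_class_type_filter get_peer_class_type_filter() const;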
|
|
// Creates a new peer class (see peer-classes_) with the given name. The
|
|
// returned integer is the new peer class' identifier. Peer classes may
|
|
// have the same name, so each invocation of this function creates a new
|
|
// class and returns a unique identifier.
|
|
//
|
|
// Identifiers are assigned from low numbers to higher. So if you plan on
|
|
// using certain peer classes in a call to `set_peer_class_filter()`_,
|
|
// make sure to create those early on, to get low identifiers.
|
|
//
|
|
// For more information on peer classes, see peer-classes_.
|
|
int create_peer_class(char const* name);
|
|
|
|
// This call dereferences the reference count of the specified peer
|
|
// class. When creating a peer class it's automatically referenced by 1.
|
|
// If you want to recycle a peer class, you may call this function. You
|
|
// may only call this function **once** per peer class you create.
|
|
// Calling it more than once for the same class will lead to memory
|
|
// corruption.
|
|
//
|
|
// Since peer classes are reference counted, this function will not
|
|
// remove the peer class if it's still assigned to torrents or peers. It
|
|
// will however remove it once the last peer and torrent drops their
|
|
// references to it.
|
|
//
|
|
// There is no need to call this function for custom peer classes. All
|
|
// peer classes will be properly destructed when the session object
|
|
// destructs.
|
|
//
|
|
// For more information on peer classes, see peer-classes_.
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(121)">../include/libtorrent/settings_pack.hpp:1074</a></td><td>deprecate this ``max_rejects`` is the number of piece requests we will reject in a row while a peer is choked before the peer is considered abusive and is disconnected.</td></tr><tr id="121" style="display: none;" colspan="3"><td colspan="3"><h2>deprecate this
|
|
``max_rejects`` is the number of piece requests we will reject in a row
|
|
while a peer is choked before the peer is considered abusive and is
|
|
disconnected.</h2><h4>../include/libtorrent/settings_pack.hpp:1074</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> auto_manage_startup,
|
|
|
|
// ``seeding_piece_quota`` is the number of pieces to send to a peer,
|
|
// when seeding, before rotating in another peer to the unchoke set.
|
|
// It defaults to 3 pieces, which means that when seeding, any peer we've
|
|
// sent more than this number of pieces to will be unchoked in favour of
|
|
// a choked peer.
|
|
seeding_piece_quota,
|
|
|
|
// ``max_sparse_regions`` is a limit of the number of *sparse regions* in
|
|
// a torrent. A sparse region is defined as a hole of pieces we have not
|
|
// yet downloaded, in between pieces that have been downloaded. This is
|
|
// used as a hack for windows vista which has a bug where you cannot
|
|
// write files with more than a certain number of sparse regions. This
|
|
// limit is not hard, it will be exceeded. Once it's exceeded, pieces
|
|
// that will maintain or decrease the number of sparse regions are
|
|
// prioritized. To disable this functionality, set this to 0. It defaults
|
|
// to 0 on all platforms except windows.
|
|
max_sparse_regions,
|
|
|
|
<div style="background: #ffff00" width="100%"> max_rejects,
|
|
</div>
|
|
// ``recv_socket_buffer_size`` and ``send_socket_buffer_size`` specifies
|
|
// the buffer sizes set on peer sockets. 0 (which is the default) means
|
|
// the OS default (i.e. don't change the buffer sizes). The socket buffer
|
|
// sizes are changed using setsockopt() with SOL_SOCKET/SO_RCVBUF and
|
|
// SO_SNDBUFFER.
|
|
recv_socket_buffer_size,
|
|
send_socket_buffer_size,
|
|
|
|
// ``file_checks_delay_per_block`` is the number of milliseconds to sleep
|
|
// in between disk read operations when checking torrents. This defaults
|
|
// to 0, but can be set to higher numbers to slow down the rate at which
|
|
// data is read from the disk while checking. This may be useful for
|
|
// background tasks that doesn't matter if they take a bit longer, as long
|
|
// as they leave disk I/O time for other processes.
|
|
file_checks_delay_per_block,
|
|
|
|
// ``read_cache_line_size`` is the number of blocks to read into the read
|
|
// cache when a read cache miss occurs. Setting this to 0 is essentially
|
|
// the same thing as disabling read cache. The number of blocks read
|
|
		// into the read cache is always capped by the piece boundary.
|
|
//
|
|
// When a piece in the write cache has ``write_cache_line_size`` contiguous
|
|
// blocks in it, they will be flushed. Setting this to 1 effectively
|
|
// disables the write cache.
|
|
read_cache_line_size,
|
|
write_cache_line_size,
|
|
|
|
// ``optimistic_disk_retry`` is the number of seconds from a disk write
|
|
// errors occur on a torrent until libtorrent will take it out of the
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(122)">../include/libtorrent/size_type.hpp:48</a></td><td>remove these and just use boost's types directly</td></tr><tr id="122" style="display: none;" colspan="3"><td colspan="3"><h2>remove these and just use boost's types directly</h2><h4>../include/libtorrent/size_type.hpp:48</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
|
POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
*/
|
|
|
|
#ifndef TORRENT_SIZE_TYPE_HPP_INCLUDED
|
|
#define TORRENT_SIZE_TYPE_HPP_INCLUDED
|
|
|
|
#ifdef _MSC_VER
|
|
#pragma warning(push, 1)
|
|
#endif
|
|
|
|
#include <boost/cstdint.hpp>
|
|
|
|
#ifdef _MSC_VER
|
|
#pragma warning(pop)
|
|
#endif
|
|
|
|
namespace libtorrent
|
|
{
|
|
<div style="background: #ffff00" width="100%"> typedef boost::int64_t size_type;
|
|
</div> typedef boost::uint64_t unsigned_size_type;
|
|
}
|
|
|
|
|
|
#endif
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(123)">../include/libtorrent/torrent.hpp:1194</a></td><td>this wastes 5 bits per file</td></tr><tr id="123" style="display: none;" colspan="3"><td colspan="3"><h2>this wastes 5 bits per file</h2><h4>../include/libtorrent/torrent.hpp:1194</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">#ifndef TORRENT_DISABLE_EXTENSIONS
|
|
typedef std::list<boost::shared_ptr<torrent_plugin> > extension_list_t;
|
|
extension_list_t m_extensions;
|
|
#endif
|
|
|
|
// used for tracker announces
|
|
deadline_timer m_tracker_timer;
|
|
|
|
// this is the upload and download statistics for the whole torrent.
|
|
// it's updated from all its peers once every second.
|
|
libtorrent::stat m_stat;
|
|
|
|
// -----------------------------
|
|
|
|
// used to resolve hostnames for web seeds
|
|
mutable tcp::resolver m_host_resolver;
|
|
|
|
// this vector is allocated lazily. If no file priorities are
|
|
// ever changed, this remains empty. Any unallocated slot
|
|
// implicitly means the file has priority 1.
|
|
<div style="background: #ffff00" width="100%"> std::vector<boost::uint8_t> m_file_priority;
|
|
</div>
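		// priorities only use values 0-7, i.e. 3 of these 8 bits. A packed
		// layout, e.g. two 4-bit entries per byte, would halve the footprint.
		// Hypothetical accessor for such a layout (not existing code):
		//
		//   boost::uint8_t file_priority(int f) const
		//   { return (m_file_priority[f / 2] >> ((f & 1) * 4)) & 0xf; }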
|
|
// this vector contains the number of bytes completely
|
|
// downloaded (as in passed-hash-check) in each file.
|
|
// this lets us trigger on individual files completing
|
|
// the vector is allocated lazily, when file progress
|
|
// is first queried by the client
|
|
std::vector<boost::uint64_t> m_file_progress;
|
|
|
|
// these are the pieces we're currently
|
|
// suggesting to peers.
|
|
std::vector<suggest_piece_t> m_suggested_pieces;
|
|
|
|
std::vector<announce_entry> m_trackers;
|
|
// this is an index into m_trackers
|
|
|
|
// this list is sorted by time_critical_piece::deadline
|
|
std::vector<time_critical_piece> m_time_critical_pieces;
|
|
|
|
std::string m_trackerid;
|
|
std::string m_username;
|
|
std::string m_password;
|
|
|
|
std::string m_save_path;
|
|
|
|
// if we don't have the metadata, this is a url to
|
|
// the torrent file
|
|
std::string m_url;
|
|
|
|
// if this was added from an RSS feed, this is the unique
|
|
// identifier in the feed.
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(124)">../include/libtorrent/torrent.hpp:1253</a></td><td>These two bitfields should probably be coalesced into one</td></tr><tr id="124" style="display: none;" colspan="3"><td colspan="3"><h2>These two bitfields should probably be coalesced into one</h2><h4>../include/libtorrent/torrent.hpp:1253</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> // the .torrent file from m_url
|
|
// std::vector<char> m_torrent_file_buf;
|
|
|
|
// this is a list of all pieces that we have announced
|
|
// as having, without actually having yet. If we receive
|
|
// a request for a piece in this list, we need to hold off
|
|
// on responding until we have completed the piece and
|
|
// verified its hash. If the hash fails, send reject to
|
|
// peers with outstanding requests, and dont_have to other
|
|
// peers. This vector is ordered, to make lookups fast.
|
|
std::vector<int> m_predictive_pieces;
|
|
|
|
// the performance counters of this session
|
|
counters& m_stats_counters;
|
|
|
|
// each bit represents a piece. a set bit means
|
|
// the piece has had its hash verified. This
|
|
// is only used in seed mode (when m_seed_mode
|
|
// is true)
|
|
|
|
<div style="background: #ffff00" width="100%"> bitfield m_verified;
|
|
</div> // this means there is an outstanding, async, operation
|
|
// to verify each piece that has a 1
|
|
bitfield m_verifying;
|
|
|
|
// set if there's an error on this torrent
|
|
error_code m_error;
|
|
|
|
// used if there is any resume data
|
|
boost::scoped_ptr<resume_data_t> m_resume_data;
|
|
|
|
// if the torrent is started without metadata, it may
|
|
// still be given a name until the metadata is received
|
|
// once the metadata is received this field will no
|
|
// longer be used and will be reset
|
|
boost::scoped_ptr<std::string> m_name;
|
|
|
|
storage_constructor_type m_storage_constructor;
|
|
|
|
// the posix time this torrent was added and when
|
|
// it was completed. If the torrent isn't yet
|
|
// completed, m_completed_time is 0
|
|
time_t m_added_time;
|
|
time_t m_completed_time;
|
|
|
|
// this was the last time _we_ saw a seed in this swarm
|
|
time_t m_last_seen_complete;
|
|
|
|
// this is the time last any of our peers saw a seed
|
|
// in this swarm
|
|
time_t m_swarm_last_seen_complete;
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(125)">../include/libtorrent/torrent.hpp:1571</a></td><td>There are 8 bits free here</td></tr><tr id="125" style="display: none;" colspan="3"><td colspan="3"><h2>There are 8 bits free here</h2><h4>../include/libtorrent/torrent.hpp:1571</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> // files belonging to it. When set, don't write any
|
|
// more blocks to disk!
|
|
bool m_deleted:1;
|
|
|
|
// pinned torrents are locked in RAM and won't be unloaded
|
|
// in favor of more active torrents. When the torrent is added,
|
|
// the user may choose to initialize this to 1, in which case
|
|
// it will never be unloaded from RAM
|
|
bool m_pinned:1;
|
|
|
|
// when this is false, we should unload the torrent as soon
|
|
		// as no other async. job needs the torrent loaded
|
|
bool m_should_be_loaded:1;
|
|
|
|
// ----
|
|
|
|
// the timestamp of the last piece passed for this torrent
|
|
// specified in session_time
|
|
boost::uint16_t m_last_download;
|
|
|
|
<div style="background: #ffff00" width="100%">
|
|
</div> // this is a second count-down to when we should tick the
|
|
// storage for this torrent. Ticking the storage is used
|
|
// to periodically flush the partfile metadata and possibly
|
|
// other deferred flushing. Any disk operation starts this
|
|
// counter (unless it's already counting down). 0 means no
|
|
// ticking is needed.
|
|
boost::uint8_t m_storage_tick;
|
|
|
|
// ----
|
|
|
|
// the timestamp of the last byte uploaded from this torrent
|
|
// specified in session_time
|
|
boost::uint16_t m_last_upload;
|
|
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(126)">../include/libtorrent/torrent.hpp:1587</a></td><td>There are 8 bits here</td></tr><tr id="126" style="display: none;" colspan="3"><td colspan="3"><h2>There are 8 bits here</h2><h4>../include/libtorrent/torrent.hpp:1587</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">
|
|
// the timestamp of the last piece passed for this torrent
|
|
// specified in session_time
|
|
boost::uint16_t m_last_download;
|
|
|
|
|
|
// this is a second count-down to when we should tick the
|
|
// storage for this torrent. Ticking the storage is used
|
|
// to periodically flush the partfile metadata and possibly
|
|
// other deferred flushing. Any disk operation starts this
|
|
// counter (unless it's already counting down). 0 means no
|
|
// ticking is needed.
|
|
boost::uint8_t m_storage_tick;
|
|
|
|
// ----
|
|
|
|
// the timestamp of the last byte uploaded from this torrent
|
|
// specified in session_time
|
|
boost::uint16_t m_last_upload;
|
|
|
|
<div style="background: #ffff00" width="100%">
|
|
</div> // if this is true, libtorrent may pause and resume
|
|
// this torrent depending on queuing rules. Torrents
|
|
// started with auto_managed flag set may be added in
|
|
// a paused state in case there are no available
|
|
// slots.
|
|
bool m_auto_managed:1;
|
|
|
|
enum { no_gauge_state = 0xf };
|
|
// the current stats gauge this torrent counts against
|
|
boost::uint32_t m_current_gauge_state:4;
|
|
|
|
// set to true while moving the storage
|
|
bool m_moving_storage:1;
|
|
|
|
// this is true if this torrent is considered inactive from the
|
|
// queuing mechanism's point of view. If a torrent doesn't transfer
|
|
// at high enough rates, it's inactive.
|
|
bool m_inactive:1;
|
|
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(127)">../include/libtorrent/torrent.hpp:1608</a></td><td>there's space for 1 bits here</td></tr><tr id="127" style="display: none;" colspan="3"><td colspan="3"><h2>there's space for 1 bits here</h2><h4>../include/libtorrent/torrent.hpp:1608</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">
|
|
// if this is true, libtorrent may pause and resume
|
|
// this torrent depending on queuing rules. Torrents
|
|
// started with auto_managed flag set may be added in
|
|
// a paused state in case there are no available
|
|
// slots.
|
|
bool m_auto_managed:1;
|
|
|
|
enum { no_gauge_state = 0xf };
|
|
// the current stats gauge this torrent counts against
|
|
boost::uint32_t m_current_gauge_state:4;
|
|
|
|
// set to true while moving the storage
|
|
bool m_moving_storage:1;
|
|
|
|
// this is true if this torrent is considered inactive from the
|
|
// queuing mechanism's point of view. If a torrent doesn't transfer
|
|
// at high enough rates, it's inactive.
|
|
bool m_inactive:1;
|
|
|
|
<div style="background: #ffff00" width="100%">
|
|
</div>// ----
|
|
|
|
// the scrape data from the tracker response, this
|
|
// is optional and may be 0xffffff
|
|
unsigned int m_downloaded:24;
|
|
|
|
// the timestamp of the last scrape request to
|
|
// one of the trackers in this torrent
|
|
// specified in session_time
|
|
boost::uint16_t m_last_scrape;
|
|
|
|
// ----
|
|
|
|
// progress parts per million (the number of
|
|
// millionths of completeness)
|
|
unsigned int m_progress_ppm:20;
|
|
|
|
// the number of seconds this torrent has been under the inactive
|
|
// threshold in terms of sending and receiving data. When this counter
|
|
// reaches the settings.inactive_torrent_timeout it will be considered
|
|
// inactive and possibly open up another queue slot, to start another,
|
|
// queued, torrent. Every second it's above the threshold
|
|
boost::int16_t m_inactive_counter;
|
|
|
|
// if this is set, accept the save path saved in the resume data, if
|
|
// present
|
|
bool m_use_resume_save_path:1;
|
|
|
|
#if TORRENT_USE_ASSERTS
|
|
public:
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(128)">../include/libtorrent/torrent_info.hpp:124</a></td><td>include the number of peers received from this tracker, at last announce</td></tr><tr id="128" style="display: none;" colspan="3"><td colspan="3"><h2>include the number of peers received from this tracker, at last announce</h2><h4>../include/libtorrent/torrent_info.hpp:124</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">
|
|
// if this tracker failed the last time it was contacted
|
|
// this error code specifies what error occurred
|
|
error_code last_error;
|
|
|
|
// returns the number of seconds to the next announce on
|
|
// this tracker. ``min_announce_in()`` returns the number of seconds until we are
|
|
// allowed to force another tracker update with this tracker.
|
|
//
|
|
// If the last time this tracker was contacted failed, ``last_error`` is the error
|
|
// code describing what error occurred.
|
|
int next_announce_in() const;
|
|
int min_announce_in() const;
|
|
|
|
// the time of next tracker announce
|
|
ptime next_announce;
|
|
|
|
// no announces before this time
|
|
ptime min_announce;
|
|
|
|
<div style="background: #ffff00" width="100%">
|
|
</div> // these are either -1 or the scrape information this tracker last responded with. *incomplete* is
|
|
// the current number of downloaders in the swarm, *complete* is the current number
|
|
// of seeds in the swarm and *downloaded* is the cumulative number of completed
|
|
// downloads of this torrent, since the beginning of time (from this tracker's point
|
|
// of view).
|
|
|
|
// if this tracker has returned scrape data, these fields are filled
|
|
// in with valid numbers. Otherwise they are set to -1.
|
|
// the number of current downloaders
|
|
int scrape_incomplete;
|
|
int scrape_complete;
|
|
int scrape_downloaded;
|
|
|
|
// the tier this tracker belongs to
|
|
boost::uint8_t tier;
|
|
|
|
// the max number of failures to announce to this tracker in
|
|
// a row, before this tracker is not used anymore. 0 means unlimited
|
|
boost::uint8_t fail_limit;
|
|
|
|
// the number of times in a row we have failed to announce to this
|
|
// tracker.
|
|
boost::uint8_t fails:7;
|
|
|
|
// true while we're waiting for a response from the tracker.
|
|
bool updating:1;
|
|
|
|
// flags for the source bitmask, each indicating where
|
|
// we heard about this tracker
|
|
enum tracker_source
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(129)">../include/libtorrent/upnp.hpp:113</a></td><td>support using the windows API for UPnP operations as well</td></tr><tr id="129" style="display: none;" colspan="3"><td colspan="3"><h2>support using the windows API for UPnP operations as well</h2><h4>../include/libtorrent/upnp.hpp:113</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> // specific port
|
|
external_port_must_be_wildcard = 727
|
|
};
|
|
|
|
// hidden
|
|
TORRENT_EXPORT boost::system::error_code make_error_code(error_code_enum e);
|
|
}
|
|
|
|
TORRENT_EXPORT boost::system::error_category& get_upnp_category();
|
|
|
|
// int: port-mapping index
|
|
// address: external address as queried from router
|
|
// int: external port
|
|
// std::string: error message
|
|
// an empty string as error means success
|
|
// a port-mapping index of -1 means it's
|
|
// an informational log message
|
|
typedef boost::function<void(int, address, int, error_code const&)> portmap_callback_t;
|
|
typedef boost::function<void(char const*)> log_callback_t;
|
|
|
|
<div style="background: #ffff00" width="100%">class TORRENT_EXTRA_EXPORT upnp : public intrusive_ptr_base<upnp>
|
|
</div>{
|
|
public:
|
|
upnp(io_service& ios, connection_queue& cc
|
|
, address const& listen_interface, std::string const& user_agent
|
|
, portmap_callback_t const& cb, log_callback_t const& lcb
|
|
, bool ignore_nonrouters, void* state = 0);
|
|
~upnp();
|
|
|
|
void* drain_state();
|
|
|
|
enum protocol_type { none = 0, udp = 1, tcp = 2 };
|
|
|
|
// Attempts to add a port mapping for the specified protocol. Valid protocols are
|
|
// ``upnp::tcp`` and ``upnp::udp`` for the UPnP class and ``natpmp::tcp`` and
|
|
// ``natpmp::udp`` for the NAT-PMP class.
|
|
//
|
|
// ``external_port`` is the port on the external address that will be mapped. This
|
|
// is a hint, you are not guaranteed that this port will be available, and it may
|
|
// end up being something else. In the portmap_alert_ notification, the actual
|
|
// external port is reported.
|
|
//
|
|
// ``local_port`` is the port in the local machine that the mapping should forward
|
|
// to.
|
|
//
|
|
// The return value is an index that identifies this port mapping. This is used
|
|
// to refer to mappings that fails or succeeds in the portmap_error_alert_ and
|
|
// portmap_alert_ respectively. If The mapping fails immediately, the return value
|
|
// is -1, which means failure. There will not be any error alert notification for
|
|
// mappings that fail with a -1 return value.
|
|
int add_mapping(protocol_type p, int external_port, int local_port);
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(130)">../include/libtorrent/utp_stream.hpp:391</a></td><td>implement blocking write. Low priority since it's not used (yet)</td></tr><tr id="130" style="display: none;" colspan="3"><td colspan="3"><h2>implement blocking write. Low priority since it's not used (yet)</h2><h4>../include/libtorrent/utp_stream.hpp:391</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> for (typename Mutable_Buffers::const_iterator i = buffers.begin()
|
|
, end(buffers.end()); i != end; ++i)
|
|
{
|
|
using asio::buffer_cast;
|
|
using asio::buffer_size;
|
|
add_read_buffer(buffer_cast<void*>(*i), buffer_size(*i));
|
|
#if TORRENT_USE_ASSERTS
|
|
buf_size += buffer_size(*i);
|
|
#endif
|
|
}
|
|
std::size_t ret = read_some(true);
|
|
TORRENT_ASSERT(ret <= buf_size);
|
|
TORRENT_ASSERT(ret > 0);
|
|
return ret;
|
|
}
|
|
|
|
template <class Const_Buffers>
|
|
std::size_t write_some(Const_Buffers const& /* buffers */, error_code& /* ec */)
|
|
{
|
|
TORRENT_ASSERT(false && "not implemented!");
|
|
<div style="background: #ffff00" width="100%"> return 0;
|
|
</div> }
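	// how a blocking write could mirror the blocking read_some() above
	// (sketch; write_some_impl is a hypothetical blocking flush, since the
	// current implementation only drives writes through the async path):
	//
	//   template <class Const_Buffers>
	//   std::size_t write_some(Const_Buffers const& buffers, error_code& ec)
	//   {
	//       for (typename Const_Buffers::const_iterator i = buffers.begin()
	//           , end(buffers.end()); i != end; ++i)
	//           add_write_buffer(asio::buffer_cast<void const*>(*i)
	//               , asio::buffer_size(*i));
	//       return write_some_impl(true, ec);
	//   }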
|
|
|
|
#ifndef BOOST_NO_EXCEPTIONS
|
|
template <class Mutable_Buffers>
|
|
std::size_t read_some(Mutable_Buffers const& buffers)
|
|
{
|
|
error_code ec;
|
|
std::size_t ret = read_some(buffers, ec);
|
|
if (ec)
|
|
boost::throw_exception(boost::system::system_error(ec));
|
|
return ret;
|
|
}
|
|
|
|
template <class Const_Buffers>
|
|
std::size_t write_some(Const_Buffers const& buffers)
|
|
{
|
|
error_code ec;
|
|
std::size_t ret = write_some(buffers, ec);
|
|
if (ec)
|
|
boost::throw_exception(boost::system::system_error(ec));
|
|
return ret;
|
|
}
|
|
#endif
|
|
|
|
template <class Const_Buffers, class Handler>
|
|
void async_write_some(Const_Buffers const& buffers, Handler const& handler)
|
|
{
|
|
if (m_impl == 0)
|
|
{
|
|
m_io_service.post(boost::bind<void>(handler, asio::error::not_connected, 0));
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(131)">../include/libtorrent/kademlia/item.hpp:61</a></td><td>since this is a public function, it should probably be moved out of this header and into one with other public functions.</td></tr><tr id="131" style="display: none;" colspan="3"><td colspan="3"><h2>since this is a public function, it should probably be moved
|
|
out of this header and into one with other public functions.</h2><h4>../include/libtorrent/kademlia/item.hpp:61</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">#include <boost/array.hpp>
|
|
|
|
namespace libtorrent { namespace dht
|
|
{
|
|
|
|
// calculate the target hash for an immutable item.
|
|
sha1_hash TORRENT_EXTRA_EXPORT item_target_id(
|
|
std::pair<char const*, int> v);
|
|
|
|
// calculate the target hash for a mutable item.
|
|
sha1_hash TORRENT_EXTRA_EXPORT item_target_id(std::pair<char const*, int> salt
|
|
, char const* pk);
|
|
|
|
bool TORRENT_EXTRA_EXPORT verify_mutable_item(
|
|
std::pair<char const*, int> v
|
|
, std::pair<char const*, int> salt
|
|
, boost::uint64_t seq
|
|
, char const* pk
|
|
, char const* sig);
|
|
|
|
<div style="background: #ffff00" width="100%">
|
|
</div>// given a byte range ``v`` and an optional byte range ``salt``, a
|
|
// sequence number, public key ``pk`` (must be 32 bytes) and a secret key
|
|
// ``sk`` (must be 64 bytes), this function produces a signature which
|
|
// is written into a 64 byte buffer pointed to by ``sig``. The caller
|
|
// is responsible for allocating the destination buffer that's passed in
|
|
// as the ``sig`` argument. Typically it would be allocated on the stack.
|
|
void TORRENT_EXPORT sign_mutable_item(
|
|
std::pair<char const*, int> v
|
|
, std::pair<char const*, int> salt
|
|
, boost::uint64_t seq
|
|
, char const* pk
|
|
, char const* sk
|
|
, char* sig);
|
|
|
|
enum
|
|
{
|
|
item_pk_len = 32,
|
|
item_sk_len = 64,
|
|
item_sig_len = 64
|
|
};
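// usage sketch for sign_mutable_item() as documented above; the value, salt,
// sequence number and key buffers are placeholders supplied by the caller:
//
//   char sig[item_sig_len];
//   sign_mutable_item(std::make_pair(value_buf, value_len)
//       , std::make_pair(salt_buf, salt_len)
//       , seq, public_key, secret_key, sig);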

class TORRENT_EXTRA_EXPORT item
{
public:
item() : m_seq(0), m_mutable(false) {}
item(char const* pk, std::string const& salt);
item(entry const& v) { assign(v); }
item(entry const& v
, std::pair<char const*, int> salt
, boost::uint64_t seq, char const* pk, char const* sk);
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(132)">../include/libtorrent/aux_/session_impl.hpp:412</a></td><td>move the login info into the tracker_request object</td></tr><tr id="132" style="display: none;" colspan="3"><td colspan="3"><h2>move the login info into the tracker_request object</h2><h4>../include/libtorrent/aux_/session_impl.hpp:412</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">
void on_lsd_announce(error_code const& e);

// called when a port mapping is successful, or a router returns
// a failure to map a port
void on_port_mapping(int mapping, address const& ip, int port
, error_code const& ec, int nat_transport);

bool is_aborted() const { return m_abort; }
bool is_paused() const { return m_paused; }

void pause();
void resume();

void set_ip_filter(ip_filter const& f);
ip_filter const& get_ip_filter() const;

void set_port_filter(port_filter const& f);
port_filter const& get_port_filter() const;

<div style="background: #ffff00" width="100%"> void queue_tracker_request(tracker_request& req
</div> , std::string login, boost::weak_ptr<request_callback> c
, boost::uint32_t key);
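
// hedged sketch of the refactor suggested by the TODO above (this is not
// the current libtorrent API): if tracker_request carried the login info
// itself, e.g. via a hypothetical member
//
//   std::string auth; // "user:password" for the tracker, if any
//
// then the extra std::string parameter could be dropped and the overload
// would shrink to something like:
//
//   void queue_tracker_request(tracker_request& req
//       , boost::weak_ptr<request_callback> c
//       , boost::uint32_t key);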

// ==== peer class operations ====

// implements session_interface
void set_peer_classes(peer_class_set* s, address const& a, int st);
peer_class_pool const& peer_classes() const { return m_classes; }
peer_class_pool& peer_classes() { return m_classes; }
bool ignore_unchoke_slots_set(peer_class_set const& set) const;
int copy_pertinent_channels(peer_class_set const& set
, int channel, bandwidth_channel** dst, int max);
int use_quota_overhead(peer_class_set& set, int amount_down, int amount_up);
bool use_quota_overhead(bandwidth_channel* ch, int channel, int amount);

int create_peer_class(char const* name);
void delete_peer_class(int cid);
void set_peer_class_filter(ip_filter const& f);
ip_filter const& get_peer_class_filter() const;

void set_peer_class_type_filter(peer_class_type_filter f);
peer_class_type_filter get_peer_class_type_filter();

peer_class_info get_peer_class(int cid);
void set_peer_class(int cid, peer_class_info const& pci);

bool is_listening() const;

#ifndef TORRENT_DISABLE_EXTENSIONS
void add_extensions_to_torrent(
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(133)">../include/libtorrent/aux_/session_impl.hpp:900</a></td><td>should this be renamed m_outgoing_interfaces?</td></tr><tr id="133" style="display: none;" colspan="3"><td colspan="3"><h2>should this be renamed m_outgoing_interfaces?</h2><h4>../include/libtorrent/aux_/session_impl.hpp:900</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> // listen socket. For each retry the port number
// is incremented by one
int m_listen_port_retries;

// the addresses or device names of the interfaces we are supposed to
// listen on. if empty, it means that we should let the os decide
// which interface to listen on
std::vector<std::pair<std::string, int> > m_listen_interfaces;
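// (illustrative example, not from the original source: a typical entry
// would be {"0.0.0.0", 6881} to listen on all IPv4 interfaces on port
// 6881, or {"eth0", 6881} to listen on a specific device)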

// keep this around until everything uses the list of interfaces
// instead.
tcp::endpoint m_listen_interface;

// the network interfaces outgoing connections are opened through. If
// there is more than one, they are used in a round-robin fashion.
// each element is a device name or IP address (in string form) and
// a port number. The port determines which port to bind the listen
// socket to, and the device or IP determines which network adapter
// to be used. If no adapter with the specified name exists, the listen
// socket fails.
<div style="background: #ffff00" width="100%"> std::vector<std::string> m_net_interfaces;
</div>
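// hedged illustration of the round-robin selection described above (not
// the actual implementation): with m_interface_index as the rotating
// cursor, picking the next outgoing interface might look like
//
//   std::string const& iface = m_net_interfaces[
//       m_interface_index++ % m_net_interfaces.size()];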
// if we're listening on an IPv6 interface
// this is one of the non-local IPv6 interfaces
// on this machine
tcp::endpoint m_ipv6_interface;
tcp::endpoint m_ipv4_interface;

// since we might be listening on multiple interfaces
// we might need more than one listen socket
std::list<listen_socket_t> m_listen_sockets;

#if TORRENT_USE_I2P
i2p_connection m_i2p_conn;
boost::shared_ptr<socket_type> m_i2p_listen_socket;
#endif

#ifdef TORRENT_USE_OPENSSL
boost::asio::ssl::context* ssl_ctx() { return &m_ssl_ctx; }
void ssl_handshake(error_code const& ec, boost::shared_ptr<socket_type> s);
#endif

// when a socks proxy is used for peers, also
// listen for incoming connections on a socks connection
boost::shared_ptr<socket_type> m_socks_listen_socket;
boost::uint16_t m_socks_listen_port;

// round-robin index into m_net_interfaces
mutable boost::uint8_t m_interface_index;

void open_new_incoming_socks_connection();
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(134)">../include/libtorrent/aux_/session_interface.hpp:200</a></td><td>it would be nice to not have this be part of session_interface</td></tr><tr id="134" style="display: none;" colspan="3"><td colspan="3"><h2>it would be nice to not have this be part of session_interface</h2><h4>../include/libtorrent/aux_/session_interface.hpp:200</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> virtual boost::uint16_t listen_port() const = 0;
virtual boost::uint16_t ssl_listen_port() const = 0;

// used to (potentially) issue socket write calls onto multiple threads
virtual void post_socket_job(socket_job& j) = 0;

// load the specified torrent. also evict one torrent, except
// for the one specified, if we are at the limit of loaded torrents
virtual bool load_torrent(torrent* t) = 0;

// bump the specified torrent to make it the most recently used one
// in the torrent LRU (i.e. the least likely to get unloaded)
virtual void bump_torrent(torrent* t, bool back = true) = 0;

// ask for which interface and port to bind outgoing peer connections on
virtual tcp::endpoint bind_outgoing_socket(socket_type& s, address const&
remote_address, error_code& ec) const = 0;
virtual bool verify_bound_address(address const& addr, bool utp
, error_code& ec) = 0;

<div style="background: #ffff00" width="100%"> virtual proxy_settings proxy() const = 0;
</div>
#if TORRENT_USE_I2P
virtual proxy_settings i2p_proxy() const = 0;
virtual char const* i2p_session() const = 0;
#endif

virtual void prioritize_connections(boost::weak_ptr<torrent> t) = 0;

virtual tcp::endpoint get_ipv6_interface() const = 0;
virtual tcp::endpoint get_ipv4_interface() const = 0;

virtual void trigger_auto_manage() = 0;

virtual void apply_settings_pack(settings_pack* pack) = 0;
virtual session_settings const& settings() const = 0;

virtual void queue_tracker_request(tracker_request& req
, std::string login, boost::weak_ptr<request_callback> c
, boost::uint32_t key) = 0;

// peer-classes
virtual void set_peer_classes(peer_class_set* s, address const& a, int st) = 0;
virtual peer_class_pool const& peer_classes() const = 0;
virtual peer_class_pool& peer_classes() = 0;
virtual bool ignore_unchoke_slots_set(peer_class_set const& set) const = 0;
virtual int copy_pertinent_channels(peer_class_set const& set
, int channel, bandwidth_channel** dst, int max) = 0;
virtual int use_quota_overhead(peer_class_set& set, int amount_down, int amount_up) = 0;

virtual bandwidth_manager* get_bandwidth_manager(int channel) = 0;
</pre></td></tr></table></body></html>