<html><head>
<script type="text/javascript">
/* <![CDATA[ */
var expanded = -1
function expand(id) {
	if (expanded != -1) {
		var ctx = document.getElementById(expanded);
		ctx.style.display = "none";
		// if we're expanding the field that's already
		// expanded, just collapse it
		var no_expand = id == expanded;
		expanded = -1;
		if (no_expand) return;
	}
	var ctx = document.getElementById(id);
	ctx.style.display = "table-row";
	expanded = id;
}
/* ]]> */
</script>

</head><body>
<h1>libtorrent todo-list</h1>
<span style="color: #f00">0 urgent</span>
<span style="color: #f77">21 important</span>
<span style="color: #3c3">61 relevant</span>
<span style="color: #77f">7 feasible</span>
<span style="color: #999">177 notes</span>
<table width="100%" border="1" style="border-collapse: collapse;"><tr style="background: #fcc"><td>relevance 3</td><td><a href="javascript:expand(0)">../src/file.cpp:538</a></td><td>find out what error code is reported when the filesystem does not support hard links.</td></tr><tr id="0" style="display: none;" colspan="3"><td colspan="3"><h2>find out what error code is reported when the filesystem
does not support hard links.</h2><h4>../src/file.cpp:538</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">	{
#ifdef TORRENT_WINDOWS

#if TORRENT_USE_WSTRING
#define CreateHardLink_ CreateHardLinkW
		std::wstring n_exist = convert_to_wstring(file);
		std::wstring n_link = convert_to_wstring(link);
#else
#define CreateHardLink_ CreateHardLinkA
		std::string n_exist = convert_to_native(file);
		std::string n_link = convert_to_native(link);
#endif
		BOOL ret = CreateHardLink_(n_link.c_str(), n_exist.c_str(), NULL);
		if (ret)
		{
			ec.clear();
			return;
		}

		// something failed. Does the filesystem not support hard links?
<div style="background: #ffff00" width="100%">		DWORD error = GetLastError();
</div>		if (error != ERROR_NOT_SUPPORTED || error != ERROR_ACCESS_DENIED)
		{
			// it's possible CreateHardLink will copy the file internally too,
			// if the filesystem does not support it.
			ec.assign(GetLastError(), system_category());
			return;
		}

		// fall back to making a copy

#else

		std::string n_exist = convert_to_native(file);
		std::string n_link = convert_to_native(link);

		// assume posix's link() function exists
		int ret = ::link(n_exist.c_str(), n_link.c_str());

		if (ret == 0)
		{
			ec.clear();
			return;
		}

		// most errors are passed through, except for the ones that indicate that
		// hard links are not supported and require a copy.
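
	// ----------------------------------------------------------------------
	// editor's sketch, not part of file.cpp: a tiny standalone probe that
	// answers this TODO empirically. Build it and run it on a filesystem
	// without hard-link support (e.g. FAT32) to see which error is reported
	// there. For what it's worth, the Linux link(2) man page documents EPERM
	// for "the filesystem does not support the creation of hard links"; the
	// Windows side is best confirmed by running the probe.
#include <cstdio>
#include <cerrno>
#include <cstring>
#ifdef _WIN32
#include <windows.h>
#else
#include <unistd.h>
#endif

	int main(int argc, char* argv[])
	{
		if (argc != 3)
		{
			std::fprintf(stderr, "usage: probe <existing-file> <link-name>\n");
			return 1;
		}
#ifdef _WIN32
		if (!CreateHardLinkA(argv[2], argv[1], NULL))
			std::fprintf(stderr, "CreateHardLink failed: %lu\n", GetLastError());
#else
		if (::link(argv[1], argv[2]) != 0)
			std::fprintf(stderr, "link() failed: %d (%s)\n", errno, std::strerror(errno));
#endif
		return 0;
	}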
</pre></td></tr><tr style="background: #fcc"><td>relevance 3</td><td><a href="javascript:expand(1)">../src/peer_connection.cpp:2962</a></td><td>instead of having to ask the torrent whether it's in graceful pause mode or not, the peers should keep that state (and the torrent should update them when it enters graceful pause). When a peer enters graceful pause mode, it should cancel all outstanding requests and clear its request queue.</td></tr><tr id="1" style="display: none;" colspan="3"><td colspan="3"><h2>instead of having to ask the torrent whether it's in graceful
pause mode or not, the peers should keep that state (and the torrent
should update them when it enters graceful pause). When a peer enters
graceful pause mode, it should cancel all outstanding requests and
clear its request queue.</h2><h4>../src/peer_connection.cpp:2962</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">		// to disk or are in the disk write cache
		if (picker.is_piece_finished(p.piece) && !was_finished)
		{
#if TORRENT_USE_INVARIANT_CHECKS
			check_postcondition post_checker2_(t, false);
#endif
			t->verify_piece(p.piece);
		}

		check_graceful_pause();

		if (is_disconnecting()) return;

		if (request_a_block(*t, *this))
			m_counters.inc_stats_counter(counters::incoming_piece_picks);
		send_block_requests();
	}

	void peer_connection::check_graceful_pause()
	{
<div style="background: #ffff00" width="100%">		boost::shared_ptr<torrent> t = m_torrent.lock();
</div>		if (!t || !t->graceful_pause()) return;

		if (m_outstanding_bytes > 0) return;

#ifndef TORRENT_DISABLE_LOGGING
		peer_log(peer_log_alert::info, "GRACEFUL_PAUSE", "NO MORE DOWNLOAD");
#endif
		disconnect(errors::torrent_paused, op_bittorrent);
	}

	void peer_connection::on_disk_write_complete(disk_io_job const* j
		, peer_request p, boost::shared_ptr<torrent> t)
	{
		TORRENT_ASSERT(is_single_thread());
		torrent_ref_holder h(t.get(), "async_write");
		if (t) t->dec_refcount("async_write");

#ifndef TORRENT_DISABLE_LOGGING
		peer_log(peer_log_alert::info, "FILE_ASYNC_WRITE_COMPLETE", "ret: %d piece: %d s: %x l: %x e: %s"
			, j->ret, p.piece, p.start, p.length, j->error.ec.message().c_str());
#endif

		m_counters.inc_stats_counter(counters::queued_write_bytes, -p.length);
		m_outstanding_writing_bytes -= p.length;

		TORRENT_ASSERT(m_outstanding_writing_bytes >= 0);

		// flush send buffer at the end of
		// this burst of disk events
		// m_ses.cork_burst(this);
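
	// ----------------------------------------------------------------------
	// editor's sketch, standalone model, not libtorrent code: what the TODO
	// above suggests -- the torrent pushes its graceful-pause state to each
	// peer once, and a peer entering graceful pause drops its requests,
	// instead of every peer re-asking the torrent. All names below are
	// hypothetical stand-ins for the real classes.
#include <vector>

	struct model_peer
	{
		bool graceful_pause = false;
		std::vector<int> request_queue;   // blocks queued but not yet requested
		std::vector<int> download_queue;  // requests already sent to the peer

		void set_graceful_pause(bool const b)
		{
			graceful_pause = b;
			if (!b) return;
			// entering graceful pause: the real code would also send CANCEL
			// messages for everything in download_queue
			download_queue.clear();
			request_queue.clear();
		}
	};

	struct model_torrent
	{
		std::vector<model_peer*> connections;
		bool graceful_paused = false;

		void enter_graceful_pause()
		{
			graceful_paused = true;
			for (model_peer* p : connections)
				p->set_graceful_pause(true);
		}
	};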
</pre></td></tr><tr style="background: #fcc"><td>relevance 3</td><td><a href="javascript:expand(2)">../src/peer_connection.cpp:3832</a></td><td>once peers are properly put in graceful pause mode, they can cancel all outstanding requests and this test can be removed.</td></tr><tr id="2" style="display: none;" colspan="3"><td colspan="3"><h2>once peers are properly put in graceful pause mode, they can
cancel all outstanding requests and this test can be removed.</h2><h4>../src/peer_connection.cpp:3832</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">	}

		TORRENT_ASSERT(piece >= 0 && piece < m_sent_suggested_pieces.size());

		if (m_sent_suggested_pieces[piece]) return;
		m_sent_suggested_pieces.set_bit(piece);

		write_suggest(piece);
	}

	void peer_connection::send_block_requests()
	{
		TORRENT_ASSERT(is_single_thread());
		INVARIANT_CHECK;

		boost::shared_ptr<torrent> t = m_torrent.lock();
		TORRENT_ASSERT(t);

		if (m_disconnecting) return;

<div style="background: #ffff00" width="100%">		if (t->graceful_pause()) return;
</div>
		// we can't download pieces in these states
		if (t->state() == torrent_status::checking_files
			|| t->state() == torrent_status::checking_resume_data
			|| t->state() == torrent_status::downloading_metadata
			|| t->state() == torrent_status::allocating)
			return;

		if (int(m_download_queue.size()) >= m_desired_queue_size
			|| t->upload_mode()) return;

		bool empty_download_queue = m_download_queue.empty();

		while (!m_request_queue.empty()
			&& (int(m_download_queue.size()) < m_desired_queue_size
				|| m_queued_time_critical > 0))
		{
			pending_block block = m_request_queue.front();

			m_request_queue.erase(m_request_queue.begin());
			if (m_queued_time_critical) --m_queued_time_critical;

			// if we're a seed, we don't have a piece picker
			// so we don't have to worry about invariants getting
			// out of sync with it
			if (!t->has_picker()) continue;

			// this can happen if a block times out, is re-requested and
			// then arrives "unexpectedly"
			if (t->picker().is_finished(block.block)
</pre></td></tr><tr style="background: #fcc"><td>relevance 3</td><td><a href="javascript:expand(3)">../src/session_impl.cpp:2065</a></td><td>port map SSL udp socket here</td></tr><tr id="3" style="display: none;" colspan="3"><td colspan="3"><h2>port map SSL udp socket here</h2><h4>../src/session_impl.cpp:2065</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> udp::endpoint ssl_bind_if(m_listen_interface.address(), ssl_port);

		// if ssl port is 0, we don't want to listen on an SSL port
		if (ssl_port != 0)
		{
			m_ssl_udp_socket.bind(ssl_bind_if, ec);
			if (ec)
			{
#ifndef TORRENT_DISABLE_LOGGING
				session_log("SSL: cannot bind to UDP interface \"%s\": %s"
					, print_endpoint(m_listen_interface).c_str(), ec.message().c_str());
#endif
				if (m_alerts.should_post<listen_failed_alert>())
				{
					error_code err;
					m_alerts.emplace_alert<listen_failed_alert>(ssl_bind_if.address().to_string()
						, ssl_port, listen_failed_alert::bind, ec, listen_failed_alert::utp_ssl);
				}
				ec.clear();
			}
<div style="background: #ffff00" width="100%">		}
</div>#endif // TORRENT_USE_OPENSSL
</pre></td></tr><tr style="background: #fcc"><td>relevance 3</td><td><a href="javascript:expand(4)">../src/session_impl.cpp:3806</a></td><td>it would probably make sense to have a separate list of peers that are eligible for optimistic unchoke, similar to the torrents perhaps this could even iterate over the pool allocators of torrent_peer objects. It could probably be done in a single pass and collect the n best candidates</td></tr><tr id="4" style="display: none;" colspan="3"><td colspan="3"><h2>it would probably make sense to have a separate list of peers
that are eligible for optimistic unchoke, similar to the torrents
perhaps this could even iterate over the pool allocators of
torrent_peer objects. It could probably be done in a single pass and
collect the n best candidates</h2><h4>../src/session_impl.cpp:3806</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">			, torrent_peer const* const r)
		{
			return l->last_optimistically_unchoked
				< r->last_optimistically_unchoked;
		}
	}

	void session_impl::recalculate_optimistic_unchoke_slots()
	{
		INVARIANT_CHECK;

		TORRENT_ASSERT(is_single_thread());
		if (m_stats_counters[counters::num_unchoke_slots] == 0) return;

		std::vector<torrent_peer*> opt_unchoke;

		// collect the currently optimistically unchoked peers here, so we can
		// choke them when we've found new optimistic unchoke candidates.
		std::vector<torrent_peer*> prev_opt_unchoke;

<div style="background: #ffff00" width="100%">		for (connection_map::iterator i = m_connections.begin()
</div>			, end(m_connections.end()); i != end; ++i)
		{
			peer_connection* p = i->get();
			TORRENT_ASSERT(p);
			torrent_peer* pi = p->peer_info_struct();
			if (!pi) continue;
			if (pi->web_seed) continue;

			if (pi->optimistically_unchoked)
			{
				prev_opt_unchoke.push_back(pi);
			}

			torrent* t = p->associated_torrent().lock().get();
			if (!t) continue;
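
	// ----------------------------------------------------------------------
	// editor's sketch, standalone, not libtorrent code: collecting the n best
	// optimistic-unchoke candidates in a single pass, as the TODO above
	// suggests, using a bounded heap keyed on last_optimistically_unchoked
	// instead of building the full vector and partial_sort()ing it. The
	// candidate struct is a stand-in for torrent_peer.
#include <cstdint>
#include <cstddef>
#include <queue>
#include <vector>

	struct candidate
	{
		std::uint32_t last_optimistically_unchoked; // lower == waited longer
		int peer_index;
	};

	struct newer_first
	{
		bool operator()(candidate const& l, candidate const& r) const
		{ return l.last_optimistically_unchoked < r.last_optimistically_unchoked; }
	};

	std::vector<candidate> pick_optimistic_unchokes(
		std::vector<candidate> const& eligible, std::size_t const n)
	{
		// keep the n oldest timestamps seen so far; the heap top is the newest
		// of those, i.e. the one to evict when a better candidate shows up
		std::priority_queue<candidate, std::vector<candidate>, newer_first> best;
		for (candidate const& c : eligible)
		{
			if (best.size() < n) { best.push(c); continue; }
			if (c.last_optimistically_unchoked < best.top().last_optimistically_unchoked)
			{
				best.pop();
				best.push(c);
			}
		}
		std::vector<candidate> out;
		while (!best.empty()) { out.push_back(best.top()); best.pop(); }
		return out;
	}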
</pre></td></tr><tr style="background: #fcc"><td>relevance 3</td><td><a href="javascript:expand(5)">../src/session_impl.cpp:3828</a></td><td>peers should know whether their torrent is paused or not, instead of having to ask it over and over again</td></tr><tr id="5" style="display: none;" colspan="3"><td colspan="3"><h2>peers should know whether their torrent is paused or not,
instead of having to ask it over and over again</h2><h4>../src/session_impl.cpp:3828</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">		// choke them when we've found new optimistic unchoke candidates.
		std::vector<torrent_peer*> prev_opt_unchoke;

		for (connection_map::iterator i = m_connections.begin()
			, end(m_connections.end()); i != end; ++i)
		{
			peer_connection* p = i->get();
			TORRENT_ASSERT(p);
			torrent_peer* pi = p->peer_info_struct();
			if (!pi) continue;
			if (pi->web_seed) continue;

			if (pi->optimistically_unchoked)
			{
				prev_opt_unchoke.push_back(pi);
			}

			torrent* t = p->associated_torrent().lock().get();
			if (!t) continue;

<div style="background: #ffff00" width="100%">			if (t->is_paused()) continue;
</div>
			if (!p->is_connecting()
				&& !p->is_disconnecting()
				&& p->is_peer_interested()
				&& t->free_upload_slots()
				&& (p->is_choked() || pi->optimistically_unchoked)
				&& !p->ignore_unchoke_slots()
				&& t->valid_metadata())
			{
				opt_unchoke.push_back(pi);
			}
		}

		// find the peers that has been waiting the longest to be optimistically
		// unchoked

		int num_opt_unchoke = m_settings.get_int(settings_pack::num_optimistic_unchoke_slots);
		int const allowed_unchoke_slots = m_stats_counters[counters::num_unchoke_slots];
		if (num_opt_unchoke == 0) num_opt_unchoke = (std::max)(1, allowed_unchoke_slots / 5);
		if (num_opt_unchoke > int(opt_unchoke.size())) num_opt_unchoke =
			int(opt_unchoke.size());

		// find the n best optimistic unchoke candidates
		std::partial_sort(opt_unchoke.begin()
			, opt_unchoke.begin() + num_opt_unchoke
			, opt_unchoke.end(), &last_optimistic_unchoke_cmp);

#ifndef TORRENT_DISABLE_EXTENSIONS
		if (m_session_extension_features & plugin::optimistic_unchoke_feature)
		{
</pre></td></tr><tr style="background: #fcc"><td>relevance 3</td><td><a href="javascript:expand(6)">../src/session_impl.cpp:4105</a></td><td>there should be a pre-calculated list of all peers eligible for unchoking</td></tr><tr id="6" style="display: none;" colspan="3"><td colspan="3"><h2>there should be a pre-calculated list of all peers eligible for
unchoking</h2><h4>../src/session_impl.cpp:4105</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">			if (want_peers_download.empty() && want_peers_finished.empty()) break;
			// if we have gone a whole loop without
			// handing out a single connection, break
			if (steps_since_last_connect > num_torrents + 1) break;
			// maintain the global limit on number of connections
			if (num_connections() >= m_settings.get_int(settings_pack::connections_limit)) break;
		}
	}

	void session_impl::recalculate_unchoke_slots()
	{
		TORRENT_ASSERT(is_single_thread());
		INVARIANT_CHECK;

		time_point now = aux::time_now();
		time_duration unchoke_interval = now - m_last_choke;
		m_last_choke = now;

		// build list of all peers that are
		// unchokable.
<div style="background: #ffff00" width="100%">		std::vector<peer_connection*> peers;
</div>		for (connection_map::iterator i = m_connections.begin();
			i != m_connections.end();)
		{
			boost::shared_ptr<peer_connection> p = *i;
			TORRENT_ASSERT(p);
			++i;
			torrent* t = p->associated_torrent().lock().get();
			torrent_peer* pi = p->peer_info_struct();

			if (p->ignore_unchoke_slots() || t == 0 || pi == 0
				|| pi->web_seed || t->is_paused())
			{
				p->reset_choke_counters();
				continue;
			}

			if (!p->is_peer_interested()
				|| p->is_disconnecting()
				|| p->is_connecting())
			{
				// this peer is not unchokable. So, if it's unchoked
				// already, make sure to choke it.
				if (p->is_choked())
				{
					p->reset_choke_counters();
					continue;
				}
				if (pi && pi->optimistically_unchoked)
				{
					m_stats_counters.inc_stats_counter(counters::num_peers_up_unchoked_optimistic, -1);
</pre></td></tr><tr style="background: #fcc"><td>relevance 3</td><td><a href="javascript:expand(7)">../src/torrent.cpp:9611</a></td><td>this really needs to be moved to do_async_save_resume_data. flags need to be passed on</td></tr><tr id="7" style="display: none;" colspan="3"><td colspan="3"><h2>this really needs to be moved to do_async_save_resume_data.
flags need to be passed on</h2><h4>../src/torrent.cpp:9611</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">			alerts().emplace_alert<save_resume_data_failed_alert>(get_handle()
				, m_error);
			return;
		}

		// storage may be NULL during shutdown
		if (!m_storage)
		{
			TORRENT_ASSERT(m_abort);
			alerts().emplace_alert<save_resume_data_failed_alert>(get_handle()
				, boost::asio::error::operation_aborted);
			return;
		}

		boost::shared_ptr<entry> rd(new entry);
		write_resume_data(*rd);
		alerts().emplace_alert<save_resume_data_alert>(rd, get_handle());
		return;
	}

<div style="background: #ffff00" width="100%">		if ((flags & torrent_handle::flush_disk_cache) && m_storage.get())
</div>			m_ses.disk_thread().async_release_files(m_storage.get());

		m_ses.queue_async_resume_data(shared_from_this());
	}

	bool torrent::do_async_save_resume_data()
	{
		if (!need_loaded())
		{
			alerts().emplace_alert<save_resume_data_failed_alert>(get_handle(), m_error);
			return false;
		}
		// storage may be NULL during shutdown
		if (!m_storage)
		{
			TORRENT_ASSERT(m_abort);
			alerts().emplace_alert<save_resume_data_failed_alert>(get_handle()
				, boost::asio::error::operation_aborted);
			return false;
		}
		inc_refcount("save_resume");
		m_ses.disk_thread().async_save_resume_data(m_storage.get()
			, boost::bind(&torrent::on_save_resume_data, shared_from_this(), _1));
		return true;
	}

	bool torrent::should_check_files() const
	{
		TORRENT_ASSERT(is_single_thread());
		// #error should m_allow_peers really affect checking?
</pre></td></tr><tr style="background: #fcc"><td>relevance 3</td><td><a href="javascript:expand(8)">../src/upnp.cpp:94</a></td><td>listen_interface is not used. It's meant to bind the broadcast socket. it would probably have to be changed to a vector of interfaces to bind to though, since the broadcast socket opens one socket per local interface by default</td></tr><tr id="8" style="display: none;" colspan="3"><td colspan="3"><h2>listen_interface is not used. It's meant to bind the broadcast
socket. it would probably have to be changed to a vector of interfaces to
bind to though, since the broadcast socket opens one socket per local
interface by default</h2><h4>../src/upnp.cpp:94</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">	, portmap_callback_t const& cb, log_callback_t const& lcb
	, bool ignore_nonrouters)
	: m_user_agent(user_agent)
	, m_callback(cb)
	, m_log_callback(lcb)
	, m_retry_count(0)
	, m_io_service(ios)
	, m_resolver(ios)
	, m_socket(udp::endpoint(address_v4::from_string("239.255.255.250"
		, ignore_error), 1900))
	, m_broadcast_timer(ios)
	, m_refresh_timer(ios)
	, m_map_timer(ios)
	, m_disabled(false)
	, m_closing(false)
	, m_ignore_non_routers(ignore_nonrouters)
	, m_last_if_update(min_time())
{
	TORRENT_ASSERT(cb);

<div style="background: #ffff00" width="100%">	TORRENT_UNUSED(listen_interface);
</div>}

void upnp::start()
{
	error_code ec;
	m_socket.open(boost::bind(&upnp::on_reply, self(), _1, _2, _3)
		, m_refresh_timer.get_io_service(), ec);

	m_mappings.reserve(10);
}

upnp::~upnp()
{
}

void upnp::discover_device()
{
	mutex::scoped_lock l(m_mutex);
	if (m_socket.num_send_sockets() == 0)
		log("No network interfaces to broadcast to", l);

	discover_device_impl(l);
}

void upnp::log(char const* msg, mutex::scoped_lock& l)
{
	l.unlock();
	m_log_callback(msg);
	l.lock();
}
</pre></td></tr><tr style="background: #fcc"><td>relevance 3</td><td><a href="javascript:expand(9)">../src/kademlia/node_id.cpp:54</a></td><td>the XORing should be done at full words instead of bytes</td></tr><tr id="9" style="display: none;" colspan="3"><td colspan="3"><h2>the XORing should be done at full words instead of bytes</h2><h4>../src/kademlia/node_id.cpp:54</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">
#include "libtorrent/kademlia/node_id.hpp"
#include "libtorrent/kademlia/node_entry.hpp"
#include "libtorrent/hasher.hpp"
#include "libtorrent/assert.hpp"
#include "libtorrent/broadcast_socket.hpp" // for is_local et.al
#include "libtorrent/socket_io.hpp" // for hash_address
#include "libtorrent/random.hpp" // for random
#include "libtorrent/hasher.hpp" // for hasher
#include "libtorrent/crc32c.hpp" // for crc32c

namespace libtorrent { namespace dht
{

// returns the distance between the two nodes
// using the kademlia XOR-metric
node_id distance(node_id const& n1, node_id const& n2)
{
	node_id ret;
	node_id::iterator k = ret.begin();
<div style="background: #ffff00" width="100%">	for (node_id::const_iterator i = n1.begin(), j = n2.begin()
</div>		, end(n1.end()); i != end; ++i, ++j, ++k)
	{
		*k = *i ^ *j;
	}
	return ret;
}

// returns true if: distance(n1, ref) < distance(n2, ref)
bool compare_ref(node_id const& n1, node_id const& n2, node_id const& ref)
{
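
// ----------------------------------------------------------------------
// editor's sketch, standalone, not libtorrent code: the same XOR distance
// computed one 32-bit word at a time instead of byte by byte, as the TODO
// asks. memcpy() sidesteps alignment/aliasing issues and compilers reduce
// it to plain word loads; byte order is irrelevant for a pure XOR.
#include <array>
#include <cstdint>
#include <cstring>

using node_id_bytes = std::array<std::uint8_t, 20>;

node_id_bytes distance_words(node_id_bytes const& n1, node_id_bytes const& n2)
{
	node_id_bytes ret;
	// 20 bytes = 5 x 32-bit words (a 64-bit variant would do 2x8 + 1x4)
	for (int i = 0; i < 20; i += 4)
	{
		std::uint32_t a;
		std::uint32_t b;
		std::memcpy(&a, n1.data() + i, 4);
		std::memcpy(&b, n2.data() + i, 4);
		std::uint32_t const x = a ^ b;
		std::memcpy(ret.data() + i, &x, 4);
	}
	return ret;
}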
</pre></td></tr><tr style="background: #fcc"><td>relevance 3</td><td><a href="javascript:expand(10)">../src/kademlia/node_id.cpp:66</a></td><td>the XORing should be done at full words instead of bytes</td></tr><tr id="10" style="display: none;" colspan="3"><td colspan="3"><h2>the XORing should be done at full words instead of bytes</h2><h4>../src/kademlia/node_id.cpp:66</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">namespace libtorrent { namespace dht
{

// returns the distance between the two nodes
// using the kademlia XOR-metric
node_id distance(node_id const& n1, node_id const& n2)
{
	node_id ret;
	node_id::iterator k = ret.begin();
	for (node_id::const_iterator i = n1.begin(), j = n2.begin()
		, end(n1.end()); i != end; ++i, ++j, ++k)
	{
		*k = *i ^ *j;
	}
	return ret;
}

// returns true if: distance(n1, ref) < distance(n2, ref)
bool compare_ref(node_id const& n1, node_id const& n2, node_id const& ref)
{
<div style="background: #ffff00" width="100%">	for (node_id::const_iterator i = n1.begin(), j = n2.begin()
</div>		, k = ref.begin(), end(n1.end()); i != end; ++i, ++j, ++k)
	{
		boost::uint8_t lhs = (*i ^ *k);
		boost::uint8_t rhs = (*j ^ *k);
		if (lhs < rhs) return true;
		if (lhs > rhs) return false;
	}
	return false;
}

// returns n in: 2^n <= distance(n1, n2) < 2^(n+1)
// useful for finding out which bucket a node belongs to
int distance_exp(node_id const& n1, node_id const& n2)
{
</pre></td></tr><tr style="background: #fcc"><td>relevance 3</td><td><a href="javascript:expand(11)">../src/kademlia/node_id.cpp:82</a></td><td>the xoring should be done at full words and _builtin_clz() could be used as the last step</td></tr><tr id="11" style="display: none;" colspan="3"><td colspan="3"><h2>the xoring should be done at full words and _builtin_clz() could
be used as the last step</h2><h4>../src/kademlia/node_id.cpp:82</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">}

// returns true if: distance(n1, ref) < distance(n2, ref)
bool compare_ref(node_id const& n1, node_id const& n2, node_id const& ref)
{
	for (node_id::const_iterator i = n1.begin(), j = n2.begin()
		, k = ref.begin(), end(n1.end()); i != end; ++i, ++j, ++k)
	{
		boost::uint8_t lhs = (*i ^ *k);
		boost::uint8_t rhs = (*j ^ *k);
		if (lhs < rhs) return true;
		if (lhs > rhs) return false;
	}
	return false;
}

// returns n in: 2^n <= distance(n1, n2) < 2^(n+1)
// useful for finding out which bucket a node belongs to
int distance_exp(node_id const& n1, node_id const& n2)
{
<div style="background: #ffff00" width="100%">	int byte = node_id::size - 1;
</div>	for (node_id::const_iterator i = n1.begin(), j = n2.begin()
		, end(n1.end()); i != end; ++i, ++j, --byte)
	{
		TORRENT_ASSERT(byte >= 0);
		boost::uint8_t t = *i ^ *j;
		if (t == 0) continue;
		// we have found the first non-zero byte
		// return the bit-number of the first bit
		// that differs
		int const bit = byte * 8;
		for (int b = 7; b >= 0; --b)
			if (t >= (1 << b)) return bit + b;
		return bit;
	}

	return 0;
}

node_id generate_id_impl(address const& ip_, boost::uint32_t r)
{
	boost::uint8_t* ip = 0;

	static const boost::uint8_t v4mask[] = { 0x03, 0x0f, 0x3f, 0xff };
#if TORRENT_USE_IPV6
	static const boost::uint8_t v6mask[] = { 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f, 0x7f, 0xff };
#endif
	boost::uint8_t const* mask = 0;
	int num_octets = 0;

	address_v4::bytes_type b4;
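
// ----------------------------------------------------------------------
// editor's sketch, standalone, not libtorrent code: distance_exp() done on
// 32-bit words with __builtin_clz() (GCC/clang; MSVC would use
// _BitScanReverse) as the final step, as the TODO above suggests.
#include <array>
#include <cstdint>

using node_id_bytes = std::array<std::uint8_t, 20>;

int distance_exp_words(node_id_bytes const& n1, node_id_bytes const& n2)
{
	for (int w = 0; w < 5; ++w)
	{
		std::uint8_t const* a = n1.data() + w * 4;
		std::uint8_t const* b = n2.data() + w * 4;
		// big-endian load: byte 0 is the most significant byte of the ID
		std::uint32_t const x
			= (std::uint32_t(a[0] ^ b[0]) << 24)
			| (std::uint32_t(a[1] ^ b[1]) << 16)
			| (std::uint32_t(a[2] ^ b[2]) << 8)
			| std::uint32_t(a[3] ^ b[3]);
		if (x == 0) continue;
		// index of the highest differing bit, counted from the least
		// significant end of the 160-bit ID (same result as the byte loop)
		return (4 - w) * 32 + (31 - __builtin_clz(x));
	}
	return 0;
}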
</pre></td></tr><tr style="background: #fcc"><td>relevance 3</td><td><a href="javascript:expand(12)">../src/kademlia/routing_table.cpp:698</a></td><td>the call to compare_ip_cidr here is expensive. peel off some layers of abstraction here to make it quicker. Look at xoring and using _builtin_ctz()</td></tr><tr id="12" style="display: none;" colspan="3"><td colspan="3"><h2>the call to compare_ip_cidr here is expensive. peel off some
layers of abstraction here to make it quicker. Look at xoring and using _builtin_ctz()</h2><h4>../src/kademlia/routing_table.cpp:698</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">		// that we have an updated RTT
		j = std::find_if(rb.begin(), rb.end(), boost::bind(&node_entry::id, _1) == e.id);
		if (j != rb.end())
		{
			// a new IP address just claimed this node-ID
			// ignore it
			if (j->addr() != e.addr() || j->port() != e.port())
				return failed_to_add;

			TORRENT_ASSERT(j->id == e.id && j->ep() == e.ep());
			j->timeout_count = 0;
			j->update_rtt(e.rtt);
			e = *j;
			erase_one(m_ips, j->addr().to_v4().to_bytes());
			rb.erase(j);
		}

		if (m_settings.restrict_routing_ips)
		{
			// don't allow multiple entries from IPs very close to each other
<div style="background: #ffff00" width="100%">			j = std::find_if(b.begin(), b.end(), boost::bind(&compare_ip_cidr, _1, e));
</div>			if (j == b.end())
			{
				j = std::find_if(rb.begin(), rb.end(), boost::bind(&compare_ip_cidr, _1, e));
				if (j == rb.end()) goto ip_ok;
			}

			// we already have a node in this bucket with an IP very
			// close to this one. We know that it's not the same, because
			// it claims a different node-ID. Ignore this to avoid attacks
#ifndef TORRENT_DISABLE_LOGGING
			char hex_id1[41];
			to_hex(e.id.data(), 20, hex_id1);
			char hex_id2[41];
			to_hex(j->id.data(), 20, hex_id2);
			m_log->log(dht_logger::routing_table, "ignoring node: %s %s existing node: %s %s"
				, hex_id1, print_address(e.addr()).c_str()
				, hex_id2, print_address(j->addr()).c_str());
#endif
			return failed_to_add;
		}
		ip_ok:

		// can we split the bucket?
		// only nodes that haven't failed can split the bucket, and we can only
		// split the last bucket
		bool const can_split = (boost::next(i) == m_buckets.end()
			&& m_buckets.size() < 159)
			&& e.fail_count() == 0
			&& (i == m_buckets.begin() || boost::prior(i)->live_nodes.size() > 1);
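
	// ----------------------------------------------------------------------
	// editor's sketch, standalone, not libtorrent code: the cheap form of the
	// "is this IP in the same CIDR block" test the TODO above asks for, using
	// a single XOR plus __builtin_clz() (GCC/clang). The /24 cutoff is an
	// assumption; use whatever prefix length compare_ip_cidr() applies.
#include <cstdint>

	// a and b in host byte order, e.g. from address_v4::to_ulong()
	inline bool same_v4_prefix(std::uint32_t const a, std::uint32_t const b
		, int const prefix = 24)
	{
		std::uint32_t const x = a ^ b;
		int const common = (x == 0) ? 32 : __builtin_clz(x);
		return common >= prefix;
	}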
</pre></td></tr><tr style="background: #fcc"><td>relevance 3</td><td><a href="javascript:expand(13)">../src/kademlia/rpc_manager.cpp:87</a></td><td>move this into it's own .cpp file</td></tr><tr id="13" style="display: none;" colspan="3"><td colspan="3"><h2>move this into it's own .cpp file</h2><h4>../src/kademlia/rpc_manager.cpp:87</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">
|
|
void intrusive_ptr_add_ref(observer const* o)
|
|
{
|
|
TORRENT_ASSERT(o != 0);
|
|
TORRENT_ASSERT(o->m_refs < 0xffff);
|
|
++o->m_refs;
|
|
}
|
|
|
|
void intrusive_ptr_release(observer const* o)
|
|
{
|
|
TORRENT_ASSERT(o != 0);
|
|
TORRENT_ASSERT(o->m_refs > 0);
|
|
if (--o->m_refs == 0)
|
|
{
|
|
boost::intrusive_ptr<traversal_algorithm> ta = o->algorithm();
|
|
(const_cast<observer*>(o))->~observer();
|
|
ta->free_observer(const_cast<observer*>(o));
|
|
}
|
|
}
|
|
|
|
<div style="background: #ffff00" width="100%">dht_observer* observer::get_observer() const
|
|
</div>{
|
|
return m_algorithm->get_node().observer();
|
|
}
|
|
|
|
void observer::set_target(udp::endpoint const& ep)
|
|
{
|
|
m_sent = clock_type::now();
|
|
|
|
m_port = ep.port();
|
|
#if TORRENT_USE_IPV6
|
|
if (ep.address().is_v6())
|
|
{
|
|
flags |= flag_ipv6_address;
|
|
m_addr.v6 = ep.address().to_v6().to_bytes();
|
|
}
|
|
else
|
|
#endif
|
|
{
|
|
flags &= ~flag_ipv6_address;
|
|
m_addr.v4 = ep.address().to_v4().to_bytes();
|
|
}
|
|
}
|
|
|
|
address observer::target_addr() const
|
|
{
|
|
#if TORRENT_USE_IPV6
|
|
if (flags & flag_ipv6_address)
|
|
return address_v6(m_addr.v6);
|
|
else
|
|
#endif
|
|
</pre></td></tr><tr style="background: #fcc"><td>relevance 3</td><td><a href="javascript:expand(14)">../include/libtorrent/socks5_stream.hpp:114</a></td><td>enable this assert and fix remaining causes of it triggering</td></tr><tr id="14" style="display: none;" colspan="3"><td colspan="3"><h2>enable this assert and fix remaining causes of it triggering</h2><h4>../include/libtorrent/socks5_stream.hpp:114</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> , m_listen(0)
|
|
{}
|
|
|
|
void set_version(int v) { m_version = v; }
|
|
|
|
void set_command(int c)
|
|
{
|
|
TORRENT_ASSERT(c >= socks5_connect && c <=socks5_udp_associate);
|
|
m_command = c;
|
|
}
|
|
|
|
void set_username(std::string const& user
|
|
, std::string const& password)
|
|
{
|
|
m_user = user;
|
|
m_password = password;
|
|
}
|
|
|
|
void set_dst_name(std::string const& host)
|
|
{
|
|
<div style="background: #ffff00" width="100%">/*
|
|
</div>#if TORRENT_USE_ASSERTS
|
|
error_code ec;
|
|
address::from_string(host, ec);
|
|
// if this assert trips, set_dst_name() is called wth an IP address rather
|
|
// than a hostname. Instead, resolve the IP into an address and pass it to
|
|
// async_connect instead
|
|
TORRENT_ASSERT(ec);
|
|
#endif
|
|
*/
|
|
m_dst_name = host;
|
|
if (m_dst_name.size() > 255)
|
|
m_dst_name.resize(255);
|
|
}
|
|
|
|
void close(error_code& ec)
|
|
{
|
|
m_dst_name.clear();
|
|
proxy_base::close(ec);
|
|
}
|
|
|
|
#ifndef BOOST_NO_EXCEPTIONS
|
|
void close()
|
|
{
|
|
m_dst_name.clear();
|
|
proxy_base::close();
|
|
}
|
|
#endif
|
|
|
|
</pre></td></tr><tr style="background: #fcc"><td>relevance 3</td><td><a href="javascript:expand(15)">../include/libtorrent/torrent.hpp:1347</a></td><td>factor out the links (as well as update_list() to a separate class that torrent can inherit)</td></tr><tr id="15" style="display: none;" colspan="3"><td colspan="3"><h2>factor out the links (as well as update_list() to a separate
|
|
class that torrent can inherit)</h2><h4>../include/libtorrent/torrent.hpp:1347</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">
|
|
// this was the last time _we_ saw a seed in this swarm
|
|
time_t m_last_seen_complete;
|
|
|
|
// this is the time last any of our peers saw a seed
|
|
// in this swarm
|
|
time_t m_swarm_last_seen_complete;
|
|
|
|
// keep a copy if the info-hash here, so it can be accessed from multiple
|
|
// threads, and be cheap to access from the client
|
|
sha1_hash m_info_hash;
|
|
|
|
public:
|
|
// these are the lists this torrent belongs to. For more
|
|
// details about each list, see session_impl.hpp. Each list
|
|
// represents a group this torrent belongs to and makes it
|
|
// efficient to enumerate only torrents belonging to a specific
|
|
// group. Such as torrents that want peer connections or want
|
|
// to be ticked etc.
|
|
|
|
<div style="background: #ffff00" width="100%"> link m_links[aux::session_interface::num_torrent_lists];
|
|
</div>
|
|
private:
|
|
|
|
// m_num_verified = m_verified.count()
|
|
boost::uint32_t m_num_verified;
|
|
|
|
// this timestamp is kept in session-time, to
|
|
// make it fit in 16 bits
|
|
boost::uint16_t m_last_saved_resume;
|
|
|
|
// if this torrent is running, this was the time
|
|
// when it was started. This is used to have a
|
|
// bias towards keeping seeding torrents that
|
|
// recently was started, to avoid oscillation
|
|
// this is specified at a second granularity
|
|
// in session-time. see session_impl for details.
|
|
// the reference point is stepped forward every 4
|
|
// hours to keep the timestamps fit in 16 bits
|
|
boost::uint16_t m_started;
|
|
|
|
// if we're a seed, this is the session time
|
|
// timestamp of when we became one
|
|
boost::uint16_t m_became_seed;
|
|
|
|
// if we're finished, this is the session time
|
|
// timestamp of when we finished
|
|
boost::uint16_t m_became_finished;
|
|
|
|
// when checking, this is the first piece we have not
|
|
// issued a hash job for
|
|
</pre></td></tr><tr style="background: #fcc"><td>relevance 3</td><td><a href="javascript:expand(16)">../include/libtorrent/torrent_handle.hpp:237</a></td><td>consider replacing all the setters and getters for pause, resume, stop-when-ready, share-mode, upload-mode, super-seeding, apply-ip-filter, resolve-countries, pinned, sequential-download, seed-mode with just set_flags() and clear_flags() using the flags from add_torrent_params. Perhaps those flags should have a more generic name.</td></tr><tr id="16" style="display: none;" colspan="3"><td colspan="3"><h2>consider replacing all the setters and getters for pause,
|
|
resume, stop-when-ready, share-mode, upload-mode, super-seeding,
|
|
apply-ip-filter, resolve-countries, pinned, sequential-download,
|
|
seed-mode
|
|
with just set_flags() and clear_flags() using the flags from
|
|
add_torrent_params. Perhaps those flags should have a more generic
|
|
name.</h2><h4>../include/libtorrent/torrent_handle.hpp:237</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> // expensive if done from within a GUI thread that needs to stay
|
|
// responsive. Try to avoid quering for information you don't need, and
|
|
// try to do it in as few calls as possible. You can get most of the
|
|
// interesting information about a torrent from the
|
|
// torrent_handle::status() call.
|
|
//
|
|
// The default constructor will initialize the handle to an invalid state.
|
|
// Which means you cannot perform any operation on it, unless you first
|
|
// assign it a valid handle. If you try to perform any operation on an
|
|
// uninitialized handle, it will throw ``invalid_handle``.
|
|
//
|
|
// .. warning::
|
|
// All operations on a torrent_handle may throw libtorrent_exception
|
|
// exception, in case the handle is no longer refering to a torrent.
|
|
// There is one exception is_valid() will never throw. Since the torrents
|
|
// are processed by a background thread, there is no guarantee that a
|
|
// handle will remain valid between two calls.
|
|
//
|
|
struct TORRENT_EXPORT torrent_handle
|
|
{
|
|
<div style="background: #ffff00" width="100%"> friend class invariant_access;
|
|
</div> friend struct aux::session_impl;
|
|
friend class session;
|
|
friend struct session_handle;
|
|
friend struct feed;
|
|
friend class torrent;
|
|
friend std::size_t hash_value(torrent_handle const& th);
|
|
|
|
// constructs a torrent handle that does not refer to a torrent.
|
|
// i.e. is_valid() will return false.
|
|
torrent_handle() {}
|
|
|
|
torrent_handle(torrent_handle const& t)
|
|
{ if (!t.m_torrent.expired()) m_torrent = t.m_torrent; }
|
|
|
|
#if __cplusplus >= 201103L
|
|
torrent_handle& operator=(torrent_handle const&) = default;
|
|
#endif
|
|
|
|
// flags for add_piece().
|
|
enum flags_t { overwrite_existing = 1 };
|
|
|
|
// This function will write ``data`` to the storage as piece ``piece``,
|
|
// as if it had been downloaded from a peer. ``data`` is expected to
|
|
// point to a buffer of as many bytes as the size of the specified piece.
|
|
// The data in the buffer is copied and passed on to the disk IO thread
|
|
// to be written at a later point.
|
|
//
|
|
// By default, data that's already been downloaded is not overwritten by
|
|
// this buffer. If you trust this data to be correct (and pass the piece
|
|
// hash check) you may pass the overwrite_existing flag. This will
|
|
</pre></td></tr><tr style="background: #fcc"><td>relevance 3</td><td><a href="javascript:expand(17)">../include/libtorrent/torrent_handle.hpp:484</a></td><td>unify url_seed and http_seed with just web_seed, using the web_seed_entry.</td></tr><tr id="17" style="display: none;" colspan="3"><td colspan="3"><h2>unify url_seed and http_seed with just web_seed, using the
|
|
web_seed_entry.</h2><h4>../include/libtorrent/torrent_handle.hpp:484</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> // announce url for the tracker as well as an int ``tier``, which is
|
|
// specifies the order in which this tracker is tried. If you want
|
|
// libtorrent to use another list of trackers for this torrent, you can
|
|
// use ``replace_trackers()`` which takes a list of the same form as the
|
|
// one returned from ``trackers()`` and will replace it. If you want an
|
|
// immediate effect, you have to call force_reannounce(). See
|
|
// announce_entry.
|
|
//
|
|
// ``add_tracker()`` will look if the specified tracker is already in the
|
|
// set. If it is, it doesn't do anything. If it's not in the current set
|
|
// of trackers, it will insert it in the tier specified in the
|
|
// announce_entry.
|
|
//
|
|
// The updated set of trackers will be saved in the resume data, and when
|
|
// a torrent is started with resume data, the trackers from the resume
|
|
// data will replace the original ones.
|
|
std::vector<announce_entry> trackers() const;
|
|
void replace_trackers(std::vector<announce_entry> const&) const;
|
|
void add_tracker(announce_entry const&) const;
|
|
|
|
<div style="background: #ffff00" width="100%">
|
|
</div> // ``add_url_seed()`` adds another url to the torrent's list of url
|
|
// seeds. If the given url already exists in that list, the call has no
|
|
// effect. The torrent will connect to the server and try to download
|
|
// pieces from it, unless it's paused, queued, checking or seeding.
|
|
// ``remove_url_seed()`` removes the given url if it exists already.
|
|
// ``url_seeds()`` return a set of the url seeds currently in this
|
|
// torrent. Note that urls that fails may be removed automatically from
|
|
// the list.
|
|
//
|
|
// See http-seeding_ for more information.
|
|
void add_url_seed(std::string const& url) const;
|
|
void remove_url_seed(std::string const& url) const;
|
|
std::set<std::string> url_seeds() const;
|
|
|
|
// These functions are identical as the ``*_url_seed()`` variants, but
|
|
// they operate on `BEP 17`_ web seeds instead of `BEP 19`_.
|
|
//
|
|
// See http-seeding_ for more information.
|
|
void add_http_seed(std::string const& url) const;
|
|
void remove_http_seed(std::string const& url) const;
|
|
std::set<std::string> http_seeds() const;
|
|
|
|
// add the specified extension to this torrent. The ``ext`` argument is
|
|
// a function that will be called from within libtorrent's context
|
|
// passing in the internal torrent object and the specified userdata
|
|
// pointer. The function is expected to return a shared pointer to
|
|
// a torrent_plugin instance.
|
|
void add_extension(
|
|
boost::function<boost::shared_ptr<torrent_plugin>(torrent_handle const&, void*)> const& ext
|
|
, void* userdata = 0);
|
|
</pre></td></tr><tr style="background: #fcc"><td>relevance 3</td><td><a href="javascript:expand(18)">../include/libtorrent/web_peer_connection.hpp:131</a></td><td>if we make this be a disk_buffer_holder instead we would save a copy use allocate_disk_receive_buffer and release_disk_receive_buffer</td></tr><tr id="18" style="display: none;" colspan="3"><td colspan="3"><h2>if we make this be a disk_buffer_holder instead
|
|
we would save a copy
|
|
use allocate_disk_receive_buffer and release_disk_receive_buffer</h2><h4>../include/libtorrent/web_peer_connection.hpp:131</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> boost::optional<piece_block_progress> downloading_piece_progress() const TORRENT_OVERRIDE;
|
|
|
|
void handle_padfile();
|
|
|
|
// this has one entry per http-request
|
|
// (might be more than the bt requests)
|
|
struct file_request_t
|
|
{
|
|
int file_index;
|
|
int length;
|
|
boost::int64_t start;
|
|
};
|
|
std::deque<file_request_t> m_file_requests;
|
|
|
|
std::string m_url;
|
|
|
|
web_seed_t* m_web;
|
|
|
|
// this is used for intermediate storage of pieces to be delivered to the
|
|
// bittorrent engine
|
|
<div style="background: #ffff00" width="100%"> std::vector<char> m_piece;
|
|
</div>
|
|
// the number of bytes we've forwarded to the incoming_payload() function
|
|
// in the current HTTP response. used to know where in the buffer the
|
|
// next response starts
|
|
int m_received_body;
|
|
|
|
// this is the offset inside the current receive
|
|
// buffer where the next chunk header will be.
|
|
// this is updated for each chunk header that's
|
|
// parsed. It does not necessarily point to a valid
|
|
// offset in the receive buffer, if we haven't received
|
|
// it yet. This offset never includes the HTTP header
|
|
int m_chunk_pos;
|
|
|
|
// this is the number of bytes we've already received
|
|
// from the next chunk header we're waiting for
|
|
int m_partial_chunk_header;
|
|
|
|
// the number of responses we've received so far on
|
|
// this connection
|
|
int m_num_responses;
|
|
};
|
|
}
|
|
|
|
#endif // TORRENT_WEB_PEER_CONNECTION_HPP_INCLUDED
|
|
|
|
</pre></td></tr><tr style="background: #fcc"><td>relevance 3</td><td><a href="javascript:expand(19)">../include/libtorrent/kademlia/routing_table.hpp:99</a></td><td>to improve memory locality and scanning performance, turn the routing table into a single vector with boundaries for the nodes instead. Perhaps replacement nodes should be in a separate vector.</td></tr><tr id="19" style="display: none;" colspan="3"><td colspan="3"><h2>to improve memory locality and scanning performance, turn the
|
|
routing table into a single vector with boundaries for the nodes instead.
|
|
Perhaps replacement nodes should be in a separate vector.</h2><h4>../include/libtorrent/kademlia/routing_table.hpp:99</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">// * Nodes are not marked as being stale, they keep a counter
|
|
// that tells how many times in a row they have failed. When
|
|
// a new node is to be inserted, the node that has failed
|
|
// the most times is replaced. If none of the nodes in the
|
|
// bucket has failed, then it is put in the replacement
|
|
// cache (just like in the paper).
|
|
|
|
namespace impl
|
|
{
|
|
template <typename F>
|
|
inline void forwarder(void* userdata, node_entry const& node)
|
|
{
|
|
F* f = reinterpret_cast<F*>(userdata);
|
|
(*f)(node);
|
|
}
|
|
}
|
|
|
|
class TORRENT_EXTRA_EXPORT routing_table : boost::noncopyable
|
|
{
|
|
public:
|
|
<div style="background: #ffff00" width="100%"> typedef std::vector<routing_table_node> table_t;
|
|
</div>
|
|
routing_table(node_id const& id, int bucket_size
|
|
, dht_settings const& settings
|
|
, dht_logger* log);
|
|
|
|
#ifndef TORRENT_NO_DEPRECATE
|
|
void status(session_status& s) const;
|
|
#endif
|
|
|
|
void status(std::vector<dht_routing_bucket>& s) const;
|
|
|
|
void node_failed(node_id const& id, udp::endpoint const& ep);
|
|
|
|
// adds an endpoint that will never be added to
|
|
// the routing table
|
|
void add_router_node(udp::endpoint router);
|
|
|
|
// iterates over the router nodes added
|
|
typedef std::set<udp::endpoint>::const_iterator router_iterator;
|
|
router_iterator router_begin() const { return m_router_nodes.begin(); }
|
|
router_iterator router_end() const { return m_router_nodes.end(); }
|
|
|
|
enum add_node_status_t {
|
|
failed_to_add = 0,
|
|
node_added,
|
|
need_bucket_split
|
|
};
|
|
add_node_status_t add_node_impl(node_entry e);
|
|
|
|
bool add_node(node_entry e);
|
|
</pre></td></tr><tr style="background: #fcc"><td>relevance 3</td><td><a href="javascript:expand(20)">../include/libtorrent/aux_/allocating_handler.hpp:77</a></td><td>make sure the handlers we pass in are potentially movable!</td></tr><tr id="20" style="display: none;" colspan="3"><td colspan="3"><h2>make sure the handlers we pass in are potentially movable!</h2><h4>../include/libtorrent/aux_/allocating_handler.hpp:77</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> : used(false)
|
|
{}
|
|
|
|
bool used;
|
|
#else
|
|
handler_storage() {}
|
|
#endif
|
|
boost::aligned_storage<Size> bytes;
|
|
private:
|
|
handler_storage(handler_storage const&);
|
|
};
|
|
|
|
// this class is a wrapper for an asio handler object. Its main purpose
|
|
// is to pass along additional parameters to the asio handler allocator
|
|
// function, as well as providing a distinct type for the handler
|
|
// allocator function to overload on
|
|
template <class Handler, std::size_t Size>
|
|
struct allocating_handler
|
|
{
|
|
|
|
<div style="background: #ffff00" width="100%">#if !defined BOOST_NO_CXX11_RVALUE_REFERENCES
|
|
</div> allocating_handler(
|
|
Handler&& h, handler_storage<Size>& s)
|
|
: handler(std::move(h))
|
|
, storage(s)
|
|
{}
|
|
#endif
|
|
|
|
allocating_handler(
|
|
Handler const& h, handler_storage<Size>& s)
|
|
: handler(h)
|
|
, storage(s)
|
|
{}
|
|
|
|
#if !defined BOOST_NO_CXX11_VARIADIC_TEMPLATES \
|
|
&& !defined BOOST_NO_CXX11_RVALUE_REFERENCES
|
|
template <class... A>
|
|
void operator()(A&&... a) const
|
|
{
|
|
handler(std::forward<A>(a)...);
|
|
}
|
|
#else
|
|
template <class A0>
|
|
void operator()(A0 const& a0) const
|
|
{
|
|
handler(a0);
|
|
}
|
|
|
|
template <class A0, class A1>
|
|
void operator()(A0 const& a0, A1 const& a1) const
|
|
{
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(21)">../test/test_dht.cpp:514</a></td><td>split this test up into smaller test cases</td></tr><tr id="21" style="display: none;" colspan="3"><td colspan="3"><h2>split this test up into smaller test cases</h2><h4>../test/test_dht.cpp:514</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> va_end(v);
|
|
m_log.push_back(buf);
|
|
}
|
|
virtual void log_packet(message_direction_t dir, char const* pkt, int len
|
|
, udp::endpoint node) TORRENT_OVERRIDE {}
|
|
virtual bool on_dht_request(char const* query, int query_len
|
|
, dht::msg const& request, entry& response) TORRENT_OVERRIDE { return false; }
|
|
|
|
std::vector<std::string> m_log;
|
|
};
|
|
|
|
dht_settings test_settings()
|
|
{
|
|
dht_settings sett;
|
|
sett.max_torrents = 4;
|
|
sett.max_dht_items = 4;
|
|
sett.enforce_node_id = false;
|
|
return sett;
|
|
}
|
|
|
|
<div style="background: #ffff00" width="100%">TORRENT_TEST(dht)
|
|
</div>{
|
|
dht_settings sett = test_settings();
|
|
mock_socket s;
|
|
obs observer;
|
|
counters cnt;
|
|
dht::node node(&s, sett, node_id(0), &observer, cnt);
|
|
|
|
// DHT should be running on port 48199 now
|
|
bdecode_node response;
|
|
char error_string[200];
|
|
bool ret;
|
|
|
|
// ====== ping ======
|
|
udp::endpoint source(address::from_string("10.0.0.1"), 20);
|
|
send_dht_request(node, "ping", source, &response);
|
|
|
|
dht::key_desc_t pong_desc[] = {
|
|
{"y", bdecode_node::string_t, 1, 0},
|
|
{"t", bdecode_node::string_t, 2, 0},
|
|
{"r", bdecode_node::dict_t, 0, key_desc_t::parse_children},
|
|
{"id", bdecode_node::string_t, 20, key_desc_t::last_child},
|
|
};
|
|
|
|
bdecode_node pong_keys[4];
|
|
|
|
fprintf(stderr, "msg: %s\n", print_entry(response).c_str());
|
|
ret = dht::verify_message(response, pong_desc, pong_keys, error_string
|
|
, sizeof(error_string));
|
|
TEST_CHECK(ret);
|
|
if (ret)
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(22)">../test/test_dht.cpp:2141</a></td><td>split this up into smaller test cases</td></tr><tr id="22" style="display: none;" colspan="3"><td colspan="3"><h2>split this up into smaller test cases</h2><h4>../test/test_dht.cpp:2141</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">
|
|
TEST_EQUAL(to_hex(std::string(signature, 64))
|
|
, "6834284b6b24c3204eb2fea824d82f88883a3d95e8b4a21b8c0ded553d17d17d"
|
|
"df9a8a7104b1258f30bed3787e6cb896fca78c58f8e03b5f18f14951a87d9a08");
|
|
|
|
sha1_hash target_id = item_target_id(test_salt, public_key);
|
|
TEST_EQUAL(to_hex(target_id.to_string()), "411eba73b6f087ca51a3795d9c8c938d365e32c1");
|
|
}
|
|
|
|
TORRENT_TEST(signing_test3)
|
|
{
|
|
// test vector 3
|
|
|
|
// test content
|
|
std::pair<char const*, int> test_content("12:Hello World!", 15);
|
|
|
|
sha1_hash target_id = item_target_id(test_content);
|
|
TEST_EQUAL(to_hex(target_id.to_string()), "e5f96f6f38320f0f33959cb4d3d656452117aadb");
|
|
}
|
|
|
|
<div style="background: #ffff00" width="100%">TORRENT_TEST(verify_message)
|
|
</div>{
|
|
char error_string[200];
|
|
|
|
// test verify_message
|
|
static const key_desc_t msg_desc[] = {
|
|
{"A", bdecode_node::string_t, 4, 0},
|
|
{"B", bdecode_node::dict_t, 0, key_desc_t::optional | key_desc_t::parse_children},
|
|
{"B1", bdecode_node::string_t, 0, 0},
|
|
{"B2", bdecode_node::string_t, 0, key_desc_t::last_child},
|
|
{"C", bdecode_node::dict_t, 0, key_desc_t::optional | key_desc_t::parse_children},
|
|
{"C1", bdecode_node::string_t, 0, 0},
|
|
{"C2", bdecode_node::string_t, 0, key_desc_t::last_child},
|
|
};
|
|
|
|
bdecode_node msg_keys[7];
|
|
|
|
bdecode_node ent;
|
|
|
|
error_code ec;
|
|
char const test_msg[] = "d1:A4:test1:Bd2:B15:test22:B25:test3ee";
|
|
bdecode(test_msg, test_msg + sizeof(test_msg)-1, ent, ec);
|
|
fprintf(stderr, "%s\n", print_entry(ent).c_str());
|
|
|
|
bool ret = verify_message(ent, msg_desc, msg_keys, error_string
|
|
, sizeof(error_string));
|
|
TEST_CHECK(ret);
|
|
TEST_CHECK(msg_keys[0]);
|
|
if (msg_keys[0]) TEST_EQUAL(msg_keys[0].string_value(), "test");
|
|
TEST_CHECK(msg_keys[1]);
|
|
TEST_CHECK(msg_keys[2]);
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(23)">../test/test_piece_picker.cpp:281</a></td><td>split this up into smaller tests (where we print_title)</td></tr><tr id="23" style="display: none;" colspan="3"><td colspan="3"><h2>split this up into smaller tests (where we print_title)</h2><h4>../test/test_piece_picker.cpp:281</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> std::vector<piece_block> picked;
|
|
counters pc;
|
|
p->pick_pieces(string2vec(availability), picked
|
|
, num_blocks, prefer_contiguous_blocks, peer_struct
|
|
, options, suggested_pieces, 20, pc);
|
|
print_pick(picked);
|
|
TEST_CHECK(verify_pick(p, picked));
|
|
return picked;
|
|
}
|
|
|
|
int test_pick(boost::shared_ptr<piece_picker> const& p
|
|
, int options = piece_picker::rarest_first)
|
|
{
|
|
const std::vector<int> empty_vector;
|
|
std::vector<piece_block> picked = pick_pieces(p, "*******", 1, 0, 0
|
|
, options, empty_vector);
|
|
if (picked.empty()) return -1;
|
|
return picked[0].piece_index;
|
|
}
|
|
|
|
<div style="background: #ffff00" width="100%">TORRENT_TEST(piece_picker)
|
|
</div>{
|
|
tcp::endpoint endp;
|
|
piece_picker::downloading_piece st;
|
|
#if TORRENT_USE_ASSERTS
|
|
tmp0.in_use = true;
|
|
tmp1.in_use = true;
|
|
tmp2.in_use = true;
|
|
tmp3.in_use = true;
|
|
tmp4.in_use = true;
|
|
tmp5.in_use = true;
|
|
tmp6.in_use = true;
|
|
tmp7.in_use = true;
|
|
tmp8.in_use = true;
|
|
tmp9.in_use = true;
|
|
peer_struct.in_use = true;
|
|
#endif
|
|
tmp_peer = &tmp1;
|
|
std::vector<piece_block> picked;
|
|
boost::shared_ptr<piece_picker> p;
|
|
const int options = piece_picker::rarest_first;
|
|
std::pair<int, int> dc;
|
|
counters pc;
|
|
|
|
print_title("test piece_block");
|
|
|
|
TEST_CHECK(piece_block(0, 0) != piece_block(0, 1));
|
|
TEST_CHECK(piece_block(0, 0) != piece_block(1, 0));
|
|
TEST_CHECK(!(piece_block(0, 0) != piece_block(0, 0)));
|
|
|
|
TEST_CHECK(!(piece_block(0, 0) == piece_block(0, 1)));
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(24)">../test/test_storage.cpp:485</a></td><td>split this test up into smaller parts</td></tr><tr id="24" style="display: none;" colspan="3"><td colspan="3"><h2>split this test up into smaller parts</h2><h4>../test/test_storage.cpp:485</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> boost::shared_ptr<void> dummy;
|
|
boost::shared_ptr<piece_manager> pm = boost::make_shared<piece_manager>(new default_storage(p), dummy, &fs);
|
|
libtorrent::mutex lock;
|
|
|
|
bool done = false;
|
|
bdecode_node frd;
|
|
std::vector<std::string> links;
|
|
io.async_check_fastresume(pm.get(), &frd, links
|
|
, boost::bind(&on_check_resume_data, _1, &done));
|
|
io.submit_jobs();
|
|
ios.reset();
|
|
run_until(ios, done);
|
|
|
|
io.set_num_threads(0);
|
|
}
|
|
|
|
#ifdef TORRENT_NO_DEPRECATE
|
|
#define storage_mode_compact storage_mode_sparse
|
|
#endif
|
|
|
|
<div style="background: #ffff00" width="100%">void run_test(bool unbuffered)
|
|
</div>{
|
|
std::string test_path = current_working_directory();
|
|
std::cerr << "\n=== " << test_path << " ===\n" << std::endl;
|
|
|
|
boost::shared_ptr<torrent_info> info;
|
|
|
|
buf_ptr piece0 = new_piece(piece_size);
|
|
buf_ptr piece1 = new_piece(piece_size);
|
|
buf_ptr piece2 = new_piece(piece_size);
|
|
buf_ptr piece3 = new_piece(piece_size);
|
|
|
|
{
|
|
error_code ec;
|
|
remove_all(combine_path(test_path, "temp_storage"), ec);
|
|
if (ec && ec != boost::system::errc::no_such_file_or_directory)
|
|
std::cerr << "remove_all '" << combine_path(test_path, "temp_storage")
|
|
<< "': " << ec.message() << std::endl;
|
|
file_storage fs;
|
|
fs.add_file("temp_storage/test1.tmp", 17);
|
|
fs.add_file("temp_storage/test2.tmp", 612);
|
|
fs.add_file("temp_storage/test3.tmp", 0);
|
|
fs.add_file("temp_storage/test4.tmp", 0);
|
|
fs.add_file("temp_storage/test5.tmp", 3253);
|
|
fs.add_file("temp_storage/test6.tmp", 841);
|
|
const int last_file_size = 4 * piece_size - fs.total_size();
|
|
fs.add_file("temp_storage/test7.tmp", last_file_size);
|
|
|
|
// File layout
|
|
// +-+--+++-------+-------+----------------------------------------------------------------------------------------+
|
|
// |1| 2||| file5 | file6 | file7 |
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(25)">../src/alert.cpp:1461</a></td><td>the salt here is allocated on the heap. It would be nice to allocate in in the stack_allocator</td></tr><tr id="25" style="display: none;" colspan="3"><td colspan="3"><h2>the salt here is allocated on the heap. It would be nice to
|
|
allocate in in the stack_allocator</h2><h4>../src/alert.cpp:1461</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> , operation_names[op]
|
|
, error.value()
|
|
, convert_from_native(error.message()).c_str());
|
|
return msg;
|
|
}
|
|
|
|
dht_immutable_item_alert::dht_immutable_item_alert(aux::stack_allocator&
|
|
, sha1_hash const& t, entry const& i)
|
|
: target(t), item(i)
|
|
{}
|
|
|
|
std::string dht_immutable_item_alert::message() const
|
|
{
|
|
char msg[1050];
|
|
snprintf(msg, sizeof(msg), "DHT immutable item %s [ %s ]"
|
|
, to_hex(target.to_string()).c_str()
|
|
, item.to_string().c_str());
|
|
return msg;
|
|
}
|
|
|
|
<div style="background: #ffff00" width="100%"> dht_mutable_item_alert::dht_mutable_item_alert(aux::stack_allocator&
|
|
</div> , boost::array<char, 32> k
|
|
, boost::array<char, 64> sig
|
|
, boost::uint64_t sequence
|
|
, std::string const& s
|
|
, entry const& i
|
|
, bool a)
|
|
: key(k), signature(sig), seq(sequence), salt(s), item(i), authoritative(a)
|
|
{}
|
|
|
|
std::string dht_mutable_item_alert::message() const
|
|
{
|
|
char msg[1050];
|
|
snprintf(msg, sizeof(msg), "DHT mutable item (key=%s salt=%s seq=%" PRId64 " %s) [ %s ]"
|
|
, to_hex(std::string(&key[0], 32)).c_str()
|
|
, salt.c_str()
|
|
, seq
|
|
, authoritative ? "auth" : "non-auth"
|
|
, item.to_string().c_str());
|
|
return msg;
|
|
}
|
|
|
|
dht_put_alert::dht_put_alert(aux::stack_allocator&, sha1_hash const& t, int n)
|
|
: target(t)
|
|
, seq(0)
|
|
, num_success(n)
|
|
{}
|
|
|
|
dht_put_alert::dht_put_alert(aux::stack_allocator&
|
|
, boost::array<char, 32> key
|
|
, boost::array<char, 64> sig
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(26)">../src/alert_manager.cpp:90</a></td><td>keep a count of the number of threads waiting. Only if it's > 0 notify them</td></tr><tr id="26" style="display: none;" colspan="3"><td colspan="3"><h2>keep a count of the number of threads waiting. Only if it's
|
|
> 0 notify them</h2><h4>../src/alert_manager.cpp:90</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">
|
|
return NULL;
|
|
}
|
|
|
|
void alert_manager::maybe_notify(alert* a, mutex::scoped_lock& lock)
|
|
{
|
|
if (a->type() == save_resume_data_failed_alert::alert_type
|
|
|| a->type() == save_resume_data_alert::alert_type)
|
|
++m_num_queued_resume;
|
|
|
|
if (m_alerts[m_generation].size() == 1)
|
|
{
|
|
lock.unlock();
|
|
|
|
// we just posted to an empty queue. If anyone is waiting for
|
|
// alerts, we need to notify them. Also (potentially) call the
|
|
// user supplied m_notify callback to let the client wake up its
|
|
// message loop to poll for alerts.
|
|
if (m_notify) m_notify();
|
|
|
|
<div style="background: #ffff00" width="100%"> m_condition.notify_all();
|
|
</div> }
|
|
else
|
|
{
|
|
lock.unlock();
|
|
}
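		// [editor's sketch, not part of the original source] the TODO above could
		// be addressed with a waiter counter: a hypothetical m_num_waiters member,
		// incremented before and decremented after m_condition.wait() in
		// wait_for_alert(), would let the notify above become:
		//
		//   if (m_num_waiters > 0) m_condition.notify_all();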
|
|
|
|
#ifndef TORRENT_DISABLE_EXTENSIONS
|
|
for (ses_extension_list_t::iterator i = m_ses_extensions.begin()
|
|
, end(m_ses_extensions.end()); i != end; ++i)
|
|
{
|
|
(*i)->on_alert(a);
|
|
}
|
|
#endif
|
|
}
|
|
|
|
#ifndef TORRENT_NO_DEPRECATE
|
|
|
|
bool alert_manager::maybe_dispatch(alert const& a)
|
|
{
|
|
if (m_dispatch)
|
|
{
|
|
m_dispatch(a.clone());
|
|
return true;
|
|
}
|
|
return false;
|
|
}
|
|
|
|
#ifdef __GNUC__
|
|
#pragma GCC diagnostic push
|
|
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(27)">../src/block_cache.cpp:1723</a></td><td>turn these return values into enums returns -1: block not in cache -2: out of memory</td></tr><tr id="27" style="display: none;" colspan="3"><td colspan="3"><h2>turn these return values into enums
|
|
returns
|
|
-1: block not in cache
|
|
-2: out of memory</h2><h4>../src/block_cache.cpp:1723</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> {
|
|
TORRENT_PIECE_ASSERT(!p.blocks[k].dirty, &p);
|
|
TORRENT_PIECE_ASSERT(!p.blocks[k].pending, &p);
|
|
TORRENT_PIECE_ASSERT(p.blocks[k].refcount == 0, &p);
|
|
}
|
|
TORRENT_PIECE_ASSERT(p.blocks[k].refcount >= 0, &p);
|
|
num_refcount += p.blocks[k].refcount;
|
|
}
|
|
TORRENT_PIECE_ASSERT(num_blocks == p.num_blocks, &p);
|
|
TORRENT_PIECE_ASSERT(num_pending <= p.refcount, &p);
|
|
TORRENT_PIECE_ASSERT(num_refcount == p.refcount, &p);
|
|
TORRENT_PIECE_ASSERT(num_dirty == p.num_dirty, &p);
|
|
}
|
|
TORRENT_ASSERT(m_read_cache_size == cached_read_blocks);
|
|
TORRENT_ASSERT(m_write_cache_size == cached_write_blocks);
|
|
TORRENT_ASSERT(m_pinned_blocks == num_pinned);
|
|
TORRENT_ASSERT(m_write_cache_size + m_read_cache_size <= in_use());
|
|
}
|
|
#endif
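// [editor's sketch, not part of the original source] the enum the TODO for
// copy_from_piece() asks for could look like this; the names are illustrative:
//
// enum copy_from_piece_result_t
// {
// 	block_not_in_cache = -1
// 	, copy_out_of_memory = -2
// };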
|
|
|
|
<div style="background: #ffff00" width="100%">
|
|
</div>int block_cache::copy_from_piece(cached_piece_entry* pe, disk_io_job* j
|
|
, bool expect_no_fail)
|
|
{
|
|
INVARIANT_CHECK;
|
|
TORRENT_UNUSED(expect_no_fail);
|
|
|
|
TORRENT_PIECE_ASSERT(j->buffer.disk_block == 0, pe);
|
|
TORRENT_PIECE_ASSERT(pe->in_use, pe);
|
|
|
|
// copy from the cache and update the last use timestamp
|
|
int block = j->d.io.offset / block_size();
|
|
int block_offset = j->d.io.offset & (block_size()-1);
|
|
int buffer_offset = 0;
|
|
int size = j->d.io.buffer_size;
|
|
int blocks_to_read = block_offset > 0 && (size > block_size() - block_offset) ? 2 : 1;
|
|
TORRENT_PIECE_ASSERT(size <= block_size(), pe);
|
|
const int start_block = block;
|
|
|
|
#if TORRENT_USE_ASSERTS
|
|
int piece_size = j->storage->files()->piece_size(j->piece);
|
|
int blocks_in_piece = (piece_size + block_size() - 1) / block_size();
|
|
TORRENT_PIECE_ASSERT(start_block < blocks_in_piece, pe);
|
|
#endif
|
|
|
|
// if there's no buffer, we don't have this block in
|
|
// the cache, and we're not currently reading it in either
|
|
// since it's not pending
|
|
|
|
if (inc_block_refcount(pe, start_block, ref_reading) == false)
|
|
{
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(28)">../src/escape_string.cpp:209</a></td><td>this should probably be moved into string_util.cpp</td></tr><tr id="28" style="display: none;" colspan="3"><td colspan="3"><h2>this should probably be moved into string_util.cpp</h2><h4>../src/escape_string.cpp:209</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> }
|
|
return false;
|
|
}
|
|
|
|
void convert_path_to_posix(std::string& path)
|
|
{
|
|
for (std::string::iterator i = path.begin()
|
|
, end(path.end()); i != end; ++i)
|
|
if (*i == '\\') *i = '/';
|
|
}
|
|
|
|
#ifdef TORRENT_WINDOWS
|
|
void convert_path_to_windows(std::string& path)
|
|
{
|
|
for (std::string::iterator i = path.begin()
|
|
, end(path.end()); i != end; ++i)
|
|
if (*i == '/') *i = '\\';
|
|
}
|
|
#endif
|
|
|
|
<div style="background: #ffff00" width="100%"> std::string read_until(char const*& str, char delim, char const* end)
|
|
</div> {
|
|
TORRENT_ASSERT(str <= end);
|
|
|
|
std::string ret;
|
|
while (str != end && *str != delim)
|
|
{
|
|
ret += *str;
|
|
++str;
|
|
}
|
|
// skip the delimiter as well
|
|
while (str != end && *str == delim) ++str;
|
|
return ret;
|
|
}
|
|
|
|
std::string maybe_url_encode(std::string const& url)
|
|
{
|
|
std::string protocol, host, auth, path;
|
|
int port;
|
|
error_code ec;
|
|
boost::tie(protocol, auth, host, port, path) = parse_url_components(url, ec);
|
|
if (ec) return url;
|
|
|
|
// first figure out if this url contains unencoded characters
|
|
if (!need_encoding(path.c_str(), path.size()))
|
|
return url;
|
|
|
|
char msg[TORRENT_MAX_PATH*4];
|
|
snprintf(msg, sizeof(msg), "%s://%s%s%s%s%s%s", protocol.c_str(), auth.c_str()
|
|
, auth.empty()?"":"@", host.c_str()
|
|
, port == -1 ? "" : ":"
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(29)">../src/file.cpp:567</a></td><td>test this on a FAT volume to see what error we get!</td></tr><tr id="29" style="display: none;" colspan="3"><td colspan="3"><h2>test this on a FAT volume to see what error we get!</h2><h4>../src/file.cpp:567</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> }
|
|
|
|
// fall back to making a copy
|
|
|
|
#else
|
|
|
|
std::string n_exist = convert_to_native(file);
|
|
std::string n_link = convert_to_native(link);
|
|
|
|
// assume posix's link() function exists
|
|
int ret = ::link(n_exist.c_str(), n_link.c_str());
|
|
|
|
if (ret == 0)
|
|
{
|
|
ec.clear();
|
|
return;
|
|
}
|
|
|
|
// most errors are passed through, except for the ones that indicate that
|
|
// hard links are not supported and require a copy.
|
|
<div style="background: #ffff00" width="100%"> if (errno != EMLINK || errno != EXDEV)
|
|
</div> {
|
|
// some error happened, report up to the caller
|
|
ec.assign(errno, system_category());
|
|
return;
|
|
}
|
|
|
|
// fall back to making a copy
|
|
|
|
#endif
|
|
|
|
// if we get here, we should copy the file
|
|
copy_file(file, link, ec);
|
|
}
|
|
|
|
bool is_directory(std::string const& f, error_code& ec)
|
|
{
|
|
ec.clear();
|
|
error_code e;
|
|
file_status s;
|
|
stat_file(f, &s, e);
|
|
if (!e && s.mode & file_status::directory) return true;
|
|
ec = e;
|
|
return false;
|
|
}
|
|
|
|
void recursive_copy(std::string const& old_path, std::string const& new_path, error_code& ec)
|
|
{
|
|
TORRENT_ASSERT(!ec);
|
|
if (is_directory(old_path, ec))
|
|
{
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(30)">../src/http_tracker_connection.cpp:379</a></td><td>returning a bool here is redundant. Instead this function should return the peer_entry</td></tr><tr id="30" style="display: none;" colspan="3"><td colspan="3"><h2>returning a bool here is redundant. Instead this function should
|
|
return the peer_entry</h2><h4>../src/http_tracker_connection.cpp:379</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> }
|
|
else
|
|
{
|
|
std::list<address> ip_list;
|
|
if (m_tracker_connection)
|
|
{
|
|
error_code ignore;
|
|
std::vector<tcp::endpoint> const& epts = m_tracker_connection->endpoints();
|
|
for (std::vector<tcp::endpoint>::const_iterator i = epts.begin()
|
|
, end(epts.end()); i != end; ++i)
|
|
{
|
|
ip_list.push_back(i->address());
|
|
}
|
|
}
|
|
|
|
cb->tracker_response(tracker_req(), m_tracker_ip, ip_list, resp);
|
|
}
|
|
close();
|
|
}
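	// [editor's sketch, not part of the original source] the TODO below suggests
	// folding the bool into the return value; with boost::optional (boost is
	// already a dependency) the signature could become:
	//
	//   boost::optional<peer_entry> extract_peer_info(bdecode_node const& info, error_code& ec);
	//
	// returning boost::none instead of false on a malformed peer dictionary.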
|
|
|
|
<div style="background: #ffff00" width="100%"> bool extract_peer_info(bdecode_node const& info, peer_entry& ret, error_code& ec)
|
|
</div> {
|
|
// extract peer id (if any)
|
|
if (info.type() != bdecode_node::dict_t)
|
|
{
|
|
ec.assign(errors::invalid_peer_dict, get_libtorrent_category());
|
|
return false;
|
|
}
|
|
bdecode_node i = info.dict_find_string("peer id");
|
|
if (i && i.string_length() == 20)
|
|
{
|
|
std::copy(i.string_ptr(), i.string_ptr()+20, ret.pid.begin());
|
|
}
|
|
else
|
|
{
|
|
// if there's no peer_id, just initialize it to a bunch of zeroes
|
|
std::fill_n(ret.pid.begin(), 20, 0);
|
|
}
|
|
|
|
// extract ip
|
|
i = info.dict_find_string("ip");
|
|
if (i == 0)
|
|
{
|
|
ec.assign(errors::invalid_tracker_response, get_libtorrent_category());
|
|
return false;
|
|
}
|
|
ret.hostname = i.string_value();
|
|
|
|
// extract port
|
|
i = info.dict_find_int("port");
|
|
if (i == 0)
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(31)">../src/instantiate_connection.cpp:43</a></td><td>peer_connection and tracker_connection should probably be flags</td></tr><tr id="31" style="display: none;" colspan="3"><td colspan="3"><h2>peer_connection and tracker_connection should probably be flags</h2><h4>../src/instantiate_connection.cpp:43</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"></pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(32)">../src/instantiate_connection.cpp:44</a></td><td>move this function into libtorrent::aux namespace</td></tr><tr id="32" style="display: none;" colspan="3"><td colspan="3"><h2>move this function into libtorrent::aux namespace</h2><h4>../src/instantiate_connection.cpp:44</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
|
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
|
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
|
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
|
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
|
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
|
POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
*/
|
|
|
|
#include "libtorrent/socket.hpp"
|
|
#include "libtorrent/session_settings.hpp"
|
|
#include "libtorrent/socket_type.hpp"
|
|
#include "libtorrent/utp_socket_manager.hpp"
|
|
#include "libtorrent/instantiate_connection.hpp"
|
|
#include <boost/shared_ptr.hpp>
|
|
#include <stdexcept>
|
|
|
|
namespace libtorrent
|
|
{
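	// [editor's sketch, not part of the original source] the two TODOs below could
	// be addressed by moving instantiate_connection() into the libtorrent::aux
	// namespace and collapsing the trailing bools into a flags argument, e.g.
	// (names are illustrative only):
	//
	//   enum instantiate_connection_flags_t
	//   {
	//   	connection_flag_peer = 1
	//   	, connection_flag_tracker = 2
	//   };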
|
|
<div style="background: #ffff00" width="100%"> bool instantiate_connection(io_service& ios
|
|
</div> , aux::proxy_settings const& ps, socket_type& s
|
|
, void* ssl_context
|
|
, utp_socket_manager* sm
|
|
, bool peer_connection
|
|
, bool tracker_connection)
|
|
{
|
|
#ifndef TORRENT_USE_OPENSSL
|
|
TORRENT_UNUSED(ssl_context);
|
|
#endif
|
|
|
|
if (sm)
|
|
{
|
|
utp_stream* str;
|
|
#ifdef TORRENT_USE_OPENSSL
|
|
if (ssl_context)
|
|
{
|
|
s.instantiate<ssl_stream<utp_stream> >(ios, ssl_context);
|
|
str = &s.get<ssl_stream<utp_stream> >()->next_layer();
|
|
}
|
|
else
|
|
#endif
|
|
{
|
|
s.instantiate<utp_stream>(ios);
|
|
str = s.get<utp_stream>();
|
|
}
|
|
str->set_impl(sm->new_utp_socket(str));
|
|
}
|
|
#if TORRENT_USE_I2P
|
|
else if (ps.type == settings_pack::i2p_proxy)
|
|
{
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(33)">../src/peer_connection.cpp:2382</a></td><td>this should probably be based on time instead of number of request messages. For a very high throughput connection, 300 may be a legitimate number of requests to have in flight when getting choked</td></tr><tr id="33" style="display: none;" colspan="3"><td colspan="3"><h2>this should probably be based on time instead of number
|
|
of request messages. For a very high throughput connection, 300
|
|
may be a legitimate number of requests to have in flight when
|
|
getting choked</h2><h4>../src/peer_connection.cpp:2382</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> , "piece: %d s: %d l: %d invalid request"
|
|
, r.piece , r.start , r.length);
|
|
#endif
|
|
|
|
write_reject_request(r);
|
|
++m_num_invalid_requests;
|
|
|
|
if (t->alerts().should_post<invalid_request_alert>())
|
|
{
|
|
// msvc 12 appears to deduce the rvalue reference template
|
|
// incorrectly for bool temporaries. So, create a dummy instance
|
|
bool peer_interested = bool(m_peer_interested);
|
|
t->alerts().emplace_alert<invalid_request_alert>(
|
|
t->get_handle(), m_remote, m_peer_id, r
|
|
, t->has_piece_passed(r.piece), peer_interested, false);
|
|
}
|
|
|
|
// every ten invalid request, remind the peer that it's choked
|
|
if (!m_peer_interested && m_num_invalid_requests % 10 == 0 && m_choked)
|
|
{
|
|
<div style="background: #ffff00" width="100%"> if (m_num_invalid_requests > 300 && !m_peer_choked
|
|
</div> && can_disconnect(error_code(errors::too_many_requests_when_choked
|
|
, get_libtorrent_category())))
|
|
{
|
|
disconnect(errors::too_many_requests_when_choked, op_bittorrent, 2);
|
|
return;
|
|
}
|
|
#ifndef TORRENT_DISABLE_LOGGING
|
|
peer_log(peer_log_alert::outgoing_message, "CHOKE");
|
|
#endif
|
|
write_choke();
|
|
}
|
|
|
|
return;
|
|
}
|
|
|
|
// if we have choked the client
|
|
// ignore the request
|
|
const int blocks_per_piece = static_cast<int>(
|
|
(t->torrent_file().piece_length() + t->block_size() - 1) / t->block_size());
|
|
|
|
	// disconnect peers that download more than foo times an allowed
|
|
// fast piece
|
|
if (m_choked && fast_idx != -1 && m_accept_fast_piece_cnt[fast_idx] >= 3 * blocks_per_piece
|
|
&& can_disconnect(error_code(errors::too_many_requests_when_choked, get_libtorrent_category())))
|
|
{
|
|
disconnect(errors::too_many_requests_when_choked, op_bittorrent, 2);
|
|
return;
|
|
}
|
|
|
|
if (m_choked && fast_idx == -1)
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(34)">../src/peer_connection.cpp:3116</a></td><td>since we throw away the queue entry once we issue the disk job, this may happen. Instead, we should keep the queue entry around, mark it as having been requested from disk and once the disk job comes back, discard it if it has been cancelled. Maybe even be able to cancel disk jobs?</td></tr><tr id="34" style="display: none;" colspan="3"><td colspan="3"><h2>since we throw away the queue entry once we issue
|
|
the disk job, this may happen. Instead, we should keep the
|
|
queue entry around, mark it as having been requested from
|
|
disk and once the disk job comes back, discard it if it has
|
|
been cancelled. Maybe even be able to cancel disk jobs?</h2><h4>../src/peer_connection.cpp:3116</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">
|
|
std::vector<peer_request>::iterator i
|
|
= std::find(m_requests.begin(), m_requests.end(), r);
|
|
|
|
if (i != m_requests.end())
|
|
{
|
|
m_counters.inc_stats_counter(counters::cancelled_piece_requests);
|
|
m_requests.erase(i);
|
|
|
|
if (m_requests.empty())
|
|
m_counters.inc_stats_counter(counters::num_peers_up_requests, -1);
|
|
|
|
#ifndef TORRENT_DISABLE_LOGGING
|
|
peer_log(peer_log_alert::outgoing_message, "REJECT_PIECE", "piece: %d s: %x l: %x cancelled"
|
|
, r.piece , r.start , r.length);
|
|
#endif
|
|
write_reject_request(r);
|
|
}
|
|
else
|
|
{
|
|
<div style="background: #ffff00" width="100%">#ifndef TORRENT_DISABLE_LOGGING
|
|
</div> peer_log(peer_log_alert::info, "INVALID_CANCEL", "got cancel not in the queue");
|
|
#endif
|
|
}
|
|
}
|
|
|
|
// -----------------------------
|
|
// --------- DHT PORT ----------
|
|
// -----------------------------
|
|
|
|
void peer_connection::incoming_dht_port(int listen_port)
|
|
{
|
|
TORRENT_ASSERT(is_single_thread());
|
|
INVARIANT_CHECK;
|
|
|
|
#ifndef TORRENT_DISABLE_LOGGING
|
|
peer_log(peer_log_alert::incoming_message, "DHT_PORT", "p: %d", listen_port);
|
|
#endif
|
|
#ifndef TORRENT_DISABLE_DHT
|
|
m_ses.add_dht_node(udp::endpoint(
|
|
m_remote.address(), listen_port));
|
|
#else
|
|
TORRENT_UNUSED(listen_port);
|
|
#endif
|
|
}
|
|
|
|
// -----------------------------
|
|
// --------- HAVE ALL ----------
|
|
// -----------------------------
|
|
|
|
void peer_connection::incoming_have_all()
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(35)">../src/peer_connection.cpp:4791</a></td><td>use a deadline_timer for timeouts. Don't rely on second_tick()! Hook this up to connect timeout as well. This would improve performance because of less work in second_tick(), and might let us remove ticking entirely eventually</td></tr><tr id="35" style="display: none;" colspan="3"><td colspan="3"><h2>use a deadline_timer for timeouts. Don't rely on second_tick()!
Hook this up to connect timeout as well. This would improve performance
because of less work in second_tick(), and might let us remove ticking
|
|
entirely eventually</h2><h4>../src/peer_connection.cpp:4791</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> connect_timeout += 20;
|
|
#endif
|
|
|
|
if (d > seconds(connect_timeout)
|
|
&& can_disconnect(error_code(errors::timed_out, get_libtorrent_category())))
|
|
{
|
|
#ifndef TORRENT_DISABLE_LOGGING
|
|
peer_log(peer_log_alert::info, "CONNECT_FAILED", "waited %d seconds"
|
|
, int(total_seconds(d)));
|
|
#endif
|
|
connect_failed(errors::timed_out);
|
|
return;
|
|
}
|
|
}
|
|
|
|
// if we can't read, it means we're blocked on the rate-limiter
|
|
// or the disk, not the peer itself. In this case, don't blame
|
|
// the peer and disconnect it
|
|
bool may_timeout = (m_channel_state[download_channel] & peer_info::bw_network) != 0;
|
|
|
|
<div style="background: #ffff00" width="100%"> if (may_timeout && d > seconds(timeout()) && !m_connecting && m_reading_bytes == 0
|
|
</div> && can_disconnect(error_code(errors::timed_out_inactivity, get_libtorrent_category())))
|
|
{
|
|
#ifndef TORRENT_DISABLE_LOGGING
|
|
peer_log(peer_log_alert::info, "LAST_ACTIVITY", "%d seconds ago"
|
|
, int(total_seconds(d)));
|
|
#endif
|
|
disconnect(errors::timed_out_inactivity, op_bittorrent);
|
|
return;
|
|
}
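		// [editor's sketch, not part of the original source] the TODO above
		// suggests a per-connection timer instead of per-second polling. With a
		// hypothetical deadline_timer member m_inactivity_timer, re-armed whenever
		// activity is seen, the check above would become:
		//
		//   m_inactivity_timer.expires_from_now(seconds(timeout()));
		//   m_inactivity_timer.async_wait(boost::bind(
		//   	&peer_connection::on_inactivity_timeout, self(), _1));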
|
|
|
|
// do not stall waiting for a handshake
|
|
int timeout = m_settings.get_int (settings_pack::handshake_timeout);
|
|
#if TORRENT_USE_I2P
|
|
timeout *= is_i2p(*m_socket) ? 4 : 1;
|
|
#endif
|
|
if (may_timeout
|
|
&& !m_connecting
|
|
&& in_handshake()
|
|
&& d > seconds(timeout))
|
|
{
|
|
#ifndef TORRENT_DISABLE_LOGGING
|
|
peer_log(peer_log_alert::info, "NO_HANDSHAKE", "waited %d seconds"
|
|
, int(total_seconds(d)));
|
|
#endif
|
|
disconnect(errors::timed_out_no_handshake, op_bittorrent);
|
|
return;
|
|
}
|
|
|
|
// disconnect peers that we unchoked, but
|
|
// they didn't send a request within 60 seconds.
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(36)">../src/peer_list.cpp:495</a></td><td>it would be nice if there was a way to iterate over these torrent_peer objects in the order they are allocated in the pool instead. It would probably be more efficient</td></tr><tr id="36" style="display: none;" colspan="3"><td colspan="3"><h2>it would be nice if there was a way to iterate over these
|
|
torrent_peer objects in the order they are allocated in the pool
|
|
instead. It would probably be more efficient</h2><h4>../src/peer_list.cpp:495</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> , int session_time, torrent_state* state)
|
|
{
|
|
TORRENT_ASSERT(is_single_thread());
|
|
INVARIANT_CHECK;
|
|
|
|
const int candidate_count = 10;
|
|
peers.reserve(candidate_count);
|
|
|
|
int erase_candidate = -1;
|
|
|
|
if (m_finished != state->is_finished)
|
|
recalculate_connect_candidates(state);
|
|
|
|
external_ip const& external = *state->ip;
|
|
int external_port = state->port;
|
|
|
|
if (m_round_robin >= int(m_peers.size())) m_round_robin = 0;
|
|
|
|
int max_peerlist_size = state->max_peerlist_size;
|
|
|
|
<div style="background: #ffff00" width="100%"> for (int iterations = (std::min)(int(m_peers.size()), 300);
|
|
</div> iterations > 0; --iterations)
|
|
{
|
|
++state->loop_counter;
|
|
|
|
if (m_round_robin >= int(m_peers.size())) m_round_robin = 0;
|
|
|
|
torrent_peer& pe = *m_peers[m_round_robin];
|
|
TORRENT_ASSERT(pe.in_use);
|
|
int current = m_round_robin;
|
|
|
|
// if the number of peers is growing large
|
|
// we need to start weeding.
|
|
|
|
if (int(m_peers.size()) >= max_peerlist_size * 0.95
|
|
&& max_peerlist_size > 0)
|
|
{
|
|
if (is_erase_candidate(pe)
|
|
&& (erase_candidate == -1
|
|
|| !compare_peer_erase(*m_peers[erase_candidate], pe)))
|
|
{
|
|
if (should_erase_immediately(pe))
|
|
{
|
|
if (erase_candidate > current) --erase_candidate;
|
|
erase_peer(m_peers.begin() + current, state);
|
|
continue;
|
|
}
|
|
else
|
|
{
|
|
erase_candidate = current;
|
|
}
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(37)">../src/piece_picker.cpp:1982</a></td><td>make the 2048 limit configurable</td></tr><tr id="37" style="display: none;" colspan="3"><td colspan="3"><h2>make the 2048 limit configurable</h2><h4>../src/piece_picker.cpp:1982</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> // returned block picks.
|
|
boost::uint32_t piece_picker::pick_pieces(bitfield const& pieces
|
|
, std::vector<piece_block>& interesting_blocks, int num_blocks
|
|
, int prefer_contiguous_blocks, torrent_peer* peer
|
|
, int options, std::vector<int> const& suggested_pieces
|
|
, int num_peers
|
|
, counters& pc
|
|
) const
|
|
{
|
|
TORRENT_ASSERT(peer == 0 || peer->in_use);
|
|
boost::uint32_t ret = 0;
|
|
|
|
	// prevent the number of partial pieces from growing indefinitely
|
|
// make this scale by the number of peers we have. For large
|
|
// scale clients, we would have more peers, and allow a higher
|
|
// threshold for the number of partials
|
|
	// deduct pad files because they cause partial pieces which are OK
|
|
// the second condition is to make sure we cap the number of partial
|
|
// _bytes_. The larger the pieces are, the fewer partial pieces we want.
|
|
// 2048 corresponds to 32 MiB
|
|
<div style="background: #ffff00" width="100%"> const int num_partials = int(m_downloads[piece_pos::piece_downloading].size())
|
|
</div> - m_num_pad_files;
|
|
if (num_partials > num_peers * 3 / 2
|
|
|| num_partials * m_blocks_per_piece > 2048)
|
|
{
|
|
// if we have too many partial pieces, prioritize completing
|
|
	// them. In order for this to have an effect, also disable
|
|
// prefer whole pieces (otherwise partial pieces would be de-prioritized)
|
|
options |= prioritize_partials;
|
|
prefer_contiguous_blocks = 0;
|
|
|
|
ret |= picker_log_alert::partial_ratio;
|
|
}
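	// [editor's sketch, not part of the original source] making the 2048 cap
	// configurable, as the TODO suggests, would mean reading it from the session
	// settings (assuming they were plumbed into pick_pieces()); the setting name
	// is hypothetical:
	//
	//   int const max_partial_blocks = sett.get_int(settings_pack::max_partial_piece_blocks);
	//   if (num_partials * m_blocks_per_piece > max_partial_blocks) { ... }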
|
|
|
|
if (prefer_contiguous_blocks) ret |= picker_log_alert::prefer_contiguous;
|
|
|
|
// only one of rarest_first and sequential can be set.
|
|
TORRENT_ASSERT(((options & rarest_first) ? 1 : 0)
|
|
+ ((options & sequential) ? 1 : 0) <= 1);
|
|
#ifdef TORRENT_EXPENSIVE_INVARIANT_CHECKS
|
|
TORRENT_PIECE_PICKER_INVARIANT_CHECK;
|
|
#endif
|
|
TORRENT_ASSERT(num_blocks > 0);
|
|
TORRENT_ASSERT(pieces.size() == m_piece_map.size());
|
|
|
|
TORRENT_ASSERT(!m_priority_boundries.empty() || m_dirty);
|
|
|
|
// this will be filled with blocks that we should not request
|
|
// unless we can't find num_blocks among the other ones.
|
|
std::vector<piece_block> backup_blocks;
|
|
std::vector<piece_block> backup_blocks2;
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(38)">../src/piece_picker.cpp:2614</a></td><td>the first_block returned here is the largest free range, not the first-fit range, which would be better</td></tr><tr id="38" style="display: none;" colspan="3"><td colspan="3"><h2>the first_block returned here is the largest free range, not
|
|
the first-fit range, which would be better</h2><h4>../src/piece_picker.cpp:2614</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> , end(m_block_info.end()); i != end; ++i)
|
|
{
|
|
TORRENT_ASSERT(i->peer == 0 || static_cast<torrent_peer*>(i->peer)->in_use);
|
|
}
|
|
}
|
|
#endif
|
|
|
|
void piece_picker::clear_peer(torrent_peer* peer)
|
|
{
|
|
for (std::vector<block_info>::iterator i = m_block_info.begin()
|
|
, end(m_block_info.end()); i != end; ++i)
|
|
{
|
|
if (i->peer == peer) i->peer = 0;
|
|
}
|
|
}
|
|
|
|
// the first bool is true if this is the only peer that has requested and downloaded
|
|
// blocks from this piece.
|
|
// the second bool is true if this is the only active peer that is requesting
|
|
// and downloading blocks from this piece. Active means having a connection.
|
|
<div style="background: #ffff00" width="100%"> boost::tuple<bool, bool, int, int> piece_picker::requested_from(
|
|
</div> piece_picker::downloading_piece const& p
|
|
, int num_blocks_in_piece, torrent_peer* peer) const
|
|
{
|
|
bool exclusive = true;
|
|
bool exclusive_active = true;
|
|
int contiguous_blocks = 0;
|
|
int max_contiguous = 0;
|
|
int first_block = 0;
|
|
block_info const* binfo = blocks_for_piece(p);
|
|
for (int j = 0; j < num_blocks_in_piece; ++j)
|
|
{
|
|
piece_picker::block_info const& info = binfo[j];
|
|
TORRENT_ASSERT(info.peer == 0 || static_cast<torrent_peer*>(info.peer)->in_use);
|
|
TORRENT_ASSERT(info.piece_index == p.index);
|
|
if (info.state == piece_picker::block_info::state_none)
|
|
{
|
|
++contiguous_blocks;
|
|
continue;
|
|
}
|
|
if (contiguous_blocks > max_contiguous)
|
|
{
|
|
max_contiguous = contiguous_blocks;
|
|
first_block = j - contiguous_blocks;
|
|
}
|
|
contiguous_blocks = 0;
|
|
if (info.peer != peer)
|
|
{
|
|
exclusive = false;
|
|
if (info.state == piece_picker::block_info::state_requested
|
|
&& info.peer != 0)
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(39)">../src/piece_picker.cpp:3395</a></td><td>it would be nice if this could be folded into lock_piece(). The main distinction is that this also maintains the m_num_passed counter and the passed_hash_check member. Is there ever a case where we call write_failed() without also locking the piece? Perhaps write_failed() should imply locking it.</td></tr><tr id="39" style="display: none;" colspan="3"><td colspan="3"><h2>it would be nice if this could be folded into lock_piece()
the main distinction is that this also maintains the m_num_passed
counter and the passed_hash_check member
Is there ever a case where we call write_failed() without also locking
|
|
the piece? Perhaps write_failed() should imply locking it.</h2><h4>../src/piece_picker.cpp:3395</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> int state = m_piece_map[piece].download_queue();
|
|
if (state == piece_pos::piece_open) return;
|
|
std::vector<downloading_piece>::iterator i = find_dl_piece(state, piece);
|
|
if (i == m_downloads[state].end()) return;
|
|
|
|
TORRENT_ASSERT(i->passed_hash_check == false);
|
|
if (i->passed_hash_check)
|
|
{
|
|
// it's not clear why this would happen,
|
|
// but it seems reasonable to not break the
|
|
// accounting over it.
|
|
i->passed_hash_check = false;
|
|
TORRENT_ASSERT(m_num_passed > 0);
|
|
--m_num_passed;
|
|
}
|
|
|
|
// prevent this piece from being picked until it's restored
|
|
i->locked = true;
|
|
}
|
|
|
|
<div style="background: #ffff00" width="100%"> void piece_picker::write_failed(piece_block block)
|
|
</div> {
|
|
TORRENT_PIECE_PICKER_INVARIANT_CHECK;
|
|
|
|
#if TORRENT_USE_INVARIANT_CHECKS
|
|
check_piece_state();
|
|
#endif
|
|
|
|
#ifdef TORRENT_PICKER_LOG
|
|
std::cerr << "[" << this << "] " << "write_failed( {" << block.piece_index << ", " << block.block_index << "} )" << std::endl;
|
|
#endif
|
|
|
|
int state = m_piece_map[block.piece_index].download_queue();
|
|
if (state == piece_pos::piece_open) return;
|
|
std::vector<downloading_piece>::iterator i = find_dl_piece(state, block.piece_index);
|
|
if (i == m_downloads[state].end()) return;
|
|
|
|
block_info* binfo = blocks_for_piece(*i);
|
|
block_info& info = binfo[block.block_index];
|
|
TORRENT_ASSERT(&info >= &m_block_info[0]);
|
|
TORRENT_ASSERT(&info < &m_block_info[0] + m_block_info.size());
|
|
TORRENT_ASSERT(info.piece_index == block.piece_index);
|
|
TORRENT_ASSERT(info.state == block_info::state_writing);
|
|
TORRENT_ASSERT(info.num_peers == 0);
|
|
|
|
TORRENT_ASSERT(i->writing > 0);
|
|
TORRENT_ASSERT(info.state == block_info::state_writing);
|
|
|
|
if (info.state == block_info::state_finished) return;
|
|
if (info.state == block_info::state_writing) --i->writing;
|
|
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(40)">../src/session_impl.cpp:463</a></td><td>is there a reason not to move all of this into init()? and just post it to the io_service?</td></tr><tr id="40" style="display: none;" colspan="3"><td colspan="3"><h2>is there a reason not to move all of this into init()? and just
|
|
post it to the io_service?</h2><h4>../src/session_impl.cpp:463</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> m_posting_torrent_updates = false;
|
|
#endif
|
|
m_udp_socket.set_rate_limit(m_settings.get_int(settings_pack::dht_upload_rate_limit));
|
|
|
|
m_udp_socket.subscribe(&m_utp_socket_manager);
|
|
m_udp_socket.subscribe(this);
|
|
m_udp_socket.subscribe(&m_tracker_manager);
|
|
|
|
#ifdef TORRENT_USE_OPENSSL
|
|
m_ssl_udp_socket.subscribe(&m_ssl_utp_socket_manager);
|
|
m_ssl_udp_socket.subscribe(this);
|
|
#endif
|
|
|
|
error_code ec;
|
|
m_listen_interface = tcp::endpoint(address_v4::any(), 0);
|
|
TORRENT_ASSERT_VAL(!ec, ec);
|
|
}
|
|
|
|
// This function is called by the creating thread, not in the message loop's
|
|
// / io_service thread.
|
|
<div style="background: #ffff00" width="100%"> void session_impl::start_session(settings_pack const& pack)
|
|
</div> {
|
|
if (pack.has_val(settings_pack::alert_mask))
|
|
{
|
|
m_alerts.set_alert_mask(pack.get_int(settings_pack::alert_mask));
|
|
}
|
|
|
|
#ifndef TORRENT_DISABLE_LOGGING
|
|
session_log("start session");
|
|
#endif
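		// [editor's sketch, not part of the original source] the TODO above asks
		// whether the rest of this setup could live in init(); one way would be to
		// defer it to the network thread from here:
		//
		//   m_io_service.post(boost::bind(&session_impl::init, this));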
|
|
|
|
error_code ec;
|
|
#ifdef TORRENT_USE_OPENSSL
|
|
m_ssl_ctx.set_verify_mode(boost::asio::ssl::context::verify_none, ec);
|
|
#if BOOST_VERSION >= 104700
|
|
#if OPENSSL_VERSION_NUMBER >= 0x90812f
|
|
aux::openssl_set_tlsext_servername_callback(m_ssl_ctx.native_handle()
|
|
, servername_callback);
|
|
aux::openssl_set_tlsext_servername_arg(m_ssl_ctx.native_handle(), this);
|
|
#endif // OPENSSL_VERSION_NUMBER
|
|
#endif // BOOST_VERSION
|
|
#endif
|
|
|
|
#ifndef TORRENT_DISABLE_DHT
|
|
m_next_dht_torrent = m_torrents.begin();
|
|
#endif
|
|
m_next_lsd_torrent = m_torrents.begin();
|
|
m_tcp_mapping[0] = -1;
|
|
m_tcp_mapping[1] = -1;
|
|
m_udp_mapping[0] = -1;
|
|
m_udp_mapping[1] = -1;
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(41)">../src/session_impl.cpp:1946</a></td><td>the udp socket(s) should be using the same generic mechanism and not be restricted to a single one. We should open one listen socket for each entry in the listen_interfaces list</td></tr><tr id="41" style="display: none;" colspan="3"><td colspan="3"><h2>the udp socket(s) should be using the same generic
mechanism and not be restricted to a single one.
We should open one listen socket for each entry in the
|
|
listen_interfaces list</h2><h4>../src/session_impl.cpp:1946</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> }
|
|
#endif // TORRENT_USE_OPENSSL
|
|
}
|
|
#endif // TORRENT_USE_IPV6
|
|
|
|
// set our main IPv4 and IPv6 interfaces
|
|
// used to send to the tracker
|
|
std::vector<ip_interface> ifs = enum_net_interfaces(m_io_service, ec);
|
|
for (std::vector<ip_interface>::const_iterator i = ifs.begin()
|
|
, end(ifs.end()); i != end; ++i)
|
|
{
|
|
address const& addr = i->interface_address;
|
|
if (addr.is_v6() && !is_local(addr) && !is_loopback(addr))
|
|
m_ipv6_interface = tcp::endpoint(addr, m_listen_interface.port());
|
|
else if (addr.is_v4() && !is_local(addr) && !is_loopback(addr))
|
|
m_ipv4_interface = tcp::endpoint(addr, m_listen_interface.port());
|
|
}
|
|
}
|
|
else
|
|
{
|
|
<div style="background: #ffff00" width="100%"> for (int i = 0; i < m_listen_interfaces.size(); ++i)
|
|
</div> {
|
|
std::string const& device = m_listen_interfaces[i].first;
|
|
int port = m_listen_interfaces[i].second;
|
|
|
|
int num_device_fails = 0;
|
|
|
|
#if TORRENT_USE_IPV6
|
|
const int first_family = 0;
|
|
#else
|
|
const int first_family = 1;
|
|
#endif
|
|
boost::asio::ip::tcp protocol[]
|
|
= { boost::asio::ip::tcp::v6(), boost::asio::ip::tcp::v4() };
|
|
|
|
for (int address_family = first_family; address_family < 2; ++address_family)
|
|
{
|
|
error_code err;
|
|
address test_family = address::from_string(device.c_str(), err);
|
|
if (!err
|
|
&& test_family.is_v4() != address_family
|
|
&& !is_any(test_family))
|
|
continue;
|
|
|
|
listen_socket_t s = setup_listener(device, protocol[address_family]
|
|
, port, flags, ec);
|
|
|
|
if (ec == error_code(boost::system::errc::no_such_device, generic_category()))
|
|
{
|
|
++num_device_fails;
|
|
continue;
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(42)">../src/session_impl.cpp:2049</a></td><td>use bind_to_device in udp_socket</td></tr><tr id="42" style="display: none;" colspan="3"><td colspan="3"><h2>use bind_to_device in udp_socket</h2><h4>../src/session_impl.cpp:2049</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> m_listen_interface.port(m_listen_interface.port() + 1);
|
|
--listen_port_retries;
|
|
goto retry;
|
|
}
|
|
if (m_alerts.should_post<listen_failed_alert>())
|
|
m_alerts.emplace_alert<listen_failed_alert>(
|
|
m_listen_interface.address().to_string()
|
|
, m_listen_interface.port()
|
|
, listen_failed_alert::bind
|
|
, ec, listen_failed_alert::tcp);
|
|
return;
|
|
}
|
|
|
|
#ifdef TORRENT_USE_OPENSSL
|
|
int ssl_port = m_settings.get_int(settings_pack::ssl_listen);
|
|
udp::endpoint ssl_bind_if(m_listen_interface.address(), ssl_port);
|
|
|
|
// if ssl port is 0, we don't want to listen on an SSL port
|
|
if (ssl_port != 0)
|
|
{
|
|
<div style="background: #ffff00" width="100%"> m_ssl_udp_socket.bind(ssl_bind_if, ec);
|
|
</div> if (ec)
|
|
{
|
|
#ifndef TORRENT_DISABLE_LOGGING
|
|
session_log("SSL: cannot bind to UDP interface \"%s\": %s"
|
|
, print_endpoint(m_listen_interface).c_str(), ec.message().c_str());
|
|
#endif
|
|
if (m_alerts.should_post<listen_failed_alert>())
|
|
{
|
|
error_code err;
|
|
m_alerts.emplace_alert<listen_failed_alert>(ssl_bind_if.address().to_string()
|
|
, ssl_port, listen_failed_alert::bind, ec, listen_failed_alert::utp_ssl);
|
|
}
|
|
ec.clear();
|
|
}
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(43)">../src/session_impl.cpp:2069</a></td><td>use bind_to_device in udp_socket</td></tr><tr id="43" style="display: none;" colspan="3"><td colspan="3"><h2>use bind_to_device in udp_socket</h2><h4>../src/session_impl.cpp:2069</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> if (ssl_port != 0)
|
|
{
|
|
m_ssl_udp_socket.bind(ssl_bind_if, ec);
|
|
if (ec)
|
|
{
|
|
#ifndef TORRENT_DISABLE_LOGGING
|
|
session_log("SSL: cannot bind to UDP interface \"%s\": %s"
|
|
, print_endpoint(m_listen_interface).c_str(), ec.message().c_str());
|
|
#endif
|
|
if (m_alerts.should_post<listen_failed_alert>())
|
|
{
|
|
error_code err;
|
|
m_alerts.emplace_alert<listen_failed_alert>(ssl_bind_if.address().to_string()
|
|
, ssl_port, listen_failed_alert::bind, ec, listen_failed_alert::utp_ssl);
|
|
}
|
|
ec.clear();
|
|
}
|
|
}
|
|
#endif // TORRENT_USE_OPENSSL
|
|
|
|
<div style="background: #ffff00" width="100%"> m_udp_socket.bind(udp::endpoint(m_listen_interface.address()
|
|
</div> , m_listen_interface.port()), ec);
|
|
if (ec)
|
|
{
|
|
#ifndef TORRENT_DISABLE_LOGGING
|
|
session_log("cannot bind to UDP interface \"%s\": %s"
|
|
, print_endpoint(m_listen_interface).c_str(), ec.message().c_str());
|
|
#endif
|
|
if (listen_port_retries > 0)
|
|
{
|
|
m_listen_interface.port(m_listen_interface.port() + 1);
|
|
--listen_port_retries;
|
|
goto retry;
|
|
}
|
|
if (m_alerts.should_post<listen_failed_alert>())
|
|
{
|
|
error_code err;
|
|
m_alerts.emplace_alert<listen_failed_alert>(m_listen_interface.address().to_string()
|
|
, m_listen_interface.port()
|
|
, listen_failed_alert::bind
|
|
, ec, listen_failed_alert::udp);
|
|
}
|
|
return;
|
|
}
|
|
else
|
|
{
|
|
m_external_udp_port = m_udp_socket.local_port();
|
|
maybe_update_udp_mapping(0, m_listen_interface.port(), m_listen_interface.port());
|
|
maybe_update_udp_mapping(1, m_listen_interface.port(), m_listen_interface.port());
|
|
}
|
|
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(44)">../src/session_impl.cpp:3576</a></td><td>make a list for torrents that want to be announced on the DHT so we don't have to loop over all torrents, just to find the ones that want to announce</td></tr><tr id="44" style="display: none;" colspan="3"><td colspan="3"><h2>make a list for torrents that want to be announced on the DHT so we
|
|
don't have to loop over all torrents, just to find the ones that want to announce</h2><h4>../src/session_impl.cpp:3576</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> if (!m_dht_torrents.empty())
|
|
{
|
|
boost::shared_ptr<torrent> t;
|
|
do
|
|
{
|
|
t = m_dht_torrents.front().lock();
|
|
m_dht_torrents.pop_front();
|
|
} while (!t && !m_dht_torrents.empty());
|
|
|
|
if (t)
|
|
{
|
|
t->dht_announce();
|
|
return;
|
|
}
|
|
}
|
|
if (m_torrents.empty()) return;
|
|
|
|
if (m_next_dht_torrent == m_torrents.end())
|
|
m_next_dht_torrent = m_torrents.begin();
|
|
m_next_dht_torrent->second->dht_announce();
|
|
<div style="background: #ffff00" width="100%"> ++m_next_dht_torrent;
|
|
</div> if (m_next_dht_torrent == m_torrents.end())
|
|
m_next_dht_torrent = m_torrents.begin();
|
|
}
|
|
#endif
|
|
|
|
void session_impl::on_lsd_announce(error_code const& e)
|
|
{
|
|
#if defined TORRENT_ASIO_DEBUGGING
|
|
complete_async("session_impl::on_lsd_announce");
|
|
#endif
|
|
m_stats_counters.inc_stats_counter(counters::on_lsd_counter);
|
|
TORRENT_ASSERT(is_single_thread());
|
|
if (e) return;
|
|
|
|
if (m_abort) return;
|
|
|
|
#if defined TORRENT_ASIO_DEBUGGING
|
|
add_outstanding_async("session_impl::on_lsd_announce");
|
|
#endif
|
|
// announce on local network every 5 minutes
|
|
int delay = (std::max)(m_settings.get_int(settings_pack::local_service_announce_interval)
|
|
/ (std::max)(int(m_torrents.size()), 1), 1);
|
|
error_code ec;
|
|
m_lsd_announce_timer.expires_from_now(seconds(delay), ec);
|
|
m_lsd_announce_timer.async_wait(
|
|
bind(&session_impl::on_lsd_announce, this, _1));
|
|
|
|
if (m_torrents.empty()) return;
|
|
|
|
if (m_next_lsd_torrent == m_torrents.end())
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(45)">../src/session_impl.cpp:6057</a></td><td>this should be factored into the udp socket, so we only have the code once</td></tr><tr id="45" style="display: none;" colspan="3"><td colspan="3"><h2>this should be factored into the udp socket, so we only have the
|
|
code once</h2><h4>../src/session_impl.cpp:6057</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> {
|
|
return upload_rate_limit(m_local_peer_class);
|
|
}
|
|
|
|
int session_impl::local_download_rate_limit() const
|
|
{
|
|
return download_rate_limit(m_local_peer_class);
|
|
}
|
|
|
|
int session_impl::upload_rate_limit() const
|
|
{
|
|
return upload_rate_limit(m_global_class);
|
|
}
|
|
|
|
int session_impl::download_rate_limit() const
|
|
{
|
|
return download_rate_limit(m_global_class);
|
|
}
|
|
#endif
|
|
|
|
<div style="background: #ffff00" width="100%"> void session_impl::update_peer_tos()
|
|
</div> {
|
|
error_code ec;
|
|
|
|
#if TORRENT_USE_IPV6 && defined IPV6_TCLASS
|
|
if (m_udp_socket.local_endpoint(ec).address().is_v6())
|
|
m_udp_socket.set_option(traffic_class(m_settings.get_int(settings_pack::peer_tos)), ec);
|
|
else
|
|
#endif
|
|
m_udp_socket.set_option(type_of_service(m_settings.get_int(settings_pack::peer_tos)), ec);
|
|
|
|
#ifdef TORRENT_USE_OPENSSL
|
|
#if TORRENT_USE_IPV6 && defined IPV6_TCLASS
|
|
if (m_ssl_udp_socket.local_endpoint(ec).address().is_v6())
|
|
m_ssl_udp_socket.set_option(traffic_class(m_settings.get_int(settings_pack::peer_tos)), ec);
|
|
else
|
|
#endif
|
|
m_ssl_udp_socket.set_option(type_of_service(m_settings.get_int(settings_pack::peer_tos)), ec);
|
|
#endif
|
|
|
|
#ifndef TORRENT_DISABLE_LOGGING
|
|
session_log(">>> SET_TOS [ udp_socket tos: %x e: %s ]"
|
|
, m_settings.get_int(settings_pack::peer_tos)
|
|
, ec.message().c_str());
|
|
#endif
|
|
}
|
|
|
|
void session_impl::update_user_agent()
|
|
{
|
|
		// replace all occurrences of '\n' with ' '.
|
|
std::string agent = m_settings.get_str(settings_pack::user_agent);
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(46)">../src/session_impl.cpp:6761</a></td><td>perhaps DHT logging should be disabled by TORRENT_DISABLE_LOGGING too</td></tr><tr id="46" style="display: none;" colspan="3"><td colspan="3"><h2>perhaps DHT logging should be disabled by TORRENT_DISABLE_LOGGING
|
|
too</h2><h4>../src/session_impl.cpp:6761</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> void session_impl::get_peers(sha1_hash const& ih)
|
|
{
|
|
if (!m_alerts.should_post<dht_get_peers_alert>()) return;
|
|
m_alerts.emplace_alert<dht_get_peers_alert>(ih);
|
|
}
|
|
|
|
void session_impl::announce(sha1_hash const& ih, address const& addr
|
|
, int port)
|
|
{
|
|
if (!m_alerts.should_post<dht_announce_alert>()) return;
|
|
m_alerts.emplace_alert<dht_announce_alert>(addr, port, ih);
|
|
}
|
|
|
|
void session_impl::outgoing_get_peers(sha1_hash const& target
|
|
, sha1_hash const& sent_target, udp::endpoint const& ep)
|
|
{
|
|
if (!m_alerts.should_post<dht_outgoing_get_peers_alert>()) return;
|
|
m_alerts.emplace_alert<dht_outgoing_get_peers_alert>(target, sent_target, ep);
|
|
}
|
|
|
|
<div style="background: #ffff00" width="100%"> TORRENT_FORMAT(3,4)
|
|
</div> void session_impl::log(libtorrent::dht::dht_logger::module_t m, char const* fmt, ...)
|
|
{
|
|
if (!m_alerts.should_post<dht_log_alert>()) return;
|
|
|
|
va_list v;
|
|
va_start(v, fmt);
|
|
char buf[1024];
|
|
vsnprintf(buf, sizeof(buf), fmt, v);
|
|
va_end(v);
|
|
m_alerts.emplace_alert<dht_log_alert>(static_cast<dht_log_alert::dht_module_t>(m), buf);
|
|
}
|
|
|
|
void session_impl::log_packet(message_direction_t dir, char const* pkt, int len
|
|
, udp::endpoint node)
|
|
{
|
|
if (!m_alerts.should_post<dht_pkt_alert>()) return;
|
|
|
|
dht_pkt_alert::direction_t d = dir == dht_logger::incoming_message
|
|
? dht_pkt_alert::incoming : dht_pkt_alert::outgoing;
|
|
|
|
m_alerts.emplace_alert<dht_pkt_alert>(pkt, len, d, node);
|
|
}
|
|
|
|
bool session_impl::on_dht_request(char const* query, int query_len
|
|
, dht::msg const& request, entry& response)
|
|
{
|
|
#ifndef TORRENT_DISABLE_EXTENSIONS
|
|
if (query_len > max_dht_query_length) return false;
|
|
|
|
for (m_extension_dht_queries_t::iterator i = m_extension_dht_queries.begin();
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(47)">../src/storage.cpp:1069</a></td><td>we probably need to do this unconditionally in this function. Even if the resume data file appears stale, we need to create these hard links, right?</td></tr><tr id="47" style="display: none;" colspan="3"><td colspan="3"><h2>we probably need to do this unconditionally in this function.
|
|
Even if the resume data file appears stale, we need to create these
|
|
hard links, right?</h2><h4>../src/storage.cpp:1069</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> {
|
|
ec.ec = errors::mismatching_file_size;
|
|
ec.file = i;
|
|
ec.operation = storage_error::none;
|
|
return false;
|
|
}
|
|
|
|
if (settings().get_bool(settings_pack::ignore_resume_timestamps)) continue;
|
|
|
|
// allow some slack, because of FAT volumes
|
|
if (expected_time != 0 &&
|
|
(file_time > expected_time + 5 * 60 || file_time < expected_time - 5))
|
|
{
|
|
ec.ec = errors::mismatching_file_timestamp;
|
|
ec.file = i;
|
|
ec.operation = storage_error::stat;
|
|
return false;
|
|
}
|
|
}
|
|
|
|
<div style="background: #ffff00" width="100%">#ifndef TORRENT_DISABLE_MUTABLE_TORRENTS
|
|
</div> if (links)
|
|
{
|
|
// if this is a mutable torrent, and we need to pick up some files
|
|
// from other torrents, do that now. Note that there is an inherent
|
|
// race condition here. We checked if the files existed on a different
|
|
// thread a while ago. These files may no longer exist or may have been
|
|
// moved. If so, we just fail. The user is responsible to not touch
|
|
// other torrents until a new mutable torrent has been completely
|
|
// added.
|
|
int idx = 0;
|
|
for (std::vector<std::string>::const_iterator i = links->begin();
|
|
i != links->end(); ++i, ++idx)
|
|
{
|
|
if (i->empty()) continue;
|
|
|
|
error_code err;
|
|
std::string file_path = fs.file_path(idx, m_save_path);
|
|
hard_link(*i, file_path, err);
|
|
|
|
// if the file already exists, that's not an error
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(48)">../src/storage.cpp:1093</a></td><td>is this risky? The upper layer will assume we have the whole file. Perhaps we should verify that at least the size of the file is correct</td></tr><tr id="48" style="display: none;" colspan="3"><td colspan="3"><h2>is this risky? The upper layer will assume we have the
|
|
whole file. Perhaps we should verify that at least the size
|
|
of the file is correct</h2><h4>../src/storage.cpp:1093</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> if (links)
|
|
{
|
|
// if this is a mutable torrent, and we need to pick up some files
|
|
// from other torrents, do that now. Note that there is an inherent
|
|
// race condition here. We checked if the files existed on a different
|
|
// thread a while ago. These files may no longer exist or may have been
|
|
// moved. If so, we just fail. The user is responsible to not touch
|
|
// other torrents until a new mutable torrent has been completely
|
|
// added.
|
|
int idx = 0;
|
|
for (std::vector<std::string>::const_iterator i = links->begin();
|
|
i != links->end(); ++i, ++idx)
|
|
{
|
|
if (i->empty()) continue;
|
|
|
|
error_code err;
|
|
std::string file_path = fs.file_path(idx, m_save_path);
|
|
hard_link(*i, file_path, err);
|
|
|
|
// if the file already exists, that's not an error
|
|
<div style="background: #ffff00" width="100%"> if (!err || err == boost::system::errc::file_exists)
|
|
</div> continue;
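				// [editor's sketch, not part of the original source] the size check the
				// TODO above suggests, before trusting an already-existing file:
				//
				//   file_status st;
				//   stat_file(file_path, &st, err);
				//   if (err || st.file_size < fs.file_size(idx))
				//   	{ /* treat as a failed link rather than continuing */ }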
|
|
|
|
ec.ec = err;
|
|
ec.file = idx;
|
|
ec.operation = storage_error::hard_link;
|
|
return false;
|
|
}
|
|
}
|
|
#endif // TORRENT_DISABLE_MUTABLE_TORRENTS
|
|
|
|
return true;
|
|
}
|
|
|
|
int default_storage::move_storage(std::string const& sp, int flags, storage_error& ec)
|
|
{
|
|
int ret = piece_manager::no_error;
|
|
std::string save_path = complete(sp);
|
|
|
|
// check to see if any of the files exist
|
|
error_code e;
|
|
file_storage const& f = files();
|
|
|
|
file_status s;
|
|
if (flags == fail_if_exist)
|
|
{
|
|
stat_file(save_path, &s, e);
|
|
if (e != boost::system::errc::no_such_file_or_directory)
|
|
{
|
|
// the directory exists, check all the files
|
|
for (int i = 0; i < f.num_files(); ++i)
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(49)">../src/torrent.cpp:681</a></td><td>post alert</td></tr><tr id="49" style="display: none;" colspan="3"><td colspan="3"><h2>post alert</h2><h4>../src/torrent.cpp:681</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> if (new_gauge_state == m_current_gauge_state) return;
|
|
|
|
if (m_current_gauge_state != no_gauge_state)
|
|
inc_stats_counter(m_current_gauge_state + counters::num_checking_torrents, -1);
|
|
if (new_gauge_state != no_gauge_state)
|
|
inc_stats_counter(new_gauge_state + counters::num_checking_torrents, 1);
|
|
|
|
m_current_gauge_state = new_gauge_state;
|
|
}
|
|
|
|
void torrent::leave_seed_mode(bool skip_checking)
|
|
{
|
|
if (!m_seed_mode) return;
|
|
|
|
if (!skip_checking)
|
|
{
|
|
// this means the user promised we had all the
|
|
// files, but it turned out we didn't. This is
|
|
// an error.
|
|
|
|
<div style="background: #ffff00" width="100%">
|
|
</div>#ifndef TORRENT_DISABLE_LOGGING
|
|
debug_log("*** FAILED SEED MODE, rechecking");
|
|
#endif
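			// [editor's sketch, not part of the original source] the "post alert"
			// TODO could be satisfied with a dedicated alert type; the name
			// seed_mode_failed_alert is hypothetical:
			//
			//   if (alerts().should_post<seed_mode_failed_alert>())
			//   	alerts().emplace_alert<seed_mode_failed_alert>(get_handle());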
|
|
}
|
|
|
|
#ifndef TORRENT_DISABLE_LOGGING
|
|
debug_log("*** LEAVING SEED MODE (%s)"
|
|
, skip_checking ? "as seed" : "as non-seed");
|
|
#endif
|
|
m_seed_mode = false;
|
|
// seed is false if we turned out not
|
|
// to be a seed after all
|
|
if (!skip_checking)
|
|
{
|
|
m_have_all = false;
|
|
set_state(torrent_status::downloading);
|
|
force_recheck();
|
|
}
|
|
m_num_verified = 0;
|
|
m_verified.clear();
|
|
m_verifying.clear();
|
|
|
|
set_need_save_resume();
|
|
}
|
|
|
|
void torrent::verified(int piece)
|
|
{
|
|
TORRENT_ASSERT(piece < int(m_verified.size()));
|
|
TORRENT_ASSERT(piece >= 0);
|
|
TORRENT_ASSERT(m_verified.get_bit(piece) == false);
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(50)">../src/torrent.cpp:1907</a></td><td>add a unit test where we don't have metadata, connect to a peer that sends a bitfield that's too large, then we get the metadata</td></tr><tr id="50" style="display: none;" colspan="3"><td colspan="3"><h2>add a unit test where we don't have metadata, connect to a peer
|
|
that sends a bitfield that's too large, then we get the metadata</h2><h4>../src/torrent.cpp:1907</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> {
|
|
read_resume_data(m_resume_data->node);
|
|
}
|
|
}
|
|
|
|
#if TORRENT_USE_ASSERTS
|
|
m_resume_data_loaded = true;
|
|
#endif
|
|
|
|
construct_storage();
|
|
|
|
if (m_share_mode && valid_metadata())
|
|
{
|
|
// in share mode, all pieces have their priorities initialized to 0
|
|
m_file_priority.clear();
|
|
m_file_priority.resize(m_torrent_file->num_files(), 0);
|
|
}
|
|
|
|
// it's important to initialize the peers early, because this is what will
|
|
// fix up their have-bitmasks to have the correct size
|
|
<div style="background: #ffff00" width="100%"> if (!m_connections_initialized)
|
|
</div> {
|
|
m_connections_initialized = true;
|
|
// all peer connections have to initialize themselves now that the metadata
|
|
// is available
|
|
// copy the peer list since peers may disconnect and invalidate
|
|
// m_connections as we initialize them
|
|
std::vector<peer_connection*> peers = m_connections;
|
|
for (torrent::peer_iterator i = peers.begin();
|
|
i != peers.end(); ++i)
|
|
{
|
|
peer_connection* pc = *i;
|
|
if (pc->is_disconnecting()) continue;
|
|
pc->on_metadata_impl();
|
|
if (pc->is_disconnecting()) continue;
|
|
pc->init();
|
|
}
|
|
}
|
|
|
|
// if we've already loaded file priorities, don't load piece priorities,
|
|
// they will interfere.
|
|
if (!m_seed_mode && m_resume_data && m_file_priority.empty())
|
|
{
|
|
bdecode_node piece_priority = m_resume_data->node
|
|
.dict_find_string("piece_priority");
|
|
|
|
if (piece_priority && piece_priority.string_length()
|
|
== m_torrent_file->num_pieces())
|
|
{
|
|
char const* p = piece_priority.string_ptr();
|
|
for (int i = 0; i < piece_priority.string_length(); ++i)
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(51)">../src/torrent.cpp:4941</a></td><td>abort lookups this torrent has made via the session host resolver interface</td></tr><tr id="51" style="display: none;" colspan="3"><td colspan="3"><h2>abort lookups this torrent has made via the
|
|
session host resolver interface</h2><h4>../src/torrent.cpp:4941</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> // files belonging to the torrents
|
|
disconnect_all(errors::torrent_aborted, op_bittorrent);
|
|
|
|
// post a message to the main thread to destruct
|
|
// the torrent object from there
|
|
if (m_storage.get())
|
|
{
|
|
inc_refcount("release_files");
|
|
m_ses.disk_thread().async_stop_torrent(m_storage.get()
|
|
, boost::bind(&torrent::on_cache_flushed, shared_from_this(), _1));
|
|
}
|
|
else
|
|
{
|
|
TORRENT_ASSERT(m_abort);
|
|
if (alerts().should_post<cache_flushed_alert>())
|
|
alerts().emplace_alert<cache_flushed_alert>(get_handle());
|
|
}
|
|
|
|
m_storage.reset();
|
|
|
|
<div style="background: #ffff00" width="100%">
|
|
</div> if (!m_apply_ip_filter)
|
|
{
|
|
inc_stats_counter(counters::non_filter_torrents, -1);
|
|
m_apply_ip_filter = true;
|
|
}
|
|
|
|
m_allow_peers = false;
|
|
m_auto_managed = false;
|
|
update_state_list();
|
|
for (int i = 0; i < aux::session_interface::num_torrent_lists; ++i)
|
|
{
|
|
if (!m_links[i].in_list()) continue;
|
|
m_links[i].unlink(m_ses.torrent_list(i), i);
|
|
}
|
|
// don't re-add this torrent to the state-update list
|
|
m_state_subscription = false;
|
|
}
|
|
|
|
void torrent::super_seeding(bool on)
|
|
{
|
|
if (on == m_super_seeding) return;
|
|
|
|
m_super_seeding = on;
|
|
set_need_save_resume();
|
|
|
|
if (m_super_seeding) return;
|
|
|
|
// disable super seeding for all peers
|
|
for (peer_iterator i = begin(); i != end(); ++i)
|
|
{
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(52)">../src/torrent.cpp:8127</a></td><td>if peer is a really good peer, maybe we shouldn't disconnect it perhaps this logic should be disabled if we have too many idle peers (with some definition of idle)</td></tr><tr id="52" style="display: none;" colspan="3"><td colspan="3"><h2>if peer is a really good peer, maybe we shouldn't disconnect it
|
|
perhaps this logic should be disabled if we have too many idle peers
|
|
(with some definition of idle)</h2><h4>../src/torrent.cpp:8127</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">#ifndef TORRENT_DISABLE_LOGGING
|
|
debug_log("incoming peer (%d)", int(m_connections.size()));
|
|
#endif
|
|
|
|
#ifdef TORRENT_DEBUG
|
|
error_code ec;
|
|
TORRENT_ASSERT(p->remote() == p->get_socket()->remote_endpoint(ec) || ec);
|
|
#endif
|
|
|
|
TORRENT_ASSERT(p->peer_info_struct() != NULL);
|
|
|
|
// we need to do this after we've added the peer to the peer_list
|
|
// since that's when the peer is assigned its peer_info object,
|
|
// which holds the rank
|
|
if (maybe_replace_peer)
|
|
{
|
|
// now, find the lowest rank peer and disconnect that
|
|
// if it's lower rank than the incoming connection
|
|
peer_connection* peer = find_lowest_ranking_peer();
|
|
|
|
<div style="background: #ffff00" width="100%"> if (peer && peer->peer_rank() < p->peer_rank())
|
|
</div> {
|
|
#ifndef TORRENT_DISABLE_LOGGING
|
|
debug_log("CLOSING CONNECTION \"%s\" peer list full (low peer rank) "
|
|
"connections: %d limit: %d"
|
|
, print_endpoint(peer->remote()).c_str()
|
|
, int(m_connections.size())
|
|
, m_max_connections);
|
|
#endif
|
|
peer->disconnect(errors::too_many_connections, op_bittorrent);
|
|
p->peer_disconnected_other();
|
|
}
|
|
else
|
|
{
|
|
#ifndef TORRENT_DISABLE_LOGGING
|
|
debug_log("CLOSING CONNECTION \"%s\" peer list full (low peer rank) "
|
|
"connections: %d limit: %d"
|
|
, print_endpoint(p->remote()).c_str()
|
|
, int(m_connections.size())
|
|
, m_max_connections);
|
|
#endif
|
|
p->disconnect(errors::too_many_connections, op_bittorrent);
|
|
			// we have to do this here because from the peer's point of view,
			// it wasn't really attached to the torrent, but we do need
|
|
// to let peer_list know we're removing it
|
|
remove_peer(p);
|
|
return false;
|
|
}
|
|
}
|
|
|
|
#if TORRENT_USE_INVARIANT_CHECKS
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(53)">../src/tracker_manager.cpp:200</a></td><td>some of these arguments could probably be moved to the tracker request itself. like the ip_filter and settings</td></tr><tr id="53" style="display: none;" colspan="3"><td colspan="3"><h2>some of these arguments could probably be moved to the
|
|
tracker request itself. like the ip_filter and settings</h2><h4>../src/tracker_manager.cpp:200</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> , interval == 0 ? min_interval : interval);
|
|
close();
|
|
}
|
|
|
|
void tracker_connection::sent_bytes(int bytes)
|
|
{
|
|
m_man.sent_bytes(bytes);
|
|
}
|
|
|
|
void tracker_connection::received_bytes(int bytes)
|
|
{
|
|
m_man.received_bytes(bytes);
|
|
}
|
|
|
|
void tracker_connection::close()
|
|
{
|
|
cancel();
|
|
m_man.remove_request(this);
|
|
}
|
|
|
|
<div style="background: #ffff00" width="100%"> tracker_manager::tracker_manager(class udp_socket& sock
|
|
</div> , counters& stats_counters
|
|
, resolver_interface& resolver
|
|
, aux::session_settings const& sett
|
|
#if !defined TORRENT_DISABLE_LOGGING || TORRENT_USE_ASSERTS
|
|
, aux::session_logger& ses
|
|
#endif
|
|
)
|
|
: m_udp_socket(sock)
|
|
, m_host_resolver(resolver)
|
|
, m_settings(sett)
|
|
, m_stats_counters(stats_counters)
|
|
#if !defined TORRENT_DISABLE_LOGGING || TORRENT_USE_ASSERTS
|
|
, m_ses(ses)
|
|
#endif
|
|
, m_abort(false)
|
|
{}
|
|
|
|
tracker_manager::~tracker_manager()
|
|
{
|
|
TORRENT_ASSERT(m_abort);
|
|
abort_all_requests(true);
|
|
}
|
|
|
|
void tracker_manager::sent_bytes(int bytes)
|
|
{
|
|
TORRENT_ASSERT(m_ses.is_single_thread());
|
|
m_stats_counters.inc_stats_counter(counters::sent_tracker_bytes, bytes);
|
|
}
|
|
|
|
void tracker_manager::received_bytes(int bytes)
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(54)">../src/udp_socket.cpp:810</a></td><td>the udp_socket should really just be a single socket, and the session should support having more than one, just like with TCP sockets for now, just make bind failures non-fatal</td></tr><tr id="54" style="display: none;" colspan="3"><td colspan="3"><h2>the udp_socket should really just be a single socket, and the
|
|
session should support having more than one, just like with TCP sockets
|
|
for now, just make bind failures non-fatal</h2><h4>../src/udp_socket.cpp:810</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> {
|
|
m_ipv4_sock.open(udp::v4(), ec);
|
|
if (ec) return;
|
|
|
|
// this is best-effort. ignore errors
|
|
error_code err;
|
|
#ifdef TORRENT_WINDOWS
|
|
m_ipv4_sock.set_option(exclusive_address_use(true), err);
|
|
#endif
|
|
m_ipv4_sock.set_option(boost::asio::socket_base::reuse_address(true), err);
|
|
|
|
m_ipv4_sock.bind(ep, ec);
|
|
if (ec) return;
|
|
udp::socket::non_blocking_io ioc(true);
|
|
m_ipv4_sock.io_control(ioc, ec);
|
|
if (ec) return;
|
|
setup_read(&m_ipv4_sock);
|
|
}
|
|
|
|
#if TORRENT_USE_IPV6
|
|
<div style="background: #ffff00" width="100%"> if (supports_ipv6() && (ep.address().is_v6() || is_any(ep.address())))
|
|
</div> {
|
|
udp::endpoint ep6 = ep;
|
|
if (is_any(ep.address())) ep6.address(address_v6::any());
|
|
m_ipv6_sock.open(udp::v6(), ec);
|
|
if (ec) return;
|
|
|
|
// this is best-effort. ignore errors
|
|
error_code err;
|
|
#ifdef TORRENT_WINDOWS
|
|
m_ipv6_sock.set_option(exclusive_address_use(true), err);
|
|
#endif
|
|
m_ipv6_sock.set_option(boost::asio::socket_base::reuse_address(true), err);
|
|
m_ipv6_sock.set_option(boost::asio::ip::v6_only(true), err);
|
|
|
|
m_ipv6_sock.bind(ep6, ec);
|
|
if (ec != error_code(boost::system::errc::address_not_available
|
|
, boost::system::generic_category()))
|
|
{
|
|
if (ec) return;
|
|
udp::socket::non_blocking_io ioc(true);
|
|
m_ipv6_sock.io_control(ioc, ec);
|
|
if (ec) return;
|
|
setup_read(&m_ipv6_sock);
|
|
}
|
|
else
|
|
{
|
|
ec.clear();
|
|
}
|
|
}
|
|
#endif
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(55)">../src/udp_tracker_connection.cpp:83</a></td><td>support authentication here. tracker_req().auth</td></tr><tr id="55" style="display: none;" colspan="3"><td colspan="3"><h2>support authentication here. tracker_req().auth</h2><h4>../src/udp_tracker_connection.cpp:83</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> udp_tracker_connection::m_connection_cache;
|
|
|
|
mutex udp_tracker_connection::m_cache_mutex;
|
|
|
|
udp_tracker_connection::udp_tracker_connection(
|
|
io_service& ios
|
|
, tracker_manager& man
|
|
, tracker_request const& req
|
|
, boost::weak_ptr<request_callback> c)
|
|
: tracker_connection(man, req, ios, c)
|
|
, m_transaction_id(0)
|
|
, m_attempts(0)
|
|
, m_state(action_error)
|
|
, m_abort(false)
|
|
{
|
|
update_transaction_id();
|
|
}
|
|
|
|
void udp_tracker_connection::start()
|
|
{
|
|
<div style="background: #ffff00" width="100%"> std::string hostname;
|
|
</div> std::string protocol;
|
|
int port;
|
|
error_code ec;
|
|
|
|
using boost::tuples::ignore;
|
|
boost::tie(protocol, ignore, hostname, port, ignore)
|
|
= parse_url_components(tracker_req().url, ec);
|
|
if (port == -1) port = protocol == "http" ? 80 : 443;
|
|
|
|
if (ec)
|
|
{
|
|
tracker_connection::fail(ec);
|
|
return;
|
|
}
|
|
|
|
aux::session_settings const& settings = m_man.settings();
|
|
|
|
if (settings.get_bool(settings_pack::proxy_hostnames)
|
|
&& (settings.get_int(settings_pack::proxy_type) == settings_pack::socks5
|
|
|| settings.get_int(settings_pack::proxy_type) == settings_pack::socks5_pw))
|
|
{
|
|
m_hostname = hostname;
|
|
m_target.port(port);
|
|
start_announce();
|
|
}
|
|
else
|
|
{
|
|
#if defined TORRENT_ASIO_DEBUGGING
|
|
add_outstanding_async("udp_tracker_connection::name_lookup");
|
|
#endif
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(56)">../src/ut_metadata.cpp:123</a></td><td>if we were to initialize m_metadata_size lazily instead, we would probably be more efficient initialize m_metadata_size</td></tr><tr id="56" style="display: none;" colspan="3"><td colspan="3"><h2>if we were to initialize m_metadata_size lazily instead,
|
|
we would probably be more efficient
|
|
initialize m_metadata_size</h2><h4>../src/ut_metadata.cpp:123</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> metadata();
|
|
}
|
|
|
|
bool need_loaded()
|
|
{ return m_torrent.need_loaded(); }
|
|
|
|
virtual void on_unload() TORRENT_OVERRIDE
|
|
{
|
|
m_metadata.reset();
|
|
}
|
|
|
|
virtual void on_load() TORRENT_OVERRIDE
|
|
{
|
|
// initialize m_metadata_size
|
|
TORRENT_ASSERT(m_torrent.is_loaded());
|
|
metadata();
|
|
}
|
|
|
|
virtual void on_files_checked() TORRENT_OVERRIDE
|
|
{
|
|
<div style="background: #ffff00" width="100%"> metadata();
|
|
</div> }
|
|
|
|
virtual boost::shared_ptr<peer_plugin> new_connection(
|
|
peer_connection_handle const& pc) TORRENT_OVERRIDE;
|
|
|
|
int get_metadata_size() const
|
|
{
|
|
TORRENT_ASSERT(m_metadata_size > 0);
|
|
return m_metadata_size;
|
|
}
|
|
|
|
buffer::const_interval metadata() const
|
|
{
|
|
if (!m_torrent.need_loaded()) return buffer::const_interval(NULL, NULL);
|
|
TORRENT_ASSERT(m_torrent.valid_metadata());
|
|
if (!m_metadata)
|
|
{
|
|
m_metadata = m_torrent.torrent_file().metadata();
|
|
m_metadata_size = m_torrent.torrent_file().metadata_size();
|
|
TORRENT_ASSERT(hasher(m_metadata.get(), m_metadata_size).final()
|
|
== m_torrent.torrent_file().info_hash());
|
|
}
|
|
return buffer::const_interval(m_metadata.get(), m_metadata.get()
|
|
+ m_metadata_size);
|
|
}
|
|
|
|
bool received_metadata(ut_metadata_peer_plugin& source
|
|
, char const* buf, int size, int piece, int total_size);
|
|
|
|
// returns a piece of the metadata that
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(57)">../src/utp_socket_manager.cpp:235</a></td><td>we may want to take ec into account here. possibly close connections quicker</td></tr><tr id="57" style="display: none;" colspan="3"><td colspan="3"><h2>we may want to take ec into account here. possibly close
|
|
connections quicker</h2><h4>../src/utp_socket_manager.cpp:235</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> error_code err;
|
|
m_interfaces = enum_net_interfaces(m_sock.get_io_service(), err);
|
|
if (err) return socket_ep;
|
|
}
|
|
|
|
for (std::vector<ip_interface>::iterator i = m_interfaces.begin()
|
|
, end(m_interfaces.end()); i != end; ++i)
|
|
{
|
|
if (i->interface_address.is_v4() != remote.is_v4())
|
|
continue;
|
|
|
|
if (strcmp(best->name, i->name) == 0)
|
|
return tcp::endpoint(i->interface_address, socket_ep.port());
|
|
}
|
|
return socket_ep;
|
|
}
|
|
|
|
bool utp_socket_manager::incoming_packet(error_code const& ec, udp::endpoint const& ep
|
|
, char const* p, int size)
|
|
{
|
|
<div style="background: #ffff00" width="100%"> TORRENT_UNUSED(ec);
</div>// UTP_LOGV("incoming packet size:%d\n", size);
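	// a hedged, standalone sketch of what the TODO suggests: instead of
	// discarding ec entirely, classify it and let hard errors close the
	// matching uTP socket sooner. Illustrative only, not libtorrent code;
	// it assumes <boost/asio/error.hpp> for the portable error constants.
	bool is_fatal_udp_error(error_code const& ec)
	{
		return ec == boost::asio::error::connection_refused
			|| ec == boost::asio::error::connection_reset
			|| ec == boost::asio::error::connection_aborted;
	}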
if (size < int(sizeof(utp_header))) return false;
|
|
|
|
utp_header const* ph = reinterpret_cast<utp_header const*>(p);
|
|
|
|
// UTP_LOGV("incoming packet version:%d\n", int(ph->get_version()));
|
|
|
|
if (ph->get_version() != 1) return false;
|
|
|
|
const time_point receive_time = clock_type::now();
|
|
|
|
// parse out connection ID and look for existing
|
|
// connections. If found, forward to the utp_stream.
|
|
boost::uint16_t id = ph->connection_id;
|
|
|
|
// first test to see if it's the same socket as last time
|
|
// in most cases it is
|
|
if (m_last_socket
|
|
&& utp_match(m_last_socket, ep, id))
|
|
{
|
|
return utp_incoming_packet(m_last_socket, p, size, ep, receive_time);
|
|
}
|
|
|
|
std::pair<socket_map_t::iterator, socket_map_t::iterator> r =
|
|
m_utp_sockets.equal_range(id);
|
|
|
|
for (; r.first != r.second; ++r.first)
|
|
{
|
|
if (!utp_match(r.first->second, ep, id)) continue;
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(58)">../src/utp_stream.cpp:389</a></td><td>it would be nice if not everything would have to be public here</td></tr><tr id="58" style="display: none;" colspan="3"><td colspan="3"><h2>it would be nice if not everything would have to be public here</h2><h4>../src/utp_stream.cpp:389</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> void incoming(boost::uint8_t const* buf, int size, packet* p, time_point now);
|
|
void do_ledbat(int acked_bytes, int delay, int in_flight);
|
|
int packet_timeout() const;
|
|
bool test_socket_state();
|
|
void maybe_trigger_receive_callback();
|
|
void maybe_trigger_send_callback();
|
|
bool cancel_handlers(error_code const& ec, bool kill);
|
|
bool consume_incoming_data(
|
|
utp_header const* ph, boost::uint8_t const* ptr, int payload_size, time_point now);
|
|
void update_mtu_limits();
|
|
void experienced_loss(int seq_nr);
|
|
|
|
void set_state(int s);
|
|
|
|
private:
|
|
|
|
// non-copyable
|
|
utp_socket_impl(utp_socket_impl const&);
|
|
utp_socket_impl const& operator=(utp_socket_impl const&);
|
|
|
|
<div style="background: #ffff00" width="100%">public:
|
|
</div>
|
|
void check_receive_buffers() const;
|
|
|
|
#if TORRENT_USE_INVARIANT_CHECKS
|
|
void check_invariant() const;
|
|
#endif
|
|
|
|
utp_socket_manager* m_sm;
|
|
|
|
// userdata pointer passed along
|
|
// with any callback. This is initialized to 0
|
|
// then set to point to the utp_stream when
|
|
// hooked up, and then reset to 0 once the utp_stream
|
|
// detaches. This is used to know whether or not
|
|
// the socket impl is still attached to a utp_stream
|
|
// object. When it isn't, we'll never be able to
|
|
// signal anything back to the client, and in case
|
|
// of errors, we just have to delete ourselves
|
|
// i.e. transition to the UTP_STATE_DELETED state
|
|
void* m_userdata;
|
|
|
|
// This is a platform-independent replacement
|
|
// for the regular iovec type in posix. Since
|
|
// it's not used in any system call, we might as
|
|
// well define our own type instead of wrapping
|
|
// the system's type.
|
|
struct iovec_t
|
|
{
|
|
iovec_t(void* b, size_t l): buf(b), len(l) {}
|
|
void* buf;
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(59)">../src/web_peer_connection.cpp:328</a></td><td>do we really need a special case here? wouldn't the multi-file case handle single file torrents correctly too?</td></tr><tr id="59" style="display: none;" colspan="3"><td colspan="3"><h2>do we really need a special case here? wouldn't the multi-file
|
|
case handle single file torrents correctly too?</h2><h4>../src/web_peer_connection.cpp:328</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> incoming_piece_fragment(m_piece.size());
|
|
m_web->restart_request.piece = -1;
|
|
}
|
|
|
|
#if 0
|
|
std::cerr << this << " REQ: p: " << pr.piece << " " << pr.start << std::endl;
|
|
#endif
|
|
size -= pr.length;
|
|
}
|
|
|
|
int proxy_type = m_settings.get_int(settings_pack::proxy_type);
|
|
bool using_proxy = (proxy_type == settings_pack::http
|
|
|| proxy_type == settings_pack::http_pw) && !m_ssl;
|
|
|
|
// the number of pad files that have been "requested". In case we _only_
|
|
// request padfiles, we can't rely on handling them in the on_receive()
|
|
// callback (because we won't receive anything), instead we have to post a
|
|
// pretend read callback where we can deliver the zeroes for the partfile
|
|
int num_pad_files = 0;
|
|
|
|
<div style="background: #ffff00" width="100%"> if (single_file_request)
|
|
</div> {
|
|
file_request_t file_req;
|
|
file_req.file_index = 0;
|
|
file_req.start = boost::int64_t(req.piece) * info.piece_length()
|
|
+ req.start;
|
|
file_req.length = req.length;
|
|
|
|
request += "GET ";
|
|
// do not encode single file paths, they are
|
|
// assumed to be encoded in the torrent file
|
|
request += using_proxy ? m_url : m_path;
|
|
request += " HTTP/1.1\r\n";
|
|
add_headers(request, m_settings, using_proxy);
|
|
request += "\r\nRange: bytes=";
|
|
request += to_string(file_req.start).elems;
|
|
request += "-";
|
|
request += to_string(file_req.start + file_req.length - 1).elems;
|
|
request += "\r\n\r\n";
|
|
m_first_request = false;
|
|
|
|
m_file_requests.push_back(file_req);
|
|
}
|
|
else
|
|
{
|
|
if (!t->need_loaded())
|
|
{
|
|
disconnect(errors::torrent_aborted, op_bittorrent);
|
|
return;
|
|
}
|
|
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(60)">../src/web_peer_connection.cpp:550</a></td><td>just make this peer not have the pieces associated with the file we just requested. Only when it doesn't have any of the file do the following</td></tr><tr id="60" style="display: none;" colspan="3"><td colspan="3"><h2>just make this peer not have the pieces
|
|
associated with the file we just requested. Only
|
|
when it doesn't have any of the file do the following</h2><h4>../src/web_peer_connection.cpp:550</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">
|
|
peer_connection::received_invalid_data(index, single_peer);
|
|
|
|
// if we don't think we have any of the files, allow banning the web seed
|
|
if (num_have_pieces() == 0) return true;
|
|
|
|
// don't disconnect, we won't request anything from this file again
|
|
return false;
|
|
}
|
|
|
|
void web_peer_connection::on_receive_padfile()
|
|
{
|
|
handle_padfile();
|
|
}
|
|
|
|
void web_peer_connection::handle_error(int bytes_left)
|
|
{
|
|
boost::shared_ptr<torrent> t = associated_torrent().lock();
|
|
TORRENT_ASSERT(t);
|
|
|
|
<div style="background: #ffff00" width="100%"> int retry_time = atoi(m_parser.header("retry-after").c_str());
</div> if (retry_time <= 0) retry_time = m_settings.get_int(settings_pack::urlseed_wait_retry);
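	// a hedged sketch of a slightly more careful parse than the atoi() call
	// above: HTTP allows Retry-After to be either delta-seconds or an HTTP
	// date, and strtol() makes the non-numeric case explicit so we can fall
	// back to the configured default. Illustrative only; assumes <cstdlib>.
	int parse_retry_after(std::string const& value, int def)
	{
		char* end = NULL;
		long const v = std::strtol(value.c_str(), &end, 10);
		if (end == value.c_str() || v <= 0) return def;
		return int(v);
	}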
// temporarily unavailable, retry later
|
|
t->retry_web_seed(this, retry_time);
|
|
std::string error_msg = to_string(m_parser.status_code()).elems
|
|
+ (" " + m_parser.message());
|
|
if (t->alerts().should_post<url_seed_alert>())
|
|
{
|
|
t->alerts().emplace_alert<url_seed_alert>(t->get_handle(), m_url
|
|
, error_msg);
|
|
}
|
|
received_bytes(0, bytes_left);
|
|
disconnect(error_code(m_parser.status_code(), get_http_category()), op_bittorrent, 1);
|
|
return;
|
|
}
|
|
|
|
void web_peer_connection::handle_redirect(int bytes_left)
|
|
{
|
|
// this means we got a redirection request
|
|
// look for the location header
|
|
std::string location = m_parser.header("location");
|
|
received_bytes(0, bytes_left);
|
|
|
|
boost::shared_ptr<torrent> t = associated_torrent().lock();
|
|
TORRENT_ASSERT(t);
|
|
|
|
if (location.empty())
|
|
{
|
|
// we should not try this server again.
|
|
t->remove_web_seed(this, errors::missing_location, op_bittorrent, 2);
|
|
m_web = NULL;
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(61)">../src/web_peer_connection.cpp:603</a></td><td>create a mapping of file-index to redirection URLs. Use that to form URLs instead. Support to reconnect to a new server without destructing this peer_connection</td></tr><tr id="61" style="display: none;" colspan="3"><td colspan="3"><h2>create a mapping of file-index to redirection URLs. Use that to form
|
|
URLs instead. Support to reconnect to a new server without destructing this
|
|
peer_connection</h2><h4>../src/web_peer_connection.cpp:603</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> m_web = NULL;
|
|
TORRENT_ASSERT(is_disconnecting());
|
|
return;
|
|
}
|
|
|
|
bool single_file_request = false;
|
|
if (!m_path.empty() && m_path[m_path.size() - 1] != '/')
|
|
single_file_request = true;
|
|
|
|
// add the redirected url and remove the current one
|
|
if (!single_file_request)
|
|
{
|
|
TORRENT_ASSERT(!m_file_requests.empty());
|
|
int const file_index = m_file_requests.front().file_index;
|
|
|
|
if (!t->need_loaded())
|
|
{
|
|
disconnect(errors::torrent_aborted, op_bittorrent);
|
|
return;
|
|
}
|
|
<div style="background: #ffff00" width="100%"> torrent_info const& info = t->torrent_file();
|
|
</div> std::string path = info.orig_files().file_path(file_index);
|
|
#ifdef TORRENT_WINDOWS
|
|
convert_path_to_posix(path);
|
|
#endif
|
|
path = escape_path(path.c_str(), path.length());
|
|
size_t i = location.rfind(path);
|
|
if (i == std::string::npos)
|
|
{
|
|
t->remove_web_seed(this, errors::invalid_redirection, op_bittorrent, 2);
|
|
m_web = NULL;
|
|
TORRENT_ASSERT(is_disconnecting());
|
|
return;
|
|
}
|
|
location.resize(i);
|
|
}
|
|
else
|
|
{
|
|
location = resolve_redirect_location(m_url, location);
|
|
}
|
|
|
|
#ifndef TORRENT_DISABLE_LOGGING
|
|
peer_log(peer_log_alert::info, "LOCATION", "%s", location.c_str());
|
|
#endif
|
|
t->add_web_seed(location, web_seed_entry::url_seed, m_external_auth, m_extra_headers);
|
|
t->remove_web_seed(this, errors::redirecting, op_bittorrent, 2);
|
|
m_web = NULL;
|
|
TORRENT_ASSERT(is_disconnecting());
|
|
return;
|
|
}
|
|
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(62)">../src/kademlia/dht_storage.cpp:110</a></td><td>make this configurable in dht_settings</td></tr><tr id="62" style="display: none;" colspan="3"><td colspan="3"><h2>make this configurable in dht_settings</h2><h4>../src/kademlia/dht_storage.cpp:110</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> // this is a group. It contains a set of group members
|
|
struct torrent_entry
|
|
{
|
|
std::string name;
|
|
std::set<peer_entry> peers;
|
|
};
|
|
|
|
#ifndef TORRENT_NO_DEPRECATE
|
|
struct count_peers
|
|
{
|
|
int* count;
|
|
count_peers(int* c): count(c) {}
|
|
void operator()(std::pair<libtorrent::sha1_hash
|
|
, torrent_entry> const& t)
|
|
{
|
|
*count += t.second.peers.size();
|
|
}
|
|
};
|
|
#endif
|
|
|
|
<div style="background: #ffff00" width="100%"> enum { announce_interval = 30 };
</div>
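	// a hedged sketch of what the TODO asks for: read the interval from
	// dht_settings instead of the hard coded enum above. The member name
	// "item_announce_interval" is hypothetical (dht_settings has no such
	// field today); this is illustrative only.
	int announce_interval_minutes(dht_settings const& sett)
	{
		return sett.item_announce_interval > 0
			? sett.item_announce_interval : 30;
	}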
struct dht_immutable_item
|
|
{
|
|
dht_immutable_item() : value(0), num_announcers(0), size(0) {}
|
|
// malloced space for the actual value
|
|
char* value;
|
|
// this counts the number of IPs we have seen
|
|
// announcing this item, this is used to determine
|
|
// popularity if we reach the limit of items to store
|
|
bloom_filter<128> ips;
|
|
// the last time we heard about this
|
|
time_point last_seen;
|
|
// number of IPs in the bloom filter
|
|
int num_announcers;
|
|
// size of malloced space pointed to by value
|
|
int size;
|
|
};
|
|
|
|
struct ed25519_public_key { char bytes[item_pk_len]; };
|
|
|
|
struct dht_mutable_item : dht_immutable_item
|
|
{
|
|
char sig[item_sig_len];
|
|
boost::int64_t seq;
|
|
ed25519_public_key key;
|
|
char* salt;
|
|
int salt_size;
|
|
};
|
|
|
|
void touch_item(dht_immutable_item* f, address const& address)
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(63)">../src/kademlia/node.cpp:656</a></td><td>it would be nice to have a bias towards node-id prefixes that are missing in the bucket</td></tr><tr id="63" style="display: none;" colspan="3"><td colspan="3"><h2>it would be nice to have a bias towards node-id prefixes that
|
|
are missing in the bucket</h2><h4>../src/kademlia/node.cpp:656</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> // this shouldn't happen
|
|
TORRENT_ASSERT(m_id != ne->id);
|
|
if (ne->id == m_id) return;
|
|
|
|
int bucket = 159 - distance_exp(m_id, ne->id);
|
|
TORRENT_ASSERT(bucket < 160);
|
|
send_single_refresh(ne->ep(), bucket, ne->id);
|
|
}
|
|
|
|
void node::send_single_refresh(udp::endpoint const& ep, int bucket
|
|
, node_id const& id)
|
|
{
|
|
TORRENT_ASSERT(id != m_id);
|
|
void* ptr = m_rpc.allocate_observer();
|
|
if (ptr == 0) return;
|
|
|
|
TORRENT_ASSERT(bucket >= 0);
|
|
TORRENT_ASSERT(bucket <= 159);
|
|
|
|
// generate a random node_id within the given bucket
|
|
<div style="background: #ffff00" width="100%"> node_id mask = generate_prefix_mask(bucket + 1);
|
|
</div> node_id target = generate_secret_id() & ~mask;
target |= m_id & mask;
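		// a hedged, standalone sketch of the bias the TODO describes, using a
		// plain integer in place of a full node_id: remember which 3-bit
		// sub-prefixes already occur in the bucket and prefer a random prefix
		// that is missing. Illustrative only; assumes <bitset> and <cstdlib>.
		int pick_missing_prefix(std::bitset<8> const& present)
		{
			if (present.count() == present.size()) return std::rand() & 7;
			int p;
			do { p = std::rand() & 7; } while (present.test(p));
			return p;
		}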
// create a dummy traversal_algorithm
|
|
// this is unfortunately necessary for the observer
|
|
// to free itself from the pool when it's being released
|
|
boost::intrusive_ptr<traversal_algorithm> algo(
|
|
new traversal_algorithm(*this, (node_id::min)()));
|
|
observer_ptr o(new (ptr) ping_observer(algo, ep, id));
|
|
#if defined TORRENT_DEBUG || defined TORRENT_RELEASE_ASSERTS
|
|
o->m_in_constructor = false;
|
|
#endif
|
|
entry e;
|
|
e["y"] = "q";
|
|
entry& a = e["a"];
|
|
|
|
if (m_table.is_full(bucket))
|
|
{
|
|
// current bucket is full, just ping it.
|
|
e["q"] = "ping";
|
|
m_counters.inc_stats_counter(counters::dht_ping_out);
|
|
}
|
|
else
|
|
{
|
|
// use get_peers instead of find_node. We'll get nodes in the response
|
|
// either way.
|
|
e["q"] = "get_peers";
|
|
a["info_hash"] = target.to_string();
|
|
m_counters.inc_stats_counter(counters::dht_get_peers_out);
|
|
}
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(64)">../src/kademlia/node.cpp:734</a></td><td>use the non deprecated function instead of this one</td></tr><tr id="64" style="display: none;" colspan="3"><td colspan="3"><h2>use the non deprecated function instead of this one</h2><h4>../src/kademlia/node.cpp:734</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">
|
|
for (std::set<traversal_algorithm*>::iterator i = m_running_requests.begin()
|
|
, end(m_running_requests.end()); i != end; ++i)
|
|
{
|
|
requests.push_back(dht_lookup());
|
|
dht_lookup& lookup = requests.back();
|
|
(*i)->status(lookup);
|
|
}
|
|
}
|
|
|
|
void node::update_stats_counters(counters& c) const
|
|
{
|
|
const dht_storage_counters& dht_cnt = m_storage->counters();
|
|
c.set_value(counters::dht_torrents, dht_cnt.torrents);
|
|
c.set_value(counters::dht_peers, dht_cnt.peers);
|
|
c.set_value(counters::dht_immutable_data, dht_cnt.immutable_data);
|
|
c.set_value(counters::dht_mutable_data, dht_cnt.mutable_data);
|
|
}
|
|
|
|
#ifndef TORRENT_NO_DEPRECATE
|
|
<div style="background: #ffff00" width="100%">void node::status(session_status& s)
|
|
</div>{
|
|
mutex_t::scoped_lock l(m_mutex);
|
|
|
|
m_table.status(s);
|
|
s.dht_torrents = int(m_storage->num_torrents());
|
|
s.active_requests.clear();
|
|
s.dht_total_allocations = m_rpc.num_allocated_observers();
|
|
for (std::set<traversal_algorithm*>::iterator i = m_running_requests.begin()
|
|
, end(m_running_requests.end()); i != end; ++i)
|
|
{
|
|
s.active_requests.push_back(dht_lookup());
|
|
dht_lookup& lookup = s.active_requests.back();
|
|
(*i)->status(lookup);
|
|
}
|
|
}
|
|
#endif
|
|
|
|
void node::lookup_peers(sha1_hash const& info_hash, entry& reply
|
|
, bool noseed, bool scrape) const
|
|
{
|
|
if (m_observer)
|
|
m_observer->get_peers(info_hash);
|
|
|
|
m_storage->get_peers(info_hash, noseed, scrape, reply);
|
|
}
|
|
|
|
void TORRENT_EXTRA_EXPORT write_nodes_entry(entry& r, nodes_t const& nodes)
|
|
{
|
|
entry& n = r["nodes"];
|
|
std::back_insert_iterator<std::string> out(n.string());
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(65)">../src/kademlia/node.cpp:893</a></td><td>find_node should write directly to the response entry</td></tr><tr id="65" style="display: none;" colspan="3"><td colspan="3"><h2>find_node should write directly to the response entry</h2><h4>../src/kademlia/node.cpp:893</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> , int(reply["values"].list().size()));
|
|
}
|
|
#endif
|
|
}
|
|
else if (query_len == 9 && memcmp(query, "find_node", 9) == 0)
|
|
{
|
|
key_desc_t msg_desc[] = {
|
|
{"target", bdecode_node::string_t, 20, 0},
|
|
};
|
|
|
|
bdecode_node msg_keys[1];
|
|
if (!verify_message(arg_ent, msg_desc, msg_keys, error_string, sizeof(error_string)))
|
|
{
|
|
incoming_error(e, error_string);
|
|
return;
|
|
}
|
|
|
|
m_counters.inc_stats_counter(counters::dht_find_node_in);
|
|
sha1_hash target(msg_keys[0].string_ptr());
|
|
|
|
<div style="background: #ffff00" width="100%"> nodes_t n;
|
|
</div> m_table.find_node(target, n, 0);
|
|
write_nodes_entry(reply, n);
|
|
}
|
|
else if (query_len == 13 && memcmp(query, "announce_peer", 13) == 0)
|
|
{
|
|
key_desc_t msg_desc[] = {
|
|
{"info_hash", bdecode_node::string_t, 20, 0},
|
|
{"port", bdecode_node::int_t, 0, 0},
|
|
{"token", bdecode_node::string_t, 0, 0},
|
|
{"n", bdecode_node::string_t, 0, key_desc_t::optional},
|
|
{"seed", bdecode_node::int_t, 0, key_desc_t::optional},
|
|
{"implied_port", bdecode_node::int_t, 0, key_desc_t::optional},
|
|
};
|
|
|
|
bdecode_node msg_keys[6];
|
|
if (!verify_message(arg_ent, msg_desc, msg_keys, error_string, sizeof(error_string)))
|
|
{
|
|
m_counters.inc_stats_counter(counters::dht_invalid_announce);
|
|
incoming_error(e, error_string);
|
|
return;
|
|
}
|
|
|
|
int port = int(msg_keys[1].int_value());
|
|
|
|
// is the announcer asking to ignore the explicit
|
|
// listen port and instead use the source port of the packet?
|
|
if (msg_keys[5] && msg_keys[5].int_value() != 0)
|
|
port = m.addr.port();
|
|
|
|
if (port < 0 || port >= 65536)
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(66)">../src/kademlia/routing_table.cpp:132</a></td><td>use the non deprecated function instead of this one</td></tr><tr id="66" style="display: none;" colspan="3"><td colspan="3"><h2>use the non deprecated function instead of this one</h2><h4>../src/kademlia/routing_table.cpp:132</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">
|
|
static const int size_exceptions[] = {16, 8, 4, 2};
|
|
if (bucket < int(sizeof(size_exceptions)/sizeof(size_exceptions[0])))
|
|
return m_bucket_size * size_exceptions[bucket];
|
|
return m_bucket_size;
|
|
}
|
|
|
|
void routing_table::status(std::vector<dht_routing_bucket>& s) const
|
|
{
|
|
for (table_t::const_iterator i = m_buckets.begin()
|
|
, end(m_buckets.end()); i != end; ++i)
|
|
{
|
|
dht_routing_bucket b;
|
|
b.num_nodes = i->live_nodes.size();
|
|
b.num_replacements = i->replacements.size();
|
|
s.push_back(b);
|
|
}
|
|
}
|
|
|
|
#ifndef TORRENT_NO_DEPRECATE
|
|
<div style="background: #ffff00" width="100%">void routing_table::status(session_status& s) const
|
|
</div>{
|
|
int ignore;
|
|
boost::tie(s.dht_nodes, s.dht_node_cache, ignore) = size();
|
|
s.dht_global_nodes = num_global_nodes();
|
|
|
|
for (table_t::const_iterator i = m_buckets.begin()
|
|
, end(m_buckets.end()); i != end; ++i)
|
|
{
|
|
dht_routing_bucket b;
|
|
b.num_nodes = i->live_nodes.size();
|
|
b.num_replacements = i->replacements.size();
|
|
#ifndef TORRENT_NO_DEPRECATE
|
|
b.last_active = 0;
|
|
#endif
|
|
s.dht_routing_table.push_back(b);
|
|
}
|
|
}
|
|
#endif
|
|
|
|
boost::tuple<int, int, int> routing_table::size() const
|
|
{
|
|
int nodes = 0;
|
|
int replacements = 0;
|
|
int confirmed = 0;
|
|
for (table_t::const_iterator i = m_buckets.begin()
|
|
, end(m_buckets.end()); i != end; ++i)
|
|
{
|
|
nodes += i->live_nodes.size();
|
|
for (bucket_t::const_iterator k = i->live_nodes.begin()
|
|
, end2(i->live_nodes.end()); k != end2; ++k)
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(67)">../src/kademlia/routing_table.cpp:991</a></td><td>move the lowest priority nodes to the replacement bucket</td></tr><tr id="67" style="display: none;" colspan="3"><td colspan="3"><h2>move the lowest priority nodes to the replacement bucket</h2><h4>../src/kademlia/routing_table.cpp:991</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> bucket_t& rb = m_buckets[bucket_index].replacements;
|
|
|
|
	// move any node whose (160 - distance_exp(m_id, id)) >= (i - m_buckets.begin())
|
|
// to the new bucket
|
|
int const new_bucket_size = bucket_limit(bucket_index + 1);
|
|
for (bucket_t::iterator j = b.begin(); j != b.end();)
|
|
{
|
|
int const d = distance_exp(m_id, j->id);
|
|
if (d >= 159 - bucket_index)
|
|
{
|
|
++j;
|
|
continue;
|
|
}
|
|
// this entry belongs in the new bucket
|
|
new_bucket.push_back(*j);
|
|
j = b.erase(j);
|
|
}
|
|
|
|
if (b.size() > bucket_size_limit)
|
|
{
|
|
<div style="background: #ffff00" width="100%"> for (bucket_t::iterator i = b.begin() + bucket_size_limit
|
|
</div> , end(b.end()); i != end; ++i)
|
|
{
|
|
rb.push_back(*i);
|
|
}
|
|
|
|
b.resize(bucket_size_limit);
}
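		// a hedged sketch of what the TODO suggests: before copying the
		// overflow into rb above, order the bucket so that only the least
		// valuable nodes end up as replacements. The ranking is illustrative
		// and assumes node_entry exposes pinged(), fail_count() and rtt.
		struct node_quality_cmp
		{
			bool operator()(node_entry const& lhs, node_entry const& rhs) const
			{
				if (lhs.pinged() != rhs.pinged()) return lhs.pinged();
				if (lhs.fail_count() != rhs.fail_count())
					return lhs.fail_count() < rhs.fail_count();
				return lhs.rtt < rhs.rtt;
			}
		};
		// it would be applied just before the truncation above, e.g.:
		// std::partial_sort(b.begin(), b.begin() + bucket_size_limit
		//     , b.end(), node_quality_cmp());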
// split the replacement bucket as well. If the live bucket
|
|
// is not full anymore, also move the replacement entries
|
|
// into the main bucket
|
|
for (bucket_t::iterator j = rb.begin(); j != rb.end();)
|
|
{
|
|
if (distance_exp(m_id, j->id) >= 159 - bucket_index)
|
|
{
|
|
if (int(b.size()) >= bucket_size_limit)
|
|
{
|
|
++j;
|
|
continue;
|
|
}
|
|
b.push_back(*j);
|
|
}
|
|
else
|
|
{
|
|
// this entry belongs in the new bucket
|
|
if (int(new_bucket.size()) < new_bucket_size)
|
|
new_bucket.push_back(*j);
|
|
else
|
|
new_replacement_bucket.push_back(*j);
|
|
}
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(68)">../include/libtorrent/alert_types.hpp:1416</a></td><td>should the alert baseclass have this object instead?</td></tr><tr id="68" style="display: none;" colspan="3"><td colspan="3"><h2>should the alert baseclass have this object instead?</h2><h4>../include/libtorrent/alert_types.hpp:1416</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> {
|
|
// internal
|
|
portmap_log_alert(aux::stack_allocator& alloc, int t, const char* m);
|
|
|
|
TORRENT_DEFINE_ALERT(portmap_log_alert, 52)
|
|
|
|
static const int static_category = alert::port_mapping_log_notification;
|
|
virtual std::string message() const TORRENT_OVERRIDE;
|
|
|
|
int map_type;
|
|
|
|
#ifndef TORRENT_NO_DEPRECATE
|
|
std::string msg;
|
|
#endif
|
|
|
|
// the message associated with this log line
|
|
char const* log_message() const;
|
|
|
|
private:
|
|
|
|
<div style="background: #ffff00" width="100%"> aux::stack_allocator const& m_alloc;
|
|
</div>
|
|
int m_log_idx;
|
|
};
|
|
|
|
#endif
|
|
|
|
// This alert is generated when a fastresume file has been passed to
|
|
// add_torrent() but the files on disk did not match the fastresume file.
|
|
// The error_code explains the reason why the resume file was rejected.
|
|
struct TORRENT_EXPORT fastresume_rejected_alert TORRENT_FINAL : torrent_alert
|
|
{
|
|
// internal
|
|
fastresume_rejected_alert(aux::stack_allocator& alloc
|
|
, torrent_handle const& h
|
|
, error_code const& ec
|
|
, std::string const& file
|
|
, char const* op);
|
|
|
|
TORRENT_DEFINE_ALERT(fastresume_rejected_alert, 53)
|
|
|
|
static const int static_category = alert::status_notification
|
|
| alert::error_notification;
|
|
virtual std::string message() const TORRENT_OVERRIDE;
|
|
|
|
error_code error;
|
|
|
|
#ifndef TORRENT_NO_DEPRECATE
|
|
		// If the error happened to a specific file, ``file`` is the path to it.
|
|
std::string file;
|
|
#endif
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(69)">../include/libtorrent/build_config.hpp:40</a></td><td>instead of using a dummy function to cause link errors when incompatible build configurations are used, make the namespace name depend on the configuration, and have a using declaration in the headers to pull it into libtorrent.</td></tr><tr id="69" style="display: none;" colspan="3"><td colspan="3"><h2>instead of using a dummy function to cause link errors when
|
|
incompatible build configurations are used, make the namespace name
|
|
depend on the configuration, and have a using declaration in the headers
|
|
to pull it into libtorrent.</h2><h4>../include/libtorrent/build_config.hpp:40</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
|
|
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
|
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
|
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
|
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
|
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
|
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
|
POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
*/
|
|
|
|
#ifndef TORRENT_BUILD_CONFIG_HPP_INCLUDED
|
|
#define TORRENT_BUILD_CONFIG_HPP_INCLUDED
|
|
|
|
#include "libtorrent/config.hpp"
|
|
#include <boost/preprocessor/cat.hpp>
|
|
#include <boost/preprocessor/stringize.hpp>
|
|
|
|
<div style="background: #ffff00" width="100%">#if TORRENT_USE_IPV6
|
|
</div>#define TORRENT_CFG_IPV6 ipv6_
|
|
#else
|
|
#define TORRENT_CFG_IPV6 noipv6_
|
|
#endif
|
|
|
|
#ifdef TORRENT_NO_DEPRECATE
|
|
#define TORRENT_CFG_DEPR nodeprecate_
|
|
#else
|
|
#define TORRENT_CFG_DEPR deprecated_
|
|
#endif
|
|
|
|
#define TORRENT_CFG \
|
|
BOOST_PP_CAT(TORRENT_CFG_IPV6, \
|
|
TORRENT_CFG_DEPR)
|
|
|
|
#define TORRENT_CFG_STRING BOOST_PP_STRINGIZE(TORRENT_CFG)
#endif
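// a hedged sketch of the scheme the TODO describes: put the library's
// symbols in a namespace whose name encodes the build configuration and
// pull it into libtorrent with a using-directive, so that mixing
// incompatible builds fails at link time instead of at run time. The macro
// name TORRENT_ABI_NAMESPACE is made up for this sketch.
#define TORRENT_ABI_NAMESPACE BOOST_PP_CAT(abi_, TORRENT_CFG)
namespace libtorrent {
	namespace TORRENT_ABI_NAMESPACE {}
	using namespace TORRENT_ABI_NAMESPACE;
}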
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(70)">../include/libtorrent/enum_net.hpp:151</a></td><td>this could be done more efficiently by just looking up the interface with the given name, maybe even with if_nametoindex()</td></tr><tr id="70" style="display: none;" colspan="3"><td colspan="3"><h2>this could be done more efficiently by just looking up
|
|
the interface with the given name, maybe even with if_nametoindex()</h2><h4>../include/libtorrent/enum_net.hpp:151</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> // providing 0.0.0.0 as the device, turn it into "::"
|
|
if (ip == address_v4::any() && protocol == boost::asio::ip::tcp::v6())
|
|
ip = address_v6::any();
|
|
#endif
|
|
bind_ep.address(ip);
|
|
// it appears to be an IP. Just bind to that address
|
|
sock.bind(bind_ep, ec);
|
|
return bind_ep.address();
|
|
}
|
|
|
|
ec.clear();
|
|
|
|
#ifdef SO_BINDTODEVICE
|
|
// try to use SO_BINDTODEVICE here, if that exists. If it fails,
|
|
// fall back to the mechanism we have below
|
|
sock.set_option(bind_to_device_opt(device_name), ec);
|
|
if (ec)
|
|
#endif
|
|
{
|
|
ec.clear();
|
|
<div style="background: #ffff00" width="100%"> std::vector<ip_interface> ifs = enum_net_interfaces(ios, ec);
|
|
</div> if (ec) return bind_ep.address();
|
|
|
|
bool found = false;
|
|
|
|
for (int i = 0; i < int(ifs.size()); ++i)
|
|
{
|
|
// we're looking for a specific interface, and its address
|
|
// (which must be of the same family as the address we're
|
|
// connecting to)
|
|
if (strcmp(ifs[i].name, device_name) != 0) continue;
|
|
if (ifs[i].interface_address.is_v4() != (protocol == boost::asio::ip::tcp::v4()))
|
|
continue;
|
|
|
|
bind_ep.address(ifs[i].interface_address);
|
|
found = true;
|
|
break;
|
|
}
|
|
|
|
if (!found)
|
|
{
|
|
ec = error_code(boost::system::errc::no_such_device, generic_category());
|
|
return bind_ep.address();
|
|
}
|
|
}
|
|
sock.bind(bind_ep, ec);
|
|
return bind_ep.address();
}
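	// a hedged, standalone sketch of the cheaper lookup the TODO mentions:
	// on POSIX, if_nametoindex() (from <net/if.h>) maps an interface name to
	// its index and returns 0 when no such interface exists, avoiding a full
	// interface enumeration just to test for existence. Illustrative only.
	inline bool device_exists_posix(char const* name)
	{
		return ::if_nametoindex(name) != 0;
	}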
// returns true if the given device exists
|
|
TORRENT_EXTRA_EXPORT bool has_interface(char const* name, io_service& ios
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(71)">../include/libtorrent/heterogeneous_queue.hpp:56</a></td><td>add emplace_back() version</td></tr><tr id="71" style="display: none;" colspan="3"><td colspan="3"><h2>add emplace_back() version</h2><h4>../include/libtorrent/heterogeneous_queue.hpp:56</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">#include <vector>
|
|
|
|
#include <boost/cstdint.hpp>
|
|
#include <boost/utility/enable_if.hpp>
|
|
#include <boost/type_traits/is_base_of.hpp>
|
|
|
|
#include "libtorrent/assert.hpp"
|
|
|
|
namespace libtorrent {
|
|
|
|
template <class T>
|
|
struct heterogeneous_queue
|
|
{
|
|
heterogeneous_queue()
|
|
: m_storage(NULL)
|
|
, m_capacity(0)
|
|
, m_size(0)
|
|
, m_num_items(0)
|
|
{}
|
|
|
|
<div style="background: #ffff00" width="100%"> template <class U>
|
|
</div> typename boost::enable_if<boost::is_base_of<T, U> >::type
|
|
push_back(U const& a)
|
|
{
|
|
// the size of the type rounded up to pointer alignment
|
|
const int object_size = (sizeof(U) + sizeof(*m_storage) - 1)
|
|
/ sizeof(*m_storage);
|
|
|
|
// +1 for the length prefix
|
|
if (m_size + object_size + header_size > m_capacity)
|
|
grow_capacity(object_size);
|
|
|
|
uintptr_t* ptr = m_storage + m_size;
|
|
|
|
// length prefix
|
|
header_t* hdr = reinterpret_cast<header_t*>(ptr);
|
|
hdr->len = object_size;
|
|
hdr->move = &move<U>;
|
|
ptr += header_size;
|
|
|
|
// construct in-place
|
|
new (ptr) U(a);
|
|
|
|
// if we constructed the object without throwing any exception
|
|
// update counters to indicate the new item is in there
|
|
++m_num_items;
|
|
m_size += header_size + object_size;
}
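	// a hedged sketch of the emplace_back() the TODO asks for, assuming C++11
	// (variadic templates and <utility> for std::forward). It mirrors
	// push_back() above, but constructs U in place from the forwarded
	// arguments instead of copy-constructing it.
	template <class U, class... Args>
	typename boost::enable_if<boost::is_base_of<T, U> >::type
	emplace_back(Args&&... args)
	{
		// the size of the type rounded up to pointer alignment
		const int object_size = (sizeof(U) + sizeof(*m_storage) - 1)
			/ sizeof(*m_storage);

		if (m_size + object_size + header_size > m_capacity)
			grow_capacity(object_size);

		uintptr_t* ptr = m_storage + m_size;
		header_t* hdr = reinterpret_cast<header_t*>(ptr);
		hdr->len = object_size;
		hdr->move = &move<U>;
		ptr += header_size;

		// construct in-place from the forwarded arguments
		new (ptr) U(std::forward<Args>(args)...);

		++m_num_items;
		m_size += header_size + object_size;
	}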
void get_pointers(std::vector<T*>& out)
|
|
{
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(72)">../include/libtorrent/peer_connection.hpp:1116</a></td><td>rename this target queue size</td></tr><tr id="72" style="display: none;" colspan="3"><td colspan="3"><h2>rename this target queue size</h2><h4>../include/libtorrent/peer_connection.hpp:1116</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">
|
|
// the number of bytes send to the disk-io
|
|
// thread that hasn't yet been completely written.
|
|
int m_outstanding_writing_bytes;
|
|
|
|
// max transfer rates seen on this peer
|
|
int m_download_rate_peak;
|
|
int m_upload_rate_peak;
|
|
|
|
// when using the BitTyrant choker, this is our
|
|
// estimated reciprocation rate. i.e. the rate
|
|
// we need to send to this peer for it to unchoke
|
|
// us
|
|
int m_est_reciprocation_rate;
|
|
|
|
// stop sending data after this many bytes, INT_MAX = inf
|
|
int m_send_barrier;
|
|
|
|
// the number of request we should queue up
|
|
// at the remote end.
|
|
<div style="background: #ffff00" width="100%"> boost::uint16_t m_desired_queue_size;
|
|
</div>
|
|
#ifndef TORRENT_NO_DEPRECATE
|
|
#ifndef TORRENT_DISABLE_RESOLVE_COUNTRIES
|
|
// in case the session settings is set
|
|
// to resolve countries, this is set to
|
|
// the two character country code this
|
|
// peer resides in.
|
|
char m_country[2];
|
|
#endif
|
|
#endif // TORRENT_NO_DEPRECATE
|
|
|
|
// if set to non-zero, this peer will always prefer
|
|
// to request entire n pieces, rather than blocks.
|
|
// where n is the value of this variable.
|
|
// if it is 0, the download rate limit setting
|
|
// will be used to determine if whole pieces
|
|
// are preferred.
|
|
boost::uint8_t m_prefer_contiguous_blocks;
|
|
|
|
// this is the number of times this peer has had
|
|
// a request rejected because of a disk I/O failure.
|
|
// once this reaches a certain threshold, the
|
|
// peer is disconnected in order to avoid infinite
|
|
// loops of consistent failures
|
|
boost::uint8_t m_disk_read_failures;
|
|
|
|
// this is used in seed mode whenever we trigger a hash check
|
|
// for a piece, before we read it. It's used to throttle
|
|
// the hash checks to just a few per peer at a time.
|
|
boost::uint8_t m_outstanding_piece_verification:3;
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(73)">../include/libtorrent/piece_picker.hpp:594</a></td><td>having 8 priority levels is probably excessive. It should probably be changed to 3 levels + dont-download</td></tr><tr id="73" style="display: none;" colspan="3"><td colspan="3"><h2>having 8 priority levels is probably excessive. It should
|
|
probably be changed to 3 levels + dont-download</h2><h4>../include/libtorrent/piece_picker.hpp:594</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> // the number of peers that has this piece
|
|
// (availability)
|
|
#ifdef TORRENT_OPTIMIZE_MEMORY_USAGE
|
|
boost::uint32_t peer_count : 9;
|
|
#else
|
|
boost::uint32_t peer_count : 16;
|
|
#endif
|
|
|
|
// one of the enums from state_t. This indicates whether this piece
|
|
// is currently being downloaded or not, and what state it's in if
|
|
// it is. Specifically, as an optimization, pieces that have all blocks
|
|
// requested from them are separated out into separate lists to make
|
|
		// lookups quicker. The main oddity is that when a downloading piece
		// has only been requested from reverse peers, that's
|
|
// recorded as piece_downloading_reverse, which really means the same
|
|
// as piece_downloading, it just saves space to also indicate that it
|
|
// has a bit lower priority. The reverse bit is only relevant if the
|
|
		// state is piece_downloading.
|
|
boost::uint32_t download_state : 3;
|
|
|
|
<div style="background: #ffff00" width="100%">
|
|
</div> // is 0 if the piece is filtered (not to be downloaded)
|
|
// 1 is low priority
|
|
// 2 is low priority
|
|
// 3 is mid priority
|
|
// 4 is default priority
|
|
// 5 is mid priority
|
|
// 6 is high priority
|
|
// 7 is high priority
boost::uint32_t piece_priority : 3;
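		// a hedged sketch of the collapsed scheme the TODO suggests: three
		// levels plus dont-download. The mapping from the current 0-7 range
		// is illustrative only and not part of the piece picker.
		enum collapsed_priority_t
		{ pri_dont_download = 0, pri_low, pri_normal, pri_high };

		static collapsed_priority_t collapse_priority(int prio)
		{
			if (prio <= 0) return pri_dont_download;
			if (prio <= 2) return pri_low;
			if (prio <= 5) return pri_normal;
			return pri_high;
		}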
// index in to the piece_info vector
|
|
#ifdef TORRENT_OPTIMIZE_MEMORY_USAGE
|
|
boost::uint32_t index : 17;
|
|
#else
|
|
boost::uint32_t index;
|
|
#endif
|
|
|
|
#ifdef TORRENT_DEBUG_REFCOUNTS
|
|
// all the peers that have this piece
|
|
std::set<const torrent_peer*> have_peers;
|
|
#endif
|
|
|
|
enum
|
|
{
|
|
// index is set to this to indicate that we have the
|
|
// piece. There is no entry for the piece in the
|
|
// buckets if this is the case.
|
|
#ifdef TORRENT_OPTIMIZE_MEMORY_USAGE
|
|
we_have_index = 0x3ffff,
|
|
#else
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(74)">../include/libtorrent/proxy_base.hpp:260</a></td><td>use the resolver interface that has a built-in cache</td></tr><tr id="74" style="display: none;" colspan="3"><td colspan="3"><h2>use the resolver interface that has a built-in cache</h2><h4>../include/libtorrent/proxy_base.hpp:260</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> return m_sock.lowest_layer();
|
|
}
|
|
|
|
next_layer_type& next_layer()
|
|
{
|
|
return m_sock;
|
|
}
|
|
|
|
bool is_open() const { return m_sock.is_open(); }
|
|
|
|
protected:
|
|
|
|
bool handle_error(error_code const& e, boost::shared_ptr<handler_type> const& h);
|
|
|
|
tcp::socket m_sock;
|
|
std::string m_hostname;
|
|
int m_port;
|
|
|
|
endpoint_type m_remote_endpoint;
|
|
|
|
<div style="background: #ffff00" width="100%"> tcp::resolver m_resolver;
|
|
</div>};
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(75)">../include/libtorrent/session_handle.hpp:78</a></td><td>the ip filter should probably be saved here too</td></tr><tr id="75" style="display: none;" colspan="3"><td colspan="3"><h2>the ip filter should probably be saved here too</h2><h4>../include/libtorrent/session_handle.hpp:78</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> class port_filter;
|
|
class alert;
|
|
|
|
#ifndef TORRENT_NO_DEPRECATE
|
|
struct session_status;
|
|
#endif
|
|
|
|
typedef boost::function<void(sha1_hash const&, std::vector<char>&
|
|
, error_code&)> user_load_function_t;
|
|
|
|
struct TORRENT_EXPORT session_handle
|
|
{
|
|
session_handle() : m_impl(NULL) {}
|
|
|
|
session_handle(aux::session_impl* impl)
|
|
: m_impl(impl)
|
|
{}
|
|
|
|
bool is_valid() const { return m_impl != NULL; }
|
|
|
|
<div style="background: #ffff00" width="100%">
|
|
</div> // flags that determines which aspects of the session should be
|
|
// saved when calling save_state().
|
|
enum save_state_flags_t
|
|
{
|
|
// saves settings (i.e. the session_settings)
|
|
save_settings = 0x001,
|
|
|
|
// saves dht_settings
|
|
save_dht_settings = 0x002,
|
|
|
|
// saves dht state such as nodes and node-id, possibly accelerating
|
|
// joining the DHT if provided at next session startup.
|
|
save_dht_state = 0x004,
|
|
|
|
// save pe_settings
|
|
save_encryption_settings = 0x020,
|
|
|
|
// internal
|
|
save_as_map = 0x040
|
|
|
|
#ifndef TORRENT_NO_DEPRECATE
|
|
,
|
|
// saves RSS feeds
|
|
save_feeds = 0x080,
|
|
save_proxy = 0x008,
|
|
save_i2p_proxy = 0x010,
|
|
save_dht_proxy = save_proxy,
|
|
save_peer_proxy = save_proxy,
|
|
save_web_proxy = save_proxy,
|
|
save_tracker_proxy = save_proxy
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(76)">../include/libtorrent/socks5_stream.hpp:144</a></td><td>add async_connect() that takes a hostname and port as well</td></tr><tr id="76" style="display: none;" colspan="3"><td colspan="3"><h2>add async_connect() that takes a hostname and port as well</h2><h4>../include/libtorrent/socks5_stream.hpp:144</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">*/
|
|
m_dst_name = host;
|
|
if (m_dst_name.size() > 255)
|
|
m_dst_name.resize(255);
|
|
}
|
|
|
|
void close(error_code& ec)
|
|
{
|
|
m_dst_name.clear();
|
|
proxy_base::close(ec);
|
|
}
|
|
|
|
#ifndef BOOST_NO_EXCEPTIONS
|
|
void close()
|
|
{
|
|
m_dst_name.clear();
|
|
proxy_base::close();
|
|
}
|
|
#endif
|
|
|
|
<div style="background: #ffff00" width="100%"> template <class Handler>
|
|
</div> void async_connect(endpoint_type const& endpoint, Handler const& handler)
|
|
{
|
|
// make sure we don't try to connect to INADDR_ANY. binding is fine,
|
|
// and using a hostname is fine on SOCKS version 5.
|
|
TORRENT_ASSERT(m_command == socks5_bind
|
|
|| endpoint.address() != address()
|
|
|| (!m_dst_name.empty() && m_version == 5));
|
|
|
|
m_remote_endpoint = endpoint;
|
|
|
|
// the connect is split up in the following steps:
|
|
// 1. resolve name of proxy server
|
|
// 2. connect to proxy server
|
|
// 3. if version == 5:
|
|
// 3.1 send SOCKS5 authentication method message
|
|
// 3.2 read SOCKS5 authentication response
|
|
// 3.3 send username+password
|
|
// 4. send SOCKS command message
|
|
|
|
// to avoid unnecessary copying of the handler,
|
|
		// store it in a shared_ptr
|
|
boost::shared_ptr<handler_type> h(new handler_type(handler));
|
|
|
|
#if defined TORRENT_ASIO_DEBUGGING
|
|
add_outstanding_async("socks5_stream::name_lookup");
|
|
#endif
|
|
tcp::resolver::query q(m_hostname, to_string(m_port).elems);
|
|
m_resolver.async_resolve(q, boost::bind(
|
|
&socks5_stream::name_lookup, this, _1, _2, h));
}
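	// a hedged sketch of the overload the TODO asks for: take a hostname and
	// port directly, let the SOCKS5 proxy resolve the name via m_dst_name,
	// and reuse the endpoint overload above for the actual connect sequence.
	// Illustrative only, not part of libtorrent.
	template <class Handler>
	void async_connect(std::string const& hostname, int port
		, Handler const& handler)
	{
		TORRENT_ASSERT(m_version == 5);
		m_dst_name = hostname;
		if (m_dst_name.size() > 255) m_dst_name.resize(255);
		async_connect(endpoint_type(address(), boost::uint16_t(port)), handler);
	}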
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(77)">../include/libtorrent/tracker_manager.hpp:290</a></td><td>this class probably doesn't need to have virtual functions.</td></tr><tr id="77" style="display: none;" colspan="3"><td colspan="3"><h2>this class probably doesn't need to have virtual functions.</h2><h4>../include/libtorrent/tracker_manager.hpp:290</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> int m_completion_timeout;
|
|
|
|
typedef mutex mutex_t;
|
|
mutable mutex_t m_mutex;
|
|
|
|
// used for timeouts
|
|
// this is set when the request has been sent
|
|
time_point m_start_time;
|
|
|
|
// this is set every time something is received
|
|
time_point m_read_time;
|
|
|
|
// the asio async operation
|
|
deadline_timer m_timeout;
|
|
|
|
int m_read_timeout;
|
|
|
|
bool m_abort;
|
|
};
|
|
|
|
<div style="background: #ffff00" width="100%"> struct TORRENT_EXTRA_EXPORT tracker_connection
|
|
</div> : timeout_handler
|
|
{
|
|
tracker_connection(tracker_manager& man
|
|
, tracker_request const& req
|
|
, io_service& ios
|
|
, boost::weak_ptr<request_callback> r);
|
|
|
|
void update_transaction_id(boost::shared_ptr<udp_tracker_connection> c
|
|
, boost::uint64_t tid);
|
|
|
|
boost::shared_ptr<request_callback> requester() const;
|
|
virtual ~tracker_connection() {}
|
|
|
|
tracker_request const& tracker_req() const { return m_req; }
|
|
|
|
void fail(error_code const& ec, int code = -1, char const* msg = ""
|
|
, int interval = 0, int min_interval = 0);
|
|
virtual void start() = 0;
|
|
virtual void close();
|
|
address const& bind_interface() const { return m_req.bind_ip; }
|
|
void sent_bytes(int bytes);
|
|
void received_bytes(int bytes);
|
|
virtual bool on_receive(error_code const&, udp::endpoint const&
|
|
, char const* /* buf */, int /* size */) { return false; }
|
|
virtual bool on_receive_hostname(error_code const&
|
|
, char const* /* hostname */
|
|
, char const* /* buf */, int /* size */) { return false; }
|
|
|
|
boost::shared_ptr<tracker_connection> shared_from_this()
|
|
{
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(78)">../include/libtorrent/aux_/session_impl.hpp:1163</a></td><td>the throttling of saving resume data could probably be factored out into a separate class</td></tr><tr id="78" style="display: none;" colspan="3"><td colspan="3"><h2>the throttling of saving resume data could probably be
|
|
factored out into a separate class</h2><h4>../include/libtorrent/aux_/session_impl.hpp:1163</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> // each second tick the timer takes a little
|
|
// bit longer than one second to trigger. The
|
|
// extra time it took is accumulated into this
|
|
// counter. Every time it exceeds 1000, torrents
|
|
// will tick their timers 2 seconds instead of one.
|
|
// this keeps the timers more accurate over time
|
|
// as a kind of "leap second" to adjust for the
|
|
// accumulated error
|
|
boost::uint16_t m_tick_residual;
|
|
|
|
#ifndef TORRENT_DISABLE_LOGGING
|
|
virtual void session_log(char const* fmt, ...) const TORRENT_OVERRIDE TORRENT_FORMAT(2,3);
|
|
virtual void session_vlog(char const* fmt, va_list& va) const TORRENT_OVERRIDE TORRENT_FORMAT(2,0);
|
|
|
|
// this list of tracker loggers serves as tracker_callbacks when
|
|
// shutting down. This list is just here to keep them alive during
|
|
		// the shutting down process
|
|
std::list<boost::shared_ptr<tracker_logger> > m_tracker_loggers;
|
|
#endif
|
|
|
|
<div style="background: #ffff00" width="100%"> virtual void queue_async_resume_data(boost::shared_ptr<torrent> const& t) TORRENT_OVERRIDE;
|
|
</div> virtual void done_async_resume() TORRENT_OVERRIDE;
|
|
void async_resume_dispatched();
|
|
|
|
// state for keeping track of external IPs
|
|
external_ip m_external_ip;
|
|
|
|
#ifndef TORRENT_DISABLE_EXTENSIONS
|
|
// this is a list to allow extensions to potentially remove themselves.
|
|
typedef std::list<boost::shared_ptr<plugin> > ses_extension_list_t;
|
|
ses_extension_list_t m_ses_extensions;
|
|
|
|
// the union of all session extensions' implemented_features(). This is
|
|
// used to exclude callbacks to the session extensions.
|
|
boost::uint32_t m_session_extension_features;
|
|
|
|
// std::string could be used for the query names if only all common
|
|
// implementations used SSO *glares at gcc*
|
|
struct extension_dht_query
|
|
{
|
|
boost::uint8_t query_len;
|
|
boost::array<char, max_dht_query_length> query;
|
|
dht_extension_handler_t handler;
|
|
};
|
|
typedef std::vector<extension_dht_query> m_extension_dht_queries_t;
|
|
m_extension_dht_queries_t m_extension_dht_queries;
|
|
#endif
|
|
|
|
// if this function is set, it indicates that torrents are allowed
|
|
// to be unloaded. If it isn't, torrents will never be unloaded
|
|
user_load_function_t m_user_load_torrent;
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(79)">../include/libtorrent/aux_/session_interface.hpp:137</a></td><td>the IP voting mechanism should be factored out to its own class, not part of the session</td></tr><tr id="79" style="display: none;" colspan="3"><td colspan="3"><h2>the IP voting mechanism should be factored out
|
|
to its own class, not part of the session</h2><h4>../include/libtorrent/aux_/session_interface.hpp:137</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">#if TORRENT_USE_ASSERTS
|
|
virtual bool is_single_thread() const = 0;
|
|
virtual bool has_peer(peer_connection const* p) const = 0;
|
|
virtual bool any_torrent_has_peer(peer_connection const* p) const = 0;
|
|
virtual bool is_posting_torrent_updates() const = 0;
|
|
#endif
|
|
protected:
|
|
~session_logger() {}
|
|
};
|
|
#endif // TORRENT_DISABLE_LOGGING || TORRENT_USE_ASSERTS
|
|
|
|
	// TODO: 2 make this interface a lot smaller. It could be split up into
|
|
// several smaller interfaces. Each subsystem could then limit the size
|
|
// of the mock object to test it.
|
|
struct TORRENT_EXTRA_EXPORT session_interface
|
|
: buffer_allocator_interface
|
|
#if !defined TORRENT_DISABLE_LOGGING || TORRENT_USE_ASSERTS
|
|
, session_logger
|
|
#endif
|
|
{
|
|
<div style="background: #ffff00" width="100%"> enum
|
|
</div> {
|
|
source_dht = 1,
|
|
source_peer = 2,
|
|
source_tracker = 4,
|
|
source_router = 8
|
|
};
|
|
|
|
virtual void set_external_address(address const& ip
|
|
, int source_type, address const& source) = 0;
|
|
virtual external_ip const& external_address() const = 0;
|
|
|
|
virtual disk_interface& disk_thread() = 0;
|
|
|
|
virtual alert_manager& alerts() = 0;
|
|
|
|
virtual torrent_peer_allocator_interface* get_peer_allocator() = 0;
|
|
virtual io_service& get_io_service() = 0;
|
|
virtual resolver_interface& get_resolver() = 0;
|
|
|
|
typedef boost::function<void(error_code const&, std::vector<address> const&)>
|
|
callback_t;
|
|
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(80)">../include/libtorrent/aux_/session_interface.hpp:162</a></td><td>remove this. There's already get_resolver()</td></tr><tr id="80" style="display: none;" colspan="3"><td colspan="3"><h2>remove this. There's already get_resolver()</h2><h4>../include/libtorrent/aux_/session_interface.hpp:162</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> source_peer = 2,
|
|
source_tracker = 4,
|
|
source_router = 8
|
|
};
|
|
|
|
virtual void set_external_address(address const& ip
|
|
, int source_type, address const& source) = 0;
|
|
virtual external_ip const& external_address() const = 0;
|
|
|
|
virtual disk_interface& disk_thread() = 0;
|
|
|
|
virtual alert_manager& alerts() = 0;
|
|
|
|
virtual torrent_peer_allocator_interface* get_peer_allocator() = 0;
|
|
virtual io_service& get_io_service() = 0;
|
|
virtual resolver_interface& get_resolver() = 0;
|
|
|
|
typedef boost::function<void(error_code const&, std::vector<address> const&)>
|
|
callback_t;
|
|
|
|
<div style="background: #ffff00" width="100%"> virtual void async_resolve(std::string const& host, int flags
|
|
</div> , callback_t const& h) = 0;
|
|
|
|
virtual bool has_connection(peer_connection* p) const = 0;
|
|
virtual void insert_peer(boost::shared_ptr<peer_connection> const& c) = 0;
|
|
|
|
virtual void queue_async_resume_data(boost::shared_ptr<torrent> const& t) = 0;
|
|
virtual void done_async_resume() = 0;
|
|
virtual void evict_torrent(torrent* t) = 0;
|
|
|
|
virtual void remove_torrent(torrent_handle const& h, int options = 0) = 0;
|
|
virtual void remove_torrent_impl(boost::shared_ptr<torrent> tptr, int options) = 0;
|
|
|
|
// port filter
|
|
virtual port_filter const& get_port_filter() const = 0;
|
|
virtual void ban_ip(address addr) = 0;
|
|
|
|
virtual boost::int64_t session_time() const = 0;
|
|
|
|
virtual bool is_paused() const = 0;
|
|
virtual bool is_aborted() const = 0;
|
|
virtual int num_uploads() const = 0;
|
|
virtual bool preemptive_unchoke() const = 0;
|
|
virtual void trigger_optimistic_unchoke() = 0;
|
|
virtual void trigger_unchoke() = 0;
|
|
|
|
virtual boost::weak_ptr<torrent> find_torrent(sha1_hash const& info_hash) const = 0;
|
|
virtual boost::weak_ptr<torrent> find_disconnect_candidate_torrent() const = 0;
|
|
virtual boost::shared_ptr<torrent> delay_load_torrent(sha1_hash const& info_hash
|
|
, peer_connection* pc) = 0;
|
|
virtual void insert_torrent(sha1_hash const& ih, boost::shared_ptr<torrent> const& t
|
|
</pre></td></tr><tr style="background: #cfc"><td>relevance 2</td><td><a href="javascript:expand(81)">../include/libtorrent/aux_/session_interface.hpp:217</a></td><td>factor out the thread pool for socket jobs into a separate class used to (potentially) issue socket write calls onto multiple threads</td></tr><tr id="81" style="display: none;" colspan="3"><td colspan="3"><h2>factor out the thread pool for socket jobs into a separate
|
|
class
|
|
used to (potentially) issue socket write calls onto multiple threads</h2><h4>../include/libtorrent/aux_/session_interface.hpp:217</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> virtual int num_torrents() const = 0;
|
|
|
|
virtual peer_id const& get_peer_id() const = 0;
|
|
|
|
// cork a peer and schedule a delayed uncork
|
|
// does nothing if the peer is already corked
|
|
virtual void cork_burst(peer_connection* p) = 0;
|
|
|
|
virtual void close_connection(peer_connection* p, error_code const& ec) = 0;
|
|
virtual int num_connections() const = 0;
|
|
|
|
virtual char* allocate_buffer() = 0;
|
|
virtual void free_buffer(char* buf) = 0;
|
|
virtual int send_buffer_size() const = 0;
|
|
|
|
virtual void deferred_submit_jobs() = 0;
|
|
|
|
virtual boost::uint16_t listen_port() const = 0;
|
|
virtual boost::uint16_t ssl_listen_port() const = 0;
|
|
|
|
<div style="background: #ffff00" width="100%"> virtual void post_socket_job(socket_job& j) = 0;
|
|
</div>
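		// --- editor's sketch for the TODO above (hypothetical class) ---
		// the thread pool behind post_socket_job() could be owned by a
		// dedicated type instead of living in this interface, e.g.:
		//
		//   struct socket_job_pool
		//   {
		//   	explicit socket_job_pool(int num_threads);
		//   	void post(socket_job& j);    // run the job on a pool thread
		//   	void set_num_threads(int n); // grow or shrink the pool
		//   	void abort(bool wait);
		//   };
		//
		// peers would then post write jobs to the pool rather than via the session.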
|
|
// load the specified torrent. also evict one torrent, except
|
|
// for the one specified, if we are at the limit of loaded torrents
|
|
virtual bool load_torrent(torrent* t) = 0;
|
|
|
|
// bump the specified torrent to make it the most recently used one
|
|
// in the torrent LRU (i.e. the least likely to get unloaded)
|
|
virtual void bump_torrent(torrent* t, bool back = true) = 0;
|
|
|
|
// ask for which interface and port to bind outgoing peer connections on
|
|
virtual tcp::endpoint bind_outgoing_socket(socket_type& s, address const&
|
|
remote_address, error_code& ec) const = 0;
|
|
virtual bool verify_bound_address(address const& addr, bool utp
|
|
, error_code& ec) = 0;
|
|
|
|
#ifndef TORRENT_DISABLE_MUTABLE_TORRENTS
|
|
virtual std::vector<boost::shared_ptr<torrent> > find_collection(
|
|
std::string const& collection) const = 0;
|
|
#endif
|
|
|
|
</pre></td></tr><tr style="background: #ccf"><td>relevance 1</td><td><a href="javascript:expand(82)">../src/disk_io_thread.cpp:218</a></td><td>it would be nice to have the number of threads be set dynamically</td></tr><tr id="82" style="display: none;" colspan="3"><td colspan="3"><h2>it would be nice to have the number of threads be set dynamically</h2><h4>../src/disk_io_thread.cpp:218</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">
|
|
TORRENT_ASSERT(m_magic == 0x1337);
|
|
#if TORRENT_USE_ASSERTS
|
|
m_magic = 0xdead;
|
|
#endif
|
|
}
|
|
|
|
void disk_io_thread::abort(bool wait)
|
|
{
|
|
m_abort = true;
|
|
if (m_num_threads == 0)
|
|
{
|
|
abort_jobs();
|
|
}
|
|
else
|
|
{
|
|
set_num_threads(0, wait);
|
|
}
|
|
}
|
|
|
|
<div style="background: #ffff00" width="100%"> void disk_io_thread::set_num_threads(int i, bool wait)
|
|
</div> {
|
|
TORRENT_ASSERT(m_magic == 0x1337);
|
|
if (i == m_num_threads) return;
|
|
|
|
if (i > m_num_threads)
|
|
{
|
|
while (m_num_threads < i)
|
|
{
|
|
int thread_id = (++m_num_threads) - 1;
|
|
thread_type_t type = generic_thread;
|
|
|
|
// this keeps the io_service::run() call blocked from returning.
|
|
// When shutting down, it's possible that the event queue is drained
|
|
// before the disk_io_thread has posted its last callback. When this
|
|
// happens, the io_service will have a pending callback from the
|
|
// disk_io_thread, but the event loop is not running. this means
|
|
// that the event is destructed after the disk_io_thread. If the
|
|
// event refers to a disk buffer it will try to free it, but the
|
|
// buffer pool won't exist anymore, and crash. This prevents that.
|
|
boost::shared_ptr<io_service::work> work =
|
|
boost::make_shared<io_service::work>(boost::ref(m_ios));
|
|
|
|
// the magic number 3 is also used in add_job()
|
|
// every 4:th thread is a hasher thread
|
|
if ((thread_id & 0x3) == 3) type = hasher_thread;
|
|
m_threads.push_back(boost::shared_ptr<thread>(
|
|
new thread(boost::bind(&disk_io_thread::thread_fun, this
|
|
, thread_id, type, work))));
|
|
}
|
|
}
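		// --- editor's note on the TODO above ---
		// beyond calling set_num_threads() explicitly, the count could be
		// adapted to load. a minimal heuristic (hypothetical helper, not
		// declared in the header) driven from the job-submission path:
		//
		//   void disk_io_thread::maybe_resize_pool(int queued_jobs)
		//   {
		//   	if (queued_jobs > 32 && m_num_threads < 8)
		//   		set_num_threads(m_num_threads + 1, false);
		//   	else if (queued_jobs == 0 && m_num_threads > 1)
		//   		set_num_threads(m_num_threads - 1, false);
		//   }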
|
|
</pre></td></tr><tr style="background: #ccf"><td>relevance 1</td><td><a href="javascript:expand(83)">../src/http_seed_connection.cpp:129</a></td><td>in chunked encoding mode, this assert won't hold. the chunk headers should be subtracted from the receive_buffer_size</td></tr><tr id="83" style="display: none;" colspan="3"><td colspan="3"><h2>in chunked encoding mode, this assert won't hold.
|
|
the chunk headers should be subtracted from the receive_buffer_size</h2><h4>../src/http_seed_connection.cpp:129</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> boost::optional<piece_block_progress>
|
|
http_seed_connection::downloading_piece_progress() const
|
|
{
|
|
if (m_requests.empty())
|
|
return boost::optional<piece_block_progress>();
|
|
|
|
boost::shared_ptr<torrent> t = associated_torrent().lock();
|
|
TORRENT_ASSERT(t);
|
|
|
|
piece_block_progress ret;
|
|
|
|
peer_request const& pr = m_requests.front();
|
|
ret.piece_index = pr.piece;
|
|
if (!m_parser.header_finished())
|
|
{
|
|
ret.bytes_downloaded = 0;
|
|
}
|
|
else
|
|
{
|
|
int receive_buffer_size = m_recv_buffer.get().left() - m_parser.body_start();
|
|
<div style="background: #ffff00" width="100%"> TORRENT_ASSERT_VAL(receive_buffer_size <= t->block_size(), receive_buffer_size);
|
|
</div> ret.bytes_downloaded = t->block_size() - receive_buffer_size;
|
|
}
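		// --- editor's note on the TODO above ---
		// with chunked transfer-encoding the receive buffer also holds the
		// chunk headers, so receive_buffer_size can exceed block_size() and
		// trip the assert. a fix would subtract them, assuming the parser
		// tracked that total (chunk_header_bytes() is hypothetical):
		//
		//   if (m_parser.chunked_encoding())
		//   	receive_buffer_size -= m_parser.chunk_header_bytes();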
|
|
// this is used to make sure that the block_index stays within
|
|
// bounds. If the entire piece is downloaded, the block_index
|
|
// would otherwise point to one past the end
|
|
int correction = ret.bytes_downloaded ? -1 : 0;
|
|
ret.block_index = (pr.start + ret.bytes_downloaded + correction) / t->block_size();
|
|
ret.full_block_bytes = t->block_size();
|
|
const int last_piece = t->torrent_file().num_pieces() - 1;
|
|
if (ret.piece_index == last_piece && ret.block_index
|
|
== t->torrent_file().piece_size(last_piece) / t->block_size())
|
|
ret.full_block_bytes = t->torrent_file().piece_size(last_piece) % t->block_size();
|
|
return ret;
|
|
}
|
|
|
|
void http_seed_connection::write_request(peer_request const& r)
|
|
{
|
|
INVARIANT_CHECK;
|
|
|
|
boost::shared_ptr<torrent> t = associated_torrent().lock();
|
|
TORRENT_ASSERT(t);
|
|
|
|
TORRENT_ASSERT(t->valid_metadata());
|
|
// http_seeds don't support requesting more than one piece
|
|
// at a time
|
|
TORRENT_ASSERT(r.length <= t->torrent_file().piece_size(r.piece));
|
|
|
|
std::string request;
|
|
request.reserve(400);
|
|
|
|
</pre></td></tr><tr style="background: #ccf"><td>relevance 1</td><td><a href="javascript:expand(84)">../src/session_impl.cpp:5449</a></td><td>report the proper address of the router as the source IP of this understanding of our external address, instead of the empty address</td></tr><tr id="84" style="display: none;" colspan="3"><td colspan="3"><h2>report the proper address of the router as the source IP of
|
|
this understanding of our external address, instead of the empty address</h2><h4>../src/session_impl.cpp:5449</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> void session_impl::on_port_mapping(int mapping, address const& ip, int port
|
|
, error_code const& ec, int map_transport)
|
|
{
|
|
TORRENT_ASSERT(is_single_thread());
|
|
|
|
TORRENT_ASSERT(map_transport >= 0 && map_transport <= 1);
|
|
|
|
if (mapping == m_udp_mapping[map_transport] && port != 0)
|
|
{
|
|
m_external_udp_port = port;
|
|
if (m_alerts.should_post<portmap_alert>())
|
|
m_alerts.emplace_alert<portmap_alert>(mapping, port
|
|
, map_transport);
|
|
return;
|
|
}
|
|
|
|
if (mapping == m_tcp_mapping[map_transport] && port != 0)
|
|
{
|
|
if (ip != address())
|
|
{
|
|
<div style="background: #ffff00" width="100%"> set_external_address(ip, source_router, address());
|
|
</div> }
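			// --- editor's note on the TODO above ---
			// acting on it would mean threading the router's address through
			// the port-mapping callback (a hypothetical extra parameter) so it
			// can be reported as the vote's source instead of address():
			//
			//   set_external_address(ip, source_router, router_addr);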
|
|
|
|
if (!m_listen_sockets.empty()) {
|
|
m_listen_sockets.front().external_address = ip;
|
|
m_listen_sockets.front().external_port = port;
|
|
}
|
|
if (m_alerts.should_post<portmap_alert>())
|
|
m_alerts.emplace_alert<portmap_alert>(mapping, port
|
|
, map_transport);
|
|
return;
|
|
}
|
|
|
|
if (ec)
|
|
{
|
|
if (m_alerts.should_post<portmap_error_alert>())
|
|
m_alerts.emplace_alert<portmap_error_alert>(mapping
|
|
, map_transport, ec);
|
|
}
|
|
else
|
|
{
|
|
if (m_alerts.should_post<portmap_alert>())
|
|
m_alerts.emplace_alert<portmap_alert>(mapping, port
|
|
, map_transport);
|
|
}
|
|
}
|
|
|
|
#ifndef TORRENT_NO_DEPRECATE
|
|
session_status session_impl::status() const
|
|
{
|
|
// INVARIANT_CHECK;
|
|
</pre></td></tr><tr style="background: #ccf"><td>relevance 1</td><td><a href="javascript:expand(85)">../src/torrent.cpp:1240</a></td><td>make this depend on the error and on the filesystem the files are being downloaded to. If the error is no_space_left_on_device and the filesystem doesn't support sparse files, only zero the priorities of the pieces that are at the tails of all files, leaving everything up to the highest written piece in each file</td></tr><tr id="85" style="display: none;" colspan="3"><td colspan="3"><h2>make this depend on the error and on the filesystem the
|
|
files are being downloaded to. If the error is no_space_left_on_device
|
|
and the filesystem doesn't support sparse files, only zero the priorities
|
|
of the pieces that are at the tails of all files, leaving everything
|
|
up to the highest written piece in each file</h2><h4>../src/torrent.cpp:1240</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">
|
|
// notify the user of the error
|
|
if (alerts().should_post<file_error_alert>())
|
|
alerts().emplace_alert<file_error_alert>(j->error.ec
|
|
, resolve_filename(j->error.file), j->error.operation_str(), get_handle());
|
|
|
|
// if a write operation failed, and future writes are likely to
|
|
// fail, while reads may succeed, just set the torrent to upload mode
|
|
// if we make an incorrect assumption here, it's not the end of the
|
|
// world, if we ever issue a read request and it fails as well, we
|
|
// won't get in here and we'll actually end up pausing the torrent
|
|
if (j->action == disk_io_job::write
|
|
&& (j->error.ec == boost::system::errc::read_only_file_system
|
|
|| j->error.ec == boost::system::errc::permission_denied
|
|
|| j->error.ec == boost::system::errc::operation_not_permitted
|
|
|| j->error.ec == boost::system::errc::no_space_on_device
|
|
|| j->error.ec == boost::system::errc::file_too_large))
|
|
{
|
|
// if we failed to write, stop downloading and just
|
|
// keep seeding.
|
|
<div style="background: #ffff00" width="100%"> set_upload_mode(true);
|
|
</div> return;
|
|
}
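	// --- editor's sketch for the TODO above ---
	// as the TODO suggests: on no_space_on_device with a filesystem that
	// does not support sparse files, only the tail pieces of each file need
	// to be de-prioritised rather than switching the whole torrent to
	// upload mode (supports_sparse_files() is hypothetical):
	//
	//   if (j->error.ec == boost::system::errc::no_space_on_device
	//   	&& !supports_sparse_files(m_save_path))
	//   {
	//   	file_storage const& fs = m_torrent_file->files();
	//   	for (int f = 0; f < fs.num_files(); ++f)
	//   	{
	//   		int const tail = int((fs.file_offset(f)
	//   			+ fs.file_size(f) - 1) / fs.piece_length());
	//   		piece_priority(tail, 0);
	//   	}
	//   }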
|
|
|
|
// put the torrent in an error-state
|
|
set_error(j->error.ec, j->error.file);
|
|
|
|
// if the error appears to be more serious than a full disk, just pause the torrent
|
|
pause();
|
|
}
|
|
|
|
void torrent::on_piece_fail_sync(disk_io_job const* j, piece_block b)
|
|
{
|
|
TORRENT_UNUSED(j);
|
|
TORRENT_UNUSED(b);
|
|
|
|
update_gauge();
|
|
// some peers that previously was no longer interesting may
|
|
// now have become interesting, since we lack this one piece now.
|
|
for (peer_iterator i = begin(); i != end();)
|
|
{
|
|
peer_connection* p = *i;
|
|
// update_interest may disconnect the peer and
|
|
// invalidate the iterator
|
|
++i;
|
|
// no need to do anything with peers that
|
|
// already are interested. Gaining a piece may
|
|
// only make uninteresting peers interesting again.
|
|
if (p->is_interesting()) continue;
|
|
p->update_interest();
|
|
if (!m_abort)
|
|
</pre></td></tr><tr style="background: #ccf"><td>relevance 1</td><td><a href="javascript:expand(86)">../src/torrent.cpp:7227</a></td><td>save the send_stats state instead of throwing them away it may pose an issue when downgrading though</td></tr><tr id="86" style="display: none;" colspan="3"><td colspan="3"><h2>save the send_stats state instead of throwing them away
|
|
it may pose an issue when downgrading though</h2><h4>../src/torrent.cpp:7227</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> for (int k = 0; k < bits; ++k)
|
|
v |= (info[j*8+k].state == piece_picker::block_info::state_finished)
|
|
? (1 << k) : 0;
|
|
bitmask.append(1, v);
|
|
TORRENT_ASSERT(bits == 8 || j == num_bitmask_bytes - 1);
|
|
}
|
|
piece_struct["bitmask"] = bitmask;
|
|
// push the struct onto the unfinished-piece list
|
|
up.push_back(piece_struct);
|
|
}
|
|
}
|
|
|
|
// save trackers
|
|
entry::list_type& tr_list = ret["trackers"].list();
|
|
tr_list.push_back(entry::list_type());
|
|
int tier = 0;
|
|
for (std::vector<announce_entry>::const_iterator i = m_trackers.begin()
|
|
, end(m_trackers.end()); i != end; ++i)
|
|
{
|
|
// don't save trackers we can't trust
|
|
<div style="background: #ffff00" width="100%"> if (i->send_stats == false) continue;
|
|
</div> if (i->tier == tier)
|
|
{
|
|
tr_list.back().list().push_back(i->url);
|
|
}
|
|
else
|
|
{
|
|
tr_list.push_back(entry::list_t);
|
|
tr_list.back().list().push_back(i->url);
|
|
tier = i->tier;
|
|
}
|
|
}
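	// --- editor's note on the TODO above ---
	// rather than dropping these trackers from the resume data, the flag
	// could be stored alongside them so nothing is lost across sessions,
	// e.g. a parallel per-tracker list, written inside the loop above, under
	// a new (hypothetical) key that older clients would simply ignore:
	//
	//   ret["trackers_send_stats"].list().push_back(i->send_stats ? 1 : 0);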
|
|
|
|
// save web seeds
|
|
if (!m_web_seeds.empty())
|
|
{
|
|
entry::list_type& url_list = ret["url-list"].list();
|
|
entry::list_type& httpseed_list = ret["httpseeds"].list();
|
|
for (std::list<web_seed_t>::const_iterator i = m_web_seeds.begin()
|
|
, end(m_web_seeds.end()); i != end; ++i)
|
|
{
|
|
if (i->removed) continue;
|
|
if (i->type == web_seed_entry::url_seed)
|
|
url_list.push_back(i->url);
|
|
else if (i->type == web_seed_entry::http_seed)
|
|
httpseed_list.push_back(i->url);
|
|
}
|
|
}
|
|
|
|
// write have bitmask
|
|
// the pieces string has one byte per piece. Each
|
|
</pre></td></tr><tr style="background: #ccf"><td>relevance 1</td><td><a href="javascript:expand(87)">../src/torrent.cpp:8471</a></td><td>should disconnect all peers that have the pieces we have not just seeds. It would be pretty expensive to check all pieces for all peers though</td></tr><tr id="87" style="display: none;" colspan="3"><td colspan="3"><h2>should disconnect all peers that have the pieces we have
|
|
not just seeds. It would be pretty expensive to check all pieces
|
|
for all peers though</h2><h4>../src/torrent.cpp:8471</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">
|
|
set_state(torrent_status::finished);
|
|
set_queue_position(-1);
|
|
|
|
m_became_finished = m_ses.session_time();
|
|
|
|
// we have to call completed() before we start
|
|
// disconnecting peers, since there's an assert
|
|
	// to make sure we've cleared the piece picker
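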
|
|
if (is_seed()) completed();
|
|
|
|
send_upload_only();
|
|
state_updated();
|
|
|
|
if (m_completed_time == 0)
|
|
m_completed_time = time(0);
|
|
|
|
// disconnect all seeds
|
|
if (settings().get_bool(settings_pack::close_redundant_connections))
|
|
{
|
|
<div style="background: #ffff00" width="100%"> std::vector<peer_connection*> seeds;
|
|
</div> for (peer_iterator i = m_connections.begin();
|
|
i != m_connections.end(); ++i)
|
|
{
|
|
peer_connection* p = *i;
|
|
TORRENT_ASSERT(p->associated_torrent().lock().get() == this);
|
|
if (p->upload_only())
|
|
{
|
|
#ifndef TORRENT_DISABLE_LOGGING
|
|
p->peer_log(peer_log_alert::info, "SEED", "CLOSING CONNECTION");
|
|
#endif
|
|
seeds.push_back(p);
|
|
}
|
|
}
|
|
std::for_each(seeds.begin(), seeds.end()
|
|
, boost::bind(&peer_connection::disconnect, _1, errors::torrent_finished
|
|
, op_bittorrent, 0));
|
|
}
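	// --- editor's note on the TODO above ---
	// going beyond upload_only() peers means checking that a peer's piece
	// set is a subset of ours, roughly:
	//
	//   bool redundant = true;
	//   bitfield const& them = p->get_bitfield();
	//   for (int k = 0; k < them.size(); ++k)
	//   	if (them[k] && !have_piece(k)) { redundant = false; break; }
	//
	// which is O(num_pieces) per peer; that linear scan is the cost the
	// TODO is worried about.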
|
|
|
|
if (m_abort) return;
|
|
|
|
update_want_peers();
|
|
|
|
if (m_storage)
|
|
{
|
|
// we need to keep the object alive during this operation
|
|
inc_refcount("release_files");
|
|
m_ses.disk_thread().async_release_files(m_storage.get()
|
|
, boost::bind(&torrent::on_cache_flushed, shared_from_this(), _1));
|
|
}
|
|
|
|
</pre></td></tr><tr style="background: #ccf"><td>relevance 1</td><td><a href="javascript:expand(88)">../include/libtorrent/ip_voter.hpp:124</a></td><td>instead, have one instance per possible subnet, global IPv4, global IPv6, loopback, 192.168.x.x, 10.x.x.x, etc.</td></tr><tr id="88" style="display: none;" colspan="3"><td colspan="3"><h2>instead, have one instance per possible subnet, global IPv4, global IPv6, loopback, 192.168.x.x, 10.x.x.x, etc.</h2><h4>../include/libtorrent/ip_voter.hpp:124</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> // away all the votes and started from scratch, in case
|
|
// our IP has changed
|
|
time_point m_last_rotate;
|
|
};
|
|
|
|
// this keeps track of multiple external IPs (for now, just IPv6 and IPv4, but
|
|
// it could be extended to deal with loopback and local network addresses as well)
|
|
struct TORRENT_EXTRA_EXPORT external_ip
|
|
{
|
|
// returns true if a different IP is the top vote now
|
|
// i.e. we changed our idea of what our external IP is
|
|
bool cast_vote(address const& ip, int source_type, address const& source);
|
|
|
|
// the external IP as it would be observed from `ip`
|
|
address external_address(address const& ip) const;
|
|
|
|
private:
|
|
|
|
// for now, assume one external IPv4 and one external IPv6 address
|
|
// 0 = IPv4 1 = IPv6
|
|
<div style="background: #ffff00" width="100%"> ip_voter m_vote_group[2];
|
|
</div> };
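	// --- editor's sketch for the TODO above ---
	// generalising from {IPv4, IPv6} to one voter per address class might
	// look like (names hypothetical):
	//
	//   enum addr_class_t { v4_global, v4_private, v4_loopback
	//   	, v6_global, v6_loopback, num_classes };
	//   ip_voter m_vote_group[num_classes];
	//   static addr_class_t classify(address const& ip);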
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(89)">../test/test_block_cache.cpp:469</a></td><td>test try_evict_blocks</td></tr><tr id="89" style="display: none;" colspan="3"><td colspan="3"><h2>test try_evict_blocks</h2><h4>../test/test_block_cache.cpp:469</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"></pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(90)">../test/test_block_cache.cpp:470</a></td><td>test evicting volatile pieces, to see them be removed</td></tr><tr id="90" style="display: none;" colspan="3"><td colspan="3"><h2>test evicting volatile pieces, to see them be removed</h2><h4>../test/test_block_cache.cpp:470</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"></pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(91)">../test/test_block_cache.cpp:471</a></td><td>test evicting dirty pieces</td></tr><tr id="91" style="display: none;" colspan="3"><td colspan="3"><h2>test evicting dirty pieces</h2><h4>../test/test_block_cache.cpp:471</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"></pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(92)">../test/test_block_cache.cpp:472</a></td><td>test free_piece</td></tr><tr id="92" style="display: none;" colspan="3"><td colspan="3"><h2>test free_piece</h2><h4>../test/test_block_cache.cpp:472</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"></pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(93)">../test/test_block_cache.cpp:473</a></td><td>test abort_dirty</td></tr><tr id="93" style="display: none;" colspan="3"><td colspan="3"><h2>test abort_dirty</h2><h4>../test/test_block_cache.cpp:473</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"></pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(94)">../test/test_block_cache.cpp:474</a></td><td>test unaligned reads</td></tr><tr id="94" style="display: none;" colspan="3"><td colspan="3"><h2>test unaligned reads</h2><h4>../test/test_block_cache.cpp:474</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> // it's supposed to be a cache hit
|
|
TEST_CHECK(ret >= 0);
|
|
// return the reference to the buffer we just read
|
|
RETURN_BUFFER;
|
|
|
|
tailqueue<disk_io_job> jobs;
|
|
bc.clear(jobs);
|
|
}
|
|
|
|
TORRENT_TEST(block_cache)
|
|
{
|
|
test_write();
|
|
test_flush();
|
|
test_insert();
|
|
test_evict();
|
|
test_arc_promote();
|
|
test_arc_unghost();
|
|
test_iovec();
|
|
test_unaligned_read();
|
|
|
|
<div style="background: #ffff00" width="100%">}
|
|
</div>
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(95)">../test/test_bloom_filter.cpp:130</a></td><td>test size()</td></tr><tr id="95" style="display: none;" colspan="3"><td colspan="3"><h2>test size()</h2><h4>../test/test_bloom_filter.cpp:130</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"></pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(96)">../test/test_bloom_filter.cpp:131</a></td><td>test clear()</td></tr><tr id="96" style="display: none;" colspan="3"><td colspan="3"><h2>test clear()</h2><h4>../test/test_bloom_filter.cpp:131</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> TEST_EQUAL(memcmp(bits_out.c_str(), bits, 4), 0);
|
|
|
|
sha1_hash k( "\x01\x00\x02\x00 ");
|
|
TEST_CHECK(!filter.find(k));
|
|
filter.set(k);
|
|
TEST_CHECK(filter.find(k));
|
|
|
|
boost::uint8_t compare[4] = { 0x16, 0xff, 0x55, 0xaa};
|
|
|
|
bits_out = filter.to_string();
|
|
TEST_EQUAL(memcmp(compare, bits_out.c_str(), 4), 0);
|
|
}
|
|
|
|
TORRENT_TEST(bloom_filter)
|
|
{
|
|
test_set_and_get();
|
|
test_set_bits();
|
|
test_count_zeroes();
|
|
test_to_from_string();
|
|
|
|
<div style="background: #ffff00" width="100%">}
|
|
</div>
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(97)">../test/test_dht.cpp:103</a></td><td>ideally the mock_socket would contain this queue of packets, to make tests independent</td></tr><tr id="97" style="display: none;" colspan="3"><td colspan="3"><h2>ideally the mock_socket would contain this queue of packets, to
|
|
make tests independent</h2><h4>../test/test_dht.cpp:103</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> carry = sum > 255;
|
|
}
|
|
}
|
|
|
|
void node_push_back(void* userdata, libtorrent::dht::node_entry const& n)
|
|
{
|
|
using namespace libtorrent::dht;
|
|
std::vector<node_entry>* nv = (std::vector<node_entry>*)userdata;
|
|
nv->push_back(n);
|
|
}
|
|
|
|
static void nop(void* userdata, libtorrent::dht::node_entry const& n) {}
|
|
|
|
std::list<std::pair<udp::endpoint, entry> > g_sent_packets;
|
|
|
|
struct mock_socket : udp_socket_interface
|
|
{
|
|
bool has_quota() { return true; }
|
|
bool send_packet(entry& msg, udp::endpoint const& ep, int flags)
|
|
{
|
|
<div style="background: #ffff00" width="100%"> g_sent_packets.push_back(std::make_pair(ep, msg));
|
|
</div> return true;
|
|
}
|
|
};
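// --- editor's sketch for the TODO above ---
// giving the mock its own queue removes the g_sent_packets global shared
// between tests (the type and member names below are hypothetical):
struct owning_mock_socket : udp_socket_interface
{
	std::list<std::pair<udp::endpoint, entry> > sent_packets;
	bool has_quota() { return true; }
	bool send_packet(entry& msg, udp::endpoint const& ep, int flags)
	{ sent_packets.push_back(std::make_pair(ep, msg)); return true; }
};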
|
|
|
|
sha1_hash generate_next()
|
|
{
|
|
sha1_hash ret;
|
|
for (int i = 0; i < 20; ++i) ret[i] = rand() & 0xff;
|
|
return ret;
|
|
}
|
|
|
|
boost::array<char, 64> generate_key()
|
|
{
|
|
boost::array<char, 64> ret;
|
|
for (int i = 0; i < 64; ++i) ret[i] = rand() & 0xff;
|
|
return ret;
|
|
}
|
|
|
|
static const std::string no;
|
|
|
|
std::list<std::pair<udp::endpoint, entry> >::iterator
|
|
find_packet(udp::endpoint ep)
|
|
{
|
|
return std::find_if(g_sent_packets.begin(), g_sent_packets.end()
|
|
, boost::bind(&std::pair<udp::endpoint, entry>::first, _1) == ep);
|
|
}
|
|
|
|
void lazy_from_entry(entry const& e, bdecode_node& l)
|
|
{
|
|
error_code ec;
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(98)">../test/test_dht.cpp:420</a></td><td>check to make sure the "best" items are stored</td></tr><tr id="98" style="display: none;" colspan="3"><td colspan="3"><h2>check to make sure the "best" items are stored</h2><h4>../test/test_dht.cpp:420</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">
|
|
key_desc_t desc[] =
|
|
{
|
|
{ "r", bdecode_node::dict_t, 0, key_desc_t::parse_children },
|
|
{ "v", bdecode_node::dict_t, 0, 0},
|
|
{ "id", bdecode_node::string_t, 20, key_desc_t::last_child},
|
|
{ "y", bdecode_node::string_t, 1, 0},
|
|
};
|
|
|
|
bdecode_node parsed[4];
|
|
char error_string[200];
|
|
|
|
int ret = verify_message(response, desc, parsed, error_string
|
|
, sizeof(error_string));
|
|
if (ret)
|
|
{
|
|
items_num.insert(items_num.begin(), j);
|
|
}
|
|
}
|
|
|
|
<div style="background: #ffff00" width="100%"> TEST_EQUAL(items_num.size(), 4);
|
|
</div>}
|
|
|
|
int sum_distance_exp(int s, node_entry const& e, node_id const& ref)
|
|
{
|
|
return s + distance_exp(e.id, ref);
|
|
}
|
|
|
|
std::vector<tcp::endpoint> g_got_peers;
|
|
|
|
void get_peers_cb(std::vector<tcp::endpoint> const& peers)
|
|
{
|
|
g_got_peers.insert(g_got_peers.end(), peers.begin(), peers.end());
|
|
}
|
|
|
|
std::vector<dht::item> g_got_items;
|
|
dht::item g_put_item;
|
|
int g_put_count;
|
|
|
|
void get_mutable_item_cb(dht::item const& i, bool a)
|
|
{
|
|
if (!a) return;
|
|
if (!i.empty())
|
|
g_got_items.push_back(i);
|
|
}
|
|
|
|
void put_mutable_item_data_cb(dht::item& i)
|
|
{
|
|
if (!i.empty())
|
|
g_got_items.push_back(i);
|
|
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(99)">../test/test_dht.cpp:513</a></td><td>test obfuscated_get_peers</td></tr><tr id="99" style="display: none;" colspan="3"><td colspan="3"><h2>test obfuscated_get_peers</h2><h4>../test/test_dht.cpp:513</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"></pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(100)">../test/test_fast_extension.cpp:880</a></td><td>test sending invalid requests (out of bound piece index, offsets and sizes)</td></tr><tr id="100" style="display: none;" colspan="3"><td colspan="3"><h2>test sending invalid requests (out of bound piece index, offsets and
|
|
sizes)</h2><h4>../test/test_fast_extension.cpp:880</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> sha1_hash ih;
|
|
boost::shared_ptr<lt::session> ses;
|
|
io_service ios;
|
|
tcp::socket s(ios);
|
|
setup_peer(s, ih, ses);
|
|
|
|
char recv_buffer[1000];
|
|
do_handshake(s, ih, recv_buffer);
|
|
print_session_log(*ses);
|
|
send_have_none(s);
|
|
|
|
peer_request req;
|
|
req.piece = 124134235;
|
|
req.start = 0;
|
|
req.length = 0x4000;
|
|
send_request(s, req);
|
|
}
|
|
|
|
#endif // TORRENT_DISABLE_EXTENSIONS
|
|
|
|
<div style="background: #ffff00" width="100%">
|
|
</div></pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(101)">../test/test_file_progress.cpp:109</a></td><td>test the update function too</td></tr><tr id="101" style="display: none;" colspan="3"><td colspan="3"><h2>test the update function too</h2><h4>../test/test_file_progress.cpp:109</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> for (int idx = 0; idx < fs.num_pieces(); ++idx)
|
|
{
|
|
piece_picker picker;
|
|
picker.init(4, fs.total_size() % 4, fs.num_pieces());
|
|
picker.we_have(idx);
|
|
|
|
std::vector<boost::int64_t> vec;
|
|
aux::file_progress fp;
|
|
|
|
fp.init(picker, fs);
|
|
fp.export_progress(vec);
|
|
|
|
boost::uint64_t sum = 0;
|
|
for (int i = 0; i < int(vec.size()); ++i)
|
|
sum += vec[i];
|
|
|
|
TEST_EQUAL(int(sum), fs.piece_size(idx));
|
|
}
|
|
}
|
|
|
|
<div style="background: #ffff00" width="100%">
|
|
</div></pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(102)">../test/test_file_storage.cpp:214</a></td><td>test file_storage::optimize</td></tr><tr id="102" style="display: none;" colspan="3"><td colspan="3"><h2>test file_storage::optimize</h2><h4>../test/test_file_storage.cpp:214</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"></pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(103)">../test/test_file_storage.cpp:215</a></td><td>test map_block</td></tr><tr id="103" style="display: none;" colspan="3"><td colspan="3"><h2>test map_block</h2><h4>../test/test_file_storage.cpp:215</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"></pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(104)">../test/test_file_storage.cpp:216</a></td><td>test piece_size(int piece)</td></tr><tr id="104" style="display: none;" colspan="3"><td colspan="3"><h2>test piece_size(int piece)</h2><h4>../test/test_file_storage.cpp:216</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"></pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(105)">../test/test_file_storage.cpp:217</a></td><td>test file_index_at_offset</td></tr><tr id="105" style="display: none;" colspan="3"><td colspan="3"><h2>test file_index_at_offset</h2><h4>../test/test_file_storage.cpp:217</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"></pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(106)">../test/test_file_storage.cpp:218</a></td><td>test file attributes</td></tr><tr id="106" style="display: none;" colspan="3"><td colspan="3"><h2>test file attributes</h2><h4>../test/test_file_storage.cpp:218</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"></pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(107)">../test/test_file_storage.cpp:219</a></td><td>test symlinks</td></tr><tr id="107" style="display: none;" colspan="3"><td colspan="3"><h2>test symlinks</h2><h4>../test/test_file_storage.cpp:219</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"></pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(108)">../test/test_file_storage.cpp:220</a></td><td>test pad_files</td></tr><tr id="108" style="display: none;" colspan="3"><td colspan="3"><h2>test pad_files</h2><h4>../test/test_file_storage.cpp:220</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"></pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(109)">../test/test_file_storage.cpp:221</a></td><td>test reorder_file (make sure internal_file_entry::swap() is used)</td></tr><tr id="109" style="display: none;" colspan="3"><td colspan="3"><h2>test reorder_file (make sure internal_file_entry::swap() is used)</h2><h4>../test/test_file_storage.cpp:221</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> TEST_EQUAL(rq.start, 298);
|
|
TEST_EQUAL(rq.length, 841);
|
|
}
|
|
|
|
TORRENT_TEST(file_path_hash)
|
|
{
|
|
// test file_path_hash and path_hash. Make sure we can detect a path
|
|
// whose name collides with
|
|
file_storage fs;
|
|
fs.set_piece_length(512);
|
|
fs.add_file(combine_path("temp_storage", "Foo"), 17);
|
|
fs.add_file(combine_path("temp_storage", "foo"), 612);
|
|
|
|
fprintf(stderr, "path: %s\n", fs.file_path(0).c_str());
|
|
fprintf(stderr, "file: %s\n", fs.file_path(1).c_str());
|
|
boost::uint32_t file_hash0 = fs.file_path_hash(0, "a");
|
|
boost::uint32_t file_hash1 = fs.file_path_hash(1, "a");
|
|
TEST_EQUAL(file_hash0, file_hash1);
|
|
}
|
|
|
|
<div style="background: #ffff00" width="100%">
|
|
</div></pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(110)">../test/test_peer_list.cpp:939</a></td><td>test erasing peers</td></tr><tr id="110" style="display: none;" colspan="3"><td colspan="3"><h2>test erasing peers</h2><h4>../test/test_peer_list.cpp:939</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"></pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(111)">../test/test_peer_list.cpp:940</a></td><td>test update_peer_port with allow_multiple_connections_per_ip and without</td></tr><tr id="111" style="display: none;" colspan="3"><td colspan="3"><h2>test update_peer_port with allow_multiple_connections_per_ip and without</h2><h4>../test/test_peer_list.cpp:940</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"></pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(112)">../test/test_peer_list.cpp:941</a></td><td>test add i2p peers</td></tr><tr id="112" style="display: none;" colspan="3"><td colspan="3"><h2>test add i2p peers</h2><h4>../test/test_peer_list.cpp:941</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"></pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(113)">../test/test_peer_list.cpp:942</a></td><td>test allow_i2p_mixed</td></tr><tr id="113" style="display: none;" colspan="3"><td colspan="3"><h2>test allow_i2p_mixed</h2><h4>../test/test_peer_list.cpp:942</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"></pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(114)">../test/test_peer_list.cpp:943</a></td><td>test insert_peer failing with all error conditions</td></tr><tr id="114" style="display: none;" colspan="3"><td colspan="3"><h2>test insert_peer failing with all error conditions</h2><h4>../test/test_peer_list.cpp:943</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"></pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(115)">../test/test_peer_list.cpp:944</a></td><td>test IPv6</td></tr><tr id="115" style="display: none;" colspan="3"><td colspan="3"><h2>test IPv6</h2><h4>../test/test_peer_list.cpp:944</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"></pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(116)">../test/test_peer_list.cpp:945</a></td><td>test connect_to_peer() failing</td></tr><tr id="116" style="display: none;" colspan="3"><td colspan="3"><h2>test connect_to_peer() failing</h2><h4>../test/test_peer_list.cpp:945</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"></pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(117)">../test/test_peer_list.cpp:946</a></td><td>test connection_closed</td></tr><tr id="117" style="display: none;" colspan="3"><td colspan="3"><h2>test connection_closed</h2><h4>../test/test_peer_list.cpp:946</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"></pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(118)">../test/test_peer_list.cpp:947</a></td><td>connect candidates recalculation when incrementing failcount</td></tr><tr id="118" style="display: none;" colspan="3"><td colspan="3"><h2>connect candidates recalculation when incrementing failcount</h2><h4>../test/test_peer_list.cpp:947</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> torrent_peer* peer4 = 
add_peer(p, st, ep("10.0.0.4", 8080));
|
|
TEST_CHECK(peer4);
|
|
TEST_EQUAL(p.num_peers(), 4);
|
|
torrent_peer* peer5 = add_peer(p, st, ep("10.0.0.5", 8080));
|
|
TEST_CHECK(peer5);
|
|
TEST_EQUAL(p.num_peers(), 5);
|
|
torrent_peer* peer6 = p.add_peer(ep("10.0.0.6", 8080), 0, 0, &st);
|
|
TEST_CHECK(peer6 == NULL);
|
|
TEST_EQUAL(p.num_peers(), 5);
|
|
|
|
// one of the connection should have been removed
|
|
TEST_EQUAL(has_peer(p, ep("10.0.0.1", 8080))
|
|
+ has_peer(p, ep("10.0.0.2", 8080))
|
|
+ has_peer(p, ep("10.0.0.3", 8080))
|
|
+ has_peer(p, ep("10.0.0.4", 8080))
|
|
+ has_peer(p, ep("10.0.0.5", 8080))
|
|
+ has_peer(p, ep("10.0.0.6", 8080))
|
|
, 5);
|
|
}
|
|
|
|
<div style="background: #ffff00" width="100%">
|
|
</div></pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(119)">../test/test_resolve_links.cpp:80</a></td><td>test files with different piece size (negative test)</td></tr><tr id="119" style="display: none;" colspan="3"><td colspan="3"><h2>test files with different piece size (negative test)</h2><h4>../test/test_resolve_links.cpp:80</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> { "test2", "test1_pad_files", 0},
|
|
{ "test3", "test1_pad_files", 0},
|
|
{ "test2", "test1_single", 0},
|
|
|
|
// these are all padded. The first small file will accidentally also
|
|
// match, even though it's not tail padded, the following file is identical
|
|
{ "test2_pad_files", "test1_pad_files", 2},
|
|
{ "test3_pad_files", "test1_pad_files", 2},
|
|
{ "test3_pad_files", "test2_pad_files", 2},
|
|
{ "test1_pad_files", "test2_pad_files", 2},
|
|
{ "test1_pad_files", "test3_pad_files", 2},
|
|
{ "test2_pad_files", "test3_pad_files", 2},
|
|
|
|
// one might expect this to work, but since the tail of the single file
|
|
// torrent is not padded, the last piece hash won't match
|
|
{ "test1_pad_files", "test1_single", 0},
|
|
|
|
// if it's padded on the other hand, it will work
|
|
{ "test1_pad_files", "test1_single_padded", 1},
|
|
|
|
<div style="background: #ffff00" width="100%">};
|
|
</div>
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(120)">../test/test_resolve_links.cpp:83</a></td><td>it would be nice to test resolving of more than just 2 files as well. like 3 single file torrents merged into one, resolving all 3 files.</td></tr><tr id="120" style="display: none;" colspan="3"><td colspan="3"><h2>it would be nice to test resolving of more than just 2 files as well.
|
|
like 3 single file torrents merged into one, resolving all 3 files.</h2><h4>../test/test_resolve_links.cpp:83</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> { "test2", "test1_single", 0},
|
|
|
|
// these are all padded. The first small file will accidentally also
|
|
// match, even though it's not tail padded, the following file is identical
|
|
{ "test2_pad_files", "test1_pad_files", 2},
|
|
{ "test3_pad_files", "test1_pad_files", 2},
|
|
{ "test3_pad_files", "test2_pad_files", 2},
|
|
{ "test1_pad_files", "test2_pad_files", 2},
|
|
{ "test1_pad_files", "test3_pad_files", 2},
|
|
{ "test2_pad_files", "test3_pad_files", 2},
|
|
|
|
// one might expect this to work, but since the tail of the single file
|
|
// torrent is not padded, the last piece hash won't match
|
|
{ "test1_pad_files", "test1_single", 0},
|
|
|
|
// if it's padded on the other hand, it will work
|
|
{ "test1_pad_files", "test1_single_padded", 1},
|
|
|
|
};
|
|
|
|
<div style="background: #ffff00" width="100%">
|
|
</div>TORRENT_TEST(resolve_links)
|
|
{
|
|
|
|
#ifndef TORRENT_DISABLE_MUTABLE_TORRENTS
|
|
std::string path = combine_path(parent_path(current_working_directory())
|
|
, "mutable_test_torrents");
|
|
|
|
for (int i = 0; i < int(sizeof(test_torrents)/sizeof(test_torrents[0])); ++i)
|
|
{
|
|
test_torrent_t const& e = test_torrents[i];
|
|
|
|
std::string p = combine_path(path, e.filename1) + ".torrent";
|
|
fprintf(stderr, "loading %s\n", p.c_str());
|
|
boost::shared_ptr<torrent_info> ti1 = boost::make_shared<torrent_info>(p);
|
|
|
|
p = combine_path(path, e.filename2) + ".torrent";
|
|
fprintf(stderr, "loading %s\n", p.c_str());
|
|
boost::shared_ptr<torrent_info> ti2 = boost::make_shared<torrent_info>(p);
|
|
|
|
fprintf(stderr, "resolving\n");
|
|
resolve_links l(ti1);
|
|
l.match(ti2, ".");
|
|
|
|
std::vector<resolve_links::link_t> const& links = l.get_links();
|
|
|
|
int num_matches = std::count_if(links.begin(), links.end()
|
|
, boost::bind(&resolve_links::link_t::ti, _1));
|
|
|
|
// some debug output in case the test fails
|
|
if (num_matches > e.expected_matches)
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(121)">../test/test_resume.cpp:232</a></td><td>test what happens when loading a resume file with both piece priorities and file priorities (file prio should take presedence)</td></tr><tr id="121" style="display: none;" colspan="3"><td colspan="3"><h2>test what happens when loading a resume file with both piece priorities
|
|
and file priorities (file prio should take precedence)</h2><h4>../test/test_resume.cpp:232</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> fprintf(stderr, "%s\n", ra->resume_data->to_string().c_str());
|
|
entry::string_type prios = (*ra->resume_data)["piece_priority"].string();
|
|
TEST_EQUAL(int(prios.size()), ti->num_pieces());
|
|
TEST_EQUAL(prios[0], '\0');
|
|
TEST_EQUAL(prios[1], '\x04');
|
|
TEST_EQUAL(prios[ti->num_pieces()-1], '\0');
|
|
|
|
bencode(std::back_inserter(p.resume_data), *ra->resume_data);
|
|
}
|
|
|
|
ses.remove_torrent(h);
|
|
|
|
// now, make sure the piece priorities are loaded correctly
|
|
h = ses.add_torrent(p);
|
|
|
|
TEST_EQUAL(h.piece_priority(0), 0);
|
|
TEST_EQUAL(h.piece_priority(1), 4);
|
|
TEST_EQUAL(h.piece_priority(ti->num_pieces()-1), 0);
|
|
}
|
|
|
|
<div style="background: #ffff00" width="100%">
|
|
</div></pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(122)">../test/test_resume.cpp:235</a></td><td>make sure a resume file only ever contain file priorities OR piece priorities. Never both.</td></tr><tr id="122" style="display: none;" colspan="3"><td colspan="3"><h2>make sure a resume file only ever contain file priorities OR piece
|
|
priorities. Never both.</h2><h4>../test/test_resume.cpp:235</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> entry::string_type prios = (*ra->resume_data)["piece_priority"].string();
|
|
TEST_EQUAL(int(prios.size()), ti->num_pieces());
|
|
TEST_EQUAL(prios[0], '\0');
|
|
TEST_EQUAL(prios[1], '\x04');
|
|
TEST_EQUAL(prios[ti->num_pieces()-1], '\0');
|
|
|
|
bencode(std::back_inserter(p.resume_data), *ra->resume_data);
|
|
}
|
|
|
|
ses.remove_torrent(h);
|
|
|
|
// now, make sure the piece priorities are loaded correctly
|
|
h = ses.add_torrent(p);
|
|
|
|
TEST_EQUAL(h.piece_priority(0), 0);
|
|
TEST_EQUAL(h.piece_priority(1), 4);
|
|
TEST_EQUAL(h.piece_priority(ti->num_pieces()-1), 0);
|
|
}
|
|
|
|
|
|
<div style="background: #ffff00" width="100%">
|
|
</div></pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(123)">../test/test_resume.cpp:238</a></td><td>generally save</td></tr><tr id="123" style="display: none;" colspan="3"><td colspan="3"><h2>generally save</h2><h4>../test/test_resume.cpp:238</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> TEST_EQUAL(int(prios.size()), ti->num_pieces());
|
|
TEST_EQUAL(prios[0], '\0');
|
|
TEST_EQUAL(prios[1], '\x04');
|
|
TEST_EQUAL(prios[ti->num_pieces()-1], '\0');
|
|
|
|
bencode(std::back_inserter(p.resume_data), *ra->resume_data);
|
|
}
|
|
|
|
ses.remove_torrent(h);
|
|
|
|
// now, make sure the piece priorities are loaded correctly
|
|
h = ses.add_torrent(p);
|
|
|
|
TEST_EQUAL(h.piece_priority(0), 0);
|
|
TEST_EQUAL(h.piece_priority(1), 4);
|
|
TEST_EQUAL(h.piece_priority(ti->num_pieces()-1), 0);
|
|
}
|
|
|
|
|
|
|
|
<div style="background: #ffff00" width="100%">
|
|
</div>TORRENT_TEST(file_priorities_default)
|
|
{
|
|
lt::session ses;
|
|
std::vector<int> file_priorities = test_resume_flags(ses, 0, "", "").file_priorities();
|
|
|
|
TEST_EQUAL(file_priorities.size(), 3);
|
|
TEST_EQUAL(file_priorities[0], 4);
|
|
TEST_EQUAL(file_priorities[1], 4);
|
|
TEST_EQUAL(file_priorities[2], 4);
|
|
}
|
|
|
|
TORRENT_TEST(file_priorities_resume_seed_mode)
|
|
{
|
|
// in share mode file priorities should always be 0
|
|
lt::session ses;
|
|
std::vector<int> file_priorities = test_resume_flags(ses,
|
|
add_torrent_params::flag_share_mode, "", "123").file_priorities();
|
|
|
|
TEST_EQUAL(file_priorities.size(), 3);
|
|
TEST_EQUAL(file_priorities[0], 0);
|
|
TEST_EQUAL(file_priorities[1], 0);
|
|
TEST_EQUAL(file_priorities[2], 0);
|
|
}
|
|
|
|
TORRENT_TEST(file_priorities_seed_mode)
|
|
{
|
|
// in share mode file priorities should always be 0
|
|
lt::session ses;
|
|
std::vector<int> file_priorities = test_resume_flags(ses,
|
|
add_torrent_params::flag_share_mode, "123", "").file_priorities();
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(124)">../test/test_resume.cpp:695</a></td><td>test all other resume flags here too. This would require returning more than just the torrent_status from test_resume_flags. Also http seeds and trackers for instance</td></tr><tr id="124" style="display: none;" colspan="3"><td colspan="3"><h2>test all other resume flags here too. This would require returning
|
|
more than just the torrent_status from test_resume_flags. Also http seeds
|
|
and trackers for instance</h2><h4>../test/test_resume.cpp:695</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> lt::session ses;
|
|
// resume data overrides the paused flag
|
|
torrent_status s = test_resume_flags(ses, add_torrent_params::flag_paused).status();
|
|
default_tests(s);
|
|
#ifdef TORRENT_WINDOWS
|
|
TEST_EQUAL(s.save_path, "c:\\add_torrent_params save_path");
|
|
#else
|
|
TEST_EQUAL(s.save_path, "/add_torrent_params save_path");
|
|
#endif
|
|
TEST_EQUAL(s.sequential_download, false);
|
|
TEST_EQUAL(s.paused, false);
|
|
TEST_EQUAL(s.auto_managed, false);
|
|
TEST_EQUAL(s.seed_mode, false);
|
|
TEST_EQUAL(s.super_seeding, false);
|
|
TEST_EQUAL(s.share_mode, false);
|
|
TEST_EQUAL(s.upload_mode, false);
|
|
TEST_EQUAL(s.ip_filter_applies, false);
|
|
TEST_EQUAL(s.connections_limit, 1345);
|
|
TEST_EQUAL(s.uploads_limit, 1346);
|
|
|
|
<div style="background: #ffff00" width="100%">}
|
|
</div>
|
|
TORRENT_TEST(url_seed_resume_data)
|
|
{
|
|
// merge url seeds with resume data
|
|
fprintf(stderr, "flags: merge_resume_http_seeds\n");
|
|
lt::session ses;
|
|
torrent_handle h = test_resume_flags(ses,
|
|
add_torrent_params::flag_merge_resume_http_seeds);
|
|
std::set<std::string> us = h.url_seeds();
|
|
std::set<std::string> ws = h.http_seeds();
|
|
|
|
TEST_EQUAL(us.size(), 3);
|
|
TEST_EQUAL(std::count(us.begin(), us.end()
|
|
, "http://add_torrent_params_url_seed.com"), 1);
|
|
TEST_EQUAL(std::count(us.begin(), us.end()
|
|
, "http://torrent_file_url_seed.com/"), 1);
|
|
TEST_EQUAL(std::count(us.begin(), us.end()
|
|
, "http://resume_data_url_seed.com/"), 1);
|
|
|
|
TEST_EQUAL(ws.size(), 1);
|
|
TEST_EQUAL(std::count(ws.begin(), ws.end()
|
|
, "http://resume_data_http_seed.com"), 1);
|
|
}
|
|
|
|
TORRENT_TEST(resume_override_torrent)
|
|
{
|
|
// resume data overrides the .torrent_file
|
|
fprintf(stderr, "flags: no merge_resume_http_seed\n");
|
|
lt::session ses;
|
|
torrent_handle h = test_resume_flags(ses,
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(125)">../test/test_settings_pack.cpp:140</a></td><td>load_pack_from_dict</td></tr><tr id="125" style="display: none;" colspan="3"><td colspan="3"><h2>load_pack_from_dict</h2><h4>../test/test_settings_pack.cpp:140</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> TEST_EQUAL(pack.get_bool(settings_pack::send_redundant_have), true);
|
|
|
|
pack.clear();
|
|
|
|
TEST_EQUAL(pack.has_val(settings_pack::send_redundant_have), false);
|
|
TEST_EQUAL(pack.has_val(settings_pack::user_agent), false);
|
|
TEST_EQUAL(pack.has_val(settings_pack::lazy_bitfields), false);
|
|
}
|
|
|
|
TORRENT_TEST(duplicates)
|
|
{
|
|
settings_pack p;
|
|
p.set_str(settings_pack::peer_fingerprint, "abc");
|
|
p.set_str(settings_pack::peer_fingerprint, "cde");
|
|
p.set_str(settings_pack::peer_fingerprint, "efg");
|
|
p.set_str(settings_pack::peer_fingerprint, "hij");
|
|
|
|
TEST_EQUAL(p.get_str(settings_pack::peer_fingerprint), "hij");
|
|
}
|
|
|
|
<div style="background: #ffff00" width="100%">
|
|
</div></pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(126)">../test/test_ssl.cpp:394</a></td><td>test using a signed certificate with the wrong info-hash in DN</td></tr><tr id="126" style="display: none;" colspan="3"><td colspan="3"><h2>test using a signed certificate with the wrong info-hash in DN</h2><h4>../test/test_ssl.cpp:394</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> // in verifying peers
|
|
ctx.set_verify_mode(context::verify_none, ec);
|
|
if (ec)
|
|
{
|
|
fprintf(stderr, "Failed to set SSL verify mode: %s\n"
|
|
, ec.message().c_str());
|
|
TEST_CHECK(!ec);
|
|
return false;
|
|
}
|
|
|
|
std::string certificate = combine_path("..", combine_path("ssl", "peer_certificate.pem"));
|
|
std::string private_key = combine_path("..", combine_path("ssl", "peer_private_key.pem"));
|
|
std::string dh_params = combine_path("..", combine_path("ssl", "dhparams.pem"));
|
|
|
|
if (flags & invalid_certificate)
|
|
{
|
|
certificate = combine_path("..", combine_path("ssl", "invalid_peer_certificate.pem"));
|
|
private_key = combine_path("..", combine_path("ssl", "invalid_peer_private_key.pem"));
|
|
}
|
|
|
|
<div style="background: #ffff00" width="100%">
|
|
</div> if (flags & (valid_certificate | invalid_certificate))
|
|
{
|
|
fprintf(stderr, "set_password_callback\n");
|
|
ctx.set_password_callback(boost::bind(&password_callback, _1, _2, "test"), ec);
|
|
if (ec)
|
|
{
|
|
fprintf(stderr, "Failed to set certificate password callback: %s\n"
|
|
, ec.message().c_str());
|
|
TEST_CHECK(!ec);
|
|
return false;
|
|
}
|
|
fprintf(stderr, "use_certificate_file \"%s\"\n", certificate.c_str());
|
|
ctx.use_certificate_file(certificate, context::pem, ec);
|
|
if (ec)
|
|
{
|
|
fprintf(stderr, "Failed to set certificate file: %s\n"
|
|
, ec.message().c_str());
|
|
TEST_CHECK(!ec);
|
|
return false;
|
|
}
|
|
fprintf(stderr, "use_private_key_file \"%s\"\n", private_key.c_str());
|
|
ctx.use_private_key_file(private_key, context::pem, ec);
|
|
if (ec)
|
|
{
|
|
fprintf(stderr, "Failed to set private key: %s\n"
|
|
, ec.message().c_str());
|
|
TEST_CHECK(!ec);
|
|
return false;
|
|
}
|
|
fprintf(stderr, "use_tmp_dh_file \"%s\"\n", dh_params.c_str());
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(127)">../test/test_ssl.cpp:492</a></td><td>also test using a hash that refers to a valid torrent but that differs from the SNI hash</td></tr><tr id="127" style="display: none;" colspan="3"><td colspan="3"><h2>also test using a hash that refers to a valid torrent
|
|
but that differs from the SNI hash</h2><h4>../test/test_ssl.cpp:492</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> print_alerts(ses1, "ses1", true, true, true, &on_alert);
|
|
if (ec)
|
|
{
|
|
fprintf(stderr, "Failed SSL handshake: %s\n"
|
|
, ec.message().c_str());
|
|
return false;
|
|
}
|
|
|
|
char handshake[] = "\x13" "BitTorrent protocol\0\0\0\0\0\0\0\x04"
|
|
" " // space for info-hash
|
|
"aaaaaaaaaaaaaaaaaaaa" // peer-id
|
|
"\0\0\0\x01\x02"; // interested
|
|
|
|
// fill in the info-hash
|
|
if (flags & valid_bittorrent_hash)
|
|
{
|
|
std::memcpy(handshake + 28, &t->info_hash()[0], 20);
|
|
}
|
|
else
|
|
{
|
|
<div style="background: #ffff00" width="100%"> std::generate(handshake + 28, handshake + 48, &rand);
|
|
</div> }
|
|
|
|
// fill in the peer-id
|
|
std::generate(handshake + 48, handshake + 68, &rand);
|
|
|
|
fprintf(stderr, "bittorrent handshake\n");
|
|
boost::asio::write(ssl_sock, boost::asio::buffer(handshake, (sizeof(handshake) - 1)), ec);
|
|
print_alerts(ses1, "ses1", true, true, true, &on_alert);
|
|
if (ec)
|
|
{
|
|
fprintf(stderr, "failed to write bittorrent handshake: %s\n"
|
|
, ec.message().c_str());
|
|
return false;
|
|
}
|
|
|
|
char buf[68];
|
|
fprintf(stderr, "read bittorrent handshake\n");
|
|
boost::asio::read(ssl_sock, boost::asio::buffer(buf, sizeof(buf)), ec);
|
|
print_alerts(ses1, "ses1", true, true, true, &on_alert);
|
|
if (ec)
|
|
{
|
|
fprintf(stderr, "failed to read bittorrent handshake: %s\n"
|
|
, ec.message().c_str());
|
|
return false;
|
|
}
|
|
|
|
if (memcmp(buf, "\x13" "BitTorrent protocol", 20) != 0)
|
|
{
|
|
fprintf(stderr, "invalid bittorrent handshake\n");
|
|
return false;
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(128)">../test/test_timestamp_history.cpp:54</a></td><td>test the case where we have > 120 samples (and have the base delay actually be updated)</td></tr><tr id="128" style="display: none;" colspan="3"><td colspan="3"><h2>test the case where we have > 120 samples (and have the base delay actually be updated)</h2><h4>../test/test_timestamp_history.cpp:54</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"></pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(129)">../test/test_timestamp_history.cpp:55</a></td><td>test the case where a sample is lower than the history entry but not lower than the base</td></tr><tr id="129" style="display: none;" colspan="3"><td colspan="3"><h2>test the case where a sample is lower than the history entry but not lower than the base</h2><h4>../test/test_timestamp_history.cpp:55</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">#include "libtorrent/timestamp_history.hpp"
|
|
|
|
TORRENT_TEST(timestamp_history)
|
|
{
|
|
using namespace libtorrent;
|
|
|
|
timestamp_history h;
|
|
TEST_EQUAL(h.add_sample(0x32, false), 0);
|
|
TEST_EQUAL(h.base(), 0x32);
|
|
TEST_EQUAL(h.add_sample(0x33, false), 0x1);
|
|
TEST_EQUAL(h.base(), 0x32);
|
|
TEST_EQUAL(h.add_sample(0x3433, false), 0x3401);
|
|
TEST_EQUAL(h.base(), 0x32);
|
|
TEST_EQUAL(h.add_sample(0x30, false), 0);
|
|
TEST_EQUAL(h.base(), 0x30);
|
|
|
|
// test that wrapping of the timestamp is properly handled
|
|
h.add_sample(0xfffffff3, false);
|
|
TEST_EQUAL(h.base(), 0xfffffff3);
|
|
|
|
<div style="background: #ffff00" width="100%">}
|
|
</div>
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(130)">../test/test_torrent.cpp:135</a></td><td>wait for an alert rather than just waiting 10 seconds. This is kind of silly</td></tr><tr id="130" style="display: none;" colspan="3"><td colspan="3"><h2>wait for an alert rather than just waiting 10 seconds. This is kind of silly</h2><h4>../test/test_torrent.cpp:135</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> TEST_EQUAL(int(h.file_priorities().size()), info->num_files());
|
|
TEST_EQUAL(h.file_priorities()[0], 0);
|
|
if (info->num_files() > 1)
|
|
TEST_EQUAL(h.file_priorities()[1], 0);
|
|
if (info->num_files() > 2)
|
|
TEST_EQUAL(h.file_priorities()[2], 1);
|
|
}
|
|
}
|
|
|
|
if (info->num_pieces() > 0)
|
|
{
|
|
h.piece_priority(0, 1);
|
|
st = h.status();
|
|
TEST_CHECK(st.pieces.size() > 0 && st.pieces[0] == false);
|
|
std::vector<char> piece(info->piece_length());
|
|
for (int i = 0; i < int(piece.size()); ++i)
|
|
piece[i] = (i % 26) + 'A';
|
|
h.add_piece(0, &piece[0]);
|
|
|
|
// wait until the piece is done writing and hashing
|
|
<div style="background: #ffff00" width="100%"> test_sleep(2000);
|
|
</div> st = h.status();
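		// --- editor's note on the TODO above ---
		// instead of the fixed sleep, the test could block on the alert
		// that signals the piece has been written and hashed:
		//
		//   wait_for_alert(ses, piece_finished_alert::alert_type, "piece_finished");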
|
|
TEST_CHECK(st.pieces.size() > 0 && st.pieces[0] == true);
|
|
|
|
std::cout << "reading piece 0" << std::endl;
|
|
h.read_piece(0);
|
|
alert const* a = wait_for_alert(ses, read_piece_alert::alert_type, "read_piece");
|
|
TEST_CHECK(a);
|
|
read_piece_alert const* rpa = alert_cast<read_piece_alert>(a);
|
|
TEST_CHECK(rpa);
|
|
if (rpa)
|
|
{
|
|
std::cout << "SUCCEEDED!" << std::endl;
|
|
TEST_CHECK(memcmp(&piece[0], rpa->buffer.get(), piece.size()) == 0);
|
|
TEST_CHECK(rpa->size == info->piece_size(0));
|
|
TEST_CHECK(rpa->piece == 0);
|
|
TEST_CHECK(hasher(&piece[0], piece.size()).final() == info->hash_for_piece(0));
|
|
}
|
|
}
|
|
}
|
|
|
|
TORRENT_TEST(long_names)
|
|
{
|
|
entry info;
|
|
info["pieces"] = "aaaaaaaaaaaaaaaaaaaa";
|
|
info["name"] = "slightly shorter name, it's kind of sad that people started the trend of incorrectly encoding the regular name field and then adding another one with correct encoding";
|
|
info["name.utf-8"] = "this is a long ass name in order to try to make make_magnet_uri overflow and hopefully crash. Although, by the time you read this that particular bug should have been fixed";
|
|
info["piece length"] = 16 * 1024;
|
|
info["length"] = 3245;
|
|
entry torrent;
|
|
torrent["info"] = info;
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(131)">../test/test_torrent_info.cpp:156</a></td><td>test remap_files</td></tr><tr id="131" style="display: none;" colspan="3"><td colspan="3"><h2>test remap_files</h2><h4>../test/test_torrent_info.cpp:156</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"></pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(132)">../test/test_torrent_info.cpp:157</a></td><td>merkle torrents. specifically torrent_info::add_merkle_nodes and torrent with "root hash"</td></tr><tr id="132" style="display: none;" colspan="3"><td colspan="3"><h2>merkle torrents. specifically torrent_info::add_merkle_nodes and torrent with "root hash"</h2><h4>../test/test_torrent_info.cpp:157</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"></pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(133)">../test/test_torrent_info.cpp:158</a></td><td>torrent with 'p' (padfile) attribute</td></tr><tr id="133" style="display: none;" colspan="3"><td colspan="3"><h2>torrent with 'p' (padfile) attribute</h2><h4>../test/test_torrent_info.cpp:158</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"></pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(134)">../test/test_torrent_info.cpp:159</a></td><td>torrent with 'h' (hidden) attribute</td></tr><tr id="134" style="display: none;" colspan="3"><td colspan="3"><h2>torrent with 'h' (hidden) attribute</h2><h4>../test/test_torrent_info.cpp:159</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"></pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(135)">../test/test_torrent_info.cpp:160</a></td><td>torrent with 'x' (executable) attribute</td></tr><tr id="135" style="display: none;" colspan="3"><td colspan="3"><h2>torrent with 'x' (executable) attribute</h2><h4>../test/test_torrent_info.cpp:160</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"></pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(136)">../test/test_torrent_info.cpp:161</a></td><td>torrent with 'l' (symlink) attribute</td></tr><tr id="136" style="display: none;" colspan="3"><td colspan="3"><h2>torrent with 'l' (symlink) attribute</h2><h4>../test/test_torrent_info.cpp:161</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"></pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(137)">../test/test_torrent_info.cpp:162</a></td><td>creating a merkle torrent (torrent_info::build_merkle_list)</td></tr><tr id="137" style="display: none;" colspan="3"><td colspan="3"><h2>creating a merkle torrent (torrent_info::build_merkle_list)</h2><h4>../test/test_torrent_info.cpp:162</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"></pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(138)">../test/test_torrent_info.cpp:163</a></td><td>torrent with multiple trackers in multiple tiers, making sure we shuffle them (how do you test shuffling?, load it multiple times and make sure it's in different order at least once)</td></tr><tr id="138" style="display: none;" colspan="3"><td colspan="3"><h2>torrent with multiple trackers in multiple tiers, making sure we shuffle them (how do you test shuffling?, load it multiple times and make sure it's in different order at least 
once)</h2><h4>../test/test_torrent_info.cpp:163</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"></pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(139)">../test/test_torrent_info.cpp:164</a></td><td>torrents with a missing name</td></tr><tr id="139" style="display: none;" colspan="3"><td colspan="3"><h2>torrents with a missing name</h2><h4>../test/test_torrent_info.cpp:164</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"></pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(140)">../test/test_torrent_info.cpp:165</a></td><td>torrents with a zero-length name</td></tr><tr id="140" style="display: none;" colspan="3"><td colspan="3"><h2>torrents with a zero-length name</h2><h4>../test/test_torrent_info.cpp:165</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"></pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(141)">../test/test_torrent_info.cpp:166</a></td><td>torrents with a merkle tree and add_merkle_nodes</td></tr><tr id="141" style="display: none;" colspan="3"><td colspan="3"><h2>torrents with a merkle tree and add_merkle_nodes</h2><h4>../test/test_torrent_info.cpp:166</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"></pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(142)">../test/test_torrent_info.cpp:167</a></td><td>torrent with a non-dictionary info-section</td></tr><tr id="142" style="display: none;" colspan="3"><td colspan="3"><h2>torrent with a non-dictionary info-section</h2><h4>../test/test_torrent_info.cpp:167</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"></pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(143)">../test/test_torrent_info.cpp:168</a></td><td>torrents with DHT nodes</td></tr><tr id="143" style="display: none;" colspan="3"><td colspan="3"><h2>torrents with DHT nodes</h2><h4>../test/test_torrent_info.cpp:168</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"></pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(144)">../test/test_torrent_info.cpp:169</a></td><td>torrent with url-list as a single string</td></tr><tr id="144" style="display: none;" colspan="3"><td colspan="3"><h2>torrent with url-list as a single string</h2><h4>../test/test_torrent_info.cpp:169</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"></pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(145)">../test/test_torrent_info.cpp:170</a></td><td>torrent with http seed as a single string</td></tr><tr id="145" style="display: none;" colspan="3"><td colspan="3"><h2>torrent with http seed as a single string</h2><h4>../test/test_torrent_info.cpp:170</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"></pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(146)">../test/test_torrent_info.cpp:171</a></td><td>torrent with a comment</td></tr><tr id="146" style="display: none;" colspan="3"><td colspan="3"><h2>torrent with a comment</h2><h4>../test/test_torrent_info.cpp:171</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"></pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(147)">../test/test_torrent_info.cpp:172</a></td><td>torrent with an SSL cert</td></tr><tr id="147" style="display: none;" colspan="3"><td 
colspan="3"><h2>torrent with an SSL cert</h2><h4>../test/test_torrent_info.cpp:172</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"></pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(148)">../test/test_torrent_info.cpp:173</a></td><td>torrent with attributes (executable and hidden)</td></tr><tr id="148" style="display: none;" colspan="3"><td colspan="3"><h2>torrent with attributes (executable and hidden)</h2><h4>../test/test_torrent_info.cpp:173</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"></pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(149)">../test/test_torrent_info.cpp:174</a></td><td>torrent_info::add_tracker</td></tr><tr id="149" style="display: none;" colspan="3"><td colspan="3"><h2>torrent_info::add_tracker</h2><h4>../test/test_torrent_info.cpp:174</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"></pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(150)">../test/test_torrent_info.cpp:175</a></td><td>torrent_info::unload</td></tr><tr id="150" style="display: none;" colspan="3"><td colspan="3"><h2>torrent_info::unload</h2><h4>../test/test_torrent_info.cpp:175</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"></pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(151)">../test/test_torrent_info.cpp:176</a></td><td>torrent_info constructor that takes an invalid bencoded buffer</td></tr><tr id="151" style="display: none;" colspan="3"><td colspan="3"><h2>torrent_info constructor that takes an invalid bencoded buffer</h2><h4>../test/test_torrent_info.cpp:176</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"></pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(152)">../test/test_torrent_info.cpp:177</a></td><td>verify_encoding with a string that triggers character replacement</td></tr><tr id="152" style="display: none;" colspan="3"><td colspan="3"><h2>verify_encoding with a string that triggers character replacement</h2><h4>../test/test_torrent_info.cpp:177</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">test_failing_torrent_t test_error_torrents[] =
|
|
{
|
|
{ "missing_piece_len.torrent", errors::torrent_missing_piece_length },
|
|
{ "invalid_piece_len.torrent", errors::torrent_missing_piece_length },
|
|
{ "negative_piece_len.torrent", errors::torrent_missing_piece_length },
|
|
{ "no_name.torrent", errors::torrent_missing_name },
|
|
{ "invalid_name.torrent", errors::torrent_missing_name },
|
|
{ "invalid_info.torrent", errors::torrent_missing_info },
|
|
{ "string.torrent", errors::torrent_is_no_dict },
|
|
{ "negative_size.torrent", errors::torrent_invalid_length },
|
|
{ "negative_file_size.torrent", errors::torrent_invalid_length },
|
|
{ "invalid_path_list.torrent", errors::torrent_missing_name},
|
|
{ "missing_path_list.torrent", errors::torrent_missing_name },
|
|
{ "invalid_pieces.torrent", errors::torrent_missing_pieces },
|
|
{ "unaligned_pieces.torrent", errors::torrent_invalid_hashes },
|
|
{ "invalid_root_hash.torrent", errors::torrent_invalid_hashes },
|
|
{ "invalid_root_hash2.torrent", errors::torrent_missing_pieces },
|
|
{ "invalid_file_size.torrent", errors::torrent_invalid_length },
|
|
};
|
|
|
|
<div style="background: #ffff00" width="100%">
|
|
</div>TORRENT_TEST(add_url_seed)
|
|
{
|
|
torrent_info ti(sha1_hash(" "));
|
|
TEST_EQUAL(ti.web_seeds().size(), 0);
|
|
|
|
ti.add_url_seed("http://test.com");
|
|
|
|
TEST_EQUAL(ti.web_seeds().size(), 1);
|
|
web_seed_entry we = ti.web_seeds()[0];
|
|
TEST_EQUAL(we.type, web_seed_entry::url_seed);
|
|
TEST_EQUAL(we.url, "http://test.com");
|
|
}
|
|
|
|
TORRENT_TEST(add_http_seed)
|
|
{
|
|
torrent_info ti(sha1_hash(" "));
|
|
TEST_EQUAL(ti.web_seeds().size(), 0);
|
|
|
|
ti.add_http_seed("http://test.com");
|
|
|
|
TEST_EQUAL(ti.web_seeds().size(), 1);
|
|
web_seed_entry we = ti.web_seeds()[0];
|
|
TEST_EQUAL(we.type, web_seed_entry::http_seed);
|
|
TEST_EQUAL(we.url, "http://test.com");
|
|
}
|
|
|
|
TORRENT_TEST(set_web_seeds)
|
|
{
|
|
torrent_info ti(sha1_hash(" "));
|
|
TEST_EQUAL(ti.web_seeds().size(), 0);
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(153)">../test/test_tracker.cpp:53</a></td><td>test scrape requests</td></tr><tr id="153" style="display: none;" colspan="3"><td colspan="3"><h2>test scrape requests</h2><h4>../test/test_tracker.cpp:53</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"></pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(154)">../test/test_tracker.cpp:54</a></td><td>test parse peers6</td></tr><tr id="154" style="display: none;" colspan="3"><td colspan="3"><h2>test parse peers6</h2><h4>../test/test_tracker.cpp:54</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"></pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(155)">../test/test_tracker.cpp:55</a></td><td>test parse tracker-id</td></tr><tr id="155" style="display: none;" colspan="3"><td colspan="3"><h2>test parse tracker-id</h2><h4>../test/test_tracker.cpp:55</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"></pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(156)">../test/test_tracker.cpp:56</a></td><td>test parse failure-reason</td></tr><tr id="156" style="display: none;" colspan="3"><td colspan="3"><h2>test parse failure-reason</h2><h4>../test/test_tracker.cpp:56</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"></pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(157)">../test/test_tracker.cpp:57</a></td><td>test all failure paths, including invalid bencoding not a dictionary no files entry in scrape response no info-hash entry in scrape response malformed peers in peer list of dictionaries uneven number of bytes in peers and peers6 string responses</td></tr><tr id="157" style="display: none;" colspan="3"><td colspan="3"><h2>test all failure paths, including
|
|
invalid bencoding
|
|
not a dictionary
|
|
no files entry in scrape response
|
|
no info-hash entry in scrape response
|
|
malformed peers in peer list of dictionaries
|
|
uneven number of bytes in peers and peers6 string responses</h2><h4>../test/test_tracker.cpp:57</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">#include "test.hpp"
|
|
#include "setup_transfer.hpp"
|
|
#include "udp_tracker.hpp"
|
|
#include "settings.hpp"
|
|
#include "libtorrent/alert.hpp"
|
|
#include "libtorrent/peer_info.hpp" // for peer_list_entry
|
|
#include "libtorrent/broadcast_socket.hpp" // for supports_ipv6
|
|
#include "libtorrent/alert_types.hpp"
|
|
#include "libtorrent/session.hpp"
|
|
#include "libtorrent/error_code.hpp"
|
|
#include "libtorrent/tracker_manager.hpp"
|
|
#include "libtorrent/http_tracker_connection.hpp" // for parse_tracker_response
|
|
#include "libtorrent/torrent_info.hpp"
|
|
#include "libtorrent/announce_entry.hpp"
|
|
|
|
#include <fstream>
|
|
|
|
using namespace libtorrent;
|
|
namespace lt = libtorrent;
|
|
|
|
<div style="background: #ffff00" width="100%">
|
|
</div>TORRENT_TEST(parse_hostname_peers)
|
|
{
|
|
char const response[] = "d5:peersld7:peer id20:aaaaaaaaaaaaaaaaaaaa2:ip13:test_hostname4:porti1000eed7:peer id20:bbbbabaababababababa2:ip12:another_host4:porti1001eeee";
|
|
error_code ec;
|
|
tracker_response resp = parse_tracker_response(response, sizeof(response) - 1
|
|
, ec, false, sha1_hash());
|
|
|
|
TEST_EQUAL(ec, error_code());
|
|
TEST_EQUAL(resp.peers.size(), 2);
|
|
if (resp.peers.size() == 2)
|
|
{
|
|
peer_entry const& e0 = resp.peers[0];
|
|
peer_entry const& e1 = resp.peers[1];
|
|
TEST_EQUAL(e0.hostname, "test_hostname");
|
|
TEST_EQUAL(e0.port, 1000);
|
|
TEST_EQUAL(e0.pid, peer_id("aaaaaaaaaaaaaaaaaaaa"));
|
|
|
|
TEST_EQUAL(e1.hostname, "another_host");
|
|
TEST_EQUAL(e1.port, 1001);
|
|
TEST_EQUAL(e1.pid, peer_id("bbbbabaababababababa"));
|
|
}
|
|
}
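
// (sketch for the failure-path TODO above, not an existing test) it follows
// the same pattern as parse_hostname_peers: feed parse_tracker_response input
// that is not valid bencoding and check that an error is reported and that no
// peers come back. The exact error_code value is deliberately left unchecked.
TORRENT_TEST(parse_invalid_bencoding)
{
	char const response[] = "not a bencoded dictionary";
	error_code ec;
	tracker_response resp = parse_tracker_response(response, sizeof(response) - 1
		, ec, false, sha1_hash());

	TEST_CHECK(ec);
	TEST_EQUAL(resp.peers.size(), 0);
}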
|
|
|
|
TORRENT_TEST(parse_peers4)
|
|
{
|
|
char const response[] = "d5:peers12:\x01\x02\x03\x04\x30\x10"
|
|
"\x09\x08\x07\x06\x20\x10" "e";
|
|
error_code ec;
|
|
tracker_response resp = parse_tracker_response(response, sizeof(response) - 1
|
|
, ec, false, sha1_hash());
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(158)">../test/test_transfer.cpp:215</a></td><td>these settings_pack tests belong in their own test</td></tr><tr id="158" style="display: none;" colspan="3"><td colspan="3"><h2>these settings_pack tests belong in their own test</h2><h4>../test/test_transfer.cpp:215</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> pack.set_int(settings_pack::min_reconnect_time, 0);
|
|
pack.set_int(settings_pack::stop_tracker_timeout, 1);
|
|
pack.set_bool(settings_pack::announce_to_all_trackers, true);
|
|
pack.set_bool(settings_pack::announce_to_all_tiers, true);
|
|
|
|
// make sure we announce to both http and udp trackers
|
|
pack.set_bool(settings_pack::prefer_udp_trackers, false);
|
|
pack.set_bool(settings_pack::enable_outgoing_utp, false);
|
|
pack.set_bool(settings_pack::enable_incoming_utp, false);
|
|
pack.set_bool(settings_pack::enable_lsd, false);
|
|
pack.set_bool(settings_pack::enable_natpmp, false);
|
|
pack.set_bool(settings_pack::enable_upnp, false);
|
|
pack.set_bool(settings_pack::enable_dht, false);
|
|
pack.set_int(settings_pack::alert_mask, mask);
|
|
|
|
pack.set_int(settings_pack::out_enc_policy, settings_pack::pe_disabled);
|
|
pack.set_int(settings_pack::in_enc_policy, settings_pack::pe_disabled);
|
|
|
|
pack.set_bool(settings_pack::allow_multiple_connections_per_ip, false);
|
|
|
|
<div style="background: #ffff00" width="100%"> pack.set_int(settings_pack::unchoke_slots_limit, 0);
|
|
</div> ses1.apply_settings(pack);
|
|
TEST_CHECK(ses1.get_settings().get_int(settings_pack::unchoke_slots_limit) == 0);
|
|
|
|
pack.set_int(settings_pack::unchoke_slots_limit, -1);
|
|
ses1.apply_settings(pack);
|
|
TEST_CHECK(ses1.get_settings().get_int(settings_pack::unchoke_slots_limit) == -1);
|
|
|
|
pack.set_int(settings_pack::unchoke_slots_limit, 8);
|
|
ses1.apply_settings(pack);
|
|
TEST_CHECK(ses1.get_settings().get_int(settings_pack::unchoke_slots_limit) == 8);
|
|
|
|
ses2.apply_settings(pack);
|
|
|
|
torrent_handle tor1;
|
|
torrent_handle tor2;
|
|
|
|
create_directory("tmp1_transfer", ec);
|
|
std::ofstream file("tmp1_transfer/temporary");
|
|
boost::shared_ptr<torrent_info> t = ::create_torrent(&file, "temporary", 16 * 1024, 13, false);
|
|
file.close();
|
|
|
|
TEST_CHECK(exists(combine_path("tmp1_transfer", "temporary")));
|
|
|
|
add_torrent_params addp(&test_storage_constructor);
|
|
addp.flags &= ~add_torrent_params::flag_paused;
|
|
addp.flags &= ~add_torrent_params::flag_auto_managed;
|
|
|
|
add_torrent_params params;
|
|
params.storage_mode = storage_mode;
|
|
params.flags &= ~add_torrent_params::flag_paused;
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(159)">../test/test_transfer.cpp:294</a></td><td>factor out the disk-full test into its own unit test</td></tr><tr id="159" style="display: none;" colspan="3"><td colspan="3"><h2>factor out the disk-full test into its own unit test</h2><h4>../test/test_transfer.cpp:294</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> print_alerts(ses1, "ses1", true, true, true, &on_alert);
|
|
print_alerts(ses2, "ses2", true, true, true, &on_alert);
|
|
|
|
if (i % 10 == 0)
|
|
{
|
|
print_ses_rate(i / 10.f, &st1, &st2);
|
|
}
|
|
|
|
if (!test_move_storage && st2.progress > 0.25f)
|
|
{
|
|
test_move_storage = true;
|
|
tor1.move_storage("tmp1_transfer_moved");
|
|
tor2.move_storage("tmp2_transfer_moved");
|
|
std::cerr << "moving storage" << std::endl;
|
|
}
|
|
|
|
// wait 10 loops before we restart the torrent. This lets
|
|
// us catch all events that failed (and would put the torrent
|
|
// back into upload mode) before we restart it.
|
|
|
|
<div style="background: #ffff00" width="100%"> if (test_disk_full && st2.upload_mode && ++upload_mode_timer > 10)
|
|
</div> {
|
|
test_disk_full = false;
|
|
((test_storage*)tor2.get_storage_impl())->set_limit(16 * 1024 * 1024);
|
|
|
|
// if we reset the upload mode too soon, there may be more disk
|
|
// jobs failing right after, putting us back in upload mode. So,
|
|
// give the disk some time to fail all disk jobs before resetting
|
|
// upload mode to false
|
|
test_sleep(500);
|
|
|
|
// then we need to drain the alert queue, so the peer_disconnects
|
|
// counter doesn't get incremented by old alerts
|
|
print_alerts(ses1, "ses1", true, true, true, &on_alert);
|
|
print_alerts(ses2, "ses2", true, true, true, &on_alert);
|
|
|
|
lt::error_code err = tor2.status().errc;
|
|
fprintf(stderr, "error: \"%s\"\n", err.message().c_str());
|
|
TEST_CHECK(!err);
|
|
tor2.set_upload_mode(false);
|
|
|
|
// at this point we probably disconnected the seed
|
|
// so we need to reconnect as well
|
|
fprintf(stderr, "%s: reconnecting peer\n", time_now_string());
|
|
error_code ec;
|
|
tor2.connect_peer(tcp::endpoint(address::from_string("127.0.0.1", ec)
|
|
, ses1.listen_port()));
|
|
|
|
TEST_CHECK(tor2.status().is_finished == false);
|
|
fprintf(stderr, "disconnects: %d\n", peer_disconnects);
|
|
TEST_CHECK(peer_disconnects >= 2);
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(160)">../test/test_upnp.cpp:108</a></td><td>store the log and verify that some key messages are there</td></tr><tr id="160" style="display: none;" colspan="3"><td colspan="3"><h2>store the log and verify that some key messages are there</h2><h4>../test/test_upnp.cpp:108</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> "USN:uuid:000f-66d6-7296000099dc::upnp:rootdevice\r\n"
|
|
"Location: http://127.0.0.1:%d/upnp.xml\r\n"
|
|
"Server: Custom/1.0 UPnP/1.0 Proc/Ver\r\n"
|
|
"EXT:\r\n"
|
|
"Cache-Control:max-age=180\r\n"
|
|
"DATE: Fri, 02 Jan 1970 08:10:38 GMT\r\n\r\n";
|
|
|
|
TORRENT_ASSERT(g_port != 0);
|
|
char buf[sizeof(msg) + 30];
|
|
int len = snprintf(buf, sizeof(buf), msg, g_port);
|
|
|
|
error_code ec;
|
|
sock->send(buf, len, ec);
|
|
|
|
if (ec) std::cerr << "*** error sending " << ec.message() << std::endl;
|
|
}
|
|
|
|
void log_callback(char const* err)
|
|
{
|
|
std::cerr << "UPnP: " << err << std::endl;
|
|
<div style="background: #ffff00" width="100%">}
|
|
</div>
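
// (sketch for the TODO above; g_upnp_log and log_contains are made-up names,
// and <string>/<vector> are assumed to be available in this test) collecting
// the messages lets the test verify that key lines showed up. log_callback
// above would additionally do: g_upnp_log.push_back(err);
std::vector<std::string> g_upnp_log;

bool log_contains(char const* substr)
{
	for (std::size_t i = 0; i < g_upnp_log.size(); ++i)
		if (g_upnp_log[i].find(substr) != std::string::npos) return true;
	return false;
}
// run_upnp_test() could then assert e.g. TEST_CHECK(log_contains("rootdevice"))
// once the exact key messages to look for are decided on.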
|
|
struct callback_info
|
|
{
|
|
int mapping;
|
|
int port;
|
|
error_code ec;
|
|
bool operator==(callback_info const& e)
|
|
{ return mapping == e.mapping && port == e.port && !ec == !e.ec; }
|
|
};
|
|
|
|
std::list<callback_info> callbacks;
|
|
|
|
void callback(int mapping, address const& ip, int port, error_code const& err)
|
|
{
|
|
callback_info info = {mapping, port, err};
|
|
callbacks.push_back(info);
|
|
std::cerr << "mapping: " << mapping << ", port: " << port << ", IP: " << ip
|
|
<< ", error: \"" << err.message() << "\"\n";
|
|
}
|
|
|
|
void run_upnp_test(char const* root_filename, char const* router_model, char const* control_name, int igd_version)
|
|
{
|
|
libtorrent::io_service ios;
|
|
|
|
g_port = start_web_server();
|
|
|
|
std::vector<char> buf;
|
|
error_code ec;
|
|
load_file(root_filename, buf, ec);
|
|
buf.push_back(0);
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(161)">../src/block_cache.cpp:987</a></td><td>it's somewhat expensive to iterate over this linked list. Presumably because of the random access of memory. It would be nice if pieces with no evictable blocks weren't in this list</td></tr><tr id="161" style="display: none;" colspan="3"><td colspan="3"><h2>it's somewhat expensive
|
|
to iterate over this linked list. Presumably because of the random
|
|
access of memory. It would be nice if pieces with no evictable blocks
|
|
weren't in this list</h2><h4>../src/block_cache.cpp:987</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> }
|
|
else if (m_last_cache_op == ghost_hit_lru1)
|
|
{
|
|
// when we insert new items or move things from L1 to L2
|
|
// evict blocks from L2
|
|
lru_list[1] = &m_lru[cached_piece_entry::read_lru2];
|
|
lru_list[2] = &m_lru[cached_piece_entry::read_lru1];
|
|
}
|
|
else
|
|
{
|
|
// when we get cache hits in L2 evict from L1
|
|
lru_list[1] = &m_lru[cached_piece_entry::read_lru1];
|
|
lru_list[2] = &m_lru[cached_piece_entry::read_lru2];
|
|
}
|
|
|
|
// end refers to which end of the ARC cache we're evicting
|
|
// from. The LFU or the LRU end
|
|
for (int end = 0; num > 0 && end < 3; ++end)
|
|
{
|
|
// iterate over all blocks in order of last being used (oldest first) and
|
|
<div style="background: #ffff00" width="100%"> for (list_iterator<cached_piece_entry> i = lru_list[end]->iterate(); i.get() && num > 0;)
|
|
</div> {
|
|
cached_piece_entry* pe = reinterpret_cast<cached_piece_entry*>(i.get());
|
|
TORRENT_PIECE_ASSERT(pe->in_use, pe);
|
|
i.next();
|
|
|
|
if (pe == ignore)
|
|
continue;
|
|
|
|
if (pe->ok_to_evict())
|
|
{
|
|
#ifdef TORRENT_DEBUG
|
|
for (int j = 0; j < pe->blocks_in_piece; ++j)
|
|
TORRENT_PIECE_ASSERT(pe->blocks[j].buf == 0, pe);
|
|
#endif
|
|
TORRENT_PIECE_ASSERT(pe->refcount == 0, pe);
|
|
move_to_ghost(pe);
|
|
continue;
|
|
}
|
|
|
|
TORRENT_PIECE_ASSERT(pe->num_dirty == 0, pe);
|
|
|
|
// all blocks are pinned in this piece, skip it
|
|
if (pe->num_blocks <= pe->pinned) continue;
|
|
|
|
// go through the blocks and evict the ones that are not dirty and not
|
|
// referenced
|
|
for (int j = 0; j < pe->blocks_in_piece && num > 0; ++j)
|
|
{
|
|
cached_block_entry& b = pe->blocks[j];
|
|
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(162)">../src/block_cache.cpp:1051</a></td><td>this should probably only be done every n:th time</td></tr><tr id="162" style="display: none;" colspan="3"><td colspan="3"><h2>this should probably only be done every n:th time</h2><h4>../src/block_cache.cpp:1051</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> }
|
|
|
|
if (pe->ok_to_evict())
|
|
{
|
|
#ifdef TORRENT_DEBUG
|
|
for (int j = 0; j < pe->blocks_in_piece; ++j)
|
|
TORRENT_PIECE_ASSERT(pe->blocks[j].buf == 0, pe);
|
|
#endif
|
|
move_to_ghost(pe);
|
|
}
|
|
}
|
|
}
|
|
|
|
// if we can't evict enough blocks from the read cache, also look at write
|
|
// cache pieces for blocks that have already been written to disk and can be
|
|
	// evicted. On the first pass we only evict blocks that have been hashed; on the
|
|
	// second pass we flush anything. This is potentially a very expensive
|
|
	// operation, since we're likely to have to iterate over every single block in the
|
|
// cache, and we might not get to evict anything.
|
|
|
|
<div style="background: #ffff00" width="100%"> if (num > 0 && m_read_cache_size > m_pinned_blocks)
|
|
</div> {
|
|
for (int pass = 0; pass < 2 && num > 0; ++pass)
|
|
{
|
|
for (list_iterator<cached_piece_entry> i = m_lru[cached_piece_entry::write_lru].iterate(); i.get() && num > 0;)
|
|
{
|
|
cached_piece_entry* pe = reinterpret_cast<cached_piece_entry*>(i.get());
|
|
TORRENT_PIECE_ASSERT(pe->in_use, pe);
|
|
|
|
i.next();
|
|
|
|
if (pe == ignore)
|
|
continue;
|
|
|
|
if (pe->ok_to_evict())
|
|
{
|
|
#ifdef TORRENT_DEBUG
|
|
for (int j = 0; j < pe->blocks_in_piece; ++j)
|
|
TORRENT_PIECE_ASSERT(pe->blocks[j].buf == 0, pe);
|
|
#endif
|
|
TORRENT_PIECE_ASSERT(pe->refcount == 0, pe);
|
|
erase_piece(pe);
|
|
continue;
|
|
}
|
|
|
|
// all blocks in this piece are dirty
|
|
if (pe->num_dirty == pe->num_blocks)
|
|
continue;
|
|
|
|
int end = pe->blocks_in_piece;
|
|
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(163)">../src/block_cache.cpp:1810</a></td><td>create a holder for refcounts that automatically decrement</td></tr><tr id="163" style="display: none;" colspan="3"><td colspan="3"><h2>create a holder for refcounts that automatically decrement</h2><h4>../src/block_cache.cpp:1810</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> }
|
|
|
|
j->buffer.disk_block = allocate_buffer("send buffer");
|
|
if (j->buffer.disk_block == 0) return -2;
|
|
|
|
while (size > 0)
|
|
{
|
|
TORRENT_PIECE_ASSERT(pe->blocks[block].buf, pe);
|
|
int to_copy = (std::min)(block_size()
|
|
- block_offset, size);
|
|
std::memcpy(j->buffer.disk_block + buffer_offset
|
|
, pe->blocks[block].buf + block_offset
|
|
, to_copy);
|
|
size -= to_copy;
|
|
block_offset = 0;
|
|
buffer_offset += to_copy;
|
|
++block;
|
|
}
|
|
// we incremented the refcount for both of these blocks.
|
|
// now decrement it.
|
|
<div style="background: #ffff00" width="100%"> dec_block_refcount(pe, start_block, ref_reading);
|
|
</div> if (blocks_to_read == 2) dec_block_refcount(pe, start_block + 1, ref_reading);
|
|
return j->d.io.buffer_size;
|
|
}
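
// (illustrative sketch for the refcount-holder TODO above; block_ref_holder
// does not exist in the tree and assumes dec_block_refcount is accessible
// from it) a scope guard that releases a block reference on destruction, so
// early returns cannot leak one:
struct block_ref_holder
{
	block_ref_holder(block_cache& c, cached_piece_entry* pe, int block, int reason)
		: m_cache(c), m_pe(pe), m_block(block), m_reason(reason) {}
	~block_ref_holder() { m_cache.dec_block_refcount(m_pe, m_block, m_reason); }
private:
	block_cache& m_cache;
	cached_piece_entry* m_pe;
	int const m_block;
	int const m_reason;
	block_ref_holder(block_ref_holder const&);
	block_ref_holder& operator=(block_ref_holder const&);
};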
|
|
|
|
void block_cache::reclaim_block(block_cache_reference const& ref)
|
|
{
|
|
cached_piece_entry* pe = find_piece(ref);
|
|
TORRENT_ASSERT(pe);
|
|
if (pe == NULL) return;
|
|
|
|
TORRENT_PIECE_ASSERT(pe->in_use, pe);
|
|
|
|
TORRENT_PIECE_ASSERT(pe->blocks[ref.block].buf, pe);
|
|
dec_block_refcount(pe, ref.block, block_cache::ref_reading);
|
|
|
|
TORRENT_PIECE_ASSERT(m_send_buffer_blocks > 0, pe);
|
|
--m_send_buffer_blocks;
|
|
|
|
maybe_free_piece(pe);
|
|
}
|
|
|
|
bool block_cache::maybe_free_piece(cached_piece_entry* pe)
|
|
{
|
|
if (!pe->ok_to_evict()
|
|
|| !pe->marked_for_deletion
|
|
|| !pe->jobs.empty())
|
|
return false;
|
|
|
|
DLOG(stderr, "[%p] block_cache maybe_free_piece "
|
|
"piece: %d refcount: %d marked_for_deletion: %d\n"
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(164)">../src/bt_peer_connection.cpp:691</a></td><td>this could be optimized using knuth morris pratt</td></tr><tr id="164" style="display: none;" colspan="3"><td colspan="3"><h2>this could be optimized using knuth morris pratt</h2><h4>../src/bt_peer_connection.cpp:691</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> }
|
|
|
|
m_rc4->set_incoming_key(&remote_key[0], 20);
|
|
m_rc4->set_outgoing_key(&local_key[0], 20);
|
|
|
|
#ifndef TORRENT_DISABLE_LOGGING
|
|
peer_log(peer_log_alert::info, "ENCRYPTION", "computed RC4 keys");
|
|
#endif
|
|
}
|
|
|
|
int bt_peer_connection::get_syncoffset(char const* src, int src_size,
|
|
char const* target, int target_size) const
|
|
{
|
|
TORRENT_ASSERT(target_size >= src_size);
|
|
TORRENT_ASSERT(src_size > 0);
|
|
TORRENT_ASSERT(src);
|
|
TORRENT_ASSERT(target);
|
|
|
|
int traverse_limit = target_size - src_size;
|
|
|
|
<div style="background: #ffff00" width="100%"> for (int i = 0; i < traverse_limit; ++i)
|
|
</div> {
|
|
char const* target_ptr = target + i;
|
|
if (std::equal(src, src+src_size, target_ptr))
|
|
return i;
|
|
}
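
	// (sketch of the Knuth-Morris-Pratt variant the TODO above suggests, not
	// the shipped code; it could replace the loop above and assumes <vector>
	// is available here) build the failure function for src once, then scan
	// target without re-examining bytes: O(src_size + target_size) instead of
	// the quadratic scan.
	{
		std::vector<int> fail(src_size, 0);
		for (int i = 1, k = 0; i < src_size; ++i)
		{
			while (k > 0 && src[i] != src[k]) k = fail[k - 1];
			if (src[i] == src[k]) ++k;
			fail[i] = k;
		}
		for (int i = 0, k = 0; i < target_size; ++i)
		{
			while (k > 0 && target[i] != src[k]) k = fail[k - 1];
			if (target[i] == src[k]) ++k;
			// complete match of src; return the offset where it starts
			if (k == src_size) return i - src_size + 1;
		}
	}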
|
|
|
|
// Partial sync
|
|
// for (int i = 0; i < target_size; ++i)
|
|
// {
|
|
// // first is iterator in src[] at which mismatch occurs
|
|
// // second is iterator in target[] at which mismatch occurs
|
|
// std::pair<const char*, const char*> ret;
|
|
// int src_sync_size;
|
|
// if (i > traverse_limit) // partial sync test
|
|
// {
|
|
// ret = std::mismatch(src, src + src_size - (i - traverse_limit), &target[i]);
|
|
// src_sync_size = ret.first - src;
|
|
// if (src_sync_size == (src_size - (i - traverse_limit)))
|
|
// return i;
|
|
// }
|
|
// else // complete sync test
|
|
// {
|
|
// ret = std::mismatch(src, src + src_size, &target[i]);
|
|
// src_sync_size = ret.first - src;
|
|
// if (src_sync_size == src_size)
|
|
// return i;
|
|
// }
|
|
// }
|
|
|
|
// no complete sync
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(165)">../src/bt_peer_connection.cpp:2261</a></td><td>if we're finished, send upload_only message</td></tr><tr id="165" style="display: none;" colspan="3"><td colspan="3"><h2>if we're finished, send upload_only message</h2><h4>../src/bt_peer_connection.cpp:2261</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> }
|
|
peer_log(peer_log_alert::outgoing_message, "BITFIELD"
|
|
, "%s", bitfield_string.c_str());
|
|
#endif
|
|
m_sent_bitfield = true;
|
|
|
|
send_buffer(reinterpret_cast<char const*>(msg), packet_size);
|
|
|
|
stats_counters().inc_stats_counter(counters::num_outgoing_bitfield);
|
|
|
|
if (num_lazy_pieces > 0)
|
|
{
|
|
for (int i = 0; i < num_lazy_pieces; ++i)
|
|
{
|
|
#ifndef TORRENT_DISABLE_LOGGING
|
|
peer_log(peer_log_alert::outgoing_message, "HAVE"
|
|
, "piece: %d", lazy_pieces[i]);
|
|
#endif
|
|
write_have(lazy_pieces[i]);
|
|
}
|
|
<div style="background: #ffff00" width="100%"> }
|
|
</div>
|
|
if (m_supports_fast)
|
|
send_allowed_set();
|
|
}
|
|
|
|
#ifndef TORRENT_DISABLE_EXTENSIONS
|
|
void bt_peer_connection::write_extensions()
|
|
{
|
|
INVARIANT_CHECK;
|
|
|
|
TORRENT_ASSERT(m_supports_extensions);
|
|
TORRENT_ASSERT(m_sent_handshake);
|
|
|
|
entry handshake;
|
|
entry::dictionary_type& m = handshake["m"].dict();
|
|
|
|
// if we're using a proxy, our listen port won't be useful
|
|
// anyway.
|
|
if (!m_settings.get_bool(settings_pack::force_proxy) && is_outgoing())
|
|
handshake["p"] = m_ses.listen_port();
|
|
|
|
	// only send the port in case we made the connection
|
|
// on incoming connections the other end already knows
|
|
// our listen port
|
|
if (!m_settings.get_bool(settings_pack::anonymous_mode))
|
|
{
|
|
handshake["v"] = m_settings.get_str(settings_pack::handshake_client_version).empty()
|
|
? m_settings.get_str(settings_pack::user_agent)
|
|
: m_settings.get_str(settings_pack::handshake_client_version);
|
|
}
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(166)">../src/choker.cpp:336</a></td><td>optimize this using partial_sort or something. We don't need to sort the entire list</td></tr><tr id="166" style="display: none;" colspan="3"><td colspan="3"><h2>optimize this using partial_sort or something. We don't need
|
|
to sort the entire list</h2><h4>../src/choker.cpp:336</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> return upload_slots;
|
|
}
|
|
|
|
// ==== rate-based ====
|
|
//
|
|
	// The rate based unchoker looks at our upload rate to peers, and finds
|
|
// a balance between number of upload slots and the rate we achieve. The
|
|
// intention is to not spread upload bandwidth too thin, but also to not
|
|
// unchoke few enough peers to not be able to saturate the up-link.
|
|
// this is done by traversing the peers sorted by our upload rate to
|
|
// them in decreasing rates. For each peer we increase our threshold
|
|
// by 1 kB/s. The first peer we get to to whom we upload slower than
|
|
// the threshold, we stop and that's the number of unchoke slots we have.
|
|
if (sett.get_int(settings_pack::choking_algorithm)
|
|
== settings_pack::rate_based_choker)
|
|
{
|
|
// first reset the number of unchoke slots, because we'll calculate
|
|
// it purely based on the current state of our peers.
|
|
upload_slots = 0;
|
|
|
|
<div style="background: #ffff00" width="100%">
|
|
</div></pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(167)">../src/choker.cpp:339</a></td><td>make the comparison function a free function and move it into this cpp file</td></tr><tr id="167" style="display: none;" colspan="3"><td colspan="3"><h2>make the comparison function a free function and move it
|
|
into this cpp file</h2><h4>../src/choker.cpp:339</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> }
|
|
|
|
// ==== rate-based ====
|
|
//
|
|
	// The rate based unchoker looks at our upload rate to peers, and finds
|
|
// a balance between number of upload slots and the rate we achieve. The
|
|
// intention is to not spread upload bandwidth too thin, but also to not
|
|
// unchoke few enough peers to not be able to saturate the up-link.
|
|
// this is done by traversing the peers sorted by our upload rate to
|
|
// them in decreasing rates. For each peer we increase our threshold
|
|
// by 1 kB/s. The first peer we get to to whom we upload slower than
|
|
// the threshold, we stop and that's the number of unchoke slots we have.
|
|
if (sett.get_int(settings_pack::choking_algorithm)
|
|
== settings_pack::rate_based_choker)
|
|
{
|
|
// first reset the number of unchoke slots, because we'll calculate
|
|
// it purely based on the current state of our peers.
|
|
upload_slots = 0;
|
|
|
|
|
|
<div style="background: #ffff00" width="100%"> std::sort(peers.begin(), peers.end()
|
|
</div> , boost::bind(&upload_rate_compare, _1, _2));
|
|
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(168)">../src/choker.cpp:344</a></td><td>make configurable</td></tr><tr id="168" style="display: none;" colspan="3"><td colspan="3"><h2>make configurable</h2><h4>../src/choker.cpp:344</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> //
|
|
	// The rate based unchoker looks at our upload rate to peers, and finds
|
|
// a balance between number of upload slots and the rate we achieve. The
|
|
// intention is to not spread upload bandwidth too thin, but also to not
|
|
// unchoke few enough peers to not be able to saturate the up-link.
|
|
// this is done by traversing the peers sorted by our upload rate to
|
|
// them in decreasing rates. For each peer we increase our threshold
|
|
// by 1 kB/s. The first peer we get to to whom we upload slower than
|
|
// the threshold, we stop and that's the number of unchoke slots we have.
|
|
if (sett.get_int(settings_pack::choking_algorithm)
|
|
== settings_pack::rate_based_choker)
|
|
{
|
|
// first reset the number of unchoke slots, because we'll calculate
|
|
// it purely based on the current state of our peers.
|
|
upload_slots = 0;
|
|
|
|
|
|
std::sort(peers.begin(), peers.end()
|
|
, boost::bind(&upload_rate_compare, _1, _2));
|
|
|
|
<div style="background: #ffff00" width="100%"> int rate_threshold = 1024;
|
|
</div>
|
|
for (std::vector<peer_connection*>::const_iterator i = peers.begin()
|
|
, end(peers.end()); i != end; ++i)
|
|
{
|
|
peer_connection const& p = **i;
|
|
int rate = int(p.uploaded_in_last_round()
|
|
* 1000 / total_milliseconds(unchoke_interval));
|
|
|
|
if (rate < rate_threshold) break;
|
|
|
|
++upload_slots;
|
|
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(169)">../src/choker.cpp:358</a></td><td>make configurable</td></tr><tr id="169" style="display: none;" colspan="3"><td colspan="3"><h2>make configurable</h2><h4>../src/choker.cpp:358</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> // it purely based on the current state of our peers.
|
|
upload_slots = 0;
|
|
|
|
|
|
std::sort(peers.begin(), peers.end()
|
|
, boost::bind(&upload_rate_compare, _1, _2));
|
|
|
|
int rate_threshold = 1024;
|
|
|
|
for (std::vector<peer_connection*>::const_iterator i = peers.begin()
|
|
, end(peers.end()); i != end; ++i)
|
|
{
|
|
peer_connection const& p = **i;
|
|
int rate = int(p.uploaded_in_last_round()
|
|
* 1000 / total_milliseconds(unchoke_interval));
|
|
|
|
if (rate < rate_threshold) break;
|
|
|
|
++upload_slots;
|
|
|
|
<div style="background: #ffff00" width="100%"> rate_threshold += 1024;
|
|
</div> }
|
|
++upload_slots;
|
|
}
|
|
|
|
// sorts the peers that are eligible for unchoke by download rate and
|
|
// secondary by total upload. The reason for this is, if all torrents are
|
|
// being seeded, the download rate will be 0, and the peers we have sent
|
|
// the least to should be unchoked
|
|
|
|
// we use partial sort here, because we only care about the top
|
|
// upload_slots peers.
|
|
|
|
if (sett.get_int(settings_pack::seed_choking_algorithm)
|
|
== settings_pack::round_robin)
|
|
{
|
|
int pieces = sett.get_int(settings_pack::seeding_piece_quota);
|
|
|
|
std::partial_sort(peers.begin(), peers.begin()
|
|
+ (std::min)(upload_slots, int(peers.size())), peers.end()
|
|
, boost::bind(&unchoke_compare_rr, _1, _2, pieces));
|
|
}
|
|
else if (sett.get_int(settings_pack::seed_choking_algorithm)
|
|
== settings_pack::fastest_upload)
|
|
{
|
|
std::partial_sort(peers.begin(), peers.begin()
|
|
+ (std::min)(upload_slots, int(peers.size())), peers.end()
|
|
, boost::bind(&unchoke_compare_fastest_upload, _1, _2));
|
|
}
|
|
else if (sett.get_int(settings_pack::seed_choking_algorithm)
|
|
== settings_pack::anti_leech)
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(170)">../src/create_torrent.cpp:287</a></td><td>this should probably be optional</td></tr><tr id="170" style="display: none;" colspan="3"><td colspan="3"><h2>this should probably be optional</h2><h4>../src/create_torrent.cpp:287</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> counters cnt;
|
|
disk_io_thread disk_thread(ios, cnt, 0);
|
|
disk_thread.set_num_threads(1);
|
|
|
|
storage_params params;
|
|
params.files = &t.files();
|
|
params.mapped_files = NULL;
|
|
params.path = path;
|
|
params.pool = &disk_thread.files();
|
|
params.mode = storage_mode_sparse;
|
|
|
|
storage_interface* storage_impl = default_storage_constructor(params);
|
|
|
|
boost::shared_ptr<piece_manager> storage = boost::make_shared<piece_manager>(
|
|
storage_impl, dummy, const_cast<file_storage*>(&t.files()));
|
|
|
|
settings_pack sett;
|
|
sett.set_int(settings_pack::cache_size, 0);
|
|
sett.set_int(settings_pack::hashing_threads, 2);
|
|
|
|
<div style="background: #ffff00" width="100%"> alert_manager dummy2(0, 0);
|
|
</div> disk_thread.set_settings(&sett, dummy2);
|
|
|
|
int piece_counter = 0;
|
|
int completed_piece = 0;
|
|
int piece_read_ahead = 15 * 1024 * 1024 / t.piece_length();
|
|
if (piece_read_ahead < 1) piece_read_ahead = 1;
|
|
|
|
for (int i = 0; i < piece_read_ahead; ++i)
|
|
{
|
|
disk_thread.async_hash(storage.get(), i, disk_io_job::sequential_access
|
|
, boost::bind(&on_hash, _1, &t, storage, &disk_thread
|
|
, &piece_counter, &completed_piece, &f, &ec), NULL);
|
|
++piece_counter;
|
|
if (piece_counter >= t.num_pieces()) break;
|
|
}
|
|
disk_thread.submit_jobs();
|
|
ios.run(ec);
|
|
}
|
|
|
|
create_torrent::~create_torrent() {}
|
|
|
|
create_torrent::create_torrent(file_storage& fs, int piece_size
|
|
, int pad_file_limit, int flags, int alignment)
|
|
: m_files(fs)
|
|
, m_creation_date(time(0))
|
|
, m_multifile(fs.num_files() > 1)
|
|
, m_private(false)
|
|
, m_merkle_torrent((flags & merkle) != 0)
|
|
, m_include_mtime((flags & modification_time) != 0)
|
|
, m_include_symlinks((flags & symlinks) != 0)
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(171)">../src/disk_buffer_pool.cpp:322</a></td><td>perhaps we should sort the buffers here?</td></tr><tr id="171" style="display: none;" colspan="3"><td colspan="3"><h2>perhaps we should sort the buffers here?</h2><h4>../src/disk_buffer_pool.cpp:322</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> mutex::scoped_lock l(m_pool_mutex);
|
|
for (int i = 0; i < iov_len; ++i)
|
|
{
|
|
iov[i].iov_base = allocate_buffer_impl(l, "pending read");
|
|
iov[i].iov_len = block_size();
|
|
if (iov[i].iov_base == NULL)
|
|
{
|
|
// uh oh. We failed to allocate the buffer!
|
|
// we need to roll back and free all the buffers
|
|
// we've already allocated
|
|
for (int j = 0; j < i; ++j)
|
|
free_buffer_impl(static_cast<char*>(iov[j].iov_base), l);
|
|
return -1;
|
|
}
|
|
}
|
|
return 0;
|
|
}
|
|
|
|
void disk_buffer_pool::free_iovec(file::iovec_t* iov, int iov_len)
|
|
{
|
|
<div style="background: #ffff00" width="100%"> mutex::scoped_lock l(m_pool_mutex);
|
|
</div> for (int i = 0; i < iov_len; ++i)
|
|
free_buffer_impl(static_cast<char*>(iov[i].iov_base), l);
|
|
check_buffer_level(l);
|
|
}
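
// (sketch for the "sort the buffers" TODO above, not in the tree) if sorting
// helps, a plain comparator on the buffer address would do; free_iovec()
// could then call std::sort(iov, iov + iov_len, &iov_base_less) before its
// loop so buffers are handed back to the pool in address order:
bool iov_base_less(file::iovec_t const& lhs, file::iovec_t const& rhs)
{
	return lhs.iov_base < rhs.iov_base;
}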
|
|
|
|
char* disk_buffer_pool::allocate_buffer_impl(mutex::scoped_lock& l
|
|
, char const*)
|
|
{
|
|
TORRENT_ASSERT(m_settings_set);
|
|
TORRENT_ASSERT(m_magic == 0x1337);
|
|
TORRENT_ASSERT(l.locked());
|
|
TORRENT_UNUSED(l);
|
|
|
|
char* ret;
|
|
#if TORRENT_HAVE_MMAP
|
|
if (m_cache_pool)
|
|
{
|
|
if (m_free_list.size() <= (m_max_use - m_low_watermark)
|
|
/ 2 && !m_exceeded_max_size)
|
|
{
|
|
m_exceeded_max_size = true;
|
|
m_trigger_cache_trim();
|
|
}
|
|
if (m_free_list.empty()) return 0;
|
|
boost::uint64_t slot_index = m_free_list.back();
|
|
m_free_list.pop_back();
|
|
ret = m_cache_pool + (slot_index * 0x4000);
|
|
TORRENT_ASSERT(is_disk_buffer(ret, l));
|
|
}
|
|
else
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(172)">../src/disk_io_thread.cpp:883</a></td><td>it would be nice to optimize this by having the cache pieces also ordered by</td></tr><tr id="172" style="display: none;" colspan="3"><td colspan="3"><h2>it would be nice to optimize this by having the cache
|
|
pieces also ordered by</h2><h4>../src/disk_io_thread.cpp:883</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> // from disk_io_thread::do_delete, which is a fence job and should
|
|
// have any other jobs active, i.e. there should not be any references
|
|
// keeping pieces or blocks alive
|
|
if ((flags & flush_delete_cache) && (flags & flush_expect_clear))
|
|
{
|
|
boost::unordered_set<cached_piece_entry*> const& storage_pieces = storage->cached_pieces();
|
|
for (boost::unordered_set<cached_piece_entry*>::const_iterator i = storage_pieces.begin()
|
|
, end(storage_pieces.end()); i != end; ++i)
|
|
{
|
|
cached_piece_entry* pe = m_disk_cache.find_piece(storage, (*i)->piece);
|
|
TORRENT_PIECE_ASSERT(pe->num_dirty == 0, pe);
|
|
}
|
|
}
|
|
#endif
|
|
}
|
|
else
|
|
{
|
|
std::pair<block_cache::iterator, block_cache::iterator> range = m_disk_cache.all_pieces();
|
|
while (range.first != range.second)
|
|
{
|
|
<div style="background: #ffff00" width="100%"> if ((flags & (flush_read_cache | flush_delete_cache)) == 0)
|
|
</div> {
|
|
// if we're not flushing the read cache, and not deleting the
|
|
// cache, skip pieces with no dirty blocks, i.e. read cache
|
|
// pieces
|
|
while (range.first->num_dirty == 0)
|
|
{
|
|
++range.first;
|
|
if (range.first == range.second) return;
|
|
}
|
|
}
|
|
cached_piece_entry* pe = const_cast<cached_piece_entry*>(&*range.first);
|
|
flush_piece(pe, flags, completed_jobs, l);
|
|
range = m_disk_cache.all_pieces();
|
|
}
|
|
}
|
|
}
|
|
|
|
// this is called if we're exceeding (or about to exceed) the cache
|
|
// size limit. This means we should not restrict ourselves to contiguous
|
|
// blocks of write cache line size, but try to flush all old blocks
|
|
// this is why we pass in 1 as cont_block to the flushing functions
|
|
void disk_io_thread::try_flush_write_blocks(int num, jobqueue_t& completed_jobs
|
|
, mutex::scoped_lock& l)
|
|
{
|
|
DLOG("try_flush_write_blocks: %d\n", num);
|
|
|
|
list_iterator<cached_piece_entry> range = m_disk_cache.write_lru_pieces();
|
|
std::vector<std::pair<piece_manager*, int> > pieces;
|
|
pieces.reserve(m_disk_cache.num_write_lru_pieces());
|
|
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(173)">../src/disk_io_thread.cpp:926</a></td><td>instead of doing a lookup each time through the loop, save cached_piece_entry pointers with piece_refcount incremented to pin them</td></tr><tr id="173" style="display: none;" colspan="3"><td colspan="3"><h2>instead of doing a lookup each time through the loop, save
|
|
cached_piece_entry pointers with piece_refcount incremented to pin them</h2><h4>../src/disk_io_thread.cpp:926</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> // this is why we pass in 1 as cont_block to the flushing functions
|
|
void disk_io_thread::try_flush_write_blocks(int num, jobqueue_t& completed_jobs
|
|
, mutex::scoped_lock& l)
|
|
{
|
|
DLOG("try_flush_write_blocks: %d\n", num);
|
|
|
|
list_iterator<cached_piece_entry> range = m_disk_cache.write_lru_pieces();
|
|
std::vector<std::pair<piece_manager*, int> > pieces;
|
|
pieces.reserve(m_disk_cache.num_write_lru_pieces());
|
|
|
|
for (list_iterator<cached_piece_entry> p = range; p.get() && num > 0; p.next())
|
|
{
|
|
cached_piece_entry* e = p.get();
|
|
if (e->num_dirty == 0) continue;
|
|
pieces.push_back(std::make_pair(e->storage.get(), int(e->piece)));
|
|
}
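
	// (sketch of the TODO above, not the shipped code) the alternative is to
	// pin the entries up front and keep the pointers, instead of re-finding
	// them by (storage, piece) in the loops below. Every entry pinned here
	// would need a matching --piece_refcount (and a maybe_free_piece()) once
	// its flush is done.
	std::vector<cached_piece_entry*> pinned;
	pinned.reserve(m_disk_cache.num_write_lru_pieces());
	for (list_iterator<cached_piece_entry> p = range; p.get(); p.next())
	{
		cached_piece_entry* e = p.get();
		if (e->num_dirty == 0) continue;
		++e->piece_refcount;
		pinned.push_back(e);
	}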
|
|
|
|
for (std::vector<std::pair<piece_manager*, int> >::iterator i = pieces.begin()
|
|
, end(pieces.end()); i != end; ++i)
|
|
{
|
|
<div style="background: #ffff00" width="100%"> cached_piece_entry* pe = m_disk_cache.find_piece(i->first, i->second);
|
|
</div> if (pe == NULL) continue;
|
|
|
|
// another thread may flush this piece while we're looping and
|
|
// evict it into a read piece and then also evict it to ghost
|
|
if (pe->cache_state != cached_piece_entry::write_lru) continue;
|
|
|
|
#if TORRENT_USE_ASSERTS
|
|
pe->piece_log.push_back(piece_log_t(piece_log_t::try_flush_write_blocks, -1));
|
|
#endif
|
|
++pe->piece_refcount;
|
|
kick_hasher(pe, l);
|
|
num -= try_flush_hashed(pe, 1, completed_jobs, l);
|
|
--pe->piece_refcount;
|
|
}
|
|
|
|
// when the write cache is under high pressure, it is likely
|
|
// counter productive to actually do this, since a piece may
|
|
// not have had its flush_hashed job run on it
|
|
// so only do it if no other thread is currently flushing
|
|
|
|
if (num == 0 || m_stats_counters[counters::num_writing_threads] > 0) return;
|
|
|
|
// if we still need to flush blocks, start over and flush
|
|
// everything in LRU order (degrade to lru cache eviction)
|
|
for (std::vector<std::pair<piece_manager*, int> >::iterator i = pieces.begin()
|
|
, end(pieces.end()); i != end; ++i)
|
|
{
|
|
cached_piece_entry* pe = m_disk_cache.find_piece(i->first, i->second);
|
|
if (pe == NULL) continue;
|
|
if (pe->num_dirty == 0) continue;
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(174)">../src/disk_io_thread.cpp:1105</a></td><td>instead of doing this. pass in the settings to each storage_interface call. Each disk thread could hold its most recent understanding of the settings in a shared_ptr, and update it every time it wakes up from a job. That way each access to the settings won't require a mutex to be held.</td></tr><tr id="174" style="display: none;" colspan="3"><td colspan="3"><h2>instead of doing this. pass in the settings to each storage_interface
|
|
call. Each disk thread could hold its most recent understanding of the settings
|
|
in a shared_ptr, and update it every time it wakes up from a job. That way
|
|
each access to the settings won't require a mutex to be held.</h2><h4>../src/disk_io_thread.cpp:1105</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> {
|
|
INVARIANT_CHECK;
|
|
TORRENT_ASSERT(j->next == 0);
|
|
TORRENT_ASSERT((j->flags & disk_io_job::in_progress) || !j->storage);
|
|
|
|
mutex::scoped_lock l(m_cache_mutex);
|
|
|
|
check_cache_level(l, completed_jobs);
|
|
|
|
DLOG("perform_job job: %s ( %s%s) piece: %d offset: %d outstanding: %d\n"
|
|
, job_action_name[j->action]
|
|
, (j->flags & disk_io_job::fence) ? "fence ": ""
|
|
, (j->flags & disk_io_job::force_copy) ? "force_copy ": ""
|
|
, j->piece, j->d.io.offset
|
|
, j->storage ? j->storage->num_outstanding_jobs() : -1);
|
|
|
|
l.unlock();
|
|
|
|
boost::shared_ptr<piece_manager> storage = j->storage;
|
|
|
|
<div style="background: #ffff00" width="100%"> if (storage && storage->get_storage_impl()->m_settings == 0)
|
|
</div> storage->get_storage_impl()->m_settings = &m_settings;
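
	// (design sketch for the TODO above; none of the names below exist in the
	// tree) each disk thread could refresh a private snapshot when it wakes
	// up and pass that into the storage calls, instead of sharing one
	// m_settings pointer guarded by a mutex:
	//
	//   boost::shared_ptr<aux::session_settings const> m_settings_snapshot;
	//
	//   // copy under whatever lock protects m_settings, then read lock-free
	//   m_settings_snapshot.reset(new aux::session_settings(m_settings));
	//
	// readv()/writev() and friends would then take the snapshot as a
	// parameter rather than dereferencing storage_interface::m_settings.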
|
|
|
|
TORRENT_ASSERT(j->action < sizeof(job_functions)/sizeof(job_functions[0]));
|
|
|
|
time_point start_time = clock_type::now();
|
|
|
|
m_stats_counters.inc_stats_counter(counters::num_running_disk_jobs, 1);
|
|
|
|
// call disk function
|
|
int ret = (this->*(job_functions[j->action]))(j, completed_jobs);
|
|
|
|
	// note that -2 errors are OK
|
|
TORRENT_ASSERT(ret != -1 || (j->error.ec && j->error.operation != 0));
|
|
|
|
m_stats_counters.inc_stats_counter(counters::num_running_disk_jobs, -1);
|
|
|
|
if (ret == retry_job)
|
|
{
|
|
mutex::scoped_lock l2(m_job_mutex);
|
|
// to avoid busy looping here, give up
|
|
// our quanta in case there aren't any other
|
|
// jobs to run in between
|
|
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(175)">../src/disk_io_thread.cpp:1133</a></td><td>a potentially more efficient solution would be to have a special queue for retry jobs, that's only ever run when a job completes, in any thread. It would only work if counters::num_running_disk_jobs > 0</td></tr><tr id="175" style="display: none;" colspan="3"><td colspan="3"><h2>a potentially more efficient solution would be to have a special
|
|
queue for retry jobs, that's only ever run when a job completes, in
|
|
any thread. It would only work if counters::num_running_disk_jobs > 0</h2><h4>../src/disk_io_thread.cpp:1133</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">
|
|
time_point start_time = clock_type::now();
|
|
|
|
m_stats_counters.inc_stats_counter(counters::num_running_disk_jobs, 1);
|
|
|
|
// call disk function
|
|
int ret = (this->*(job_functions[j->action]))(j, completed_jobs);
|
|
|
|
	// note that -2 errors are OK
|
|
TORRENT_ASSERT(ret != -1 || (j->error.ec && j->error.operation != 0));
|
|
|
|
m_stats_counters.inc_stats_counter(counters::num_running_disk_jobs, -1);
|
|
|
|
if (ret == retry_job)
|
|
{
|
|
mutex::scoped_lock l2(m_job_mutex);
|
|
// to avoid busy looping here, give up
|
|
// our quanta in case there aren't any other
|
|
// jobs to run in between
|
|
|
|
<div style="background: #ffff00" width="100%">
|
|
</div> TORRENT_ASSERT((j->flags & disk_io_job::in_progress) || !j->storage);
|
|
|
|
bool need_sleep = m_queued_jobs.empty();
|
|
m_queued_jobs.push_back(j);
|
|
l2.unlock();
|
|
if (need_sleep) sleep(0);
|
|
return;
|
|
}
|
|
|
|
#if TORRENT_USE_ASSERTS
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(176)">../src/disk_io_thread.cpp:1147</a></td><td>it should clear the hash state even when there's an error, right?</td></tr><tr id="176" style="display: none;" colspan="3"><td colspan="3"><h2>it should clear the hash state even when there's an error, right?</h2><h4>../src/disk_io_thread.cpp:1147</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> m_stats_counters.inc_stats_counter(counters::num_running_disk_jobs, -1);
|
|
|
|
if (ret == retry_job)
|
|
{
|
|
mutex::scoped_lock l2(m_job_mutex);
|
|
// to avoid busy looping here, give up
|
|
// our quanta in case there aren't any other
|
|
// jobs to run in between
|
|
|
|
|
|
TORRENT_ASSERT((j->flags & disk_io_job::in_progress) || !j->storage);
|
|
|
|
bool need_sleep = m_queued_jobs.empty();
|
|
m_queued_jobs.push_back(j);
|
|
l2.unlock();
|
|
if (need_sleep) sleep(0);
|
|
return;
|
|
}
|
|
|
|
#if TORRENT_USE_ASSERTS
|
|
<div style="background: #ffff00" width="100%"> if (j->action == disk_io_job::hash && !j->error.ec)
|
|
</div> {
|
|
// a hash job should never return without clearing pe->hash
|
|
l.lock();
|
|
cached_piece_entry* pe = m_disk_cache.find_piece(j);
|
|
if (pe != NULL)
|
|
{
|
|
TORRENT_PIECE_ASSERT(pe->hash == NULL, pe);
|
|
}
|
|
l.unlock();
|
|
}
|
|
#endif
|
|
|
|
if (ret == defer_handler) return;
|
|
|
|
j->ret = ret;
|
|
|
|
time_point now = clock_type::now();
|
|
m_job_time.add_sample(total_microseconds(now - start_time));
|
|
completed_jobs.push_back(j);
|
|
}
|
|
|
|
int disk_io_thread::do_uncached_read(disk_io_job* j)
|
|
{
|
|
j->buffer.disk_block = m_disk_cache.allocate_buffer("send buffer");
|
|
if (j->buffer.disk_block == 0)
|
|
{
|
|
j->error.ec = error::no_memory;
|
|
j->error.operation = storage_error::alloc_cache_piece;
|
|
return -1;
|
|
}
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(177)">../src/disk_io_thread.cpp:1848</a></td><td>maybe the tailqueue_iterator<disk_io_job> should contain a pointer-pointer instead and have an unlink function</td></tr><tr id="177" style="display: none;" colspan="3"><td colspan="3"><h2>maybe the tailqueue_iterator<disk_io_job> should contain a pointer-pointer
|
|
instead and have an unlink function</h2><h4>../src/disk_io_thread.cpp:1848</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> j->callback = handler;
|
|
|
|
add_fence_job(storage, j);
|
|
}
|
|
|
|
void disk_io_thread::async_delete_files(piece_manager* storage
|
|
, boost::function<void(disk_io_job const*)> const& handler)
|
|
{
|
|
#ifdef TORRENT_DEBUG
|
|
// the caller must increment the torrent refcount before
|
|
// issuing an async disk request
|
|
storage->assert_torrent_refcount();
|
|
#endif
|
|
|
|
// remove cache blocks belonging to this torrent
|
|
jobqueue_t completed_jobs;
|
|
|
|
// remove outstanding jobs belonging to this torrent
|
|
mutex::scoped_lock l2(m_job_mutex);
|
|
|
|
<div style="background: #ffff00" width="100%"> disk_io_job* qj = m_queued_jobs.get_all();
|
|
</div> jobqueue_t to_abort;
|
|
|
|
while (qj)
|
|
{
|
|
disk_io_job* next = qj->next;
|
|
#if TORRENT_USE_ASSERTS
|
|
qj->next = NULL;
|
|
#endif
|
|
if (qj->storage.get() == storage)
|
|
to_abort.push_back(qj);
|
|
else
|
|
m_queued_jobs.push_back(qj);
|
|
qj = next;
|
|
}
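
	// (sketch for the tailqueue TODO above; nothing like unlink_iterator
	// exists in the tree) a pointer-to-pointer iterator with an unlink()
	// would let this loop splice matching jobs out in place instead of
	// draining and rebuilding m_queued_jobs:
	struct unlink_iterator
	{
		disk_io_job** link; // the 'next' field (or list head) that points at the current job
		disk_io_job* get() const { return *link; }
		void advance() { link = &(*link)->next; }
		disk_io_job* unlink()
		{
			disk_io_job* j = *link;
			*link = j->next; // splice the current job out of the list
			j->next = NULL;
			return j; // the iterator now refers to the following job
		}
	};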
|
|
l2.unlock();
|
|
|
|
mutex::scoped_lock l(m_cache_mutex);
|
|
flush_cache(storage, flush_delete_cache, completed_jobs, l);
|
|
l.unlock();
|
|
|
|
disk_io_job* j = allocate_job(disk_io_job::delete_files);
|
|
j->storage = storage->shared_from_this();
|
|
j->callback = handler;
|
|
add_fence_job(storage, j);
|
|
|
|
fail_jobs_impl(storage_error(boost::asio::error::operation_aborted)
|
|
, to_abort, completed_jobs);
|
|
|
|
if (completed_jobs.size())
|
|
add_completed_jobs(completed_jobs);
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(178)">../src/disk_io_thread.cpp:2113</a></td><td>this is potentially very expensive. One way to solve it would be to have a fence for just this one piece.</td></tr><tr id="178" style="display: none;" colspan="3"><td colspan="3"><h2>this is potentially very expensive. One way to solve
|
|
it would be to have a fence for just this one piece.</h2><h4>../src/disk_io_thread.cpp:2113</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> }
|
|
|
|
void disk_io_thread::async_clear_piece(piece_manager* storage, int index
|
|
, boost::function<void(disk_io_job const*)> const& handler)
|
|
{
|
|
#ifdef TORRENT_DEBUG
|
|
// the caller must increment the torrent refcount before
|
|
// issuing an async disk request
|
|
storage->assert_torrent_refcount();
|
|
#endif
|
|
|
|
disk_io_job* j = allocate_job(disk_io_job::clear_piece);
|
|
j->storage = storage->shared_from_this();
|
|
j->piece = index;
|
|
j->callback = handler;
|
|
|
|
// regular jobs are not guaranteed to be executed in-order
|
|
// since clear piece must guarantee that all write jobs that
|
|
// have been issued finish before the clear piece job completes
|
|
|
|
<div style="background: #ffff00" width="100%"> add_fence_job(storage, j);
|
|
</div> }
|
|
|
|
void disk_io_thread::clear_piece(piece_manager* storage, int index)
|
|
{
|
|
mutex::scoped_lock l(m_cache_mutex);
|
|
|
|
cached_piece_entry* pe = m_disk_cache.find_piece(storage, index);
|
|
if (pe == 0) return;
|
|
TORRENT_PIECE_ASSERT(pe->hashing == false, pe);
|
|
pe->hashing_done = 0;
|
|
delete pe->hash;
|
|
pe->hash = NULL;
|
|
|
|
// evict_piece returns true if the piece was in fact
|
|
// evicted. A piece may fail to be evicted if there
|
|
// are still outstanding operations on it, which should
|
|
// never be the case when this function is used
|
|
// in fact, no jobs should really be hung on this piece
|
|
// at this point
|
|
jobqueue_t jobs;
|
|
bool ok = m_disk_cache.evict_piece(pe, jobs);
|
|
TORRENT_PIECE_ASSERT(ok, pe);
|
|
TORRENT_UNUSED(ok);
|
|
fail_jobs(storage_error(boost::asio::error::operation_aborted), jobs);
|
|
}
|
|
|
|
void disk_io_thread::kick_hasher(cached_piece_entry* pe, mutex::scoped_lock& l)
|
|
{
|
|
if (!pe->hash) return;
|
|
if (pe->hashing) return;
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(179)">../src/disk_io_thread.cpp:2375</a></td><td>we should probably just hang the job on the piece and make sure the hasher gets kicked</td></tr><tr id="179" style="display: none;" colspan="3"><td colspan="3"><h2>we should probably just hang the job on the piece and make sure the hasher gets kicked</h2><h4>../src/disk_io_thread.cpp:2375</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> if (pe == NULL)
|
|
{
|
|
int cache_state = (j->flags & disk_io_job::volatile_read)
|
|
? cached_piece_entry::volatile_read_lru
|
|
: cached_piece_entry::read_lru1;
|
|
pe = m_disk_cache.allocate_piece(j, cache_state);
|
|
}
|
|
if (pe == NULL)
|
|
{
|
|
j->error.ec = error::no_memory;
|
|
j->error.operation = storage_error::alloc_cache_piece;
|
|
return -1;
|
|
}
|
|
|
|
if (pe->hashing)
|
|
{
|
|
TORRENT_PIECE_ASSERT(pe->hash, pe);
|
|
// another thread is hashing this piece right now
|
|
// try again in a little bit
|
|
DLOG("do_hash: retry\n");
|
|
<div style="background: #ffff00" width="100%"> return retry_job;
|
|
</div> }
|
|
|
|
pe->hashing = 1;
|
|
|
|
TORRENT_PIECE_ASSERT(pe->cache_state <= cached_piece_entry::read_lru1
|
|
|| pe->cache_state == cached_piece_entry::read_lru2, pe);
|
|
++pe->piece_refcount;
|
|
|
|
if (pe->hash == NULL)
|
|
{
|
|
pe->hashing_done = 0;
|
|
pe->hash = new partial_hash;
|
|
}
|
|
partial_hash* ph = pe->hash;
|
|
|
|
int block_size = m_disk_cache.block_size();
|
|
int blocks_in_piece = (piece_size + block_size - 1) / block_size;
|
|
|
|
// keep track of which blocks we have locked by incrementing
|
|
// their refcounts. This is used to decrement only these blocks
|
|
// later.
|
|
int* locked_blocks = TORRENT_ALLOCA(int, blocks_in_piece);
|
|
memset(locked_blocks, 0, blocks_in_piece * sizeof(int));
|
|
int num_locked_blocks = 0;
|
|
|
|
// increment the refcounts of all
|
|
// blocks up front, and then hash them without holding the lock
|
|
TORRENT_PIECE_ASSERT(ph->offset % block_size == 0, pe);
|
|
for (int i = ph->offset / block_size; i < blocks_in_piece; ++i)
|
|
{
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(180)">../src/disk_io_thread.cpp:2442</a></td><td>introduce a holder class that automatically increments and decrements the piece_refcount</td></tr><tr id="180" style="display: none;" colspan="3"><td colspan="3"><h2>introduce a holder class that automatically increments
and decrements the piece_refcount</h2><h4>../src/disk_io_thread.cpp:2442</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> {
|
|
file::iovec_t iov;
|
|
iov.iov_len = (std::min)(block_size, piece_size - ph->offset);
|
|
|
|
if (next_locked_block < num_locked_blocks
|
|
&& locked_blocks[next_locked_block] == i)
|
|
{
|
|
++next_locked_block;
|
|
TORRENT_PIECE_ASSERT(pe->blocks[i].buf, pe);
|
|
TORRENT_PIECE_ASSERT(ph->offset == i * block_size, pe);
|
|
ph->offset += iov.iov_len;
|
|
ph->h.update(pe->blocks[i].buf, iov.iov_len);
|
|
}
|
|
else
|
|
{
|
|
iov.iov_base = m_disk_cache.allocate_buffer("hashing");
|
|
|
|
if (iov.iov_base == NULL)
|
|
{
|
|
l.lock();
|
|
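			// --- editor's sketch (not in the source file) -----------------------
			// the RAII holder the todo above suggests. The name
			// piece_refcount_holder is made up; it relies only on the
			// piece_refcount member already used in this function.
			struct piece_refcount_holder
			{
				explicit piece_refcount_holder(cached_piece_entry* p)
					: m_pe(p), m_released(false)
				{ ++m_pe->piece_refcount; }
				~piece_refcount_holder()
				{ if (!m_released) --m_pe->piece_refcount; }
				// decrement early, e.g. right before calling maybe_free_piece()
				void release() { --m_pe->piece_refcount; m_released = true; }
			private:
				cached_piece_entry* m_pe;
				bool m_released;
				// non-copyable
				piece_refcount_holder(piece_refcount_holder const&);
				piece_refcount_holder& operator=(piece_refcount_holder const&);
			};
			// with such a holder, error paths like the one below would not need
			// the manual --pe->piece_refcount bookkeeping.
			// --- end of sketch --------------------------------------------------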
<div style="background: #ffff00" width="100%">
|
|
</div> // decrement the refcounts of the blocks we just hashed
|
|
for (int k = 0; k < num_locked_blocks; ++k)
|
|
m_disk_cache.dec_block_refcount(pe, locked_blocks[k], block_cache::ref_hashing);
|
|
|
|
--pe->piece_refcount;
|
|
pe->hashing = false;
|
|
delete pe->hash;
|
|
pe->hash = NULL;
|
|
|
|
m_disk_cache.maybe_free_piece(pe);
|
|
|
|
j->error.ec = errors::no_memory;
|
|
j->error.operation = storage_error::alloc_cache_piece;
|
|
return -1;
|
|
}
|
|
|
|
DLOG("do_hash: reading (piece: %d block: %d)\n", int(pe->piece), i);
|
|
|
|
time_point start_time = clock_type::now();
|
|
|
|
TORRENT_PIECE_ASSERT(ph->offset == i * block_size, pe);
|
|
ret = j->storage->get_storage_impl()->readv(&iov, 1, j->piece
|
|
, ph->offset, file_flags, j->error);
|
|
|
|
if (ret < 0)
|
|
{
|
|
TORRENT_ASSERT(j->error.ec && j->error.operation != 0);
|
|
m_disk_cache.free_buffer(static_cast<char*>(iov.iov_base));
|
|
l.lock();
|
|
break;
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(181)">../src/disk_io_thread.cpp:2692</a></td><td>it would be nice to not have to lock the mutex every turn through this loop</td></tr><tr id="181" style="display: none;" colspan="3"><td colspan="3"><h2>it would be nice to not have to lock the mutex every
turn through this loop</h2><h4>../src/disk_io_thread.cpp:2692</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> j->error.ec = error::no_memory;
|
|
j->error.operation = storage_error::alloc_cache_piece;
|
|
return -1;
|
|
}
|
|
|
|
#if TORRENT_USE_ASSERTS
|
|
pe->piece_log.push_back(piece_log_t(j->action));
|
|
#endif
|
|
TORRENT_PIECE_ASSERT(pe->cache_state <= cached_piece_entry::read_lru1
|
|
|| pe->cache_state == cached_piece_entry::read_lru2, pe);
|
|
++pe->piece_refcount;
|
|
|
|
int block_size = m_disk_cache.block_size();
|
|
int piece_size = j->storage->files()->piece_size(j->piece);
|
|
int blocks_in_piece = (piece_size + block_size - 1) / block_size;
|
|
|
|
file::iovec_t iov;
|
|
int ret = 0;
|
|
int offset = 0;
|
|
|
|
<div style="background: #ffff00" width="100%"> for (int i = 0; i < blocks_in_piece; ++i)
|
|
</div> {
|
|
iov.iov_len = (std::min)(block_size, piece_size - offset);
|
|
|
|
// is the block already in the cache?
|
|
if (pe->blocks[i].buf) continue;
|
|
l.unlock();
|
|
|
|
iov.iov_base = m_disk_cache.allocate_buffer("read cache");
|
|
|
|
if (iov.iov_base == NULL)
|
|
{
|
|
//#error introduce a holder class that automatically increments and decrements the piece_refcount
|
|
--pe->piece_refcount;
|
|
m_disk_cache.maybe_free_piece(pe);
|
|
j->error.ec = errors::no_memory;
|
|
j->error.operation = storage_error::alloc_cache_piece;
|
|
return -1;
|
|
}
|
|
|
|
DLOG("do_cache_piece: reading (piece: %d block: %d)\n"
|
|
, int(pe->piece), i);
|
|
|
|
time_point start_time = clock_type::now();
|
|
|
|
ret = j->storage->get_storage_impl()->readv(&iov, 1, j->piece
|
|
, offset, file_flags, j->error);
|
|
|
|
if (ret < 0)
|
|
{
|
|
l.lock();
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(182)">../src/file_progress.cpp:137</a></td><td>it would be nice to not depend on alert_manager here</td></tr><tr id="182" style="display: none;" colspan="3"><td colspan="3"><h2>it would be nice to not depend on alert_manager here</h2><h4>../src/file_progress.cpp:137</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> {
|
|
if (m_file_progress.empty())
|
|
return;
|
|
|
|
const int piece_size = fs.piece_length();
|
|
boost::int64_t off = boost::int64_t(index) * piece_size;
|
|
int file_index = fs.file_index_at_offset(off);
|
|
int size = fs.piece_size(index);
|
|
for (; size > 0; ++file_index)
|
|
{
|
|
boost::int64_t file_offset = off - fs.file_offset(file_index);
|
|
TORRENT_ASSERT(file_index != fs.num_files());
|
|
TORRENT_ASSERT(file_offset <= fs.file_size(file_index));
|
|
int add = (std::min)(fs.file_size(file_index)
|
|
- file_offset, boost::int64_t(size));
|
|
m_file_progress[file_index] += add;
|
|
|
|
TORRENT_ASSERT(m_file_progress[file_index]
|
|
<= fs.file_size(file_index));
|
|
|
|
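			// --- editor's sketch (not in the source file) -----------------------
			// one way to remove the alert_manager dependency is to report
			// completed files through a callback and let the caller (torrent)
			// decide whether to post file_completed_alert. The signature below
			// is hypothetical:
			//
			//   void update(file_storage const& fs, int index
			//       , boost::function<void(int)> const& file_complete);
			//
			// the block below would then reduce to calling
			// file_complete(file_index) for non-pad files.
			// --- end of sketch --------------------------------------------------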
<div style="background: #ffff00" width="100%"> if (m_file_progress[file_index] >= fs.file_size(file_index) && alerts)
|
|
</div> {
|
|
if (!fs.pad_file_at(file_index))
|
|
{
|
|
if (alerts->should_post<file_completed_alert>())
|
|
{
|
|
// this file just completed, post alert
|
|
alerts->emplace_alert<file_completed_alert>(h, file_index);
|
|
}
|
|
}
|
|
}
|
|
size -= add;
|
|
off += add;
|
|
TORRENT_ASSERT(size >= 0);
|
|
}
|
|
}
|
|
|
|
#if TORRENT_USE_INVARIANT_CHECKS
|
|
void file_progress::check_invariant(file_storage const& fs) const
|
|
{
|
|
if (!m_file_progress.empty())
|
|
{
|
|
for (std::vector<boost::uint64_t>::const_iterator i = m_file_progress.begin()
|
|
, end(m_file_progress.end()); i != end; ++i)
|
|
{
|
|
int index = i - m_file_progress.begin();
|
|
TORRENT_ASSERT(*i <= fs.file_size(index));
|
|
}
|
|
}
|
|
}
|
|
#endif
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(183)">../src/http_tracker_connection.cpp:186</a></td><td>support this somehow</td></tr><tr id="183" style="display: none;" colspan="3"><td colspan="3"><h2>support this somehow</h2><h4>../src/http_tracker_connection.cpp:186</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> {
|
|
if (tracker_req().i2pconn->local_endpoint().empty())
|
|
{
|
|
fail(error_code(errors::no_i2p_endpoint), -1, "Waiting for i2p acceptor from SAM bridge", 5);
|
|
return;
|
|
}
|
|
else
|
|
{
|
|
url += "&ip=" + tracker_req ().i2pconn->local_endpoint () + ".i2p";
|
|
}
|
|
}
|
|
else
|
|
#endif
|
|
if (!settings.get_bool(settings_pack::anonymous_mode))
|
|
{
|
|
std::string announce_ip = settings.get_str(settings_pack::announce_ip);
|
|
if (!announce_ip.empty())
|
|
{
|
|
url += "&ip=" + escape_string(announce_ip.c_str(), announce_ip.size());
|
|
}
|
|
<div style="background: #ffff00" width="100%">/* else if (settings.get_bool(settings_pack::announce_double_nat)
|
|
</div> && is_local(m_ses.listen_address()))
|
|
{
|
|
// only use the global external listen address here
|
|
// if it turned out to be on a local network
|
|
// since otherwise the tracker should use our
|
|
// source IP to determine our origin
|
|
url += "&ip=" + print_address(m_ses.listen_address());
|
|
}
|
|
*/
|
|
}
|
|
}
|
|
|
|
m_tracker_connection.reset(new http_connection(get_io_service(), m_man.host_resolver()
|
|
, boost::bind(&http_tracker_connection::on_response, shared_from_this(), _1, _2, _3, _4)
|
|
, true, settings.get_int(settings_pack::max_http_recv_buffer_size)
|
|
, boost::bind(&http_tracker_connection::on_connect, shared_from_this(), _1)
|
|
, boost::bind(&http_tracker_connection::on_filter, shared_from_this(), _1, _2)
|
|
#ifdef TORRENT_USE_OPENSSL
|
|
, tracker_req().ssl_ctx
|
|
#endif
|
|
));
|
|
|
|
int timeout = tracker_req().event==tracker_request::stopped
|
|
?settings.get_int(settings_pack::stop_tracker_timeout)
|
|
:settings.get_int(settings_pack::tracker_completion_timeout);
|
|
|
|
// when sending stopped requests, prefer the cached DNS entry
|
|
// to avoid being blocked for slow or failing responses. Chances
|
|
// are that we're shutting down, and this should be a best-effort
|
|
// attempt. It's not worth stalling shutdown.
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(184)">../src/metadata_transfer.cpp:365</a></td><td>this is not safe. The torrent could be unloaded while we're still sending the metadata</td></tr><tr id="184" style="display: none;" colspan="3"><td colspan="3"><h2>this is not safe. The torrent could be unloaded while
we're still sending the metadata</h2><h4>../src/metadata_transfer.cpp:365</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> = req_to_offset(req, int(m_tp.metadata().left()));
|
|
|
|
char msg[15];
|
|
char* ptr = msg;
|
|
|
|
#ifndef TORRENT_DISABLE_LOGGING
|
|
m_pc.peer_log(peer_log_alert::outgoing_message, "METADATA"
|
|
, "start: %d total_size: %d offset: %d data_size: %d"
|
|
, req.first, req.second, offset.first, offset.second);
|
|
#endif
|
|
// yes, we have metadata, send it
|
|
detail::write_uint32(11 + offset.second, ptr);
|
|
detail::write_uint8(bt_peer_connection::msg_extended, ptr);
|
|
detail::write_uint8(m_message_index, ptr);
|
|
// means 'data packet'
|
|
detail::write_uint8(1, ptr);
|
|
detail::write_uint32(int(m_tp.metadata().left()), ptr);
|
|
detail::write_uint32(offset.first, ptr);
|
|
m_pc.send_buffer(msg, sizeof(msg));
|
|
|
|
<div style="background: #ffff00" width="100%"> char const* metadata = m_tp.metadata().begin;
|
|
</div> m_pc.append_const_send_buffer(metadata + offset.first, offset.second);
|
|
}
|
|
else
|
|
{
|
|
#ifndef TORRENT_DISABLE_LOGGING
|
|
m_pc.peer_log(peer_log_alert::outgoing_message, "METADATA"
|
|
, "don't have metadata");
|
|
#endif
|
|
char msg[4+3];
|
|
char* ptr = msg;
|
|
|
|
// we don't have the metadata, reply with
|
|
// don't have-message
|
|
detail::write_uint32(1 + 2, ptr);
|
|
detail::write_uint8(bt_peer_connection::msg_extended, ptr);
|
|
detail::write_uint8(m_message_index, ptr);
|
|
// means 'have no data'
|
|
detail::write_uint8(2, ptr);
|
|
m_pc.send_buffer(msg, sizeof(msg));
|
|
}
|
|
m_pc.setup_send();
|
|
}
|
|
|
|
virtual bool on_extended(int length
|
|
, int msg, buffer::const_interval body) TORRENT_OVERRIDE
|
|
{
|
|
if (msg != 14) return false;
|
|
if (m_message_index == 0) return false;
|
|
|
|
if (length > 500 * 1024)
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(185)">../src/packet_buffer.cpp:180</a></td><td>use compare_less_wrap for this comparison as well</td></tr><tr id="185" style="display: none;" colspan="3"><td colspan="3"><h2>use compare_less_wrap for this comparison as well</h2><h4>../src/packet_buffer.cpp:180</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> void** new_storage = static_cast<void**>(malloc(sizeof(void*) * new_size));
|
|
#ifndef BOOST_NO_EXCEPTIONS
|
|
if (new_storage == NULL) throw std::bad_alloc();
|
|
#endif
|
|
|
|
for (index_type i = 0; i < new_size; ++i)
|
|
new_storage[i] = 0;
|
|
|
|
for (index_type i = m_first; i < (m_first + m_capacity); ++i)
|
|
new_storage[i & (new_size - 1)] = m_storage[i & (m_capacity - 1)];
|
|
|
|
free(m_storage);
|
|
|
|
m_storage = new_storage;
|
|
m_capacity = new_size;
|
|
}
|
|
|
|
void* packet_buffer_impl::remove(index_type idx)
|
|
{
|
|
INVARIANT_CHECK;
|
|
<div style="background: #ffff00" width="100%"> if (idx >= m_first + m_capacity)
|
|
</div> return 0;
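		// --- editor's sketch (not in the source file) ---------------------------
		// the wrap-aware form of the check above that the todo suggests, using
		// the same 0xffff mask as the compare_less_wrap() call below:
		//
		//   if (!compare_less_wrap(idx, (m_first + m_capacity) & 0xffff, 0xffff))
		//       return 0;
		// --- end of sketch --------------------------------------------------------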
|
|
|
|
if (compare_less_wrap(idx, m_first, 0xffff))
|
|
return 0;
|
|
|
|
const int mask = (m_capacity - 1);
|
|
void* old_value = m_storage[idx & mask];
|
|
m_storage[idx & mask] = 0;
|
|
|
|
if (old_value)
|
|
{
|
|
--m_size;
|
|
if (m_size == 0) m_last = m_first;
|
|
}
|
|
|
|
if (idx == m_first && m_size != 0)
|
|
{
|
|
++m_first;
|
|
for (boost::uint32_t i = 0; i < m_capacity; ++i, ++m_first)
|
|
if (m_storage[m_first & mask]) break;
|
|
m_first &= 0xffff;
|
|
}
|
|
|
|
if (((idx + 1) & 0xffff) == m_last && m_size != 0)
|
|
{
|
|
--m_last;
|
|
for (boost::uint32_t i = 0; i < m_capacity; ++i, --m_last)
|
|
if (m_storage[m_last & mask]) break;
|
|
++m_last;
|
|
m_last &= 0xffff;
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(186)">../src/part_file.cpp:252</a></td><td>what do we do if someone is currently reading from the disk from this piece? does it matter? Since we won't actively erase the data from disk, but it may be overwritten soon, it's probably not that big of a deal</td></tr><tr id="186" style="display: none;" colspan="3"><td colspan="3"><h2>what do we do if someone is currently reading from the disk
from this piece? does it matter? Since we won't actively erase the
data from disk, but it may be overwritten soon, it's probably not that
big of a deal</h2><h4>../src/part_file.cpp:252</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> if (((mode & file::rw_mask) != file::read_only)
|
|
&& ec == boost::system::errc::no_such_file_or_directory)
|
|
{
|
|
// this means the directory the file is in doesn't exist.
|
|
// so create it
|
|
ec.clear();
|
|
create_directories(m_path, ec);
|
|
|
|
if (ec) return;
|
|
m_file.open(fn, mode, ec);
|
|
}
|
|
}
|
|
|
|
void part_file::free_piece(int piece)
|
|
{
|
|
mutex::scoped_lock l(m_mutex);
|
|
|
|
boost::unordered_map<int, int>::iterator i = m_piece_map.find(piece);
|
|
if (i == m_piece_map.end()) return;
|
|
|
|
<div style="background: #ffff00" width="100%">
|
|
</div> m_free_slots.push_back(i->second);
|
|
m_piece_map.erase(i);
|
|
m_dirty_metadata = true;
|
|
}
|
|
|
|
void part_file::move_partfile(std::string const& path, error_code& ec)
|
|
{
|
|
mutex::scoped_lock l(m_mutex);
|
|
|
|
flush_metadata_impl(ec);
|
|
if (ec) return;
|
|
|
|
m_file.close();
|
|
|
|
if (!m_piece_map.empty())
|
|
{
|
|
std::string old_path = combine_path(m_path, m_name);
|
|
std::string new_path = combine_path(path, m_name);
|
|
|
|
rename(old_path, new_path, ec);
|
|
if (ec == boost::system::errc::no_such_file_or_directory)
|
|
ec.clear();
|
|
|
|
if (ec)
|
|
{
|
|
copy_file(old_path, new_path, ec);
|
|
if (ec) return;
|
|
remove(old_path, ec);
|
|
}
|
|
}
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(187)">../src/part_file.cpp:353</a></td><td>instead of rebuilding the whole file header and flushing it, update the slot entries as we go</td></tr><tr id="187" style="display: none;" colspan="3"><td colspan="3"><h2>instead of rebuilding the whole file header
and flushing it, update the slot entries as we go</h2><h4>../src/part_file.cpp:353</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> if (block_to_copy == m_piece_size)
|
|
{
|
|
m_free_slots.push_back(i->second);
|
|
m_piece_map.erase(i);
|
|
m_dirty_metadata = true;
|
|
}
|
|
}
|
|
file_offset += block_to_copy;
|
|
piece_offset = 0;
|
|
size -= block_to_copy;
|
|
}
|
|
}
|
|
|
|
void part_file::flush_metadata(error_code& ec)
|
|
{
|
|
mutex::scoped_lock l(m_mutex);
|
|
|
|
flush_metadata_impl(ec);
|
|
}
|
|
|
|
<div style="background: #ffff00" width="100%"> void part_file::flush_metadata_impl(error_code& ec)
|
|
</div> {
|
|
// do we need to flush the metadata?
|
|
if (m_dirty_metadata == false) return;
|
|
|
|
if (m_piece_map.empty())
|
|
{
|
|
m_file.close();
|
|
|
|
// if we don't have any pieces left in the
|
|
// part file, remove it
|
|
std::string p = combine_path(m_path, m_name);
|
|
remove(p, ec);
|
|
|
|
if (ec == boost::system::errc::no_such_file_or_directory)
|
|
ec.clear();
|
|
return;
|
|
}
|
|
|
|
open_file(file::read_write, ec);
|
|
if (ec) return;
|
|
|
|
boost::scoped_array<boost::uint32_t> header(new boost::uint32_t[m_header_size]);
|
|
|
|
using namespace libtorrent::detail;
|
|
|
|
char* ptr = reinterpret_cast<char*>(header.get());
|
|
|
|
write_uint32(m_max_pieces, ptr);
|
|
write_uint32(m_piece_size, ptr);
|
|
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(188)">../src/peer_connection.cpp:524</a></td><td>it would be neat to be able to print this straight into the alert's stack allocator</td></tr><tr id="188" style="display: none;" colspan="3"><td colspan="3"><h2>it would be neat to be able to print this straight into the
alert's stack allocator</h2><h4>../src/peer_connection.cpp:524</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> }
|
|
|
|
#ifndef TORRENT_DISABLE_LOGGING
|
|
void peer_connection::peer_log(peer_log_alert::direction_t direction
|
|
, char const* event) const
|
|
{
|
|
peer_log(direction, event, "");
|
|
}
|
|
|
|
TORRENT_FORMAT(4,5)
|
|
void peer_connection::peer_log(peer_log_alert::direction_t direction
|
|
, char const* event, char const* fmt, ...) const
|
|
{
|
|
TORRENT_ASSERT(is_single_thread());
|
|
|
|
if (!m_ses.alerts().should_post<peer_log_alert>()) return;
|
|
|
|
va_list v;
|
|
va_start(v, fmt);
|
|
|
|
<div style="background: #ffff00" width="100%"> char buf[512];
|
|
</div> vsnprintf(buf, sizeof(buf), fmt, v);
|
|
va_end(v);
|
|
|
|
torrent_handle h;
|
|
boost::shared_ptr<torrent> t = m_torrent.lock();
|
|
if (t) h = t->get_handle();
|
|
|
|
m_ses.alerts().emplace_alert<peer_log_alert>(
|
|
h, m_remote, m_peer_id, direction, event, buf);
|
|
}
|
|
#endif
|
|
|
|
#ifndef TORRENT_DISABLE_EXTENSIONS
|
|
void peer_connection::add_extension(boost::shared_ptr<peer_plugin> ext)
|
|
{
|
|
TORRENT_ASSERT(is_single_thread());
|
|
m_extensions.push_back(ext);
|
|
}
|
|
|
|
peer_plugin const* peer_connection::find_plugin(char const* type)
|
|
{
|
|
TORRENT_ASSERT(is_single_thread());
|
|
for (extension_list_t::iterator i = m_extensions.begin()
|
|
, end(m_extensions.end()); i != end; ++i)
|
|
{
|
|
if (strcmp((*i)->type(), type) == 0) return (*i).get();
|
|
}
|
|
return 0;
|
|
}
|
|
#endif
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(189)">../src/peer_connection.cpp:1031</a></td><td>this should be the global download rate</td></tr><tr id="189" style="display: none;" colspan="3"><td colspan="3"><h2>this should be the global download rate</h2><h4>../src/peer_connection.cpp:1031</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">
|
|
int rate = 0;
|
|
|
|
// if we haven't received any data recently, the current download rate
|
|
// is not representative
|
|
if (aux::time_now() - m_last_piece > seconds(30) && m_download_rate_peak > 0)
|
|
{
|
|
rate = m_download_rate_peak;
|
|
}
|
|
else if (aux::time_now() - m_last_unchoked < seconds(5)
|
|
&& m_statistics.total_payload_upload() < 2 * 0x4000)
|
|
{
|
|
		// if we have only been unchoked for a short period of time,
|
|
// we don't know what rate we can get from this peer. Instead of assuming
|
|
// the lowest possible rate, assume the average.
|
|
|
|
int peers_with_requests = stats_counters()[counters::num_peers_down_requests];
|
|
// avoid division by 0
|
|
if (peers_with_requests == 0) peers_with_requests = 1;
|
|
|
|
<div style="background: #ffff00" width="100%"> rate = t->statistics().transfer_rate(stat::download_payload) / peers_with_requests;
|
|
</div> }
|
|
else
|
|
{
|
|
// current download rate in bytes per seconds
|
|
rate = m_statistics.transfer_rate(stat::download_payload);
|
|
}
|
|
|
|
// avoid division by zero
|
|
if (rate < 50) rate = 50;
|
|
|
|
// average of current rate and peak
|
|
// rate = (rate + m_download_rate_peak) / 2;
|
|
|
|
return milliseconds((m_outstanding_bytes + extra_bytes
|
|
+ m_queued_time_critical * t->block_size() * 1000) / rate);
|
|
}
|
|
|
|
void peer_connection::add_stat(boost::int64_t downloaded, boost::int64_t uploaded)
|
|
{
|
|
TORRENT_ASSERT(is_single_thread());
|
|
m_statistics.add_stat(downloaded, uploaded);
|
|
}
|
|
|
|
void peer_connection::received_bytes(int bytes_payload, int bytes_protocol)
|
|
{
|
|
TORRENT_ASSERT(is_single_thread());
|
|
m_statistics.received_bytes(bytes_payload, bytes_protocol);
|
|
if (m_ignore_stats) return;
|
|
boost::shared_ptr<torrent> t = m_torrent.lock();
|
|
if (!t) return;
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(190)">../src/peer_connection.cpp:3364</a></td><td>sort the allowed fast set in priority order</td></tr><tr id="190" style="display: none;" colspan="3"><td colspan="3"><h2>sort the allowed fast set in priority order</h2><h4>../src/peer_connection.cpp:3364</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">
|
|
// if the peer has the piece and we want
|
|
// to download it, request it
|
|
if (int(m_have_piece.size()) > index
|
|
&& m_have_piece[index]
|
|
&& !t->has_piece_passed(index)
|
|
&& t->valid_metadata()
|
|
&& t->has_picker()
|
|
&& t->picker().piece_priority(index) > 0)
|
|
{
|
|
t->peer_is_interesting(*this);
|
|
}
|
|
}
|
|
|
|
std::vector<int> const& peer_connection::allowed_fast()
|
|
{
|
|
TORRENT_ASSERT(is_single_thread());
|
|
boost::shared_ptr<torrent> t = m_torrent.lock();
|
|
TORRENT_ASSERT(t);
|
|
|
|
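		// --- editor's sketch (not in the source file) ---------------------------
		// sorting the allowed-fast set by piece priority before handing it out,
		// as the todo suggests. allowed_fast_cmp is hypothetical and would live
		// at namespace scope; piece_picker::piece_priority() is the call used
		// elsewhere in this file.
		//
		//   struct allowed_fast_cmp
		//   {
		//       explicit allowed_fast_cmp(piece_picker const& p) : picker(p) {}
		//       bool operator()(int lhs, int rhs) const
		//       { return picker.piece_priority(lhs) > picker.piece_priority(rhs); }
		//       piece_picker const& picker;
		//   };
		//
		//   if (t->has_picker())
		//       std::sort(m_allowed_fast.begin(), m_allowed_fast.end()
		//           , allowed_fast_cmp(t->picker()));
		// --- end of sketch --------------------------------------------------------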
<div style="background: #ffff00" width="100%"> return m_allowed_fast;
|
|
</div> }
|
|
|
|
bool peer_connection::can_request_time_critical() const
|
|
{
|
|
TORRENT_ASSERT(is_single_thread());
|
|
if (has_peer_choked() || !is_interesting()) return false;
|
|
if (int(m_download_queue.size()) + int(m_request_queue.size())
|
|
> m_desired_queue_size * 2) return false;
|
|
if (on_parole()) return false;
|
|
if (m_disconnecting) return false;
|
|
boost::shared_ptr<torrent> t = m_torrent.lock();
|
|
TORRENT_ASSERT(t);
|
|
if (t->upload_mode()) return false;
|
|
|
|
// ignore snubbed peers, since they're not likely to return pieces in a
|
|
// timely manner anyway
|
|
if (m_snubbed) return false;
|
|
return true;
|
|
}
|
|
|
|
bool peer_connection::make_time_critical(piece_block const& block)
|
|
{
|
|
TORRENT_ASSERT(is_single_thread());
|
|
std::vector<pending_block>::iterator rit = std::find_if(m_request_queue.begin()
|
|
, m_request_queue.end(), has_block(block));
|
|
if (rit == m_request_queue.end()) return false;
|
|
#if TORRENT_USE_ASSERTS
|
|
boost::shared_ptr<torrent> t = m_torrent.lock();
|
|
TORRENT_ASSERT(t);
|
|
TORRENT_ASSERT(t->has_picker());
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(191)">../src/peer_connection.cpp:6176</a></td><td>The stats checks can not be honored when authenticated encryption is in use because we may have encrypted data which we cannot authenticate yet</td></tr><tr id="191" style="display: none;" colspan="3"><td colspan="3"><h2>The stats checks can not be honored when authenticated encryption is in use
because we may have encrypted data which we cannot authenticate yet</h2><h4>../src/peer_connection.cpp:6176</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">#ifndef TORRENT_DISABLE_LOGGING
|
|
peer_log(peer_log_alert::incoming, "READ"
|
|
, "%d bytes", int(bytes_transferred));
|
|
#endif
|
|
// correct the dl quota usage, if not all of the buffer was actually read
|
|
TORRENT_ASSERT(int(bytes_transferred) <= m_quota[download_channel]);
|
|
m_quota[download_channel] -= bytes_transferred;
|
|
|
|
if (m_disconnecting)
|
|
{
|
|
trancieve_ip_packet(bytes_in_loop, m_remote.address().is_v6());
|
|
return;
|
|
}
|
|
|
|
TORRENT_ASSERT(bytes_transferred > 0);
|
|
m_recv_buffer.received(bytes_transferred);
|
|
|
|
int bytes = bytes_transferred;
|
|
int sub_transferred = 0;
|
|
do {
|
|
<div style="background: #ffff00" width="100%">#if 0
|
|
</div> boost::int64_t cur_payload_dl = m_statistics.last_payload_downloaded();
|
|
boost::int64_t cur_protocol_dl = m_statistics.last_protocol_downloaded();
|
|
#endif
|
|
sub_transferred = m_recv_buffer.advance_pos(bytes);
|
|
on_receive(error, sub_transferred);
|
|
bytes -= sub_transferred;
|
|
TORRENT_ASSERT(sub_transferred > 0);
|
|
|
|
#if 0
|
|
TORRENT_ASSERT(m_statistics.last_payload_downloaded() - cur_payload_dl >= 0);
|
|
TORRENT_ASSERT(m_statistics.last_protocol_downloaded() - cur_protocol_dl >= 0);
|
|
boost::int64_t stats_diff = m_statistics.last_payload_downloaded() - cur_payload_dl +
|
|
m_statistics.last_protocol_downloaded() - cur_protocol_dl;
|
|
TORRENT_ASSERT(stats_diff == int(sub_transferred));
|
|
#endif
|
|
if (m_disconnecting) return;
|
|
|
|
} while (bytes > 0 && sub_transferred > 0);
|
|
|
|
m_recv_buffer.normalize();
|
|
|
|
TORRENT_ASSERT(m_recv_buffer.pos_at_end());
|
|
TORRENT_ASSERT(m_recv_buffer.packet_size() > 0);
|
|
|
|
if (m_peer_choked)
|
|
{
|
|
m_recv_buffer.clamp_size();
|
|
}
|
|
|
|
if (num_loops > read_loops) break;
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(192)">../src/piece_picker.cpp:2061</a></td><td>this could probably be optimized by incrementally calling partial_sort to sort one more element in the list. Because chances are that we'll just need a single piece, and once we've picked from it we're done. Sorting the rest of the list in that case is a waste of time.</td></tr><tr id="192" style="display: none;" colspan="3"><td colspan="3"><h2>this could probably be optimized by incrementally
calling partial_sort to sort one more element in the list. Because
chances are that we'll just need a single piece, and once we've
picked from it we're done. Sorting the rest of the list in that
case is a waste of time.</h2><h4>../src/piece_picker.cpp:2061</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> pc.inc_stats_counter(counters::piece_picker_partial_loops);
|
|
|
|
// in time critical mode, only pick high priority pieces
|
|
if ((options & time_critical_mode)
|
|
&& piece_priority(i->index) != priority_levels - 1)
|
|
continue;
|
|
|
|
if (!is_piece_free(i->index, pieces)) continue;
|
|
|
|
TORRENT_ASSERT(m_piece_map[i->index].download_queue()
|
|
== piece_pos::piece_downloading);
|
|
|
|
ordered_partials[num_ordered_partials++] = &*i;
|
|
}
|
|
|
|
// now, sort the list.
|
|
if (options & rarest_first)
|
|
{
|
|
ret |= picker_log_alert::rarest_first_partials;
|
|
|
|
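			// --- editor's sketch (not in the source file) -----------------------
			// what the todo suggests: only sort as much of the list as is
			// actually consumed. std::partial_sort of the first k elements is
			// O(n log k), so sorting a small prefix (and sorting more only if
			// the loop below runs out) avoids ordering a tail that is never
			// inspected. The prefix size here is arbitrary, and a complete
			// version would re-sort a larger prefix on demand.
			int const prefix = (std::min)(num_ordered_partials, 4);
			std::partial_sort(ordered_partials, ordered_partials + prefix
				, ordered_partials + num_ordered_partials
				, boost::bind(&piece_picker::partial_compare_rarest_first, this
					, _1, _2));
			// --- end of sketch --------------------------------------------------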
<div style="background: #ffff00" width="100%"> std::sort(ordered_partials, ordered_partials + num_ordered_partials
|
|
</div> , boost::bind(&piece_picker::partial_compare_rarest_first, this
|
|
, _1, _2));
|
|
}
|
|
|
|
for (int i = 0; i < num_ordered_partials; ++i)
|
|
{
|
|
ret |= picker_log_alert::prioritize_partials;
|
|
|
|
num_blocks = add_blocks_downloading(*ordered_partials[i], pieces
|
|
, interesting_blocks, backup_blocks, backup_blocks2
|
|
, num_blocks, prefer_contiguous_blocks, peer, options);
|
|
if (num_blocks <= 0) return ret;
|
|
if (int(backup_blocks.size()) >= num_blocks
|
|
&& int(backup_blocks2.size()) >= num_blocks)
|
|
break;
|
|
}
|
|
|
|
num_blocks = append_blocks(interesting_blocks, backup_blocks
|
|
, num_blocks);
|
|
if (num_blocks <= 0) return ret;
|
|
|
|
num_blocks = append_blocks(interesting_blocks, backup_blocks2
|
|
, num_blocks);
|
|
if (num_blocks <= 0) return ret;
|
|
}
|
|
|
|
if (!suggested_pieces.empty())
|
|
{
|
|
for (std::vector<int>::const_iterator i = suggested_pieces.begin();
|
|
i != suggested_pieces.end(); ++i)
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(193)">../src/piece_picker.cpp:2584</a></td><td>when expanding pieces for cache stripe reasons, the !downloading condition doesn't make much sense</td></tr><tr id="193" style="display: none;" colspan="3"><td colspan="3"><h2>when expanding pieces for cache stripe reasons,
the !downloading condition doesn't make much sense</h2><h4>../src/piece_picker.cpp:2584</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> TORRENT_ASSERT(index < int(m_piece_map.size()) || m_piece_map.empty());
|
|
if (index + 1 == int(m_piece_map.size()))
|
|
return m_blocks_in_last_piece;
|
|
else
|
|
return m_blocks_per_piece;
|
|
}
|
|
|
|
bool piece_picker::is_piece_free(int piece, bitfield const& bitmask) const
|
|
{
|
|
TORRENT_ASSERT(piece >= 0 && piece < int(m_piece_map.size()));
|
|
return bitmask[piece]
|
|
&& !m_piece_map[piece].have()
|
|
&& !m_piece_map[piece].filtered();
|
|
}
|
|
|
|
bool piece_picker::can_pick(int piece, bitfield const& bitmask) const
|
|
{
|
|
TORRENT_ASSERT(piece >= 0 && piece < int(m_piece_map.size()));
|
|
return bitmask[piece]
|
|
&& !m_piece_map[piece].have()
|
|
<div style="background: #ffff00" width="100%"> && !m_piece_map[piece].downloading()
|
|
</div> && !m_piece_map[piece].filtered();
|
|
}
|
|
|
|
#if TORRENT_USE_INVARIANT_CHECKS
|
|
void piece_picker::check_peers()
|
|
{
|
|
for (std::vector<block_info>::iterator i = m_block_info.begin()
|
|
, end(m_block_info.end()); i != end; ++i)
|
|
{
|
|
TORRENT_ASSERT(i->peer == 0 || static_cast<torrent_peer*>(i->peer)->in_use);
|
|
}
|
|
}
|
|
#endif
|
|
|
|
void piece_picker::clear_peer(torrent_peer* peer)
|
|
{
|
|
for (std::vector<block_info>::iterator i = m_block_info.begin()
|
|
, end(m_block_info.end()); i != end; ++i)
|
|
{
|
|
if (i->peer == peer) i->peer = 0;
|
|
}
|
|
}
|
|
|
|
// the first bool is true if this is the only peer that has requested and downloaded
|
|
// blocks from this piece.
|
|
// the second bool is true if this is the only active peer that is requesting
|
|
// and downloading blocks from this piece. Active means having a connection.
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(194)">../src/session_impl.cpp:523</a></td><td>there's no rule here to make uTP connections not have the global or local rate limits apply to them. This used to be the default.</td></tr><tr id="194" style="display: none;" colspan="3"><td colspan="3"><h2>there's no rule here to make uTP connections not have the global or
local rate limits apply to them. This used to be the default.</h2><h4>../src/session_impl.cpp:523</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> m_global_class = m_classes.new_peer_class("global");
|
|
m_tcp_peer_class = m_classes.new_peer_class("tcp");
|
|
m_local_peer_class = m_classes.new_peer_class("local");
|
|
// local peers are always unchoked
|
|
m_classes.at(m_local_peer_class)->ignore_unchoke_slots = true;
|
|
// local peers are allowed to exceed the normal connection
|
|
// limit by 50%
|
|
m_classes.at(m_local_peer_class)->connection_limit_factor = 150;
|
|
|
|
TORRENT_ASSERT(m_global_class == session::global_peer_class_id);
|
|
TORRENT_ASSERT(m_tcp_peer_class == session::tcp_peer_class_id);
|
|
TORRENT_ASSERT(m_local_peer_class == session::local_peer_class_id);
|
|
|
|
init_peer_class_filter(true);
|
|
|
|
// TCP, SSL/TCP and I2P connections should be assigned the TCP peer class
|
|
m_peer_class_type_filter.add(peer_class_type_filter::tcp_socket, m_tcp_peer_class);
|
|
m_peer_class_type_filter.add(peer_class_type_filter::ssl_tcp_socket, m_tcp_peer_class);
|
|
m_peer_class_type_filter.add(peer_class_type_filter::i2p_socket, m_tcp_peer_class);
|
|
|
|
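		// --- editor's sketch (not in the source file) ---------------------------
		// restoring the old behaviour the todo refers to would mean excluding
		// uTP sockets from the rate-limited classes here. This assumes
		// peer_class_type_filter's disallow() and its utp socket types:
		//
		//   m_peer_class_type_filter.disallow(peer_class_type_filter::utp_socket, m_global_class);
		//   m_peer_class_type_filter.disallow(peer_class_type_filter::ssl_utp_socket, m_global_class);
		//   m_peer_class_type_filter.disallow(peer_class_type_filter::utp_socket, m_local_peer_class);
		//   m_peer_class_type_filter.disallow(peer_class_type_filter::ssl_utp_socket, m_local_peer_class);
		// --- end of sketch --------------------------------------------------------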
<div style="background: #ffff00" width="100%">
|
|
</div>#ifndef TORRENT_DISABLE_LOGGING
|
|
|
|
session_log("config: %s version: %s revision: %s"
|
|
, TORRENT_CFG_STRING
|
|
, LIBTORRENT_VERSION
|
|
, LIBTORRENT_REVISION);
|
|
|
|
#endif // TORRENT_DISABLE_LOGGING
|
|
|
|
// ---- auto-cap max connections ----
|
|
int max_files = max_open_files();
|
|
// deduct some margin for epoll/kqueue, log files,
|
|
// futexes, shared objects etc.
|
|
// 80% of the available file descriptors should go to connections
|
|
m_settings.set_int(settings_pack::connections_limit, (std::min)(
|
|
m_settings.get_int(settings_pack::connections_limit)
|
|
, (std::max)(5, (max_files - 20) * 8 / 10)));
|
|
// 20% goes towards regular files (see disk_io_thread)
|
|
#ifndef TORRENT_DISABLE_LOGGING
|
|
session_log(" max connections: %d", m_settings.get_int(settings_pack::connections_limit));
|
|
session_log(" max files: %d", max_files);
|
|
|
|
session_log(" generated peer ID: %s", m_peer_id.to_string().c_str());
|
|
#endif
|
|
|
|
boost::shared_ptr<settings_pack> copy = boost::make_shared<settings_pack>(pack);
|
|
m_io_service.post(boost::bind(&session_impl::init, this, copy));
|
|
}
|
|
|
|
void session_impl::init(boost::shared_ptr<settings_pack> pack)
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(195)">../src/session_impl.cpp:1625</a></td><td>it would be nice to reserve() these vectors up front</td></tr><tr id="195" style="display: none;" colspan="3"><td colspan="3"><h2>it would be nice to reserve() these vectors up front</h2><h4>../src/session_impl.cpp:1625</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">
|
|
bandwidth_channel* ch = &p->channel[peer_connection::download_channel];
|
|
if (use_quota_overhead(ch, amount_down))
|
|
ret |= 1 << peer_connection::download_channel;
|
|
ch = &p->channel[peer_connection::upload_channel];
|
|
if (use_quota_overhead(ch, amount_up))
|
|
ret |= 1 << peer_connection::upload_channel;
|
|
}
|
|
return ret;
|
|
}
|
|
|
|
// session_impl is responsible for deleting 'pack'
|
|
void session_impl::apply_settings_pack(boost::shared_ptr<settings_pack> pack)
|
|
{
|
|
apply_settings_pack_impl(*pack);
|
|
}
|
|
|
|
settings_pack session_impl::get_settings() const
|
|
{
|
|
settings_pack ret;
|
|
<div style="background: #ffff00" width="100%"> for (int i = settings_pack::string_type_base;
|
|
</div> i < settings_pack::max_string_setting_internal; ++i)
|
|
{
|
|
ret.set_str(i, m_settings.get_str(i));
|
|
}
|
|
for (int i = settings_pack::int_type_base;
|
|
i < settings_pack::max_int_setting_internal; ++i)
|
|
{
|
|
ret.set_int(i, m_settings.get_int(i));
|
|
}
|
|
for (int i = settings_pack::bool_type_base;
|
|
i < settings_pack::max_bool_setting_internal; ++i)
|
|
{
|
|
ret.set_bool(i, m_settings.get_bool(i));
|
|
}
|
|
return ret;
|
|
}
|
|
|
|
void session_impl::apply_settings_pack_impl(settings_pack const& pack)
|
|
{
|
|
bool reopen_listen_port =
|
|
(pack.has_val(settings_pack::ssl_listen)
|
|
&& pack.get_int(settings_pack::ssl_listen)
|
|
!= m_settings.get_int(settings_pack::ssl_listen))
|
|
|| (pack.has_val(settings_pack::listen_interfaces)
|
|
&& pack.get_str(settings_pack::listen_interfaces)
|
|
!= m_settings.get_str(settings_pack::listen_interfaces));
|
|
|
|
apply_pack(&pack, m_settings, this);
|
|
m_disk_thread.set_settings(&pack, m_alerts);
|
|
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(196)">../src/session_impl.cpp:1862</a></td><td>instead of having a special case for this, just make the default listen interfaces be "0.0.0.0:6881,[::]:6881" and use the generic path. That would even allow for not listening at all.</td></tr><tr id="196" style="display: none;" colspan="3"><td colspan="3"><h2>instead of having a special case for this, just make the
default listen interfaces be "0.0.0.0:6881,[::]:6881" and use
the generic path. That would even allow for not listening at all.</h2><h4>../src/session_impl.cpp:1862</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> error_code ec;
|
|
|
|
int listen_port_retries = m_settings.get_int(settings_pack::max_retry_port_bind);
|
|
|
|
retry:
|
|
|
|
// close the open listen sockets
|
|
// close the listen sockets
|
|
for (std::list<listen_socket_t>::iterator i = m_listen_sockets.begin()
|
|
, end(m_listen_sockets.end()); i != end; ++i)
|
|
i->sock->close(ec);
|
|
m_listen_sockets.clear();
|
|
m_stats_counters.set_value(counters::has_incoming_connections, 0);
|
|
ec.clear();
|
|
|
|
if (m_abort) return;
|
|
|
|
m_ipv6_interface = tcp::endpoint();
|
|
m_ipv4_interface = tcp::endpoint();
|
|
|
|
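		// --- editor's sketch (not in the source file) ---------------------------
		// the generic path the todo refers to: if the built-in default for
		// settings_pack::listen_interfaces were "0.0.0.0:6881,[::]:6881", the
		// empty-list special case below could go away. From client code the
		// same effect is:
		//
		//   settings_pack p;
		//   p.set_str(settings_pack::listen_interfaces, "0.0.0.0:6881,[::]:6881");
		// --- end of sketch --------------------------------------------------------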
<div style="background: #ffff00" width="100%"> if (m_listen_interfaces.empty())
|
|
</div> {
|
|
// this means we should open two listen sockets
|
|
// one for IPv4 and one for IPv6
|
|
listen_socket_t s = setup_listener("0.0.0.0", boost::asio::ip::tcp::v4()
|
|
, m_listen_interface.port()
|
|
, flags, ec);
|
|
|
|
if (!ec && s.sock)
|
|
{
|
|
// update the listen_interface member with the
|
|
// actual port we ended up listening on, so that the other
|
|
// sockets can be bound to the same one
|
|
m_listen_interface.port(s.external_port);
|
|
|
|
TORRENT_ASSERT(!m_abort);
|
|
m_listen_sockets.push_back(s);
|
|
}
|
|
|
|
#ifdef TORRENT_USE_OPENSSL
|
|
if (m_settings.get_int(settings_pack::ssl_listen))
|
|
{
|
|
s = setup_listener("0.0.0.0", boost::asio::ip::tcp::v4()
|
|
, m_settings.get_int(settings_pack::ssl_listen)
|
|
, flags | open_ssl_socket, ec);
|
|
|
|
if (!ec && s.sock)
|
|
{
|
|
TORRENT_ASSERT(!m_abort);
|
|
m_listen_sockets.push_back(s);
|
|
}
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(197)">../src/session_impl.cpp:2794</a></td><td>should this function take a shared_ptr instead?</td></tr><tr id="197" style="display: none;" colspan="3"><td colspan="3"><h2>should this function take a shared_ptr instead?</h2><h4>../src/session_impl.cpp:2794</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">#if defined TORRENT_ASIO_DEBUGGING
|
|
complete_async("session_impl::on_socks_accept");
|
|
#endif
|
|
m_socks_listen_socket.reset();
|
|
if (e == boost::asio::error::operation_aborted) return;
|
|
if (e)
|
|
{
|
|
if (m_alerts.should_post<listen_failed_alert>())
|
|
m_alerts.emplace_alert<listen_failed_alert>("socks5"
|
|
, -1, listen_failed_alert::accept, e
|
|
, listen_failed_alert::socks5);
|
|
return;
|
|
}
|
|
open_new_incoming_socks_connection();
|
|
incoming_connection(s);
|
|
}
|
|
|
|
// if cancel_with_cq is set, the peer connection is
|
|
// currently expected to be scheduled for a connection
|
|
// with the connection queue, and should be cancelled
|
|
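	// --- editor's sketch (not in the source file) ---------------------------
	// the signature change the todo asks about; callers could pass the result
	// of peer_connection::self() directly instead of a raw pointer:
	//
	//   void close_connection(boost::shared_ptr<peer_connection> p
	//       , error_code const& ec);
	// --- end of sketch --------------------------------------------------------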
<div style="background: #ffff00" width="100%"> void session_impl::close_connection(peer_connection* p
|
|
</div> , error_code const& ec)
|
|
{
|
|
TORRENT_ASSERT(is_single_thread());
|
|
boost::shared_ptr<peer_connection> sp(p->self());
|
|
|
|
// someone else is holding a reference, it's important that
|
|
// it's destructed from the network thread. Make sure the
|
|
// last reference is held by the network thread.
|
|
if (!sp.unique())
|
|
m_undead_peers.push_back(sp);
|
|
|
|
// too expensive
|
|
// INVARIANT_CHECK;
|
|
|
|
#ifdef TORRENT_DEBUG
|
|
// for (aux::session_impl::torrent_map::const_iterator i = m_torrents.begin()
|
|
// , end(m_torrents.end()); i != end; ++i)
|
|
// TORRENT_ASSERT(!i->second->has_peer((peer_connection*)p));
|
|
#endif
|
|
|
|
#ifndef TORRENT_DISABLE_LOGGING
|
|
session_log(" CLOSING CONNECTION %s : %s"
|
|
, print_endpoint(p->remote()).c_str(), ec.message().c_str());
|
|
#else
|
|
TORRENT_UNUSED(ec);
|
|
#endif
|
|
|
|
TORRENT_ASSERT(p->is_disconnecting());
|
|
|
|
TORRENT_ASSERT(sp.use_count() > 0);
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(198)">../src/session_impl.cpp:3162</a></td><td>have a separate list for these connections, instead of having to loop through all of them</td></tr><tr id="198" style="display: none;" colspan="3"><td colspan="3"><h2>have a separate list for these connections, instead of having to loop through all of them</h2><h4>../src/session_impl.cpp:3162</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> if (m_auto_manage_time_scaler < 0)
|
|
{
|
|
INVARIANT_CHECK;
|
|
m_auto_manage_time_scaler = settings().get_int(settings_pack::auto_manage_interval);
|
|
recalculate_auto_managed_torrents();
|
|
}
|
|
|
|
// --------------------------------------------------------------
|
|
// check for incoming connections that might have timed out
|
|
// --------------------------------------------------------------
|
|
|
|
for (connection_map::iterator i = m_connections.begin();
|
|
i != m_connections.end();)
|
|
{
|
|
peer_connection* p = (*i).get();
|
|
++i;
|
|
// ignore connections that already have a torrent, since they
|
|
// are ticked through the torrents' second_tick
|
|
if (!p->associated_torrent().expired()) continue;
|
|
|
|
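			// --- editor's sketch (not in the source file) -----------------------
			// the separate list the todo suggests: a hypothetical member such as
			//
			//   std::vector<boost::shared_ptr<peer_connection> > m_incoming_handshakes;
			//
			// populated in incoming_connection() and drained when a connection
			// attaches to a torrent or disconnects, would let this loop visit
			// only the connections that can actually time out here.
			// --- end of sketch --------------------------------------------------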
<div style="background: #ffff00" width="100%"> int timeout = m_settings.get_int(settings_pack::handshake_timeout);
|
|
</div>#if TORRENT_USE_I2P
|
|
timeout *= is_i2p(*p->get_socket()) ? 4 : 1;
|
|
#endif
|
|
if (m_last_tick - p->connected_time () > seconds(timeout))
|
|
p->disconnect(errors::timed_out, op_bittorrent);
|
|
}
|
|
|
|
// --------------------------------------------------------------
|
|
// second_tick every torrent (that wants it)
|
|
// --------------------------------------------------------------
|
|
|
|
#if TORRENT_DEBUG_STREAMING > 0
|
|
printf("\033[2J\033[0;0H");
|
|
#endif
|
|
|
|
std::vector<torrent*>& want_tick = m_torrent_lists[torrent_want_tick];
|
|
for (int i = 0; i < int(want_tick.size()); ++i)
|
|
{
|
|
torrent& t = *want_tick[i];
|
|
TORRENT_ASSERT(t.want_tick());
|
|
TORRENT_ASSERT(!t.is_aborted());
|
|
|
|
t.second_tick(tick_interval_ms);
|
|
|
|
// if the call to second_tick caused the torrent
|
|
// to no longer want to be ticked (i.e. it was
|
|
// removed from the list) we need to back up the counter
|
|
// to not miss the torrent after it
|
|
if (!t.want_tick()) --i;
|
|
}
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(199)">../src/session_impl.cpp:3195</a></td><td>this should apply to all bandwidth channels</td></tr><tr id="199" style="display: none;" colspan="3"><td colspan="3"><h2>this should apply to all bandwidth channels</h2><h4>../src/session_impl.cpp:3195</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">#if TORRENT_DEBUG_STREAMING > 0
|
|
printf("\033[2J\033[0;0H");
|
|
#endif
|
|
|
|
std::vector<torrent*>& want_tick = m_torrent_lists[torrent_want_tick];
|
|
for (int i = 0; i < int(want_tick.size()); ++i)
|
|
{
|
|
torrent& t = *want_tick[i];
|
|
TORRENT_ASSERT(t.want_tick());
|
|
TORRENT_ASSERT(!t.is_aborted());
|
|
|
|
t.second_tick(tick_interval_ms);
|
|
|
|
// if the call to second_tick caused the torrent
|
|
// to no longer want to be ticked (i.e. it was
|
|
// removed from the list) we need to back up the counter
|
|
// to not miss the torrent after it
|
|
if (!t.want_tick()) --i;
|
|
}
|
|
|
|
<div style="background: #ffff00" width="100%"> if (m_settings.get_bool(settings_pack::rate_limit_ip_overhead))
|
|
</div> {
|
|
int up_limit = upload_rate_limit(m_global_class);
|
|
int down_limit = download_rate_limit(m_global_class);
|
|
|
|
if (down_limit > 0
|
|
&& m_stat.download_ip_overhead() >= down_limit
|
|
&& m_alerts.should_post<performance_alert>())
|
|
{
|
|
m_alerts.emplace_alert<performance_alert>(torrent_handle()
|
|
, performance_alert::download_limit_too_low);
|
|
}
|
|
|
|
if (up_limit > 0
|
|
&& m_stat.upload_ip_overhead() >= up_limit
|
|
&& m_alerts.should_post<performance_alert>())
|
|
{
|
|
m_alerts.emplace_alert<performance_alert>(torrent_handle()
|
|
, performance_alert::upload_limit_too_low);
|
|
}
|
|
}
|
|
|
|
m_peak_up_rate = (std::max)(m_stat.upload_rate(), m_peak_up_rate);
|
|
m_peak_down_rate = (std::max)(m_stat.download_rate(), m_peak_down_rate);
|
|
|
|
m_stat.second_tick(tick_interval_ms);
|
|
|
|
// --------------------------------------------------------------
|
|
// scrape paused torrents that are auto managed
|
|
// (unless the session is paused)
|
|
// --------------------------------------------------------------
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(200)">../src/session_impl.cpp:3989</a></td><td>use a lower limit than m_settings.connections_limit to reserve 10% or so of the connection slots for incoming connections</td></tr><tr id="200" style="display: none;" colspan="3"><td colspan="3"><h2>use a lower limit than m_settings.connections_limit
to reserve 10% or so of the connection slots for incoming
connections</h2><h4>../src/session_impl.cpp:3989</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> // robin fashion, so that every torrent is equally likely to connect to a
|
|
// peer
|
|
|
|
// boost connections are connections made by torrent connection
|
|
// boost, which are done immediately on a tracker response. These
|
|
// connections needs to be deducted from this second
|
|
if (m_boost_connections > 0)
|
|
{
|
|
if (m_boost_connections > max_connections)
|
|
{
|
|
m_boost_connections -= max_connections;
|
|
max_connections = 0;
|
|
}
|
|
else
|
|
{
|
|
max_connections -= m_boost_connections;
|
|
m_boost_connections = 0;
|
|
}
|
|
}
|
|
|
|
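		// --- editor's sketch (not in the source file) ---------------------------
		// reserving roughly 10% of the connection slots for incoming peers, as
		// the todo suggests; this would replace the computation just below:
		//
		//   int const conn_limit = m_settings.get_int(settings_pack::connections_limit);
		//   int const outgoing_limit = conn_limit - (std::max)(1, conn_limit / 10);
		//   int limit = outgoing_limit - num_connections();
		// --- end of sketch --------------------------------------------------------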
<div style="background: #ffff00" width="100%"> int limit = m_settings.get_int(settings_pack::connections_limit)
|
|
</div> - num_connections();
|
|
|
|
// this logic is here to smooth out the number of new connection
|
|
// attempts over time, to prevent connecting a large number of
|
|
// sockets, wait 10 seconds, and then try again
|
|
if (m_settings.get_bool(settings_pack::smooth_connects) && max_connections > (limit+1) / 2)
|
|
max_connections = (limit+1) / 2;
|
|
|
|
std::vector<torrent*>& want_peers_download = m_torrent_lists[torrent_want_peers_download];
|
|
std::vector<torrent*>& want_peers_finished = m_torrent_lists[torrent_want_peers_finished];
|
|
|
|
// if no torrent want any peers, just return
|
|
if (want_peers_download.empty() && want_peers_finished.empty()) return;
|
|
|
|
// if we don't have any connection attempt quota, return
|
|
if (max_connections <= 0) return;
|
|
|
|
INVARIANT_CHECK;
|
|
|
|
int steps_since_last_connect = 0;
|
|
int num_torrents = int(want_peers_finished.size() + want_peers_download.size());
|
|
for (;;)
|
|
{
|
|
if (m_next_downloading_connect_torrent >= int(want_peers_download.size()))
|
|
m_next_downloading_connect_torrent = 0;
|
|
|
|
if (m_next_finished_connect_torrent >= int(want_peers_finished.size()))
|
|
m_next_finished_connect_torrent = 0;
|
|
|
|
torrent* t = NULL;
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(201)">../src/session_impl.cpp:4141</a></td><td>post a message to have this happen immediately instead of waiting for the next tick</td></tr><tr id="201" style="display: none;" colspan="3"><td colspan="3"><h2>post a message to have this happen
immediately instead of waiting for the next tick</h2><h4>../src/session_impl.cpp:4141</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> continue;
|
|
}
|
|
|
|
if (!p->is_peer_interested()
|
|
|| p->is_disconnecting()
|
|
|| p->is_connecting())
|
|
{
|
|
// this peer is not unchokable. So, if it's unchoked
|
|
// already, make sure to choke it.
|
|
if (p->is_choked())
|
|
{
|
|
p->reset_choke_counters();
|
|
continue;
|
|
}
|
|
if (pi && pi->optimistically_unchoked)
|
|
{
|
|
m_stats_counters.inc_stats_counter(counters::num_peers_up_unchoked_optimistic, -1);
|
|
pi->optimistically_unchoked = false;
|
|
// force a new optimistic unchoke
|
|
m_optimistic_unchoke_time_scaler = 0;
|
|
<div style="background: #ffff00" width="100%"> }
|
|
</div> t->choke_peer(*p);
|
|
p->reset_choke_counters();
|
|
continue;
|
|
}
|
|
|
|
peers.push_back(p.get());
|
|
}
|
|
|
|
// the unchoker wants an estimate of our upload rate capacity
|
|
// (used by bittyrant)
|
|
int max_upload_rate = upload_rate_limit(m_global_class);
|
|
if (m_settings.get_int(settings_pack::choking_algorithm)
|
|
== settings_pack::bittyrant_choker
|
|
&& max_upload_rate == 0)
|
|
{
|
|
// we don't know at what rate we can upload. If we have a
|
|
// measurement of the peak, use that + 10kB/s, otherwise
|
|
// assume 20 kB/s
|
|
max_upload_rate = (std::max)(20000, m_peak_up_rate + 10000);
|
|
if (m_alerts.should_post<performance_alert>())
|
|
m_alerts.emplace_alert<performance_alert>(torrent_handle()
|
|
, performance_alert::bittyrant_with_no_uplimit);
|
|
}
|
|
|
|
int const allowed_upload_slots = unchoke_sort(peers, max_upload_rate
|
|
, unchoke_interval, m_settings);
|
|
m_stats_counters.set_value(counters::num_unchoke_slots
|
|
, allowed_upload_slots);
|
|
|
|
#ifndef TORRENT_DISABLE_LOGGING
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(202)">../src/session_impl.cpp:4525</a></td><td>it might be a nice feature here to limit the number of torrents to send in a single update. By just posting the first n torrents, they would nicely be round-robined because the torrent lists are always pushed back. Perhaps the status_update_alert could even have a fixed array of n entries rather than a vector, to further improve memory locality.</td></tr><tr id="202" style="display: none;" colspan="3"><td colspan="3"><h2>it might be a nice feature here to limit the number of torrents
to send in a single update. By just posting the first n torrents, they
would nicely be round-robined because the torrent lists are always
pushed back. Perhaps the status_update_alert could even have a fixed
array of n entries rather than a vector, to further improve memory
locality.</h2><h4>../src/session_impl.cpp:4525</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> t->status(&*i, flags);
|
|
}
|
|
}
|
|
|
|
void session_impl::post_torrent_updates(boost::uint32_t flags)
|
|
{
|
|
INVARIANT_CHECK;
|
|
|
|
TORRENT_ASSERT(is_single_thread());
|
|
|
|
std::vector<torrent*>& state_updates
|
|
= m_torrent_lists[aux::session_impl::torrent_state_updates];
|
|
|
|
#if TORRENT_USE_ASSERTS
|
|
m_posting_torrent_updates = true;
|
|
#endif
|
|
|
|
std::vector<torrent_status> status;
|
|
status.reserve(state_updates.size());
|
|
|
|
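		// --- editor's sketch (not in the source file) ---------------------------
		// capping how many torrents go into a single state_update_alert, as the
		// todo suggests. The cap is arbitrary; torrents beyond it would stay in
		// state_updates (only the posted entries get cleared) and be picked up
		// by the next call, giving the round-robin behaviour described above.
		// The loop below would then only consume the first num_to_post entries.
		int const max_updates = 500;
		int const num_to_post = (std::min)(int(state_updates.size()), max_updates);
		// --- end of sketch --------------------------------------------------------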
<div style="background: #ffff00" width="100%"> for (std::vector<torrent*>::iterator i = state_updates.begin()
|
|
</div> , end(state_updates.end()); i != end; ++i)
|
|
{
|
|
torrent* t = *i;
|
|
TORRENT_ASSERT(t->m_links[aux::session_impl::torrent_state_updates].in_list());
|
|
status.push_back(torrent_status());
|
|
// querying accurate download counters may require
|
|
// the torrent to be loaded. Loading a torrent, and evicting another
|
|
// one will lead to calling state_updated(), which screws with
|
|
// this list while we're working on it, and break things
|
|
t->status(&status.back(), flags);
|
|
t->clear_in_state_update();
|
|
}
|
|
state_updates.clear();
|
|
|
|
#if TORRENT_USE_ASSERTS
|
|
m_posting_torrent_updates = false;
|
|
#endif
|
|
|
|
m_alerts.emplace_alert<state_update_alert>(status);
|
|
}
|
|
|
|
void session_impl::post_session_stats()
|
|
{
|
|
m_disk_thread.update_stats_counters(m_stats_counters);
|
|
|
|
#ifndef TORRENT_DISABLE_DHT
|
|
if (m_dht)
|
|
m_dht->update_stats_counters(m_stats_counters);
|
|
#endif
|
|
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(203)">../src/session_impl.cpp:4738</a></td><td>this logic could probably be less spaghetti looking by being moved to a function with early exits</td></tr><tr id="203" style="display: none;" colspan="3"><td colspan="3"><h2>this logic could probably be less spaghetti looking by being
moved to a function with early exits</h2><h4>../src/session_impl.cpp:4738</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> }
|
|
|
|
// figure out the info hash of the torrent
|
|
sha1_hash const* ih = 0;
|
|
sha1_hash tmp;
|
|
if (params.ti) ih = &params.ti->info_hash();
|
|
else if (!params.url.empty())
|
|
{
|
|
// in order to avoid info-hash collisions, for
|
|
// torrents where we don't have an info-hash, but
|
|
// just a URL, set the temporary info-hash to the
|
|
// hash of the URL. This will be changed once we
|
|
// have the actual .torrent file
|
|
tmp = hasher(&params.url[0], params.url.size()).final();
|
|
ih = &tmp;
|
|
}
|
|
else ih = &params.info_hash;
|
|
|
|
// we don't have a torrent file. If the user provided
|
|
// resume data, there may be some metadata in there
|
|
<div style="background: #ffff00" width="100%"> if ((!params.ti || !params.ti->is_valid())
|
|
</div> && !params.resume_data.empty())
|
|
{
|
|
int pos;
|
|
error_code err;
|
|
bdecode_node root;
|
|
bdecode_node info;
|
|
#ifndef TORRENT_DISABLE_LOGGING
|
|
session_log("adding magnet link with resume data");
|
|
#endif
|
|
if (bdecode(&params.resume_data[0], &params.resume_data[0]
|
|
+ params.resume_data.size(), root, err, &pos) == 0
|
|
&& root.type() == bdecode_node::dict_t
|
|
&& (info = root.dict_find_dict("info")))
|
|
{
|
|
#ifndef TORRENT_DISABLE_LOGGING
|
|
session_log("found metadata in resume data");
|
|
#endif
|
|
// verify the info-hash of the metadata stored in the resume file matches
|
|
// the torrent we're loading
|
|
|
|
std::pair<char const*, int> buf = info.data_section();
|
|
sha1_hash resume_ih = hasher(buf.first, buf.second).final();
|
|
|
|
// if url is set, the info_hash is not actually the info-hash of the
|
|
// torrent, but the hash of the URL, until we have the full torrent
|
|
// only require the info-hash to match if we actually passed in one
|
|
if (resume_ih == params.info_hash
|
|
|| !params.url.empty()
|
|
|| params.info_hash.is_all_zeros())
|
|
{
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(204)">../src/session_impl.cpp:5414</a></td><td>perhaps this function should not exist when logging is disabled</td></tr><tr id="204" style="display: none;" colspan="3"><td colspan="3"><h2>perhaps this function should not exist when logging is disabled</h2><h4>../src/session_impl.cpp:5414</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> TORRENT_ASSERT(is_single_thread());
|
|
|
|
INVARIANT_CHECK;
|
|
|
|
boost::shared_ptr<torrent> t = find_torrent(ih).lock();
|
|
if (!t) return;
|
|
// don't add peers from lsd to private torrents
|
|
if (t->torrent_file().priv() || (t->torrent_file().is_i2p()
|
|
&& !m_settings.get_bool(settings_pack::allow_i2p_mixed))) return;
|
|
|
|
#ifndef TORRENT_DISABLE_LOGGING
|
|
session_log("added peer from local discovery: %s", print_endpoint(peer).c_str());
|
|
#endif
|
|
t->add_peer(peer, peer_info::lsd);
|
|
t->do_connect_boost();
|
|
|
|
if (m_alerts.should_post<lsd_peer_alert>())
|
|
m_alerts.emplace_alert<lsd_peer_alert>(t->get_handle(), peer);
|
|
}
|
|
|
|
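	// --- editor's sketch (not in the source file) ---------------------------
	// the todo could be addressed by guarding the member entirely, e.g. in
	// session_impl's declaration (and at the natpmp/upnp call sites):
	//
	//   #ifndef TORRENT_DISABLE_LOGGING
	//   void on_port_map_log(char const* msg, int map_transport);
	//   #endif
	//
	// so the empty-body version below is never compiled at all.
	// --- end of sketch --------------------------------------------------------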
<div style="background: #ffff00" width="100%"> void session_impl::on_port_map_log(
|
|
</div> char const* msg, int map_transport)
|
|
{
|
|
#ifndef TORRENT_DISABLE_LOGGING
|
|
TORRENT_ASSERT(map_transport >= 0 && map_transport <= 1);
|
|
// log message
|
|
if (m_alerts.should_post<portmap_log_alert>())
|
|
m_alerts.emplace_alert<portmap_log_alert>(map_transport, msg);
|
|
#else
|
|
TORRENT_UNUSED(msg);
|
|
TORRENT_UNUSED(map_transport);
|
|
#endif
|
|
}
|
|
|
|
void session_impl::on_port_mapping(int mapping, address const& ip, int port
|
|
, error_code const& ec, int map_transport)
|
|
{
|
|
TORRENT_ASSERT(is_single_thread());
|
|
|
|
TORRENT_ASSERT(map_transport >= 0 && map_transport <= 1);
|
|
|
|
if (mapping == m_udp_mapping[map_transport] && port != 0)
|
|
{
|
|
m_external_udp_port = port;
|
|
if (m_alerts.should_post<portmap_alert>())
|
|
m_alerts.emplace_alert<portmap_alert>(mapping, port
|
|
, map_transport);
|
|
return;
|
|
}
|
|
|
|
if (mapping == m_tcp_mapping[map_transport] && port != 0)
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(205)">../src/storage.cpp:895</a></td><td>make this more generic, so it works not just when files have been renamed, but also when they have been merged into a single file, for instance. Maybe use the same format as .torrent files and reuse some code from torrent_info</td></tr><tr id="205" style="display: none;" colspan="3"><td colspan="3"><h2>make this more generic, so it works not just when files have been
renamed, but also when they have been merged into a single file, for instance.
Maybe use the same format as .torrent files and reuse some code from torrent_info</h2><h4>../src/storage.cpp:895</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> else
|
|
{
|
|
ec.ec = error;
|
|
ec.file = i;
|
|
ec.operation = storage_error::stat;
|
|
m_stat_cache.set_error(i);
|
|
}
|
|
}
|
|
|
|
fl.push_back(entry(entry::list_t));
|
|
entry::list_type& p = fl.back().list();
|
|
p.push_back(entry(file_size));
|
|
p.push_back(entry(file_time));
|
|
}
|
|
}
|
|
|
|
bool default_storage::verify_resume_data(bdecode_node const& rd
|
|
, std::vector<std::string> const* links
|
|
, storage_error& ec)
|
|
{
|
|
<div style="background: #ffff00" width="100%"> bdecode_node mapped_files = rd.dict_find_list("mapped_files");
|
|
</div> if (mapped_files && mapped_files.list_size() == m_files.num_files())
|
|
{
|
|
m_mapped_files.reset(new file_storage(m_files));
|
|
for (int i = 0; i < m_files.num_files(); ++i)
|
|
{
|
|
std::string new_filename = mapped_files.list_string_value_at(i);
|
|
if (new_filename.empty()) continue;
|
|
m_mapped_files->rename_file(i, new_filename);
|
|
}
|
|
}
|
|
|
|
bdecode_node file_priority = rd.dict_find_list("file_priority");
|
|
if (file_priority && file_priority.list_size()
|
|
== files().num_files())
|
|
{
|
|
m_file_priority.resize(file_priority.list_size());
|
|
for (int i = 0; i < file_priority.list_size(); ++i)
|
|
m_file_priority[i] = boost::uint8_t(file_priority.list_int_value_at(i, 1));
|
|
}
|
|
|
|
bdecode_node file_sizes_ent = rd.dict_find_list("file sizes");
|
|
if (file_sizes_ent == 0)
|
|
{
|
|
ec.ec = errors::missing_file_sizes;
|
|
ec.file = -1;
|
|
ec.operation = storage_error::check_resume;
|
|
return false;
|
|
}
|
|
|
|
if (file_sizes_ent.list_size() == 0)
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(206)">../src/storage.cpp:1234</a></td><td>if everything moves OK, except for the partfile we currently won't update the save path, which breaks things. it would probably make more sense to give up on the partfile</td></tr><tr id="206" style="display: none;" colspan="3"><td colspan="3"><h2>if everything moves OK, except for the partfile
|
|
we currently won't update the save path, which breaks things.
|
|
it would probably make more sense to give up on the partfile</h2><h4>../src/storage.cpp:1234</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> if (ec)
|
|
{
|
|
ec.file = i->second;
|
|
ec.operation = storage_error::copy;
|
|
}
|
|
else
|
|
{
|
|
// ignore errors when removing
|
|
error_code ignore;
|
|
remove_all(old_path, ignore);
|
|
}
|
|
break;
|
|
}
|
|
}
|
|
}
|
|
|
|
if (!ec)
|
|
{
|
|
if (m_part_file)
|
|
{
|
|
<div style="background: #ffff00" width="100%"> m_part_file->move_partfile(save_path, ec.ec);
|
|
</div> if (ec)
|
|
{
|
|
ec.file = -1;
|
|
ec.operation = storage_error::partfile_move;
|
|
return piece_manager::fatal_disk_error;
|
|
}
|
|
}
|
|
|
|
m_save_path = save_path;
|
|
}
|
|
return ret;
|
|
}
|
|
|
|
int default_storage::readv(file::iovec_t const* bufs, int num_bufs
|
|
, int piece, int offset, int flags, storage_error& ec)
|
|
{
|
|
read_fileop op(*this, flags);
|
|
|
|
#ifdef TORRENT_SIMULATE_SLOW_READ
|
|
boost::thread::sleep(boost::get_system_time()
|
|
+ boost::posix_time::milliseconds(1000));
|
|
#endif
|
|
return readwritev(files(), bufs, piece, offset, num_bufs, op, ec);
|
|
}
|
|
|
|
int default_storage::writev(file::iovec_t const* bufs, int num_bufs
|
|
, int piece, int offset, int flags, storage_error& ec)
|
|
{
|
|
write_fileop op(*this, flags);
|
|
return readwritev(files(), bufs, piece, offset, num_bufs, op, ec);
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(207)">../src/string_util.cpp:60</a></td><td>warning C4146: unary minus operator applied to unsigned type, result still unsigned</td></tr><tr id="207" style="display: none;" colspan="3"><td colspan="3"><h2>warning C4146: unary minus operator applied to unsigned type,
|
|
result still unsigned</h2><h4>../src/string_util.cpp:60</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">
|
|
#include <boost/tuple/tuple.hpp>
|
|
|
|
#include <cstdlib> // for malloc
|
|
#include <cstring> // for memmov/strcpy/strlen
|
|
|
|
#include "libtorrent/aux_/disable_warnings_pop.hpp"
|
|
|
|
namespace libtorrent
|
|
{
|
|
|
|
// lexical_cast's result depends on the locale. We need
|
|
// a well defined result
|
|
boost::array<char, 4 + std::numeric_limits<boost::int64_t>::digits10>
|
|
to_string(boost::int64_t n)
|
|
{
|
|
boost::array<char, 4 + std::numeric_limits<boost::int64_t>::digits10> ret;
|
|
char *p = &ret.back();
|
|
*p = '\0';
|
|
boost::uint64_t un = n;
|
|
<div style="background: #ffff00" width="100%"> if (n < 0) un = -un;
|
|
</div> do {
|
|
*--p = '0' + un % 10;
|
|
un /= 10;
|
|
} while (un);
|
|
if (n < 0) *--p = '-';
|
|
std::memmove(&ret[0], p, &ret.back() - p + 1);
|
|
return ret;
|
|
}
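// editor's sketch (not part of the original source): the C4146 warning
// flagged above comes from applying unary minus to the unsigned value.
// A hedged alternative with the same result is to negate via two's
// complement, which is well defined for unsigned types:
//
//   boost::uint64_t un = static_cast<boost::uint64_t>(n);
//   if (n < 0) un = ~un + 1; // same bits as -un, no unary minus on unsigned
//
// this also handles the most negative boost::int64_t correctly, since the
// magnitude is computed entirely in unsigned arithmetic.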
|
|
|
|
bool is_alpha(char c)
|
|
{
|
|
return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z');
|
|
}
|
|
|
|
bool is_print(char c)
|
|
{
|
|
return c >= 32 && c < 127;
|
|
}
|
|
|
|
bool is_space(char c)
|
|
{
|
|
static const char* ws = " \t\n\r\f\v";
|
|
return strchr(ws, c) != 0;
|
|
}
|
|
|
|
char to_lower(char c)
|
|
{
|
|
return (c >= 'A' && c <= 'Z') ? c - 'A' + 'a' : c;
|
|
}
|
|
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(208)">../src/torrent.cpp:101</a></td><td>factor out cache_status to its own header</td></tr><tr id="208" style="display: none;" colspan="3"><td colspan="3"><h2>factor out cache_status to its own header</h2><h4>../src/torrent.cpp:101</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">#include "libtorrent/extensions.hpp"
|
|
#include "libtorrent/aux_/session_interface.hpp"
|
|
#include "libtorrent/instantiate_connection.hpp"
|
|
#include "libtorrent/assert.hpp"
|
|
#include "libtorrent/broadcast_socket.hpp"
|
|
#include "libtorrent/kademlia/dht_tracker.hpp"
|
|
#include "libtorrent/peer_info.hpp"
|
|
#include "libtorrent/http_connection.hpp"
|
|
#include "libtorrent/random.hpp"
|
|
#include "libtorrent/peer_class.hpp" // for peer_class
|
|
#include "libtorrent/socket_io.hpp" // for read_*_endpoint
|
|
#include "libtorrent/ip_filter.hpp"
|
|
#include "libtorrent/request_blocks.hpp"
|
|
#include "libtorrent/performance_counters.hpp" // for counters
|
|
#include "libtorrent/resolver_interface.hpp"
|
|
#include "libtorrent/alloca.hpp"
|
|
#include "libtorrent/resolve_links.hpp"
|
|
#include "libtorrent/aux_/file_progress.hpp"
|
|
#include "libtorrent/alert_manager.hpp"
|
|
#include "libtorrent/disk_interface.hpp"
|
|
<div style="background: #ffff00" width="100%">#include "libtorrent/disk_io_thread.hpp" // for cache_status
|
|
</div>
|
|
#ifndef TORRENT_DISABLE_LOGGING
|
|
#include "libtorrent/aux_/session_impl.hpp" // for tracker_logger
|
|
#endif
|
|
|
|
using namespace libtorrent;
|
|
using boost::tuples::tuple;
|
|
using boost::tuples::get;
|
|
using boost::tuples::make_tuple;
|
|
|
|
namespace libtorrent
|
|
{
|
|
namespace {
|
|
|
|
int root2(int x)
|
|
{
|
|
int ret = 0;
|
|
x >>= 1;
|
|
while (x > 0)
|
|
{
|
|
// if this assert triggers, the block size
|
|
// is not an even 2 exponent!
|
|
TORRENT_ASSERT(x == 1 || (x & 1) == 0);
|
|
++ret;
|
|
x >>= 1;
|
|
}
|
|
return ret;
|
|
}
|
|
|
|
} // anonymous namespace
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(209)">../src/torrent.cpp:468</a></td><td>if the existing torrent doesn't have metadata, insert the metadata we just downloaded into it.</td></tr><tr id="209" style="display: none;" colspan="3"><td colspan="3"><h2>if the existing torrent doesn't have metadata, insert
|
|
the metadata we just downloaded into it.</h2><h4>../src/torrent.cpp:468</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">
|
|
m_torrent_file = tf;
|
|
|
|
// now, we might already have this torrent in the session.
|
|
boost::shared_ptr<torrent> t = m_ses.find_torrent(m_torrent_file->info_hash()).lock();
|
|
if (t)
|
|
{
|
|
if (!m_uuid.empty() && t->uuid().empty())
|
|
t->set_uuid(m_uuid);
|
|
if (!m_url.empty() && t->url().empty())
|
|
t->set_url(m_url);
|
|
if (!m_source_feed_url.empty() && t->source_feed_url().empty())
|
|
t->set_source_feed_url(m_source_feed_url);
|
|
|
|
// insert this torrent in the uuid index
|
|
if (!m_uuid.empty() || !m_url.empty())
|
|
{
|
|
m_ses.insert_uuid_torrent(m_uuid.empty() ? m_url : m_uuid, t);
|
|
}
|
|
|
|
<div style="background: #ffff00" width="100%">
|
|
</div> set_error(error_code(errors::duplicate_torrent, get_libtorrent_category()), error_file_url);
|
|
abort();
|
|
return;
|
|
}
|
|
|
|
m_ses.insert_torrent(m_torrent_file->info_hash(), me, m_uuid);
|
|
|
|
TORRENT_ASSERT(num_torrents == int(m_ses.m_torrents.size()));
|
|
|
|
// if the user added any trackers while downloading the
|
|
// .torrent file, merge them into the new tracker list
|
|
std::vector<announce_entry> new_trackers = m_torrent_file->trackers();
|
|
for (std::vector<announce_entry>::iterator i = m_trackers.begin()
|
|
, end(m_trackers.end()); i != end; ++i)
|
|
{
|
|
// if we already have this tracker, ignore it
|
|
if (std::find_if(new_trackers.begin(), new_trackers.end()
|
|
, boost::bind(&announce_entry::url, _1) == i->url) != new_trackers.end())
|
|
continue;
|
|
|
|
// insert the tracker ordered by tier
|
|
new_trackers.insert(std::find_if(new_trackers.begin(), new_trackers.end()
|
|
, boost::bind(&announce_entry::tier, _1) >= i->tier), *i);
|
|
}
|
|
m_trackers.swap(new_trackers);
|
|
|
|
#if !defined(TORRENT_DISABLE_ENCRYPTION) && !defined(TORRENT_DISABLE_EXTENSIONS)
|
|
hasher h;
|
|
h.update("req2", 4);
|
|
h.update((char*)&m_torrent_file->info_hash()[0], 20);
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(210)">../src/torrent.cpp:580</a></td><td>if the existing torrent doesn't have metadata, insert the metadata we just downloaded into it.</td></tr><tr id="210" style="display: none;" colspan="3"><td colspan="3"><h2>if the existing torrent doesn't have metadata, insert
|
|
the metadata we just downloaded into it.</h2><h4>../src/torrent.cpp:580</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> m_torrent_file = tf;
|
|
m_info_hash = tf->info_hash();
|
|
|
|
// now, we might already have this torrent in the session.
|
|
boost::shared_ptr<torrent> t = m_ses.find_torrent(m_torrent_file->info_hash()).lock();
|
|
if (t)
|
|
{
|
|
if (!m_uuid.empty() && t->uuid().empty())
|
|
t->set_uuid(m_uuid);
|
|
if (!m_url.empty() && t->url().empty())
|
|
t->set_url(m_url);
|
|
if (!m_source_feed_url.empty() && t->source_feed_url().empty())
|
|
t->set_source_feed_url(m_source_feed_url);
|
|
|
|
// insert this torrent in the uuid index
|
|
if (!m_uuid.empty() || !m_url.empty())
|
|
{
|
|
m_ses.insert_uuid_torrent(m_uuid.empty() ? m_url : m_uuid, t);
|
|
}
|
|
|
|
<div style="background: #ffff00" width="100%">
|
|
</div> set_error(error_code(errors::duplicate_torrent, get_libtorrent_category()), torrent_status::error_file_url);
|
|
abort();
|
|
return;
|
|
}
|
|
|
|
m_ses.insert_torrent(m_torrent_file->info_hash(), me, m_uuid);
|
|
|
|
// if the user added any trackers while downloading the
|
|
// .torrent file, merge them into the new tracker list
|
|
std::vector<announce_entry> new_trackers = m_torrent_file->trackers();
|
|
for (std::vector<announce_entry>::iterator i = m_trackers.begin()
|
|
, end(m_trackers.end()); i != end; ++i)
|
|
{
|
|
// if we already have this tracker, ignore it
|
|
if (std::find_if(new_trackers.begin(), new_trackers.end()
|
|
, boost::bind(&announce_entry::url, _1) == i->url) != new_trackers.end())
|
|
continue;
|
|
|
|
// insert the tracker ordered by tier
|
|
new_trackers.insert(std::find_if(new_trackers.begin(), new_trackers.end()
|
|
, boost::bind(&announce_entry::tier, _1) >= i->tier), *i);
|
|
}
|
|
m_trackers.swap(new_trackers);
|
|
|
|
// add the web seeds from the .torrent file
|
|
std::vector<web_seed_entry> const& web_seeds = m_torrent_file->web_seeds();
|
|
m_web_seeds.insert(m_web_seeds.end(), web_seeds.begin(), web_seeds.end());
|
|
|
|
#if !defined(TORRENT_DISABLE_ENCRYPTION) && !defined(TORRENT_DISABLE_EXTENSIONS)
|
|
hasher h;
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(211)">../src/torrent.cpp:1562</a></td><td>is verify_peer_cert called once per certificate in the chain, and this function just tells us which depth we're at right now? If so, the comment makes sense. any certificate that isn't the leaf (i.e. the one presented by the peer) should be accepted automatically, given preverified is true. The leaf certificate need to be verified to make sure its DN matches the info-hash</td></tr><tr id="211" style="display: none;" colspan="3"><td colspan="3"><h2>is verify_peer_cert called once per certificate in the chain, and
|
|
this function just tells us which depth we're at right now? If so, the comment
|
|
makes sense.
|
|
any certificate that isn't the leaf (i.e. the one presented by the peer)
|
|
should be accepted automatically, given preverified is true. The leaf certificate
|
|
need to be verified to make sure its DN matches the info-hash</h2><h4>../src/torrent.cpp:1562</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> if (pp) p->add_extension(pp);
|
|
}
|
|
|
|
// if files are checked for this torrent, call the extension
|
|
// to let it initialize itself
|
|
if (m_connections_initialized)
|
|
tp->on_files_checked();
|
|
}
|
|
|
|
#endif
|
|
|
|
#ifdef TORRENT_USE_OPENSSL
|
|
|
|
#if BOOST_VERSION >= 104700
|
|
bool torrent::verify_peer_cert(bool preverified, boost::asio::ssl::verify_context& ctx)
|
|
{
|
|
// if the cert wasn't signed by the correct CA, fail the verification
|
|
if (!preverified) return false;
|
|
|
|
// we're only interested in checking the certificate at the end of the chain.
|
|
<div style="background: #ffff00" width="100%"> int depth = X509_STORE_CTX_get_error_depth(ctx.native_handle());
|
|
</div> if (depth > 0) return true;
|
|
|
|
X509* cert = X509_STORE_CTX_get_current_cert(ctx.native_handle());
|
|
|
|
// Go through the alternate names in the certificate looking for matching DNS entries
|
|
GENERAL_NAMES* gens = static_cast<GENERAL_NAMES*>(
|
|
X509_get_ext_d2i(cert, NID_subject_alt_name, 0, 0));
|
|
|
|
#ifndef TORRENT_DISABLE_LOGGING
|
|
std::string names;
|
|
bool match = false;
|
|
#endif
|
|
for (int i = 0; i < aux::openssl_num_general_names(gens); ++i)
|
|
{
|
|
GENERAL_NAME* gen = aux::openssl_general_name_value(gens, i);
|
|
if (gen->type != GEN_DNS) continue;
|
|
ASN1_IA5STRING* domain = gen->d.dNSName;
|
|
if (domain->type != V_ASN1_IA5STRING || !domain->data || !domain->length) continue;
|
|
const char* torrent_name = reinterpret_cast<const char*>(domain->data);
|
|
std::size_t name_length = domain->length;
|
|
|
|
#ifndef TORRENT_DISABLE_LOGGING
|
|
if (i > 1) names += " | n: ";
|
|
names.append(torrent_name, name_length);
|
|
#endif
|
|
if (strncmp(torrent_name, "*", name_length) == 0
|
|
|| strncmp(torrent_name, m_torrent_file->name().c_str(), name_length) == 0)
|
|
{
|
|
#ifndef TORRENT_DISABLE_LOGGING
|
|
match = true;
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(212)">../src/torrent.cpp:1984</a></td><td>instead of creating the picker up front here, maybe this whole section should move to need_picker()</td></tr><tr id="212" style="display: none;" colspan="3"><td colspan="3"><h2>instead of creating the picker up front here,
|
|
maybe this whole section should move to need_picker()</h2><h4>../src/torrent.cpp:1984</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> m_have_all = true;
|
|
m_ses.get_io_service().post(boost::bind(&torrent::files_checked, shared_from_this()));
|
|
m_resume_data.reset();
|
|
update_gauge();
|
|
update_state_list();
|
|
return;
|
|
}
|
|
|
|
set_state(torrent_status::checking_resume_data);
|
|
|
|
int num_pad_files = 0;
|
|
TORRENT_ASSERT(block_size() > 0);
|
|
file_storage const& fs = m_torrent_file->files();
|
|
for (int i = 0; i < fs.num_files(); ++i)
|
|
{
|
|
if (fs.pad_file_at(i)) ++num_pad_files;
|
|
|
|
if (!fs.pad_file_at(i) || fs.file_size(i) == 0) continue;
|
|
m_padding += boost::uint32_t(fs.file_size(i));
|
|
|
|
<div style="background: #ffff00" width="100%"> need_picker();
|
|
</div>
|
|
peer_request pr = m_torrent_file->map_file(i, 0, fs.file_size(i));
|
|
int off = pr.start & (block_size()-1);
|
|
if (off != 0) { pr.length -= block_size() - off; pr.start += block_size() - off; }
|
|
TORRENT_ASSERT((pr.start & (block_size()-1)) == 0);
|
|
|
|
int block = block_size();
|
|
int blocks_per_piece = m_torrent_file->piece_length() / block;
|
|
piece_block pb(pr.piece, pr.start / block);
|
|
for (; pr.length >= block; pr.length -= block, ++pb.block_index)
|
|
{
|
|
if (int(pb.block_index) == blocks_per_piece) { pb.block_index = 0; ++pb.piece_index; }
|
|
m_picker->mark_as_finished(pb, 0);
|
|
}
|
|
// ugly edge case where pad files are not used the way they're
// supposed to be, i.e. added back-to-back or at the end
|
|
if (int(pb.block_index) == blocks_per_piece) { pb.block_index = 0; ++pb.piece_index; }
|
|
if (pr.length > 0 && ((i+1 != fs.num_files() && fs.pad_file_at(i+1))
|
|
|| i + 1 == fs.num_files()))
|
|
{
|
|
m_picker->mark_as_finished(pb, 0);
|
|
}
|
|
}
|
|
|
|
if (m_padding > 0)
|
|
{
|
|
// if we marked an entire piece as finished, we actually
|
|
// need to consider it finished
|
|
|
|
std::vector<piece_picker::downloading_piece> dq
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(213)">../src/torrent.cpp:2058</a></td><td>this could be optimized by looking up which files are complete and just look at those</td></tr><tr id="213" style="display: none;" colspan="3"><td colspan="3"><h2>this could be optimized by looking up which files are
|
|
complete and just look at those</h2><h4>../src/torrent.cpp:2058</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> if (!need_loaded()) return;
|
|
|
|
if (num_pad_files > 0)
|
|
m_picker->set_num_pad_files(num_pad_files);
|
|
|
|
std::vector<std::string> links;
|
|
#ifndef TORRENT_DISABLE_MUTABLE_TORRENTS
|
|
if (!m_torrent_file->similar_torrents().empty()
|
|
|| !m_torrent_file->collections().empty())
|
|
{
|
|
resolve_links res(m_torrent_file);
|
|
|
|
std::vector<sha1_hash> s = m_torrent_file->similar_torrents();
|
|
for (std::vector<sha1_hash>::iterator i = s.begin(), end(s.end());
|
|
i != end; ++i)
|
|
{
|
|
boost::shared_ptr<torrent> t = m_ses.find_torrent(*i).lock();
|
|
if (!t) continue;
|
|
|
|
// Only attempt to reuse files from torrents that are seeding.
|
|
<div style="background: #ffff00" width="100%"> if (!t->is_seed()) continue;
|
|
</div>
|
|
res.match(t->get_torrent_copy(), t->save_path());
|
|
}
|
|
std::vector<std::string> c = m_torrent_file->collections();
|
|
for (std::vector<std::string>::iterator i = c.begin(), end(c.end());
|
|
i != end; ++i)
|
|
{
|
|
std::vector<boost::shared_ptr<torrent> > ts = m_ses.find_collection(*i);
|
|
|
|
for (std::vector<boost::shared_ptr<torrent> >::iterator k = ts.begin()
|
|
, end2(ts.end()); k != end2; ++k)
|
|
{
|
|
// Only attempt to reuse files from torrents that are seeding.
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(214)">../src/torrent.cpp:2074</a></td><td>this could be optimized by looking up which files are complete and just look at those</td></tr><tr id="214" style="display: none;" colspan="3"><td colspan="3"><h2>this could be optimized by looking up which files are
|
|
complete and just look at those</h2><h4>../src/torrent.cpp:2074</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> i != end; ++i)
|
|
{
|
|
boost::shared_ptr<torrent> t = m_ses.find_torrent(*i).lock();
|
|
if (!t) continue;
|
|
|
|
// Only attempt to reuse files from torrents that are seeding.
|
|
if (!t->is_seed()) continue;
|
|
|
|
res.match(t->get_torrent_copy(), t->save_path());
|
|
}
|
|
std::vector<std::string> c = m_torrent_file->collections();
|
|
for (std::vector<std::string>::iterator i = c.begin(), end(c.end());
|
|
i != end; ++i)
|
|
{
|
|
std::vector<boost::shared_ptr<torrent> > ts = m_ses.find_collection(*i);
|
|
|
|
for (std::vector<boost::shared_ptr<torrent> >::iterator k = ts.begin()
|
|
, end2(ts.end()); k != end2; ++k)
|
|
{
|
|
// Only attempt to reuse files from torrents that are seeding.
|
|
<div style="background: #ffff00" width="100%"> if (!(*k)->is_seed()) continue;
|
|
</div>
|
|
res.match((*k)->get_torrent_copy(), (*k)->save_path());
|
|
}
|
|
}
|
|
|
|
std::vector<resolve_links::link_t> const& l = res.get_links();
|
|
if (!l.empty())
|
|
{
|
|
for (std::vector<resolve_links::link_t>::const_iterator i = l.begin()
|
|
, end(l.end()); i != end; ++i)
|
|
{
|
|
if (!i->ti) continue;
|
|
|
|
torrent_info const& ti = *i->ti;
|
|
std::string const& save_path = i->save_path;
|
|
links.push_back(combine_path(save_path
|
|
, ti.files().file_path(i->file_idx)));
|
|
}
|
|
}
|
|
}
|
|
#endif // TORRENT_DISABLE_MUTABLE_TORRENTS
|
|
|
|
inc_refcount("check_fastresume");
|
|
// async_check_fastresume will gut links
|
|
m_ses.disk_thread().async_check_fastresume(
|
|
m_storage.get(), m_resume_data ? &m_resume_data->node : NULL
|
|
, links, boost::bind(&torrent::on_resume_data_checked
|
|
, shared_from_this(), _1));
|
|
#ifndef TORRENT_DISABLE_LOGGING
|
|
debug_log("init, async_check_fastresume");
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(215)">../src/torrent.cpp:2240</a></td><td>there may be peer extensions relying on the torrent extension still being alive. Only do this if there are no peers. And when the last peer is disconnected, if the torrent is unloaded, clear the extensions m_extensions.clear();</td></tr><tr id="215" style="display: none;" colspan="3"><td colspan="3"><h2>there may be peer extensions relying on the torrent extension
|
|
still being alive. Only do this if there are no peers. And when the last peer
|
|
is disconnected, if the torrent is unloaded, clear the extensions
|
|
m_extensions.clear();</h2><h4>../src/torrent.cpp:2240</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> // pinned torrents are not allowed to be swapped out
|
|
TORRENT_ASSERT(!m_pinned);
|
|
|
|
m_should_be_loaded = false;
|
|
|
|
// make sure it's not unloaded in the middle of some operation that uses it
|
|
if (m_refcount > 0) return;
|
|
|
|
// call on_unload() on extensions
|
|
#ifndef TORRENT_DISABLE_EXTENSIONS
|
|
for (extension_list_t::iterator i = m_extensions.begin()
|
|
, end(m_extensions.end()); i != end; ++i)
|
|
{
|
|
TORRENT_TRY {
|
|
(*i)->on_unload();
|
|
} TORRENT_CATCH (std::exception&) {}
|
|
}
|
|
|
|
// also remove extensions and re-instantiate them when the torrent is loaded again
|
|
// they end up using a significant amount of memory
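// editor's sketch (not part of the source): the guard the todo asks for
// could be as small as only dropping the extensions once the last peer
// is gone, e.g.
//
//   if (m_connections.empty())
//       m_extensions.clear();
//
// peer connections may hold peer_plugin objects that refer back to these
// torrent plugins, which is why the check on m_connections matters.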
|
|
<div style="background: #ffff00" width="100%">#endif
|
|
</div>
|
|
// someone else holds a reference to the torrent_info
|
|
// make the torrent release its reference to it,
|
|
// after making a copy and then unloading that version
|
|
// as soon as the user is done with its copy of torrent_info
|
|
// it will be freed, and we'll have the unloaded version left
|
|
if (!m_torrent_file.unique())
|
|
m_torrent_file = boost::make_shared<torrent_info>(*m_torrent_file);
|
|
|
|
m_torrent_file->unload();
|
|
inc_stats_counter(counters::num_loaded_torrents, -1);
|
|
|
|
m_storage.reset();
|
|
|
|
state_updated();
|
|
}
|
|
|
|
bt_peer_connection* torrent::find_introducer(tcp::endpoint const& ep) const
|
|
{
|
|
#ifndef TORRENT_DISABLE_EXTENSIONS
|
|
for (const_peer_iterator i = m_connections.begin(); i != m_connections.end(); ++i)
|
|
{
|
|
if ((*i)->type() != peer_connection::bittorrent_connection) continue;
|
|
bt_peer_connection* p = static_cast<bt_peer_connection*>(*i);
|
|
if (!p->supports_holepunch()) continue;
|
|
peer_plugin const* pp = p->find_plugin("ut_pex");
|
|
if (!pp) continue;
|
|
if (was_introduced_by(pp, ep)) return p;
|
|
}
|
|
#else
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(216)">../src/torrent.cpp:2932</a></td><td>this pattern is repeated in a few places. Factor this into a function and generalize the concept of a torrent having a dedicated listen port</td></tr><tr id="216" style="display: none;" colspan="3"><td colspan="3"><h2>this pattern is repeated in a few places. Factor this into
|
|
a function and generalize the concept of a torrent having a
|
|
dedicated listen port</h2><h4>../src/torrent.cpp:2932</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> // if the files haven't been checked yet, we're
|
|
// not ready for peers. Except, if we don't have metadata,
|
|
// we need peers to download from
|
|
if (!m_files_checked && valid_metadata()) return;
|
|
|
|
if (!m_announce_to_lsd) return;
|
|
|
|
// private torrents are never announced on LSD
|
|
if (m_torrent_file->is_valid() && m_torrent_file->priv()) return;
|
|
|
|
// i2p torrents are also never announced on LSD
|
|
// unless we allow mixed swarms
|
|
if (m_torrent_file->is_valid()
|
|
&& (torrent_file().is_i2p() && !settings().get_bool(settings_pack::allow_i2p_mixed)))
|
|
return;
|
|
|
|
if (is_paused()) return;
|
|
|
|
if (!m_ses.has_lsd()) return;
|
|
|
|
<div style="background: #ffff00" width="100%">#ifdef TORRENT_USE_OPENSSL
|
|
</div> int port = is_ssl_torrent() ? m_ses.ssl_listen_port() : m_ses.listen_port();
|
|
#else
|
|
int port = m_ses.listen_port();
|
|
#endif
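// editor's sketch (hypothetical helper, not in the source): the #ifdef
// block above is the pattern the todo wants factored out. One possible
// shape, with "announce_port" being a made-up name for illustration:
//
//   int torrent::announce_port() const
//   {
//   #ifdef TORRENT_USE_OPENSSL
//       if (is_ssl_torrent()) return m_ses.ssl_listen_port();
//   #endif
//       return m_ses.listen_port();
//   }
//
// the LSD, DHT and tracker announce paths could then all call the same
// accessor instead of repeating the #ifdef.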
|
|
|
|
// announce with the local discovery service
|
|
m_ses.announce_lsd(m_torrent_file->info_hash(), port
|
|
, settings().get_bool(settings_pack::broadcast_lsd) && m_lsd_seq == 0);
|
|
++m_lsd_seq;
|
|
}
|
|
|
|
#ifndef TORRENT_DISABLE_DHT
|
|
|
|
void torrent::dht_announce()
|
|
{
|
|
TORRENT_ASSERT(is_single_thread());
|
|
if (!m_ses.dht())
|
|
{
|
|
#ifndef TORRENT_DISABLE_LOGGING
|
|
debug_log("DHT: no dht initialized");
|
|
#endif
|
|
return;
|
|
}
|
|
if (!should_announce_dht())
|
|
{
|
|
#ifndef TORRENT_DISABLE_LOGGING
|
|
if (!m_ses.announce_dht())
|
|
debug_log("DHT: no listen sockets");
|
|
|
|
if (m_torrent_file->is_valid() && !m_files_checked)
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(217)">../src/torrent.cpp:3779</a></td><td>add one peer per IP the hostname resolves to</td></tr><tr id="217" style="display: none;" colspan="3"><td colspan="3"><h2>add one peer per IP the hostname resolves to</h2><h4>../src/torrent.cpp:3779</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">#endif
|
|
|
|
void torrent::on_peer_name_lookup(error_code const& e
|
|
, std::vector<address> const& host_list, int port)
|
|
{
|
|
TORRENT_ASSERT(is_single_thread());
|
|
|
|
INVARIANT_CHECK;
|
|
|
|
#if defined TORRENT_ASIO_DEBUGGING
|
|
complete_async("torrent::on_peer_name_lookup");
|
|
#endif
|
|
|
|
#ifndef TORRENT_DISABLE_LOGGING
|
|
if (e)
|
|
debug_log("peer name lookup error: %s", e.message().c_str());
|
|
#endif
|
|
|
|
if (e || host_list.empty() || m_ses.is_aborted()) return;
|
|
|
|
<div style="background: #ffff00" width="100%"> tcp::endpoint host(host_list.front(), port);
|
|
</div>
|
|
if (m_ip_filter && m_ip_filter->access(host.address()) & ip_filter::blocked)
|
|
{
|
|
#ifndef TORRENT_DISABLE_LOGGING
|
|
error_code ec;
|
|
debug_log("blocked ip from tracker: %s", host.address().to_string(ec).c_str());
|
|
#endif
|
|
if (m_ses.alerts().should_post<peer_blocked_alert>())
|
|
m_ses.alerts().emplace_alert<peer_blocked_alert>(get_handle()
|
|
, host.address(), peer_blocked_alert::ip_filter);
|
|
return;
|
|
}
|
|
|
|
if (add_peer(host, peer_info::tracker))
|
|
state_updated();
|
|
update_want_peers();
|
|
}
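// editor's sketch (not part of the source): the todo above asks for one
// peer per resolved address instead of just host_list.front(). Keeping
// the same filter/alert handling per endpoint, the loop could look
// roughly like this:
//
//   for (std::vector<address>::const_iterator a = host_list.begin()
//       , end(host_list.end()); a != end; ++a)
//   {
//       tcp::endpoint host(*a, port);
//       if (m_ip_filter
//           && (m_ip_filter->access(host.address()) & ip_filter::blocked))
//           continue; // and post peer_blocked_alert, as above
//       if (add_peer(host, peer_info::tracker))
//           state_updated();
//   }
//   update_want_peers();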
|
|
|
|
boost::int64_t torrent::bytes_left() const
|
|
{
|
|
// if we don't have the metadata yet, we
|
|
// cannot tell how big the torrent is.
|
|
if (!valid_metadata()) return -1;
|
|
return m_torrent_file->total_size()
|
|
- quantized_bytes_done();
|
|
}
|
|
|
|
boost::int64_t torrent::quantized_bytes_done() const
|
|
{
|
|
// INVARIANT_CHECK;
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(218)">../src/torrent.cpp:4719</a></td><td>update suggest_piece?</td></tr><tr id="218" style="display: none;" colspan="3"><td colspan="3"><h2>update suggest_piece?</h2><h4>../src/torrent.cpp:4719</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> if (has_picker())
|
|
{
|
|
torrent_peer* pp = peer->peer_info_struct();
|
|
m_picker->inc_refcount_all(pp);
|
|
}
|
|
#ifdef TORRENT_DEBUG
|
|
else
|
|
{
|
|
TORRENT_ASSERT(is_seed() || !m_have_all);
|
|
}
|
|
#endif
|
|
}
|
|
|
|
void torrent::peer_lost(bitfield const& bits, peer_connection const* peer)
|
|
{
|
|
if (has_picker())
|
|
{
|
|
TORRENT_ASSERT(bits.size() == torrent_file().num_pieces());
|
|
torrent_peer* pp = peer->peer_info_struct();
|
|
m_picker->dec_refcount(bits, pp);
|
|
<div style="background: #ffff00" width="100%"> }
|
|
</div>#ifdef TORRENT_DEBUG
|
|
else
|
|
{
|
|
TORRENT_ASSERT(is_seed() || !m_have_all);
|
|
}
|
|
#endif
|
|
}
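// editor's sketch (not part of the source): if the suggest bookkeeping
// should follow the bitfield overload as well (which is what the todo
// above seems to ask), the has_picker() branch could mirror the
// single-piece overload below, e.g.:
//
//   for (int i = 0; i < bits.size(); ++i)
//       if (bits[i]) update_suggest_piece(i, -1);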
|
|
|
|
void torrent::peer_lost(int index, peer_connection const* peer)
|
|
{
|
|
if (m_picker.get())
|
|
{
|
|
torrent_peer* pp = peer->peer_info_struct();
|
|
m_picker->dec_refcount(index, pp);
|
|
update_suggest_piece(index, -1);
|
|
}
|
|
#ifdef TORRENT_DEBUG
|
|
else
|
|
{
|
|
TORRENT_ASSERT(is_seed() || !m_have_all);
|
|
}
|
|
#endif
|
|
}
|
|
|
|
void torrent::add_suggest_piece(int index)
|
|
{
|
|
// it would be nice if we would keep track of piece
|
|
// availability even when we're a seed, for
|
|
// the suggest piece feature
|
|
if (!has_picker()) return;
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(219)">../src/torrent.cpp:4863</a></td><td>really, we should just keep the picker around in this case to maintain the availability counters</td></tr><tr id="219" style="display: none;" colspan="3"><td colspan="3"><h2>really, we should just keep the picker around
|
|
in this case to maintain the availability counters</h2><h4>../src/torrent.cpp:4863</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> pieces.reserve(cs.pieces.size());
|
|
|
|
// sort in ascending order, to get most recently used first
|
|
std::sort(cs.pieces.begin(), cs.pieces.end()
|
|
, boost::bind(&cached_piece_info::last_use, _1)
|
|
> boost::bind(&cached_piece_info::last_use, _2));
|
|
|
|
for (std::vector<cached_piece_info>::iterator i = cs.pieces.begin()
|
|
, end(cs.pieces.end()); i != end; ++i)
|
|
{
|
|
TORRENT_ASSERT(i->storage == m_storage.get());
|
|
if (!has_piece_passed(i->piece)) continue;
|
|
suggest_piece_t p;
|
|
p.piece_index = i->piece;
|
|
if (has_picker())
|
|
{
|
|
p.num_peers = m_picker->get_availability(i->piece);
|
|
}
|
|
else
|
|
{
|
|
<div style="background: #ffff00" width="100%"> p.num_peers = 0;
|
|
</div> for (const_peer_iterator j = m_connections.begin()
|
|
, end2(m_connections.end()); j != end2; ++j)
|
|
{
|
|
peer_connection* peer = *j;
|
|
if (peer->has_piece(p.piece_index)) ++p.num_peers;
|
|
}
|
|
}
|
|
pieces.push_back(p);
|
|
}
|
|
|
|
// sort by rarity (stable, to maintain sort
|
|
// by last use)
|
|
std::stable_sort(pieces.begin(), pieces.end());
|
|
|
|
// only suggest half of the pieces
|
|
pieces.resize(pieces.size() / 2);
|
|
|
|
// send new suggests to peers
|
|
// the peers will filter out pieces we've
|
|
// already suggested to them
|
|
for (std::vector<suggest_piece_t>::iterator i = pieces.begin()
|
|
, end(pieces.end()); i != end; ++i)
|
|
{
|
|
for (peer_iterator p = m_connections.begin();
|
|
p != m_connections.end(); ++p)
|
|
(*p)->send_suggest(i->piece_index);
|
|
}
|
|
}
|
|
|
|
void torrent::abort()
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(220)">../src/torrent.cpp:6926</a></td><td>make this more generic to not just work if files have been renamed, but also if they have been merged into a single file for instance maybe use the same format as .torrent files and reuse some code from torrent_info The mapped_files needs to be read both in the network thread and in the disk thread, since they both have their own mapped files structures which are kept in sync</td></tr><tr id="220" style="display: none;" colspan="3"><td colspan="3"><h2>make this more generic to not just work if files have been
|
|
renamed, but also if they have been merged into a single file for instance
|
|
maybe use the same format as .torrent files and reuse some code from torrent_info
|
|
The mapped_files needs to be read both in the network thread
|
|
and in the disk thread, since they both have their own mapped files structures
|
|
which are kept in sync</h2><h4>../src/torrent.cpp:6926</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> {
|
|
m_save_path = p;
|
|
#ifndef TORRENT_DISABLE_LOGGING
|
|
debug_log("loaded resume data: save-path: %s", m_save_path.c_str());
|
|
#endif
|
|
}
|
|
}
|
|
|
|
m_url = rd.dict_find_string_value("url");
|
|
m_uuid = rd.dict_find_string_value("uuid");
|
|
m_source_feed_url = rd.dict_find_string_value("feed");
|
|
|
|
if (!m_uuid.empty() || !m_url.empty())
|
|
{
|
|
boost::shared_ptr<torrent> me(shared_from_this());
|
|
|
|
// insert this torrent in the uuid index
|
|
m_ses.insert_uuid_torrent(m_uuid.empty() ? m_url : m_uuid, me);
|
|
}
|
|
|
|
<div style="background: #ffff00" width="100%"> bdecode_node mapped_files = rd.dict_find_list("mapped_files");
|
|
</div> if (mapped_files && mapped_files.list_size() == m_torrent_file->num_files())
|
|
{
|
|
for (int i = 0; i < m_torrent_file->num_files(); ++i)
|
|
{
|
|
std::string new_filename = mapped_files.list_string_value_at(i);
|
|
if (new_filename.empty()) continue;
|
|
m_torrent_file->rename_file(i, new_filename);
|
|
}
|
|
}
|
|
|
|
m_added_time = rd.dict_find_int_value("added_time", m_added_time);
|
|
m_completed_time = rd.dict_find_int_value("completed_time", m_completed_time);
|
|
if (m_completed_time != 0 && m_completed_time < m_added_time)
|
|
m_completed_time = m_added_time;
|
|
|
|
// load file priorities except if the add_torrent_param file was set to
|
|
// override resume data
|
|
if (!m_override_resume_data || m_file_priority.empty())
|
|
{
|
|
bdecode_node file_priority = rd.dict_find_list("file_priority");
|
|
if (file_priority)
|
|
{
|
|
const int num_files = (std::min)(file_priority.list_size()
|
|
, m_torrent_file->num_files());
|
|
m_file_priority.resize(num_files, 4);
|
|
for (int i = 0; i < num_files; ++i)
|
|
{
|
|
m_file_priority[i] = file_priority.list_int_value_at(i, 1);
|
|
// this is suspicious, leave seed mode
|
|
if (m_file_priority[i] == 0) m_seed_mode = false;
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(221)">../src/torrent.cpp:7059</a></td><td>if this is a merkle torrent and we can't restore the tree, we need to wipe all the bits in the have array, but not necessarily; we might want to do a full check to see if we have all the pieces. This is low priority since almost no one uses merkle torrents</td></tr><tr id="221" style="display: none;" colspan="3"><td colspan="3"><h2>if this is a merkle torrent and we can't
|
|
restore the tree, we need to wipe all the
|
|
bits in the have array, but not necessarily;
|
|
we might want to do a full check to see if we have
|
|
all the pieces. This is low priority since almost
|
|
no one uses merkle torrents</h2><h4>../src/torrent.cpp:7059</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> add_web_seed(url, web_seed_entry::http_seed);
|
|
}
|
|
}
|
|
|
|
if (m_torrent_file->is_merkle_torrent())
|
|
{
|
|
bdecode_node mt = rd.dict_find_string("merkle tree");
|
|
if (mt)
|
|
{
|
|
std::vector<sha1_hash> tree;
|
|
tree.resize(m_torrent_file->merkle_tree().size());
|
|
std::memcpy(&tree[0], mt.string_ptr()
|
|
, (std::min)(mt.string_length(), int(tree.size()) * 20));
|
|
if (mt.string_length() < int(tree.size()) * 20)
|
|
std::memset(&tree[0] + mt.string_length() / 20, 0
|
|
, tree.size() - mt.string_length() / 20);
|
|
m_torrent_file->set_merkle_tree(tree);
|
|
}
|
|
else
|
|
{
|
|
<div style="background: #ffff00" width="100%"> TORRENT_ASSERT(false);
|
|
</div> }
|
|
}
|
|
|
|
// updating some of the torrent state may have set need_save_resume_data.
|
|
// clear it here since we've just restored the resume data we already
|
|
// have. Nothing has changed from that state yet.
|
|
m_need_save_resume_data = false;
|
|
|
|
if (m_seed_mode)
|
|
{
|
|
// some sanity checking. Maybe we shouldn't be in seed mode anymore
|
|
bdecode_node pieces = rd.dict_find("pieces");
|
|
if (pieces && pieces.type() == bdecode_node::string_t
|
|
&& int(pieces.string_length()) == m_torrent_file->num_pieces())
|
|
{
|
|
char const* pieces_str = pieces.string_ptr();
|
|
for (int i = 0, end(pieces.string_length()); i < end; ++i)
|
|
{
|
|
// being in seed mode and missing a piece is not compatible.
|
|
// Leave seed mode if that happens
|
|
if ((pieces_str[i] & 1)) continue;
|
|
m_seed_mode = false;
|
|
break;
|
|
}
|
|
}
|
|
|
|
bdecode_node piece_priority = rd.dict_find_string("piece_priority");
|
|
if (piece_priority && piece_priority.string_length()
|
|
== m_torrent_file->num_pieces())
|
|
{
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(222)">../src/torrent.cpp:7303</a></td><td>make this more generic to not just work if files have been renamed, but also if they have been merged into a single file for instance. using file_base</td></tr><tr id="222" style="display: none;" colspan="3"><td colspan="3"><h2>make this more generic to not just work if files have been
|
|
renamed, but also if they have been merged into a single file for instance.
|
|
using file_base</h2><h4>../src/torrent.cpp:7303</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> if (!has_picker())
|
|
{
|
|
std::memset(&pieces[0], m_have_all, pieces.size());
|
|
}
|
|
else if (has_picker())
|
|
{
|
|
for (int i = 0, end(pieces.size()); i < end; ++i)
|
|
pieces[i] = m_picker->have_piece(i) ? 1 : 0;
|
|
}
|
|
|
|
if (m_seed_mode)
|
|
{
|
|
TORRENT_ASSERT(m_verified.size() == pieces.size());
|
|
TORRENT_ASSERT(m_verifying.size() == pieces.size());
|
|
for (int i = 0, end(pieces.size()); i < end; ++i)
|
|
pieces[i] |= m_verified[i] ? 2 : 0;
|
|
}
|
|
}
|
|
|
|
// write renamed files
|
|
<div style="background: #ffff00" width="100%"> if (&m_torrent_file->files() != &m_torrent_file->orig_files()
|
|
</div> && m_torrent_file->files().num_files() == m_torrent_file->orig_files().num_files())
|
|
{
|
|
entry::list_type& fl = ret["mapped_files"].list();
|
|
file_storage const& fs = m_torrent_file->files();
|
|
for (int i = 0; i < fs.num_files(); ++i)
|
|
{
|
|
fl.push_back(fs.file_path(i));
|
|
}
|
|
}
|
|
|
|
// write local peers
|
|
|
|
std::back_insert_iterator<entry::string_type> peers(ret["peers"].string());
|
|
std::back_insert_iterator<entry::string_type> banned_peers(ret["banned_peers"].string());
|
|
#if TORRENT_USE_IPV6
|
|
std::back_insert_iterator<entry::string_type> peers6(ret["peers6"].string());
|
|
std::back_insert_iterator<entry::string_type> banned_peers6(ret["banned_peers6"].string());
|
|
#endif
|
|
|
|
int num_saved_peers = 0;
|
|
|
|
std::vector<torrent_peer const*> deferred_peers;
|
|
|
|
if (m_peer_list)
|
|
{
|
|
for (peer_list::const_iterator i = m_peer_list->begin_peer()
|
|
, end(m_peer_list->end_peer()); i != end; ++i)
|
|
{
|
|
error_code ec;
|
|
torrent_peer const* p = *i;
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(223)">../src/torrent.cpp:9559</a></td><td>add a flag to ignore stats, and only care about resume data for content. For unchanged files, don't trigger a load of the metadata just to save an empty resume data file</td></tr><tr id="223" style="display: none;" colspan="3"><td colspan="3"><h2>add a flag to ignore stats, and only care about resume data for
|
|
content. For unchanged files, don't trigger a load of the metadata
|
|
just to save an empty resume data file</h2><h4>../src/torrent.cpp:9559</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> if (m_complete != 0xffffff) seeds = m_complete;
|
|
else seeds = m_peer_list ? m_peer_list->num_seeds() : 0;
|
|
|
|
if (m_incomplete != 0xffffff) downloaders = m_incomplete;
|
|
else downloaders = m_peer_list ? m_peer_list->num_peers() - m_peer_list->num_seeds() : 0;
|
|
|
|
if (seeds == 0)
|
|
{
|
|
ret |= no_seeds;
|
|
ret |= downloaders & prio_mask;
|
|
}
|
|
else
|
|
{
|
|
ret |= ((1 + downloaders) * scale / seeds) & prio_mask;
|
|
}
|
|
|
|
return ret;
|
|
}
|
|
|
|
// this is an async operation triggered by the client
|
|
<div style="background: #ffff00" width="100%"> void torrent::save_resume_data(int flags)
|
|
</div> {
|
|
TORRENT_ASSERT(is_single_thread());
|
|
INVARIANT_CHECK;
|
|
|
|
if (!valid_metadata())
|
|
{
|
|
alerts().emplace_alert<save_resume_data_failed_alert>(get_handle()
|
|
, errors::no_metadata);
|
|
return;
|
|
}
|
|
|
|
if ((flags & torrent_handle::only_if_modified) && !m_need_save_resume_data)
|
|
{
|
|
alerts().emplace_alert<save_resume_data_failed_alert>(get_handle()
|
|
, errors::resume_data_not_modified);
|
|
return;
|
|
}
|
|
|
|
m_need_save_resume_data = false;
|
|
m_last_saved_resume = m_ses.session_time();
|
|
m_save_resume_flags = boost::uint8_t(flags);
|
|
state_updated();
|
|
|
|
if (m_state == torrent_status::checking_files
|
|
|| m_state == torrent_status::checking_resume_data)
|
|
{
|
|
if (!need_loaded())
|
|
{
|
|
alerts().emplace_alert<save_resume_data_failed_alert>(get_handle()
|
|
, m_error);
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(224)">../src/torrent.cpp:11186</a></td><td>instead of resorting the whole list, insert the peers directly into the right place</td></tr><tr id="224" style="display: none;" colspan="3"><td colspan="3"><h2>instead of resorting the whole list, insert the peers
|
|
directly into the right place</h2><h4>../src/torrent.cpp:11186</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> printf("timed out [average-piece-time: %d ms ]\n"
|
|
, m_average_piece_time);
|
|
#endif
|
|
}
|
|
|
|
// pick all blocks for this piece. the peers list is kept up to date
|
|
// and sorted. when we issue a request to a peer, its download queue
|
|
// time will increase and it may need to be bumped in the peers list,
|
|
// since it's ordered by download queue time
|
|
pick_time_critical_block(peers, ignore_peers
|
|
, peers_with_requests
|
|
, pi, &*i, m_picker.get()
|
|
, blocks_in_piece, timed_out);
|
|
|
|
// put back the peers we ignored into the peer list for the next piece
|
|
if (!ignore_peers.empty())
|
|
{
|
|
peers.insert(peers.begin(), ignore_peers.begin(), ignore_peers.end());
|
|
ignore_peers.clear();
|
|
|
|
<div style="background: #ffff00" width="100%"> std::sort(peers.begin(), peers.end()
|
|
</div> , boost::bind(&peer_connection::download_queue_time, _1, 16*1024)
|
|
< boost::bind(&peer_connection::download_queue_time, _2, 16*1024));
|
|
}
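// editor's sketch (not part of the source): the todo for this entry
// suggests inserting the ignored peers at their ordered position rather
// than re-sorting the whole list. Assuming both lists are
// std::vector<peer_connection*> (as the surrounding calls imply), and
// keeping the same ordering criterion, that could look roughly like:
//
//   for (std::vector<peer_connection*>::iterator p = ignore_peers.begin()
//       , end(ignore_peers.end()); p != end; ++p)
//   {
//       std::vector<peer_connection*>::iterator pos = std::lower_bound(
//           peers.begin(), peers.end(), *p
//           , boost::bind(&peer_connection::download_queue_time, _1, 16*1024)
//           < boost::bind(&peer_connection::download_queue_time, _2, 16*1024));
//       peers.insert(pos, *p);
//   }
//   ignore_peers.clear();
//
// each insert still shifts elements, but it avoids re-running the full
// comparison-heavy sort when only a handful of peers were set aside.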
|
|
|
|
// if this peer's download time exceeds 2 seconds, we're done.
|
|
// We don't want to build unreasonably long request queues
|
|
if (!peers.empty() && peers[0]->download_queue_time() > milliseconds(2000))
|
|
break;
|
|
}
|
|
|
|
// commit all the time critical requests
|
|
for (std::set<peer_connection*>::iterator i = peers_with_requests.begin()
|
|
, end(peers_with_requests.end()); i != end; ++i)
|
|
{
|
|
(*i)->send_block_requests();
|
|
}
|
|
}
|
|
|
|
std::set<std::string> torrent::web_seeds(web_seed_entry::type_t type) const
|
|
{
|
|
TORRENT_ASSERT(is_single_thread());
|
|
std::set<std::string> ret;
|
|
for (std::list<web_seed_t>::const_iterator i = m_web_seeds.begin()
|
|
, end(m_web_seeds.end()); i != end; ++i)
|
|
{
|
|
if (i->peer_info.banned) continue;
|
|
if (i->removed) continue;
|
|
if (i->type != type) continue;
|
|
ret.insert(i->url);
|
|
}
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(225)">../src/torrent_peer.cpp:188</a></td><td>how do we deal with our external address changing?</td></tr><tr id="225" style="display: none;" colspan="3"><td colspan="3"><h2>how do we deal with our external address changing?</h2><h4>../src/torrent_peer.cpp:188</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> , is_v6_addr(false)
|
|
#endif
|
|
#if TORRENT_USE_I2P
|
|
, is_i2p_addr(false)
|
|
#endif
|
|
, on_parole(false)
|
|
, banned(false)
|
|
, supports_utp(true) // assume peers support utp
|
|
, confirmed_supports_utp(false)
|
|
, supports_holepunch(false)
|
|
, web_seed(false)
|
|
#if TORRENT_USE_ASSERTS
|
|
, in_use(false)
|
|
#endif
|
|
{
|
|
TORRENT_ASSERT((src & 0xff) == src);
|
|
}
|
|
|
|
boost::uint32_t torrent_peer::rank(external_ip const& external, int external_port) const
|
|
{
|
|
<div style="background: #ffff00" width="100%"> if (peer_rank == 0)
|
|
</div> peer_rank = peer_priority(
|
|
tcp::endpoint(external.external_address(this->address()), external_port)
|
|
, tcp::endpoint(this->address(), this->port));
|
|
return peer_rank;
|
|
}
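// editor's sketch (not part of the source): peer_rank is computed lazily
// and cached on the first call above, so one way to handle the external
// address changing (the question in the todo) is to reset the cached
// value when a new external address is learned, e.g. via a hypothetical
// helper:
//
//   void torrent_peer::clear_rank() { peer_rank = 0; }
//
// the next rank() call would then recompute the priority against the new
// external endpoint.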
|
|
|
|
#ifndef TORRENT_DISABLE_LOGGING
|
|
std::string torrent_peer::to_string() const
|
|
{
|
|
#if TORRENT_USE_I2P
|
|
if (is_i2p_addr) return dest();
|
|
#endif // TORRENT_USE_I2P
|
|
error_code ec;
|
|
return address().to_string(ec);
|
|
}
|
|
#endif
|
|
|
|
boost::uint64_t torrent_peer::total_download() const
|
|
{
|
|
if (connection != 0)
|
|
{
|
|
TORRENT_ASSERT(prev_amount_download == 0);
|
|
return connection->statistics().total_payload_download();
|
|
}
|
|
else
|
|
{
|
|
return boost::uint64_t(prev_amount_download) << 10;
|
|
}
|
|
}
|
|
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(226)">../src/udp_socket.cpp:320</a></td><td>it would be nice to detect this on posix systems also</td></tr><tr id="226" style="display: none;" colspan="3"><td colspan="3"><h2>it would be nice to detect this on posix systems also</h2><h4>../src/udp_socket.cpp:320</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> else
|
|
#endif
|
|
{
|
|
if (m_restart_v4) {
|
|
--m_restart_v4;
|
|
setup_read(s);
|
|
}
|
|
}
|
|
return;
|
|
}
|
|
if (m_abort) return;
|
|
|
|
CHECK_MAGIC;
|
|
|
|
for (;;)
|
|
{
|
|
error_code err;
|
|
udp::endpoint ep;
|
|
size_t bytes_transferred = s->receive_from(boost::asio::buffer(m_buf, m_buf_size), ep, 0, err);
|
|
|
|
<div style="background: #ffff00" width="100%">#ifdef TORRENT_WINDOWS
|
|
</div> if ((err == error_code(ERROR_MORE_DATA, system_category())
|
|
|| err == error_code(WSAEMSGSIZE, system_category()))
|
|
&& m_buf_size < 65536)
|
|
{
|
|
// if this function fails to allocate memory, m_buf_size
|
|
// is set to 0. In that case, don't issue the async_read().
|
|
set_buf_size(m_buf_size * 2);
|
|
if (m_buf_size == 0) return;
|
|
continue;
|
|
}
|
|
#endif
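// editor's sketch (not part of the source): on POSIX the corresponding
// signal is the MSG_TRUNC flag set in msghdr::msg_flags by recvmsg().
// Using the raw sockets API (fd standing in for the socket's native
// handle) it looks roughly like:
//
//   struct msghdr msg = {};
//   struct iovec iov = { m_buf, m_buf_size };
//   msg.msg_iov = &iov;
//   msg.msg_iovlen = 1;
//   ssize_t n = recvmsg(fd, &msg, 0);
//   bool const truncated = (msg.msg_flags & MSG_TRUNC) != 0;
//
// boost.asio's receive_from() does not expose msg_flags, so wiring this
// in would presumably mean going through the socket's native handle.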
|
|
|
|
if (err == boost::asio::error::would_block || err == boost::asio::error::try_again) break;
|
|
on_read_impl(ep, err, bytes_transferred);
|
|
|
|
// found on iOS, the socket will be disconnected when the app goes to the background. Try to reopen it.
|
|
if (err == boost::asio::error::not_connected || err == boost::asio::error::bad_descriptor)
|
|
{
|
|
ep = s->local_endpoint(err);
|
|
if (!err) {
|
|
bind(ep, err);
|
|
}
|
|
return;
|
|
}
|
|
}
|
|
call_drained_handler();
|
|
setup_read(s);
|
|
}
|
|
|
|
void udp_socket::call_handler(error_code const& ec, udp::endpoint const& ep, char const* buf, int size)
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(227)">../src/udp_socket.cpp:869</a></td><td>use the system resolver_interface here</td></tr><tr id="227" style="display: none;" colspan="3"><td colspan="3"><h2>use the system resolver_interface here</h2><h4>../src/udp_socket.cpp:869</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">
|
|
void udp_socket::set_proxy_settings(aux::proxy_settings const& ps)
|
|
{
|
|
CHECK_MAGIC;
|
|
TORRENT_ASSERT(is_single_thread());
|
|
|
|
error_code ec;
|
|
m_socks5_sock.close(ec);
|
|
m_tunnel_packets = false;
|
|
|
|
m_proxy_settings = ps;
|
|
|
|
if (m_abort) return;
|
|
|
|
if (ps.type == settings_pack::socks5
|
|
|| ps.type == settings_pack::socks5_pw)
|
|
{
|
|
m_queue_packets = true;
|
|
// connect to socks5 server and open up the UDP tunnel
|
|
|
|
<div style="background: #ffff00" width="100%"> tcp::resolver::query q(ps.hostname, to_string(ps.port).elems);
|
|
</div> ++m_outstanding_ops;
|
|
#if TORRENT_USE_ASSERTS
|
|
++m_outstanding_resolve;
|
|
#endif
|
|
#if defined TORRENT_ASIO_DEBUGGING
|
|
add_outstanding_async("udp_socket::on_name_lookup");
|
|
#endif
|
|
m_resolver.async_resolve(q, boost::bind(
|
|
&udp_socket::on_name_lookup, this, _1, _2));
|
|
}
|
|
}
|
|
|
|
void udp_socket::on_name_lookup(error_code const& e, tcp::resolver::iterator i)
|
|
{
|
|
#if defined TORRENT_ASIO_DEBUGGING
|
|
complete_async("udp_socket::on_name_lookup");
|
|
#endif
|
|
#if TORRENT_USE_ASSERTS
|
|
TORRENT_ASSERT(m_outstanding_resolve > 0);
|
|
--m_outstanding_resolve;
|
|
#endif
|
|
|
|
TORRENT_ASSERT(m_outstanding_ops > 0);
|
|
--m_outstanding_ops;
|
|
TORRENT_ASSERT(m_outstanding_ops == m_outstanding_connect
|
|
+ m_outstanding_timeout
|
|
+ m_outstanding_resolve
|
|
+ m_outstanding_socks);
|
|
|
|
if (m_abort) return;
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(228)">../src/ut_metadata.cpp:320</a></td><td>we really need to increment the refcounter on the torrent while this buffer is still in the peer's send buffer</td></tr><tr id="228" style="display: none;" colspan="3"><td colspan="3"><h2>we really need to increment the refcounter on the torrent
|
|
while this buffer is still in the peer's send buffer</h2><h4>../src/ut_metadata.cpp:320</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> if (!m_tp.need_loaded()) return;
|
|
metadata = m_tp.metadata().begin + offset;
|
|
metadata_piece_size = (std::min)(
|
|
int(m_tp.get_metadata_size() - offset), 16 * 1024);
|
|
TORRENT_ASSERT(metadata_piece_size > 0);
|
|
TORRENT_ASSERT(offset >= 0);
|
|
TORRENT_ASSERT(offset + metadata_piece_size <= int(m_tp.get_metadata_size()));
|
|
}
|
|
|
|
char msg[200];
|
|
char* header = msg;
|
|
char* p = &msg[6];
|
|
int len = bencode(p, e);
|
|
int total_size = 2 + len + metadata_piece_size;
|
|
namespace io = detail;
|
|
io::write_uint32(total_size, header);
|
|
io::write_uint8(bt_peer_connection::msg_extended, header);
|
|
io::write_uint8(m_message_index, header);
|
|
|
|
m_pc.send_buffer(msg, len + 6);
|
|
<div style="background: #ffff00" width="100%"> if (metadata_piece_size) m_pc.append_const_send_buffer(
|
|
</div> metadata, metadata_piece_size);
|
|
|
|
m_pc.stats_counters().inc_stats_counter(counters::num_outgoing_extended);
|
|
m_pc.stats_counters().inc_stats_counter(counters::num_outgoing_metadata);
|
|
}
|
|
|
|
virtual bool on_extended(int length
|
|
, int extended_msg, buffer::const_interval body) TORRENT_OVERRIDE
|
|
{
|
|
if (extended_msg != 2) return false;
|
|
if (m_message_index == 0) return false;
|
|
|
|
if (length > 17 * 1024)
|
|
{
|
|
#ifndef TORRENT_DISABLE_LOGGING
|
|
m_pc.peer_log(peer_log_alert::incoming_message, "UT_METADATA"
|
|
, "packet too big %d", length);
|
|
#endif
|
|
m_pc.disconnect(errors::invalid_metadata_message, op_bittorrent, 2);
|
|
return true;
|
|
}
|
|
|
|
if (!m_pc.packet_finished()) return true;
|
|
|
|
int len;
|
|
entry msg = bdecode(body.begin, body.end, len);
|
|
if (msg.type() != entry::dictionary_t)
|
|
{
|
|
#ifndef TORRENT_DISABLE_LOGGING
|
|
m_pc.peer_log(peer_log_alert::incoming_message, "UT_METADATA"
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(229)">../src/utp_stream.cpp:1761</a></td><td>this loop is not very efficient. It could be fixed by having a separate list of sequence numbers that need resending</td></tr><tr id="229" style="display: none;" colspan="3"><td colspan="3"><h2>this loop is not very efficient. It could be fixed by having
|
|
a separate list of sequence numbers that need resending</h2><h4>../src/utp_stream.cpp:1761</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">};
|
|
|
|
// sends a packet, pulls data from the write buffer (if there's any)
|
|
// if ack is true, we need to send a packet regardless of if there's
|
|
// any data. Returns true if we could send more data (i.e. call
|
|
// send_pkt() again)
|
|
// returns true if there is more space for payload in our
|
|
// congestion window, false if there is no more space.
|
|
bool utp_socket_impl::send_pkt(int flags)
|
|
{
|
|
#ifdef TORRENT_EXPENSIVE_INVARIANT_CHECKS
|
|
INVARIANT_CHECK;
|
|
#endif
|
|
|
|
bool force = (flags & pkt_ack) || (flags & pkt_fin);
|
|
|
|
// TORRENT_ASSERT(m_state != UTP_STATE_FIN_SENT || (flags & pkt_ack));
|
|
|
|
// first see if we need to resend any packets
|
|
|
|
<div style="background: #ffff00" width="100%"> for (int i = (m_acked_seq_nr + 1) & ACK_MASK; i != m_seq_nr; i = (i + 1) & ACK_MASK)
|
|
</div> {
|
|
packet* p = m_outbuf.at(i);
|
|
if (!p) continue;
|
|
if (!p->need_resend) continue;
|
|
if (!resend_packet(p))
|
|
{
|
|
// we couldn't resend the packet. It probably doesn't
|
|
// fit in our cwnd. If force is set, we need to continue
|
|
// to send our packet anyway, if we don't have force set,
|
|
// we might as well return
|
|
if (!force) return false;
|
|
// resend_packet might have failed
|
|
if (m_state == UTP_STATE_ERROR_WAIT || m_state == UTP_STATE_DELETE) return false;
|
|
break;
|
|
}
|
|
|
|
// don't fast-resend this packet
|
|
if (m_fast_resend_seq_nr == i)
|
|
m_fast_resend_seq_nr = (m_fast_resend_seq_nr + 1) & ACK_MASK;
|
|
}
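// editor's sketch (not part of the source): the todo for this entry
// suggests a dedicated list of sequence numbers that need resending, so
// this pass over the whole window between m_acked_seq_nr and m_seq_nr
// goes away. With a hypothetical member m_resend_queue (name made up),
// filled in wherever need_resend is set, the scan could become:
//
//   while (!m_resend_queue.empty())
//   {
//       int const seq = m_resend_queue.front();
//       packet* p = m_outbuf.at(seq);
//       if (p == NULL || !p->need_resend)
//       {
//           m_resend_queue.pop_front();
//           continue;
//       }
//       if (!resend_packet(p)) break; // no room in the cwnd, try later
//       m_resend_queue.pop_front();
//   }
//
// acked packets would have to be purged from (or ignored by) the queue so
// it never refers to recycled slots.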
|
|
|
|
int sack = 0;
|
|
if (m_inbuf.size())
|
|
{
|
|
// the SACK bitfield should ideally fit all
|
|
// the pieces we have successfully received
|
|
sack = (m_inbuf.span() + 7) / 8;
|
|
if (sack > 32) sack = 32;
|
|
}
|
|
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(230)">../src/web_connection_base.cpp:81</a></td><td>introduce a web-seed default class which has a low download priority</td></tr><tr id="230" style="display: none;" colspan="3"><td colspan="3"><h2>introduce a web-seed default class which has a low download priority</h2><h4>../src/web_connection_base.cpp:81</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> peer_connection_args const& pack
|
|
, web_seed_t& web)
|
|
: peer_connection(pack)
|
|
, m_first_request(true)
|
|
, m_ssl(false)
|
|
, m_external_auth(web.auth)
|
|
, m_extra_headers(web.extra_headers)
|
|
, m_parser(http_parser::dont_parse_chunks)
|
|
, m_body_start(0)
|
|
{
|
|
TORRENT_ASSERT(&web.peer_info == pack.peerinfo);
|
|
// when going through a proxy, we don't necessarily have an endpoint here,
|
|
// since the proxy might be resolving the hostname, not us
|
|
TORRENT_ASSERT(web.endpoints.empty() || web.endpoints.front() == pack.endp);
|
|
|
|
INVARIANT_CHECK;
|
|
|
|
TORRENT_ASSERT(is_outgoing());
|
|
|
|
// we only want left-over bandwidth
|
|
<div style="background: #ffff00" width="100%">
|
|
</div> std::string protocol;
|
|
error_code ec;
|
|
boost::tie(protocol, m_basic_auth, m_host, m_port, m_path)
|
|
= parse_url_components(web.url, ec);
|
|
TORRENT_ASSERT(!ec);
|
|
|
|
if (m_port == -1 && protocol == "http")
|
|
m_port = 80;
|
|
|
|
#ifdef TORRENT_USE_OPENSSL
|
|
if (protocol == "https")
|
|
{
|
|
m_ssl = true;
|
|
if (m_port == -1) m_port = 443;
|
|
}
|
|
#endif
|
|
|
|
if (!m_basic_auth.empty())
|
|
m_basic_auth = base64encode(m_basic_auth);
|
|
|
|
m_server_string = "URL seed @ ";
|
|
m_server_string += m_host;
|
|
}
|
|
|
|
int web_connection_base::timeout() const
|
|
{
|
|
// since this is a web seed, change the timeout
|
|
// according to the settings.
|
|
return m_settings.get_int(settings_pack::urlseed_timeout);
|
|
}
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(231)">../src/kademlia/dht_storage.cpp:426</a></td><td>c++11 use a lambda here instead</td></tr><tr id="231" style="display: none;" colspan="3"><td colspan="3"><h2>c++11 use a lambda here instead</h2><h4>../src/kademlia/dht_storage.cpp:426</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> return true;
|
|
}
|
|
|
|
void put_mutable_item(sha1_hash const& target
|
|
, char const* buf, int size
|
|
, char const* sig
|
|
, boost::int64_t seq
|
|
, char const* pk
|
|
, char const* salt, int salt_size
|
|
, address const& addr) TORRENT_OVERRIDE
|
|
{
|
|
dht_mutable_table_t::iterator i = m_mutable_table.find(target);
|
|
if (i == m_mutable_table.end())
|
|
{
|
|
// this is the case where we don't have an item in this slot
|
|
// make sure we don't add too many items
|
|
if (int(m_mutable_table.size()) >= m_settings.max_dht_items)
|
|
{
|
|
// delete the least important one (i.e. the one
|
|
// the fewest peers are announcing)
|
|
<div style="background: #ffff00" width="100%"> dht_mutable_table_t::iterator j = std::min_element(m_mutable_table.begin()
|
|
</div> , m_mutable_table.end()
|
|
, boost::bind(&dht_immutable_item::num_announcers
|
|
, boost::bind(&dht_mutable_table_t::value_type::second, _1))
|
|
< boost::bind(&dht_immutable_item::num_announcers
|
|
, boost::bind(&dht_mutable_table_t::value_type::second, _2)));
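				// a C++11 sketch of the lambda this TODO asks for (not part of
				// the current code; it would replace the boost::bind expression
				// above):
				// dht_mutable_table_t::iterator j = std::min_element(
				// 	m_mutable_table.begin(), m_mutable_table.end()
				// 	, [](dht_mutable_table_t::value_type const& lhs
				// 		, dht_mutable_table_t::value_type const& rhs)
				// 	{ return lhs.second.num_announcers < rhs.second.num_announcers; });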
|
|
TORRENT_ASSERT(j != m_mutable_table.end());
|
|
free(j->second.value);
|
|
free(j->second.salt);
|
|
m_mutable_table.erase(j);
|
|
m_counters.mutable_data -= 1;
|
|
}
|
|
dht_mutable_item to_add;
|
|
to_add.value = static_cast<char*>(malloc(size));
|
|
to_add.size = size;
|
|
to_add.seq = seq;
|
|
to_add.salt = NULL;
|
|
to_add.salt_size = 0;
|
|
if (salt_size > 0)
|
|
{
|
|
to_add.salt = static_cast<char*>(malloc(salt_size));
|
|
to_add.salt_size = salt_size;
|
|
memcpy(to_add.salt, salt, salt_size);
|
|
}
|
|
memcpy(to_add.sig, sig, sizeof(to_add.sig));
|
|
memcpy(to_add.value, buf, size);
|
|
memcpy(&to_add.key, pk, sizeof(to_add.key));
|
|
|
|
boost::tie(i, boost::tuples::ignore) = m_mutable_table.insert(
|
|
std::make_pair(target, to_add));
|
|
m_counters.mutable_data += 1;
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(232)">../src/kademlia/node.cpp:721</a></td><td>in the future, this function should update all the dht related counters. For now, it just updates the storage related ones.</td></tr><tr id="232" style="display: none;" colspan="3"><td colspan="3"><h2>in the future, this function should update all the
|
|
dht related counters. For now, it just updates the storage
|
|
related ones.</h2><h4>../src/kademlia/node.cpp:721</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">
|
|
return d;
|
|
}
|
|
|
|
void node::status(std::vector<dht_routing_bucket>& table
|
|
, std::vector<dht_lookup>& requests)
|
|
{
|
|
mutex_t::scoped_lock l(m_mutex);
|
|
|
|
m_table.status(table);
|
|
|
|
for (std::set<traversal_algorithm*>::iterator i = m_running_requests.begin()
|
|
, end(m_running_requests.end()); i != end; ++i)
|
|
{
|
|
requests.push_back(dht_lookup());
|
|
dht_lookup& lookup = requests.back();
|
|
(*i)->status(lookup);
|
|
}
|
|
}
|
|
|
|
<div style="background: #ffff00" width="100%">void node::update_stats_counters(counters& c) const
|
|
</div>{
|
|
const dht_storage_counters& dht_cnt = m_storage->counters();
|
|
c.set_value(counters::dht_torrents, dht_cnt.torrents);
|
|
c.set_value(counters::dht_peers, dht_cnt.peers);
|
|
c.set_value(counters::dht_immutable_data, dht_cnt.immutable_data);
|
|
c.set_value(counters::dht_mutable_data, dht_cnt.mutable_data);
|
|
}
|
|
|
|
#ifndef TORRENT_NO_DEPRECATE
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(233)">../src/kademlia/put_data.cpp:97</a></td><td>what if o is not an instance of put_data_observer? This needs to be redesigned for better type safety.</td></tr><tr id="233" style="display: none;" colspan="3"><td colspan="3"><h2>what if o is not an instance of put_data_observer? This needs to be
|
|
redesigned for better type safety.</h2><h4>../src/kademlia/put_data.cpp:97</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">{
|
|
m_done = true;
|
|
|
|
#ifndef TORRENT_DISABLE_LOGGING
|
|
get_node().observer()->log(dht_logger::traversal, "[%p] %s DONE, response %d, timeout %d"
|
|
, static_cast<void*>(this), name(), m_responses, m_timeouts);
|
|
#endif
|
|
|
|
m_put_callback(m_data, m_responses);
|
|
traversal_algorithm::done();
|
|
}
|
|
|
|
bool put_data::invoke(observer_ptr o)
|
|
{
|
|
if (m_done)
|
|
{
|
|
m_invoke_count = -1;
|
|
return false;
|
|
}
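	// a cheap guard for the type-safety concern in this TODO (a sketch; it
	// only checks anything in builds with asserts and RTTI enabled):
	// TORRENT_ASSERT(dynamic_cast<put_data_observer*>(o.get()) != 0);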
|
|
|
|
<div style="background: #ffff00" width="100%"> put_data_observer* po = static_cast<put_data_observer*>(o.get());
|
|
</div>
|
|
entry e;
|
|
e["y"] = "q";
|
|
e["q"] = "put";
|
|
entry& a = e["a"];
|
|
a["v"] = m_data.value();
|
|
a["token"] = po->m_token;
|
|
if (m_data.is_mutable())
|
|
{
|
|
a["k"] = std::string(m_data.pk().data(), item_pk_len);
|
|
a["seq"] = m_data.seq();
|
|
a["sig"] = std::string(m_data.sig().data(), item_sig_len);
|
|
if (!m_data.salt().empty())
|
|
{
|
|
a["salt"] = m_data.salt();
|
|
}
|
|
}
|
|
|
|
return m_node.m_rpc.invoke(e, o->target_ep(), o);
|
|
}
|
|
|
|
} } // namespace libtorrent::dht
|
|
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(234)">../include/libtorrent/announce_entry.hpp:97</a></td><td>include the number of peers received from this tracker, at last announce</td></tr><tr id="234" style="display: none;" colspan="3"><td colspan="3"><h2>include the number of peers received from this tracker, at last
|
|
announce</h2><h4>../include/libtorrent/announce_entry.hpp:97</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">
|
|
// if this tracker failed the last time it was contacted
|
|
// this error code specifies what error occurred
|
|
error_code last_error;
|
|
|
|
// returns the number of seconds to the next announce on this tracker.
|
|
// ``min_announce_in()`` returns the number of seconds until we are
|
|
// allowed to force another tracker update with this tracker.
|
|
//
|
|
// If the last time this tracker was contacted failed, ``last_error`` is
|
|
// the error code describing what error occurred.
|
|
int next_announce_in() const;
|
|
int min_announce_in() const;
|
|
|
|
// the time of next tracker announce
|
|
time_point next_announce;
|
|
|
|
// no announces before this time
|
|
time_point min_announce;
|
|
|
|
<div style="background: #ffff00" width="100%">
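		// what this TODO suggests might look like (a hypothetical member, not
		// part of announce_entry today):
		// int last_announce_peer_count; // peers received at the last announce, or -1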
|
|
</div> // these are either -1 or the scrape information this tracker last
|
|
// responded with. *incomplete* is the current number of downloaders in
|
|
// the swarm, *complete* is the current number of seeds in the swarm and
|
|
// *downloaded* is the cumulative number of completed downloads of this
|
|
// torrent, since the beginning of time (from this tracker's point of
|
|
// view).
|
|
|
|
// if this tracker has returned scrape data, these fields are filled in
|
|
// with valid numbers. Otherwise they are set to -1. the number of
|
|
// current downloaders
|
|
int scrape_incomplete;
|
|
int scrape_complete;
|
|
int scrape_downloaded;
|
|
|
|
// the tier this tracker belongs to
|
|
boost::uint8_t tier;
|
|
|
|
// the max number of failures to announce to this tracker in
|
|
// a row, before this tracker is not used anymore. 0 means unlimited
|
|
boost::uint8_t fail_limit;
|
|
|
|
// the number of times in a row we have failed to announce to this
|
|
// tracker.
|
|
boost::uint8_t fails:7;
|
|
|
|
// true while we're waiting for a response from the tracker.
|
|
bool updating:1;
|
|
|
|
// flags for the source bitmask, each indicating where
|
|
// we heard about this tracker
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(235)">../include/libtorrent/block_cache.hpp:223</a></td><td>make this 32 bits and to count seconds since the block cache was created</td></tr><tr id="235" style="display: none;" colspan="3"><td colspan="3"><h2>make this 32 bits and to count seconds since the block cache was created</h2><h4>../include/libtorrent/block_cache.hpp:223</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">
|
|
bool operator==(cached_piece_entry const& rhs) const
|
|
{ return storage.get() == rhs.storage.get() && piece == rhs.piece; }
|
|
|
|
// if this is set, we'll be calculating the hash
|
|
// for this piece. This member stores the interim
|
|
// state while we're calculating the hash.
|
|
partial_hash* hash;
|
|
|
|
// set to a unique identifier of a peer that last
|
|
// requested from this piece.
|
|
void* last_requester;
|
|
|
|
// the pointers to the block data. If this is a ghost
|
|
// cache entry, there won't be any data here
|
|
boost::shared_array<cached_block_entry> blocks;
|
|
|
|
// the last time a block was written to this piece
|
|
// plus the minimum amount of time the block is guaranteed
|
|
// to stay in the cache
|
|
<div style="background: #ffff00" width="100%"> time_point expire;
|
|
</div>
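		// the TODO above would turn this into something like (a sketch only):
		// boost::uint32_t expire; // seconds since the block cache was created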
|
|
boost::uint64_t piece:22;
|
|
|
|
// the number of dirty blocks in this piece
|
|
boost::uint64_t num_dirty:14;
|
|
|
|
// the number of blocks in the cache for this piece
|
|
boost::uint64_t num_blocks:14;
|
|
|
|
// the total number of blocks in this piece (and the number
|
|
// of elements in the blocks array)
|
|
boost::uint64_t blocks_in_piece:14;
|
|
|
|
// ---- 64 bit boundary ----
|
|
|
|
// while we have an outstanding async hash operation
|
|
// working on this piece, 'hashing' is set to 1
|
|
// When the operation returns, this is set to 0.
|
|
boost::uint32_t hashing:1;
|
|
|
|
// if we've completed at least one hash job on this
|
|
// piece, and returned it. This is set to one
|
|
boost::uint32_t hashing_done:1;
|
|
|
|
// if this is true, whenever refcount hits 0,
|
|
// this piece should be deleted
|
|
boost::uint32_t marked_for_deletion:1;
|
|
|
|
// this is set to true once we flush blocks past
|
|
// the hash cursor. Once this happens, there's
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(236)">../include/libtorrent/config.hpp:362</a></td><td>Make this count Unicode characters instead of bytes on windows</td></tr><tr id="236" style="display: none;" colspan="3"><td colspan="3"><h2>Make this count Unicode characters instead of bytes on windows</h2><h4>../include/libtorrent/config.hpp:362</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">#pragma message ( "unknown OS, assuming BSD" )
|
|
#else
|
|
#warning "unknown OS, assuming BSD"
|
|
#endif
|
|
|
|
#define TORRENT_BSD
|
|
#endif
|
|
|
|
#if defined __GNUC__ && !(defined TORRENT_USE_OSATOMIC \
|
|
|| defined TORRENT_USE_INTERLOCKED_ATOMIC \
|
|
|| defined TORRENT_USE_BEOS_ATOMIC \
|
|
|| defined TORRENT_USE_SOLARIS_ATOMIC)
|
|
// atomic operations in GCC were introduced in 4.1.1
|
|
# if (__GNUC__ >= 4 && __GNUC_MINOR__ >= 1 && __GNUC_PATCHLEVEL__ >= 1) || __GNUC__ > 4
|
|
# define TORRENT_USE_GCC_ATOMIC 1
|
|
# endif
|
|
#endif
|
|
|
|
// on windows, NAME_MAX refers to Unicode characters
|
|
// on linux it refers to bytes (utf-8 encoded)
|
|
<div style="background: #ffff00" width="100%">
|
|
</div>// windows
|
|
#if defined FILENAME_MAX
|
|
#define TORRENT_MAX_PATH FILENAME_MAX
|
|
|
|
// beos
|
|
#elif defined B_PATH_NAME_LENGTH
|
|
#define TORRENT_MAX_PATH B_PATH_NAME_LENGTH
|
|
|
|
// solaris
|
|
#elif defined MAXPATH
|
|
#define TORRENT_MAX_PATH MAXPATH
|
|
|
|
// none of the above
|
|
#else
|
|
// this is the maximum number of characters in a
|
|
// path element / filename on windows and also on many filesystems commonly used
|
|
// on linux
|
|
#define TORRENT_MAX_PATH 255
|
|
|
|
#ifdef _MSC_VER
|
|
#pragma message ( "unknown platform, assuming the longest path is 255" )
|
|
#else
|
|
#warning "unknown platform, assuming the longest path is 255"
|
|
#endif
|
|
|
|
#endif
|
|
|
|
#define TORRENT_UNUSED(x) (void)(x)
|
|
|
|
#if (defined _MSC_VER && _MSC_VER < 1900) && !defined TORRENT_MINGW
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(237)">../include/libtorrent/disk_buffer_pool.hpp:137</a></td><td>try to remove the observers, only using the async_allocate handlers</td></tr><tr id="237" style="display: none;" colspan="3"><td colspan="3"><h2>try to remove the observers, only using the async_allocate handlers</h2><h4>../include/libtorrent/disk_buffer_pool.hpp:137</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">
|
|
// number of bytes per block. The BitTorrent
|
|
// protocol defines the block size to 16 KiB.
|
|
const int m_block_size;
|
|
|
|
// number of disk buffers currently allocated
|
|
int m_in_use;
|
|
|
|
// cache size limit
|
|
int m_max_use;
|
|
|
|
// if we have exceeded the limit, we won't start
|
|
// allowing allocations again until we drop below
|
|
// this low watermark
|
|
int m_low_watermark;
|
|
|
|
// if we exceed the max number of buffers, we start
|
|
// adding up callbacks to this queue. Once the number
|
|
// of buffers in use drops below the low watermark,
|
|
// we start calling these functions back
|
|
<div style="background: #ffff00" width="100%"> std::vector<boost::shared_ptr<disk_observer> > m_observers;
|
|
</div>
|
|
// these handlers are executed when a new buffer is available
|
|
std::vector<handler_t> m_handlers;
|
|
|
|
// callback used to tell the cache it needs to free up some blocks
|
|
boost::function<void()> m_trigger_cache_trim;
|
|
|
|
// set to true to throttle more allocations
|
|
bool m_exceeded_max_size;
|
|
|
|
// this is the main thread io_service. Callbacks are
|
|
// posted on this in order to have them execute in
|
|
// the main thread.
|
|
io_service& m_ios;
|
|
|
|
private:
|
|
|
|
void check_buffer_level(mutex::scoped_lock& l);
|
|
|
|
mutable mutex m_pool_mutex;
|
|
|
|
int m_cache_buffer_chunk_size;
|
|
bool m_lock_disk_cache;
|
|
|
|
#if TORRENT_HAVE_MMAP
|
|
// the file descriptor of the cache mmap file
|
|
int m_cache_fd;
|
|
// the pointer to the block of virtual address space
|
|
// making up the mmapped cache space
|
|
char* m_cache_pool;
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(238)">../include/libtorrent/file.hpp:173</a></td><td>move this into a separate header file, TU pair</td></tr><tr id="238" style="display: none;" colspan="3"><td colspan="3"><h2>move this into a separate header file, TU pair</h2><h4>../include/libtorrent/file.hpp:173</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> TORRENT_EXTRA_EXPORT std::string parent_path(std::string const& f);
|
|
TORRENT_EXTRA_EXPORT bool has_parent_path(std::string const& f);
|
|
TORRENT_EXTRA_EXPORT char const* filename_cstr(char const* f);
|
|
|
|
// internal used by create_torrent.hpp
|
|
TORRENT_EXTRA_EXPORT std::string filename(std::string const& f);
|
|
TORRENT_EXTRA_EXPORT std::string combine_path(std::string const& lhs
|
|
, std::string const& rhs);
|
|
TORRENT_EXTRA_EXPORT void append_path(std::string& branch
|
|
, std::string const& leaf);
|
|
TORRENT_EXTRA_EXPORT void append_path(std::string& branch
|
|
, char const* str, int len);
|
|
// internal used by create_torrent.hpp
|
|
TORRENT_EXTRA_EXPORT std::string complete(std::string const& f);
|
|
TORRENT_EXTRA_EXPORT bool is_complete(std::string const& f);
|
|
TORRENT_EXTRA_EXPORT std::string current_working_directory();
|
|
#if TORRENT_USE_UNC_PATHS
|
|
TORRENT_EXTRA_EXPORT std::string canonicalize_path(std::string const& f);
|
|
#endif
|
|
|
|
<div style="background: #ffff00" width="100%"> class TORRENT_EXTRA_EXPORT directory : public boost::noncopyable
|
|
</div> {
|
|
public:
|
|
directory(std::string const& path, error_code& ec);
|
|
~directory();
|
|
void next(error_code& ec);
|
|
std::string file() const;
|
|
boost::uint64_t inode() const;
|
|
bool done() const { return m_done; }
|
|
private:
|
|
#ifdef TORRENT_WINDOWS
|
|
HANDLE m_handle;
|
|
int m_inode;
|
|
#if TORRENT_USE_WSTRING
|
|
WIN32_FIND_DATAW m_fd;
|
|
#else
|
|
WIN32_FIND_DATAA m_fd;
|
|
#endif
|
|
#else
|
|
DIR* m_handle;
|
|
// the dirent struct contains a zero-sized
|
|
// array at the end, it will end up referring
|
|
// to the m_name field
|
|
struct dirent m_dirent;
|
|
char m_name[TORRENT_MAX_PATH + 1]; // +1 to make room for null
|
|
#endif
|
|
bool m_done;
|
|
};
|
|
|
|
struct file;
|
|
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(239)">../include/libtorrent/heterogeneous_queue.hpp:184</a></td><td>if this throws, should we do anything?</td></tr><tr id="239" style="display: none;" colspan="3"><td colspan="3"><h2>if this throws, should we do anything?</h2><h4>../include/libtorrent/heterogeneous_queue.hpp:184</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> - 1) / sizeof(uintptr_t);
|
|
|
|
void grow_capacity(int size)
|
|
{
|
|
int amount_to_grow = (std::max)(size + header_size
|
|
, (std::max)(m_capacity * 3 / 2, 128));
|
|
|
|
uintptr_t* new_storage = new uintptr_t[m_capacity + amount_to_grow];
|
|
|
|
uintptr_t* src = m_storage;
|
|
uintptr_t* dst = new_storage;
|
|
uintptr_t const* const end = m_storage + m_size;
|
|
while (src < end)
|
|
{
|
|
header_t* src_hdr = reinterpret_cast<header_t*>(src);
|
|
header_t* dst_hdr = reinterpret_cast<header_t*>(dst);
|
|
*dst_hdr = *src_hdr;
|
|
src += header_size;
|
|
dst += header_size;
|
|
TORRENT_ASSERT(src + src_hdr->len <= end);
|
|
<div style="background: #ffff00" width="100%"> src_hdr->move(dst, src);
|
|
</div> src += src_hdr->len;
|
|
dst += src_hdr->len;
|
|
}
|
|
|
|
delete[] m_storage;
|
|
m_storage = new_storage;
|
|
m_capacity += amount_to_grow;
|
|
}
|
|
|
|
template <class U>
|
|
static void move(uintptr_t* dst, uintptr_t* src)
|
|
{
|
|
U* rhs = reinterpret_cast<U*>(src);
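			// one way to address the "if this throws" TODO above (a sketch,
			// assuming C++11 and <type_traits>): a throwing move would leave the
			// relocation loop in grow_capacity() half done, so require noexcept
			// moves up front:
			// static_assert(std::is_nothrow_move_constructible<U>::value
			// 	, "heterogeneous_queue requires nothrow-movable types");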
|
|
#if __cplusplus >= 201103L
|
|
new (dst) U(std::move(*rhs));
|
|
#else
|
|
new (dst) U(*rhs);
|
|
#endif
|
|
rhs->~U();
|
|
}
|
|
|
|
uintptr_t* m_storage;
|
|
// number of uintptr_t's allocated under m_storage
|
|
int m_capacity;
|
|
// the number of uintptr_t's used under m_storage
|
|
int m_size;
|
|
// the number of objects allocated under m_storage
|
|
int m_num_items;
|
|
};
|
|
}
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(240)">../include/libtorrent/identify_client.hpp:50</a></td><td>hide these declarations when deprecated functions are disabled, and expose them internally in a header under aux_.</td></tr><tr id="240" style="display: none;" colspan="3"><td colspan="3"><h2>hide these declarations when deprecated functions are disabled, and
|
|
expose them internally in a header under aux_.</h2><h4>../include/libtorrent/identify_client.hpp:50</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">
|
|
*/
|
|
|
|
#ifndef TORRENT_IDENTIFY_CLIENT_HPP_INCLUDED
|
|
#define TORRENT_IDENTIFY_CLIENT_HPP_INCLUDED
|
|
|
|
#include "libtorrent/config.hpp"
|
|
|
|
#include "libtorrent/aux_/disable_warnings_push.hpp"
|
|
|
|
#include <boost/optional.hpp>
|
|
|
|
#include "libtorrent/aux_/disable_warnings_pop.hpp"
|
|
|
|
#include "libtorrent/peer_id.hpp"
|
|
#include "libtorrent/fingerprint.hpp"
|
|
|
|
namespace libtorrent
|
|
{
|
|
|
|
<div style="background: #ffff00" width="100%">
|
|
</div> // these functions don't really need to be public. This mechanism of
|
|
// advertising client software and version is also out-dated.
|
|
|
|
	// This function can be used to extract a string describing a client
|
|
// version from its peer-id. It will recognize most clients that have this
|
|
// kind of identification in the peer-id.
|
|
TORRENT_DEPRECATED_EXPORT TORRENT_DEPRECATED
|
|
std::string identify_client(const peer_id& p);
|
|
|
|
// Returns an optional fingerprint if any can be identified from the peer
|
|
// id. This can be used to automate the identification of clients. It will
|
|
// not be able to identify peers with non- standard encodings. Only Azureus
|
|
// style, Shadow's style and Mainline style.
|
|
TORRENT_DEPRECATED_EXPORT TORRENT_DEPRECATED
|
|
boost::optional<fingerprint>
|
|
client_fingerprint(peer_id const& p);
|
|
|
|
}
|
|
|
|
#endif // TORRENT_IDENTIFY_CLIENT_HPP_INCLUDED
|
|
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(241)">../include/libtorrent/peer_connection.hpp:209</a></td><td>make this a raw pointer (to save size in the first cache line) and make the constructor take a raw pointer. torrent objects should always outlive their peers</td></tr><tr id="241" style="display: none;" colspan="3"><td colspan="3"><h2>make this a raw pointer (to save size in
|
|
the first cache line) and make the constructor
|
|
take a raw pointer. torrent objects should always
|
|
outlive their peers</h2><h4>../include/libtorrent/peer_connection.hpp:209</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> , m_connecting(!t.expired())
|
|
, m_endgame_mode(false)
|
|
, m_snubbed(false)
|
|
, m_interesting(false)
|
|
, m_choked(true)
|
|
, m_corked(false)
|
|
, m_ignore_stats(false)
|
|
{}
|
|
|
|
protected:
|
|
|
|
// the pieces the other end have
|
|
bitfield m_have_piece;
|
|
|
|
// this is the torrent this connection is
|
|
// associated with. If the connection is an
|
|
// incoming connection, this is set to zero
|
|
// until the info_hash is received. Then it's
|
|
// set to the torrent it belongs to.
|
|
|
|
<div style="background: #ffff00" width="100%"> boost::weak_ptr<torrent> m_torrent;
|
|
</div>
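		// the TODO above suggests roughly this shape (a sketch; it relies on the
		// torrent strictly outliving its peer connections):
		// torrent* m_torrent;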
|
|
public:
|
|
|
|
// a back reference to the session
|
|
// the peer belongs to.
|
|
aux::session_interface& m_ses;
|
|
|
|
// settings that apply to this peer
|
|
aux::session_settings const& m_settings;
|
|
|
|
protected:
|
|
|
|
// this is true if this connection has been added
|
|
// to the list of connections that will be closed.
|
|
bool m_disconnecting:1;
|
|
|
|
// this is true until this socket has become
|
|
// writable for the first time (i.e. the
|
|
// connection completed). While connecting
|
|
// the timeout will not be triggered. This is
|
|
// because windows XP SP2 may delay connection
|
|
// attempts, which means that the connection
|
|
// may not even have been attempted when the
|
|
// time out is reached.
|
|
bool m_connecting:1;
|
|
|
|
// this is set to true if the last time we tried to
|
|
// pick a piece to download, we could only find
|
|
// blocks that were already requested from other
|
|
// peers. In this case, we should not try to pick
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(242)">../include/libtorrent/peer_connection.hpp:1044</a></td><td>factor this out into its own class with a virtual interface. torrent and session should implement this interface</td></tr><tr id="242" style="display: none;" colspan="3"><td colspan="3"><h2>factor this out into its own class with a virtual interface.
|
|
torrent and session should implement this interface</h2><h4>../include/libtorrent/peer_connection.hpp:1044</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">
|
|
// the local endpoint for this peer, i.e. our address
|
|
// and our port. If this is set for outgoing connections
|
|
// before the connection completes, it means we want to
|
|
// force the connection to be bound to the specified interface.
|
|
// if it ends up being bound to a different local IP, the connection
|
|
// is closed.
|
|
tcp::endpoint m_local;
|
|
|
|
// remote peer's id
|
|
peer_id m_peer_id;
|
|
|
|
// the bandwidth channels, upload and download
|
|
// keeps track of the current quotas
|
|
bandwidth_channel m_bandwidth_channel[num_channels];
|
|
|
|
protected:
|
|
// statistics about upload and download speeds
|
|
// and total amount of uploads and downloads for
|
|
// this peer
|
|
<div style="background: #ffff00" width="100%"> stat m_statistics;
|
|
</div>
|
|
// the number of outstanding bytes expected
|
|
// to be received by extensions
|
|
int m_extension_outstanding_bytes;
|
|
|
|
// the number of time critical requests
|
|
// queued up in the m_request_queue that
|
|
// soon will be committed to the download
|
|
// queue. This is included in download_queue_time()
|
|
// so that it can be used while adding more
|
|
// requests and take the previous requests
|
|
// into account without submitting it all
|
|
// immediately
|
|
int m_queued_time_critical;
|
|
|
|
// the number of bytes we are currently reading
|
|
// from disk, that will be added to the send
|
|
// buffer as soon as they complete
|
|
int m_reading_bytes;
|
|
|
|
// options used for the piece picker. These flags will
|
|
// be augmented with flags controlled by other settings
|
|
// like sequential download etc. These are here to
|
|
// let plugins control flags that should always be set
|
|
int m_picker_options;
|
|
|
|
// the number of invalid piece-requests
|
|
// we have got from this peer. If the request
|
|
// queue gets empty, and there have been
|
|
// invalid requests, we can assume the
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(243)">../include/libtorrent/peer_connection_interface.hpp:47</a></td><td>make this interface smaller!</td></tr><tr id="243" style="display: none;" colspan="3"><td colspan="3"><h2>make this interface smaller!</h2><h4>../include/libtorrent/peer_connection_interface.hpp:47</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
|
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
|
POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
*/
|
|
|
|
#ifndef TORRENT_PEER_CONNECTION_INTERFACE_HPP
|
|
#define TORRENT_PEER_CONNECTION_INTERFACE_HPP
|
|
|
|
#include "libtorrent/socket.hpp"
|
|
#include "libtorrent/error_code.hpp"
|
|
#include "libtorrent/alert_types.hpp"
|
|
#include "libtorrent/operations.hpp" // for operation_t enum
|
|
|
|
namespace libtorrent
|
|
{
|
|
struct torrent_peer;
|
|
class stat;
|
|
struct peer_info;
|
|
|
|
<div style="background: #ffff00" width="100%"> struct TORRENT_EXTRA_EXPORT peer_connection_interface
|
|
</div> {
|
|
virtual tcp::endpoint const& remote() const = 0;
|
|
virtual tcp::endpoint local_endpoint() const = 0;
|
|
virtual void disconnect(error_code const& ec
|
|
, operation_t op, int error = 0) = 0;
|
|
virtual peer_id const& pid() const = 0;
|
|
virtual void set_holepunch_mode() = 0;
|
|
virtual torrent_peer* peer_info_struct() const = 0;
|
|
virtual void set_peer_info(torrent_peer* pi) = 0;
|
|
virtual bool is_outgoing() const = 0;
|
|
virtual void add_stat(boost::int64_t downloaded, boost::int64_t uploaded) = 0;
|
|
virtual bool fast_reconnect() const = 0;
|
|
virtual bool is_choked() const = 0;
|
|
virtual bool failed() const = 0;
|
|
virtual stat const& statistics() const = 0;
|
|
virtual void get_peer_info(peer_info& p) const = 0;
|
|
#ifndef TORRENT_DISABLE_LOGGING
|
|
virtual void peer_log(peer_log_alert::direction_t direction
|
|
, char const* event, char const* fmt = "", ...) const TORRENT_FORMAT(4,5) = 0;
|
|
#endif
|
|
protected:
|
|
~peer_connection_interface() {}
|
|
};
|
|
}
|
|
|
|
#endif
|
|
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(244)">../include/libtorrent/performance_counters.hpp:139</a></td><td>should keepalives be in here too? how about dont-have, share-mode, upload-only</td></tr><tr id="244" style="display: none;" colspan="3"><td colspan="3"><h2>should keepalives be in here too?
|
|
how about dont-have, share-mode, upload-only</h2><h4>../include/libtorrent/performance_counters.hpp:139</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> // a connect candidate
|
|
connection_attempt_loops,
|
|
// successful incoming connections (not rejected for any reason)
|
|
incoming_connections,
|
|
|
|
// counts events where the network
|
|
// thread wakes up
|
|
on_read_counter,
|
|
on_write_counter,
|
|
on_tick_counter,
|
|
on_lsd_counter,
|
|
on_lsd_peer_counter,
|
|
on_udp_counter,
|
|
on_accept_counter,
|
|
on_disk_queue_counter,
|
|
on_disk_counter,
|
|
|
|
torrent_evicted_counter,
|
|
|
|
// bittorrent message counters
|
|
<div style="background: #ffff00" width="100%"> num_incoming_choke,
|
|
</div> num_incoming_unchoke,
|
|
num_incoming_interested,
|
|
num_incoming_not_interested,
|
|
num_incoming_have,
|
|
num_incoming_bitfield,
|
|
num_incoming_request,
|
|
num_incoming_piece,
|
|
num_incoming_cancel,
|
|
num_incoming_dht_port,
|
|
num_incoming_suggest,
|
|
num_incoming_have_all,
|
|
num_incoming_have_none,
|
|
num_incoming_reject,
|
|
num_incoming_allowed_fast,
|
|
num_incoming_ext_handshake,
|
|
num_incoming_pex,
|
|
num_incoming_metadata,
|
|
num_incoming_extended,
|
|
|
|
num_outgoing_choke,
|
|
num_outgoing_unchoke,
|
|
num_outgoing_interested,
|
|
num_outgoing_not_interested,
|
|
num_outgoing_have,
|
|
num_outgoing_bitfield,
|
|
num_outgoing_request,
|
|
num_outgoing_piece,
|
|
num_outgoing_cancel,
|
|
num_outgoing_dht_port,
|
|
num_outgoing_suggest,
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(245)">../include/libtorrent/performance_counters.hpp:450</a></td><td>some space could be saved here by making gauges 32 bits</td></tr><tr id="245" style="display: none;" colspan="3"><td colspan="3"><h2>some space could be saved here by making gauges 32 bits</h2><h4>../include/libtorrent/performance_counters.hpp:450</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"></pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(246)">../include/libtorrent/performance_counters.hpp:451</a></td><td>restore these to regular integers. Instead have one copy of the counters per thread and collect them at convenient synchronization points</td></tr><tr id="246" style="display: none;" colspan="3"><td colspan="3"><h2>restore these to regular integers. Instead have one copy
|
|
of the counters per thread and collect them at convenient
|
|
synchronization points</h2><h4>../include/libtorrent/performance_counters.hpp:451</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> num_utp_deleted,
|
|
|
|
num_counters,
|
|
num_gauges_counters = num_counters - num_stats_counters
|
|
};
|
|
|
|
counters();
|
|
|
|
counters(counters const&);
|
|
counters& operator=(counters const&);
|
|
|
|
// returns the new value
|
|
boost::int64_t inc_stats_counter(int c, boost::int64_t value = 1);
|
|
boost::int64_t operator[](int i) const;
|
|
|
|
void set_value(int c, boost::int64_t value);
|
|
void blend_stats_counter(int c, boost::int64_t value, int ratio);
|
|
|
|
private:
|
|
|
|
<div style="background: #ffff00" width="100%">#if BOOST_ATOMIC_LLONG_LOCK_FREE == 2
|
|
</div> boost::atomic<boost::int64_t> m_stats_counter[num_counters];
|
|
#else
|
|
	// if the atomic type isn't lock-free, use a single lock instead, for
|
|
// the whole array
|
|
mutable mutex m_mutex;
|
|
boost::int64_t m_stats_counter[num_counters];
|
|
#endif
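		// a sketch of the per-thread scheme described in the TODO above (an
		// assumed design, not current code): each thread accumulates into plain
		// integers and the owner folds them in at synchronization points:
		// struct thread_local_counters { boost::int64_t v[num_counters]; };
		// void counters::flush(thread_local_counters& t)
		// { for (int i = 0; i < num_counters; ++i) { inc_stats_counter(i, t.v[i]); t.v[i] = 0; } }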
|
|
};
|
|
}
|
|
|
|
#endif
|
|
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(247)">../include/libtorrent/piece_picker.hpp:756</a></td><td>should this be allocated lazily?</td></tr><tr id="247" style="display: none;" colspan="3"><td colspan="3"><h2>should this be allocated lazily?</h2><h4>../include/libtorrent/piece_picker.hpp:756</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">
|
|
std::vector<downloading_piece>::const_iterator find_dl_piece(int queue, int index) const;
|
|
std::vector<downloading_piece>::iterator find_dl_piece(int queue, int index);
|
|
|
|
// returns an iterator to the downloading piece, whichever
|
|
// download list it may live in now
|
|
std::vector<downloading_piece>::iterator update_piece_state(
|
|
std::vector<downloading_piece>::iterator dp);
|
|
|
|
private:
|
|
|
|
// the following vectors are mutable because they sometimes may
|
|
// be updated lazily, triggered by const functions
|
|
|
|
// this maps indices to number of peers that has this piece and
|
|
// index into the m_piece_info vectors.
|
|
// piece_pos::we_have_index means that we have the piece, so it
|
|
// doesn't exist in the piece_info buckets
|
|
// pieces with the filtered flag set doesn't have entries in
|
|
// the m_piece_info buckets either
|
|
<div style="background: #ffff00" width="100%"> mutable std::vector<piece_pos> m_piece_map;
|
|
</div>
|
|
// the number of seeds. These are not added to
|
|
// the availability counters of the pieces
|
|
int m_seeds;
|
|
|
|
// the number of pieces that have passed the hash check
|
|
int m_num_passed;
|
|
|
|
// this vector contains all piece indices that are pickable
|
|
		// sorted by priority. Pieces are in random order
|
|
// among pieces with the same priority
|
|
mutable std::vector<int> m_pieces;
|
|
|
|
// these are indices to the priority boundries inside
|
|
// the m_pieces vector. priority 0 always start at
|
|
// 0, priority 1 starts at m_priority_boundries[0] etc.
|
|
mutable std::vector<int> m_priority_boundries;
|
|
|
|
// each piece that's currently being downloaded has an entry in this list
|
|
		// with block allocations. i.e. it says which parts of the piece are
|
|
// being downloaded. This list is ordered by piece index to make lookups
|
|
// efficient there are as many buckets as there are piece states. See
|
|
// piece_pos::state_t. The only download state that does not have a
|
|
// corresponding downloading_piece vector is piece_open and
|
|
// piece_downloading_reverse (the latter uses the same as
|
|
// piece_downloading).
|
|
std::vector<downloading_piece> m_downloads[piece_pos::num_download_categories];
|
|
|
|
// this holds the information of the blocks in partially downloaded
|
|
// pieces. the downloading_piece::info index point into this vector for
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(248)">../include/libtorrent/proxy_base.hpp:174</a></td><td>it would be nice to remember the bind port and bind once we know where the proxy is m_sock.bind(endpoint, ec);</td></tr><tr id="248" style="display: none;" colspan="3"><td colspan="3"><h2>it would be nice to remember the bind port and bind once we know where the proxy is
|
|
m_sock.bind(endpoint, ec);</h2><h4>../include/libtorrent/proxy_base.hpp:174</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> void bind(endpoint_type const& /* endpoint */)
|
|
{
|
|
// m_sock.bind(endpoint);
|
|
}
|
|
#endif
|
|
|
|
error_code cancel(error_code& ec)
|
|
{
|
|
return m_sock.cancel(ec);
|
|
}
|
|
|
|
void bind(endpoint_type const& /* endpoint */, error_code& /* ec */)
|
|
{
|
|
// the reason why we ignore binds here is because we don't
|
|
// (necessarily) yet know what address family the proxy
|
|
// will resolve to, and binding to the wrong one would
|
|
// break our connection attempt later. The caller here
|
|
// doesn't necessarily know that we're proxying, so this
|
|
// bind address is based on the final endpoint, not the
|
|
// proxy.
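		// a sketch of what the TODO suggests (m_bind_endpoint would be a new,
		// hypothetical member): name the parameter again, store it here as
		// m_bind_endpoint = endpoint; and call m_sock.bind(m_bind_endpoint, ec)
		// once the proxy has been resolved.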
|
|
<div style="background: #ffff00" width="100%"> }
|
|
</div>
|
|
#ifndef BOOST_NO_EXCEPTIONS
|
|
void open(protocol_type const&)
|
|
{
|
|
// m_sock.open(p);
|
|
}
|
|
#endif
|
|
|
|
void open(protocol_type const&, error_code&)
|
|
{
|
|
// we need to ignore this for the same reason as stated
|
|
// for ignoring bind()
|
|
// m_sock.open(p, ec);
|
|
}
|
|
|
|
#ifndef BOOST_NO_EXCEPTIONS
|
|
void close()
|
|
{
|
|
m_remote_endpoint = endpoint_type();
|
|
m_sock.close();
|
|
m_resolver.cancel();
|
|
}
|
|
#endif
|
|
|
|
void close(error_code& ec)
|
|
{
|
|
m_remote_endpoint = endpoint_type();
|
|
m_sock.close(ec);
|
|
m_resolver.cancel();
|
|
}
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(249)">../include/libtorrent/receive_buffer.hpp:258</a></td><td>Detect when the start of the next crypto packet is aligned with the start of piece data and the crypto packet is at least as large as the piece data. With a little extra work we could receive directly into a disk buffer in that case.</td></tr><tr id="249" style="display: none;" colspan="3"><td colspan="3"><h2>Detect when the start of the next crypto packet is aligned
|
|
with the start of piece data and the crypto packet is at least
|
|
as large as the piece data. With a little extra work
|
|
we could receive directly into a disk buffer in that case.</h2><h4>../include/libtorrent/receive_buffer.hpp:258</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">
|
|
void cut(int size, int packet_size, int offset = 0);
|
|
|
|
void crypto_cut(int size, int packet_size)
|
|
{
|
|
TORRENT_ASSERT(m_recv_pos != INT_MAX);
|
|
m_connection_buffer.cut(size, m_recv_pos + packet_size, m_recv_pos);
|
|
}
|
|
|
|
void reset(int packet_size);
|
|
void crypto_reset(int packet_size);
|
|
|
|
void set_soft_packet_size(int size);
|
|
|
|
int advance_pos(int bytes);
|
|
|
|
buffer::const_interval get() const;
|
|
|
|
bool can_recv_contiguous(int /*size*/) const
|
|
{
|
|
<div style="background: #ffff00" width="100%"> return m_recv_pos == INT_MAX;
|
|
</div> }
|
|
|
|
void mutable_buffers(std::vector<boost::asio::mutable_buffer>& vec
|
|
, std::size_t bytes_transfered);
|
|
|
|
private:
|
|
// explicitly disallow assignment, to silence msvc warning
|
|
crypto_receive_buffer& operator=(crypto_receive_buffer const&);
|
|
|
|
int m_recv_pos;
|
|
int m_packet_size;
|
|
int m_soft_packet_size;
|
|
receive_buffer& m_connection_buffer;
|
|
};
|
|
#endif // TORRENT_DISABLE_ENCRYPTION
|
|
|
|
} // namespace libtorrent
|
|
|
|
#endif // #ifndef TORRENT_RECEIVE_BUFFER_HPP_INCLUDED
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(250)">../include/libtorrent/session_handle.hpp:682</a></td><td>add get_peer_class_type_filter() as well</td></tr><tr id="250" style="display: none;" colspan="3"><td colspan="3"><h2>add get_peer_class_type_filter() as well</h2><h4>../include/libtorrent/session_handle.hpp:682</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> //
|
|
// The ``peer_class`` argument cannot be greater than 31. The bitmasks
|
|
// representing peer classes in the ``peer_class_filter`` are 32 bits.
|
|
//
|
|
// For more information, see peer-classes_.
|
|
void set_peer_class_filter(ip_filter const& f);
|
|
|
|
		// Sets and gets the *peer class type filter*. This controls automatic
|
|
// peer class assignments to peers based on what kind of socket it is.
|
|
//
|
|
// It does not only support assigning peer classes, it also supports
|
|
// removing peer classes based on socket type.
|
|
//
|
|
// The order of these rules being applied are:
|
|
//
|
|
// 1. peer-class IP filter
|
|
// 2. peer-class type filter, removing classes
|
|
// 3. peer-class type filter, adding classes
|
|
//
|
|
// For more information, see peer-classes_.
|
|
<div style="background: #ffff00" width="100%"> void set_peer_class_type_filter(peer_class_type_filter const& f);
|
|
</div>
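		// the matching getter this TODO asks for might look like this (a
		// sketch; not part of this header yet):
		// peer_class_type_filter get_peer_class_type_filter() const;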
|
|
// Creates a new peer class (see peer-classes_) with the given name. The
|
|
// returned integer is the new peer class' identifier. Peer classes may
|
|
// have the same name, so each invocation of this function creates a new
|
|
// class and returns a unique identifier.
|
|
//
|
|
// Identifiers are assigned from low numbers to higher. So if you plan on
|
|
// using certain peer classes in a call to `set_peer_class_filter()`_,
|
|
// make sure to create those early on, to get low identifiers.
|
|
//
|
|
// For more information on peer classes, see peer-classes_.
|
|
int create_peer_class(char const* name);
|
|
|
|
// This call dereferences the reference count of the specified peer
|
|
// class. When creating a peer class it's automatically referenced by 1.
|
|
// If you want to recycle a peer class, you may call this function. You
|
|
// may only call this function **once** per peer class you create.
|
|
// Calling it more than once for the same class will lead to memory
|
|
// corruption.
|
|
//
|
|
// Since peer classes are reference counted, this function will not
|
|
// remove the peer class if it's still assigned to torrents or peers. It
|
|
// will however remove it once the last peer and torrent drops their
|
|
// references to it.
|
|
//
|
|
// There is no need to call this function for custom peer classes. All
|
|
// peer classes will be properly destructed when the session object
|
|
// destructs.
|
|
//
|
|
// For more information on peer classes, see peer-classes_.
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(251)">../include/libtorrent/settings_pack.hpp:1094</a></td><td>deprecate this ``max_rejects`` is the number of piece requests we will reject in a row while a peer is choked before the peer is considered abusive and is disconnected.</td></tr><tr id="251" style="display: none;" colspan="3"><td colspan="3"><h2>deprecate this
|
|
``max_rejects`` is the number of piece requests we will reject in a
|
|
row while a peer is choked before the peer is considered abusive
|
|
and is disconnected.</h2><h4>../include/libtorrent/settings_pack.hpp:1094</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">
|
|
// this is the minimum allowed announce interval for a tracker. This
|
|
// is specified in seconds and is used as a sanity check on what is
|
|
// returned from a tracker. It mitigates hammering misconfigured
|
|
// trackers.
|
|
min_announce_interval,
|
|
|
|
// this is the number of seconds a torrent is considered active after
|
|
// it was started, regardless of upload and download speed. This is so
|
|
// that newly started torrents are not considered inactive until they
|
|
// have a fair chance to start downloading.
|
|
auto_manage_startup,
|
|
|
|
// ``seeding_piece_quota`` is the number of pieces to send to a peer,
|
|
// when seeding, before rotating in another peer to the unchoke set.
|
|
// It defaults to 3 pieces, which means that when seeding, any peer
|
|
// we've sent more than this number of pieces to will be unchoked in
|
|
// favour of a choked peer.
|
|
seeding_piece_quota,
|
|
|
|
<div style="background: #ffff00" width="100%"> max_rejects,
|
|
</div>
|
|
// ``recv_socket_buffer_size`` and ``send_socket_buffer_size``
|
|
// specifies the buffer sizes set on peer sockets. 0 (which is the
|
|
// default) means the OS default (i.e. don't change the buffer sizes).
|
|
// The socket buffer sizes are changed using setsockopt() with
|
|
// SOL_SOCKET/SO_RCVBUF and SO_SNDBUFFER.
|
|
recv_socket_buffer_size,
|
|
send_socket_buffer_size,
|
|
|
|
// ``file_checks_delay_per_block`` is the number of milliseconds to
|
|
// sleep in between disk read operations when checking torrents. This
|
|
// defaults to 0, but can be set to higher numbers to slow down the
|
|
// rate at which data is read from the disk while checking. This may
|
|
		// be useful for background tasks where it doesn't matter if they take a
|
|
// bit longer, as long as they leave disk I/O time for other
|
|
// processes.
|
|
file_checks_delay_per_block,
|
|
|
|
// ``read_cache_line_size`` is the number of blocks to read into the
|
|
// read cache when a read cache miss occurs. Setting this to 0 is
|
|
// essentially the same thing as disabling read cache. The number of
|
|
// blocks read into the read cache is always capped by the piece
|
|
// boundary.
|
|
//
|
|
// When a piece in the write cache has ``write_cache_line_size``
|
|
// contiguous blocks in it, they will be flushed. Setting this to 1
|
|
// effectively disables the write cache.
|
|
read_cache_line_size,
|
|
write_cache_line_size,
|
|
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(252)">../include/libtorrent/torrent.hpp:195</a></td><td>make this a raw pointer. perhaps keep the shared_ptr around further down the object to maintain an owner</td></tr><tr id="252" style="display: none;" colspan="3"><td colspan="3"><h2>make this a raw pointer. perhaps keep the shared_ptr
|
|
around further down the object to maintain an owner</h2><h4>../include/libtorrent/torrent.hpp:195</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> // connection to pick up
|
|
peer_request restart_request;
|
|
std::vector<char> restart_piece;
|
|
};
|
|
|
|
struct TORRENT_EXTRA_EXPORT torrent_hot_members
|
|
{
|
|
torrent_hot_members(aux::session_interface& ses
|
|
, add_torrent_params const& p, int block_size);
|
|
|
|
protected:
|
|
// the piece picker. This is allocated lazily. When we don't
|
|
// have anything in the torrent (for instance, if it hasn't
|
|
// been started yet) or if we have everything, there is no
|
|
// picker. It's allocated on-demand the first time we need
|
|
// it in torrent::need_picker(). In order to tell the
|
|
// difference between having everything and nothing in
|
|
// the case there is no piece picker, see m_have_all.
|
|
boost::scoped_ptr<piece_picker> m_picker;
|
|
|
|
<div style="background: #ffff00" width="100%"> boost::shared_ptr<torrent_info> m_torrent_file;
|
|
</div>
|
|
// a back reference to the session
|
|
// this torrent belongs to.
|
|
aux::session_interface& m_ses;
|
|
|
|
// this vector is sorted at all times, by the pointer value.
|
|
// use sorted_insert() and sorted_find() on it. The GNU STL
|
|
// implementation on Darwin uses significantly less memory to
|
|
// represent a vector than a set, and this set is typically
|
|
// relatively small, and it's cheap to copy pointers.
|
|
std::vector<peer_connection*> m_connections;
|
|
|
|
// the scrape data from the tracker response, this
|
|
// is optional and may be 0xffffff
|
|
boost::uint32_t m_complete:24;
|
|
|
|
// set to true when this torrent may not download anything
|
|
bool m_upload_mode:1;
|
|
|
|
// this is set to false as long as the connections
|
|
// of this torrent hasn't been initialized. If we
|
|
// have metadata from the start, connections are
|
|
// initialized immediately, if we didn't have metadata,
|
|
// they are initialized right after files_checked().
|
|
// valid_resume_data() will return false as long as
|
|
// the connections aren't initialized, to avoid
|
|
// them from altering the piece-picker before it
|
|
// has been initialized with files_checked().
|
|
bool m_connections_initialized:1;
|
|
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(253)">../include/libtorrent/torrent.hpp:1244</a></td><td>this wastes 5 bits per file</td></tr><tr id="253" style="display: none;" colspan="3"><td colspan="3"><h2>this wastes 5 bits per file</h2><h4>../include/libtorrent/torrent.hpp:1244</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> typedef std::list<boost::shared_ptr<torrent_plugin> > extension_list_t;
|
|
extension_list_t m_extensions;
|
|
#endif
|
|
|
|
// used for tracker announces
|
|
deadline_timer m_tracker_timer;
|
|
|
|
// used to detect when we are active or inactive for long enough
|
|
// to trigger the auto-manage logic
|
|
deadline_timer m_inactivity_timer;
|
|
|
|
// this is the upload and download statistics for the whole torrent.
|
|
// it's updated from all its peers once every second.
|
|
libtorrent::stat m_stat;
|
|
|
|
// -----------------------------
|
|
|
|
// this vector is allocated lazily. If no file priorities are
|
|
// ever changed, this remains empty. Any unallocated slot
|
|
// implicitly means the file has priority 1.
|
|
<div style="background: #ffff00" width="100%"> std::vector<boost::uint8_t> m_file_priority;
|
|
</div>
|
|
// this object is used to track download progress of individual files
|
|
aux::file_progress m_file_progress;
|
|
|
|
// these are the pieces we're currently
|
|
// suggesting to peers.
|
|
std::vector<suggest_piece_t> m_suggested_pieces;
|
|
|
|
std::vector<announce_entry> m_trackers;
|
|
// this is an index into m_trackers
|
|
|
|
// this list is sorted by time_critical_piece::deadline
|
|
std::vector<time_critical_piece> m_time_critical_pieces;
|
|
|
|
std::string m_trackerid;
|
|
#ifndef TORRENT_NO_DEPRECATE
|
|
// deprecated in 1.1
|
|
std::string m_username;
|
|
std::string m_password;
|
|
#endif
|
|
|
|
std::string m_save_path;
|
|
|
|
// if we don't have the metadata, this is a url to
|
|
// the torrent file
|
|
std::string m_url;
|
|
|
|
// if this was added from an RSS feed, this is the unique
|
|
// identifier in the feed.
|
|
std::string m_uuid;
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(254)">../include/libtorrent/torrent.hpp:1302</a></td><td>These two bitfields should probably be coalesced into one</td></tr><tr id="254" style="display: none;" colspan="3"><td colspan="3"><h2>These two bitfields should probably be coalesced into one</h2><h4>../include/libtorrent/torrent.hpp:1302</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> // the .torrent file from m_url
|
|
// std::vector<char> m_torrent_file_buf;
|
|
|
|
// this is a list of all pieces that we have announced
|
|
// as having, without actually having yet. If we receive
|
|
// a request for a piece in this list, we need to hold off
|
|
// on responding until we have completed the piece and
|
|
// verified its hash. If the hash fails, send reject to
|
|
// peers with outstanding requests, and dont_have to other
|
|
// peers. This vector is ordered, to make lookups fast.
|
|
std::vector<int> m_predictive_pieces;
|
|
|
|
// the performance counters of this session
|
|
counters& m_stats_counters;
|
|
|
|
// each bit represents a piece. a set bit means
|
|
// the piece has had its hash verified. This
|
|
// is only used in seed mode (when m_seed_mode
|
|
// is true)
|
|
|
|
<div style="background: #ffff00" width="100%"> bitfield m_verified;
|
|
</div> // this means there is an outstanding, async, operation
|
|
// to verify each piece that has a 1
|
|
bitfield m_verifying;
|
|
|
|
// set if there's an error on this torrent
|
|
error_code m_error;
|
|
|
|
// used if there is any resume data
|
|
boost::scoped_ptr<resume_data_t> m_resume_data;
|
|
|
|
// if the torrent is started without metadata, it may
|
|
// still be given a name until the metadata is received
|
|
// once the metadata is received this field will no
|
|
// longer be used and will be reset
|
|
boost::scoped_ptr<std::string> m_name;
|
|
|
|
storage_constructor_type m_storage_constructor;
|
|
|
|
// the posix time this torrent was added and when
|
|
// it was completed. If the torrent isn't yet
|
|
// completed, m_completed_time is 0
|
|
time_t m_added_time;
|
|
time_t m_completed_time;
|
|
|
|
// this was the last time _we_ saw a seed in this swarm
|
|
time_t m_last_seen_complete;
|
|
|
|
// this is the time last any of our peers saw a seed
|
|
// in this swarm
|
|
time_t m_swarm_last_seen_complete;
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(255)">../include/libtorrent/torrent_info.hpp:117</a></td><td>there may be some opportunities to optimize the size of torrent_info. specifically to turn some std::string and std::vector into pointers</td></tr><tr id="255" style="display: none;" colspan="3"><td colspan="3"><h2>there may be some opportunities to optimize the size of torrent_info.
|
|
specifically to turn some std::string and std::vector into pointers</h2><h4>../include/libtorrent/torrent_info.hpp:117</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> // The URL of the web seed
|
|
std::string url;
|
|
|
|
// Optional authentication. If this is set, it's passed
|
|
// in as HTTP basic auth to the web seed. The format is:
|
|
// username:password.
|
|
std::string auth;
|
|
|
|
// Any extra HTTP headers that need to be passed to the web seed
|
|
headers_t extra_headers;
|
|
|
|
// The type of web seed (see type_t)
|
|
boost::uint8_t type;
|
|
};
|
|
|
|
#ifndef BOOST_NO_EXCEPTIONS
|
|
// for backwards compatibility with 0.14
|
|
typedef libtorrent_exception invalid_torrent_file;
|
|
#endif
|
|
|
|
<div style="background: #ffff00" width="100%"> class TORRENT_EXPORT torrent_info
|
|
</div> {
|
|
public:
|
|
|
|
// The constructor that takes an info-hash will initialize the info-hash
|
|
// to the given value, but leave all other fields empty. This is used
|
|
// internally when downloading torrents without the metadata. The
|
|
// metadata will be created by libtorrent as soon as it has been
|
|
// downloaded from the swarm.
|
|
//
|
|
// The constructor that takes a bdecode_node will create a torrent_info
|
|
// object from the information found in the given torrent_file. The
|
|
// bdecode_node represents a tree node in an bencoded file. To load an
|
|
// ordinary .torrent file into a bdecode_node, use bdecode().
|
|
//
|
|
// The version that takes a buffer pointer and a size will decode it as a
|
|
// .torrent file and initialize the torrent_info object for you.
|
|
//
|
|
// The version that takes a filename will simply load the torrent file
|
|
// and decode it inside the constructor, for convenience. This might not
|
|
// be the most suitable for applications that want to be able to report
|
|
// detailed errors on what might go wrong.
|
|
//
|
|
// There is an upper limit on the size of the torrent file that will be
|
|
// loaded by the overload taking a filename. If it's important that even
|
|
// very large torrent files are loaded, use one of the other overloads.
|
|
//
|
|
// The overloads that takes an ``error_code const&`` never throws if an
|
|
// error occur, they will simply set the error code to describe what went
|
|
// wrong and not fully initialize the torrent_info object. The overloads
|
|
// that do not take the extra error_code parameter will always throw if
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(256)">../include/libtorrent/tracker_manager.hpp:392</a></td><td>this should be unique_ptr in the future</td></tr><tr id="256" style="display: none;" colspan="3"><td colspan="3"><h2>this should be unique_ptr in the future</h2><h4>../include/libtorrent/tracker_manager.hpp:392</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">
|
|
// this is only used for SOCKS packets, since
|
|
// they may be addressed to hostname
|
|
virtual bool incoming_packet(error_code const& e, char const* hostname
|
|
, char const* buf, int size) TORRENT_OVERRIDE;
|
|
|
|
void update_transaction_id(
|
|
boost::shared_ptr<udp_tracker_connection> c
|
|
, boost::uint64_t tid);
|
|
|
|
aux::session_settings const& settings() const { return m_settings; }
|
|
udp_socket& get_udp_socket() { return m_udp_socket; }
|
|
resolver_interface& host_resolver() { return m_host_resolver; }
|
|
|
|
private:
|
|
|
|
typedef mutex mutex_t;
|
|
mutable mutex_t m_mutex;
|
|
|
|
// maps transactionid to the udp_tracker_connection
|
|
<div style="background: #ffff00" width="100%"> typedef boost::unordered_map<boost::uint32_t
|
|
</div> , boost::shared_ptr<udp_tracker_connection> > udp_conns_t;
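		// once C++11 can be required, the TODO above would make this roughly
		// (a sketch; the shared_ptr handed to update_transaction_id() would
		// have to change along with it):
		// typedef boost::unordered_map<boost::uint32_t
		// 	, std::unique_ptr<udp_tracker_connection> > udp_conns_t;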
|
|
udp_conns_t m_udp_conns;
|
|
|
|
typedef std::vector<boost::shared_ptr<http_tracker_connection> > http_conns_t;
|
|
http_conns_t m_http_conns;
|
|
|
|
class udp_socket& m_udp_socket;
|
|
resolver_interface& m_host_resolver;
|
|
aux::session_settings const& m_settings;
|
|
counters& m_stats_counters;
|
|
#if !defined TORRENT_DISABLE_LOGGING || TORRENT_USE_ASSERTS
|
|
aux::session_logger& m_ses;
|
|
#endif
|
|
|
|
bool m_abort;
|
|
};
|
|
}
|
|
|
|
#endif // TORRENT_TRACKER_MANAGER_HPP_INCLUDED
|
|
|
|
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(257)">../include/libtorrent/upnp.hpp:132</a></td><td>support using the windows API for UPnP operations as well</td></tr><tr id="257" style="display: none;" colspan="3"><td colspan="3"><h2>support using the windows API for UPnP operations as well</h2><h4>../include/libtorrent/upnp.hpp:132</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;"> std::list<std::string> tag_stack;
|
|
std::string control_url;
|
|
std::string service_type;
|
|
std::string model;
|
|
std::string url_base;
|
|
bool top_tags(const char* str1, const char* str2)
|
|
{
|
|
std::list<std::string>::reverse_iterator i = tag_stack.rbegin();
|
|
if (i == tag_stack.rend()) return false;
|
|
if (!string_equal_no_case(i->c_str(), str2)) return false;
|
|
++i;
|
|
if (i == tag_stack.rend()) return false;
|
|
if (!string_equal_no_case(i->c_str(), str1)) return false;
|
|
return true;
|
|
}
|
|
};
|
|
|
|
TORRENT_EXTRA_EXPORT void find_control_url(int type, char const* string
|
|
, int str_len, parse_state& state);
|
|
|
|
<div style="background: #ffff00" width="100%">class TORRENT_EXTRA_EXPORT upnp : public boost::enable_shared_from_this<upnp>
|
|
</div>{
|
|
public:
|
|
upnp(io_service& ios
|
|
, address const& listen_interface, std::string const& user_agent
|
|
, portmap_callback_t const& cb, log_callback_t const& lcb
|
|
, bool ignore_nonrouters);
|
|
~upnp();
|
|
|
|
void start();
|
|
|
|
enum protocol_type { none = 0, udp = 1, tcp = 2 };
|
|
|
|
// Attempts to add a port mapping for the specified protocol. Valid protocols are
|
|
// ``upnp::tcp`` and ``upnp::udp`` for the UPnP class and ``natpmp::tcp`` and
|
|
// ``natpmp::udp`` for the NAT-PMP class.
|
|
//
|
|
// ``external_port`` is the port on the external address that will be mapped. This
|
|
// is a hint, you are not guaranteed that this port will be available, and it may
|
|
// end up being something else. In the portmap_alert_ notification, the actual
|
|
// external port is reported.
|
|
//
|
|
// ``local_port`` is the port in the local machine that the mapping should forward
|
|
// to.
|
|
//
|
|
	// The return value is an index that identifies this port mapping. This is used
	// to refer to mappings that fail or succeed in the portmap_error_alert_ and
	// portmap_alert_ respectively. If the mapping fails immediately, the return value
	// is -1, which means failure. There will not be any error alert notification for
	// mappings that fail with a -1 return value.
	int add_mapping(protocol_type p, int external_port, int local_port);
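
	// editor's note: a minimal usage sketch of the mapping call documented
	// above. The shared_ptr variable ``u`` is illustrative, not part of this
	// header; the -1/alert semantics are taken from the comment above.
	//
	//   int const map_handle = u->add_mapping(upnp::tcp, 6881, 6881);
	//   if (map_handle == -1)
	//   {
	//       // rejected immediately; no portmap_error_alert_ will follow
	//   }
	//   // otherwise the actual external port arrives in a portmap_alert_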
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(258)">../include/libtorrent/utp_stream.hpp:424</a></td><td>implement blocking write. Low priority since it's not used (yet)</td></tr><tr id="258" style="display: none;" colspan="3"><td colspan="3"><h2>implement blocking write. Low priority since it's not used (yet)</h2><h4>../include/libtorrent/utp_stream.hpp:424</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">		for (typename Mutable_Buffers::const_iterator i = buffers.begin()
			, end(buffers.end()); i != end; ++i)
		{
			using boost::asio::buffer_cast;
			using boost::asio::buffer_size;
			add_read_buffer(buffer_cast<void*>(*i), buffer_size(*i));
#if TORRENT_USE_ASSERTS
			buf_size += buffer_size(*i);
#endif
		}
		std::size_t ret = read_some(true);
		TORRENT_ASSERT(ret <= buf_size);
		TORRENT_ASSERT(ret > 0);
		return ret;
	}

	template <class Const_Buffers>
	std::size_t write_some(Const_Buffers const& /* buffers */, error_code& /* ec */)
	{
		TORRENT_ASSERT(false && "not implemented!");
<div style="background: #ffff00" width="100%">		return 0;
</div>	}
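
	// editor's sketch of the missing blocking write: mirror the blocking
	// read path above by registering the caller's buffers and then driving
	// a synchronous send, using the same add_write_buffer() helper the
	// async write path uses. The single-argument write_some(bool) at the
	// end is hypothetical (a blocking counterpart of read_some(true) that
	// would have to be added first), and error handling is omitted, which
	// is why the sketch is left commented out:
	//
	//   template <class Const_Buffers>
	//   std::size_t write_some(Const_Buffers const& buffers, error_code& ec)
	//   {
	//       using boost::asio::buffer_cast;
	//       using boost::asio::buffer_size;
	//       for (typename Const_Buffers::const_iterator i = buffers.begin()
	//           , end(buffers.end()); i != end; ++i)
	//           add_write_buffer(buffer_cast<void const*>(*i), buffer_size(*i));
	//       return write_some(true); // hypothetical blocking flush
	//   }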

#ifndef BOOST_NO_EXCEPTIONS
	template <class Mutable_Buffers>
	std::size_t read_some(Mutable_Buffers const& buffers)
	{
		error_code ec;
		std::size_t ret = read_some(buffers, ec);
		if (ec)
			boost::throw_exception(boost::system::system_error(ec));
		return ret;
	}

	template <class Const_Buffers>
	std::size_t write_some(Const_Buffers const& buffers)
	{
		error_code ec;
		std::size_t ret = write_some(buffers, ec);
		if (ec)
			boost::throw_exception(boost::system::system_error(ec));
		return ret;
	}
#endif

	template <class Const_Buffers, class Handler>
	void async_write_some(Const_Buffers const& buffers, Handler const& handler)
	{
		if (m_impl == 0)
		{
			m_io_service.post(boost::bind<void>(handler
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(259)">../include/libtorrent/kademlia/item.hpp:61</a></td><td>since this is a public function, it should probably be moved out of this header and into one with other public functions.</td></tr><tr id="259" style="display: none;" colspan="3"><td colspan="3"><h2>since this is a public function, it should probably be moved
out of this header and into one with other public functions.</h2><h4>../include/libtorrent/kademlia/item.hpp:61</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">#include <boost/array.hpp>

namespace libtorrent { namespace dht
{

// calculate the target hash for an immutable item.
sha1_hash TORRENT_EXTRA_EXPORT item_target_id(
	std::pair<char const*, int> v);

// calculate the target hash for a mutable item.
sha1_hash TORRENT_EXTRA_EXPORT item_target_id(std::pair<char const*, int> salt
	, char const* pk);

bool TORRENT_EXTRA_EXPORT verify_mutable_item(
	std::pair<char const*, int> v
	, std::pair<char const*, int> salt
	, boost::uint64_t seq
	, char const* pk
	, char const* sig);

<div style="background: #ffff00" width="100%">
</div>// given a byte range ``v`` and an optional byte range ``salt``, a
// sequence number, public key ``pk`` (must be 32 bytes) and a secret key
// ``sk`` (must be 64 bytes), this function produces a signature which
// is written into a 64 byte buffer pointed to by ``sig``. The caller
// is responsible for allocating the destination buffer that's passed in
// as the ``sig`` argument. Typically it would be allocated on the stack.
void TORRENT_EXPORT sign_mutable_item(
	std::pair<char const*, int> v
	, std::pair<char const*, int> salt
	, boost::uint64_t seq
	, char const* pk
	, char const* sk
	, char* sig);
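
// editor's note: a usage sketch of the signing call documented above. The
// buffer names and key variables are illustrative; only the sizes (32-byte
// public key, 64-byte secret key, 64-byte signature) come from this header:
//
//   char sig[item_sig_len];                        // 64-byte output, e.g. on the stack
//   std::pair<char const*, int> value(value_buf, value_len);
//   std::pair<char const*, int> salt(0, 0);        // the salt range is optional
//   sign_mutable_item(value, salt, seq, public_key, secret_key, sig);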

enum
{
	item_pk_len = 32,
	item_sk_len = 64,
	item_sig_len = 64
};

class TORRENT_EXTRA_EXPORT item
{
public:
	item() : m_seq(0), m_mutable(false) {}
	item(char const* pk, std::string const& salt);
	item(entry const& v) { assign(v); }
	item(entry const& v
		, std::pair<char const*, int> salt
		, boost::uint64_t seq, char const* pk, char const* sk);
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(260)">../include/libtorrent/aux_/session_impl.hpp:859</a></td><td>should this be renamed m_outgoing_interfaces?</td></tr><tr id="260" style="display: none;" colspan="3"><td colspan="3"><h2>should this be renamed m_outgoing_interfaces?</h2><h4>../include/libtorrent/aux_/session_impl.hpp:859</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">		// client with the tracker only. It is randomized
		// at startup
		int m_key;

		// the addresses or device names of the interfaces we are supposed to
		// listen on. if empty, it means that we should let the os decide
		// which interface to listen on
		std::vector<std::pair<std::string, int> > m_listen_interfaces;

		// keep this around until everything uses the list of interfaces
		// instead.
		tcp::endpoint m_listen_interface;

		// the network interfaces outgoing connections are opened through. If
		// there is more than one, they are used in a round-robin fashion
		// each element is a device name or IP address (in string form) and
		// a port number. The port determines which port to bind the listen
		// socket to, and the device or IP determines which network adapter
		// to be used. If no adapter with the specified name exists, the listen
		// socket fails.
<div style="background: #ffff00" width="100%">		std::vector<std::string> m_net_interfaces;
</div>
		// if we're listening on an IPv6 interface
		// this is one of the non local IPv6 interfaces
		// on this machine
		tcp::endpoint m_ipv6_interface;
		tcp::endpoint m_ipv4_interface;

		// since we might be listening on multiple interfaces
		// we might need more than one listen socket
		std::list<listen_socket_t> m_listen_sockets;

#if TORRENT_USE_I2P
		i2p_connection m_i2p_conn;
		boost::shared_ptr<socket_type> m_i2p_listen_socket;
#endif

#ifdef TORRENT_USE_OPENSSL
		ssl::context* ssl_ctx() { return &m_ssl_ctx; }
		void on_incoming_utp_ssl(boost::shared_ptr<socket_type> const& s);
		void ssl_handshake(error_code const& ec, boost::shared_ptr<socket_type> s);
#endif

		// when a socks proxy is used for peers, also
		// listen for incoming connections on a socks connection
		boost::shared_ptr<socket_type> m_socks_listen_socket;
		boost::uint16_t m_socks_listen_port;

		// round-robin index into m_net_interfaces
		mutable boost::uint8_t m_interface_index;

</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(261)">../include/libtorrent/aux_/session_impl.hpp:911</a></td><td>replace this by a proper asio timer</td></tr><tr id="261" style="display: none;" colspan="3"><td colspan="3"><h2>replace this by a proper asio timer</h2><h4>../include/libtorrent/aux_/session_impl.hpp:911</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">		void open_new_incoming_socks_connection();

		enum listen_on_flags_t
		{
			open_ssl_socket = 0x10
		};

		listen_socket_t setup_listener(std::string const& device
			, boost::asio::ip::tcp const& protocol, int port, int flags
			, error_code& ec);

#ifndef TORRENT_DISABLE_DHT
		entry m_dht_state;
#endif

		// this is initialized to the unchoke_interval
		// session_setting and decreased every second.
		// when it reaches zero, it is reset to the
		// unchoke_interval and the unchoke set is
		// recomputed.
<div style="background: #ffff00" width="100%">		int m_unchoke_time_scaler;
</div>
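		// editor's sketch of the "proper asio timer" TODO above: instead of a
		// counter decremented once per second, arm a deadline_timer for the
		// whole unchoke interval and re-arm it from its completion handler.
		// The member and handler names below are illustrative, not existing
		// session_impl code:
		//
		//   boost::asio::deadline_timer m_unchoke_timer; // constructed from m_io_service
		//
		//   void schedule_unchoke()
		//   {
		//       m_unchoke_timer.expires_from_now(boost::posix_time::seconds(
		//           m_settings.get_int(settings_pack::unchoke_interval)));
		//       m_unchoke_timer.async_wait(boost::bind(
		//           &session_impl::on_unchoke_interval, this, _1));
		//   }
		//
		//   void on_unchoke_interval(error_code const& ec)
		//   {
		//       if (ec) return; // cancelled or aborted
		//       recalculate_unchoke_slots(); // the existing recomputation step
		//       schedule_unchoke();
		//   }
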
		// this is used to decide when to recalculate which
		// torrents to keep queued and which to activate
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(262)">../include/libtorrent/aux_/session_impl.hpp:916</a></td><td>replace this by a proper asio timer</td></tr><tr id="262" style="display: none;" colspan="3"><td colspan="3"><h2>replace this by a proper asio timer</h2><h4>../include/libtorrent/aux_/session_impl.hpp:916</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">			open_ssl_socket = 0x10
		};

		listen_socket_t setup_listener(std::string const& device
			, boost::asio::ip::tcp const& protocol, int port, int flags
			, error_code& ec);

#ifndef TORRENT_DISABLE_DHT
		entry m_dht_state;
#endif

		// this is initialized to the unchoke_interval
		// session_setting and decreased every second.
		// when it reaches zero, it is reset to the
		// unchoke_interval and the unchoke set is
		// recomputed.
		int m_unchoke_time_scaler;

		// this is used to decide when to recalculate which
		// torrents to keep queued and which to activate
<div style="background: #ffff00" width="100%">		int m_auto_manage_time_scaler;
</div>
		// works like unchoke_time_scaler but it
		// is only decreased when the unchoke set
		// is recomputed, and when it reaches zero,
		// the optimistic unchoke is moved to another peer.
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(263)">../include/libtorrent/aux_/session_impl.hpp:923</a></td><td>replace this by a proper asio timer</td></tr><tr id="263" style="display: none;" colspan="3"><td colspan="3"><h2>replace this by a proper asio timer</h2><h4>../include/libtorrent/aux_/session_impl.hpp:923</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">
#ifndef TORRENT_DISABLE_DHT
		entry m_dht_state;
#endif

		// this is initialized to the unchoke_interval
		// session_setting and decreased every second.
		// when it reaches zero, it is reset to the
		// unchoke_interval and the unchoke set is
		// recomputed.
		int m_unchoke_time_scaler;

		// this is used to decide when to recalculate which
		// torrents to keep queued and which to activate
		int m_auto_manage_time_scaler;

		// works like unchoke_time_scaler but it
		// is only decreased when the unchoke set
		// is recomputed, and when it reaches zero,
		// the optimistic unchoke is moved to another peer.
<div style="background: #ffff00" width="100%">		int m_optimistic_unchoke_time_scaler;
</div>
		// works like unchoke_time_scaler. Each time
		// it reaches 0, and all the connections are
		// used, the worst connection will be disconnected
		// from the torrent with the most peers
		int m_disconnect_time_scaler;

		// when this scaler reaches zero, it will
		// scrape one of the auto managed, paused,
		// torrents.
		int m_auto_scrape_time_scaler;

		// the index of the torrent that we'll
		// refresh the next time
		int m_next_explicit_cache_torrent;

		// this is a counter of the number of seconds until
		// the next time the read cache is rotated, if we're
		// using an explicit read cache.
		int m_cache_rotation_timer;

		// the index of the torrent that we'll
		// refresh the next time
		int m_next_suggest_torrent;

		// this is a counter of the number of seconds until
		// the next time the suggest pieces are refreshed
		int m_suggest_timer;

		// statistics gathered from all torrents.
</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(264)">../include/libtorrent/aux_/session_interface.hpp:241</a></td><td>it would be nice to not have this be part of session_interface</td></tr><tr id="264" style="display: none;" colspan="3"><td colspan="3"><h2>it would be nice to not have this be part of session_interface</h2><h4>../include/libtorrent/aux_/session_interface.hpp:241</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">

		// load the specified torrent. also evict one torrent, except
		// for the one specified, if we are at the limit of loaded torrents
		virtual bool load_torrent(torrent* t) = 0;

		// bump the specified torrent to make it the most recently used one
		// in the torrent LRU (i.e. the least likely to get unloaded)
		virtual void bump_torrent(torrent* t, bool back = true) = 0;

		// ask for which interface and port to bind outgoing peer connections on
		virtual tcp::endpoint bind_outgoing_socket(socket_type& s, address const&
			remote_address, error_code& ec) const = 0;
		virtual bool verify_bound_address(address const& addr, bool utp
			, error_code& ec) = 0;

#ifndef TORRENT_DISABLE_MUTABLE_TORRENTS
		virtual std::vector<boost::shared_ptr<torrent> > find_collection(
			std::string const& collection) const = 0;
#endif
<div style="background: #ffff00" width="100%">		virtual proxy_settings proxy() const = 0;
</div>
#if TORRENT_USE_I2P
		virtual proxy_settings i2p_proxy() const = 0;
		virtual char const* i2p_session() const = 0;
#endif

		virtual void prioritize_connections(boost::weak_ptr<torrent> t) = 0;

		virtual tcp::endpoint get_ipv6_interface() const = 0;
		virtual tcp::endpoint get_ipv4_interface() const = 0;

		virtual void trigger_auto_manage() = 0;

		virtual void apply_settings_pack(boost::shared_ptr<settings_pack> pack) = 0;
		virtual session_settings const& settings() const = 0;

		virtual void queue_tracker_request(tracker_request& req
			, boost::weak_ptr<request_callback> c) = 0;

		// peer-classes
		virtual void set_peer_classes(peer_class_set* s, address const& a, int st) = 0;
		virtual peer_class_pool const& peer_classes() const = 0;
		virtual peer_class_pool& peer_classes() = 0;
		virtual bool ignore_unchoke_slots_set(peer_class_set const& set) const = 0;
		virtual int copy_pertinent_channels(peer_class_set const& set
			, int channel, bandwidth_channel** dst, int max) = 0;
		virtual int use_quota_overhead(peer_class_set& set, int amount_down, int amount_up) = 0;

		virtual bandwidth_manager* get_bandwidth_manager(int channel) = 0;

</pre></td></tr><tr style="background: #ccc"><td>relevance 0</td><td><a href="javascript:expand(265)">../include/libtorrent/aux_/session_settings.hpp:78</a></td><td>make this a bitfield</td></tr><tr id="265" style="display: none;" colspan="3"><td colspan="3"><h2>make this a bitfield</h2><h4>../include/libtorrent/aux_/session_settings.hpp:78</h4><pre style="background: #f6f6f6; border: solid 1px #ddd;">	if ((name & settings_pack::type_mask) != settings_pack:: type ## _type_base) return default_val; \
	return m_ ## type ## s[name - settings_pack:: type ## _type_base]

	struct TORRENT_EXTRA_EXPORT session_settings
	{
		friend void libtorrent::save_settings_to_dict(
			aux::session_settings const& s, entry::dictionary_type& sett);

		void set_str(int name, std::string const& value) { SET(string); }
		std::string const& get_str(int name) const { GET(string, m_strings[0]); }
		void set_int(int name, int value) { SET(int); }
		int get_int(int name) const { GET(int, 0); }
		void set_bool(int name, bool value) { SET(bool); }
		bool get_bool(int name) const { GET(bool, false); }

		session_settings();

	private:
		std::string m_strings[settings_pack::num_string_settings];
		int m_ints[settings_pack::num_int_settings];
<div style="background: #ffff00" width="100%">		bool m_bools[settings_pack::num_bool_settings];
</div>	};
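
	// editor's sketch of the "make this a bitfield" TODO above: the bool
	// array could be packed into a bitset while keeping the same index
	// arithmetic the GET/SET macros use. A possible shape, not the existing
	// implementation:
	//
	//   std::bitset<settings_pack::num_bool_settings> m_bools;
	//   // set_bool(): m_bools.set(name - settings_pack::bool_type_base, value);
	//   // get_bool(): return m_bools.test(name - settings_pack::bool_type_base);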

#undef GET
#undef SET

} }

#endif

</pre></td></tr></table></body></html>