diff --git a/docs/todo.html b/docs/todo.html
index a68dd3a1c..0ac881a14 100644
--- a/docs/todo.html
+++ b/docs/todo.html
@@ -22,10 +22,10 @@
relevance 3 | ../test/test_dht.cpp:436 | test obfuscated_get_peers |
test obfuscated_get_peers../test/test_dht.cpp:436 g_got_peers.insert(g_got_peers.end(), peers.begin(), peers.end());
}
@@ -128,7 +128,7 @@ bool get_item_cb(dht::item& i)
fprintf(stderr, "%s: discovered disk full mode. Raise limit and disable upload-mode\n", time_now_string());
peer_disconnects = 0;
continue;
- | ||
relevance 3 | ../src/disk_io_thread.cpp:242 | it would be nice to have the number of threads be set dynamically |
it would be nice to have the number of threads be set dynamically../src/disk_io_thread.cpp:242 std::pair<block_cache::iterator, block_cache::iterator> pieces
+ | ||
relevance 3 | ../src/disk_io_thread.cpp:240 | it would be nice to have the number of threads be set dynamically |
it would be nice to have the number of threads be set dynamically../src/disk_io_thread.cpp:240 std::pair<block_cache::iterator, block_cache::iterator> pieces
= m_disk_cache.all_pieces();
TORRENT_ASSERT(pieces.first == pieces.second);
#endif
@@ -179,63 +179,62 @@ bool get_item_cb(dht::item& i)
m_threads.resize(m_num_threads);
}
}
- | ||
relevance 3 | ../src/peer_connection.cpp:1685 | we should probably use ses.m_allowed_upload_slots here instead to work with auto-unchoke logic |
we should probably use ses.m_allowed_upload_slots here instead
-to work with auto-unchoke logic../src/peer_connection.cpp:1685#endif
- write_unchoke();
- return;
- }
+ | ||
relevance 3 | ../src/pe_crypto.cpp:228 | clean this up using destructors instead |
clean this up using destructors instead../src/pe_crypto.cpp:228 if (e != 0) { ret = 1; goto get_out; }
+ e = gcry_mpi_scan(&secret, GCRYMPI_FMT_USG, (unsigned char const*)m_dh_local_secret
+ , sizeof(m_dh_local_secret), 0);
+ if (e != 0) { ret = 1; goto get_out; }
- maybe_unchoke_this_peer();
- }
+ gcry_mpi_powm(remote_key, remote_key, secret, prime);
- void peer_connection::maybe_unchoke_this_peer()
- {
- TORRENT_ASSERT(is_single_thread());
- if (ignore_unchoke_slots())
+ // remote_key is now the shared secret
+ e = gcry_mpi_print(GCRYMPI_FMT_USG, (unsigned char*)m_dh_shared_secret
+ , sizeof(m_dh_shared_secret), &written, remote_key);
+ if (e != 0) { ret = 1; goto get_out; }
+
+ if (written < 96)
{
-#ifdef TORRENT_VERBOSE_LOGGING
- peer_log("ABOUT TO UNCHOKE [ peer ignores unchoke slots ]");
-#endif
- // if this peer is expempted from the choker
- // just unchoke it immediately
- send_unchoke();
+ memmove(m_dh_shared_secret, m_dh_shared_secret
+ + (sizeof(m_dh_shared_secret) - written), written);
+ memset(m_dh_shared_secret, 0, sizeof(m_dh_shared_secret) - written);
}
- else if (m_ses.preemptive_unchoke())
- {
- // if the peer is choked and we have upload slots left,
- // then unchoke it. Another condition that has to be met
- // is that the torrent doesn't keep track of the individual
- // up/down ratio for each peer (ratio == 0) or (if it does
- // keep track) this particular connection isn't a leecher.
- // If the peer was choked because it was leeching, don't
- // unchoke it again.
- // The exception to this last condition is if we're a seed.
- // In that case we don't care if people are leeching, they
- // can't pay for their downloads anyway.
- boost::shared_ptr<torrent> t = m_torrent.lock();
- TORRENT_ASSERT(t);
+get_out:
+ if (prime) gcry_mpi_release(prime);
+ if (remote_key) gcry_mpi_release(remote_key);
+ if (secret) gcry_mpi_release(secret);
- t->unchoke_peer(*this);
- }
-#if defined TORRENT_VERBOSE_LOGGING
- else
- {
- peer_log("DID NOT UNCHOKE [ the number of uploads (%d) "
- "is more than or equal to the limit (%d) ]"
- , m_ses.num_uploads(), m_settings.get_int(settings_pack::unchoke_slots_limit));
- }
-#endif
- }
+#elif defined TORRENT_USE_OPENSSL
- // -----------------------------
- // ------ NOT INTERESTED -------
- // -----------------------------
- | ||
relevance 3 | ../src/peer_connection.cpp:3015 | since we throw away the queue entry once we issue the disk job, this may happen. Instead, we should keep the queue entry around, mark it as having been requested from disk and once the disk job comes back, discard it if it has been cancelled. Maybe even be able to cancel disk jobs? |
since we throw away the queue entry once we issue
+ BIGNUM* prime = 0;
+ BIGNUM* secret = 0;
+ BIGNUM* remote_key = 0;
+ BN_CTX* ctx = 0;
+ int size;
+
+ prime = BN_bin2bn(dh_prime, sizeof(dh_prime), 0);
+ if (prime == 0) { ret = 1; goto get_out; }
+ secret = BN_bin2bn((unsigned char*)m_dh_local_secret, sizeof(m_dh_local_secret), 0);
+ if (secret == 0) { ret = 1; goto get_out; }
+ remote_key = BN_bin2bn((unsigned char*)remote_pubkey, 96, 0);
+ if (remote_key == 0) { ret = 1; goto get_out; }
+
+ ctx = BN_CTX_new();
+ if (ctx == 0) { ret = 1; goto get_out; }
+ BN_mod_exp(remote_key, remote_key, secret, prime, ctx);
+ BN_CTX_free(ctx);
+
+ // remote_key is now the shared secret
+ size = BN_num_bytes(remote_key);
+ memset(m_dh_shared_secret, 0, sizeof(m_dh_shared_secret) - size);
+ BN_bn2bin(remote_key, (unsigned char*)m_dh_shared_secret + sizeof(m_dh_shared_secret) - size);
+
+get_out:
+ BN_free(remote_key);
+ | ||
relevance 3 | ../src/peer_connection.cpp:3026 | since we throw away the queue entry once we issue the disk job, this may happen. Instead, we should keep the queue entry around, mark it as having been requested from disk and once the disk job comes back, discard it if it has been cancelled. Maybe even be able to cancel disk jobs? |
since we throw away the queue entry once we issue
the disk job, this may happen. Instead, we should keep the
queue entry around, mark it as having been requested from
disk and once the disk job comes back, discard it if it has
-been cancelled. Maybe even be able to cancel disk jobs?../src/peer_connection.cpp:3015
+been cancelled. Maybe even be able to cancel disk jobs?../src/peer_connection.cpp:3026
std::vector<peer_request>::iterator i
= std::find(m_requests.begin(), m_requests.end(), r);
@@ -286,9 +285,9 @@ been cancelled. Maybe even be able to cancel disk jobs?../src/peer_conn
void peer_connection::incoming_have_all()
{
TORRENT_ASSERT(is_single_thread());
- | ||
relevance 3 | ../src/peer_connection.cpp:4754 | instead of using settings_pack::request_timeout, use m_rtt.mean() + m_rtt.avg_deviation() * 2 or something like that. the configuration option could hopefully be removed |
instead of using settings_pack::request_timeout, use
+ | ||
relevance 3 | ../src/peer_connection.cpp:4765 | instead of using settings_pack::request_timeout, use m_rtt.mean() + m_rtt.avg_deviation() * 2 or something like that. the configuration option could hopefully be removed |
instead of using settings_pack::request_timeout, use
m_rtt.mean() + m_rtt.avg_deviation() * 2 or something like that.
-the configuration option could hopefully be removed../src/peer_connection.cpp:4754 // don't bother disconnect peers we haven't been interested
+the configuration option could hopefully be removed../src/peer_connection.cpp:4765 // don't bother disconnect peers we haven't been interested
// in (and that hasn't been interested in us) for a while
// unless we have used up all our connection slots
if (may_timeout
@@ -392,9 +391,9 @@ counter and the passed_hash_check member../src/piece_picker.cpp:3166 | ||
relevance 3 | ../src/session_impl.cpp:4370 | it would be really nice to update these counters as they are incremented. This depends on the session being ticked, which has a fairly coarse grained resolution |
it would be really nice to update these counters
+ | ||
relevance 3 | ../src/session_impl.cpp:4372 | it would be really nice to update these counters as they are incremented. This depends on the session being ticked, which has a fairly coarse grained resolution |
it would be really nice to update these counters
as they are incremented. This depends on the session
-being ticked, which has a fairly coarse grained resolution../src/session_impl.cpp:4370 t->status(&alert->status.back(), ~torrent_handle::query_accurate_download_counters);
+being ticked, which has a fairly coarse grained resolution../src/session_impl.cpp:4372 t->status(&alert->status.back(), ~torrent_handle::query_accurate_download_counters);
t->clear_in_state_update();
}
state_updates.clear();
@@ -445,8 +444,8 @@ being ticked, which has a fairly coarse grained resolution../src/sessio
m_alerts.post_alert_ptr(alert.release());
}
- | ||
relevance 3 | ../src/session_impl.cpp:5229 | deprecate this function. All of this functionality should be exposed as performance counters |
deprecate this function. All of this functionality should be
-exposed as performance counters../src/session_impl.cpp:5229 if (m_alerts.should_post<portmap_alert>())
+ | ||
relevance 3 | ../src/session_impl.cpp:5231 | deprecate this function. All of this functionality should be exposed as performance counters |
deprecate this function. All of this functionality should be
+exposed as performance counters../src/session_impl.cpp:5231 if (m_alerts.should_post<portmap_alert>())
m_alerts.post_alert(portmap_alert(mapping, port
, map_transport));
return;
@@ -497,8 +496,8 @@ exposed as performance counters../src/session_impl.cpp:5229 | ||
relevance 3 | ../src/session_impl.cpp:5819 | If socket jobs could be higher level, to include RC4 encryption and decryption, we would offload the main thread even more |
If socket jobs could be higher level, to include RC4 encryption and decryption,
-we would offload the main thread even more../src/session_impl.cpp:5819 {
+ | ||
relevance 3 | ../src/session_impl.cpp:5821 | If socket jobs could be higher level, to include RC4 encryption and decryption, we would offload the main thread even more |
If socket jobs could be higher level, to include RC4 encryption and decryption,
+we would offload the main thread even more../src/session_impl.cpp:5821 {
int num_threads = m_settings.get_int(settings_pack::network_threads);
int num_pools = num_threads > 0 ? num_threads : 1;
while (num_pools > m_net_thread_pool.size())
@@ -549,7 +548,7 @@ we would offload the main thread even more../src/session_impl.cpp:5819<
, end(m_connections.end()); i != end; ++i)
{
int type = (*i)->type();
- | ||
relevance 3 | ../src/torrent.cpp:1083 | if any other peer has a busy request to this block, we need to cancel it too |
if any other peer has a busy request to this block, we need to cancel it too../src/torrent.cpp:1083#endif
+ | ||
relevance 3 | ../src/torrent.cpp:1078 | if any other peer has a busy request to this block, we need to cancel it too |
if any other peer has a busy request to this block, we need to cancel it too../src/torrent.cpp:1078#endif
TORRENT_ASSERT(j->piece >= 0);
@@ -600,7 +599,7 @@ we would offload the main thread even more../src/session_impl.cpp:5819<
alerts().post_alert(file_error_alert(j->error.ec
, resolve_filename(j->error.file), j->error.operation_str(), get_handle()));
if (c) c->disconnect(errors::no_memory, peer_connection_interface::op_file);
- | ||
relevance 3 | ../src/torrent.cpp:7682 | if peer is a really good peer, maybe we shouldn't disconnect it |
if peer is a really good peer, maybe we shouldn't disconnect it../src/torrent.cpp:7682#if defined TORRENT_LOGGING || defined TORRENT_ERROR_LOGGING
+ | ||
relevance 3 | ../src/torrent.cpp:7686 | if peer is a really good peer, maybe we shouldn't disconnect it |
if peer is a really good peer, maybe we shouldn't disconnect it../src/torrent.cpp:7686#if defined TORRENT_LOGGING || defined TORRENT_ERROR_LOGGING
debug_log("incoming peer (%d)", int(m_connections.size()));
#endif
@@ -611,7 +610,7 @@ we would offload the main thread even more../src/session_impl.cpp:5819<
TORRENT_ASSERT(p->peer_info_struct() != NULL);
- // we need to do this after we've added the peer to the policy
+ // we need to do this after we've added the peer to the peer_list
// since that's when the peer is assigned its peer_info object,
// which holds the rank
if (maybe_replace_peer)
@@ -630,14 +629,14 @@ we would offload the main thread even more../src/session_impl.cpp:5819<
p->disconnect(errors::too_many_connections, peer_connection_interface::op_bittorrent);
// we have to do this here because from the peer's point of
// it wasn't really attached to the torrent, but we do need
- // to let policy know we're removing it
+ // to let peer_list know we're removing it
remove_peer(p);
return false;
}
}
#if TORRENT_USE_INVARIANT_CHECKS
- if (m_policy) m_policy->check_invariant();
+ if (m_peer_list) m_peer_list->check_invariant();
#endif
if (m_share_mode)
@@ -704,7 +703,109 @@ when it doesn't have any of the file do the following../src/web_peer_co
{
// we should not try this server again.
t->remove_web_seed(this, errors::missing_location, op_bittorrent, 2);
- | ||
relevance 3 | ../include/libtorrent/block_cache.hpp:209 | could this be a scoped_array instead? does cached_piece_entry really need to be copyable? cached_piece_entry does need to be copyable since it's part of a container, but it's possible it could be a raw pointer or boost::unique_ptr perhaps |
could this be a scoped_array instead? does cached_piece_entry
+ | ||
relevance 3 | ../src/kademlia/get_item.cpp:220 | we don't support CAS errors here! we need a custom observer |
we don't support CAS errors here! we need a custom observer../src/kademlia/get_item.cpp:220 TORRENT_LOG(node) << "sending put [ v: \"" << m_data.value()
+ << "\" seq: " << (m_data.is_mutable() ? m_data.seq() : -1)
+ << " nodes: " << v.size() << " ]" ;
+#endif
+
+ // create a dummy traversal_algorithm
+ boost::intrusive_ptr<traversal_algorithm> algo(
+ new traversal_algorithm(m_node, (node_id::min)()));
+
+ // store on the first k nodes
+ for (std::vector<std::pair<node_entry, std::string> >::const_iterator i = v.begin()
+ , end(v.end()); i != end; ++i)
+ {
+#ifdef TORRENT_DHT_VERBOSE_LOGGING
+ TORRENT_LOG(node) << " put-distance: " << (160 - distance_exp(m_target, i->first.id));
+#endif
+
+ void* ptr = m_node.m_rpc.allocate_observer();
+ if (ptr == 0) return;
+
+ observer_ptr o(new (ptr) announce_observer(algo, i->first.ep(), i->first.id));
+ #if TORRENT_USE_ASSERTS
+ o->m_in_constructor = false;
+#endif
+ entry e;
+ e["y"] = "q";
+ e["q"] = "put";
+ entry& a = e["a"];
+ a["v"] = m_data.value();
+ a["token"] = i->second;
+ if (m_data.is_mutable())
+ {
+ a["k"] = std::string(m_data.pk().data(), item_pk_len);
+ a["seq"] = m_data.seq();
+ a["sig"] = std::string(m_data.sig().data(), item_sig_len);
+ if (!m_data.salt().empty())
+ {
+ a["salt"] = m_data.salt();
+ }
+ }
+ m_node.m_rpc.invoke(e, i->first.ep(), o);
+ }
+}
+
+void get_item_observer::reply(msg const& m)
+{
+ char const* pk = NULL;
+ char const* sig = NULL;
+ boost::uint64_t seq = 0;
+
+ lazy_entry const* r = m.message.dict_find_dict("r");
+ | ||
relevance 3 | ../src/kademlia/routing_table.cpp:862 | move the lowest priority nodes to the replacement bucket |
move the lowest priority nodes to the replacement bucket../src/kademlia/routing_table.cpp:862 bucket_t& b = m_buckets[bucket_index].live_nodes;
+ bucket_t& rb = m_buckets[bucket_index].replacements;
+
+ // move any node whose (160 - distane_exp(m_id, id)) >= (i - m_buckets.begin())
+ // to the new bucket
+ int new_bucket_size = bucket_limit(bucket_index + 1);
+ for (bucket_t::iterator j = b.begin(); j != b.end();)
+ {
+ if (distance_exp(m_id, j->id) >= 159 - bucket_index)
+ {
+ ++j;
+ continue;
+ }
+ // this entry belongs in the new bucket
+ new_bucket.push_back(*j);
+ j = b.erase(j);
+ }
+
+ if (b.size() > bucket_size_limit)
+ {
+ for (bucket_t::iterator i = b.begin() + bucket_size_limit
+ , end(b.end()); i != end; ++i)
+ {
+ rb.push_back(*i);
+ }
+
+ b.resize(bucket_size_limit);
+ }
+
+ // split the replacement bucket as well. If the live bucket
+ // is not full anymore, also move the replacement entries
+ // into the main bucket
+ for (bucket_t::iterator j = rb.begin(); j != rb.end();)
+ {
+ if (distance_exp(m_id, j->id) >= 159 - bucket_index)
+ {
+ if (int(b.size()) >= bucket_size_limit)
+ {
+ ++j;
+ continue;
+ }
+ b.push_back(*j);
+ }
+ else
+ {
+ // this entry belongs in the new bucket
+ if (int(new_bucket.size()) < new_bucket_size)
+ new_bucket.push_back(*j);
+ else
+ new_replacement_bucket.push_back(*j);
+ }
+ | ||
relevance 3 | ../include/libtorrent/block_cache.hpp:209 | could this be a scoped_array instead? does cached_piece_entry really need to be copyable? cached_piece_entry does need to be copyable since it's part of a container, but it's possible it could be a raw pointer or boost::unique_ptr perhaps |
could this be a scoped_array instead? does cached_piece_entry
really need to be copyable? cached_piece_entry does need to be
copyable since it's part of a container, but it's possible it could be
a raw pointer or boost::unique_ptr perhaps../include/libtorrent/block_cache.hpp:209 tailqueue read_jobs;
@@ -732,109 +833,6 @@ a raw pointer or boost::unique_ptr perhaps../include/libtorrent/block_c
// the last time a block was written to this piece
// plus the minimum amount of time the block is guaranteed
// to stay in the cache
- | ||
relevance 3 | ../include/libtorrent/disk_io_thread.hpp:537 | turn these counters and gauges into session_stats counters (which also would need to be thread safe) |
turn these counters and gauges into session_stats
-counters (which also would need to be thread safe)../include/libtorrent/disk_io_thread.hpp:537 void* m_userdata;
-
- // the last time we expired write blocks from the cache
- ptime m_last_cache_expiry;
-
- ptime m_last_file_check;
-
- // LRU cache of open files
- file_pool m_file_pool;
-
- // disk cache
- mutable mutex m_cache_mutex;
- block_cache m_disk_cache;
-
- // total number of blocks in use by both the read
- // and the write cache. This is not supposed to
- // exceed m_cache_size
-
- counters& m_stats_counters;
-
- cache_status m_cache_stats;
-
- // average read time for cache misses (in microseconds)
- average_accumulator m_read_time;
-
- // average write time (in microseconds)
- average_accumulator m_write_time;
-
- // average hash time (in microseconds)
- average_accumulator m_hash_time;
-
- // average time to serve a job (any job) in microseconds
- average_accumulator m_job_time;
-
- // the total number of outstanding jobs. This is used to
- // limit the number of jobs issued in parallel. It also creates
- // an opportunity to sort the jobs by physical offset before
- // issued to the AIO subsystem
- boost::atomic<int> m_outstanding_jobs;
-
- // this is the main thread io_service. Callbacks are
- // posted on this in order to have them execute in
- // the main thread.
- io_service& m_ios;
-
- // the number of jobs that have been blocked by a fence. These
- // jobs are queued up in their respective storage, waiting for
- // the fence to be lowered. This counter is just used to know
- // when it's OK to exit the main loop of the disk thread
- boost::atomic<int> m_num_blocked_jobs;
-
- | ||
relevance 3 | ../include/libtorrent/policy.hpp:104 | this class should be renamed peer_list |
this class should be renamed peer_list../include/libtorrent/policy.hpp:104 int min_reconnect_time;
-
- // the number of iterations over the peer list for this operation
- int loop_counter;
-
- // these are used only by find_connect_candidates in order
- // to implement peer ranking. See:
- // http://blog.libtorrent.org/2012/12/swarm-connectivity/
- external_ip const* ip;
- int port;
-
- // this must be set to a torrent_peer allocator
- torrent_peer_allocator_interface* peer_allocator;
-
- // if any peer were removed during this call, they are returned in
- // this vector. The caller would want to make sure there are no
- // references to these torrent_peers anywhere
- std::vector<torrent_peer*> erased;
- };
-
- class TORRENT_EXTRA_EXPORT policy : single_threaded
- {
- public:
-
- policy();
-
-#if TORRENT_USE_I2P
- torrent_peer* add_i2p_peer(char const* destination, int src, char flags
- , torrent_state* state);
-#endif
-
- enum
- {
- // these flags match the flags passed in ut_pex
- // messages
- flag_encryption = 0x1,
- flag_seed = 0x2,
- flag_utp = 0x4,
- flag_holepunch = 0x8,
- };
-
- // this is called once for every torrent_peer we get from
- // the tracker, pex, lsd or dht.
- torrent_peer* add_peer(const tcp::endpoint& remote
- , int source, char flags, torrent_state* state);
-
- // false means duplicate connection
- bool update_peer_port(int port, torrent_peer* p, int src, torrent_state* state);
-
- // called when an incoming connection is accepted
- // false means the connection was refused or failed
| ||
relevance 3 | ../include/libtorrent/session.hpp:210 | could the fingerprint be a setting as well? And should the settings_pack be optional? |
could the fingerprint be a setting as well? And should the
settings_pack be optional?../include/libtorrent/session.hpp:210 //
// see apply_settings().
@@ -939,7 +937,85 @@ should not include internal state.../include/libtorrent/torrent_info.hp
// The URL of the web seed
std::string url;
- | ||
relevance 2 | ../src/disk_io_thread.cpp:844 | should this be allocated on the stack? |
should this be allocated on the stack?../src/disk_io_thread.cpp:844 // if we're also flushing the read cache, this piece
+ | ||
relevance 3 | ../include/libtorrent/kademlia/refresh.hpp:46 | collapse this class into the bootstrap class (or maybe the other way around) |
collapse this class into the bootstrap class (or maybe the other
+way around)../include/libtorrent/kademlia/refresh.hpp:46INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#ifndef REFRESH_050324_HPP
+#define REFRESH_050324_HPP
+
+#include <libtorrent/kademlia/traversal_algorithm.hpp>
+#include <libtorrent/kademlia/node_id.hpp>
+#include <libtorrent/kademlia/get_peers.hpp>
+
+namespace libtorrent { namespace dht
+{
+
+class routing_table;
+class rpc_manager;
+
+class refresh : public get_peers
+ {
+public:
+ typedef get_peers::nodes_callback done_callback;
+
+ refresh(node_impl& node, node_id target
+ , done_callback const& callback);
+
+ virtual char const* name() const;
+
+protected:
+
+ observer_ptr new_observer(void* ptr, udp::endpoint const& ep
+ , node_id const& id);
+ virtual bool invoke(observer_ptr o);
+};
+
+class bootstrap : public refresh
+{
+public:
+ bootstrap(node_impl& node, node_id target
+ , done_callback const& callback);
+
+ virtual char const* name() const;
+
+ void trim_seed_nodes();
+
+protected:
+
+ virtual void done();
+
+ | ||
relevance 2 | ../test/test_resume.cpp:291 | test all other resume flags here too. This would require returning more than just the torrent_status from test_resume_flags. Also http seeds and trackers for instance |
test all other resume flags here too. This would require returning
+more than just the torrent_status from test_resume_flags. Also http seeds
+and trackers for instance../test/test_resume.cpp:291 TEST_EQUAL(s.ip_filter_applies, false);
+ TEST_EQUAL(s.connections_limit, 1345);
+ TEST_EQUAL(s.uploads_limit, 1346);
+
+ // resume data overrides the paused flag
+ fprintf(stderr, "flags: paused\n");
+ s = test_resume_flags(add_torrent_params::flag_paused);
+ default_tests(s);
+ TEST_EQUAL(s.save_path, "/add_torrent_params save_path");
+ TEST_EQUAL(s.sequential_download, false);
+ TEST_EQUAL(s.paused, false);
+ TEST_EQUAL(s.auto_managed, false);
+ TEST_EQUAL(s.seed_mode, false);
+ TEST_EQUAL(s.super_seeding, false);
+ TEST_EQUAL(s.share_mode, false);
+ TEST_EQUAL(s.upload_mode, false);
+ TEST_EQUAL(s.ip_filter_applies, false);
+ TEST_EQUAL(s.connections_limit, 1345);
+ TEST_EQUAL(s.uploads_limit, 1346);
+
+ return 0;
+ }
+
+
+ | ||
relevance 2 | ../src/disk_io_thread.cpp:842 | should this be allocated on the stack? |
should this be allocated on the stack?../src/disk_io_thread.cpp:842 // if we're also flushing the read cache, this piece
// should be removed as soon as all write jobs finishes
// otherwise it will turn into a read piece
}
@@ -990,7 +1066,7 @@ should not include internal state.../include/libtorrent/torrent_info.hp
{
cached_piece_entry* pe = m_disk_cache.find_piece(storage, (*i)->piece);
TORRENT_PIECE_ASSERT(pe->num_dirty == 0, pe);
- | ||
relevance 2 | ../src/disk_io_thread.cpp:885 | we're not flushing the read cache at all? |
we're not flushing the read cache at all?../src/disk_io_thread.cpp:885 // from disk_io_thread::do_delete, which is a fence job and should
+ | ||
relevance 2 | ../src/disk_io_thread.cpp:883 | we're not flushing the read cache at all? |
we're not flushing the read cache at all?../src/disk_io_thread.cpp:883 // from disk_io_thread::do_delete, which is a fence job and should
// have any other jobs active, i.e. there should not be any references
// keeping pieces or blocks alive
if ((flags & flush_delete_cache) && (flags & flush_expect_clear))
@@ -1041,7 +1117,7 @@ should not include internal state.../include/libtorrent/torrent_info.hp
if (e->num_dirty == 0) continue;
pieces.push_back(std::make_pair(e->storage.get(), int(e->piece)));
}
- | ||
relevance 2 | ../src/file.cpp:1491 | use vm_copy here, if available, and if buffers are aligned |
use vm_copy here, if available, and if buffers are aligned../src/file.cpp:1491 CloseHandle(native_handle());
+ | ||
relevance 2 | ../src/file.cpp:1491 | use vm_copy here, if available, and if buffers are aligned |
use vm_copy here, if available, and if buffers are aligned../src/file.cpp:1491 CloseHandle(native_handle());
m_path.clear();
#else
if (m_file_handle != INVALID_HANDLE_VALUE)
@@ -1071,7 +1147,7 @@ should not include internal state.../include/libtorrent/torrent_info.hp
int offset = 0;
for (int i = 0; i < num_bufs; ++i)
{
- | ||
relevance 2 | ../src/file.cpp:1502 | use vm_copy here, if available, and if buffers are aligned |
use vm_copy here, if available, and if buffers are aligned../src/file.cpp:1502 }
+ | ||
relevance 2 | ../src/file.cpp:1502 | use vm_copy here, if available, and if buffers are aligned |
use vm_copy here, if available, and if buffers are aligned../src/file.cpp:1502 }
// defined in storage.cpp
int bufs_size(file::iovec_t const* bufs, int num_bufs);
@@ -1122,10 +1198,10 @@ should not include internal state.../include/libtorrent/torrent_info.hp
// issue a single write operation instead of using a vector
// operation
int buf_size = 0;
- | ||
relevance 2 | ../src/peer_connection.cpp:4676 | use a deadline_timer for timeouts. Don't rely on second_tick()! Hook this up to connect timeout as well. This would improve performance because of less work in second_tick(), and might let use remove ticking entirely eventually |
use a deadline_timer for timeouts. Don't rely on second_tick()!
+ | ||
relevance 2 | ../src/peer_connection.cpp:4687 | use a deadline_timer for timeouts. Don't rely on second_tick()! Hook this up to connect timeout as well. This would improve performance because of less work in second_tick(), and might let use remove ticking entirely eventually |
use a deadline_timer for timeouts. Don't rely on second_tick()!
Hook this up to connect timeout as well. This would improve performance
because of less work in second_tick(), and might let use remove ticking
-entirely eventually../src/peer_connection.cpp:4676 if (is_i2p(*m_socket))
+entirely eventually../src/peer_connection.cpp:4687 if (is_i2p(*m_socket))
connect_timeout += 20;
#endif
@@ -1176,7 +1252,7 @@ entirely eventually../src/peer_connection.cpp:4676relevance 2 | ../src/session_impl.cpp:227 | find a better place for this function |
|
find a better place for this function../src/session_impl.cpp:227 *j.vec, j.peer->make_write_handler(boost::bind(
+ | ||
relevance 2 | ../src/session_impl.cpp:227 | find a better place for this function |
find a better place for this function../src/session_impl.cpp:227 *j.vec, j.peer->make_write_handler(boost::bind(
&peer_connection::on_send_data, j.peer, _1, _2)));
}
else
@@ -1227,7 +1303,7 @@ namespace aux {
const static class_mapping v4_classes[] =
{
// everything
- | ||
relevance 2 | ../src/session_impl.cpp:1834 | the udp socket(s) should be using the same generic mechanism and not be restricted to a single one we should open a one listen socket for each entry in the listen_interfaces list |
the udp socket(s) should be using the same generic
+ | ||
relevance 2 | ../src/session_impl.cpp:1834 | the udp socket(s) should be using the same generic mechanism and not be restricted to a single one we should open a one listen socket for each entry in the listen_interfaces list |
the udp socket(s) should be using the same generic
mechanism and not be restricted to a single one
we should open a one listen socket for each entry in the
listen_interfaces list../src/session_impl.cpp:1834 }
@@ -1281,7 +1357,7 @@ listen_interfaces list../src/session_impl.cpp:1834relevance 2 | ../src/session_impl.cpp:1931 | use bind_to_device in udp_socket |
|
use bind_to_device in udp_socket../src/session_impl.cpp:1931 {
+ | ||
relevance 2 | ../src/session_impl.cpp:1931 | use bind_to_device in udp_socket |
use bind_to_device in udp_socket../src/session_impl.cpp:1931 {
#if defined TORRENT_VERBOSE_LOGGING || defined TORRENT_LOGGING || defined TORRENT_ERROR_LOGGING
char msg[200];
snprintf(msg, sizeof(msg), "cannot bind TCP listen socket to interface \"%s\": %s"
@@ -1327,7 +1403,7 @@ listen_interfaces list../src/session_impl.cpp:1834relevance 2 | ../src/session_impl.cpp:1958 | use bind_to_device in udp_socket |
|
use bind_to_device in udp_socket../src/session_impl.cpp:1958 session_log("SSL: cannot bind to UDP interface \"%s\": %s"
+ | ||
relevance 2 | ../src/session_impl.cpp:1958 | use bind_to_device in udp_socket |
use bind_to_device in udp_socket../src/session_impl.cpp:1958 session_log("SSL: cannot bind to UDP interface \"%s\": %s"
, print_endpoint(m_listen_interface).c_str(), ec.message().c_str());
#endif
if (m_alerts.should_post<listen_failed_alert>())
@@ -1378,8 +1454,8 @@ listen_interfaces list../src/session_impl.cpp:1834relevance 2 | ../src/session_impl.cpp:3392 | make a list for torrents that want to be announced on the DHT so we don't have to loop over all torrents, just to find the ones that want to announce |
|
make a list for torrents that want to be announced on the DHT so we
-don't have to loop over all torrents, just to find the ones that want to announce../src/session_impl.cpp:3392 if (!m_dht_torrents.empty())
+ | ||
relevance 2 | ../src/session_impl.cpp:3394 | make a list for torrents that want to be announced on the DHT so we don't have to loop over all torrents, just to find the ones that want to announce |
make a list for torrents that want to be announced on the DHT so we
+don't have to loop over all torrents, just to find the ones that want to announce../src/session_impl.cpp:3394 if (!m_dht_torrents.empty())
{
boost::shared_ptr<torrent> t;
do
@@ -1430,7 +1506,7 @@ don't have to loop over all torrents, just to find the ones that want to announc
if (m_torrents.empty()) return;
if (m_next_lsd_torrent == m_torrents.end())
- | ||
relevance 2 | ../src/torrent.cpp:701 | post alert |
post alert../src/torrent.cpp:701 state_updated();
+ | ||
relevance 2 | ../src/torrent.cpp:701 | post alert |
post alert../src/torrent.cpp:701 state_updated();
set_state(torrent_status::downloading);
@@ -1481,8 +1557,8 @@ don't have to loop over all torrents, just to find the ones that want to announc
TORRENT_ASSERT(piece >= 0);
TORRENT_ASSERT(m_verified.get_bit(piece) == false);
++m_num_verified;
- | ||
relevance 2 | ../src/torrent.cpp:4693 | abort lookups this torrent has made via the session host resolver interface |
abort lookups this torrent has made via the
-session host resolver interface../src/torrent.cpp:4693 // files belonging to the torrents
+ | ||
relevance 2 | ../src/torrent.cpp:4696 | abort lookups this torrent has made via the session host resolver interface |
abort lookups this torrent has made via the
+session host resolver interface../src/torrent.cpp:4696 // files belonging to the torrents
disconnect_all(errors::torrent_aborted, peer_connection_interface::op_bittorrent);
// post a message to the main thread to destruct
@@ -1533,7 +1609,7 @@ session host resolver interface../src/torrent.cpp:4693relevance 2 | ../src/web_peer_connection.cpp:655 | create a mapping of file-index to redirection URLs. Use that to form URLs instead. Support to reconnect to a new server without destructing this peer_connection |
|
create a mapping of file-index to redirection URLs. Use that to form
+ | ||
relevance 2 | ../src/web_peer_connection.cpp:655 | create a mapping of file-index to redirection URLs. Use that to form URLs instead. Support to reconnect to a new server without destructing this peer_connection |
create a mapping of file-index to redirection URLs. Use that to form
URLs instead. Support to reconnect to a new server without destructing this
peer_connection../src/web_peer_connection.cpp:655 == dl_target);
#endif
@@ -1586,7 +1662,7 @@ peer_connection../src/web_peer_connection.cpp:655relevance 2 | ../src/kademlia/dos_blocker.cpp:75 | make these limits configurable |
|
make these limits configurable../src/kademlia/dos_blocker.cpp:75 bool dos_blocker::incoming(address addr, ptime now)
+ | ||
relevance 2 | ../src/kademlia/dos_blocker.cpp:75 | make these limits configurable |
make these limits configurable../src/kademlia/dos_blocker.cpp:75 bool dos_blocker::incoming(address addr, ptime now)
{
node_ban_entry* match = 0;
node_ban_entry* min = m_ban_nodes;
@@ -1637,7 +1713,7 @@ peer_connection../src/web_peer_connection.cpp:655relevance 2 | ../src/kademlia/node.cpp:67 | make this configurable in dht_settings |
|
make this configurable in dht_settings../src/kademlia/node.cpp:67#include "libtorrent/kademlia/routing_table.hpp"
+ | ||
relevance 2 | ../src/kademlia/node.cpp:67 | make this configurable in dht_settings |
make this configurable in dht_settings../src/kademlia/node.cpp:67#include "libtorrent/kademlia/routing_table.hpp"
#include "libtorrent/kademlia/node.hpp"
#include "libtorrent/kademlia/dht_observer.hpp"
@@ -1688,7 +1764,59 @@ void purge_peers(std::set<peer_entry>& peers)
void nop() {}
- | ||
relevance 2 | ../src/kademlia/node.cpp:804 | find_node should write directly to the response entry |
find_node should write directly to the response entry../src/kademlia/node.cpp:804 TORRENT_LOG(node) << " values: " << reply["values"].list().size();
+ | ||
relevance 2 | ../src/kademlia/node.cpp:486 | it would be nice to have a bias towards node-id prefixes that are missing in the bucket |
it would be nice to have a bias towards node-id prefixes that
+are missing in the bucket../src/kademlia/node.cpp:486 , boost::bind(&nop)));
+ r->start();
+ m_last_self_refresh = now;
+ return;
+ }
+
+ node_entry const* ne = m_table.next_refresh();
+ if (ne == NULL) return;
+
+ int bucket = 159 - distance_exp(m_id, ne->id);
+ send_single_refresh(ne->ep(), bucket, ne->id);
+}
+
+void node_impl::send_single_refresh(udp::endpoint const& ep, int bucket
+ , node_id const& id)
+{
+ void* ptr = m_rpc.allocate_observer();
+ if (ptr == 0) return;
+
+ // generate a random node_id within the given bucket
+ node_id mask = generate_prefix_mask(bucket + 1);
+ node_id target = generate_secret_id() & ~mask;
+ target |= m_id & mask;
+
+ // create a dummy traversal_algorithm
+ // this is unfortunately necessary for the observer
+ // to free itself from the pool when it's being released
+ boost::intrusive_ptr<traversal_algorithm> algo(
+ new traversal_algorithm(*this, (node_id::min)()));
+ observer_ptr o(new (ptr) ping_observer(algo, ep, id));
+#if defined TORRENT_DEBUG || TORRENT_RELEASE_ASSERTS
+ o->m_in_constructor = false;
+#endif
+ entry e;
+ e["y"] = "q";
+ entry& a = e["a"];
+
+ // use get_peers instead of find_node. We'll get nodes in the response
+ // either way.
+ e["q"] = "get_peers";
+ a["info_hash"] = target.to_string();
+ m_counters.inc_stats_counter(counters::dht_get_peers_out);
+
+// e["q"] = "find_node";
+// a["target"] = target.to_string();
+ m_rpc.invoke(e, ep, o);
+}
+
+time_duration node_impl::connection_timeout()
+{
+ time_duration d = m_rpc.tick();
+ | ||
relevance 2 | ../src/kademlia/node.cpp:883 | find_node should write directly to the response entry |
find_node should write directly to the response entry../src/kademlia/node.cpp:883 TORRENT_LOG(node) << " values: " << reply["values"].list().size();
}
#endif
}
@@ -1739,9 +1867,9 @@ void nop() {}
// listen port and instead use the source port of the packet?
if (msg_keys[5] && msg_keys[5]->int_value() != 0)
port = m.addr.port();
- | ||
relevance 2 | ../src/kademlia/node_id.cpp:133 | this could be optimized if SSE 4.2 is available. It could also be optimized given that we have a fixed length |
this could be optimized if SSE 4.2 is
+ | ||
relevance 2 | ../src/kademlia/node_id.cpp:134 | this could be optimized if SSE 4.2 is available. It could also be optimized given that we have a fixed length |
this could be optimized if SSE 4.2 is
available. It could also be optimized given
-that we have a fixed length../src/kademlia/node_id.cpp:133 b6 = ip_.to_v6().to_bytes();
+that we have a fixed length../src/kademlia/node_id.cpp:134 b6 = ip_.to_v6().to_bytes();
ip = &b6[0];
num_octets = 8;
mask = v6mask;
@@ -1776,23 +1904,23 @@ that we have a fixed length../src/kademlia/node_id.cpp:133 | ||
relevance 2 | ../include/libtorrent/enum_net.hpp:137 | this could be done more efficiently by just looking up the interface with the given name, maybe even with if_nametoindex() |
this could be done more efficiently by just looking up
+ | ||
relevance 2 | ../include/libtorrent/enum_net.hpp:137 | this could be done more efficiently by just looking up the interface with the given name, maybe even with if_nametoindex() |
this could be done more efficiently by just looking up
the interface with the given name, maybe even with if_nametoindex()../include/libtorrent/enum_net.hpp:137
address ip = address::from_string(device_name, ec);
if (!ec)
@@ -1844,7 +1972,7 @@ the interface with the given name, maybe even with if_nametoindex()../i
// returns true if the given device exists
TORRENT_EXTRA_EXPORT bool has_interface(char const* name, io_service& ios
- | ||
relevance 2 | ../include/libtorrent/intrusive_ptr_base.hpp:44 | remove this class and transition over to using shared_ptr and make_shared instead |
remove this class and transition over to using shared_ptr and
+ | ||
relevance 2 | ../include/libtorrent/intrusive_ptr_base.hpp:44 | remove this class and transition over to using shared_ptr and make_shared instead |
remove this class and transition over to using shared_ptr and
make_shared instead../include/libtorrent/intrusive_ptr_base.hpp:44CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
@@ -1896,7 +2024,7 @@ namespace libtorrent
intrusive_ptr_base(): m_refs(0) {}
- | ||
relevance 2 | ../include/libtorrent/proxy_base.hpp:257 | use the resolver interface that has a built-in cache |
use the resolver interface that has a built-in cache../include/libtorrent/proxy_base.hpp:257 return m_sock.lowest_layer();
+ | ||
relevance 2 | ../include/libtorrent/proxy_base.hpp:257 | use the resolver interface that has a built-in cache |
use the resolver interface that has a built-in cache../include/libtorrent/proxy_base.hpp:257 return m_sock.lowest_layer();
}
next_layer_type& next_layer()
@@ -1923,7 +2051,58 @@ protected:
#endif
- | ||
relevance 2 | ../include/libtorrent/session_settings.hpp:55 | this type is only used internally now. move it to an internal header and make this type properly deprecated. |
this type is only used internally now. move it to an internal
+ | ||
relevance 2 | ../include/libtorrent/session.hpp:284 | the ip filter should probably be saved here too |
the ip filter should probably be saved here too../include/libtorrent/session.hpp:284 pack.set_bool(settings_pack::enable_upnp, false);
+ pack.set_bool(settings_pack::enable_natpmp, false);
+ pack.set_bool(settings_pack::enable_lsd, false);
+ pack.set_bool(settings_pack::enable_dht, false);
+ }
+ init(print);
+#if defined TORRENT_VERBOSE_LOGGING || defined TORRENT_LOGGING || defined TORRENT_ERROR_LOGGING
+ set_log_path(logpath);
+#endif
+ start(flags, pack);
+ }
+
+ // The destructor of session will notify all trackers that our torrents
+ // have been shut down. If some trackers are down, they will time out.
+ // All this before the destructor of session returns. So, it's advised
+ // that any kind of interface (such as windows) are closed before
+ // destructing the session object. Because it can take a few second for
+ // it to finish. The timeout can be set with apply_settings().
+ ~session();
+
+
+ // flags that determines which aspects of the session should be
+ // saved when calling save_state().
+ enum save_state_flags_t
+ {
+ // saves settings (i.e. the session_settings)
+ save_settings = 0x001,
+
+ // saves dht_settings
+ save_dht_settings = 0x002,
+
+ // saves dht state such as nodes and node-id, possibly accelerating
+ // joining the DHT if provided at next session startup.
+ save_dht_state = 0x004,
+
+ // save pe_settings
+ save_encryption_settings = 0x020,
+
+ // internal
+ save_as_map = 0x040,
+
+ // saves RSS feeds
+ save_feeds = 0x080
+
+#ifndef TORRENT_NO_DEPRECATE
+ ,
+ save_proxy = 0x008,
+ save_i2p_proxy = 0x010,
+ save_dht_proxy = save_proxy,
+ save_peer_proxy = save_proxy,
+ save_web_proxy = save_proxy,
+ | ||
relevance 2 | ../include/libtorrent/session_settings.hpp:55 | this type is only used internally now. move it to an internal header and make this type properly deprecated. |
this type is only used internally now. move it to an internal
header and make this type properly deprecated.../include/libtorrent/session_settings.hpp:55
#include "libtorrent/version.hpp"
#include "libtorrent/config.hpp"
@@ -1975,7 +2154,7 @@ namespace libtorrent
// proxy_settings::type field.
enum proxy_type
{
- | ||
relevance 2 | ../include/libtorrent/settings_pack.hpp:70 | add an API to query a settings_pack as well |
add an API to query a settings_pack as well../include/libtorrent/settings_pack.hpp:70 | ||
relevance 2 | ../include/libtorrent/settings_pack.hpp:71 | maybe convert all bool types into int-types as well |
maybe convert all bool types into int-types as well../include/libtorrent/settings_pack.hpp:71{
+ | ||
relevance 2 | ../include/libtorrent/settings_pack.hpp:70 | add an API to query a settings_pack as well |
add an API to query a settings_pack as well../include/libtorrent/settings_pack.hpp:70 | ||
relevance 2 | ../include/libtorrent/settings_pack.hpp:71 | maybe convert all bool types into int-types as well |
maybe convert all bool types into int-types as well../include/libtorrent/settings_pack.hpp:71{
namespace aux { struct session_impl; struct session_settings; }
struct settings_pack;
@@ -2026,7 +2205,7 @@ namespace libtorrent
{
string_type_base = 0x0000,
int_type_base = 0x4000,
- | ||
relevance 2 | ../include/libtorrent/socks5_stream.hpp:129 | fix error messages to use custom error_code category |
fix error messages to use custom error_code category../include/libtorrent/socks5_stream.hpp:129 | ||
relevance 2 | ../include/libtorrent/socks5_stream.hpp:130 | add async_connect() that takes a hostname and port as well |
add async_connect() that takes a hostname and port as well../include/libtorrent/socks5_stream.hpp:130 if (m_dst_name.size() > 255)
+ | ||
relevance 2 | ../include/libtorrent/socks5_stream.hpp:129 | fix error messages to use custom error_code category |
fix error messages to use custom error_code category../include/libtorrent/socks5_stream.hpp:129 | ||
relevance 2 | ../include/libtorrent/socks5_stream.hpp:130 | add async_connect() that takes a hostname and port as well |
add async_connect() that takes a hostname and port as well../include/libtorrent/socks5_stream.hpp:130 if (m_dst_name.size() > 255)
m_dst_name.resize(255);
}
@@ -2077,7 +2256,7 @@ namespace libtorrent
m_resolver.async_resolve(q, boost::bind(
&socks5_stream::name_lookup, this, _1, _2, h));
}
- | ||
relevance 2 | ../include/libtorrent/torrent_info.hpp:306 | there may be some opportunities to optimize the size if torrent_info. specifically to turn some std::string and std::vector into pointers |
there may be some opportunities to optimize the size if torrent_info.
+ | ||
relevance 2 | ../include/libtorrent/torrent_info.hpp:306 | there may be some opportunities to optimize the size if torrent_info. specifically to turn some std::string and std::vector into pointers |
there may be some opportunities to optimize the size if torrent_info.
specifically to turn some std::string and std::vector into pointers../include/libtorrent/torrent_info.hpp:306 bool resolving;
// if the user wanted to remove this while
@@ -2129,7 +2308,7 @@ specifically to turn some std::string and std::vector into pointers../i
// error occur, they will simply set the error code to describe what went
// wrong and not fully initialize the torrent_info object. The overloads
// that do not take the extra error_code parameter will always throw if
- | ||
relevance 2 | ../include/libtorrent/tracker_manager.hpp:270 | this class probably doesn't need to have virtual functions. |
this class probably doesn't need to have virtual functions.../include/libtorrent/tracker_manager.hpp:270 int m_completion_timeout;
+ | ||
relevance 2 | ../include/libtorrent/tracker_manager.hpp:270 | this class probably doesn't need to have virtual functions. |
this class probably doesn't need to have virtual functions.../include/libtorrent/tracker_manager.hpp:270 int m_completion_timeout;
typedef mutex mutex_t;
mutable mutex_t m_mutex;
@@ -2180,7 +2359,7 @@ specifically to turn some std::string and std::vector into pointers../i
boost::shared_ptr<tracker_connection> shared_from_this()
{
- | ||
relevance 2 | ../include/libtorrent/tracker_manager.hpp:367 | this should be unique_ptr in the future |
this should be unique_ptr in the future../include/libtorrent/tracker_manager.hpp:367 // this is only used for SOCKS packets, since
+ | ||
relevance 2 | ../include/libtorrent/tracker_manager.hpp:367 | this should be unique_ptr in the future |
this should be unique_ptr in the future../include/libtorrent/tracker_manager.hpp:367 // this is only used for SOCKS packets, since
// they may be addressed to hostname
virtual bool incoming_packet(error_code const& e, char const* hostname
, char const* buf, int size);
@@ -2219,7 +2398,7 @@ specifically to turn some std::string and std::vector into pointers../i
#endif // TORRENT_TRACKER_MANAGER_HPP_INCLUDED
- | ||
relevance 2 | ../include/libtorrent/aux_/session_interface.hpp:107 | the IP voting mechanism should be factored out to its own class, not part of the session |
the IP voting mechanism should be factored out
+ | ||
relevance 2 | ../include/libtorrent/aux_/session_interface.hpp:107 | the IP voting mechanism should be factored out to its own class, not part of the session |
the IP voting mechanism should be factored out
to its own class, not part of the session../include/libtorrent/aux_/session_interface.hpp:107 class port_filter;
struct settings_pack;
struct torrent_peer_allocator_interface;
@@ -2271,7 +2450,7 @@ namespace libtorrent { namespace aux
virtual void queue_async_resume_data(boost::shared_ptr<torrent> const& t) = 0;
virtual void done_async_resume() = 0;
virtual void evict_torrent(torrent* t) = 0;
- | ||
relevance 1 | ../src/http_seed_connection.cpp:124 | in chunked encoding mode, this assert won't hold. the chunk headers should be subtracted from the receive_buffer_size |
in chunked encoding mode, this assert won't hold.
+ | ||
relevance 1 | ../src/http_seed_connection.cpp:124 | in chunked encoding mode, this assert won't hold. the chunk headers should be subtracted from the receive_buffer_size |
in chunked encoding mode, this assert won't hold.
the chunk headers should be subtracted from the receive_buffer_size../src/http_seed_connection.cpp:124 boost::optional<piece_block_progress>
http_seed_connection::downloading_piece_progress() const
{
@@ -2323,8 +2502,8 @@ the chunk headers should be subtracted from the receive_buffer_size../s
std::string request;
request.reserve(400);
- | ||
relevance 1 | ../src/session_impl.cpp:5200 | report the proper address of the router as the source IP of this understanding of our external address, instead of the empty address |
report the proper address of the router as the source IP of
-this understanding of our external address, instead of the empty address../src/session_impl.cpp:5200 void session_impl::on_port_mapping(int mapping, address const& ip, int port
+ | ||
relevance 1 | ../src/session_impl.cpp:5202 | report the proper address of the router as the source IP of this understanding of our external address, instead of the empty address |
report the proper address of the router as the source IP of
+this understanding of our external address, instead of the empty address../src/session_impl.cpp:5202 void session_impl::on_port_mapping(int mapping, address const& ip, int port
, error_code const& ec, int map_transport)
{
TORRENT_ASSERT(is_single_thread());
@@ -2371,9 +2550,9 @@ this understanding of our external address, instead of the empty address | ||
relevance 1 | ../src/session_impl.cpp:6362 | we only need to do this if our global IPv4 address has changed since the DHT (currently) only supports IPv4. Since restarting the DHT is kind of expensive, it would be nice to not do it unnecessarily |
we only need to do this if our global IPv4 address has changed
+ | ||
relevance 1 | ../src/session_impl.cpp:6364 | we only need to do this if our global IPv4 address has changed since the DHT (currently) only supports IPv4. Since restarting the DHT is kind of expensive, it would be nice to not do it unnecessarily |
we only need to do this if our global IPv4 address has changed
since the DHT (currently) only supports IPv4. Since restarting the DHT
-is kind of expensive, it would be nice to not do it unnecessarily../src/session_impl.cpp:6362#endif
+is kind of expensive, it would be nice to not do it unnecessarily../src/session_impl.cpp:6364#endif
if (!m_external_ip.cast_vote(ip, source_type, source)) return;
@@ -2424,11 +2603,11 @@ is kind of expensive, it would be nice to not do it unnecessarily../src
, boost::function<void(char*)> const& handler)
{
return m_disk_thread.async_allocate_disk_buffer(category, handler);
- | ||
relevance 1 | ../src/torrent.cpp:1142 | make this depend on the error and on the filesystem the files are being downloaded to. If the error is no_space_left_on_device and the filesystem doesn't support sparse files, only zero the priorities of the pieces that are at the tails of all files, leaving everything up to the highest written piece in each file |
make this depend on the error and on the filesystem the
+ | ||
relevance 1 | ../src/torrent.cpp:1137 | make this depend on the error and on the filesystem the files are being downloaded to. If the error is no_space_left_on_device and the filesystem doesn't support sparse files, only zero the priorities of the pieces that are at the tails of all files, leaving everything up to the highest written piece in each file |
make this depend on the error and on the filesystem the
files are being downloaded to. If the error is no_space_left_on_device
and the filesystem doesn't support sparse files, only zero the priorities
of the pieces that are at the tails of all files, leaving everything
-up to the highest written piece in each file../src/torrent.cpp:1142 alerts().post_alert(file_error_alert(j->error.ec
+up to the highest written piece in each file../src/torrent.cpp:1137 alerts().post_alert(file_error_alert(j->error.ec
, resolve_filename(j->error.file), j->error.operation_str(), get_handle()));
// put the torrent in an error-state
@@ -2479,8 +2658,8 @@ up to the highest written piece in each file../src/torrent.cpp:1142 | ||
relevance 1 | ../src/torrent.cpp:6834 | save the send_stats state instead of throwing them away it may pose an issue when downgrading though |
save the send_stats state instead of throwing them away
-it may pose an issue when downgrading though../src/torrent.cpp:6834 for (int k = 0; k < bits; ++k)
+ | ||
relevance 1 | ../src/torrent.cpp:6838 | save the send_stats state instead of throwing them away it may pose an issue when downgrading though |
save the send_stats state instead of throwing them away
+it may pose an issue when downgrading though../src/torrent.cpp:6838 for (int k = 0; k < bits; ++k)
v |= (i->info[j*8+k].state == piece_picker::block_info::state_finished)
? (1 << k) : 0;
bitmask.append(1, v);
@@ -2531,9 +2710,9 @@ it may pose an issue when downgrading though../src/torrent.cpp:6834 | ||
relevance 1 | ../src/torrent.cpp:7930 | should disconnect all peers that have the pieces we have not just seeds. It would be pretty expensive to check all pieces for all peers though |
should disconnect all peers that have the pieces we have
+ | ||
relevance 1 | ../src/torrent.cpp:7934 | should disconnect all peers that have the pieces we have not just seeds. It would be pretty expensive to check all pieces for all peers though |
should disconnect all peers that have the pieces we have
not just seeds. It would be pretty expensive to check all pieces
-for all peers though../src/torrent.cpp:7930 set_state(torrent_status::finished);
+for all peers though../src/torrent.cpp:7934 set_state(torrent_status::finished);
set_queue_position(-1);
m_became_finished = m_ses.session_time();
@@ -2584,7 +2763,7 @@ for all peers though../src/torrent.cpp:7930relevance 1 | ../include/libtorrent/ip_voter.hpp:122 | instead, have one instance per possible subnet, global IPv4, global IPv6, loopback, 192.168.x.x, 10.x.x.x, etc. |
|
instead, have one instance per possible subnet, global IPv4, global IPv6, loopback, 192.168.x.x, 10.x.x.x, etc.../include/libtorrent/ip_voter.hpp:122 // away all the votes and started from scratch, in case
+ | ||
relevance 1 | ../include/libtorrent/ip_voter.hpp:122 | instead, have one instance per possible subnet, global IPv4, global IPv6, loopback, 192.168.x.x, 10.x.x.x, etc. |
instead, have one instance per possible subnet, global IPv4, global IPv6, loopback, 192.168.x.x, 10.x.x.x, etc.../include/libtorrent/ip_voter.hpp:122 // away all the votes and started from scratch, in case
// our IP has changed
ptime m_last_rotate;
};
@@ -2611,7 +2790,7 @@ for all peers though../src/torrent.cpp:7930relevance 1 | ../include/libtorrent/web_peer_connection.hpp:121 | if we make this be a disk_buffer_holder instead we would save a copy sometimes use allocate_disk_receive_buffer and release_disk_receive_buffer |
|
if we make this be a disk_buffer_holder instead
+ | ||
relevance 1 | ../include/libtorrent/web_peer_connection.hpp:121 | if we make this be a disk_buffer_holder instead we would save a copy sometimes use allocate_disk_receive_buffer and release_disk_receive_buffer |
if we make this be a disk_buffer_holder instead
we would save a copy sometimes
use allocate_disk_receive_buffer and release_disk_receive_buffer../include/libtorrent/web_peer_connection.hpp:121
// returns the block currently being
@@ -2664,7 +2843,7 @@ use allocate_disk_receive_buffer and release_disk_receive_buffer../incl
};
}
- | ||
relevance 0 | ../test/test_block_cache.cpp:472 | test try_evict_blocks |
test try_evict_blocks../test/test_block_cache.cpp:472 | ||
relevance 0 | ../test/test_block_cache.cpp:473 | test evicting volatile pieces, to see them be removed |
test evicting volatile pieces, to see them be removed../test/test_block_cache.cpp:473 | ||
relevance 0 | ../test/test_block_cache.cpp:474 | test evicting dirty pieces |
test evicting dirty pieces../test/test_block_cache.cpp:474 | ||
relevance 0 | ../test/test_block_cache.cpp:475 | test free_piece |
test free_piece../test/test_block_cache.cpp:475 | ||
relevance 0 | ../test/test_block_cache.cpp:476 | test abort_dirty |
test abort_dirty../test/test_block_cache.cpp:476 | ||
relevance 0 | ../test/test_block_cache.cpp:477 | test unaligned reads |
test unaligned reads../test/test_block_cache.cpp:477 // it's supposed to be a cache hit
+ | ||
relevance 0 | ../test/test_block_cache.cpp:472 | test try_evict_blocks |
test try_evict_blocks../test/test_block_cache.cpp:472 | ||
relevance 0 | ../test/test_block_cache.cpp:473 | test evicting volatile pieces, to see them be removed |
test evicting volatile pieces, to see them be removed../test/test_block_cache.cpp:473 | ||
relevance 0 | ../test/test_block_cache.cpp:474 | test evicting dirty pieces |
test evicting dirty pieces../test/test_block_cache.cpp:474 | ||
relevance 0 | ../test/test_block_cache.cpp:475 | test free_piece |
test free_piece../test/test_block_cache.cpp:475 | ||
relevance 0 | ../test/test_block_cache.cpp:476 | test abort_dirty |
test abort_dirty../test/test_block_cache.cpp:476 | ||
relevance 0 | ../test/test_block_cache.cpp:477 | test unaligned reads |
test unaligned reads../test/test_block_cache.cpp:477 // it's supposed to be a cache hit
TEST_CHECK(ret >= 0);
// return the reference to the buffer we just read
RETURN_BUFFER;
@@ -2687,7 +2866,7 @@ int test_main()
return 0;
}
- | ||
relevance 0 | ../test/test_metadata_extension.cpp:87 | it would be nice to test reversing which session is making the connection as well |
it would be nice to test reversing
+ | ||
relevance 0 | ../test/test_metadata_extension.cpp:87 | it would be nice to test reversing which session is making the connection as well |
it would be nice to test reversing
which session is making the connection as well../test/test_metadata_extension.cpp:87 , boost::shared_ptr<libtorrent::torrent_plugin> (*constructor)(libtorrent::torrent*, void*)
, int timeout)
{
@@ -2739,7 +2918,7 @@ which session is making the connection as well../test/test_metadata_ext
ses1.apply_settings(pack);
ses2.apply_settings(pack);
- | ||
relevance 0 | ../test/test_policy.cpp:419 | test applying a port_filter |
test applying a port_filter../test/test_policy.cpp:419 | ||
relevance 0 | ../test/test_policy.cpp:420 | test erasing peers |
test erasing peers../test/test_policy.cpp:420 | ||
relevance 0 | ../test/test_policy.cpp:421 | test using port and ip filter |
test using port and ip filter../test/test_policy.cpp:421 | ||
relevance 0 | ../test/test_policy.cpp:422 | test incrementing failcount (and make sure we no longer consider the peer a connect canidate) |
test incrementing failcount (and make sure we no longer consider the peer a connect canidate)../test/test_policy.cpp:422 | ||
relevance 0 | ../test/test_policy.cpp:423 | test max peerlist size |
test max peerlist size../test/test_policy.cpp:423 | ||
relevance 0 | ../test/test_policy.cpp:424 | test logic for which connection to keep when receiving an incoming connection to the same peer as we just made an outgoing connection to |
test logic for which connection to keep when receiving an incoming connection to the same peer as we just made an outgoing connection to../test/test_policy.cpp:424 | ||
relevance 0 | ../test/test_policy.cpp:425 | test update_peer_port with allow_multiple_connections_per_ip |
test update_peer_port with allow_multiple_connections_per_ip../test/test_policy.cpp:425 | ||
relevance 0 | ../test/test_policy.cpp:426 | test set_seed |
test set_seed../test/test_policy.cpp:426 | ||
relevance 0 | ../test/test_policy.cpp:427 | test has_peer |
test has_peer../test/test_policy.cpp:427 | ||
relevance 0 | ../test/test_policy.cpp:428 | test insert_peer with a full list |
test insert_peer with a full list../test/test_policy.cpp:428 | ||
relevance 0 | ../test/test_policy.cpp:429 | test add i2p peers |
test add i2p peers../test/test_policy.cpp:429 | ||
relevance 0 | ../test/test_policy.cpp:430 | test allow_i2p_mixed |
test allow_i2p_mixed../test/test_policy.cpp:430 | ||
relevance 0 | ../test/test_policy.cpp:431 | test insert_peer failing |
test insert_peer failing../test/test_policy.cpp:431 | ||
relevance 0 | ../test/test_policy.cpp:432 | test IPv6 |
test IPv6../test/test_policy.cpp:432 | ||
relevance 0 | ../test/test_policy.cpp:433 | test connect_to_peer() failing |
test connect_to_peer() failing../test/test_policy.cpp:433 | ||
relevance 0 | ../test/test_policy.cpp:434 | test connection_closed |
test connection_closed../test/test_policy.cpp:434 | ||
relevance 0 | ../test/test_policy.cpp:435 | test recalculate connect candidates |
test recalculate connect candidates../test/test_policy.cpp:435 | ||
relevance 0 | ../test/test_policy.cpp:436 | add tests here |
add tests here../test/test_policy.cpp:436 for (int i = 0; i < 100; ++i)
+ | ||
relevance 0 | ../test/test_peer_list.cpp:419 | test applying a port_filter |
test applying a port_filter../test/test_peer_list.cpp:419 | ||
relevance 0 | ../test/test_peer_list.cpp:420 | test erasing peers |
test erasing peers../test/test_peer_list.cpp:420 | ||
relevance 0 | ../test/test_peer_list.cpp:421 | test using port and ip filter |
test using port and ip filter../test/test_peer_list.cpp:421 | ||
relevance 0 | ../test/test_peer_list.cpp:422 | test incrementing failcount (and make sure we no longer consider the peer a connect canidate) |
test incrementing failcount (and make sure we no longer consider the peer a connect canidate)../test/test_peer_list.cpp:422 | ||
relevance 0 | ../test/test_peer_list.cpp:423 | test max peerlist size |
test max peerlist size../test/test_peer_list.cpp:423 | ||
relevance 0 | ../test/test_peer_list.cpp:424 | test logic for which connection to keep when receiving an incoming connection to the same peer as we just made an outgoing connection to |
test logic for which connection to keep when receiving an incoming connection to the same peer as we just made an outgoing connection to../test/test_peer_list.cpp:424 | ||
relevance 0 | ../test/test_peer_list.cpp:425 | test update_peer_port with allow_multiple_connections_per_ip |
test update_peer_port with allow_multiple_connections_per_ip../test/test_peer_list.cpp:425 | ||
relevance 0 | ../test/test_peer_list.cpp:426 | test set_seed |
test set_seed../test/test_peer_list.cpp:426 | ||
relevance 0 | ../test/test_peer_list.cpp:427 | test has_peer |
test has_peer../test/test_peer_list.cpp:427 | ||
relevance 0 | ../test/test_peer_list.cpp:428 | test insert_peer with a full list |
test insert_peer with a full list../test/test_peer_list.cpp:428 | ||
relevance 0 | ../test/test_peer_list.cpp:429 | test add i2p peers |
test add i2p peers../test/test_peer_list.cpp:429 | ||
relevance 0 | ../test/test_peer_list.cpp:430 | test allow_i2p_mixed |
test allow_i2p_mixed../test/test_peer_list.cpp:430 | ||
relevance 0 | ../test/test_peer_list.cpp:431 | test insert_peer failing |
test insert_peer failing../test/test_peer_list.cpp:431 | ||
relevance 0 | ../test/test_peer_list.cpp:432 | test IPv6 |
test IPv6../test/test_peer_list.cpp:432 | ||
relevance 0 | ../test/test_peer_list.cpp:433 | test connect_to_peer() failing |
test connect_to_peer() failing../test/test_peer_list.cpp:433 | ||
relevance 0 | ../test/test_peer_list.cpp:434 | test connection_closed |
test connection_closed../test/test_peer_list.cpp:434 | ||
relevance 0 | ../test/test_peer_list.cpp:435 | test recalculate connect candidates |
test recalculate connect candidates../test/test_peer_list.cpp:435 | ||
relevance 0 | ../test/test_peer_list.cpp:436 | add tests here |
add tests here../test/test_peer_list.cpp:436 for (int i = 0; i < 100; ++i)
{
torrent_peer* peer = p.add_peer(rand_tcp_ep(), 0, 0, &st);
TEST_EQUAL(st.erased.size(), 0);
@@ -2763,7 +2942,7 @@ which session is making the connection as well../test/test_metadata_ext
return 0;
}
- | ||
relevance 0 | ../test/test_primitives.cpp:213 | test the case where we have > 120 samples (and have the base delay actually be updated) |
test the case where we have > 120 samples (and have the base delay actually be updated)../test/test_primitives.cpp:213 | ||
relevance 0 | ../test/test_primitives.cpp:214 | test the case where a sample is lower than the history entry but not lower than the base |
test the case where a sample is lower than the history entry but not lower than the base../test/test_primitives.cpp:214 TEST_CHECK(!filter.find(k3));
+ | ||
relevance 0 | ../test/test_primitives.cpp:213 | test the case where we have > 120 samples (and have the base delay actually be updated) |
test the case where we have > 120 samples (and have the base delay actually be updated)../test/test_primitives.cpp:213 | ||
relevance 0 | ../test/test_primitives.cpp:214 | test the case where a sample is lower than the history entry but not lower than the base |
test the case where a sample is lower than the history entry but not lower than the base../test/test_primitives.cpp:214 TEST_CHECK(!filter.find(k3));
TEST_CHECK(filter.find(k4));
// test timestamp_history
@@ -2814,7 +2993,7 @@ which session is making the connection as well../test/test_metadata_ext
sanitize_append_path_element(path, "a...b", 5);
TEST_EQUAL(path, "a...b");
- | ||
relevance 0 | ../test/test_rss.cpp:136 | verify some key state is saved in 'state' |
verify some key state is saved in 'state'../test/test_rss.cpp:136 feed_status st;
+ | ||
relevance 0 | ../test/test_rss.cpp:136 | verify some key state is saved in 'state' |
verify some key state is saved in 'state'../test/test_rss.cpp:136 feed_status st;
f->get_feed_status(&st);
TEST_CHECK(!st.error);
@@ -2849,7 +3028,7 @@ int test_main()
return 0;
}
- | ||
relevance 0 | ../test/test_ssl.cpp:377 | test using a signed certificate with the wrong info-hash in DN |
test using a signed certificate with the wrong info-hash in DN../test/test_ssl.cpp:377 // in verifying peers
+ | ||
relevance 0 | ../test/test_ssl.cpp:377 | test using a signed certificate with the wrong info-hash in DN |
test using a signed certificate with the wrong info-hash in DN../test/test_ssl.cpp:377 // in verifying peers
ctx.set_verify_mode(context::verify_none, ec);
if (ec)
{
@@ -2900,7 +3079,7 @@ int test_main()
return false;
}
fprintf(stderr, "use_tmp_dh_file \"%s\"\n", dh_params.c_str());
- | ||
relevance 0 | ../test/test_ssl.cpp:475 | also test using a hash that refers to a valid torrent but that differs from the SNI hash |
also test using a hash that refers to a valid torrent
+ | ||
relevance 0 | ../test/test_ssl.cpp:475 | also test using a hash that refers to a valid torrent but that differs from the SNI hash |
also test using a hash that refers to a valid torrent
but that differs from the SNI hash../test/test_ssl.cpp:475 print_alerts(ses1, "ses1", true, true, true, &on_alert);
if (ec)
{
@@ -2952,7 +3131,7 @@ but that differs from the SNI hash../test/test_ssl.cpp:475 | ||
relevance 0 | ../test/test_torrent.cpp:132 | wait for an alert rather than just waiting 10 seconds. This is kind of silly |
wait for an alert rather than just waiting 10 seconds. This is kind of silly../test/test_torrent.cpp:132 TEST_EQUAL(h.file_priorities().size(), info->num_files());
+ | ||
relevance 0 | ../test/test_torrent.cpp:132 | wait for an alert rather than just waiting 10 seconds. This is kind of silly |
wait for an alert rather than just waiting 10 seconds. This is kind of silly../test/test_torrent.cpp:132 TEST_EQUAL(h.file_priorities().size(), info->num_files());
TEST_EQUAL(h.file_priorities()[0], 0);
if (info->num_files() > 1)
TEST_EQUAL(h.file_priorities()[1], 0);
@@ -3003,7 +3182,7 @@ but that differs from the SNI hash../test/test_ssl.cpp:475 | ||
relevance 0 | ../test/test_torrent_parse.cpp:114 | test remap_files |
test remap_files../test/test_torrent_parse.cpp:114 | ||
relevance 0 | ../test/test_torrent_parse.cpp:115 | merkle torrents. specifically torrent_info::add_merkle_nodes and torrent with "root hash" |
merkle torrents. specifically torrent_info::add_merkle_nodes and torrent with "root hash"../test/test_torrent_parse.cpp:115 | ||
relevance 0 | ../test/test_torrent_parse.cpp:116 | torrent with 'p' (padfile) attribute |
torrent with 'p' (padfile) attribute../test/test_torrent_parse.cpp:116 | ||
relevance 0 | ../test/test_torrent_parse.cpp:117 | torrent with 'h' (hidden) attribute |
torrent with 'h' (hidden) attribute../test/test_torrent_parse.cpp:117 | ||
relevance 0 | ../test/test_torrent_parse.cpp:118 | torrent with 'x' (executable) attribute |
torrent with 'x' (executable) attribute../test/test_torrent_parse.cpp:118 | ||
relevance 0 | ../test/test_torrent_parse.cpp:119 | torrent with 'l' (symlink) attribute |
torrent with 'l' (symlink) attribute../test/test_torrent_parse.cpp:119 | ||
relevance 0 | ../test/test_torrent_parse.cpp:120 | creating a merkle torrent (torrent_info::build_merkle_list) |
creating a merkle torrent (torrent_info::build_merkle_list)../test/test_torrent_parse.cpp:120 | ||
relevance 0 | ../test/test_torrent_parse.cpp:121 | torrent with multiple trackers in multiple tiers, making sure we shuffle them (how do you test shuffling?, load it multiple times and make sure it's in different order at least once) |
torrent with multiple trackers in multiple tiers, making sure we shuffle them (how do you test shuffling?, load it multiple times and make sure it's in different order at least once)../test/test_torrent_parse.cpp:121 { "invalid_info.torrent", errors::torrent_missing_info },
+ | ||
relevance 0 | ../test/test_torrent_parse.cpp:114 | test remap_files |
test remap_files../test/test_torrent_parse.cpp:114 | ||
relevance 0 | ../test/test_torrent_parse.cpp:115 | merkle torrents. specifically torrent_info::add_merkle_nodes and torrent with "root hash" |
merkle torrents. specifically torrent_info::add_merkle_nodes and torrent with "root hash"../test/test_torrent_parse.cpp:115 | ||
relevance 0 | ../test/test_torrent_parse.cpp:116 | torrent with 'p' (padfile) attribute |
torrent with 'p' (padfile) attribute../test/test_torrent_parse.cpp:116 | ||
relevance 0 | ../test/test_torrent_parse.cpp:117 | torrent with 'h' (hidden) attribute |
torrent with 'h' (hidden) attribute../test/test_torrent_parse.cpp:117 | ||
relevance 0 | ../test/test_torrent_parse.cpp:118 | torrent with 'x' (executable) attribute |
torrent with 'x' (executable) attribute../test/test_torrent_parse.cpp:118 | ||
relevance 0 | ../test/test_torrent_parse.cpp:119 | torrent with 'l' (symlink) attribute |
torrent with 'l' (symlink) attribute../test/test_torrent_parse.cpp:119 | ||
relevance 0 | ../test/test_torrent_parse.cpp:120 | creating a merkle torrent (torrent_info::build_merkle_list) |
creating a merkle torrent (torrent_info::build_merkle_list)../test/test_torrent_parse.cpp:120 | ||
relevance 0 | ../test/test_torrent_parse.cpp:121 | torrent with multiple trackers in multiple tiers, making sure we shuffle them (how do you test shuffling?, load it multiple times and make sure it's in different order at least once) |
torrent with multiple trackers in multiple tiers, making sure we shuffle them (how do you test shuffling?, load it multiple times and make sure it's in different order at least once)../test/test_torrent_parse.cpp:121 { "invalid_info.torrent", errors::torrent_missing_info },
{ "string.torrent", errors::torrent_is_no_dict },
{ "negative_size.torrent", errors::torrent_invalid_length },
{ "negative_file_size.torrent", errors::torrent_file_parse_failed },
@@ -3054,7 +3233,7 @@ namespace libtorrent
TEST_EQUAL(merkle_num_leafs(15), 16);
TEST_EQUAL(merkle_num_leafs(16), 16);
TEST_EQUAL(merkle_num_leafs(17), 32);
- | ||
relevance 0 | ../test/test_tracker.cpp:198 | test parse peers6 |
test parse peers6../test/test_tracker.cpp:198 | ||
relevance 0 | ../test/test_tracker.cpp:199 | test parse tracker-id |
test parse tracker-id../test/test_tracker.cpp:199 | ||
relevance 0 | ../test/test_tracker.cpp:200 | test parse failure-reason |
test parse failure-reason../test/test_tracker.cpp:200 | ||
relevance 0 | ../test/test_tracker.cpp:201 | test all failure paths invalid bencoding not a dictionary no files entry in scrape response no info-hash entry in scrape response malformed peers in peer list of dictionaries uneven number of bytes in peers and peers6 string responses |
test all failure paths
+ | ||
relevance 0 | ../test/test_tracker.cpp:198 | test parse peers6 |
test parse peers6../test/test_tracker.cpp:198 | ||
relevance 0 | ../test/test_tracker.cpp:199 | test parse tracker-id |
test parse tracker-id../test/test_tracker.cpp:199 | ||
relevance 0 | ../test/test_tracker.cpp:200 | test parse failure-reason |
test parse failure-reason../test/test_tracker.cpp:200 | ||
relevance 0 | ../test/test_tracker.cpp:201 | test all failure paths invalid bencoding not a dictionary no files entry in scrape response no info-hash entry in scrape response malformed peers in peer list of dictionaries uneven number of bytes in peers and peers6 string responses |
test all failure paths
invalid bencoding
not a dictionary
no files entry in scrape response
@@ -3111,7 +3290,7 @@ int test_main()
snprintf(tracker_url, sizeof(tracker_url), "http://127.0.0.1:%d/announce", http_port);
t->add_tracker(tracker_url, 0);
- | ||
relevance 0 | ../test/test_upnp.cpp:100 | store the log and verify that some key messages are there |
store the log and verify that some key messages are there../test/test_upnp.cpp:100 "USN:uuid:000f-66d6-7296000099dc::upnp:rootdevice\r\n"
+ | ||
relevance 0 | ../test/test_upnp.cpp:100 | store the log and verify that some key messages are there |
store the log and verify that some key messages are there../test/test_upnp.cpp:100 "USN:uuid:000f-66d6-7296000099dc::upnp:rootdevice\r\n"
"Location: http://127.0.0.1:%d/upnp.xml\r\n"
"Server: Custom/1.0 UPnP/1.0 Proc/Ver\r\n"
"EXT:\r\n"
@@ -3162,7 +3341,7 @@ int run_upnp_test(char const* root_filename, char const* router_model, char cons
error_code ec;
load_file(root_filename, buf, ec);
buf.push_back(0);
- | ||
relevance 0 | ../test/web_seed_suite.cpp:373 | file hashes don't work with the new torrent creator reading async |
file hashes don't work with the new torrent creator reading async../test/web_seed_suite.cpp:373 // corrupt the files now, so that the web seed will be banned
+ | ||
relevance 0 | ../test/web_seed_suite.cpp:375 | file hashes don't work with the new torrent creator reading async |
file hashes don't work with the new torrent creator reading async../test/web_seed_suite.cpp:375 // corrupt the files now, so that the web seed will be banned
if (test_url_seed)
{
create_random_files(combine_path(save_path, "torrent_dir"), file_sizes, sizeof(file_sizes)/sizeof(file_sizes[0]));
@@ -3213,7 +3392,7 @@ int run_upnp_test(char const* root_filename, char const* router_model, char cons
, chunked_encoding, test_ban, keepalive);
if (test_url_seed && test_rename)
- | ||
relevance 0 | ../src/block_cache.cpp:884 | it's somewhat expensive to iterate over this linked list. Presumably because of the random access of memory. It would be nice if pieces with no evictable blocks weren't in this list |
it's somewhat expensive
+ | ||
relevance 0 | ../src/block_cache.cpp:884 | it's somewhat expensive to iterate over this linked list. Presumably because of the random access of memory. It would be nice if pieces with no evictable blocks weren't in this list |
it's somewhat expensive
to iterate over this linked list. Presumably because of the random
access of memory. It would be nice if pieces with no evictable blocks
weren't in this list../src/block_cache.cpp:884 }
@@ -3267,7 +3446,7 @@ weren't in this list../src/block_cache.cpp:884relevance 0 | ../src/block_cache.cpp:948 | this should probably only be done every n:th time |
|
this should probably only be done every n:th time../src/block_cache.cpp:948 }
+ | ||
relevance 0 | ../src/block_cache.cpp:948 | this should probably only be done every n:th time |
this should probably only be done every n:th time../src/block_cache.cpp:948 }
if (pe->ok_to_evict())
{
@@ -3318,7 +3497,7 @@ weren't in this list../src/block_cache.cpp:884relevance 0 | ../src/block_cache.cpp:1720 | create a holder for refcounts that automatically decrement |
|
create a holder for refcounts that automatically decrement../src/block_cache.cpp:1720 }
+ | ||
relevance 0 | ../src/block_cache.cpp:1720 | create a holder for refcounts that automatically decrement |
create a holder for refcounts that automatically decrement../src/block_cache.cpp:1720 }
j->buffer = allocate_buffer("send buffer");
if (j->buffer == 0) return -2;
@@ -3369,7 +3548,7 @@ bool block_cache::maybe_free_piece(cached_piece_entry* pe)
boost::shared_ptr<piece_manager> s = pe->storage;
- | ||
relevance 0 | ../src/bt_peer_connection.cpp:645 | this could be optimized using knuth morris pratt |
this could be optimized using knuth morris pratt../src/bt_peer_connection.cpp:645 {
+ | ||
relevance 0 | ../src/bt_peer_connection.cpp:645 | this could be optimized using knuth morris pratt |
this could be optimized using knuth morris pratt../src/bt_peer_connection.cpp:645 {
disconnect(errors::no_memory, op_encryption);
return;
}
@@ -3420,7 +3599,7 @@ bool block_cache::maybe_free_piece(cached_piece_entry* pe)
// }
// no complete sync
- | ||
relevance 0 | ../src/bt_peer_connection.cpp:2216 | if we're finished, send upload_only message |
if we're finished, send upload_only message../src/bt_peer_connection.cpp:2216 if (msg[5 + k / 8] & (0x80 >> (k % 8))) bitfield_string[k] = '1';
+ | ||
relevance 0 | ../src/bt_peer_connection.cpp:2216 | if we're finished, send upload_only message |
if we're finished, send upload_only message../src/bt_peer_connection.cpp:2216 if (msg[5 + k / 8] & (0x80 >> (k % 8))) bitfield_string[k] = '1';
else bitfield_string[k] = '0';
}
peer_log("==> BITFIELD [ %s ]", bitfield_string.c_str());
@@ -3471,7 +3650,7 @@ bool block_cache::maybe_free_piece(cached_piece_entry* pe)
? m_settings.get_str(settings_pack::user_agent)
: m_settings.get_str(settings_pack::handshake_client_version);
}
- | ||
relevance 0 | ../src/choker.cpp:332 | optimize this using partial_sort or something. We don't need to sort the entire list |
optimize this using partial_sort or something. We don't need
+ | ||
relevance 0 | ../src/choker.cpp:332 | optimize this using partial_sort or something. We don't need to sort the entire list |
optimize this using partial_sort or something. We don't need
to sort the entire list../src/choker.cpp:332 return upload_slots;
}
@@ -3493,7 +3672,7 @@ to sort the entire list../src/choker.cpp:332
- | ||
relevance 0 | ../src/choker.cpp:335 | make the comparison function a free function and move it into this cpp file |
make the comparison function a free function and move it
+ | ||
relevance 0 | ../src/choker.cpp:335 | make the comparison function a free function and move it into this cpp file |
make the comparison function a free function and move it
into this cpp file../src/choker.cpp:335 }
// ==== rate-based ====
@@ -3517,7 +3696,7 @@ into this cpp file../src/choker.cpp:335 std::sort(peers.begin(), peers.end()
, boost::bind(&upload_rate_compare, _1, _2));
- | ||
relevance 0 | ../src/choker.cpp:340 | make configurable |
make configurable../src/choker.cpp:340 //
+ | ||
relevance 0 | ../src/choker.cpp:340 | make configurable |
make configurable../src/choker.cpp:340 //
// The rate based unchoker looks at our upload rate to peers, and find
// a balance between number of upload slots and the rate we achieve. The
// intention is to not spread upload bandwidth too thin, but also to not
@@ -3550,7 +3729,7 @@ into this cpp file../src/choker.cpp:335relevance 0 | ../src/choker.cpp:354 | make configurable |
|
make configurable../src/choker.cpp:354 // it purely based on the current state of our peers.
+ | ||
relevance 0 | ../src/choker.cpp:354 | make configurable |
make configurable../src/choker.cpp:354 // it purely based on the current state of our peers.
upload_slots = 0;
@@ -3601,8 +3780,8 @@ into this cpp file../src/choker.cpp:335relevance 0 | ../src/disk_io_thread.cpp:921 | instead of doing a lookup each time through the loop, save cached_piece_entry pointers with piece_refcount incremented to pin them |
|
instead of doing a lookup each time through the loop, save
-cached_piece_entry pointers with piece_refcount incremented to pin them../src/disk_io_thread.cpp:921 // this is why we pass in 1 as cont_block to the flushing functions
+ | ||
relevance 0 | ../src/disk_io_thread.cpp:919 | instead of doing a lookup each time through the loop, save cached_piece_entry pointers with piece_refcount incremented to pin them |
instead of doing a lookup each time through the loop, save
+cached_piece_entry pointers with piece_refcount incremented to pin them../src/disk_io_thread.cpp:919 // this is why we pass in 1 as cont_block to the flushing functions
void disk_io_thread::try_flush_write_blocks(int num, tailqueue& completed_jobs
, mutex::scoped_lock& l)
{
@@ -3653,10 +3832,10 @@ cached_piece_entry pointers with piece_refcount incremented to pin them
cached_piece_entry* pe = m_disk_cache.find_piece(i->first, i->second);
if (pe == NULL) continue;
if (pe->num_dirty == 0) continue;
- | ||
relevance 0 | ../src/disk_io_thread.cpp:1132 | instead of doing this. pass in the settings to each storage_interface call. Each disk thread could hold its most recent understanding of the settings in a shared_ptr, and update it every time it wakes up from a job. That way each access to the settings won't require a mutex to be held. |
instead of doing this. pass in the settings to each storage_interface
+ | ||
relevance 0 | ../src/disk_io_thread.cpp:1130 | instead of doing this. pass in the settings to each storage_interface call. Each disk thread could hold its most recent understanding of the settings in a shared_ptr, and update it every time it wakes up from a job. That way each access to the settings won't require a mutex to be held. |
instead of doing this. pass in the settings to each storage_interface
call. Each disk thread could hold its most recent understanding of the settings
in a shared_ptr, and update it every time it wakes up from a job. That way
-each access to the settings won't require a mutex to be held.../src/disk_io_thread.cpp:1132 {
+each access to the settings won't require a mutex to be held.../src/disk_io_thread.cpp:1130 {
INVARIANT_CHECK;
TORRENT_ASSERT(j->next == 0);
TORRENT_ASSERT((j->flags & disk_io_job::in_progress) || !j->storage);
@@ -3683,7 +3862,7 @@ each access to the settings won't require a mutex to be held.../src/dis
ptime start_time = time_now_hires();
- ++m_outstanding_jobs;
+ m_stats_counters.inc_stats_counter(counters::num_running_disk_jobs, 1);
// call disk function
int ret = (this->*(job_functions[j->action]))(j, completed_jobs);
@@ -3691,7 +3870,7 @@ each access to the settings won't require a mutex to be held.../src/dis
// note that -2 erros are OK
TORRENT_ASSERT(ret != -1 || (j->error.ec && j->error.operation != 0));
- --m_outstanding_jobs;
+ m_stats_counters.inc_stats_counter(counters::num_running_disk_jobs, -1);
if (ret == retry_job)
{
@@ -3700,12 +3879,12 @@ each access to the settings won't require a mutex to be held.../src/dis
// our quanta in case there aren't any other
// jobs to run in between
- | ||
relevance 0 | ../src/disk_io_thread.cpp:1160 | a potentially more efficient solution would be to have a special queue for retry jobs, that's only ever run when a job completes, in any thread. It would only work if m_outstanding_jobs > 0 |
a potentially more efficient solution would be to have a special
+ | ||
relevance 0 | ../src/disk_io_thread.cpp:1158 | a potentially more efficient solution would be to have a special queue for retry jobs, that's only ever run when a job completes, in any thread. It would only work if counters::num_running_disk_jobs > 0 |
a potentially more efficient solution would be to have a special
queue for retry jobs, that's only ever run when a job completes, in
-any thread. It would only work if m_outstanding_jobs > 0../src/disk_io_thread.cpp:1160
+any thread. It would only work if counters::num_running_disk_jobs > 0../src/disk_io_thread.cpp:1158
ptime start_time = time_now_hires();
- ++m_outstanding_jobs;
+ m_stats_counters.inc_stats_counter(counters::num_running_disk_jobs, 1);
// call disk function
int ret = (this->*(job_functions[j->action]))(j, completed_jobs);
@@ -3713,7 +3892,7 @@ any thread. It would only work if m_outstanding_jobs > 0../src/disk_io_
// note that -2 erros are OK
TORRENT_ASSERT(ret != -1 || (j->error.ec && j->error.operation != 0));
- --m_outstanding_jobs;
+ m_stats_counters.inc_stats_counter(counters::num_running_disk_jobs, -1);
if (ret == retry_job)
{
@@ -3722,7 +3901,7 @@ any thread. It would only work if m_outstanding_jobs > 0../src/disk_io_
// our quanta in case there aren't any other
// jobs to run in between
-
+
TORRENT_ASSERT((j->flags & disk_io_job::in_progress) || !j->storage);
bool need_sleep = m_queued_jobs.empty();
@@ -3733,7 +3912,7 @@ any thread. It would only work if m_outstanding_jobs > 0 ../src/disk_io_
}
#if TORRENT_USE_ASSERT
- | ||
relevance 0 | ../src/disk_io_thread.cpp:1174 | it should clear the hash state even when there's an error, right? |
it should clear the hash state even when there's an error, right?../src/disk_io_thread.cpp:1174 --m_outstanding_jobs;
+ | ||
relevance 0 | ../src/disk_io_thread.cpp:1172 | it should clear the hash state even when there's an error, right? |
it should clear the hash state even when there's an error, right?../src/disk_io_thread.cpp:1172 m_stats_counters.inc_stats_counter(counters::num_running_disk_jobs, -1);
if (ret == retry_job)
{
@@ -3742,7 +3921,7 @@ any thread. It would only work if m_outstanding_jobs > 0../src/disk_io_
// our quanta in case there aren't any other
// jobs to run in between
-
+
TORRENT_ASSERT((j->flags & disk_io_job::in_progress) || !j->storage);
bool need_sleep = m_queued_jobs.empty();
@@ -3784,8 +3963,8 @@ any thread. It would only work if m_outstanding_jobs > 0../src/disk_io_
j->error.operation = storage_error::alloc_cache_piece;
return -1;
}
- | ||
relevance 0 | ../src/disk_io_thread.cpp:1871 | maybe the tailqueue_iterator should contain a pointer-pointer instead and have an unlink function |
maybe the tailqueue_iterator should contain a pointer-pointer
-instead and have an unlink function../src/disk_io_thread.cpp:1871 j->callback = handler;
+ | ||
relevance 0 | ../src/disk_io_thread.cpp:1869 | maybe the tailqueue_iterator should contain a pointer-pointer instead and have an unlink function |
maybe the tailqueue_iterator should contain a pointer-pointer
+instead and have an unlink function../src/disk_io_thread.cpp:1869 j->callback = handler;
add_fence_job(storage, j);
}
@@ -3836,8 +4015,8 @@ instead and have an unlink function../src/disk_io_thread.cpp:1871<
if (completed_jobs.size())
add_completed_jobs(completed_jobs);
}
- | ||
relevance 0 | ../src/disk_io_thread.cpp:2126 | this is potentially very expensive. One way to solve it would be to have a fence for just this one piece. |
this is potentially very expensive. One way to solve
-it would be to have a fence for just this one piece.../src/disk_io_thread.cpp:2126 }
+ | ||
relevance 0 | ../src/disk_io_thread.cpp:2124 | this is potentially very expensive. One way to solve it would be to have a fence for just this one piece. |
this is potentially very expensive. One way to solve
+it would be to have a fence for just this one piece.../src/disk_io_thread.cpp:2124 }
void disk_io_thread::async_clear_piece(piece_manager* storage, int index
, boost::function<void(disk_io_job const*)> const& handler)
@@ -3888,7 +4067,7 @@ it would be to have a fence for just this one piece.../src/disk_io_thre
if (!pe->hash) return;
if (pe->hashing) return;
- | ||
relevance 0 | ../src/disk_io_thread.cpp:2387 | we should probably just hang the job on the piece and make sure the hasher gets kicked |
we should probably just hang the job on the piece and make sure the hasher gets kicked../src/disk_io_thread.cpp:2387 if (pe == NULL)
+ | ||
relevance 0 | ../src/disk_io_thread.cpp:2385 | we should probably just hang the job on the piece and make sure the hasher gets kicked |
we should probably just hang the job on the piece and make sure the hasher gets kicked../src/disk_io_thread.cpp:2385 if (pe == NULL)
{
int cache_state = (j->flags & disk_io_job::volatile_read)
? cached_piece_entry::volatile_read_lru
@@ -3939,8 +4118,8 @@ it would be to have a fence for just this one piece.../src/disk_io_thre
// increment the refcounts of all
// blocks up front, and then hash them without holding the lock
- | ||
relevance 0 | ../src/disk_io_thread.cpp:2457 | introduce a holder class that automatically increments and decrements the piece_refcount |
introduce a holder class that automatically increments
-and decrements the piece_refcount../src/disk_io_thread.cpp:2457 for (int i = ph->offset / block_size; i < blocks_in_piece; ++i)
+ | ||
relevance 0 | ../src/disk_io_thread.cpp:2455 | introduce a holder class that automatically increments and decrements the piece_refcount |
introduce a holder class that automatically increments
+and decrements the piece_refcount../src/disk_io_thread.cpp:2455 for (int i = ph->offset / block_size; i < blocks_in_piece; ++i)
{
iov.iov_len = (std::min)(block_size, piece_size - ph->offset);
@@ -3991,8 +4170,8 @@ and decrements the piece_refcount../src/disk_io_thread.cpp:2457 | ||
relevance 0 | ../src/disk_io_thread.cpp:2699 | it would be nice to not have to lock the mutex every turn through this loop |
it would be nice to not have to lock the mutex every
-turn through this loop../src/disk_io_thread.cpp:2699 {
+ | ||
relevance 0 | ../src/disk_io_thread.cpp:2697 | it would be nice to not have to lock the mutex every turn through this loop |
it would be nice to not have to lock the mutex every
+turn through this loop../src/disk_io_thread.cpp:2697 {
j->error.ec = error::no_memory;
j->error.operation = storage_error::alloc_cache_piece;
return -1;
@@ -4043,7 +4222,7 @@ turn through this loop../src/disk_io_thread.cpp:2699relevance 0 | ../src/http_tracker_connection.cpp:93 | support authentication (i.e. user name and password) in the URL |
|
support authentication (i.e. user name and password) in the URL../src/http_tracker_connection.cpp:93
+ | ||
relevance 0 | ../src/http_tracker_connection.cpp:93 | support authentication (i.e. user name and password) in the URL |
support authentication (i.e. user name and password) in the URL../src/http_tracker_connection.cpp:93
http_tracker_connection::http_tracker_connection(
io_service& ios
, tracker_manager& man
@@ -4094,7 +4273,7 @@ turn through this loop../src/disk_io_thread.cpp:2699 | ||
relevance 0 | ../src/http_tracker_connection.cpp:194 | support this somehow |
support this somehow../src/http_tracker_connection.cpp:194 url += escape_string(id.c_str(), id.length());
+ | ||
relevance 0 | ../src/http_tracker_connection.cpp:194 | support this somehow |
support this somehow../src/http_tracker_connection.cpp:194 url += escape_string(id.c_str(), id.length());
}
#if TORRENT_USE_I2P
@@ -4145,7 +4324,7 @@ turn through this loop../src/disk_io_thread.cpp:2699relevance 0 | ../src/metadata_transfer.cpp:359 | this is not safe. The torrent could be unloaded while we're still sending the metadata |
|
this is not safe. The torrent could be unloaded while
+ | ||
relevance 0 | ../src/metadata_transfer.cpp:359 | this is not safe. The torrent could be unloaded while we're still sending the metadata |
this is not safe. The torrent could be unloaded while
we're still sending the metadata../src/metadata_transfer.cpp:359 std::pair<int, int> offset
= req_to_offset(req, (int)m_tp.metadata().left());
@@ -4197,7 +4376,7 @@ we're still sending the metadata../src/metadata_transfer.cpp:359 | ||
relevance 0 | ../src/packet_buffer.cpp:176 | use compare_less_wrap for this comparison as well |
use compare_less_wrap for this comparison as well../src/packet_buffer.cpp:176 while (new_size < size)
+ | ||
relevance 0 | ../src/packet_buffer.cpp:176 | use compare_less_wrap for this comparison as well |
use compare_less_wrap for this comparison as well../src/packet_buffer.cpp:176 while (new_size < size)
new_size <<= 1;
void** new_storage = (void**)malloc(sizeof(void*) * new_size);
@@ -4248,7 +4427,7 @@ we're still sending the metadata../src/metadata_transfer.cpp:359 | ||
relevance 0 | ../src/part_file.cpp:252 | what do we do if someone is currently reading from the disk from this piece? does it matter? Since we won't actively erase the data from disk, but it may be overwritten soon, it's probably not that big of a deal |
what do we do if someone is currently reading from the disk
+ | ||
relevance 0 | ../src/part_file.cpp:252 | what do we do if someone is currently reading from the disk from this piece? does it matter? Since we won't actively erase the data from disk, but it may be overwritten soon, it's probably not that big of a deal |
what do we do if someone is currently reading from the disk
from this piece? does it matter? Since we won't actively erase the
data from disk, but it may be overwritten soon, it's probably not that
big of a deal../src/part_file.cpp:252 if (((mode & file::rw_mask) != file::read_only)
@@ -4302,7 +4481,7 @@ big of a deal../src/part_file.cpp:252relevance 0 | ../src/part_file.cpp:344 | instead of rebuilding the whole file header and flushing it, update the slot entries as we go |
|
instead of rebuilding the whole file header
+ | ||
relevance 0 | ../src/part_file.cpp:344 | instead of rebuilding the whole file header and flushing it, update the slot entries as we go |
instead of rebuilding the whole file header
and flushing it, update the slot entries as we go../src/part_file.cpp:344 if (block_to_copy == m_piece_size)
{
m_free_slots.push_back(i->second);
@@ -4354,7 +4533,7 @@ and flushing it, update the slot entries as we go../src/part_file.cpp:3
for (int piece = 0; piece < m_max_pieces; ++piece)
{
- | ||
relevance 0 | ../src/peer_connection.cpp:1029 | this should be the global download rate |
this should be the global download rate../src/peer_connection.cpp:1029
+ | ||
relevance 0 | ../src/peer_connection.cpp:1031 | this should be the global download rate |
this should be the global download rate../src/peer_connection.cpp:1031
int rate = 0;
// if we haven't received any data recently, the current download rate
@@ -4405,7 +4584,7 @@ and flushing it, update the slot entries as we go../src/part_file.cpp:3
if (m_ignore_stats) return;
boost::shared_ptr<torrent> t = m_torrent.lock();
if (!t) return;
- | ||
relevance 0 | ../src/peer_connection.cpp:3255 | sort the allowed fast set in priority order |
sort the allowed fast set in priority order../src/peer_connection.cpp:3255
+ | ||
relevance 0 | ../src/peer_connection.cpp:3266 | sort the allowed fast set in priority order |
sort the allowed fast set in priority order../src/peer_connection.cpp:3266
// if the peer has the piece and we want
// to download it, request it
if (int(m_have_piece.size()) > index
@@ -4456,7 +4635,7 @@ and flushing it, update the slot entries as we go../src/part_file.cpp:3
boost::shared_ptr<torrent> t = m_torrent.lock();
TORRENT_ASSERT(t);
TORRENT_ASSERT(t->has_picker());
- | ||
relevance 0 | ../src/piece_picker.cpp:2407 | when expanding pieces for cache stripe reasons, the !downloading condition doesn't make much sense |
when expanding pieces for cache stripe reasons,
+ | ||
relevance 0 | ../src/piece_picker.cpp:2407 | when expanding pieces for cache stripe reasons, the !downloading condition doesn't make much sense |
when expanding pieces for cache stripe reasons,
the !downloading condition doesn't make much sense../src/piece_picker.cpp:2407 TORRENT_ASSERT(index < (int)m_piece_map.size() || m_piece_map.empty());
if (index+1 == (int)m_piece_map.size())
return m_blocks_in_last_piece;
@@ -4508,7 +4687,7 @@ the !downloading condition doesn't make much sense../src/piece_picker.c
// the second bool is true if this is the only active peer that is requesting
// and downloading blocks from this piece. Active means having a connection.
boost::tuple<bool, bool> requested_from(piece_picker::downloading_piece const& p
- | ||
relevance 0 | ../src/session_impl.cpp:533 | there's no rule here to make uTP connections not have the global or local rate limits apply to it. This used to be the default. |
there's no rule here to make uTP connections not have the global or
+ | ||
relevance 0 | ../src/session_impl.cpp:533 | there's no rule here to make uTP connections not have the global or local rate limits apply to it. This used to be the default. |
there's no rule here to make uTP connections not have the global or
local rate limits apply to it. This used to be the default.../src/session_impl.cpp:533 m_global_class = m_classes.new_peer_class("global");
m_tcp_peer_class = m_classes.new_peer_class("tcp");
m_local_peer_class = m_classes.new_peer_class("local");
@@ -4560,7 +4739,7 @@ local rate limits apply to it. This used to be the default.../src/sessi
// futexes, shared objects etc.
rl.rlim_cur -= 20;
- | ||
relevance 0 | ../src/session_impl.cpp:1748 | instead of having a special case for this, just make the default listen interfaces be "0.0.0.0:6881,[::1]:6881" and use the generic path. That would even allow for not listening at all. |
instead of having a special case for this, just make the
+ | ||
relevance 0 | ../src/session_impl.cpp:1748 | instead of having a special case for this, just make the default listen interfaces be "0.0.0.0:6881,[::1]:6881" and use the generic path. That would even allow for not listening at all. |
instead of having a special case for this, just make the
default listen interfaces be "0.0.0.0:6881,[::1]:6881" and use
the generic path. That would even allow for not listening at all.../src/session_impl.cpp:1748
// reset the retry counter
@@ -4613,7 +4792,7 @@ retry:
if (s.sock)
{
TORRENT_ASSERT(!m_abort);
- | ||
relevance 0 | ../src/session_impl.cpp:2619 | should this function take a shared_ptr instead? |
should this function take a shared_ptr instead?../src/session_impl.cpp:2619 {
+ | ||
relevance 0 | ../src/session_impl.cpp:2619 | should this function take a shared_ptr instead? |
should this function take a shared_ptr instead?../src/session_impl.cpp:2619 {
#if defined TORRENT_ASIO_DEBUGGING
complete_async("session_impl::on_socks_accept");
#endif
@@ -4664,7 +4843,7 @@ retry:
TORRENT_ASSERT(sp.use_count() > 0);
connection_map::iterator i = m_connections.find(sp);
- | ||
relevance 0 | ../src/session_impl.cpp:2973 | have a separate list for these connections, instead of having to loop through all of them |
have a separate list for these connections, instead of having to loop through all of them../src/session_impl.cpp:2973 if (m_auto_manage_time_scaler < 0)
+ | ||
relevance 0 | ../src/session_impl.cpp:2973 | have a separate list for these connections, instead of having to loop through all of them |
have a separate list for these connections, instead of having to loop through all of them../src/session_impl.cpp:2973 if (m_auto_manage_time_scaler < 0)
{
INVARIANT_CHECK;
m_auto_manage_time_scaler = settings().get_int(settings_pack::auto_manage_interval);
@@ -4715,7 +4894,7 @@ retry:
#ifndef TORRENT_DISABLE_DHT
int dht_down = 0;
- | ||
relevance 0 | ../src/session_impl.cpp:3014 | this should apply to all bandwidth channels |
this should apply to all bandwidth channels../src/session_impl.cpp:3014 t.second_tick(tick_interval_ms, m_tick_residual / 1000);
+ | ||
relevance 0 | ../src/session_impl.cpp:3014 | this should apply to all bandwidth channels |
this should apply to all bandwidth channels../src/session_impl.cpp:3014 t.second_tick(tick_interval_ms, m_tick_residual / 1000);
// if the call to second_tick caused the torrent
// to no longer want to be ticked (i.e. it was
@@ -4766,7 +4945,7 @@ retry:
m_peak_up_rate = (std::max)(m_stat.upload_rate(), m_peak_up_rate);
m_peak_down_rate = (std::max)(m_stat.download_rate(), m_peak_down_rate);
- | ||
relevance 0 | ../src/session_impl.cpp:3500 | these vectors could be copied from m_torrent_lists, if we would maintain them. That way the first pass over all torrents could be avoided. It would be especially efficient if most torrents are not auto-managed whenever we receive a scrape response (or anything that may change the rank of a torrent) that one torrent could re-sort itself in a list that's kept sorted at all times. That way, this pass over all torrents could be avoided alltogether. |
these vectors could be copied from m_torrent_lists,
+ | ||
relevance 0 | ../src/session_impl.cpp:3502 | these vectors could be copied from m_torrent_lists, if we would maintain them. That way the first pass over all torrents could be avoided. It would be especially efficient if most torrents are not auto-managed whenever we receive a scrape response (or anything that may change the rank of a torrent) that one torrent could re-sort itself in a list that's kept sorted at all times. That way, this pass over all torrents could be avoided alltogether. |
these vectors could be copied from m_torrent_lists,
if we would maintain them. That way the first pass over
all torrents could be avoided. It would be especially
efficient if most torrents are not auto-managed
@@ -4774,7 +4953,7 @@ whenever we receive a scrape response (or anything
that may change the rank of a torrent) that one torrent
could re-sort itself in a list that's kept sorted at all
times. That way, this pass over all torrents could be
-avoided alltogether.../src/session_impl.cpp:3500#if defined TORRENT_VERBOSE_LOGGING || defined TORRENT_LOGGING
+avoided alltogether.../src/session_impl.cpp:3502#if defined TORRENT_VERBOSE_LOGGING || defined TORRENT_LOGGING
if (t->allows_peers())
t->log_to_all_peers("AUTO MANAGER PAUSING TORRENT");
#endif
@@ -4825,7 +5004,7 @@ avoided alltogether.../src/session_impl.cpp:3500relevance 0 | ../src/session_impl.cpp:3575 | allow extensions to sort torrents for queuing |
|
allow extensions to sort torrents for queuing../src/session_impl.cpp:3575 if (t->is_finished())
+ | ||
relevance 0 | ../src/session_impl.cpp:3577 | allow extensions to sort torrents for queuing |
allow extensions to sort torrents for queuing../src/session_impl.cpp:3577 if (t->is_finished())
seeds.push_back(t);
else
downloaders.push_back(t);
@@ -4876,9 +5055,9 @@ avoided alltogether.../src/session_impl.cpp:3500relevance 0 | ../src/session_impl.cpp:3747 | use a lower limit than m_settings.connections_limit to allocate the to 10% or so of connection slots for incoming connections |
|
use a lower limit than m_settings.connections_limit
+ | ||
relevance 0 | ../src/session_impl.cpp:3749 | use a lower limit than m_settings.connections_limit to allocate the to 10% or so of connection slots for incoming connections |
use a lower limit than m_settings.connections_limit
to allocate the to 10% or so of connection slots for incoming
-connections../src/session_impl.cpp:3747 // robin fashion, so that every torrent is equally likely to connect to a
+connections../src/session_impl.cpp:3749 // robin fashion, so that every torrent is equally likely to connect to a
// peer
// boost connections are connections made by torrent connection
@@ -4929,8 +5108,8 @@ connections../src/session_impl.cpp:3747relevance 0 | ../src/session_impl.cpp:3890 | post a message to have this happen immediately instead of waiting for the next tick |
|
post a message to have this happen
-immediately instead of waiting for the next tick../src/session_impl.cpp:3890 torrent* t = p->associated_torrent().lock().get();
+ | ||
relevance 0 | ../src/session_impl.cpp:3892 | post a message to have this happen immediately instead of waiting for the next tick |
post a message to have this happen
+immediately instead of waiting for the next tick../src/session_impl.cpp:3892 torrent* t = p->associated_torrent().lock().get();
torrent_peer* pi = p->peer_info_struct();
if (p->ignore_unchoke_slots() || t == 0 || pi == 0
@@ -4981,7 +5160,7 @@ immediately instead of waiting for the next tick../src/session_impl.cpp
if (num_opt_unchoke == 0) num_opt_unchoke = (std::max)(1, m_allowed_upload_slots / 5);
// reserve some upload slots for optimistic unchokes
- | ||
relevance 0 | ../src/session_impl.cpp:3935 | this should be called for all peers! |
this should be called for all peers!../src/session_impl.cpp:3935
+ | ||
relevance 0 | ../src/session_impl.cpp:3937 | this should be called for all peers! |
this should be called for all peers!../src/session_impl.cpp:3937
m_allowed_upload_slots = unchoke_sort(peers, max_upload_rate
, unchoke_interval, m_settings);
@@ -5032,10 +5211,10 @@ immediately instead of waiting for the next tick../src/session_impl.cpp
{
// no, this peer should be choked
TORRENT_ASSERT(p->peer_info_struct());
- | ||
relevance 0 | ../src/session_impl.cpp:4336 | it might be a nice feature here to limit the number of torrents to send in a single update. By just posting the first n torrents, they would nicely be round-robined because the torrent lists are always pushed back |
it might be a nice feature here to limit the number of torrents
+ | ||
relevance 0 | ../src/session_impl.cpp:4338 | it might be a nice feature here to limit the number of torrents to send in a single update. By just posting the first n torrents, they would nicely be round-robined because the torrent lists are always pushed back |
it might be a nice feature here to limit the number of torrents
to send in a single update. By just posting the first n torrents, they
would nicely be round-robined because the torrent lists are always
-pushed back../src/session_impl.cpp:4336 t->status(&*i, flags);
+pushed back../src/session_impl.cpp:4338 t->status(&*i, flags);
}
}
@@ -5085,7 +5264,7 @@ pushed back../src/session_impl.cpp:4336relevance 0 | ../src/storage.cpp:710 | make this more generic to not just work if files have been renamed, but also if they have been merged into a single file for instance maybe use the same format as .torrent files and reuse some code from torrent_info |
|
make this more generic to not just work if files have been
+ | ||
relevance 0 | ../src/storage.cpp:710 | make this more generic to not just work if files have been renamed, but also if they have been merged into a single file for instance maybe use the same format as .torrent files and reuse some code from torrent_info |
make this more generic to not just work if files have been
renamed, but also if they have been merged into a single file for instance
maybe use the same format as .torrent files and reuse some code from torrent_info../src/storage.cpp:710 for (;;)
{
@@ -5138,7 +5317,7 @@ maybe use the same format as .torrent files and reuse some code from torrent_inf
if (file_sizes_ent->list_size() == 0)
{
ec.ec = errors::no_files_in_resume_data;
- | ||
relevance 0 | ../src/storage.cpp:1006 | if everything moves OK, except for the partfile we currently won't update the save path, which breaks things. it would probably make more sense to give up on the partfile |
if everything moves OK, except for the partfile
+ | ||
relevance 0 | ../src/storage.cpp:1006 | if everything moves OK, except for the partfile we currently won't update the save path, which breaks things. it would probably make more sense to give up on the partfile |
if everything moves OK, except for the partfile
we currently won't update the save path, which breaks things.
it would probably make more sense to give up on the partfile../src/storage.cpp:1006 if (ec)
{
@@ -5191,7 +5370,7 @@ it would probably make more sense to give up on the partfile../src/stor
{
fileop op = { &file::writev
, file::read_write | flags };
- | ||
relevance 0 | ../src/torrent.cpp:491 | if the existing torrent doesn't have metadata, insert the metadata we just downloaded into it. |
if the existing torrent doesn't have metadata, insert
+ | ||
relevance 0 | ../src/torrent.cpp:491 | if the existing torrent doesn't have metadata, insert the metadata we just downloaded into it. |
if the existing torrent doesn't have metadata, insert
the metadata we just downloaded into it.../src/torrent.cpp:491
m_torrent_file = tf;
@@ -5243,7 +5422,7 @@ the metadata we just downloaded into it.../src/torrent.cpp:491 | ||
relevance 0 | ../src/torrent.cpp:641 | if the existing torrent doesn't have metadata, insert the metadata we just downloaded into it. |
if the existing torrent doesn't have metadata, insert
+ | ||
relevance 0 | ../src/torrent.cpp:641 | if the existing torrent doesn't have metadata, insert the metadata we just downloaded into it. |
if the existing torrent doesn't have metadata, insert
the metadata we just downloaded into it.../src/torrent.cpp:641
m_torrent_file = tf;
@@ -5295,12 +5474,12 @@ the metadata we just downloaded into it.../src/torrent.cpp:641 | ||
relevance 0 | ../src/torrent.cpp:1446 | is verify_peer_cert called once per certificate in the chain, and this function just tells us which depth we're at right now? If so, the comment makes sense. any certificate that isn't the leaf (i.e. the one presented by the peer) should be accepted automatically, given preverified is true. The leaf certificate need to be verified to make sure its DN matches the info-hash |
is verify_peer_cert called once per certificate in the chain, and
+ | ||
relevance 0 | ../src/torrent.cpp:1441 | is verify_peer_cert called once per certificate in the chain, and this function just tells us which depth we're at right now? If so, the comment makes sense. any certificate that isn't the leaf (i.e. the one presented by the peer) should be accepted automatically, given preverified is true. The leaf certificate need to be verified to make sure its DN matches the info-hash |
is verify_peer_cert called once per certificate in the chain, and
this function just tells us which depth we're at right now? If so, the comment
makes sense.
any certificate that isn't the leaf (i.e. the one presented by the peer)
should be accepted automatically, given preverified is true. The leaf certificate
-need to be verified to make sure its DN matches the info-hash../src/torrent.cpp:1446 if (pp) p->add_extension(pp);
+need to be verified to make sure its DN matches the info-hash../src/torrent.cpp:1441 if (pp) p->add_extension(pp);
}
// if files are checked for this torrent, call the extension
@@ -5351,16 +5530,16 @@ need to be verified to make sure its DN matches the info-hash../src/tor
{
#if defined(TORRENT_VERBOSE_LOGGING) || defined(TORRENT_LOGGING)
match = true;
- | ||
relevance 0 | ../src/torrent.cpp:1838 | instead of creating the picker up front here, maybe this whole section should move to need_picker() |
instead of creating the picker up front here,
-maybe this whole section should move to need_picker()../src/torrent.cpp:1838 else
- {
- read_resume_data(m_resume_data->entry);
- }
+ | ||
relevance 0 | ../src/torrent.cpp:1841 | instead of creating the picker up front here, maybe this whole section should move to need_picker() |
instead of creating the picker up front here,
+maybe this whole section should move to need_picker()../src/torrent.cpp:1841 {
+ m_have_all = true;
+ m_ses.get_io_service().post(boost::bind(&torrent::files_checked, shared_from_this()));
+ m_resume_data.reset();
+ update_gauge();
+ return;
}
-
-#if TORRENT_USE_ASSERTS
- m_resume_data_loaded = true;
-#endif
+
+ set_state(torrent_status::checking_resume_data);
int num_pad_files = 0;
TORRENT_ASSERT(block_size() > 0);
@@ -5403,10 +5582,10 @@ maybe this whole section should move to need_picker()../src/torrent.cpp
// need to consider it finished
std::vector<piece_picker::downloading_piece> dq
- | ||
relevance 0 | ../src/torrent.cpp:2034 | there may be peer extensions relying on the torrent extension still being alive. Only do this if there are no peers. And when the last peer is disconnected, if the torrent is unloaded, clear the extensions m_extensions.clear(); |
there may be peer extensions relying on the torrent extension
+ | ||
relevance 0 | ../src/torrent.cpp:2037 | there may be peer extensions relying on the torrent extension still being alive. Only do this if there are no peers. And when the last peer is disconnected, if the torrent is unloaded, clear the extensions m_extensions.clear(); |
there may be peer extensions relying on the torrent extension
still being alive. Only do this if there are no peers. And when the last peer
is disconnected, if the torrent is unloaded, clear the extensions
-m_extensions.clear();../src/torrent.cpp:2034 // pinned torrents are not allowed to be swapped out
+m_extensions.clear();../src/torrent.cpp:2037 // pinned torrents are not allowed to be swapped out
TORRENT_ASSERT(!m_pinned);
m_should_be_loaded = false;
@@ -5457,9 +5636,9 @@ m_extensions.clear();../src/torrent.cpp:2034relevance 0 | ../src/torrent.cpp:2709 | this pattern is repeated in a few places. Factor this into a function and generalize the concept of a torrent having a dedicated listen port |
|
this pattern is repeated in a few places. Factor this into
+ | ||
relevance 0 | ../src/torrent.cpp:2712 | this pattern is repeated in a few places. Factor this into a function and generalize the concept of a torrent having a dedicated listen port |
this pattern is repeated in a few places. Factor this into
a function and generalize the concept of a torrent having a
-dedicated listen port../src/torrent.cpp:2709 // if the files haven't been checked yet, we're
+dedicated listen port../src/torrent.cpp:2712 // if the files haven't been checked yet, we're
// not ready for peers. Except, if we don't have metadata,
// we need peers to download from
if (!m_files_checked && valid_metadata()) return;
@@ -5510,7 +5689,7 @@ dedicated listen port../src/torrent.cpp:2709 | ||
relevance 0 | ../src/torrent.cpp:3482 | add one peer per IP the hostname resolves to |
add one peer per IP the hostname resolves to../src/torrent.cpp:3482#endif
+ | ||
relevance 0 | ../src/torrent.cpp:3485 | add one peer per IP the hostname resolves to |
add one peer per IP the hostname resolves to../src/torrent.cpp:3485#endif
void torrent::on_peer_name_lookup(error_code const& e
, std::vector<address> const& host_list, int port)
@@ -5561,7 +5740,7 @@ dedicated listen port../src/torrent.cpp:2709relevance 0 | ../src/torrent.cpp:4473 | update suggest_piece? |
|
update suggest_piece?../src/torrent.cpp:4473
+ | ||
relevance 0 | ../src/torrent.cpp:4476 | update suggest_piece? |
update suggest_piece?../src/torrent.cpp:4476
void torrent::peer_has_all(peer_connection const* peer)
{
if (has_picker())
@@ -5612,8 +5791,8 @@ dedicated listen port../src/torrent.cpp:2709relevance 0 | ../src/torrent.cpp:4616 | really, we should just keep the picker around in this case to maintain the availability counters |
|
really, we should just keep the picker around
-in this case to maintain the availability counters../src/torrent.cpp:4616 pieces.reserve(cs.pieces.size());
+ | ||
relevance 0 | ../src/torrent.cpp:4619 | really, we should just keep the picker around in this case to maintain the availability counters |
really, we should just keep the picker around
+in this case to maintain the availability counters../src/torrent.cpp:4619 pieces.reserve(cs.pieces.size());
// sort in ascending order, to get most recently used first
std::sort(cs.pieces.begin(), cs.pieces.end()
@@ -5664,14 +5843,14 @@ in this case to maintain the availability counters../src/torrent.cpp:46
}
void torrent::abort()
- | ||
relevance 0 | ../src/torrent.cpp:6538 | make this more generic to not just work if files have been renamed, but also if they have been merged into a single file for instance maybe use the same format as .torrent files and reuse some code from torrent_info The mapped_files needs to be read both in the network thread and in the disk thread, since they both have their own mapped files structures which are kept in sync |
make this more generic to not just work if files have been
+ | ||
relevance 0 | ../src/torrent.cpp:6587 | make this more generic to not just work if files have been renamed, but also if they have been merged into a single file for instance maybe use the same format as .torrent files and reuse some code from torrent_info The mapped_files needs to be read both in the network thread and in the disk thread, since they both have their own mapped files structures which are kept in sync |
make this more generic to not just work if files have been
renamed, but also if they have been merged into a single file for instance
maybe use the same format as .torrent files and reuse some code from torrent_info
The mapped_files needs to be read both in the network thread
and in the disk thread, since they both have their own mapped files structures
-which are kept in sync../src/torrent.cpp:6538 super_seeding(rd.dict_find_int_value("super_seeding", 0));
+which are kept in sync../src/torrent.cpp:6587 m_last_upload = tmp == -1 ? INT16_MIN : now - tmp;
- if (!m_use_resume_save_path)
+ if (m_use_resume_save_path)
{
std::string p = rd.dict_find_string_value("save_path");
if (!p.empty()) m_save_path = p;
@@ -5705,27 +5884,27 @@ which are kept in sync../src/torrent.cpp:6538 | ||
relevance 0 | ../src/torrent.cpp:6701 | if this is a merkle torrent and we can't restore the tree, we need to wipe all the bits in the have array, but not necessarily we might want to do a full check to see if we have all the pieces. This is low priority since almost no one uses merkle torrents |
if this is a merkle torrent and we can't
+ lazy_entry const* file_priority = rd.dict_find_list("file_priority");
+ if (file_priority && file_priority->list_size()
+ == m_torrent_file->num_files())
+ {
+ int num_files = m_torrent_file->num_files();
+ m_file_priority.resize(num_files);
+ for (int i = 0; i < num_files; ++i)
+ m_file_priority[i] = file_priority->list_int_value_at(i, 1);
+ // unallocated slots are assumed to be priority 1, so cut off any
+ // trailing ones
+ int end_range = num_files - 1;
+ for (; end_range >= 0; --end_range) if (m_file_priority[end_range] != 1) break;
+ m_file_priority.resize(end_range + 1);
+ | ||
relevance 0 | ../src/torrent.cpp:6705 | if this is a merkle torrent and we can't restore the tree, we need to wipe all the bits in the have array, but not necessarily we might want to do a full check to see if we have all the pieces. This is low priority since almost no one uses merkle torrents |
if this is a merkle torrent and we can't
restore the tree, we need to wipe all the
bits in the have array, but not necessarily
we might want to do a full check to see if we have
all the pieces. This is low priority since almost
-no one uses merkle torrents../src/torrent.cpp:6701 add_web_seed(url, web_seed_entry::http_seed);
+no one uses merkle torrents../src/torrent.cpp:6705 add_web_seed(url, web_seed_entry::http_seed);
}
}
@@ -5776,14 +5955,14 @@ no one uses merkle torrents../src/torrent.cpp:6701 | ||
relevance 0 | ../src/torrent.cpp:6891 | make this more generic to not just work if files have been renamed, but also if they have been merged into a single file for instance. using file_base |
make this more generic to not just work if files have been
+ | ||
relevance 0 | ../src/torrent.cpp:6895 | make this more generic to not just work if files have been renamed, but also if they have been merged into a single file for instance. using file_base |
make this more generic to not just work if files have been
renamed, but also if they have been merged into a single file for instance.
-using file_base../src/torrent.cpp:6891 pieces.resize(m_torrent_file->num_pieces());
+using file_base../src/torrent.cpp:6895 pieces.resize(m_torrent_file->num_pieces());
if (!has_picker())
{
std::memset(&pieces[0], m_have_all, pieces.size());
}
- else
+ else if (has_picker())
{
for (int i = 0, end(pieces.size()); i < end; ++i)
pieces[i] = m_picker->have_piece(i) ? 1 : 0;
@@ -5822,20 +6001,20 @@ using file_base../src/torrent.cpp:6891relevance 0 | ../src/torrent.cpp:8884 | add a flag to ignore stats, and only care about resume data for content. For unchanged files, don't trigger a load of the metadata just to save an empty resume data file |
|
add a flag to ignore stats, and only care about resume data for
+ | ||
relevance 0 | ../src/torrent.cpp:8888 | add a flag to ignore stats, and only care about resume data for content. For unchanged files, don't trigger a load of the metadata just to save an empty resume data file |
add a flag to ignore stats, and only care about resume data for
content. For unchanged files, don't trigger a load of the metadata
-just to save an empty resume data file../src/torrent.cpp:8884 if (m_complete != 0xffffff) seeds = m_complete;
- else seeds = m_policy ? m_policy->num_seeds() : 0;
+just to save an empty resume data file../src/torrent.cpp:8888 if (m_complete != 0xffffff) seeds = m_complete;
+ else seeds = m_peer_list ? m_peer_list->num_seeds() : 0;
if (m_incomplete != 0xffffff) downloaders = m_incomplete;
- else downloaders = m_policy ? m_policy->num_peers() - m_policy->num_seeds() : 0;
+ else downloaders = m_peer_list ? m_peer_list->num_peers() - m_peer_list->num_seeds() : 0;
if (seeds == 0)
{
@@ -5882,10 +6061,10 @@ just to save an empty resume data file../src/torrent.cpp:8884 | ||
relevance 0 | ../src/torrent.cpp:9846 | go through the pieces we have and count the total number of downloaders we have. Only count peers that are interested in us since some peers might not send have messages for pieces we have it num_interested == 0, we need to pick a new piece |
go through the pieces we have and count the total number
+ | ||
relevance 0 | ../src/torrent.cpp:9850 | go through the pieces we have and count the total number of downloaders we have. Only count peers that are interested in us since some peers might not send have messages for pieces we have it num_interested == 0, we need to pick a new piece |
go through the pieces we have and count the total number
of downloaders we have. Only count peers that are interested in us
since some peers might not send have messages for pieces we have
-it num_interested == 0, we need to pick a new piece../src/torrent.cpp:9846 }
+it num_interested == 0, we need to pick a new piece../src/torrent.cpp:9850 }
rarest_pieces.clear();
rarest_rarity = pp.peer_count;
@@ -5936,8 +6115,8 @@ it num_interested == 0, we need to pick a new piece../src/torrent.cpp:9
if (num_cache_pieces > m_torrent_file->num_pieces())
num_cache_pieces = m_torrent_file->num_pieces();
- | ||
relevance 0 | ../src/torrent.cpp:10492 | instead of resorting the whole list, insert the peers directly into the right place |
instead of resorting the whole list, insert the peers
-directly into the right place../src/torrent.cpp:10492 printf("timed out [average-piece-time: %d ms ]\n"
+ | ||
relevance 0 | ../src/torrent.cpp:10496 | instead of resorting the whole list, insert the peers directly into the right place |
instead of resorting the whole list, insert the peers
+directly into the right place../src/torrent.cpp:10496 printf("timed out [average-piece-time: %d ms ]\n"
, m_average_piece_time);
#endif
}
@@ -5988,7 +6167,7 @@ directly into the right place../src/torrent.cpp:10492relevance 0 | ../src/torrent_peer.cpp:176 | how do we deal with our external address changing? |
|
how do we deal with our external address changing?../src/torrent_peer.cpp:176 , is_v6_addr(false)
+ | ||
relevance 0 | ../src/torrent_peer.cpp:176 | how do we deal with our external address changing? |
how do we deal with our external address changing?../src/torrent_peer.cpp:176 , is_v6_addr(false)
#endif
#if TORRENT_USE_I2P
, is_i2p_addr(false)
@@ -6039,7 +6218,7 @@ directly into the right place../src/torrent.cpp:10492relevance 0 | ../src/udp_socket.cpp:286 | it would be nice to detect this on posix systems also |
|
it would be nice to detect this on posix systems also../src/udp_socket.cpp:286 --m_v6_outstanding;
+ | ||
relevance 0 | ../src/udp_socket.cpp:286 | it would be nice to detect this on posix systems also |
it would be nice to detect this on posix systems also../src/udp_socket.cpp:286 --m_v6_outstanding;
}
else
#endif
@@ -6090,7 +6269,7 @@ void udp_socket::call_handler(error_code const& ec, udp::endpoint const&
ret = (*i)->incoming_packet(ec, ep, buf, size);
} TORRENT_CATCH (std::exception&) {}
if (*i == NULL) i = m_observers.erase(i);
- | ||
relevance 0 | ../src/upnp.cpp:71 | listen_interface is not used. It's meant to bind the broadcast socket |
listen_interface is not used. It's meant to bind the broadcast socket../src/upnp.cpp:71#include <asio/ip/multicast.hpp>
+ | ||
relevance 0 | ../src/upnp.cpp:71 | listen_interface is not used. It's meant to bind the broadcast socket |
listen_interface is not used. It's meant to bind the broadcast socket../src/upnp.cpp:71#include <asio/ip/multicast.hpp>
#else
#include <boost/asio/ip/host_name.hpp>
#include <boost/asio/ip/multicast.hpp>
@@ -6141,7 +6320,7 @@ static error_code ec;
m_devices.swap(s->devices);
m_mappings.swap(s->mappings);
delete s;
- | ||
relevance 0 | ../src/ut_metadata.cpp:316 | we really need to increment the refcounter on the torrent while this buffer is still in the peer's send buffer |
we really need to increment the refcounter on the torrent
+ | ||
relevance 0 | ../src/ut_metadata.cpp:316 | we really need to increment the refcounter on the torrent while this buffer is still in the peer's send buffer |
we really need to increment the refcounter on the torrent
while this buffer is still in the peer's send buffer../src/ut_metadata.cpp:316 if (!m_tp.need_loaded()) return;
metadata = m_tp.metadata().begin + offset;
metadata_piece_size = (std::min)(
@@ -6193,7 +6372,7 @@ while this buffer is still in the peer's send buffer../src/ut_metadata.
#ifdef TORRENT_VERBOSE_LOGGING
m_pc.peer_log("<== UT_METADATA [ not a dictionary ]");
#endif
- | ||
relevance 0 | ../src/utp_stream.cpp:1627 | this loop may not be very efficient |
this loop may not be very efficient../src/utp_stream.cpp:1627
+ | ||
relevance 0 | ../src/utp_stream.cpp:1627 | this loop may not be very efficient |
this loop may not be very efficient../src/utp_stream.cpp:1627
char* m_buf;
};
@@ -6244,7 +6423,7 @@ bool utp_socket_impl::send_pkt(int flags)
if (sack > 32) sack = 32;
}
- | ||
relevance 0 | ../src/web_connection_base.cpp:73 | introduce a web-seed default class which has a low download priority |
introduce a web-seed default class which has a low download priority../src/web_connection_base.cpp:73{
+ | ||
relevance 0 | ../src/web_connection_base.cpp:73 | introduce a web-seed default class which has a low download priority |
introduce a web-seed default class which has a low download priority../src/web_connection_base.cpp:73{
web_connection_base::web_connection_base(
peer_connection_args const& pack
, web_seed_entry& web)
@@ -6295,8 +6474,8 @@ bool utp_socket_impl::send_pkt(int flags)
// according to the settings.
return m_settings.get_int(settings_pack::urlseed_timeout);
}
- | ||
relevance 0 | ../src/kademlia/dht_tracker.cpp:428 | ideally this function would be called when the put completes |
ideally this function would be called when the
-put completes../src/kademlia/dht_tracker.cpp:428 // since it controls whether we re-put the content
+ | ||
relevance 0 | ../src/kademlia/dht_tracker.cpp:430 | ideally this function would be called when the put completes |
ideally this function would be called when the
+put completes../src/kademlia/dht_tracker.cpp:430 // since it controls whether we re-put the content
TORRENT_ASSERT(!it.is_mutable());
f(it);
return false;
@@ -6347,59 +6526,7 @@ put completes../src/kademlia/dht_tracker.cpp:428relevance 0 | ../src/kademlia/routing_table.cpp:316 | instad of refreshing a bucket by using find_nodes, ping each node periodically |
|
instad of refreshing a bucket by using find_nodes,
-ping each node periodically../src/kademlia/routing_table.cpp:316 os << "]\n";
- }
-}
-
-#endif
-
-void routing_table::touch_bucket(node_id const& target)
-{
- table_t::iterator i = find_bucket(target);
- i->last_active = time_now();
-}
-
-// returns true if lhs is in more need of a refresh than rhs
-bool compare_bucket_refresh(routing_table_node const& lhs, routing_table_node const& rhs)
-{
- // add the number of nodes to prioritize buckets with few nodes in them
- return lhs.last_active + seconds(lhs.live_nodes.size() * 5)
- < rhs.last_active + seconds(rhs.live_nodes.size() * 5);
-}
-
-bool routing_table::need_refresh(node_id& target) const
- {
- INVARIANT_CHECK;
-
- ptime now = time_now();
-
- // refresh our own bucket once every 15 minutes
- if (now - minutes(15) > m_last_self_refresh)
- {
- m_last_self_refresh = now;
- target = m_id;
-#ifdef TORRENT_DHT_VERBOSE_LOGGING
- TORRENT_LOG(table) << "need_refresh [ bucket: self target: " << target << " ]";
-#endif
- return true;
- }
-
- if (m_buckets.empty()) return false;
-
- table_t::const_iterator i = std::min_element(m_buckets.begin(), m_buckets.end()
- , &compare_bucket_refresh);
-
- if (now - minutes(15) < i->last_active) return false;
- if (now - seconds(45) < m_last_refresh) return false;
-
- // generate a random node_id within the given bucket
- target = generate_random_id();
- int num_bits = std::distance(m_buckets.begin(), i) + 1;
- node_id mask = generate_prefix_mask(num_bits);
-
- // target = (target & ~mask) | (root & mask)
- | ||
relevance 0 | ../include/libtorrent/bitfield.hpp:158 | rename to data() ? |
rename to data() ?../include/libtorrent/bitfield.hpp:158 if (m_buf[i] != 0) return false;
+ | ||
relevance 0 | ../include/libtorrent/bitfield.hpp:158 | rename to data() ? |
rename to data() ?../include/libtorrent/bitfield.hpp:158 if (m_buf[i] != 0) return false;
}
return true;
}
@@ -6450,7 +6577,7 @@ bool compare_bucket_refresh(routing_table_node const& lhs, routing_table_nod
return ret;
}
#endif // TORRENT_HAS_SSE
- | ||
relevance 0 | ../include/libtorrent/block_cache.hpp:218 | make this 32 bits and to count seconds since the block cache was created |
make this 32 bits and to count seconds since the block cache was created../include/libtorrent/block_cache.hpp:218 bool operator==(cached_piece_entry const& rhs) const
+ | ||
relevance 0 | ../include/libtorrent/block_cache.hpp:218 | make this 32 bits and to count seconds since the block cache was created |
make this 32 bits and to count seconds since the block cache was created../include/libtorrent/block_cache.hpp:218 bool operator==(cached_piece_entry const& rhs) const
{ return storage.get() == rhs.storage.get() && piece == rhs.piece; }
// if this is set, we'll be calculating the hash
@@ -6501,7 +6628,7 @@ bool compare_bucket_refresh(routing_table_node const& lhs, routing_table_nod
// this is set to true once we flush blocks past
// the hash cursor. Once this happens, there's
- | ||
relevance 0 | ../include/libtorrent/config.hpp:339 | Make this count Unicode characters instead of bytes on windows |
Make this count Unicode characters instead of bytes on windows../include/libtorrent/config.hpp:339#define TORRENT_USE_WRITEV 0
+ | ||
relevance 0 | ../include/libtorrent/config.hpp:339 | Make this count Unicode characters instead of bytes on windows |
Make this count Unicode characters instead of bytes on windows../include/libtorrent/config.hpp:339#define TORRENT_USE_WRITEV 0
#define TORRENT_USE_READV 0
#else
@@ -6552,7 +6679,7 @@ bool compare_bucket_refresh(routing_table_node const& lhs, routing_table_nod
#include <stdarg.h>
// internal
- | ||
relevance 0 | ../include/libtorrent/debug.hpp:215 | rewrite this class to use FILE* instead and have a printf-like interface |
rewrite this class to use FILE* instead and
+ | ||
relevance 0 | ../include/libtorrent/debug.hpp:215 | rewrite this class to use FILE* instead and have a printf-like interface |
rewrite this class to use FILE* instead and
have a printf-like interface../include/libtorrent/debug.hpp:215#endif
}
@@ -6604,7 +6731,7 @@ namespace libtorrent
mutex::scoped_lock l(file_mutex);
open(!append);
- | ||
relevance 0 | ../include/libtorrent/disk_buffer_pool.hpp:128 | try to remove the observers, only using the async_allocate handlers |
try to remove the observers, only using the async_allocate handlers../include/libtorrent/disk_buffer_pool.hpp:128
+ | ||
relevance 0 | ../include/libtorrent/disk_buffer_pool.hpp:128 | try to remove the observers, only using the async_allocate handlers |
try to remove the observers, only using the async_allocate handlers../include/libtorrent/disk_buffer_pool.hpp:128
// number of bytes per block. The BitTorrent
// protocol defines the block size to 16 KiB.
const int m_block_size;
@@ -6655,7 +6782,7 @@ namespace libtorrent
// the pointer to the block of virtual address space
// making up the mmapped cache space
char* m_cache_pool;
- | ||
relevance 0 | ../include/libtorrent/peer_connection.hpp:216 | make this a raw pointer (to save size in the first cache line) and make the constructor take a raw pointer. torrent objects should always outlive their peers |
make this a raw pointer (to save size in
+ | ||
relevance 0 | ../include/libtorrent/peer_connection.hpp:216 | make this a raw pointer (to save size in the first cache line) and make the constructor take a raw pointer. torrent objects should always outlive their peers |
make this a raw pointer (to save size in
the first cache line) and make the constructor
take a raw pointer. torrent objects should always
outlive their peers../include/libtorrent/peer_connection.hpp:216 , m_snubbed(false)
@@ -6709,7 +6836,7 @@ outlive their peers../include/libtorrent/peer_connection.hpp:216 | ||
relevance 0 | ../include/libtorrent/peer_connection.hpp:1125 | factor this out into its own class with a virtual interface torrent and session should implement this interface |
factor this out into its own class with a virtual interface
+ | ||
relevance 0 | ../include/libtorrent/peer_connection.hpp:1125 | factor this out into its own class with a virtual interface torrent and session should implement this interface |
factor this out into its own class with a virtual interface
torrent and session should implement this interface../include/libtorrent/peer_connection.hpp:1125
// the local endpoint for this peer, i.e. our address
// and our port. If this is set for outgoing connections
@@ -6761,7 +6888,7 @@ torrent and session should implement this interface../include/libtorren
// |
// | m_recv_start (logical start of current
// | | receive buffer, as perceived by upper layers)
- | ||
relevance 0 | ../include/libtorrent/peer_connection_interface.hpp:45 | make this interface smaller! |
make this interface smaller!../include/libtorrent/peer_connection_interface.hpp:45SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ | ||
relevance 0 | ../include/libtorrent/peer_connection_interface.hpp:45 | make this interface smaller! |
make this interface smaller!../include/libtorrent/peer_connection_interface.hpp:45SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
@@ -6812,7 +6939,7 @@ namespace libtorrent
virtual tcp::endpoint const& remote() const = 0;
virtual tcp::endpoint local_endpoint() const = 0;
virtual void disconnect(error_code const& ec, operation_t op, int error = 0) = 0;
- | ||
relevance 0 | ../include/libtorrent/performance_counters.hpp:132 | should keepalives be in here too? how about dont-have, share-mode, upload-only |
should keepalives be in here too?
+ | ||
relevance 0 | ../include/libtorrent/performance_counters.hpp:132 | should keepalives be in here too? how about dont-have, share-mode, upload-only |
should keepalives be in here too?
how about dont-have, share-mode, upload-only../include/libtorrent/performance_counters.hpp:132 // a connect candidate
connection_attempt_loops,
// successful incoming connections (not rejected for any reason)
@@ -6864,9 +6991,9 @@ how about dont-have, share-mode, upload-only../include/libtorrent/perfo
num_outgoing_cancel,
num_outgoing_dht_port,
num_outgoing_suggest,
- | ||
relevance 0 | ../include/libtorrent/performance_counters.hpp:408 | some space could be saved here by making gauges 32 bits |
some space could be saved here by making gauges 32 bits../include/libtorrent/performance_counters.hpp:408 | ||
relevance 0 | ../include/libtorrent/performance_counters.hpp:409 | restore these to regular integers. Instead have one copy of the counters per thread and collect them at convenient synchronization points |
restore these to regular integers. Instead have one copy
+ | ||
relevance 0 | ../include/libtorrent/performance_counters.hpp:429 | some space could be saved here by making gauges 32 bits |
some space could be saved here by making gauges 32 bits../include/libtorrent/performance_counters.hpp:429 | ||
relevance 0 | ../include/libtorrent/performance_counters.hpp:430 | restore these to regular integers. Instead have one copy of the counters per thread and collect them at convenient synchronization points |
restore these to regular integers. Instead have one copy
of the counters per thread and collect them at convenient
-synchronization points../include/libtorrent/performance_counters.hpp:409 limiter_down_bytes,
+synchronization points../include/libtorrent/performance_counters.hpp:430 limiter_down_bytes,
num_counters,
num_gauge_counters = num_counters - num_stats_counters
@@ -6899,7 +7026,7 @@ synchronization points../include/libtorrent/performance_counters.hpp:40
#endif
- | ||
relevance 0 | ../include/libtorrent/piece_picker.hpp:669 | should this be allocated lazily? |
should this be allocated lazily?../include/libtorrent/piece_picker.hpp:669 std::vector<downloading_piece>::const_iterator find_dl_piece(int queue, int index) const;
+ | ||
relevance 0 | ../include/libtorrent/piece_picker.hpp:669 | should this be allocated lazily? |
should this be allocated lazily?../include/libtorrent/piece_picker.hpp:669 std::vector<downloading_piece>::const_iterator find_dl_piece(int queue, int index) const;
std::vector<downloading_piece>::iterator find_dl_piece(int queue, int index);
// returns an iterator to the downloading piece, whichever
@@ -6950,7 +7077,7 @@ synchronization points../include/libtorrent/performance_counters.hpp:40
// and some are still in the requested state
// 2: downloading pieces where every block is
// finished or writing
- | ||
relevance 0 | ../include/libtorrent/proxy_base.hpp:171 | it would be nice to remember the bind port and bind once we know where the proxy is m_sock.bind(endpoint, ec); |
it would be nice to remember the bind port and bind once we know where the proxy is
+ | ||
relevance 0 | ../include/libtorrent/proxy_base.hpp:171 | it would be nice to remember the bind port and bind once we know where the proxy is m_sock.bind(endpoint, ec); |
it would be nice to remember the bind port and bind once we know where the proxy is
m_sock.bind(endpoint, ec);../include/libtorrent/proxy_base.hpp:171 void bind(endpoint_type const& /* endpoint */)
{
// m_sock.bind(endpoint);
@@ -7002,7 +7129,7 @@ m_sock.bind(endpoint, ec);../include/libtorrent/proxy_base.hpp:171
m_sock.close(ec);
m_resolver.cancel();
}
- | ||
relevance 0 | ../include/libtorrent/session.hpp:861 | add get_peer_class_type_filter() as well |
add get_peer_class_type_filter() as well../include/libtorrent/session.hpp:861 //
+ | ||
relevance 0 | ../include/libtorrent/session.hpp:863 | add get_peer_class_type_filter() as well |
add get_peer_class_type_filter() as well../include/libtorrent/session.hpp:863 //
// The ``peer_class`` argument cannot be greater than 31. The bitmasks
// representing peer classes in the ``peer_class_filter`` are 32 bits.
//
@@ -7053,7 +7180,7 @@ m_sock.bind(endpoint, ec);../include/libtorrent/proxy_base.hpp:171
// destructs.
//
// For more information on peer classes, see peer-classes_.
- | ||
relevance 0 | ../include/libtorrent/settings_pack.hpp:1075 | deprecate this ``max_rejects`` is the number of piece requests we will reject in a row while a peer is choked before the peer is considered abusive and is disconnected. |
deprecate this
+ | ||
relevance 0 | ../include/libtorrent/settings_pack.hpp:1075 | deprecate this ``max_rejects`` is the number of piece requests we will reject in a row while a peer is choked before the peer is considered abusive and is disconnected. |
deprecate this
``max_rejects`` is the number of piece requests we will reject in a row
while a peer is choked before the peer is considered abusive and is
disconnected.../include/libtorrent/settings_pack.hpp:1075 auto_manage_startup,
@@ -7107,7 +7234,7 @@ disconnected.../include/libtorrent/settings_pack.hpp:1075 | ||
relevance 0 | ../include/libtorrent/size_type.hpp:48 | remove these and just use boost's types directly |
remove these and just use boost's types directly../include/libtorrent/size_type.hpp:48ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ | ||
relevance 0 | ../include/libtorrent/size_type.hpp:48 | remove these and just use boost's types directly |
remove these and just use boost's types directly../include/libtorrent/size_type.hpp:48ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
@@ -7133,7 +7260,7 @@ namespace libtorrent
#endif
- | ||
relevance 0 | ../include/libtorrent/torrent.hpp:1213 | this wastes 5 bits per file |
this wastes 5 bits per file../include/libtorrent/torrent.hpp:1213 typedef std::list<boost::shared_ptr<torrent_plugin> > extension_list_t;
+ | ||
relevance 0 | ../include/libtorrent/torrent.hpp:1213 | this wastes 5 bits per file |
this wastes 5 bits per file../include/libtorrent/torrent.hpp:1213 typedef std::list<boost::shared_ptr<torrent_plugin> > extension_list_t;
extension_list_t m_extensions;
#endif
@@ -7184,7 +7311,7 @@ namespace libtorrent
// if this was added from an RSS feed, this is the unique
// identifier in the feed.
- | ||
relevance 0 | ../include/libtorrent/torrent.hpp:1272 | These two bitfields should probably be coalesced into one |
These two bitfields should probably be coalesced into one../include/libtorrent/torrent.hpp:1272 // the .torrent file from m_url
+ | ||
relevance 0 | ../include/libtorrent/torrent.hpp:1272 | These two bitfields should probably be coalesced into one |
These two bitfields should probably be coalesced into one../include/libtorrent/torrent.hpp:1272 // the .torrent file from m_url
// std::vector<char> m_torrent_file_buf;
// this is a list of all pieces that we have announced
@@ -7235,7 +7362,7 @@ namespace libtorrent
// this is the time last any of our peers saw a seed
// in this swarm
time_t m_swarm_last_seen_complete;
- | ||
relevance 0 | ../include/libtorrent/torrent_info.hpp:124 | include the number of peers received from this tracker, at last announce |
include the number of peers received from this tracker, at last announce../include/libtorrent/torrent_info.hpp:124
+ | ||
relevance 0 | ../include/libtorrent/torrent_info.hpp:124 | include the number of peers received from this tracker, at last announce |
include the number of peers received from this tracker, at last announce../include/libtorrent/torrent_info.hpp:124
// if this tracker failed the last time it was contacted
// this error code specifies what error occurred
error_code last_error;
@@ -7286,7 +7413,7 @@ namespace libtorrent
// flags for the source bitmask, each indicating where
// we heard about this tracker
enum tracker_source
- | ||
relevance 0 | ../include/libtorrent/upnp.hpp:112 | support using the windows API for UPnP operations as well |
support using the windows API for UPnP operations as well../include/libtorrent/upnp.hpp:112 // specific port
+ | ||
relevance 0 | ../include/libtorrent/upnp.hpp:112 | support using the windows API for UPnP operations as well |
support using the windows API for UPnP operations as well../include/libtorrent/upnp.hpp:112 // specific port
external_port_must_be_wildcard = 727
};
@@ -7337,7 +7464,7 @@ public:
// is -1, which means failure. There will not be any error alert notification for
// mappings that fail with a -1 return value.
int add_mapping(protocol_type p, int external_port, int local_port);
- | ||
relevance 0 | ../include/libtorrent/utp_stream.hpp:395 | implement blocking write. Low priority since it's not used (yet) |
implement blocking write. Low priority since it's not used (yet)../include/libtorrent/utp_stream.hpp:395 for (typename Mutable_Buffers::const_iterator i = buffers.begin()
+ | ||
relevance 0 | ../include/libtorrent/utp_stream.hpp:395 | implement blocking write. Low priority since it's not used (yet) |
implement blocking write. Low priority since it's not used (yet)../include/libtorrent/utp_stream.hpp:395 for (typename Mutable_Buffers::const_iterator i = buffers.begin()
, end(buffers.end()); i != end; ++i)
{
using asio::buffer_cast;
@@ -7388,7 +7515,7 @@ public:
if (m_impl == 0)
{
m_io_service.post(boost::bind<void>(handler, asio::error::not_connected, 0));
- | ||
relevance 0 | ../include/libtorrent/kademlia/item.hpp:61 | since this is a public function, it should probably be moved out of this header and into one with other public functions. |
since this is a public function, it should probably be moved
+ | ||
relevance 0 | ../include/libtorrent/kademlia/item.hpp:61 | since this is a public function, it should probably be moved out of this header and into one with other public functions. |
since this is a public function, it should probably be moved
out of this header and into one with other public functions.../include/libtorrent/kademlia/item.hpp:61#include <boost/array.hpp>
namespace libtorrent { namespace dht
@@ -7440,7 +7567,7 @@ public:
item(entry const& v
, std::pair<char const*, int> salt
, boost::uint64_t seq, char const* pk, char const* sk);
- | ||
relevance 0 | ../include/libtorrent/aux_/session_impl.hpp:378 | move the login info into the tracker_request object |
move the login info into the tracker_request object../include/libtorrent/aux_/session_impl.hpp:378
+ | ||
relevance 0 | ../include/libtorrent/aux_/session_impl.hpp:378 | move the login info into the tracker_request object |
move the login info into the tracker_request object../include/libtorrent/aux_/session_impl.hpp:378
void on_lsd_announce(error_code const& e);
// called when a port mapping is successful, or a router returns
@@ -7455,7 +7582,7 @@ public:
void resume();
void set_ip_filter(ip_filter const& f);
- ip_filter const& get_ip_filter() const;
+ ip_filter& get_ip_filter();
void set_port_filter(port_filter const& f);
port_filter const& get_port_filter() const;
@@ -7491,7 +7618,7 @@ public:
#ifndef TORRENT_DISABLE_EXTENSIONS
void add_extensions_to_torrent(
- | ||
relevance 0 | ../include/libtorrent/aux_/session_impl.hpp:841 | should this be renamed m_outgoing_interfaces? |
should this be renamed m_outgoing_interfaces?../include/libtorrent/aux_/session_impl.hpp:841 // listen socket. For each retry the port number
+ | ||
relevance 0 | ../include/libtorrent/aux_/session_impl.hpp:841 | should this be renamed m_outgoing_interfaces? |
should this be renamed m_outgoing_interfaces?../include/libtorrent/aux_/session_impl.hpp:841 // listen socket. For each retry the port number
// is incremented by one
int m_listen_port_retries;
@@ -7542,7 +7669,7 @@ public:
mutable boost::uint8_t m_interface_index;
void open_new_incoming_socks_connection();
- | ||
relevance 0 | ../include/libtorrent/aux_/session_impl.hpp:895 | replace this by a proper asio timer |
replace this by a proper asio timer../include/libtorrent/aux_/session_impl.hpp:895 {
+ | ||
relevance 0 | ../include/libtorrent/aux_/session_impl.hpp:895 | replace this by a proper asio timer |
replace this by a proper asio timer../include/libtorrent/aux_/session_impl.hpp:895 {
open_ssl_socket = 0x10
};
@@ -7566,7 +7693,7 @@ public:
// this is used to decide when to recalculate which
// torrents to keep queued and which to activate
- | ||
relevance 0 | ../include/libtorrent/aux_/session_impl.hpp:900 | replace this by a proper asio timer |
replace this by a proper asio timer../include/libtorrent/aux_/session_impl.hpp:900 listen_socket_t setup_listener(std::string const& device
+ | ||
relevance 0 | ../include/libtorrent/aux_/session_impl.hpp:900 | replace this by a proper asio timer |
replace this by a proper asio timer../include/libtorrent/aux_/session_impl.hpp:900 listen_socket_t setup_listener(std::string const& device
, bool ipv4, int port, int& retries, int flags, error_code& ec);
#ifndef TORRENT_DISABLE_DHT
@@ -7592,7 +7719,7 @@ public:
// is only decresed when the unchoke set
// is recomputed, and when it reaches zero,
// the optimistic unchoke is moved to another peer.
- | ||
relevance 0 | ../include/libtorrent/aux_/session_impl.hpp:907 | replace this by a proper asio timer |
replace this by a proper asio timer../include/libtorrent/aux_/session_impl.hpp:907
+ | ||
relevance 0 | ../include/libtorrent/aux_/session_impl.hpp:907 | replace this by a proper asio timer |
replace this by a proper asio timer../include/libtorrent/aux_/session_impl.hpp:907
// the number of unchoked peers as set by the auto-unchoker
// this should always be >= m_max_uploads
int m_allowed_upload_slots;
@@ -7643,7 +7770,7 @@ public:
int m_suggest_timer;
// statistics gathered from all torrents.
- | ||
relevance 0 | ../include/libtorrent/aux_/session_interface.hpp:202 | it would be nice to not have this be part of session_interface |
it would be nice to not have this be part of session_interface../include/libtorrent/aux_/session_interface.hpp:202 virtual boost::uint16_t listen_port() const = 0;
+ | ||
relevance 0 | ../include/libtorrent/aux_/session_interface.hpp:202 | it would be nice to not have this be part of session_interface |
it would be nice to not have this be part of session_interface../include/libtorrent/aux_/session_interface.hpp:202 virtual boost::uint16_t listen_port() const = 0;
virtual boost::uint16_t ssl_listen_port() const = 0;
// used to (potentially) issue socket write calls onto multiple threads
diff --git a/examples/client_test.cpp b/examples/client_test.cpp
index 546fde1ed..57e7115b8 100644
--- a/examples/client_test.cpp
+++ b/examples/client_test.cpp
@@ -1756,7 +1756,7 @@ int main(int argc, char* argv[])
, "%3d [%3d, %d] %s%s\n"
, bucket, i->num_nodes, i->num_replacements
, progress_bar + (128 - i->num_nodes)
- , "--------" + (8 - i->num_replacements));
+ , "--------" + (8 - (std::min)(8, i->num_replacements)));
out += str;
}
diff --git a/include/libtorrent/add_torrent_params.hpp b/include/libtorrent/add_torrent_params.hpp
index f435a894b..98da99c02 100644
--- a/include/libtorrent/add_torrent_params.hpp
+++ b/include/libtorrent/add_torrent_params.hpp
@@ -147,15 +147,16 @@ namespace libtorrent
// in there will override the seed mode you set here.
flag_seed_mode = 0x001,
- // If ``flag_override_resume_data`` is set, the ``paused``,
- // ``auto_managed`` and ``save_path`` of the torrent are not loaded
- // from the resume data, but the states requested by the flags in
- // ``add_torrent_params`` will override them.
- //
- // If you pass in resume data, the paused state of the torrent when
- // the resume data was saved will override the paused state you pass
- // in here. You can override this by setting
- // ``flag_override_resume_data``.
+ // If ``flag_override_resume_data`` is set, flags set for this torrent
+ // in this ``add_torrent_params`` object will take precedence over
+ // whatever states are saved in the resume data. For instance, the
+ // ``paused``, ``auto_managed``, ``sequential_download``, ``seed_mode``,
+ // ``super_seeding``, ``max_uploads``, ``max_connections``,
+ // ``upload_limit`` and ``download_limit`` are all affected by this
+ // flag. The intention of this flag is to have any field in
+ // add_torrent_params configuring the torrent override the corresponding
+ // configuration from the resume file, with the one exception of save
+ // resume data, which has its own flag (for historic reasons).
flag_override_resume_data = 0x002,
// If ``flag_upload_mode`` is set, the torrent will be initialized in
diff --git a/include/libtorrent/kademlia/node.hpp b/include/libtorrent/kademlia/node.hpp
index 31140a70e..8221dd5f8 100644
--- a/include/libtorrent/kademlia/node.hpp
+++ b/include/libtorrent/kademlia/node.hpp
@@ -223,7 +223,7 @@ public:
node_id const& nid() const { return m_id; }
- boost::tuple<int, int> size() const { return m_table.size(); }
+ boost::tuple<int, int, int> size() const { return m_table.size(); }
size_type num_global_nodes() const
{ return m_table.num_global_nodes(); }
diff --git a/include/libtorrent/kademlia/node_id.hpp b/include/libtorrent/kademlia/node_id.hpp
index 040576909..d4dd84a57 100644
--- a/include/libtorrent/kademlia/node_id.hpp
+++ b/include/libtorrent/kademlia/node_id.hpp
@@ -63,7 +63,9 @@ int TORRENT_EXTRA_EXPORT distance_exp(node_id const& n1, node_id const& n2);
node_id TORRENT_EXTRA_EXPORT generate_id(address const& external_ip);
node_id TORRENT_EXTRA_EXPORT generate_random_id();
-bool TORRENT_EXTRA_EXPORT verify_random_id(node_id const& nid);
+void TORRENT_EXTRA_EXPORT make_id_secret(node_id& in);
+node_id TORRENT_EXTRA_EXPORT generate_secret_id();
+bool TORRENT_EXTRA_EXPORT verify_secret_id(node_id const& nid);
node_id TORRENT_EXTRA_EXPORT generate_id_impl(address const& ip_, boost::uint32_t r);
bool TORRENT_EXTRA_EXPORT verify_id(node_id const& nid, address const& source_ip);
diff --git a/include/libtorrent/kademlia/refresh.hpp b/include/libtorrent/kademlia/refresh.hpp
index ad6a22147..f9ed06a68 100644
--- a/include/libtorrent/kademlia/refresh.hpp
+++ b/include/libtorrent/kademlia/refresh.hpp
@@ -43,6 +43,8 @@ namespace libtorrent { namespace dht
class routing_table;
class rpc_manager;
+// TODO: 3 collapse this class into the bootstrap class (or maybe the other
+// way around)
class refresh : public get_peers
{
public:
@@ -68,6 +70,8 @@ public:
virtual char const* name() const;
+ void trim_seed_nodes();
+
protected:
virtual void done();
diff --git a/include/libtorrent/kademlia/routing_table.hpp b/include/libtorrent/kademlia/routing_table.hpp
index 476d083ad..830576274 100644
--- a/include/libtorrent/kademlia/routing_table.hpp
+++ b/include/libtorrent/kademlia/routing_table.hpp
@@ -104,6 +104,13 @@ public:
router_iterator router_begin() const { return m_router_nodes.begin(); }
router_iterator router_end() const { return m_router_nodes.end(); }
+ enum add_node_status_t {
+ failed_to_add = 0,
+ node_added,
+ need_bucket_split
+ };
+ add_node_status_t add_node_impl(node_entry e);
+
bool add_node(node_entry e);
// this function is called every time the node sees
@@ -147,7 +154,11 @@ public:
int bucket_size() const { return m_bucket_size; }
- boost::tuple<int, int> size() const;
+ // returns the number of nodes in the main buckets, number of nodes in the
+ // replacement buckets and the number of nodes in the main buckets that have
+ // been pinged and confirmed up
+ boost::tuple<int, int, int> size() const;
+
size_type num_global_nodes() const;
// the number of bits down we have full buckets
@@ -155,9 +166,6 @@ public:
// we have
int depth() const;
- // returns true if there are no working nodes
- // in the routing table
- bool need_bootstrap() const;
int num_active_buckets() const { return m_buckets.size(); }
void replacement_cache(bucket_t& nodes) const;
@@ -202,14 +210,6 @@ private:
// it's mutable because it's updated by depth(), which is const
mutable int m_depth;
- // the last time need_bootstrap() returned true
- mutable ptime m_last_bootstrap;
-
- // the last time the routing table was refreshed.
- // this is used to stagger buckets needing refresh
- // to be at least 45 seconds apart.
- mutable ptime m_last_refresh;
-
// the last time we refreshed our own bucket
// refreshed every 15 minutes
mutable ptime m_last_self_refresh;
diff --git a/include/libtorrent/torrent.hpp b/include/libtorrent/torrent.hpp
index 1c4573b8b..43eeb8b46 100644
--- a/include/libtorrent/torrent.hpp
+++ b/include/libtorrent/torrent.hpp
@@ -698,8 +698,8 @@ namespace libtorrent
void announce_with_tracker(boost::uint8_t e
= tracker_request::none
, address const& bind_interface = address_v4::any());
- int seconds_since_last_scrape() const { return m_ses.session_time() - m_last_scrape; }
-
+ int seconds_since_last_scrape() const
+ { return m_last_scrape == INT16_MIN ? -1 : m_ses.session_time() - m_last_scrape; }
#ifndef TORRENT_DISABLE_DHT
void dht_announce();
#endif
@@ -1583,17 +1583,19 @@ namespace libtorrent
// ----
- // the timestamp of the last piece passed for this torrent
- // specified in session_time
- boost::uint16_t m_last_download;
+ // the timestamp of the last piece passed for this torrent specified in
+ // session_time. This is signed because it must be able to represent time
+ // before the session started
+ boost::int16_t m_last_download;
// the number of peer connections to seeds. This should be the same as
// counting the peer connections that say true for is_seed()
boost::uint16_t m_num_seeds;
- // the timestamp of the last byte uploaded from this torrent
- // specified in session_time
- boost::uint16_t m_last_upload;
+ // the timestamp of the last byte uploaded from this torrent specified in
+ // session_time. This is signed because it must be able to represent time
+ // before the session started
+ boost::int16_t m_last_upload;
// this is a second count-down to when we should tick the
// storage for this torrent. Ticking the storage is used
@@ -1636,10 +1638,10 @@ namespace libtorrent
// is optional and may be 0xffffff
unsigned int m_downloaded:24;
- // the timestamp of the last scrape request to
- // one of the trackers in this torrent
- // specified in session_time
- boost::uint16_t m_last_scrape;
+ // the timestamp of the last scrape request to one of the trackers in
+ // this torrent specified in session_time. This is signed because it must
+ // be able to represent time before the session started
+ boost::int16_t m_last_scrape;
// ----
diff --git a/src/bt_peer_connection.cpp b/src/bt_peer_connection.cpp
index 3312a51ae..4b36406fa 100644
--- a/src/bt_peer_connection.cpp
+++ b/src/bt_peer_connection.cpp
@@ -2569,7 +2569,7 @@ namespace libtorrent
if (is_disconnecting()) return;
// read dh key, generate shared secret
- if (m_dh_key_exchange->compute_secret(recv_buffer.begin) == -1)
+ if (m_dh_key_exchange->compute_secret(recv_buffer.begin) != 0)
{
disconnect(errors::no_memory, op_encryption);
return;
diff --git a/src/kademlia/dht_tracker.cpp b/src/kademlia/dht_tracker.cpp
index 7cbb6ddb3..0710f6171 100644
--- a/src/kademlia/dht_tracker.cpp
+++ b/src/kademlia/dht_tracker.cpp
@@ -328,7 +328,7 @@ namespace libtorrent { namespace dht
{
first = false;
pc << "\n\n ***** starting log at " << time_now_string() << " *****\n\n"
- << "minute:active nodes:passive nodes"
+ << "minute:active nodes:passive nodes:confirmed nodes"
":ping replies sent:ping queries recvd"
":ping replies bytes sent:ping queries bytes recvd"
":find_node replies sent:find_node queries recv"
@@ -348,10 +348,12 @@ namespace libtorrent { namespace dht
int active;
int passive;
- boost::tie(active, passive) = m_dht.size();
+ int confirmed;
+ boost::tie(active, passive, confirmed) = m_dht.size();
pc << (m_counter * tick_period)
<< "\t" << active
- << "\t" << passive;
+ << "\t" << passive
+ << "\t" << confirmed;
for (int i = 0; i < 5; ++i)
pc << "\t" << (m_replies_sent[i] / float(tick_period))
<< "\t" << (m_queries_received[i] / float(tick_period))
diff --git a/src/kademlia/node.cpp b/src/kademlia/node.cpp
index 4f157640d..098b8bcaa 100644
--- a/src/kademlia/node.cpp
+++ b/src/kademlia/node.cpp
@@ -178,7 +178,10 @@ std::string node_impl::generate_token(udp::endpoint const& addr, char const* inf
void node_impl::bootstrap(std::vector<udp::endpoint> const& nodes
, find_data::nodes_callback const& f)
{
- boost::intrusive_ptr<dht::bootstrap> r(new dht::bootstrap(*this, m_id, f));
+ node_id target = m_id;
+ make_id_secret(target);
+
+ boost::intrusive_ptr<dht::bootstrap> r(new dht::bootstrap(*this, target, f));
m_last_self_refresh = time_now();
#ifdef TORRENT_DHT_VERBOSE_LOGGING
@@ -194,6 +197,9 @@ void node_impl::bootstrap(std::vector const& nodes
r->add_entry(node_id(0), *i, observer::flag_initial);
}
+ // make us start as far away from our node ID as possible
+ r->trim_seed_nodes();
+
#ifdef TORRENT_DHT_VERBOSE_LOGGING
TORRENT_LOG(node) << "bootstrapping with " << count << " nodes";
#endif
@@ -454,7 +460,9 @@ void node_impl::tick()
ptime now = time_now();
if (m_last_self_refresh + minutes(10) < now)
{
- boost::intrusive_ptr<dht::refresh> r(new dht::refresh(*this, m_id
+ node_id target = m_id;
+ make_id_secret(target);
+ boost::intrusive_ptr<dht::bootstrap> r(new dht::bootstrap(*this, target
, boost::bind(&nop)));
r->start();
m_last_self_refresh = now;
@@ -478,7 +486,7 @@ void node_impl::send_single_refresh(udp::endpoint const& ep, int bucket
// TODO: 2 it would be nice to have a bias towards node-id prefixes that
// are missing in the bucket
node_id mask = generate_prefix_mask(bucket + 1);
- node_id target = generate_random_id() & ~mask;
+ node_id target = generate_secret_id() & ~mask;
target |= m_id & mask;
// create a dummy traversal_algorithm
diff --git a/src/kademlia/node_id.cpp b/src/kademlia/node_id.cpp
index a1fe8de15..4041e806d 100644
--- a/src/kademlia/node_id.cpp
+++ b/src/kademlia/node_id.cpp
@@ -151,25 +151,36 @@ node_id generate_id_impl(address const& ip_, boost::uint32_t r)
static boost::uint32_t secret = 0;
-node_id generate_random_id()
+void make_id_secret(node_id& in)
{
- char r[20];
- for (int i = 0; i < 20; ++i) r[i] = random() & 0xff;
- node_id ret = hasher(r, 20).final();
-
if (secret == 0) secret = (random() % 0xfffffffe) + 1;
+ boost::uint32_t rand = random();
+
// generate the last 4 bytes as a "signature" of the previous 4 bytes. This
// lets us verify whether a hash came from this function or not in the future.
hasher h((char*)&secret, 4);
- h.update((char*)&ret[20-8], 4);
+ h.update((char*)&rand, 4);
sha1_hash secret_hash = h.final();
- memcpy(&ret[20-4], &secret_hash[0], 4);
+ memcpy(&in[20-4], &secret_hash[0], 4);
+ memcpy(&in[20-8], &rand, 4);
+}
+node_id generate_random_id()
+{
+ char r[20];
+ for (int i = 0; i < 20; ++i) r[i] = random() & 0xff;
+ return hasher(r, 20).final();
+}
+
+node_id generate_secret_id()
+{
+ node_id ret = generate_random_id();
+ make_id_secret(ret);
return ret;
}
-bool verify_random_id(node_id const& nid)
+bool verify_secret_id(node_id const& nid)
{
if (secret == 0) return false;
diff --git a/src/kademlia/refresh.cpp b/src/kademlia/refresh.cpp
index 06c58e7c6..6c6ed94ad 100644
--- a/src/kademlia/refresh.cpp
+++ b/src/kademlia/refresh.cpp
@@ -92,6 +92,15 @@ bootstrap::bootstrap(
char const* bootstrap::name() const { return "bootstrap"; }
+void bootstrap::trim_seed_nodes()
+{
+ // when we're bootstrapping, we want to start as far away from our ID as
+ // possible, to cover as much as possible of the ID space. So, remove all
+ // nodes except for the 32 that are farthest away from us
+ if (m_results.size() > 32)
+ m_results.erase(m_results.begin(), m_results.end() - 32);
+}
+
void bootstrap::done()
{
#ifdef TORRENT_DHT_VERBOSE_LOGGING
diff --git a/src/kademlia/routing_table.cpp b/src/kademlia/routing_table.cpp
index 602b8400b..3fe04a6d1 100644
--- a/src/kademlia/routing_table.cpp
+++ b/src/kademlia/routing_table.cpp
@@ -77,8 +77,6 @@ routing_table::routing_table(node_id const& id, int bucket_size
: m_settings(settings)
, m_id(id)
, m_depth(0)
- , m_last_bootstrap(time_now())
- , m_last_refresh(min_time())
, m_last_self_refresh(min_time())
, m_bucket_size(bucket_size)
{
@@ -97,7 +95,8 @@ int routing_table::bucket_limit(int bucket) const
void routing_table::status(session_status& s) const
{
- boost::tie(s.dht_nodes, s.dht_node_cache) = size();
+ int ignore;
+ boost::tie(s.dht_nodes, s.dht_node_cache, ignore) = size();
s.dht_global_nodes = num_global_nodes();
for (table_t::const_iterator i = m_buckets.begin()
@@ -113,17 +112,24 @@ void routing_table::status(session_status& s) const
}
}
-boost::tuple<int, int> routing_table::size() const
+boost::tuple<int, int, int> routing_table::size() const
{
int nodes = 0;
int replacements = 0;
+ int confirmed = 0;
for (table_t::const_iterator i = m_buckets.begin()
, end(m_buckets.end()); i != end; ++i)
{
nodes += i->live_nodes.size();
+ for (bucket_t::const_iterator k = i->live_nodes.begin()
+ , end(i->live_nodes.end()); k != end; ++k)
+ {
+ if (k->confirmed()) ++confirmed;
+ }
+
replacements += i->replacements.size();
}
- return boost::make_tuple(nodes, replacements);
+ return boost::make_tuple(nodes, replacements, confirmed);
}
size_type routing_table::num_global_nodes() const
@@ -441,16 +447,37 @@ void routing_table::remove_node(node_entry* n
}
bool routing_table::add_node(node_entry e)
+{
+ add_node_status_t s = add_node_impl(e);
+ if (s == failed_to_add) return false;
+ if (s == node_added) return true;
+
+ while (s == need_bucket_split)
+ {
+ split_bucket();
+
+ // if the new bucket still has too many nodes in it, we need to keep
+ // splitting
+ if (m_buckets.back().live_nodes.size() > bucket_limit(m_buckets.size()-1))
+ continue;
+
+ s = add_node_impl(e);
+ if (s == failed_to_add) return false;
+ if (s == node_added) return true;
+ }
+ return false;
+}
+
+routing_table::add_node_status_t routing_table::add_node_impl(node_entry e)
{
INVARIANT_CHECK;
// if we already have this (IP,port), don't do anything
- if (m_router_nodes.find(e.ep()) != m_router_nodes.end()) return false;
-
- bool ret = need_bootstrap();
+ if (m_router_nodes.find(e.ep()) != m_router_nodes.end())
+ return failed_to_add;
// don't add ourself
- if (e.id == m_id) return ret;
+ if (e.id == m_id) return failed_to_add;
// do we already have this IP in the table?
if (m_ips.count(e.addr().to_v4().to_bytes()) > 0)
@@ -476,7 +503,7 @@ bool routing_table::add_node(node_entry e)
TORRENT_LOG(table) << "ignoring node (duplicate IP): "
<< e.id << " " << e.addr();
#endif
- return ret;
+ return failed_to_add;
}
}
else if (existing && existing->id == e.id)
@@ -486,7 +513,7 @@ bool routing_table::add_node(node_entry e)
existing->timeout_count = 0;
existing->update_rtt(e.rtt);
existing->last_queried = e.last_queried;
- return ret;
+ return node_added;
}
else if (existing)
{
@@ -514,14 +541,15 @@ bool routing_table::add_node(node_entry e)
{
// a new IP address just claimed this node-ID
// ignore it
- if (j->addr() != e.addr() || j->port() != e.port()) return ret;
+ if (j->addr() != e.addr() || j->port() != e.port())
+ return failed_to_add;
// we already have the node in our bucket
TORRENT_ASSERT(j->id == e.id && j->ep() == e.ep());
j->timeout_count = 0;
j->update_rtt(e.rtt);
// TORRENT_LOG(table) << "updating node: " << i->id << " " << i->addr();
- return ret;
+ return node_added;
}
// if this node exists in the replacement bucket. update it and
@@ -533,7 +561,9 @@ bool routing_table::add_node(node_entry e)
{
// a new IP address just claimed this node-ID
// ignore it
- if (j->addr() != e.addr() || j->port() != e.port()) return ret;
+ if (j->addr() != e.addr() || j->port() != e.port())
+ return failed_to_add;
+
TORRENT_ASSERT(j->id == e.id && j->ep() == e.ep());
j->timeout_count = 0;
j->update_rtt(e.rtt);
@@ -556,7 +586,7 @@ bool routing_table::add_node(node_entry e)
<< " existing node: "
<< j->id << " " << j->addr();
#endif
- return ret;
+ return failed_to_add;
}
j = std::find_if(rb.begin(), rb.end(), boost::bind(&compare_ip_cidr, _1, e));
@@ -568,7 +598,7 @@ bool routing_table::add_node(node_entry e)
<< " existing node: "
<< j->id << " " << j->addr();
#endif
- return ret;
+ return failed_to_add;
}
}
@@ -579,7 +609,7 @@ bool routing_table::add_node(node_entry e)
b.push_back(e);
m_ips.insert(e.addr().to_v4().to_bytes());
// TORRENT_LOG(table) << "inserting node: " << e.id << " " << e.addr();
- return ret;
+ return node_added;
}
// if there is no room, we look for nodes that are not 'pinged',
@@ -613,7 +643,7 @@ bool routing_table::add_node(node_entry e)
*j = e;
m_ips.insert(e.addr().to_v4().to_bytes());
// TORRENT_LOG(table) << "replacing unpinged node: " << e.id << " " << e.addr();
- return ret;
+ return node_added;
}
// A node is considered stale if it has failed at least one
@@ -635,7 +665,7 @@ bool routing_table::add_node(node_entry e)
*j = e;
m_ips.insert(e.addr().to_v4().to_bytes());
// TORRENT_LOG(table) << "replacing stale node: " << e.id << " " << e.addr();
- return ret;
+ return node_added;
}
// in order to provide as few lookups as possible before finding
@@ -746,7 +776,7 @@ bool routing_table::add_node(node_entry e)
TORRENT_LOG(table) << "replacing node with higher RTT: " << e.id
<< " " << e.addr();
#endif
- return ret;
+ return node_added;
}
// in order to keep lookup times small, prefer nodes with low RTTs
@@ -771,7 +801,7 @@ bool routing_table::add_node(node_entry e)
// if the IP address matches, it's the same node
// make sure it's marked as pinged
if (j->ep() == e.ep()) j->set_pinged();
- return ret;
+ return node_added;
}
if ((int)rb.size() >= m_bucket_size)
@@ -789,29 +819,10 @@ bool routing_table::add_node(node_entry e)
rb.push_back(e);
m_ips.insert(e.addr().to_v4().to_bytes());
// TORRENT_LOG(table) << "inserting node in replacement cache: " << e.id << " " << e.addr();
- return ret;
+ return node_added;
}
- split_bucket();
-
- // now insert the new node in the appropriate bucket
- i = find_bucket(e.id);
- int dst_bucket = std::distance(m_buckets.begin(), i);
- bucket_t& nb = i->live_nodes;
- bucket_t& nrb = i->replacements;
-
- if (int(nb.size()) < bucket_limit(dst_bucket))
- nb.push_back(e);
- else if (int(nrb.size()) < m_bucket_size)
- nrb.push_back(e);
- else
- nb.push_back(e); // trigger another split
-
- m_ips.insert(e.addr().to_v4().to_bytes());
-
- while (int(m_buckets.back().live_nodes.size()) > bucket_limit(m_buckets.size() - 1))
- split_bucket();
- return ret;
+ return need_bucket_split;
}
void routing_table::split_bucket()
@@ -846,6 +857,18 @@ void routing_table::split_bucket()
j = b.erase(j);
}
+ if (b.size() > bucket_size_limit)
+ {
+ // TODO: 3 move the lowest priority nodes to the replacement bucket
+ for (bucket_t::iterator i = b.begin() + bucket_size_limit
+ , end(b.end()); i != end; ++i)
+ {
+ rb.push_back(*i);
+ }
+
+ b.resize(bucket_size_limit);
+ }
+
// split the replacement bucket as well. If the live bucket
// is not full anymore, also move the replacement entries
// into the main bucket
@@ -865,10 +888,8 @@ void routing_table::split_bucket()
// this entry belongs in the new bucket
if (int(new_bucket.size()) < new_bucket_size)
new_bucket.push_back(*j);
- else if (int(new_replacement_bucket.size()) < m_bucket_size)
- new_replacement_bucket.push_back(*j);
else
- erase_one(m_ips, j->addr().to_v4().to_bytes());
+ new_replacement_bucket.push_back(*j);
}
j = rb.erase(j);
}
@@ -996,24 +1017,6 @@ bool routing_table::node_seen(node_id const& id, udp::endpoint ep, int rtt)
return add_node(node_entry(id, ep, rtt, true));
}
-bool routing_table::need_bootstrap() const
-{
- ptime now = time_now();
- if (now - seconds(30) < m_last_bootstrap) return false;
-
- for (table_t::const_iterator i = m_buckets.begin()
- , end(m_buckets.end()); i != end; ++i)
- {
- for (bucket_t::const_iterator j = i->live_nodes.begin()
- , end(i->live_nodes.end()); j != end; ++j)
- {
- if (j->confirmed()) return false;
- }
- }
- m_last_bootstrap = now;
- return true;
-}
-
// fills the vector with the k nodes from our buckets that
// are nearest to the given id.
void routing_table::find_node(node_id const& target
diff --git a/src/lazy_bdecode.cpp b/src/lazy_bdecode.cpp
index 12956ddde..dfc27af30 100644
--- a/src/lazy_bdecode.cpp
+++ b/src/lazy_bdecode.cpp
@@ -556,6 +556,60 @@ namespace libtorrent
return line_len;
}
+ void escape_string(std::string& ret, char const* str, int len)
+ {
+ for (int i = 0; i < len; ++i)
+ {
+ if (str[i] >= 32 && str[i] < 127)
+ {
+ ret += str[i];
+ }
+ else
+ {
+ char tmp[5];
+ snprintf(tmp, sizeof(tmp), "\\x%02x", (unsigned char)str[i]);
+ ret += tmp;
+ }
+ }
+ }
+
+ void print_string(std::string& ret, char const* str, int len, bool single_line)
+ {
+ bool printable = true;
+ for (int i = 0; i < len; ++i)
+ {
+ char c = str[i];
+ if (c >= 32 && c < 127) continue;
+ printable = false;
+ break;
+ }
+ ret += "'";
+ if (printable)
+ {
+ if (single_line && len > 30)
+ {
+ ret.append(str, 14);
+ ret += "...";
+ ret.append(str + len-14, 14);
+ }
+ else
+ ret.append(str, len);
+ ret += "'";
+ return;
+ }
+ if (single_line && len > 20)
+ {
+ escape_string(ret, str, 9);
+ ret += "...";
+ escape_string(ret, str + len - 9, 9);
+ }
+ else
+ {
+ escape_string(ret, str, len);
+ }
+ ret += "'";
+ }
+
std::string print_entry(lazy_entry const& e, bool single_line, int indent)
{
char indent_str[200];
@@ -576,56 +630,7 @@ namespace libtorrent
}
case lazy_entry::string_t:
{
- bool printable = true;
- char const* str = e.string_ptr();
- for (int i = 0; i < e.string_length(); ++i)
- {
- char c = str[i];
- if (c >= 32 && c < 127) continue;
- printable = false;
- break;
- }
- ret += "'";
- if (printable)
- {
- if (single_line && e.string_length() > 30)
- {
- ret.append(e.string_ptr(), 14);
- ret += "...";
- ret.append(e.string_ptr() + e.string_length()-14, 14);
- }
- else
- ret.append(e.string_ptr(), e.string_length());
- ret += "'";
- return ret;
- }
- if (single_line && e.string_length() > 20)
- {
- for (int i = 0; i < 9; ++i)
- {
- char tmp[5];
- snprintf(tmp, sizeof(tmp), "%02x", (unsigned char)str[i]);
- ret += tmp;
- }
- ret += "...";
- for (int i = e.string_length() - 9
- , len(e.string_length()); i < len; ++i)
- {
- char tmp[5];
- snprintf(tmp, sizeof(tmp), "%02x", (unsigned char)str[i]);
- ret += tmp;
- }
- }
- else
- {
- for (int i = 0; i < e.string_length(); ++i)
- {
- char tmp[5];
- snprintf(tmp, sizeof(tmp), "%02x", (unsigned char)str[i]);
- ret += tmp;
- }
- }
- ret += "'";
+ print_string(ret, e.string_ptr(), e.string_length(), single_line);
return ret;
}
case lazy_entry::list_t:
@@ -654,9 +659,8 @@ namespace libtorrent
{
if (i == 0 && one_liner) ret += " ";
std::pair<std::string, lazy_entry const*> ent = e.dict_at(i);
- ret += "'";
- ret += ent.first;
- ret += "': ";
+ print_string(ret, ent.first.c_str(), ent.first.size(), true);
+ ret += ": ";
ret += print_entry(*ent.second, single_line, indent + 2);
if (i < e.dict_size() - 1) ret += (one_liner?", ":indent_str);
else ret += (one_liner?" ":indent_str+1);
diff --git a/src/pe_crypto.cpp b/src/pe_crypto.cpp
index a7a204e63..d758b5a95 100644
--- a/src/pe_crypto.cpp
+++ b/src/pe_crypto.cpp
@@ -225,6 +225,7 @@ get_out:
}
get_out:
+ // TODO: 3 clean this up using destructors instead
if (prime) gcry_mpi_release(prime);
if (remote_key) gcry_mpi_release(remote_key);
if (secret) gcry_mpi_release(secret);
diff --git a/src/peer_connection.cpp b/src/peer_connection.cpp
index 8117c2aee..d2fd58411 100644
--- a/src/peer_connection.cpp
+++ b/src/peer_connection.cpp
@@ -1223,9 +1223,9 @@ namespace libtorrent
#endif
#ifndef TORRENT_DISABLE_DHT
- if (dht::verify_random_id(ih))
+ if (dht::verify_secret_id(ih))
{
- // this means the hash was generated from our generate_random_id()
+ // this means the hash was generated from our generate_secret_id()
// as part of DHT traffic. The fact that we got an incoming
// connection on this info-hash, means the other end, making this
// connection fished it out of the DHT chatter. That's suspicious.
diff --git a/src/torrent.cpp b/src/torrent.cpp
index 5fbe64bd5..dbc9f575e 100644
--- a/src/torrent.cpp
+++ b/src/torrent.cpp
@@ -223,16 +223,16 @@ namespace libtorrent
, m_deleted(false)
, m_pinned(p.flags & add_torrent_params::flag_pinned)
, m_should_be_loaded(true)
- , m_last_download(0)
+ , m_last_download(INT16_MIN)
, m_num_seeds(0)
- , m_last_upload(0)
+ , m_last_upload(INT16_MIN)
, m_storage_tick(0)
, m_auto_managed(p.flags & add_torrent_params::flag_auto_managed)
, m_current_gauge_state(no_gauge_state)
, m_moving_storage(false)
, m_inactive(false)
, m_downloaded(0xffffff)
- , m_last_scrape(0)
+ , m_last_scrape(INT16_MIN)
, m_progress_ppm(0)
, m_use_resume_save_path(p.flags & add_torrent_params::flag_use_resume_save_path)
{
@@ -739,26 +739,21 @@ namespace libtorrent
#if defined TORRENT_VERBOSE_LOGGING || defined TORRENT_LOGGING
debug_log("starting torrent");
#endif
- TORRENT_ASSERT(!m_picker);
+ std::vector().swap(m_file_progress);
- if (!m_seed_mode)
+ if (m_resume_data)
{
- std::vector().swap(m_file_progress);
-
- if (m_resume_data)
- {
- int pos;
- error_code ec;
- if (lazy_bdecode(&m_resume_data->buf[0], &m_resume_data->buf[0]
+ int pos;
+ error_code ec;
+ if (lazy_bdecode(&m_resume_data->buf[0], &m_resume_data->buf[0]
+ m_resume_data->buf.size(), m_resume_data->entry, ec, &pos) != 0)
- {
- m_resume_data.reset();
+ {
+ m_resume_data.reset();
#if defined TORRENT_VERBOSE_LOGGING || defined TORRENT_LOGGING || defined TORRENT_ERROR_LOGGING
- debug_log("resume data rejected: %s pos: %d", ec.message().c_str(), pos);
+ debug_log("resume data rejected: %s pos: %d", ec.message().c_str(), pos);
#endif
- if (m_ses.alerts().should_post())
- m_ses.alerts().post_alert(fastresume_rejected_alert(get_handle(), ec, "", 0));
- }
+ if (m_ses.alerts().should_post())
+ m_ses.alerts().post_alert(fastresume_rejected_alert(get_handle(), ec, "", 0));
}
}
@@ -1726,68 +1721,6 @@ namespace libtorrent
return;
}
- // Chicken-and-egg: need to load resume data to get last save_path
- // before constructing m_owning_storage, but need storage before
- // loading resume data. So peek ahead in this case.
- // only do this if the user is willing to have the resume data
- // settings override the settings set in add_torrent_params
- if (m_use_resume_save_path
- && m_resume_data
- && m_resume_data->entry.type() == lazy_entry::dict_t)
- {
- std::string p = m_resume_data->entry.dict_find_string_value("save_path");
- if (!p.empty()) m_save_path = p;
- }
-
- construct_storage();
-
- if (m_share_mode && valid_metadata())
- {
- // in share mode, all pieces have their priorities initialized to 0
- m_file_priority.clear();
- m_file_priority.resize(m_torrent_file->num_files(), 0);
- }
-
- if (!m_connections_initialized)
- {
- m_connections_initialized = true;
- // all peer connections have to initialize themselves now that the metadata
- // is available
- // copy the peer list since peers may disconnect and invalidate
- // m_connections as we initialize them
- std::vector<peer_connection*> peers = m_connections;
- for (torrent::peer_iterator i = peers.begin();
- i != peers.end(); ++i)
- {
- peer_connection* pc = *i;
- if (pc->is_disconnecting()) continue;
- pc->on_metadata_impl();
- if (pc->is_disconnecting()) continue;
- pc->init();
- }
- }
-
- // in case file priorities were passed in via the add_torrent_params
- // and also in the case of share mode, we need to update the priorities
- update_piece_priorities();
-
- std::vector<web_seed_entry> const& web_seeds = m_torrent_file->web_seeds();
- m_web_seeds.insert(m_web_seeds.end(), web_seeds.begin(), web_seeds.end());
-
- if (m_seed_mode)
- {
- m_have_all = true;
- m_ses.get_io_service().post(boost::bind(&torrent::files_checked, shared_from_this()));
- m_resume_data.reset();
-#if TORRENT_USE_ASSERTS
- m_resume_data_loaded = true;
-#endif
- update_gauge();
- return;
- }
-
- set_state(torrent_status::checking_resume_data);
-
if (m_resume_data && m_resume_data->entry.type() == lazy_entry::dict_t)
{
int ev = 0;
@@ -1825,6 +1758,76 @@ namespace libtorrent
m_resume_data_loaded = true;
#endif
+ construct_storage();
+
+ if (!m_seed_mode && m_resume_data)
+ {
+ lazy_entry const* piece_priority = m_resume_data->entry.dict_find_string("piece_priority");
+ if (piece_priority && piece_priority->string_length()
+ == m_torrent_file->num_pieces())
+ {
+ char const* p = piece_priority->string_ptr();
+ for (int i = 0; i < piece_priority->string_length(); ++i)
+ {
+ int prio = p[i];
+ if (!has_picker() && prio == 1) continue;
+ need_picker();
+ m_picker->set_piece_priority(i, p[i]);
+ update_gauge();
+ }
+ }
+ }
+
+ if (m_share_mode && valid_metadata())
+ {
+ // in share mode, all pieces have their priorities initialized to 0
+ m_file_priority.clear();
+ m_file_priority.resize(m_torrent_file->num_files(), 0);
+ }
+
+ if (!m_connections_initialized)
+ {
+ m_connections_initialized = true;
+ // all peer connections have to initialize themselves now that the metadata
+ // is available
+ // copy the peer list since peers may disconnect and invalidate
+ // m_connections as we initialize them
+ std::vector<peer_connection*> peers = m_connections;
+ for (torrent::peer_iterator i = peers.begin();
+ i != peers.end(); ++i)
+ {
+ peer_connection* pc = *i;
+ if (pc->is_disconnecting()) continue;
+ pc->on_metadata_impl();
+ if (pc->is_disconnecting()) continue;
+ pc->init();
+ }
+ }
+
+ // in case file priorities were passed in via the add_torrent_params
+ // and also in the case of share mode, we need to update the priorities
+ update_piece_priorities();
+
+ std::vector<web_seed_entry> const& web_seeds = m_torrent_file->web_seeds();
+ m_web_seeds.insert(m_web_seeds.end(), web_seeds.begin(), web_seeds.end());
+
+ set_state(torrent_status::checking_resume_data);
+
+#if TORRENT_USE_ASSERTS
+ m_resume_data_loaded = true;
+#endif
+
+ if (m_seed_mode)
+ {
+ m_have_all = true;
+ m_ses.get_io_service().post(boost::bind(&torrent::files_checked, shared_from_this()));
+ m_resume_data.reset();
+ update_gauge();
+ return;
+ }
+
+ set_state(torrent_status::checking_resume_data);
+
int num_pad_files = 0;
TORRENT_ASSERT(block_size() > 0);
file_storage const& fs = m_torrent_file->files();
@@ -6505,17 +6508,63 @@ namespace libtorrent
m_complete = rd.dict_find_int_value("num_complete", 0xffffff);
m_incomplete = rd.dict_find_int_value("num_incomplete", 0xffffff);
m_downloaded = rd.dict_find_int_value("num_downloaded", 0xffffff);
- set_upload_limit(rd.dict_find_int_value("upload_rate_limit", -1));
- set_download_limit(rd.dict_find_int_value("download_rate_limit", -1));
- set_max_connections(rd.dict_find_int_value("max_connections", -1));
- set_max_uploads(rd.dict_find_int_value("max_uploads", -1));
- m_seed_mode = rd.dict_find_int_value("seed_mode", 0) && m_torrent_file->is_valid();
- if (m_seed_mode)
+
+ if (!m_override_resume_data)
{
- m_verified.resize(m_torrent_file->num_pieces(), false);
- m_verifying.resize(m_torrent_file->num_pieces(), false);
+ int up_limit_ = rd.dict_find_int_value("upload_rate_limit", -1);
+ if (up_limit_ != -1) set_upload_limit(up_limit_);
+
+ int down_limit_ = rd.dict_find_int_value("download_rate_limit", -1);
+ if (down_limit_ != -1) set_download_limit(down_limit_);
+
+ int max_connections_ = rd.dict_find_int_value("max_connections", -1);
+ if (max_connections_ != -1) set_max_connections(max_connections_);
+
+ int max_uploads_ = rd.dict_find_int_value("max_uploads", -1);
+ if (max_uploads_ != -1) set_max_uploads(max_uploads_);
+
+ int seed_mode_ = rd.dict_find_int_value("seed_mode", -1);
+ if (seed_mode_ != -1) m_seed_mode = seed_mode_ && m_torrent_file->is_valid();
+
+ int super_seeding_ = rd.dict_find_int_value("super_seeding", -1);
+ if (super_seeding_ != -1) super_seeding(super_seeding_);
+
+ int auto_managed_ = rd.dict_find_int_value("auto_managed", -1);
+ if (auto_managed_ != -1) m_auto_managed = auto_managed_;
+
+ int sequential_ = rd.dict_find_int_value("sequential_download", -1);
+ if (sequential_ != -1) set_sequential_download(sequential_);
+
+ int paused_ = rd.dict_find_int_value("paused", -1);
+ if (paused_ != -1)
+ {
+ set_allow_peers(!paused_);
+ m_announce_to_dht = !paused_;
+ m_announce_to_trackers = !paused_;
+ m_announce_to_lsd = !paused_;
+
+ update_gauge();
+ update_want_peers();
+ update_want_scrape();
+ }
+ int dht_ = rd.dict_find_int_value("announce_to_dht", -1);
+ if (dht_ != -1) m_announce_to_dht = dht_;
+ int lsd_ = rd.dict_find_int_value("announce_to_lsd", -1);
+ if (lsd_ != -1) m_announce_to_lsd = lsd_;
+ int track_ = rd.dict_find_int_value("announce_to_trackers", -1);
+ if (track_ != -1) m_announce_to_trackers = track_;
}
- super_seeding(rd.dict_find_int_value("super_seeding", 0));
+
+ if (m_seed_mode)
+ m_verified.resize(m_torrent_file->num_pieces(), false);
+
+ int now = m_ses.session_time();
+ int tmp = rd.dict_find_int_value("last_scrape", -1);
+ m_last_scrape = tmp == -1 ? INT16_MIN : now - tmp;
+ tmp = rd.dict_find_int_value("last_download", -1);
+ m_last_download = tmp == -1 ? INT16_MIN : now - tmp;
+ tmp = rd.dict_find_int_value("last_upload", -1);
+ m_last_upload = tmp == -1 ? INT16_MIN : now - tmp;
if (m_use_resume_save_path)
{
@@ -6557,79 +6606,34 @@ namespace libtorrent
if (m_completed_time != 0 && m_completed_time < m_added_time)
m_completed_time = m_added_time;
- lazy_entry const* file_priority = rd.dict_find_list("file_priority");
- if (file_priority && file_priority->list_size()
- == m_torrent_file->num_files())
+ if (!m_seed_mode && !m_override_resume_data)
{
- int num_files = m_torrent_file->num_files();
- m_file_priority.resize(num_files);
- for (int i = 0; i < num_files; ++i)
- m_file_priority[i] = file_priority->list_int_value_at(i, 1);
- // unallocated slots are assumed to be priority 1, so cut off any
- // trailing ones
- int end_range = num_files - 1;
- for (; end_range >= 0; --end_range) if (m_file_priority[end_range] != 1) break;
- m_file_priority.resize(end_range + 1);
-
- // initialize pad files to priority 0
- file_storage const& fs = m_torrent_file->files();
- for (int i = 0; i < (std::min)(fs.num_files(), end_range + 1); ++i)
+ lazy_entry const* file_priority = rd.dict_find_list("file_priority");
+ if (file_priority && file_priority->list_size()
+ == m_torrent_file->num_files())
{
- if (!fs.pad_file_at(i)) continue;
- m_file_priority[i] = 0;
+ int num_files = m_torrent_file->num_files();
+ m_file_priority.resize(num_files);
+ for (int i = 0; i < num_files; ++i)
+ m_file_priority[i] = file_priority->list_int_value_at(i, 1);
+ // unallocated slots are assumed to be priority 1, so cut off any
+ // trailing ones
+ int end_range = num_files - 1;
+ for (; end_range >= 0; --end_range) if (m_file_priority[end_range] != 1) break;
+ m_file_priority.resize(end_range + 1);
+
+ // initialize pad files to priority 0
+ file_storage const& fs = m_torrent_file->files();
+ for (int i = 0; i < (std::min)(fs.num_files(), end_range + 1); ++i)
+ {
+ if (!fs.pad_file_at(i)) continue;
+ m_file_priority[i] = 0;
+ }
}
update_piece_priorities();
}
- lazy_entry const* piece_priority = rd.dict_find_string("piece_priority");
- if (piece_priority && piece_priority->string_length()
- == m_torrent_file->num_pieces())
- {
- char const* p = piece_priority->string_ptr();
- for (int i = 0; i < piece_priority->string_length(); ++i)
- {
- int prio = p[i];
- if (!has_picker() && prio == 1) continue;
- need_picker();
- m_picker->set_piece_priority(i, p[i]);
- update_gauge();
- }
- }
-
- if (!m_override_resume_data)
- {
- int auto_managed_ = rd.dict_find_int_value("auto_managed", -1);
- if (auto_managed_ != -1) m_auto_managed = auto_managed_;
- update_gauge();
- }
-
- int sequential_ = rd.dict_find_int_value("sequential_download", -1);
- if (sequential_ != -1) set_sequential_download(sequential_);
-
- if (!m_override_resume_data)
- {
- int paused_ = rd.dict_find_int_value("paused", -1);
- if (paused_ != -1)
- {
- set_allow_peers(!paused_);
-
- m_announce_to_dht = !paused_;
- m_announce_to_trackers = !paused_;
- m_announce_to_lsd = !paused_;
-
- update_gauge();
- update_want_peers();
- update_want_scrape();
- }
- int dht_ = rd.dict_find_int_value("announce_to_dht", -1);
- if (dht_ != -1) m_announce_to_dht = dht_;
- int lsd_ = rd.dict_find_int_value("announce_to_lsd", -1);
- if (lsd_ != -1) m_announce_to_lsd = lsd_;
- int track_ = rd.dict_find_int_value("announce_to_trackers", -1);
- if (track_ != -1) m_announce_to_trackers = track_;
- }
-
lazy_entry const* trackers = rd.dict_find_list("trackers");
if (trackers)
{
@@ -6873,7 +6877,7 @@ namespace libtorrent
{
std::memset(&pieces[0], m_have_all, pieces.size());
}
- else
+ else if (has_picker())
{
for (int i = 0, end(pieces.size()); i < end; ++i)
pieces[i] = m_picker->have_piece(i) ? 1 : 0;
@@ -11191,8 +11195,8 @@ namespace libtorrent
st->added_time = m_added_time;
st->completed_time = m_completed_time;
- st->last_scrape = m_last_scrape == 0 ? -1
- : m_ses.session_time() - m_last_scrape;
+ st->last_scrape = m_last_scrape == INT16_MIN ? -1
+ : clamped_subtract(m_ses.session_time(), m_last_scrape);
st->share_mode = m_share_mode;
st->upload_mode = m_upload_mode;
@@ -11223,10 +11227,10 @@ namespace libtorrent
st->finished_time = finished_time();
st->active_time = active_time();
st->seeding_time = seeding_time();
- st->time_since_upload = m_last_upload == 0 ? -1
- : m_ses.session_time() - m_last_upload;
- st->time_since_download = m_last_download == 0 ? -1
- : m_ses.session_time() - m_last_download;
+ st->time_since_upload = m_last_upload == INT16_MIN ? -1
+ : clamped_subtract(m_ses.session_time(), m_last_upload);
+ st->time_since_download = m_last_download == INT16_MIN ? -1
+ : clamped_subtract(m_ses.session_time(), m_last_download);
st->storage_mode = (storage_mode_t)m_storage_mode;
diff --git a/test/Jamfile b/test/Jamfile
index 3ca2b18c7..e75693d83 100644
--- a/test/Jamfile
+++ b/test/Jamfile
@@ -90,6 +90,7 @@ feature launcher : none valgrind : composite ;
feature.compose valgrind : "valgrind --tool=memcheck -v --num-callers=20 --read-var-info=yes --track-origins=yes --error-exitcode=222 --suppressions=valgrind_suppressions.txt" on ;
test-suite libtorrent :
+ [ run test_resume.cpp ]
[ run test_sliding_average.cpp ]
[ run test_socket_io.cpp ]
[ run test_random.cpp ]
diff --git a/test/Makefile.am b/test/Makefile.am
index c89a45e1d..88393d2f0 100644
--- a/test/Makefile.am
+++ b/test/Makefile.am
@@ -37,6 +37,7 @@ test_programs = \
test_packet_buffer \
test_settings_pack \
test_read_piece \
+ test_resume \
test_rss \
test_ssl \
test_storage \
diff --git a/test/test_dht.cpp b/test/test_dht.cpp
index d65793fa9..1f9883f8b 100644
--- a/test/test_dht.cpp
+++ b/test/test_dht.cpp
@@ -620,11 +620,15 @@ int test_main()
// ====== test node ID testing =====
{
- node_id rnd = generate_random_id();
- TEST_CHECK(verify_random_id(rnd));
+ node_id rnd = generate_secret_id();
+ TEST_CHECK(verify_secret_id(rnd));
rnd[19] ^= 0x55;
- TEST_CHECK(!verify_random_id(rnd));
+ TEST_CHECK(!verify_secret_id(rnd));
+
+ rnd = generate_random_id();
+ make_id_secret(rnd);
+ TEST_CHECK(verify_secret_id(rnd));
}
// ====== test node ID enforcement ======
diff --git a/test/test_resume.cpp b/test/test_resume.cpp
new file mode 100644
index 000000000..72bd3053a
--- /dev/null
+++ b/test/test_resume.cpp
@@ -0,0 +1,297 @@
+/*
+
+Copyright (c) 2014, Arvid Norberg
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the distribution.
+ * Neither the name of the author nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#include "libtorrent/session.hpp"
+#include "libtorrent/add_torrent_params.hpp"
+#include "libtorrent/torrent_info.hpp"
+#include "libtorrent/random.hpp"
+#include "libtorrent/create_torrent.hpp"
+
+#include <boost/make_shared.hpp>
+
+#include "test.hpp"
+#include "setup_transfer.hpp"
+
+using namespace libtorrent;
+
+boost::shared_ptr<torrent_info> generate_torrent()
+{
+ file_storage fs;
+ fs.add_file("test_resume/tmp1", 128 * 1024 * 10);
+ libtorrent::create_torrent t(fs, 128 * 1024, 6);
+
+ t.add_tracker("http://torrent_file_tracker.com/announce");
+
+ int num = t.num_pieces();
+ TEST_CHECK(num > 0);
+ for (int i = 0; i < num; ++i)
+ {
+ sha1_hash ph;
+ for (int k = 0; k < 20; ++k) ph[k] = libtorrent::random();
+ t.set_hash(i, ph);
+ }
+
+	std::vector<char> buf;
+ bencode(std::back_inserter(buf), t.generate());
+	return boost::make_shared<torrent_info>(&buf[0], buf.size());
+}
+
+std::vector<char> generate_resume_data(torrent_info* ti)
+{
+ entry rd;
+
+ rd["file-format"] = "libtorrent resume file";
+ rd["file-version"] = 1;
+ rd["info-hash"] = ti->info_hash().to_string();
+ rd["blocks per piece"] = (std::max)(1, ti->piece_length() / 0x4000);
+ rd["pieces"] = std::string(ti->num_pieces(), '\0');
+
+ rd["total_uploaded"] = 1337;
+ rd["total_downloaded"] = 1338;
+ rd["active_time"] = 1339;
+ rd["seeding_time"] = 1340;
+ rd["num_seeds"] = 1341;
+ rd["num_downloaders"] = 1342;
+ rd["upload_rate_limit"] = 1343;
+ rd["download_rate_limit"] = 1344;
+ rd["max_connections"] = 1345;
+ rd["max_uploads"] = 1346;
+ rd["seed_mode"] = 0;
+ rd["super_seeding"] = 0;
+ rd["added_time"] = 1347;
+ rd["completed_time"] = 1348;
+ rd["last_scrape"] = 1349;
+ rd["last_download"] = 1350;
+ rd["last_upload"] = 1351;
+ rd["finished_time"] = 1352;
+ entry::list_type& file_prio = rd["file_priority"].list();
+ file_prio.push_back(entry(1));
+
+ rd["piece_priority"] = std::string(ti->num_pieces(), '\x01');
+ rd["auto_managed"] = 0;
+ rd["sequential_download"] = 0;
+ rd["paused"] = 0;
+ entry::list_type& trackers = rd["trackers"].list();
+ trackers.push_back(entry(entry::list_t));
+ trackers.back().list().push_back(entry("http://resume_data_tracker.com/announce"));
+ entry::list_type& url_list = rd["url-list"].list();
+ url_list.push_back(entry("http://resume_data_url_seed.com"));
+
+ entry::list_type& httpseeds = rd["httpseeds"].list();
+ httpseeds.push_back(entry("http://resume_data_http_seed.com"));
+
+ rd["save_path"] = "/resume_data save_path";
+
+	std::vector<char> ret;
+ bencode(back_inserter(ret), rd);
+
+ return ret;
+}
+
+torrent_status test_resume_flags(int flags)
+{
+ session ses;
+
+	boost::shared_ptr<torrent_info> ti = generate_torrent();
+
+ add_torrent_params p;
+
+ p.ti = ti;
+ p.flags = flags;
+ p.save_path = "/add_torrent_params save_path";
+ p.trackers.push_back("http://add_torrent_params_tracker.com/announce");
+ p.url_seeds.push_back("http://add_torrent_params_url_seed.com");
+
+	std::vector<char> rd = generate_resume_data(ti.get());
+ p.resume_data.swap(rd);
+
+ p.max_uploads = 1;
+ p.max_connections = 2;
+ p.upload_limit = 3;
+ p.download_limit = 4;
+ p.file_priorities.push_back(2);
+
+ torrent_handle h = ses.add_torrent(p);
+ torrent_status s = h.status();
+ TEST_EQUAL(s.info_hash, ti->info_hash());
+ return s;
+}
+
+void default_tests(torrent_status const& s)
+{
+ TEST_EQUAL(s.last_scrape, 1349);
+ TEST_EQUAL(s.time_since_download, 1350);
+ TEST_EQUAL(s.time_since_upload, 1351);
+ TEST_EQUAL(s.active_time, 1339);
+ TEST_EQUAL(s.finished_time, 1352);
+ TEST_EQUAL(s.seeding_time, 1340);
+ TEST_EQUAL(s.added_time, 1347);
+ TEST_EQUAL(s.completed_time, 1348);
+}
+
+int test_main()
+{
+ torrent_status s;
+
+ fprintf(stderr, "flags: 0\n");
+ s = test_resume_flags(0);
+ default_tests(s);
+ TEST_EQUAL(s.save_path, "/add_torrent_params save_path");
+ TEST_EQUAL(s.sequential_download, false);
+ TEST_EQUAL(s.paused, false);
+ TEST_EQUAL(s.auto_managed, false);
+ TEST_EQUAL(s.seed_mode, false);
+ TEST_EQUAL(s.super_seeding, false);
+ TEST_EQUAL(s.share_mode, false);
+ TEST_EQUAL(s.upload_mode, false);
+ TEST_EQUAL(s.ip_filter_applies, false);
+ TEST_EQUAL(s.connections_limit, 1345);
+ TEST_EQUAL(s.uploads_limit, 1346);
+
+ fprintf(stderr, "flags: use_resume_save_path\n");
+ s = test_resume_flags(add_torrent_params::flag_use_resume_save_path);
+ default_tests(s);
+ TEST_EQUAL(s.save_path, "/resume_data save_path");
+ TEST_EQUAL(s.sequential_download, false);
+ TEST_EQUAL(s.paused, false);
+ TEST_EQUAL(s.auto_managed, false);
+ TEST_EQUAL(s.seed_mode, false);
+ TEST_EQUAL(s.super_seeding, false);
+ TEST_EQUAL(s.share_mode, false);
+ TEST_EQUAL(s.upload_mode, false);
+ TEST_EQUAL(s.ip_filter_applies, false);
+ TEST_EQUAL(s.connections_limit, 1345);
+ TEST_EQUAL(s.uploads_limit, 1346);
+
+ fprintf(stderr, "flags: override_resume_data\n");
+ s = test_resume_flags(add_torrent_params::flag_override_resume_data
+ | add_torrent_params::flag_paused);
+
+ default_tests(s);
+ TEST_EQUAL(s.save_path, "/add_torrent_params save_path");
+ TEST_EQUAL(s.sequential_download, false);
+ TEST_EQUAL(s.paused, true);
+ TEST_EQUAL(s.auto_managed, false);
+ TEST_EQUAL(s.seed_mode, false);
+ TEST_EQUAL(s.super_seeding, false);
+ TEST_EQUAL(s.share_mode, false);
+ TEST_EQUAL(s.upload_mode, false);
+ TEST_EQUAL(s.ip_filter_applies, false);
+ TEST_EQUAL(s.connections_limit, 2);
+ TEST_EQUAL(s.uploads_limit, 1);
+
+ fprintf(stderr, "flags: seed_mode\n");
+ s = test_resume_flags(add_torrent_params::flag_override_resume_data
+ | add_torrent_params::flag_seed_mode);
+ default_tests(s);
+ TEST_EQUAL(s.save_path, "/add_torrent_params save_path");
+ TEST_EQUAL(s.sequential_download, false);
+ TEST_EQUAL(s.paused, false);
+ TEST_EQUAL(s.auto_managed, false);
+ TEST_EQUAL(s.seed_mode, true);
+ TEST_EQUAL(s.super_seeding, false);
+ TEST_EQUAL(s.share_mode, false);
+ TEST_EQUAL(s.upload_mode, false);
+ TEST_EQUAL(s.ip_filter_applies, false);
+ TEST_EQUAL(s.connections_limit, 2);
+ TEST_EQUAL(s.uploads_limit, 1);
+
+ fprintf(stderr, "flags: upload_mode\n");
+ s = test_resume_flags(add_torrent_params::flag_upload_mode);
+ default_tests(s);
+ TEST_EQUAL(s.save_path, "/add_torrent_params save_path");
+ TEST_EQUAL(s.sequential_download, false);
+ TEST_EQUAL(s.paused, false);
+ TEST_EQUAL(s.auto_managed, false);
+ TEST_EQUAL(s.seed_mode, false);
+ TEST_EQUAL(s.super_seeding, false);
+ TEST_EQUAL(s.share_mode, false);
+ TEST_EQUAL(s.upload_mode, true);
+ TEST_EQUAL(s.ip_filter_applies, false);
+ TEST_EQUAL(s.connections_limit, 1345);
+ TEST_EQUAL(s.uploads_limit, 1346);
+
+ fprintf(stderr, "flags: share_mode\n");
+ s = test_resume_flags(add_torrent_params::flag_override_resume_data
+ | add_torrent_params::flag_share_mode);
+ default_tests(s);
+ TEST_EQUAL(s.save_path, "/add_torrent_params save_path");
+ TEST_EQUAL(s.sequential_download, false);
+ TEST_EQUAL(s.paused, false);
+ TEST_EQUAL(s.auto_managed, false);
+ TEST_EQUAL(s.seed_mode, false);
+ TEST_EQUAL(s.super_seeding, false);
+ TEST_EQUAL(s.share_mode, true);
+ TEST_EQUAL(s.upload_mode, false);
+ TEST_EQUAL(s.ip_filter_applies, false);
+ TEST_EQUAL(s.connections_limit, 2);
+ TEST_EQUAL(s.uploads_limit, 1);
+
+ // resume data overrides the auto-managed flag
+ fprintf(stderr, "flags: auto_managed\n");
+ s = test_resume_flags(add_torrent_params::flag_auto_managed);
+ default_tests(s);
+ TEST_EQUAL(s.save_path, "/add_torrent_params save_path");
+ TEST_EQUAL(s.sequential_download, false);
+ TEST_EQUAL(s.paused, false);
+ TEST_EQUAL(s.auto_managed, false);
+ TEST_EQUAL(s.seed_mode, false);
+ TEST_EQUAL(s.super_seeding, false);
+ TEST_EQUAL(s.share_mode, false);
+ TEST_EQUAL(s.upload_mode, false);
+ TEST_EQUAL(s.ip_filter_applies, false);
+ TEST_EQUAL(s.connections_limit, 1345);
+ TEST_EQUAL(s.uploads_limit, 1346);
+
+ // resume data overrides the paused flag
+ fprintf(stderr, "flags: paused\n");
+ s = test_resume_flags(add_torrent_params::flag_paused);
+ default_tests(s);
+ TEST_EQUAL(s.save_path, "/add_torrent_params save_path");
+ TEST_EQUAL(s.sequential_download, false);
+ TEST_EQUAL(s.paused, false);
+ TEST_EQUAL(s.auto_managed, false);
+ TEST_EQUAL(s.seed_mode, false);
+ TEST_EQUAL(s.super_seeding, false);
+ TEST_EQUAL(s.share_mode, false);
+ TEST_EQUAL(s.upload_mode, false);
+ TEST_EQUAL(s.ip_filter_applies, false);
+ TEST_EQUAL(s.connections_limit, 1345);
+ TEST_EQUAL(s.uploads_limit, 1346);
+
+ // TODO: 2 test all other resume flags here too. This would require returning
+ // more than just the torrent_status from test_resume_flags. Also http seeds
+ // and trackers for instance
+ return 0;
+}
+
+
diff --git a/tools/parse_dht_log.py b/tools/parse_dht_log.py
index 63e9901f4..a3124f81f 100755
--- a/tools/parse_dht_log.py
+++ b/tools/parse_dht_log.py
@@ -34,8 +34,8 @@ searches = []
def convert_timestamp(t):
parts = t.split('.')
- posix = time.strptime(parts[0], '%H:%M:%S')
- return (posix.tm_hour * 3600 + posix.tm_min * 60 + posix.tm_sec) * 1000 + int(parts[1])
+ hms = parts[0].split(':')
+ return (int(hms[0]) * 3600 + int(hms[1]) * 60 + int(hms[2])) * 1000 + int(parts[1])
last_incoming = ''
diff --git a/tools/parse_dht_stats.py b/tools/parse_dht_stats.py
index 548334894..388350710 100644
--- a/tools/parse_dht_stats.py
+++ b/tools/parse_dht_stats.py
@@ -42,7 +42,7 @@ replot
out.close()
gnuplot_scripts += [name]
-gen_stats_gnuplot('dht_routing_table_size', 'nodes', ['active nodes','passive nodes'])
+gen_stats_gnuplot('dht_routing_table_size', 'nodes', ['active nodes','passive nodes', 'confirmed nodes'])
gen_stats_gnuplot('dht_tracker_table_size', '', ['num torrents', 'num peers'])
gen_stats_gnuplot('dht_announces', 'messages per minute', ['announces per min', 'failed announces per min'])
gen_stats_gnuplot('dht_clients', 'messages per minute', ['total msgs per min', 'az msgs per min', 'ut msgs per min', 'lt msgs per min', 'mp msgs per min', 'gr msgs per min'])
| ||