relevance 3 | ../src/file.cpp:487 | find out what error code is reported when the filesystem does not support hard links. |
find out what error code is reported when the filesystem
does not support hard links. ../src/file.cpp:487
{
#ifdef TORRENT_WINDOWS
#if TORRENT_USE_WSTRING
@@ -90,7 +39,7 @@
- // it's possible CreateHardLink will copy the file internally too,
- // if the filesystem does not support it.
- ec.assign(GetLastError(), system_category());
- return;
+ DWORD error = GetLastError();
+	if (error != ERROR_NOT_SUPPORTED && error != ERROR_ACCESS_DENIED)
+ {
+ // it's possible CreateHardLink will copy the file internally too,
+ // if the filesystem does not support it.
+ ec.assign(GetLastError(), system_category());
+ return;
+ }
+
+ // fall back to making a copy
#else
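
As a way to answer this TODO directly, here is a small standalone probe (not part of libtorrent) that attempts to create a hard link and prints the raw error code on the filesystem under test. The paths are supplied on the command line; run it with the link target on the filesystem in question (for example a FAT volume).

#include <cstdio>
#ifdef _WIN32
#include <windows.h>
#else
#include <unistd.h>
#include <cerrno>
#include <cstring>
#endif

int main(int argc, char* argv[])
{
	if (argc != 3)
	{
		std::fprintf(stderr, "usage: hardlink_probe <existing-file> <new-link>\n");
		return 1;
	}
#ifdef _WIN32
	// CreateHardLink takes the new link name first, the existing file second
	if (!CreateHardLinkA(argv[2], argv[1], NULL))
		std::printf("CreateHardLink failed, GetLastError() = %lu\n", GetLastError());
#else
	if (::link(argv[1], argv[2]) != 0)
		std::printf("link() failed, errno = %d (%s)\n", errno, std::strerror(errno));
#endif
	return 0;
}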
relevance 3 | ../src/session_impl.cpp:3600 | also deduct force started checking torrents from checking_limit; also deduct started inactive torrents from hard_limit |
also deduct force started checking torrents from checking_limit
also deduct started inactive torrents from hard_limit ../src/session_impl.cpp:3600
-	if (hard_limit == -1)
- hard_limit = (std::numeric_limits<int>::max)();
- if (dht_limit == -1)
- dht_limit = (std::numeric_limits<int>::max)();
- if (lsd_limit == -1)
- lsd_limit = (std::numeric_limits<int>::max)();
- if (tracker_limit == -1)
- tracker_limit = (std::numeric_limits<int>::max)();
+ | ||
relevance 3 | ../src/session_impl.cpp:2028 | port map SSL udp socket here |
port map SSL udp socket here ../src/session_impl.cpp:2028
	udp::endpoint ssl_bind_if(m_listen_interface.address(), ssl_port);
- // deduct "force started" torrents from the hard_limit
- // we don't have explicit access to the number of force started torrents,
- // but we know how many started downloading and seeding torrents we have.
- // if we subtract all non-force started torrents from the total, we get
- // the number of force started.
- hard_limit -= m_stats_counters[counters::num_downloading_torrents] -
- downloaders.size();
- hard_limit -= m_stats_counters[counters::num_seeding_torrents]
- + m_stats_counters[counters::num_upload_only_torrents] -
- seeds.size();
-
-
- // if hard_limit is <= 0, all torrents in these lists should be paused.
- // The order is not relevant
- if (hard_limit > 0)
+ // if ssl port is 0, we don't want to listen on an SSL port
+ if (ssl_port != 0)
{
- // we only need to sort the first n torrents here, where n is the number
- // of checking torrents we allow. The rest of the list is still used to
- // make sure the remaining torrents are paused, but their order is not
- // relevant
- std::partial_sort(checking.begin(), checking.begin() +
- (std::min)(checking_limit, int(checking.size())), checking.end()
- , boost::bind(&torrent::sequence_number, _1) < boost::bind(&torrent::sequence_number, _2));
-
- std::partial_sort(downloaders.begin(), downloaders.begin() +
- (std::min)(hard_limit, int(downloaders.size())), downloaders.end()
- , boost::bind(&torrent::sequence_number, _1) < boost::bind(&torrent::sequence_number, _2));
-
- std::partial_sort(seeds.begin(), seeds.begin() +
- (std::min)(hard_limit, int(seeds.size())), seeds.end()
- , boost::bind(&torrent::seed_rank, _1, boost::ref(m_settings))
- > boost::bind(&torrent::seed_rank, _2, boost::ref(m_settings)));
- }
-
- auto_manage_checking_torrents(checking, checking_limit);
-
- if (settings().get_bool(settings_pack::auto_manage_prefer_seeds))
- {
- auto_manage_torrents(seeds, dht_limit, tracker_limit, lsd_limit
- , hard_limit, num_seeds);
- auto_manage_torrents(downloaders, dht_limit, tracker_limit, lsd_limit
- , hard_limit, num_downloaders);
- | ||
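
The std::partial_sort calls in the snippet above rely on only the first n elements needing to be in order; the rest of the range only has to stay present for the pause pass. A minimal standalone illustration of that trick (not libtorrent code):

#include <algorithm>
#include <cassert>
#include <vector>

int main()
{
	std::vector<int> seq;
	for (int i = 10; i > 0; --i) seq.push_back(i); // 10, 9, ..., 1

	int const limit = 3;
	std::partial_sort(seq.begin()
		, seq.begin() + (std::min)(limit, int(seq.size())), seq.end());

	// only the first `limit` elements are guaranteed to be the smallest,
	// in ascending order; the tail is in unspecified order but still all there
	assert(seq[0] == 1 && seq[1] == 2 && seq[2] == 3);
	assert(int(seq.size()) == 10);
	return 0;
}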
relevance 3 | ../src/settings_pack.cpp:62 | write a unit test for settings_pack |
write a unit test for settings_pack ../src/settings_pack.cpp:62
namespace {
-
- template <class T>
- bool compare_first(std::pair<boost::uint16_t, T> const& lhs
- , std::pair<boost::uint16_t, T> const& rhs)
- {
- return lhs.first < rhs.first;
- }
-
- template <class T>
- void insort_replace(std::vector<std::pair<boost::uint16_t, T> >& c, std::pair<boost::uint16_t, T> const& v)
- {
- typedef std::vector<std::pair<boost::uint16_t, T> > container_t;
- typename container_t::iterator i = std::lower_bound(c.begin(), c.end(), v
- , &compare_first<T>);
- if (i != c.end() && i->first == v.first) i->second = v.second;
- else c.insert(i, v);
- }
-}
-
-namespace libtorrent
- {
- struct str_setting_entry_t
- {
- // the name of this setting. used for serialization and deserialization
- char const* name;
- // if present, this function is called when the setting is changed
- void (aux::session_impl::*fun)();
- char const *default_value;
-#ifndef TORRENT_NO_DEPRECATE
- // offset into session_settings, used to map
- // settings to the deprecated settings struct
- int offset;
+ m_ssl_udp_socket.bind(ssl_bind_if, ec);
+ if (ec)
+ {
+#ifndef TORRENT_DISABLE_LOGGING
+ session_log("SSL: cannot bind to UDP interface \"%s\": %s"
+ , print_endpoint(m_listen_interface).c_str(), ec.message().c_str());
#endif
- };
+ if (m_alerts.should_post<listen_failed_alert>())
+ {
+ error_code err;
+ m_alerts.emplace_alert<listen_failed_alert>(print_endpoint(ssl_bind_if)
+ , listen_failed_alert::bind, ec, listen_failed_alert::utp_ssl);
+ }
+ ec.clear();
+ }
+ }
+ #endif // TORRENT_USE_OPENSSL
- struct int_setting_entry_t
- {
- // the name of this setting. used for serialization and deserialization
- char const* name;
- // if present, this function is called when the setting is changed
- void (aux::session_impl::*fun)();
- int default_value;
-#ifndef TORRENT_NO_DEPRECATE
- // offset into session_settings, used to map
- // settings to the deprecated settings struct
- int offset;
-#endif
- };
-
- struct bool_setting_entry_t
- | ||
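
Since the TODO above asks for a settings_pack unit test, here is a hedged starting point: a standalone check of the insort_replace() helper shown in the snippet, using plain assert() instead of libtorrent's test macros so it compiles by itself (the helper is copied locally for that reason).

#include <algorithm>
#include <cassert>
#include <utility>
#include <vector>
#include <boost/cstdint.hpp>

// local copy of the insort_replace helper shown above
template <class T>
bool compare_first(std::pair<boost::uint16_t, T> const& lhs
	, std::pair<boost::uint16_t, T> const& rhs)
{ return lhs.first < rhs.first; }

template <class T>
void insort_replace(std::vector<std::pair<boost::uint16_t, T> >& c
	, std::pair<boost::uint16_t, T> const& v)
{
	typedef std::vector<std::pair<boost::uint16_t, T> > container_t;
	typename container_t::iterator i = std::lower_bound(c.begin(), c.end(), v
		, &compare_first<T>);
	if (i != c.end() && i->first == v.first) i->second = v.second;
	else c.insert(i, v);
}

int main()
{
	std::vector<std::pair<boost::uint16_t, int> > v;
	insort_replace(v, std::pair<boost::uint16_t, int>(3, 30));
	insort_replace(v, std::pair<boost::uint16_t, int>(1, 10));
	insort_replace(v, std::pair<boost::uint16_t, int>(2, 20));

	// keys end up in sorted order
	assert(v.size() == 3);
	assert(v[0].first == 1 && v[1].first == 2 && v[2].first == 3);

	// inserting an existing key replaces the value instead of duplicating it
	insort_replace(v, std::pair<boost::uint16_t, int>(2, 99));
	assert(v.size() == 3 && v[1].second == 99);
	return 0;
}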
relevance 3 | ../src/upnp.cpp:88 | listen_interface is not used. It's meant to bind the broadcast socket. it would probably have to be changed to a vector of interfaces to bind to though, since the broadcast socket opens one socket per local interface by default |
listen_interface is not used. It's meant to bind the broadcast
+ | ||
relevance 3 | ../src/upnp.cpp:94 | listen_interface is not used. It's meant to bind the broadcast socket. it would probably have to be changed to a vector of interfaces to bind to though, since the broadcast socket opens one socket per local interface by default |
listen_interface is not used. It's meant to bind the broadcast
socket. it would probably have to be changed to a vector of interfaces to
bind to though, since the broadcast socket opens one socket per local
-interface by default../src/upnp.cpp:88 , address const& listen_interface, std::string const& user_agent
- , portmap_callback_t const& cb, log_callback_t const& lcb
+interface by default../src/upnp.cpp:94 , portmap_callback_t const& cb, log_callback_t const& lcb
, bool ignore_nonrouters)
: m_user_agent(user_agent)
, m_callback(cb)
@@ -235,7 +108,8 @@ interface by default../src/upnp.cpp:88../src/upnp.cpp:88 TORRENT_UNUSED(listen_interface);
}
-void upnp::start(void* state)
+void upnp::start()
{
error_code ec;
m_socket.open(boost::bind(&upnp::on_reply, self(), _1, _2, _3)
, m_refresh_timer.get_io_service(), ec);
- if (state)
- {
- upnp_state_t* s = (upnp_state_t*)state;
- m_devices.swap(s->devices);
- m_mappings.swap(s->mappings);
- delete s;
- }
-
m_mappings.reserve(10);
}
-void* upnp::drain_state()
+upnp::~upnp()
{
- upnp_state_t* s = new upnp_state_t;
- s->mappings.swap(m_mappings);
-
- for (std::set<rootdevice>::iterator i = m_devices.begin()
- , end(m_devices.end()); i != end; ++i)
- i->upnp_connection.reset();
- s->devices.swap(m_devices);
- return s;
}
-
| ||
relevance 3 | ../src/kademlia/get_item.cpp:202 | it would be nice to not have to spend so much time rendering the bencoded dict if logging is disabled |
it would be nice to not have to spend so much time rendering
-the bencoded dict if logging is disabled ../src/kademlia/get_item.cpp:202
-	bencode(std::back_inserter(buffer), m_data.value());
+
+void upnp::discover_device()
+{
+ mutex::scoped_lock l(m_mutex);
+ if (m_socket.num_send_sockets() == 0)
+ log("No network interfaces to broadcast to", l);
+
+ discover_device_impl(l);
+}
+
+void upnp::log(char const* msg, mutex::scoped_lock& l)
+{
+ l.unlock();
+ m_log_callback(msg);
+ l.lock();
+}
+ | ||
relevance 3 | ../src/kademlia/get_item.cpp:212 | it would be nice to not have to spend so much time rendering the bencoded dict if logging is disabled |
it would be nice to not have to spend so much time rendering
+the bencoded dict if logging is disabled ../src/kademlia/get_item.cpp:212
+	bencode(std::back_inserter(buffer), m_data.value());
TORRENT_ASSERT(m_target == hasher(&buffer[0], buffer.size()).final());
}
#endif
@@ -300,7 +174,7 @@ void get_item::put(std::vector<std::pair<node_entry, std::string> >
#ifndef TORRENT_DISABLE_LOGGING
get_node().observer()->log(dht_logger::traversal, "[%p] sending put "
"[ seq: %" PRId64 " nodes: %d ]"
- , this, (m_data.is_mutable() ? m_data.seq() : -1)
+ , static_cast<void*>(this), (m_data.is_mutable() ? m_data.seq() : -1)
, int(v.size()));
#endif
@@ -314,13 +188,13 @@ void get_item::put(std::vector<std::pair<node_entry, std::string> >
{
#ifndef TORRENT_DISABLE_LOGGING
get_node().observer()->log(dht_logger::traversal, "[%p] put-distance: %d"
- , this, 160 - distance_exp(m_target, i->first.id));
+ , static_cast<void*>(this), 160 - distance_exp(m_target, i->first.id));
#endif
void* ptr = m_node.m_rpc.allocate_observer();
if (ptr == 0) return;
- | ||
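
The pattern both of these bencode/logging TODOs point at is cheap to sketch in isolation: ask the logger whether it is enabled before doing the expensive rendering. The types below are stand-ins, not the real dht_logger interface.

#include <string>

// stand-ins for the real observer/logger and item types, so the sketch
// is self-contained
struct logger
{
	virtual ~logger() {}
	virtual bool enabled() const = 0;
	virtual void log(std::string const& line) = 0;
};

struct item
{
	// imagine this is the bencode() call; it is the expensive part
	std::string render() const { return "d3:foo3:bare"; }
};

void log_item(logger* l, item const& data)
{
	// skip the rendering entirely when nothing would consume it
	if (l == 0 || !l->enabled()) return;
	l->log(data.render());
}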
relevance 3 | ../src/kademlia/get_item.cpp:226 | we don't support CAS errors here! we need a custom observer |
we don't support CAS errors here! we need a custom observer../src/kademlia/get_item.cpp:226 , this, (m_data.is_mutable() ? m_data.seq() : -1)
+ | ||
relevance 3 | ../src/kademlia/get_item.cpp:236 | we don't support CAS errors here! we need a custom observer |
we don't support CAS errors here! we need a custom observer../src/kademlia/get_item.cpp:236 , static_cast<void*>(this), (m_data.is_mutable() ? m_data.seq() : -1)
, int(v.size()));
#endif
@@ -334,7 +208,7 @@ void get_item::put(std::vector<std::pair<node_entry, std::string> >
{
#ifndef TORRENT_DISABLE_LOGGING
get_node().observer()->log(dht_logger::traversal, "[%p] put-distance: %d"
- , this, 160 - distance_exp(m_target, i->first.id));
+ , static_cast<void*>(this), 160 - distance_exp(m_target, i->first.id));
#endif
void* ptr = m_node.m_rpc.allocate_observer();
@@ -371,7 +245,7 @@ void get_item_observer::reply(msg const& m)
boost::uint64_t seq = 0;
bdecode_node r = m.message.dict_find_dict("r");
- | ||
relevance 3 | ../src/kademlia/rpc_manager.cpp:84 | move this into its own .cpp file |
move this into its own .cpp file ../src/kademlia/rpc_manager.cpp:84
void intrusive_ptr_add_ref(observer const* o)
{
TORRENT_ASSERT(o != 0);
@@ -422,8 +296,8 @@ address observer::target_addr() const
return address_v6(m_addr.v6);
else
#endif
- | ||
relevance 3 | ../src/kademlia/traversal_algorithm.cpp:376 | it would be nice to not have to perform this loop if logging is disabled |
it would be nice to not have to perform this loop if
-logging is disabled../src/kademlia/traversal_algorithm.cpp:376 ++m_timeouts;
+ | ||
relevance 3 | ../src/kademlia/traversal_algorithm.cpp:378 | it would be nice to not have to perform this loop if logging is disabled |
it would be nice to not have to perform this loop if
+logging is disabled../src/kademlia/traversal_algorithm.cpp:378 ++m_timeouts;
TORRENT_ASSERT(m_invoke_count > 0);
--m_invoke_count;
}
@@ -454,7 +328,7 @@ void traversal_algorithm::done()
to_hex(reinterpret_cast<char const*>(&o->id()[0]), 20, hex_id);
get_node().observer()->log(dht_logger::traversal
, "[%p] id: %s distance: %d addr: %s"
- , this, hex_id, closest_target
+ , static_cast<void*>(this), hex_id, closest_target
, print_endpoint(o->target_ep()).c_str());
--results_target;
@@ -467,91 +341,66 @@ void traversal_algorithm::done()
{
get_node().observer()->log(dht_logger::traversal
, "[%p] COMPLETED distance: %d type: %s"
- , this, closest_target, name());
+ , static_cast<void*>(this), closest_target, name());
}
#endif
// delete all our references to the observer objects so
// they will in turn release the traversal algorithm
m_results.clear();
}
- | ||
relevance 3 | ../include/libtorrent/peer_connection.hpp:1245 | factor this out into its own header and use it for UDP socket and maybe second_timer as well |
factor this out into its own header and use it for UDP socket
-and maybe second_timer as well../include/libtorrent/peer_connection.hpp:1245 bool m_need_interest_update:1;
+ | ||
relevance 3 | ../include/libtorrent/peer_connection.hpp:988 | use handler storage for second_tick and udp_packet handler too |
use handler storage for second_tick and udp_packet handler too../include/libtorrent/peer_connection.hpp:988 // this is the number of bytes we had uploaded the
+ // last time this peer was unchoked. This does not
+ // reset each unchoke interval/round. This is used to
+ // track upload across rounds, for the full duration of
+ // the peer being unchoked. Specifically, it's used
+ // for the round-robin unchoke algorithm.
+ boost::int64_t m_uploaded_at_last_unchoke;
- // set to true if this peer has metadata, and false
- // otherwise.
- bool m_has_metadata:1;
+ // the number of payload bytes downloaded last second tick
+ boost::int32_t m_downloaded_last_second;
- // this is set to true if this peer was accepted exceeding
- // the connection limit. It means it has to disconnect
- // itself, or some other peer, as soon as it's completed
- // the handshake. We need to wait for the handshake in
- // order to know which torrent it belongs to, to know which
- // other peers to compare it to.
- bool m_exceeded_limit:1;
+ // the number of payload bytes uploaded last second tick
+ boost::int32_t m_uploaded_last_second;
- // this is slow-start at the bittorrent layer. It affects how we increase
- // desired queue size (i.e. the number of outstanding requests we keep).
- // While the underlying transport protocol is in slow-start, the number of
- // outstanding requests need to increase at the same pace to keep up.
- bool m_slow_start:1;
+ // the number of bytes that the other
+ // end has to send us in order to respond
+ // to all outstanding piece requests we
+ // have sent to it
+ int m_outstanding_bytes;
- template <class Handler, std::size_t Size>
- struct allocating_handler
- {
- | ||
relevance 3 | ../include/libtorrent/peer_connection.hpp:1250 | if move semantics is supported, move the handler into this wrapper |
if move semantics is supported, move the handler into this
-wrapper../include/libtorrent/peer_connection.hpp:1250 // otherwise.
- bool m_has_metadata:1;
+ aux::handler_storage<TORRENT_READ_HANDLER_MAX_SIZE> m_read_handler_storage;
+ aux::handler_storage<TORRENT_WRITE_HANDLER_MAX_SIZE> m_write_handler_storage;
- // this is set to true if this peer was accepted exceeding
- // the connection limit. It means it has to disconnect
- // itself, or some other peer, as soon as it's completed
- // the handshake. We need to wait for the handshake in
- // order to know which torrent it belongs to, to know which
- // other peers to compare it to.
- bool m_exceeded_limit:1;
+ // we have suggested these pieces to the peer
+ // don't suggest it again
+ bitfield m_sent_suggested_pieces;
- // this is slow-start at the bittorrent layer. It affects how we increase
- // desired queue size (i.e. the number of outstanding requests we keep).
- // While the underlying transport protocol is in slow-start, the number of
- // outstanding requests need to increase at the same pace to keep up.
- bool m_slow_start:1;
+ // the pieces we will send to the peer
+ // if requested (regardless of choke state)
+ std::vector<int> m_accept_fast;
- template <class Handler, std::size_t Size>
- struct allocating_handler
- {
- allocating_handler(
- Handler const& h, handler_storage<Size>& s)
- : handler(h)
- , storage(s)
- {}
+ // a sent-piece counter for the allowed fast set
+ // to avoid exploitation. Each slot is a counter
+ // for one of the pieces from the allowed-fast set
+ std::vector<boost::uint16_t> m_accept_fast_piece_cnt;
-#ifndef BOOST_NO_CXX11_VARIADIC_TEMPLATES
- template <class... A>
- void operator()(A&&... a) const
- {
- handler(std::forward<A>(a)...);
- }
-#else
- template <class A0>
- void operator()(A0 const& a0) const
- {
- handler(a0);
- }
+ // the pieces the peer will send us if
+ // requested (regardless of choke state)
+ std::vector<int> m_allowed_fast;
- template <class A0, class A1>
- void operator()(A0 const& a0, A1 const& a1) const
- {
- handler(a0, a1);
- }
+ // pieces that has been suggested to be
+ // downloaded from this peer
+ std::vector<int> m_suggested_pieces;
- template <class A0, class A1, class A2>
- void operator()(A0 const& a0, A1 const& a1, A2 const& a2) const
- {
- handler(a0, a1, a2);
- }
-#endif
- | ||
relevance 3 | ../include/libtorrent/torrent.hpp:1365 | factor out the links (as well as update_list() to a separate class that torrent can inherit) |
factor out the links (as well as update_list() to a separate
-class that torrent can inherit)../include/libtorrent/torrent.hpp:1365
+ // the time when this peer last saw a complete copy
+ // of this torrent
+ time_t m_last_seen_complete;
+
+ // the block we're currently receiving. Or
+ // (-1, -1) if we're not receiving one
+ piece_block m_receiving_block;
+ | ||
relevance 3 | ../include/libtorrent/torrent.hpp:1347 | factor out the links (as well as update_list() to a separate class that torrent can inherit) |
factor out the links (as well as update_list() to a separate
+class that torrent can inherit)../include/libtorrent/torrent.hpp:1347
// this was the last time _we_ saw a seed in this swarm
time_t m_last_seen_complete;
@@ -602,7 +451,160 @@ class that torrent can inherit)../include/libtorrent/torrent.hpp:1365
// when checking, this is the first piece we have not
// issued a hash job for
- | ||
relevance 2 | ../test/test_piece_picker.cpp:277 | split this up into smaller tests (where we print_title) |
split this up into smaller tests (where we print_title)../test/test_piece_picker.cpp:277 std::vector<piece_block> picked;
+ | ||
relevance 3 | ../include/libtorrent/aux_/allocating_handler.hpp:77 | make sure the handlers we pass in are potentially movable! |
make sure the handlers we pass in are potentially movable!../include/libtorrent/aux_/allocating_handler.hpp:77 : used(false)
+ {}
+
+ bool used;
+#else
+ handler_storage() {}
+#endif
+ boost::aligned_storage<Size> bytes;
+ private:
+ handler_storage(handler_storage const&);
+ };
+
+ // this class is a wrapper for an asio handler object. Its main purpose
+ // is to pass along additional parameters to the asio handler allocator
+ // function, as well as providing a distinct type for the handler
+ // allocator function to overload on
+ template <class Handler, std::size_t Size>
+ struct allocating_handler
+ {
+
+#if !defined BOOST_NO_CXX11_RVALUE_REFERENCES
+ allocating_handler(
+ Handler&& h, handler_storage<Size>& s)
+ : handler(std::move(h))
+ , storage(s)
+ {}
+#endif
+
+ allocating_handler(
+ Handler const& h, handler_storage<Size>& s)
+ : handler(h)
+ , storage(s)
+ {}
+
+#if !defined BOOST_NO_CXX11_VARIADIC_TEMPLATES \
+ && !defined BOOST_NO_CXX11_RVALUE_REFERENCES
+ template <class... A>
+ void operator()(A&&... a) const
+ {
+ handler(std::forward<A>(a)...);
+ }
+#else
+ template <class A0>
+ void operator()(A0 const& a0) const
+ {
+ handler(a0);
+ }
+
+ template <class A0, class A1>
+ void operator()(A0 const& a0, A1 const& a1) const
+ {
+ | ||
relevance 2 | ../test/test_dht.cpp:540 | split this test up into smaller test cases |
split this test up into smaller test cases../test/test_dht.cpp:540
+dht_settings test_settings()
+{
+ dht_settings sett;
+ sett.max_torrents = 4;
+ sett.max_dht_items = 4;
+ sett.enforce_node_id = false;
+ return sett;
+}
+
+entry test_args(sha1_hash const* nid = NULL)
+{
+ entry a;
+
+ if (nid == NULL) a["id"] = generate_next().to_string();
+ else a["id"] = nid->to_string();
+
+ return a;
+}
+
+TORRENT_TEST(dht)
+ {
+ dht_settings sett = test_settings();
+ mock_socket s;
+ obs observer;
+ counters cnt;
+ dht::node node(&s, sett, node_id(0), &observer, cnt);
+
+ // DHT should be running on port 48199 now
+ bdecode_node response;
+ bdecode_node parsed[11];
+ char error_string[200];
+ bool ret;
+
+ // ====== ping ======
+ udp::endpoint source(address::from_string("10.0.0.1"), 20);
+ send_dht_request(node, "ping", source, &response, "10");
+
+ dht::key_desc_t pong_desc[] = {
+ {"y", bdecode_node::string_t, 1, 0},
+ {"t", bdecode_node::string_t, 2, 0},
+ {"r", bdecode_node::dict_t, 0, key_desc_t::parse_children},
+ {"id", bdecode_node::string_t, 20, key_desc_t::last_child},
+ };
+
+ fprintf(stderr, "msg: %s\n", print_entry(response).c_str());
+ ret = dht::verify_message(response, pong_desc, parsed, 4, error_string
+ , sizeof(error_string));
+ TEST_CHECK(ret);
+ if (ret)
+ {
+ | ||
relevance 2 | ../test/test_dht.cpp:1996 | split this up into smaller test cases |
split this up into smaller test cases../test/test_dht.cpp:1996
+ TEST_EQUAL(to_hex(std::string(signature, 64))
+ , "6834284b6b24c3204eb2fea824d82f88883a3d95e8b4a21b8c0ded553d17d17d"
+ "df9a8a7104b1258f30bed3787e6cb896fca78c58f8e03b5f18f14951a87d9a08");
+
+ sha1_hash target_id = item_target_id(test_salt, public_key);
+ TEST_EQUAL(to_hex(target_id.to_string()), "411eba73b6f087ca51a3795d9c8c938d365e32c1");
+}
+
+TORRENT_TEST(signing_test3)
+{
+ // test vector 3
+
+ // test content
+ std::pair<char const*, int> test_content("12:Hello World!", 15);
+
+ sha1_hash target_id = item_target_id(test_content);
+ TEST_EQUAL(to_hex(target_id.to_string()), "e5f96f6f38320f0f33959cb4d3d656452117aadb");
+}
+
+TORRENT_TEST(verify_message)
+ {
+ char error_string[200];
+
+ // test verify_message
+ static const key_desc_t msg_desc[] = {
+ {"A", bdecode_node::string_t, 4, 0},
+ {"B", bdecode_node::dict_t, 0, key_desc_t::optional | key_desc_t::parse_children},
+ {"B1", bdecode_node::string_t, 0, 0},
+ {"B2", bdecode_node::string_t, 0, key_desc_t::last_child},
+ {"C", bdecode_node::dict_t, 0, key_desc_t::optional | key_desc_t::parse_children},
+ {"C1", bdecode_node::string_t, 0, 0},
+ {"C2", bdecode_node::string_t, 0, key_desc_t::last_child},
+ };
+
+ bdecode_node msg_keys[7];
+
+ bdecode_node ent;
+
+ error_code ec;
+ char const test_msg[] = "d1:A4:test1:Bd2:B15:test22:B25:test3ee";
+ bdecode(test_msg, test_msg + sizeof(test_msg)-1, ent, ec);
+ fprintf(stderr, "%s\n", print_entry(ent).c_str());
+
+ bool ret = verify_message(ent, msg_desc, msg_keys, 7, error_string
+ , sizeof(error_string));
+ TEST_CHECK(ret);
+ TEST_CHECK(msg_keys[0]);
+ if (msg_keys[0]) TEST_EQUAL(msg_keys[0].string_value(), "test");
+ TEST_CHECK(msg_keys[1]);
+ TEST_CHECK(msg_keys[2]);
+ | ||
relevance 2 | ../test/test_piece_picker.cpp:281 | split this up into smaller tests (where we print_title) |
split this up into smaller tests (where we print_title)../test/test_piece_picker.cpp:281 std::vector<piece_block> picked;
counters pc;
p->pick_pieces(string2vec(availability), picked
, num_blocks, prefer_contiguous_blocks, peer_struct
@@ -653,8 +655,8 @@ int test_pick(boost::shared_ptr<piece_picker> const& p
TEST_CHECK(!(piece_block(0, 0) != piece_block(0, 0)));
TEST_CHECK(!(piece_block(0, 0) == piece_block(0, 1)));
- | ||
relevance 2 | ../src/alert.cpp:1444 | the salt here is allocated on the heap. It would be nice to allocate it in the stack_allocator |
the salt here is allocated on the heap. It would be nice to
allocate it in the stack_allocator ../src/alert.cpp:1444
-	, operation_names[op]
+ | ||
relevance 2 | ../src/alert.cpp:1451 | the salt here is allocated on the heap. It would be nice to allocate it in the stack_allocator |
the salt here is allocated on the heap. It would be nice to
allocate it in the stack_allocator ../src/alert.cpp:1451
+	, operation_names[op]
, error.value()
, convert_from_native(error.message()).c_str());
return msg;
@@ -679,17 +681,19 @@ allocate in in the stack_allocator../src/alert.cpp:1444../src/alert.cpp:1444
| ||
relevance 2 | ../src/alert_manager.cpp:90 | keep a count of the number of threads waiting. Only if it's > 0 notify them |
keep a count of the number of threads waiting. Only if it's
> 0 notify them ../src/alert_manager.cpp:90
return NULL;
@@ -754,13 +756,13 @@
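
A hedged sketch of what this alert_manager TODO describes: count the threads blocked in wait and only pay for the notify when the count is non-zero. Names are illustrative and the real alert_manager members may differ; both functions assume the caller holds the same mutex.

#include <boost/thread/mutex.hpp>
#include <boost/thread/condition_variable.hpp>

struct waiter_count
{
	waiter_count() : m_num_waiting(0) {}

	// caller holds the lock on the mutex associated with `l`
	void wait(boost::unique_lock<boost::mutex>& l)
	{
		++m_num_waiting;
		m_cond.wait(l);
		--m_num_waiting;
	}

	// caller holds the same mutex, so reading the counter is safe
	void notify()
	{
		if (m_num_waiting > 0) m_cond.notify_all();
	}

private:
	int m_num_waiting;
	boost::condition_variable m_cond;
};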
relevance 2 | ../src/block_cache.cpp:1691 | turn these return values into enums returns -1: block not in cache -2: out of memory |
turn these return values into enums
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
+ | ||
relevance 2 | ../src/block_cache.cpp:1723 | turn these return values into enums returns -1: block not in cache -2: out of memory |
turn these return values into enums
returns
-1: block not in cache
--2: out of memory ../src/block_cache.cpp:1691
+-2: out of memory ../src/block_cache.cpp:1723
	{
TORRENT_PIECE_ASSERT(!p.blocks[k].dirty, &p);
TORRENT_PIECE_ASSERT(!p.blocks[k].pending, &p);
TORRENT_PIECE_ASSERT(p.blocks[k].refcount == 0, &p);
@@ -814,7 +816,7 @@ returns
| ||
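
A minimal sketch of what this TODO suggests; the enumerator names are made up here, not taken from block_cache.

// instead of returning raw -1 / -2, give the special values names the
// callers can test against
enum copy_result_t
{
	copy_block_not_in_cache = -1,
	copy_out_of_memory      = -2
};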
relevance 2 | ../src/escape_string.cpp:209 | this should probably be moved into string_util.cpp |
this should probably be moved into string_util.cpp ../src/escape_string.cpp:209
	}
return false;
}
-
+
void convert_path_to_posix(std::string& path)
{
for (std::string::iterator i = path.begin()
@@ -862,9 +864,9 @@ returns
snprintf(msg, sizeof(msg), "%s://%s%s%s%s%s%s", protocol.c_str(), auth.c_str()
, auth.empty()?"":"@", host.c_str()
, port == -1 ? "" : ":"
- | ||
relevance 2 | ../src/file.cpp:508 | test this on a FAT volume to see what error we get! |
test this on a FAT volume to see what error we get! ../src/file.cpp:508
	// if the filesystem does not support it.
- ec.assign(GetLastError(), system_category());
- return;
+ | ||
relevance 2 | ../src/file.cpp:516 | test this on a FAT volume to see what error we get! |
test this on a FAT volume to see what error we get! ../src/file.cpp:516
	}
+
+ // fall back to making a copy
#else
@@ -888,6 +890,9 @@ returns
ec.assign(errno, generic_category());
return;
}
+
+ // fall back to making a copy
+
#endif
// if we get here, we should copy the file
@@ -910,17 +915,14 @@ returns
TORRENT_ASSERT(!ec);
if (is_directory(old_path, ec))
{
- create_directory(new_path, ec);
- if (ec) return;
- for (directory i(old_path, ec); !i.done(); i.next(ec))
- | ||
relevance 2 | ../src/http_tracker_connection.cpp:383 | returning a bool here is redundant. Instead this function should return the peer_entry |
returning a bool here is redundant. Instead this function should
-return the peer_entry ../src/http_tracker_connection.cpp:383
-	{
+ | ||
relevance 2 | ../src/http_tracker_connection.cpp:384 | returning a bool here is redundant. Instead this function should return the peer_entry |
returning a bool here is redundant. Instead this function should
+return the peer_entry ../src/http_tracker_connection.cpp:384
+	{
std::list<address> ip_list;
if (m_tracker_connection)
{
- error_code ec;
+ error_code ignore;
ip_list.push_back(
- m_tracker_connection->socket().remote_endpoint(ec).address());
+ m_tracker_connection->socket().remote_endpoint(ignore).address());
std::vector<tcp::endpoint> const& epts = m_tracker_connection->endpoints();
for (std::vector<tcp::endpoint>::const_iterator i = epts.begin()
, end(epts.end()); i != end; ++i)
@@ -965,10 +967,61 @@ return the peer_entry../src/http_tracker_connection.cpp:383 | ||
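
A hedged sketch of the signature change this TODO suggests: return the entry itself and use an empty boost::optional for the failure case, instead of an out-parameter plus bool. The struct and function here are illustrative stand-ins, not the real libtorrent declarations.

#include <boost/optional.hpp>
#include <string>

// illustrative stand-in for the real peer_entry
struct peer_entry
{
	std::string hostname;
	int port;
};

boost::optional<peer_entry> extract_peer(std::string const& host, int port)
{
	if (host.empty()) return boost::none; // previously: "return false"
	peer_entry e;
	e.hostname = host;
	e.port = port;
	return e;                             // previously: "ret = e; return true"
}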
relevance 2 | ../src/peer_connection.cpp:2340 | this should probably be based on time instead of number of request messages. For a very high throughput connection, 300 may be a legitimate number of requests to have in flight when getting choked |
this should probably be based on time instead of number
+ | ||
relevance 2 | ../src/instantiate_connection.cpp:43 | peer_connection and tracker_connection should probably be flags |
peer_connection and tracker_connection should probably be flags ../src/instantiate_connection.cpp:43
relevance 2 | ../src/instantiate_connection.cpp:44 | move this function into libtorrent::aux namespace |
move this function into libtorrent::aux namespace ../src/instantiate_connection.cpp:44
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#include "libtorrent/socket.hpp"
+#include "libtorrent/session_settings.hpp"
+#include "libtorrent/socket_type.hpp"
+#include "libtorrent/utp_socket_manager.hpp"
+#include "libtorrent/instantiate_connection.hpp"
+#include <boost/shared_ptr.hpp>
+#include <stdexcept>
+
+namespace libtorrent
+{
+ bool instantiate_connection(io_service& ios
+ , aux::proxy_settings const& ps, socket_type& s
+ , void* ssl_context
+ , utp_socket_manager* sm
+ , bool peer_connection
+ , bool tracker_connection)
+ {
+#ifndef TORRENT_USE_OPENSSL
+ TORRENT_UNUSED(ssl_context);
+#endif
+
+ if (sm)
+ {
+ utp_stream* str;
+#ifdef TORRENT_USE_OPENSSL
+ if (ssl_context)
+ {
+ s.instantiate<ssl_stream<utp_stream> >(ios, ssl_context);
+ str = &s.get<ssl_stream<utp_stream> >()->next_layer();
+ }
+ else
+#endif
+ {
+ s.instantiate<utp_stream>(ios);
+ str = s.get<utp_stream>();
+ }
+ str->set_impl(sm->new_utp_socket(str));
+ }
+#if TORRENT_USE_I2P
+ else if (ps.type == settings_pack::i2p_proxy)
+ {
+ | ||
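
For the instantiate_connection.cpp:43 item above (turning the two bools into flags), a minimal sketch of the shape that change could take; the names are made up for illustration.

// replaces "bool peer_connection, bool tracker_connection" in the
// instantiate_connection() signature with a single bitmask parameter
enum connection_flags_t
{
	flag_peer_connection    = 0x1,
	flag_tracker_connection = 0x2
};

// callers would then pass e.g. flag_peer_connection, and the implementation
// tests (flags & flag_peer_connection) etc.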
relevance 2 | ../src/peer_connection.cpp:2367 | this should probably be based on time instead of number of request messages. For a very high throughput connection, 300 may be a legitimate number of requests to have in flight when getting choked |
this should probably be based on time instead of number
of request messages. For a very high throughput connection, 300
may be a legitimate number of requests to have in flight when
-getting choked../src/peer_connection.cpp:2340 , "piece: %d s: %d l: %d invalid request"
+getting choked../src/peer_connection.cpp:2367 , "piece: %d s: %d l: %d invalid request"
, r.piece , r.start , r.length);
#endif
relevance 2 | ../src/peer_connection.cpp:3051 | since we throw away the queue entry once we issue the disk job, this may happen. Instead, we should keep the queue entry around, mark it as having been requested from disk and once the disk job comes back, discard it if it has been cancelled. Maybe even be able to cancel disk jobs? |
|
since we throw away the queue entry once we issue
+ | ||
relevance 2 | ../src/peer_connection.cpp:3081 | since we throw away the queue entry once we issue the disk job, this may happen. Instead, we should keep the queue entry around, mark it as having been requested from disk and once the disk job comes back, discard it if it has been cancelled. Maybe even be able to cancel disk jobs? |
since we throw away the queue entry once we issue
the disk job, this may happen. Instead, we should keep the
queue entry around, mark it as having been requested from
disk and once the disk job comes back, discard it if it has
-been cancelled. Maybe even be able to cancel disk jobs?../src/peer_connection.cpp:3051
+been cancelled. Maybe even be able to cancel disk jobs?../src/peer_connection.cpp:3081
std::vector<peer_request>::iterator i
= std::find(m_requests.begin(), m_requests.end(), r);
@@ -1064,6 +1117,8 @@ been cancelled. Maybe even be able to cancel disk jobs?../src/peer_conn
#ifndef TORRENT_DISABLE_DHT
m_ses.add_dht_node(udp::endpoint(
m_remote.address(), listen_port));
+#else
+ TORRENT_UNUSED(listen_port);
#endif
}
@@ -1072,12 +1127,10 @@ been cancelled. Maybe even be able to cancel disk jobs?../src/peer_conn
// -----------------------------
void peer_connection::incoming_have_all()
- {
- TORRENT_ASSERT(is_single_thread());
- | ||
relevance 2 | ../src/peer_connection.cpp:4709 | use a deadline_timer for timeouts. Don't rely on second_tick()! Hook this up to connect timeout as well. This would improve performance because of less work in second_tick(), and might let us remove ticking entirely eventually |
use a deadline_timer for timeouts. Don't rely on second_tick()!
+ | ||
relevance 2 | ../src/peer_connection.cpp:4762 | use a deadline_timer for timeouts. Don't rely on second_tick()! Hook this up to connect timeout as well. This would improve performance because of less work in second_tick(), and might let us remove ticking entirely eventually |
use a deadline_timer for timeouts. Don't rely on second_tick()!
Hook this up to connect timeout as well. This would improve performance
because of less work in second_tick(), and might let us remove ticking
entirely eventually ../src/peer_connection.cpp:4762
	connect_timeout += 20;
#endif
if (d > seconds(connect_timeout)
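
A standalone sketch (not libtorrent code) of the deadline_timer approach this TODO suggests, instead of counting seconds in second_tick(); a real peer_connection would keep the timer as a member and cancel it once the connection completes.

#include <boost/asio.hpp>
#include <cstdio>

void on_connect_timeout(boost::system::error_code const& ec)
{
	// the timer was cancelled because the connection completed in time
	if (ec == boost::asio::error::operation_aborted) return;
	std::printf("connect timed out\n");
	// a real peer_connection would disconnect the peer here
}

int main()
{
	boost::asio::io_service ios;
	boost::asio::deadline_timer connect_timer(ios);

	// arm the timeout once, when the connect is initiated
	connect_timer.expires_from_now(boost::posix_time::seconds(20));
	connect_timer.async_wait(&on_connect_timeout);

	// connect_timer.cancel() would be called from the connect handler on success
	ios.run();
	return 0;
}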
relevance 2 | ../src/peer_list.cpp:495 | it would be nice if there was a way to iterate over these torrent_peer objects in the order they are allocated in the pool instead. It would probably be more efficient |
|
it would be nice if there was a way to iterate over these
+ | ||
relevance 2 | ../src/peer_list.cpp:495 | it would be nice if there was a way to iterate over these torrent_peer objects in the order they are allocated in the pool instead. It would probably be more efficient |
it would be nice if there was a way to iterate over these
torrent_peer objects in the order they are allocated in the pool
instead. It would probably be more efficient../src/peer_list.cpp:495 , int session_time, torrent_state* state)
{
@@ -1181,17 +1234,17 @@ instead. It would probably be more efficient../src/peer_list.cpp:495 | ||
relevance 2 | ../src/piece_picker.cpp:1974 | make the 2048 limit configurable |
make the 2048 limit configurable../src/piece_picker.cpp:1974 // only one of rarest_first or sequential can be set
+ | ||
relevance 2 | ../src/piece_picker.cpp:1978 | make the 2048 limit configurable |
make the 2048 limit configurable../src/piece_picker.cpp:1978 // only one of rarest_first or sequential can be set
void piece_picker::pick_pieces(bitfield const& pieces
, std::vector<piece_block>& interesting_blocks, int num_blocks
- , int prefer_contiguous_blocks, void* peer
+ , int prefer_contiguous_blocks, torrent_peer* peer
, int options, std::vector<int> const& suggested_pieces
, int num_peers
, counters& pc
) const
{
- TORRENT_ASSERT(peer == 0 || static_cast<torrent_peer*>(peer)->in_use);
+ TORRENT_ASSERT(peer == 0 || peer->in_use);
// prevent the number of partial pieces to grow indefinitely
// make this scale by the number of peers we have. For large
@@ -1232,15 +1285,15 @@ instead. It would probably be more efficient../src/peer_list.cpp:495 | ||
relevance 2 | ../src/piece_picker.cpp:2583 | the first_block returned here is the largest free range, not the first-fit range, which would be better |
the first_block returned here is the largest free range, not
-the first-fit range, which would be better../src/piece_picker.cpp:2583 , end(m_block_info.end()); i != end; ++i)
+ | ||
relevance 2 | ../src/piece_picker.cpp:2588 | the first_block returned here is the largest free range, not the first-fit range, which would be better |
the first_block returned here is the largest free range, not
+the first-fit range, which would be better../src/piece_picker.cpp:2588 , end(m_block_info.end()); i != end; ++i)
{
TORRENT_ASSERT(i->peer == 0 || static_cast<torrent_peer*>(i->peer)->in_use);
}
}
#endif
- void piece_picker::clear_peer(void* peer)
+ void piece_picker::clear_peer(torrent_peer* peer)
{
for (std::vector<block_info>::iterator i = m_block_info.begin()
, end(m_block_info.end()); i != end; ++i)
@@ -1255,7 +1308,7 @@ the first-fit range, which would be better../src/piece_picker.cpp:2583<
// and downloading blocks from this piece. Active means having a connection.
boost::tuple<bool, bool, int, int> piece_picker::requested_from(
piece_picker::downloading_piece const& p
- , int num_blocks_in_piece, void* peer) const
+ , int num_blocks_in_piece, torrent_peer* peer) const
{
bool exclusive = true;
bool exclusive_active = true;
@@ -1284,11 +1337,11 @@ the first-fit range, which would be better../src/piece_picker.cpp:2583<
exclusive = false;
if (info.state == piece_picker::block_info::state_requested
&& info.peer != 0)
- | ||
relevance 2 | ../src/piece_picker.cpp:3367 | it would be nice if this could be folded into lock_piece(); the main distinction is that this also maintains the m_num_passed counter and the passed_hash_check member. Is there ever a case where we call write_failed() without also locking the piece? Perhaps write_failed() should imply locking it. |
it would be nice if this could be folded into lock_piece()
+ | ||
relevance 2 | ../src/piece_picker.cpp:3369 | it would be nice if this could be folded into lock_piece(); the main distinction is that this also maintains the m_num_passed counter and the passed_hash_check member. Is there ever a case where we call write_failed() without also locking the piece? Perhaps write_failed() should imply locking it. |
it would be nice if this could be folded into lock_piece()
the main distinction is that this also maintains the m_num_passed
counter and the passed_hash_check member
Is there ever a case where we call write_failed() without also locking
the piece? Perhaps write_failed() should imply locking it. ../src/piece_picker.cpp:3369
	int state = m_piece_map[piece].download_queue();
if (state == piece_pos::piece_open) return;
std::vector<downloading_piece>::iterator i = find_dl_piece(state, piece);
if (i == m_downloads[state].end()) return;
@@ -1339,59 +1392,8 @@ the piece? Perhaps write_failed() should imply locking it.../src/piece_
if (info.state == block_info::state_finished) return;
if (info.state == block_info::state_writing) --i->writing;
- | ||
relevance 2 | ../src/session_impl.cpp:218 | find a better place for this function |
find a better place for this function../src/session_impl.cpp:218 *j.vec, j.peer->make_write_handler(boost::bind(
- &peer_connection::on_send_data, j.peer, _1, _2)));
- }
- else
- {
- if (j.recv_buf)
- {
- j.peer->get_socket()->async_read_some(boost::asio::buffer(j.recv_buf, j.buf_size)
- , j.peer->make_read_handler(boost::bind(
- &peer_connection::on_receive_data, j.peer, _1, _2)));
- }
- else
- {
- j.peer->get_socket()->async_read_some(j.read_vec
- , j.peer->make_read_handler(boost::bind(
- &peer_connection::on_receive_data, j.peer, _1, _2)));
- }
- }
-}
-
-proxy_settings::proxy_settings(settings_pack const& sett)
- {
- hostname = sett.get_str(settings_pack::proxy_hostname);
- username = sett.get_str(settings_pack::proxy_username);
- password = sett.get_str(settings_pack::proxy_password);
- type = sett.get_int(settings_pack::proxy_type);
- port = sett.get_int(settings_pack::proxy_port);
- proxy_hostnames = sett.get_bool(settings_pack::proxy_hostnames);
- proxy_peer_connections = sett.get_bool(
- settings_pack::proxy_peer_connections);
-}
-
-proxy_settings::proxy_settings(aux::session_settings const& sett)
-{
- hostname = sett.get_str(settings_pack::proxy_hostname);
- username = sett.get_str(settings_pack::proxy_username);
- password = sett.get_str(settings_pack::proxy_password);
- type = sett.get_int(settings_pack::proxy_type);
- port = sett.get_int(settings_pack::proxy_port);
- proxy_hostnames = sett.get_bool(settings_pack::proxy_hostnames);
- proxy_peer_connections = sett.get_bool(
- settings_pack::proxy_peer_connections);
-}
-
-namespace aux {
-
- void session_impl::init_peer_class_filter(bool unlimited_local)
- {
- // set the default peer_class_filter to use the local peer class
- // for peers on local networks
- boost::uint32_t lfilter = 1 << m_local_peer_class;
- | ||
relevance 2 | ../src/session_impl.cpp:464 | is there a reason not to move all of this into init()? and just post it to the io_service? |
is there a reason not to move all of this into init()? and just
-post it to the io_service?../src/session_impl.cpp:464 m_posting_torrent_updates = false;
+ | ||
relevance 2 | ../src/session_impl.cpp:457 | is there a reason not to move all of this into init()? and just post it to the io_service? |
is there a reason not to move all of this into init()? and just
+post it to the io_service?../src/session_impl.cpp:457 m_posting_torrent_updates = false;
#endif
m_udp_socket.set_rate_limit(m_settings.get_int(settings_pack::dht_upload_rate_limit));
@@ -1424,8 +1426,9 @@ post it to the io_service?../src/session_impl.cpp:464 | ||
relevance 2 | ../src/session_impl.cpp:835 | if the DHT is enabled, it should probably be restarted here. maybe it should even be deferred to not be started until the client has had a chance to pass in the dht state |
if the DHT is enabled, it should probably be restarted here.
-maybe it should even be deferred to not be started until the client
-has had a chance to pass in the dht state../src/session_impl.cpp:835 if (val) m_settings.set_int(settings_pack::allowed_enc_level, val.int_value());
- }
-#endif
-
- settings = e->dict_find_dict("settings");
- if (settings)
- {
- boost::shared_ptr<settings_pack> pack = load_pack_from_dict(settings);
- apply_settings_pack(pack);
- }
-
- // in case we just set a socks proxy, we might have to
- // open the socks incoming connection
- if (!m_socks_listen_socket) open_new_incoming_socks_connection();
- m_udp_socket.set_proxy_settings(proxy());
-
-#ifndef TORRENT_DISABLE_DHT
- settings = e->dict_find_dict("dht state");
- if (settings)
- {
- m_dht_state = settings;
- }
-#endif
-
-#ifndef TORRENT_NO_DEPRECATE
- settings = e->dict_find_list("feeds");
- if (settings)
- {
- m_feeds.reserve(settings.list_size());
- for (int i = 0; i < settings.list_size(); ++i)
- {
- if (settings.list_at(i).type() != bdecode_node::dict_t) continue;
- boost::shared_ptr<feed> f(new_feed(*this, feed_settings()));
- f->load_state(settings.list_at(i));
- f->update_feed();
- m_feeds.push_back(f);
- }
- update_rss_feeds();
- }
-#endif
-
-#ifndef TORRENT_DISABLE_EXTENSIONS
- for (ses_extension_list_t::iterator i = m_ses_extensions.begin()
- , end(m_ses_extensions.end()); i != end; ++i)
- {
- TORRENT_TRY {
- (*i)->load_state(*e);
- } TORRENT_CATCH(std::exception&) {}
- }
-#endif
- }
- | ||
relevance 2 | ../src/session_impl.cpp:1884 | the udp socket(s) should be using the same generic mechanism and not be restricted to a single one; we should open one listen socket for each entry in the listen_interfaces list |
the udp socket(s) should be using the same generic
+ | ||
relevance 2 | ../src/session_impl.cpp:1923 | the udp socket(s) should be using the same generic mechanism and not be restricted to a single one; we should open one listen socket for each entry in the listen_interfaces list |
the udp socket(s) should be using the same generic
mechanism and not be restricted to a single one;
we should open one listen socket for each entry in the
listen_interfaces list ../src/session_impl.cpp:1923
	}
#endif // TORRENT_USE_OPENSSL
}
#endif // TORRENT_USE_IPV6
relevance 2 | ../src/session_impl.cpp:1976 | use bind_to_device in udp_socket |
|
use bind_to_device in udp_socket../src/session_impl.cpp:1976 if (listen_port_retries > 0)
+ | ||
relevance 2 | ../src/session_impl.cpp:2012 | use bind_to_device in udp_socket |
use bind_to_device in udp_socket../src/session_impl.cpp:2012#endif
+ if (listen_port_retries > 0)
{
m_listen_interface.port(m_listen_interface.port() + 1);
--listen_port_retries;
@@ -1563,12 +1513,11 @@ listen_interfaces list../src/session_impl.cpp:1884 m_ssl_udp_socket.bind(ssl_bind_if, ec);
if (ec)
{
relevance 2 | ../src/session_impl.cpp:2032 | use bind_to_device in udp_socket |
|
use bind_to_device in udp_socket../src/session_impl.cpp:2032 if (ssl_port != 0)
+ {
+ m_ssl_udp_socket.bind(ssl_bind_if, ec);
+ if (ec)
{
- if (m_alerts.should_post<listen_succeeded_alert>())
- m_alerts.emplace_alert<listen_succeeded_alert>(
- tcp::endpoint(ssl_bind_if.address(), ssl_bind_if.port())
- , listen_succeeded_alert::utp_ssl);
- }
- }
-#endif // TORRENT_USE_OPENSSL
-
- | ||
relevance 2 | ../src/session_impl.cpp:2002 | use bind_to_device in udp_socket |
use bind_to_device in udp_socket../src/session_impl.cpp:2002 , print_endpoint(m_listen_interface).c_str(), ec.message().c_str());
+#ifndef TORRENT_DISABLE_LOGGING
+ session_log("SSL: cannot bind to UDP interface \"%s\": %s"
+ , print_endpoint(m_listen_interface).c_str(), ec.message().c_str());
#endif
if (m_alerts.should_post<listen_failed_alert>())
{
@@ -1604,18 +1550,12 @@ listen_interfaces list../src/session_impl.cpp:1884 m_udp_socket.bind(udp::endpoint(m_listen_interface.address(), m_listen_interface.port()), ec);
- if (ec)
+ m_udp_socket.bind(udp::endpoint(m_listen_interface.address()
+ , m_listen_interface.port()), ec);
+ if (ec)
{
#ifndef TORRENT_DISABLE_LOGGING
session_log("cannot bind to UDP interface \"%s\": %s"
relevance 2 | ../src/session_impl.cpp:3446 | make a list for torrents that want to be announced on the DHT so we don't have to loop over all torrents, just to find the ones that want to announce |
|
make a list for torrents that want to be announced on the DHT so we
-don't have to loop over all torrents, just to find the ones that want to announce../src/session_impl.cpp:3446 if (!m_dht_torrents.empty())
+ // we made it! now post all the listen_succeeded_alerts
+
+ | ||
relevance 2 | ../src/session_impl.cpp:3511 | make a list for torrents that want to be announced on the DHT so we don't have to loop over all torrents, just to find the ones that want to announce |
make a list for torrents that want to be announced on the DHT so we
+don't have to loop over all torrents, just to find the ones that want to announce../src/session_impl.cpp:3511 if (!m_dht_torrents.empty())
{
boost::shared_ptr<torrent> t;
do
@@ -1697,9 +1636,9 @@ don't have to loop over all torrents, just to find the ones that want to announc
if (m_torrents.empty()) return;
if (m_next_lsd_torrent == m_torrents.end())
- | ||
relevance 2 | ../src/storage.cpp:921 | is this risky? The upper layer will assume we have the whole file. Perhaps we should verify that at least the size of the file is correct |
is this risky? The upper layer will assume we have the
+ | ||
relevance 2 | ../src/storage.cpp:941 | is this risky? The upper layer will assume we have the whole file. Perhaps we should verify that at least the size of the file is correct |
is this risky? The upper layer will assume we have the
whole file. Perhaps we should verify that at least the size
-of the file is correct../src/storage.cpp:921 if (links)
+of the file is correct../src/storage.cpp:941 if (links)
{
// if this is a mutable torrent, and we need to pick up some files
// from other torrents, do that now. Note that there is an inherent
relevance 2 | ../src/torrent.cpp:667 | post alert |
|
post alert../src/torrent.cpp:667
+ | ||
relevance 2 | ../src/torrent.cpp:676 | post alert |
post alert../src/torrent.cpp:676
state_updated();
set_state(torrent_status::downloading);
relevance 2 | ../src/torrent.cpp:3317 | this looks suspicious. Figure out why it makes sense to use the first IP in this list and leave a comment here |
|
this looks suspicious. Figure out why it makes sense to use the
-first IP in this list and leave a comment here../src/torrent.cpp:3317 m_incomplete = incomplete;
+ | ||
relevance 2 | ../src/torrent.cpp:1902 | add a unit test where we don't have metadata, connect to a peer that sends a bitfield that's too large, then we get the metadata |
add a unit test where we don't have metadata, connect to a peer
+that sends a bitfield that's too large, then we get the metadata../src/torrent.cpp:1902 {
+ read_resume_data(m_resume_data->node);
+ }
+ }
+
+#if TORRENT_USE_ASSERTS
+ m_resume_data_loaded = true;
+#endif
+
+ construct_storage();
+
+ if (m_share_mode && valid_metadata())
+ {
+ // in share mode, all pieces have their priorities initialized to 0
+ m_file_priority.clear();
+ m_file_priority.resize(m_torrent_file->num_files(), 0);
+ }
+
+ // it's important to initialize the peers early, because this is what will
+ // fix up their have-bitmasks to have the correct size
+ if (!m_connections_initialized)
+ {
+ m_connections_initialized = true;
+ // all peer connections have to initialize themselves now that the metadata
+ // is available
+ // copy the peer list since peers may disconnect and invalidate
+ // m_connections as we initialize them
+ std::vector<peer_connection*> peers = m_connections;
+ for (torrent::peer_iterator i = peers.begin();
+ i != peers.end(); ++i)
+ {
+ peer_connection* pc = *i;
+ if (pc->is_disconnecting()) continue;
+ pc->on_metadata_impl();
+ if (pc->is_disconnecting()) continue;
+ pc->init();
+ }
+ }
+
+ // if we've already loaded file priorities, don't load piece priorities,
+ // they will interfere.
+ if (!m_seed_mode && m_resume_data && m_file_priority.empty())
+ {
+ bdecode_node piece_priority = m_resume_data->node
+ .dict_find_string("piece_priority");
+
+ if (piece_priority && piece_priority.string_length()
+ == m_torrent_file->num_pieces())
+ {
+ char const* p = piece_priority.string_ptr();
+ for (int i = 0; i < piece_priority.string_length(); ++i)
+ | ||
relevance 2 | ../src/torrent.cpp:3366 | this looks suspicious. Figure out why it makes sense to use the first IP in this list and leave a comment here |
this looks suspicious. Figure out why it makes sense to use the
+first IP in this list and leave a comment here../src/torrent.cpp:3366 m_incomplete = incomplete;
m_downloaded = downloaded;
update_auto_sequential();
@@ -1820,7 +1811,7 @@ first IP in this list and leave a comment here../src/torrent.cpp:3317
TORRENT_ASSERT(is_single_thread());
INVARIANT_CHECK;
- TORRENT_ASSERT(r.kind == tracker_request::announce_request);
+ TORRENT_ASSERT(0 == (r.kind & tracker_request::scrape_request));
if (resp.external_ip != address() && !tracker_ips.empty())
m_ses.set_external_address(resp.external_ip
@@ -1853,8 +1844,8 @@ first IP in this list and leave a comment here../src/torrent.cpp:3317
if ((!resp.trackerid.empty()) && (ae->trackerid != resp.trackerid))
{
- | ||
relevance 2 | ../src/torrent.cpp:4822 | abort lookups this torrent has made via the session host resolver interface |
abort lookups this torrent has made via the
-session host resolver interface../src/torrent.cpp:4822 // files belonging to the torrents
+ | ||
relevance 2 | ../src/torrent.cpp:4885 | abort lookups this torrent has made via the session host resolver interface |
abort lookups this torrent has made via the
+session host resolver interface../src/torrent.cpp:4885 // files belonging to the torrents
disconnect_all(errors::torrent_aborted, op_bittorrent);
// post a message to the main thread to destruct
relevance 2 | ../src/torrent.cpp:4968 | the tracker login feature should probably be deprecated |
|
the tracker login feature should probably be deprecated../src/torrent.cpp:4968 alerts().emplace_alert<file_renamed_alert>(get_handle()
+ | ||
relevance 2 | ../src/torrent.cpp:5031 | the tracker login feature should probably be deprecated |
the tracker login feature should probably be deprecated../src/torrent.cpp:5031 alerts().emplace_alert<file_renamed_alert>(get_handle()
, j->buffer.string, j->piece);
m_torrent_file->rename_file(j->piece, j->buffer.string);
}
@@ -1933,11 +1924,12 @@ session host resolver interface../src/torrent.cpp:4822 | ||
relevance 2 | ../src/torrent.cpp:7897 | if peer is a really good peer, maybe we shouldn't disconnect it perhaps this logic should be disabled if we have too many idle peers (with some definition of idle) |
if peer is a really good peer, maybe we shouldn't disconnect it
+ | ||
relevance 2 | ../src/torrent.cpp:8018 | if peer is a really good peer, maybe we shouldn't disconnect it perhaps this logic should be disabled if we have too many idle peers (with some definition of idle) |
if peer is a really good peer, maybe we shouldn't disconnect it
perhaps this logic should be disabled if we have too many idle peers
-(with some definition of idle)../src/torrent.cpp:7897#ifndef TORRENT_DISABLE_LOGGING
+(with some definition of idle)../src/torrent.cpp:8018#ifndef TORRENT_DISABLE_LOGGING
debug_log("incoming peer (%d)", int(m_connections.size()));
#endif
@@ -2009,57 +2000,6 @@ perhaps this logic should be disabled if we have too many idle peers
}
#if TORRENT_USE_INVARIANT_CHECKS
- | ||
relevance 2 | ../src/torrent.cpp:9834 | if residual is not used, remove it |
if residual is not used, remove it../src/torrent.cpp:9834 }
-
- int torrent::finished_time() const
- {
- return m_finished_time + ((!is_finished() || is_paused()) ? 0
- : (m_ses.session_time() - m_became_finished));
- }
-
- int torrent::active_time() const
- {
- return m_active_time + (is_paused() ? 0
- : m_ses.session_time() - m_started);
- }
-
- int torrent::seeding_time() const
- {
- return m_seeding_time + ((!is_seed() || is_paused()) ? 0
- : m_ses.session_time() - m_became_seed);
- }
-
- void torrent::second_tick(int tick_interval_ms, int /* residual */)
- {
- TORRENT_ASSERT(want_tick());
- TORRENT_ASSERT(is_single_thread());
- INVARIANT_CHECK;
-
- boost::weak_ptr<torrent> self(shared_from_this());
-
-#ifndef TORRENT_DISABLE_EXTENSIONS
- for (extension_list_t::iterator i = m_extensions.begin()
- , end(m_extensions.end()); i != end; ++i)
- {
- TORRENT_TRY {
- (*i)->tick();
- } TORRENT_CATCH (std::exception&) {}
- }
-
- if (m_abort) return;
-#endif
-
- // if we're in upload only mode and we're auto-managed
- // leave upload mode every 10 minutes hoping that the error
- // condition has been fixed
- if (m_upload_mode && m_auto_managed
- && int(m_ses.session_time() - m_upload_mode_time)
- >= settings().get_int(settings_pack::optimistic_disk_retry))
- {
- set_upload_mode(false);
- }
-
- if (m_storage_tick > 0 && is_loaded())
| ||
relevance 2 | ../src/tracker_manager.cpp:200 | some of these arguments could probably be moved to the tracker request itself. like the ip_filter and settings |
some of these arguments could probably be moved to the
tracker request itself. like the ip_filter and settings../src/tracker_manager.cpp:200 , interval == 0 ? min_interval : interval);
close();
@@ -2112,9 +2052,9 @@ tracker request itself. like the ip_filter and settings../src/tracker_m
}
void tracker_manager::received_bytes(int bytes)
- | ||
relevance 2 | ../src/udp_socket.cpp:734 | the udp_socket should really just be a single socket, and the session should support having more than one, just like with TCP sockets for now, just make bind failures non-fatal |
the udp_socket should really just be a single socket, and the
+ | ||
relevance 2 | ../src/udp_socket.cpp:744 | the udp_socket should really just be a single socket, and the session should support having more than one, just like with TCP sockets for now, just make bind failures non-fatal |
the udp_socket should really just be a single socket, and the
session should support having more than one, just like with TCP sockets
-for now, just make bind failures non-fatal../src/udp_socket.cpp:734 }
+for now, just make bind failures non-fatal../src/udp_socket.cpp:744 }
if (m_ipv4_sock.is_open()) m_ipv4_sock.close(ec);
#if TORRENT_USE_IPV6
relevance 2 | ../src/utp_socket_manager.cpp:259 | we may want to take ec into account here. possibly close connections quicker |
|
we may want to take ec into account here. possibly close
-connections quicker../src/utp_socket_manager.cpp:259 error_code ec;
- m_interfaces = enum_net_interfaces(m_sock.get_io_service(), ec);
- if (ec) return socket_ep;
+ | ||
relevance 2 | ../src/utp_socket_manager.cpp:235 | we may want to take ec into account here. possibly close connections quicker |
we may want to take ec into account here. possibly close
+connections quicker../src/utp_socket_manager.cpp:235 error_code err;
+ m_interfaces = enum_net_interfaces(m_sock.get_io_service(), err);
+ if (err) return socket_ep;
}
for (std::vector<ip_interface>::iterator i = m_interfaces.begin()
@@ -2295,7 +2235,7 @@ connections quicker../src/utp_socket_manager.cpp:259../src/utp_socket_manager.cpp:259relevance 2 | ../src/utp_stream.cpp:353 | it would be nice if not everything would have to be public here |
|
it would be nice if not everything would have to be public here../src/utp_stream.cpp:353 void incoming(boost::uint8_t const* buf, int size, packet* p, time_point now);
+ | ||
relevance 2 | ../src/utp_stream.cpp:389 | it would be nice if not everything would have to be public here |
it would be nice if not everything would have to be public here../src/utp_stream.cpp:389 void incoming(boost::uint8_t const* buf, int size, packet* p, time_point now);
void do_ledbat(int acked_bytes, int delay, int in_flight);
int packet_timeout() const;
bool test_socket_state();
@@ -2372,9 +2312,9 @@ private:
{
iovec_t(void* b, size_t l): buf(b), len(l) {}
void* buf;
- | ||
relevance 2 | ../src/web_peer_connection.cpp:622 | just make this peer not have the pieces associated with the file we just requested. Only when it doesn't have any of the file do the following |
just make this peer not have the pieces
+ | ||
relevance 2 | ../src/web_peer_connection.cpp:630 | just make this peer not have the pieces associated with the file we just requested. Only when it doesn't have any of the file do the following |
just make this peer not have the pieces
associated with the file we just requested. Only
-when it doesn't have any of the file do the following../src/web_peer_connection.cpp:622 ++m_num_responses;
+when it doesn't have any of the file do the following../src/web_peer_connection.cpp:630 ++m_num_responses;
if (m_parser.connection_close())
{
@@ -2425,9 +2365,9 @@ when it doesn't have any of the file do the following../src/web_peer_co
{
// we should not try this server again.
t->remove_web_seed(this, errors::missing_location, op_bittorrent, 2);
- | ||
relevance 2 | ../src/web_peer_connection.cpp:681 | create a mapping of file-index to redirection URLs. Use that to form URLs instead. Support to reconnect to a new server without destructing this peer_connection |
create a mapping of file-index to redirection URLs. Use that to form
+ | ||
relevance 2 | ../src/web_peer_connection.cpp:689 | create a mapping of file-index to redirection URLs. Use that to form URLs instead. Support to reconnect to a new server without destructing this peer_connection |
create a mapping of file-index to redirection URLs. Use that to form
URLs instead. Support to reconnect to a new server without destructing this
-peer_connection../src/web_peer_connection.cpp:681 == dl_target);
+peer_connection../src/web_peer_connection.cpp:689 == dl_target);
#endif
return;
}
@@ -2478,59 +2418,59 @@ peer_connection../src/web_peer_connection.cpp:681relevance 2 | ../src/kademlia/node.cpp:71 | make this configurable in dht_settings |
|
make this configurable in dht_settings../src/kademlia/node.cpp:71#include "libtorrent/random.hpp"
-#include "libtorrent/aux_/session_impl.hpp"
-#include "libtorrent/alert_types.hpp" // for dht_lookup
-#include "libtorrent/performance_counters.hpp" // for counters
-
-#include "libtorrent/kademlia/node_id.hpp"
-#include "libtorrent/kademlia/rpc_manager.hpp"
-#include "libtorrent/kademlia/routing_table.hpp"
-#include "libtorrent/kademlia/node.hpp"
-#include "libtorrent/kademlia/dht_observer.hpp"
-
-#include "libtorrent/kademlia/refresh.hpp"
-#include "libtorrent/kademlia/get_peers.hpp"
-#include "libtorrent/kademlia/get_item.hpp"
-
-namespace libtorrent { namespace dht
-{
-
-using detail::write_endpoint;
-
-enum { announce_interval = 30 };
-
-namespace {
-
-// remove peers that have timed out
-void purge_peers(std::set<peer_entry>& peers)
-{
- for (std::set<peer_entry>::iterator i = peers.begin()
- , end(peers.end()); i != end;)
+ | ||
relevance 2 | ../src/kademlia/dht_storage.cpp:106 | make this configurable in dht_settings |
make this configurable in dht_settings../src/kademlia/dht_storage.cpp:106 // this is a group. It contains a set of group members
+ struct torrent_entry
{
- // the peer has timed out
- if (i->added + minutes(int(announce_interval * 1.5f)) < aux::time_now())
- peers.erase(i++);
- else
- ++i;
- }
-}
+ std::string name;
+ std::set<peer_entry> peers;
+ };
-void nop() {}
+#ifndef TORRENT_NO_DEPRECATE
+ struct count_peers
+ {
+ int* count;
+ count_peers(int* c): count(c) {}
+ void operator()(std::pair<libtorrent::sha1_hash
+ , torrent_entry> const& t)
+ {
+ *count += t.second.peers.size();
+ }
+ };
+#endif
-node_id calculate_node_id(node_id const& nid, dht_observer* observer)
-{
- address external_address;
- if (observer) external_address = observer->external_address();
- if (nid == (node_id::min)() || !verify_id(nid, external_address))
- return generate_id(external_address);
-
- return nid;
-}
+ enum { announce_interval = 30 };
+
+ struct dht_immutable_item
+ {
+ dht_immutable_item() : value(0), num_announcers(0), size(0) {}
+ // malloced space for the actual value
+ char* value;
+ // this counts the number of IPs we have seen
+ // announcing this item, this is used to determine
+ // popularity if we reach the limit of items to store
+ bloom_filter<128> ips;
+ // the last time we heard about this
+ time_point last_seen;
+ // number of IPs in the bloom filter
+ int num_announcers;
+ // size of malloced space pointed to by value
+ int size;
+ };
-} // anonymous namespace
- | ||
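A minimal sketch of what lifting the hard-coded announce_interval into a settings struct could look like; dht_settings_sketch and its field name are illustrative only, not the real dht_settings layout:

#include <chrono>

// illustrative only; the real dht_settings has many other members
struct dht_settings_sketch
{
	int announce_interval_minutes = 30;
};

// peers older than 1.5x the announce interval would be purged,
// mirroring the purge_peers() logic shown above
std::chrono::minutes peer_timeout(dht_settings_sketch const& sett)
{
	return std::chrono::minutes(sett.announce_interval_minutes * 3 / 2);
}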
relevance 2 | ../src/kademlia/node.cpp:535 | it would be nice to have a bias towards node-id prefixes that are missing in the bucket |
it would be nice to have a bias towards node-id prefixes that
-are missing in the bucket../src/kademlia/node.cpp:535 // this shouldn't happen
+ struct ed25519_public_key { char bytes[item_pk_len]; };
+
+ struct dht_mutable_item : dht_immutable_item
+ {
+ char sig[item_sig_len];
+ boost::int64_t seq;
+ ed25519_public_key key;
+ char* salt;
+ int salt_size;
+ };
+
+ void touch_item(dht_immutable_item* f, address const& address)
+ | ||
relevance 2 | ../src/kademlia/node.cpp:549 | it would be nice to have a bias towards node-id prefixes that are missing in the bucket |
it would be nice to have a bias towards node-id prefixes that
+are missing in the bucket../src/kademlia/node.cpp:549 // this shouldn't happen
TORRENT_ASSERT(m_id != ne->id);
if (ne->id == m_id) return;
@@ -2560,61 +2500,61 @@ void node::send_single_refresh(udp::endpoint const& ep, int bucket
boost::intrusive_ptr<traversal_algorithm> algo(
new traversal_algorithm(*this, (node_id::min)()));
observer_ptr o(new (ptr) ping_observer(algo, ep, id));
-#if defined TORRENT_DEBUG || TORRENT_RELEASE_ASSERTS
+#if defined TORRENT_DEBUG || defined TORRENT_RELEASE_ASSERTS
o->m_in_constructor = false;
#endif
entry e;
e["y"] = "q";
entry& a = e["a"];
- // use get_peers instead of find_node. We'll get nodes in the response
- // either way.
- e["q"] = "get_peers";
- a["info_hash"] = target.to_string();
- m_counters.inc_stats_counter(counters::dht_get_peers_out);
-
-// e["q"] = "find_node";
-// a["target"] = target.to_string();
- m_rpc.invoke(e, ep, o);
-}
-
-time_duration node::connection_timeout()
-{
- time_duration d = m_rpc.tick();
- | ||
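A minimal sketch of the proposed bias, with node ids reduced to a single byte for illustration; real code would operate on the 160-bit node_id and the bucket's prefix length:

#include <bitset>
#include <cstdlib>
#include <vector>

// sketch only: ids shrunk to one byte, the bucket "prefix" is the high nibble
using tiny_id = unsigned char;

// pick a refresh target whose high nibble is one the bucket does not
// currently contain, falling back to a uniformly random nibble
tiny_id biased_refresh_target(std::vector<tiny_id> const& bucket_ids)
{
	std::bitset<16> seen;
	for (std::size_t i = 0; i < bucket_ids.size(); ++i)
		seen.set(bucket_ids[i] >> 4);

	std::vector<int> missing;
	for (int i = 0; i < 16; ++i)
		if (!seen.test(i)) missing.push_back(i);

	int const nibble = missing.empty()
		? std::rand() % 16
		: missing[std::rand() % int(missing.size())];
	return tiny_id((nibble << 4) | (std::rand() & 0xf));
}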
relevance 2 | ../src/kademlia/node.cpp:625 | use the non deprecated function instead of this one |
use the non deprecated function instead of this one../src/kademlia/node.cpp:625 return d;
-}
-
-void node::status(std::vector<dht_routing_bucket>& table
- , std::vector<dht_lookup>& requests)
-{
- mutex_t::scoped_lock l(m_mutex);
-
- m_table.status(table);
-
+ if (m_table.is_full(bucket))
+ {
+ // current bucket is full, just ping it.
+ e["q"] = "ping";
+ m_counters.inc_stats_counter(counters::dht_ping_out);
+ }
+ else
+ {
+ // use get_peers instead of find_node. We'll get nodes in the response
+ // either way.
+ e["q"] = "get_peers";
+ a["info_hash"] = target.to_string();
+ m_counters.inc_stats_counter(counters::dht_get_peers_out);
+ }
+ | ||
relevance 2 | ../src/kademlia/node.cpp:627 | use the non deprecated function instead of this one |
use the non deprecated function instead of this one../src/kademlia/node.cpp:627
for (std::set<traversal_algorithm*>::iterator i = m_running_requests.begin()
, end(m_running_requests.end()); i != end; ++i)
{
requests.push_back(dht_lookup());
- dht_lookup& l = requests.back();
- (*i)->status(l);
+ dht_lookup& lookup = requests.back();
+ (*i)->status(lookup);
}
}
+void node::update_stats_counters(counters& c) const
+{
+ const dht_storage_counters& dht_cnt = m_storage->counters();
+ c.set_value(counters::dht_torrents, dht_cnt.torrents);
+ c.set_value(counters::dht_peers, dht_cnt.peers);
+ c.set_value(counters::dht_immutable_data, dht_cnt.immutable_data);
+ c.set_value(counters::dht_mutable_data, dht_cnt.mutable_data);
+}
+
#ifndef TORRENT_NO_DEPRECATE
void node::status(session_status& s)
{
mutex_t::scoped_lock l(m_mutex);
m_table.status(s);
- s.dht_torrents = int(m_map.size());
+ s.dht_torrents = int(m_storage->num_torrents());
s.active_requests.clear();
s.dht_total_allocations = m_rpc.num_allocated_observers();
for (std::set<traversal_algorithm*>::iterator i = m_running_requests.begin()
, end(m_running_requests.end()); i != end; ++i)
{
s.active_requests.push_back(dht_lookup());
- dht_lookup& l = s.active_requests.back();
- (*i)->status(l);
+ dht_lookup& lookup = s.active_requests.back();
+ (*i)->status(lookup);
}
}
#endif
@@ -2625,14 +2565,14 @@ void node::lookup_peers(sha1_hash const& info_hash, entry& reply
if (m_observer)
m_observer->get_peers(info_hash);
- table_t::const_iterator i = m_map.lower_bound(info_hash);
- if (i == m_map.end()) return;
- if (i->first != info_hash) return;
+ m_storage->get_peers(info_hash, noseed, scrape, reply);
+}
- torrent_entry const& v = i->second;
-
- if (!v.name.empty()) reply["n"] = v.name;
- | ||
relevance 2 | ../src/kademlia/node.cpp:958 | find_node should write directly to the response entry |
find_node should write directly to the response entry../src/kademlia/node.cpp:958 , int(reply["values"].list().size()));
+void TORRENT_EXTRA_EXPORT write_nodes_entry(entry& r, nodes_t const& nodes)
+{
+ entry& n = r["nodes"];
+ std::back_insert_iterator<std::string> out(n.string());
+ | ||
relevance 2 | ../src/kademlia/node.cpp:890 | find_node should write directly to the response entry |
find_node should write directly to the response entry../src/kademlia/node.cpp:890 , int(reply["values"].list().size()));
}
#endif
}
@@ -2733,8 +2673,8 @@ boost::tuple<int, int, int> routing_table::size() const
{
nodes += i->live_nodes.size();
for (bucket_t::const_iterator k = i->live_nodes.begin()
- , end(i->live_nodes.end()); k != end; ++k)
- | ||
relevance 2 | ../src/kademlia/routing_table.cpp:955 | move the lowest priority nodes to the replacement bucket |
move the lowest priority nodes to the replacement bucket../src/kademlia/routing_table.cpp:955 bucket_t& b = m_buckets[bucket_index].live_nodes;
+ , end2(i->live_nodes.end()); k != end2; ++k)
+ | ||
relevance 2 | ../src/kademlia/routing_table.cpp:960 | move the lowest priority nodes to the replacement bucket |
move the lowest priority nodes to the replacement bucket../src/kademlia/routing_table.cpp:960 bucket_t& b = m_buckets[bucket_index].live_nodes;
bucket_t& rb = m_buckets[bucket_index].replacements;
	// move any node whose (160 - distance_exp(m_id, id)) >= (i - m_buckets.begin())
@@ -2785,13 +2725,13 @@ boost::tuple<int, int, int> routing_table::size() const
else
new_replacement_bucket.push_back(*j);
}
- | ||
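One way the demotion could look, sketched with a simplified node entry; fail_count and rtt here stand in for whatever priority metric the routing table actually uses:

#include <algorithm>
#include <vector>

// hypothetical, simplified node entry; the real bucket stores node_entry
struct node_stub
{
	int fail_count;
	int rtt; // round-trip time in ms, lower is better
};

// demote the lowest-priority live node into the replacement bucket,
// instead of dropping the incoming node
void demote_lowest(std::vector<node_stub>& live, std::vector<node_stub>& replacements)
{
	if (live.empty()) return;
	std::vector<node_stub>::iterator const lowest
		= std::min_element(live.begin(), live.end()
		, [](node_stub const& a, node_stub const& b)
		{
			// more failures or higher RTT means lower priority
			if (a.fail_count != b.fail_count) return a.fail_count > b.fail_count;
			return a.rtt > b.rtt;
		});
	replacements.push_back(*lowest);
	live.erase(lowest);
}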
relevance 2 | ../include/libtorrent/alert_types.hpp:1415 | should the alert baseclass have this object instead? |
should the alert baseclass have this object instead?../include/libtorrent/alert_types.hpp:1415 {
+ | ||
relevance 2 | ../include/libtorrent/alert_types.hpp:1427 | should the alert baseclass have this object instead? |
should the alert baseclass have this object instead?../include/libtorrent/alert_types.hpp:1427 {
// internal
portmap_log_alert(aux::stack_allocator& alloc, int t, const char* m);
TORRENT_DEFINE_ALERT(portmap_log_alert, 52)
- static const int static_category = alert::port_mapping_notification;
+ static const int static_category = alert::port_mapping_log_notification;
virtual std::string message() const;
int map_type;
@@ -2879,8 +2819,8 @@ POSSIBILITY OF SUCH DAMAGE.
#endif
- | ||
relevance 2 | ../include/libtorrent/enum_net.hpp:142 | this could be done more efficiently by just looking up the interface with the given name, maybe even with if_nametoindex() |
this could be done more efficiently by just looking up
-the interface with the given name, maybe even with if_nametoindex()../include/libtorrent/enum_net.hpp:142
+ | ||
relevance 2 | ../include/libtorrent/enum_net.hpp:143 | this could be done more efficiently by just looking up the interface with the given name, maybe even with if_nametoindex() |
this could be done more efficiently by just looking up
+the interface with the given name, maybe even with if_nametoindex()../include/libtorrent/enum_net.hpp:143
address ip = address::from_string(device_name, ec);
if (!ec)
{
@@ -2982,7 +2922,7 @@ namespace libtorrent {
void get_pointers(std::vector<T*>& out)
{
- | ||
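A sketch of the direct lookup the comment suggests, using the POSIX if_nametoindex() call; Windows would need the equivalent from its own headers:

#include <net/if.h>   // if_nametoindex (POSIX)
#include <string>

// returns 0 if no interface with that name exists
unsigned int interface_index(std::string const& device_name)
{
	return ::if_nametoindex(device_name.c_str());
}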
relevance 2 | ../include/libtorrent/peer_connection.hpp:1122 | rename this target queue size |
rename this target queue size../include/libtorrent/peer_connection.hpp:1122
+ | ||
relevance 2 | ../include/libtorrent/peer_connection.hpp:1112 | rename this target queue size |
rename this target queue size../include/libtorrent/peer_connection.hpp:1112
// the number of bytes send to the disk-io
// thread that hasn't yet been completely written.
int m_outstanding_writing_bytes;
@@ -3074,7 +3014,7 @@ probably be changed to 3 levels + dont-download../include/libtorrent/pi
#ifdef TORRENT_DEBUG_REFCOUNTS
// all the peers that have this piece
- std::set<const void*> have_peers;
+ std::set<const torrent_peer*> have_peers;
#endif
enum
@@ -3085,7 +3025,7 @@ probably be changed to 3 levels + dont-download../include/libtorrent/pi
#ifdef TORRENT_OPTIMIZE_MEMORY_USAGE
we_have_index = 0x3ffff,
#else
- | ||
relevance 2 | ../include/libtorrent/proxy_base.hpp:259 | use the resolver interface that has a built-in cache |
use the resolver interface that has a built-in cache../include/libtorrent/proxy_base.hpp:259 return m_sock.lowest_layer();
+ | ||
relevance 2 | ../include/libtorrent/proxy_base.hpp:260 | use the resolver interface that has a built-in cache |
use the resolver interface that has a built-in cache../include/libtorrent/proxy_base.hpp:260 return m_sock.lowest_layer();
}
next_layer_type& next_layer()
@@ -3112,11 +3052,7 @@ protected:
#endif
- | ||
relevance 2 | ../include/libtorrent/session_handle.hpp:72 | the ip filter should probably be saved here too |
the ip filter should probably be saved here too../include/libtorrent/session_handle.hpp:72 struct plugin;
- struct torrent_plugin;
- class torrent;
- struct ip_filter;
- class port_filter;
+ | ||
relevance 2 | ../include/libtorrent/session_handle.hpp:78 | the ip filter should probably be saved here too |
the ip filter should probably be saved here too../include/libtorrent/session_handle.hpp:78 class port_filter;
class alert;
#ifndef TORRENT_NO_DEPRECATE
@@ -3128,10 +3064,14 @@ protected:
struct TORRENT_EXPORT session_handle
{
+ session_handle() : m_impl(NULL) {}
+
session_handle(aux::session_impl* impl)
: m_impl(impl)
{}
+ bool is_valid() const { return m_impl; }
+
// flags that determines which aspects of the session should be
// saved when calling save_state().
@@ -3163,105 +3103,7 @@ protected:
save_peer_proxy = save_proxy,
save_web_proxy = save_proxy,
save_tracker_proxy = save_proxy
- | ||
relevance 2 | ../include/libtorrent/session_settings.hpp:56 | this type is only used internally now. move it to an internal header and make this type properly deprecated. |
this type is only used internally now. move it to an internal
-header and make this type properly deprecated.../include/libtorrent/session_settings.hpp:56#include "libtorrent/version.hpp"
-#include "libtorrent/config.hpp"
-#include "libtorrent/settings_pack.hpp"
-
-#include <boost/cstdint.hpp>
-#include <string>
-#include <vector>
-#include <utility>
-
-namespace libtorrent
-{
-
-#ifndef TORRENT_NO_DEPRECATE
-#define TORRENT_EXPORT_DEPRECATED TORRENT_EXPORT
-#else
-#define TORRENT_EXPORT_DEPRECATED
-#endif
-
- namespace aux { struct session_settings; }
-
-
- // The ``proxy_settings`` struct contains the information needed to
- // direct certain traffic to a proxy.
- struct TORRENT_EXPORT_DEPRECATED proxy_settings
- {
- // default constructs proxy settings, initializing it to the default
- // settings.
- proxy_settings() : type(0)
- , port(0), proxy_hostnames(true)
- , proxy_peer_connections(true)
- {}
-
- // construct the proxy_settings object from the settings
- // this constructor is implemented in session_impl.cpp
- proxy_settings(settings_pack const& sett);
- proxy_settings(aux::session_settings const& sett);
-
- // the name or IP of the proxy server. ``port`` is the port number the
- // proxy listens to. If required, ``username`` and ``password`` can be
- // set to authenticate with the proxy.
- std::string hostname;
-
- // when using a proxy type that requires authentication, the username
- // and password fields must be set to the credentials for the proxy.
- std::string username;
- std::string password;
-
-#ifndef TORRENT_NO_DEPRECATE
- // the type of proxy to use. Assign one of these to the
- // proxy_settings::type field.
- enum proxy_type
- | ||
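A sketch of how the type could be flagged deprecated for external users; EXAMPLE_DEPRECATED is a stand-in macro, not libtorrent's actual deprecation machinery:

#if defined __GNUC__ || defined __clang__
#define EXAMPLE_DEPRECATED __attribute__((deprecated))
#elif defined _MSC_VER
#define EXAMPLE_DEPRECATED __declspec(deprecated)
#else
#define EXAMPLE_DEPRECATED
#endif

// external code touching this type would now get a compiler warning, while an
// un-deprecated internal copy could live in an aux_ header
struct EXAMPLE_DEPRECATED proxy_settings_example
{
	int type;
	int port;
	proxy_settings_example() : type(0), port(0) {}
};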
relevance 2 | ../include/libtorrent/socket_type.hpp:324 | it would be nice to use aligned_storage here when building on c++11 |
it would be nice to use aligned_storage here when
-building on c++11../include/libtorrent/socket_type.hpp:324 sizeof(tcp::socket)
- , sizeof(socks5_stream)
- , sizeof(http_stream)
- , sizeof(utp_stream)
-#if TORRENT_USE_I2P
- , sizeof(i2p_stream)
-#else
- , 0
-#endif
-#ifdef TORRENT_USE_OPENSSL
- , sizeof(ssl_stream<tcp::socket>)
- , sizeof(ssl_stream<socks5_stream>)
- , sizeof(ssl_stream<http_stream>)
- , sizeof(ssl_stream<utp_stream>)
-#else
- , 0, 0, 0, 0
-#endif
- >::value
- };
-
- boost::int64_t m_data[(storage_size + sizeof(boost::int64_t) - 1)
- / sizeof(boost::int64_t)];
- };
-
- // returns true if this socket is an SSL socket
- bool is_ssl(socket_type const& s);
-
- // returns true if this is a uTP socket
- bool is_utp(socket_type const& s);
-
-#if TORRENT_USE_I2P
- // returns true if this is an i2p socket
- bool is_i2p(socket_type const& s);
-#endif
-
- // assuming the socket_type s is an ssl socket, make sure it
- // verifies the hostname in its SSL handshake
- void setup_ssl_hostname(socket_type& s, std::string const& hostname, error_code& ec);
-
- // properly shuts down SSL sockets. holder keeps s alive
- void async_shutdown(socket_type& s, boost::shared_ptr<void> holder);
-}
-
-#endif
-
- | ||
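With C++11, the hand-rolled boost::int64_t array could become std::aligned_union, which accounts for both the size and the strictest alignment of every candidate type; the placeholder types below stand in for tcp::socket, utp_stream and the other socket variants:

#include <type_traits>

// placeholders standing in for tcp::socket, utp_stream, ssl_stream<...>, etc.
struct type_a { char pad[64]; };
struct type_b { double pad[12]; };
struct type_c { long long pad[3]; };

struct variant_storage
{
	// sized and aligned for the largest / most strictly aligned candidate
	std::aligned_union<0, type_a, type_b, type_c>::type m_data;
};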
relevance 2 | ../include/libtorrent/socks5_stream.hpp:135 | add async_connect() that takes a hostname and port as well |
add async_connect() that takes a hostname and port as well../include/libtorrent/socks5_stream.hpp:135 if (m_dst_name.size() > 255)
m_dst_name.resize(255);
}
@@ -3312,7 +3154,7 @@ building on c++11../include/libtorrent/socket_type.hpp:324 | ||
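A self-contained sketch of the proposed overload; the stand-in class keeps only the pieces needed to show the idea, and the handler signature is illustrative:

#include <functional>
#include <string>

// minimal stand-in for the stream; everything not shown here is assumed
struct socks5_stream_sketch
{
	std::string m_dst_name;
	int m_port = 0;

	// proposed overload: accept a hostname and port directly, so callers
	// don't have to resolve the destination themselves
	void async_connect(std::string const& hostname, int port
		, std::function<void(int /* error */)> handler)
	{
		m_dst_name = hostname;
		if (m_dst_name.size() > 255) m_dst_name.resize(255);
		m_port = port;
		// the real implementation would now connect to the proxy and issue a
		// CONNECT request with ATYP=3 (domain name); here we just complete
		handler(0);
	}
};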
relevance 2 | ../include/libtorrent/tracker_manager.hpp:278 | this class probably doesn't need to have virtual functions. |
this class probably doesn't need to have virtual functions.../include/libtorrent/tracker_manager.hpp:278 int m_completion_timeout;
+ | ||
relevance 2 | ../include/libtorrent/tracker_manager.hpp:282 | this class probably doesn't need to have virtual functions. |
this class probably doesn't need to have virtual functions.../include/libtorrent/tracker_manager.hpp:282 int m_completion_timeout;
typedef mutex mutex_t;
mutable mutex_t m_mutex;
@@ -3363,8 +3205,8 @@ building on c++11../include/libtorrent/socket_type.hpp:324 | ||
relevance 2 | ../include/libtorrent/aux_/session_impl.hpp:1125 | the throttling of saving resume data could probably be factored out into a separate class |
the throttling of saving resume data could probably be
-factored out into a separate class../include/libtorrent/aux_/session_impl.hpp:1125 // each second tick the timer takes a little
+ | ||
relevance 2 | ../include/libtorrent/aux_/session_impl.hpp:1146 | the throttling of saving resume data could probably be factored out into a separate class |
the throttling of saving resume data could probably be
+factored out into a separate class../include/libtorrent/aux_/session_impl.hpp:1146 // each second tick the timer takes a little
// bit longer than one second to trigger. The
// extra time it took is accumulated into this
// counter. Every time it exceeds 1000, torrents
@@ -3395,6 +3237,17 @@ factored out into a separate class../include/libtorrent/aux_/session_im
// this is a list to allow extensions to potentially remove themselves.
typedef std::list<boost::shared_ptr<plugin> > ses_extension_list_t;
ses_extension_list_t m_ses_extensions;
+
+ // std::string could be used for the query names if only all common implementations used SSO
+ // *glares at gcc*
+ struct extention_dht_query
+ {
+ uint8_t query_len;
+ boost::array<char, max_dht_query_length> query;
+ dht_extension_handler_t handler;
+ };
+ typedef std::vector<extention_dht_query> m_extension_dht_queries_t;
+ m_extension_dht_queries_t m_extension_dht_queries;
#endif
// if this function is set, it indicates that torrents are allowed
@@ -3404,18 +3257,7 @@ factored out into a separate class../include/libtorrent/aux_/session_im
// this is true whenever we have posted a deferred-disk job
// it means we don't need to post another one
bool m_deferred_submit_disk_jobs;
-
- // this is set to true when a torrent auto-manage
- // event is triggered, and reset whenever the message
- // is delivered and the auto-manage is executed.
- // there should never be more than a single pending auto-manage
- // message in-flight at any given time.
- bool m_pending_auto_manage;
-
- // this is also set to true when triggering an auto-manage
- // of the torrents. However, if the normal auto-manage
- // timer comes along and executes the auto-management,
- | ||
relevance 2 | ../include/libtorrent/aux_/session_interface.hpp:137 | the IP voting mechanism should be factored out to its own class, not part of the session |
the IP voting mechanism should be factored out
to its own class, not part of the session../include/libtorrent/aux_/session_interface.hpp:137#if TORRENT_USE_ASSERTS
virtual bool is_single_thread() const = 0;
virtual bool has_peer(peer_connection const* p) const = 0;
@@ -3459,7 +3301,7 @@ to its own class, not part of the session../include/libtorrent/aux_/ses
typedef boost::function<void(error_code const&, std::vector<address> const&)>
callback_t;
- | ||
relevance 2 | ../include/libtorrent/aux_/session_interface.hpp:162 | remove this. There's already get_resolver() |
remove this. There's already get_resolver()../include/libtorrent/aux_/session_interface.hpp:162 source_peer = 2,
source_tracker = 4,
source_router = 8
};
@@ -3510,7 +3352,7 @@ to its own class, not part of the session../include/libtorrent/aux_/ses
virtual boost::shared_ptr<torrent> delay_load_torrent(sha1_hash const& info_hash
, peer_connection* pc) = 0;
virtual void insert_torrent(sha1_hash const& ih, boost::shared_ptr<torrent> const& t
- | ||
relevance 2 | ../include/libtorrent/aux_/session_interface.hpp:217 | factor out the thread pool for socket jobs into a separate class used to (potentially) issue socket write calls onto multiple threads |
factor out the thread pool for socket jobs into a separate
class
used to (potentially) issue socket write calls onto multiple threads../include/libtorrent/aux_/session_interface.hpp:217 virtual int num_torrents() const = 0;
@@ -3553,7 +3395,7 @@ used to (potentially) issue socket write calls onto multiple threads../
std::string const& collection) const = 0;
#endif
- | ||
relevance 1 | ../src/disk_io_thread.cpp:219 | it would be nice to have the number of threads be set dynamically |
it would be nice to have the number of threads be set dynamically../src/disk_io_thread.cpp:219
+ | ||
relevance 1 | ../src/disk_io_thread.cpp:224 | it would be nice to have the number of threads be set dynamically |
it would be nice to have the number of threads be set dynamically../src/disk_io_thread.cpp:224
TORRENT_ASSERT(m_magic == 0x1337);
#if TORRENT_USE_ASSERTS
m_magic = 0xdead;
@@ -3604,8 +3446,8 @@ used to (potentially) issue socket write calls onto multiple threads../
, thread_id, type, work))));
}
}
- | ||
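A minimal sketch of a worker pool whose size can be raised or lowered at runtime; the sleep stands in for pulling a disk job off the queue, and surplus threads simply exit once their index falls outside the new target:

#include <atomic>
#include <chrono>
#include <thread>
#include <vector>

struct resizable_pool
{
	std::atomic<int> target{0};
	std::vector<std::thread> threads;

	void set_num_threads(int n)
	{
		target = n;
		while (int(threads.size()) < n)
		{
			int const idx = int(threads.size());
			threads.emplace_back([this, idx]
			{
				while (idx < target.load())
				{
					// pull and run a disk job here; sleep stands in for that
					std::this_thread::sleep_for(std::chrono::milliseconds(10));
				}
			});
		}
		// shrunk threads exit on their own; they are joined in the destructor
	}

	~resizable_pool()
	{
		target = 0;
		for (std::size_t i = 0; i < threads.size(); ++i) threads[i].join();
	}
};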
relevance 1 | ../src/http_seed_connection.cpp:123 | in chunked encoding mode, this assert won't hold. the chunk headers should be subtracted from the receive_buffer_size |
in chunked encoding mode, this assert won't hold.
-the chunk headers should be subtracted from the receive_buffer_size../src/http_seed_connection.cpp:123 boost::optional<piece_block_progress>
+ | ||
relevance 1 | ../src/http_seed_connection.cpp:129 | in chunked encoding mode, this assert won't hold. the chunk headers should be subtracted from the receive_buffer_size |
in chunked encoding mode, this assert won't hold.
+the chunk headers should be subtracted from the receive_buffer_size../src/http_seed_connection.cpp:129 boost::optional<piece_block_progress>
http_seed_connection::downloading_piece_progress() const
{
if (m_requests.empty())
@@ -3656,8 +3498,8 @@ the chunk headers should be subtracted from the receive_buffer_size../s
std::string request;
request.reserve(400);
- | ||
relevance 1 | ../src/session_impl.cpp:5225 | report the proper address of the router as the source IP of this understanding of our external address, instead of the empty address |
report the proper address of the router as the source IP of
-this understanding of our external address, instead of the empty address../src/session_impl.cpp:5225 void session_impl::on_port_mapping(int mapping, address const& ip, int port
+ | ||
relevance 1 | ../src/session_impl.cpp:5306 | report the proper address of the router as the source IP of this understanding of our external address, instead of the empty address |
report the proper address of the router as the source IP of
+this understanding of our external address, instead of the empty address../src/session_impl.cpp:5306 void session_impl::on_port_mapping(int mapping, address const& ip, int port
, error_code const& ec, int map_transport)
{
TORRENT_ASSERT(is_single_thread());
@@ -3708,9 +3550,9 @@ this understanding of our external address, instead of the empty address | ||
relevance 1 | ../src/session_impl.cpp:6529 | we only need to do this if our global IPv4 address has changed since the DHT (currently) only supports IPv4. Since restarting the DHT is kind of expensive, it would be nice to not do it unnecessarily |
we only need to do this if our global IPv4 address has changed
+ | ||
relevance 1 | ../src/session_impl.cpp:6700 | we only need to do this if our global IPv4 address has changed since the DHT (currently) only supports IPv4. Since restarting the DHT is kind of expensive, it would be nice to not do it unnecessarily |
we only need to do this if our global IPv4 address has changed
since the DHT (currently) only supports IPv4. Since restarting the DHT
-is kind of expensive, it would be nice to not do it unnecessarily../src/session_impl.cpp:6529#endif
+is kind of expensive, it would be nice to not do it unnecessarily../src/session_impl.cpp:6700#endif
if (!m_external_ip.cast_vote(ip, source_type, source)) return;
@@ -3761,11 +3603,11 @@ is kind of expensive, it would be nice to not do it unnecessarily../src
, boost::function<void(char*)> const& handler)
{
return m_disk_thread.async_allocate_disk_buffer(category, handler);
- | ||
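A sketch of the suggested check: remember the last IPv4 address the DHT was started with and only restart it when that address actually changes. dht_restart_guard is illustrative; a real version would also filter out non-global addresses:

#include <boost/asio/ip/address.hpp>

struct dht_restart_guard
{
	boost::asio::ip::address_v4 last_v4;

	bool should_restart(boost::asio::ip::address const& new_external)
	{
		if (!new_external.is_v4()) return false; // DHT currently only uses IPv4
		boost::asio::ip::address_v4 const v4 = new_external.to_v4();
		if (v4 == last_v4) return false;
		last_v4 = v4;
		return true;
	}
};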
relevance 1 | ../src/torrent.cpp:1224 | make this depend on the error and on the filesystem the files are being downloaded to. If the error is no_space_left_on_device and the filesystem doesn't support sparse files, only zero the priorities of the pieces that are at the tails of all files, leaving everything up to the highest written piece in each file |
make this depend on the error and on the filesystem the
+ | ||
relevance 1 | ../src/torrent.cpp:1235 | make this depend on the error and on the filesystem the files are being downloaded to. If the error is no_space_left_on_device and the filesystem doesn't support sparse files, only zero the priorities of the pieces that are at the tails of all files, leaving everything up to the highest written piece in each file |
make this depend on the error and on the filesystem the
files are being downloaded to. If the error is no_space_left_on_device
and the filesystem doesn't support sparse files, only zero the priorities
of the pieces that are at the tails of all files, leaving everything
-up to the highest written piece in each file../src/torrent.cpp:1224
+up to the highest written piece in each file../src/torrent.cpp:1235
// notify the user of the error
if (alerts().should_post<file_error_alert>())
alerts().emplace_alert<file_error_alert>(j->error.ec
@@ -3816,8 +3658,8 @@ up to the highest written piece in each file../src/torrent.cpp:1224 | ||
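A sketch of the narrower reaction described above for no_space_left_on_device on a non-sparse filesystem: only the piece containing each file's tail gets its priority zeroed. The flat file-size and priority vectors are illustrative inputs, not the real torrent interfaces:

#include <cstdint>
#include <vector>

void zero_tail_piece_priorities(std::vector<std::int64_t> const& file_sizes
	, int const piece_length, std::vector<int>& piece_priorities)
{
	std::int64_t offset = 0;
	for (std::size_t i = 0; i < file_sizes.size(); ++i)
	{
		offset += file_sizes[i];
		if (file_sizes[i] == 0) continue;
		// the piece that contains this file's last byte
		int const tail_piece = int((offset - 1) / piece_length);
		if (tail_piece < int(piece_priorities.size()))
			piece_priorities[tail_piece] = 0;
	}
}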
relevance 1 | ../src/torrent.cpp:7026 | save the send_stats state instead of throwing them away it may pose an issue when downgrading though |
save the send_stats state instead of throwing them away
-it may pose an issue when downgrading though../src/torrent.cpp:7026 for (int k = 0; k < bits; ++k)
+ | ||
relevance 1 | ../src/torrent.cpp:7139 | save the send_stats state instead of throwing them away it may pose an issue when downgrading though |
save the send_stats state instead of throwing them away
+it may pose an issue when downgrading though../src/torrent.cpp:7139 for (int k = 0; k < bits; ++k)
v |= (info[j*8+k].state == piece_picker::block_info::state_finished)
? (1 << k) : 0;
bitmask.append(1, v);
@@ -3858,6 +3700,7 @@ it may pose an issue when downgrading though../src/torrent.cpp:7026../src/torrent.cpp:7026 | ||
relevance 1 | ../src/torrent.cpp:8231 | should disconnect all peers that have the pieces we have not just seeds. It would be pretty expensive to check all pieces for all peers though |
should disconnect all peers that have the pieces we have
+ | ||
relevance 1 | ../src/torrent.cpp:8362 | should disconnect all peers that have the pieces we have not just seeds. It would be pretty expensive to check all pieces for all peers though |
should disconnect all peers that have the pieces we have
not just seeds. It would be pretty expensive to check all pieces
-for all peers though../src/torrent.cpp:8231
+for all peers though../src/torrent.cpp:8362
set_state(torrent_status::finished);
set_queue_position(-1);
@@ -3913,15 +3755,15 @@ for all peers though../src/torrent.cpp:8231 | ||
relevance 1 | ../include/libtorrent/ip_voter.hpp:124 | instead, have one instance per possible subnet, global IPv4, global IPv6, loopback, 192.168.x.x, 10.x.x.x, etc. |
instead, have one instance per possible subnet, global IPv4, global IPv6, loopback, 192.168.x.x, 10.x.x.x, etc.../include/libtorrent/ip_voter.hpp:124 // away all the votes and started from scratch, in case
// our IP has changed
time_point m_last_rotate;
};
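A sketch of the per-class split the comment suggests: classify each observed address and keep one ip_voter instance per class. The enum and classify() helper are illustrative, and the private-range checks only cover RFC 1918:

#include <boost/asio/ip/address.hpp>

enum class address_class { loopback, private_v4, global_v4, global_v6 };

address_class classify(boost::asio::ip::address const& a)
{
	using boost::asio::ip::address_v4;
	if (a.is_loopback()) return address_class::loopback;
	if (a.is_v6()) return address_class::global_v6;
	address_v4::bytes_type const b = a.to_v4().to_bytes();
	bool const priv = b[0] == 10
		|| (b[0] == 172 && (b[1] & 0xf0) == 16)
		|| (b[0] == 192 && b[1] == 168);
	return priv ? address_class::private_v4 : address_class::global_v4;
}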
@@ -3948,7 +3790,7 @@ for all peers though../src/torrent.cpp:8231relevance 1 | ../include/libtorrent/web_peer_connection.hpp:120 | if we make this be a disk_buffer_holder instead we would save a copy sometimes use allocate_disk_receive_buffer and release_disk_receive_buffer |
|
if we make this be a disk_buffer_holder instead
+ | ||
relevance 1 | ../include/libtorrent/web_peer_connection.hpp:120 | if we make this be a disk_buffer_holder instead we would save a copy sometimes use allocate_disk_receive_buffer and release_disk_receive_buffer |
if we make this be a disk_buffer_holder instead
we would save a copy sometimes
use allocate_disk_receive_buffer and release_disk_receive_buffer../include/libtorrent/web_peer_connection.hpp:120
// returns the block currently being
@@ -4001,12 +3843,12 @@ use allocate_disk_receive_buffer and release_disk_receive_buffer../incl
};
}
- | ||
relevance 0 | ../test/test_block_cache.cpp:469 | test try_evict_blocks |
test try_evict_blocks../test/test_block_cache.cpp:469 | ||
relevance 0 | ../test/test_block_cache.cpp:470 | test evicting volatile pieces, to see them be removed |
test evicting volatile pieces, to see them be removed../test/test_block_cache.cpp:470 | ||
relevance 0 | ../test/test_block_cache.cpp:471 | test evicting dirty pieces |
test evicting dirty pieces../test/test_block_cache.cpp:471 | ||
relevance 0 | ../test/test_block_cache.cpp:472 | test free_piece |
test free_piece../test/test_block_cache.cpp:472 | ||
relevance 0 | ../test/test_block_cache.cpp:473 | test abort_dirty |
test abort_dirty../test/test_block_cache.cpp:473 | ||
relevance 0 | ../test/test_block_cache.cpp:474 | test unaligned reads |
test unaligned reads../test/test_block_cache.cpp:474 // it's supposed to be a cache hit
TEST_CHECK(ret >= 0);
// return the reference to the buffer we just read
RETURN_BUFFER;
- tailqueue jobs;
+ tailqueue<disk_io_job> jobs;
bc.clear(jobs);
}
@@ -4023,7 +3865,7 @@ TORRENT_TEST(block_cache)
}
- | ||
relevance 0 | ../test/test_bloom_filter.cpp:130 | test size() |
test size()../test/test_bloom_filter.cpp:130 | ||
relevance 0 | ../test/test_bloom_filter.cpp:131 | test clear() |
test clear()../test/test_bloom_filter.cpp:131 TEST_EQUAL(memcmp(bits_out.c_str(), bits, 4), 0);
sha1_hash k( "\x01\x00\x02\x00 ");
TEST_CHECK(!filter.find(k));
@@ -4045,59 +3887,8 @@ TORRENT_TEST(bloom_filter)
}
- | ||
relevance 0 | ../test/test_dht.cpp:441 | test obfuscated_get_peers |
test obfuscated_get_peers../test/test_dht.cpp:441
-struct obs : dht::dht_observer
-{
- virtual void set_external_address(address const& addr
- , address const& source) TORRENT_OVERRIDE
- {}
-
- virtual address external_address() TORRENT_OVERRIDE
- {
- return address_v4::from_string("236.0.0.1");
- }
- virtual void get_peers(sha1_hash const& ih) TORRENT_OVERRIDE {}
- virtual void outgoing_get_peers(sha1_hash const& target
- , sha1_hash const& sent_target, udp::endpoint const& ep) TORRENT_OVERRIDE {}
- virtual void announce(sha1_hash const& ih, address const& addr, int port) TORRENT_OVERRIDE {}
- virtual void log(dht_logger::module_t l, char const* fmt, ...) TORRENT_OVERRIDE {}
- virtual void log_packet(message_direction_t dir, char const* pkt, int len
- , udp::endpoint node) TORRENT_OVERRIDE {}
-};
-
-TORRENT_TEST(dht)
- {
- dht_settings sett;
- sett.max_torrents = 4;
- sett.max_dht_items = 4;
- sett.enforce_node_id = false;
- mock_socket s;
- obs observer;
- counters cnt;
- dht::node node(&s, sett, node_id(0), &observer, cnt);
-
- // DHT should be running on port 48199 now
- bdecode_node response;
- bdecode_node parsed[11];
- char error_string[200];
- bool ret;
-
- // ====== ping ======
- udp::endpoint source(address::from_string("10.0.0.1"), 20);
- send_dht_request(node, "ping", source, &response, "10");
-
- dht::key_desc_t pong_desc[] = {
- {"y", bdecode_node::string_t, 1, 0},
- {"t", bdecode_node::string_t, 2, 0},
- {"r", bdecode_node::dict_t, 0, key_desc_t::parse_children},
- {"id", bdecode_node::string_t, 20, key_desc_t::last_child},
- };
-
- fprintf(stderr, "msg: %s\n", print_entry(response).c_str());
- ret = dht::verify_message(response, pong_desc, parsed, 4, error_string
- , sizeof(error_string));
- | ||
relevance 0 | ../test/test_fast_extension.cpp:801 | test sending invalid requests (out of bound piece index, offsets and sizes) |
test sending invalid requests (out of bound piece index, offsets and
-sizes)../test/test_fast_extension.cpp:801
+ | ||
relevance 0 | ../test/test_dht.cpp:539 | test obfuscated_get_peers |
test obfuscated_get_peers../test/test_dht.cpp:539 | ||
relevance 0 | ../test/test_fast_extension.cpp:842 | test sending invalid requests (out of bound piece index, offsets and sizes) |
test sending invalid requests (out of bound piece index, offsets and
+sizes)../test/test_fast_extension.cpp:842
entry ut_metadata_msg = read_ut_metadata_msg(s, recv_buffer
, sizeof(recv_buffer));
@@ -4118,7 +3909,7 @@ sizes)../test/test_fast_extension.cpp:801
- | ||
relevance 0 | ../test/test_file_progress.cpp:109 | test the update function too |
test the update function too../test/test_file_progress.cpp:109 for (int idx = 0; idx < fs.num_pieces(); ++idx)
{
piece_picker picker;
picker.init(4, fs.total_size() % 4, fs.num_pieces());
@@ -4131,7 +3922,7 @@ sizes)../test/test_fast_extension.cpp:801
- | ||
relevance 0 | ../test/test_file_storage.cpp:214 | test file_storage::optimize |
test file_storage::optimize../test/test_file_storage.cpp:214 | ||
relevance 0 | ../test/test_file_storage.cpp:215 | test map_block |
test map_block../test/test_file_storage.cpp:215 | ||
relevance 0 | ../test/test_file_storage.cpp:216 | test piece_size(int piece) |
test piece_size(int piece)../test/test_file_storage.cpp:216 | ||
relevance 0 | ../test/test_file_storage.cpp:217 | test file_index_at_offset |
test file_index_at_offset../test/test_file_storage.cpp:217 | ||
relevance 0 | ../test/test_file_storage.cpp:218 | test file attributes |
test file attributes../test/test_file_storage.cpp:218 | ||
relevance 0 | ../test/test_file_storage.cpp:219 | test symlinks |
test symlinks../test/test_file_storage.cpp:219 | ||
relevance 0 | ../test/test_file_storage.cpp:220 | test pad_files |
test pad_files../test/test_file_storage.cpp:220 | ||
relevance 0 | ../test/test_file_storage.cpp:221 | test reorder_file (make sure internal_file_entry::swap() is used) |
test reorder_file (make sure internal_file_entry::swap() is used)../test/test_file_storage.cpp:221 TEST_EQUAL(rq.start, 298);
TEST_EQUAL(rq.length, 841);
}
@@ -4160,59 +3951,7 @@ TORRENT_TEST(file_path_hash)
}
- | ||
relevance 0 | ../test/test_metadata_extension.cpp:114 | it would be nice to test reversing which session is making the connection as well |
it would be nice to test reversing
-which session is making the connection as well../test/test_metadata_extension.cpp:114 pack.set_int(settings_pack::out_enc_policy, settings_pack::pe_forced);
- pack.set_int(settings_pack::in_enc_policy, settings_pack::pe_forced);
- pack.set_bool(settings_pack::prefer_rc4, flags & full_encryption);
-
- if (flags & utp)
- {
- pack.set_bool(settings_pack::utp_dynamic_sock_buf, true);
- pack.set_bool(settings_pack::enable_incoming_utp, true);
- pack.set_bool(settings_pack::enable_outgoing_utp, true);
- pack.set_bool(settings_pack::enable_incoming_tcp, false);
- pack.set_bool(settings_pack::enable_outgoing_tcp, false);
- }
- else
- {
- pack.set_bool(settings_pack::enable_incoming_utp, false);
- pack.set_bool(settings_pack::enable_outgoing_utp, false);
- pack.set_bool(settings_pack::enable_incoming_tcp, true);
- pack.set_bool(settings_pack::enable_outgoing_tcp, true);
- }
-
- pack.set_str(settings_pack::listen_interfaces, "0.0.0.0:48100");
- lt::session ses1(pack);
- pack.set_str(settings_pack::listen_interfaces, "0.0.0.0:49100");
- lt::session ses2(pack);
- ses1.add_extension(constructor);
- ses2.add_extension(constructor);
- torrent_handle tor1;
- torrent_handle tor2;
-
- lt::session* downloader = &ses2;
- lt::session* seed = &ses1;
-
- boost::tie(tor1, tor2, ignore) = setup_transfer(seed, downloader, NULL
- , flags & clear_files, true, false, "_meta");
-
- if (flags & upload_only)
- {
- tor2.set_upload_mode(true);
- }
-
- if (flags & reverse)
- {
- error_code ec;
- int port = seed->listen_port();
- fprintf(stderr, "%s: downloader: connecting peer port: %d\n"
- , time_now_string(), port);
- tor2.connect_peer(tcp::endpoint(address::from_string("127.0.0.1", ec)
- , port));
- }
- else
- {
- | ||
relevance 0 | ../test/test_peer_list.cpp:944 | test erasing peers |
test erasing peers../test/test_peer_list.cpp:944 | ||
relevance 0 | ../test/test_peer_list.cpp:945 | test update_peer_port with allow_multiple_connections_per_ip and without |
test update_peer_port with allow_multiple_connections_per_ip and without../test/test_peer_list.cpp:945 | ||
relevance 0 | ../test/test_peer_list.cpp:946 | test add i2p peers |
test add i2p peers../test/test_peer_list.cpp:946 | ||
relevance 0 | ../test/test_peer_list.cpp:947 | test allow_i2p_mixed |
test allow_i2p_mixed../test/test_peer_list.cpp:947 | ||
relevance 0 | ../test/test_peer_list.cpp:948 | test insert_peer failing with all error conditions |
test insert_peer failing with all error conditions../test/test_peer_list.cpp:948 | ||
relevance 0 | ../test/test_peer_list.cpp:949 | test IPv6 |
test IPv6../test/test_peer_list.cpp:949 | ||
relevance 0 | ../test/test_peer_list.cpp:950 | test connect_to_peer() failing |
test connect_to_peer() failing../test/test_peer_list.cpp:950 | ||
relevance 0 | ../test/test_peer_list.cpp:951 | test connection_closed |
test connection_closed../test/test_peer_list.cpp:951 | ||
relevance 0 | ../test/test_peer_list.cpp:952 | connect candidates recalculation when incrementing failcount |
connect candidates recalculation when incrementing failcount../test/test_peer_list.cpp:952 torrent_peer* peer4 = add_peer(p, st, ep("10.0.0.4", 8080));
+ | ||
relevance 0 | ../test/test_peer_list.cpp:939 | test erasing peers |
test erasing peers../test/test_peer_list.cpp:939 | ||
relevance 0 | ../test/test_peer_list.cpp:940 | test update_peer_port with allow_multiple_connections_per_ip and without |
test update_peer_port with allow_multiple_connections_per_ip and without../test/test_peer_list.cpp:940 | ||
relevance 0 | ../test/test_peer_list.cpp:941 | test add i2p peers |
test add i2p peers../test/test_peer_list.cpp:941 | ||
relevance 0 | ../test/test_peer_list.cpp:942 | test allow_i2p_mixed |
test allow_i2p_mixed../test/test_peer_list.cpp:942 | ||
relevance 0 | ../test/test_peer_list.cpp:943 | test insert_peer failing with all error conditions |
test insert_peer failing with all error conditions../test/test_peer_list.cpp:943 | ||
relevance 0 | ../test/test_peer_list.cpp:944 | test IPv6 |
test IPv6../test/test_peer_list.cpp:944 | ||
relevance 0 | ../test/test_peer_list.cpp:945 | test connect_to_peer() failing |
test connect_to_peer() failing../test/test_peer_list.cpp:945 | ||
relevance 0 | ../test/test_peer_list.cpp:946 | test connection_closed |
test connection_closed../test/test_peer_list.cpp:946 | ||
relevance 0 | ../test/test_peer_list.cpp:947 | connect candidates recalculation when incrementing failcount |
connect candidates recalculation when incrementing failcount../test/test_peer_list.cpp:947 torrent_peer* peer4 = add_peer(p, st, ep("10.0.0.4", 8080));
TEST_CHECK(peer4);
TEST_EQUAL(p.num_peers(), 4);
torrent_peer* peer5 = add_peer(p, st, ep("10.0.0.5", 8080));
@@ -4233,7 +3972,7 @@ which session is making the connection as well../test/test_metadata_ext
}
- | ||
relevance 0 | ../test/test_resolve_links.cpp:80 | test files with different piece size (negative test) |
test files with different piece size (negative test)../test/test_resolve_links.cpp:80 { "test2", "test1_pad_files", 0},
{ "test3", "test1_pad_files", 0},
{ "test2", "test1_single", 0},
@@ -4255,7 +3994,7 @@ which session is making the connection as well../test/test_metadata_ext
};
- | ||
relevance 0 | ../test/test_resolve_links.cpp:83 | it would be nice to test resolving of more than just 2 files as well. like 3 single file torrents merged into one, resolving all 3 files. |
it would be nice to test resolving of more than just 2 files as well.
like 3 single file torrents merged into one, resolving all 3 files.../test/test_resolve_links.cpp:83 { "test2", "test1_single", 0},
// these are all padded. The first small file will accidentally also
@@ -4284,7 +4023,7 @@ like 3 single file torrents merged into one, resolving all 3 files.../t
std::string path = combine_path(parent_path(current_working_directory())
, "mutable_test_torrents");
- for (int i = 0; i < sizeof(test_torrents)/sizeof(test_torrents[0]); ++i)
+ for (int i = 0; i < int(sizeof(test_torrents)/sizeof(test_torrents[0])); ++i)
{
test_torrent_t const& e = test_torrents[i];
@@ -4307,10 +4046,10 @@ like 3 single file torrents merged into one, resolving all 3 files.../t
// some debug output in case the test fails
if (num_matches > e.expected_matches)
- | ||
relevance 0 | ../test/test_resume.cpp:230 | test what happens when loading a resume file with both piece priorities and file priorities (file prio should take presedence) |
test what happens when loading a resume file with both piece priorities
-and file priorities (file prio should take presedence)../test/test_resume.cpp:230 fprintf(stderr, "%s\n", ra->resume_data->to_string().c_str());
+ | ||
relevance 0 | ../test/test_resume.cpp:232 | test what happens when loading a resume file with both piece priorities and file priorities (file prio should take presedence) |
test what happens when loading a resume file with both piece priorities
+and file priorities (file prio should take presedence)../test/test_resume.cpp:232 fprintf(stderr, "%s\n", ra->resume_data->to_string().c_str());
entry::string_type prios = (*ra->resume_data)["piece_priority"].string();
- TEST_EQUAL(prios.size(), ti->num_pieces());
+ TEST_EQUAL(int(prios.size()), ti->num_pieces());
TEST_EQUAL(prios[0], '\0');
TEST_EQUAL(prios[1], '\x04');
TEST_EQUAL(prios[ti->num_pieces()-1], '\0');
@@ -4329,9 +4068,9 @@ and file priorities (file prio should take presedence)../test/test_resu
}
- | ||
relevance 0 | ../test/test_resume.cpp:233 | make sure a resume file only ever contain file priorities OR piece priorities. Never both. |
make sure a resume file only ever contain file priorities OR piece
-priorities. Never both.../test/test_resume.cpp:233 entry::string_type prios = (*ra->resume_data)["piece_priority"].string();
- TEST_EQUAL(prios.size(), ti->num_pieces());
+ | ||
relevance 0 | ../test/test_resume.cpp:235 | make sure a resume file only ever contain file priorities OR piece priorities. Never both. |
make sure a resume file only ever contain file priorities OR piece
+priorities. Never both.../test/test_resume.cpp:235 entry::string_type prios = (*ra->resume_data)["piece_priority"].string();
+ TEST_EQUAL(int(prios.size()), ti->num_pieces());
TEST_EQUAL(prios[0], '\0');
TEST_EQUAL(prios[1], '\x04');
TEST_EQUAL(prios[ti->num_pieces()-1], '\0');
@@ -4351,7 +4090,7 @@ priorities. Never both.../test/test_resume.cpp:233
- | ||
relevance 0 | ../test/test_resume.cpp:236 | generally save |
generally save../test/test_resume.cpp:236 TEST_EQUAL(prios.size(), ti->num_pieces());
+ | ||
relevance 0 | ../test/test_resume.cpp:238 | generally save |
generally save../test/test_resume.cpp:238 TEST_EQUAL(int(prios.size()), ti->num_pieces());
TEST_EQUAL(prios[0], '\0');
TEST_EQUAL(prios[1], '\x04');
TEST_EQUAL(prios[ti->num_pieces()-1], '\0');
@@ -4374,7 +4113,7 @@ priorities. Never both.../test/test_resume.cpp:233
TORRENT_TEST(file_priorities_default)
{
- libtorrent::session ses;
+ lt::session ses;
std::vector<int> file_priorities = test_resume_flags(ses, 0, "", "").file_priorities();
TEST_EQUAL(file_priorities.size(), 3);
@@ -4386,7 +4125,7 @@ priorities. Never both.../test/test_resume.cpp:233 | ||
relevance 0 | ../test/test_resume.cpp:648 | test all other resume flags here too. This would require returning more than just the torrent_status from test_resume_flags. Also http seeds and trackers for instance |
test all other resume flags here too. This would require returning
+ | ||
relevance 0 | ../test/test_resume.cpp:695 | test all other resume flags here too. This would require returning more than just the torrent_status from test_resume_flags. Also http seeds and trackers for instance |
test all other resume flags here too. This would require returning
more than just the torrent_status from test_resume_flags. Also http seeds
-and trackers for instance../test/test_resume.cpp:648 libtorrent::session ses;
+and trackers for instance../test/test_resume.cpp:695 lt::session ses;
// resume data overrides the paused flag
torrent_status s = test_resume_flags(ses, add_torrent_params::flag_paused).status();
default_tests(s);
@@ -4426,8 +4165,57 @@ and trackers for instance../test/test_resume.cpp:648}
+TORRENT_TEST(url_seed_resume_data)
+{
+ // merge url seeds with resume data
+ fprintf(stderr, "flags: merge_resume_http_seeds\n");
+ lt::session ses;
+ torrent_handle h = test_resume_flags(ses,
+ add_torrent_params::flag_merge_resume_http_seeds);
+ std::set<std::string> us = h.url_seeds();
+ std::set<std::string> ws = h.http_seeds();
- | ||
relevance 0 | ../test/test_ssl.cpp:378 | test using a signed certificate with the wrong info-hash in DN |
test using a signed certificate with the wrong info-hash in DN../test/test_ssl.cpp:378 // in verifying peers
+ TEST_EQUAL(us.size(), 3);
+ TEST_EQUAL(std::count(us.begin(), us.end()
+ , "http://add_torrent_params_url_seed.com"), 1);
+ TEST_EQUAL(std::count(us.begin(), us.end()
+ , "http://torrent_file_url_seed.com/"), 1);
+ TEST_EQUAL(std::count(us.begin(), us.end()
+ , "http://resume_data_url_seed.com/"), 1);
+
+ TEST_EQUAL(ws.size(), 1);
+ TEST_EQUAL(std::count(ws.begin(), ws.end()
+ , "http://resume_data_http_seed.com"), 1);
+}
+
+TORRENT_TEST(resume_override_torrent)
+{
+ // resume data overrides the .torrent_file
+ fprintf(stderr, "flags: no merge_resume_http_seed\n");
+ lt::session ses;
+ torrent_handle h = test_resume_flags(ses,
+ | ||
relevance 0 | ../test/test_settings_pack.cpp:140 | load_pack_from_dict |
load_pack_from_dict../test/test_settings_pack.cpp:140 TEST_EQUAL(pack.get_bool(settings_pack::send_redundant_have), true);
+
+ pack.clear();
+
+ TEST_EQUAL(pack.has_val(settings_pack::send_redundant_have), false);
+ TEST_EQUAL(pack.has_val(settings_pack::user_agent), false);
+ TEST_EQUAL(pack.has_val(settings_pack::lazy_bitfields), false);
+}
+
+TORRENT_TEST(duplicates)
+{
+ settings_pack p;
+ p.set_str(settings_pack::peer_fingerprint, "abc");
+ p.set_str(settings_pack::peer_fingerprint, "cde");
+ p.set_str(settings_pack::peer_fingerprint, "efg");
+ p.set_str(settings_pack::peer_fingerprint, "hij");
+
+ TEST_EQUAL(p.get_str(settings_pack::peer_fingerprint), "hij");
+}
+
+
+ | ||
relevance 0 | ../test/test_ssl.cpp:385 | test using a signed certificate with the wrong info-hash in DN |
test using a signed certificate with the wrong info-hash in DN../test/test_ssl.cpp:385 // in verifying peers
ctx.set_verify_mode(context::verify_none, ec);
if (ec)
{
@@ -4478,8 +4266,8 @@ and trackers for instance../test/test_resume.cpp:648 | ||
relevance 0 | ../test/test_ssl.cpp:476 | also test using a hash that refers to a valid torrent but that differs from the SNI hash |
also test using a hash that refers to a valid torrent
-but that differs from the SNI hash../test/test_ssl.cpp:476 print_alerts(ses1, "ses1", true, true, true, &on_alert);
+ | ||
relevance 0 | ../test/test_ssl.cpp:483 | also test using a hash that refers to a valid torrent but that differs from the SNI hash |
also test using a hash that refers to a valid torrent
+but that differs from the SNI hash../test/test_ssl.cpp:483 print_alerts(ses1, "ses1", true, true, true, &on_alert);
if (ec)
{
fprintf(stderr, "Failed SSL handshake: %s\n"
@@ -4530,7 +4318,7 @@ but that differs from the SNI hash../test/test_ssl.cpp:476 | ||
relevance 0 | ../test/test_timestamp_history.cpp:54 | test the case where we have > 120 samples (and have the base delay actually be updated) |
test the case where we have > 120 samples (and have the base delay actually be updated)../test/test_timestamp_history.cpp:54 | ||
relevance 0 | ../test/test_timestamp_history.cpp:55 | test the case where a sample is lower than the history entry but not lower than the base |
test the case where a sample is lower than the history entry but not lower than the base../test/test_timestamp_history.cpp:55#include "libtorrent/timestamp_history.hpp"
TORRENT_TEST(timestamp_history)
{
@@ -4552,7 +4340,7 @@ TORRENT_TEST(timestamp_history)
}
- | ||
relevance 0 | ../test/test_torrent.cpp:135 | wait for an alert rather than just waiting 10 seconds. This is kind of silly |
wait for an alert rather than just waiting 10 seconds. This is kind of silly../test/test_torrent.cpp:135 TEST_EQUAL(h.file_priorities().size(), info->num_files());
+ | ||
relevance 0 | ../test/test_torrent.cpp:135 | wait for an alert rather than just waiting 10 seconds. This is kind of silly |
wait for an alert rather than just waiting 10 seconds. This is kind of silly../test/test_torrent.cpp:135 TEST_EQUAL(int(h.file_priorities().size()), info->num_files());
TEST_EQUAL(h.file_priorities()[0], 0);
if (info->num_files() > 1)
TEST_EQUAL(h.file_priorities()[1], 0);
@@ -4593,17 +4381,17 @@ TORRENT_TEST(timestamp_history)
}
}
-TORRENT_TEST(torrent)
+TORRENT_TEST(long_names)
{
-/* {
- remove("test_torrent_dir2/tmp1");
- remove("test_torrent_dir2/tmp2");
- remove("test_torrent_dir2/tmp3");
- file_storage fs;
- boost::int64_t file_size = 256 * 1024;
- fs.add_file("test_torrent_dir2/tmp1", file_size);
- fs.add_file("test_torrent_dir2/tmp2", file_size);
- | ||
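The fix asked for by the test_torrent.cpp:135 TODO above amounts to polling session::wait_for_alert() instead of sleeping a fixed 10 seconds. A minimal sketch of such a helper, assuming it would live in the test's own support code (the name wait_for_alert_type and the exact timeout handling are illustrative, not something that exists in the test suite today):

    #include <vector>
    #include "libtorrent/session.hpp"
    #include "libtorrent/alert.hpp"
    #include "libtorrent/time.hpp"

    namespace lt = libtorrent;

    // returns the first alert of type Alert posted before the deadline, or NULL
    // on timeout. the returned pointer is only valid until the next pop_alerts()
    template <class Alert>
    Alert const* wait_for_alert_type(lt::session& ses, lt::time_duration timeout)
    {
        lt::time_point const deadline = lt::clock_type::now() + timeout;
        std::vector<lt::alert*> alerts;
        for (;;)
        {
            lt::time_duration const left = deadline - lt::clock_type::now();
            if (left.count() <= 0) return NULL;
            if (ses.wait_for_alert(left) == NULL) return NULL; // timed out
            ses.pop_alerts(&alerts);
            for (std::size_t i = 0; i < alerts.size(); ++i)
                if (Alert const* a = lt::alert_cast<Alert>(alerts[i]))
                    return a;
        }
    }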
relevance 0 | ../test/test_torrent_info.cpp:155 | test remap_files |
test remap_files../test/test_torrent_info.cpp:155 | ||
relevance 0 | ../test/test_torrent_info.cpp:156 | merkle torrents. specifically torrent_info::add_merkle_nodes and torrent with "root hash" |
merkle torrents. specifically torrent_info::add_merkle_nodes and torrent with "root hash"../test/test_torrent_info.cpp:156 | ||
relevance 0 | ../test/test_torrent_info.cpp:157 | torrent with 'p' (padfile) attribute |
torrent with 'p' (padfile) attribute../test/test_torrent_info.cpp:157 | ||
relevance 0 | ../test/test_torrent_info.cpp:158 | torrent with 'h' (hidden) attribute |
torrent with 'h' (hidden) attribute../test/test_torrent_info.cpp:158 | ||
relevance 0 | ../test/test_torrent_info.cpp:159 | torrent with 'x' (executable) attribute |
torrent with 'x' (executable) attribute../test/test_torrent_info.cpp:159 | ||
relevance 0 | ../test/test_torrent_info.cpp:160 | torrent with 'l' (symlink) attribute |
torrent with 'l' (symlink) attribute../test/test_torrent_info.cpp:160 | ||
relevance 0 | ../test/test_torrent_info.cpp:161 | creating a merkle torrent (torrent_info::build_merkle_list) |
creating a merkle torrent (torrent_info::build_merkle_list)../test/test_torrent_info.cpp:161 | ||
relevance 0 | ../test/test_torrent_info.cpp:162 | torrent with multiple trackers in multiple tiers, making sure we shuffle them (how do you test shuffling?, load it multiple times and make sure it's in different order at least once) |
torrent with multiple trackers in multiple tiers, making sure we shuffle them (how do you test shuffling?, load it multiple times and make sure it's in different order at least once)../test/test_torrent_info.cpp:162 | ||
relevance 0 | ../test/test_torrent_info.cpp:163 | sanitize_append_path_element with all kinds of UTF-8 sequences, including invalid ones |
sanitize_append_path_element with all kinds of UTF-8 sequences, including invalid ones../test/test_torrent_info.cpp:163 | ||
relevance 0 | ../test/test_torrent_info.cpp:164 | torrents with a missing name |
torrents with a missing name../test/test_torrent_info.cpp:164 | ||
relevance 0 | ../test/test_torrent_info.cpp:165 | torrents with a zero-length name |
torrents with a zero-length name../test/test_torrent_info.cpp:165 | ||
relevance 0 | ../test/test_torrent_info.cpp:166 | torrents with a merkle tree and add_merkle_nodes |
torrents with a merkle tree and add_merkle_nodes../test/test_torrent_info.cpp:166 | ||
relevance 0 | ../test/test_torrent_info.cpp:167 | torrent with a non-dictionary info-section |
torrent with a non-dictionary info-section../test/test_torrent_info.cpp:167 | ||
relevance 0 | ../test/test_torrent_info.cpp:168 | torrents with DHT nodes |
torrents with DHT nodes../test/test_torrent_info.cpp:168 | ||
relevance 0 | ../test/test_torrent_info.cpp:169 | torrent with url-list as a single string |
torrent with url-list as a single string../test/test_torrent_info.cpp:169 | ||
relevance 0 | ../test/test_torrent_info.cpp:170 | torrent with http seed as a single string |
torrent with http seed as a single string../test/test_torrent_info.cpp:170 | ||
relevance 0 | ../test/test_torrent_info.cpp:171 | torrent with a comment |
torrent with a comment../test/test_torrent_info.cpp:171 | ||
relevance 0 | ../test/test_torrent_info.cpp:172 | torrent with an SSL cert |
torrent with an SSL cert../test/test_torrent_info.cpp:172 | ||
relevance 0 | ../test/test_torrent_info.cpp:173 | torrent with attributes (executable and hidden) |
torrent with attributes (executable and hidden)../test/test_torrent_info.cpp:173 | ||
relevance 0 | ../test/test_torrent_info.cpp:174 | torrent_info::add_tracker |
torrent_info::add_tracker../test/test_torrent_info.cpp:174 | ||
relevance 0 | ../test/test_torrent_info.cpp:175 | torrent_info::add_url_seed |
torrent_info::add_url_seed../test/test_torrent_info.cpp:175 | ||
relevance 0 | ../test/test_torrent_info.cpp:176 | torrent_info::add_http_seed |
torrent_info::add_http_seed../test/test_torrent_info.cpp:176 | ||
relevance 0 | ../test/test_torrent_info.cpp:177 | torrent_info::unload |
torrent_info::unload../test/test_torrent_info.cpp:177 | ||
relevance 0 | ../test/test_torrent_info.cpp:178 | torrent_info constructor that takes an invalid bencoded buffer |
torrent_info constructor that takes an invalid bencoded buffer../test/test_torrent_info.cpp:178 | ||
relevance 0 | ../test/test_torrent_info.cpp:179 | verify_encoding with a string that triggers character replacement |
verify_encoding with a string that triggers character replacement../test/test_torrent_info.cpp:179test_failing_torrent_t test_error_torrents[] =
+ entry info;
+ info["pieces"] = "aaaaaaaaaaaaaaaaaaaa";
+ info["name"] = "slightly shorter name, it's kind of sad that people started the trend of incorrectly encoding the regular name field and then adding another one with correct encoding";
+ info["name.utf-8"] = "this is a long ass name in order to try to make make_magnet_uri overflow and hopefully crash. Although, by the time you read this that particular bug should have been fixed";
+ info["piece length"] = 16 * 1024;
+ info["length"] = 3245;
+ entry torrent;
+ torrent["info"] = info;
+ | ||
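Several of the missing test_torrent_info.cpp cases listed above only need a small bencoded fixture like the one being built here. As an illustration, a sketch of the "url-list as a single string" case, reusing the same entry-building pattern and the includes already present in this test file (field values are placeholders):

    TORRENT_TEST(url_list_single_string)
    {
        entry info;
        info["pieces"] = "aaaaaaaaaaaaaaaaaaaa";
        info["name"] = "test";
        info["piece length"] = 16 * 1024;
        info["length"] = 3245;
        entry torrent;
        torrent["info"] = info;
        // url-list is allowed to be a single string rather than a list
        torrent["url-list"] = "http://foo.com/bar";

        std::vector<char> buf;
        bencode(std::back_inserter(buf), torrent);
        error_code ec;
        torrent_info ti(&buf[0], int(buf.size()), ec);
        TEST_CHECK(!ec);
        TEST_EQUAL(int(ti.web_seeds().size()), 1);
    }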
relevance 0 | ../test/test_torrent_info.cpp:156 | test remap_files |
test remap_files../test/test_torrent_info.cpp:156 | ||
relevance 0 | ../test/test_torrent_info.cpp:157 | merkle torrents. specifically torrent_info::add_merkle_nodes and torrent with "root hash" |
merkle torrents. specifically torrent_info::add_merkle_nodes and torrent with "root hash"../test/test_torrent_info.cpp:157 | ||
relevance 0 | ../test/test_torrent_info.cpp:158 | torrent with 'p' (padfile) attribute |
torrent with 'p' (padfile) attribute../test/test_torrent_info.cpp:158 | ||
relevance 0 | ../test/test_torrent_info.cpp:159 | torrent with 'h' (hidden) attribute |
torrent with 'h' (hidden) attribute../test/test_torrent_info.cpp:159 | ||
relevance 0 | ../test/test_torrent_info.cpp:160 | torrent with 'x' (executable) attribute |
torrent with 'x' (executable) attribute../test/test_torrent_info.cpp:160 | ||
relevance 0 | ../test/test_torrent_info.cpp:161 | torrent with 'l' (symlink) attribute |
torrent with 'l' (symlink) attribute../test/test_torrent_info.cpp:161 | ||
relevance 0 | ../test/test_torrent_info.cpp:162 | creating a merkle torrent (torrent_info::build_merkle_list) |
creating a merkle torrent (torrent_info::build_merkle_list)../test/test_torrent_info.cpp:162 | ||
relevance 0 | ../test/test_torrent_info.cpp:163 | torrent with multiple trackers in multiple tiers, making sure we shuffle them (how do you test shuffling?, load it multiple times and make sure it's in different order at least once) |
torrent with multiple trackers in multiple tiers, making sure we shuffle them (how do you test shuffling?, load it multiple times and make sure it's in different order at least once)../test/test_torrent_info.cpp:163 | ||
relevance 0 | ../test/test_torrent_info.cpp:164 | sanitize_append_path_element with all kinds of UTF-8 sequences, including invalid ones |
sanitize_append_path_element with all kinds of UTF-8 sequences, including invalid ones../test/test_torrent_info.cpp:164 | ||
relevance 0 | ../test/test_torrent_info.cpp:165 | torrents with a missing name |
torrents with a missing name../test/test_torrent_info.cpp:165 | ||
relevance 0 | ../test/test_torrent_info.cpp:166 | torrents with a zero-length name |
torrents with a zero-length name../test/test_torrent_info.cpp:166 | ||
relevance 0 | ../test/test_torrent_info.cpp:167 | torrents with a merkle tree and add_merkle_nodes |
torrents with a merkle tree and add_merkle_nodes../test/test_torrent_info.cpp:167 | ||
relevance 0 | ../test/test_torrent_info.cpp:168 | torrent with a non-dictionary info-section |
torrent with a non-dictionary info-section../test/test_torrent_info.cpp:168 | ||
relevance 0 | ../test/test_torrent_info.cpp:169 | torrents with DHT nodes |
torrents with DHT nodes../test/test_torrent_info.cpp:169 | ||
relevance 0 | ../test/test_torrent_info.cpp:170 | torrent with url-list as a single string |
torrent with url-list as a single string../test/test_torrent_info.cpp:170 | ||
relevance 0 | ../test/test_torrent_info.cpp:171 | torrent with http seed as a single string |
torrent with http seed as a single string../test/test_torrent_info.cpp:171 | ||
relevance 0 | ../test/test_torrent_info.cpp:172 | torrent with a comment |
torrent with a comment../test/test_torrent_info.cpp:172 | ||
relevance 0 | ../test/test_torrent_info.cpp:173 | torrent with an SSL cert |
torrent with an SSL cert../test/test_torrent_info.cpp:173 | ||
relevance 0 | ../test/test_torrent_info.cpp:174 | torrent with attributes (executable and hidden) |
torrent with attributes (executable and hidden)../test/test_torrent_info.cpp:174 | ||
relevance 0 | ../test/test_torrent_info.cpp:175 | torrent_info::add_tracker |
torrent_info::add_tracker../test/test_torrent_info.cpp:175 | ||
relevance 0 | ../test/test_torrent_info.cpp:176 | torrent_info::add_url_seed |
torrent_info::add_url_seed../test/test_torrent_info.cpp:176 | ||
relevance 0 | ../test/test_torrent_info.cpp:177 | torrent_info::add_http_seed |
torrent_info::add_http_seed../test/test_torrent_info.cpp:177 | ||
relevance 0 | ../test/test_torrent_info.cpp:178 | torrent_info::unload |
torrent_info::unload../test/test_torrent_info.cpp:178 | ||
relevance 0 | ../test/test_torrent_info.cpp:179 | torrent_info constructor that takes an invalid bencoded buffer |
torrent_info constructor that takes an invalid bencoded buffer../test/test_torrent_info.cpp:179 | ||
relevance 0 | ../test/test_torrent_info.cpp:180 | verify_encoding with a string that triggers character replacement |
verify_encoding with a string that triggers character replacement../test/test_torrent_info.cpp:180test_failing_torrent_t test_error_torrents[] =
{
{ "missing_piece_len.torrent", errors::torrent_missing_piece_length },
{ "invalid_piece_len.torrent", errors::torrent_missing_piece_length },
@@ -4654,26 +4442,26 @@ TORRENT_TEST(torrent)
"abcdefghi_abcdefghi_abcdefghi_abcdefghi_/"
"abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_"
"abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_"
- | ||
relevance 0 | ../test/test_tracker.cpp:47 | test scrape requests |
test scrape requests../test/test_tracker.cpp:47 | ||
relevance 0 | ../test/test_tracker.cpp:48 | test parse peers6 |
test parse peers6../test/test_tracker.cpp:48 | ||
relevance 0 | ../test/test_tracker.cpp:49 | test parse tracker-id |
test parse tracker-id../test/test_tracker.cpp:49 | ||
relevance 0 | ../test/test_tracker.cpp:50 | test parse failure-reason |
test parse failure-reason../test/test_tracker.cpp:50 | ||
relevance 0 | ../test/test_tracker.cpp:51 | test all failure paths, including invalid bencoding not a dictionary no files entry in scrape response no info-hash entry in scrape response malformed peers in peer list of dictionaries uneven number of bytes in peers and peers6 string responses |
test all failure paths, including
+ | ||
relevance 0 | ../test/test_tracker.cpp:53 | test scrape requests |
test scrape requests../test/test_tracker.cpp:53 | ||
relevance 0 | ../test/test_tracker.cpp:54 | test parse peers6 |
test parse peers6../test/test_tracker.cpp:54 | ||
relevance 0 | ../test/test_tracker.cpp:55 | test parse tracker-id |
test parse tracker-id../test/test_tracker.cpp:55 | ||
relevance 0 | ../test/test_tracker.cpp:56 | test parse failure-reason |
test parse failure-reason../test/test_tracker.cpp:56 | ||
relevance 0 | ../test/test_tracker.cpp:57 | test all failure paths, including invalid bencoding not a dictionary no files entry in scrape response no info-hash entry in scrape response malformed peers in peer list of dictionaries uneven number of bytes in peers and peers6 string responses |
test all failure paths, including
invalid bencoding
not a dictionary
no files entry in scrape response
no info-hash entry in scrape response
malformed peers in peer list of dictionaries
-uneven number of bytes in peers and peers6 string responses../test/test_tracker.cpp:51CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-*/
-
-#include "test.hpp"
+uneven number of bytes in peers and peers6 string responses../test/test_tracker.cpp:57#include "test.hpp"
#include "setup_transfer.hpp"
#include "udp_tracker.hpp"
+#include "settings.hpp"
#include "libtorrent/alert.hpp"
+#include "libtorrent/peer_info.hpp" // for peer_list_entry
+#include "libtorrent/broadcast_socket.hpp" // for supports_ipv6
+#include "libtorrent/alert_types.hpp"
#include "libtorrent/session.hpp"
#include "libtorrent/error_code.hpp"
#include "libtorrent/tracker_manager.hpp"
#include "libtorrent/http_tracker_connection.hpp" // for parse_tracker_response
+#include "libtorrent/torrent_info.hpp"
+#include "libtorrent/announce_entry.hpp"
#include <fstream>
@@ -4711,7 +4499,7 @@ TORRENT_TEST(parse_peers4)
error_code ec;
tracker_response resp = parse_tracker_response(response, sizeof(response) - 1
, ec, false, sha1_hash());
- | ||
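Most of the failure paths listed in the test_tracker.cpp TODO above can be driven through parse_tracker_response() the same way the parse_peers4 test right above does it. A sketch of the "invalid bencoding" case; since the exact error code reported may differ, only the presence of an error is asserted:

    TORRENT_TEST(parse_invalid_bencoding)
    {
        char const response[] = "not a bencoded dictionary";
        error_code ec;
        tracker_response resp = parse_tracker_response(response
            , sizeof(response) - 1, ec, false, sha1_hash());
        TEST_CHECK(ec); // parsing must fail
        TEST_EQUAL(int(resp.peers.size()), 0);
    }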
relevance 0 | ../test/test_transfer.cpp:297 | factor out the disk-full test into its own unit test |
factor out the disk-full test into its own unit test../test/test_transfer.cpp:297 print_alerts(ses1, "ses1", true, true, true, &on_alert);
+ | ||
relevance 0 | ../test/test_transfer.cpp:298 | factor out the disk-full test into its own unit test |
factor out the disk-full test into its own unit test../test/test_transfer.cpp:298 print_alerts(ses1, "ses1", true, true, true, &on_alert);
print_alerts(ses2, "ses2", true, true, true, &on_alert);
if (i % 10 == 0)
@@ -4762,7 +4550,7 @@ TORRENT_TEST(parse_peers4)
TEST_CHECK(tor2.status().is_finished == false);
fprintf(stderr, "disconnects: %d\n", peer_disconnects);
TEST_CHECK(peer_disconnects >= 2);
- | ||
relevance 0 | ../test/test_upnp.cpp:108 | store the log and verify that some key messages are there |
store the log and verify that some key messages are there../test/test_upnp.cpp:108 "USN:uuid:000f-66d6-7296000099dc::upnp:rootdevice\r\n"
+ | ||
relevance 0 | ../test/test_upnp.cpp:108 | store the log and verify that some key messages are there |
store the log and verify that some key messages are there../test/test_upnp.cpp:108 "USN:uuid:000f-66d6-7296000099dc::upnp:rootdevice\r\n"
"Location: http://127.0.0.1:%d/upnp.xml\r\n"
"Server: Custom/1.0 UPnP/1.0 Proc/Ver\r\n"
"EXT:\r\n"
@@ -4813,7 +4601,7 @@ void run_upnp_test(char const* root_filename, char const* router_model, char con
error_code ec;
load_file(root_filename, buf, ec);
buf.push_back(0);
- | ||
relevance 0 | ../test/web_seed_suite.cpp:369 | file hashes don't work with the new torrent creator reading async |
file hashes don't work with the new torrent creator reading async../test/web_seed_suite.cpp:369 // corrupt the files now, so that the web seed will be banned
+ | ||
relevance 0 | ../test/web_seed_suite.cpp:374 | file hashes don't work with the new torrent creator reading async |
file hashes don't work with the new torrent creator reading async../test/web_seed_suite.cpp:374 // corrupt the files now, so that the web seed will be banned
if (test_url_seed)
{
create_random_files(combine_path(save_path, "torrent_dir"), file_sizes, sizeof(file_sizes)/sizeof(file_sizes[0]));
@@ -4864,10 +4652,10 @@ void run_upnp_test(char const* root_filename, char const* router_model, char con
pack.set_bool(settings_pack::enable_lsd, false);
pack.set_bool(settings_pack::enable_natpmp, false);
pack.set_bool(settings_pack::enable_upnp, false);
- | ||
relevance 0 | ../src/block_cache.cpp:959 | it's somewhat expensive to iterate over this linked list. Presumably because of the random access of memory. It would be nice if pieces with no evictable blocks weren't in this list |
it's somewhat expensive
+ | ||
relevance 0 | ../src/block_cache.cpp:987 | it's somewhat expensive to iterate over this linked list. Presumably because of the random access of memory. It would be nice if pieces with no evictable blocks weren't in this list |
it's somewhat expensive
to iterate over this linked list. Presumably because of the random
access of memory. It would be nice if pieces with no evictable blocks
-weren't in this list../src/block_cache.cpp:959 }
+weren't in this list../src/block_cache.cpp:987 }
else if (m_last_cache_op == ghost_hit_lru1)
{
// when we insert new items or move things from L1 to L2
@@ -4887,7 +4675,7 @@ weren't in this list../src/block_cache.cpp:959 for (list_iterator i = lru_list[end]->iterate(); i.get() && num > 0;)
+ for (list_iterator<cached_piece_entry> i = lru_list[end]->iterate(); i.get() && num > 0;)
{
cached_piece_entry* pe = reinterpret_cast<cached_piece_entry*>(i.get());
TORRENT_PIECE_ASSERT(pe->in_use, pe);
@@ -4918,7 +4706,7 @@ weren't in this list../src/block_cache.cpp:959 | ||
relevance 0 | ../src/block_cache.cpp:1023 | this should probably only be done every n:th time |
this should probably only be done every n:th time../src/block_cache.cpp:1023 }
+ | ||
relevance 0 | ../src/block_cache.cpp:1051 | this should probably only be done every n:th time |
this should probably only be done every n:th time../src/block_cache.cpp:1051 }
if (pe->ok_to_evict())
{
@@ -4942,7 +4730,7 @@ weren't in this list../src/block_cache.cpp:959 | ||
relevance 0 | ../src/block_cache.cpp:1778 | create a holder for refcounts that automatically decrement |
create a holder for refcounts that automatically decrement../src/block_cache.cpp:1778 }
+ | ||
relevance 0 | ../src/block_cache.cpp:1810 | create a holder for refcounts that automatically decrement |
create a holder for refcounts that automatically decrement../src/block_cache.cpp:1810 }
j->buffer.disk_block = allocate_buffer("send buffer");
if (j->buffer.disk_block == 0) return -2;
@@ -5020,7 +4808,7 @@ bool block_cache::maybe_free_piece(cached_piece_entry* pe)
boost::shared_ptr<piece_manager> s = pe->storage;
- | ||
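The holder the block_cache TODO above asks for is a small RAII guard. A hypothetical sketch of the idea; the refcounting calls mirror dec_block_refcount() as used later in this report, but the class itself does not exist in the tree, and a production version would also have to handle inc_block_refcount() failing:

    // takes a reference on a cached block on construction and releases it when
    // the guard goes out of scope, so early returns cannot leak a reference
    struct block_ref_guard
    {
        block_ref_guard(block_cache& c, cached_piece_entry* pe, int block, int reason)
            : m_cache(c), m_pe(pe), m_block(block), m_reason(reason)
        { m_cache.inc_block_refcount(m_pe, m_block, m_reason); }

        ~block_ref_guard()
        { m_cache.dec_block_refcount(m_pe, m_block, m_reason); }

    private:
        block_ref_guard(block_ref_guard const&);            // non-copyable
        block_ref_guard& operator=(block_ref_guard const&); // non-assignable
        block_cache& m_cache;
        cached_piece_entry* m_pe;
        int m_block;
        int m_reason;
    };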
relevance 0 | ../src/bt_peer_connection.cpp:679 | this could be optimized using knuth morris pratt |
this could be optimized using knuth morris pratt../src/bt_peer_connection.cpp:679 }
+ | ||
relevance 0 | ../src/bt_peer_connection.cpp:691 | this could be optimized using knuth morris pratt |
this could be optimized using knuth morris pratt../src/bt_peer_connection.cpp:691 }
m_rc4->set_incoming_key(&remote_key[0], 20);
m_rc4->set_outgoing_key(&local_key[0], 20);
@@ -5071,13 +4859,13 @@ bool block_cache::maybe_free_piece(cached_piece_entry* pe)
// }
// no complete sync
- | ||
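For reference on the bt_peer_connection TODO above: the encrypted-handshake code scans the receive buffer for a fixed synchronization pattern, and Knuth-Morris-Pratt avoids re-examining bytes after a partial match. A self-contained sketch of the technique (not the code in bt_peer_connection.cpp):

    #include <vector>

    // returns the offset of the first occurrence of pattern in buffer, or -1.
    // runs in O(buf_len + pat_len) instead of O(buf_len * pat_len)
    int kmp_find(char const* buf, int buf_len, char const* pat, int pat_len)
    {
        if (pat_len == 0) return 0;
        // failure function: length of the longest proper prefix of pat[0..i]
        // that is also a suffix of it
        std::vector<int> fail(pat_len, 0);
        for (int i = 1, k = 0; i < pat_len; ++i)
        {
            while (k > 0 && pat[i] != pat[k]) k = fail[k - 1];
            if (pat[i] == pat[k]) ++k;
            fail[i] = k;
        }
        for (int i = 0, k = 0; i < buf_len; ++i)
        {
            while (k > 0 && buf[i] != pat[k]) k = fail[k - 1];
            if (buf[i] == pat[k]) ++k;
            if (k == pat_len) return i - pat_len + 1;
        }
        return -1;
    }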
relevance 0 | ../src/bt_peer_connection.cpp:2249 | if we're finished, send upload_only message |
if we're finished, send upload_only message../src/bt_peer_connection.cpp:2249 }
+ | ||
relevance 0 | ../src/bt_peer_connection.cpp:2261 | if we're finished, send upload_only message |
if we're finished, send upload_only message../src/bt_peer_connection.cpp:2261 }
peer_log(peer_log_alert::outgoing_message, "BITFIELD"
, "%s", bitfield_string.c_str());
#endif
m_sent_bitfield = true;
- send_buffer(msg, packet_size);
+ send_buffer(reinterpret_cast<char const*>(msg), packet_size);
stats_counters().inc_stats_counter(counters::num_outgoing_bitfield);
@@ -5122,7 +4910,7 @@ bool block_cache::maybe_free_piece(cached_piece_entry* pe)
? m_settings.get_str(settings_pack::user_agent)
: m_settings.get_str(settings_pack::handshake_client_version);
}
- | ||
relevance 0 | ../src/choker.cpp:336 | optimize this using partial_sort or something. We don't need to sort the entire list |
optimize this using partial_sort or something. We don't need
+ | ||
relevance 0 | ../src/choker.cpp:336 | optimize this using partial_sort or something. We don't need to sort the entire list |
optimize this using partial_sort or something. We don't need
to sort the entire list../src/choker.cpp:336 return upload_slots;
}
@@ -5144,7 +4932,7 @@ to sort the entire list../src/choker.cpp:336
- | ||
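What the choker.cpp TODO above suggests, in isolation: std::partial_sort only orders the first k elements, which is all the unchoke logic needs when only upload_slots peers end up unchoked. A sketch under the assumption that peers and upload_rate_compare are the ones quoted above:

    // order only the first upload_slots entries; everything past that point is
    // left in unspecified order, which is fine since those peers stay choked
    int const slots = (std::min)(upload_slots, int(peers.size()));
    std::partial_sort(peers.begin(), peers.begin() + slots, peers.end()
        , boost::bind(&upload_rate_compare, _1, _2));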
relevance 0 | ../src/choker.cpp:339 | make the comparison function a free function and move it into this cpp file |
make the comparison function a free function and move it
+ | ||
relevance 0 | ../src/choker.cpp:339 | make the comparison function a free function and move it into this cpp file |
make the comparison function a free function and move it
into this cpp file../src/choker.cpp:339 }
// ==== rate-based ====
@@ -5168,7 +4956,7 @@ into this cpp file../src/choker.cpp:339 std::sort(peers.begin(), peers.end()
, boost::bind(&upload_rate_compare, _1, _2));
- | ||
relevance 0 | ../src/choker.cpp:344 | make configurable |
make configurable../src/choker.cpp:344 //
+ | ||
relevance 0 | ../src/choker.cpp:344 | make configurable |
make configurable../src/choker.cpp:344 //
// The rate based unchoker looks at our upload rate to peers, and find
// a balance between number of upload slots and the rate we achieve. The
// intention is to not spread upload bandwidth too thin, but also to not
@@ -5201,7 +4989,7 @@ into this cpp file../src/choker.cpp:339 | ||
relevance 0 | ../src/choker.cpp:358 | make configurable |
make configurable../src/choker.cpp:358 // it purely based on the current state of our peers.
+ | ||
relevance 0 | ../src/choker.cpp:358 | make configurable |
make configurable../src/choker.cpp:358 // it purely based on the current state of our peers.
upload_slots = 0;
@@ -5252,7 +5040,7 @@ into this cpp file../src/choker.cpp:339 | ||
relevance 0 | ../src/create_torrent.cpp:285 | this should probably be optional |
this should probably be optional../src/create_torrent.cpp:285 counters cnt;
+ | ||
relevance 0 | ../src/create_torrent.cpp:286 | this should probably be optional |
this should probably be optional../src/create_torrent.cpp:286 counters cnt;
disk_io_thread disk_thread(ios, cnt, 0);
disk_thread.set_num_threads(1);
@@ -5266,7 +5054,7 @@ into this cpp file../src/choker.cpp:339 | ||
relevance 0 | ../src/disk_buffer_pool.cpp:319 | perhaps we should sort the buffers here? |
perhaps we should sort the buffers here?../src/disk_buffer_pool.cpp:319 mutex::scoped_lock l(m_pool_mutex);
+ | ||
relevance 0 | ../src/disk_buffer_pool.cpp:340 | perhaps we should sort the buffers here? |
perhaps we should sort the buffers here?../src/disk_buffer_pool.cpp:340 mutex::scoped_lock l(m_pool_mutex);
for (int i = 0; i < iov_len; ++i)
{
iov[i].iov_base = allocate_buffer_impl(l, "pending read");
@@ -5314,7 +5102,7 @@ into this cpp file../src/choker.cpp:339 mutex::scoped_lock l(m_pool_mutex);
for (int i = 0; i < iov_len; ++i)
- free_buffer_impl((char*)iov[i].iov_base, l);
+ free_buffer_impl(static_cast<char*>(iov[i].iov_base), l);
check_buffer_level(l);
}
@@ -5354,8 +5142,8 @@ into this cpp file../src/choker.cpp:339 | ||
relevance 0 | ../src/disk_io_thread.cpp:884 | it would be nice to optimize this by having the cache pieces also ordered by |
it would be nice to optimize this by having the cache
-pieces also ordered by../src/disk_io_thread.cpp:884 // from disk_io_thread::do_delete, which is a fence job and should
+ | ||
relevance 0 | ../src/disk_io_thread.cpp:889 | it would be nice to optimize this by having the cache pieces also ordered by |
it would be nice to optimize this by having the cache
+pieces also ordered by../src/disk_io_thread.cpp:889 // from disk_io_thread::do_delete, which is a fence job and should
// have any other jobs active, i.e. there should not be any references
// keeping pieces or blocks alive
if ((flags & flush_delete_cache) && (flags & flush_expect_clear))
@@ -5397,29 +5185,29 @@ pieces also ordered by../src/disk_io_thread.cpp:884 | ||
relevance 0 | ../src/disk_io_thread.cpp:927 | instead of doing a lookup each time through the loop, save cached_piece_entry pointers with piece_refcount incremented to pin them |
instead of doing a lookup each time through the loop, save
-cached_piece_entry pointers with piece_refcount incremented to pin them../src/disk_io_thread.cpp:927 // this is why we pass in 1 as cont_block to the flushing functions
- void disk_io_thread::try_flush_write_blocks(int num, tailqueue& completed_jobs
+ | ||
relevance 0 | ../src/disk_io_thread.cpp:932 | instead of doing a lookup each time through the loop, save cached_piece_entry pointers with piece_refcount incremented to pin them |
instead of doing a lookup each time through the loop, save
+cached_piece_entry pointers with piece_refcount incremented to pin them../src/disk_io_thread.cpp:932 // this is why we pass in 1 as cont_block to the flushing functions
+ void disk_io_thread::try_flush_write_blocks(int num, jobqueue_t& completed_jobs
, mutex::scoped_lock& l)
{
DLOG("try_flush_write_blocks: %d\n", num);
- list_iterator range = m_disk_cache.write_lru_pieces();
+ list_iterator<cached_piece_entry> range = m_disk_cache.write_lru_pieces();
std::vector<std::pair<piece_manager*, int> > pieces;
pieces.reserve(m_disk_cache.num_write_lru_pieces());
- for (list_iterator p = range; p.get() && num > 0; p.next())
+ for (list_iterator<cached_piece_entry> p = range; p.get() && num > 0; p.next())
{
- cached_piece_entry* e = (cached_piece_entry*)p.get();
+ cached_piece_entry* e = p.get();
if (e->num_dirty == 0) continue;
pieces.push_back(std::make_pair(e->storage.get(), int(e->piece)));
}
@@ -5445,7 +5233,7 @@ cached_piece_entry pointers with piece_refcount incremented to pin them
// when the write cache is under high pressure, it is likely
// counter productive to actually do this, since a piece may
- // not have had its flush_hashed job run on it
+ // not have had its flush_hashed job run on it
// so only do it if no other thread is currently flushing
if (num == 0 || m_stats_counters[counters::num_writing_threads] > 0) return;
@@ -5458,10 +5246,10 @@ cached_piece_entry pointers with piece_refcount incremented to pin them
cached_piece_entry* pe = m_disk_cache.find_piece(i->first, i->second);
if (pe == NULL) continue;
if (pe->num_dirty == 0) continue;
- | ||
relevance 0 | ../src/disk_io_thread.cpp:1106 | instead of doing this. pass in the settings to each storage_interface call. Each disk thread could hold its most recent understanding of the settings in a shared_ptr, and update it every time it wakes up from a job. That way each access to the settings won't require a mutex to be held. |
instead of doing this. pass in the settings to each storage_interface
+ | ||
relevance 0 | ../src/disk_io_thread.cpp:1111 | instead of doing this. pass in the settings to each storage_interface call. Each disk thread could hold its most recent understanding of the settings in a shared_ptr, and update it every time it wakes up from a job. That way each access to the settings won't require a mutex to be held. |
instead of doing this. pass in the settings to each storage_interface
call. Each disk thread could hold its most recent understanding of the settings
in a shared_ptr, and update it every time it wakes up from a job. That way
-each access to the settings won't require a mutex to be held.../src/disk_io_thread.cpp:1106 {
+each access to the settings won't require a mutex to be held.../src/disk_io_thread.cpp:1111 {
INVARIANT_CHECK;
TORRENT_ASSERT(j->next == 0);
TORRENT_ASSERT((j->flags & disk_io_job::in_progress) || !j->storage);
@@ -5500,14 +5288,14 @@ each access to the settings won't require a mutex to be held.../src/dis
if (ret == retry_job)
{
- mutex::scoped_lock l(m_job_mutex);
+ mutex::scoped_lock l2(m_job_mutex);
// to avoid busy looping here, give up
// our quanta in case there aren't any other
// jobs to run in between
- | ||
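The scheme described in the disk_io_thread TODO above can be sketched as follows. The class and member names are hypothetical; the point is that each disk thread copies the shared_ptr once when it wakes up for a job and then reads the snapshot for the rest of the job without holding any lock:

    #include <boost/shared_ptr.hpp>
    #include "libtorrent/thread.hpp"
    #include "libtorrent/aux_/session_settings.hpp"

    struct settings_holder
    {
        // called from the main thread whenever settings change
        void update(boost::shared_ptr<libtorrent::aux::session_settings const> s)
        {
            libtorrent::mutex::scoped_lock l(m_mutex);
            m_current = s;
        }

        // called by a disk thread when it picks up a job; the returned snapshot
        // stays valid and immutable for as long as the thread holds it
        boost::shared_ptr<libtorrent::aux::session_settings const> acquire() const
        {
            libtorrent::mutex::scoped_lock l(m_mutex);
            return m_current;
        }

    private:
        mutable libtorrent::mutex m_mutex;
        boost::shared_ptr<libtorrent::aux::session_settings const> m_current;
    };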
relevance 0 | ../src/disk_io_thread.cpp:1134 | a potentially more efficient solution would be to have a special queue for retry jobs, that's only ever run when a job completes, in any thread. It would only work if counters::num_running_disk_jobs > 0 |
a potentially more efficient solution would be to have a special
+ | ||
relevance 0 | ../src/disk_io_thread.cpp:1139 | a potentially more efficient solution would be to have a special queue for retry jobs, that's only ever run when a job completes, in any thread. It would only work if counters::num_running_disk_jobs > 0 |
a potentially more efficient solution would be to have a special
queue for retry jobs, that's only ever run when a job completes, in
-any thread. It would only work if counters::num_running_disk_jobs > 0../src/disk_io_thread.cpp:1134
+any thread. It would only work if counters::num_running_disk_jobs > 0../src/disk_io_thread.cpp:1139
time_point start_time = clock_type::now();
m_stats_counters.inc_stats_counter(counters::num_running_disk_jobs, 1);
@@ -5522,42 +5310,42 @@ any thread. It would only work if counters::num_running_disk_jobs > 0..
if (ret == retry_job)
{
- mutex::scoped_lock l(m_job_mutex);
+ mutex::scoped_lock l2(m_job_mutex);
// to avoid busy looping here, give up
// our quanta in case there aren't any other
// jobs to run in between
-
+
TORRENT_ASSERT((j->flags & disk_io_job::in_progress) || !j->storage);
-
+
bool need_sleep = m_queued_jobs.empty();
m_queued_jobs.push_back(j);
- l.unlock();
+ l2.unlock();
if (need_sleep) sleep(0);
return;
}
-#if TORRENT_USE_ASSERT
- | ||
relevance 0 | ../src/disk_io_thread.cpp:1148 | it should clear the hash state even when there's an error, right? |
it should clear the hash state even when there's an error, right?../src/disk_io_thread.cpp:1148 m_stats_counters.inc_stats_counter(counters::num_running_disk_jobs, -1);
+#if TORRENT_USE_ASSERTS
+ | ||
relevance 0 | ../src/disk_io_thread.cpp:1153 | it should clear the hash state even when there's an error, right? |
it should clear the hash state even when there's an error, right?../src/disk_io_thread.cpp:1153 m_stats_counters.inc_stats_counter(counters::num_running_disk_jobs, -1);
if (ret == retry_job)
{
- mutex::scoped_lock l(m_job_mutex);
+ mutex::scoped_lock l2(m_job_mutex);
// to avoid busy looping here, give up
// our quanta in case there aren't any other
// jobs to run in between
-
+
TORRENT_ASSERT((j->flags & disk_io_job::in_progress) || !j->storage);
-
+
bool need_sleep = m_queued_jobs.empty();
m_queued_jobs.push_back(j);
- l.unlock();
+ l2.unlock();
if (need_sleep) sleep(0);
return;
}
-#if TORRENT_USE_ASSERT
+#if TORRENT_USE_ASSERTS
if (j->action == disk_io_job::hash && !j->error.ec)
{
// a hash job should never return without clearing pe->hash
@@ -5589,8 +5377,8 @@ any thread. It would only work if counters::num_running_disk_jobs > 0..
j->error.operation = storage_error::alloc_cache_piece;
return -1;
}
- | ||
relevance 0 | ../src/disk_io_thread.cpp:1849 | maybe the tailqueue_iterator should contain a pointer-pointer instead and have an unlink function |
maybe the tailqueue_iterator should contain a pointer-pointer
-instead and have an unlink function../src/disk_io_thread.cpp:1849 j->callback = handler;
+ | ||
relevance 0 | ../src/disk_io_thread.cpp:1854 | maybe the tailqueue_iterator should contain a pointer-pointer instead and have an unlink function |
maybe the tailqueue_iterator should contain a pointer-pointer
+instead and have an unlink function../src/disk_io_thread.cpp:1854 j->callback = handler;
add_fence_job(storage, j);
}
@@ -5605,17 +5393,17 @@ instead and have an unlink function../src/disk_io_thread.cpp:1849<
#endif
// remove cache blocks belonging to this torrent
- tailqueue completed_jobs;
+ jobqueue_t completed_jobs;
// remove outstanding jobs belonging to this torrent
mutex::scoped_lock l2(m_job_mutex);
- disk_io_job* qj = (disk_io_job*)m_queued_jobs.get_all();
- tailqueue to_abort;
+ disk_io_job* qj = m_queued_jobs.get_all();
+ jobqueue_t to_abort;
while (qj)
{
- disk_io_job* next = (disk_io_job*)qj->next;
+ disk_io_job* next = qj->next;
#if TORRENT_USE_ASSERTS
qj->next = NULL;
#endif
@@ -5641,8 +5429,8 @@ instead and have an unlink function../src/disk_io_thread.cpp:1849<
if (completed_jobs.size())
add_completed_jobs(completed_jobs);
- | ||
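The pointer-to-pointer idea in the tailqueue_iterator TODO above is the usual trick for unlinking from a singly linked list without tracking a previous node. A generic sketch, not tied to tailqueue's actual types:

    #include <cstddef>

    struct node { node* next; };

    // keeps the address of the link that points at the current element, so
    // removing the current element is a single store and iteration continues
    // from what used to be the next element
    struct pp_iterator
    {
        explicit pp_iterator(node** head) : m_link(head) {}
        node* get() const { return *m_link; }
        void next() { if (*m_link) m_link = &(*m_link)->next; }
        node* unlink()
        {
            node* n = *m_link;
            if (n) { *m_link = n->next; n->next = NULL; }
            return n;
        }
    private:
        node** m_link;
    };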
relevance 0 | ../src/disk_io_thread.cpp:2111 | this is potentially very expensive. One way to solve it would be to have a fence for just this one piece. |
this is potentially very expensive. One way to solve
-it would be to have a fence for just this one piece.../src/disk_io_thread.cpp:2111 }
+ | ||
relevance 0 | ../src/disk_io_thread.cpp:2119 | this is potentially very expensive. One way to solve it would be to have a fence for just this one piece. |
this is potentially very expensive. One way to solve
+it would be to have a fence for just this one piece.../src/disk_io_thread.cpp:2119 }
void disk_io_thread::async_clear_piece(piece_manager* storage, int index
, boost::function<void(disk_io_job const*)> const& handler)
@@ -5682,7 +5470,7 @@ it would be to have a fence for just this one piece.../src/disk_io_thre
// never be the case when this function is used
// in fact, no jobs should really be hung on this piece
// at this point
- tailqueue jobs;
+ jobqueue_t jobs;
bool ok = m_disk_cache.evict_piece(pe, jobs);
TORRENT_PIECE_ASSERT(ok, pe);
TORRENT_UNUSED(ok);
@@ -5693,7 +5481,7 @@ it would be to have a fence for just this one piece.../src/disk_io_thre
{
if (!pe->hash) return;
if (pe->hashing) return;
- | ||
relevance 0 | ../src/disk_io_thread.cpp:2373 | we should probably just hang the job on the piece and make sure the hasher gets kicked |
we should probably just hang the job on the piece and make sure the hasher gets kicked../src/disk_io_thread.cpp:2373 if (pe == NULL)
+ | ||
relevance 0 | ../src/disk_io_thread.cpp:2381 | we should probably just hang the job on the piece and make sure the hasher gets kicked |
we should probably just hang the job on the piece and make sure the hasher gets kicked../src/disk_io_thread.cpp:2381 if (pe == NULL)
{
int cache_state = (j->flags & disk_io_job::volatile_read)
? cached_piece_entry::volatile_read_lru
@@ -5744,8 +5532,8 @@ it would be to have a fence for just this one piece.../src/disk_io_thre
TORRENT_PIECE_ASSERT(ph->offset % block_size == 0, pe);
for (int i = ph->offset / block_size; i < blocks_in_piece; ++i)
{
- | ||
relevance 0 | ../src/disk_io_thread.cpp:2440 | introduce a holder class that automatically increments and decrements the piece_refcount |
introduce a holder class that automatically increments
-and decrements the piece_refcount../src/disk_io_thread.cpp:2440 {
+ | ||
relevance 0 | ../src/disk_io_thread.cpp:2448 | introduce a holder class that automatically increments and decrements the piece_refcount |
introduce a holder class that automatically increments
+and decrements the piece_refcount../src/disk_io_thread.cpp:2448 {
file::iovec_t iov;
iov.iov_len = (std::min)(block_size, piece_size - ph->offset);
@@ -5767,8 +5555,8 @@ and decrements the piece_refcount../src/disk_io_thread.cpp:2440
// decrement the refcounts of the blocks we just hashed
- for (int i = 0; i < num_locked_blocks; ++i)
- m_disk_cache.dec_block_refcount(pe, locked_blocks[i], block_cache::ref_hashing);
+ for (int k = 0; k < num_locked_blocks; ++k)
+ m_disk_cache.dec_block_refcount(pe, locked_blocks[k], block_cache::ref_hashing);
--pe->piece_refcount;
pe->hashing = false;
@@ -5792,12 +5580,12 @@ and decrements the piece_refcount../src/disk_io_thread.cpp:2440 | ||
relevance 0 | ../src/disk_io_thread.cpp:2688 | it would be nice to not have to lock the mutex every turn through this loop |
it would be nice to not have to lock the mutex every
-turn through this loop../src/disk_io_thread.cpp:2688 j->error.ec = error::no_memory;
+ | ||
relevance 0 | ../src/disk_io_thread.cpp:2696 | it would be nice to not have to lock the mutex every turn through this loop |
it would be nice to not have to lock the mutex every
+turn through this loop../src/disk_io_thread.cpp:2696 j->error.ec = error::no_memory;
j->error.operation = storage_error::alloc_cache_piece;
return -1;
}
@@ -5848,8 +5636,7 @@ turn through this loop../src/disk_io_thread.cpp:2688 | ||
relevance 0 | ../src/file_progress.cpp:136 | it would be nice to not depend on alert_manager here |
it would be nice to not depend on alert_manager here../src/file_progress.cpp:136 , alert_manager* alerts, torrent_handle const& h)
- {
+ | ||
relevance 0 | ../src/file_progress.cpp:137 | it would be nice to not depend on alert_manager here |
it would be nice to not depend on alert_manager here../src/file_progress.cpp:137 {
if (m_file_progress.empty())
return;
@@ -5862,7 +5649,8 @@ turn through this loop../src/disk_io_thread.cpp:2688 | ||
relevance 0 | ../src/http_tracker_connection.cpp:185 | support this somehow |
support this somehow../src/http_tracker_connection.cpp:185 url += escape_string(id.c_str(), id.length());
- }
-
-#if TORRENT_USE_I2P
- if (i2p && tracker_req().i2pconn)
- {
- url += "&ip=";
- url += escape_string(tracker_req().i2pconn->local_endpoint().c_str()
- , tracker_req().i2pconn->local_endpoint().size());
- url += ".i2p";
+ | ||
relevance 0 | ../src/http_tracker_connection.cpp:186 | support this somehow |
support this somehow../src/http_tracker_connection.cpp:186 {
+ if (tracker_req().i2pconn->local_endpoint().empty())
+ {
+ fail(error_code(errors::no_i2p_endpoint), -1, "Waiting for i2p acceptor from SAM bridge", 5);
+ return;
+ }
+ else
+ {
+ url += "&ip=" + tracker_req ().i2pconn->local_endpoint () + ".i2p";
+ }
}
else
#endif
@@ -5950,8 +5738,8 @@ turn through this loop../src/disk_io_thread.cpp:2688 | ||
relevance 0 | ../src/metadata_transfer.cpp:358 | this is not safe. The torrent could be unloaded while we're still sending the metadata |
this is not safe. The torrent could be unloaded while
-we're still sending the metadata../src/metadata_transfer.cpp:358 = req_to_offset(req, (int)m_tp.metadata().left());
+ | ||
relevance 0 | ../src/metadata_transfer.cpp:361 | this is not safe. The torrent could be unloaded while we're still sending the metadata |
this is not safe. The torrent could be unloaded while
+we're still sending the metadata../src/metadata_transfer.cpp:361 = req_to_offset(req, int(m_tp.metadata().left()));
char msg[15];
char* ptr = msg;
@@ -5967,7 +5755,7 @@ we're still sending the metadata../src/metadata_transfer.cpp:358 | ||
relevance 0 | ../src/packet_buffer.cpp:180 | use compare_less_wrap for this comparison as well |
use compare_less_wrap for this comparison as well../src/packet_buffer.cpp:180 void** new_storage = (void**)malloc(sizeof(void*) * new_size);
+ | ||
relevance 0 | ../src/packet_buffer.cpp:180 | use compare_less_wrap for this comparison as well |
use compare_less_wrap for this comparison as well../src/packet_buffer.cpp:180 void** new_storage = static_cast<void**>(malloc(sizeof(void*) * new_size));
#ifndef BOOST_NO_EXCEPTIONS
if (new_storage == NULL) throw std::bad_alloc();
#endif
@@ -6019,7 +5807,7 @@ we're still sending the metadata../src/metadata_transfer.cpp:358 if (idx >= m_first + m_capacity)
@@ -6053,7 +5841,7 @@ we're still sending the metadata ../src/metadata_transfer.cpp:358 | ||
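For context on the packet_buffer TODO above: a wrap-aware "less than" for sequence numbers compares the two wrap-around distances rather than the raw values. A sketch along the lines of the comparison used elsewhere in libtorrent, shown here as a standalone function (treat the exact semantics as an assumption, not a quote of the real implementation):

    #include <boost/cstdint.hpp>

    // true if lhs precedes rhs in modulo-(mask+1) arithmetic, i.e. the distance
    // walking upwards from lhs to rhs is shorter than walking downwards
    bool compare_less_wrap(boost::uint32_t lhs, boost::uint32_t rhs
        , boost::uint32_t mask)
    {
        boost::uint32_t const dist_up = (rhs - lhs) & mask;
        boost::uint32_t const dist_down = (lhs - rhs) & mask;
        return dist_up < dist_down;
    }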
relevance 0 | ../src/part_file.cpp:252 | what do we do if someone is currently reading from the disk from this piece? does it matter? Since we won't actively erase the data from disk, but it may be overwritten soon, it's probably not that big of a deal |
what do we do if someone is currently reading from the disk
+ | ||
relevance 0 | ../src/part_file.cpp:252 | what do we do if someone is currently reading from the disk from this piece? does it matter? Since we won't actively erase the data from disk, but it may be overwritten soon, it's probably not that big of a deal |
what do we do if someone is currently reading from the disk
from this piece? does it matter? Since we won't actively erase the
data from disk, but it may be overwritten soon, it's probably not that
big of a deal../src/part_file.cpp:252 if (((mode & file::rw_mask) != file::read_only)
@@ -6107,7 +5895,7 @@ big of a deal../src/part_file.cpp:252 | ||
relevance 0 | ../src/part_file.cpp:353 | instead of rebuilding the whole file header and flushing it, update the slot entries as we go |
instead of rebuilding the whole file header
+ | ||
relevance 0 | ../src/part_file.cpp:353 | instead of rebuilding the whole file header and flushing it, update the slot entries as we go |
instead of rebuilding the whole file header
and flushing it, update the slot entries as we go../src/part_file.cpp:353 if (block_to_copy == m_piece_size)
{
m_free_slots.push_back(i->second);
@@ -6141,7 +5929,7 @@ and flushing it, update the slot entries as we go../src/part_file.cpp:3
// part file, remove it
std::string p = combine_path(m_path, m_name);
remove(p, ec);
-
+
if (ec == boost::system::errc::no_such_file_or_directory)
ec.clear();
return;
@@ -6154,21 +5942,21 @@ and flushing it, update the slot entries as we go../src/part_file.cpp:3
using namespace libtorrent::detail;
- char* ptr = (char*)header.get();
+ char* ptr = reinterpret_cast<char*>(header.get());
write_uint32(m_max_pieces, ptr);
write_uint32(m_piece_size, ptr);
- | ||
relevance 0 | ../src/peer_connection.cpp:512 | it would be neat to be able to print this straight into the alert's stack allocator |
it would be neat to be able to print this straight into the
-alert's stack allocator../src/peer_connection.cpp:512 if (!interested) send_not_interested();
- else t->peer_is_interesting(*this);
-
- TORRENT_ASSERT(in_handshake() || is_interesting() == interested);
-
- disconnect_if_redundant();
- }
+ | ||
relevance 0 | ../src/peer_connection.cpp:522 | it would be neat to be able to print this straight into the alert's stack allocator |
it would be neat to be able to print this straight into the
+alert's stack allocator../src/peer_connection.cpp:522 }
#ifndef TORRENT_DISABLE_LOGGING
+ void peer_connection::peer_log(peer_log_alert::direction_t direction
+ , char const* event) const
+ {
+ peer_log(direction, event, "");
+ }
+
TORRENT_FORMAT(4,5)
void peer_connection::peer_log(peer_log_alert::direction_t direction
, char const* event, char const* fmt, ...) const
@@ -6211,7 +5999,7 @@ alert's stack allocator../src/peer_connection.cpp:512 | ||
relevance 0 | ../src/peer_connection.cpp:1013 | this should be the global download rate |
this should be the global download rate../src/peer_connection.cpp:1013
+ | ||
relevance 0 | ../src/peer_connection.cpp:1025 | this should be the global download rate |
this should be the global download rate../src/peer_connection.cpp:1025
int rate = 0;
// if we haven't received any data recently, the current download rate
@@ -6262,7 +6050,7 @@ alert's stack allocator../src/peer_connection.cpp:512 | ||
relevance 0 | ../src/peer_connection.cpp:3290 | sort the allowed fast set in priority order |
sort the allowed fast set in priority order../src/peer_connection.cpp:3290
+ | ||
relevance 0 | ../src/peer_connection.cpp:3329 | sort the allowed fast set in priority order |
sort the allowed fast set in priority order../src/peer_connection.cpp:3329
// if the peer has the piece and we want
// to download it, request it
if (int(m_have_piece.size()) > index
@@ -6289,7 +6077,7 @@ alert's stack allocator../src/peer_connection.cpp:512 | ||
relevance 0 | ../src/peer_connection.cpp:6092 | The stats checks can not be honored when authenticated encryption is in use because we may have encrypted data which we cannot authenticate yet |
The stats checks can not be honored when authenticated encryption is in use
-because we may have encrypted data which we cannot authenticate yet../src/peer_connection.cpp:6092 peer_log(peer_log_alert::incoming, "READ"
+ | ||
relevance 0 | ../src/peer_connection.cpp:3805 | it would be nice if none of this logic would leak outside of the torrent object) |
it would be nice if none of this logic would leak outside of
+the torrent object)../src/peer_connection.cpp:3805 }
+
+ void peer_connection::send_block_requests()
+ {
+ TORRENT_ASSERT(is_single_thread());
+ INVARIANT_CHECK;
+
+ boost::shared_ptr<torrent> t = m_torrent.lock();
+ TORRENT_ASSERT(t);
+
+ if (m_disconnecting) return;
+
+ if (t->graceful_pause() && m_outstanding_bytes == 0)
+ {
+#ifndef TORRENT_DISABLE_LOGGING
+ peer_log(peer_log_alert::info, "GRACEFUL_PAUSE", "NO MORE DOWNLOAD");
+#endif
+ disconnect(errors::torrent_paused, op_bittorrent);
+
+ // if this was the last connection, post the alert
+ if (t->num_peers() == 0)
+ {
+ if (t->alerts().should_post<torrent_paused_alert>())
+ t->alerts().emplace_alert<torrent_paused_alert>(t->get_handle());
+ }
+ return;
+ }
+
+ // we can't download pieces in these states
+ if (t->state() == torrent_status::checking_files
+ || t->state() == torrent_status::checking_resume_data
+ || t->state() == torrent_status::downloading_metadata
+ || t->state() == torrent_status::allocating)
+ return;
+
+ if (int(m_download_queue.size()) >= m_desired_queue_size
+ || t->upload_mode()) return;
+
+ bool empty_download_queue = m_download_queue.empty();
+
+ while (!m_request_queue.empty()
+ && (int(m_download_queue.size()) < m_desired_queue_size
+ || m_queued_time_critical > 0))
+ {
+ pending_block block = m_request_queue.front();
+
+ m_request_queue.erase(m_request_queue.begin());
+ if (m_queued_time_critical) --m_queued_time_critical;
+
+ // if we're a seed, we don't have a piece picker
+ // so we don't have to worry about invariants getting
+ | ||
relevance 0 | ../src/peer_connection.cpp:6150 | The stats checks can not be honored when authenticated encryption is in use because we may have encrypted data which we cannot authenticate yet |
The stats checks can not be honored when authenticated encryption is in use
+because we may have encrypted data which we cannot authenticate yet../src/peer_connection.cpp:6150#ifndef TORRENT_DISABLE_LOGGING
+ peer_log(peer_log_alert::incoming, "READ"
, "%d bytes", int(bytes_transferred));
#endif
// correct the dl quota usage, if not all of the buffer was actually read
@@ -6326,14 +6167,13 @@ because we may have encrypted data which we cannot authenticate yet../s
trancieve_ip_packet(bytes_in_loop, m_remote.address().is_v6());
return;
}
-
+
TORRENT_ASSERT(bytes_transferred > 0);
m_recv_buffer.received(bytes_transferred);
int bytes = bytes_transferred;
int sub_transferred = 0;
do {
- INVARIANT_CHECK;
#if 0
boost::int64_t cur_payload_dl = m_statistics.last_payload_downloaded();
boost::int64_t cur_protocol_dl = m_statistics.last_protocol_downloaded();
@@ -6350,7 +6190,7 @@ because we may have encrypted data which we cannot authenticate yet../s
m_statistics.last_protocol_downloaded() - cur_protocol_dl;
TORRENT_ASSERT(stats_diff == int(sub_transferred));
#endif
- if (m_disconnecting) return;
+ if (m_disconnecting) return;
} while (bytes > 0 && sub_transferred > 0);
@@ -6365,11 +6205,11 @@ because we may have encrypted data which we cannot authenticate yet../s
}
if (num_loops > read_loops) break;
- | ||
relevance 0 | ../src/piece_picker.cpp:2048 | this could probably be optimized by incrementally calling partial_sort to sort one more element in the list. Because chances are that we'll just need a single piece, and once we've picked from it we're done. Sorting the rest of the list in that case is a waste of time. |
this could probably be optimized by incrementally
+ | ||
relevance 0 | ../src/piece_picker.cpp:2052 | this could probably be optimized by incrementally calling partial_sort to sort one more element in the list. Because chances are that we'll just need a single piece, and once we've picked from it we're done. Sorting the rest of the list in that case is a waste of time. |
this could probably be optimized by incrementally
calling partial_sort to sort one more element in the list. Because
chances are that we'll just need a single piece, and once we've
picked from it we're done. Sorting the rest of the list in that
-case is a waste of time.../src/piece_picker.cpp:2048 , end(m_downloads[piece_pos::piece_downloading].end()); i != end; ++i)
+case is a waste of time.../src/piece_picker.cpp:2052 , end(m_downloads[piece_pos::piece_downloading].end()); i != end; ++i)
{
pc.inc_stats_counter(counters::piece_picker_partial_loops);
@@ -6420,9 +6260,9 @@ case is a waste of time.../src/piece_picker.cpp:2048 | ||
relevance 0 | ../src/piece_picker.cpp:2553 | when expanding pieces for cache stripe reasons, the !downloading condition doesn't make much sense |
when expanding pieces for cache stripe reasons,
-the !downloading condition doesn't make much sense../src/piece_picker.cpp:2553 TORRENT_ASSERT(index < (int)m_piece_map.size() || m_piece_map.empty());
- if (index+1 == (int)m_piece_map.size())
+ | ||
relevance 0 | ../src/piece_picker.cpp:2558 | when expanding pieces for cache stripe reasons, the !downloading condition doesn't make much sense |
when expanding pieces for cache stripe reasons,
+the !downloading condition doesn't make much sense../src/piece_picker.cpp:2558 TORRENT_ASSERT(index < int(m_piece_map.size()) || m_piece_map.empty());
+ if (index + 1 == int(m_piece_map.size()))
return m_blocks_in_last_piece;
else
return m_blocks_per_piece;
@@ -6456,7 +6296,7 @@ the !downloading condition doesn't make much sense../src/piece_picker.c
}
#endif
- void piece_picker::clear_peer(void* peer)
+ void piece_picker::clear_peer(torrent_peer* peer)
{
for (std::vector<block_info>::iterator i = m_block_info.begin()
, end(m_block_info.end()); i != end; ++i)
@@ -6469,8 +6309,8 @@ the !downloading condition doesn't make much sense../src/piece_picker.c
// blocks from this piece.
// the second bool is true if this is the only active peer that is requesting
// and downloading blocks from this piece. Active means having a connection.
- | ||
relevance 0 | ../src/session_impl.cpp:520 | there's no rule here to make uTP connections not have the global or local rate limits apply to it. This used to be the default. |
there's no rule here to make uTP connections not have the global or
-local rate limits apply to it. This used to be the default.../src/session_impl.cpp:520 m_global_class = m_classes.new_peer_class("global");
+ | ||
relevance 0 | ../src/session_impl.cpp:514 | there's no rule here to make uTP connections not have the global or local rate limits apply to it. This used to be the default. |
there's no rule here to make uTP connections not have the global or
+local rate limits apply to it. This used to be the default.../src/session_impl.cpp:514 m_global_class = m_classes.new_peer_class("global");
m_tcp_peer_class = m_classes.new_peer_class("tcp");
m_local_peer_class = m_classes.new_peer_class("local");
// local peers are always unchoked
@@ -6521,7 +6361,7 @@ local rate limits apply to it. This used to be the default.../src/sessi
#ifndef TORRENT_DISABLE_LOGGING
session_log(" max connections: %d", m_settings.get_int(settings_pack::connections_limit));
session_log(" max files: %d", int(rl.rlim_cur * 2 / 10));
- | ||
relevance 0 | ../src/session_impl.cpp:1555 | it would be nice to reserve() these vectors up front |
it would be nice to reserve() these vectors up front../src/session_impl.cpp:1555
+ | ||
relevance 0 | ../src/session_impl.cpp:1600 | it would be nice to reserve() these vectors up front |
it would be nice to reserve() these vectors up front../src/session_impl.cpp:1600
bandwidth_channel* ch = &p->channel[peer_connection::download_channel];
if (use_quota_overhead(ch, amount_down))
ret |= 1 << peer_connection::download_channel;
@@ -6572,9 +6412,9 @@ local rate limits apply to it. This used to be the default.../src/sessi
apply_pack(&pack, m_settings, this);
m_disk_thread.set_settings(&pack, m_alerts);
- | ||
relevance 0 | ../src/session_impl.cpp:1798 | instead of having a special case for this, just make the default listen interfaces be "0.0.0.0:6881,[::1]:6881" and use the generic path. That would even allow for not listening at all. |
instead of having a special case for this, just make the
+ | ||
relevance 0 | ../src/session_impl.cpp:1840 | instead of having a special case for this, just make the default listen interfaces be "0.0.0.0:6881,[::1]:6881" and use the generic path. That would even allow for not listening at all. |
instead of having a special case for this, just make the
default listen interfaces be "0.0.0.0:6881,[::1]:6881" and use
-the generic path. That would even allow for not listening at all.../src/session_impl.cpp:1798 error_code ec;
+the generic path. That would even allow for not listening at all.../src/session_impl.cpp:1840 error_code ec;
int listen_port_retries = m_settings.get_int(settings_pack::max_retry_port_bind);
@@ -6598,8 +6438,6 @@ retry:
{
// this means we should open two listen sockets
// one for IPv4 and one for IPv6
- int retries = m_settings.get_int(settings_pack::max_retry_port_bind);
-
listen_socket_t s = setup_listener("0.0.0.0", true
, m_listen_interface.port()
, flags, ec);
@@ -6618,14 +6456,16 @@ retry:
#ifdef TORRENT_USE_OPENSSL
if (m_settings.get_int(settings_pack::ssl_listen))
{
- int retries = m_settings.get_int(settings_pack::max_retry_port_bind);
- listen_socket_t s = setup_listener("0.0.0.0", true
+ s = setup_listener("0.0.0.0", true
, m_settings.get_int(settings_pack::ssl_listen)
, flags | open_ssl_socket, ec);
if (!ec && s.sock)
{
- | ||
relevance 0 | ../src/session_impl.cpp:2687 | should this function take a shared_ptr instead? |
should this function take a shared_ptr instead?../src/session_impl.cpp:2687 {
+ TORRENT_ASSERT(!m_abort);
+ m_listen_sockets.push_back(s);
+ }
+ | ||
relevance 0 | ../src/session_impl.cpp:2750 | should this function take a shared_ptr instead? |
should this function take a shared_ptr instead?../src/session_impl.cpp:2750 {
#if defined TORRENT_ASIO_DEBUGGING
complete_async("session_impl::on_socks_accept");
#endif
@@ -6669,14 +6509,14 @@ retry:
#ifndef TORRENT_DISABLE_LOGGING
session_log(" CLOSING CONNECTION %s : %s"
, print_endpoint(p->remote()).c_str(), ec.message().c_str());
+#else
+ TORRENT_UNUSED(ec);
#endif
TORRENT_ASSERT(p->is_disconnecting());
TORRENT_ASSERT(sp.use_count() > 0);
-
- connection_map::iterator i = m_connections.find(sp);
- | ||
relevance 0 | ../src/session_impl.cpp:3035 | have a separate list for these connections, instead of having to loop through all of them |
have a separate list for these connections, instead of having to loop through all of them../src/session_impl.cpp:3035 if (m_auto_manage_time_scaler < 0)
+ | ||
relevance 0 | ../src/session_impl.cpp:3099 | have a separate list for these connections, instead of having to loop through all of them |
have a separate list for these connections, instead of having to loop through all of them../src/session_impl.cpp:3099 if (m_auto_manage_time_scaler < 0)
{
INVARIANT_CHECK;
m_auto_manage_time_scaler = settings().get_int(settings_pack::auto_manage_interval);
@@ -6719,7 +6559,7 @@ retry:
TORRENT_ASSERT(t.want_tick());
TORRENT_ASSERT(!t.is_aborted());
- t.second_tick(tick_interval_ms, m_tick_residual / 1000);
+ t.second_tick(tick_interval_ms);
// if the call to second_tick caused the torrent
// to no longer want to be ticked (i.e. it was
@@ -6727,7 +6567,7 @@ retry:
// to not miss the torrent after it
if (!t.want_tick()) --i;
}
- | ||
relevance 0 | ../src/session_impl.cpp:3068 | this should apply to all bandwidth channels |
this should apply to all bandwidth channels../src/session_impl.cpp:3068#if TORRENT_DEBUG_STREAMING > 0
+ | ||
relevance 0 | ../src/session_impl.cpp:3132 | this should apply to all bandwidth channels |
this should apply to all bandwidth channels../src/session_impl.cpp:3132#if TORRENT_DEBUG_STREAMING > 0
printf("\033[2J\033[0;0H");
#endif
@@ -6738,7 +6578,7 @@ retry:
TORRENT_ASSERT(t.want_tick());
TORRENT_ASSERT(!t.is_aborted());
- t.second_tick(tick_interval_ms, m_tick_residual / 1000);
+ t.second_tick(tick_interval_ms);
// if the call to second_tick caused the torrent
// to no longer want to be ticked (i.e. it was
@@ -6778,9 +6618,9 @@ retry:
// scrape paused torrents that are auto managed
// (unless the session is paused)
// --------------------------------------------------------------
- | ||
relevance 0 | ../src/session_impl.cpp:3791 | use a lower limit than m_settings.connections_limit to allocate up to 10% or so of connection slots for incoming connections |
use a lower limit than m_settings.connections_limit
+ | ||
relevance 0 | ../src/session_impl.cpp:3861 | use a lower limit than m_settings.connections_limit to allocate up to 10% or so of connection slots for incoming connections |
use a lower limit than m_settings.connections_limit
to allocate up to 10% or so of connection slots for incoming
-connections../src/session_impl.cpp:3791 // robin fashion, so that every torrent is equally likely to connect to a
+connections../src/session_impl.cpp:3861 // robin fashion, so that every torrent is equally likely to connect to a
// peer
// boost connections are connections made by torrent connection
@@ -6831,8 +6671,8 @@ connections../src/session_impl.cpp:3791relevance 0 | ../src/session_impl.cpp:3941 | post a message to have this happen immediately instead of waiting for the next tick |
|
post a message to have this happen
-immediately instead of waiting for the next tick../src/session_impl.cpp:3941 continue;
+ | ||
relevance 0 | ../src/session_impl.cpp:4011 | post a message to have this happen immediately instead of waiting for the next tick |
post a message to have this happen
+immediately instead of waiting for the next tick../src/session_impl.cpp:4011 continue;
}
if (!p->is_peer_interested()
@@ -6883,12 +6723,12 @@ immediately instead of waiting for the next tick../src/session_impl.cpp
, allowed_upload_slots);
#ifndef TORRENT_DISABLE_LOGGING
- | ||
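The "post a message" TODO above maps naturally onto io_service::post(): instead of setting a flag and waiting for the periodic tick handler to notice it, the work is queued to run as soon as the event loop is free. A hedged sketch under that assumption (session_stub and recalculate_unchoke_slots are invented stand-ins, not libtorrent symbols):

#include <boost/asio/io_service.hpp>
#include <boost/bind.hpp>
#include <cstdio>

struct session_stub
{
	boost::asio::io_service& ios;

	void recalculate_unchoke_slots() { std::printf("recalculated\n"); }

	// instead of setting a flag and waiting for the next tick, schedule the
	// work to run as soon as the event loop gets around to it
	void request_recalculation()
	{ ios.post(boost::bind(&session_stub::recalculate_unchoke_slots, this)); }
};

int main()
{
	boost::asio::io_service ios;
	session_stub s = { ios };
	s.request_recalculation();
	ios.run(); // runs the posted handler right away, then returns
}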
relevance 0 | ../src/session_impl.cpp:4324 | it might be a nice feature here to limit the number of torrents to send in a single update. By just posting the first n torrents, they would nicely be round-robined because the torrent lists are always pushed back. Perhaps the status_update_alert could even have a fixed array of n entries rather than a vector, to further improve memory locality. |
it might be a nice feature here to limit the number of torrents
+ | ||
relevance 0 | ../src/session_impl.cpp:4397 | it might be a nice feature here to limit the number of torrents to send in a single update. By just posting the first n torrents, they would nicely be round-robined because the torrent lists are always pushed back. Perhaps the status_update_alert could even have a fixed array of n entries rather than a vector, to further improve memory locality. |
it might be a nice feature here to limit the number of torrents
to send in a single update. By just posting the first n torrents, they
would nicely be round-robined because the torrent lists are always
pushed back. Perhaps the status_update_alert could even have a fixed
array of n entries rather than a vector, to further improve memory
-locality.../src/session_impl.cpp:4324 t->status(&*i, flags);
+locality.../src/session_impl.cpp:4397 t->status(&*i, flags);
}
}
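For the "limit the number of torrents per status update" TODO above, one hypothetical shape of the change is to post only the first n torrents and let the rest lead the next update. The TODO notes the real lists are always pushed back, so round-robin behaviour would come for free; the explicit std::rotate below is only there to make the sketch self-contained (torrent_stub and next_status_batch are invented names):

#include <algorithm>
#include <vector>

struct torrent_stub { int id; };

// return at most n torrents for this status update, and rotate the queue so
// the next call picks up where this one left off
std::vector<torrent_stub*> next_status_batch(
	std::vector<torrent_stub*>& queue, std::size_t n)
{
	std::size_t const count = (std::min)(n, queue.size());
	std::vector<torrent_stub*> batch(queue.begin(), queue.begin() + count);
	std::rotate(queue.begin(), queue.begin() + count, queue.end());
	return batch;
}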
@@ -6934,13 +6774,13 @@ locality.../src/session_impl.cpp:4324relevance 0 | ../src/session_impl.cpp:4530 | this logic could probably be less spaghetti looking by being moved to a function with early exits |
|
this logic could probably be less spaghetti looking by being
-moved to a function with early exits../src/session_impl.cpp:4530 }
+ | ||
relevance 0 | ../src/session_impl.cpp:4610 | this logic could probably be less spaghetti looking by being moved to a function with early exits |
this logic could probably be less spaghetti looking by being
+moved to a function with early exits../src/session_impl.cpp:4610 }
// figure out the info hash of the torrent
sha1_hash const* ih = 0;
@@ -6964,16 +6804,16 @@ moved to a function with early exits../src/session_impl.cpp:4530 && !params.resume_data.empty())
{
int pos;
- error_code ec;
- bdecode_node tmp;
+ error_code err;
+ bdecode_node root;
bdecode_node info;
#ifndef TORRENT_DISABLE_LOGGING
session_log("adding magnet link with resume data");
#endif
if (bdecode(¶ms.resume_data[0], ¶ms.resume_data[0]
- + params.resume_data.size(), tmp, ec, &pos) == 0
- && tmp.type() == bdecode_node::dict_t
- && (info = tmp.dict_find_dict("info")))
+ + params.resume_data.size(), root, err, &pos) == 0
+ && root.type() == bdecode_node::dict_t
+ && (info = root.dict_find_dict("info")))
{
#ifndef TORRENT_DISABLE_LOGGING
session_log("found metadata in resume data");
@@ -6991,9 +6831,9 @@ moved to a function with early exits ../src/session_impl.cpp:4530 | ||
relevance 0 | ../src/storage.cpp:731 | make this more generic to not just work if files have been renamed, but also if they have been merged into a single file for instance maybe use the same format as .torrent files and reuse some code from torrent_info |
make this more generic to not just work if files have been
+ | ||
relevance 0 | ../src/storage.cpp:751 | make this more generic to not just work if files have been renamed, but also if they have been merged into a single file for instance maybe use the same format as .torrent files and reuse some code from torrent_info |
make this more generic to not just work if files have been
renamed, but also if they have been merged into a single file for instance
-maybe use the same format as .torrent files and reuse some code from torrent_info../src/storage.cpp:731 if (file_offset < files().file_size(file_index))
+maybe use the same format as .torrent files and reuse some code from torrent_info../src/storage.cpp:751 if (file_offset < files().file_size(file_index))
break;
file_offset -= files().file_size(file_index);
@@ -7044,9 +6884,9 @@ maybe use the same format as .torrent files and reuse some code from torrent_inf
}
if (file_sizes_ent.list_size() == 0)
- | ||
relevance 0 | ../src/storage.cpp:1062 | if everything moves OK, except for the partfile we currently won't update the save path, which breaks things. it would probably make more sense to give up on the partfile |
if everything moves OK, except for the partfile
+ | ||
relevance 0 | ../src/storage.cpp:1082 | if everything moves OK, except for the partfile we currently won't update the save path, which breaks things. it would probably make more sense to give up on the partfile |
if everything moves OK, except for the partfile
we currently won't update the save path, which breaks things.
-it would probably make more sense to give up on the partfile../src/storage.cpp:1062 if (ec)
+it would probably make more sense to give up on the partfile../src/storage.cpp:1082 if (ec)
{
ec.file = i->second;
ec.operation = storage_error::copy;
@@ -7054,8 +6894,8 @@ it would probably make more sense to give up on the partfile../src/stor
else
{
// ignore errors when removing
- error_code e;
- remove_all(old_path, e);
+ error_code ignore;
+ remove_all(old_path, ignore);
}
break;
}
@@ -7097,7 +6937,7 @@ it would probably make more sense to give up on the partfile../src/stor
{
fileop op = { &file::writev
, file::read_write | flags };
- | ||
relevance 0 | ../src/string_util.cpp:60 | warning C4146: unary minus operator applied to unsigned type, result still unsigned |
warning C4146: unary minus operator applied to unsigned type,
result still unsigned../src/string_util.cpp:60
#include <boost/tuple/tuple.hpp>
@@ -7149,7 +6989,7 @@ namespace libtorrent
return (c >= 'A' && c <= 'Z') ? c - 'A' + 'a' : c;
}
- | ||
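The C4146 warning mentioned above is easy to silence without changing behaviour: negating an unsigned value directly triggers the warning under MSVC, while subtracting the value from zero (or spelling out the two's complement) produces the same bit pattern warning-free. A small illustration:

#include <cstdio>

int main()
{
	unsigned int x = 5;
	// unsigned int y = -x;       // MSVC: warning C4146
	unsigned int y = 0u - x;      // same value, no warning
	unsigned int z = ~x + 1u;     // explicit two's complement, also quiet
	std::printf("%u %u\n", y, z); // both print 4294967291 for 32-bit unsigned
}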
relevance 0 | ../src/torrent.cpp:97 | factor out cache_status to its own header |
factor out cache_status to its own header../src/torrent.cpp:97#include "libtorrent/extensions.hpp"
+ | ||
relevance 0 | ../src/torrent.cpp:101 | factor out cache_status to its own header |
factor out cache_status to its own header../src/torrent.cpp:101#include "libtorrent/extensions.hpp"
#include "libtorrent/aux_/session_interface.hpp"
#include "libtorrent/instantiate_connection.hpp"
#include "libtorrent/assert.hpp"
@@ -7200,8 +7040,8 @@ namespace libtorrent
}
} // anonymous namespace
- | ||
relevance 0 | ../src/torrent.cpp:458 | if the existing torrent doesn't have metadata, insert the metadata we just downloaded into it. |
if the existing torrent doesn't have metadata, insert
-the metadata we just downloaded into it.../src/torrent.cpp:458
+ | ||
relevance 0 | ../src/torrent.cpp:467 | if the existing torrent doesn't have metadata, insert the metadata we just downloaded into it. |
if the existing torrent doesn't have metadata, insert
+the metadata we just downloaded into it.../src/torrent.cpp:467
m_torrent_file = tf;
// now, we might already have this torrent in the session.
@@ -7252,8 +7092,8 @@ the metadata we just downloaded into it.../src/torrent.cpp:458 | ||
relevance 0 | ../src/torrent.cpp:608 | if the existing torrent doesn't have metadata, insert the metadata we just downloaded into it. |
if the existing torrent doesn't have metadata, insert
-the metadata we just downloaded into it.../src/torrent.cpp:608 m_torrent_file = tf;
+ | ||
relevance 0 | ../src/torrent.cpp:617 | if the existing torrent doesn't have metadata, insert the metadata we just downloaded into it. |
if the existing torrent doesn't have metadata, insert
+the metadata we just downloaded into it.../src/torrent.cpp:617 m_torrent_file = tf;
m_info_hash = tf->info_hash();
// now, we might already have this torrent in the session.
@@ -7301,15 +7141,15 @@ the metadata we just downloaded into it.../src/torrent.cpp:608 | ||
relevance 0 | ../src/torrent.cpp:1534 | is verify_peer_cert called once per certificate in the chain, and this function just tells us which depth we're at right now? If so, the comment makes sense. any certificate that isn't the leaf (i.e. the one presented by the peer) should be accepted automatically, given preverified is true. The leaf certificate need to be verified to make sure its DN matches the info-hash |
is verify_peer_cert called once per certificate in the chain, and
+ | ||
relevance 0 | ../src/torrent.cpp:1557 | is verify_peer_cert called once per certificate in the chain, and this function just tells us which depth we're at right now? If so, the comment makes sense. any certificate that isn't the leaf (i.e. the one presented by the peer) should be accepted automatically, given preverified is true. The leaf certificate need to be verified to make sure its DN matches the info-hash |
is verify_peer_cert called once per certificate in the chain, and
this function just tells us which depth we're at right now? If so, the comment
makes sense.
any certificate that isn't the leaf (i.e. the one presented by the peer)
should be accepted automatically, given preverified is true. The leaf certificate
-needs to be verified to make sure its DN matches the info-hash../src/torrent.cpp:1534 if (pp) p->add_extension(pp);
+needs to be verified to make sure its DN matches the info-hash../src/torrent.cpp:1557 if (pp) p->add_extension(pp);
}
// if files are checked for this torrent, call the extension
@@ -7342,9 +7182,9 @@ need to be verified to make sure its DN matches the info-hash../src/tor
std::string names;
bool match = false;
#endif
- for (int i = 0; i < sk_GENERAL_NAME_num(gens); ++i)
+ for (int i = 0; i < aux::openssl_num_general_names(gens); ++i)
{
- GENERAL_NAME* gen = sk_GENERAL_NAME_value(gens, i);
+ GENERAL_NAME* gen = aux::openssl_general_name_value(gens, i);
if (gen->type != GEN_DNS) continue;
ASN1_IA5STRING* domain = gen->d.dNSName;
if (domain->type != V_ASN1_IA5STRING || !domain->data || !domain->length) continue;
@@ -7360,8 +7200,8 @@ need to be verified to make sure its DN matches the info-hash../src/tor
{
#ifndef TORRENT_DISABLE_LOGGING
match = true;
- | ||
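On the question in the TODO above: with boost.asio on top of OpenSSL the verify callback is indeed invoked once per certificate in the chain, and the leaf is the one reported at depth 0, so non-leaf certificates can be accepted whenever preverified is true. A hedged sketch of that depth check follows; it is not libtorrent's actual SubjectAltName/info-hash matching, and expected_name is an invented parameter used only for illustration.

#include <boost/asio/ssl/verify_context.hpp>
#include <openssl/x509.h>
#include <string>

bool verify_peer_cert(bool preverified, boost::asio::ssl::verify_context& ctx
	, std::string const& expected_name)
{
	X509_STORE_CTX* store = ctx.native_handle();
	int const depth = X509_STORE_CTX_get_error_depth(store);

	// intermediate and root certificates: trust the chain validation that
	// already happened (reflected in preverified)
	if (depth > 0) return preverified;

	// leaf certificate (depth 0): additionally check its subject name
	X509* cert = X509_STORE_CTX_get_current_cert(store);
	char subject[256];
	X509_NAME_oneline(X509_get_subject_name(cert), subject, int(sizeof(subject)));
	return preverified
		&& std::string(subject).find(expected_name) != std::string::npos;
}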
relevance 0 | ../src/torrent.cpp:1942 | instead of creating the picker up front here, maybe this whole section should move to need_picker() |
instead of creating the picker up front here,
-maybe this whole section should move to need_picker()../src/torrent.cpp:1942 m_have_all = true;
+ | ||
relevance 0 | ../src/torrent.cpp:1979 | instead of creating the picker up front here, maybe this whole section should move to need_picker() |
instead of creating the picker up front here,
+maybe this whole section should move to need_picker()../src/torrent.cpp:1979 m_have_all = true;
m_ses.get_io_service().post(boost::bind(&torrent::files_checked, shared_from_this()));
m_resume_data.reset();
update_gauge();
@@ -7412,13 +7252,13 @@ maybe this whole section should move to need_picker()../src/torrent.cpp
// need to consider it finished
std::vector<piece_picker::downloading_piece> dq
- | ||
relevance 0 | ../src/torrent.cpp:2016 | this could be optimized by looking up which files are complete and just look at those |
this could be optimized by looking up which files are
-complete and just look at those../src/torrent.cpp:2016 if (!need_loaded()) return;
+ | ||
relevance 0 | ../src/torrent.cpp:2053 | this could be optimized by looking up which files are complete and just look at those |
this could be optimized by looking up which files are
+complete and just look at those../src/torrent.cpp:2053 if (!need_loaded()) return;
if (num_pad_files > 0)
m_picker->set_num_pad_files(num_pad_files);
- std::auto_ptr<std::vector<std::string> > links;
+ std::vector<std::string> links;
#ifndef TORRENT_DISABLE_MUTABLE_TORRENTS
if (!m_torrent_file->similar_torrents().empty()
|| !m_torrent_file->collections().empty())
@@ -7444,11 +7284,11 @@ complete and just look at those../src/torrent.cpp:2016relevance 0 | ../src/torrent.cpp:2032 | this could be optimized by looking up which files are complete and just look at those |
|
this could be optimized by looking up which files are
-complete and just look at those../src/torrent.cpp:2032 i != end; ++i)
+ | ||
relevance 0 | ../src/torrent.cpp:2069 | this could be optimized by looking up which files are complete and just look at those |
this could be optimized by looking up which files are
+complete and just look at those../src/torrent.cpp:2069 i != end; ++i)
{
boost::shared_ptr<torrent> t = m_ses.find_torrent(*i).lock();
if (!t) continue;
@@ -7465,7 +7305,7 @@ complete and just look at those../src/torrent.cpp:2032 if (!(*k)->is_seed()) continue;
@@ -7477,7 +7317,6 @@ complete and just look at those../src/torrent.cpp:2032../src/torrent.cpp:2032
| ||
relevance 0 | ../src/torrent.cpp:2199 | there may be peer extensions relying on the torrent extension still being alive. Only do this if there are no peers. And when the last peer is disconnected, if the torrent is unloaded, clear the extensions m_extensions.clear(); |
there may be peer extensions relying on the torrent extension
+ debug_log("init, async_check_fastresume");
+ | ||
relevance 0 | ../src/torrent.cpp:2235 | there may be peer extensions relying on the torrent extension still being alive. Only do this if there are no peers. And when the last peer is disconnected, if the torrent is unloaded, clear the extensions m_extensions.clear(); |
there may be peer extensions relying on the torrent extension
still being alive. Only do this if there are no peers. And when the last peer
is disconnected, if the torrent is unloaded, clear the extensions
-m_extensions.clear();../src/torrent.cpp:2199 // pinned torrents are not allowed to be swapped out
+m_extensions.clear();../src/torrent.cpp:2235 // pinned torrents are not allowed to be swapped out
TORRENT_ASSERT(!m_pinned);
m_should_be_loaded = false;
@@ -7546,16 +7386,16 @@ m_extensions.clear();../src/torrent.cpp:2199 | ||
relevance 0 | ../src/torrent.cpp:2884 | this pattern is repeated in a few places. Factor this into a function and generalize the concept of a torrent having a dedicated listen port |
this pattern is repeated in a few places. Factor this into
+#else
+ | ||
relevance 0 | ../src/torrent.cpp:2926 | this pattern is repeated in a few places. Factor this into a function and generalize the concept of a torrent having a dedicated listen port |
this pattern is repeated in a few places. Factor this into
a function and generalize the concept of a torrent having a
-dedicated listen port../src/torrent.cpp:2884 // if the files haven't been checked yet, we're
+dedicated listen port../src/torrent.cpp:2926 // if the files haven't been checked yet, we're
// not ready for peers. Except, if we don't have metadata,
// we need peers to download from
if (!m_files_checked && valid_metadata()) return;
@@ -7606,7 +7446,7 @@ dedicated listen port../src/torrent.cpp:2884 | ||
relevance 0 | ../src/torrent.cpp:3669 | add one peer per IP the hostname resolves to |
add one peer per IP the hostname resolves to../src/torrent.cpp:3669#endif
+ | ||
relevance 0 | ../src/torrent.cpp:3723 | add one peer per IP the hostname resolves to |
add one peer per IP the hostname resolves to../src/torrent.cpp:3723#endif
void torrent::on_peer_name_lookup(error_code const& e
, std::vector<address> const& host_list, int port)
@@ -7657,12 +7497,10 @@ dedicated listen port../src/torrent.cpp:2884relevance 0 | ../src/torrent.cpp:4601 | update suggest_piece? |
|
update suggest_piece?../src/torrent.cpp:4601
- void torrent::peer_has_all(peer_connection const* peer)
- {
- if (has_picker())
+ | ||
relevance 0 | ../src/torrent.cpp:4663 | update suggest_piece? |
update suggest_piece?../src/torrent.cpp:4663 if (has_picker())
{
- m_picker->inc_refcount_all(peer);
+ torrent_peer* pp = peer->peer_info_struct();
+ m_picker->inc_refcount_all(pp);
}
#ifdef TORRENT_DEBUG
else
@@ -7676,7 +7514,9 @@ dedicated listen port../src/torrent.cpp:2884 }
#ifdef TORRENT_DEBUG
else
@@ -7690,7 +7530,8 @@ dedicated listen port../src/torrent.cpp:2884 | ||
relevance 0 | ../src/torrent.cpp:4744 | really, we should just keep the picker around in this case to maintain the availability counters |
really, we should just keep the picker around
-in this case to maintain the availability counters../src/torrent.cpp:4744 pieces.reserve(cs.pieces.size());
+ | ||
relevance 0 | ../src/torrent.cpp:4807 | really, we should just keep the picker around in this case to maintain the availability counters |
really, we should just keep the picker around
+in this case to maintain the availability counters../src/torrent.cpp:4807 pieces.reserve(cs.pieces.size());
// sort in ascending order, to get most recently used first
std::sort(cs.pieces.begin(), cs.pieces.end()
@@ -7730,10 +7570,10 @@ in this case to maintain the availability counters../src/torrent.cpp:47
else
{
p.num_peers = 0;
- for (const_peer_iterator i = m_connections.begin()
- , end(m_connections.end()); i != end; ++i)
+ for (const_peer_iterator j = m_connections.begin()
+ , end2(m_connections.end()); j != end2; ++j)
{
- peer_connection* peer = *i;
+ peer_connection* peer = *j;
if (peer->has_piece(p.piece_index)) ++p.num_peers;
}
}
@@ -7760,12 +7600,12 @@ in this case to maintain the availability counters../src/torrent.cpp:47
}
void torrent::abort()
- | ||
relevance 0 | ../src/torrent.cpp:6734 | make this more generic to not just work if files have been renamed, but also if they have been merged into a single file for instance maybe use the same format as .torrent files and reuse some code from torrent_info The mapped_files needs to be read both in the network thread and in the disk thread, since they both have their own mapped files structures which are kept in sync |
make this more generic to not just work if files have been
+ | ||
relevance 0 | ../src/torrent.cpp:6838 | make this more generic to not just work if files have been renamed, but also if they have been merged into a single file for instance maybe use the same format as .torrent files and reuse some code from torrent_info The mapped_files needs to be read both in the network thread and in the disk thread, since they both have their own mapped files structures which are kept in sync |
make this more generic to not just work if files have been
renamed, but also if they have been merged into a single file for instance
maybe use the same format as .torrent files and reuse some code from torrent_info
The mapped_files needs to be read both in the network thread
and in the disk thread, since they both have their own mapped files structures
-which are kept in sync../src/torrent.cpp:6734 {
+which are kept in sync../src/torrent.cpp:6838 {
m_save_path = p;
#ifndef TORRENT_DISABLE_LOGGING
debug_log("loaded resume data: save-path: %s", m_save_path.c_str());
@@ -7806,22 +7646,22 @@ which are kept in sync../src/torrent.cpp:6734 | ||
relevance 0 | ../src/torrent.cpp:6858 | if this is a merkle torrent and we can't restore the tree, we need to wipe all the bits in the have array, but not necessarily we might want to do a full check to see if we have all the pieces. This is low priority since almost no one uses merkle torrents |
if this is a merkle torrent and we can't
+ | ||
relevance 0 | ../src/torrent.cpp:6971 | if this is a merkle torrent and we can't restore the tree, we need to wipe all the bits in the have array, but not necessarily we might want to do a full check to see if we have all the pieces. This is low priority since almost no one uses merkle torrents |
if this is a merkle torrent and we can't
restore the tree, we need to wipe all the
bits in the have array, but not necessarily;
we might want to do a full check to see if we have
all the pieces. This is low priority since almost
-no one uses merkle torrents../src/torrent.cpp:6858 add_web_seed(url, web_seed_entry::http_seed);
+no one uses merkle torrents../src/torrent.cpp:6971 add_web_seed(url, web_seed_entry::http_seed);
}
}
@@ -7872,9 +7712,9 @@ no one uses merkle torrents../src/torrent.cpp:6858relevance 0 | ../src/torrent.cpp:7083 | make this more generic to not just work if files have been renamed, but also if they have been merged into a single file for instance. using file_base |
|
make this more generic to not just work if files have been
+ | ||
relevance 0 | ../src/torrent.cpp:7197 | make this more generic to not just work if files have been renamed, but also if they have been merged into a single file for instance. using file_base |
make this more generic to not just work if files have been
renamed, but also if they have been merged into a single file for instance.
-using file_base../src/torrent.cpp:7083 pieces.resize(m_torrent_file->num_pieces());
+using file_base../src/torrent.cpp:7197 pieces.resize(m_torrent_file->num_pieces());
if (!has_picker())
{
std::memset(&pieces[0], m_have_all, pieces.size());
@@ -7925,9 +7765,9 @@ using file_base../src/torrent.cpp:7083relevance 0 | ../src/torrent.cpp:9255 | add a flag to ignore stats, and only care about resume data for content. For unchanged files, don't trigger a load of the metadata just to save an empty resume data file |
|
add a flag to ignore stats, and only care about resume data for
+ | ||
relevance 0 | ../src/torrent.cpp:9440 | add a flag to ignore stats, and only care about resume data for content. For unchanged files, don't trigger a load of the metadata just to save an empty resume data file |
add a flag to ignore stats, and only care about resume data for
content. For unchanged files, don't trigger a load of the metadata
-just to save an empty resume data file../src/torrent.cpp:9255 if (m_complete != 0xffffff) seeds = m_complete;
+just to save an empty resume data file../src/torrent.cpp:9440 if (m_complete != 0xffffff) seeds = m_complete;
else seeds = m_peer_list ? m_peer_list->num_seeds() : 0;
if (m_incomplete != 0xffffff) downloaders = m_incomplete;
@@ -7978,8 +7818,8 @@ just to save an empty resume data file../src/torrent.cpp:9255 | ||
relevance 0 | ../src/torrent.cpp:10859 | instead of resorting the whole list, insert the peers directly into the right place |
instead of resorting the whole list, insert the peers
-directly into the right place../src/torrent.cpp:10859 printf("timed out [average-piece-time: %d ms ]\n"
+ | ||
relevance 0 | ../src/torrent.cpp:11057 | instead of resorting the whole list, insert the peers directly into the right place |
instead of resorting the whole list, insert the peers
+directly into the right place../src/torrent.cpp:11057 printf("timed out [average-piece-time: %d ms ]\n"
, m_average_piece_time);
#endif
}
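For the "insert the peers directly into the right place" TODO above, the usual alternative to push_back() followed by a full re-sort is std::lower_bound() plus insert(), which keeps the container sorted at all times. A generic sketch (peer and rank are placeholder names, not libtorrent types); note that lower_bound requires the vector to already be sorted by the same comparison:

#include <algorithm>
#include <vector>

struct peer { int rank; };

bool lower_rank(peer const& lhs, peer const& rhs)
{ return lhs.rank < rhs.rank; }

void insert_sorted(std::vector<peer>& peers, peer const& p)
{
	std::vector<peer>::iterator pos = std::lower_bound(
		peers.begin(), peers.end(), p, &lower_rank);
	peers.insert(pos, p); // O(log n) search + O(n) shift, no full re-sort
}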
@@ -8026,11 +7866,11 @@ directly into the right place../src/torrent.cpp:10859relevance 0 | ../src/torrent_peer.cpp:179 | how do we deal with our external address changing? |
|
how do we deal with our external address changing?../src/torrent_peer.cpp:179 , is_v6_addr(false)
+ | ||
relevance 0 | ../src/torrent_peer.cpp:188 | how do we deal with our external address changing? |
how do we deal with our external address changing?../src/torrent_peer.cpp:188 , is_v6_addr(false)
#endif
#if TORRENT_USE_I2P
, is_i2p_addr(false)
@@ -8081,9 +7921,9 @@ directly into the right place../src/torrent.cpp:10859relevance 0 | ../src/udp_socket.cpp:288 | it would be nice to detect this on posix systems also |
|
it would be nice to detect this on posix systems also../src/udp_socket.cpp:288 --m_v6_outstanding;
- }
- else
+ | ||
relevance 0 | ../src/udp_socket.cpp:298 | it would be nice to detect this on posix systems also |
it would be nice to detect this on posix systems also../src/udp_socket.cpp:298 else
+#else
+ TORRENT_UNUSED(s);
#endif
{
TORRENT_ASSERT(m_v4_outstanding > 0);
@@ -8097,13 +7937,13 @@ directly into the right place../src/torrent.cpp:10859#ifdef TORRENT_WINDOWS
- if ((ec == error_code(ERROR_MORE_DATA, system_category())
- || ec == error_code(WSAEMSGSIZE, system_category()))
+ if ((err == error_code(ERROR_MORE_DATA, system_category())
+ || err == error_code(WSAEMSGSIZE, system_category()))
&& m_buf_size < 65536)
{
// if this function fails to allocate memory, m_buf_size
@@ -8114,8 +7954,8 @@ directly into the right place../src/torrent.cpp:10859relevance 0 | ../src/udp_socket.cpp:788 | use the system resolver_interface here |
|
use the system resolver_interface here../src/udp_socket.cpp:788
-void udp_socket::set_proxy_settings(proxy_settings const& ps)
+ | ||
relevance 0 | ../src/udp_socket.cpp:798 | use the system resolver_interface here |
use the system resolver_interface here../src/udp_socket.cpp:798
+void udp_socket::set_proxy_settings(aux::proxy_settings const& ps)
{
CHECK_MAGIC;
TORRENT_ASSERT(is_single_thread());
@@ -8183,7 +8023,7 @@ void udp_socket::on_name_lookup(error_code const& e, tcp::resolver::iterator
+ m_outstanding_socks);
if (m_abort) return;
- | ||
relevance 0 | ../src/ut_metadata.cpp:315 | we really need to increment the refcounter on the torrent while this buffer is still in the peer's send buffer |
we really need to increment the refcounter on the torrent
while this buffer is still in the peer's send buffer../src/ut_metadata.cpp:315 if (!m_tp.need_loaded()) return;
metadata = m_tp.metadata().begin + offset;
metadata_piece_size = (std::min)(
@@ -8235,8 +8075,8 @@ while this buffer is still in the peer's send buffer../src/ut_metadata.
{
#ifndef TORRENT_DISABLE_LOGGING
m_pc.peer_log(peer_log_alert::incoming_message, "UT_METADATA"
- | ||
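One common way to get the effect the ut_metadata TODO above asks for is to let the completion handler own a reference to the object backing the buffer: boost::bind copies the shared_ptr, so the refcount stays up until the write has finished. A hedged, generic sketch of that pattern (metadata_source is invented and this is not the libtorrent send path):

#include <boost/asio.hpp>
#include <boost/bind.hpp>
#include <boost/enable_shared_from_this.hpp>
#include <boost/shared_ptr.hpp>
#include <string>

struct metadata_source : boost::enable_shared_from_this<metadata_source>
{
	std::string metadata; // the buffer handed to the send call

	void on_sent(boost::system::error_code const&, std::size_t) {}

	// assumes the object is already owned by a boost::shared_ptr, otherwise
	// shared_from_this() throws
	void async_send(boost::asio::ip::tcp::socket& sock)
	{
		// boost::bind stores a copy of the shared_ptr, pinning the refcount
		// (and therefore the buffer) until the write handler has run
		boost::asio::async_write(sock, boost::asio::buffer(metadata)
			, boost::bind(&metadata_source::on_sent, shared_from_this(), _1, _2));
	}
};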
relevance 0 | ../src/utp_stream.cpp:1718 | this loop is not very efficient. It could be fixed by having a separate list of sequence numbers that need resending |
this loop is not very efficient. It could be fixed by having
-a separate list of sequence numbers that need resending../src/utp_stream.cpp:1718};
+ | ||
relevance 0 | ../src/utp_stream.cpp:1761 | this loop is not very efficient. It could be fixed by having a separate list of sequence numbers that need resending |
this loop is not very efficient. It could be fixed by having
+a separate list of sequence numbers that need resending../src/utp_stream.cpp:1761};
// sends a packet, pulls data from the write buffer (if there's any)
// if ack is true, we need to send a packet regardless of if there's
@@ -8258,7 +8098,7 @@ bool utp_socket_impl::send_pkt(int flags)
for (int i = (m_acked_seq_nr + 1) & ACK_MASK; i != m_seq_nr; i = (i + 1) & ACK_MASK)
{
- packet* p = (packet*)m_outbuf.at(i);
+ packet* p = m_outbuf.at(i);
if (!p) continue;
if (!p->need_resend) continue;
if (!resend_packet(p))
@@ -8287,9 +8127,7 @@ bool utp_socket_impl::send_pkt(int flags)
if (sack > 32) sack = 32;
}
- | ||
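The utp_stream TODO above suggests a dedicated list of sequence numbers that need resending, so the retransmit path does not have to walk the whole send window. A hypothetical sketch of that bookkeeping, with std::map and std::deque standing in for the real packet buffer:

#include <deque>
#include <map>

struct packet_stub { bool need_resend; };

struct send_window
{
	std::map<int, packet_stub> outbuf; // seq nr -> in-flight packet
	std::deque<int> resend_queue;      // the "separate list" from the TODO

	void mark_for_resend(int seq_nr)
	{
		packet_stub& p = outbuf[seq_nr];
		if (p.need_resend) return;     // already queued once
		p.need_resend = true;
		resend_queue.push_back(seq_nr);
	}

	template <class Fun>
	void resend_all(Fun resend)
	{
		while (!resend_queue.empty())
		{
			int const seq_nr = resend_queue.front();
			resend_queue.pop_front();
			std::map<int, packet_stub>::iterator i = outbuf.find(seq_nr);
			if (i == outbuf.end()) continue; // acked and dropped meanwhile
			i->second.need_resend = false;
			resend(seq_nr);
		}
	}
};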
relevance 0 | ../src/web_connection_base.cpp:73 | introduce a web-seed default class which has a low download priority |
introduce a web-seed default class which has a low download priority../src/web_connection_base.cpp:73{
- web_connection_base::web_connection_base(
- peer_connection_args const& pack
+ | ||
relevance 0 | ../src/web_connection_base.cpp:81 | introduce a web-seed default class which has a low download priority |
introduce a web-seed default class which has a low download priority../src/web_connection_base.cpp:81 peer_connection_args const& pack
, web_seed_t& web)
: peer_connection(pack)
, m_first_request(true)
@@ -8306,8 +8144,10 @@ bool utp_socket_impl::send_pkt(int flags)
INVARIANT_CHECK;
+ TORRENT_ASSERT(is_outgoing());
+
// we only want left-over bandwidth
-
+
std::string protocol;
error_code ec;
boost::tie(protocol, m_basic_auth, m_host, m_port, m_path)
@@ -8338,19 +8178,19 @@ bool utp_socket_impl::send_pkt(int flags)
// according to the settings.
return m_settings.get_int(settings_pack::urlseed_timeout);
}
- | ||
relevance 0 | ../src/kademlia/dht_tracker.cpp:264 | ideally this function would be called when the put completes |
ideally this function would be called when the
-put completes../src/kademlia/dht_tracker.cpp:264 // since it controls whether we re-put the content
+ | ||
relevance 0 | ../src/kademlia/dht_tracker.cpp:269 | ideally this function would be called when the put completes |
ideally this function would be called when the
+put completes../src/kademlia/dht_tracker.cpp:269 // since it controls whether we re-put the content
TORRENT_ASSERT(!it.is_mutable());
f(it);
return false;
}
- bool get_mutable_item_callback(item& it, boost::function<void(item const&)> f)
+ bool get_mutable_item_callback(item& it, bool authoritative, boost::function<void(item const&, bool)> f)
{
// the reason to wrap here is to control the return value
// since it controls whether we re-put the content
TORRENT_ASSERT(it.is_mutable());
- f(it);
+ f(it, authoritative);
return false;
}
@@ -8363,9 +8203,11 @@ put completes../src/kademlia/dht_tracker.cpp:264 | ||
relevance 0 | ../src/kademlia/node.cpp:614 | in the future, this function should update all the dht related counters. For now, it just updates the storage related ones. |
in the future, this function should update all the
+dht related counters. For now, it just updates the storage
+related ones.../src/kademlia/node.cpp:614
+ return d;
+}
+
+void node::status(std::vector<dht_routing_bucket>& table
+ , std::vector<dht_lookup>& requests)
+{
+ mutex_t::scoped_lock l(m_mutex);
+
+ m_table.status(table);
+
+ for (std::set<traversal_algorithm*>::iterator i = m_running_requests.begin()
+ , end(m_running_requests.end()); i != end; ++i)
{
- std::string flat_data;
- | ||
relevance 0 | ../include/libtorrent/block_cache.hpp:219 | make this 32 bits and to count seconds since the block cache was created |
make this 32 bits and to count seconds since the block cache was created../include/libtorrent/block_cache.hpp:219
+ requests.push_back(dht_lookup());
+ dht_lookup& lookup = requests.back();
+ (*i)->status(lookup);
+ }
+}
+
+void node::update_stats_counters(counters& c) const
+ {
+ const dht_storage_counters& dht_cnt = m_storage->counters();
+ c.set_value(counters::dht_torrents, dht_cnt.torrents);
+ c.set_value(counters::dht_peers, dht_cnt.peers);
+ c.set_value(counters::dht_immutable_data, dht_cnt.immutable_data);
+ c.set_value(counters::dht_mutable_data, dht_cnt.mutable_data);
+}
+
+#ifndef TORRENT_NO_DEPRECATE
+ | ||
relevance 0 | ../include/libtorrent/announce_entry.hpp:97 | include the number of peers received from this tracker, at last announce |
include the number of peers received from this tracker, at last
+announce../include/libtorrent/announce_entry.hpp:97
+ // if this tracker failed the last time it was contacted
+ // this error code specifies what error occurred
+ error_code last_error;
+
+ // returns the number of seconds to the next announce on this tracker.
+ // ``min_announce_in()`` returns the number of seconds until we are
+ // allowed to force another tracker update with this tracker.
+ //
+ // If the last time this tracker was contacted failed, ``last_error`` is
+ // the error code describing what error occurred.
+ int next_announce_in() const;
+ int min_announce_in() const;
+
+ // the time of next tracker announce
+ time_point next_announce;
+
+ // no announces before this time
+ time_point min_announce;
+
+
+ // these are either -1 or the scrape information this tracker last
+ // responded with. *incomplete* is the current number of downloaders in
+ // the swarm, *complete* is the current number of seeds in the swarm and
+ // *downloaded* is the cumulative number of completed downloads of this
+ // torrent, since the beginning of time (from this tracker's point of
+ // view).
+
+ // if this tracker has returned scrape data, these fields are filled in
+ // with valid numbers. Otherwise they are set to -1. the number of
+ // current downloaders
+ int scrape_incomplete;
+ int scrape_complete;
+ int scrape_downloaded;
+
+ // the tier this tracker belongs to
+ boost::uint8_t tier;
+
+ // the max number of failures to announce to this tracker in
+ // a row, before this tracker is not used anymore. 0 means unlimited
+ boost::uint8_t fail_limit;
+
+ // the number of times in a row we have failed to announce to this
+ // tracker.
+ boost::uint8_t fails:7;
+
+ // true while we're waiting for a response from the tracker.
+ bool updating:1;
+
+ // flags for the source bitmask, each indicating where
+ // we heard about this tracker
+ | ||
relevance 0 | ../include/libtorrent/block_cache.hpp:223 | make this 32 bits and to count seconds since the block cache was created |
make this 32 bits and to count seconds since the block cache was created../include/libtorrent/block_cache.hpp:223
bool operator==(cached_piece_entry const& rhs) const
{ return storage.get() == rhs.storage.get() && piece == rhs.piece; }
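The block_cache TODO above ("make this 32 bits and count seconds since the block cache was created") amounts to storing timestamps as 32-bit offsets from a single reference point instead of full absolute time values per entry. A rough sketch of that layout change, with invented names:

#include <boost/cstdint.hpp>
#include <cstdio>
#include <ctime>

struct block_cache_stub
{
	std::time_t created;                    // single reference point
	block_cache_stub() : created(std::time(0)) {}

	// seconds since the cache was created, fits comfortably in 32 bits
	boost::uint32_t now_relative() const
	{ return boost::uint32_t(std::time(0) - created); }
};

struct cached_piece_stub
{
	boost::uint32_t expire; // 4 bytes instead of a full time_point
};

int main()
{
	block_cache_stub cache;
	cached_piece_stub piece;
	piece.expire = cache.now_relative() + 600; // expire 10 minutes from now
	std::printf("%u\n", unsigned(piece.expire));
}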
@@ -8441,7 +8365,7 @@ put completes../src/kademlia/dht_tracker.cpp:264relevance 0 | ../include/libtorrent/config.hpp:349 | Make this count Unicode characters instead of bytes on windows |
|
Make this count Unicode characters instead of bytes on windows../include/libtorrent/config.hpp:349#pragma message ( "unknown OS, assuming BSD" )
+ | ||
relevance 0 | ../include/libtorrent/config.hpp:357 | Make this count Unicode characters instead of bytes on windows |
Make this count Unicode characters instead of bytes on windows../include/libtorrent/config.hpp:357#pragma message ( "unknown OS, assuming BSD" )
#else
#warning "unknown OS, assuming BSD"
#endif
@@ -8492,7 +8416,7 @@ put completes../src/kademlia/dht_tracker.cpp:264relevance 0 | ../include/libtorrent/disk_buffer_pool.hpp:137 | try to remove the observers, only using the async_allocate handlers |
|
try to remove the observers, only using the async_allocate handlers../include/libtorrent/disk_buffer_pool.hpp:137
+ | ||
relevance 0 | ../include/libtorrent/disk_buffer_pool.hpp:137 | try to remove the observers, only using the async_allocate handlers |
try to remove the observers, only using the async_allocate handlers../include/libtorrent/disk_buffer_pool.hpp:137
// number of bytes per block. The BitTorrent
// protocol defines the block size to 16 KiB.
const int m_block_size;
@@ -8543,7 +8467,7 @@ put completes../src/kademlia/dht_tracker.cpp:264relevance 0 | ../include/libtorrent/file.hpp:173 | move this into a separate header file, TU pair |
|
move this into a separate header file, TU pair../include/libtorrent/file.hpp:173 TORRENT_EXTRA_EXPORT std::string parent_path(std::string const& f);
+ | ||
relevance 0 | ../include/libtorrent/file.hpp:173 | move this into a separate header file, TU pair |
move this into a separate header file, TU pair../include/libtorrent/file.hpp:173 TORRENT_EXTRA_EXPORT std::string parent_path(std::string const& f);
TORRENT_EXTRA_EXPORT bool has_parent_path(std::string const& f);
TORRENT_EXTRA_EXPORT char const* filename_cstr(char const* f);
@@ -8594,7 +8518,7 @@ put completes../src/kademlia/dht_tracker.cpp:264relevance 0 | ../include/libtorrent/heterogeneous_queue.hpp:184 | if this throws, should we do anything? |
|
if this throws, should we do anything?../include/libtorrent/heterogeneous_queue.hpp:184 - 1) / sizeof(uintptr_t);
+ | ||
relevance 0 | ../include/libtorrent/heterogeneous_queue.hpp:184 | if this throws, should we do anything? |
if this throws, should we do anything?../include/libtorrent/heterogeneous_queue.hpp:184 - 1) / sizeof(uintptr_t);
void grow_capacity(int size)
{
@@ -8645,7 +8569,7 @@ put completes../src/kademlia/dht_tracker.cpp:264relevance 0 | ../include/libtorrent/identify_client.hpp:50 | hide these declarations when deprecaated functions are disabled, and expose them internally in a header under aux_. |
|
hide these declarations when deprecaated functions are disabled, and
+ | ||
relevance 0 | ../include/libtorrent/identify_client.hpp:50 | hide these declarations when deprecaated functions are disabled, and expose them internally in a header under aux_. |
hide these declarations when deprecaated functions are disabled, and
expose them internally in a header under aux_.../include/libtorrent/identify_client.hpp:50
*/
@@ -8688,10 +8612,10 @@ namespace libtorrent
#endif // TORRENT_IDENTIFY_CLIENT_HPP_INCLUDED
- | ||
relevance 0 | ../include/libtorrent/peer_connection.hpp:205 | make this a raw pointer (to save size in the first cache line) and make the constructor take a raw pointer. torrent objects should always outlive their peers |
make this a raw pointer (to save size in
+ | ||
relevance 0 | ../include/libtorrent/peer_connection.hpp:209 | make this a raw pointer (to save size in the first cache line) and make the constructor take a raw pointer. torrent objects should always outlive their peers |
make this a raw pointer (to save size in
the first cache line) and make the constructor
take a raw pointer. torrent objects should always
-outlive their peers../include/libtorrent/peer_connection.hpp:205 , m_connecting(!t.expired())
+outlive their peers../include/libtorrent/peer_connection.hpp:209 , m_connecting(!t.expired())
, m_endgame_mode(false)
, m_snubbed(false)
, m_interesting(false)
@@ -8742,8 +8666,8 @@ outlive their peers../include/libtorrent/peer_connection.hpp:205 | ||
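Part of the motivation in the peer_connection TODO above is size: a boost::weak_ptr member carries two pointers (object plus control block) while a raw pointer carries one, which matters in the first cache line of a hot object. A quick, self-contained way to see the difference:

#include <boost/weak_ptr.hpp>
#include <cstdio>

struct torrent_stub {};

struct with_weak_ptr { boost::weak_ptr<torrent_stub> t; };
struct with_raw_ptr  { torrent_stub* t; };

int main()
{
	std::printf("weak_ptr member: %lu bytes, raw pointer member: %lu bytes\n"
		, (unsigned long)sizeof(with_weak_ptr)
		, (unsigned long)sizeof(with_raw_ptr));
	// typically 16 vs 8 on a 64-bit build
}

The safety argument in the TODO (torrent objects always outliving their peers) is what would make the smaller raw pointer acceptable.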
relevance 0 | ../include/libtorrent/peer_connection.hpp:1050 | factor this out into its own class with a virtual interface torrent and session should implement this interface |
factor this out into its own class with a virtual interface
-torrent and session should implement this interface../include/libtorrent/peer_connection.hpp:1050
+ | ||
relevance 0 | ../include/libtorrent/peer_connection.hpp:1040 | factor this out into its own class with a virtual interface torrent and session should implement this interface |
factor this out into its own class with a virtual interface
+torrent and session should implement this interface../include/libtorrent/peer_connection.hpp:1040
// the local endpoint for this peer, i.e. our address
// and our port. If this is set for outgoing connections
// before the connection completes, it means we want to
@@ -8794,7 +8718,7 @@ torrent and session should implement this interface../include/libtorren
// we have got from this peer. If the request
// queue gets empty, and there have been
// invalid requests, we can assume the
- | ||
relevance 0 | ../include/libtorrent/peer_connection_interface.hpp:47 | make this interface smaller! |
make this interface smaller!../include/libtorrent/peer_connection_interface.hpp:47CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
@@ -8842,7 +8766,7 @@ namespace libtorrent
#endif
- | ||
relevance 0 | ../include/libtorrent/performance_counters.hpp:139 | should keepalives be in here too? how about dont-have, share-mode, upload-only |
should keepalives be in here too?
+ | ||
relevance 0 | ../include/libtorrent/performance_counters.hpp:139 | should keepalives be in here too? how about dont-have, share-mode, upload-only |
should keepalives be in here too?
how about dont-have, share-mode, upload-only../include/libtorrent/performance_counters.hpp:139 // a connect candidate
connection_attempt_loops,
// successful incoming connections (not rejected for any reason)
@@ -8894,7 +8818,7 @@ how about dont-have, share-mode, upload-only../include/libtorrent/perfo
num_outgoing_cancel,
num_outgoing_dht_port,
num_outgoing_suggest,
- | ||
relevance 0 | ../include/libtorrent/performance_counters.hpp:451 | some space could be saved here by making gauges 32 bits |
some space could be saved here by making gauges 32 bits../include/libtorrent/performance_counters.hpp:451 | ||
relevance 0 | ../include/libtorrent/performance_counters.hpp:452 | restore these to regular integers. Instead have one copy of the counters per thread and collect them at convenient synchronization points |
restore these to regular integers. Instead have one copy
of the counters per thread and collect them at convenient
synchronization points../include/libtorrent/performance_counters.hpp:452 num_utp_deleted,
@@ -8929,7 +8853,7 @@ synchronization points../include/libtorrent/performance_counters.hpp:45
#endif
- | ||
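The performance_counters TODO above ("one copy of the counters per thread and collect them at convenient synchronization points") is the classic sharded-counter pattern: each thread bumps plain integers and the totals are folded together under a lock that is being taken anyway. A hedged sketch of the idea, not the existing counters class:

#include <cstddef>
#include <vector>
#include <boost/cstdint.hpp>

struct thread_counters
{
	std::vector<boost::int64_t> values;
	explicit thread_counters(int n) : values(n, 0) {}
	void inc(int idx, boost::int64_t v = 1) { values[idx] += v; }
};

struct global_counters
{
	std::vector<boost::int64_t> totals;
	explicit global_counters(int n) : totals(n, 0) {}

	// called at a synchronization point that already holds a lock; no
	// per-increment atomics are needed anywhere
	void collect(thread_counters& tc)
	{
		for (std::size_t i = 0; i < totals.size(); ++i)
		{
			totals[i] += tc.values[i];
			tc.values[i] = 0;
		}
	}
};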
relevance 0 | ../include/libtorrent/piece_picker.hpp:760 | should this be allocated lazily? |
should this be allocated lazily?../include/libtorrent/piece_picker.hpp:760
std::vector<downloading_piece>::const_iterator find_dl_piece(int queue, int index) const;
std::vector<downloading_piece>::iterator find_dl_piece(int queue, int index);
@@ -8980,8 +8904,8 @@ synchronization points../include/libtorrent/performance_counters.hpp:45
// this holds the information of the blocks in partially downloaded
// pieces. the downloading_piece::info index point into this vector for
- | ||
relevance 0 | ../include/libtorrent/proxy_base.hpp:173 | it would be nice to remember the bind port and bind once we know where the proxy is m_sock.bind(endpoint, ec); |
it would be nice to remember the bind port and bind once we know where the proxy is
-m_sock.bind(endpoint, ec);../include/libtorrent/proxy_base.hpp:173 void bind(endpoint_type const& /* endpoint */)
+ | ||
relevance 0 | ../include/libtorrent/proxy_base.hpp:174 | it would be nice to remember the bind port and bind once we know where the proxy is m_sock.bind(endpoint, ec); |
it would be nice to remember the bind port and bind once we know where the proxy is
+m_sock.bind(endpoint, ec);../include/libtorrent/proxy_base.hpp:174 void bind(endpoint_type const& /* endpoint */)
{
// m_sock.bind(endpoint);
}
@@ -9032,7 +8956,7 @@ m_sock.bind(endpoint, ec);../include/libtorrent/proxy_base.hpp:173
m_sock.close(ec);
m_resolver.cancel();
}
- | ||
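For the proxy_base TODO above, remembering the endpoint handed to bind() and applying it only once the proxy address is known could look roughly like the following (a sketch with invented names, not the real proxy_base interface):

#include <boost/asio.hpp>

using boost::asio::ip::tcp;

struct proxy_stream_stub
{
	tcp::socket sock;
	tcp::endpoint bind_ep;
	bool has_bind_ep;

	explicit proxy_stream_stub(boost::asio::io_service& ios)
		: sock(ios), has_bind_ep(false) {}

	// called by the user before the proxy has been resolved: just remember
	// the endpoint instead of binding a not-yet-open socket
	void bind(tcp::endpoint const& ep, boost::system::error_code&)
	{
		bind_ep = ep;
		has_bind_ep = true;
	}

	// called once the proxy endpoint is known
	void connect(tcp::endpoint const& proxy, boost::system::error_code& ec)
	{
		sock.open(proxy.protocol(), ec);
		if (ec) return;
		if (has_bind_ep) sock.bind(bind_ep, ec);
		if (ec) return;
		sock.connect(proxy, ec);
	}
};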
relevance 0 | ../include/libtorrent/receive_buffer.hpp:258 | Detect when the start of the next crypto packet is aligned with the start of piece data and the crypto packet is at least as large as the piece data. With a little extra work we could receive directly into a disk buffer in that case. |
Detect when the start of the next crypto packet is aligned
+ |  || 
relevance 0 | ../include/libtorrent/receive_buffer.hpp:258 | Detect when the start of the next crypto packet is aligned with the start of piece data and the crypto packet is at least as large as the piece data. With a little extra work we could receive directly into a disk buffer in that case. |
Detect when the start of the next crypto packet is aligned
with the start of piece data and the crypto packet is at least
as large as the piece data. With a little extra work
we could receive directly into a disk buffer in that case.../include/libtorrent/receive_buffer.hpp:258
@@ -9075,7 +8999,7 @@ private:
} // namespace libtorrent
#endif // #ifndef TORRENT_RECEIVE_BUFFER_HPP_INCLUDED
- | ||
relevance 0 | ../include/libtorrent/session_handle.hpp:654 | add get_peer_class_type_filter() as well |
add get_peer_class_type_filter() as well../include/libtorrent/session_handle.hpp:654 //
+ | ||
relevance 0 | ../include/libtorrent/session_handle.hpp:682 | add get_peer_class_type_filter() as well |
add get_peer_class_type_filter() as well../include/libtorrent/session_handle.hpp:682 //
// The ``peer_class`` argument cannot be greater than 31. The bitmasks
// representing peer classes in the ``peer_class_filter`` are 32 bits.
//
@@ -9126,10 +9050,10 @@ private:
// destructs.
//
// For more information on peer classes, see peer-classes_.
- | ||
relevance 0 | ../include/libtorrent/settings_pack.hpp:1086 | deprecate this ``max_rejects`` is the number of piece requests we will reject in a row while a peer is choked before the peer is considered abusive and is disconnected. |
deprecate this
+ | ||
relevance 0 | ../include/libtorrent/settings_pack.hpp:1094 | deprecate this ``max_rejects`` is the number of piece requests we will reject in a row while a peer is choked before the peer is considered abusive and is disconnected. |
deprecate this
``max_rejects`` is the number of piece requests we will reject in a
row while a peer is choked before the peer is considered abusive
-and is disconnected.../include/libtorrent/settings_pack.hpp:1086
+and is disconnected.../include/libtorrent/settings_pack.hpp:1094
// this is the minimum allowed announce interval for a tracker. This
// is specified in seconds and is used as a sanity check on what is
// returned from a tracker. It mitigates hammering misconfigured
@@ -9180,7 +9104,7 @@ and is disconnected.../include/libtorrent/settings_pack.hpp:1086 | ||
relevance 0 | ../include/libtorrent/torrent.hpp:1265 | this wastes 5 bits per file |
this wastes 5 bits per file../include/libtorrent/torrent.hpp:1265 typedef std::list<boost::shared_ptr<torrent_plugin> > extension_list_t;
+ | ||
relevance 0 | ../include/libtorrent/torrent.hpp:1247 | this wastes 5 bits per file |
this wastes 5 bits per file../include/libtorrent/torrent.hpp:1247 typedef std::list<boost::shared_ptr<torrent_plugin> > extension_list_t;
extension_list_t m_extensions;
#endif
@@ -9208,7 +9132,7 @@ and is disconnected.../include/libtorrent/settings_pack.hpp:1086../include/libtorrent/settings_pack.hpp:1086 | ||
relevance 0 | ../include/libtorrent/torrent.hpp:1320 | These two bitfields should probably be coalesced into one |
These two bitfields should probably be coalesced into one../include/libtorrent/torrent.hpp:1320 // the .torrent file from m_url
+ | ||
relevance 0 | ../include/libtorrent/torrent.hpp:1302 | These two bitfields should probably be coalesced into one |
These two bitfields should probably be coalesced into one../include/libtorrent/torrent.hpp:1302 // the .torrent file from m_url
// std::vector<char> m_torrent_file_buf;
// this is a list of all pieces that we have announced
@@ -9282,60 +9206,8 @@ and is disconnected.../include/libtorrent/settings_pack.hpp:1086 | ||
relevance 0 | ../include/libtorrent/torrent_info.hpp:118 | include the number of peers received from this tracker, at last announce |
include the number of peers received from this tracker, at last
-announce../include/libtorrent/torrent_info.hpp:118
- // if this tracker failed the last time it was contacted
- // this error code specifies what error occurred
- error_code last_error;
-
- // returns the number of seconds to the next announce on this tracker.
- // ``min_announce_in()`` returns the number of seconds until we are
- // allowed to force another tracker update with this tracker.
- //
- // If the last time this tracker was contacted failed, ``last_error`` is
- // the error code describing what error occurred.
- int next_announce_in() const;
- int min_announce_in() const;
-
- // the time of next tracker announce
- time_point next_announce;
-
- // no announces before this time
- time_point min_announce;
-
-
- // these are either -1 or the scrape information this tracker last
- // responded with. *incomplete* is the current number of downloaders in
- // the swarm, *complete* is the current number of seeds in the swarm and
- // *downloaded* is the cumulative number of completed downloads of this
- // torrent, since the beginning of time (from this tracker's point of
- // view).
-
- // if this tracker has returned scrape data, these fields are filled in
- // with valid numbers. Otherwise they are set to -1. the number of
- // current downloaders
- int scrape_incomplete;
- int scrape_complete;
- int scrape_downloaded;
-
- // the tier this tracker belongs to
- boost::uint8_t tier;
-
- // the max number of failures to announce to this tracker in
- // a row, before this tracker is not used anymore. 0 means unlimited
- boost::uint8_t fail_limit;
-
- // the number of times in a row we have failed to announce to this
- // tracker.
- boost::uint8_t fails:7;
-
- // true while we're waiting for a response from the tracker.
- bool updating:1;
-
- // flags for the source bitmask, each indicating where
- // we heard about this tracker
- | ||
relevance 0 | ../include/libtorrent/torrent_info.hpp:265 | there may be some opportunities to optimize the size of torrent_info. specifically to turn some std::string and std::vector into pointers |
there may be some opportunities to optimize the size of torrent_info.
-specifically to turn some std::string and std::vector into pointers../include/libtorrent/torrent_info.hpp:265 // The URL of the web seed
+ |  || 
relevance 0 | ../include/libtorrent/torrent_info.hpp:118 | there may be some opportunities to optimize the size of torrent_info. specifically to turn some std::string and std::vector into pointers |
there may be some opportunities to optimize the size of torrent_info.
+specifically to turn some std::string and std::vector into pointers../include/libtorrent/torrent_info.hpp:118 // The URL of the web seed
std::string url;
// Optional authentication. If this is set, it's passed
@@ -9386,7 +9258,7 @@ specifically to turn some std::string and std::vector into pointers../i
// error occur, they will simply set the error code to describe what went
// wrong and not fully initialize the torrent_info object. The overloads
// that do not take the extra error_code parameter will always throw if
- | ||
relevance 0 | ../include/libtorrent/tracker_manager.hpp:380 | this should be unique_ptr in the future |
this should be unique_ptr in the future../include/libtorrent/tracker_manager.hpp:380
+ | ||
relevance 0 | ../include/libtorrent/tracker_manager.hpp:384 | this should be unique_ptr in the future |
this should be unique_ptr in the future../include/libtorrent/tracker_manager.hpp:384
// this is only used for SOCKS packets, since
// they may be addressed to hostname
virtual bool incoming_packet(error_code const& e, char const* hostname
@@ -9427,25 +9299,25 @@ specifically to turn some std::string and std::vector into pointers../i
#endif // TORRENT_TRACKER_MANAGER_HPP_INCLUDED
- | ||
relevance 0 | ../include/libtorrent/upnp.hpp:108 | support using the windows API for UPnP operations as well |
support using the windows API for UPnP operations as well../include/libtorrent/upnp.hpp:108 external_port_must_be_wildcard = 727
- };
-
- // hidden
- TORRENT_EXPORT boost::system::error_code make_error_code(error_code_enum e);
+ | ||
relevance 0 | ../include/libtorrent/upnp.hpp:132 | support using the windows API for UPnP operations as well |
support using the windows API for UPnP operations as well../include/libtorrent/upnp.hpp:132 std::list<std::string> tag_stack;
+ std::string control_url;
+ std::string service_type;
+ std::string model;
+ std::string url_base;
+ bool top_tags(const char* str1, const char* str2)
+ {
+ std::list<std::string>::reverse_iterator i = tag_stack.rbegin();
+ if (i == tag_stack.rend()) return false;
+ if (!string_equal_no_case(i->c_str(), str2)) return false;
+ ++i;
+ if (i == tag_stack.rend()) return false;
+ if (!string_equal_no_case(i->c_str(), str1)) return false;
+ return true;
}
+};
- // the boost.system error category for UPnP errors
- TORRENT_EXPORT boost::system::error_category& get_upnp_category();
-
-// int: port-mapping index
-// address: external address as queried from router
-// int: external port
-// std::string: error message
-// an empty string as error means success
-// a port-mapping index of -1 means it's
-// an informational log message
-typedef boost::function<void(int, address, int, error_code const&)> portmap_callback_t;
-typedef boost::function<void(char const*)> log_callback_t;
+TORRENT_EXTRA_EXPORT void find_control_url(int type, char const* string
+ , int str_len, parse_state& state);
class TORRENT_EXTRA_EXPORT upnp : public boost::enable_shared_from_this<upnp>
{
@@ -9456,9 +9328,7 @@ public:
, bool ignore_nonrouters);
~upnp();
- void start(void* state = 0);
-
- void* drain_state();
+ void start();
enum protocol_type { none = 0, udp = 1, tcp = 2 };
@@ -9478,7 +9348,9 @@ public:
// to refer to mappings that fails or succeeds in the portmap_error_alert_ and
// portmap_alert_ respectively. If The mapping fails immediately, the return value
// is -1, which means failure. There will not be any error alert notification for
- | ||
relevance 0 | ../include/libtorrent/utp_stream.hpp:402 | implement blocking write. Low priority since it's not used (yet) |
implement blocking write. Low priority since it's not used (yet)../include/libtorrent/utp_stream.hpp:402 for (typename Mutable_Buffers::const_iterator i = buffers.begin()
+ // mappings that fail with a -1 return value.
+ int add_mapping(protocol_type p, int external_port, int local_port);
+ | ||
relevance 0 | ../include/libtorrent/utp_stream.hpp:424 | implement blocking write. Low priority since it's not used (yet) |
implement blocking write. Low priority since it's not used (yet)../include/libtorrent/utp_stream.hpp:424 for (typename Mutable_Buffers::const_iterator i = buffers.begin()
, end(buffers.end()); i != end; ++i)
{
using boost::asio::buffer_cast;
@@ -9581,7 +9453,7 @@ public:
item(entry const& v
, std::pair<char const*, int> salt
, boost::uint64_t seq, char const* pk, char const* sk);
- | ||
relevance 0 | ../include/libtorrent/aux_/session_impl.hpp:846 | should this be renamed m_outgoing_interfaces? |
should this be renamed m_outgoing_interfaces?../include/libtorrent/aux_/session_impl.hpp:846 // client with the tracker only. It is randomized
+ | ||
relevance 0 | ../include/libtorrent/aux_/session_impl.hpp:858 | should this be renamed m_outgoing_interfaces? |
should this be renamed m_outgoing_interfaces?../include/libtorrent/aux_/session_impl.hpp:858 // client with the tracker only. It is randomized
// at startup
int m_key;
@@ -9619,7 +9491,7 @@ public:
#endif
#ifdef TORRENT_USE_OPENSSL
- boost::asio::ssl::context* ssl_ctx() { return &m_ssl_ctx; }
+ ssl::context* ssl_ctx() { return &m_ssl_ctx; }
void on_incoming_utp_ssl(boost::shared_ptr<socket_type> const& s);
void ssl_handshake(error_code const& ec, boost::shared_ptr<socket_type> s);
#endif
@@ -9632,7 +9504,7 @@ public:
// round-robin index into m_net_interfaces
mutable boost::uint8_t m_interface_index;
- | ||
relevance 0 | ../include/libtorrent/aux_/session_impl.hpp:897 | replace this by a proper asio timer |
replace this by a proper asio timer../include/libtorrent/aux_/session_impl.hpp:897
+ | ||
relevance 0 | ../include/libtorrent/aux_/session_impl.hpp:909 | replace this by a proper asio timer |
replace this by a proper asio timer../include/libtorrent/aux_/session_impl.hpp:909
void open_new_incoming_socks_connection();
enum listen_on_flags_t
@@ -9656,7 +9528,7 @@ public:
// this is used to decide when to recalculate which
// torrents to keep queued and which to activate
- | ||
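Replacing the tick-driven countdowns the session_impl TODO above refers to with "a proper asio timer" usually means a deadline_timer that re-arms itself from its own handler. A minimal, self-contained sketch of that pattern (recalc_timer is an invented name):

#include <boost/asio.hpp>
#include <boost/bind.hpp>
#include <cstdio>

struct recalc_timer
{
	boost::asio::deadline_timer timer;

	explicit recalc_timer(boost::asio::io_service& ios) : timer(ios) {}

	void start(int seconds)
	{
		timer.expires_from_now(boost::posix_time::seconds(seconds));
		timer.async_wait(boost::bind(&recalc_timer::on_expire, this, _1, seconds));
	}

	void on_expire(boost::system::error_code const& ec, int seconds)
	{
		if (ec) return;                  // cancelled or shutting down
		std::printf("recalculating\n");  // the periodic work
		start(seconds);                  // re-arm for the next round
	}
};

int main()
{
	boost::asio::io_service ios;
	recalc_timer t(ios);
	t.start(1);
	// ios.run() would drive the timer; omitted so this toy terminates
}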
relevance 0 | ../include/libtorrent/aux_/session_impl.hpp:902 | replace this by a proper asio timer |
replace this by a proper asio timer../include/libtorrent/aux_/session_impl.hpp:902 {
+ | ||
relevance 0 | ../include/libtorrent/aux_/session_impl.hpp:914 | replace this by a proper asio timer |
replace this by a proper asio timer../include/libtorrent/aux_/session_impl.hpp:914 {
open_ssl_socket = 0x10
};
@@ -9682,7 +9554,7 @@ public:
// is only decresed when the unchoke set
// is recomputed, and when it reaches zero,
// the optimistic unchoke is moved to another peer.
- | ||
relevance 0 | ../include/libtorrent/aux_/session_impl.hpp:909 | replace this by a proper asio timer |
replace this by a proper asio timer../include/libtorrent/aux_/session_impl.hpp:909
+ | ||
relevance 0 | ../include/libtorrent/aux_/session_impl.hpp:921 | replace this by a proper asio timer |
replace this by a proper asio timer../include/libtorrent/aux_/session_impl.hpp:921
#ifndef TORRENT_DISABLE_DHT
entry m_dht_state;
#endif
diff --git a/include/libtorrent/kademlia/dht_storage.hpp b/include/libtorrent/kademlia/dht_storage.hpp
index 8d948a5db..4b685ddee 100644
--- a/include/libtorrent/kademlia/dht_storage.hpp
+++ b/include/libtorrent/kademlia/dht_storage.hpp
@@ -92,11 +92,12 @@ namespace dht
// If the torrent tracked contains a name, such a name
// must be stored as a string in peers["n"]
//
- // If the scrape parameter is true, you should fill these keys:
- // peers["BFpe"] - with the standard bit representation of a
- // 256 bloom filter containing the downloaders
- // peers["BFsd"] - with the standard bit representation of a
- // 256 bloom filter containing the seeders
+ // If the scrape parameter is true, you should fill these keys::
+ //
+ // peers["BFpe"] - with the standard bit representation of a
+ // 256 bloom filter containing the downloaders
+ // peers["BFsd"] - with the standard bit representation of a
+ // 256 bloom filter containing the seeders
//
// If the scrape parameter is false, you should fill the
// key peers["values"] with a list containing a subset of
| ||