diff --git a/docs/stats_counters.rst b/docs/stats_counters.rst
index d473deba5..95b7bf4cb 100644
--- a/docs/stats_counters.rst
+++ b/docs/stats_counters.rst
@@ -1755,6 +1755,41 @@ the number of bytes sent and received by the DHT
uTP counters. Each counter represents the number of times each event
has occurred.
+.. _utp.num_utp_idle:
+
+.. _utp.num_utp_syn_sent:
+
+.. _utp.num_utp_connected:
+
+.. _utp.num_utp_fin_sent:
+
+.. _utp.num_utp_close_wait:
+
+.. raw:: html
+
+	<a name="utp.num_utp_idle"></a>
+	<a name="utp.num_utp_syn_sent"></a>
+	<a name="utp.num_utp_connected"></a>
+	<a name="utp.num_utp_fin_sent"></a>
+	<a name="utp.num_utp_close_wait"></a>
+
++------------------------+-------+
+| name | type |
++========================+=======+
+| utp.num_utp_idle | gauge |
++------------------------+-------+
+| utp.num_utp_syn_sent | gauge |
++------------------------+-------+
+| utp.num_utp_connected | gauge |
++------------------------+-------+
+| utp.num_utp_fin_sent | gauge |
++------------------------+-------+
+| utp.num_utp_close_wait | gauge |
++------------------------+-------+
+
+
+the number of uTP sockets in each respective state
+
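+A minimal sketch of reading one of these gauges through the session stats
+mechanism (the ``find_metric_idx()``, ``post_session_stats()`` and
+``session_stats_alert`` names are assumptions about that counter API, not
+part of the table above):
+
+.. code:: c++
+
+	#include "libtorrent/session.hpp"
+	#include "libtorrent/session_stats.hpp"
+	#include "libtorrent/alert_types.hpp"
+	#include <cstdio>
+
+	void request_utp_gauge(libtorrent::session& ses)
+	{
+		// resolve the gauge name to its index in the counter array
+		// (returns -1 if the name is unknown)
+		int const idx = libtorrent::find_metric_idx("utp.num_utp_connected");
+		if (idx < 0) return;
+
+		// ask the session to post a session_stats_alert containing a
+		// snapshot of all counters and gauges
+		ses.post_session_stats();
+
+		// once the session_stats_alert is popped from the alert queue,
+		// the gauge is the value at index idx, e.g.:
+		// std::printf("connected uTP sockets: %lld\n"
+		//	, (long long)ss->values[idx]);
+	}
+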
.. _sock_bufs.socket_send_size3:
.. _sock_bufs.socket_send_size4:
diff --git a/docs/todo.html b/docs/todo.html
index 287f938ef..be5fd2cf3 100644
--- a/docs/todo.html
+++ b/docs/todo.html
@@ -22,10 +22,10 @@
relevance 3 | ../test/test_dht.cpp:436 | test obfuscated_get_peers |
test obfuscated_get_peers../test/test_dht.cpp:436 g_got_peers.insert(g_got_peers.end(), peers.begin(), peers.end());
}
@@ -77,7 +77,7 @@ bool get_item_cb(dht::item& i)
fprintf(stderr, "msg: %s\n", print_entry(response).c_str());
ret = dht::verify_message(&response, pong_desc, parsed, 4, error_string, sizeof(error_string));
- | ||
relevance 3 | ../test/test_transfer.cpp:284 | factor out the disk-full test into its own unit test |
factor out the disk-full test into its own unit test../test/test_transfer.cpp:284 print_alerts(ses1, "ses1", true, true, true, &on_alert);
+ | ||
relevance 3 | ../test/test_transfer.cpp:288 | factor out the disk-full test into its own unit test |
factor out the disk-full test into its own unit test../test/test_transfer.cpp:288 print_alerts(ses1, "ses1", true, true, true, &on_alert);
print_alerts(ses2, "ses2", true, true, true, &on_alert);
if (i % 10 == 0)
@@ -236,59 +236,7 @@ counter and the passed_hash_check member../src/piece_picker.cpp:3166 | ||
relevance 3 | ../src/session_impl.cpp:5195 | deprecate this function. All of this functionality should be exposed as performance counters |
deprecate this function. All of this functionality should be
-exposed as performance counters../src/session_impl.cpp:5195 if (m_alerts.should_post<portmap_alert>())
- m_alerts.post_alert(portmap_alert(mapping, port
- , map_transport));
- return;
- }
-
- if (ec)
- {
- if (m_alerts.should_post<portmap_error_alert>())
- m_alerts.post_alert(portmap_error_alert(mapping
- , map_transport, ec));
- }
- else
- {
- if (m_alerts.should_post<portmap_alert>())
- m_alerts.post_alert(portmap_alert(mapping, port
- , map_transport));
- }
- }
-
- session_status session_impl::status() const
- {
-// INVARIANT_CHECK;
- TORRENT_ASSERT(is_single_thread());
-
- session_status s;
-
- s.optimistic_unchoke_counter = m_optimistic_unchoke_time_scaler;
- s.unchoke_counter = m_unchoke_time_scaler;
-
- s.num_peers = int(m_connections.size());
- s.num_dead_peers = int(m_undead_peers.size());
- s.num_unchoked = m_stats_counters[counters::num_peers_up_unchoked_all];
- s.allowed_upload_slots = m_allowed_upload_slots;
-
- s.num_torrents = m_torrents.size();
- // only non-paused torrents want tick
- s.num_paused_torrents = m_torrents.size() - m_torrent_lists[torrent_want_tick].size();
-
- s.total_redundant_bytes = m_stats_counters[counters::recv_redundant_bytes];
- s.total_failed_bytes = m_stats_counters[counters::recv_failed_bytes];
-
- s.up_bandwidth_queue = m_upload_rate.queue_size();
- s.down_bandwidth_queue = m_download_rate.queue_size();
-
- s.up_bandwidth_bytes_queue = int(m_upload_rate.queued_bytes());
- s.down_bandwidth_bytes_queue = int(m_download_rate.queued_bytes());
-
- s.disk_write_queue = m_stats_counters[counters::num_peers_down_disk];
- s.disk_read_queue = m_stats_counters[counters::num_peers_up_disk];
-
- | ||
relevance 3 | ../src/torrent.cpp:7713 | if peer is a really good peer, maybe we shouldn't disconnect it |
if peer is a really good peer, maybe we shouldn't disconnect it../src/torrent.cpp:7713#if defined TORRENT_LOGGING
+ | ||
relevance 3 | ../src/torrent.cpp:7713 | if peer is a really good peer, maybe we shouldn't disconnect it |
if peer is a really good peer, maybe we shouldn't disconnect it../src/torrent.cpp:7713#if defined TORRENT_LOGGING
debug_log("incoming peer (%d)", int(m_connections.size()));
#endif
@@ -339,9 +287,9 @@ exposed as performance counters../src/session_impl.cpp:5195 | ||
relevance 3 | ../src/web_peer_connection.cpp:596 | just make this peer not have the pieces associated with the file we just requested. Only when it doesn't have any of the file do the following |
just make this peer not have the pieces
+ | ||
relevance 3 | ../src/web_peer_connection.cpp:628 | just make this peer not have the pieces associated with the file we just requested. Only when it doesn't have any of the file do the following |
just make this peer not have the pieces
associated with the file we just requested. Only
-when it doesn't have any of the file do the following../src/web_peer_connection.cpp:596 {
+when it doesn't have any of the file do the following../src/web_peer_connection.cpp:628 {
++m_num_responses;
if (m_parser.connection_close())
@@ -392,7 +340,7 @@ when it doesn't have any of the file do the following../src/web_peer_co
{
// we should not try this server again.
t->remove_web_seed(this, errors::missing_location, op_bittorrent, 2);
- | ||
relevance 3 | ../src/kademlia/get_item.cpp:220 | we don't support CAS errors here! we need a custom observer |
we don't support CAS errors here! we need a custom observer../src/kademlia/get_item.cpp:220 TORRENT_LOG(node) << "sending put [ v: \"" << m_data.value()
+ | ||
relevance 3 | ../src/kademlia/get_item.cpp:220 | we don't support CAS errors here! we need a custom observer |
we don't support CAS errors here! we need a custom observer../src/kademlia/get_item.cpp:220 TORRENT_LOG(node) << "sending put [ v: \"" << m_data.value()
<< "\" seq: " << (m_data.is_mutable() ? m_data.seq() : -1)
<< " nodes: " << v.size() << " ]" ;
#endif
@@ -443,7 +391,7 @@ void get_item_observer::reply(msg const& m)
boost::uint64_t seq = 0;
lazy_entry const* r = m.message.dict_find_dict("r");
- | ||
relevance 3 | ../include/libtorrent/block_cache.hpp:209 | could this be a scoped_array instead? does cached_piece_entry really need to be copyable? cached_piece_entry does need to be copyable since it's part of a container, but it's possible it could be a raw pointer or boost::unique_ptr perhaps |
could this be a scoped_array instead? does cached_piece_entry
+ | ||
relevance 3 | ../include/libtorrent/block_cache.hpp:209 | could this be a scoped_array instead? does cached_piece_entry really need to be copyable? cached_piece_entry does need to be copyable since it's part of a container, but it's possible it could be a raw pointer or boost::unique_ptr perhaps |
could this be a scoped_array instead? does cached_piece_entry
really need to be copyable? cached_piece_entry does need to be
copyable since it's part of a container, but it's possible it could be
a raw pointer or boost::unique_ptr perhaps../include/libtorrent/block_cache.hpp:209 tailqueue read_jobs;
@@ -471,7 +419,111 @@ a raw pointer or boost::unique_ptr perhaps../include/libtorrent/block_c
// the last time a block was written to this piece
// plus the minimum amount of time the block is guaranteed
// to stay in the cache
- | ||
relevance 2 | ../test/test_resume.cpp:331 | test all other resume flags here too. This would require returning more than just the torrent_status from test_resume_flags. Also http seeds and trackers for instance |
test all other resume flags here too. This would require returning
+ | ||
relevance 3 | ../include/libtorrent/file_storage.hpp:49 | the file_entry should be deprecated and add_file() should be thought through a bit better |
the file_entry should be deprecated and add_file() should be
+thought through a bit better../include/libtorrent/file_storage.hpp:49POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#ifndef TORRENT_FILE_STORAGE_HPP_INCLUDED
+#define TORRENT_FILE_STORAGE_HPP_INCLUDED
+
+#include <string>
+#include <vector>
+#include <ctime>
+#include <boost/cstdint.hpp>
+
+#include "libtorrent/assert.hpp"
+#include "libtorrent/peer_request.hpp"
+#include "libtorrent/peer_id.hpp"
+
+namespace libtorrent
+{
+ struct file;
+
+
+ // information about a file in a file_storage
+ struct TORRENT_EXPORT file_entry
+ {
+ // hidden
+ file_entry();
+ // hidden
+ ~file_entry();
+
+ // the full path of this file. The paths are unicode strings
+ // encoded in UTF-8.
+ std::string path;
+
+ // the path which this is a symlink to, or empty if this is
+ // not a symlink. This field is only used if the ``symlink_attribute`` is set.
+ std::string symlink_path;
+
+ // the offset of this file inside the torrent
+ boost::int64_t offset;
+
+	// the size of the file (in bytes). ``offset`` is the byte offset
+	// of the file within the torrent, i.e. the sum of the sizes of
+	// all the files before it in the list.
+ boost::int64_t size;
+
+ // the offset in the file where the storage should start. The normal
+ // case is to have this set to 0, so that the storage starts saving data at the start
+	// of the file. In cases where multiple files are mapped into the same file though,
+ // the ``file_base`` should be set to an offset so that the different regions do
+ // not overlap. This is used when mapping "unselected" files into a so-called part
+ // file.
+ | ||
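A minimal sketch of what the offset and size fields above describe, assuming
the public file_storage accessors file_offset(), file_size(), file_path() and
piece_length() are the counterparts of these fields:

	#include "libtorrent/file_storage.hpp"
	#include <cstdio>

	void print_file_layout(libtorrent::file_storage const& fs)
	{
		for (int i = 0; i < fs.num_files(); ++i)
		{
			// offset: the file's position in the torrent's flat byte
			// stream, i.e. the sum of the sizes of all preceding files
			boost::int64_t const off = fs.file_offset(i);
			boost::int64_t const size = fs.file_size(i);

			// the piece the first byte of this file falls into
			int const first_piece = int(off / fs.piece_length());

			std::printf("%s: bytes [%lld, %lld) start in piece %d\n"
				, fs.file_path(i).c_str(), (long long)off
				, (long long)(off + size), first_piece);
		}
	}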
relevance 3 | ../include/libtorrent/session_status.hpp:87 | add accessors to query the DHT state (post the result as an alert) holds dht routing table stats |
add accessors to query the DHT state (post the result as an alert)
+holds dht routing table stats../include/libtorrent/session_status.hpp:87
+	// the number of nodes left that could be queried for this
+ // lookup. Many of these are likely to be part of the trail
+ // while performing the lookup and would never end up actually
+ // being queried.
+ int nodes_left;
+
+ // the number of seconds ago the
+ // last message was sent that's still
+ // outstanding
+ int last_sent;
+
+ // the number of outstanding requests
+ // that have exceeded the short timeout
+ // and are considered timed out in the
+ // sense that they increased the branch
+ // factor
+ int first_timeout;
+ };
+
+ struct TORRENT_EXPORT dht_routing_bucket
+ {
+ // the total number of nodes and replacement nodes
+ // in the routing table
+ int num_nodes;
+ int num_replacements;
+
+ // number of seconds since last activity
+ int last_active;
+ };
+
+#ifndef TORRENT_NO_DEPRECATE
+ // holds counters and gauges for the uTP sockets
+ // deprecated in 1.1 in favor of session_stats counters, which is a more
+	// flexible, extensible and performant mechanism for stats.
+ struct TORRENT_EXPORT utp_status
+ {
+ // gauges. These are snapshots of the number of
+ // uTP sockets in each respective state
+ int num_idle;
+ int num_syn_sent;
+ int num_connected;
+ int num_fin_sent;
+ int num_close_wait;
+
+ // These are monotonically increasing
+ // and cumulative counters for their respective event.
+ boost::uint64_t packet_loss;
+ boost::uint64_t timeout;
+ boost::uint64_t packets_in;
+ boost::uint64_t packets_out;
+ | ||
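A small sketch of the deprecated access path these gauges belong to, assuming
session::status() and the session_status::utp_stats member (the session_stats
counters are the non-deprecated replacement):

	#include "libtorrent/session.hpp"
	#include "libtorrent/session_status.hpp"
	#include <cstdio>

	void print_utp_gauges(libtorrent::session& ses)
	{
		// deprecated in 1.1: takes a snapshot of the whole
		// session_status struct, including the uTP gauges
		libtorrent::session_status const st = ses.status();
		std::printf("uTP sockets -- idle: %d connected: %d\n"
			, st.utp_stats.num_idle, st.utp_stats.num_connected);
	}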
relevance 2 | ../test/test_resume.cpp:331 | test all other resume flags here too. This would require returning more than just the torrent_status from test_resume_flags. Also http seeds and trackers for instance |
test all other resume flags here too. This would require returning
more than just the torrent_status from test_resume_flags. Also http seeds
and trackers for instance../test/test_resume.cpp:331 // resume data overrides the paused flag
fprintf(stderr, "flags: paused\n");
@@ -497,7 +549,7 @@ and trackers for instance../test/test_resume.cpp:331relevance 2 | ../src/disk_io_thread.cpp:835 | should this be allocated on the stack? |
|
should this be allocated on the stack?../src/disk_io_thread.cpp:835 // if we're also flushing the read cache, this piece
+ | ||
relevance 2 | ../src/disk_io_thread.cpp:838 | should this be allocated on the stack? |
should this be allocated on the stack?../src/disk_io_thread.cpp:838 // if we're also flushing the read cache, this piece
// should be removed as soon as all write jobs finishes
// otherwise it will turn into a read piece
}
@@ -522,6 +574,7 @@ and trackers for instance../test/test_resume.cpp:331 | ||
relevance 2 | ../src/disk_io_thread.cpp:876 | we're not flushing the read cache at all? |
we're not flushing the read cache at all?../src/disk_io_thread.cpp:876 // from disk_io_thread::do_delete, which is a fence job and should
- // have any other jobs active, i.e. there should not be any references
- // keeping pieces or blocks alive
- if ((flags & flush_delete_cache) && (flags & flush_expect_clear))
- {
- boost::unordered_set<cached_piece_entry*> const& storage_pieces = storage->cached_pieces();
- for (boost::unordered_set<cached_piece_entry*>::const_iterator i = storage_pieces.begin()
- , end(storage_pieces.end()); i != end; ++i)
- {
- cached_piece_entry* pe = m_disk_cache.find_piece(storage, (*i)->piece);
- TORRENT_PIECE_ASSERT(pe->num_dirty == 0, pe);
- }
- }
-#endif
- }
- else
- {
- std::pair<block_cache::iterator, block_cache::iterator> range = m_disk_cache.all_pieces();
- while (range.first != range.second)
- {
- while (range.first->num_dirty == 0)
- {
- ++range.first;
- if (range.first == range.second) return;
- }
- cached_piece_entry* pe = const_cast<cached_piece_entry*>(&*range.first);
- flush_piece(pe, flags, completed_jobs, l);
- range = m_disk_cache.all_pieces();
- }
- }
- }
-
- // this is called if we're exceeding (or about to exceed) the cache
- // size limit. This means we should not restrict ourselves to contiguous
- // blocks of write cache line size, but try to flush all old blocks
- // this is why we pass in 1 as cont_block to the flushing functions
- void disk_io_thread::try_flush_write_blocks(int num, tailqueue& completed_jobs
- , mutex::scoped_lock& l)
- {
- DLOG("try_flush_write_blocks: %d\n", num);
-
- list_iterator range = m_disk_cache.write_lru_pieces();
- std::vector<std::pair<piece_manager*, int> > pieces;
- pieces.reserve(m_disk_cache.num_write_lru_pieces());
-
- for (list_iterator p = range; p.get() && num > 0; p.next())
- {
- cached_piece_entry* e = (cached_piece_entry*)p.get();
- if (e->num_dirty == 0) continue;
- pieces.push_back(std::make_pair(e->storage.get(), int(e->piece)));
- }
- | ||
relevance 2 | ../src/file.cpp:1501 | use vm_copy here, if available, and if buffers are aligned |
use vm_copy here, if available, and if buffers are aligned../src/file.cpp:1501 CloseHandle(native_handle());
- m_path.clear();
-#else
- if (m_file_handle != INVALID_HANDLE_VALUE)
- ::close(m_file_handle);
-#endif
-
- m_file_handle = INVALID_HANDLE_VALUE;
-
- m_open_mode = 0;
- }
-
- // defined in storage.cpp
- int bufs_size(file::iovec_t const* bufs, int num_bufs);
-
- void gather_copy(file::iovec_t const* bufs, int num_bufs, char* dst)
- {
- int offset = 0;
- for (int i = 0; i < num_bufs; ++i)
- {
- memcpy(dst + offset, bufs[i].iov_base, bufs[i].iov_len);
- offset += bufs[i].iov_len;
- }
- }
-
- void scatter_copy(file::iovec_t const* bufs, int num_bufs, char const* src)
- {
- int offset = 0;
- for (int i = 0; i < num_bufs; ++i)
- {
- | ||
relevance 2 | ../src/file.cpp:1512 | use vm_copy here, if available, and if buffers are aligned |
use vm_copy here, if available, and if buffers are aligned../src/file.cpp:1512 }
-
- // defined in storage.cpp
- int bufs_size(file::iovec_t const* bufs, int num_bufs);
-
- void gather_copy(file::iovec_t const* bufs, int num_bufs, char* dst)
- {
- int offset = 0;
- for (int i = 0; i < num_bufs; ++i)
- {
- memcpy(dst + offset, bufs[i].iov_base, bufs[i].iov_len);
- offset += bufs[i].iov_len;
- }
- }
-
- void scatter_copy(file::iovec_t const* bufs, int num_bufs, char const* src)
- {
- int offset = 0;
- for (int i = 0; i < num_bufs; ++i)
- {
- memcpy(bufs[i].iov_base, src + offset, bufs[i].iov_len);
- offset += bufs[i].iov_len;
- }
- }
-
- bool coalesce_read_buffers(file::iovec_t const*& bufs, int& num_bufs, file::iovec_t* tmp)
- {
- int buf_size = bufs_size(bufs, num_bufs);
- // this is page aligned since it's used in APIs which
- // are likely to require that (depending on OS)
- char* buf = (char*)page_aligned_allocator::malloc(buf_size);
- if (!buf) return false;
- tmp->iov_base = buf;
- tmp->iov_len = buf_size;
- bufs = tmp;
- num_bufs = 1;
- return true;
- }
-
- void coalesce_read_buffers_end(file::iovec_t const* bufs, int num_bufs, char* buf, bool copy)
- {
- if (copy) scatter_copy(bufs, num_bufs, buf);
- page_aligned_allocator::free(buf);
- }
-
- bool coalesce_write_buffers(file::iovec_t const*& bufs, int& num_bufs, file::iovec_t* tmp)
- {
- // coalesce buffers means allocate a temporary buffer and
- // issue a single write operation instead of using a vector
- // operation
- int buf_size = 0;
- | ||
relevance 2 | ../src/file_storage.cpp:480 | it would be nice if file_entry::filehash could be taken into account as well, and if the file_storage object could actually hold copies of filehash |
it would be nice if file_entry::filehash could be taken into
+ | ||
relevance 2 | ../src/file_storage.cpp:482 | it would be nice if file_entry::filehash could be taken into account as well, and if the file_storage object could actually hold copies of filehash |
it would be nice if file_entry::filehash could be taken into
account as well, and if the file_storage object could actually hold
-copies of filehash../src/file_storage.cpp:480 e.executable_attribute = (flags & attribute_executable) != 0;
+copies of filehash../src/file_storage.cpp:482 e.executable_attribute = (flags & attribute_executable) != 0;
if ((flags & attribute_symlink) && m_symlinks.size() < internal_file_entry::not_a_symlink - 1)
{
e.symlink_attribute = 1;
@@ -733,7 +653,7 @@ copies of filehash../src/file_storage.cpp:480relevance 2 | ../src/peer_connection.cpp:4596 | use a deadline_timer for timeouts. Don't rely on second_tick()! Hook this up to connect timeout as well. This would improve performance because of less work in second_tick(), and might let us remove ticking entirely eventually |
|
use a deadline_timer for timeouts. Don't rely on second_tick()!
+ | ||
relevance 2 | ../src/peer_connection.cpp:4596 | use a deadline_timer for timeouts. Don't rely on second_tick()! Hook this up to connect timeout as well. This would improve performance because of less work in second_tick(), and might let us remove ticking entirely eventually |
use a deadline_timer for timeouts. Don't rely on second_tick()!
Hook this up to connect timeout as well. This would improve performance
because of less work in second_tick(), and might let us remove ticking
entirely eventually../src/peer_connection.cpp:4596 if (is_i2p(*m_socket))
@@ -787,7 +707,7 @@ entirely eventually../src/peer_connection.cpp:4596relevance 2 | ../src/session_impl.cpp:216 | find a better place for this function |
|
find a better place for this function../src/session_impl.cpp:216 *j.vec, j.peer->make_write_handler(boost::bind(
+ | ||
relevance 2 | ../src/session_impl.cpp:216 | find a better place for this function |
find a better place for this function../src/session_impl.cpp:216 *j.vec, j.peer->make_write_handler(boost::bind(
&peer_connection::on_send_data, j.peer, _1, _2)));
}
else
@@ -838,7 +758,7 @@ namespace aux {
const static class_mapping v4_classes[] =
{
// everything
- | ||
relevance 2 | ../src/session_impl.cpp:1807 | the udp socket(s) should be using the same generic mechanism and not be restricted to a single one we should open one listen socket for each entry in the listen_interfaces list |
the udp socket(s) should be using the same generic
+ | ||
relevance 2 | ../src/session_impl.cpp:1807 | the udp socket(s) should be using the same generic mechanism and not be restricted to a single one we should open one listen socket for each entry in the listen_interfaces list |
the udp socket(s) should be using the same generic
mechanism and not be restricted to a single one
we should open one listen socket for each entry in the
listen_interfaces list../src/session_impl.cpp:1807 }
@@ -892,7 +812,7 @@ listen_interfaces list../src/session_impl.cpp:1807relevance 2 | ../src/session_impl.cpp:1902 | use bind_to_device in udp_socket |
|
use bind_to_device in udp_socket../src/session_impl.cpp:1902
+ | ||
relevance 2 | ../src/session_impl.cpp:1902 | use bind_to_device in udp_socket |
use bind_to_device in udp_socket../src/session_impl.cpp:1902
if (m_listen_sockets.empty() && ec)
{
#if defined TORRENT_LOGGING
@@ -938,7 +858,7 @@ listen_interfaces list../src/session_impl.cpp:1807relevance 2 | ../src/session_impl.cpp:1929 | use bind_to_device in udp_socket |
|
use bind_to_device in udp_socket../src/session_impl.cpp:1929 session_log("SSL: cannot bind to UDP interface \"%s\": %s"
+ | ||
relevance 2 | ../src/session_impl.cpp:1929 | use bind_to_device in udp_socket |
use bind_to_device in udp_socket../src/session_impl.cpp:1929 session_log("SSL: cannot bind to UDP interface \"%s\": %s"
, print_endpoint(m_listen_interface).c_str(), ec.message().c_str());
#endif
if (m_alerts.should_post<listen_failed_alert>())
@@ -989,8 +909,8 @@ listen_interfaces list../src/session_impl.cpp:1807relevance 2 | ../src/session_impl.cpp:3369 | make a list for torrents that want to be announced on the DHT so we don't have to loop over all torrents, just to find the ones that want to announce |
|
make a list for torrents that want to be announced on the DHT so we
-don't have to loop over all torrents, just to find the ones that want to announce../src/session_impl.cpp:3369 if (!m_dht_torrents.empty())
+ | ||
relevance 2 | ../src/session_impl.cpp:3392 | make a list for torrents that want to be announced on the DHT so we don't have to loop over all torrents, just to find the ones that want to announce |
make a list for torrents that want to be announced on the DHT so we
+don't have to loop over all torrents, just to find the ones that want to announce../src/session_impl.cpp:3392 if (!m_dht_torrents.empty())
{
boost::shared_ptr<torrent> t;
do
@@ -1041,7 +961,7 @@ don't have to loop over all torrents, just to find the ones that want to announc
if (m_torrents.empty()) return;
if (m_next_lsd_torrent == m_torrents.end())
- | ||
relevance 2 | ../src/torrent.cpp:718 | post alert |
post alert../src/torrent.cpp:718 state_updated();
+ | ||
relevance 2 | ../src/torrent.cpp:718 | post alert |
post alert../src/torrent.cpp:718 state_updated();
set_state(torrent_status::downloading);
@@ -1092,7 +1012,7 @@ don't have to loop over all torrents, just to find the ones that want to announc
TORRENT_ASSERT(piece >= 0);
TORRENT_ASSERT(m_verified.get_bit(piece) == false);
++m_num_verified;
- | ||
relevance 2 | ../src/torrent.cpp:4719 | abort lookups this torrent has made via the session host resolver interface |
abort lookups this torrent has made via the
+ | ||
relevance 2 | ../src/torrent.cpp:4719 | abort lookups this torrent has made via the session host resolver interface |
abort lookups this torrent has made via the
session host resolver interface../src/torrent.cpp:4719 // files belonging to the torrents
disconnect_all(errors::torrent_aborted, peer_connection_interface::op_bittorrent);
@@ -1144,9 +1064,60 @@ session host resolver interface../src/torrent.cpp:4719relevance 2 | ../src/web_peer_connection.cpp:655 | create a mapping of file-index to redirection URLs. Use that to form URLs instead. Support to reconnect to a new server without destructing this peer_connection |
|
create a mapping of file-index to redirection URLs. Use that to form
+ | ||
relevance 2 | ../src/utp_stream.cpp:348 | it would be nice if not everything would have to be public here |
it would be nice if not everything would have to be public here../src/utp_stream.cpp:348 void incoming(boost::uint8_t const* buf, int size, packet* p, ptime now);
+ void do_ledbat(int acked_bytes, int delay, int in_flight);
+ int packet_timeout() const;
+ bool test_socket_state();
+ void maybe_trigger_receive_callback();
+ void maybe_trigger_send_callback();
+ bool cancel_handlers(error_code const& ec, bool kill);
+ bool consume_incoming_data(
+ utp_header const* ph, boost::uint8_t const* ptr, int payload_size, ptime now);
+ void update_mtu_limits();
+ void experienced_loss(int seq_nr);
+
+ void set_state(int s);
+
+private:
+
+ // non-copyable
+ utp_socket_impl(utp_socket_impl const&);
+ utp_socket_impl const& operator=(utp_socket_impl const&);
+
+public:
+
+ void check_receive_buffers() const;
+
+#if TORRENT_USE_INVARIANT_CHECKS
+ void check_invariant() const;
+#endif
+
+ utp_socket_manager* m_sm;
+
+ // userdata pointer passed along
+ // with any callback. This is initialized to 0
+ // then set to point to the utp_stream when
+ // hooked up, and then reset to 0 once the utp_stream
+ // detaches. This is used to know whether or not
+ // the socket impl is still attached to a utp_stream
+ // object. When it isn't, we'll never be able to
+ // signal anything back to the client, and in case
+ // of errors, we just have to delete ourselves
+ // i.e. transition to the UTP_STATE_DELETED state
+ void* m_userdata;
+
+ // This is a platform-independent replacement
+ // for the regular iovec type in posix. Since
+ // it's not used in any system call, we might as
+ // well define our own type instead of wrapping
+ // the system's type.
+ struct iovec_t
+ {
+ iovec_t(void* b, size_t l): buf(b), len(l) {}
+ void* buf;
+ | ||
relevance 2 | ../src/web_peer_connection.cpp:687 | create a mapping of file-index to redirection URLs. Use that to form URLs instead. Support to reconnect to a new server without destructing this peer_connection |
create a mapping of file-index to redirection URLs. Use that to form
URLs instead. Support to reconnect to a new server without destructing this
-peer_connection../src/web_peer_connection.cpp:655 == dl_target);
+peer_connection../src/web_peer_connection.cpp:687 == dl_target);
#endif
return;
}
@@ -1197,58 +1168,7 @@ peer_connection../src/web_peer_connection.cpp:655relevance 2 | ../src/kademlia/dos_blocker.cpp:75 | make these limits configurable |
|
make these limits configurable../src/kademlia/dos_blocker.cpp:75 bool dos_blocker::incoming(address addr, ptime now)
- {
- node_ban_entry* match = 0;
- node_ban_entry* min = m_ban_nodes;
- for (node_ban_entry* i = m_ban_nodes; i < m_ban_nodes + num_ban_nodes; ++i)
- {
- if (i->src == addr)
- {
- match = i;
- break;
- }
- if (i->count < min->count) min = i;
- else if (i->count == min->count
- && i->limit < min->limit) min = i;
- }
-
- if (match)
- {
- ++match->count;
-
- if (match->count >= 50)
- {
- if (now < match->limit)
- {
- if (match->count == 50)
- {
-#ifdef TORRENT_DHT_VERBOSE_LOGGING
- TORRENT_LOG(dht_tracker) << " BANNING PEER [ ip: "
- << addr << " time: " << total_milliseconds((now - match->limit) + seconds(10)) / 1000.f
- << " count: " << match->count << " ]";
-#endif
- // we've received 50 messages in less than 10 seconds from
- // this node. Ignore it until it's silent for 5 minutes
- match->limit = now + minutes(5);
- }
-
- return false;
- }
-
- // we got 50 messages from this peer, but it was in
- // more than 10 seconds. Reset the counter and the timer
- match->count = 0;
- match->limit = now + seconds(10);
- }
- }
- else
- {
- min->count = 1;
- min->limit = now + seconds(10);
- min->src = addr;
- }
- | ||
relevance 2 | ../src/kademlia/node.cpp:67 | make this configurable in dht_settings |
make this configurable in dht_settings../src/kademlia/node.cpp:67#include "libtorrent/kademlia/routing_table.hpp"
+ | ||
relevance 2 | ../src/kademlia/node.cpp:67 | make this configurable in dht_settings |
make this configurable in dht_settings../src/kademlia/node.cpp:67#include "libtorrent/kademlia/routing_table.hpp"
#include "libtorrent/kademlia/node.hpp"
#include "libtorrent/kademlia/dht_observer.hpp"
@@ -1299,7 +1219,7 @@ void purge_peers(std::set<peer_entry>& peers)
void nop() {}
- | ||
relevance 2 | ../src/kademlia/node.cpp:495 | it would be nice to have a bias towards node-id prefixes that are missing in the bucket |
it would be nice to have a bias towards node-id prefixes that
+ | ||
relevance 2 | ../src/kademlia/node.cpp:495 | it would be nice to have a bias towards node-id prefixes that are missing in the bucket |
it would be nice to have a bias towards node-id prefixes that
are missing in the bucket../src/kademlia/node.cpp:495 // this shouldn't happen
TORRENT_ASSERT(m_id != ne->id);
if (ne->id == m_id) return;
@@ -1351,7 +1271,7 @@ void node_impl::send_single_refresh(udp::endpoint const& ep, int bucket
time_duration node_impl::connection_timeout()
{
time_duration d = m_rpc.tick();
- | ||
relevance 2 | ../src/kademlia/node.cpp:892 | find_node should write directly to the response entry |
find_node should write directly to the response entry../src/kademlia/node.cpp:892 TORRENT_LOG(node) << " values: " << reply["values"].list().size();
+ | ||
relevance 2 | ../src/kademlia/node.cpp:894 | find_node should write directly to the response entry |
find_node should write directly to the response entry../src/kademlia/node.cpp:894 TORRENT_LOG(node) << " values: " << reply["values"].list().size();
}
#endif
}
@@ -1402,7 +1322,7 @@ time_duration node_impl::connection_timeout()
// listen port and instead use the source port of the packet?
if (msg_keys[5] && msg_keys[5]->int_value() != 0)
port = m.addr.port();
- | ||
relevance 2 | ../src/kademlia/node_id.cpp:134 | this could be optimized if SSE 4.2 is available. It could also be optimized given that we have a fixed length |
this could be optimized if SSE 4.2 is
+ | ||
relevance 2 | ../src/kademlia/node_id.cpp:134 | this could be optimized if SSE 4.2 is available. It could also be optimized given that we have a fixed length |
this could be optimized if SSE 4.2 is
available. It could also be optimized given
that we have a fixed length../src/kademlia/node_id.cpp:134 b6 = ip_.to_v6().to_bytes();
ip = &b6[0];
@@ -1455,7 +1375,7 @@ void make_id_secret(node_id& in)
memcpy(&in[20-4], &secret_hash[0], 4);
memcpy(&in[20-8], &rand, 4);
}
- | ||
relevance 2 | ../src/kademlia/routing_table.cpp:884 | move the lowest priority nodes to the replacement bucket |
move the lowest priority nodes to the replacement bucket../src/kademlia/routing_table.cpp:884 bucket_t& b = m_buckets[bucket_index].live_nodes;
+ | ||
relevance 2 | ../src/kademlia/routing_table.cpp:886 | move the lowest priority nodes to the replacement bucket |
move the lowest priority nodes to the replacement bucket../src/kademlia/routing_table.cpp:886 bucket_t& b = m_buckets[bucket_index].live_nodes;
bucket_t& rb = m_buckets[bucket_index].replacements;
// move any node whose (160 - distance_exp(m_id, id)) >= (i - m_buckets.begin())
@@ -1506,7 +1426,7 @@ void make_id_secret(node_id& in)
else
new_replacement_bucket.push_back(*j);
}
- | ||
relevance 2 | ../include/libtorrent/enum_net.hpp:137 | this could be done more efficiently by just looking up the interface with the given name, maybe even with if_nametoindex() |
this could be done more efficiently by just looking up
+ | ||
relevance 2 | ../include/libtorrent/enum_net.hpp:137 | this could be done more efficiently by just looking up the interface with the given name, maybe even with if_nametoindex() |
this could be done more efficiently by just looking up
the interface with the given name, maybe even with if_nametoindex()../include/libtorrent/enum_net.hpp:137
address ip = address::from_string(device_name, ec);
if (!ec)
@@ -1558,7 +1478,7 @@ the interface with the given name, maybe even with if_nametoindex()../i
// returns true if the given device exists
TORRENT_EXTRA_EXPORT bool has_interface(char const* name, io_service& ios
- | ||
relevance 2 | ../include/libtorrent/intrusive_ptr_base.hpp:44 | remove this class and transition over to using shared_ptr and make_shared instead |
remove this class and transition over to using shared_ptr and
+ | ||
relevance 2 | ../include/libtorrent/intrusive_ptr_base.hpp:44 | remove this class and transition over to using shared_ptr and make_shared instead |
remove this class and transition over to using shared_ptr and
make_shared instead../include/libtorrent/intrusive_ptr_base.hpp:44CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
@@ -1610,7 +1530,7 @@ namespace libtorrent
intrusive_ptr_base(): m_refs(0) {}
- | ||
relevance 2 | ../include/libtorrent/proxy_base.hpp:257 | use the resolver interface that has a built-in cache |
use the resolver interface that has a built-in cache../include/libtorrent/proxy_base.hpp:257 return m_sock.lowest_layer();
+ | ||
relevance 2 | ../include/libtorrent/proxy_base.hpp:257 | use the resolver interface that has a built-in cache |
use the resolver interface that has a built-in cache../include/libtorrent/proxy_base.hpp:257 return m_sock.lowest_layer();
}
next_layer_type& next_layer()
@@ -1637,7 +1557,7 @@ protected:
#endif
- | ||
relevance 2 | ../include/libtorrent/session.hpp:268 | the ip filter should probably be saved here too |
the ip filter should probably be saved here too../include/libtorrent/session.hpp:268
+ | ||
relevance 2 | ../include/libtorrent/session.hpp:271 | the ip filter should probably be saved here too |
the ip filter should probably be saved here too../include/libtorrent/session.hpp:271
if ((flags & start_default_features) == 0)
{
pack.set_bool(settings_pack::enable_upnp, false);
@@ -1688,7 +1608,7 @@ protected:
save_dht_proxy = save_proxy,
save_peer_proxy = save_proxy,
save_web_proxy = save_proxy,
- | ||
relevance 2 | ../include/libtorrent/session_settings.hpp:55 | this type is only used internally now. move it to an internal header and make this type properly deprecated. |
this type is only used internally now. move it to an internal
+ | ||
relevance 2 | ../include/libtorrent/session_settings.hpp:55 | this type is only used internally now. move it to an internal header and make this type properly deprecated. |
this type is only used internally now. move it to an internal
header and make this type properly deprecated.../include/libtorrent/session_settings.hpp:55
#include "libtorrent/version.hpp"
#include "libtorrent/config.hpp"
@@ -1740,58 +1660,7 @@ namespace libtorrent
// proxy_settings::type field.
enum proxy_type
{
- | ||
relevance 2 | ../include/libtorrent/settings_pack.hpp:70 | add an API to query a settings_pack as well |
add an API to query a settings_pack as well../include/libtorrent/settings_pack.hpp:70{
- namespace aux { struct session_impl; struct session_settings; }
-
- struct settings_pack;
- struct lazy_entry;
-
- TORRENT_EXTRA_EXPORT settings_pack* load_pack_from_dict(lazy_entry const* settings);
- TORRENT_EXTRA_EXPORT void save_settings_to_dict(aux::session_settings const& s, entry::dictionary_type& sett);
- TORRENT_EXPORT void initialize_default_settings(aux::session_settings& s);
- TORRENT_EXTRA_EXPORT void apply_pack(settings_pack const* pack, aux::session_settings& sett, aux::session_impl* ses = 0);
-
- TORRENT_EXPORT int setting_by_name(std::string const& name);
- TORRENT_EXPORT char const* name_for_setting(int s);
-
-#ifndef TORRENT_NO_DEPRECATE
- struct session_settings;
- settings_pack* load_pack_from_struct(aux::session_settings const& current, session_settings const& s);
- void load_struct_from_settings(aux::session_settings const& current, session_settings& ret);
-#endif
-
-
- // The ``settings_pack`` struct, contains the names of all settings as
- // enum values. These values are passed in to the ``set_str()``,
- // ``set_int()``, ``set_bool()`` functions, to specify the setting to
- // change.
- //
- // These are the available settings:
- //
- // .. include:: settings-ref.rst
- //
- struct TORRENT_EXPORT settings_pack
- {
- friend struct disk_io_thread;
- friend void apply_pack(settings_pack const* pack, aux::session_settings& sett, aux::session_impl* ses);
-
- void set_str(int name, std::string val);
- void set_int(int name, int val);
- void set_bool(int name, bool val);
- bool has_val(int name) const;
- void clear();
-
- std::string get_str(int name) const;
- int get_int(int name) const;
- bool get_bool(int name) const;
-
- // setting names (indices) are 16 bits. The two most significant
- // bits indicate what type the setting has. (string, int, bool)
- enum type_bases
- {
- string_type_base = 0x0000,
- int_type_base = 0x4000,
- | ||
relevance 2 | ../include/libtorrent/socks5_stream.hpp:131 | fix error messages to use custom error_code category |
fix error messages to use custom error_code category../include/libtorrent/socks5_stream.hpp:131 | ||
relevance 2 | ../include/libtorrent/socks5_stream.hpp:132 | add async_connect() that takes a hostname and port as well |
add async_connect() that takes a hostname and port as well../include/libtorrent/socks5_stream.hpp:132 if (m_dst_name.size() > 255)
+ | ||
relevance 2 | ../include/libtorrent/socks5_stream.hpp:131 | add async_connect() that takes a hostname and port as well |
add async_connect() that takes a hostname and port as well../include/libtorrent/socks5_stream.hpp:131 if (m_dst_name.size() > 255)
m_dst_name.resize(255);
}
@@ -1842,7 +1711,7 @@ namespace libtorrent
m_resolver.async_resolve(q, boost::bind(
&socks5_stream::name_lookup, this, _1, _2, h));
}
- | ||
relevance 2 | ../include/libtorrent/tracker_manager.hpp:269 | this class probably doesn't need to have virtual functions. |
this class probably doesn't need to have virtual functions.../include/libtorrent/tracker_manager.hpp:269 int m_completion_timeout;
+ | ||
relevance 2 | ../include/libtorrent/tracker_manager.hpp:269 | this class probably doesn't need to have virtual functions. |
this class probably doesn't need to have virtual functions.../include/libtorrent/tracker_manager.hpp:269 int m_completion_timeout;
typedef mutex mutex_t;
mutable mutex_t m_mutex;
@@ -1893,7 +1762,7 @@ namespace libtorrent
boost::shared_ptr<tracker_connection> shared_from_this()
{
- | ||
relevance 2 | ../include/libtorrent/tracker_manager.hpp:366 | this should be unique_ptr in the future |
this should be unique_ptr in the future../include/libtorrent/tracker_manager.hpp:366 // this is only used for SOCKS packets, since
+ | ||
relevance 2 | ../include/libtorrent/tracker_manager.hpp:366 | this should be unique_ptr in the future |
this should be unique_ptr in the future../include/libtorrent/tracker_manager.hpp:366 // this is only used for SOCKS packets, since
// they may be addressed to hostname
virtual bool incoming_packet(error_code const& e, char const* hostname
, char const* buf, int size);
@@ -1932,7 +1801,7 @@ namespace libtorrent
#endif // TORRENT_TRACKER_MANAGER_HPP_INCLUDED
- | ||
relevance 2 | ../include/libtorrent/aux_/session_interface.hpp:104 | the IP voting mechanism should be factored out to its own class, not part of the session |
the IP voting mechanism should be factored out
+ | ||
relevance 2 | ../include/libtorrent/aux_/session_interface.hpp:104 | the IP voting mechanism should be factored out to its own class, not part of the session |
the IP voting mechanism should be factored out
to its own class, not part of the session../include/libtorrent/aux_/session_interface.hpp:104 class port_filter;
struct settings_pack;
struct torrent_peer_allocator_interface;
@@ -1984,7 +1853,7 @@ namespace libtorrent { namespace aux
virtual void queue_async_resume_data(boost::shared_ptr<torrent> const& t) = 0;
virtual void done_async_resume() = 0;
virtual void evict_torrent(torrent* t) = 0;
- | ||
relevance 2 | ../include/libtorrent/aux_/session_settings.hpp:73 | make this a bitfield |
make this a bitfield../include/libtorrent/aux_/session_settings.hpp:73 if ((name & settings_pack::type_mask) != settings_pack:: type ## _type_base) return default_val; \
+ | ||
relevance 2 | ../include/libtorrent/aux_/session_settings.hpp:73 | make this a bitfield |
make this a bitfield../include/libtorrent/aux_/session_settings.hpp:73 if ((name & settings_pack::type_mask) != settings_pack:: type ## _type_base) return default_val; \
return m_ ## type ## s[name - settings_pack:: type ## _type_base]
struct session_settings
@@ -2014,7 +1883,7 @@ namespace libtorrent { namespace aux
#endif
- | ||
relevance 1 | ../src/disk_io_thread.cpp:233 | it would be nice to have the number of threads be set dynamically |
it would be nice to have the number of threads be set dynamically../src/disk_io_thread.cpp:233 std::pair<block_cache::iterator, block_cache::iterator> pieces
+ | ||
relevance 1 | ../src/disk_io_thread.cpp:233 | it would be nice to have the number of threads be set dynamically |
it would be nice to have the number of threads be set dynamically../src/disk_io_thread.cpp:233 std::pair<block_cache::iterator, block_cache::iterator> pieces
= m_disk_cache.all_pieces();
TORRENT_ASSERT(pieces.first == pieces.second);
#endif
@@ -2065,7 +1934,7 @@ namespace libtorrent { namespace aux
m_threads.resize(m_num_threads);
}
}
- | ||
relevance 1 | ../src/http_seed_connection.cpp:124 | in chunked encoding mode, this assert won't hold. the chunk headers should be subtracted from the receive_buffer_size |
in chunked encoding mode, this assert won't hold.
+ | ||
relevance 1 | ../src/http_seed_connection.cpp:124 | in chunked encoding mode, this assert won't hold. the chunk headers should be subtracted from the receive_buffer_size |
in chunked encoding mode, this assert won't hold.
the chunk headers should be subtracted from the receive_buffer_size../src/http_seed_connection.cpp:124 boost::optional<piece_block_progress>
http_seed_connection::downloading_piece_progress() const
{
@@ -2117,8 +1986,8 @@ the chunk headers should be subtracted from the receive_buffer_size../s
std::string request;
request.reserve(400);
- | ||
relevance 1 | ../src/session_impl.cpp:5166 | report the proper address of the router as the source IP of this understanding of our external address, instead of the empty address |
report the proper address of the router as the source IP of
-this understanding of our external address, instead of the empty address../src/session_impl.cpp:5166 void session_impl::on_port_mapping(int mapping, address const& ip, int port
+ | ||
relevance 1 | ../src/session_impl.cpp:5194 | report the proper address of the router as the source IP of this understanding of our external address, instead of the empty address |
report the proper address of the router as the source IP of
+this understanding of our external address, instead of the empty address../src/session_impl.cpp:5194 void session_impl::on_port_mapping(int mapping, address const& ip, int port
, error_code const& ec, int map_transport)
{
TORRENT_ASSERT(is_single_thread());
@@ -2165,9 +2034,13 @@ this understanding of our external address, instead of the empty address | ||
relevance 1 | ../src/session_impl.cpp:6326 | we only need to do this if our global IPv4 address has changed since the DHT (currently) only supports IPv4. Since restarting the DHT is kind of expensive, it would be nice to not do it unnecessarily |
we only need to do this if our global IPv4 address has changed
+#ifndef TORRENT_NO_DEPRECATE
+ session_status session_impl::status() const
+ {
+// INVARIANT_CHECK;
+ | ||
relevance 1 | ../src/session_impl.cpp:6384 | we only need to do this if our global IPv4 address has changed since the DHT (currently) only supports IPv4. Since restarting the DHT is kind of expensive, it would be nice to not do it unnecessarily |
we only need to do this if our global IPv4 address has changed
since the DHT (currently) only supports IPv4. Since restarting the DHT
-is kind of expensive, it would be nice to not do it unnecessarily../src/session_impl.cpp:6326#endif
+is kind of expensive, it would be nice to not do it unnecessarily../src/session_impl.cpp:6384#endif
if (!m_external_ip.cast_vote(ip, source_type, source)) return;
@@ -2218,7 +2091,7 @@ is kind of expensive, it would be nice to not do it unnecessarily../src
, boost::function<void(char*)> const& handler)
{
return m_disk_thread.async_allocate_disk_buffer(category, handler);
- | ||
relevance 1 | ../src/torrent.cpp:1156 | make this depend on the error and on the filesystem the files are being downloaded to. If the error is no_space_left_on_device and the filesystem doesn't support sparse files, only zero the priorities of the pieces that are at the tails of all files, leaving everything up to the highest written piece in each file |
make this depend on the error and on the filesystem the
+ | ||
relevance 1 | ../src/torrent.cpp:1156 | make this depend on the error and on the filesystem the files are being downloaded to. If the error is no_space_left_on_device and the filesystem doesn't support sparse files, only zero the priorities of the pieces that are at the tails of all files, leaving everything up to the highest written piece in each file |
make this depend on the error and on the filesystem the
files are being downloaded to. If the error is no_space_left_on_device
and the filesystem doesn't support sparse files, only zero the priorities
of the pieces that are at the tails of all files, leaving everything
@@ -2273,7 +2146,7 @@ up to the highest written piece in each file../src/torrent.cpp:1156 | ||
relevance 1 | ../src/torrent.cpp:6865 | save the send_stats state instead of throwing them away it may pose an issue when downgrading though |
save the send_stats state instead of throwing them away
+ | ||
relevance 1 | ../src/torrent.cpp:6865 | save the send_stats state instead of throwing them away it may pose an issue when downgrading though |
save the send_stats state instead of throwing them away
it may pose an issue when downgrading though../src/torrent.cpp:6865 for (int k = 0; k < bits; ++k)
v |= (i->info[j*8+k].state == piece_picker::block_info::state_finished)
? (1 << k) : 0;
@@ -2325,7 +2198,7 @@ it may pose an issue when downgrading though../src/torrent.cpp:6865 | ||
relevance 1 | ../src/torrent.cpp:7958 | should disconnect all peers that have the pieces we have not just seeds. It would be pretty expensive to check all pieces for all peers though |
should disconnect all peers that have the pieces we have
+ | ||
relevance 1 | ../src/torrent.cpp:7958 | should disconnect all peers that have the pieces we have not just seeds. It would be pretty expensive to check all pieces for all peers though |
should disconnect all peers that have the pieces we have
not just seeds. It would be pretty expensive to check all pieces
for all peers though../src/torrent.cpp:7958 set_state(torrent_status::finished);
set_queue_position(-1);
@@ -2378,7 +2251,7 @@ for all peers though../src/torrent.cpp:7958relevance 1 | ../include/libtorrent/ip_voter.hpp:122 | instead, have one instance per possible subnet, global IPv4, global IPv6, loopback, 192.168.x.x, 10.x.x.x, etc. |
|
instead, have one instance per possible subnet, global IPv4, global IPv6, loopback, 192.168.x.x, 10.x.x.x, etc.../include/libtorrent/ip_voter.hpp:122 // away all the votes and started from scratch, in case
+ | ||
relevance 1 | ../include/libtorrent/ip_voter.hpp:122 | instead, have one instance per possible subnet, global IPv4, global IPv6, loopback, 192.168.x.x, 10.x.x.x, etc. |
instead, have one instance per possible subnet, global IPv4, global IPv6, loopback, 192.168.x.x, 10.x.x.x, etc.../include/libtorrent/ip_voter.hpp:122 // away all the votes and started from scratch, in case
// our IP has changed
ptime m_last_rotate;
};
@@ -2405,9 +2278,9 @@ for all peers though../src/torrent.cpp:7958relevance 1 | ../include/libtorrent/web_peer_connection.hpp:121 | if we make this be a disk_buffer_holder instead we would save a copy sometimes use allocate_disk_receive_buffer and release_disk_receive_buffer |
|
if we make this be a disk_buffer_holder instead
+ | ||
relevance 1 | ../include/libtorrent/web_peer_connection.hpp:122 | if we make this be a disk_buffer_holder instead we would save a copy sometimes use allocate_disk_receive_buffer and release_disk_receive_buffer |
if we make this be a disk_buffer_holder instead
we would save a copy sometimes
-use allocate_disk_receive_buffer and release_disk_receive_buffer../include/libtorrent/web_peer_connection.hpp:121
+use allocate_disk_receive_buffer and release_disk_receive_buffer../include/libtorrent/web_peer_connection.hpp:122
// returns the block currently being
// downloaded. And the progress of that
// block. If the peer isn't downloading
@@ -2458,7 +2331,7 @@ use allocate_disk_receive_buffer and release_disk_receive_buffer../incl
};
}
- | ||
relevance 0 | ../test/test_block_cache.cpp:475 | test try_evict_blocks |
test try_evict_blocks../test/test_block_cache.cpp:475 | ||
relevance 0 | ../test/test_block_cache.cpp:476 | test evicting volatile pieces, to see them be removed |
test evicting volatile pieces, to see them be removed../test/test_block_cache.cpp:476 | ||
relevance 0 | ../test/test_block_cache.cpp:477 | test evicting dirty pieces |
test evicting dirty pieces../test/test_block_cache.cpp:477 | ||
relevance 0 | ../test/test_block_cache.cpp:478 | test free_piece |
test free_piece../test/test_block_cache.cpp:478 | ||
relevance 0 | ../test/test_block_cache.cpp:479 | test abort_dirty |
test abort_dirty../test/test_block_cache.cpp:479 | ||
relevance 0 | ../test/test_block_cache.cpp:480 | test unaligned reads |
test unaligned reads../test/test_block_cache.cpp:480 // it's supposed to be a cache hit
+ | ||
relevance 0 | ../test/test_block_cache.cpp:475 | test try_evict_blocks |
test try_evict_blocks../test/test_block_cache.cpp:475 | ||
relevance 0 | ../test/test_block_cache.cpp:476 | test evicting volatile pieces, to see them be removed |
test evicting volatile pieces, to see them be removed../test/test_block_cache.cpp:476 | ||
relevance 0 | ../test/test_block_cache.cpp:477 | test evicting dirty pieces |
test evicting dirty pieces../test/test_block_cache.cpp:477 | ||
relevance 0 | ../test/test_block_cache.cpp:478 | test free_piece |
test free_piece../test/test_block_cache.cpp:478 | ||
relevance 0 | ../test/test_block_cache.cpp:479 | test abort_dirty |
test abort_dirty../test/test_block_cache.cpp:479 | ||
relevance 0 | ../test/test_block_cache.cpp:480 | test unaligned reads |
test unaligned reads../test/test_block_cache.cpp:480 // it's supposed to be a cache hit
TEST_CHECK(ret >= 0);
// return the reference to the buffer we just read
RETURN_BUFFER;
@@ -2481,7 +2354,7 @@ int test_main()
return 0;
}
- | ||
relevance 0 | ../test/test_metadata_extension.cpp:87 | it would be nice to test reversing which session is making the connection as well |
it would be nice to test reversing
+ | ||
relevance 0 | ../test/test_metadata_extension.cpp:87 | it would be nice to test reversing which session is making the connection as well |
it would be nice to test reversing
which session is making the connection as well../test/test_metadata_extension.cpp:87 , boost::shared_ptr<libtorrent::torrent_plugin> (*constructor)(libtorrent::torrent*, void*)
, int timeout)
{
@@ -2533,7 +2406,7 @@ which session is making the connection as well../test/test_metadata_ext
ses1.apply_settings(pack);
ses2.apply_settings(pack);
- | ||
relevance 0 | ../test/test_peer_list.cpp:419 | test applying a port_filter |
test applying a port_filter../test/test_peer_list.cpp:419 | ||
relevance 0 | ../test/test_peer_list.cpp:420 | test erasing peers |
test erasing peers../test/test_peer_list.cpp:420 | ||
relevance 0 | ../test/test_peer_list.cpp:421 | test using port and ip filter |
test using port and ip filter../test/test_peer_list.cpp:421 | ||
relevance 0 | ../test/test_peer_list.cpp:422 | test incrementing failcount (and make sure we no longer consider the peer a connect candidate) |
test incrementing failcount (and make sure we no longer consider the peer a connect candidate)../test/test_peer_list.cpp:422 | ||
relevance 0 | ../test/test_peer_list.cpp:423 | test max peerlist size |
test max peerlist size../test/test_peer_list.cpp:423 | ||
relevance 0 | ../test/test_peer_list.cpp:424 | test logic for which connection to keep when receiving an incoming connection to the same peer as we just made an outgoing connection to |
test logic for which connection to keep when receiving an incoming connection to the same peer as we just made an outgoing connection to../test/test_peer_list.cpp:424 | ||
relevance 0 | ../test/test_peer_list.cpp:425 | test update_peer_port with allow_multiple_connections_per_ip |
test update_peer_port with allow_multiple_connections_per_ip../test/test_peer_list.cpp:425 | ||
relevance 0 | ../test/test_peer_list.cpp:426 | test set_seed |
test set_seed../test/test_peer_list.cpp:426 | ||
relevance 0 | ../test/test_peer_list.cpp:427 | test has_peer |
test has_peer../test/test_peer_list.cpp:427 | ||
relevance 0 | ../test/test_peer_list.cpp:428 | test insert_peer with a full list |
test insert_peer with a full list../test/test_peer_list.cpp:428 | ||
relevance 0 | ../test/test_peer_list.cpp:429 | test add i2p peers |
test add i2p peers../test/test_peer_list.cpp:429 | ||
relevance 0 | ../test/test_peer_list.cpp:430 | test allow_i2p_mixed |
test allow_i2p_mixed../test/test_peer_list.cpp:430 | ||
relevance 0 | ../test/test_peer_list.cpp:431 | test insert_peer failing |
test insert_peer failing../test/test_peer_list.cpp:431 | ||
relevance 0 | ../test/test_peer_list.cpp:432 | test IPv6 |
test IPv6../test/test_peer_list.cpp:432 | ||
relevance 0 | ../test/test_peer_list.cpp:433 | test connect_to_peer() failing |
test connect_to_peer() failing../test/test_peer_list.cpp:433 | ||
relevance 0 | ../test/test_peer_list.cpp:434 | test connection_closed |
test connection_closed../test/test_peer_list.cpp:434 | ||
relevance 0 | ../test/test_peer_list.cpp:435 | test recalculate connect candidates |
test recalculate connect candidates../test/test_peer_list.cpp:435 | ||
relevance 0 | ../test/test_peer_list.cpp:436 | add tests here |
add tests here../test/test_peer_list.cpp:436 for (int i = 0; i < 100; ++i)
+ | ||
relevance 0 | ../test/test_peer_list.cpp:419 | test applying a port_filter |
test applying a port_filter../test/test_peer_list.cpp:419 | ||
relevance 0 | ../test/test_peer_list.cpp:420 | test erasing peers |
test erasing peers../test/test_peer_list.cpp:420 | ||
relevance 0 | ../test/test_peer_list.cpp:421 | test using port and ip filter |
test using port and ip filter../test/test_peer_list.cpp:421 | ||
relevance 0 | ../test/test_peer_list.cpp:422 | test incrementing failcount (and make sure we no longer consider the peer a connect candidate) |
test incrementing failcount (and make sure we no longer consider the peer a connect candidate)../test/test_peer_list.cpp:422 | ||
relevance 0 | ../test/test_peer_list.cpp:423 | test max peerlist size |
test max peerlist size../test/test_peer_list.cpp:423 | ||
relevance 0 | ../test/test_peer_list.cpp:424 | test logic for which connection to keep when receiving an incoming connection to the same peer as we just made an outgoing connection to |
test logic for which connection to keep when receiving an incoming connection to the same peer as we just made an outgoing connection to../test/test_peer_list.cpp:424 | ||
relevance 0 | ../test/test_peer_list.cpp:425 | test update_peer_port with allow_multiple_connections_per_ip |
test update_peer_port with allow_multiple_connections_per_ip../test/test_peer_list.cpp:425 | ||
relevance 0 | ../test/test_peer_list.cpp:426 | test set_seed |
test set_seed../test/test_peer_list.cpp:426 | ||
relevance 0 | ../test/test_peer_list.cpp:427 | test has_peer |
test has_peer../test/test_peer_list.cpp:427 | ||
relevance 0 | ../test/test_peer_list.cpp:428 | test insert_peer with a full list |
test insert_peer with a full list../test/test_peer_list.cpp:428 | ||
relevance 0 | ../test/test_peer_list.cpp:429 | test add i2p peers |
test add i2p peers../test/test_peer_list.cpp:429 | ||
relevance 0 | ../test/test_peer_list.cpp:430 | test allow_i2p_mixed |
test allow_i2p_mixed../test/test_peer_list.cpp:430 | ||
relevance 0 | ../test/test_peer_list.cpp:431 | test insert_peer failing |
test insert_peer failing../test/test_peer_list.cpp:431 | ||
relevance 0 | ../test/test_peer_list.cpp:432 | test IPv6 |
test IPv6../test/test_peer_list.cpp:432 | ||
relevance 0 | ../test/test_peer_list.cpp:433 | test connect_to_peer() failing |
test connect_to_peer() failing../test/test_peer_list.cpp:433 | ||
relevance 0 | ../test/test_peer_list.cpp:434 | test connection_closed |
test connection_closed../test/test_peer_list.cpp:434 | ||
relevance 0 | ../test/test_peer_list.cpp:435 | test recalculate connect candidates |
test recalculate connect candidates../test/test_peer_list.cpp:435 | ||
relevance 0 | ../test/test_peer_list.cpp:436 | add tests here |
add tests here../test/test_peer_list.cpp:436 for (int i = 0; i < 100; ++i)
{
torrent_peer* peer = p.add_peer(rand_tcp_ep(), 0, 0, &st);
TEST_EQUAL(st.erased.size(), 0);
@@ -2557,7 +2430,7 @@ which session is making the connection as well../test/test_metadata_ext
return 0;
}
- | ||
relevance 0 | ../test/test_primitives.cpp:213 | test the case where we have > 120 samples (and have the base delay actually be updated) |
test the case where we have > 120 samples (and have the base delay actually be updated)../test/test_primitives.cpp:213 | ||
relevance 0 | ../test/test_primitives.cpp:214 | test the case where a sample is lower than the history entry but not lower than the base |
test the case where a sample is lower than the history entry but not lower than the base../test/test_primitives.cpp:214 TEST_CHECK(!filter.find(k3));
+ | ||
relevance 0 | ../test/test_primitives.cpp:213 | test the case where we have > 120 samples (and have the base delay actually be updated) |
test the case where we have > 120 samples (and have the base delay actually be updated)../test/test_primitives.cpp:213 | ||
relevance 0 | ../test/test_primitives.cpp:214 | test the case where a sample is lower than the history entry but not lower than the base |
test the case where a sample is lower than the history entry but not lower than the base../test/test_primitives.cpp:214 TEST_CHECK(!filter.find(k3));
TEST_CHECK(filter.find(k4));
// test timestamp_history
@@ -2608,7 +2481,7 @@ which session is making the connection as well../test/test_metadata_ext
sanitize_append_path_element(path, "a...b", 5);
TEST_EQUAL(path, "a...b");
- | ||
relevance 0 | ../test/test_rss.cpp:135 | verify some key state is saved in 'state' |
verify some key state is saved in 'state'../test/test_rss.cpp:135 feed_status st;
+ | ||
relevance 0 | ../test/test_rss.cpp:135 | verify some key state is saved in 'state' |
verify some key state is saved in 'state'../test/test_rss.cpp:135 feed_status st;
f->get_feed_status(&st);
TEST_CHECK(!st.error);
@@ -2643,7 +2516,7 @@ int test_main()
return 0;
}
- | ||
relevance 0 | ../test/test_ssl.cpp:377 | test using a signed certificate with the wrong info-hash in DN |
test using a signed certificate with the wrong info-hash in DN../test/test_ssl.cpp:377 // in verifying peers
+ | ||
relevance 0 | ../test/test_ssl.cpp:369 | test using a signed certificate with the wrong info-hash in DN |
test using a signed certificate with the wrong info-hash in DN../test/test_ssl.cpp:369 // in verifying peers
ctx.set_verify_mode(context::verify_none, ec);
if (ec)
{
@@ -2694,8 +2567,8 @@ int test_main()
return false;
}
fprintf(stderr, "use_tmp_dh_file \"%s\"\n", dh_params.c_str());
- | ||
relevance 0 | ../test/test_ssl.cpp:475 | also test using a hash that refers to a valid torrent but that differs from the SNI hash |
also test using a hash that refers to a valid torrent
-but that differs from the SNI hash../test/test_ssl.cpp:475 print_alerts(ses1, "ses1", true, true, true, &on_alert);
+ | ||
relevance 0 | ../test/test_ssl.cpp:467 | also test using a hash that refers to a valid torrent but that differs from the SNI hash |
also test using a hash that refers to a valid torrent
+but that differs from the SNI hash../test/test_ssl.cpp:467 print_alerts(ses1, "ses1", true, true, true, &on_alert);
if (ec)
{
fprintf(stderr, "Failed SSL handshake: %s\n"
@@ -2723,6 +2596,7 @@ but that differs from the SNI hash../test/test_ssl.cpp:475
| ||
relevance 0 | ../test/test_torrent.cpp:133 | wait for an alert rather than just waiting 10 seconds. This is kind of silly |
wait for an alert rather than just waiting 10 seconds. This is kind of silly../test/test_torrent.cpp:133 TEST_EQUAL(h.file_priorities().size(), info->num_files());
+ | ||
relevance 0 | ../test/test_torrent.cpp:133 | wait for an alert rather than just waiting 10 seconds. This is kind of silly |
wait for an alert rather than just waiting 10 seconds. This is kind of silly../test/test_torrent.cpp:133 TEST_EQUAL(h.file_priorities().size(), info->num_files());
TEST_EQUAL(h.file_priorities()[0], 0);
if (info->num_files() > 1)
TEST_EQUAL(h.file_priorities()[1], 0);
@@ -2797,7 +2670,7 @@ but that differs from the SNI hash../test/test_ssl.cpp:475 | ||
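A rough sketch of what replacing the fixed 10 second sleep could look like, assuming the 1.0-era alert API (wait_for_alert / pop_alerts handing out heap-allocated alerts the caller deletes); which alert type actually signals the state the test needs is deliberately left open here.

#include <deque>
#include <libtorrent/session.hpp>
#include <libtorrent/alert.hpp>
#include <libtorrent/time.hpp>

// wait until an alert of the given type is posted, or the timeout expires
bool wait_for_alert_type(libtorrent::session& ses, int type
    , libtorrent::time_duration timeout)
{
    using namespace libtorrent;
    ptime const deadline = time_now() + timeout;
    while (time_now() < deadline)
    {
        // block until something is posted or time runs out
        if (ses.wait_for_alert(deadline - time_now()) == 0) break;
        std::deque<alert*> alerts;
        ses.pop_alerts(&alerts);
        bool found = false;
        for (std::deque<alert*>::iterator i = alerts.begin()
            , end(alerts.end()); i != end; ++i)
        {
            if ((*i)->type() == type) found = true;
            delete *i;
        }
        if (found) return true;
    }
    return false;
}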
relevance 0 | ../test/test_torrent_parse.cpp:116 | test remap_files |
test remap_files../test/test_torrent_parse.cpp:116 | ||
relevance 0 | ../test/test_torrent_parse.cpp:117 | merkle torrents. specifically torrent_info::add_merkle_nodes and torrent with "root hash" |
merkle torrents. specifically torrent_info::add_merkle_nodes and torrent with "root hash"../test/test_torrent_parse.cpp:117 | ||
relevance 0 | ../test/test_torrent_parse.cpp:118 | torrent with 'p' (padfile) attribute |
torrent with 'p' (padfile) attribute../test/test_torrent_parse.cpp:118 | ||
relevance 0 | ../test/test_torrent_parse.cpp:119 | torrent with 'h' (hidden) attribute |
torrent with 'h' (hidden) attribute../test/test_torrent_parse.cpp:119 | ||
relevance 0 | ../test/test_torrent_parse.cpp:120 | torrent with 'x' (executable) attribute |
torrent with 'x' (executable) attribute../test/test_torrent_parse.cpp:120 | ||
relevance 0 | ../test/test_torrent_parse.cpp:121 | torrent with 'l' (symlink) attribute |
torrent with 'l' (symlink) attribute../test/test_torrent_parse.cpp:121 | ||
relevance 0 | ../test/test_torrent_parse.cpp:122 | creating a merkle torrent (torrent_info::build_merkle_list) |
creating a merkle torrent (torrent_info::build_merkle_list)../test/test_torrent_parse.cpp:122 | ||
relevance 0 | ../test/test_torrent_parse.cpp:123 | torrent with multiple trackers in multiple tiers, making sure we shuffle them (how do you test shuffling?, load it multiple times and make sure it's in different order at least once) |
torrent with multiple trackers in multiple tiers, making sure we shuffle them (how do you test shuffling?, load it multiple times and make sure it's in different order at least once)../test/test_torrent_parse.cpp:123 { "invalid_info.torrent", errors::torrent_missing_info },
+ | ||
relevance 0 | ../test/test_torrent_parse.cpp:116 | test remap_files |
test remap_files../test/test_torrent_parse.cpp:116 | ||
relevance 0 | ../test/test_torrent_parse.cpp:117 | merkle torrents. specifically torrent_info::add_merkle_nodes and torrent with "root hash" |
merkle torrents. specifically torrent_info::add_merkle_nodes and torrent with "root hash"../test/test_torrent_parse.cpp:117 | ||
relevance 0 | ../test/test_torrent_parse.cpp:118 | torrent with 'p' (padfile) attribute |
torrent with 'p' (padfile) attribute../test/test_torrent_parse.cpp:118 | ||
relevance 0 | ../test/test_torrent_parse.cpp:119 | torrent with 'h' (hidden) attribute |
torrent with 'h' (hidden) attribute../test/test_torrent_parse.cpp:119 | ||
relevance 0 | ../test/test_torrent_parse.cpp:120 | torrent with 'x' (executable) attribute |
torrent with 'x' (executable) attribute../test/test_torrent_parse.cpp:120 | ||
relevance 0 | ../test/test_torrent_parse.cpp:121 | torrent with 'l' (symlink) attribute |
torrent with 'l' (symlink) attribute../test/test_torrent_parse.cpp:121 | ||
relevance 0 | ../test/test_torrent_parse.cpp:122 | creating a merkle torrent (torrent_info::build_merkle_list) |
creating a merkle torrent (torrent_info::build_merkle_list)../test/test_torrent_parse.cpp:122 | ||
relevance 0 | ../test/test_torrent_parse.cpp:123 | torrent with multiple trackers in multiple tiers, making sure we shuffle them (how do you test shuffling?, load it multiple times and make sure it's in different order at least once) |
torrent with multiple trackers in multiple tiers, making sure we shuffle them (how do you test shuffling?, load it multiple times and make sure it's in different order at least once)../test/test_torrent_parse.cpp:123 { "invalid_info.torrent", errors::torrent_missing_info },
{ "string.torrent", errors::torrent_is_no_dict },
{ "negative_size.torrent", errors::torrent_invalid_length },
{ "negative_file_size.torrent", errors::torrent_file_parse_failed },
@@ -2848,7 +2721,7 @@ namespace libtorrent
TEST_EQUAL(merkle_num_leafs(15), 16);
TEST_EQUAL(merkle_num_leafs(16), 16);
TEST_EQUAL(merkle_num_leafs(17), 32);
- | ||
relevance 0 | ../test/test_tracker.cpp:198 | test parse peers6 |
test parse peers6../test/test_tracker.cpp:198 | ||
relevance 0 | ../test/test_tracker.cpp:199 | test parse tracker-id |
test parse tracker-id../test/test_tracker.cpp:199 | ||
relevance 0 | ../test/test_tracker.cpp:200 | test parse failure-reason |
test parse failure-reason../test/test_tracker.cpp:200 | ||
relevance 0 | ../test/test_tracker.cpp:201 | test all failure paths invalid bencoding not a dictionary no files entry in scrape response no info-hash entry in scrape response malformed peers in peer list of dictionaries uneven number of bytes in peers and peers6 string responses |
test all failure paths
+ | ||
relevance 0 | ../test/test_tracker.cpp:198 | test parse peers6 |
test parse peers6../test/test_tracker.cpp:198 | ||
relevance 0 | ../test/test_tracker.cpp:199 | test parse tracker-id |
test parse tracker-id../test/test_tracker.cpp:199 | ||
relevance 0 | ../test/test_tracker.cpp:200 | test parse failure-reason |
test parse failure-reason../test/test_tracker.cpp:200 | ||
relevance 0 | ../test/test_tracker.cpp:201 | test all failure paths invalid bencoding not a dictionary no files entry in scrape response no info-hash entry in scrape response malformed peers in peer list of dictionaries uneven number of bytes in peers and peers6 string responses |
test all failure paths
invalid bencoding
not a dictionary
no files entry in scrape response
@@ -2905,7 +2778,7 @@ int test_main()
snprintf(tracker_url, sizeof(tracker_url), "http://127.0.0.1:%d/announce", http_port);
t->add_tracker(tracker_url, 0);
- | ||
relevance 0 | ../test/test_upnp.cpp:100 | store the log and verify that some key messages are there |
store the log and verify that some key messages are there../test/test_upnp.cpp:100 "USN:uuid:000f-66d6-7296000099dc::upnp:rootdevice\r\n"
+ | ||
relevance 0 | ../test/test_upnp.cpp:100 | store the log and verify that some key messages are there |
store the log and verify that some key messages are there../test/test_upnp.cpp:100 "USN:uuid:000f-66d6-7296000099dc::upnp:rootdevice\r\n"
"Location: http://127.0.0.1:%d/upnp.xml\r\n"
"Server: Custom/1.0 UPnP/1.0 Proc/Ver\r\n"
"EXT:\r\n"
@@ -2956,7 +2829,7 @@ int run_upnp_test(char const* root_filename, char const* router_model, char cons
error_code ec;
load_file(root_filename, buf, ec);
buf.push_back(0);
- | ||
relevance 0 | ../test/web_seed_suite.cpp:374 | file hashes don't work with the new torrent creator reading async |
file hashes don't work with the new torrent creator reading async../test/web_seed_suite.cpp:374 // corrupt the files now, so that the web seed will be banned
+ | ||
relevance 0 | ../test/web_seed_suite.cpp:364 | file hashes don't work with the new torrent creator reading async |
file hashes don't work with the new torrent creator reading async../test/web_seed_suite.cpp:364 // corrupt the files now, so that the web seed will be banned
if (test_url_seed)
{
create_random_files(combine_path(save_path, "torrent_dir"), file_sizes, sizeof(file_sizes)/sizeof(file_sizes[0]));
@@ -2994,20 +2867,20 @@ int run_upnp_test(char const* root_filename, char const* router_model, char cons
}
*/
{
- libtorrent::session ses(fingerprint(" ", 0,0,0,0), 0);
-
settings_pack pack;
pack.set_int(settings_pack::max_queued_disk_bytes, 256 * 1024);
pack.set_str(settings_pack::listen_interfaces, "0.0.0.0:51000");
pack.set_int(settings_pack::max_retry_port_bind, 1000);
pack.set_int(settings_pack::alert_mask, ~(alert::progress_notification | alert::stats_notification));
- ses.apply_settings(pack);
+ pack.set_bool(settings_pack::enable_lsd, false);
+ pack.set_bool(settings_pack::enable_natpmp, false);
+ pack.set_bool(settings_pack::enable_upnp, false);
+ pack.set_bool(settings_pack::enable_dht, false);
+ libtorrent::session ses(pack, 0);
test_transfer(ses, torrent_file, proxy, port, protocol, test_url_seed
, chunked_encoding, test_ban, keepalive);
-
- if (test_url_seed && test_rename)
- | ||
relevance 0 | ../src/block_cache.cpp:884 | it's somewhat expensive to iterate over this linked list. Presumably because of the random access of memory. It would be nice if pieces with no evictable blocks weren't in this list |
it's somewhat expensive
+ | ||
relevance 0 | ../src/block_cache.cpp:884 | it's somewhat expensive to iterate over this linked list. Presumably because of the random access of memory. It would be nice if pieces with no evictable blocks weren't in this list |
it's somewhat expensive
to iterate over this linked list. Presumably because of the random
access of memory. It would be nice if pieces with no evictable blocks
weren't in this list../src/block_cache.cpp:884 }
@@ -3061,7 +2934,7 @@ weren't in this list../src/block_cache.cpp:884relevance 0 | ../src/block_cache.cpp:948 | this should probably only be done every n:th time |
|
this should probably only be done every n:th time../src/block_cache.cpp:948 }
+ | ||
relevance 0 | ../src/block_cache.cpp:948 | this should probably only be done every n:th time |
this should probably only be done every n:th time../src/block_cache.cpp:948 }
if (pe->ok_to_evict())
{
@@ -3112,7 +2985,7 @@ weren't in this list../src/block_cache.cpp:884relevance 0 | ../src/block_cache.cpp:1720 | create a holder for refcounts that automatically decrement |
|
create a holder for refcounts that automatically decrement../src/block_cache.cpp:1720 }
+ | ||
relevance 0 | ../src/block_cache.cpp:1720 | create a holder for refcounts that automatically decrement |
create a holder for refcounts that automatically decrement../src/block_cache.cpp:1720 }
j->buffer = allocate_buffer("send buffer");
if (j->buffer == 0) return -2;
@@ -3163,7 +3036,7 @@ bool block_cache::maybe_free_piece(cached_piece_entry* pe)
boost::shared_ptr<piece_manager> s = pe->storage;
- | ||
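The holder the TODO above asks for is essentially an RAII guard: take the reference in the constructor, drop it in the destructor. A minimal sketch, using a stand-in cached_piece_entry with a plain int refcount rather than the real block_cache types:

// illustrative only -- not the libtorrent types
struct cached_piece_entry { int refcount; };

struct refcount_holder
{
    explicit refcount_holder(cached_piece_entry* pe)
        : m_pe(pe) { ++m_pe->refcount; }
    ~refcount_holder() { --m_pe->refcount; }
private:
    // non-copyable, so there is exactly one decrement per increment
    refcount_holder(refcount_holder const&);
    refcount_holder& operator=(refcount_holder const&);
    cached_piece_entry* m_pe;
};

The same shape would serve the later TODOs about a piece_refcount holder in disk_io_thread.cpp.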
relevance 0 | ../src/bt_peer_connection.cpp:663 | this could be optimized using knuth morris pratt |
this could be optimized using knuth morris pratt../src/bt_peer_connection.cpp:663 {
+ | ||
relevance 0 | ../src/bt_peer_connection.cpp:663 | this could be optimized using knuth morris pratt |
this could be optimized using knuth morris pratt../src/bt_peer_connection.cpp:663 {
disconnect(errors::no_memory, op_encryption);
return;
}
@@ -3214,7 +3087,7 @@ bool block_cache::maybe_free_piece(cached_piece_entry* pe)
// }
// no complete sync
- | ||
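The Knuth-Morris-Pratt search the TODO above refers to can be sketched in plain, self-contained C++: build the failure table for the pattern once, then scan the receive buffer without ever stepping backwards in it. kmp_find and its types are illustrative, not libtorrent API.

#include <cstddef>
#include <string>
#include <vector>

std::size_t kmp_find(std::string const& haystack, std::string const& needle)
{
    if (needle.empty()) return 0;
    // fail[i]: length of the longest proper prefix of needle[0..i]
    // that is also a suffix of it
    std::vector<std::size_t> fail(needle.size(), 0);
    for (std::size_t i = 1, k = 0; i < needle.size(); ++i)
    {
        while (k > 0 && needle[i] != needle[k]) k = fail[k - 1];
        if (needle[i] == needle[k]) ++k;
        fail[i] = k;
    }
    // scan: on a mismatch, fall back in the pattern, never in the haystack
    for (std::size_t i = 0, k = 0; i < haystack.size(); ++i)
    {
        while (k > 0 && haystack[i] != needle[k]) k = fail[k - 1];
        if (haystack[i] == needle[k]) ++k;
        if (k == needle.size()) return i + 1 - k;
    }
    return std::string::npos;
}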
relevance 0 | ../src/bt_peer_connection.cpp:2204 | if we're finished, send upload_only message |
if we're finished, send upload_only message../src/bt_peer_connection.cpp:2204 if (msg[5 + k / 8] & (0x80 >> (k % 8))) bitfield_string[k] = '1';
+ | ||
relevance 0 | ../src/bt_peer_connection.cpp:2204 | if we're finished, send upload_only message |
if we're finished, send upload_only message../src/bt_peer_connection.cpp:2204 if (msg[5 + k / 8] & (0x80 >> (k % 8))) bitfield_string[k] = '1';
else bitfield_string[k] = '0';
}
peer_log("==> BITFIELD [ %s ]", bitfield_string.c_str());
@@ -3265,7 +3138,7 @@ bool block_cache::maybe_free_piece(cached_piece_entry* pe)
? m_settings.get_str(settings_pack::user_agent)
: m_settings.get_str(settings_pack::handshake_client_version);
}
- | ||
relevance 0 | ../src/choker.cpp:332 | optimize this using partial_sort or something. We don't need to sort the entire list |
optimize this using partial_sort or something. We don't need
+ | ||
relevance 0 | ../src/choker.cpp:332 | optimize this using partial_sort or something. We don't need to sort the entire list |
optimize this using partial_sort or something. We don't need
to sort the entire list../src/choker.cpp:332 return upload_slots;
}
@@ -3287,7 +3160,7 @@ to sort the entire list../src/choker.cpp:332
- | ||
relevance 0 | ../src/choker.cpp:335 | make the comparison function a free function and move it into this cpp file |
make the comparison function a free function and move it
+ | ||
relevance 0 | ../src/choker.cpp:335 | make the comparison function a free function and move it into this cpp file |
make the comparison function a free function and move it
into this cpp file../src/choker.cpp:335 }
// ==== rate-based ====
@@ -3311,7 +3184,7 @@ into this cpp file../src/choker.cpp:335 std::sort(peers.begin(), peers.end()
, boost::bind(&upload_rate_compare, _1, _2));
- | ||
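A minimal sketch of the two choker TODOs above, with a stand-in peer type rather than peer_connection: the comparator becomes a file-local free function, and std::partial_sort only orders the peers that can actually receive an unchoke slot instead of sorting the whole list.

#include <algorithm>
#include <vector>

namespace {

struct peer { int upload_rate; }; // stand-in type

// free comparison function, local to this translation unit
bool upload_rate_compare(peer const* lhs, peer const* rhs)
{ return lhs->upload_rate > rhs->upload_rate; }

} // anonymous namespace

void order_unchoke_candidates(std::vector<peer*>& peers, int upload_slots)
{
    int const n = (std::min)(upload_slots, int(peers.size()));
    // only the first n entries need to be in sorted order
    std::partial_sort(peers.begin(), peers.begin() + n, peers.end()
        , &upload_rate_compare);
}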
relevance 0 | ../src/choker.cpp:340 | make configurable |
make configurable../src/choker.cpp:340 //
+ | ||
relevance 0 | ../src/choker.cpp:340 | make configurable |
make configurable../src/choker.cpp:340 //
// The rate based unchoker looks at our upload rate to peers, and find
// a balance between number of upload slots and the rate we achieve. The
// intention is to not spread upload bandwidth too thin, but also to not
@@ -3344,7 +3217,7 @@ into this cpp file../src/choker.cpp:335relevance 0 | ../src/choker.cpp:354 | make configurable |
|
make configurable../src/choker.cpp:354 // it purely based on the current state of our peers.
+ | ||
relevance 0 | ../src/choker.cpp:354 | make configurable |
make configurable../src/choker.cpp:354 // it purely based on the current state of our peers.
upload_slots = 0;
@@ -3395,8 +3268,60 @@ into this cpp file../src/choker.cpp:335relevance 0 | ../src/disk_io_thread.cpp:912 | instead of doing a lookup each time through the loop, save cached_piece_entry pointers with piece_refcount incremented to pin them |
|
instead of doing a lookup each time through the loop, save
-cached_piece_entry pointers with piece_refcount incremented to pin them../src/disk_io_thread.cpp:912 // this is why we pass in 1 as cont_block to the flushing functions
+ | ||
relevance 0 | ../src/disk_io_thread.cpp:880 | it would be nice to optimize this by having the cache pieces also ordered by |
it would be nice to optimize this by having the cache
+pieces also ordered by../src/disk_io_thread.cpp:880 // from disk_io_thread::do_delete, which is a fence job and should
+ // not have any other jobs active, i.e. there should not be any references
+ // keeping pieces or blocks alive
+ if ((flags & flush_delete_cache) && (flags & flush_expect_clear))
+ {
+ boost::unordered_set<cached_piece_entry*> const& storage_pieces = storage->cached_pieces();
+ for (boost::unordered_set<cached_piece_entry*>::const_iterator i = storage_pieces.begin()
+ , end(storage_pieces.end()); i != end; ++i)
+ {
+ cached_piece_entry* pe = m_disk_cache.find_piece(storage, (*i)->piece);
+ TORRENT_PIECE_ASSERT(pe->num_dirty == 0, pe);
+ }
+ }
+#endif
+ }
+ else
+ {
+ std::pair<block_cache::iterator, block_cache::iterator> range = m_disk_cache.all_pieces();
+ while (range.first != range.second)
+ {
+ if ((flags & (flush_read_cache | flush_delete_cache)) == 0)
+ {
+ // if we're not flushing the read cache, and not deleting the
+ // cache, skip pieces with no dirty blocks, i.e. read cache
+ // pieces
+ while (range.first->num_dirty == 0)
+ {
+ ++range.first;
+ if (range.first == range.second) return;
+ }
+ }
+ cached_piece_entry* pe = const_cast<cached_piece_entry*>(&*range.first);
+ flush_piece(pe, flags, completed_jobs, l);
+ range = m_disk_cache.all_pieces();
+ }
+ }
+ }
+
+ // this is called if we're exceeding (or about to exceed) the cache
+ // size limit. This means we should not restrict ourselves to contiguous
+ // blocks of write cache line size, but try to flush all old blocks
+ // this is why we pass in 1 as cont_block to the flushing functions
+ void disk_io_thread::try_flush_write_blocks(int num, tailqueue& completed_jobs
+ , mutex::scoped_lock& l)
+ {
+ DLOG("try_flush_write_blocks: %d\n", num);
+
+ list_iterator range = m_disk_cache.write_lru_pieces();
+ std::vector<std::pair<piece_manager*, int> > pieces;
+ pieces.reserve(m_disk_cache.num_write_lru_pieces());
+
+ | ||
relevance 0 | ../src/disk_io_thread.cpp:923 | instead of doing a lookup each time through the loop, save cached_piece_entry pointers with piece_refcount incremented to pin them |
instead of doing a lookup each time through the loop, save
+cached_piece_entry pointers with piece_refcount incremented to pin them../src/disk_io_thread.cpp:923 // this is why we pass in 1 as cont_block to the flushing functions
void disk_io_thread::try_flush_write_blocks(int num, tailqueue& completed_jobs
, mutex::scoped_lock& l)
{
@@ -3447,10 +3372,10 @@ cached_piece_entry pointers with piece_refcount incremented to pin them
cached_piece_entry* pe = m_disk_cache.find_piece(i->first, i->second);
if (pe == NULL) continue;
if (pe->num_dirty == 0) continue;
- | ||
relevance 0 | ../src/disk_io_thread.cpp:1123 | instead of doing this. pass in the settings to each storage_interface call. Each disk thread could hold its most recent understanding of the settings in a shared_ptr, and update it every time it wakes up from a job. That way each access to the settings won't require a mutex to be held. |
instead of doing this. pass in the settings to each storage_interface
+ | ||
relevance 0 | ../src/disk_io_thread.cpp:1134 | instead of doing this. pass in the settings to each storage_interface call. Each disk thread could hold its most recent understanding of the settings in a shared_ptr, and update it every time it wakes up from a job. That way each access to the settings won't require a mutex to be held. |
instead of doing this. pass in the settings to each storage_interface
call. Each disk thread could hold its most recent understanding of the settings
in a shared_ptr, and update it every time it wakes up from a job. That way
-each access to the settings won't require a mutex to be held.../src/disk_io_thread.cpp:1123 {
+each access to the settings won't require a mutex to be held.../src/disk_io_thread.cpp:1134 {
INVARIANT_CHECK;
TORRENT_ASSERT(j->next == 0);
TORRENT_ASSERT((j->flags & disk_io_job::in_progress) || !j->storage);
@@ -3494,9 +3419,9 @@ each access to the settings won't require a mutex to be held.../src/dis
// our quanta in case there aren't any other
// jobs to run in between
- | ||
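One way to read the TODO above is a published settings snapshot: whoever applies a settings change swaps in a new immutable object, and each disk thread refreshes its shared_ptr once per wakeup, after which every field access is lock-free. A hedged sketch with a stand-in aux_settings struct, not libtorrent's settings types:

#include <boost/make_shared.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/thread/mutex.hpp>

struct aux_settings { int read_cache_line_size; int write_cache_line_size; };

struct settings_publisher
{
    // called on the thread that applies setting changes
    void update(aux_settings const& s)
    {
        boost::mutex::scoped_lock l(m_mutex);
        m_current = boost::make_shared<aux_settings>(s);
    }

    // called by a disk thread once per wakeup; afterwards it reads its
    // snapshot without touching the mutex again
    boost::shared_ptr<aux_settings const> snapshot() const
    {
        boost::mutex::scoped_lock l(m_mutex);
        return m_current;
    }

private:
    mutable boost::mutex m_mutex;
    boost::shared_ptr<aux_settings const> m_current;
};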
relevance 0 | ../src/disk_io_thread.cpp:1151 | a potentially more efficient solution would be to have a special queue for retry jobs, that's only ever run when a job completes, in any thread. It would only work if counters::num_running_disk_jobs > 0 |
a potentially more efficient solution would be to have a special
+ | ||
relevance 0 | ../src/disk_io_thread.cpp:1162 | a potentially more efficient solution would be to have a special queue for retry jobs, that's only ever run when a job completes, in any thread. It would only work if counters::num_running_disk_jobs > 0 |
a potentially more efficient solution would be to have a special
queue for retry jobs, that's only ever run when a job completes, in
-any thread. It would only work if counters::num_running_disk_jobs > 0../src/disk_io_thread.cpp:1151
+any thread. It would only work if counters::num_running_disk_jobs > 0../src/disk_io_thread.cpp:1162
ptime start_time = time_now_hires();
m_stats_counters.inc_stats_counter(counters::num_running_disk_jobs, 1);
@@ -3527,7 +3452,7 @@ any thread. It would only work if counters::num_running_disk_jobs > 0..
}
#if TORRENT_USE_ASSERT
- | ||
relevance 0 | ../src/disk_io_thread.cpp:1165 | it should clear the hash state even when there's an error, right? |
it should clear the hash state even when there's an error, right?../src/disk_io_thread.cpp:1165 m_stats_counters.inc_stats_counter(counters::num_running_disk_jobs, -1);
+ | ||
relevance 0 | ../src/disk_io_thread.cpp:1176 | it should clear the hash state even when there's an error, right? |
it should clear the hash state even when there's an error, right?../src/disk_io_thread.cpp:1176 m_stats_counters.inc_stats_counter(counters::num_running_disk_jobs, -1);
if (ret == retry_job)
{
@@ -3578,8 +3503,8 @@ any thread. It would only work if counters::num_running_disk_jobs > 0..
j->error.operation = storage_error::alloc_cache_piece;
return -1;
}
- | ||
relevance 0 | ../src/disk_io_thread.cpp:1860 | maybe the tailqueue_iterator should contain a pointer-pointer instead and have an unlink function |
maybe the tailqueue_iterator should contain a pointer-pointer
-instead and have an unlink function../src/disk_io_thread.cpp:1860 j->callback = handler;
+ | ||
relevance 0 | ../src/disk_io_thread.cpp:1871 | maybe the tailqueue_iterator should contain a pointer-pointer instead and have an unlink function |
maybe the tailqueue_iterator should contain a pointer-pointer
+instead and have an unlink function../src/disk_io_thread.cpp:1871 j->callback = handler;
add_fence_job(storage, j);
}
@@ -3630,8 +3555,8 @@ instead and have an unlink function../src/disk_io_thread.cpp:1860<
if (completed_jobs.size())
add_completed_jobs(completed_jobs);
}
- | ||
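The pointer-to-pointer iterator the TODO above hints at is easiest to see on a bare singly linked list (the real tailqueue also maintains a tail pointer, which this sketch ignores): the iterator stores the address of the link that points at the current node, so unlinking, including at the head, is a single assignment.

struct node { node* next; }; // stand-in for the tailqueue node

struct pp_iterator
{
    explicit pp_iterator(node** head) : m_link(head) {}

    node* get() const { return *m_link; }
    void next() { m_link = &(*m_link)->next; }

    // remove the current node from the list and return it; the iterator
    // then refers to the node that followed it
    node* unlink()
    {
        node* n = *m_link;
        *m_link = n->next;
        n->next = 0;
        return n;
    }

private:
    node** m_link;
};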
relevance 0 | ../src/disk_io_thread.cpp:2115 | this is potentially very expensive. One way to solve it would be to have a fence for just this one piece. |
this is potentially very expensive. One way to solve
-it would be to have a fence for just this one piece.../src/disk_io_thread.cpp:2115 }
+ | ||
relevance 0 | ../src/disk_io_thread.cpp:2126 | this is potentially very expensive. One way to solve it would be to have a fence for just this one piece. |
this is potentially very expensive. One way to solve
+it would be to have a fence for just this one piece.../src/disk_io_thread.cpp:2126 }
void disk_io_thread::async_clear_piece(piece_manager* storage, int index
, boost::function<void(disk_io_job const*)> const& handler)
@@ -3682,7 +3607,7 @@ it would be to have a fence for just this one piece.../src/disk_io_thre
if (!pe->hash) return;
if (pe->hashing) return;
- | ||
relevance 0 | ../src/disk_io_thread.cpp:2376 | we should probably just hang the job on the piece and make sure the hasher gets kicked |
we should probably just hang the job on the piece and make sure the hasher gets kicked../src/disk_io_thread.cpp:2376 if (pe == NULL)
+ | ||
relevance 0 | ../src/disk_io_thread.cpp:2387 | we should probably just hang the job on the piece and make sure the hasher gets kicked |
we should probably just hang the job on the piece and make sure the hasher gets kicked../src/disk_io_thread.cpp:2387 if (pe == NULL)
{
int cache_state = (j->flags & disk_io_job::volatile_read)
? cached_piece_entry::volatile_read_lru
@@ -3733,8 +3658,8 @@ it would be to have a fence for just this one piece.../src/disk_io_thre
// increment the refcounts of all
// blocks up front, and then hash them without holding the lock
- | ||
relevance 0 | ../src/disk_io_thread.cpp:2446 | introduce a holder class that automatically increments and decrements the piece_refcount |
introduce a holder class that automatically increments
-and decrements the piece_refcount../src/disk_io_thread.cpp:2446 for (int i = ph->offset / block_size; i < blocks_in_piece; ++i)
+ | ||
relevance 0 | ../src/disk_io_thread.cpp:2457 | introduce a holder class that automatically increments and decrements the piece_refcount |
introduce a holder class that automatically increments
+and decrements the piece_refcount../src/disk_io_thread.cpp:2457 for (int i = ph->offset / block_size; i < blocks_in_piece; ++i)
{
iov.iov_len = (std::min)(block_size, piece_size - ph->offset);
@@ -3785,8 +3710,8 @@ and decrements the piece_refcount../src/disk_io_thread.cpp:2446 | ||
relevance 0 | ../src/disk_io_thread.cpp:2688 | it would be nice to not have to lock the mutex every turn through this loop |
it would be nice to not have to lock the mutex every
-turn through this loop../src/disk_io_thread.cpp:2688 {
+ | ||
relevance 0 | ../src/disk_io_thread.cpp:2699 | it would be nice to not have to lock the mutex every turn through this loop |
it would be nice to not have to lock the mutex every
+turn through this loop../src/disk_io_thread.cpp:2699 {
j->error.ec = error::no_memory;
j->error.operation = storage_error::alloc_cache_piece;
return -1;
@@ -3837,7 +3762,7 @@ turn through this loop../src/disk_io_thread.cpp:2688relevance 0 | ../src/http_tracker_connection.cpp:93 | support authentication (i.e. user name and password) in the URL |
|
support authentication (i.e. user name and password) in the URL../src/http_tracker_connection.cpp:93
+ | ||
relevance 0 | ../src/http_tracker_connection.cpp:93 | support authentication (i.e. user name and password) in the URL |
support authentication (i.e. user name and password) in the URL../src/http_tracker_connection.cpp:93
http_tracker_connection::http_tracker_connection(
io_service& ios
, tracker_manager& man
@@ -3888,7 +3813,7 @@ turn through this loop../src/disk_io_thread.cpp:2688 | ||
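Supporting credentials embedded in the tracker URL mostly comes down to peeling the userinfo part off before connecting and turning it into an HTTP Basic Authorization header. A self-contained sketch of the parsing half (a hypothetical helper, not an existing libtorrent function):

#include <string>

// if the URL carries "user:password@", copy it into userinfo and strip it
// from the URL so the rest of the request code is unchanged
bool extract_userinfo(std::string& url, std::string& userinfo)
{
    std::string::size_type const scheme = url.find("://");
    if (scheme == std::string::npos) return false;
    std::string::size_type const host_start = scheme + 3;
    std::string::size_type const end = url.find_first_of("/?", host_start);
    std::string::size_type const at = url.find('@', host_start);
    if (at == std::string::npos
        || (end != std::string::npos && at > end)) return false;
    userinfo = url.substr(host_start, at - host_start);
    url.erase(host_start, at - host_start + 1);
    return true;
}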
relevance 0 | ../src/http_tracker_connection.cpp:194 | support this somehow |
support this somehow../src/http_tracker_connection.cpp:194 url += escape_string(id.c_str(), id.length());
+ | ||
relevance 0 | ../src/http_tracker_connection.cpp:194 | support this somehow |
support this somehow../src/http_tracker_connection.cpp:194 url += escape_string(id.c_str(), id.length());
}
#if TORRENT_USE_I2P
@@ -3939,7 +3864,7 @@ turn through this loop../src/disk_io_thread.cpp:2688relevance 0 | ../src/lsd.cpp:84 | instead if writing to a file, post alerts. Or call a log callback |
|
instead of writing to a file, post alerts. Or call a log callback../src/lsd.cpp:84}
+ | ||
relevance 0 | ../src/lsd.cpp:84 | instead of writing to a file, post alerts. Or call a log callback |
instead of writing to a file, post alerts. Or call a log callback../src/lsd.cpp:84}
static error_code ec;
@@ -3990,7 +3915,7 @@ lsd::lsd(io_service& ios, peer_callback_t const& cb)
#endif
}
- | ||
relevance 0 | ../src/metadata_transfer.cpp:359 | this is not safe. The torrent could be unloaded while we're still sending the metadata |
this is not safe. The torrent could be unloaded while
+ | ||
relevance 0 | ../src/metadata_transfer.cpp:359 | this is not safe. The torrent could be unloaded while we're still sending the metadata |
this is not safe. The torrent could be unloaded while
we're still sending the metadata../src/metadata_transfer.cpp:359 std::pair<int, int> offset
= req_to_offset(req, (int)m_tp.metadata().left());
@@ -4042,7 +3967,7 @@ we're still sending the metadata../src/metadata_transfer.cpp:359 | ||
relevance 0 | ../src/packet_buffer.cpp:176 | use compare_less_wrap for this comparison as well |
use compare_less_wrap for this comparison as well../src/packet_buffer.cpp:176 while (new_size < size)
+ | ||
relevance 0 | ../src/packet_buffer.cpp:176 | use compare_less_wrap for this comparison as well |
use compare_less_wrap for this comparison as well../src/packet_buffer.cpp:176 while (new_size < size)
new_size <<= 1;
void** new_storage = (void**)malloc(sizeof(void*) * new_size);
@@ -4093,7 +4018,7 @@ we're still sending the metadata../src/metadata_transfer.cpp:359 | ||
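For reference, a wrap-aware comparison in the spirit of the compare_less_wrap the TODO refers to; this is a plausible stand-alone version, not necessarily identical to the libtorrent function. A value is considered smaller if walking upwards from it to the other value, within the masked sequence space, is the shorter direction.

#include <boost/cstdint.hpp>

bool compare_less_wrap(boost::uint32_t lhs, boost::uint32_t rhs
    , boost::uint32_t mask)
{
    // distances walking up from lhs to rhs and from rhs to lhs,
    // in the wrapped (masked) sequence-number space
    boost::uint32_t const dist_up = (rhs - lhs) & mask;
    boost::uint32_t const dist_down = (lhs - rhs) & mask;
    return dist_up < dist_down;
}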
relevance 0 | ../src/part_file.cpp:252 | what do we do if someone is currently reading from the disk from this piece? does it matter? Since we won't actively erase the data from disk, but it may be overwritten soon, it's probably not that big of a deal |
what do we do if someone is currently reading from the disk
+ | ||
relevance 0 | ../src/part_file.cpp:252 | what do we do if someone is currently reading from the disk from this piece? does it matter? Since we won't actively erase the data from disk, but it may be overwritten soon, it's probably not that big of a deal |
what do we do if someone is currently reading from the disk
from this piece? does it matter? Since we won't actively erase the
data from disk, but it may be overwritten soon, it's probably not that
big of a deal../src/part_file.cpp:252 if (((mode & file::rw_mask) != file::read_only)
@@ -4147,7 +4072,7 @@ big of a deal../src/part_file.cpp:252relevance 0 | ../src/part_file.cpp:344 | instead of rebuilding the whole file header and flushing it, update the slot entries as we go |
|
instead of rebuilding the whole file header
+ | ||
relevance 0 | ../src/part_file.cpp:344 | instead of rebuilding the whole file header and flushing it, update the slot entries as we go |
instead of rebuilding the whole file header
and flushing it, update the slot entries as we go../src/part_file.cpp:344 if (block_to_copy == m_piece_size)
{
m_free_slots.push_back(i->second);
@@ -4199,7 +4124,7 @@ and flushing it, update the slot entries as we go../src/part_file.cpp:3
for (int piece = 0; piece < m_max_pieces; ++piece)
{
- | ||
relevance 0 | ../src/peer_connection.cpp:1022 | this should be the global download rate |
this should be the global download rate../src/peer_connection.cpp:1022
+ | ||
relevance 0 | ../src/peer_connection.cpp:1022 | this should be the global download rate |
this should be the global download rate../src/peer_connection.cpp:1022
int rate = 0;
// if we haven't received any data recently, the current download rate
@@ -4250,7 +4175,7 @@ and flushing it, update the slot entries as we go../src/part_file.cpp:3
if (m_ignore_stats) return;
boost::shared_ptr<torrent> t = m_torrent.lock();
if (!t) return;
- | ||
relevance 0 | ../src/peer_connection.cpp:3229 | sort the allowed fast set in priority order |
sort the allowed fast set in priority order../src/peer_connection.cpp:3229
+ | ||
relevance 0 | ../src/peer_connection.cpp:3229 | sort the allowed fast set in priority order |
sort the allowed fast set in priority order../src/peer_connection.cpp:3229
// if the peer has the piece and we want
// to download it, request it
if (int(m_have_piece.size()) > index
@@ -4301,7 +4226,7 @@ and flushing it, update the slot entries as we go../src/part_file.cpp:3
boost::shared_ptr<torrent> t = m_torrent.lock();
TORRENT_ASSERT(t);
TORRENT_ASSERT(t->has_picker());
- | ||
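A minimal sketch of ordering the allowed-fast set by piece priority before requesting, using a plain priority table as a stand-in for the piece_picker:

#include <algorithm>
#include <vector>

struct by_piece_priority
{
    explicit by_piece_priority(std::vector<int> const& prio) : m_prio(prio) {}
    bool operator()(int lhs, int rhs) const
    { return m_prio[lhs] > m_prio[rhs]; }
    std::vector<int> const& m_prio;
};

void sort_allowed_fast(std::vector<int>& allowed_fast
    , std::vector<int> const& piece_priority)
{
    // request higher-priority allowed-fast pieces first
    std::sort(allowed_fast.begin(), allowed_fast.end()
        , by_piece_priority(piece_priority));
}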
relevance 0 | ../src/peer_connection.cpp:5891 | The stats checks can not be honored when authenticated encryption is in use because we may have encrypted data which we cannot authenticate yet |
The stats checks can not be honored when authenticated encryption is in use
+ | ||
relevance 0 | ../src/peer_connection.cpp:5891 | The stats checks can not be honored when authenticated encryption is in use because we may have encrypted data which we cannot authenticate yet |
The stats checks can not be honored when authenticated encryption is in use
because we may have encrypted data which we cannot authenticate yet../src/peer_connection.cpp:5891#if defined TORRENT_LOGGING
peer_log("<<< read %d bytes", int(bytes_transferred));
#endif
@@ -4353,7 +4278,7 @@ because we may have encrypted data which we cannot authenticate yet../s
}
if (num_loops > read_loops) break;
- | ||
relevance 0 | ../src/piece_picker.cpp:2407 | when expanding pieces for cache stripe reasons, the !downloading condition doesn't make much sense |
when expanding pieces for cache stripe reasons,
+ | ||
relevance 0 | ../src/piece_picker.cpp:2407 | when expanding pieces for cache stripe reasons, the !downloading condition doesn't make much sense |
when expanding pieces for cache stripe reasons,
the !downloading condition doesn't make much sense../src/piece_picker.cpp:2407 TORRENT_ASSERT(index < (int)m_piece_map.size() || m_piece_map.empty());
if (index+1 == (int)m_piece_map.size())
return m_blocks_in_last_piece;
@@ -4405,8 +4330,8 @@ the !downloading condition doesn't make much sense../src/piece_picker.c
// the second bool is true if this is the only active peer that is requesting
// and downloading blocks from this piece. Active means having a connection.
boost::tuple<bool, bool> requested_from(piece_picker::downloading_piece const& p
- | ||
relevance 0 | ../src/session_impl.cpp:508 | there's no rule here to make uTP connections not have the global or local rate limits apply to it. This used to be the default. |
there's no rule here to make uTP connections not have the global or
-local rate limits apply to it. This used to be the default.../src/session_impl.cpp:508 m_global_class = m_classes.new_peer_class("global");
+ | ||
relevance 0 | ../src/session_impl.cpp:507 | there's no rule here to make uTP connections not have the global or local rate limits apply to it. This used to be the default. |
there's no rule here to make uTP connections not have the global or
+local rate limits apply to it. This used to be the default.../src/session_impl.cpp:507 m_global_class = m_classes.new_peer_class("global");
m_tcp_peer_class = m_classes.new_peer_class("tcp");
m_local_peer_class = m_classes.new_peer_class("local");
// local peers are always unchoked
@@ -4457,7 +4382,7 @@ local rate limits apply to it. This used to be the default.../src/sessi
, int(rl.rlim_cur * 8 / 10)));
// 20% goes towards regular files (see disk_io_thread)
#if defined TORRENT_LOGGING
- | ||
relevance 0 | ../src/session_impl.cpp:1721 | instead of having a special case for this, just make the default listen interfaces be "0.0.0.0:6881,[::1]:6881" and use the generic path. That would even allow for not listening at all. |
instead of having a special case for this, just make the
+ | ||
relevance 0 | ../src/session_impl.cpp:1721 | instead of having a special case for this, just make the default listen interfaces be "0.0.0.0:6881,[::1]:6881" and use the generic path. That would even allow for not listening at all. |
instead of having a special case for this, just make the
default listen interfaces be "0.0.0.0:6881,[::1]:6881" and use
the generic path. That would even allow for not listening at all.../src/session_impl.cpp:1721
// reset the retry counter
@@ -4510,7 +4435,7 @@ retry:
if (s.sock)
{
TORRENT_ASSERT(!m_abort);
- | ||
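From the API side, the generic path the TODO describes would just turn the special case into an ordinary default for settings_pack::listen_interfaces; the session constructor taking a settings_pack is the same one used elsewhere in this changeset. A sketch, with the interface string taken from the TODO text:

#include <libtorrent/session.hpp>
#include <libtorrent/settings_pack.hpp>

libtorrent::session* make_default_session()
{
    using namespace libtorrent;
    settings_pack pack;
    // what the hard-coded special case would collapse into:
    pack.set_str(settings_pack::listen_interfaces, "0.0.0.0:6881,[::1]:6881");
    // and "not listening at all" would simply be an empty interface list:
    // pack.set_str(settings_pack::listen_interfaces, "");
    return new session(pack, 0);
}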
relevance 0 | ../src/session_impl.cpp:2584 | should this function take a shared_ptr instead? |
should this function take a shared_ptr instead?../src/session_impl.cpp:2584 {
+ | ||
relevance 0 | ../src/session_impl.cpp:2607 | should this function take a shared_ptr instead? |
should this function take a shared_ptr instead?../src/session_impl.cpp:2607 {
#if defined TORRENT_ASIO_DEBUGGING
complete_async("session_impl::on_socks_accept");
#endif
@@ -4561,7 +4486,7 @@ retry:
TORRENT_ASSERT(sp.use_count() > 0);
connection_map::iterator i = m_connections.find(sp);
- | ||
relevance 0 | ../src/session_impl.cpp:2948 | have a separate list for these connections, instead of having to loop through all of them |
have a separate list for these connections, instead of having to loop through all of them../src/session_impl.cpp:2948 if (m_auto_manage_time_scaler < 0)
+ | ||
relevance 0 | ../src/session_impl.cpp:2971 | have a separate list for these connections, instead of having to loop through all of them |
have a separate list for these connections, instead of having to loop through all of them../src/session_impl.cpp:2971 if (m_auto_manage_time_scaler < 0)
{
INVARIANT_CHECK;
m_auto_manage_time_scaler = settings().get_int(settings_pack::auto_manage_interval);
@@ -4612,7 +4537,7 @@ retry:
#ifndef TORRENT_DISABLE_DHT
int dht_down = 0;
- | ||
relevance 0 | ../src/session_impl.cpp:2989 | this should apply to all bandwidth channels |
this should apply to all bandwidth channels../src/session_impl.cpp:2989 t.second_tick(tick_interval_ms, m_tick_residual / 1000);
+ | ||
relevance 0 | ../src/session_impl.cpp:3012 | this should apply to all bandwidth channels |
this should apply to all bandwidth channels../src/session_impl.cpp:3012 t.second_tick(tick_interval_ms, m_tick_residual / 1000);
// if the call to second_tick caused the torrent
// to no longer want to be ticked (i.e. it was
@@ -4663,7 +4588,7 @@ retry:
m_peak_up_rate = (std::max)(m_stat.upload_rate(), m_peak_up_rate);
m_peak_down_rate = (std::max)(m_stat.download_rate(), m_peak_down_rate);
- | ||
relevance 0 | ../src/session_impl.cpp:3477 | these vectors could be copied from m_torrent_lists, if we would maintain them. That way the first pass over all torrents could be avoided. It would be especially efficient if most torrents are not auto-managed whenever we receive a scrape response (or anything that may change the rank of a torrent) that one torrent could re-sort itself in a list that's kept sorted at all times. That way, this pass over all torrents could be avoided alltogether. |
these vectors could be copied from m_torrent_lists,
+ | ||
relevance 0 | ../src/session_impl.cpp:3500 | these vectors could be copied from m_torrent_lists, if we would maintain them. That way the first pass over all torrents could be avoided. It would be especially efficient if most torrents are not auto-managed whenever we receive a scrape response (or anything that may change the rank of a torrent) that one torrent could re-sort itself in a list that's kept sorted at all times. That way, this pass over all torrents could be avoided alltogether. |
these vectors could be copied from m_torrent_lists,
if we would maintain them. That way the first pass over
all torrents could be avoided. It would be especially
efficient if most torrents are not auto-managed
@@ -4671,7 +4596,7 @@ whenever we receive a scrape response (or anything
that may change the rank of a torrent) that one torrent
could re-sort itself in a list that's kept sorted at all
times. That way, this pass over all torrents could be
-avoided altogether.../src/session_impl.cpp:3477#if defined TORRENT_LOGGING
+avoided altogether.../src/session_impl.cpp:3500#if defined TORRENT_LOGGING
if (t->allows_peers())
t->log_to_all_peers("AUTO MANAGER PAUSING TORRENT");
#endif
@@ -4722,7 +4647,7 @@ avoided alltogether.../src/session_impl.cpp:3477relevance 0 | ../src/session_impl.cpp:3552 | allow extensions to sort torrents for queuing |
|
allow extensions to sort torrents for queuing../src/session_impl.cpp:3552 if (t->is_finished())
+ | ||
relevance 0 | ../src/session_impl.cpp:3577 | allow extensions to sort torrents for queuing |
allow extensions to sort torrents for queuing../src/session_impl.cpp:3577 if (t->is_finished())
seeds.push_back(t);
else
downloaders.push_back(t);
@@ -4773,9 +4698,9 @@ avoided alltogether.../src/session_impl.cpp:3477relevance 0 | ../src/session_impl.cpp:3724 | use a lower limit than m_settings.connections_limit to allocate the to 10% or so of connection slots for incoming connections |
|
use a lower limit than m_settings.connections_limit
+ | ||
relevance 0 | ../src/session_impl.cpp:3750 | use a lower limit than m_settings.connections_limit to allocate the to 10% or so of connection slots for incoming connections |
use a lower limit than m_settings.connections_limit
to allocate 10% or so of connection slots for incoming
-connections../src/session_impl.cpp:3724 // robin fashion, so that every torrent is equally likely to connect to a
+connections../src/session_impl.cpp:3750 // robin fashion, so that every torrent is equally likely to connect to a
// peer
// boost connections are connections made by torrent connection
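The arithmetic the TODO above sketches, keeping roughly a tenth of the configured connection slots in reserve for incoming peers (a hypothetical helper, not existing code):

#include <algorithm>

// how many slots the outgoing connect loop may use, given the configured
// connections_limit, reserving ~10% of them for incoming connections
int outgoing_connection_limit(int connections_limit)
{
    int const reserved_for_incoming = (std::max)(1, connections_limit / 10);
    return connections_limit - reserved_for_incoming;
}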
@@ -4826,8 +4751,8 @@ connections../src/session_impl.cpp:3724relevance 0 | ../src/session_impl.cpp:3867 | post a message to have this happen immediately instead of waiting for the next tick |
|
post a message to have this happen
-immediately instead of waiting for the next tick../src/session_impl.cpp:3867 torrent* t = p->associated_torrent().lock().get();
+ | ||
relevance 0 | ../src/session_impl.cpp:3893 | post a message to have this happen immediately instead of waiting for the next tick |
post a message to have this happen
+immediately instead of waiting for the next tick../src/session_impl.cpp:3893 torrent* t = p->associated_torrent().lock().get();
torrent_peer* pi = p->peer_info_struct();
if (p->ignore_unchoke_slots() || t == 0 || pi == 0
@@ -4871,22 +4796,22 @@ immediately instead of waiting for the next tick../src/session_impl.cpp
, performance_alert::bittyrant_with_no_uplimit));
}
- m_allowed_upload_slots = unchoke_sort(peers, max_upload_rate
+ int allowed_upload_slots = unchoke_sort(peers, max_upload_rate
, unchoke_interval, m_settings);
+ m_stats_counters.set_value(counters::num_unchoke_slots
+ , allowed_upload_slots);
int num_opt_unchoke = m_settings.get_int(settings_pack::num_optimistic_unchoke_slots);
- if (num_opt_unchoke == 0) num_opt_unchoke = (std::max)(1, m_allowed_upload_slots / 5);
-
- // reserve some upload slots for optimistic unchokes
- | ||
relevance 0 | ../src/session_impl.cpp:3912 | this should be called for all peers! |
this should be called for all peers!../src/session_impl.cpp:3912
- m_allowed_upload_slots = unchoke_sort(peers, max_upload_rate
- , unchoke_interval, m_settings);
+ if (num_opt_unchoke == 0) num_opt_unchoke = (std::max)(1, allowed_upload_slots / 5);
+ | ||
relevance 0 | ../src/session_impl.cpp:3940 | this should be called for all peers! |
this should be called for all peers!../src/session_impl.cpp:3940 , unchoke_interval, m_settings);
+ m_stats_counters.set_value(counters::num_unchoke_slots
+ , allowed_upload_slots);
int num_opt_unchoke = m_settings.get_int(settings_pack::num_optimistic_unchoke_slots);
- if (num_opt_unchoke == 0) num_opt_unchoke = (std::max)(1, m_allowed_upload_slots / 5);
+ if (num_opt_unchoke == 0) num_opt_unchoke = (std::max)(1, allowed_upload_slots / 5);
// reserve some upload slots for optimistic unchokes
- int unchoke_set_size = m_allowed_upload_slots;
+ int unchoke_set_size = allowed_upload_slots;
// go through all the peers and unchoke the first ones and choke
// all the other ones.
@@ -4929,10 +4854,10 @@ immediately instead of waiting for the next tick../src/session_impl.cpp
{
// no, this peer should be choked
TORRENT_ASSERT(p->peer_info_struct());
- | ||
relevance 0 | ../src/session_impl.cpp:4304 | it might be a nice feature here to limit the number of torrents to send in a single update. By just posting the first n torrents, they would nicely be round-robined because the torrent lists are always pushed back |
it might be a nice feature here to limit the number of torrents
+ | ||
relevance 0 | ../src/session_impl.cpp:4332 | it might be a nice feature here to limit the number of torrents to send in a single update. By just posting the first n torrents, they would nicely be round-robined because the torrent lists are always pushed back |
it might be a nice feature here to limit the number of torrents
to send in a single update. By just posting the first n torrents, they
would nicely be round-robined because the torrent lists are always
-pushed back../src/session_impl.cpp:4304 t->status(&*i, flags);
+pushed back../src/session_impl.cpp:4332 t->status(&*i, flags);
}
}
@@ -4983,9 +4908,9 @@ pushed back../src/session_impl.cpp:4304relevance 0 | ../src/storage.cpp:731 | make this more generic to not just work if files have been renamed, but also if they have been merged into a single file for instance maybe use the same format as .torrent files and reuse some code from torrent_info |
|
make this more generic to not just work if files have been
+ | ||
relevance 0 | ../src/storage.cpp:719 | make this more generic to not just work if files have been renamed, but also if they have been merged into a single file for instance maybe use the same format as .torrent files and reuse some code from torrent_info |
make this more generic to not just work if files have been
renamed, but also if they have been merged into a single file for instance
-maybe use the same format as .torrent files and reuse some code from torrent_info../src/storage.cpp:731 for (;;)
+maybe use the same format as .torrent files and reuse some code from torrent_info../src/storage.cpp:719 for (;;)
{
if (file_offset < files().file_size(file_index))
break;
@@ -4996,8 +4921,8 @@ maybe use the same format as .torrent files and reuse some code from torrent_inf
}
error_code ec;
- file_handle handle = open_file(file_index, file::read_only, ec);
- if (!handle || ec) return slot;
+ file_handle handle = open_file_impl(file_index, file::read_only, ec);
+ if (ec) return slot;
boost::int64_t data_start = handle->sparse_end(file_offset);
return int((data_start + files().piece_length() - 1) / files().piece_length());
@@ -5036,9 +4961,9 @@ maybe use the same format as .torrent files and reuse some code from torrent_inf
if (file_sizes_ent->list_size() == 0)
{
ec.ec = errors::no_files_in_resume_data;
- | ||
relevance 0 | ../src/storage.cpp:1027 | if everything moves OK, except for the partfile we currently won't update the save path, which breaks things. it would probably make more sense to give up on the partfile |
if everything moves OK, except for the partfile
+ | ||
relevance 0 | ../src/storage.cpp:1015 | if everything moves OK, except for the partfile we currently won't update the save path, which breaks things. it would probably make more sense to give up on the partfile |
if everything moves OK, except for the partfile
we currently won't update the save path, which breaks things.
-it would probably make more sense to give up on the partfile../src/storage.cpp:1027 if (ec)
+it would probably make more sense to give up on the partfile../src/storage.cpp:1015 if (ec)
{
ec.file = i->second;
ec.operation = storage_error::copy;
@@ -5089,7 +5014,7 @@ it would probably make more sense to give up on the partfile../src/stor
{
fileop op = { &file::writev
, file::read_write | flags };
- | ||
relevance 0 | ../src/torrent.cpp:507 | if the existing torrent doesn't have metadata, insert the metadata we just downloaded into it. |
if the existing torrent doesn't have metadata, insert
+ | ||
relevance 0 | ../src/torrent.cpp:507 | if the existing torrent doesn't have metadata, insert the metadata we just downloaded into it. |
if the existing torrent doesn't have metadata, insert
the metadata we just downloaded into it.../src/torrent.cpp:507
m_torrent_file = tf;
@@ -5141,7 +5066,7 @@ the metadata we just downloaded into it.../src/torrent.cpp:507 | ||
relevance 0 | ../src/torrent.cpp:658 | if the existing torrent doesn't have metadata, insert the metadata we just downloaded into it. |
if the existing torrent doesn't have metadata, insert
+ | ||
relevance 0 | ../src/torrent.cpp:658 | if the existing torrent doesn't have metadata, insert the metadata we just downloaded into it. |
if the existing torrent doesn't have metadata, insert
the metadata we just downloaded into it.../src/torrent.cpp:658 m_torrent_file = tf;
m_info_hash = tf->info_hash();
@@ -5193,7 +5118,7 @@ the metadata we just downloaded into it.../src/torrent.cpp:658 | ||
relevance 0 | ../src/torrent.cpp:1460 | is verify_peer_cert called once per certificate in the chain, and this function just tells us which depth we're at right now? If so, the comment makes sense. any certificate that isn't the leaf (i.e. the one presented by the peer) should be accepted automatically, given preverified is true. The leaf certificate need to be verified to make sure its DN matches the info-hash |
is verify_peer_cert called once per certificate in the chain, and
+ | ||
relevance 0 | ../src/torrent.cpp:1460 | is verify_peer_cert called once per certificate in the chain, and this function just tells us which depth we're at right now? If so, the comment makes sense. any certificate that isn't the leaf (i.e. the one presented by the peer) should be accepted automatically, given preverified is true. The leaf certificate need to be verified to make sure its DN matches the info-hash |
is verify_peer_cert called once per certificate in the chain, and
this function just tells us which depth we're at right now? If so, the comment
makes sense.
any certificate that isn't the leaf (i.e. the one presented by the peer)
@@ -5249,7 +5174,7 @@ need to be verified to make sure its DN matches the info-hash../src/tor
{
#if defined TORRENT_LOGGING
match = true;
- | ||
relevance 0 | ../src/torrent.cpp:1864 | instead of creating the picker up front here, maybe this whole section should move to need_picker() |
instead of creating the picker up front here,
+ | ||
relevance 0 | ../src/torrent.cpp:1864 | instead of creating the picker up front here, maybe this whole section should move to need_picker() |
instead of creating the picker up front here,
maybe this whole section should move to need_picker()../src/torrent.cpp:1864 {
m_have_all = true;
m_ses.get_io_service().post(boost::bind(&torrent::files_checked, shared_from_this()));
@@ -5301,7 +5226,7 @@ maybe this whole section should move to need_picker()../src/torrent.cpp
// need to consider it finished
std::vector<piece_picker::downloading_piece> dq
- | ||
relevance 0 | ../src/torrent.cpp:2060 | there may be peer extensions relying on the torrent extension still being alive. Only do this if there are no peers. And when the last peer is disconnected, if the torrent is unloaded, clear the extensions m_extensions.clear(); |
there may be peer extensions relying on the torrent extension
+ | ||
relevance 0 | ../src/torrent.cpp:2060 | there may be peer extensions relying on the torrent extension still being alive. Only do this if there are no peers. And when the last peer is disconnected, if the torrent is unloaded, clear the extensions m_extensions.clear(); |
there may be peer extensions relying on the torrent extension
still being alive. Only do this if there are no peers. And when the last peer
is disconnected, if the torrent is unloaded, clear the extensions
m_extensions.clear();../src/torrent.cpp:2060 // pinned torrents are not allowed to be swapped out
@@ -5355,7 +5280,7 @@ m_extensions.clear();../src/torrent.cpp:2060relevance 0 | ../src/torrent.cpp:2735 | this pattern is repeated in a few places. Factor this into a function and generalize the concept of a torrent having a dedicated listen port |
|
this pattern is repeated in a few places. Factor this into
+ | ||
relevance 0 | ../src/torrent.cpp:2735 | this pattern is repeated in a few places. Factor this into a function and generalize the concept of a torrent having a dedicated listen port |
this pattern is repeated in a few places. Factor this into
a function and generalize the concept of a torrent having a
dedicated listen port../src/torrent.cpp:2735 // if the files haven't been checked yet, we're
// not ready for peers. Except, if we don't have metadata,
@@ -5408,7 +5333,7 @@ dedicated listen port../src/torrent.cpp:2735 | ||
relevance 0 | ../src/torrent.cpp:3508 | add one peer per IP the hostname resolves to |
add one peer per IP the hostname resolves to../src/torrent.cpp:3508#endif
+ | ||
relevance 0 | ../src/torrent.cpp:3508 | add one peer per IP the hostname resolves to |
add one peer per IP the hostname resolves to../src/torrent.cpp:3508#endif
void torrent::on_peer_name_lookup(error_code const& e
, std::vector<address> const& host_list, int port)
@@ -5459,7 +5384,7 @@ dedicated listen port../src/torrent.cpp:2735relevance 0 | ../src/torrent.cpp:4499 | update suggest_piece? |
|
update suggest_piece?../src/torrent.cpp:4499
+ | ||
relevance 0 | ../src/torrent.cpp:4499 | update suggest_piece? |
update suggest_piece?../src/torrent.cpp:4499
void torrent::peer_has_all(peer_connection const* peer)
{
if (has_picker())
@@ -5510,7 +5435,7 @@ dedicated listen port../src/torrent.cpp:2735relevance 0 | ../src/torrent.cpp:4642 | really, we should just keep the picker around in this case to maintain the availability counters |
|
really, we should just keep the picker around
+ | ||
relevance 0 | ../src/torrent.cpp:4642 | really, we should just keep the picker around in this case to maintain the availability counters |
really, we should just keep the picker around
in this case to maintain the availability counters../src/torrent.cpp:4642 pieces.reserve(cs.pieces.size());
// sort in ascending order, to get most recently used first
@@ -5562,7 +5487,7 @@ in this case to maintain the availability counters../src/torrent.cpp:46
}
void torrent::abort()
- | ||
relevance 0 | ../src/torrent.cpp:6614 | make this more generic to not just work if files have been renamed, but also if they have been merged into a single file for instance maybe use the same format as .torrent files and reuse some code from torrent_info The mapped_files needs to be read both in the network thread and in the disk thread, since they both have their own mapped files structures which are kept in sync |
make this more generic to not just work if files have been
+ | ||
relevance 0 | ../src/torrent.cpp:6614 | make this more generic to not just work if files have been renamed, but also if they have been merged into a single file for instance maybe use the same format as .torrent files and reuse some code from torrent_info The mapped_files needs to be read both in the network thread and in the disk thread, since they both have their own mapped files structures which are kept in sync |
make this more generic to not just work if files have been
renamed, but also if they have been merged into a single file for instance
maybe use the same format as .torrent files and reuse some code from torrent_info
The mapped_files needs to be read both in the network thread
relevance 0 | ../src/torrent.cpp:6732 | if this is a merkle torrent and we can't restore the tree, we need to wipe all the bits in the have array, but not necessarily; we might want to do a full check to see if we have all the pieces. This is low priority since almost no one uses merkle torrents |
relevance 0 | ../src/torrent.cpp:6922 | make this more generic to not just work if files have been renamed, but also if they have been merged into a single file for instance. using file_base |
pieces.resize(m_torrent_file->num_pieces());
if (!has_picker())
relevance 0 | ../src/torrent.cpp:8924 | add a flag to ignore stats, and only care about resume data for content. For unchanged files, don't trigger a load of the metadata just to save an empty resume data file |
if (m_complete != 0xffffff) seeds = m_complete;
else seeds = m_peer_list ? m_peer_list->num_seeds() : 0;
relevance 0 | ../src/torrent.cpp:9886 | go through the pieces we have and count the total number of downloaders we have. Only count peers that are interested in us since some peers might not send have messages for pieces we have. If num_interested == 0, we need to pick a new piece |
if (num_cache_pieces > m_torrent_file->num_pieces())
num_cache_pieces = m_torrent_file->num_pieces();
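
The counting rule in this item can be illustrated with a stand-alone sketch. The peer record below is a stand-in with only the two fields the rule needs; it is not libtorrent's peer type.

#include <cstddef>
#include <vector>

// stand-in peer record, for illustration only
struct peer_view
{
	bool interested_in_us;        // the peer wants to download from us
	std::vector<bool> has_piece;  // pieces the peer already has
};

// number of interested peers that could still download `piece` from us.
// when this reaches zero, the TODO says a new piece should be picked.
int count_downloaders(std::vector<peer_view> const& peers, int piece)
{
	int num_interested = 0;
	for (std::size_t i = 0; i < peers.size(); ++i)
	{
		if (!peers[i].interested_in_us) continue;
		if (piece < int(peers[i].has_piece.size())
			&& peers[i].has_piece[piece]) continue;
		++num_interested;
	}
	return num_interested;
}
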
relevance 0 | ../src/torrent.cpp:10532 | instead of resorting the whole list, insert the peers directly into the right place |
printf("timed out [average-piece-time: %d ms ]\n"
, m_average_piece_time);
#endif
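
The suggested change is generic enough to sketch without touching the peer list itself: keep the container sorted and place each new element with lower_bound, instead of appending and re-sorting the whole list.

#include <algorithm>
#include <vector>

// generic sketch: a binary search plus a single insert, instead of
// push_back() followed by std::sort() over the whole container
template <typename T, typename Compare>
void insert_sorted(std::vector<T>& sorted, T const& value, Compare comp)
{
	typename std::vector<T>::iterator pos
		= std::lower_bound(sorted.begin(), sorted.end(), value, comp);
	sorted.insert(pos, value);
}
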
relevance 0 | ../src/torrent_peer.cpp:176 | how do we deal with our external address changing? |
, is_v6_addr(false)
#endif
#if TORRENT_USE_I2P
, is_i2p_addr(false)
relevance 0 | ../src/udp_socket.cpp:286 | it would be nice to detect this on posix systems also |
--m_v6_outstanding;
}
else
#endif
relevance 0 | ../src/udp_socket.cpp:777 | use the system resolver_interface here |
void udp_socket::set_proxy_settings(proxy_settings const& ps)
{
CHECK_MAGIC;
relevance 0 | ../src/upnp.cpp:71 | listen_interface is not used. It's meant to bind the broadcast socket |
#include <asio/ip/multicast.hpp>
#else
#include <boost/asio/ip/host_name.hpp>
#include <boost/asio/ip/multicast.hpp>
relevance 0 | ../src/ut_metadata.cpp:316 | we really need to increment the refcounter on the torrent while this buffer is still in the peer's send buffer |
if (!m_tp.need_loaded()) return;
metadata = m_tp.metadata().begin + offset;
metadata_piece_size = (std::min)(
relevance 0 | ../src/utp_stream.cpp:1644 | this loop may not be very efficient |
char* m_buf;
};
relevance 0 | ../src/web_connection_base.cpp:73 | introduce a web-seed default class which has a low download priority |
web_connection_base::web_connection_base(
peer_connection_args const& pack
, web_seed_t& web)
relevance 0 | ../src/kademlia/dht_tracker.cpp:437 | ideally this function would be called when the put completes |
// since it controls whether we re-put the content
TORRENT_ASSERT(!it.is_mutable());
f(it);
return false;
relevance 0 | ../include/libtorrent/bitfield.hpp:158 | rename to data() ? |
if (m_buf[i] != 0) return false;
}
return true;
}
relevance 0 | ../include/libtorrent/block_cache.hpp:218 | make this 32 bits and count seconds since the block cache was created |
bool operator==(cached_piece_entry const& rhs) const
{ return storage.get() == rhs.storage.get() && piece == rhs.piece; }
// if this is set, we'll be calculating the hash
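
A minimal sketch of the space saving this hints at, assuming the cache records its creation time once: timestamps become 32-bit offsets from that origin instead of full absolute time values. The names are made up.

#include <ctime>
#include <boost/cstdint.hpp>

// hypothetical clock: 32-bit seconds relative to the cache's creation time
struct cache_clock
{
	cache_clock() : m_created(std::time(0)) {}
	boost::uint32_t seconds_since_creation() const
	{ return boost::uint32_t(std::time(0) - m_created); }
	std::time_t m_created;
};
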
relevance 0 | ../include/libtorrent/config.hpp:334 | Make this count Unicode characters instead of bytes on windows |
#pragma message ( "unknown OS, assuming BSD" )
#else
#warning "unknown OS, assuming BSD"
#endif
relevance 0 | ../include/libtorrent/disk_buffer_pool.hpp:128 | try to remove the observers, only using the async_allocate handlers |
// number of bytes per block. The BitTorrent
// protocol defines the block size to 16 KiB.
const int m_block_size;
relevance 0 | ../include/libtorrent/file.hpp:169 | move this into a separate header file, TU pair |
TORRENT_EXTRA_EXPORT bool is_root_path(std::string const& f);
// internal used by create_torrent.hpp
TORRENT_EXTRA_EXPORT std::string parent_path(std::string const& f);
TORRENT_EXTRA_EXPORT bool has_parent_path(std::string const& f);
TORRENT_EXTRA_EXPORT char const* filename_cstr(char const* f);
// internal used by create_torrent.hpp
TORRENT_EXTRA_EXPORT std::string filename(std::string const& f);
TORRENT_EXTRA_EXPORT std::string combine_path(std::string const& lhs
, std::string const& rhs);
// internal used by create_torrent.hpp
TORRENT_EXTRA_EXPORT std::string complete(std::string const& f);
TORRENT_EXTRA_EXPORT bool is_complete(std::string const& f);
TORRENT_EXTRA_EXPORT std::string current_working_directory();
#if TORRENT_USE_UNC_PATHS
relevance 0 | ../include/libtorrent/peer_connection.hpp:206 | make this a raw pointer (to save size in the first cache line) and make the constructor take a raw pointer. torrent objects should always outlive their peers |
, m_connecting(!t.expired())
relevance 0 | ../include/libtorrent/peer_connection.hpp:1059 | factor this out into its own class with a virtual interface. torrent and session should implement this interface |
// the local endpoint for this peer, i.e. our address
// and our port. If this is set for outgoing connections
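
Purely as a shape for the refactor described here: the shared functionality would sit behind a small abstract base that both torrent and session could implement. The member functions below are hypothetical placeholders, not the actual set that would move.

// hypothetical interface; the real member set would be whatever
// peer_connection currently reaches into torrent and session for
struct request_host
{
	virtual ~request_host() {}
	virtual bool is_seeding() const = 0;
	virtual void on_invalid_request() = 0;
};
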
relevance 0 | ../include/libtorrent/peer_connection_interface.hpp:45 | make this interface smaller! |
virtual tcp::endpoint const& remote() const = 0;
virtual tcp::endpoint local_endpoint() const = 0;
virtual void disconnect(error_code const& ec, operation_t op, int error = 0) = 0;
relevance 0 | ../include/libtorrent/performance_counters.hpp:132 | should keepalives be in here too? how about dont-have, share-mode, upload-only |
// a connect candidate
connection_attempt_loops,
// successful incoming connections (not rejected for any reason)
relevance 0 | ../include/libtorrent/performance_counters.hpp:439 | some space could be saved here by making gauges 32 bits |
relevance 0 | ../include/libtorrent/performance_counters.hpp:440 | restore these to regular integers. Instead have one copy of the counters per thread and collect them at convenient synchronization points |
num_utp_deleted,
num_counters,
num_gauge_counters = num_counters - num_stats_counters
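
The second item describes a structure that is easy to sketch outside libtorrent: plain, non-atomic counter blocks owned by each thread, folded into the shared totals at a synchronization point. This is an illustration of the idea, not the counters class itself.

#include <cstddef>
#include <vector>
#include <boost/cstdint.hpp>

// one block of plain counters, owned and written by a single thread
struct counter_block
{
	explicit counter_block(std::size_t n) : values(n, 0) {}
	void inc(std::size_t idx, boost::int64_t amount = 1) { values[idx] += amount; }
	std::vector<boost::int64_t> values;
};

// fold the per-thread blocks into the totals while the owning threads are
// quiescent, i.e. at the "convenient synchronization point"
void collect(counter_block& totals, std::vector<counter_block>& per_thread)
{
	for (std::size_t t = 0; t < per_thread.size(); ++t)
		for (std::size_t i = 0; i < per_thread[t].values.size(); ++i)
		{
			totals.values[i] += per_thread[t].values[i];
			per_thread[t].values[i] = 0;
		}
}
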
relevance 0 | ../include/libtorrent/piece_picker.hpp:668 | should this be allocated lazily? |
std::vector<downloading_piece>::const_iterator find_dl_piece(int queue, int index) const;
std::vector<downloading_piece>::iterator find_dl_piece(int queue, int index);
// returns an iterator to the downloading piece, whichever
relevance 0 | ../include/libtorrent/proxy_base.hpp:171 | it would be nice to remember the bind port and bind once we know where the proxy is. m_sock.bind(endpoint, ec); |
void bind(endpoint_type const& /* endpoint */)
{
// m_sock.bind(endpoint);
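
What the comment suggests can be sketched as a tiny wrapper that remembers the requested endpoint and applies it once the proxy has been resolved. This assumes nothing about proxy_base beyond what the comment says; the type below is illustrative.

#include <boost/asio/ip/tcp.hpp>
#include <boost/system/error_code.hpp>

// remember the endpoint passed to bind() and apply it later, once the proxy
// address is known and the underlying socket has been opened
struct deferred_bind
{
	deferred_bind() : m_pending(false) {}

	void bind(boost::asio::ip::tcp::endpoint const& ep)
	{ m_requested = ep; m_pending = true; }

	void apply(boost::asio::ip::tcp::socket& sock, boost::system::error_code& ec)
	{
		if (!m_pending) return;
		sock.bind(m_requested, ec);
		m_pending = false;
	}

	boost::asio::ip::tcp::endpoint m_requested;
	bool m_pending;
};
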
relevance 0 | ../include/libtorrent/receive_buffer.hpp:255 | Detect when the start of the next crypto packet is aligned with the start of piece data and the crypto packet is at least as large as the piece data. With a little extra work we could receive directly into a disk buffer in that case. |
void cut(int size, int packet_size, int offset = 0);
void crypto_cut(int size, int packet_size)
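
The condition this item describes boils down to a simple predicate over offsets and sizes the receive buffer already tracks. The parameter names below are made up for illustration.

// true when the encrypted packet starts exactly at the piece payload and
// covers at least all of it, so the payload could be received straight into
// a disk buffer instead of the regular receive buffer
bool can_receive_into_disk_buffer(int crypto_packet_start, int piece_data_start
	, int crypto_packet_size, int piece_data_size)
{
	return crypto_packet_start == piece_data_start
		&& crypto_packet_size >= piece_data_size;
}
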
relevance 0 | ../include/libtorrent/session.hpp:856 | add get_peer_class_type_filter() as well |
//
// The ``peer_class`` argument cannot be greater than 31. The bitmasks
// representing peer classes in the ``peer_class_filter`` are 32 bits.
//
relevance 0 | ../include/libtorrent/settings_pack.hpp:1104 | deprecate this. ``max_rejects`` is the number of piece requests we will reject in a row while a peer is choked before the peer is considered abusive and is disconnected. |
auto_manage_startup,
// ``seeding_piece_quota`` is the number of pieces to send to a peer,
// when seeding, before rotating in another peer to the unchoke set.
relevance 0 | ../include/libtorrent/torrent.hpp:1254 | this wastes 5 bits per file |
typedef std::list<boost::shared_ptr<torrent_plugin> > extension_list_t;
extension_list_t m_extensions;
#endif
relevance 0 | ../include/libtorrent/torrent.hpp:1313 | These two bitfields should probably be coalesced into one |
// the .torrent file from m_url
// std::vector<char> m_torrent_file_buf;
// this is a list of all pieces that we have announced
relevance 0 | ../include/libtorrent/torrent_info.hpp:123 | include the number of peers received from this tracker, at last announce |
// if this tracker failed the last time it was contacted
// this error code specifies what error occurred
relevance 0 | ../include/libtorrent/torrent_info.hpp:270 | there may be some opportunities to optimize the size of torrent_info. specifically to turn some std::string and std::vector into pointers |
// The URL of the web seed
std::string url;
relevance 0 | ../include/libtorrent/upnp.hpp:108 | support using the windows API for UPnP operations as well |
external_port_must_be_wildcard = 727
};
// hidden
relevance 0 | ../include/libtorrent/utp_stream.hpp:395 | implement blocking write. Low priority since it's not used (yet) |
for (typename Mutable_Buffers::const_iterator i = buffers.begin()
, end(buffers.end()); i != end; ++i)
{
using asio::buffer_cast;
relevance 0 | ../include/libtorrent/kademlia/item.hpp:61 | since this is a public function, it should probably be moved out of this header and into one with other public functions. |
#include <boost/array.hpp>
namespace libtorrent { namespace dht
relevance 0 | ../include/libtorrent/aux_/session_impl.hpp:367 | move the login info into the tracker_request object |
void on_lsd_announce(error_code const& e);
// called when a port mapping is successful, or a router returns
relevance 0 | ../include/libtorrent/aux_/session_impl.hpp:833 | should this be renamed m_outgoing_interfaces? |
// listen socket. For each retry the port number
// is incremented by one
int m_listen_port_retries;
relevance 0 | ../include/libtorrent/aux_/session_impl.hpp:884 | replace this by a proper asio timer |
void open_new_incoming_socks_connection();
relevance 0 | ../include/libtorrent/aux_/session_impl.hpp:889 | replace this by a proper asio timer |
listen_socket_t setup_listener(std::string const& device
, bool ipv4, int port, int& retries, int flags, error_code& ec);
relevance 0 | ../include/libtorrent/aux_/session_impl.hpp:896 | replace this by a proper asio timer |
// this is initialized to the unchoke_interval
// session_setting and decreased every second.
// when it reaches zero, it is reset to the
relevance 0 | ../include/libtorrent/aux_/session_interface.hpp:199 | it would be nice to not have this be part of session_interface |
virtual boost::uint16_t listen_port() const = 0;
virtual boost::uint16_t ssl_listen_port() const = 0;
// used to (potentially) issue socket write calls onto multiple threads