relevance 3 | src/torrent.cpp:6073 | if peer is a really good peer, maybe we shouldn't disconnect it |
if peer is a really good peer, maybe we shouldn't disconnect it
src/torrent.cpp:6073: return false;
}
TORRENT_ASSERT(m_connections.find(p) == m_connections.end());
m_connections.insert(p);
@@ -127,7 +76,7 @@
return m_connections.size() < m_max_connections
&& !is_paused()
&& ((m_state != torrent_status::checking_files
- | ||
relevance 3 | src/utp_stream.cpp:412 | remove the read timeout concept. This should not be necessary |
remove the read timeout concept. This should not be necessary
src/utp_stream.cpp:412: // these are the callbacks made into the utp_stream object
+ | ||
relevance 3 | src/utp_stream.cpp:412 | remove the read timeout concept. This should not be necessary |
remove the read timeout concept. This should not be necessarysrc/utp_stream.cpp:412 // these are the callbacks made into the utp_stream object
// on read/write/connect events
utp_stream::handler_t m_read_handler;
utp_stream::handler_t m_write_handler;
@@ -149,7 +98,7 @@
// before)
ptime m_read_timeout;
- | ||
relevance 3 | src/utp_stream.cpp:415 | remove the write timeout concept. This should not be necessary |
remove the write timeout concept. This should not be necessary
src/utp_stream.cpp:415: utp_stream::handler_t m_read_handler;
+ | ||
relevance 3 | src/utp_stream.cpp:415 | remove the write timeout concept. This should not be necessary |
remove the write timeout concept. This should not be necessarysrc/utp_stream.cpp:415 utp_stream::handler_t m_read_handler;
utp_stream::handler_t m_write_handler;
utp_stream::connect_handler_t m_connect_handler;
@@ -200,59 +149,7 @@
timestamp_history m_their_delay_hist;
// the number of bytes we have buffered in m_inbuf
- | ||
relevance 3 | src/utp_stream.cpp:1685 | this alloca() statement won't necessarily produce correctly aligned memory. do something about that |
this alloca() statement won't necessarily produce
correctly aligned memory. do something about that
src/utp_stream.cpp:1685: bool stack_alloced = false;
-#endif
-
- // payload size being zero means we're just sending
- // an force. We should not pick up the nagle packet
- if (!m_nagle_packet || (payload_size == 0 && force))
- {
- // we only need a heap allocation if we have payload and
- // need to keep the packet around (in the outbuf)
- if (payload_size)
- {
- p = (packet*)malloc(sizeof(packet) + m_mtu);
- p->allocated = m_mtu;
- }
- else
- {
-#ifdef TORRENT_DEBUG
- stack_alloced = true;
-#endif
- TORRENT_ASSERT(force);
- p = (packet*)TORRENT_ALLOCA(char, sizeof(packet) + packet_size);
- UTP_LOGV("%8p: allocating %d bytes on the stack\n", this, packet_size);
- p->allocated = packet_size;
- }
-
- p->size = packet_size;
- p->header_size = packet_size - payload_size;
- p->num_transmissions = 0;
- p->need_resend = false;
- ptr = p->buf;
- h = (utp_header*)ptr;
- ptr += sizeof(utp_header);
-
- h->extension = sack ? 1 : 0;
- h->connection_id = m_send_id;
- // seq_nr is ignored for ST_STATE packets, so it doesn't
- // matter that we say this is a sequence number we haven't
- // actually sent yet
- h->seq_nr = m_seq_nr;
- h->type_ver = ((payload_size ? ST_DATA : ST_STATE) << 4) | 1;
-
- write_payload(p->buf + p->header_size, payload_size);
- }
- else
- {
- // pick up the nagle packet and keep adding bytes to it
- p = m_nagle_packet;
-
- ptr = p->buf + sizeof(utp_header);
- h = (utp_header*)p->buf;
- TORRENT_ASSERT(h->seq_nr == m_seq_nr);
- | ||
relevance 3 | src/kademlia/rpc_manager.cpp:36 | remove this dependency by having the dht observer have its own flags |
remove this dependency by having the dht observer
+ | ||
relevance 3 | src/kademlia/rpc_manager.cpp:36 | remove this dependency by having the dht observer have its own flags |
remove this dependency by having the dht observer
have its own flags
src/kademlia/rpc_manager.cpp:36: contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
@@ -304,7 +201,7 @@ namespace io = libtorrent::detail;
#ifdef TORRENT_DHT_VERBOSE_LOGGING
TORRENT_DEFINE_LOG(rpc)
- | ||
relevance 3 | include/libtorrent/kademlia/dht_tracker.hpp:79 | take a udp_socket_interface here instead. Move udp_socket_interface down into libtorrent core |
take a udp_socket_interface here instead. Move udp_socket_interface down into libtorrent core
include/libtorrent/kademlia/dht_tracker.hpp:79: struct lazy_entry;
+ | ||
relevance 3 | include/libtorrent/kademlia/dht_tracker.hpp:79 | take a udp_socket_interface here instead. Move udp_socket_interface down into libtorrent core |
take a udp_socket_interface here instead. Move udp_socket_interface down into libtorrent coreinclude/libtorrent/kademlia/dht_tracker.hpp:79 struct lazy_entry;
}
namespace libtorrent { namespace dht
@@ -355,7 +252,7 @@ namespace libtorrent { namespace dht
void on_name_lookup(error_code const& e
, udp::resolver::iterator host);
- | ||
relevance 3 | include/libtorrent/kademlia/find_data.hpp:60 | rename this class to find_peers, since that's what it does find_data is an unnecessarily generic name |
rename this class to find_peers, since that's what it does
+ | ||
relevance 3 | include/libtorrent/kademlia/find_data.hpp:60 | rename this class to find_peers, since that's what it does find_data is an unnecessarily generic name |
rename this class to find_peers, since that's what it does
find_data is an unnecessarily generic nameinclude/libtorrent/kademlia/find_data.hpp:60#include <libtorrent/kademlia/node_id.hpp>
#include <libtorrent/kademlia/routing_table.hpp>
#include <libtorrent/kademlia/rpc_manager.hpp>
@@ -407,7 +304,59 @@ private:
nodes_callback m_nodes_callback;
std::map<node_id, std::string> m_write_tokens;
node_id const m_target;
- | ||
relevance 2 | src/session_impl.cpp:5113 | if we still can't find the torrent, we should probably look for it by url here |
if we still can't find the torrent, we should probably look for it by url here
src/session_impl.cpp:5113: }
+ | ||
relevance 2 | src/piece_picker.cpp:1487 | m_downloads size will be > 0 just by having pad-files in the torrent. That should be taken into account here. |
m_downloads size will be > 0 just by having pad-files
+in the torrent. That should be taken into account here.src/piece_picker.cpp:1487 // have an affinity to pick pieces in the same speed
+ // category.
+ // * ignore_whole_pieces
+ // ignores the prefer_whole_pieces parameter (as if
+ // it was 0)
+
+ // only one of rarest_first, sequential can be set
+
+ void piece_picker::pick_pieces(bitfield const& pieces
+ , std::vector<piece_block>& interesting_blocks, int num_blocks
+ , int prefer_whole_pieces, void* peer, piece_state_t speed
+ , int options, std::vector<int> const& suggested_pieces
+ , int num_peers) const
+ {
+ TORRENT_ASSERT(peer == 0 || static_cast<policy::peer*>(peer)->in_use);
+
+ // prevent the number of partial pieces to grow indefinitely
+ // make this scale by the number of peers we have. For large
+ // scale clients, we would have more peers, and allow a higher
+ // threshold for the number of partials
+ if (m_downloads.size() > num_peers * 3 / 2) options |= prioritize_partials;
+
+ if (options & ignore_whole_pieces) prefer_whole_pieces = 0;
+
+ // only one of rarest_first and sequential can be set.
+ TORRENT_ASSERT(((options & rarest_first) ? 1 : 0)
+ + ((options & sequential) ? 1 : 0) <= 1);
+#ifdef TORRENT_EXPENSIVE_INVARIANT_CHECKS
+ TORRENT_PIECE_PICKER_INVARIANT_CHECK;
+#endif
+ TORRENT_ASSERT(num_blocks > 0);
+ TORRENT_ASSERT(pieces.size() == m_piece_map.size());
+
+ TORRENT_ASSERT(!m_priority_boundries.empty()
+ || m_dirty);
+
+ // this will be filled with blocks that we should not request
+ // unless we can't find num_blocks among the other ones.
+ // blocks that belong to pieces with a mismatching speed
+ // category for instance, or if we prefer whole pieces,
+ // blocks belonging to a piece that others have
+ // downloaded to
+ std::vector<piece_block> backup_blocks;
+ std::vector<piece_block> backup_blocks2;
+ const std::vector<int> empty_vector;
+
+ // When prefer_whole_pieces is set (usually set when downloading from
+ // fast peers) the partial pieces will not be prioritized, but actually
+ // ignored as long as possible. All blocks found in downloading
+ // pieces are regarded as backup blocks
+
+ | ||
relevance 2 | src/session_impl.cpp:5113 | if we still can't find the torrent, we should probably look for it by url here |
if we still can't find the torrent, we should probably look for it by url heresrc/session_impl.cpp:5113 }
}
#if defined TORRENT_VERBOSE_LOGGING || defined TORRENT_LOGGING || defined TORRENT_ERROR_LOGGING
else
@@ -458,7 +407,7 @@ private:
torrent_ptr->start();
#ifndef TORRENT_DISABLE_EXTENSIONS
- | ||
relevance 2 | src/torrent.cpp:5789 | pass in ec along with the alert |
pass in ec along with the alertsrc/torrent.cpp:5789
+ | ||
relevance 2 | src/torrent.cpp:5858 | pass in ec along with the alert |
pass in ec along with the alertsrc/torrent.cpp:5858
if (info_hash != m_torrent_file->info_hash())
{
if (alerts().should_post<metadata_failed_alert>())
@@ -509,7 +458,7 @@ private:
{
std::set<peer_connection*>::iterator p = i++;
(*p)->disconnect_if_redundant();
- | ||
relevance 2 | src/utp_stream.cpp:617 | support the option to turn it off |
support the option to turn it offsrc/utp_stream.cpp:617 UTP_STATE_ERROR_WAIT,
+ | ||
relevance 2 | src/utp_stream.cpp:617 | support the option to turn it off |
support the option to turn it offsrc/utp_stream.cpp:617 UTP_STATE_ERROR_WAIT,
// there are no more references to this socket
// and we can delete it
@@ -560,9 +509,9 @@ private:
#if defined TORRENT_VERBOSE_LOGGING || defined TORRENT_LOGGING || defined TORRENT_ERROR_LOGGING
int socket_impl_size() { return sizeof(utp_socket_impl); }
#endif
- | ||
relevance 2 | src/utp_stream.cpp:1848 | we might want to do something else here as well, to resend the packet immediately without it being an MTU probe |
we might want to do something else here
+ | ||
relevance 2 | src/utp_stream.cpp:1850 | we might want to do something else here as well, to resend the packet immediately without it being an MTU probe |
we might want to do something else here
as well, to resend the packet immediately without
-it being an MTU probesrc/utp_stream.cpp:1848 , boost::uint32_t(h->timestamp_difference_microseconds), int(p->mtu_probe)
+it being an MTU probesrc/utp_stream.cpp:1850 , boost::uint32_t(h->timestamp_difference_microseconds), int(p->mtu_probe)
, h->extension);
#endif
@@ -613,7 +562,7 @@ it being an MTU probesrc/utp_stream.cpp:1848relevance 2 | src/kademlia/dht_tracker.cpp:641 | fix this stats logging. For instance, the stats counters could be factored out into its own class, and dht_tracker could take an optional reference to it ++m_replies_sent[e["r"]]; m_replies_bytes_sent[e["r"]] += int(m_send_buf.size()); |
|
fix this stats logging. For instance,
+ | ||
relevance 2 | src/kademlia/dht_tracker.cpp:641 | fix this stats logging. For instance, the stats counters could be factored out into its own class, and dht_tracker could take an optional reference to it ++m_replies_sent[e["r"]]; m_replies_bytes_sent[e["r"]] += int(m_send_buf.size()); |
fix this stats logging. For instance,
the stats counters could be factored out into its own
class, and dht_tracker could take an optional reference to it
++m_replies_sent[e["r"]];
@@ -657,7 +606,7 @@ m_replies_bytes_sent[e["r"]] += int(m_send_buf.size());src/kademlia/dht
}}
- | ||
relevance 2 | src/kademlia/node.cpp:63 | make this configurable in dht_settings |
make this configurable in dht_settingssrc/kademlia/node.cpp:63#include "libtorrent/alert.hpp"
+ | ||
relevance 2 | src/kademlia/node.cpp:63 | make this configurable in dht_settings |
make this configurable in dht_settingssrc/kademlia/node.cpp:63#include "libtorrent/alert.hpp"
#include "libtorrent/socket.hpp"
#include "libtorrent/random.hpp"
#include "libtorrent/aux_/session_impl.hpp"
@@ -708,10 +657,10 @@ node_impl::node_impl(alert_dispatcher* alert_disp
, udp_socket_interface* sock
, dht_settings const& settings, node_id nid, address const& external_address
, dht_observer* observer)
- | ||
relevance 2 | include/libtorrent/torrent.hpp:1038 | this should be a deque, since time critical pieces are expected to be popped in the same order as they are sorted. The expectation is that new items are pushed back and items are popped from the front |
this should be a deque, since time critical
+ | ||
relevance 2 | include/libtorrent/torrent.hpp:1044 | this should be a deque, since time critical pieces are expected to be popped in the same order as they are sorted. The expectation is that new items are pushed back and items are popped from the front |
this should be a deque, since time critical
pieces are expected to be popped in the same order
as they are sorted. The expectation is that new items
-are pushed back and items are popped from the frontinclude/libtorrent/torrent.hpp:1038
+are pushed back and items are popped from the frontinclude/libtorrent/torrent.hpp:1044
struct time_critical_piece
{
// when this piece was first requested
@@ -762,7 +711,7 @@ are pushed back and items are popped from the frontinclude/libtorrent/t
// each bit represents a piece. a set bit means
// the piece has had its hash verified. This
- | ||
relevance 2 | include/libtorrent/torrent_info.hpp:455 | these strings (m_comment, m_created_by, m_ssl_root_cert) could be lazy_entry* to save memory |
these strings (m_comment, m_created_by, m_ssl_root_cert) could be lazy_entry* to save memory
include/libtorrent/torrent_info.hpp:455: std::vector<announce_entry> m_urls;
+ | ||
relevance 2 | include/libtorrent/torrent_info.hpp:466 | these strings (m_comment, m_created_by, m_ssl_root_cert) could be lazy_entry* to save memory |
these strings (m_comment, m_created_by, m_ssl_root_cert) could be lazy_entry* to save memoryinclude/libtorrent/torrent_info.hpp:466 std::vector<announce_entry> m_urls;
std::vector<web_seed_entry> m_web_seeds;
nodes_t m_nodes;
@@ -813,46 +762,7 @@ are pushed back and items are popped from the frontinclude/libtorrent/t
// this is used when creating a torrent. If there's
// only one file there are cases where it's impossible
// to know if it should be written as a multifile torrent
- | ||
relevance 2 | include/libtorrent/kademlia/node_entry.hpp:92 | replace with a union of address_v4 and address_v6 to not waste space. This struct is instantiated hundreds of times for the routing table |
replace with a union of address_v4 and address_v6
-to not waste space. This struct is instantiated hundreds of times
-for the routing tableinclude/libtorrent/kademlia/node_entry.hpp:92 , id(0)
- {
-#ifdef TORRENT_DHT_VERBOSE_LOGGING
- first_seen = time_now();
-#endif
- }
-
- bool pinged() const { return timeout_count != 0xffff; }
- void set_pinged() { if (timeout_count == 0xffff) timeout_count = 0; }
- void timed_out() { if (pinged()) ++timeout_count; }
- int fail_count() const { return pinged() ? timeout_count : 0; }
- void reset_fail_count() { if (pinged()) timeout_count = 0; }
- udp::endpoint ep() const { return udp::endpoint(addr, port); }
- bool confirmed() const { return timeout_count == 0; }
- void update_rtt(int new_rtt)
- {
- if (rtt == 0xffff) rtt = new_rtt;
- else rtt = int(rtt) / 3 + int(new_rtt) * 2 / 3;
- }
-
- address addr;
- boost::uint16_t port;
- // the number of times this node has failed to
- // respond in a row
- boost::uint16_t timeout_count;
- // the average RTT of this node
- boost::uint16_t rtt;
- node_id id;
-#ifdef TORRENT_DHT_VERBOSE_LOGGING
- ptime first_seen;
-#endif
-};
-
-} } // namespace libtorrent::dht
-
-#endif
-
- | ||
relevance 1 | src/http_seed_connection.cpp:120 | in chunked encoding mode, this assert won't hold. the chunk headers should be subtracted from the receive_buffer_size |
in chunked encoding mode, this assert won't hold.
+ | ||
relevance 1 | src/http_seed_connection.cpp:120 | in chunked encoding mode, this assert won't hold. the chunk headers should be subtracted from the receive_buffer_size |
in chunked encoding mode, this assert won't hold.
the chunk headers should be subtracted from the receive_buffer_sizesrc/http_seed_connection.cpp:120 boost::optional<piece_block_progress>
http_seed_connection::downloading_piece_progress() const
{
@@ -904,7 +814,7 @@ the chunk headers should be subtracted from the receive_buffer_sizesrc/
std::string request;
request.reserve(400);
- | ||
relevance 1 | src/peer_connection.cpp:2488 | peers should really be corked/uncorked outside of all completed disk operations |
peers should really be corked/uncorked outside of
+ | ||
relevance 1 | src/peer_connection.cpp:2488 | peers should really be corked/uncorked outside of all completed disk operations |
peers should really be corked/uncorked outside of
all completed disk operationssrc/peer_connection.cpp:2488 }
if (is_disconnecting()) return;
@@ -956,7 +866,7 @@ all completed disk operationssrc/peer_connection.cpp:2488 | ||
relevance 1 | src/session_impl.cpp:5471 | report the proper address of the router as the source IP of this understanding of our external address, instead of the empty address |
report the proper address of the router as the source IP of
+ | ||
relevance 1 | src/session_impl.cpp:5471 | report the proper address of the router as the source IP of this understanding of our external address, instead of the empty address |
report the proper address of the router as the source IP of
this understanding of our external address, instead of the empty addresssrc/session_impl.cpp:5471 void session_impl::on_port_mapping(int mapping, address const& ip, int port
, error_code const& ec, int map_transport)
{
@@ -1008,7 +918,7 @@ this understanding of our external address, instead of the empty address | ||
relevance 1 | src/session_impl.cpp:5676 | report errors as alerts |
report errors as alertssrc/session_impl.cpp:5676 }
+ | ||
relevance 1 | src/session_impl.cpp:5676 | report errors as alerts |
report errors as alertssrc/session_impl.cpp:5676 }
void session_impl::add_dht_router(std::pair<std::string, int> const& node)
{
@@ -1059,7 +969,7 @@ this understanding of our external address, instead of the empty address | ||
relevance 1 | src/session_impl.cpp:6138 | we only need to do this if our global IPv4 address has changed since the DHT (currently) only supports IPv4. Since restarting the DHT is kind of expensive, it would be nice to not do it unnecessarily |
we only need to do this if our global IPv4 address has changed
+ | ||
relevance 1 | src/session_impl.cpp:6138 | we only need to do this if our global IPv4 address has changed since the DHT (currently) only supports IPv4. Since restarting the DHT is kind of expensive, it would be nice to not do it unnecessarily |
we only need to do this if our global IPv4 address has changed
since the DHT (currently) only supports IPv4. Since restarting the DHT
is kind of expensive, it would be nice to not do it unnecessarilysrc/session_impl.cpp:6138 void session_impl::set_external_address(address const& ip
, int source_type, address const& source)
@@ -1112,11 +1022,11 @@ is kind of expensive, it would be nice to not do it unnecessarilysrc/se
#ifdef TORRENT_DISK_STATS
TORRENT_ASSERT(m_buffer_allocations >= 0);
- | ||
relevance 1 | src/torrent.cpp:1112 | make this depend on the error and on the filesystem the files are being downloaded to. If the error is no_space_left_on_device and the filesystem doesn't support sparse files, only zero the priorities of the pieces that are at the tails of all files, leaving everything up to the highest written piece in each file |
make this depend on the error and on the filesystem the
+ | ||
relevance 1 | src/torrent.cpp:1113 | make this depend on the error and on the filesystem the files are being downloaded to. If the error is no_space_left_on_device and the filesystem doesn't support sparse files, only zero the priorities of the pieces that are at the tails of all files, leaving everything up to the highest written piece in each file |
make this depend on the error and on the filesystem the
files are being downloaded to. If the error is no_space_left_on_device
and the filesystem doesn't support sparse files, only zero the priorities
of the pieces that are at the tails of all files, leaving everything
-up to the highest written piece in each filesrc/torrent.cpp:1112 if (c) c->disconnect(errors::no_memory);
+up to the highest written piece in each filesrc/torrent.cpp:1113 if (c) c->disconnect(errors::no_memory);
return;
}
@@ -1154,6 +1064,7 @@ up to the highest written piece in each filesrc/torrent.cpp:1112src/torrent.cpp:1112 | ||
relevance 1 | src/torrent.cpp:5292 | save the send_stats state instead of throwing them away it may pose an issue when downgrading though |
save the send_stats state instead of throwing them away
-it may pose an issue when downgrading thoughsrc/torrent.cpp:5292 ? (1 << k) : 0;
+ | ||
relevance 1 | src/torrent.cpp:5361 | save the send_stats state instead of throwing them away it may pose an issue when downgrading though |
save the send_stats state instead of throwing them away
+it may pose an issue when downgrading thoughsrc/torrent.cpp:5361 ? (1 << k) : 0;
bitmask.append(1, v);
TORRENT_ASSERT(bits == 8 || j == num_bitmask_bytes - 1);
}
@@ -1219,8 +1129,8 @@ it may pose an issue when downgrading thoughsrc/torrent.cpp:5292 | ||
relevance 1 | src/torrent.cpp:5930 | ideally, we would disconnect the oldest connection i.e. the one that has waited the longest to connect. |
ideally, we would disconnect the oldest connection
-i.e. the one that has waited the longest to connect.src/torrent.cpp:5930 if (m_ses.is_aborted())
+ | ||
relevance 1 | src/torrent.cpp:5999 | ideally, we would disconnect the oldest connection i.e. the one that has waited the longest to connect. |
ideally, we would disconnect the oldest connection
+i.e. the one that has waited the longest to connect.src/torrent.cpp:5999 if (m_ses.is_aborted())
{
p->disconnect(errors::session_closing);
return false;
@@ -1271,11 +1181,9 @@ i.e. the one that has waited the longest to connect.src/torrent.cpp:593
{
boost::shared_ptr<peer_plugin> pp((*i)->new_connection(p));
if (pp) p->add_extension(pp);
- | ||
relevance 1 | src/torrent.cpp:6179 | should disconnect all peers that have the pieces we have not just seeds. It would be pretty expensive to check all pieces for all peers though |
should disconnect all peers that have the pieces we have
+ | ||
relevance 1 | src/torrent.cpp:6250 | should disconnect all peers that have the pieces we have not just seeds. It would be pretty expensive to check all pieces for all peers though |
should disconnect all peers that have the pieces we have
not just seeds. It would be pretty expensive to check all pieces
-for all peers thoughsrc/torrent.cpp:6179 }
-#if defined TORRENT_DEBUG || TORRENT_RELEASE_ASSERTS
- m_finished_alert_posted = true;
+for all peers thoughsrc/torrent.cpp:6250 m_finished_alert_posted = true;
#endif
set_state(torrent_status::finished);
@@ -1293,22 +1201,25 @@ for all peers thoughsrc/torrent.cpp:6179 std::vector<peer_connection*> seeds;
- for (peer_iterator i = m_connections.begin();
- i != m_connections.end(); ++i)
+ if (settings().close_redundant_connections)
{
- peer_connection* p = *i;
- TORRENT_ASSERT(p->associated_torrent().lock().get() == this);
- if (p->upload_only())
+ std::vector<peer_connection*> seeds;
+ for (peer_iterator i = m_connections.begin();
+ i != m_connections.end(); ++i)
{
+ peer_connection* p = *i;
+ TORRENT_ASSERT(p->associated_torrent().lock().get() == this);
+ if (p->upload_only())
+ {
#if defined TORRENT_VERBOSE_LOGGING || defined TORRENT_ERROR_LOGGING
- p->peer_log("*** SEED, CLOSING CONNECTION");
+ p->peer_log("*** SEED, CLOSING CONNECTION");
#endif
- seeds.push_back(p);
+ seeds.push_back(p);
+ }
}
+ std::for_each(seeds.begin(), seeds.end()
+ , boost::bind(&peer_connection::disconnect, _1, errors::torrent_finished, 0));
}
- std::for_each(seeds.begin(), seeds.end()
- , boost::bind(&peer_connection::disconnect, _1, errors::torrent_finished, 0));
if (m_abort) return;
@@ -1323,8 +1234,7 @@ for all peers thoughsrc/torrent.cpp:6179relevance 1 | src/torrent_info.cpp:187 | we might save constructing a std::String if this would take a char const* instead |
|
we might save constructing a std::string if this would take a char const* instead
src/torrent_info.cpp:187: {
+ | ||
relevance 1 | src/torrent_info.cpp:187 | we might save constructing a std::string if this would take a char const* instead |
we might save constructing a std::string if this would take a char const* instead
src/torrent_info.cpp:187: {
tmp_path += i[0];
tmp_path += i[1];
tmp_path += i[2];
@@ -1375,9 +1285,9 @@ for all peers thoughsrc/torrent.cpp:6179relevance 1 | src/torrent_info.cpp:366 | this logic should be a separate step done once the torrent is loaded, and the original filenames should be preserved! |
|
this logic should be a separate step
+ | ||
relevance 1 | src/torrent_info.cpp:367 | this logic should be a separate step done once the torrent is loaded, and the original filenames should be preserved! |
this logic should be a separate step
done once the torrent is loaded, and the original
-filenames should be preserved!src/torrent_info.cpp:366 }
+filenames should be preserved!src/torrent_info.cpp:367 }
return false;
}
};
@@ -1415,8 +1325,8 @@ filenames should be preserved!src/torrent_info.cpp:366relevance 1 | src/torrent_info.cpp:387 | once the filename renaming is removed from here this check can be removed as well |
|
once the filename renaming is removed from here
-this check can be removed as wellsrc/torrent_info.cpp:387 return false;
+ | ||
relevance 1 | src/torrent_info.cpp:388 | once the filename renaming is removed from here this check can be removed as well |
once the filename renaming is removed from here
+this check can be removed as wellsrc/torrent_info.cpp:388 return false;
int cnt = 0;
std::set<std::string, string_less_no_case> files;
@@ -1467,7 +1377,7 @@ this check can be removed as wellsrc/torrent_info.cpp:387 | ||
relevance 1 | src/kademlia/node.cpp:690 | find_node should write directly to the response entry |
find_node should write directly to the response entrysrc/kademlia/node.cpp:690 {
+ | ||
relevance 1 | src/kademlia/node.cpp:690 | find_node should write directly to the response entry |
find_node should write directly to the response entrysrc/kademlia/node.cpp:690 {
TORRENT_LOG(node) << " values: " << reply["values"].list().size();
}
#endif
@@ -1518,7 +1428,7 @@ this check can be removed as wellsrc/torrent_info.cpp:387 | ||
relevance 1 | include/libtorrent/ip_voter.hpp:100 | instead, have one instance per possible subnet, global IPv4, global IPv6, loopback, 192.168.x.x, 10.x.x.x, etc. |
instead, have one instance per possible subnet, global IPv4, global IPv6, loopback, 192.168.x.x, 10.x.x.x, etc.include/libtorrent/ip_voter.hpp:100 bloom_filter<32> m_external_address_voters;
+ | ||
relevance 1 | include/libtorrent/ip_voter.hpp:100 | instead, have one instance per possible subnet, global IPv4, global IPv6, loopback, 192.168.x.x, 10.x.x.x, etc. |
instead, have one instance per possible subnet, global IPv4, global IPv6, loopback, 192.168.x.x, 10.x.x.x, etc.include/libtorrent/ip_voter.hpp:100 bloom_filter<32> m_external_address_voters;
std::vector<external_ip_t> m_external_addresses;
address m_external_address;
};
@@ -1545,7 +1455,7 @@ this check can be removed as wellsrc/torrent_info.cpp:387 | ||
relevance 1 | include/libtorrent/utp_stream.hpp:350 | implement blocking write. Low priority since it's not used (yet) |
implement blocking write. Low priority since it's not used (yet)include/libtorrent/utp_stream.hpp:350 for (typename Mutable_Buffers::const_iterator i = buffers.begin()
+ | ||
relevance 1 | include/libtorrent/utp_stream.hpp:350 | implement blocking write. Low priority since it's not used (yet) |
implement blocking write. Low priority since it's not used (yet)include/libtorrent/utp_stream.hpp:350 for (typename Mutable_Buffers::const_iterator i = buffers.begin()
, end(buffers.end()); i != end; ++i)
{
using asio::buffer_cast;
@@ -1596,7 +1506,7 @@ this check can be removed as wellsrc/torrent_info.cpp:387 | ||
relevance 1 | include/libtorrent/web_peer_connection.hpp:127 | if we make this be a disk_buffer_holder instead we would save a copy sometimes use allocate_disk_receive_buffer and release_disk_receive_buffer |
if we make this be a disk_buffer_holder instead
+ | ||
relevance 1 | include/libtorrent/web_peer_connection.hpp:127 | if we make this be a disk_buffer_holder instead we would save a copy sometimes use allocate_disk_receive_buffer and release_disk_receive_buffer |
if we make this be a disk_buffer_holder instead
we would save a copy sometimes
use allocate_disk_receive_buffer and release_disk_receive_bufferinclude/libtorrent/web_peer_connection.hpp:127
private:
@@ -1647,7 +1557,7 @@ use allocate_disk_receive_buffer and release_disk_receive_bufferinclude
#endif // TORRENT_WEB_PEER_CONNECTION_HPP_INCLUDED
- | ||
relevance 0 | src/bt_peer_connection.cpp:660 | this could be optimized using knuth morris pratt |
this could be optimized using Knuth-Morris-Pratt
src/bt_peer_connection.cpp:660: if (m_encrypted && m_rc4_encrypted)
+ | ||
relevance 0 | src/bt_peer_connection.cpp:660 | this could be optimized using knuth morris pratt |
this could be optimized using knuth morris prattsrc/bt_peer_connection.cpp:660 if (m_encrypted && m_rc4_encrypted)
{
fun = encrypt;
userdata = m_enc_handler.get();
@@ -1698,7 +1608,7 @@ use allocate_disk_receive_buffer and release_disk_receive_bufferinclude
// }
// no complete sync
- | ||
relevance 0 | src/bt_peer_connection.cpp:1755 | don't trust this blindly |
don't trust this blindlysrc/bt_peer_connection.cpp:1755 // but where do we put that info?
+ | ||
relevance 0 | src/bt_peer_connection.cpp:1755 | don't trust this blindly |
don't trust this blindlysrc/bt_peer_connection.cpp:1755 // but where do we put that info?
int last_seen_complete = boost::uint8_t(root.dict_find_int_value("complete_ago", -1));
if (last_seen_complete >= 0) set_last_seen_complete(last_seen_complete);
@@ -1749,7 +1659,7 @@ use allocate_disk_receive_buffer and release_disk_receive_bufferinclude
disconnect(errors::upload_upload_connection);
}
- | ||
relevance 0 | src/bt_peer_connection.cpp:2069 | if we're finished, send upload_only message |
if we're finished, send upload_only messagesrc/bt_peer_connection.cpp:2069 if (msg[5 + k / 8] & (0x80 >> (k % 8))) bitfield_string[k] = '1';
+ | ||
relevance 0 | src/bt_peer_connection.cpp:2074 | if we're finished, send upload_only message |
if we're finished, send upload_only messagesrc/bt_peer_connection.cpp:2074 if (msg[5 + k / 8] & (0x80 >> (k % 8))) bitfield_string[k] = '1';
else bitfield_string[k] = '0';
}
peer_log("==> BITFIELD [ %s ]", bitfield_string.c_str());
@@ -1800,8 +1710,8 @@ use allocate_disk_receive_buffer and release_disk_receive_bufferinclude
detail::write_address(remote().address(), out);
handshake["yourip"] = remote_address;
handshake["reqq"] = m_ses.settings().max_allowed_in_request_queue;
- | ||
relevance 0 | src/bt_peer_connection.cpp:3308 | move the erasing into the loop above remove all payload ranges that has been sent |
move the erasing into the loop above
-remove all payload ranges that has been sentsrc/bt_peer_connection.cpp:3308 for (std::vector<range>::iterator i = m_payloads.begin();
+ | ||
relevance 0 | src/bt_peer_connection.cpp:3313 | move the erasing into the loop above remove all payload ranges that has been sent |
move the erasing into the loop above
+remove all payload ranges that has been sentsrc/bt_peer_connection.cpp:3313 for (std::vector<range>::iterator i = m_payloads.begin();
i != m_payloads.end(); ++i)
{
i->start -= bytes_transferred;
@@ -1852,7 +1762,7 @@ remove all payload ranges that has been sentsrc/bt_peer_connection.cpp:
TORRENT_ASSERT(m_sent_handshake);
}
- | ||
relevance 0 | src/file.cpp:1205 | is there any way to pre-fetch data from a file on windows? |
is there any way to pre-fetch data from a file on windows?src/file.cpp:1205
+ | ||
relevance 0 | src/file.cpp:1205 | is there any way to pre-fetch data from a file on windows? |
is there any way to pre-fetch data from a file on windows?src/file.cpp:1205
void file::init_file()
{
if (m_page_size != 0) return;
@@ -1903,7 +1813,7 @@ remove all payload ranges that has been sentsrc/bt_peer_connection.cpp:
#ifdef TORRENT_DEBUG
if (m_open_mode & no_buffer)
{
- | ||
relevance 0 | src/http_tracker_connection.cpp:99 | support authentication (i.e. user name and password) in the URL |
support authentication (i.e. user name and password) in the URLsrc/http_tracker_connection.cpp:99 , aux::session_impl const& ses
+ | ||
relevance 0 | src/http_tracker_connection.cpp:99 | support authentication (i.e. user name and password) in the URL |
support authentication (i.e. user name and password) in the URLsrc/http_tracker_connection.cpp:99 , aux::session_impl const& ses
, proxy_settings const& ps
, std::string const& auth
#if TORRENT_USE_I2P
@@ -1954,7 +1864,7 @@ remove all payload ranges that has been sentsrc/bt_peer_connection.cpp:
size_t arguments_start = url.find('?');
if (arguments_start != std::string::npos)
url += "&";
- | ||
relevance 0 | src/i2p_stream.cpp:172 | move this to proxy_base and use it in all proxies |
move this to proxy_base and use it in all proxiessrc/i2p_stream.cpp:172 {
+ | ||
relevance 0 | src/i2p_stream.cpp:172 | move this to proxy_base and use it in all proxies |
move this to proxy_base and use it in all proxiessrc/i2p_stream.cpp:172 {
m_state = sam_idle;
std::string name = m_sam_socket->name_lookup();
@@ -2005,7 +1915,7 @@ remove all payload ranges that has been sentsrc/bt_peer_connection.cpp:
// send hello command
m_state = read_hello_response;
- | ||
relevance 0 | src/packet_buffer.cpp:176 | use compare_less_wrap for this comparison as well |
use compare_less_wrap for this comparison as wellsrc/packet_buffer.cpp:176 while (new_size < size)
+ | ||
relevance 0 | src/packet_buffer.cpp:176 | use compare_less_wrap for this comparison as well |
use compare_less_wrap for this comparison as wellsrc/packet_buffer.cpp:176 while (new_size < size)
new_size <<= 1;
void** new_storage = (void**)malloc(sizeof(void*) * new_size);
@@ -2056,7 +1966,7 @@ remove all payload ranges that has been sentsrc/bt_peer_connection.cpp:
if (m_storage[m_last & mask]) break;
++m_last;
m_last &= 0xffff;
- | ||
relevance 0 | src/peer_connection.cpp:2651 | this might need something more so that once we have the metadata we can construct a full bitfield |
this might need something more
+ | ||
relevance 0 | src/peer_connection.cpp:2651 | this might need something more so that once we have the metadata we can construct a full bitfield |
this might need something more
so that once we have the metadata
we can construct a full bitfieldsrc/peer_connection.cpp:2651
#ifdef TORRENT_VERBOSE_LOGGING
@@ -2109,7 +2019,7 @@ we can construct a full bitfieldsrc/peer_connection.cpp:2651 | ||
relevance 0 | src/peer_connection.cpp:2782 | sort the allowed fast set in priority order |
sort the allowed fast set in priority ordersrc/peer_connection.cpp:2782 // this piece index later
+ | ||
relevance 0 | src/peer_connection.cpp:2782 | sort the allowed fast set in priority order |
sort the allowed fast set in priority ordersrc/peer_connection.cpp:2782 // this piece index later
m_allowed_fast.push_back(index);
// if the peer has the piece and we want
@@ -2160,7 +2070,7 @@ we can construct a full bitfieldsrc/peer_connection.cpp:2651 | ||
relevance 0 | src/peer_connection.cpp:3892 | we should probably just send a HAVE_ALL here |
we should probably just send a HAVE_ALL heresrc/peer_connection.cpp:3892 std::fill(m_recv_buffer.begin() + m_recv_pos, m_recv_buffer.end(), 0);
+ | ||
relevance 0 | src/peer_connection.cpp:3892 | we should probably just send a HAVE_ALL here |
we should probably just send a HAVE_ALL heresrc/peer_connection.cpp:3892 std::fill(m_recv_buffer.begin() + m_recv_pos, m_recv_buffer.end(), 0);
#endif
m_packet_size = packet_size;
@@ -2211,7 +2121,7 @@ we can construct a full bitfieldsrc/peer_connection.cpp:2651 | ||
relevance 0 | src/peer_connection.cpp:4475 | peers should really be corked/uncorked outside of all completed disk operations |
peers should really be corked/uncorked outside of
+ | ||
relevance 0 | src/peer_connection.cpp:4475 | peers should really be corked/uncorked outside of all completed disk operations |
peers should really be corked/uncorked outside of
all completed disk operationssrc/peer_connection.cpp:4475 // this means we're in seed mode and we haven't yet
// verified this piece (r.piece)
t->filesystem().async_read_and_hash(r, boost::bind(&peer_connection::on_disk_read_complete
@@ -2263,7 +2173,7 @@ all completed disk operationssrc/peer_connection.cpp:4475 | ||
relevance 0 | src/policy.cpp:857 | only allow _one_ connection to use this override at a time |
only allow _one_ connection to use this
+ | ||
relevance 0 | src/policy.cpp:857 | only allow _one_ connection to use this override at a time |
only allow _one_ connection to use this
override at a timesrc/policy.cpp:857 " external: " << external.external_address(m_peers[candidate]->address()) <<
" t: " << (session_time - m_peers[candidate]->last_connected) <<
" ]\n";
@@ -2315,7 +2225,7 @@ override at a timesrc/policy.cpp:857relevance 0 | src/policy.cpp:1889 | how do we deal with our external address changing? Pass in a force-update maybe? and keep a version number in policy |
|
how do we deal with our external address changing? Pass in a force-update maybe? and keep a version number in policysrc/policy.cpp:1889#endif
+ | ||
relevance 0 | src/policy.cpp:1889 | how do we deal with our external address changing? Pass in a force-update maybe? and keep a version number in policy |
how do we deal with our external address changing? Pass in a force-update maybe? and keep a version number in policysrc/policy.cpp:1889#endif
, on_parole(false)
, banned(false)
#ifndef TORRENT_DISABLE_DHT
@@ -2366,7 +2276,7 @@ override at a timesrc/policy.cpp:857relevance 0 | src/session_impl.cpp:1887 | recalculate all connect candidates for all torrents |
|
recalculate all connect candidates for all torrentssrc/session_impl.cpp:1887 m_upload_rate.close();
+ | ||
relevance 0 | src/session_impl.cpp:1887 | recalculate all connect candidates for all torrents |
recalculate all connect candidates for all torrentssrc/session_impl.cpp:1887 m_upload_rate.close();
// #error closing the udp socket here means that
// the uTP connections cannot be closed gracefully
@@ -2417,7 +2327,7 @@ override at a timesrc/policy.cpp:857relevance 0 | src/session_impl.cpp:4267 | allow extensions to sort torrents for queuing |
|
allow extensions to sort torrents for queuingsrc/session_impl.cpp:4267 else if (!t->is_paused())
+ | ||
relevance 0 | src/session_impl.cpp:4267 | allow extensions to sort torrents for queuing |
allow extensions to sort torrents for queuingsrc/session_impl.cpp:4267 else if (!t->is_paused())
{
TORRENT_ASSERT(t->m_resume_data_loaded || !t->valid_metadata());
--hard_limit;
@@ -2468,7 +2378,7 @@ override at a timesrc/policy.cpp:857relevance 0 | src/session_impl.cpp:4423 | use a lower limit than m_settings.connections_limit to allocate the to 10% or so of connection slots for incoming connections |
|
use a lower limit than m_settings.connections_limit
+ | ||
relevance 0 | src/session_impl.cpp:4423 | use a lower limit than m_settings.connections_limit to allocate the to 10% or so of connection slots for incoming connections |
use a lower limit than m_settings.connections_limit
to allocate the to 10% or so of connection slots for incoming
connectionssrc/session_impl.cpp:4423 {
if (m_boost_connections > max_connections)
@@ -2521,7 +2431,7 @@ connectionssrc/session_impl.cpp:4423relevance 0 | src/session_impl.cpp:4457 | make this bias configurable |
|
make this bias configurablesrc/session_impl.cpp:4457 | ||
relevance 0 | src/session_impl.cpp:4458 | also take average_peers into account, to create a bias for downloading torrents with < average peers |
also take average_peers into account, to create a bias for downloading torrents with < average peerssrc/session_impl.cpp:4458 average_peers = num_downloads_peers / num_downloads;
+ | ||
relevance 0 | src/session_impl.cpp:4457 | make this bias configurable |
make this bias configurablesrc/session_impl.cpp:4457 | ||
relevance 0 | src/session_impl.cpp:4458 | also take average_peers into account, to create a bias for downloading torrents with < average peers |
also take average_peers into account, to create a bias for downloading torrents with < average peerssrc/session_impl.cpp:4458 average_peers = num_downloads_peers / num_downloads;
if (m_next_connect_torrent == m_torrents.end())
m_next_connect_torrent = m_torrents.begin();
@@ -2572,7 +2482,7 @@ connectionssrc/session_impl.cpp:4423relevance 0 | src/session_impl.cpp:4602 | make configurable |
|
make configurablesrc/session_impl.cpp:4602
+ | ||
relevance 0 | src/session_impl.cpp:4602 | make configurable |
make configurablesrc/session_impl.cpp:4602
#ifdef TORRENT_DEBUG
for (std::vector<peer_connection*>::const_iterator i = peers.begin()
, end(peers.end()), prev(peers.end()); i != end; ++i)
@@ -2605,7 +2515,7 @@ connectionssrc/session_impl.cpp:4423relevance 0 | src/session_impl.cpp:4616 | make configurable |
|
make configurablesrc/session_impl.cpp:4616 >= (*i)->uploaded_since_unchoke() * 1000
+ | ||
relevance 0 | src/session_impl.cpp:4616 | make configurable |
make configurablesrc/session_impl.cpp:4616 >= (*i)->uploaded_since_unchoke() * 1000
* (1 + t2->priority()) / total_milliseconds(unchoke_interval));
}
prev = i;
@@ -2656,7 +2566,7 @@ connectionssrc/session_impl.cpp:4423 | ||
relevance 0 | src/storage.cpp:325 | if the read fails, set error and exit immediately |
if the read fails, set error and exit immediatelysrc/storage.cpp:325 int block_size = 16 * 1024;
+ | ||
relevance 0 | src/storage.cpp:325 | if the read fails, set error and exit immediately |
if the read fails, set error and exit immediatelysrc/storage.cpp:325 int block_size = 16 * 1024;
if (m_storage->disk_pool()) block_size = m_storage->disk_pool()->block_size();
int size = slot_size;
int num_blocks = (size + block_size - 1) / block_size;
@@ -2707,7 +2617,7 @@ connectionssrc/session_impl.cpp:4423relevance 0 | src/storage.cpp:358 | if the read fails, set error and exit immediately |
|
if the read fails, set error and exit immediatelysrc/storage.cpp:358 else
+ | ||
relevance 0 | src/storage.cpp:358 | if the read fails, set error and exit immediately |
if the read fails, set error and exit immediatelysrc/storage.cpp:358 else
{
ph.h.update((char const*)bufs[i].iov_base, bufs[i].iov_len);
small_piece_size -= bufs[i].iov_len;
@@ -2758,7 +2668,7 @@ connectionssrc/session_impl.cpp:4423relevance 0 | src/storage.cpp:623 | make this more generic to not just work if files have been renamed, but also if they have been merged into a single file for instance maybe use the same format as .torrent files and reuse some code from torrent_info |
|
make this more generic to not just work if files have been
+ | ||
relevance 0 | src/storage.cpp:623 | make this more generic to not just work if files have been renamed, but also if they have been merged into a single file for instance maybe use the same format as .torrent files and reuse some code from torrent_info |
make this more generic to not just work if files have been
renamed, but also if they have been merged into a single file for instance
maybe use the same format as .torrent files and reuse some code from torrent_infosrc/storage.cpp:623 for (file_iter = files().begin();;)
{
@@ -2811,7 +2721,7 @@ maybe use the same format as .torrent files and reuse some code from torrent_inf
for (int i = 0; i < file_sizes_ent->list_size(); ++i)
{
- | ||
relevance 0 | src/storage.cpp:1192 | what if file_base is used to merge several virtual files into a single physical file? We should probably disable this if file_base is used. This is not a widely used feature though |
what if file_base is used to merge several virtual files
+ | ||
relevance 0 | src/storage.cpp:1192 | what if file_base is used to merge several virtual files into a single physical file? We should probably disable this if file_base is used. This is not a widely used feature though |
what if file_base is used to merge several virtual files
into a single physical file? We should probably disable this
if file_base is used. This is not a widely used feature thoughsrc/storage.cpp:1192 int bytes_transferred = 0;
// if the file is opened in no_buffer mode, and the
@@ -2864,12 +2774,12 @@ if file_base is used. This is not a widely used feature thoughsrc/stora
// makes unaligned requests (and the disk cache is disabled or fully utilized
// for write cache).
- | ||
relevance 0 | src/torrent.cpp:5025 | make this more generic to not just work if files have been renamed, but also if they have been merged into a single file for instance maybe use the same format as .torrent files and reuse some code from torrent_info The mapped_files needs to be read both in the network thread and in the disk thread, since they both have their own mapped files structures which are kept in sync |
make this more generic to not just work if files have been
+ | ||
relevance 0 | src/torrent.cpp:5094 | make this more generic to not just work if files have been renamed, but also if they have been merged into a single file for instance maybe use the same format as .torrent files and reuse some code from torrent_info The mapped_files needs to be read both in the network thread and in the disk thread, since they both have their own mapped files structures which are kept in sync |
make this more generic to not just work if files have been
renamed, but also if they have been merged into a single file for instance
maybe use the same format as .torrent files and reuse some code from torrent_info
The mapped_files needs to be read both in the network thread
and in the disk thread, since they both have their own mapped files structures
-which are kept in syncsrc/torrent.cpp:5025 if (m_seed_mode) m_verified.resize(m_torrent_file->num_pieces(), false);
+which are kept in syncsrc/torrent.cpp:5094 if (m_seed_mode) m_verified.resize(m_torrent_file->num_pieces(), false);
super_seeding(rd.dict_find_int_value("super_seeding", 0));
m_last_scrape = rd.dict_find_int_value("last_scrape", 0);
@@ -2920,12 +2830,12 @@ which are kept in syncsrc/torrent.cpp:5025relevance 0 | src/torrent.cpp:5161 | if this is a merkle torrent and we can't restore the tree, we need to wipe all the bits in the have array, but not necessarily we might want to do a full check to see if we have all the pieces. This is low priority since almost no one uses merkle torrents |
|
if this is a merkle torrent and we can't
+ | ||
relevance 0 | src/torrent.cpp:5230 | if this is a merkle torrent and we can't restore the tree, we need to wipe all the bits in the have array, but not necessarily we might want to do a full check to see if we have all the pieces. This is low priority since almost no one uses merkle torrents |
if this is a merkle torrent and we can't
restore the tree, we need to wipe all the
bits in the have array, but not necessarily
we might want to do a full check to see if we have
all the pieces. This is low priority since almost
-no one uses merkle torrentssrc/torrent.cpp:5161 add_web_seed(url, web_seed_entry::http_seed);
+no one uses merkle torrentssrc/torrent.cpp:5230 add_web_seed(url, web_seed_entry::http_seed);
}
}
@@ -2973,12 +2883,12 @@ no one uses merkle torrentssrc/torrent.cpp:5161 | ||
relevance 0 | src/torrent.cpp:5349 | make this more generic to not just work if files have been renamed, but also if they have been merged into a single file for instance. using file_base |
make this more generic to not just work if files have been
+ ret["num_downloaded"] = m_downloaded;
+ | ||
relevance 0 | src/torrent.cpp:5418 | make this more generic to not just work if files have been renamed, but also if they have been merged into a single file for instance. using file_base |
make this more generic to not just work if files have been
renamed, but also if they have been merged into a single file for instance.
-using file_basesrc/torrent.cpp:5349 entry::string_type& pieces = ret["pieces"].string();
+using file_basesrc/torrent.cpp:5418 entry::string_type& pieces = ret["pieces"].string();
pieces.resize(m_torrent_file->num_pieces());
if (is_seed())
{
@@ -3029,10 +2939,10 @@ using file_basesrc/torrent.cpp:5349relevance 0 | src/torrent.cpp:7823 | go through the pieces we have and count the total number of downloaders we have. Only count peers that are interested in us since some peers might not send have messages for pieces we have it num_interested == 0, we need to pick a new piece |
|
go through the pieces we have and count the total number
+ | ||
relevance 0 | src/torrent.cpp:7901 | go through the pieces we have and count the total number of downloaders we have. Only count peers that are interested in us since some peers might not send have messages for pieces we have it num_interested == 0, we need to pick a new piece |
go through the pieces we have and count the total number
of downloaders we have. Only count peers that are interested in us
since some peers might not send have messages for pieces we have
-it num_interested == 0, we need to pick a new piecesrc/torrent.cpp:7823 }
+it num_interested == 0, we need to pick a new piecesrc/torrent.cpp:7901 }
rarest_pieces.clear();
rarest_rarity = pp.peer_count;
@@ -3083,10 +2993,10 @@ it num_interested == 0, we need to pick a new piecesrc/torrent.cpp:7823
{
// we don't keep track of availability, do it the expensive way
// do a linear search from the first piece
- | ||
relevance 0 | src/torrent.cpp:8050 | if there's been long enough since we requested something from this piece, request one of the backup blocks (the one with the least number of requests to it) and update the last request timestamp |
if there's been long enough since we requested something
+ | ||
relevance 0 | src/torrent.cpp:8128 | if there's been long enough since we requested something from this piece, request one of the backup blocks (the one with the least number of requests to it) and update the last request timestamp |
if there's been long enough since we requested something
from this piece, request one of the backup blocks (the one with
the least number of requests to it) and update the last request
-timestampsrc/torrent.cpp:8050 std::vector<pending_block> const& rq = c.request_queue();
+timestampsrc/torrent.cpp:8128 std::vector<pending_block> const& rq = c.request_queue();
bool added_request = false;
@@ -3137,7 +3047,7 @@ timestampsrc/torrent.cpp:8050relevance 0 | src/udp_tracker_connection.cpp:548 | it would be more efficient to not use a string here. however, the problem is that some trackers will respond with actual strings. For example i2p trackers |
|
it would be more efficient to not use a string here.
+ | ||
relevance 0 | src/udp_tracker_connection.cpp:548 | it would be more efficient to not use a string here. however, the problem is that some trackers will respond with actual strings. For example i2p trackers |
it would be more efficient to not use a string here.
however, the problem is that some trackers will respond
with actual strings. For example i2p trackerssrc/udp_tracker_connection.cpp:548 }
@@ -3180,7 +3090,7 @@ with actual strings. For example i2p trackerssrc/udp_tracker_connection
}
cb->tracker_response(tracker_req(), m_target.address(), ip_list
- , peer_list, interval, min_interval, complete, incomplete, address(), "" /*trackerid*/);
+ , peer_list, interval, min_interval, complete, incomplete, 0, address(), "" /*trackerid*/);
close();
return true;
@@ -3190,7 +3100,7 @@ with actual strings. For example i2p trackerssrc/udp_tracker_connection
{
restart_read_timeout();
int action = detail::read_int32(buf);
- | ||
relevance 0 | include/libtorrent/config.hpp:283 | Make this count Unicode characters instead of bytes on windows |
Make this count Unicode characters instead of bytes on windowsinclude/libtorrent/config.hpp:283
+ | ||
relevance 0 | include/libtorrent/config.hpp:283 | Make this count Unicode characters instead of bytes on windows |
Make this count Unicode characters instead of bytes on windowsinclude/libtorrent/config.hpp:283
// ==== eCS(OS/2) ===
#elif defined __OS2__
#define TORRENT_OS2
@@ -3241,7 +3151,7 @@ with actual strings. For example i2p trackerssrc/udp_tracker_connection
#include <stdarg.h>
inline int snprintf(char* buf, int len, char const* fmt, ...)
- | ||
relevance 0 | include/libtorrent/proxy_base.hpp:152 | it would be nice to remember the bind port and bind once we know where the proxy is m_sock.bind(endpoint, ec); |
it would be nice to remember the bind port and bind once we know where the proxy is
+ | ||
relevance 0 | include/libtorrent/proxy_base.hpp:152 | it would be nice to remember the bind port and bind once we know where the proxy is m_sock.bind(endpoint, ec); |
it would be nice to remember the bind port and bind once we know where the proxy is
m_sock.bind(endpoint, ec);include/libtorrent/proxy_base.hpp:152 {
return m_sock.set_option(opt, ec);
}
@@ -3293,4 +3203,55 @@ m_sock.bind(endpoint, ec);include/libtorrent/proxy_base.hpp:152 | ||
relevance 0 | include/libtorrent/torrent_info.hpp:108 | include the number of peers received from this tracker, at last announce |
include the number of peers received from this tracker, at last announceinclude/libtorrent/torrent_info.hpp:108 std::string url;
+ std::string trackerid;
+
+ // if this tracker has returned an error or warning message
+ // that message is stored here
+ std::string message;
+
+ // if this tracker failed the last time it was contacted
+ // this error code specifies what error occurred
+ error_code last_error;
+
+ int next_announce_in() const;
+ int min_announce_in() const;
+
+ // the time of next tracker announce
+ ptime next_announce;
+
+ // no announces before this time
+ ptime min_announce;
+
+
+ // if this tracker has returned scrape data, these fields are filled
+ // in with valid numbers. Otherwise they are set to -1.
+ // the number of current downloaders
+ int scrape_incomplete;
+ // the number of current seeds
+ int scrape_complete;
+ // the cumulative number of completed downloads, ever
+ int scrape_downloaded;
+
+ // the tier this tracker belongs to
+ boost::uint8_t tier;
+
+ // the number of times this tracker can fail
+ // in a row before it's removed. 0 means unlimited
+ boost::uint8_t fail_limit;
+
+ // the number of times in a row this tracker has failed
+ boost::uint8_t fails:7;
+
+ // true if we're currently trying to announce with
+ // this tracker
+ bool updating:1;
+
+ enum tracker_source
+ {
+ source_torrent = 1,
+ source_client = 2,
+ source_magnet_link = 4,
+ source_tex = 8
+ };
| ||