relevance 3 | ../src/file.cpp:481 | find out what error code is reported when the filesystem does not support hard links. |
find out what error code is reported when the filesystem
+does not support hard links.../src/file.cpp:481 {
+#ifdef TORRENT_WINDOWS
- TORRENT_ASSERT(m_blocks_in_last_piece <= m_blocks_per_piece);
- }
-
- void piece_picker::piece_info(int index, piece_picker::downloading_piece& st) const
- {
-#ifdef TORRENT_EXPENSIVE_INVARIANT_CHECKS
- TORRENT_PIECE_PICKER_INVARIANT_CHECK;
+#if TORRENT_USE_WSTRING
+#define CreateHardLink_ CreateHardLinkW
+ std::wstring n_exist = convert_to_wstring(file);
+ std::wstring n_link = convert_to_wstring(link);
+#else
+#define CreateHardLink_ CreateHardLinkA
+ std::string n_exist = convert_to_native(file);
+ std::string n_link = convert_to_native(link);
#endif
-
- TORRENT_ASSERT(index >= 0);
- TORRENT_ASSERT(index < int(m_piece_map.size()));
-
- int state = m_piece_map[index].download_queue();
- if (state != piece_pos::piece_open)
+ BOOL ret = CreateHardLink(n_link.c_str(), n_exist.c_str(), NULL);
+ if (ret)
{
- std::vector<downloading_piece>::const_iterator piece = find_dl_piece(state, index);
- TORRENT_ASSERT(piece != m_downloads[state].end());
- st = *piece;
+ ec.clear();
return;
}
- st.info_idx = 0;
- st.index = index;
- st.writing = 0;
- st.requested = 0;
- if (m_piece_map[index].have())
+
+ // something failed. Does the filesystem not support hard links?
+
+ // it's possible CreateHardLink will copy the file internally too,
+ // if the filesystem does not support it.
+ ec.assign(GetLastError(), system_category());
+ return;
+
+#else
+
+ std::string n_exist = convert_to_native(file);
+ std::string n_link = convert_to_native(link);
+
+ // assume posix's link() function exists
+ int ret = ::link(n_exist.c_str(), n_link.c_str());
+
+ if (ret == 0)
{
- st.finished = blocks_in_piece(index);
+ ec.clear();
return;
}
- st.finished = 0;
- }
- piece_picker::piece_stats_t piece_picker::piece_stats(int index) const
+ // most errors are passed through, except for the ones that indicate that
+ // hard links are not supported and require a copy.
+ | ||
relevance 3 | ../src/upnp.cpp:72 | listen_interface is not used. It's meant to bind the broadcast socket |
listen_interface is not used. It's meant to bind the broadcast socket../src/upnp.cpp:72#include <asio/ip/multicast.hpp>
+#else
+#include <boost/asio/ip/host_name.hpp>
+#include <boost/asio/ip/multicast.hpp>
+#endif
+#include <cstdlib>
+
+namespace libtorrent {
+
+namespace upnp_errors
+{
+ boost::system::error_code make_error_code(error_code_enum e)
{
- TORRENT_ASSERT(index >= 0 && index < int(m_piece_map.size()));
- piece_pos const& pp = m_piece_map[index];
- piece_stats_t ret = {
- pp.peer_count + m_seeds,
- pp.priority(this),
- pp.have(),
- pp.downloading()
- };
- return ret;
+ return error_code(e, get_upnp_category());
}
- piece_picker::dlpiece_iter piece_picker::add_download_piece(int piece)
- | ||
relevance 3 | ../src/kademlia/get_item.cpp:220 | we don't support CAS errors here! we need a custom observer |
we don't support CAS errors here! we need a custom observer../src/kademlia/get_item.cpp:220 TORRENT_LOG(node) << "sending put [ v: \"" << m_data.value()
+} // upnp_errors namespace
+
+static error_code ec;
+
+upnp::upnp(io_service& ios
+ , address const& listen_interface, std::string const& user_agent
+ , portmap_callback_t const& cb, log_callback_t const& lcb
+ , bool ignore_nonrouters)
+ : m_user_agent(user_agent)
+ , m_callback(cb)
+ , m_log_callback(lcb)
+ , m_retry_count(0)
+ , m_io_service(ios)
+ , m_resolver(ios)
+ , m_socket(udp::endpoint(address_v4::from_string("239.255.255.250", ec), 1900))
+ , m_broadcast_timer(ios)
+ , m_refresh_timer(ios)
+ , m_map_timer(ios)
+ , m_disabled(false)
+ , m_closing(false)
+ , m_ignore_non_routers(ignore_nonrouters)
+ , m_last_if_update(min_time())
+{
+ TORRENT_ASSERT(cb);
+}
+
+void upnp::start(void* state)
+{
+ error_code ec;
+ m_socket.open(boost::bind(&upnp::on_reply, self(), _1, _2, _3)
+ , m_refresh_timer.get_io_service(), ec);
+
+ if (state)
+ {
+ upnp_state_t* s = (upnp_state_t*)state;
+ | ||
relevance 3 | ../src/kademlia/get_item.cpp:220 | we don't support CAS errors here! we need a custom observer |
we don't support CAS errors here! we need a custom observer../src/kademlia/get_item.cpp:220 TORRENT_LOG(node) << "sending put [ v: \"" << m_data.value()
<< "\" seq: " << (m_data.is_mutable() ? m_data.seq() : -1)
<< " nodes: " << v.size() << " ]" ;
#endif
@@ -127,12 +170,446 @@ void get_item_observer::reply(msg const& m)
char const* sig = NULL;
boost::uint64_t seq = 0;
- lazy_entry const* r = m.message.dict_find_dict("r");
- | ||
relevance 2 | ../src/peer_connection.cpp:2989 | since we throw away the queue entry once we issue the disk job, this may happen. Instead, we should keep the queue entry around, mark it as having been requested from disk and once the disk job comes back, discard it if it has been cancelled. Maybe even be able to cancel disk jobs? |
since we throw away the queue entry once we issue
+ bdecode_node r = m.message.dict_find_dict("r");
+ | ||
relevance 3 | ../src/kademlia/logging.cpp:38 | replace this logging with alerts |
replace this logging with alerts../src/kademlia/logging.cpp:38
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#include "libtorrent/kademlia/logging.hpp"
+#include "libtorrent/time.hpp"
+
+namespace libtorrent { namespace dht
+{
+ log_event::log_event(log& log)
+ : log_(log)
+ {
+ if (!log_.enabled()) return;
+
+ static const time_point start = clock_type::now();
+ char ret[200];
+ snprintf(ret, sizeof(ret), "%" PRId64
+ , total_microseconds(clock_type::now() - start));
+ log_ << ret << " [" << log.id() << "] ";
+ }
+
+ log_event::~log_event()
+ {
+ if (log_.enabled())
+ {
+ log_ << "\n";
+ log_.flush();
+ }
+ }
+
+}}
+
+ | ||
relevance 3 | ../include/libtorrent/http_tracker_connection.hpp:105 | add a unit test for this function |
add a unit test for this function../include/libtorrent/http_tracker_connection.hpp:105
+ void on_filter(http_connection& c, std::vector<tcp::endpoint>& endpoints);
+ void on_connect(http_connection& c);
+ void on_response(error_code const& ec, http_parser const& parser
+ , char const* data, int size);
+
+ virtual void on_timeout(error_code const&) {}
+
+ tracker_manager& m_man;
+ boost::shared_ptr<http_connection> m_tracker_connection;
+ address m_tracker_ip;
+#if TORRENT_USE_I2P
+ i2p_connection* m_i2p_conn;
+#endif
+ };
+
+ TORRENT_EXTRA_EXPORT tracker_response parse_tracker_response(
+ char const* data, int size, error_code& ec
+ , bool scrape_request, sha1_hash scrape_ih);
+
+ TORRENT_EXTRA_EXPORT bool extract_peer_info(bdecode_node const& info
+ , peer_entry& ret, error_code& ec);
+}
+
+#endif // TORRENT_HTTP_TRACKER_CONNECTION_HPP_INCLUDED
+
+ | ||
relevance 3 | ../include/libtorrent/kademlia/node.hpp:200 | rename this to just node |
rename this to just node../include/libtorrent/kademlia/node.hpp:200
+struct count_peers
+{
+ int& count;
+ count_peers(int& c): count(c) {}
+ void operator()(std::pair<libtorrent::dht::node_id
+ , libtorrent::dht::torrent_entry> const& t)
+ {
+ count += t.second.peers.size();
+ }
+};
+
+struct udp_socket_interface
+{
+ virtual bool has_quota() = 0;
+ virtual bool send_packet(entry& e, udp::endpoint const& addr, int flags) = 0;
+protected:
+ ~udp_socket_interface() {}
+};
+
+class TORRENT_EXTRA_EXPORT node : boost::noncopyable
+ {
+typedef std::map<node_id, torrent_entry> table_t;
+typedef std::map<node_id, dht_immutable_item> dht_immutable_table_t;
+typedef std::map<node_id, dht_mutable_item> dht_mutable_table_t;
+
+public:
+ node(udp_socket_interface* sock
+ , libtorrent::dht_settings const& settings, node_id nid
+ , dht_observer* observer, counters& cnt);
+
+ virtual ~node() {}
+
+ void tick();
+ void bootstrap(std::vector<udp::endpoint> const& nodes
+ , find_data::nodes_callback const& f);
+ void add_router_node(udp::endpoint router);
+
+ void unreachable(udp::endpoint const& ep);
+ void incoming(msg const& m);
+
+ int num_torrents() const { return m_map.size(); }
+ int num_peers() const
+ {
+ int ret = 0;
+ std::for_each(m_map.begin(), m_map.end(), count_peers(ret));
+ return ret;
+ }
+
+ int bucket_size(int bucket);
+
+ | ||
relevance 2 | ../src/alert.cpp:1444 | the salt here is allocated on the heap. It would be nice to allocate in in the stack_allocator |
the salt here is allocated on the heap. It would be nice to
+allocate in in the stack_allocator../src/alert.cpp:1444 , operation_names[op]
+ , error.value()
+ , convert_from_native(error.message()).c_str());
+ return msg;
+ }
+
+ dht_immutable_item_alert::dht_immutable_item_alert(aux::stack_allocator&
+ , sha1_hash const& t, entry const& i)
+ : target(t), item(i)
+ {}
+
+ std::string dht_immutable_item_alert::message() const
+ {
+ char msg[1050];
+ snprintf(msg, sizeof(msg), "DHT immutable item %s [ %s ]"
+ , to_hex(target.to_string()).c_str()
+ , item.to_string().c_str());
+ return msg;
+ }
+
+ dht_mutable_item_alert::dht_mutable_item_alert(aux::stack_allocator&
+ , boost::array<char, 32> k
+ , boost::array<char, 64> sig
+ , boost::uint64_t sequence
+ , std::string const& s
+ , entry const& i)
+ : key(k), signature(sig), seq(sequence), salt(s), item(i)
+ {}
+
+ std::string dht_mutable_item_alert::message() const
+ {
+ char msg[1050];
+ snprintf(msg, sizeof(msg), "DHT mutable item (key=%s salt=%s seq=%" PRId64 ") [ %s ]"
+ , to_hex(std::string(&key[0], 32)).c_str()
+ , salt.c_str()
+ , seq
+ , item.to_string().c_str());
+ return msg;
+ }
+
+ dht_put_alert::dht_put_alert(aux::stack_allocator&, sha1_hash const& t)
+ : target(t)
+ , seq(0)
+ {}
+
+ dht_put_alert::dht_put_alert(aux::stack_allocator&
+ , boost::array<char, 32> key
+ , boost::array<char, 64> sig
+ , std::string s
+ , boost::uint64_t sequence_number)
+ : target(0)
+ | ||
relevance 2 | ../src/alert_manager.cpp:97 | keep a count of the number of threads waiting. Only if it's > 0 notify them |
keep a count of the number of threads waiting. Only if it's
+> 0 notify them../src/alert_manager.cpp:97 for (ses_extension_list_t::iterator i = m_ses_extensions.begin()
+ , end(m_ses_extensions.end()); i != end; ++i)
+ {
+ (*i)->on_alert(a);
+ }
+#endif
+ if (a->type() == save_resume_data_failed_alert::alert_type
+ || a->type() == save_resume_data_alert::alert_type)
+ ++m_num_queued_resume;
+
+ if (m_alerts[m_generation].size() == 1)
+ {
+ lock.unlock();
+
+ // we just posted to an empty queue. If anyone is waiting for
+ // alerts, we need to notify them. Also (potentially) call the
+ // user supplied m_notify callback to let the client wake up its
+ // message loop to poll for alerts.
+ if (m_notify) m_notify();
+
+ m_condition.notify_all();
+ }
+ }
+
+#ifndef TORRENT_NO_DEPRECATE
+
+ bool alert_manager::maybe_dispatch(alert const& a)
+ {
+ if (m_dispatch)
+ {
+ m_dispatch(a.clone());
+ return true;
+ }
+ return false;
+ }
+
+ void alert_manager::set_dispatch_function(
+ boost::function<void(std::auto_ptr<alert>)> const& fun)
+ {
+ mutex::scoped_lock lock(m_mutex);
+
+ m_dispatch = fun;
+
+ heterogeneous_queue<alert> storage;
+ m_alerts[m_generation].swap(storage);
+ lock.unlock();
+
+ std::vector<alert*> alerts;
+ storage.get_pointers(alerts);
+
+ for (std::vector<alert*>::iterator i = alerts.begin()
+ | ||
relevance 2 | ../src/block_cache.cpp:1690 | turn these return values into enums returns -1: block not in cache -2: out of memory |
turn these return values into enums
+returns
+-1: block not in cache
+-2: out of memory../src/block_cache.cpp:1690 {
+ TORRENT_PIECE_ASSERT(!p.blocks[k].dirty, &p);
+ TORRENT_PIECE_ASSERT(!p.blocks[k].pending, &p);
+ TORRENT_PIECE_ASSERT(p.blocks[k].refcount == 0, &p);
+ }
+ TORRENT_PIECE_ASSERT(p.blocks[k].refcount >= 0, &p);
+ num_refcount += p.blocks[k].refcount;
+ }
+ TORRENT_PIECE_ASSERT(num_blocks == p.num_blocks, &p);
+ TORRENT_PIECE_ASSERT(num_pending <= p.refcount, &p);
+ TORRENT_PIECE_ASSERT(num_refcount == p.refcount, &p);
+ TORRENT_PIECE_ASSERT(num_dirty == p.num_dirty, &p);
+ }
+ TORRENT_ASSERT(m_read_cache_size == cached_read_blocks);
+ TORRENT_ASSERT(m_write_cache_size == cached_write_blocks);
+ TORRENT_ASSERT(m_pinned_blocks == num_pinned);
+ TORRENT_ASSERT(m_write_cache_size + m_read_cache_size <= in_use());
+}
+#endif
+
+
+ int block_cache::copy_from_piece(cached_piece_entry* pe, disk_io_job* j
+ , bool expect_no_fail)
+{
+ INVARIANT_CHECK;
+ TORRENT_UNUSED(expect_no_fail);
+
+ TORRENT_PIECE_ASSERT(j->buffer == 0, pe);
+ TORRENT_PIECE_ASSERT(pe->in_use, pe);
+
+ // copy from the cache and update the last use timestamp
+ int block = j->d.io.offset / block_size();
+ int block_offset = j->d.io.offset & (block_size()-1);
+ int buffer_offset = 0;
+ int size = j->d.io.buffer_size;
+ int blocks_to_read = block_offset > 0 && (size > block_size() - block_offset) ? 2 : 1;
+ TORRENT_PIECE_ASSERT(size <= block_size(), pe);
+ const int start_block = block;
+
+#ifdef TORRENT_DEBUG
+ int piece_size = j->storage->files()->piece_size(j->piece);
+ int blocks_in_piece = (piece_size + block_size() - 1) / block_size();
+ TORRENT_PIECE_ASSERT(start_block < blocks_in_piece, pe);
+#endif
+
+ // if there's no buffer, we don't have this block in
+ // the cache, and we're not currently reading it in either
+ // since it's not pending
+
+ if (inc_block_refcount(pe, start_block, ref_reading) == false)
+ {
+ | ||
relevance 2 | ../src/escape_string.cpp:209 | this should probably be moved into string_util.cpp |
this should probably be moved into string_util.cpp../src/escape_string.cpp:209 }
+ return false;
+ }
+
+ void convert_path_to_posix(std::string& path)
+ {
+ for (std::string::iterator i = path.begin()
+ , end(path.end()); i != end; ++i)
+ if (*i == '\\') *i = '/';
+ }
+
+#ifdef TORRENT_WINDOWS
+ void convert_path_to_windows(std::string& path)
+ {
+ for (std::string::iterator i = path.begin()
+ , end(path.end()); i != end; ++i)
+ if (*i == '/') *i = '\\';
+ }
+#endif
+
+ std::string read_until(char const*& str, char delim, char const* end)
+ {
+ TORRENT_ASSERT(str <= end);
+
+ std::string ret;
+ while (str != end && *str != delim)
+ {
+ ret += *str;
+ ++str;
+ }
+ // skip the delimiter as well
+ while (str != end && *str == delim) ++str;
+ return ret;
+ }
+
+ std::string maybe_url_encode(std::string const& url)
+ {
+ std::string protocol, host, auth, path;
+ int port;
+ error_code ec;
+ boost::tie(protocol, auth, host, port, path) = parse_url_components(url, ec);
+ if (ec) return url;
+
+ // first figure out if this url contains unencoded characters
+ if (!need_encoding(path.c_str(), path.size()))
+ return url;
+
+ char msg[TORRENT_MAX_PATH*4];
+ snprintf(msg, sizeof(msg), "%s://%s%s%s%s%s%s", protocol.c_str(), auth.c_str()
+ , auth.empty()?"":"@", host.c_str()
+ , port == -1 ? "" : ":"
+ | ||
relevance 2 | ../src/file.cpp:505 | test this on a FAT volume to see what error we get! |
test this on a FAT volume to see what error we get!../src/file.cpp:505 // if the filesystem does not support it.
+ ec.assign(GetLastError(), system_category());
+ return;
+
+#else
+
+ std::string n_exist = convert_to_native(file);
+ std::string n_link = convert_to_native(link);
+
+ // assume posix's link() function exists
+ int ret = ::link(n_exist.c_str(), n_link.c_str());
+
+ if (ret == 0)
+ {
+ ec.clear();
+ return;
+ }
+
+ // most errors are passed through, except for the ones that indicate that
+ // hard links are not supported and require a copy.
+ if (errno != EMLINK || errno != EXDEV)
+ {
+ // some error happened, report up to the caller
+ ec.assign(errno, generic_category());
+ return;
+ }
+#endif
+
+ // if we get here, we should copy the file
+ copy_file(file, link, ec);
+ }
+
+ bool is_directory(std::string const& f, error_code& ec)
+ {
+ ec.clear();
+ error_code e;
+ file_status s;
+ stat_file(f, &s, e);
+ if (!e && s.mode & file_status::directory) return true;
+ ec = e;
+ return false;
+ }
+
+ void recursive_copy(std::string const& old_path, std::string const& new_path, error_code& ec)
+ {
+ TORRENT_ASSERT(!ec);
+ if (is_directory(old_path, ec))
+ {
+ create_directory(new_path, ec);
+ if (ec) return;
+ for (directory i(old_path, ec); !i.done(); i.next(ec))
+ | ||
relevance 2 | ../src/peer_connection.cpp:2338 | this should probably be based on time instead of number of request messages. For a very high throughput connection, 300 may be a legitimate number of requests to have in flight when getting choked |
this should probably be based on time instead of number
+of request messages. For a very high throughput connection, 300
+may be a legitimate number of requests to have in flight when
+getting choked../src/peer_connection.cpp:2338 , "piece: %d s: %d l: %d invalid request"
+ , r.piece , r.start , r.length);
+#endif
+
+ write_reject_request(r);
+ ++m_num_invalid_requests;
+
+ if (t->alerts().should_post<invalid_request_alert>())
+ {
+ // msvc 12 appears to deduce the rvalue reference template
+ // incorrectly for bool temporaries. So, create a dummy instance
+ bool peer_interested = bool(m_peer_interested);
+ t->alerts().emplace_alert<invalid_request_alert>(
+ t->get_handle(), m_remote, m_peer_id, r
+ , t->has_piece_passed(r.piece), peer_interested, false);
+ }
+
+ // every ten invalid request, remind the peer that it's choked
+ if (!m_peer_interested && m_num_invalid_requests % 10 == 0 && m_choked)
+ {
+ if (m_num_invalid_requests > 300 && !m_peer_choked
+ && can_disconnect(error_code(errors::too_many_requests_when_choked
+ , get_libtorrent_category())))
+ {
+ disconnect(errors::too_many_requests_when_choked, op_bittorrent, 2);
+ return;
+ }
+#ifndef TORRENT_DISABLE_LOGGING
+ peer_log(peer_log_alert::outgoing_message, "CHOKE");
+#endif
+ write_choke();
+ }
+
+ return;
+ }
+
+ // if we have choked the client
+ // ignore the request
+ const int blocks_per_piece = static_cast<int>(
+ (t->torrent_file().piece_length() + t->block_size() - 1) / t->block_size());
+
+ // disconnect peers that downloads more than foo times an allowed
+ // fast piece
+ if (m_choked && fast_idx != -1 && m_accept_fast_piece_cnt[fast_idx] >= 3 * blocks_per_piece
+ && can_disconnect(error_code(errors::too_many_requests_when_choked, get_libtorrent_category())))
+ {
+ disconnect(errors::too_many_requests_when_choked, op_bittorrent, 2);
+ return;
+ }
+
+ if (m_choked && fast_idx == -1)
+ | ||
relevance 2 | ../src/peer_connection.cpp:3045 | since we throw away the queue entry once we issue the disk job, this may happen. Instead, we should keep the queue entry around, mark it as having been requested from disk and once the disk job comes back, discard it if it has been cancelled. Maybe even be able to cancel disk jobs? |
since we throw away the queue entry once we issue
the disk job, this may happen. Instead, we should keep the
queue entry around, mark it as having been requested from
disk and once the disk job comes back, discard it if it has
-been cancelled. Maybe even be able to cancel disk jobs?../src/peer_connection.cpp:2989
+been cancelled. Maybe even be able to cancel disk jobs?../src/peer_connection.cpp:3045
std::vector<peer_request>::iterator i
= std::find(m_requests.begin(), m_requests.end(), r);
@@ -144,16 +621,16 @@ been cancelled. Maybe even be able to cancel disk jobs?../src/peer_conn
if (m_requests.empty())
m_counters.inc_stats_counter(counters::num_peers_up_requests, -1);
-#if defined TORRENT_LOGGING
- peer_log("==> REJECT_PIECE [ piece: %d s: %x l: %x ] cancelled"
+#ifndef TORRENT_DISABLE_LOGGING
+ peer_log(peer_log_alert::outgoing_message, "REJECT_PIECE", "piece: %d s: %x l: %x cancelled"
, r.piece , r.start , r.length);
#endif
write_reject_request(r);
}
else
{
-#if defined TORRENT_LOGGING
- peer_log("*** GOT CANCEL NOT IN THE QUEUE");
+#ifndef TORRENT_DISABLE_LOGGING
+ peer_log(peer_log_alert::info, "INVALID_CANCEL", "got cancel not in the queue");
#endif
}
}
@@ -167,8 +644,8 @@ been cancelled. Maybe even be able to cancel disk jobs?../src/peer_conn
TORRENT_ASSERT(is_single_thread());
INVARIANT_CHECK;
-#if defined TORRENT_LOGGING
- peer_log("<== DHT_PORT [ p: %d ]", listen_port);
+#ifndef TORRENT_DISABLE_LOGGING
+ peer_log(peer_log_alert::incoming_message, "DHT_PORT", "p: %d", listen_port);
#endif
#ifndef TORRENT_DISABLE_DHT
m_ses.add_dht_node(udp::endpoint(
@@ -183,18 +660,18 @@ been cancelled. Maybe even be able to cancel disk jobs?../src/peer_conn
void peer_connection::incoming_have_all()
{
TORRENT_ASSERT(is_single_thread());
- | ||
relevance 2 | ../src/peer_connection.cpp:4575 | use a deadline_timer for timeouts. Don't rely on second_tick()! Hook this up to connect timeout as well. This would improve performance because of less work in second_tick(), and might let use remove ticking entirely eventually |
use a deadline_timer for timeouts. Don't rely on second_tick()!
+ | ||
relevance 2 | ../src/peer_connection.cpp:4695 | use a deadline_timer for timeouts. Don't rely on second_tick()! Hook this up to connect timeout as well. This would improve performance because of less work in second_tick(), and might let use remove ticking entirely eventually |
use a deadline_timer for timeouts. Don't rely on second_tick()!
Hook this up to connect timeout as well. This would improve performance
because of less work in second_tick(), and might let use remove ticking
-entirely eventually../src/peer_connection.cpp:4575 if (is_i2p(*m_socket))
- connect_timeout += 20;
+entirely eventually../src/peer_connection.cpp:4695 connect_timeout += 20;
#endif
if (d > seconds(connect_timeout)
&& can_disconnect(error_code(errors::timed_out, get_libtorrent_category())))
{
-#if defined TORRENT_LOGGING
- peer_log("*** CONNECT FAILED [ waited %d seconds ] ***", int(total_seconds(d)));
+#ifndef TORRENT_DISABLE_LOGGING
+ peer_log(peer_log_alert::info, "CONNECT_FAILED", "waited %d seconds"
+ , int(total_seconds(d)));
#endif
connect_failed(errors::timed_out);
return;
@@ -209,8 +686,9 @@ entirely eventually../src/peer_connection.cpp:4575 if (may_timeout && d > seconds(timeout()) && !m_connecting && m_reading_bytes == 0
&& can_disconnect(error_code(errors::timed_out_inactivity, get_libtorrent_category())))
{
-#if defined TORRENT_LOGGING
- peer_log("*** LAST ACTIVITY [ %d seconds ago ] ***", int(total_seconds(d)));
+#ifndef TORRENT_DISABLE_LOGGING
+ peer_log(peer_log_alert::info, "LAST_ACTIVITY", "%d seconds ago"
+ , int(total_seconds(d)));
#endif
disconnect(errors::timed_out_inactivity, op_bittorrent);
return;
@@ -222,8 +700,9 @@ entirely eventually../src/peer_connection.cpp:4575../src/peer_connection.cpp:4575relevance 2 | ../src/piece_picker.cpp:1966 | make the 2048 limit configurable |
|
make the 2048 limit configurable../src/piece_picker.cpp:1966 // only one of rarest_first or sequential can be set
+ | ||
relevance 2 | ../src/peer_list.cpp:495 | it would be nice if there was a way to iterate over these torrent_peer objects in the order they are allocated in the pool instead. It would probably be more efficient |
it would be nice if there was a way to iterate over these
+torrent_peer objects in the order they are allocated in the pool
+instead. It would probably be more efficient../src/peer_list.cpp:495 , int session_time, torrent_state* state)
+ {
+ TORRENT_ASSERT(is_single_thread());
+ INVARIANT_CHECK;
+
+ const int candidate_count = 10;
+ peers.reserve(candidate_count);
+
+ int erase_candidate = -1;
+
+ if (m_finished != state->is_finished)
+ recalculate_connect_candidates(state);
+
+ external_ip const& external = *state->ip;
+ int external_port = state->port;
+
+ if (m_round_robin >= int(m_peers.size())) m_round_robin = 0;
+
+ int max_peerlist_size = state->max_peerlist_size;
+
+ for (int iterations = (std::min)(int(m_peers.size()), 300);
+ iterations > 0; --iterations)
+ {
+ ++state->loop_counter;
+
+ if (m_round_robin >= int(m_peers.size())) m_round_robin = 0;
+
+ torrent_peer& pe = *m_peers[m_round_robin];
+ TORRENT_ASSERT(pe.in_use);
+ int current = m_round_robin;
+
+ // if the number of peers is growing large
+ // we need to start weeding.
+
+ if (int(m_peers.size()) >= max_peerlist_size * 0.95
+ && max_peerlist_size > 0)
+ {
+ if (is_erase_candidate(pe)
+ && (erase_candidate == -1
+ || !compare_peer_erase(*m_peers[erase_candidate], pe)))
+ {
+ if (should_erase_immediately(pe))
+ {
+ if (erase_candidate > current) --erase_candidate;
+ erase_peer(m_peers.begin() + current, state);
+ continue;
+ }
+ else
+ {
+ erase_candidate = current;
+ }
+ | ||
relevance 2 | ../src/piece_picker.cpp:1996 | make the 2048 limit configurable |
make the 2048 limit configurable../src/piece_picker.cpp:1996 // only one of rarest_first or sequential can be set
void piece_picker::pick_pieces(bitfield const& pieces
, std::vector<piece_block>& interesting_blocks, int num_blocks
@@ -288,8 +818,8 @@ entirely eventually../src/peer_connection.cpp:4575relevance 2 | ../src/piece_picker.cpp:2575 | the first_block returned here is the largest free range, not the first-fit range, which would be better |
|
the first_block returned here is the largest free range, not
-the first-fit range, which would be better../src/piece_picker.cpp:2575 , end(m_block_info.end()); i != end; ++i)
+ | ||
relevance 2 | ../src/piece_picker.cpp:2605 | the first_block returned here is the largest free range, not the first-fit range, which would be better |
the first_block returned here is the largest free range, not
+the first-fit range, which would be better../src/piece_picker.cpp:2605 , end(m_block_info.end()); i != end; ++i)
{
TORRENT_ASSERT(i->peer == 0 || static_cast<torrent_peer*>(i->peer)->in_use);
}
@@ -340,11 +870,11 @@ the first-fit range, which would be better../src/piece_picker.cpp:2575<
exclusive = false;
if (info.state == piece_picker::block_info::state_requested
&& info.peer != 0)
- | ||
relevance 2 | ../src/piece_picker.cpp:3360 | it would be nice if this could be folded into lock_piece() the main distinction is that this also maintains the m_num_passed counter and the passed_hash_check member Is there ever a case where we call write filed without also locking the piece? Perhaps write_failed() should imply locking it. |
it would be nice if this could be folded into lock_piece()
+ | ||
relevance 2 | ../src/piece_picker.cpp:3390 | it would be nice if this could be folded into lock_piece() the main distinction is that this also maintains the m_num_passed counter and the passed_hash_check member Is there ever a case where we call write filed without also locking the piece? Perhaps write_failed() should imply locking it. |
it would be nice if this could be folded into lock_piece()
the main distinction is that this also maintains the m_num_passed
counter and the passed_hash_check member
Is there ever a case where we call write filed without also locking
-the piece? Perhaps write_failed() should imply locking it.../src/piece_picker.cpp:3360 int state = m_piece_map[piece].download_queue();
+the piece? Perhaps write_failed() should imply locking it.../src/piece_picker.cpp:3390 int state = m_piece_map[piece].download_queue();
if (state == piece_pos::piece_open) return;
std::vector<downloading_piece>::iterator i = find_dl_piece(state, piece);
if (i == m_downloads[state].end()) return;
@@ -395,7 +925,7 @@ the piece? Perhaps write_failed() should imply locking it.../src/piece_
if (info.state == block_info::state_finished) return;
if (info.state == block_info::state_writing) --i->writing;
- | ||
relevance 2 | ../src/session_impl.cpp:216 | find a better place for this function |
find a better place for this function../src/session_impl.cpp:216 *j.vec, j.peer->make_write_handler(boost::bind(
+ | ||
relevance 2 | ../src/session_impl.cpp:214 | find a better place for this function |
find a better place for this function../src/session_impl.cpp:214 *j.vec, j.peer->make_write_handler(boost::bind(
&peer_connection::on_send_data, j.peer, _1, _2)));
}
else
@@ -446,10 +976,63 @@ namespace aux {
static const class_mapping v4_classes[] =
{
// everything
- | ||
relevance 2 | ../src/session_impl.cpp:1813 | the udp socket(s) should be using the same generic mechanism and not be restricted to a single one we should open a one listen socket for each entry in the listen_interfaces list |
the udp socket(s) should be using the same generic
+ | ||
relevance 2 | ../src/session_impl.cpp:821 | if the DHT is enabled, it should probably be restarted here. maybe it should even be deferred to not be started until the client has had a chance to pass in the dht state |
if the DHT is enabled, it should probably be restarted here.
+maybe it should even be deferred to not be started until the client
+has had a chance to pass in the dht state../src/session_impl.cpp:821 if (val) m_settings.set_int(settings_pack::allowed_enc_level, val.int_value());
+ }
+#endif
+
+ settings = e->dict_find_dict("settings");
+ if (settings)
+ {
+ settings_pack* pack = load_pack_from_dict(settings);
+ apply_settings_pack(pack);
+ }
+
+ // in case we just set a socks proxy, we might have to
+ // open the socks incoming connection
+ if (!m_socks_listen_socket) open_new_incoming_socks_connection();
+ m_udp_socket.set_proxy_settings(proxy());
+
+#ifndef TORRENT_DISABLE_DHT
+ settings = e->dict_find_dict("dht state");
+ if (settings)
+ {
+ m_dht_state = settings;
+ }
+#endif
+
+#ifndef TORRENT_NO_DEPRECATE
+ settings = e->dict_find_list("feeds");
+ if (settings)
+ {
+ m_feeds.reserve(settings.list_size());
+ for (int i = 0; i < settings.list_size(); ++i)
+ {
+ if (settings.list_at(i).type() != bdecode_node::dict_t) continue;
+ boost::shared_ptr<feed> f(new_feed(*this, feed_settings()));
+ f->load_state(settings.list_at(i));
+ f->update_feed();
+ m_feeds.push_back(f);
+ }
+ update_rss_feeds();
+ }
+#endif
+
+#ifndef TORRENT_DISABLE_EXTENSIONS
+ for (ses_extension_list_t::iterator i = m_ses_extensions.begin()
+ , end(m_ses_extensions.end()); i != end; ++i)
+ {
+ TORRENT_TRY {
+ (*i)->load_state(*e);
+ } TORRENT_CATCH(std::exception&) {}
+ }
+#endif
+ }
+ | ||
relevance 2 | ../src/session_impl.cpp:1817 | the udp socket(s) should be using the same generic mechanism and not be restricted to a single one we should open a one listen socket for each entry in the listen_interfaces list |
the udp socket(s) should be using the same generic
mechanism and not be restricted to a single one
we should open a one listen socket for each entry in the
-listen_interfaces list../src/session_impl.cpp:1813 }
+listen_interfaces list../src/session_impl.cpp:1817 }
#endif // TORRENT_USE_OPENSSL
}
#endif // TORRENT_USE_IPV6
@@ -500,15 +1083,15 @@ listen_interfaces list../src/session_impl.cpp:1813relevance 2 | ../src/session_impl.cpp:1915 | use bind_to_device in udp_socket |
|
use bind_to_device in udp_socket../src/session_impl.cpp:1915 if (m_listen_port_retries > 0)
+ | ||
relevance 2 | ../src/session_impl.cpp:1919 | use bind_to_device in udp_socket |
use bind_to_device in udp_socket../src/session_impl.cpp:1919 if (m_listen_port_retries > 0)
{
m_listen_interface.port(m_listen_interface.port() + 1);
--m_listen_port_retries;
goto retry;
}
if (m_alerts.should_post<listen_failed_alert>())
- m_alerts.post_alert(listen_failed_alert(print_endpoint(m_listen_interface)
- , listen_failed_alert::bind, ec, listen_failed_alert::udp));
+ m_alerts.emplace_alert<listen_failed_alert>(print_endpoint(m_listen_interface)
+ , listen_failed_alert::bind, ec, listen_failed_alert::udp);
return;
}
@@ -523,44 +1106,44 @@ listen_interfaces list../src/session_impl.cpp:1813 m_ssl_udp_socket.bind(ssl_bind_if, ec);
if (ec)
{
-#if defined TORRENT_LOGGING
+#ifndef TORRENT_DISABLE_LOGGING
session_log("SSL: cannot bind to UDP interface \"%s\": %s"
, print_endpoint(m_listen_interface).c_str(), ec.message().c_str());
#endif
if (m_alerts.should_post<listen_failed_alert>())
{
error_code err;
- m_alerts.post_alert(listen_failed_alert(print_endpoint(ssl_bind_if)
- , listen_failed_alert::bind, ec, listen_failed_alert::utp_ssl));
+ m_alerts.emplace_alert<listen_failed_alert>(print_endpoint(ssl_bind_if)
+ , listen_failed_alert::bind, ec, listen_failed_alert::utp_ssl);
}
ec.clear();
}
else
{
if (m_alerts.should_post<listen_succeeded_alert>())
- m_alerts.post_alert(listen_succeeded_alert(
+ m_alerts.emplace_alert<listen_succeeded_alert>(
tcp::endpoint(ssl_bind_if.address(), ssl_bind_if.port())
- , listen_succeeded_alert::utp_ssl));
+ , listen_succeeded_alert::utp_ssl);
}
}
#endif // TORRENT_USE_OPENSSL
- | ||
relevance 2 | ../src/session_impl.cpp:1941 | use bind_to_device in udp_socket |
use bind_to_device in udp_socket../src/session_impl.cpp:1941 , print_endpoint(m_listen_interface).c_str(), ec.message().c_str());
+ | ||
relevance 2 | ../src/session_impl.cpp:1945 | use bind_to_device in udp_socket |
use bind_to_device in udp_socket../src/session_impl.cpp:1945 , print_endpoint(m_listen_interface).c_str(), ec.message().c_str());
#endif
if (m_alerts.should_post<listen_failed_alert>())
{
error_code err;
- m_alerts.post_alert(listen_failed_alert(print_endpoint(ssl_bind_if)
- , listen_failed_alert::bind, ec, listen_failed_alert::utp_ssl));
+ m_alerts.emplace_alert<listen_failed_alert>(print_endpoint(ssl_bind_if)
+ , listen_failed_alert::bind, ec, listen_failed_alert::utp_ssl);
}
ec.clear();
}
else
{
if (m_alerts.should_post<listen_succeeded_alert>())
- m_alerts.post_alert(listen_succeeded_alert(
+ m_alerts.emplace_alert<listen_succeeded_alert>(
tcp::endpoint(ssl_bind_if.address(), ssl_bind_if.port())
- , listen_succeeded_alert::utp_ssl));
+ , listen_succeeded_alert::utp_ssl);
}
}
#endif // TORRENT_USE_OPENSSL
@@ -568,7 +1151,7 @@ listen_interfaces list../src/session_impl.cpp:1813 m_udp_socket.bind(udp::endpoint(m_listen_interface.address(), m_listen_interface.port()), ec);
if (ec)
{
-#if defined TORRENT_LOGGING
+#ifndef TORRENT_DISABLE_LOGGING
session_log("cannot bind to UDP interface \"%s\": %s"
, print_endpoint(m_listen_interface).c_str(), ec.message().c_str());
#endif
@@ -581,8 +1164,8 @@ listen_interfaces list../src/session_impl.cpp:1813 | ||
relevance 2 | ../src/session_impl.cpp:3388 | make a list for torrents that want to be announced on the DHT so we don't have to loop over all torrents, just to find the ones that want to announce |
make a list for torrents that want to be announced on the DHT so we
-don't have to loop over all torrents, just to find the ones that want to announce../src/session_impl.cpp:3388 if (!m_dht_torrents.empty())
+ | ||
relevance 2 | ../src/session_impl.cpp:3391 | make a list for torrents that want to be announced on the DHT so we don't have to loop over all torrents, just to find the ones that want to announce |
make a list for torrents that want to be announced on the DHT so we
+don't have to loop over all torrents, just to find the ones that want to announce../src/session_impl.cpp:3391 if (!m_dht_torrents.empty())
{
boost::shared_ptr<torrent> t;
do
@@ -648,7 +1231,60 @@ don't have to loop over all torrents, just to find the ones that want to announc
if (m_torrents.empty()) return;
if (m_next_lsd_torrent == m_torrents.end())
- | ||
relevance 2 | ../src/torrent.cpp:719 | post alert |
post alert../src/torrent.cpp:719 state_updated();
+ | ||
relevance 2 | ../src/storage.cpp:921 | is this risky? The upper layer will assume we have the whole file. Perhaps we should verify that at least the size of the file is correct |
is this risky? The upper layer will assume we have the
+whole file. Perhaps we should verify that at least the size
+of the file is correct../src/storage.cpp:921 if (links)
+ {
+ // if this is a mutable torrent, and we need to pick up some files
+ // from other torrents, do that now. Note that there is an inherent
+ // race condition here. We checked if the files existed on a different
+ // thread a while ago. These files may no longer exist or may have been
+ // moved. If so, we just fail. The user is responsible to not touch
+ // other torrents until a new mutable torrent has been completely
+ // added.
+ int idx = 0;
+ for (std::vector<std::string>::const_iterator i = links->begin();
+ i != links->end(); ++i, ++idx)
+ {
+ if (i->empty()) continue;
+
+ error_code err;
+ std::string file_path = fs.file_path(idx, m_save_path);
+ hard_link(*i, file_path, err);
+
+ // if the file already exists, that's not an error
+ if (!err || err == boost::system::errc::file_exists)
+ continue;
+
+ ec.ec = err;
+ ec.file = idx;
+ ec.operation = storage_error::hard_link;
+ return false;
+ }
+ }
+#endif // TORRENT_DISABLE_MUTABLE_TORRENTS
+
+ return true;
+ }
+
+ int default_storage::move_storage(std::string const& sp, int flags, storage_error& ec)
+ {
+ int ret = piece_manager::no_error;
+ std::string save_path = complete(sp);
+
+ // check to see if any of the files exist
+ error_code e;
+ file_storage const& f = files();
+
+ file_status s;
+ if (flags == fail_if_exist)
+ {
+ stat_file(save_path, &s, e);
+ if (e != boost::system::errc::no_such_file_or_directory)
+ {
+ // the directory exists, check all the files
+ for (int i = 0; i < f.num_files(); ++i)
+ | ||
relevance 2 | ../src/torrent.cpp:726 | post alert |
post alert../src/torrent.cpp:726 state_updated();
set_state(torrent_status::downloading);
@@ -669,12 +1305,12 @@ don't have to loop over all torrents, just to find the ones that want to announc
// an error.
- #if defined TORRENT_LOGGING
+#ifndef TORRENT_DISABLE_LOGGING
debug_log("*** FAILED SEED MODE, rechecking");
#endif
}
-#if defined TORRENT_LOGGING
+#ifndef TORRENT_DISABLE_LOGGING
debug_log("*** LEAVING SEED MODE (%s)", seed ? "as seed" : "as non-seed");
#endif
m_seed_mode = false;
@@ -699,9 +1335,9 @@ don't have to loop over all torrents, just to find the ones that want to announc
TORRENT_ASSERT(piece >= 0);
TORRENT_ASSERT(m_verified.get_bit(piece) == false);
++m_num_verified;
- | ||
relevance 2 | ../src/torrent.cpp:4729 | abort lookups this torrent has made via the session host resolver interface |
abort lookups this torrent has made via the
-session host resolver interface../src/torrent.cpp:4729 // files belonging to the torrents
- disconnect_all(errors::torrent_aborted, peer_connection_interface::op_bittorrent);
+ | ||
relevance 2 | ../src/torrent.cpp:4807 | abort lookups this torrent has made via the session host resolver interface |
abort lookups this torrent has made via the
+session host resolver interface../src/torrent.cpp:4807 // files belonging to the torrents
+ disconnect_all(errors::torrent_aborted, op_bittorrent);
// post a message to the main thread to destruct
// the torrent object from there
@@ -715,7 +1351,7 @@ session host resolver interface../src/torrent.cpp:4729 | ||
relevance 2 | ../src/torrent.cpp:4873 | the tracker login feature should probably be deprecated |
the tracker login feature should probably be deprecated../src/torrent.cpp:4873 if (alerts().should_post<file_renamed_alert>())
- alerts().post_alert(file_renamed_alert(get_handle(), j->buffer, j->piece));
+ | ||
relevance 2 | ../src/torrent.cpp:4951 | the tracker login feature should probably be deprecated |
the tracker login feature should probably be deprecated../src/torrent.cpp:4951 if (alerts().should_post<file_renamed_alert>())
+ alerts().emplace_alert<file_renamed_alert>(get_handle(), j->buffer, j->piece);
m_torrent_file->rename_file(j->piece, j->buffer);
}
else
{
if (alerts().should_post<file_rename_failed_alert>())
- alerts().post_alert(file_rename_failed_alert(get_handle()
- , j->piece, j->error.ec));
+ alerts().emplace_alert<file_rename_failed_alert>(get_handle()
+ , j->piece, j->error.ec);
}
}
- void torrent::on_torrent_paused(disk_io_job const* j)
+ void torrent::on_torrent_paused(disk_io_job const*)
{
TORRENT_ASSERT(is_single_thread());
if (alerts().should_post<torrent_paused_alert>())
- alerts().post_alert(torrent_paused_alert(get_handle()));
+ alerts().emplace_alert<torrent_paused_alert>(get_handle());
}
std::string torrent::tracker_login() const
@@ -802,7 +1438,7 @@ session host resolver interface ../src/torrent.cpp:4729relevance 2 | ../src/torrent.cpp:7725 | if peer is a really good peer, maybe we shouldn't disconnect it |
|
if peer is a really good peer, maybe we shouldn't disconnect it../src/torrent.cpp:7725#if defined TORRENT_LOGGING
+ | ||
relevance 2 | ../src/torrent.cpp:7808 | if peer is a really good peer, maybe we shouldn't disconnect it |
if peer is a really good peer, maybe we shouldn't disconnect it../src/torrent.cpp:7808#ifndef TORRENT_DISABLE_LOGGING
debug_log("incoming peer (%d)", int(m_connections.size()));
#endif
@@ -824,12 +1460,12 @@ session host resolver interface../src/torrent.cpp:4729 if (peer && peer->peer_rank() < p->peer_rank())
{
- peer->disconnect(errors::too_many_connections, peer_connection_interface::op_bittorrent);
+ peer->disconnect(errors::too_many_connections, op_bittorrent);
p->peer_disconnected_other();
}
else
{
- p->disconnect(errors::too_many_connections, peer_connection_interface::op_bittorrent);
+ p->disconnect(errors::too_many_connections, op_bittorrent);
// we have to do this here because from the peer's point of
// it wasn't really attached to the torrent, but we do need
// to let peer_list know we're removing it
@@ -853,8 +1489,8 @@ session host resolver interface../src/torrent.cpp:4729relevance 2 | ../src/tracker_manager.cpp:196 | some of these arguments could probably be moved to the tracker request itself. like the ip_filter and settings |
|
some of these arguments could probably be moved to the
-tracker request itself. like the ip_filter and settings../src/tracker_manager.cpp:196 , interval == 0 ? min_interval : interval);
+ | ||
relevance 2 | ../src/tracker_manager.cpp:200 | some of these arguments could probably be moved to the tracker request itself. like the ip_filter and settings |
some of these arguments could probably be moved to the
+tracker request itself. like the ip_filter and settings../src/tracker_manager.cpp:200 , interval == 0 ? min_interval : interval);
close();
}
@@ -879,7 +1515,7 @@ tracker request itself. like the ip_filter and settings../src/tracker_m
, resolver_interface& resolver
, struct ip_filter& ipf
, aux::session_settings const& sett
-#if defined TORRENT_LOGGING || TORRENT_USE_ASSERTS
+#if !defined TORRENT_DISABLE_LOGGING || TORRENT_USE_ASSERTS
, aux::session_logger& ses
#endif
)
@@ -888,7 +1524,7 @@ tracker request itself. like the ip_filter and settings../src/tracker_m
, m_host_resolver(resolver)
, m_settings(sett)
, m_stats_counters(stats_counters)
-#if defined TORRENT_LOGGING || TORRENT_USE_ASSERTS
+#if !defined TORRENT_DISABLE_LOGGING || TORRENT_USE_ASSERTS
, m_ses(ses)
#endif
, m_abort(false)
@@ -905,7 +1541,7 @@ tracker request itself. like the ip_filter and settings../src/tracker_m
TORRENT_ASSERT(m_ses.is_single_thread());
m_stats_counters.inc_stats_counter(counters::sent_tracker_bytes, bytes);
}
- | ||
relevance 2 | ../src/udp_tracker_connection.cpp:86 | support authentication here. tracker_req().auth |
support authentication here. tracker_req().auth../src/udp_tracker_connection.cpp:86 udp_tracker_connection::m_connection_cache;
+ | ||
relevance 2 | ../src/udp_tracker_connection.cpp:83 | support authentication here. tracker_req().auth |
support authentication here. tracker_req().auth../src/udp_tracker_connection.cpp:83 udp_tracker_connection::m_connection_cache;
mutex udp_tracker_connection::m_cache_mutex;
@@ -956,7 +1592,60 @@ tracker request itself. like the ip_filter and settings../src/tracker_m
#if defined TORRENT_ASIO_DEBUGGING
add_outstanding_async("udp_tracker_connection::name_lookup");
#endif
- | ||
relevance 2 | ../src/utp_stream.cpp:348 | it would be nice if not everything would have to be public here |
it would be nice if not everything would have to be public here../src/utp_stream.cpp:348 void incoming(boost::uint8_t const* buf, int size, packet* p, ptime now);
+ | ||
relevance 2 | ../src/ut_metadata.cpp:120 | if we were to initialize m_metadata_size lazily instead, we would probably be more efficient initialize m_metadata_size |
if we were to initialize m_metadata_size lazily instead,
+we would probably be more efficient
+initialize m_metadata_size../src/ut_metadata.cpp:120 metadata();
+ }
+
+ bool need_loaded()
+ { return m_torrent.need_loaded(); }
+
+ virtual void on_unload()
+ {
+ m_metadata.reset();
+ }
+
+ virtual void on_load()
+ {
+ // initialize m_metadata_size
+ TORRENT_ASSERT(m_torrent.is_loaded());
+ metadata();
+ }
+
+ virtual void on_files_checked()
+ {
+ metadata();
+ }
+
+ virtual boost::shared_ptr<peer_plugin> new_connection(
+ peer_connection* pc);
+
+ int get_metadata_size() const
+ {
+ TORRENT_ASSERT(m_metadata_size > 0);
+ return m_metadata_size;
+ }
+
+ buffer::const_interval metadata() const
+ {
+ if (!m_torrent.need_loaded()) return buffer::const_interval(NULL, NULL);
+ TORRENT_ASSERT(m_torrent.valid_metadata());
+ if (!m_metadata)
+ {
+ m_metadata = m_torrent.torrent_file().metadata();
+ m_metadata_size = m_torrent.torrent_file().metadata_size();
+ TORRENT_ASSERT(hasher(m_metadata.get(), m_metadata_size).final()
+ == m_torrent.torrent_file().info_hash());
+ }
+ return buffer::const_interval(m_metadata.get(), m_metadata.get()
+ + m_metadata_size);
+ }
+
+ bool received_metadata(ut_metadata_peer_plugin& source
+ , char const* buf, int size, int piece, int total_size);
+
+ // returns a piece of the metadata that
+ | ||
relevance 2 | ../src/utp_stream.cpp:351 | it would be nice if not everything would have to be public here |
it would be nice if not everything would have to be public here../src/utp_stream.cpp:351 void incoming(boost::uint8_t const* buf, int size, packet* p, time_point now);
void do_ledbat(int acked_bytes, int delay, int in_flight);
int packet_timeout() const;
bool test_socket_state();
@@ -964,7 +1653,7 @@ tracker request itself. like the ip_filter and settings../src/tracker_m
void maybe_trigger_send_callback();
bool cancel_handlers(error_code const& ec, bool kill);
bool consume_incoming_data(
- utp_header const* ph, boost::uint8_t const* ptr, int payload_size, ptime now);
+ utp_header const* ph, boost::uint8_t const* ptr, int payload_size, time_point now);
void update_mtu_limits();
void experienced_loss(int seq_nr);
@@ -1007,10 +1696,9 @@ private:
{
iovec_t(void* b, size_t l): buf(b), len(l) {}
void* buf;
- | ||
relevance 2 | ../src/web_peer_connection.cpp:628 | just make this peer not have the pieces associated with the file we just requested. Only when it doesn't have any of the file do the following |
just make this peer not have the pieces
+ | ||
relevance 2 | ../src/web_peer_connection.cpp:632 | just make this peer not have the pieces associated with the file we just requested. Only when it doesn't have any of the file do the following |
just make this peer not have the pieces
associated with the file we just requested. Only
-when it doesn't have any of the file do the following../src/web_peer_connection.cpp:628 {
- ++m_num_responses;
+when it doesn't have any of the file do the following../src/web_peer_connection.cpp:632 ++m_num_responses;
if (m_parser.connection_close())
{
@@ -1019,12 +1707,13 @@ when it doesn't have any of the file do the following../src/web_peer_co
m_web->supports_keepalive = false;
}
-#ifdef TORRENT_LOGGING
- peer_log("*** STATUS: %d %s", m_parser.status_code(), m_parser.message().c_str());
+#ifndef TORRENT_DISABLE_LOGGING
+ peer_log(peer_log_alert::info, "STATUS"
+ , "%d %s", m_parser.status_code(), m_parser.message().c_str());
std::multimap<std::string, std::string> const& headers = m_parser.headers();
for (std::multimap<std::string, std::string>::const_iterator i = headers.begin()
, end(headers.end()); i != end; ++i)
- peer_log(" %s: %s", i->first.c_str(), i->second.c_str());
+ peer_log(peer_log_alert::info, "STATUS", " %s: %s", i->first.c_str(), i->second.c_str());
#endif
// if the status code is not one of the accepted ones, abort
if (!is_ok_status(m_parser.status_code()))
@@ -1037,8 +1726,8 @@ when it doesn't have any of the file do the following../src/web_peer_co
+ (" " + m_parser.message());
if (t->alerts().should_post<url_seed_alert>())
{
- t->alerts().post_alert(url_seed_alert(t->get_handle(), m_url
- , error_msg));
+ t->alerts().emplace_alert<url_seed_alert>(t->get_handle(), m_url
+ , error_msg);
}
received_bytes(0, bytes_transferred);
disconnect(error_code(m_parser.status_code(), get_http_category()), op_bittorrent, 1);
@@ -1060,9 +1749,9 @@ when it doesn't have any of the file do the following../src/web_peer_co
{
// we should not try this server again.
t->remove_web_seed(this, errors::missing_location, op_bittorrent, 2);
- | ||
relevance 2 | ../src/web_peer_connection.cpp:687 | create a mapping of file-index to redirection URLs. Use that to form URLs instead. Support to reconnect to a new server without destructing this peer_connection |
create a mapping of file-index to redirection URLs. Use that to form
+ | ||
relevance 2 | ../src/web_peer_connection.cpp:691 | create a mapping of file-index to redirection URLs. Use that to form URLs instead. Support to reconnect to a new server without destructing this peer_connection |
create a mapping of file-index to redirection URLs. Use that to form
URLs instead. Support to reconnect to a new server without destructing this
-peer_connection../src/web_peer_connection.cpp:687 == dl_target);
+peer_connection../src/web_peer_connection.cpp:691 == dl_target);
#endif
return;
}
@@ -1108,12 +1797,14 @@ peer_connection../src/web_peer_connection.cpp:687 | ||
relevance 2 | ../src/kademlia/node.cpp:67 | make this configurable in dht_settings |
make this configurable in dht_settings../src/kademlia/node.cpp:67#include "libtorrent/kademlia/routing_table.hpp"
+ | ||
relevance 2 | ../src/kademlia/node.cpp:69 | make this configurable in dht_settings |
make this configurable in dht_settings../src/kademlia/node.cpp:69#include "libtorrent/kademlia/node_id.hpp"
+#include "libtorrent/kademlia/rpc_manager.hpp"
+#include "libtorrent/kademlia/routing_table.hpp"
#include "libtorrent/kademlia/node.hpp"
#include "libtorrent/kademlia/dht_observer.hpp"
@@ -1129,8 +1820,6 @@ peer_connection../src/web_peer_connection.cpp:687enum { announce_interval = 30 };
@@ -1139,6 +1828,8 @@ using detail::write_endpoint;
TORRENT_DEFINE_LOG(node)
#endif
+namespace {
+
// remove peers that have timed out
void purge_peers(std::set<peer_entry>& peers)
{
@@ -1146,7 +1837,7 @@ void purge_peers(std::set<peer_entry>& peers)
, end(peers.end()); i != end;)
{
// the peer has timed out
- if (i->added + minutes(int(announce_interval * 1.5f)) < time_now())
+ if (i->added + minutes(int(announce_interval * 1.5f)) < aux::time_now())
{
#ifdef TORRENT_DHT_VERBOSE_LOGGING
TORRENT_LOG(node) << "peer timed out at: " << i->addr;
@@ -1160,12 +1851,10 @@ void purge_peers(std::set<peer_entry>& peers)
void nop() {}
-node_impl::node_impl(alert_dispatcher* alert_disp
- , udp_socket_interface* sock
- , dht_settings const& settings, node_id nid, address const& external_address
- , dht_observer* observer
- | ||
relevance 2 | ../src/kademlia/node.cpp:491 | it would be nice to have a bias towards node-id prefixes that are missing in the bucket |
it would be nice to have a bias towards node-id prefixes that
-are missing in the bucket../src/kademlia/node.cpp:491 // this shouldn't happen
+node_id calculate_node_id(node_id const& nid, dht_observer* observer)
+{
+ | ||
relevance 2 | ../src/kademlia/node.cpp:497 | it would be nice to have a bias towards node-id prefixes that are missing in the bucket |
it would be nice to have a bias towards node-id prefixes that
+are missing in the bucket../src/kademlia/node.cpp:497 // this shouldn't happen
TORRENT_ASSERT(m_id != ne->id);
if (ne->id == m_id) return;
@@ -1174,7 +1863,7 @@ are missing in the bucket../src/kademlia/node.cpp:491relevance 2 | ../src/kademlia/node.cpp:581 | use the non deprecated function instead of this one |
|
use the non deprecated function instead of this one../src/kademlia/node.cpp:581 return d;
+ | ||
relevance 2 | ../src/kademlia/node.cpp:587 | use the non deprecated function instead of this one |
use the non deprecated function instead of this one../src/kademlia/node.cpp:587 return d;
}
-void node_impl::status(std::vector<dht_routing_bucket>& table
+void node::status(std::vector<dht_routing_bucket>& table
, std::vector<dht_lookup>& requests)
{
mutex_t::scoped_lock l(m_mutex);
@@ -1236,7 +1925,7 @@ void node_impl::status(std::vector<dht_routing_bucket>& table
}
#ifndef TORRENT_NO_DEPRECATE
-void node_impl::status(session_status& s)
+ void node::status(session_status& s)
{
mutex_t::scoped_lock l(m_mutex);
@@ -1254,30 +1943,30 @@ void node_impl::status(std::vector<dht_routing_bucket>& table
}
#endif
-void node_impl::lookup_peers(sha1_hash const& info_hash, entry& reply
+void node::lookup_peers(sha1_hash const& info_hash, entry& reply
, bool noseed, bool scrape) const
{
- if (m_post_alert)
- {
- alert* a = new dht_get_peers_alert(info_hash);
- if (!m_post_alert->post_alert(a)) delete a;
- }
+ if (m_observer)
+ m_observer->get_peers(info_hash);
table_t::const_iterator i = m_map.lower_bound(info_hash);
if (i == m_map.end()) return;
if (i->first != info_hash) return;
- | ||
relevance 2 | ../src/kademlia/node.cpp:909 | find_node should write directly to the response entry |
find_node should write directly to the response entry../src/kademlia/node.cpp:909 TORRENT_LOG(node) << " values: " << reply["values"].list().size();
+ torrent_entry const& v = i->second;
+
+ if (!v.name.empty()) reply["n"] = v.name;
+ | ||
relevance 2 | ../src/kademlia/node.cpp:919 | find_node should write directly to the response entry |
find_node should write directly to the response entry../src/kademlia/node.cpp:919 TORRENT_LOG(node) << " values: " << reply["values"].list().size();
}
#endif
}
- else if (strcmp(query, "find_node") == 0)
+ else if (query_len == 9 && memcmp(query, "find_node", 9) == 0)
{
key_desc_t msg_desc[] = {
- {"target", lazy_entry::string_t, 20, 0},
+ {"target", bdecode_node::string_t, 20, 0},
};
- lazy_entry const* msg_keys[1];
+ bdecode_node msg_keys[1];
if (!verify_message(arg_ent, msg_desc, msg_keys, 1, error_string, sizeof(error_string)))
{
incoming_error(e, error_string);
@@ -1285,24 +1974,24 @@ void node_impl::lookup_peers(sha1_hash const& info_hash, entry& reply
}
m_counters.inc_stats_counter(counters::dht_find_node_in);
- sha1_hash target(msg_keys[0]->string_ptr());
+ sha1_hash target(msg_keys[0].string_ptr());
nodes_t n;
m_table.find_node(target, n, 0);
write_nodes_entry(reply, n);
}
- else if (strcmp(query, "announce_peer") == 0)
+ else if (query_len == 13 && memcmp(query, "announce_peer", 13) == 0)
{
key_desc_t msg_desc[] = {
- {"info_hash", lazy_entry::string_t, 20, 0},
- {"port", lazy_entry::int_t, 0, 0},
- {"token", lazy_entry::string_t, 0, 0},
- {"n", lazy_entry::string_t, 0, key_desc_t::optional},
- {"seed", lazy_entry::int_t, 0, key_desc_t::optional},
- {"implied_port", lazy_entry::int_t, 0, key_desc_t::optional},
+ {"info_hash", bdecode_node::string_t, 20, 0},
+ {"port", bdecode_node::int_t, 0, 0},
+ {"token", bdecode_node::string_t, 0, 0},
+ {"n", bdecode_node::string_t, 0, key_desc_t::optional},
+ {"seed", bdecode_node::int_t, 0, key_desc_t::optional},
+ {"implied_port", bdecode_node::int_t, 0, key_desc_t::optional},
};
- lazy_entry const* msg_keys[6];
+ bdecode_node msg_keys[6];
if (!verify_message(arg_ent, msg_desc, msg_keys, 6, error_string, sizeof(error_string)))
{
m_counters.inc_stats_counter(counters::dht_invalid_announce);
@@ -1310,15 +1999,15 @@ void node_impl::lookup_peers(sha1_hash const& info_hash, entry& reply
return;
}
- int port = int(msg_keys[1]->int_value());
+ int port = int(msg_keys[1].int_value());
// is the announcer asking to ignore the explicit
// listen port and instead use the source port of the packet?
- if (msg_keys[5] && msg_keys[5]->int_value() != 0)
+ if (msg_keys[5] && msg_keys[5].int_value() != 0)
port = m.addr.port();
if (port < 0 || port >= 65536)
- | ||
relevance 2 | ../src/kademlia/routing_table.cpp:110 | use the non deprecated function instead of this one |
use the non deprecated function instead of this one../src/kademlia/routing_table.cpp:110
+ | ||
relevance 2 | ../src/kademlia/routing_table.cpp:114 | use the non deprecated function instead of this one |
use the non deprecated function instead of this one../src/kademlia/routing_table.cpp:114
static const int size_exceptions[] = {16, 8, 4, 2};
if (bucket < int(sizeof(size_exceptions)/sizeof(size_exceptions[0])))
return m_bucket_size * size_exceptions[bucket];
@@ -1369,7 +2058,7 @@ boost::tuple<int, int, int> routing_table::size() const
nodes += i->live_nodes.size();
for (bucket_t::const_iterator k = i->live_nodes.begin()
, end(i->live_nodes.end()); k != end; ++k)
- | ||
relevance 2 | ../src/kademlia/routing_table.cpp:900 | move the lowest priority nodes to the replacement bucket |
move the lowest priority nodes to the replacement bucket../src/kademlia/routing_table.cpp:900 bucket_t& b = m_buckets[bucket_index].live_nodes;
+ | ||
relevance 2 | ../src/kademlia/routing_table.cpp:946 | move the lowest priority nodes to the replacement bucket |
move the lowest priority nodes to the replacement bucket../src/kademlia/routing_table.cpp:946 bucket_t& b = m_buckets[bucket_index].live_nodes;
bucket_t& rb = m_buckets[bucket_index].replacements;
// move any node whose (160 - distane_exp(m_id, id)) >= (i - m_buckets.begin())
@@ -1420,8 +2109,102 @@ boost::tuple<int, int, int> routing_table::size() const
else
new_replacement_bucket.push_back(*j);
}
- | ||
relevance 2 | ../include/libtorrent/enum_net.hpp:137 | this could be done more efficiently by just looking up the interface with the given name, maybe even with if_nametoindex() |
this could be done more efficiently by just looking up
-the interface with the given name, maybe even with if_nametoindex()../include/libtorrent/enum_net.hpp:137
+ | ||
relevance 2 | ../include/libtorrent/alert_types.hpp:1428 | should the alert baseclass have this object instead? |
should the alert baseclass have this object instead?../include/libtorrent/alert_types.hpp:1428 {
+ // internal
+ portmap_log_alert(aux::stack_allocator& alloc, int t, const char* m);
+
+ TORRENT_DEFINE_ALERT(portmap_log_alert, 52)
+
+ static const int static_category = alert::port_mapping_notification;
+ virtual std::string message() const;
+
+ int map_type;
+
+#ifndef TORRENT_NO_DEPRECATE
+ std::string msg;
+#endif
+
+ // the message associated with this log line
+ char const* log_message() const;
+
+ private:
+
+ aux::stack_allocator const& m_alloc;
+
+ int m_log_idx;
+ };
+
+ // This alert is generated when a fastresume file has been passed to
+ // add_torrent() but the files on disk did not match the fastresume file.
+ // The error_code explains the reason why the resume file was rejected.
+ struct TORRENT_EXPORT fastresume_rejected_alert: torrent_alert
+ {
+ // internal
+ fastresume_rejected_alert(aux::stack_allocator& alloc
+ , torrent_handle const& h
+ , error_code const& ec
+ , std::string const& file
+ , char const* op);
+
+ TORRENT_DEFINE_ALERT(fastresume_rejected_alert, 53)
+
+ static const int static_category = alert::status_notification
+ | alert::error_notification;
+ virtual std::string message() const;
+
+ error_code error;
+
+#ifndef TORRENT_NO_DEPRECATE
+ // If the error happend to a specific file, ``file`` is the path to it.
+ std::string file;
+#endif
+
+ // If the error happend to a specific file, this returns the path to it.
+ | ||
relevance 2 | ../include/libtorrent/build_config.hpp:40 | instead of using a dummy function to cause link errors when incompatible build configurations are used, make the namespace name depend on the configuration, and have a using declaration in the headers to pull it into libtorrent. |
instead of using a dummy function to cause link errors when
+incompatible build configurations are used, make the namespace name
+depend on the configuration, and have a using declaration in the headers
+to pull it into libtorrent.../include/libtorrent/build_config.hpp:40AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#ifndef TORRENT_BUILD_CONFIG_HPP_INCLUDED
+#define TORRENT_BUILD_CONFIG_HPP_INCLUDED
+
+#include "libtorrent/config.hpp"
+#include <boost/preprocessor/cat.hpp>
+#include <boost/preprocessor/stringize.hpp>
+
+#if TORRENT_USE_IPV6
+ #define TORRENT_CFG_IPV6 ipv6_
+#else
+#define TORRENT_CFG_IPV6 noipv6_
+#endif
+
+#ifdef TORRENT_NO_DEPRECATE
+#define TORRENT_CFG_DEPR nodeprecate_
+#else
+#define TORRENT_CFG_DEPR deprecated_
+#endif
+
+#define TORRENT_CFG \
+ BOOST_PP_CAT(TORRENT_CFG_IPV6, \
+ TORRENT_CFG_DEPR)
+
+#define TORRENT_CFG_STRING BOOST_PP_STRINGIZE(TORRENT_CFG)
+
+#endif
+
+ | ||
relevance 2 | ../include/libtorrent/enum_net.hpp:143 | this could be done more efficiently by just looking up the interface with the given name, maybe even with if_nametoindex() |
this could be done more efficiently by just looking up
+the interface with the given name, maybe even with if_nametoindex()../include/libtorrent/enum_net.hpp:143
address ip = address::from_string(device_name, ec);
if (!ec)
{
@@ -1472,10 +2255,61 @@ the interface with the given name, maybe even with if_nametoindex()../i
// returns true if the given device exists
TORRENT_EXTRA_EXPORT bool has_interface(char const* name, io_service& ios
- | ||
relevance 2 | ../include/libtorrent/piece_picker.hpp:599 | having 8 priority levels is probably excessive. It should probably be changed to 3 levels + dont-download |
having 8 priority levels is probably excessive. It should
-probably be changed to 3 levels + dont-download../include/libtorrent/piece_picker.hpp:599 // the number of peers that has this piece
+ | ||
relevance 2 | ../include/libtorrent/heterogeneous_queue.hpp:56 | add emplace_back() version |
add emplace_back() version../include/libtorrent/heterogeneous_queue.hpp:56#include <vector>
+
+#include <boost/cstdint.hpp>
+#include <boost/utility/enable_if.hpp>
+#include <boost/type_traits/is_base_of.hpp>
+
+#include "libtorrent/assert.hpp"
+
+namespace libtorrent {
+
+ template <class T>
+ struct heterogeneous_queue
+ {
+ heterogeneous_queue()
+ : m_storage(NULL)
+ , m_capacity(0)
+ , m_size(0)
+ , m_num_items(0)
+ {}
+
+ template <class U>
+ typename boost::enable_if<boost::is_base_of<T, U> >::type
+ push_back(U const& a)
+ {
+ // the size of the type rounded up to pointer alignment
+ const int object_size = (sizeof(U) + sizeof(*m_storage) - 1)
+ / sizeof(*m_storage);
+
+ // +1 for the length prefix
+ if (m_size + object_size + header_size > m_capacity)
+ grow_capacity(object_size);
+
+ uintptr_t* ptr = m_storage + m_size;
+
+ // length prefix
+ header_t* hdr = reinterpret_cast<header_t*>(ptr);
+ hdr->len = object_size;
+ hdr->move = &move<U>;
+ ptr += header_size;
+
+ // construct in-place
+ new (ptr) U(a);
+
+ // if we constructed the object without throwing any exception
+ // update counters to indicate the new item is in there
+ ++m_num_items;
+ m_size += header_size + object_size;
+ }
+
+ void get_pointers(std::vector<T*>& out)
+ {
+ | ||
relevance 2 | ../include/libtorrent/piece_picker.hpp:600 | having 8 priority levels is probably excessive. It should probably be changed to 3 levels + dont-download |
having 8 priority levels is probably excessive. It should
+probably be changed to 3 levels + dont-download../include/libtorrent/piece_picker.hpp:600 // the number of peers that has this piece
// (availability)
-#if TORRENT_OPTIMIZE_MEMORY_USAGE
+#ifdef TORRENT_OPTIMIZE_MEMORY_USAGE
boost::uint32_t peer_count : 9;
#else
boost::uint32_t peer_count : 16;
@@ -1505,7 +2339,7 @@ probably be changed to 3 levels + dont-download../include/libtorrent/pi
boost::uint32_t piece_priority : 3;
// index in to the piece_info vector
-#if TORRENT_OPTIMIZE_MEMORY_USAGE
+#ifdef TORRENT_OPTIMIZE_MEMORY_USAGE
boost::uint32_t index : 17;
#else
boost::uint32_t index;
@@ -1521,10 +2355,10 @@ probably be changed to 3 levels + dont-download../include/libtorrent/pi
// index is set to this to indicate that we have the
// piece. There is no entry for the piece in the
// buckets if this is the case.
-#if TORRENT_OPTIMIZE_MEMORY_USAGE
+#ifdef TORRENT_OPTIMIZE_MEMORY_USAGE
we_have_index = 0x3ffff,
#else
- | ||
relevance 2 | ../include/libtorrent/proxy_base.hpp:257 | use the resolver interface that has a built-in cache |
use the resolver interface that has a built-in cache../include/libtorrent/proxy_base.hpp:257 return m_sock.lowest_layer();
+ | ||
relevance 2 | ../include/libtorrent/proxy_base.hpp:259 | use the resolver interface that has a built-in cache |
use the resolver interface that has a built-in cache../include/libtorrent/proxy_base.hpp:259 return m_sock.lowest_layer();
}
next_layer_type& next_layer()
@@ -1551,7 +2385,61 @@ protected:
#endif
- | ||
relevance 2 | ../include/libtorrent/session.hpp:271 | the ip filter should probably be saved here too |
the ip filter should probably be saved here too../include/libtorrent/session.hpp:271
+ | ||
relevance 2 | ../include/libtorrent/session.hpp:198 | the two second constructors here should probably be deprecated in favor of the more generic one that just takes a settings_pack and a string |
the two second constructors here should probably
+be deprecated in favor of the more generic one that just
+takes a settings_pack and a string../include/libtorrent/session.hpp:198 // nat-pmp) and default plugins (ut_metadata, ut_pex and smart_ban). The
+ // default is to start those features. If you do not want them to start,
+ // pass 0 as the flags parameter.
+ //
+ // The ``alert_mask`` is the same mask that you would send to
+ // set_alert_mask().
+
+ session(settings_pack const& pack
+ , int flags = start_default_features | add_default_plugins)
+ {
+ TORRENT_CFG();
+ start(flags, pack);
+ }
+ session(fingerprint const& print = fingerprint("LT"
+ , LIBTORRENT_VERSION_MAJOR, LIBTORRENT_VERSION_MINOR, 0, 0)
+ , int flags = start_default_features | add_default_plugins
+ , boost::uint32_t alert_mask = alert::error_notification)
+ {
+ TORRENT_CFG();
+ settings_pack pack;
+ pack.set_int(settings_pack::alert_mask, alert_mask);
+ pack.set_str(settings_pack::peer_fingerprint, print.to_string());
+ if ((flags & start_default_features) == 0)
+ {
+ pack.set_bool(settings_pack::enable_upnp, false);
+ pack.set_bool(settings_pack::enable_natpmp, false);
+ pack.set_bool(settings_pack::enable_lsd, false);
+ pack.set_bool(settings_pack::enable_dht, false);
+ }
+
+ start(flags, pack);
+ }
+ session(fingerprint const& print
+ , std::pair<int, int> listen_port_range
+ , char const* listen_interface = "0.0.0.0"
+ , int flags = start_default_features | add_default_plugins
+ , int alert_mask = alert::error_notification)
+ {
+ TORRENT_CFG();
+ TORRENT_ASSERT(listen_port_range.first > 0);
+ TORRENT_ASSERT(listen_port_range.first <= listen_port_range.second);
+
+ settings_pack pack;
+ pack.set_int(settings_pack::alert_mask, alert_mask);
+ pack.set_int(settings_pack::max_retry_port_bind, listen_port_range.second - listen_port_range.first);
+ pack.set_str(settings_pack::peer_fingerprint, print.to_string());
+ char if_string[100];
+ snprintf(if_string, sizeof(if_string), "%s:%d", listen_interface, listen_port_range.first);
+ pack.set_str(settings_pack::listen_interfaces, if_string);
+
+ if ((flags & start_default_features) == 0)
+ | ||
relevance 2 | ../include/libtorrent/session.hpp:249 | the ip filter should probably be saved here too |
the ip filter should probably be saved here too../include/libtorrent/session.hpp:249 pack.set_str(settings_pack::listen_interfaces, if_string);
+
if ((flags & start_default_features) == 0)
{
pack.set_bool(settings_pack::enable_upnp, false);
@@ -1559,7 +2447,6 @@ protected:
pack.set_bool(settings_pack::enable_lsd, false);
pack.set_bool(settings_pack::enable_dht, false);
}
- init();
start(flags, pack);
}
@@ -1590,19 +2477,19 @@ protected:
save_encryption_settings = 0x020,
// internal
- save_as_map = 0x040,
-
- // saves RSS feeds
- save_feeds = 0x080
+ save_as_map = 0x040
#ifndef TORRENT_NO_DEPRECATE
,
+ // saves RSS feeds
+ save_feeds = 0x080,
save_proxy = 0x008,
save_i2p_proxy = 0x010,
save_dht_proxy = save_proxy,
save_peer_proxy = save_proxy,
save_web_proxy = save_proxy,
- | ||
relevance 2 | ../include/libtorrent/session_settings.hpp:55 | this type is only used internally now. move it to an internal header and make this type properly deprecated. |
this type is only used internally now. move it to an internal
+ save_tracker_proxy = save_proxy
+ | ||
relevance 2 | ../include/libtorrent/session_settings.hpp:55 | this type is only used internally now. move it to an internal header and make this type properly deprecated. |
this type is only used internally now. move it to an internal
header and make this type properly deprecated.../include/libtorrent/session_settings.hpp:55
#include "libtorrent/version.hpp"
#include "libtorrent/config.hpp"
@@ -1654,7 +2541,53 @@ namespace libtorrent
// proxy_settings::type field.
enum proxy_type
{
- | ||
relevance 2 | ../include/libtorrent/socks5_stream.hpp:131 | add async_connect() that takes a hostname and port as well |
add async_connect() that takes a hostname and port as well../include/libtorrent/socks5_stream.hpp:131 if (m_dst_name.size() > 255)
+ | ||
relevance 2 | ../include/libtorrent/socket_type.hpp:321 | it would be nice to use aligned_storage here when building on c++11 |
it would be nice to use aligned_storage here when
+building on c++11../include/libtorrent/socket_type.hpp:321 sizeof(stream_socket)
+ , sizeof(socks5_stream)
+ , sizeof(http_stream)
+ , sizeof(utp_stream)
+#if TORRENT_USE_I2P
+ , sizeof(i2p_stream)
+#else
+ , 0
+#endif
+#ifdef TORRENT_USE_OPENSSL
+ , sizeof(ssl_stream<stream_socket>)
+ , sizeof(ssl_stream<socks5_stream>)
+ , sizeof(ssl_stream<http_stream>)
+ , sizeof(ssl_stream<utp_stream>)
+#else
+ , 0, 0, 0, 0
+#endif
+ >::value
+ };
+
+ boost::int64_t m_data[(storage_size + sizeof(boost::int64_t) - 1)
+ / sizeof(boost::int64_t)];
+ };
+
+ // returns true if this socket is an SSL socket
+ bool is_ssl(socket_type const& s);
+
+ // returns true if this is a uTP socket
+ bool is_utp(socket_type const& s);
+
+#if TORRENT_USE_I2P
+ // returns true if this is an i2p socket
+ bool is_i2p(socket_type const& s);
+#endif
+
+ // assuming the socket_type s is an ssl socket, make sure it
+ // verifies the hostname in its SSL handshake
+ void setup_ssl_hostname(socket_type& s, std::string const& hostname, error_code& ec);
+
+ // properly shuts down SSL sockets. holder keeps s alive
+ void async_shutdown(socket_type& s, boost::shared_ptr<void> holder);
+}
+
+#endif
+
+ | ||
relevance 2 | ../include/libtorrent/socks5_stream.hpp:135 | add async_connect() that takes a hostname and port as well |
add async_connect() that takes a hostname and port as well../include/libtorrent/socks5_stream.hpp:135 if (m_dst_name.size() > 255)
m_dst_name.resize(255);
}
@@ -1705,17 +2638,17 @@ namespace libtorrent
m_resolver.async_resolve(q, boost::bind(
&socks5_stream::name_lookup, this, _1, _2, h));
}
- | ||
relevance 2 | ../include/libtorrent/tracker_manager.hpp:278 | this class probably doesn't need to have virtual functions. |
this class probably doesn't need to have virtual functions.../include/libtorrent/tracker_manager.hpp:278 int m_completion_timeout;
+ | ||
relevance 2 | ../include/libtorrent/tracker_manager.hpp:280 | this class probably doesn't need to have virtual functions. |
this class probably doesn't need to have virtual functions.../include/libtorrent/tracker_manager.hpp:280 int m_completion_timeout;
typedef mutex mutex_t;
mutable mutex_t m_mutex;
// used for timeouts
// this is set when the request has been sent
- ptime m_start_time;
+ time_point m_start_time;
// this is set every time something is received
- ptime m_read_time;
+ time_point m_read_time;
// the asio async operation
deadline_timer m_timeout;
@@ -1756,24 +2689,24 @@ namespace libtorrent
boost::shared_ptr<tracker_connection> shared_from_this()
{
- | ||
relevance 2 | ../include/libtorrent/aux_/session_interface.hpp:130 | the IP voting mechanism should be factored out to its own class, not part of the session |
the IP voting mechanism should be factored out
-to its own class, not part of the session../include/libtorrent/aux_/session_interface.hpp:130#endif
-
-#if TORRENT_USE_ASSERTS
+ | ||
relevance 2 | ../include/libtorrent/aux_/session_interface.hpp:146 | the IP voting mechanism should be factored out to its own class, not part of the session |
the IP voting mechanism should be factored out
+to its own class, not part of the session../include/libtorrent/aux_/session_interface.hpp:146#if TORRENT_USE_ASSERTS
virtual bool is_single_thread() const = 0;
virtual bool has_peer(peer_connection const* p) const = 0;
virtual bool any_torrent_has_peer(peer_connection const* p) const = 0;
virtual bool is_posting_torrent_updates() const = 0;
#endif
+ protected:
+ ~session_logger() {}
};
-#endif // TORRENT_LOGGING || TORRENT_USE_ASSERTS
+#endif // TORRENT_DISABLE_LOGGING || TORRENT_USE_ASSERTS
// TOOD: 2 make this interface a lot smaller. It could be split up into
// several smaller interfaces. Each subsystem could then limit the size
// of the mock object to test it.
struct session_interface
: buffer_allocator_interface
-#if defined TORRENT_LOGGING || TORRENT_USE_ASSERTS
+#if !defined TORRENT_DISABLE_LOGGING || TORRENT_USE_ASSERTS
, session_logger
#endif
{
@@ -1800,7 +2733,7 @@ to its own class, not part of the session../include/libtorrent/aux_/ses
typedef boost::function<void(error_code const&, std::vector<address> const&)>
callback_t;
- | ||
relevance 2 | ../include/libtorrent/aux_/session_interface.hpp:155 | remove this. There's already get_resolver() |
remove this. There's already get_resolver()../include/libtorrent/aux_/session_interface.hpp:155 source_peer = 2,
+ | ||
relevance 2 | ../include/libtorrent/aux_/session_interface.hpp:171 | remove this. There's already get_resolver() |
remove this. There's already get_resolver()../include/libtorrent/aux_/session_interface.hpp:171 source_peer = 2,
source_tracker = 4,
source_router = 8
};
@@ -1851,9 +2784,9 @@ to its own class, not part of the session../include/libtorrent/aux_/ses
virtual boost::shared_ptr<torrent> delay_load_torrent(sha1_hash const& info_hash
, peer_connection* pc) = 0;
virtual void insert_torrent(sha1_hash const& ih, boost::shared_ptr<torrent> const& t
- | ||
relevance 2 | ../include/libtorrent/aux_/session_interface.hpp:210 | factor out the thread pool for socket jobs into a separate class used to (potentially) issue socket write calls onto multiple threads |
factor out the thread pool for socket jobs into a separate
+ | ||
relevance 2 | ../include/libtorrent/aux_/session_interface.hpp:226 | factor out the thread pool for socket jobs into a separate class used to (potentially) issue socket write calls onto multiple threads |
factor out the thread pool for socket jobs into a separate
class
-used to (potentially) issue socket write calls onto multiple threads../include/libtorrent/aux_/session_interface.hpp:210 virtual int num_torrents() const = 0;
+used to (potentially) issue socket write calls onto multiple threads../include/libtorrent/aux_/session_interface.hpp:226 virtual int num_torrents() const = 0;
virtual peer_id const& get_peer_id() const = 0;
@@ -1889,7 +2822,12 @@ used to (potentially) issue socket write calls onto multiple threads../
virtual bool verify_bound_address(address const& addr, bool utp
, error_code& ec) = 0;
- | ||
relevance 1 | ../src/disk_io_thread.cpp:233 | it would be nice to have the number of threads be set dynamically |
it would be nice to have the number of threads be set dynamically../src/disk_io_thread.cpp:233 std::pair<block_cache::iterator, block_cache::iterator> pieces
+#ifndef TORRENT_DISABLE_MUTABLE_TORRENTS
+ virtual std::vector<boost::shared_ptr<torrent> > find_collection(
+ std::string const& collection) const = 0;
+#endif
+
+ | ||
relevance 1 | ../src/disk_io_thread.cpp:206 | it would be nice to have the number of threads be set dynamically |
it would be nice to have the number of threads be set dynamically../src/disk_io_thread.cpp:206 std::pair<block_cache::iterator, block_cache::iterator> pieces
= m_disk_cache.all_pieces();
TORRENT_ASSERT(pieces.first == pieces.second);
#endif
@@ -1940,8 +2878,8 @@ used to (potentially) issue socket write calls onto multiple threads../
m_threads.resize(m_num_threads);
}
}
- | ||
relevance 1 | ../src/http_seed_connection.cpp:124 | in chunked encoding mode, this assert won't hold. the chunk headers should be subtracted from the receive_buffer_size |
in chunked encoding mode, this assert won't hold.
-the chunk headers should be subtracted from the receive_buffer_size../src/http_seed_connection.cpp:124 boost::optional<piece_block_progress>
+ | ||
relevance 1 | ../src/http_seed_connection.cpp:123 | in chunked encoding mode, this assert won't hold. the chunk headers should be subtracted from the receive_buffer_size |
in chunked encoding mode, this assert won't hold.
+the chunk headers should be subtracted from the receive_buffer_size../src/http_seed_connection.cpp:123 boost::optional<piece_block_progress>
http_seed_connection::downloading_piece_progress() const
{
if (m_requests.empty())
@@ -1992,8 +2930,8 @@ the chunk headers should be subtracted from the receive_buffer_size../s
std::string request;
request.reserve(400);
- | ||
relevance 1 | ../src/session_impl.cpp:5210 | report the proper address of the router as the source IP of this understanding of our external address, instead of the empty address |
report the proper address of the router as the source IP of
-this understanding of our external address, instead of the empty address../src/session_impl.cpp:5210 void session_impl::on_port_mapping(int mapping, address const& ip, int port
+ | ||
relevance 1 | ../src/session_impl.cpp:5227 | report the proper address of the router as the source IP of this understanding of our external address, instead of the empty address |
report the proper address of the router as the source IP of
+this understanding of our external address, instead of the empty address../src/session_impl.cpp:5227 void session_impl::on_port_mapping(int mapping, address const& ip, int port
, error_code const& ec, int map_transport)
{
TORRENT_ASSERT(is_single_thread());
@@ -2004,8 +2942,8 @@ this understanding of our external address, instead of the empty address
| ||
relevance 1 | ../src/session_impl.cpp:6412 | we only need to do this if our global IPv4 address has changed since the DHT (currently) only supports IPv4. Since restarting the DHT is kind of expensive, it would be nice to not do it unnecessarily |
we only need to do this if our global IPv4 address has changed
+ | ||
relevance 1 | ../src/session_impl.cpp:6497 | we only need to do this if our global IPv4 address has changed since the DHT (currently) only supports IPv4. Since restarting the DHT is kind of expensive, it would be nice to not do it unnecessarily |
we only need to do this if our global IPv4 address has changed
since the DHT (currently) only supports IPv4. Since restarting the DHT
-is kind of expensive, it would be nice to not do it unnecessarily../src/session_impl.cpp:6412#endif
+is kind of expensive, it would be nice to not do it unnecessarily../src/session_impl.cpp:6497#endif
if (!m_external_ip.cast_vote(ip, source_type, source)) return;
-#if defined TORRENT_LOGGING
+#ifndef TORRENT_DISABLE_LOGGING
session_log(" external IP updated");
#endif
if (m_alerts.should_post<external_ip_alert>())
- m_alerts.post_alert(external_ip_alert(ip));
+ m_alerts.emplace_alert<external_ip_alert>(ip);
for (torrent_map::iterator i = m_torrents.begin()
, end(m_torrents.end()); i != end; ++i)
@@ -2097,12 +3035,12 @@ is kind of expensive, it would be nice to not do it unnecessarily../src
, boost::function<void(char*)> const& handler)
{
return m_disk_thread.async_allocate_disk_buffer(category, handler);
- | ||
relevance 1 | ../src/torrent.cpp:1157 | make this depend on the error and on the filesystem the files are being downloaded to. If the error is no_space_left_on_device and the filesystem doesn't support sparse files, only zero the priorities of the pieces that are at the tails of all files, leaving everything up to the highest written piece in each file |
make this depend on the error and on the filesystem the
+ | ||
relevance 1 | ../src/torrent.cpp:1168 | make this depend on the error and on the filesystem the files are being downloaded to. If the error is no_space_left_on_device and the filesystem doesn't support sparse files, only zero the priorities of the pieces that are at the tails of all files, leaving everything up to the highest written piece in each file |
make this depend on the error and on the filesystem the
files are being downloaded to. If the error is no_space_left_on_device
and the filesystem doesn't support sparse files, only zero the priorities
of the pieces that are at the tails of all files, leaving everything
-up to the highest written piece in each file../src/torrent.cpp:1157 alerts().post_alert(file_error_alert(j->error.ec
- , resolve_filename(j->error.file), j->error.operation_str(), get_handle()));
+up to the highest written piece in each file../src/torrent.cpp:1168 alerts().emplace_alert<file_error_alert>(j->error.ec
+ , resolve_filename(j->error.file), j->error.operation_str(), get_handle());
// put the torrent in an error-state
set_error(j->error.ec, j->error.file);
@@ -2131,6 +3069,9 @@ up to the highest written piece in each file../src/torrent.cpp:1157../src/torrent.cpp:1157 | ||
relevance 1 | ../src/torrent.cpp:6877 | save the send_stats state instead of throwing them away it may pose an issue when downgrading though |
save the send_stats state instead of throwing them away
-it may pose an issue when downgrading though../src/torrent.cpp:6877 for (int k = 0; k < bits; ++k)
+ | ||
relevance 1 | ../src/torrent.cpp:6956 | save the send_stats state instead of throwing them away it may pose an issue when downgrading though |
save the send_stats state instead of throwing them away
+it may pose an issue when downgrading though../src/torrent.cpp:6956 for (int k = 0; k < bits; ++k)
v |= (info[j*8+k].state == piece_picker::block_info::state_finished)
? (1 << k) : 0;
bitmask.append(1, v);
@@ -2204,9 +3142,9 @@ it may pose an issue when downgrading though../src/torrent.cpp:6877 | ||
relevance 1 | ../src/torrent.cpp:7970 | should disconnect all peers that have the pieces we have not just seeds. It would be pretty expensive to check all pieces for all peers though |
should disconnect all peers that have the pieces we have
+ | ||
relevance 1 | ../src/torrent.cpp:8057 | should disconnect all peers that have the pieces we have not just seeds. It would be pretty expensive to check all pieces for all peers though |
should disconnect all peers that have the pieces we have
not just seeds. It would be pretty expensive to check all pieces
-for all peers though../src/torrent.cpp:7970 set_state(torrent_status::finished);
+for all peers though../src/torrent.cpp:8057 set_state(torrent_status::finished);
set_queue_position(-1);
m_became_finished = m_ses.session_time();
@@ -2234,15 +3172,15 @@ for all peers though../src/torrent.cpp:7970../src/torrent.cpp:7970relevance 1 | ../include/libtorrent/ip_voter.hpp:122 | instead, have one instance per possible subnet, global IPv4, global IPv6, loopback, 192.168.x.x, 10.x.x.x, etc. |
|
instead, have one instance per possible subnet, global IPv4, global IPv6, loopback, 192.168.x.x, 10.x.x.x, etc.../include/libtorrent/ip_voter.hpp:122 // away all the votes and started from scratch, in case
+ | ||
relevance 1 | ../include/libtorrent/ip_voter.hpp:124 | instead, have one instance per possible subnet, global IPv4, global IPv6, loopback, 192.168.x.x, 10.x.x.x, etc. |
instead, have one instance per possible subnet, global IPv4, global IPv6, loopback, 192.168.x.x, 10.x.x.x, etc.../include/libtorrent/ip_voter.hpp:124 // away all the votes and started from scratch, in case
// our IP has changed
- ptime m_last_rotate;
+ time_point m_last_rotate;
};
// this keeps track of multiple external IPs (for now, just IPv6 and IPv4, but
@@ -2284,15 +3222,15 @@ for all peers though../src/torrent.cpp:7970relevance 1 | ../include/libtorrent/web_peer_connection.hpp:122 | if we make this be a disk_buffer_holder instead we would save a copy sometimes use allocate_disk_receive_buffer and release_disk_receive_buffer |
|
if we make this be a disk_buffer_holder instead
+ | ||
relevance 1 | ../include/libtorrent/web_peer_connection.hpp:120 | if we make this be a disk_buffer_holder instead we would save a copy sometimes use allocate_disk_receive_buffer and release_disk_receive_buffer |
if we make this be a disk_buffer_holder instead
we would save a copy sometimes
-use allocate_disk_receive_buffer and release_disk_receive_buffer../include/libtorrent/web_peer_connection.hpp:122
+use allocate_disk_receive_buffer and release_disk_receive_buffer../include/libtorrent/web_peer_connection.hpp:120
// returns the block currently being
// downloaded. And the progress of that
// block. If the peer isn't downloading
// a piece for the moment, the boost::optional
// will be invalid.
- boost::optional<piece_block_progress> downloading_piece_progress() const;
+ boost::optional<piece_block_progress> downloading_piece_progress() const TORRENT_OVERRIDE;
void handle_padfile(buffer::const_interval& recv_buffer);
@@ -2337,7 +3275,7 @@ use allocate_disk_receive_buffer and release_disk_receive_buffer../incl
};
}
- | ||
relevance 0 | ../test/test_block_cache.cpp:475 | test try_evict_blocks |
test try_evict_blocks../test/test_block_cache.cpp:475 | ||
relevance 0 | ../test/test_block_cache.cpp:476 | test evicting volatile pieces, to see them be removed |
test evicting volatile pieces, to see them be removed../test/test_block_cache.cpp:476 | ||
relevance 0 | ../test/test_block_cache.cpp:477 | test evicting dirty pieces |
test evicting dirty pieces../test/test_block_cache.cpp:477 | ||
relevance 0 | ../test/test_block_cache.cpp:478 | test free_piece |
test free_piece../test/test_block_cache.cpp:478 | ||
relevance 0 | ../test/test_block_cache.cpp:479 | test abort_dirty |
test abort_dirty../test/test_block_cache.cpp:479 | ||
relevance 0 | ../test/test_block_cache.cpp:480 | test unaligned reads |
test unaligned reads../test/test_block_cache.cpp:480 // it's supposed to be a cache hit
+ | ||
relevance 0 | ../test/test_block_cache.cpp:469 | test try_evict_blocks |
test try_evict_blocks../test/test_block_cache.cpp:469 | ||
relevance 0 | ../test/test_block_cache.cpp:470 | test evicting volatile pieces, to see them be removed |
test evicting volatile pieces, to see them be removed../test/test_block_cache.cpp:470 | ||
relevance 0 | ../test/test_block_cache.cpp:471 | test evicting dirty pieces |
test evicting dirty pieces../test/test_block_cache.cpp:471 | ||
relevance 0 | ../test/test_block_cache.cpp:472 | test free_piece |
test free_piece../test/test_block_cache.cpp:472 | ||
relevance 0 | ../test/test_block_cache.cpp:473 | test abort_dirty |
test abort_dirty../test/test_block_cache.cpp:473 | ||
relevance 0 | ../test/test_block_cache.cpp:474 | test unaligned reads |
test unaligned reads../test/test_block_cache.cpp:474 // it's supposed to be a cache hit
TEST_CHECK(ret >= 0);
// return the reference to the buffer we just read
RETURN_BUFFER;
@@ -2360,41 +3298,40 @@ int test_main()
return 0;
}
- | ||
relevance 0 | ../test/test_dht.cpp:436 | test obfuscated_get_peers |
test obfuscated_get_peers../test/test_dht.cpp:436 g_got_peers.insert(g_got_peers.end(), peers.begin(), peers.end());
-}
-
-std::vector<dht::item> g_got_items;
-dht::item g_put_item;
-int g_put_count;
-
-bool get_item_cb(dht::item& i)
-{
- if (!i.empty())
- g_got_items.push_back(i);
- if (!g_put_item.empty())
- {
- i = g_put_item;
- g_put_count++;
- return true;
- }
+ | ||
relevance 0 | ../test/test_dht.cpp:438 | test obfuscated_get_peers |
test obfuscated_get_peers../test/test_dht.cpp:438 }
return false;
}
+struct obs : dht::dht_observer
+{
+ virtual void set_external_address(address const& addr
+ , address const& source) TORRENT_OVERRIDE
+ {}
+
+ virtual address external_address() TORRENT_OVERRIDE
+ {
+ return address_v4::from_string("236.0.0.1");
+ }
+ virtual void get_peers(sha1_hash const& ih) TORRENT_OVERRIDE {}
+ virtual void outgoing_get_peers(sha1_hash const& target
+ , sha1_hash const& sent_target, udp::endpoint const& ep) TORRENT_OVERRIDE {}
+ virtual void announce(sha1_hash const& ih, address const& addr, int port) TORRENT_OVERRIDE {}
+};
+
int test_main()
{
dht_settings sett;
sett.max_torrents = 4;
sett.max_dht_items = 4;
sett.enforce_node_id = false;
- address ext = address::from_string("236.0.0.1");
mock_socket s;
- print_alert ad;
+ obs observer;
counters cnt;
- dht::node_impl node(&ad, &s, sett, node_id(0), ext, 0, cnt);
+ dht::node node(&s, sett, node_id(0), &observer, cnt);
// DHT should be running on port 48199 now
- lazy_entry response;
- lazy_entry const* parsed[11];
+ bdecode_node response;
+ bdecode_node parsed[11];
char error_string[200];
bool ret;
@@ -2403,16 +3340,41 @@ bool get_item_cb(dht::item& i)
send_dht_request(node, "ping", source, &response, "10");
dht::key_desc_t pong_desc[] = {
- {"y", lazy_entry::string_t, 1, 0},
- {"t", lazy_entry::string_t, 2, 0},
- {"r", lazy_entry::dict_t, 0, key_desc_t::parse_children},
- {"id", lazy_entry::string_t, 20, key_desc_t::last_child},
+ {"y", bdecode_node::string_t, 1, 0},
+ {"t", bdecode_node::string_t, 2, 0},
+ {"r", bdecode_node::dict_t, 0, key_desc_t::parse_children},
+ {"id", bdecode_node::string_t, 20, key_desc_t::last_child},
};
fprintf(stderr, "msg: %s\n", print_entry(response).c_str());
- ret = dht::verify_message(&response, pong_desc, parsed, 4, error_string, sizeof(error_string));
- | ||
relevance 0 | ../test/test_metadata_extension.cpp:91 | it would be nice to test reversing which session is making the connection as well |
it would be nice to test reversing
-which session is making the connection as well../test/test_metadata_extension.cpp:91 , int timeout)
+ ret = dht::verify_message(response, pong_desc, parsed, 4, error_string
+ , sizeof(error_string));
+ | ||
relevance 0 | ../test/test_file_storage.cpp:210 | test file_storage::optimize too |
test file_storage::optimize too../test/test_file_storage.cpp:210 | ||
relevance 0 | ../test/test_file_storage.cpp:211 | test map_block |
test map_block../test/test_file_storage.cpp:211 | ||
relevance 0 | ../test/test_file_storage.cpp:212 | test piece_size(int piece) |
test piece_size(int piece)../test/test_file_storage.cpp:212 | ||
relevance 0 | ../test/test_file_storage.cpp:213 | test file_index_at_offset |
test file_index_at_offset../test/test_file_storage.cpp:213 | ||
relevance 0 | ../test/test_file_storage.cpp:214 | test file attributes |
test file attributes../test/test_file_storage.cpp:214 | ||
relevance 0 | ../test/test_file_storage.cpp:215 | test symlinks |
test symlinks../test/test_file_storage.cpp:215 | ||
relevance 0 | ../test/test_file_storage.cpp:216 | test pad_files |
test pad_files../test/test_file_storage.cpp:216 | ||
relevance 0 | ../test/test_file_storage.cpp:217 | test reorder_file (make sure internal_file_entry::swap() is used) |
test reorder_file (make sure internal_file_entry::swap() is used)../test/test_file_storage.cpp:217 TEST_EQUAL(rq.piece, 7);
+ TEST_EQUAL(rq.start, 298);
+ TEST_EQUAL(rq.length, 841);
+ }
+
+ {
+ // test file_path_hash and path_hash. Make sure we can detect a path
+ // whose name collides with
+ file_storage fs;
+ fs.set_piece_length(512);
+ fs.add_file(combine_path("temp_storage", "Foo"), 17);
+ fs.add_file(combine_path("temp_storage", "foo"), 612);
+
+ fprintf(stderr, "path: %s\n", fs.file_path(0).c_str());
+ fprintf(stderr, "file: %s\n", fs.file_path(1).c_str());
+ boost::uint32_t file_hash0 = fs.file_path_hash(0, "a");
+ boost::uint32_t file_hash1 = fs.file_path_hash(1, "a");
+ TEST_EQUAL(file_hash0, file_hash1);
+ }
+
+
+ return 0;
+}
+
+ | ||
relevance 0 | ../test/test_metadata_extension.cpp:93 | it would be nice to test reversing which session is making the connection as well |
it would be nice to test reversing
+which session is making the connection as well../test/test_metadata_extension.cpp:93 , int timeout)
{
using namespace libtorrent;
namespace lt = libtorrent;
@@ -2463,32 +3425,31 @@ which session is making the connection as well../test/test_metadata_ext
ses1.apply_settings(pack);
ses2.apply_settings(pack);
- | ||
relevance 0 | ../test/test_peer_list.cpp:581 | test erasing peers |
test erasing peers../test/test_peer_list.cpp:581 | ||
relevance 0 | ../test/test_peer_list.cpp:582 | test logic for which connection to keep when receiving an incoming connection to the same peer as we just made an outgoing connection to |
test logic for which connection to keep when receiving an incoming
-connection to the same peer as we just made an outgoing connection to../test/test_peer_list.cpp:582 | ||
relevance 0 | ../test/test_peer_list.cpp:584 | test update_peer_port with allow_multiple_connections_per_ip and without |
test update_peer_port with allow_multiple_connections_per_ip and without../test/test_peer_list.cpp:584 | ||
relevance 0 | ../test/test_peer_list.cpp:585 | test add i2p peers |
test add i2p peers../test/test_peer_list.cpp:585 | ||
relevance 0 | ../test/test_peer_list.cpp:586 | test allow_i2p_mixed |
test allow_i2p_mixed../test/test_peer_list.cpp:586 | ||
relevance 0 | ../test/test_peer_list.cpp:587 | test insert_peer failing with all error conditions |
test insert_peer failing with all error conditions../test/test_peer_list.cpp:587 | ||
relevance 0 | ../test/test_peer_list.cpp:588 | test IPv6 |
test IPv6../test/test_peer_list.cpp:588 | ||
relevance 0 | ../test/test_peer_list.cpp:589 | test connect_to_peer() failing |
test connect_to_peer() failing../test/test_peer_list.cpp:589 | ||
relevance 0 | ../test/test_peer_list.cpp:590 | test connection_closed |
test connection_closed../test/test_peer_list.cpp:590 TEST_EQUAL(p.num_peers(), 2);
- TEST_EQUAL(p.num_connect_candidates(), 2);
+ | ||
relevance 0 | ../test/test_peer_list.cpp:921 | test erasing peers |
test erasing peers../test/test_peer_list.cpp:921 | ||
relevance 0 | ../test/test_peer_list.cpp:922 | test update_peer_port with allow_multiple_connections_per_ip and without |
test update_peer_port with allow_multiple_connections_per_ip and without../test/test_peer_list.cpp:922 | ||
relevance 0 | ../test/test_peer_list.cpp:923 | test add i2p peers |
test add i2p peers../test/test_peer_list.cpp:923 | ||
relevance 0 | ../test/test_peer_list.cpp:924 | test allow_i2p_mixed |
test allow_i2p_mixed../test/test_peer_list.cpp:924 | ||
relevance 0 | ../test/test_peer_list.cpp:925 | test insert_peer failing with all error conditions |
test insert_peer failing with all error conditions../test/test_peer_list.cpp:925 | ||
relevance 0 | ../test/test_peer_list.cpp:926 | test IPv6 |
test IPv6../test/test_peer_list.cpp:926 | ||
relevance 0 | ../test/test_peer_list.cpp:927 | test connect_to_peer() failing |
test connect_to_peer() failing../test/test_peer_list.cpp:927 | ||
relevance 0 | ../test/test_peer_list.cpp:928 | test connection_closed |
test connection_closed../test/test_peer_list.cpp:928 | ||
relevance 0 | ../test/test_peer_list.cpp:929 | connect candidates recalculation when incrementing failcount |
connect candidates recalculation when incrementing failcount../test/test_peer_list.cpp:929 torrent_peer* peer4 = add_peer(p, st, ep("10.0.0.4", 8080));
+ TEST_CHECK(peer4);
+ TEST_EQUAL(p.num_peers(), 4);
+ torrent_peer* peer5 = add_peer(p, st, ep("10.0.0.5", 8080));
+ TEST_CHECK(peer5);
+ TEST_EQUAL(p.num_peers(), 5);
+ torrent_peer* peer6 = p.add_peer(ep("10.0.0.6", 8080), 0, 0, &st);
+ TEST_CHECK(peer6 == NULL);
+ TEST_EQUAL(p.num_peers(), 5);
- TEST_EQUAL(p.has_peer(peer1), true);
- TEST_EQUAL(p.has_peer(peer2), true);
-
- ip_filter filter;
- filter.add_rule(address_v4::from_string("10.10.0.1")
- , address_v4::from_string("10.10.0.1"), ip_filter::blocked);
- p.apply_ip_filter(filter, &st, banned);
- TEST_EQUAL(st.erased.size(), 1);
- st.erased.clear();
-
- TEST_EQUAL(p.num_peers(), 1);
- TEST_EQUAL(p.num_connect_candidates(), 1);
-
- TEST_EQUAL(p.has_peer(peer1), false);
- TEST_EQUAL(p.has_peer(peer2), true);
+ // one of the connection should have been removed
+ TEST_EQUAL(has_peer(p, ep("10.0.0.1", 8080))
+ + has_peer(p, ep("10.0.0.2", 8080))
+ + has_peer(p, ep("10.0.0.3", 8080))
+ + has_peer(p, ep("10.0.0.4", 8080))
+ + has_peer(p, ep("10.0.0.5", 8080))
+ + has_peer(p, ep("10.0.0.6", 8080))
+ , 5);
}
return 0;
}
- | ||
relevance 0 | ../test/test_primitives.cpp:213 | test the case where we have > 120 samples (and have the base delay actually be updated) |
test the case where we have > 120 samples (and have the base delay actually be updated)../test/test_primitives.cpp:213 | ||
relevance 0 | ../test/test_primitives.cpp:214 | test the case where a sample is lower than the history entry but not lower than the base |
test the case where a sample is lower than the history entry but not lower than the base../test/test_primitives.cpp:214 TEST_CHECK(!filter.find(k3));
+ | ||
relevance 0 | ../test/test_primitives.cpp:212 | test the case where we have > 120 samples (and have the base delay actually be updated) |
test the case where we have > 120 samples (and have the base delay actually be updated)../test/test_primitives.cpp:212 | ||
relevance 0 | ../test/test_primitives.cpp:213 | test the case where a sample is lower than the history entry but not lower than the base |
test the case where a sample is lower than the history entry but not lower than the base../test/test_primitives.cpp:213 TEST_CHECK(!filter.find(k3));
TEST_CHECK(filter.find(k4));
// test timestamp_history
@@ -2539,7 +3500,81 @@ connection to the same peer as we just made an outgoing connection to..
sanitize_append_path_element(path, "a...b", 5);
TEST_EQUAL(path, "a...b");
- | ||
relevance 0 | ../test/test_resume.cpp:340 | test all other resume flags here too. This would require returning more than just the torrent_status from test_resume_flags. Also http seeds and trackers for instance |
test all other resume flags here too. This would require returning
+ | ||
relevance 0 | ../test/test_resolve_links.cpp:80 | test files with different piece size (negative test) |
test files with different piece size (negative test)../test/test_resolve_links.cpp:80 { "test2", "test1_pad_files", 0},
+ { "test3", "test1_pad_files", 0},
+ { "test2", "test1_single", 0},
+
+ // these are all padded. The first small file will accidentally also
+ // match, even though it's not tail padded, the following file is identical
+ { "test2_pad_files", "test1_pad_files", 2},
+ { "test3_pad_files", "test1_pad_files", 2},
+ { "test3_pad_files", "test2_pad_files", 2},
+ { "test1_pad_files", "test2_pad_files", 2},
+ { "test1_pad_files", "test3_pad_files", 2},
+ { "test2_pad_files", "test3_pad_files", 2},
+
+ // one might expect this to work, but since the tail of the single file
+ // torrent is not padded, the last piece hash won't match
+ { "test1_pad_files", "test1_single", 0},
+
+ // if it's padded on the other hand, it will work
+ { "test1_pad_files", "test1_single_padded", 1},
+
+};
+
+ | ||
relevance 0 | ../test/test_resolve_links.cpp:83 | it would be nice to test resolving of more than just 2 files as well. like 3 single file torrents merged into one, resolving all 3 files. |
it would be nice to test resolving of more than just 2 files as well.
+like 3 single file torrents merged into one, resolving all 3 files.../test/test_resolve_links.cpp:83 { "test2", "test1_single", 0},
+
+ // these are all padded. The first small file will accidentally also
+ // match, even though it's not tail padded, the following file is identical
+ { "test2_pad_files", "test1_pad_files", 2},
+ { "test3_pad_files", "test1_pad_files", 2},
+ { "test3_pad_files", "test2_pad_files", 2},
+ { "test1_pad_files", "test2_pad_files", 2},
+ { "test1_pad_files", "test3_pad_files", 2},
+ { "test2_pad_files", "test3_pad_files", 2},
+
+ // one might expect this to work, but since the tail of the single file
+ // torrent is not padded, the last piece hash won't match
+ { "test1_pad_files", "test1_single", 0},
+
+ // if it's padded on the other hand, it will work
+ { "test1_pad_files", "test1_single_padded", 1},
+
+};
+
+
+ int test_main()
+{
+
+#ifndef TORRENT_DISABLE_MUTABLE_TORRENTS
+ std::string path = combine_path(parent_path(current_working_directory())
+ , "mutable_test_torrents");
+
+ for (int i = 0; i < sizeof(test_torrents)/sizeof(test_torrents[0]); ++i)
+ {
+ test_torrent_t const& e = test_torrents[i];
+
+ std::string p = combine_path(path, e.filename1) + ".torrent";
+ fprintf(stderr, "loading %s\n", p.c_str());
+ boost::shared_ptr<torrent_info> ti1 = boost::make_shared<torrent_info>(p);
+
+ p = combine_path(path, e.filename2) + ".torrent";
+ fprintf(stderr, "loading %s\n", p.c_str());
+ boost::shared_ptr<torrent_info> ti2 = boost::make_shared<torrent_info>(p);
+
+ fprintf(stderr, "resolving\n");
+ resolve_links l(ti1);
+ l.match(ti2, ".");
+
+ std::vector<resolve_links::link_t> const& links = l.get_links();
+
+ int num_matches = std::count_if(links.begin(), links.end()
+ , boost::bind(&resolve_links::link_t::ti, _1));
+
+ // some debug output in case the test fails
+ if (num_matches > e.expected_matches)
+ | ||
relevance 0 | ../test/test_resume.cpp:340 | test all other resume flags here too. This would require returning more than just the torrent_status from test_resume_flags. Also http seeds and trackers for instance |
test all other resume flags here too. This would require returning
more than just the torrent_status from test_resume_flags. Also http seeds
and trackers for instance../test/test_resume.cpp:340 // resume data overrides the paused flag
fprintf(stderr, "flags: paused\n");
@@ -2565,42 +3600,7 @@ and trackers for instance../test/test_resume.cpp:340relevance 0 | ../test/test_rss.cpp:135 | verify some key state is saved in 'state' |
|
verify some key state is saved in 'state'../test/test_rss.cpp:135 feed_status st;
- f->get_feed_status(&st);
- TEST_CHECK(!st.error);
-
- print_feed(st);
-
- TEST_CHECK(st.items.size() == expect.num_items);
- if (st.items.size() > 0)
- {
- TEST_CHECK(st.items[0].url == expect.first_url);
- TEST_CHECK(st.items[0].size == expect.first_size);
- TEST_CHECK(st.items[0].title == expect.first_title);
- }
-
- entry state;
- f->save_state(state);
-
- fprintf(stderr, "feed_state:\n");
- std::cerr << state.to_string() << "\n";
-
-}
-
-int test_main()
-{
- std::string root_dir = parent_path(current_working_directory());
-
- test_feed(combine_path(root_dir, "eztv.xml"), rss_expect(30, "http://torrent.zoink.it/The.Daily.Show.2012.02.16.(HDTV-LMAO)[VTV].torrent", "The Daily Show 2012-02-16 [HDTV - LMAO]", 183442338));
- test_feed(combine_path(root_dir, "cb.xml"), rss_expect(50, "http://www.clearbits.net/get/1911-norbergfestival-2011.torrent", "Norbergfestival 2011", 1160773632));
- test_feed(combine_path(root_dir, "kat.xml"), rss_expect(25, "http://kat.ph/torrents/benito-di-paula-1975-benito-di-paula-lp-rip-ogg-at-500-jarax4u-t6194897/", "Benito Di Paula - 1975 - Benito Di Paula (LP Rip OGG at 500) [jarax4u]", 168773863));
- test_feed(combine_path(root_dir, "mn.xml"), rss_expect(20, "http://www.mininova.org/get/13203100", "Dexcell - January TwentyTwelve Mix", 137311179));
- test_feed(combine_path(root_dir, "pb.xml"), rss_expect(60, "magnet:?xt=urn:btih:FD4CDDB7BBE722D17A018EFD875EB0695ED7159C&dn=Thompson+Twins+-+1989+-+Big+Trash+%5BMP3%5D", "Thompson Twins - 1989 - Big Trash [MP3]", 100160904));
- test_feed(combine_path(root_dir, "fg.xml"), rss_expect(15, "http://torrage.com/torrent/470BCD2007011E9F31556B36E199D03D948A1A52.torrent", "Top.Gear.S20E04.1080i.ts", -1));
- return 0;
-}
-
- | ||
relevance 0 | ../test/test_ssl.cpp:377 | test using a signed certificate with the wrong info-hash in DN |
test using a signed certificate with the wrong info-hash in DN../test/test_ssl.cpp:377 // in verifying peers
+ | ||
relevance 0 | ../test/test_ssl.cpp:377 | test using a signed certificate with the wrong info-hash in DN |
test using a signed certificate with the wrong info-hash in DN../test/test_ssl.cpp:377 // in verifying peers
ctx.set_verify_mode(context::verify_none, ec);
if (ec)
{
@@ -2651,7 +3651,7 @@ int test_main()
return false;
}
fprintf(stderr, "use_tmp_dh_file \"%s\"\n", dh_params.c_str());
- | ||
relevance 0 | ../test/test_ssl.cpp:475 | also test using a hash that refers to a valid torrent but that differs from the SNI hash |
also test using a hash that refers to a valid torrent
+ | ||
relevance 0 | ../test/test_ssl.cpp:475 | also test using a hash that refers to a valid torrent but that differs from the SNI hash |
also test using a hash that refers to a valid torrent
but that differs from the SNI hash../test/test_ssl.cpp:475 print_alerts(ses1, "ses1", true, true, true, &on_alert);
if (ec)
{
@@ -2703,7 +3703,7 @@ but that differs from the SNI hash../test/test_ssl.cpp:475 | ||
relevance 0 | ../test/test_torrent.cpp:133 | wait for an alert rather than just waiting 10 seconds. This is kind of silly |
wait for an alert rather than just waiting 10 seconds. This is kind of silly../test/test_torrent.cpp:133 TEST_EQUAL(h.file_priorities().size(), info->num_files());
+ | ||
relevance 0 | ../test/test_torrent.cpp:133 | wait for an alert rather than just waiting 10 seconds. This is kind of silly |
wait for an alert rather than just waiting 10 seconds. This is kind of silly../test/test_torrent.cpp:133 TEST_EQUAL(h.file_priorities().size(), info->num_files());
TEST_EQUAL(h.file_priorities()[0], 0);
if (info->num_files() > 1)
TEST_EQUAL(h.file_priorities()[1], 0);
@@ -2729,32 +3729,32 @@ but that differs from the SNI hash../test/test_ssl.cpp:475 | ||
relevance 0 | ../test/test_torrent_parse.cpp:116 | test remap_files |
test remap_files../test/test_torrent_parse.cpp:116 | ||
relevance 0 | ../test/test_torrent_parse.cpp:117 | merkle torrents. specifically torrent_info::add_merkle_nodes and torrent with "root hash" |
merkle torrents. specifically torrent_info::add_merkle_nodes and torrent with "root hash"../test/test_torrent_parse.cpp:117 | ||
relevance 0 | ../test/test_torrent_parse.cpp:118 | torrent with 'p' (padfile) attribute |
torrent with 'p' (padfile) attribute../test/test_torrent_parse.cpp:118 | ||
relevance 0 | ../test/test_torrent_parse.cpp:119 | torrent with 'h' (hidden) attribute |
torrent with 'h' (hidden) attribute../test/test_torrent_parse.cpp:119 | ||
relevance 0 | ../test/test_torrent_parse.cpp:120 | torrent with 'x' (executable) attribute |
torrent with 'x' (executable) attribute../test/test_torrent_parse.cpp:120 | ||
relevance 0 | ../test/test_torrent_parse.cpp:121 | torrent with 'l' (symlink) attribute |
torrent with 'l' (symlink) attribute../test/test_torrent_parse.cpp:121 | ||
relevance 0 | ../test/test_torrent_parse.cpp:122 | creating a merkle torrent (torrent_info::build_merkle_list) |
creating a merkle torrent (torrent_info::build_merkle_list)../test/test_torrent_parse.cpp:122 | ||
relevance 0 | ../test/test_torrent_parse.cpp:123 | torrent with multiple trackers in multiple tiers, making sure we shuffle them (how do you test shuffling?, load it multiple times and make sure it's in different order at least once) |
torrent with multiple trackers in multiple tiers, making sure we shuffle them (how do you test shuffling?, load it multiple times and make sure it's in different order at least once)../test/test_torrent_parse.cpp:123 { "invalid_info.torrent", errors::torrent_missing_info },
+{
+/* {
+ remove("test_torrent_dir2/tmp1");
+ remove("test_torrent_dir2/tmp2");
+ remove("test_torrent_dir2/tmp3");
+ file_storage fs;
+ boost::int64_t file_size = 256 * 1024;
+ fs.add_file("test_torrent_dir2/tmp1", file_size);
+ fs.add_file("test_torrent_dir2/tmp2", file_size);
+ | ||
relevance 0 | ../test/test_torrent_info.cpp:160 | test remap_files |
test remap_files../test/test_torrent_info.cpp:160 | ||
relevance 0 | ../test/test_torrent_info.cpp:161 | merkle torrents. specifically torrent_info::add_merkle_nodes and torrent with "root hash" |
merkle torrents. specifically torrent_info::add_merkle_nodes and torrent with "root hash"../test/test_torrent_info.cpp:161 | ||
relevance 0 | ../test/test_torrent_info.cpp:162 | torrent with 'p' (padfile) attribute |
torrent with 'p' (padfile) attribute../test/test_torrent_info.cpp:162 | ||
relevance 0 | ../test/test_torrent_info.cpp:163 | torrent with 'h' (hidden) attribute |
torrent with 'h' (hidden) attribute../test/test_torrent_info.cpp:163 | ||
relevance 0 | ../test/test_torrent_info.cpp:164 | torrent with 'x' (executable) attribute |
torrent with 'x' (executable) attribute../test/test_torrent_info.cpp:164 | ||
relevance 0 | ../test/test_torrent_info.cpp:165 | torrent with 'l' (symlink) attribute |
torrent with 'l' (symlink) attribute../test/test_torrent_info.cpp:165 | ||
relevance 0 | ../test/test_torrent_info.cpp:166 | creating a merkle torrent (torrent_info::build_merkle_list) |
creating a merkle torrent (torrent_info::build_merkle_list)../test/test_torrent_info.cpp:166 | ||
relevance 0 | ../test/test_torrent_info.cpp:167 | torrent with multiple trackers in multiple tiers, making sure we shuffle them (how do you test shuffling?, load it multiple times and make sure it's in different order at least once) |
torrent with multiple trackers in multiple tiers, making sure we shuffle them (how do you test shuffling?, load it multiple times and make sure it's in different order at least once)../test/test_torrent_info.cpp:167 | ||
relevance 0 | ../test/test_torrent_info.cpp:168 | sanitize_append_path_element with all kinds of UTF-8 sequences, including invalid ones |
sanitize_append_path_element with all kinds of UTF-8 sequences, including invalid ones../test/test_torrent_info.cpp:168 | ||
relevance 0 | ../test/test_torrent_info.cpp:169 | torrents with a missing name |
torrents with a missing name../test/test_torrent_info.cpp:169 | ||
relevance 0 | ../test/test_torrent_info.cpp:170 | torrents with a zero-length name |
torrents with a zero-length name../test/test_torrent_info.cpp:170 | ||
relevance 0 | ../test/test_torrent_info.cpp:171 | torrents with a merkle tree and add_merkle_nodes |
torrents with a merkle tree and add_merkle_nodes../test/test_torrent_info.cpp:171 | ||
relevance 0 | ../test/test_torrent_info.cpp:172 | torrent with a non-dictionary info-section |
torrent with a non-dictionary info-section../test/test_torrent_info.cpp:172 | ||
relevance 0 | ../test/test_torrent_info.cpp:173 | torrents with DHT nodes |
torrents with DHT nodes../test/test_torrent_info.cpp:173 | ||
relevance 0 | ../test/test_torrent_info.cpp:174 | torrent with url-list as a single string |
torrent with url-list as a single string../test/test_torrent_info.cpp:174 | ||
relevance 0 | ../test/test_torrent_info.cpp:175 | torrent with http seed as a single string |
torrent with http seed as a single string../test/test_torrent_info.cpp:175 | ||
relevance 0 | ../test/test_torrent_info.cpp:176 | torrent with a comment |
torrent with a comment../test/test_torrent_info.cpp:176 | ||
relevance 0 | ../test/test_torrent_info.cpp:177 | torrent with an SSL cert |
torrent with an SSL cert../test/test_torrent_info.cpp:177 | ||
relevance 0 | ../test/test_torrent_info.cpp:178 | torrent with attributes (executable and hidden) |
torrent with attributes (executable and hidden)../test/test_torrent_info.cpp:178 | ||
relevance 0 | ../test/test_torrent_info.cpp:179 | torrent_info::add_tracker |
torrent_info::add_tracker../test/test_torrent_info.cpp:179 | ||
relevance 0 | ../test/test_torrent_info.cpp:180 | torrent_info::add_url_seed |
torrent_info::add_url_seed../test/test_torrent_info.cpp:180 | ||
relevance 0 | ../test/test_torrent_info.cpp:181 | torrent_info::add_http_seed |
torrent_info::add_http_seed../test/test_torrent_info.cpp:181 | ||
relevance 0 | ../test/test_torrent_info.cpp:182 | torrent_info::unload |
torrent_info::unload../test/test_torrent_info.cpp:182 | ||
relevance 0 | ../test/test_torrent_info.cpp:183 | torrent_info constructor that takes an invalid bencoded buffer |
torrent_info constructor that takes an invalid bencoded buffer../test/test_torrent_info.cpp:183 | ||
relevance 0 | ../test/test_torrent_info.cpp:184 | verify_encoding with a string that triggers character replacement |
verify_encoding with a string that triggers character replacement../test/test_torrent_info.cpp:184 { "invalid_info.torrent", errors::torrent_missing_info },
{ "string.torrent", errors::torrent_is_no_dict },
{ "negative_size.torrent", errors::torrent_invalid_length },
{ "negative_file_size.torrent", errors::torrent_invalid_length },
@@ -2775,37 +3775,37 @@ namespace libtorrent
}
- int test_main()
+int test_torrent_parse()
{
error_code ec;
- // test merkle_*() functions
+ // test sanitize_append_path_element
- // this is the structure:
- // 0
- // 1 2
- // 3 4 5 6
- // 7 8 9 10 11 12 13 14
- // num_leafs = 8
+ std::string path;
- TEST_EQUAL(merkle_num_leafs(1), 1);
- TEST_EQUAL(merkle_num_leafs(2), 2);
- TEST_EQUAL(merkle_num_leafs(3), 4);
- TEST_EQUAL(merkle_num_leafs(4), 4);
- TEST_EQUAL(merkle_num_leafs(5), 8);
- TEST_EQUAL(merkle_num_leafs(6), 8);
- TEST_EQUAL(merkle_num_leafs(7), 8);
- TEST_EQUAL(merkle_num_leafs(8), 8);
- TEST_EQUAL(merkle_num_leafs(9), 16);
- TEST_EQUAL(merkle_num_leafs(10), 16);
- TEST_EQUAL(merkle_num_leafs(11), 16);
- TEST_EQUAL(merkle_num_leafs(12), 16);
- TEST_EQUAL(merkle_num_leafs(13), 16);
- TEST_EQUAL(merkle_num_leafs(14), 16);
- TEST_EQUAL(merkle_num_leafs(15), 16);
- TEST_EQUAL(merkle_num_leafs(16), 16);
- TEST_EQUAL(merkle_num_leafs(17), 32);
- | ||
relevance 0 | ../test/test_tracker.cpp:198 | test parse peers6 |
test parse peers6../test/test_tracker.cpp:198 | ||
relevance 0 | ../test/test_tracker.cpp:199 | test parse tracker-id |
test parse tracker-id../test/test_tracker.cpp:199 | ||
relevance 0 | ../test/test_tracker.cpp:200 | test parse failure-reason |
test parse failure-reason../test/test_tracker.cpp:200 | ||
relevance 0 | ../test/test_tracker.cpp:201 | test all failure paths, including invalid bencoding not a dictionary no files entry in scrape response no info-hash entry in scrape response malformed peers in peer list of dictionaries uneven number of bytes in peers and peers6 string responses |
test all failure paths, including
+ path.clear();
+ sanitize_append_path_element(path,
+ "abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_"
+ "abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_"
+ "abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_", 250);
+ sanitize_append_path_element(path,
+ "abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_"
+ "abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_"
+ "abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcde.test", 250);
+#ifdef TORRENT_WINDOWS
+ TEST_EQUAL(path,
+ "abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_"
+ "abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_"
+ "abcdefghi_abcdefghi_abcdefghi_abcdefghi_\\"
+ "abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_"
+ "abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_"
+ "abcdefghi_abcdefghi_abcdefghi_abcdefghi_.test");
+#else
+ TEST_EQUAL(path,
+ "abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_"
+ "abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_abcdefghi_"
+ "abcdefghi_abcdefghi_abcdefghi_abcdefghi_/"
+ | ||
relevance 0 | ../test/test_tracker.cpp:198 | test parse peers6 |
test parse peers6../test/test_tracker.cpp:198 | ||
relevance 0 | ../test/test_tracker.cpp:199 | test parse tracker-id |
test parse tracker-id../test/test_tracker.cpp:199 | ||
relevance 0 | ../test/test_tracker.cpp:200 | test parse failure-reason |
test parse failure-reason../test/test_tracker.cpp:200 | ||
relevance 0 | ../test/test_tracker.cpp:201 | test all failure paths, including invalid bencoding not a dictionary no files entry in scrape response no info-hash entry in scrape response malformed peers in peer list of dictionaries uneven number of bytes in peers and peers6 string responses |
test all failure paths, including
invalid bencoding
not a dictionary
no files entry in scrape response
@@ -2862,7 +3862,7 @@ int test_main()
snprintf(tracker_url, sizeof(tracker_url), "http://127.0.0.1:%d/announce", http_port);
t->add_tracker(tracker_url, 0);
- | ||
relevance 0 | ../test/test_transfer.cpp:288 | factor out the disk-full test into its own unit test |
factor out the disk-full test into its own unit test../test/test_transfer.cpp:288 print_alerts(ses1, "ses1", true, true, true, &on_alert);
+ | ||
relevance 0 | ../test/test_transfer.cpp:291 | factor out the disk-full test into its own unit test |
factor out the disk-full test into its own unit test../test/test_transfer.cpp:291 print_alerts(ses1, "ses1", true, true, true, &on_alert);
print_alerts(ses2, "ses2", true, true, true, &on_alert);
if (i % 10 == 0)
@@ -2913,7 +3913,7 @@ int test_main()
fprintf(stderr, "%s: discovered disk full mode. Raise limit and disable upload-mode\n", time_now_string());
peer_disconnects = 0;
continue;
- | ||
relevance 0 | ../test/test_upnp.cpp:100 | store the log and verify that some key messages are there |
store the log and verify that some key messages are there../test/test_upnp.cpp:100 "USN:uuid:000f-66d6-7296000099dc::upnp:rootdevice\r\n"
+ | ||
relevance 0 | ../test/test_upnp.cpp:100 | store the log and verify that some key messages are there |
store the log and verify that some key messages are there../test/test_upnp.cpp:100 "USN:uuid:000f-66d6-7296000099dc::upnp:rootdevice\r\n"
"Location: http://127.0.0.1:%d/upnp.xml\r\n"
"Server: Custom/1.0 UPnP/1.0 Proc/Ver\r\n"
"EXT:\r\n"
@@ -2964,7 +3964,7 @@ int run_upnp_test(char const* root_filename, char const* router_model, char cons
error_code ec;
load_file(root_filename, buf, ec);
buf.push_back(0);
- | ||
relevance 0 | ../test/web_seed_suite.cpp:366 | file hashes don't work with the new torrent creator reading async |
file hashes don't work with the new torrent creator reading async../test/web_seed_suite.cpp:366 // corrupt the files now, so that the web seed will be banned
+ | ||
relevance 0 | ../test/web_seed_suite.cpp:366 | file hashes don't work with the new torrent creator reading async |
file hashes don't work with the new torrent creator reading async../test/web_seed_suite.cpp:366 // corrupt the files now, so that the web seed will be banned
if (test_url_seed)
{
create_random_files(combine_path(save_path, "torrent_dir"), file_sizes, sizeof(file_sizes)/sizeof(file_sizes[0]));
@@ -3015,10 +4015,10 @@ int run_upnp_test(char const* root_filename, char const* router_model, char cons
test_transfer(ses, torrent_file, proxy, port, protocol, test_url_seed
, chunked_encoding, test_ban, keepalive);
- | ||
relevance 0 | ../src/block_cache.cpp:884 | it's somewhat expensive to iterate over this linked list. Presumably because of the random access of memory. It would be nice if pieces with no evictable blocks weren't in this list |
it's somewhat expensive
+ | ||
relevance 0 | ../src/block_cache.cpp:959 | it's somewhat expensive to iterate over this linked list. Presumably because of the random access of memory. It would be nice if pieces with no evictable blocks weren't in this list |
it's somewhat expensive
to iterate over this linked list. Presumably because of the random
access of memory. It would be nice if pieces with no evictable blocks
-weren't in this list../src/block_cache.cpp:884 }
+weren't in this list../src/block_cache.cpp:959 }
else if (m_last_cache_op == ghost_hit_lru1)
{
// when we insert new items or move things from L1 to L2
@@ -3069,7 +4069,7 @@ weren't in this list../src/block_cache.cpp:884relevance 0 | ../src/block_cache.cpp:948 | this should probably only be done every n:th time |
|
this should probably only be done every n:th time../src/block_cache.cpp:948 }
+ | ||
relevance 0 | ../src/block_cache.cpp:1023 | this should probably only be done every n:th time |
this should probably only be done every n:th time../src/block_cache.cpp:1023 }
if (pe->ok_to_evict())
{
@@ -3120,7 +4120,7 @@ weren't in this list../src/block_cache.cpp:884relevance 0 | ../src/block_cache.cpp:1693 | create a holder for refcounts that automatically decrement |
|
create a holder for refcounts that automatically decrement../src/block_cache.cpp:1693 }
+ | ||
relevance 0 | ../src/block_cache.cpp:1775 | create a holder for refcounts that automatically decrement |
create a holder for refcounts that automatically decrement../src/block_cache.cpp:1775 }
j->buffer = allocate_buffer("send buffer");
if (j->buffer == 0) return -2;
@@ -3171,13 +4171,13 @@ bool block_cache::maybe_free_piece(cached_piece_entry* pe)
boost::shared_ptr<piece_manager> s = pe->storage;
- | ||
relevance 0 | ../src/bt_peer_connection.cpp:671 | this could be optimized using knuth morris pratt |
this could be optimized using knuth morris pratt../src/bt_peer_connection.cpp:671 {
- disconnect(errors::no_memory, op_encryption);
- return;
- }
+ | ||
relevance 0 | ../src/bt_peer_connection.cpp:676 | this could be optimized using knuth morris pratt |
this could be optimized using knuth morris pratt../src/bt_peer_connection.cpp:676 }
-#ifdef TORRENT_LOGGING
- peer_log(" computed RC4 keys");
+ m_rc4->set_incoming_key(&remote_key[0], 20);
+ m_rc4->set_outgoing_key(&local_key[0], 20);
+
+#ifndef TORRENT_DISABLE_LOGGING
+ peer_log(peer_log_alert::info, "ENCRYPTION", "computed RC4 keys");
#endif
}
@@ -3222,10 +4222,9 @@ bool block_cache::maybe_free_piece(cached_piece_entry* pe)
// }
// no complete sync
- | ||
relevance 0 | ../src/bt_peer_connection.cpp:2212 | if we're finished, send upload_only message |
if we're finished, send upload_only message../src/bt_peer_connection.cpp:2212 if (msg[5 + k / 8] & (0x80 >> (k % 8))) bitfield_string[k] = '1';
- else bitfield_string[k] = '0';
- }
- peer_log("==> BITFIELD [ %s ]", bitfield_string.c_str());
+ | ||
relevance 0 | ../src/bt_peer_connection.cpp:2245 | if we're finished, send upload_only message |
if we're finished, send upload_only message../src/bt_peer_connection.cpp:2245 }
+ peer_log(peer_log_alert::outgoing_message, "BITFIELD"
+ , "%s", bitfield_string.c_str());
#endif
m_sent_bitfield = true;
@@ -3237,8 +4236,9 @@ bool block_cache::maybe_free_piece(cached_piece_entry* pe)
{
for (int i = 0; i < num_lazy_pieces; ++i)
{
-#ifdef TORRENT_LOGGING
- peer_log("==> HAVE [ piece: %d ]", lazy_pieces[i]);
+#ifndef TORRENT_DISABLE_LOGGING
+ peer_log(peer_log_alert::outgoing_message, "HAVE"
+ , "piece: %d", lazy_pieces[i]);
#endif
write_have(lazy_pieces[i]);
}
@@ -3273,8 +4273,8 @@ bool block_cache::maybe_free_piece(cached_piece_entry* pe)
? m_settings.get_str(settings_pack::user_agent)
: m_settings.get_str(settings_pack::handshake_client_version);
}
- | ||
relevance 0 | ../src/choker.cpp:332 | optimize this using partial_sort or something. We don't need to sort the entire list |
optimize this using partial_sort or something. We don't need
-to sort the entire list../src/choker.cpp:332 return upload_slots;
+ | ||
relevance 0 | ../src/choker.cpp:336 | optimize this using partial_sort or something. We don't need to sort the entire list |
optimize this using partial_sort or something. We don't need
+to sort the entire list../src/choker.cpp:336 return upload_slots;
}
// ==== rate-based ====
@@ -3295,8 +4295,8 @@ to sort the entire list../src/choker.cpp:332
- | ||
relevance 0 | ../src/choker.cpp:335 | make the comparison function a free function and move it into this cpp file |
make the comparison function a free function and move it
-into this cpp file../src/choker.cpp:335 }
+ | ||
relevance 0 | ../src/choker.cpp:339 | make the comparison function a free function and move it into this cpp file |
make the comparison function a free function and move it
+into this cpp file../src/choker.cpp:339 }
// ==== rate-based ====
//
@@ -3319,7 +4319,7 @@ into this cpp file../src/choker.cpp:335 std::sort(peers.begin(), peers.end()
, boost::bind(&upload_rate_compare, _1, _2));
- | ||
relevance 0 | ../src/choker.cpp:340 | make configurable |
make configurable../src/choker.cpp:340 //
+ | ||
relevance 0 | ../src/choker.cpp:344 | make configurable |
make configurable../src/choker.cpp:344 //
// The rate based unchoker looks at our upload rate to peers, and find
// a balance between number of upload slots and the rate we achieve. The
// intention is to not spread upload bandwidth too thin, but also to not
@@ -3352,7 +4352,7 @@ into this cpp file../src/choker.cpp:335relevance 0 | ../src/choker.cpp:354 | make configurable |
|
make configurable../src/choker.cpp:354 // it purely based on the current state of our peers.
+ | ||
relevance 0 | ../src/choker.cpp:358 | make configurable |
make configurable../src/choker.cpp:358 // it purely based on the current state of our peers.
upload_slots = 0;
@@ -3403,7 +4403,58 @@ into this cpp file../src/choker.cpp:335relevance 0 | ../src/disk_buffer_pool.cpp:329 | perhaps we should sort the buffers here? |
|
perhaps we should sort the buffers here?../src/disk_buffer_pool.cpp:329 mutex::scoped_lock l(m_pool_mutex);
+ | ||
relevance 0 | ../src/create_torrent.cpp:284 | this should probably be optional |
this should probably be optional../src/create_torrent.cpp:284 boost::shared_ptr<char> dummy;
+ counters cnt;
+ disk_io_thread disk_thread(ios, cnt, 0);
+
+ storage_params params;
+ params.files = &t.files();
+ params.mapped_files = NULL;
+ params.path = path;
+ params.pool = &disk_thread.files();
+ params.mode = storage_mode_sparse;
+
+ storage_interface* storage_impl = default_storage_constructor(params);
+
+ boost::shared_ptr<piece_manager> storage = boost::make_shared<piece_manager>(
+ storage_impl, dummy, (file_storage*)&t.files());
+
+ settings_pack sett;
+ sett.set_int(settings_pack::cache_size, 0);
+ sett.set_int(settings_pack::hashing_threads, 2);
+
+ alert_manager dummy2(0, 0);
+ disk_thread.set_settings(&sett, dummy2);
+
+ int piece_counter = 0;
+ int completed_piece = 0;
+ int piece_read_ahead = 15 * 1024 * 1024 / t.piece_length();
+ if (piece_read_ahead < 1) piece_read_ahead = 1;
+
+ for (int i = 0; i < piece_read_ahead; ++i)
+ {
+ disk_thread.async_hash(storage.get(), i, disk_io_job::sequential_access
+ , boost::bind(&on_hash, _1, &t, storage, &disk_thread
+ , &piece_counter, &completed_piece, &f, &ec), (void*)0);
+ ++piece_counter;
+ if (piece_counter >= t.num_pieces()) break;
+ }
+ disk_thread.submit_jobs();
+ ios.run(ec);
+ }
+
+ create_torrent::~create_torrent() {}
+
+ create_torrent::create_torrent(file_storage& fs, int piece_size
+ , int pad_file_limit, int flags, int alignment)
+ : m_files(fs)
+ , m_creation_date(time(0))
+ , m_multifile(fs.num_files() > 1)
+ , m_private(false)
+ , m_merkle_torrent((flags & merkle) != 0)
+ , m_include_mtime((flags & modification_time) != 0)
+ , m_include_symlinks((flags & symlinks) != 0)
+ | ||
relevance 0 | ../src/disk_buffer_pool.cpp:319 | perhaps we should sort the buffers here? |
perhaps we should sort the buffers here?../src/disk_buffer_pool.cpp:319 mutex::scoped_lock l(m_pool_mutex);
for (int i = 0; i < iov_len; ++i)
{
iov[i].iov_base = allocate_buffer_impl(l, "pending read");
@@ -3429,10 +4480,13 @@ into this cpp file../src/choker.cpp:335 | ||
relevance 0 | ../src/disk_io_thread.cpp:879 | it would be nice to optimize this by having the cache pieces also ordered by |
it would be nice to optimize this by having the cache
-pieces also ordered by../src/disk_io_thread.cpp:879 // from disk_io_thread::do_delete, which is a fence job and should
+ | ||
relevance 0 | ../src/disk_io_thread.cpp:857 | it would be nice to optimize this by having the cache pieces also ordered by |
it would be nice to optimize this by having the cache
+pieces also ordered by../src/disk_io_thread.cpp:857 // from disk_io_thread::do_delete, which is a fence job and should
// have any other jobs active, i.e. there should not be any references
// keeping pieces or blocks alive
if ((flags & flush_delete_cache) && (flags & flush_expect_clear))
@@ -3506,8 +4557,8 @@ pieces also ordered by../src/disk_io_thread.cpp:879relevance 0 | ../src/disk_io_thread.cpp:922 | instead of doing a lookup each time through the loop, save cached_piece_entry pointers with piece_refcount incremented to pin them |
|
instead of doing a lookup each time through the loop, save
-cached_piece_entry pointers with piece_refcount incremented to pin them../src/disk_io_thread.cpp:922 // this is why we pass in 1 as cont_block to the flushing functions
+ | ||
relevance 0 | ../src/disk_io_thread.cpp:900 | instead of doing a lookup each time through the loop, save cached_piece_entry pointers with piece_refcount incremented to pin them |
instead of doing a lookup each time through the loop, save
+cached_piece_entry pointers with piece_refcount incremented to pin them../src/disk_io_thread.cpp:900 // this is why we pass in 1 as cont_block to the flushing functions
void disk_io_thread::try_flush_write_blocks(int num, tailqueue& completed_jobs
, mutex::scoped_lock& l)
{
@@ -3558,10 +4609,10 @@ cached_piece_entry pointers with piece_refcount incremented to pin them
cached_piece_entry* pe = m_disk_cache.find_piece(i->first, i->second);
if (pe == NULL) continue;
if (pe->num_dirty == 0) continue;
- | ||
relevance 0 | ../src/disk_io_thread.cpp:1133 | instead of doing this. pass in the settings to each storage_interface call. Each disk thread could hold its most recent understanding of the settings in a shared_ptr, and update it every time it wakes up from a job. That way each access to the settings won't require a mutex to be held. |
instead of doing this. pass in the settings to each storage_interface
+ | ||
relevance 0 | ../src/disk_io_thread.cpp:1079 | instead of doing this. pass in the settings to each storage_interface call. Each disk thread could hold its most recent understanding of the settings in a shared_ptr, and update it every time it wakes up from a job. That way each access to the settings won't require a mutex to be held. |
instead of doing this. pass in the settings to each storage_interface
call. Each disk thread could hold its most recent understanding of the settings
in a shared_ptr, and update it every time it wakes up from a job. That way
-each access to the settings won't require a mutex to be held.../src/disk_io_thread.cpp:1133 {
+each access to the settings won't require a mutex to be held.../src/disk_io_thread.cpp:1079 {
INVARIANT_CHECK;
TORRENT_ASSERT(j->next == 0);
TORRENT_ASSERT((j->flags & disk_io_job::in_progress) || !j->storage);
@@ -3586,7 +4637,7 @@ each access to the settings won't require a mutex to be held.../src/dis
TORRENT_ASSERT(j->action < sizeof(job_functions)/sizeof(job_functions[0]));
- ptime start_time = time_now_hires();
+ time_point start_time = clock_type::now();
m_stats_counters.inc_stats_counter(counters::num_running_disk_jobs, 1);
@@ -3605,10 +4656,10 @@ each access to the settings won't require a mutex to be held.../src/dis
// our quanta in case there aren't any other
// jobs to run in between
- | ||
relevance 0 | ../src/disk_io_thread.cpp:1161 | a potentially more efficient solution would be to have a special queue for retry jobs, that's only ever run when a job completes, in any thread. It would only work if counters::num_running_disk_jobs > 0 |
a potentially more efficient solution would be to have a special
+ | ||
relevance 0 | ../src/disk_io_thread.cpp:1107 | a potentially more efficient solution would be to have a special queue for retry jobs, that's only ever run when a job completes, in any thread. It would only work if counters::num_running_disk_jobs > 0 |
a potentially more efficient solution would be to have a special
queue for retry jobs, that's only ever run when a job completes, in
-any thread. It would only work if counters::num_running_disk_jobs > 0../src/disk_io_thread.cpp:1161
- ptime start_time = time_now_hires();
+any thread. It would only work if counters::num_running_disk_jobs > 0../src/disk_io_thread.cpp:1107
+ time_point start_time = clock_type::now();
m_stats_counters.inc_stats_counter(counters::num_running_disk_jobs, 1);
@@ -3638,7 +4689,7 @@ any thread. It would only work if counters::num_running_disk_jobs > 0..
}
#if TORRENT_USE_ASSERT
- | ||
relevance 0 | ../src/disk_io_thread.cpp:1175 | it should clear the hash state even when there's an error, right? |
it should clear the hash state even when there's an error, right?../src/disk_io_thread.cpp:1175 m_stats_counters.inc_stats_counter(counters::num_running_disk_jobs, -1);
+ | ||
relevance 0 | ../src/disk_io_thread.cpp:1121 | it should clear the hash state even when there's an error, right? |
it should clear the hash state even when there's an error, right?../src/disk_io_thread.cpp:1121 m_stats_counters.inc_stats_counter(counters::num_running_disk_jobs, -1);
if (ret == retry_job)
{
@@ -3675,7 +4726,7 @@ any thread. It would only work if counters::num_running_disk_jobs > 0..
j->ret = ret;
- ptime now = time_now_hires();
+ time_point now = clock_type::now();
m_job_time.add_sample(total_microseconds(now - start_time));
completed_jobs.push_back(j);
}
@@ -3689,8 +4740,8 @@ any thread. It would only work if counters::num_running_disk_jobs > 0..
j->error.operation = storage_error::alloc_cache_piece;
return -1;
}
- | ||
relevance 0 | ../src/disk_io_thread.cpp:1870 | maybe the tailqueue_iterator should contain a pointer-pointer instead and have an unlink function |
maybe the tailqueue_iterator should contain a pointer-pointer
-instead and have an unlink function../src/disk_io_thread.cpp:1870 j->callback = handler;
+ | ||
relevance 0 | ../src/disk_io_thread.cpp:1819 | maybe the tailqueue_iterator should contain a pointer-pointer instead and have an unlink function |
maybe the tailqueue_iterator should contain a pointer-pointer
+instead and have an unlink function../src/disk_io_thread.cpp:1819 j->callback = handler;
add_fence_job(storage, j);
}
@@ -3736,13 +4787,13 @@ instead and have an unlink function../src/disk_io_thread.cpp:1870<
j->callback = handler;
add_fence_job(storage, j);
- fail_jobs_impl(storage_error(boost::asio::error::operation_aborted), to_abort, completed_jobs);
+ fail_jobs_impl(storage_error(boost::asio::error::operation_aborted)
+ , to_abort, completed_jobs);
if (completed_jobs.size())
add_completed_jobs(completed_jobs);
- }
- | ||
relevance 0 | ../src/disk_io_thread.cpp:2125 | this is potentially very expensive. One way to solve it would be to have a fence for just this one piece. |
this is potentially very expensive. One way to solve
-it would be to have a fence for just this one piece.../src/disk_io_thread.cpp:2125 }
+ | ||
relevance 0 | ../src/disk_io_thread.cpp:2081 | this is potentially very expensive. One way to solve it would be to have a fence for just this one piece. |
this is potentially very expensive. One way to solve
+it would be to have a fence for just this one piece.../src/disk_io_thread.cpp:2081 }
void disk_io_thread::async_clear_piece(piece_manager* storage, int index
, boost::function<void(disk_io_job const*)> const& handler)
@@ -3793,7 +4844,7 @@ it would be to have a fence for just this one piece.../src/disk_io_thre
if (!pe->hash) return;
if (pe->hashing) return;
- | ||
relevance 0 | ../src/disk_io_thread.cpp:2386 | we should probably just hang the job on the piece and make sure the hasher gets kicked |
we should probably just hang the job on the piece and make sure the hasher gets kicked../src/disk_io_thread.cpp:2386 if (pe == NULL)
+ | ||
relevance 0 | ../src/disk_io_thread.cpp:2342 | we should probably just hang the job on the piece and make sure the hasher gets kicked |
we should probably just hang the job on the piece and make sure the hasher gets kicked../src/disk_io_thread.cpp:2342 if (pe == NULL)
{
int cache_state = (j->flags & disk_io_job::volatile_read)
? cached_piece_entry::volatile_read_lru
@@ -3832,9 +4883,6 @@ it would be to have a fence for just this one piece.../src/disk_io_thre
int block_size = m_disk_cache.block_size();
int blocks_in_piece = (piece_size + block_size - 1) / block_size;
- file::iovec_t iov;
- int ret = 0;
-
// keep track of which blocks we have locked by incrementing
// their refcounts. This is used to decrement only these blocks
// later.
@@ -3844,9 +4892,12 @@ it would be to have a fence for just this one piece.../src/disk_io_thre
// increment the refcounts of all
// blocks up front, and then hash them without holding the lock
- | ||
relevance 0 | ../src/disk_io_thread.cpp:2456 | introduce a holder class that automatically increments and decrements the piece_refcount |
introduce a holder class that automatically increments
-and decrements the piece_refcount../src/disk_io_thread.cpp:2456 for (int i = ph->offset / block_size; i < blocks_in_piece; ++i)
+ TORRENT_PIECE_ASSERT(ph->offset % block_size == 0, pe);
+ for (int i = ph->offset / block_size; i < blocks_in_piece; ++i)
{
+ | ||
relevance 0 | ../src/disk_io_thread.cpp:2409 | introduce a holder class that automatically increments and decrements the piece_refcount |
introduce a holder class that automatically increments
+and decrements the piece_refcount../src/disk_io_thread.cpp:2409 {
+ file::iovec_t iov;
iov.iov_len = (std::min)(block_size, piece_size - ph->offset);
if (next_locked_block < num_locked_blocks
@@ -3884,7 +4935,7 @@ and decrements the piece_refcount../src/disk_io_thread.cpp:2456../src/disk_io_thread.cpp:2456 | ||
relevance 0 | ../src/disk_io_thread.cpp:2698 | it would be nice to not have to lock the mutex every turn through this loop |
it would be nice to not have to lock the mutex every
-turn through this loop../src/disk_io_thread.cpp:2698 {
- j->error.ec = error::no_memory;
+ | ||
relevance 0 | ../src/disk_io_thread.cpp:2655 | it would be nice to not have to lock the mutex every turn through this loop |
it would be nice to not have to lock the mutex every
+turn through this loop../src/disk_io_thread.cpp:2655 j->error.ec = error::no_memory;
j->error.operation = storage_error::alloc_cache_piece;
return -1;
}
@@ -3906,7 +4956,8 @@ turn through this loop../src/disk_io_thread.cpp:2698../src/disk_io_thread.cpp:2698relevance 0 | ../src/http_tracker_connection.cpp:185 | support this somehow |
|
support this somehow../src/http_tracker_connection.cpp:185 url += escape_string(id.c_str(), id.length());
+ | ||
relevance 0 | ../src/http_tracker_connection.cpp:184 | support this somehow |
support this somehow../src/http_tracker_connection.cpp:184 url += escape_string(id.c_str(), id.length());
}
#if TORRENT_USE_I2P
@@ -3999,15 +5050,15 @@ turn through this loop../src/disk_io_thread.cpp:2698relevance 0 | ../src/metadata_transfer.cpp:359 | this is not safe. The torrent could be unloaded while we're still sending the metadata |
|
this is not safe. The torrent could be unloaded while
-we're still sending the metadata../src/metadata_transfer.cpp:359 std::pair<int, int> offset
- = req_to_offset(req, (int)m_tp.metadata().left());
+ | ||
relevance 0 | ../src/metadata_transfer.cpp:356 | this is not safe. The torrent could be unloaded while we're still sending the metadata |
this is not safe. The torrent could be unloaded while
+we're still sending the metadata../src/metadata_transfer.cpp:356 = req_to_offset(req, (int)m_tp.metadata().left());
char msg[15];
char* ptr = msg;
-#ifdef TORRENT_LOGGING
- m_pc.peer_log("==> METADATA [ start: %d | total_size: %d | offset: %d | data_size: %d ]"
+#ifndef TORRENT_DISABLE_LOGGING
+ m_pc.peer_log(peer_log_alert::outgoing_message, "METADATA"
+ , "start: %d total_size: %d offset: %d data_size: %d"
, req.first, req.second, offset.first, offset.second);
#endif
// yes, we have metadata, send it
@@ -4025,8 +5076,9 @@ we're still sending the metadata../src/metadata_transfer.cpp:359../src/metadata_transfer.cpp:359 | ||
relevance 0 | ../src/packet_buffer.cpp:176 | use compare_less_wrap for this comparison as well |
use compare_less_wrap for this comparison as well../src/packet_buffer.cpp:176 while (new_size < size)
+ | ||
relevance 0 | ../src/packet_buffer.cpp:176 | use compare_less_wrap for this comparison as well |
use compare_less_wrap for this comparison as well../src/packet_buffer.cpp:176 while (new_size < size)
new_size <<= 1;
void** new_storage = (void**)malloc(sizeof(void*) * new_size);
@@ -4102,7 +5153,7 @@ we're still sending the metadata../src/metadata_transfer.cpp:359 | ||
relevance 0 | ../src/part_file.cpp:252 | what do we do if someone is currently reading from the disk from this piece? does it matter? Since we won't actively erase the data from disk, but it may be overwritten soon, it's probably not that big of a deal |
what do we do if someone is currently reading from the disk
+ | ||
relevance 0 | ../src/part_file.cpp:252 | what do we do if someone is currently reading from the disk from this piece? does it matter? Since we won't actively erase the data from disk, but it may be overwritten soon, it's probably not that big of a deal |
what do we do if someone is currently reading from the disk
from this piece? does it matter? Since we won't actively erase the
data from disk, but it may be overwritten soon, it's probably not that
big of a deal../src/part_file.cpp:252 if (((mode & file::rw_mask) != file::read_only)
@@ -4156,8 +5207,8 @@ big of a deal../src/part_file.cpp:252relevance 0 | ../src/part_file.cpp:344 | instead of rebuilding the whole file header and flushing it, update the slot entries as we go |
|
instead of rebuilding the whole file header
-and flushing it, update the slot entries as we go../src/part_file.cpp:344 if (block_to_copy == m_piece_size)
+ | ||
relevance 0 | ../src/part_file.cpp:350 | instead of rebuilding the whole file header and flushing it, update the slot entries as we go |
instead of rebuilding the whole file header
+and flushing it, update the slot entries as we go../src/part_file.cpp:350 if (block_to_copy == m_piece_size)
{
m_free_slots.push_back(i->second);
m_piece_map.erase(i);
@@ -4208,16 +5259,68 @@ and flushing it, update the slot entries as we go../src/part_file.cpp:3
write_uint32(m_max_pieces, ptr);
write_uint32(m_piece_size, ptr);
- | ||
relevance 0 | ../src/peer_connection.cpp:1017 | this should be the global download rate |
this should be the global download rate../src/peer_connection.cpp:1017
+ | ||
relevance 0 | ../src/peer_connection.cpp:511 | it would be neat to be able to print this straight into the alert's stack allocator |
it would be neat to be able to print this straight into the
+alert's stack allocator../src/peer_connection.cpp:511
+ TORRENT_ASSERT(in_handshake() || is_interesting() == interested);
+
+ disconnect_if_redundant();
+ }
+
+#ifndef TORRENT_DISABLE_LOGGING
+#if defined __GNUC__ || defined __clang__
+ __attribute__((format(printf, 4, 5)))
+#endif
+ void peer_connection::peer_log(peer_log_alert::direction_t direction
+ , char const* event, char const* fmt, ...) const
+ {
+ TORRENT_ASSERT(is_single_thread());
+
+ if (!m_ses.alerts().should_post<peer_log_alert>()) return;
+
+ va_list v;
+ va_start(v, fmt);
+
+ char buf[512];
+ vsnprintf(buf, sizeof(buf), fmt, v);
+ va_end(v);
+
+ torrent_handle h;
+ boost::shared_ptr<torrent> t = m_torrent.lock();
+ if (t) h = t->get_handle();
+
+ m_ses.alerts().emplace_alert<peer_log_alert>(
+ h, m_remote, m_peer_id, direction, event, buf);
+ }
+#endif
+
+#ifndef TORRENT_DISABLE_EXTENSIONS
+ void peer_connection::add_extension(boost::shared_ptr<peer_plugin> ext)
+ {
+ TORRENT_ASSERT(is_single_thread());
+ m_extensions.push_back(ext);
+ }
+
+ peer_plugin const* peer_connection::find_plugin(char const* type)
+ {
+ TORRENT_ASSERT(is_single_thread());
+ for (extension_list_t::iterator i = m_extensions.begin()
+ , end(m_extensions.end()); i != end; ++i)
+ {
+ if (strcmp((*i)->type(), type) == 0) return (*i).get();
+ }
+ return 0;
+ }
+#endif
+ | ||
relevance 0 | ../src/peer_connection.cpp:1011 | this should be the global download rate |
this should be the global download rate../src/peer_connection.cpp:1011
int rate = 0;
// if we haven't received any data recently, the current download rate
// is not representative
- if (time_now() - m_last_piece > seconds(30) && m_download_rate_peak > 0)
+ if (aux::time_now() - m_last_piece > seconds(30) && m_download_rate_peak > 0)
{
rate = m_download_rate_peak;
}
- else if (time_now() - m_last_unchoked < seconds(5)
+ else if (aux::time_now() - m_last_unchoked < seconds(5)
&& m_statistics.total_payload_upload() < 2 * 0x4000)
{
// if we're have only been unchoked for a short period of time,
@@ -4259,7 +5362,7 @@ and flushing it, update the slot entries as we go../src/part_file.cpp:3
if (m_ignore_stats) return;
boost::shared_ptr<torrent> t = m_torrent.lock();
if (!t) return;
- | ||
relevance 0 | ../src/peer_connection.cpp:3226 | sort the allowed fast set in priority order |
sort the allowed fast set in priority order../src/peer_connection.cpp:3226
+ | ||
relevance 0 | ../src/peer_connection.cpp:3284 | sort the allowed fast set in priority order |
sort the allowed fast set in priority order../src/peer_connection.cpp:3284
// if the peer has the piece and we want
// to download it, request it
if (int(m_have_piece.size()) > index
@@ -4310,9 +5413,9 @@ and flushing it, update the slot entries as we go../src/part_file.cpp:3
boost::shared_ptr<torrent> t = m_torrent.lock();
TORRENT_ASSERT(t);
TORRENT_ASSERT(t->has_picker());
- | ||
relevance 0 | ../src/peer_connection.cpp:5870 | The stats checks can not be honored when authenticated encryption is in use because we may have encrypted data which we cannot authenticate yet |
The stats checks can not be honored when authenticated encryption is in use
-because we may have encrypted data which we cannot authenticate yet../src/peer_connection.cpp:5870#if defined TORRENT_LOGGING
- peer_log("<<< read %d bytes", int(bytes_transferred));
+ | ||
relevance 0 | ../src/peer_connection.cpp:6050 | The stats checks can not be honored when authenticated encryption is in use because we may have encrypted data which we cannot authenticate yet |
The stats checks can not be honored when authenticated encryption is in use
+because we may have encrypted data which we cannot authenticate yet../src/peer_connection.cpp:6050 peer_log(peer_log_alert::incoming, "READ"
+ , "%d bytes", int(bytes_transferred));
#endif
// correct the dl quota usage, if not all of the buffer was actually read
TORRENT_ASSERT(int(bytes_transferred) <= m_quota[download_channel]);
@@ -4362,11 +5465,11 @@ because we may have encrypted data which we cannot authenticate yet../s
}
if (num_loops > read_loops) break;
- | ||
relevance 0 | ../src/piece_picker.cpp:2040 | this could probably be optimized by incrementally calling partial_sort to sort one more element in the list. Because chances are that we'll just need a single piece, and once we've picked from it we're done. Sorting the rest of the list in that case is a waste of time. |
this could probably be optimized by incrementally
+ | ||
relevance 0 | ../src/piece_picker.cpp:2070 | this could probably be optimized by incrementally calling partial_sort to sort one more element in the list. Because chances are that we'll just need a single piece, and once we've picked from it we're done. Sorting the rest of the list in that case is a waste of time. |
this could probably be optimized by incrementally
calling partial_sort to sort one more element in the list. Because
chances are that we'll just need a single piece, and once we've
picked from it we're done. Sorting the rest of the list in that
-case is a waste of time.../src/piece_picker.cpp:2040 , end(m_downloads[piece_pos::piece_downloading].end()); i != end; ++i)
+case is a waste of time.../src/piece_picker.cpp:2070 , end(m_downloads[piece_pos::piece_downloading].end()); i != end; ++i)
{
pc.inc_stats_counter(counters::piece_picker_partial_loops);
@@ -4417,8 +5520,8 @@ case is a waste of time.../src/piece_picker.cpp:2040relevance 0 | ../src/piece_picker.cpp:2545 | when expanding pieces for cache stripe reasons, the !downloading condition doesn't make much sense |
|
when expanding pieces for cache stripe reasons,
-the !downloading condition doesn't make much sense../src/piece_picker.cpp:2545 TORRENT_ASSERT(index < (int)m_piece_map.size() || m_piece_map.empty());
+ | ||
relevance 0 | ../src/piece_picker.cpp:2575 | when expanding pieces for cache stripe reasons, the !downloading condition doesn't make much sense |
when expanding pieces for cache stripe reasons,
+the !downloading condition doesn't make much sense../src/piece_picker.cpp:2575 TORRENT_ASSERT(index < (int)m_piece_map.size() || m_piece_map.empty());
if (index+1 == (int)m_piece_map.size())
return m_blocks_in_last_piece;
else
@@ -4466,8 +5569,8 @@ the !downloading condition doesn't make much sense../src/piece_picker.c
// blocks from this piece.
// the second bool is true if this is the only active peer that is requesting
// and downloading blocks from this piece. Active means having a connection.
- | ||
relevance 0 | ../src/session_impl.cpp:512 | there's no rule here to make uTP connections not have the global or local rate limits apply to it. This used to be the default. |
there's no rule here to make uTP connections not have the global or
-local rate limits apply to it. This used to be the default.../src/session_impl.cpp:512 m_global_class = m_classes.new_peer_class("global");
+ | ||
relevance 0 | ../src/session_impl.cpp:504 | there's no rule here to make uTP connections not have the global or local rate limits apply to it. This used to be the default. |
there's no rule here to make uTP connections not have the global or
+local rate limits apply to it. This used to be the default.../src/session_impl.cpp:504 m_global_class = m_classes.new_peer_class("global");
m_tcp_peer_class = m_classes.new_peer_class("tcp");
m_local_peer_class = m_classes.new_peer_class("local");
// local peers are always unchoked
@@ -4488,16 +5591,16 @@ local rate limits apply to it. This used to be the default.../src/sessi
m_peer_class_type_filter.add(peer_class_type_filter::i2p_socket, m_tcp_peer_class);
- #if defined TORRENT_LOGGING
+#ifndef TORRENT_DISABLE_LOGGING
- session_log("libtorrent configuration: %s\n"
- "libtorrent version: %s\n"
- "libtorrent revision: %s\n\n"
+ session_log("config: %s\n"
+ "version: %s\n"
+ "revision: %s\n\n"
, TORRENT_CFG_STRING
, LIBTORRENT_VERSION
, LIBTORRENT_REVISION);
-#endif // TORRENT_LOGGING
+#endif // TORRENT_DISABLE_LOGGING
#if TORRENT_USE_RLIMIT
// ---- auto-cap max connections ----
@@ -4505,8 +5608,8 @@ local rate limits apply to it. This used to be the default.../src/sessi
struct rlimit rl;
if (getrlimit(RLIMIT_NOFILE, &rl) == 0)
{
-#if defined TORRENT_LOGGING
- session_log(" max number of open files: %d", rl.rlim_cur);
+#ifndef TORRENT_DISABLE_LOGGING
+ session_log(" max number of open files: %d", int(rl.rlim_cur));
#endif
// deduct some margin for epoll/kqueue, log files,
// futexes, shared objects etc.
@@ -4517,10 +5620,10 @@ local rate limits apply to it. This used to be the default.../src/sessi
m_settings.get_int(settings_pack::connections_limit)
, int(rl.rlim_cur * 8 / 10)));
// 20% goes towards regular files (see disk_io_thread)
-#if defined TORRENT_LOGGING
- | ||
relevance 0 | ../src/session_impl.cpp:1727 | instead of having a special case for this, just make the default listen interfaces be "0.0.0.0:6881,[::1]:6881" and use the generic path. That would even allow for not listening at all. |
instead of having a special case for this, just make the
+#ifndef TORRENT_DISABLE_LOGGING
+ | ||
relevance 0 | ../src/session_impl.cpp:1731 | instead of having a special case for this, just make the default listen interfaces be "0.0.0.0:6881,[::1]:6881" and use the generic path. That would even allow for not listening at all. |
instead of having a special case for this, just make the
default listen interfaces be "0.0.0.0:6881,[::1]:6881" and use
-the generic path. That would even allow for not listening at all.../src/session_impl.cpp:1727
+the generic path. That would even allow for not listening at all.../src/session_impl.cpp:1731
// reset the retry counter
m_listen_port_retries = m_settings.get_int(settings_pack::max_retry_port_bind);
@@ -4571,7 +5674,7 @@ retry:
if (s.sock)
{
TORRENT_ASSERT(!m_abort);
- | ||
relevance 0 | ../src/session_impl.cpp:2620 | should this function take a shared_ptr instead? |
should this function take a shared_ptr instead?../src/session_impl.cpp:2620 {
+ | ||
relevance 0 | ../src/session_impl.cpp:2624 | should this function take a shared_ptr instead? |
should this function take a shared_ptr instead?../src/session_impl.cpp:2624 {
#if defined TORRENT_ASIO_DEBUGGING
complete_async("session_impl::on_socks_accept");
#endif
@@ -4580,8 +5683,8 @@ retry:
if (e)
{
if (m_alerts.should_post<listen_failed_alert>())
- m_alerts.post_alert(listen_failed_alert("socks5", listen_failed_alert::accept, e
- , listen_failed_alert::socks5));
+ m_alerts.emplace_alert<listen_failed_alert>("socks5", listen_failed_alert::accept, e
+ , listen_failed_alert::socks5);
return;
}
open_new_incoming_socks_connection();
@@ -4612,7 +5715,7 @@ retry:
// TORRENT_ASSERT(!i->second->has_peer((peer_connection*)p));
#endif
-#if defined TORRENT_LOGGING
+#ifndef TORRENT_DISABLE_LOGGING
session_log(" CLOSING CONNECTION %s : %s"
, print_endpoint(p->remote()).c_str(), ec.message().c_str());
#endif
@@ -4622,7 +5725,7 @@ retry:
TORRENT_ASSERT(sp.use_count() > 0);
connection_map::iterator i = m_connections.find(sp);
- | ||
relevance 0 | ../src/session_impl.cpp:2985 | have a separate list for these connections, instead of having to loop through all of them |
have a separate list for these connections, instead of having to loop through all of them../src/session_impl.cpp:2985 if (m_auto_manage_time_scaler < 0)
+ | ||
relevance 0 | ../src/session_impl.cpp:2983 | have a separate list for these connections, instead of having to loop through all of them |
have a separate list for these connections, instead of having to loop through all of them../src/session_impl.cpp:2983 if (m_auto_manage_time_scaler < 0)
{
INVARIANT_CHECK;
m_auto_manage_time_scaler = settings().get_int(settings_pack::auto_manage_interval);
@@ -4644,7 +5747,7 @@ retry:
if (m_last_tick - p->connected_time()
> seconds(m_settings.get_int(settings_pack::handshake_timeout)))
- p->disconnect(errors::timed_out, peer_connection::op_bittorrent);
+ p->disconnect(errors::timed_out, op_bittorrent);
}
// --------------------------------------------------------------
@@ -4671,7 +5774,7 @@ retry:
if (!t.want_tick()) --i;
}
- | ||
relevance 0 | ../src/session_impl.cpp:3015 | this should apply to all bandwidth channels |
this should apply to all bandwidth channels../src/session_impl.cpp:3015#if TORRENT_DEBUG_STREAMING > 0
+ | ||
relevance 0 | ../src/session_impl.cpp:3013 | this should apply to all bandwidth channels |
this should apply to all bandwidth channels../src/session_impl.cpp:3013#if TORRENT_DEBUG_STREAMING > 0
printf("\033[2J\033[0;0H");
#endif
@@ -4700,16 +5803,16 @@ retry:
&& m_stat.download_ip_overhead() >= down_limit
&& m_alerts.should_post<performance_alert>())
{
- m_alerts.post_alert(performance_alert(torrent_handle()
- , performance_alert::download_limit_too_low));
+ m_alerts.emplace_alert<performance_alert>(torrent_handle()
+ , performance_alert::download_limit_too_low);
}
if (up_limit > 0
&& m_stat.upload_ip_overhead() >= up_limit
&& m_alerts.should_post<performance_alert>())
{
- m_alerts.post_alert(performance_alert(torrent_handle()
- , performance_alert::upload_limit_too_low));
+ m_alerts.emplace_alert<performance_alert>(torrent_handle()
+ , performance_alert::upload_limit_too_low);
}
}
@@ -4722,7 +5825,7 @@ retry:
// scrape paused torrents that are auto managed
// (unless the session is paused)
// --------------------------------------------------------------
- | ||
relevance 0 | ../src/session_impl.cpp:3496 | these vectors could be copied from m_torrent_lists, if we would maintain them. That way the first pass over all torrents could be avoided. It would be especially efficient if most torrents are not auto-managed whenever we receive a scrape response (or anything that may change the rank of a torrent) that one torrent could re-sort itself in a list that's kept sorted at all times. That way, this pass over all torrents could be avoided altogether. |
these vectors could be copied from m_torrent_lists,
+ | ||
relevance 0 | ../src/session_impl.cpp:3500 | these vectors could be copied from m_torrent_lists, if we would maintain them. That way the first pass over all torrents could be avoided. It would be especially efficient if most torrents are not auto-managed whenever we receive a scrape response (or anything that may change the rank of a torrent) that one torrent could re-sort itself in a list that's kept sorted at all times. That way, this pass over all torrents could be avoided altogether. |
these vectors could be copied from m_torrent_lists,
if we would maintain them. That way the first pass over
all torrents could be avoided. It would be especially
efficient if most torrents are not auto-managed
@@ -4730,9 +5833,8 @@ whenever we receive a scrape response (or anything
that may change the rank of a torrent) that one torrent
could re-sort itself in a list that's kept sorted at all
times. That way, this pass over all torrents could be
-avoided alltogether.../src/session_impl.cpp:3496#if defined TORRENT_LOGGING
- if (t->allows_peers())
- t->log_to_all_peers("AUTO MANAGER PAUSING TORRENT");
+avoided alltogether.../src/session_impl.cpp:3500 if (t->allows_peers())
+ t->log_to_all_peers("auto manager pausing torrent");
#endif
// use graceful pause for auto-managed torrents
t->set_allow_peers(false, true);
@@ -4744,6 +5846,7 @@ avoided alltogether.../src/session_impl.cpp:3496 | ||
relevance 0 | ../src/session_impl.cpp:3573 | allow extensions to sort torrents for queuing |
allow extensions to sort torrents for queuing../src/session_impl.cpp:3573 if (t->is_finished())
+ | ||
relevance 0 | ../src/session_impl.cpp:3577 | allow extensions to sort torrents for queuing |
allow extensions to sort torrents for queuing../src/session_impl.cpp:3577 if (t->is_finished())
seeds.push_back(t);
else
downloaders.push_back(t);
@@ -4832,9 +5935,9 @@ avoided altogether.../src/session_impl.cpp:3496relevance 0 | ../src/session_impl.cpp:3746 | use a lower limit than m_settings.connections_limit to allocate up to 10% or so of connection slots for incoming connections |
|
use a lower limit than m_settings.connections_limit
+ | ||
relevance 0 | ../src/session_impl.cpp:3750 | use a lower limit than m_settings.connections_limit to allocate up to 10% or so of connection slots for incoming connections |
use a lower limit than m_settings.connections_limit
to allocate up to 10% or so of connection slots for incoming
-connections../src/session_impl.cpp:3746 // robin fashion, so that every torrent is equally likely to connect to a
+connections../src/session_impl.cpp:3750 // robin fashion, so that every torrent is equally likely to connect to a
// peer
// boost connections are connections made by torrent connection
@@ -4885,8 +5988,8 @@ connections../src/session_impl.cpp:3746relevance 0 | ../src/session_impl.cpp:3889 | post a message to have this happen immediately instead of waiting for the next tick |
|
post a message to have this happen
-immediately instead of waiting for the next tick../src/session_impl.cpp:3889 torrent* t = p->associated_torrent().lock().get();
+ | ||
relevance 0 | ../src/session_impl.cpp:3893 | post a message to have this happen immediately instead of waiting for the next tick |
post a message to have this happen
+immediately instead of waiting for the next tick../src/session_impl.cpp:3893 torrent* t = p->associated_torrent().lock().get();
torrent_peer* pi = p->peer_info_struct();
if (p->ignore_unchoke_slots() || t == 0 || pi == 0
@@ -4926,8 +6029,8 @@ immediately instead of waiting for the next tick../src/session_impl.cpp
// assume 20 kB/s
max_upload_rate = (std::max)(20000, m_peak_up_rate + 10000);
if (m_alerts.should_post<performance_alert>())
- m_alerts.post_alert(performance_alert(torrent_handle()
- , performance_alert::bittyrant_with_no_uplimit));
+ m_alerts.emplace_alert<performance_alert>(torrent_handle()
+ , performance_alert::bittyrant_with_no_uplimit);
}
int allowed_upload_slots = unchoke_sort(peers, max_upload_rate
@@ -4937,7 +6040,7 @@ immediately instead of waiting for the next tick../src/session_impl.cpp
int num_opt_unchoke = m_settings.get_int(settings_pack::num_optimistic_unchoke_slots);
if (num_opt_unchoke == 0) num_opt_unchoke = (std::max)(1, allowed_upload_slots / 5);
- | ||
relevance 0 | ../src/session_impl.cpp:3936 | this should be called for all peers! |
this should be called for all peers!../src/session_impl.cpp:3936 , unchoke_interval, m_settings);
+ | ||
relevance 0 | ../src/session_impl.cpp:3940 | this should be called for all peers! |
this should be called for all peers!../src/session_impl.cpp:3940 , unchoke_interval, m_settings);
m_stats_counters.set_value(counters::num_unchoke_slots
, allowed_upload_slots);
@@ -4988,10 +6091,12 @@ immediately instead of waiting for the next tick../src/session_impl.cpp
{
// no, this peer should be choked
TORRENT_ASSERT(p->peer_info_struct());
- | ||
relevance 0 | ../src/session_impl.cpp:4327 | it might be a nice feature here to limit the number of torrents to send in a single update. By just posting the first n torrents, they would nicely be round-robined because the torrent lists are always pushed back |
it might be a nice feature here to limit the number of torrents
+ | ||
relevance 0 | ../src/session_impl.cpp:4350 | it might be a nice feature here to limit the number of torrents to send in a single update. By just posting the first n torrents, they would nicely be round-robined because the torrent lists are always pushed back. Perhaps the status_update_alert could even have a fixed array of n entries rather than a vector, to further improve memory locality. |
it might be a nice feature here to limit the number of torrents
to send in a single update. By just posting the first n torrents, they
would nicely be round-robined because the torrent lists are always
-pushed back../src/session_impl.cpp:4327 t->status(&*i, flags);
+pushed back. Perhaps the status_update_alert could even have a fixed
+array of n entries rather than a vector, to further improve memory
+locality.../src/session_impl.cpp:4350 t->status(&*i, flags);
}
}
@@ -5001,27 +6106,27 @@ pushed back../src/session_impl.cpp:4327 for (std::vector<torrent*>::iterator i = state_updates.begin()
, end(state_updates.end()); i != end; ++i)
{
torrent* t = *i;
TORRENT_ASSERT(t->m_links[aux::session_impl::torrent_state_updates].in_list());
- alert->status.push_back(torrent_status());
+ status.push_back(torrent_status());
// querying accurate download counters may require
// the torrent to be loaded. Loading a torrent, and evicting another
// one will lead to calling state_updated(), which screws with
// this list while we're working on it, and break things
- t->status(&alert->status.back(), flags);
+ t->status(&status.back(), flags);
t->clear_in_state_update();
}
state_updates.clear();
@@ -5030,23 +6135,21 @@ pushed back../src/session_impl.cpp:4327relevance 0 | ../src/storage.cpp:716 | make this more generic to not just work if files have been renamed, but also if they have been merged into a single file for instance maybe use the same format as .torrent files and reuse some code from torrent_info |
|
make this more generic to not just work if files have been
+ , m_stat.total_transfer(stat::upload_ip_protocol));
+
+ m_stats_counters.set_value(counters::recv_ip_overhead_bytes
+ , m_stat.total_transfer(stat::download_ip_protocol));
+ | ||
relevance 0 | ../src/storage.cpp:731 | make this more generic to not just work if files have been renamed, but also if they have been merged into a single file for instance maybe use the same format as .torrent files and reuse some code from torrent_info |
make this more generic to not just work if files have been
renamed, but also if they have been merged into a single file for instance
-maybe use the same format as .torrent files and reuse some code from torrent_info../src/storage.cpp:716 for (;;)
- {
- if (file_offset < files().file_size(file_index))
+maybe use the same format as .torrent files and reuse some code from torrent_info../src/storage.cpp:731 if (file_offset < files().file_size(file_index))
break;
file_offset -= files().file_size(file_index);
@@ -5062,42 +6165,44 @@ maybe use the same format as .torrent files and reuse some code from torrent_inf
return int((data_start + files().piece_length() - 1) / files().piece_length());
}
- bool default_storage::verify_resume_data(lazy_entry const& rd, storage_error& ec)
+ bool default_storage::verify_resume_data(bdecode_node const& rd
+ , std::vector<std::string> const* links
+ , storage_error& ec)
{
- lazy_entry const* mapped_files = rd.dict_find_list("mapped_files");
- if (mapped_files && mapped_files->list_size() == m_files.num_files())
+ bdecode_node mapped_files = rd.dict_find_list("mapped_files");
+ if (mapped_files && mapped_files.list_size() == m_files.num_files())
{
m_mapped_files.reset(new file_storage(m_files));
for (int i = 0; i < m_files.num_files(); ++i)
{
- std::string new_filename = mapped_files->list_string_value_at(i);
+ std::string new_filename = mapped_files.list_string_value_at(i);
if (new_filename.empty()) continue;
m_mapped_files->rename_file(i, new_filename);
}
}
- lazy_entry const* file_priority = rd.dict_find_list("file_priority");
- if (file_priority && file_priority->list_size()
+ bdecode_node file_priority = rd.dict_find_list("file_priority");
+ if (file_priority && file_priority.list_size()
== files().num_files())
{
- m_file_priority.resize(file_priority->list_size());
- for (int i = 0; i < file_priority->list_size(); ++i)
- m_file_priority[i] = boost::uint8_t(file_priority->list_int_value_at(i, 1));
+ m_file_priority.resize(file_priority.list_size());
+ for (int i = 0; i < file_priority.list_size(); ++i)
+ m_file_priority[i] = boost::uint8_t(file_priority.list_int_value_at(i, 1));
}
- lazy_entry const* file_sizes_ent = rd.dict_find_list("file sizes");
+ bdecode_node file_sizes_ent = rd.dict_find_list("file sizes");
if (file_sizes_ent == 0)
{
ec.ec = errors::missing_file_sizes;
+ ec.file = -1;
+ ec.operation = storage_error::check_resume;
return false;
}
- if (file_sizes_ent->list_size() == 0)
- {
- ec.ec = errors::no_files_in_resume_data;
- | ||
relevance 0 | ../src/storage.cpp:1012 | if everything moves OK, except for the partfile we currently won't update the save path, which breaks things. it would probably make more sense to give up on the partfile |
if everything moves OK, except for the partfile
+ if (file_sizes_ent.list_size() == 0)
+ | ||
relevance 0 | ../src/storage.cpp:1062 | if everything moves OK, except for the partfile we currently won't update the save path, which breaks things. it would probably make more sense to give up on the partfile |
if everything moves OK, except for the partfile
we currently won't update the save path, which breaks things.
-it would probably make more sense to give up on the partfile../src/storage.cpp:1012 if (ec)
+it would probably make more sense to give up on the partfile../src/storage.cpp:1062 if (ec)
{
ec.file = i->second;
ec.operation = storage_error::copy;
@@ -5148,8 +6253,60 @@ it would probably make more sense to give up on the partfile../src/stor
{
fileop op = { &file::writev
, file::read_write | flags };
- | ||
relevance 0 | ../src/torrent.cpp:508 | if the existing torrent doesn't have metadata, insert the metadata we just downloaded into it. |
if the existing torrent doesn't have metadata, insert
-the metadata we just downloaded into it.../src/torrent.cpp:508
+ | ||
relevance 0 | ../src/string_util.cpp:60 | warning C4146: unary minus operator applied to unsigned type, result still unsigned |
warning C4146: unary minus operator applied to unsigned type,
+result still unsigned../src/string_util.cpp:60
+#include <boost/tuple/tuple.hpp>
+
+#include <cstdlib> // for malloc
+#include <cstring> // for memmov/strcpy/strlen
+
+#include "libtorrent/aux_/disable_warnings_pop.hpp"
+
+namespace libtorrent
+{
+
+ // lexical_cast's result depends on the locale. We need
+ // a well defined result
+ boost::array<char, 4 + std::numeric_limits<boost::int64_t>::digits10>
+ to_string(boost::int64_t n)
+ {
+ boost::array<char, 4 + std::numeric_limits<boost::int64_t>::digits10> ret;
+ char *p = &ret.back();
+ *p = '\0';
+ boost::uint64_t un = n;
+ if (n < 0) un = -un;
+ do {
+ *--p = '0' + un % 10;
+ un /= 10;
+ } while (un);
+ if (n < 0) *--p = '-';
+ std::memmove(&ret[0], p, &ret.back() - p + 1);
+ return ret;
+ }
+
+ bool is_alpha(char c)
+ {
+ return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z');
+ }
+
+ bool is_print(char c)
+ {
+ return c >= 32 && c < 127;
+ }
+
+ bool is_space(char c)
+ {
+ static const char* ws = " \t\n\r\f\v";
+ return strchr(ws, c) != 0;
+ }
+
+ char to_lower(char c)
+ {
+ return (c >= 'A' && c <= 'Z') ? c - 'A' + 'a' : c;
+ }
+
+ | ||
relevance 0 | ../src/torrent.cpp:515 | if the existing torrent doesn't have metadata, insert the metadata we just downloaded into it. |
if the existing torrent doesn't have metadata, insert
+the metadata we just downloaded into it.../src/torrent.cpp:515
m_torrent_file = tf;
// now, we might already have this torrent in the session.
@@ -5200,8 +6357,8 @@ the metadata we just downloaded into it.../src/torrent.cpp:508 | ||
relevance 0 | ../src/torrent.cpp:659 | if the existing torrent doesn't have metadata, insert the metadata we just downloaded into it. |
if the existing torrent doesn't have metadata, insert
-the metadata we just downloaded into it.../src/torrent.cpp:659 m_torrent_file = tf;
+ | ||
relevance 0 | ../src/torrent.cpp:666 | if the existing torrent doesn't have metadata, insert the metadata we just downloaded into it. |
if the existing torrent doesn't have metadata, insert
+the metadata we just downloaded into it.../src/torrent.cpp:666 m_torrent_file = tf;
m_info_hash = tf->info_hash();
// now, we might already have this torrent in the session.
@@ -5252,12 +6409,12 @@ the metadata we just downloaded into it.../src/torrent.cpp:659 | ||
relevance 0 | ../src/torrent.cpp:1461 | is verify_peer_cert called once per certificate in the chain, and this function just tells us which depth we're at right now? If so, the comment makes sense. any certificate that isn't the leaf (i.e. the one presented by the peer) should be accepted automatically, given preverified is true. The leaf certificate needs to be verified to make sure its DN matches the info-hash |
is verify_peer_cert called once per certificate in the chain, and
+ | ||
relevance 0 | ../src/torrent.cpp:1475 | is verify_peer_cert called once per certificate in the chain, and this function just tells us which depth we're at right now? If so, the comment makes sense. any certificate that isn't the leaf (i.e. the one presented by the peer) should be accepted automatically, given preverified is true. The leaf certificate needs to be verified to make sure its DN matches the info-hash |
is verify_peer_cert called once per certificate in the chain, and
this function just tells us which depth we're at right now? If so, the comment
makes sense.
any certificate that isn't the leaf (i.e. the one presented by the peer)
should be accepted automatically, given preverified is true. The leaf certificate
-need to be verified to make sure its DN matches the info-hash../src/torrent.cpp:1461 if (pp) p->add_extension(pp);
+need to be verified to make sure its DN matches the info-hash../src/torrent.cpp:1475 if (pp) p->add_extension(pp);
}
// if files are checked for this torrent, call the extension
@@ -5286,7 +6443,7 @@ need to be verified to make sure its DN matches the info-hash../src/tor
GENERAL_NAMES* gens = static_cast<GENERAL_NAMES*>(
X509_get_ext_d2i(cert, NID_subject_alt_name, 0, 0));
-#if defined TORRENT_LOGGING
+#ifndef TORRENT_DISABLE_LOGGING
std::string names;
bool match = false;
#endif
@@ -5299,17 +6456,19 @@ need to be verified to make sure its DN matches the info-hash../src/tor
const char* torrent_name = reinterpret_cast<const char*>(domain->data);
std::size_t name_length = domain->length;
-#if defined TORRENT_LOGGING
+#ifndef TORRENT_DISABLE_LOGGING
if (i > 1) names += " | n: ";
names.append(torrent_name, name_length);
#endif
if (strncmp(torrent_name, "*", name_length) == 0
|| strncmp(torrent_name, m_torrent_file->name().c_str(), name_length) == 0)
{
-#if defined TORRENT_LOGGING
+#ifndef TORRENT_DISABLE_LOGGING
match = true;
- | ||
relevance 0 | ../src/torrent.cpp:1865 | instead of creating the picker up front here, maybe this whole section should move to need_picker() |
instead of creating the picker up front here,
-maybe this whole section should move to need_picker()../src/torrent.cpp:1865 {
+ | ||
relevance 0 | ../src/torrent.cpp:1882 | instead of creating the picker up front here, maybe this whole section should move to need_picker() |
instead of creating the picker up front here,
+maybe this whole section should move to need_picker()../src/torrent.cpp:1882
+ if (m_seed_mode)
+ {
m_have_all = true;
m_ses.get_io_service().post(boost::bind(&torrent::files_checked, shared_from_this()));
m_resume_data.reset();
@@ -5317,8 +6476,6 @@ maybe this whole section should move to need_picker()../src/torrent.cpp
return;
}
- set_state(torrent_status::checking_resume_data);
-
int num_pad_files = 0;
TORRENT_ASSERT(block_size() > 0);
file_storage const& fs = m_torrent_file->files();
@@ -5360,10 +6517,97 @@ maybe this whole section should move to need_picker()../src/torrent.cpp
// need to consider it finished
std::vector<piece_picker::downloading_piece> dq
- | ||
relevance 0 | ../src/torrent.cpp:2061 | there may be peer extensions relying on the torrent extension still being alive. Only do this if there are no peers. And when the last peer is disconnected, if the torrent is unloaded, clear the extensions m_extensions.clear(); |
there may be peer extensions relying on the torrent extension
+ | ||
relevance 0 | ../src/torrent.cpp:1957 | this could be optimized by looking up which files are complete and just look at those |
this could be optimized by looking up which files are
+complete and just look at those../src/torrent.cpp:1957 if (!need_loaded()) return;
+
+ if (num_pad_files > 0)
+ m_picker->set_num_pad_files(num_pad_files);
+
+ std::auto_ptr<std::vector<std::string> > links;
+#ifndef TORRENT_DISABLE_MUTABLE_TORRENTS
+ if (!m_torrent_file->similar_torrents().empty()
+ || !m_torrent_file->collections().empty())
+ {
+ resolve_links res(m_torrent_file);
+
+ std::vector<sha1_hash> s = m_torrent_file->similar_torrents();
+ for (std::vector<sha1_hash>::iterator i = s.begin(), end(s.end());
+ i != end; ++i)
+ {
+ boost::shared_ptr<torrent> t = m_ses.find_torrent(*i).lock();
+ if (!t) continue;
+
+ // Only attempt to reuse files from torrents that are seeding.
+ if (!t->is_seed()) continue;
+
+ res.match(t->get_torrent_copy(), t->save_path());
+ }
+ std::vector<std::string> c = m_torrent_file->collections();
+ for (std::vector<std::string>::iterator i = c.begin(), end(c.end());
+ i != end; ++i)
+ {
+ std::vector<boost::shared_ptr<torrent> > ts = m_ses.find_collection(*i);
+
+ for (std::vector<boost::shared_ptr<torrent> >::iterator k = ts.begin()
+ , end(ts.end()); k != end; ++k)
+ {
+ // Only attempt to reuse files from torrents that are seeding.
+ | ||
relevance 0 | ../src/torrent.cpp:1973 | this could be optimized by looking up which files are complete and just look at those |
this could be optimized by looking up which files are
+complete and just look at those../src/torrent.cpp:1973 i != end; ++i)
+ {
+ boost::shared_ptr<torrent> t = m_ses.find_torrent(*i).lock();
+ if (!t) continue;
+
+ // Only attempt to reuse files from torrents that are seeding.
+ if (!t->is_seed()) continue;
+
+ res.match(t->get_torrent_copy(), t->save_path());
+ }
+ std::vector<std::string> c = m_torrent_file->collections();
+ for (std::vector<std::string>::iterator i = c.begin(), end(c.end());
+ i != end; ++i)
+ {
+ std::vector<boost::shared_ptr<torrent> > ts = m_ses.find_collection(*i);
+
+ for (std::vector<boost::shared_ptr<torrent> >::iterator k = ts.begin()
+ , end(ts.end()); k != end; ++k)
+ {
+ // Only attempt to reuse files from torrents that are seeding.
+ if (!(*k)->is_seed()) continue;
+
+ res.match((*k)->get_torrent_copy(), (*k)->save_path());
+ }
+ }
+
+ std::vector<resolve_links::link_t> const& l = res.get_links();
+ if (!l.empty())
+ {
+ links.reset(new std::vector<std::string>(l.size()));
+ for (std::vector<resolve_links::link_t>::const_iterator i = l.begin()
+ , end(l.end()); i != end; ++i)
+ {
+ if (!i->ti) continue;
+
+ torrent_info const& ti = *i->ti;
+ std::string const& save_path = i->save_path;
+ links->push_back(combine_path(save_path
+ , ti.files().file_path(i->file_idx)));
+ }
+ }
+ }
+#endif // TORRENT_DISABLE_MUTABLE_TORRENTS
+
+ inc_refcount("check_fastresume");
+ // async_check_fastresume will release links
+ m_ses.disk_thread().async_check_fastresume(
+ m_storage.get(), m_resume_data ? &m_resume_data->node : NULL
+ , links, boost::bind(&torrent::on_resume_data_checked
+ , shared_from_this(), _1));
+#ifndef TORRENT_DISABLE_LOGGING
+ | ||
relevance 0 | ../src/torrent.cpp:2140 | there may be peer extensions relying on the torrent extension still being alive. Only do this if there are no peers. And when the last peer is disconnected, if the torrent is unloaded, clear the extensions m_extensions.clear(); |
there may be peer extensions relying on the torrent extension
still being alive. Only do this if there are no peers. And when the last peer
is disconnected, if the torrent is unloaded, clear the extensions
-m_extensions.clear();../src/torrent.cpp:2061 // pinned torrents are not allowed to be swapped out
+m_extensions.clear();../src/torrent.cpp:2140 // pinned torrents are not allowed to be swapped out
TORRENT_ASSERT(!m_pinned);
m_should_be_loaded = false;
@@ -5414,9 +6658,9 @@ m_extensions.clear();../src/torrent.cpp:2061relevance 0 | ../src/torrent.cpp:2736 | this pattern is repeated in a few places. Factor this into a function and generalize the concept of a torrent having a dedicated listen port |
|
this pattern is repeated in a few places. Factor this into
+ | ||
relevance 0 | ../src/torrent.cpp:2816 | this pattern is repeated in a few places. Factor this into a function and generalize the concept of a torrent having a dedicated listen port |
this pattern is repeated in a few places. Factor this into
a function and generalize the concept of a torrent having a
-dedicated listen port../src/torrent.cpp:2736 // if the files haven't been checked yet, we're
+dedicated listen port../src/torrent.cpp:2816 // if the files haven't been checked yet, we're
// not ready for peers. Except, if we don't have metadata,
// we need peers to download from
if (!m_files_checked && valid_metadata()) return;
@@ -5455,19 +6699,19 @@ dedicated listen port../src/torrent.cpp:2736 | ||
relevance 0 | ../src/torrent.cpp:3515 | add one peer per IP the hostname resolves to |
add one peer per IP the hostname resolves to../src/torrent.cpp:3515#endif
+ | ||
relevance 0 | ../src/torrent.cpp:3593 | add one peer per IP the hostname resolves to |
add one peer per IP the hostname resolves to../src/torrent.cpp:3593#endif
void torrent::on_peer_name_lookup(error_code const& e
, std::vector<address> const& host_list, int port)
@@ -5480,7 +6724,7 @@ dedicated listen port../src/torrent.cpp:2736../src/torrent.cpp:2736../src/torrent.cpp:2736relevance 0 | ../src/torrent.cpp:4509 | update suggest_piece? |
|
update suggest_piece?../src/torrent.cpp:4509
+ | ||
relevance 0 | ../src/torrent.cpp:4587 | update suggest_piece? |
update suggest_piece?../src/torrent.cpp:4587
void torrent::peer_has_all(peer_connection const* peer)
{
if (has_picker())
@@ -5569,8 +6813,8 @@ dedicated listen port../src/torrent.cpp:2736relevance 0 | ../src/torrent.cpp:4652 | really, we should just keep the picker around in this case to maintain the availability counters |
|
really, we should just keep the picker around
-in this case to maintain the availability counters../src/torrent.cpp:4652 pieces.reserve(cs.pieces.size());
+ | ||
relevance 0 | ../src/torrent.cpp:4730 | really, we should just keep the picker around in this case to maintain the availability counters |
really, we should just keep the picker around
+in this case to maintain the availability counters../src/torrent.cpp:4730 pieces.reserve(cs.pieces.size());
// sort in ascending order, to get most recently used first
std::sort(cs.pieces.begin(), cs.pieces.end()
@@ -5621,12 +6865,12 @@ in this case to maintain the availability counters../src/torrent.cpp:46
}
void torrent::abort()
- | ||
relevance 0 | ../src/torrent.cpp:6625 | make this more generic to not just work if files have been renamed, but also if they have been merged into a single file for instance maybe use the same format as .torrent files and reuse some code from torrent_info The mapped_files needs to be read both in the network thread and in the disk thread, since they both have their own mapped files structures which are kept in sync |
make this more generic to not just work if files have been
+ | ||
relevance 0 | ../src/torrent.cpp:6704 | make this more generic to not just work if files have been renamed, but also if they have been merged into a single file for instance maybe use the same format as .torrent files and reuse some code from torrent_info The mapped_files needs to be read both in the network thread and in the disk thread, since they both have their own mapped files structures which are kept in sync |
make this more generic to not just work if files have been
renamed, but also if they have been merged into a single file for instance
maybe use the same format as .torrent files and reuse some code from torrent_info
The mapped_files needs to be read both in the network thread
and in the disk thread, since they both have their own mapped files structures
-which are kept in sync../src/torrent.cpp:6625 m_last_upload = tmp == -1 ? (std::numeric_limits<boost::int16_t>::min)() : now - tmp;
+which are kept in sync../src/torrent.cpp:6704 m_last_upload = tmp == -1 ? (std::numeric_limits<boost::int16_t>::min)() : now - tmp;
if (m_use_resume_save_path)
{
@@ -5646,12 +6890,12 @@ which are kept in sync../src/torrent.cpp:6625 lazy_entry const* mapped_files = rd.dict_find_list("mapped_files");
- if (mapped_files && mapped_files->list_size() == m_torrent_file->num_files())
+ bdecode_node mapped_files = rd.dict_find_list("mapped_files");
+ if (mapped_files && mapped_files.list_size() == m_torrent_file->num_files())
{
for (int i = 0; i < m_torrent_file->num_files(); ++i)
{
- std::string new_filename = mapped_files->list_string_value_at(i);
+ std::string new_filename = mapped_files.list_string_value_at(i);
if (new_filename.empty()) continue;
m_torrent_file->rename_file(i, new_filename);
}
@@ -5664,40 +6908,40 @@ which are kept in sync../src/torrent.cpp:6625 | ||
relevance 0 | ../src/torrent.cpp:6743 | if this is a merkle torrent and we can't restore the tree, we need to wipe all the bits in the have array, but not necessarily we might want to do a full check to see if we have all the pieces. This is low priority since almost no one uses merkle torrents |
if this is a merkle torrent and we can't
+ | ||
relevance 0 | ../src/torrent.cpp:6822 | if this is a merkle torrent and we can't restore the tree, we need to wipe all the bits in the have array, but not necessarily we might want to do a full check to see if we have all the pieces. This is low priority since almost no one uses merkle torrents |
if this is a merkle torrent and we can't
restore the tree, we need to wipe all the
bits in the have array, but not necessarily
we might want to do a full check to see if we have
all the pieces. This is low priority since almost
-no one uses merkle torrents../src/torrent.cpp:6743 add_web_seed(url, web_seed_entry::http_seed);
+no one uses merkle torrents../src/torrent.cpp:6822 add_web_seed(url, web_seed_entry::http_seed);
}
}
if (m_torrent_file->is_merkle_torrent())
{
- lazy_entry const* mt = rd.dict_find_string("merkle tree");
+ bdecode_node mt = rd.dict_find_string("merkle tree");
if (mt)
{
std::vector<sha1_hash> tree;
tree.resize(m_torrent_file->merkle_tree().size());
- std::memcpy(&tree[0], mt->string_ptr()
- , (std::min)(mt->string_length(), int(tree.size()) * 20));
- if (mt->string_length() < int(tree.size()) * 20)
- std::memset(&tree[0] + mt->string_length() / 20, 0
- , tree.size() - mt->string_length() / 20);
+ std::memcpy(&tree[0], mt.string_ptr()
+ , (std::min)(mt.string_length(), int(tree.size()) * 20));
+ if (mt.string_length() < int(tree.size()) * 20)
+ std::memset(&tree[0] + mt.string_length() / 20, 0
+ , tree.size() - mt.string_length() / 20);
m_torrent_file->set_merkle_tree(tree);
}
else
@@ -5733,9 +6977,9 @@ no one uses merkle torrents../src/torrent.cpp:6743 | ||
relevance 0 | ../src/torrent.cpp:6934 | make this more generic to not just work if files have been renamed, but also if they have been merged into a single file for instance. using file_base |
make this more generic to not just work if files have been
+ | ||
relevance 0 | ../src/torrent.cpp:7013 | make this more generic to not just work if files have been renamed, but also if they have been merged into a single file for instance. using file_base |
make this more generic to not just work if files have been
renamed, but also if they have been merged into a single file for instance.
-using file_base../src/torrent.cpp:6934 pieces.resize(m_torrent_file->num_pieces());
+using file_base../src/torrent.cpp:7013 pieces.resize(m_torrent_file->num_pieces());
if (!has_picker())
{
std::memset(&pieces[0], m_have_all, pieces.size());
@@ -5786,9 +7030,9 @@ using file_base../src/torrent.cpp:6934relevance 0 | ../src/torrent.cpp:8936 | add a flag to ignore stats, and only care about resume data for content. For unchanged files, don't trigger a load of the metadata just to save an empty resume data file |
|
add a flag to ignore stats, and only care about resume data for
+ | ||
relevance 0 | ../src/torrent.cpp:9028 | add a flag to ignore stats, and only care about resume data for content. For unchanged files, don't trigger a load of the metadata just to save an empty resume data file |
add a flag to ignore stats, and only care about resume data for
content. For unchanged files, don't trigger a load of the metadata
-just to save an empty resume data file../src/torrent.cpp:8936 if (m_complete != 0xffffff) seeds = m_complete;
+just to save an empty resume data file../src/torrent.cpp:9028 if (m_complete != 0xffffff) seeds = m_complete;
else seeds = m_peer_list ? m_peer_list->num_seeds() : 0;
if (m_incomplete != 0xffffff) downloaders = m_incomplete;
@@ -5815,22 +7059,22 @@ just to save an empty resume data file../src/torrent.cpp:8936../src/torrent.cpp:8936
| ||
relevance 0 | ../src/torrent.cpp:10546 | instead of resorting the whole list, insert the peers directly into the right place |
instead of resorting the whole list, insert the peers
-directly into the right place../src/torrent.cpp:10546 printf("timed out [average-piece-time: %d ms ]\n"
+ | ||
relevance 0 | ../src/torrent.cpp:10641 | instead of resorting the whole list, insert the peers directly into the right place |
instead of resorting the whole list, insert the peers
+directly into the right place../src/torrent.cpp:10641 printf("timed out [average-piece-time: %d ms ]\n"
, m_average_piece_time);
#endif
}
@@ -5891,7 +7135,7 @@ directly into the right place../src/torrent.cpp:10546relevance 0 | ../src/torrent_peer.cpp:176 | how do we deal with our external address changing? |
|
how do we deal with our external address changing?../src/torrent_peer.cpp:176 , is_v6_addr(false)
+ | ||
relevance 0 | ../src/torrent_peer.cpp:179 | how do we deal with our external address changing? |
how do we deal with our external address changing?../src/torrent_peer.cpp:179 , is_v6_addr(false)
#endif
#if TORRENT_USE_I2P
, is_i2p_addr(false)
@@ -5942,7 +7186,7 @@ directly into the right place../src/torrent.cpp:10546relevance 0 | ../src/udp_socket.cpp:286 | it would be nice to detect this on posix systems also |
|
it would be nice to detect this on posix systems also../src/udp_socket.cpp:286 --m_v6_outstanding;
+ | ||
relevance 0 | ../src/udp_socket.cpp:288 | it would be nice to detect this on posix systems also |
it would be nice to detect this on posix systems also../src/udp_socket.cpp:288 --m_v6_outstanding;
}
else
#endif
@@ -5993,7 +7237,7 @@ void udp_socket::call_handler(error_code const& ec, udp::endpoint const&
ret = (*i)->incoming_packet(ec, ep, buf, size);
} TORRENT_CATCH (std::exception&) {}
if (*i == NULL) i = m_observers.erase(i);
- | ||
relevance 0 | ../src/udp_socket.cpp:777 | use the system resolver_interface here |
use the system resolver_interface here../src/udp_socket.cpp:777
+ | ||
relevance 0 | ../src/udp_socket.cpp:779 | use the system resolver_interface here |
use the system resolver_interface here../src/udp_socket.cpp:779
void udp_socket::set_proxy_settings(proxy_settings const& ps)
{
CHECK_MAGIC;
@@ -6044,59 +7288,8 @@ void udp_socket::on_name_lookup(error_code const& e, tcp::resolver::iterator
+ m_outstanding_socks);
if (m_abort) return;
- | ||
relevance 0 | ../src/upnp.cpp:71 | listen_interface is not used. It's meant to bind the broadcast socket |
listen_interface is not used. It's meant to bind the broadcast socket../src/upnp.cpp:71#include <asio/ip/multicast.hpp>
-#else
-#include <boost/asio/ip/host_name.hpp>
-#include <boost/asio/ip/multicast.hpp>
-#endif
-#include <cstdlib>
-
-namespace libtorrent {
-
-namespace upnp_errors
-{
- boost::system::error_code make_error_code(error_code_enum e)
- {
- return error_code(e, get_upnp_category());
- }
-
-} // upnp_errors namespace
-
-static error_code ec;
-
-upnp::upnp(io_service& ios
- , address const& listen_interface, std::string const& user_agent
- , portmap_callback_t const& cb, log_callback_t const& lcb
- , bool ignore_nonrouters)
- : m_user_agent(user_agent)
- , m_callback(cb)
- , m_log_callback(lcb)
- , m_retry_count(0)
- , m_io_service(ios)
- , m_resolver(ios)
- , m_socket(udp::endpoint(address_v4::from_string("239.255.255.250", ec), 1900))
- , m_broadcast_timer(ios)
- , m_refresh_timer(ios)
- , m_map_timer(ios)
- , m_disabled(false)
- , m_closing(false)
- , m_ignore_non_routers(ignore_nonrouters)
- , m_last_if_update(min_time())
-{
- TORRENT_ASSERT(cb);
-}
-
-void upnp::start(void* state)
-{
- error_code ec;
- m_socket.open(boost::bind(&upnp::on_reply, self(), _1, _2, _3)
- , m_refresh_timer.get_io_service(), ec);
-
- if (state)
- {
- upnp_state_t* s = (upnp_state_t*)state;
- | ||
relevance 0 | ../src/ut_metadata.cpp:316 | we really need to increment the refcounter on the torrent while this buffer is still in the peer's send buffer |
we really need to increment the refcounter on the torrent
-while this buffer is still in the peer's send buffer../src/ut_metadata.cpp:316 if (!m_tp.need_loaded()) return;
+ | ||
relevance 0 | ../src/ut_metadata.cpp:313 | we really need to increment the refcounter on the torrent while this buffer is still in the peer's send buffer |
we really need to increment the refcounter on the torrent
+while this buffer is still in the peer's send buffer../src/ut_metadata.cpp:313 if (!m_tp.need_loaded()) return;
metadata = m_tp.metadata().begin + offset;
metadata_piece_size = (std::min)(
int(m_tp.get_metadata_size() - offset), 16 * 1024);
@@ -6131,10 +7324,11 @@ while this buffer is still in the peer's send buffer../src/ut_metadata.
if (length > 17 * 1024)
{
-#ifdef TORRENT_LOGGING
- m_pc.peer_log("<== UT_METADATA [ packet too big %d ]", length);
+#ifndef TORRENT_DISABLE_LOGGING
+ m_pc.peer_log(peer_log_alert::incoming_message, "UT_METADATA"
+ , "packet too big %d", length);
#endif
- m_pc.disconnect(errors::invalid_metadata_message, peer_connection_interface::op_bittorrent, 2);
+ m_pc.disconnect(errors::invalid_metadata_message, op_bittorrent, 2);
return true;
}
@@ -6144,10 +7338,10 @@ while this buffer is still in the peer's send buffer../src/ut_metadata.
entry msg = bdecode(body.begin, body.end, len);
if (msg.type() != entry::dictionary_t)
{
-#ifdef TORRENT_LOGGING
- m_pc.peer_log("<== UT_METADATA [ not a dictionary ]");
-#endif
- | ||
relevance 0 | ../src/utp_stream.cpp:1644 | this loop may not be very efficient |
this loop may not be very efficient../src/utp_stream.cpp:1644
+#ifndef TORRENT_DISABLE_LOGGING
+ m_pc.peer_log(peer_log_alert::incoming_message, "UT_METADATA"
+ | ||
relevance 0 | ../src/utp_stream.cpp:1709 | this loop is not very efficient. It could be fixed by having a separate list of sequence numbers that need resending |
this loop is not very efficient. It could be fixed by having
+a separate list of sequence numbers that need resending../src/utp_stream.cpp:1709
char* m_buf;
};
@@ -6198,7 +7392,7 @@ bool utp_socket_impl::send_pkt(int flags)
if (sack > 32) sack = 32;
}
- | ||
relevance 0 | ../src/web_connection_base.cpp:73 | introduce a web-seed default class which has a low download priority |
introduce a web-seed default class which has a low download priority../src/web_connection_base.cpp:73{
+ | ||
relevance 0 | ../src/web_connection_base.cpp:73 | introduce a web-seed default class which has a low download priority |
introduce a web-seed default class which has a low download priority../src/web_connection_base.cpp:73{
web_connection_base::web_connection_base(
peer_connection_args const& pack
, web_seed_t& web)
@@ -6249,8 +7443,8 @@ bool utp_socket_impl::send_pkt(int flags)
// according to the settings.
return m_settings.get_int(settings_pack::urlseed_timeout);
}
- | ||
relevance 0 | ../src/kademlia/dht_tracker.cpp:295 | ideally this function would be called when the put completes |
ideally this function would be called when the
-put completes../src/kademlia/dht_tracker.cpp:295 // since it controls whether we re-put the content
+ | ||
relevance 0 | ../src/kademlia/dht_tracker.cpp:307 | ideally this function would be called when the put completes |
ideally this function would be called when the
+put completes../src/kademlia/dht_tracker.cpp:307 // since it controls whether we re-put the content
TORRENT_ASSERT(!it.is_mutable());
f(it);
return false;
@@ -6280,6 +7474,8 @@ put completes../src/kademlia/dht_tracker.cpp:295 | ||
relevance 0 | ../include/libtorrent/bitfield.hpp:158 | rename to data() ? |
rename to data() ?../include/libtorrent/bitfield.hpp:158 if (m_buf[i] != 0) return false;
+ | ||
relevance 0 | ../include/libtorrent/bitfield.hpp:160 | rename to data() ? |
rename to data() ?../include/libtorrent/bitfield.hpp:160 if (m_buf[i] != 0) return false;
}
return true;
}
@@ -6309,7 +7503,7 @@ put completes../src/kademlia/dht_tracker.cpp:295 char const* bytes() const { return (char const*)m_buf; }
+ char const* bytes() const { return reinterpret_cast<char const*>(m_buf); }
// copy operator
bitfield& operator=(bitfield const& rhs)
@@ -6336,9 +7530,7 @@ put completes../src/kademlia/dht_tracker.cpp:295 | ||
relevance 0 | ../include/libtorrent/block_cache.hpp:213 | make this 32 bits and to count seconds since the block cache was created |
make this 32 bits and to count seconds since the block cache was created../include/libtorrent/block_cache.hpp:213
+
+ for (int i = 0; i < words; ++i)
+ | ||
relevance 0 | ../include/libtorrent/block_cache.hpp:219 | make this 32 bits and to count seconds since the block cache was created |
make this 32 bits and to count seconds since the block cache was created../include/libtorrent/block_cache.hpp:219
bool operator==(cached_piece_entry const& rhs) const
{ return storage.get() == rhs.storage.get() && piece == rhs.piece; }
@@ -6372,7 +7566,7 @@ put completes../src/kademlia/dht_tracker.cpp:295 ptime expire;
+ time_point expire;
boost::uint64_t piece:22;
@@ -6403,7 +7597,7 @@ put completes../src/kademlia/dht_tracker.cpp:295relevance 0 | ../include/libtorrent/config.hpp:334 | Make this count Unicode characters instead of bytes on windows |
|
Make this count Unicode characters instead of bytes on windows../include/libtorrent/config.hpp:334#pragma message ( "unknown OS, assuming BSD" )
+ | ||
relevance 0 | ../include/libtorrent/config.hpp:339 | Make this count Unicode characters instead of bytes on windows |
Make this count Unicode characters instead of bytes on windows../include/libtorrent/config.hpp:339#pragma message ( "unknown OS, assuming BSD" )
#else
#warning "unknown OS, assuming BSD"
#endif
@@ -6454,7 +7648,7 @@ put completes../src/kademlia/dht_tracker.cpp:295relevance 0 | ../include/libtorrent/disk_buffer_pool.hpp:134 | try to remove the observers, only using the async_allocate handlers |
|
try to remove the observers, only using the async_allocate handlers../include/libtorrent/disk_buffer_pool.hpp:134
+ | ||
relevance 0 | ../include/libtorrent/disk_buffer_pool.hpp:137 | try to remove the observers, only using the async_allocate handlers |
try to remove the observers, only using the async_allocate handlers../include/libtorrent/disk_buffer_pool.hpp:137
// number of bytes per block. The BitTorrent
// protocol defines the block size to 16 KiB.
const int m_block_size;
@@ -6505,11 +7699,7 @@ put completes../src/kademlia/dht_tracker.cpp:295relevance 0 | ../include/libtorrent/file.hpp:168 | move this into a separate header file, TU pair |
|
move this into a separate header file, TU pair../include/libtorrent/file.hpp:168 TORRENT_EXTRA_EXPORT bool is_root_path(std::string const& f);
-
-
- // internal used by create_torrent.hpp
- TORRENT_EXTRA_EXPORT std::string parent_path(std::string const& f);
+ | ||
relevance 0 | ../include/libtorrent/file.hpp:173 | move this into a separate header file, TU pair |
move this into a separate header file, TU pair../include/libtorrent/file.hpp:173 TORRENT_EXTRA_EXPORT std::string parent_path(std::string const& f);
TORRENT_EXTRA_EXPORT bool has_parent_path(std::string const& f);
TORRENT_EXTRA_EXPORT char const* filename_cstr(char const* f);
@@ -6517,6 +7707,10 @@ put completes../src/kademlia/dht_tracker.cpp:295 | ||
relevance 0 | ../include/libtorrent/peer_connection.hpp:205 | make this a raw pointer (to save size in the first cache line) and make the constructor take a raw pointer. torrent objects should always outlive their peers |
make this a raw pointer (to save size in
+ | ||
relevance 0 | ../include/libtorrent/heterogeneous_queue.hpp:185 | if this throws, should we do anything? |
if this throws, should we do anything?../include/libtorrent/heterogeneous_queue.hpp:185 - 1) / sizeof(uintptr_t);
+
+ void grow_capacity(int size)
+ {
+ int amount_to_grow = (std::max)(size + header_size
+ , (std::max)(m_capacity * 3 / 2, 128));
+
+ uintptr_t* new_storage = new uintptr_t[m_capacity + amount_to_grow];
+
+ uintptr_t* src = m_storage;
+ uintptr_t* dst = new_storage;
+ uintptr_t const* const end = m_storage + m_size;
+ while (src < end)
+ {
+ header_t* src_hdr = reinterpret_cast<header_t*>(src);
+ header_t* dst_hdr = reinterpret_cast<header_t*>(dst);
+ *dst_hdr = *src_hdr;
+ src += header_size;
+ dst += header_size;
+ TORRENT_ASSERT(src + src_hdr->len <= end);
+ src_hdr->move(dst, src);
+ src += src_hdr->len;
+ dst += src_hdr->len;
+ }
+
+ delete[] m_storage;
+ m_storage = new_storage;
+ m_capacity += amount_to_grow;
+ }
+
+ template <class U>
+ static void move(uintptr_t* dst, uintptr_t* src)
+ {
+ U* rhs = reinterpret_cast<U*>(src);
+#if __cplusplus >= 201103L
+ new (dst) U(std::move(*rhs));
+#else
+ new (dst) U(*rhs);
+#endif
+ rhs->~U();
+ }
+
+ uintptr_t* m_storage;
+ // number of uintptr_t's allocated under m_storage
+ int m_capacity;
+ // the number of uintptr_t's used under m_storage
+ int m_size;
+ // the number of objects allocated under m_storage
+ int m_num_items;
+ };
+}
+ | ||
relevance 0 | ../include/libtorrent/peer_connection.hpp:204 | make this a raw pointer (to save size in the first cache line) and make the constructor take a raw pointer. torrent objects should always outlive their peers |
make this a raw pointer (to save size in
the first cache line) and make the constructor
take a raw pointer. torrent objects should always
-outlive their peers../include/libtorrent/peer_connection.hpp:205 , m_connecting(!t.expired())
+outlive their peers../include/libtorrent/peer_connection.hpp:204 , m_connecting(!t.expired())
, m_endgame_mode(false)
, m_snubbed(false)
, m_interesting(false)
@@ -6610,8 +7855,8 @@ outlive their peers../include/libtorrent/peer_connection.hpp:205 | ||
relevance 0 | ../include/libtorrent/peer_connection.hpp:1055 | factor this out into its own class with a virtual interface torrent and session should implement this interface |
factor this out into its own class with a virtual interface
-torrent and session should implement this interface../include/libtorrent/peer_connection.hpp:1055
+ | ||
relevance 0 | ../include/libtorrent/peer_connection.hpp:1047 | factor this out into its own class with a virtual interface torrent and session should implement this interface |
factor this out into its own class with a virtual interface
+torrent and session should implement this interface../include/libtorrent/peer_connection.hpp:1047
// the local endpoint for this peer, i.e. our address
// and our port. If this is set for outgoing connections
// before the connection completes, it means we want to
@@ -6662,9 +7907,7 @@ torrent and session should implement this interface../include/libtorren
// we have got from this peer. If the request
// queue gets empty, and there have been
// invalid requests, we can assume the
- | ||
relevance 0 | ../include/libtorrent/peer_connection_interface.hpp:45 | make this interface smaller! |
make this interface smaller!../include/libtorrent/peer_connection_interface.hpp:45SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ | ||
relevance 0 | ../include/libtorrent/peer_connection_interface.hpp:47 | make this interface smaller! |
make this interface smaller!../include/libtorrent/peer_connection_interface.hpp:47CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
@@ -6675,6 +7918,8 @@ POSSIBILITY OF SUCH DAMAGE.
#include "libtorrent/socket.hpp"
#include "libtorrent/error_code.hpp"
+#include "libtorrent/alert_types.hpp"
+#include "libtorrent/operations.hpp" // for operation_t enum
namespace libtorrent
{
@@ -6684,37 +7929,37 @@ namespace libtorrent
struct peer_connection_interface
{
- // these constants are used to identify the operation
- // that failed, causing a peer to disconnect
- enum operation_t
- {
- // this is used when the bittorrent logic
- // determines to disconnect
- op_bittorrent = 0,
- op_iocontrol,
- op_getpeername,
- op_getname,
- op_alloc_recvbuf,
- op_alloc_sndbuf,
- op_file_write,
- op_file_read,
- op_file,
- op_sock_write,
- op_sock_read,
- op_sock_open,
- op_sock_bind,
- op_available,
- op_encryption,
- op_connect,
- op_ssl_handshake,
- op_get_interface,
- };
-
virtual tcp::endpoint const& remote() const = 0;
virtual tcp::endpoint local_endpoint() const = 0;
- virtual void disconnect(error_code const& ec, operation_t op, int error = 0) = 0;
- | ||
relevance 0 | ../include/libtorrent/performance_counters.hpp:132 | should keepalives be in here too? how about dont-have, share-mode, upload-only |
should keepalives be in here too?
-how about dont-have, share-mode, upload-only../include/libtorrent/performance_counters.hpp:132 // a connect candidate
+ virtual void disconnect(error_code const& ec
+ , operation_t op, int error = 0) = 0;
+ virtual peer_id const& pid() const = 0;
+ virtual void set_holepunch_mode() = 0;
+ virtual torrent_peer* peer_info_struct() const = 0;
+ virtual void set_peer_info(torrent_peer* pi) = 0;
+ virtual bool is_outgoing() const = 0;
+ virtual void add_stat(boost::int64_t downloaded, boost::int64_t uploaded) = 0;
+ virtual bool fast_reconnect() const = 0;
+ virtual bool is_choked() const = 0;
+ virtual bool failed() const = 0;
+ virtual stat const& statistics() const = 0;
+ virtual void get_peer_info(peer_info& p) const = 0;
+#ifndef TORRENT_DISABLE_LOGGING
+ virtual void peer_log(peer_log_alert::direction_t direction
+ , char const* event, char const* fmt = "", ...) const
+#if defined __GNUC__ || defined __clang__
+ __attribute__((format(printf, 4, 5)))
+#endif
+ = 0;
+#endif
+ protected:
+ ~peer_connection_interface() {}
+ };
+}
+
+#endif
+ | ||
relevance 0 | ../include/libtorrent/performance_counters.hpp:139 | should keepalives be in here too? how about dont-have, share-mode, upload-only |
should keepalives be in here too?
+how about dont-have, share-mode, upload-only../include/libtorrent/performance_counters.hpp:139 // a connect candidate
connection_attempt_loops,
// successful incoming connections (not rejected for any reason)
incoming_connections,
@@ -6765,9 +8010,9 @@ how about dont-have, share-mode, upload-only../include/libtorrent/perfo
num_outgoing_cancel,
num_outgoing_dht_port,
num_outgoing_suggest,
- | ||
relevance 0 | ../include/libtorrent/performance_counters.hpp:442 | some space could be saved here by making gauges 32 bits |
some space could be saved here by making gauges 32 bits../include/libtorrent/performance_counters.hpp:442 | ||
relevance 0 | ../include/libtorrent/performance_counters.hpp:443 | restore these to regular integers. Instead have one copy of the counters per thread and collect them at convenient synchronization points |
restore these to regular integers. Instead have one copy
+ | ||
relevance 0 | ../include/libtorrent/performance_counters.hpp:449 | some space could be saved here by making gauges 32 bits |
some space could be saved here by making gauges 32 bits../include/libtorrent/performance_counters.hpp:449 | ||
relevance 0 | ../include/libtorrent/performance_counters.hpp:450 | restore these to regular integers. Instead have one copy of the counters per thread and collect them at convenient synchronization points |
restore these to regular integers. Instead have one copy
of the counters per thread and collect them at convenient
-synchronization points../include/libtorrent/performance_counters.hpp:443 num_utp_deleted,
+synchronization points../include/libtorrent/performance_counters.hpp:450 num_utp_deleted,
num_counters,
num_gauge_counters = num_counters - num_stats_counters
@@ -6792,7 +8037,7 @@ synchronization points../include/libtorrent/performance_counters.hpp:44
#else
// if the atomic type is't lock-free, use a single lock instead, for
// the whole array
- mutex m_mutex;
+ mutable mutex m_mutex;
boost::int64_t m_stats_counter[num_counters];
#endif
};
@@ -6800,7 +8045,7 @@ synchronization points../include/libtorrent/performance_counters.hpp:44
#endif
- | ||
relevance 0 | ../include/libtorrent/piece_picker.hpp:761 | should this be allocated lazily? |
should this be allocated lazily?../include/libtorrent/piece_picker.hpp:761
+ | ||
relevance 0 | ../include/libtorrent/piece_picker.hpp:762 | should this be allocated lazily? |
should this be allocated lazily?../include/libtorrent/piece_picker.hpp:762
std::vector<downloading_piece>::const_iterator find_dl_piece(int queue, int index) const;
std::vector<downloading_piece>::iterator find_dl_piece(int queue, int index);
@@ -6851,8 +8096,8 @@ synchronization points../include/libtorrent/performance_counters.hpp:44
// this holds the information of the blocks in partially downloaded
// pieces. the downloading_piece::info index point into this vector for
- | ||
relevance 0 | ../include/libtorrent/proxy_base.hpp:171 | it would be nice to remember the bind port and bind once we know where the proxy is m_sock.bind(endpoint, ec); |
it would be nice to remember the bind port and bind once we know where the proxy is
-m_sock.bind(endpoint, ec);../include/libtorrent/proxy_base.hpp:171 void bind(endpoint_type const& /* endpoint */)
+ | ||
relevance 0 | ../include/libtorrent/proxy_base.hpp:173 | it would be nice to remember the bind port and bind once we know where the proxy is m_sock.bind(endpoint, ec); |
it would be nice to remember the bind port and bind once we know where the proxy is
+m_sock.bind(endpoint, ec);../include/libtorrent/proxy_base.hpp:173 void bind(endpoint_type const& /* endpoint */)
{
// m_sock.bind(endpoint);
}
@@ -6903,10 +8148,10 @@ m_sock.bind(endpoint, ec);../include/libtorrent/proxy_base.hpp:171
m_sock.close(ec);
m_resolver.cancel();
}
- | ||
relevance 0 | ../include/libtorrent/receive_buffer.hpp:255 | Detect when the start of the next crpyto packet is aligned with the start of piece data and the crpyto packet is at least as large as the piece data. With a little extra work we could receive directly into a disk buffer in that case. |
Detect when the start of the next crpyto packet is aligned
+ | ||
relevance 0 | ../include/libtorrent/receive_buffer.hpp:258 | Detect when the start of the next crpyto packet is aligned with the start of piece data and the crpyto packet is at least as large as the piece data. With a little extra work we could receive directly into a disk buffer in that case. |
Detect when the start of the next crpyto packet is aligned
with the start of piece data and the crpyto packet is at least
as large as the piece data. With a little extra work
-we could receive directly into a disk buffer in that case.../include/libtorrent/receive_buffer.hpp:255
+we could receive directly into a disk buffer in that case.../include/libtorrent/receive_buffer.hpp:258
void cut(int size, int packet_size, int offset = 0);
void crypto_cut(int size, int packet_size)
@@ -6933,6 +8178,9 @@ we could receive directly into a disk buffer in that case.../include/li
, std::size_t bytes_transfered);
private:
+ // explicitly disallow assignment, to silence msvc warning
+ crypto_receive_buffer& operator=(crypto_receive_buffer const&);
+
int m_recv_pos;
int m_packet_size;
int m_soft_packet_size;
@@ -6943,7 +8191,7 @@ private:
} // namespace libtorrent
#endif // #ifndef TORRENT_RECEIVE_BUFFER_HPP_INCLUDED
- | ||
relevance 0 | ../include/libtorrent/session.hpp:861 | add get_peer_class_type_filter() as well |
add get_peer_class_type_filter() as well../include/libtorrent/session.hpp:861 //
+ | ||
relevance 0 | ../include/libtorrent/session.hpp:844 | add get_peer_class_type_filter() as well |
add get_peer_class_type_filter() as well../include/libtorrent/session.hpp:844 //
// The ``peer_class`` argument cannot be greater than 31. The bitmasks
// representing peer classes in the ``peer_class_filter`` are 32 bits.
//
@@ -6994,10 +8242,10 @@ private:
// destructs.
//
// For more information on peer classes, see peer-classes_.
- | ||
relevance 0 | ../include/libtorrent/settings_pack.hpp:1099 | deprecate this ``max_rejects`` is the number of piece requests we will reject in a row while a peer is choked before the peer is considered abusive and is disconnected. |
deprecate this
+ | ||
relevance 0 | ../include/libtorrent/settings_pack.hpp:1097 | deprecate this ``max_rejects`` is the number of piece requests we will reject in a row while a peer is choked before the peer is considered abusive and is disconnected. |
deprecate this
``max_rejects`` is the number of piece requests we will reject in a
row while a peer is choked before the peer is considered abusive
-and is disconnected.../include/libtorrent/settings_pack.hpp:1099 auto_manage_startup,
+and is disconnected.../include/libtorrent/settings_pack.hpp:1097 auto_manage_startup,
// ``seeding_piece_quota`` is the number of pieces to send to a peer,
// when seeding, before rotating in another peer to the unchoke set.
@@ -7048,7 +8296,7 @@ and is disconnected.../include/libtorrent/settings_pack.hpp:1099 | ||
relevance 0 | ../include/libtorrent/torrent.hpp:1265 | this wastes 5 bits per file |
this wastes 5 bits per file../include/libtorrent/torrent.hpp:1265 typedef std::list<boost::shared_ptr<torrent_plugin> > extension_list_t;
+ | ||
relevance 0 | ../include/libtorrent/torrent.hpp:1267 | this wastes 5 bits per file |
this wastes 5 bits per file../include/libtorrent/torrent.hpp:1267 typedef std::list<boost::shared_ptr<torrent_plugin> > extension_list_t;
extension_list_t m_extensions;
#endif
@@ -7099,7 +8347,7 @@ and is disconnected.../include/libtorrent/settings_pack.hpp:1099 | ||
relevance 0 | ../include/libtorrent/torrent.hpp:1324 | These two bitfields should probably be coalesced into one |
These two bitfields should probably be coalesced into one../include/libtorrent/torrent.hpp:1324 // the .torrent file from m_url
+ | ||
relevance 0 | ../include/libtorrent/torrent.hpp:1326 | These two bitfields should probably be coalesced into one |
These two bitfields should probably be coalesced into one../include/libtorrent/torrent.hpp:1326 // the .torrent file from m_url
// std::vector<char> m_torrent_file_buf;
// this is a list of all pieces that we have announced
@@ -7150,8 +8398,8 @@ and is disconnected.../include/libtorrent/settings_pack.hpp:1099 | ||
relevance 0 | ../include/libtorrent/torrent_info.hpp:123 | include the number of peers received from this tracker, at last announce |
include the number of peers received from this tracker, at last
-announce../include/libtorrent/torrent_info.hpp:123
+ | ||
relevance 0 | ../include/libtorrent/torrent_info.hpp:115 | include the number of peers received from this tracker, at last announce |
include the number of peers received from this tracker, at last
+announce../include/libtorrent/torrent_info.hpp:115
// if this tracker failed the last time it was contacted
// this error code specifies what error occurred
error_code last_error;
@@ -7166,10 +8414,10 @@ announce../include/libtorrent/torrent_info.hpp:123
// these are either -1 or the scrape information this tracker last
@@ -7202,8 +8450,8 @@ announce../include/libtorrent/torrent_info.hpp:123relevance 0 | ../include/libtorrent/torrent_info.hpp:270 | there may be some opportunities to optimize the size if torrent_info. specifically to turn some std::string and std::vector into pointers |
|
there may be some opportunities to optimize the size if torrent_info.
-specifically to turn some std::string and std::vector into pointers../include/libtorrent/torrent_info.hpp:270 // The URL of the web seed
+ | ||
relevance 0 | ../include/libtorrent/torrent_info.hpp:262 | there may be some opportunities to optimize the size if torrent_info. specifically to turn some std::string and std::vector into pointers |
there may be some opportunities to optimize the size if torrent_info.
+specifically to turn some std::string and std::vector into pointers../include/libtorrent/torrent_info.hpp:262 // The URL of the web seed
std::string url;
// Optional authentication. If this is set, it's passed
@@ -7233,10 +8481,10 @@ specifically to turn some std::string and std::vector into pointers../i
// metadata will be created by libtorrent as soon as it has been
// downloaded from the swarm.
//
- // The constructor that takes a lazy_entry will create a torrent_info
+ // The constructor that takes a bdecode_node will create a torrent_info
// object from the information found in the given torrent_file. The
- // lazy_entry represents a tree node in an bencoded file. To load an
- // ordinary .torrent file into a lazy_entry, use lazy_bdecode().
+ // bdecode_node represents a tree node in an bencoded file. To load an
+ // ordinary .torrent file into a bdecode_node, use bdecode().
//
// The version that takes a buffer pointer and a size will decode it as a
// .torrent file and initialize the torrent_info object for you.
@@ -7254,7 +8502,7 @@ specifically to turn some std::string and std::vector into pointers../i
// error occur, they will simply set the error code to describe what went
// wrong and not fully initialize the torrent_info object. The overloads
// that do not take the extra error_code parameter will always throw if
- | ||
relevance 0 | ../include/libtorrent/tracker_manager.hpp:382 | this should be unique_ptr in the future |
this should be unique_ptr in the future../include/libtorrent/tracker_manager.hpp:382 // this is only used for SOCKS packets, since
+ | ||
relevance 0 | ../include/libtorrent/tracker_manager.hpp:384 | this should be unique_ptr in the future |
this should be unique_ptr in the future../include/libtorrent/tracker_manager.hpp:384 // this is only used for SOCKS packets, since
// they may be addressed to hostname
virtual bool incoming_packet(error_code const& e, char const* hostname
, char const* buf, int size);
@@ -7286,7 +8534,7 @@ specifically to turn some std::string and std::vector into pointers../i
resolver_interface& m_host_resolver;
aux::session_settings const& m_settings;
counters& m_stats_counters;
-#if defined TORRENT_LOGGING || TORRENT_USE_ASSERTS
+#if !defined TORRENT_DISABLE_LOGGING || TORRENT_USE_ASSERTS
aux::session_logger& m_ses;
#endif
@@ -7296,7 +8544,7 @@ specifically to turn some std::string and std::vector into pointers../i
#endif // TORRENT_TRACKER_MANAGER_HPP_INCLUDED
- | ||
relevance 0 | ../include/libtorrent/upnp.hpp:108 | support using the windows API for UPnP operations as well |
support using the windows API for UPnP operations as well../include/libtorrent/upnp.hpp:108 external_port_must_be_wildcard = 727
+ | ||
relevance 0 | ../include/libtorrent/upnp.hpp:108 | support using the windows API for UPnP operations as well |
support using the windows API for UPnP operations as well../include/libtorrent/upnp.hpp:108 external_port_must_be_wildcard = 727
};
// hidden
@@ -7347,7 +8595,7 @@ public:
// to refer to mappings that fails or succeeds in the portmap_error_alert_ and
// portmap_alert_ respectively. If The mapping fails immediately, the return value
// is -1, which means failure. There will not be any error alert notification for
- | ||
relevance 0 | ../include/libtorrent/utp_stream.hpp:395 | implement blocking write. Low priority since it's not used (yet) |
implement blocking write. Low priority since it's not used (yet)../include/libtorrent/utp_stream.hpp:395 for (typename Mutable_Buffers::const_iterator i = buffers.begin()
+ | ||
relevance 0 | ../include/libtorrent/utp_stream.hpp:402 | implement blocking write. Low priority since it's not used (yet) |
implement blocking write. Low priority since it's not used (yet)../include/libtorrent/utp_stream.hpp:402 for (typename Mutable_Buffers::const_iterator i = buffers.begin()
, end(buffers.end()); i != end; ++i)
{
using asio::buffer_cast;
@@ -7397,8 +8645,8 @@ public:
{
if (m_impl == 0)
{
- m_io_service.post(boost::bind<void>(handler, asio::error::not_connected, 0));
- | ||
relevance 0 | ../include/libtorrent/kademlia/item.hpp:61 | since this is a public function, it should probably be moved out of this header and into one with other public functions. |
since this is a public function, it should probably be moved
+ m_io_service.post(boost::bind<void>(handler
+ | ||
relevance 0 | ../include/libtorrent/kademlia/item.hpp:61 | since this is a public function, it should probably be moved out of this header and into one with other public functions. |
since this is a public function, it should probably be moved
out of this header and into one with other public functions.../include/libtorrent/kademlia/item.hpp:61#include <boost/array.hpp>
namespace libtorrent { namespace dht
@@ -7450,7 +8698,7 @@ public:
item(entry const& v
, std::pair<char const*, int> salt
, boost::uint64_t seq, char const* pk, char const* sk);
- | ||
relevance 0 | ../include/libtorrent/aux_/session_impl.hpp:836 | should this be renamed m_outgoing_interfaces? |
should this be renamed m_outgoing_interfaces?../include/libtorrent/aux_/session_impl.hpp:836 // listen socket. For each retry the port number
+ | ||
relevance 0 | ../include/libtorrent/aux_/session_impl.hpp:849 | should this be renamed m_outgoing_interfaces? |
should this be renamed m_outgoing_interfaces?../include/libtorrent/aux_/session_impl.hpp:849 // listen socket. For each retry the port number
// is incremented by one
int m_listen_port_retries;
@@ -7501,7 +8749,7 @@ public:
// round-robin index into m_net_interfaces
mutable boost::uint8_t m_interface_index;
- | ||
relevance 0 | ../include/libtorrent/aux_/session_impl.hpp:887 | replace this by a proper asio timer |
replace this by a proper asio timer../include/libtorrent/aux_/session_impl.hpp:887
+ | ||
relevance 0 | ../include/libtorrent/aux_/session_impl.hpp:900 | replace this by a proper asio timer |
replace this by a proper asio timer../include/libtorrent/aux_/session_impl.hpp:900
void open_new_incoming_socks_connection();
enum listen_on_flags_t
@@ -7525,7 +8773,7 @@ public:
// this is used to decide when to recalculate which
// torrents to keep queued and which to activate
- | ||
relevance 0 | ../include/libtorrent/aux_/session_impl.hpp:892 | replace this by a proper asio timer |
replace this by a proper asio timer../include/libtorrent/aux_/session_impl.hpp:892 {
+ | ||
relevance 0 | ../include/libtorrent/aux_/session_impl.hpp:905 | replace this by a proper asio timer |
replace this by a proper asio timer../include/libtorrent/aux_/session_impl.hpp:905 {
open_ssl_socket = 0x10
};
@@ -7551,7 +8799,7 @@ public:
// is only decreased when the unchoke set
// is recomputed, and when it reaches zero,
// the optimistic unchoke is moved to another peer.
- | ||
relevance 0 | ../include/libtorrent/aux_/session_impl.hpp:899 | replace this by a proper asio timer |
replace this by a proper asio timer../include/libtorrent/aux_/session_impl.hpp:899
+ | ||
relevance 0 | ../include/libtorrent/aux_/session_impl.hpp:912 | replace this by a proper asio timer |
replace this by a proper asio timer../include/libtorrent/aux_/session_impl.hpp:912
#ifndef TORRENT_DISABLE_DHT
entry m_dht_state;
#endif
@@ -7602,12 +8850,7 @@ public:
int m_suggest_timer;
// statistics gathered from all torrents.
- | ||
relevance 0 | ../include/libtorrent/aux_/session_interface.hpp:229 | it would be nice to not have this be part of session_interface |
it would be nice to not have this be part of session_interface../include/libtorrent/aux_/session_interface.hpp:229
- virtual boost::uint16_t listen_port() const = 0;
- virtual boost::uint16_t ssl_listen_port() const = 0;
-
- virtual void post_socket_job(socket_job& j) = 0;
-
+ | ||
relevance 0 | ../include/libtorrent/aux_/session_interface.hpp:250 | it would be nice to not have this be part of session_interface |
it would be nice to not have this be part of session_interface../include/libtorrent/aux_/session_interface.hpp:250
// load the specified torrent. also evict one torrent, except
// for the one specified, if we are at the limit of loaded torrents
virtual bool load_torrent(torrent* t) = 0;
@@ -7622,6 +8865,11 @@ public:
virtual bool verify_bound_address(address const& addr, bool utp
, error_code& ec) = 0;
+#ifndef TORRENT_DISABLE_MUTABLE_TORRENTS
+ virtual std::vector<boost::shared_ptr<torrent> > find_collection(
+ std::string const& collection) const = 0;
+#endif
+
virtual proxy_settings proxy() const = 0;
#if TORRENT_USE_I2P
@@ -7653,7 +8901,7 @@ public:
virtual bandwidth_manager* get_bandwidth_manager(int channel) = 0;
- | ||
relevance 0 | ../include/libtorrent/aux_/session_settings.hpp:78 | make this a bitfield |
make this a bitfield../include/libtorrent/aux_/session_settings.hpp:78 if ((name & settings_pack::type_mask) != settings_pack:: type ## _type_base) return default_val; \
+ | ||
relevance 0 | ../include/libtorrent/aux_/session_settings.hpp:78 | make this a bitfield |
make this a bitfield../include/libtorrent/aux_/session_settings.hpp:78 if ((name & settings_pack::type_mask) != settings_pack:: type ## _type_base) return default_val; \
return m_ ## type ## s[name - settings_pack:: type ## _type_base]
struct TORRENT_EXTRA_EXPORT session_settings
@@ -7683,4 +8931,4 @@ public:
#endif
- | ||