forked from premiere/premiere-libtorrent
more on log, consts, refs and cleanup in dht related code (#1082)
more on log, consts, refs and cleanup in dht related code. moving print_state out of main source code
This commit is contained in:
parent 3fa74c6fd5
commit 65cdc15543
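Most of the hunks below apply two recurring cleanups: parameters that were taken by value (addresses, endpoints, node_ids, node_entry) now go by const reference, and bare `if (m_log)` / `if (logger)` checks become explicit `!= nullptr` tests combined with a `should_log()` gate so log-message formatting is skipped when the category is disabled. As a rough, self-contained illustration of those two patterns only (not libtorrent code; `logger`, `endpoint` and `ban_table` are made-up names for this sketch):

```cpp
#include <cstdio>
#include <string>

struct logger
{
	bool should_log() const { return verbose; }          // cheap predicate, checked before formatting
	void log(char const* fmt, char const* arg) const { std::printf(fmt, arg); }
	bool verbose = true;
};

struct endpoint { std::string address; int port; };

struct ban_table
{
	// before: bool incoming(endpoint ep, logger* l);  -- copies the endpoint
	// after: the endpoint is only read here, so a const reference avoids the copy
	bool incoming(endpoint const& ep, logger* l) const
	{
		// explicit nullptr check plus the should_log() gate, so the string
		// formatting below is skipped entirely when logging is off
		if (l != nullptr && l->should_log())
			l->log("incoming packet from: %s\n", ep.address.c_str());
		return true;
	}
};

int main()
{
	logger log;
	ban_table table;
	table.incoming(endpoint{"10.0.0.1", 6881}, &log);
}
```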
@@ -40,7 +40,7 @@ namespace libtorrent { namespace aux
{
// returns the current time, as represented by time_point. The
// resolution of this timer is about 100 ms.
time_point time_now();
TORRENT_EXTRA_EXPORT time_point time_now();

TORRENT_EXTRA_EXPORT void update_time_now();

@@ -44,13 +44,11 @@ POSSIBILITY OF SUCH DAMAGE.
namespace libtorrent
{
// TODO: 2 factor these functions out
TORRENT_EXTRA_EXPORT bool is_local(address const& a);
TORRENT_EXTRA_EXPORT bool is_loopback(address const& addr);
TORRENT_EXTRA_EXPORT bool is_any(address const& addr);
TORRENT_EXTRA_EXPORT bool is_teredo(address const& addr);
TORRENT_EXTRA_EXPORT int cidr_distance(address const& a1, address const& a2);
bool is_ip_address(char const* host);

// determines if the operating system supports IPv6

@@ -146,4 +144,3 @@ namespace libtorrent
}

#endif
@@ -44,7 +44,7 @@ struct direct_traversal : traversal_algorithm
typedef std::function<void(dht::msg const&)> message_callback;

direct_traversal(node& node
, node_id target
, node_id const& target
, message_callback cb)
: traversal_algorithm(node, target)
, m_cb(cb)

@@ -52,7 +52,7 @@ namespace libtorrent { namespace dht
// called every time we receive an incoming packet. Returns
// true if we should let the packet through, and false if
// it's blocked
bool incoming(address addr, time_point now, dht_logger* logger);
bool incoming(address const& addr, time_point now, dht_logger* logger);

void set_rate_limit(int l)
{

@@ -92,4 +92,3 @@ namespace libtorrent { namespace dht
}}

#endif
@@ -52,7 +52,7 @@ public:
// for immutable itms
get_item(node& dht_node
, node_id target
, node_id const& target
, data_callback const& dcallback
, nodes_callback const& ncallback);

@@ -43,13 +43,13 @@ namespace libtorrent { namespace dht
{
// calculate the target hash for an immutable item.
sha1_hash TORRENT_EXTRA_EXPORT item_target_id(span<char const> v);
TORRENT_EXTRA_EXPORT sha1_hash item_target_id(span<char const> v);

// calculate the target hash for a mutable item.
sha1_hash TORRENT_EXTRA_EXPORT item_target_id(span<char const> salt
TORRENT_EXTRA_EXPORT sha1_hash item_target_id(span<char const> salt
, public_key const& pk);

bool TORRENT_EXTRA_EXPORT verify_mutable_item(
TORRENT_EXTRA_EXPORT bool verify_mutable_item(
span<char const> v
, span<char const> salt
, sequence_number seq

@@ -65,7 +65,7 @@ bool TORRENT_EXTRA_EXPORT verify_mutable_item(
// is written into a 64 byte buffer pointed to by ``sig``. The caller
// is responsible for allocating the destination buffer that's passed in
// as the ``sig`` argument. Typically it would be allocated on the stack.
signature TORRENT_EXPORT sign_mutable_item(
TORRENT_EXPORT signature sign_mutable_item(
span<char const> v
, span<char const> salt
, sequence_number seq
@@ -134,12 +134,6 @@ public:
int data_size() const { return int(m_storage.num_torrents()); }
#endif

#ifndef TORRENT_DISABLE_LOGGING
// TODO: 3 make this print to the DHT logger instead
void print_state(std::ostream& os) const
{ m_table.print_state(os); }
#endif

enum flags_t { flag_seed = 1, flag_implied_port = 2 };
void get_peers(sha1_hash const& info_hash
, std::function<void(std::vector<tcp::endpoint> const&)> dcallback

@@ -148,7 +142,7 @@ public:
void announce(sha1_hash const& info_hash, int listen_port, int flags
, std::function<void(std::vector<tcp::endpoint> const&)> f);

void direct_request(udp::endpoint ep, entry& e
void direct_request(udp::endpoint const& ep, entry& e
, std::function<void(msg const&)> f);

void get_item(sha1_hash const& target, std::function<void(item const&)> f);

@@ -213,9 +207,9 @@ public:
bool native_address(udp::endpoint const& ep) const
{ return ep.protocol().family() == m_protocol.protocol.family(); }
bool native_address(tcp::endpoint ep) const
bool native_address(tcp::endpoint const& ep) const
{ return ep.protocol().family() == m_protocol.protocol.family(); }
bool native_address(address addr) const
bool native_address(address const& addr) const
{
return (addr.is_v4() && m_protocol.protocol == m_protocol.protocol.v4())
|| (addr.is_v6() && m_protocol.protocol == m_protocol.protocol.v6());

@@ -181,7 +181,7 @@ public:
};
add_node_status_t add_node_impl(node_entry e);

bool add_node(node_entry e);
bool add_node(node_entry const& e);

// this function is called every time the node sees
// a sign of a node being alive. This node will either

@@ -250,12 +250,6 @@ public:
void replacement_cache(bucket_t& nodes) const;

#ifndef TORRENT_DISABLE_LOGGING
// used for debug and monitoring purposes. This will print out
// the state of the routing table to the given stream
void print_state(std::ostream& os) const;
#endif

int bucket_limit(int bucket) const;

#if TORRENT_USE_INVARIANT_CHECKS

@@ -270,9 +264,15 @@ public:
|| (addr.is_v6() && m_protocol == udp::v6());
}

bool native_endpoint(udp::endpoint ep) const
bool native_endpoint(udp::endpoint const& ep) const
{ return ep.protocol() == m_protocol; }

node_id const& id() const
{ return m_id; }

table_t const& buckets() const
{ return m_buckets; }

private:

#ifndef TORRENT_DISABLE_LOGGING

@@ -326,7 +326,7 @@ private:
ip_set m_ips;

// constant called k in paper
int m_bucket_size;
int const m_bucket_size;
};

} } // namespace libtorrent::dht
@@ -80,7 +80,7 @@ public:
bool incoming(msg const&, node_id* id);
time_duration tick();

bool invoke(entry& e, udp::endpoint target
bool invoke(entry& e, udp::endpoint const& target
, observer_ptr o);

void add_our_id(entry& e);

@@ -349,5 +349,3 @@ namespace libtorrent
maybe_abort();
}
}

@@ -50,7 +50,7 @@ namespace libtorrent { namespace dht
}
}

bool dos_blocker::incoming(address addr, time_point now, dht_logger* logger)
bool dos_blocker::incoming(address const& addr, time_point now, dht_logger* logger)
{
node_ban_entry* match = nullptr;
node_ban_entry* min = m_ban_nodes;

@@ -77,10 +77,13 @@ namespace libtorrent { namespace dht
if (match->count == m_message_rate_limit * 10)
{
#ifndef TORRENT_DISABLE_LOGGING
if (logger != nullptr && logger->should_log(dht_logger::tracker))
{
logger->log(dht_logger::tracker, "BANNING PEER [ ip: %s time: %f count: %d ]"
, print_address(addr).c_str()
, total_milliseconds((now - match->limit) + seconds(10)) / 1000.0
, match->count);
}
#endif
// we've received too many messages in less than 10 seconds
// from this node. Ignore it until it's silent for 5 minutes

@@ -105,4 +108,3 @@ namespace libtorrent { namespace dht
return true;
}
}}
@@ -101,7 +101,7 @@ void get_item::got_data(bdecode_node const& v,
get_item::get_item(
node& dht_node
, node_id target
, node_id const& target
, data_callback const& dcallback
, nodes_callback const& ncallback)
: find_data(dht_node, target, ncallback)

@@ -33,7 +33,6 @@ POSSIBILITY OF SUCH DAMAGE.
#include <libtorrent/hasher.hpp>
#include <libtorrent/kademlia/item.hpp>
#include <libtorrent/bencode.hpp>
#include <libtorrent/span.hpp>
#include <libtorrent/kademlia/ed25519.hpp>

#include <cstdio> // for snprintf

@@ -123,7 +123,7 @@ void node::update_node_id()
// if we don't have an observer, we can't ask for the external IP (and our
// current node ID is likely not generated from an external address), so we
// can just stop here in that case.
if (!m_observer) return;
if (m_observer == nullptr) return;

// it's possible that our external address hasn't actually changed. If our
// current ID is still valid, don't do anything.

@@ -131,7 +131,7 @@ void node::update_node_id()
return;

#ifndef TORRENT_DISABLE_LOGGING
if (m_observer) m_observer->log(dht_logger::node
if (m_observer != nullptr) m_observer->log(dht_logger::node
, "updating node ID (because external IP address changed)");
#endif

@@ -436,7 +436,7 @@ void node::get_peers(sha1_hash const& info_hash
ta->start();
}

void node::announce(sha1_hash const& info_hash, int listen_port, int flags
void node::announce(sha1_hash const& info_hash, int const listen_port, int const flags
, std::function<void(std::vector<tcp::endpoint> const&)> f)
{
#ifndef TORRENT_DISABLE_LOGGING

@@ -454,7 +454,7 @@ void node::announce(sha1_hash const& info_hash, int listen_port, int flags
, listen_port, info_hash, flags), flags & node::flag_seed);
}

void node::direct_request(udp::endpoint ep, entry& e
void node::direct_request(udp::endpoint const& ep, entry& e
, std::function<void(msg const&)> f)
{
// not really a traversal
@@ -68,7 +68,7 @@ bool bootstrap::invoke(observer_ptr o)
if (o->flags & observer::flag_initial)
{
// if this packet is being sent to a bootstrap/router node, let it know
// that we're actualy bootstrapping (as opposed to being collateral
// that we're actually bootstrapping (as opposed to being collateral
// traffic).
a["bs"] = 1;
}

@@ -43,7 +43,6 @@ POSSIBILITY OF SUCH DAMAGE.
#include <libtorrent/hex.hpp> // to_hex
#include "libtorrent/kademlia/routing_table.hpp"
#include "libtorrent/broadcast_socket.hpp" // for cidr_distance
#include "libtorrent/session_status.hpp"
#include "libtorrent/kademlia/node_id.hpp"
#include "libtorrent/kademlia/dht_observer.hpp"

@@ -53,7 +52,6 @@ POSSIBILITY OF SUCH DAMAGE.
#include "libtorrent/invariant_check.hpp"
#include "libtorrent/address.hpp"

using std::uint8_t;
using namespace std::placeholders;

namespace libtorrent { namespace dht
@@ -131,19 +129,18 @@ int routing_table::bucket_limit(int bucket) const
if (!m_settings.extended_routing_table) return m_bucket_size;

static const int size_exceptions[] = {16, 8, 4, 2};
if (bucket < int(sizeof(size_exceptions)/sizeof(size_exceptions[0])))
if (bucket < int(sizeof(size_exceptions) / sizeof(size_exceptions[0])))
return m_bucket_size * size_exceptions[bucket];
return m_bucket_size;
}

void routing_table::status(std::vector<dht_routing_bucket>& s) const
{
for (table_t::const_iterator i = m_buckets.begin()
, end(m_buckets.end()); i != end; ++i)
for (auto const& i : m_buckets)
{
dht_routing_bucket b;
b.num_nodes = int(i->live_nodes.size());
b.num_replacements = int(i->replacements.size());
b.num_nodes = int(i.live_nodes.size());
b.num_replacements = int(i.replacements.size());
s.push_back(b);
}
}

@@ -165,12 +162,11 @@ void routing_table::status(session_status& s) const
// family), then it becomes a bit trickier
s.dht_global_nodes += num_global_nodes();

for (table_t::const_iterator i = m_buckets.begin()
, end(m_buckets.end()); i != end; ++i)
for (auto const& i : m_buckets)
{
dht_routing_bucket b;
b.num_nodes = int(i->live_nodes.size());
b.num_replacements = int(i->replacements.size());
b.num_nodes = int(i.live_nodes.size());
b.num_replacements = int(i.replacements.size());
#ifndef TORRENT_NO_DEPRECATE
b.last_active = 0;
#endif

@@ -184,17 +180,15 @@ std::tuple<int, int, int> routing_table::size() const
int nodes = 0;
int replacements = 0;
int confirmed = 0;
for (table_t::const_iterator i = m_buckets.begin()
, end(m_buckets.end()); i != end; ++i)
for (auto const& i : m_buckets)
{
nodes += int(i->live_nodes.size());
for (bucket_t::const_iterator k = i->live_nodes.begin()
, end2(i->live_nodes.end()); k != end2; ++k)
nodes += int(i.live_nodes.size());
for (auto const& k : i.live_nodes)
{
if (k->confirmed()) ++confirmed;
if (k.confirmed()) ++confirmed;
}

replacements += int(i->replacements.size());
replacements += int(i.replacements.size());
}
return std::make_tuple(nodes, replacements, confirmed);
}

@@ -203,10 +197,9 @@ std::int64_t routing_table::num_global_nodes() const
{
int deepest_bucket = 0;
int deepest_size = 0;
for (table_t::const_iterator i = m_buckets.begin()
, end(m_buckets.end()); i != end; ++i)
for (auto const& i : m_buckets)
{
deepest_size = int(i->live_nodes.size()); // + i->replacements.size();
deepest_size = int(i.live_nodes.size()); // + i.replacements.size();
if (deepest_size < m_bucket_size) break;
// this bucket is full
++deepest_bucket;

@@ -227,14 +220,14 @@ int routing_table::depth() const
// maybe the table is deeper now?
while (m_depth < int(m_buckets.size())-1
&& int(m_buckets[m_depth+1].live_nodes.size()) >= m_bucket_size / 2)
&& int(m_buckets[m_depth + 1].live_nodes.size()) >= m_bucket_size / 2)
{
++m_depth;
}

// maybe the table is more shallow now?
while (m_depth > 0
&& int(m_buckets[m_depth-1].live_nodes.size()) < m_bucket_size / 2)
&& int(m_buckets[m_depth - 1].live_nodes.size()) < m_bucket_size / 2)
{
--m_depth;
}
@@ -242,181 +235,6 @@ int routing_table::depth() const
return m_depth;
}

#ifndef TORRENT_DISABLE_LOGGING
void routing_table::print_state(std::ostream& os) const
{
std::vector<char> buf(2048);
int cursor = 0;

cursor += std::snprintf(&buf[cursor], buf.size() - cursor
, "kademlia routing table state\n"
"bucket_size: %d\n"
"global node count: %" PRId64 "\n"
"node_id: %s\n\n"
"number of nodes per bucket:\n"
, m_bucket_size
, num_global_nodes()
, aux::to_hex(m_id).c_str());
if (cursor > buf.size() - 500) buf.resize(buf.size() * 3 / 2);

int idx = 0;

for (table_t::const_iterator i = m_buckets.begin(), end(m_buckets.end());
i != end; ++i, ++idx)
{
cursor += std::snprintf(&buf[cursor], buf.size() - cursor
, "%2d: ", idx);
for (int k = 0; k < int(i->live_nodes.size()); ++k)
cursor += std::snprintf(&buf[cursor], buf.size() - cursor, "#");
for (int k = 0; k < int(i->replacements.size()); ++k)
cursor += std::snprintf(&buf[cursor], buf.size() - cursor, "-");
cursor += std::snprintf(&buf[cursor], buf.size() - cursor, "\n");

if (cursor > buf.size() - 500) buf.resize(buf.size() * 3 / 2);
}

time_point now = aux::time_now();

cursor += std::snprintf(&buf[cursor], buf.size() - cursor
, "\nnodes:");

int bucket_index = 0;
for (table_t::const_iterator i = m_buckets.begin(), end(m_buckets.end());
i != end; ++i, ++bucket_index)
{
cursor += std::snprintf(&buf[cursor], buf.size() - cursor
, "\n=== BUCKET == %d == %d|%d ==== \n"
, bucket_index, int(i->live_nodes.size())
, int(i->replacements.size()));
if (cursor > buf.size() - 500) buf.resize(buf.size() * 3 / 2);

int id_shift;
// the last bucket is special, since it hasn't been split yet, it
// includes that top bit as well
if (bucket_index + 1 == m_buckets.size())
id_shift = bucket_index;
else
id_shift = bucket_index + 1;

for (bucket_t::const_iterator j = i->live_nodes.begin()
, end2(i->live_nodes.end()); j != end2; ++j)
{
int bucket_size_limit = bucket_limit(bucket_index);
std::uint32_t top_mask = bucket_size_limit - 1;
int mask_shift = 0;
TORRENT_ASSERT_VAL(bucket_size_limit > 0, bucket_size_limit);
while ((top_mask & 0x80) == 0)
{
top_mask <<= 1;
++mask_shift;
}
top_mask = (0xff << mask_shift) & 0xff;

node_id id = j->id;
id <<= id_shift;

cursor += std::snprintf(&buf[cursor], buf.size() - cursor
, " prefix: %2x id: %s"
, ((id[0] & top_mask) >> mask_shift)
, aux::to_hex(j->id).c_str());

if (j->rtt == 0xffff)
{
cursor += std::snprintf(&buf[cursor], buf.size() - cursor
, " rtt: ");
}
else
{
cursor += std::snprintf(&buf[cursor], buf.size() - cursor
, " rtt: %4d", j->rtt);
}

cursor += std::snprintf(&buf[cursor], buf.size() - cursor
, " fail: %4d ping: %d dist: %3d"
, j->fail_count()
, j->pinged()
, distance_exp(m_id, j->id));

if (j->last_queried == min_time())
{
cursor += std::snprintf(&buf[cursor], buf.size() - cursor
, " query: ");
}
else
{
cursor += std::snprintf(&buf[cursor], buf.size() - cursor
, " query: %3d", int(total_seconds(now - j->last_queried)));
}

cursor += std::snprintf(&buf[cursor], buf.size() - cursor
, " ip: %s\n", print_endpoint(j->ep()).c_str());
if (cursor > buf.size() - 500) buf.resize(buf.size() * 3 / 2);
}
}

cursor += std::snprintf(&buf[cursor], buf.size() - cursor
, "\nnode spread per bucket:\n");
bucket_index = 0;
for (table_t::const_iterator i = m_buckets.begin(), end(m_buckets.end());
i != end; ++i, ++bucket_index)
{
int bucket_size_limit = bucket_limit(bucket_index);

// mask out the first 3 bits, or more depending
// on the bucket_size_limit
// we have all the lower bits set in (bucket_size_limit-1)
// but we want the left-most bits to be set. Shift it
// until the MSB is set
std::uint32_t top_mask = bucket_size_limit - 1;
int mask_shift = 0;
TORRENT_ASSERT_VAL(bucket_size_limit > 0, bucket_size_limit);
while ((top_mask & 0x80) == 0)
{
top_mask <<= 1;
++mask_shift;
}
top_mask = (0xff << mask_shift) & 0xff;
bucket_size_limit = (top_mask >> mask_shift) + 1;
TORRENT_ASSERT_VAL(bucket_size_limit <= 256, bucket_size_limit);
bool sub_buckets[256];
memset(sub_buckets, 0, sizeof(sub_buckets));

int id_shift;
// the last bucket is special, since it hasn't been split yet, it
// includes that top bit as well
if (bucket_index + 1 == m_buckets.size())
id_shift = bucket_index;
else
id_shift = bucket_index + 1;

for (bucket_t::const_iterator j = i->live_nodes.begin()
, end2(i->live_nodes.end()); j != end2; ++j)
{
node_id id = j->id;
id <<= id_shift;
int b = (id[0] & top_mask) >> mask_shift;
TORRENT_ASSERT(b >= 0 && b < int(sizeof(sub_buckets)/sizeof(sub_buckets[0])));
sub_buckets[b] = true;
}

cursor += std::snprintf(&buf[cursor], buf.size() - cursor
, "%2d mask: %2x: [", bucket_index, (top_mask >> mask_shift));

for (int j = 0; j < bucket_size_limit; ++j)
{
cursor += std::snprintf(&buf[cursor], buf.size() - cursor
, (sub_buckets[j] ? "X" : " "));
}
cursor += std::snprintf(&buf[cursor], buf.size() - cursor
, "]\n");
if (cursor > buf.size() - 500) buf.resize(buf.size() * 3 / 2);
}
buf[cursor] = '\0';
os << &buf[0];
}

#endif

node_entry const* routing_table::next_refresh()
{
// find the node with the least recent 'last_queried' field. if it's too
@@ -426,6 +244,7 @@ node_entry const* routing_table::next_refresh()
node_entry* candidate = nullptr;

// this will have a bias towards pinging nodes close to us first.
// TODO: why idx is never used here?
int idx = int(m_buckets.size()) - 1;
for (table_t::reverse_iterator i = m_buckets.rbegin()
, end(m_buckets.rend()); i != end; ++i, --idx)

@@ -500,9 +319,9 @@ bool compare_ip_cidr(address const& lhs, address const& rhs)
// if IPv6 addresses is in the same /64, they're too close and we won't
// trust the second one
std::uint64_t lhs_ip;
memcpy(&lhs_ip, lhs.to_v6().to_bytes().data(), 8);
std::memcpy(&lhs_ip, lhs.to_v6().to_bytes().data(), 8);
std::uint64_t rhs_ip;
memcpy(&rhs_ip, rhs.to_v6().to_bytes().data(), 8);
std::memcpy(&rhs_ip, rhs.to_v6().to_bytes().data(), 8);

// since the condition we're looking for is all the first bits being
// zero, there's no need to byte-swap into host byte order here.

@@ -596,7 +415,7 @@ void routing_table::remove_node(node_entry* n
}
}

bool routing_table::add_node(node_entry e)
bool routing_table::add_node(node_entry const& e)
{
add_node_status_t s = add_node_impl(e);
if (s == failed_to_add) return false;

@@ -670,7 +489,7 @@ routing_table::add_node_status_t routing_table::add_node_impl(node_entry e)
if (m_settings.restrict_routing_ips)
{
#ifndef TORRENT_DISABLE_LOGGING
if (m_log)
if (m_log != nullptr && m_log->should_log(dht_logger::routing_table))
{
char hex_id[41];
aux::to_hex(e.id, hex_id);

@@ -707,7 +526,7 @@ routing_table::add_node_status_t routing_table::add_node_impl(node_entry e)
// This is the same IP and port, but with a new node ID.
// This may indicate a malicious node so remove the entry.
#ifndef TORRENT_DISABLE_LOGGING
if (m_log)
if (m_log != nullptr && m_log->should_log(dht_logger::routing_table))
{
char hex_id_new[41];
char hex_id_old[41];

@@ -724,7 +543,7 @@ routing_table::add_node_status_t routing_table::add_node_impl(node_entry e)
// when we detect possible malicious activity in a bucket,
// schedule the other nodes in the bucket to be pinged soon
// to clean out any other malicious nodes
auto now = aux::time_now();
auto const now = aux::time_now();
for (auto& node : existing_bucket->live_nodes)
{
if (node.last_queried + minutes(5) < now)

@@ -792,7 +611,7 @@ routing_table::add_node_status_t routing_table::add_node_impl(node_entry e)
if (m_settings.restrict_routing_ips)
{
// don't allow multiple entries from IPs very close to each other
address const cmp = e.addr();
address const& cmp = e.addr();
j = std::find_if(b.begin(), b.end(), [&](node_entry const& a) { return compare_ip_cidr(a.addr(), cmp); });
if (j == b.end())
{

@@ -804,7 +623,7 @@ routing_table::add_node_status_t routing_table::add_node_impl(node_entry e)
// close to this one. We know that it's not the same, because
// it claims a different node-ID. Ignore this to avoid attacks
#ifndef TORRENT_DISABLE_LOGGING
if (m_log)
if (m_log != nullptr && m_log->should_log(dht_logger::routing_table))
{
char hex_id1[41];
aux::to_hex(e.id, hex_id1);
@@ -974,12 +793,12 @@ ip_ok:
// from these nodes, pick the one with the highest RTT
// and replace it

std::vector<bucket_t::iterator>::iterator k = std::max_element(nodes.begin(), nodes.end()
auto k = std::max_element(nodes.begin(), nodes.end()
, [](bucket_t::iterator lhs, bucket_t::iterator rhs)
{ return lhs->rtt < rhs->rtt; });

// in this case, we would really rather replace the node even if
// the new node has higher RTT, becase it fills a new prefix that we otherwise
// the new node has higher RTT, because it fills a new prefix that we otherwise
// don't have.
force_replace = true;
j = *k;

@@ -998,7 +817,7 @@ ip_ok:
*j = e;
m_ips.insert(e.addr());
#ifndef TORRENT_DISABLE_LOGGING
if (m_log)
if (m_log != nullptr && m_log->should_log(dht_logger::routing_table))
{
char hex_id[41];
aux::to_hex(e.id, hex_id);

@@ -1072,7 +891,7 @@ void routing_table::split_bucket()
bucket_t& b = m_buckets[bucket_index].live_nodes;
bucket_t& rb = m_buckets[bucket_index].replacements;

// move any node whose (160 - distane_exp(m_id, id)) >= (i - m_buckets.begin())
// move any node whose (160 - distance_exp(m_id, id)) >= (i - m_buckets.begin())
// to the new bucket
int const new_bucket_size = bucket_limit(bucket_index + 1);
for (bucket_t::iterator j = b.begin(); j != b.end();)

@@ -1163,20 +982,17 @@ void routing_table::for_each_node(
, void (*fun2)(void*, node_entry const&)
, void* userdata) const
{
for (table_t::const_iterator i = m_buckets.begin()
, end(m_buckets.end()); i != end; ++i)
for (auto const& i : m_buckets)
{
if (fun1)
{
for (bucket_t::const_iterator j = i->live_nodes.begin()
, end2(i->live_nodes.end()); j != end2; ++j)
fun1(userdata, *j);
for (auto const& j : i.live_nodes)
fun1(userdata, j);
}
if (fun2)
{
for (bucket_t::const_iterator j = i->replacements.begin()
, end2(i->replacements.end()); j != end2; ++j)
fun2(userdata, *j);
for (auto const& j : i.replacements)
fun2(userdata, j);
}
}
}
|
|||
j->timed_out();
|
||||
|
||||
#ifndef TORRENT_DISABLE_LOGGING
|
||||
if (m_log)
|
||||
if (m_log != nullptr && m_log->should_log(dht_logger::routing_table))
|
||||
{
|
||||
char hex_id[41];
|
||||
aux::to_hex(nid, hex_id);
|
||||
m_log->log(dht_logger::routing_table, "NODE FAILED id: %s ip: %s fails: %d pinged: %d up-time: %d"
|
||||
, hex_id, print_endpoint(j->ep()).c_str()
|
||||
, int(j->fail_count())
|
||||
, j->fail_count()
|
||||
, int(j->pinged())
|
||||
, int(total_seconds(aux::time_now() - j->first_seen)));
|
||||
}
|
||||
|
@ -1232,13 +1048,13 @@ void routing_table::node_failed(node_id const& nid, udp::endpoint const& ep)
|
|||
j->timed_out();
|
||||
|
||||
#ifndef TORRENT_DISABLE_LOGGING
|
||||
if (m_log)
|
||||
if (m_log != nullptr && m_log->should_log(dht_logger::routing_table))
|
||||
{
|
||||
char hex_id[41];
|
||||
aux::to_hex(nid, hex_id);
|
||||
m_log->log(dht_logger::routing_table, "NODE FAILED id: %s ip: %s fails: %d pinged: %d up-time: %d"
|
||||
, hex_id, print_endpoint(j->ep()).c_str()
|
||||
, int(j->fail_count())
|
||||
, j->fail_count()
|
||||
, int(j->pinged())
|
||||
, int(total_seconds(aux::time_now() - j->first_seen)));
|
||||
}
|
||||
|
@ -1287,14 +1103,14 @@ bool routing_table::node_seen(node_id const& id, udp::endpoint const& ep, int rt
|
|||
// fills the vector with the k nodes from our buckets that
|
||||
// are nearest to the given id.
|
||||
void routing_table::find_node(node_id const& target
|
||||
, std::vector<node_entry>& l, int options, int count)
|
||||
, std::vector<node_entry>& l, int const options, int count)
|
||||
{
|
||||
l.clear();
|
||||
if (count == 0) count = m_bucket_size;
|
||||
|
||||
table_t::iterator i = find_bucket(target);
|
||||
int bucket_index = std::distance(m_buckets.begin(), i);
|
||||
int bucket_size_limit = bucket_limit(bucket_index);
|
||||
int const bucket_index = std::distance(m_buckets.begin(), i);
|
||||
int const bucket_size_limit = bucket_limit(bucket_index);
|
||||
|
||||
l.reserve(bucket_size_limit);
|
||||
|
||||
|
@ -1306,8 +1122,7 @@ void routing_table::find_node(node_id const& target
|
|||
bucket_t& b = j->live_nodes;
|
||||
if (options & include_failed)
|
||||
{
|
||||
copy(b.begin(), b.end()
|
||||
, std::back_inserter(l));
|
||||
std::copy(b.begin(), b.end(), std::back_inserter(l));
|
||||
}
|
||||
else
|
||||
{
|
||||
|
@ -1378,19 +1193,16 @@ void routing_table::check_invariant() const
|
|||
{
|
||||
ip_set all_ips;
|
||||
|
||||
for (table_t::const_iterator i = m_buckets.begin()
|
||||
, end(m_buckets.end()); i != end; ++i)
|
||||
for (auto const& i : m_buckets)
|
||||
{
|
||||
for (bucket_t::const_iterator j = i->replacements.begin();
|
||||
j != i->replacements.end(); ++j)
|
||||
for (auto const& j : i.replacements)
|
||||
{
|
||||
all_ips.insert(j->addr());
|
||||
all_ips.insert(j.addr());
|
||||
}
|
||||
for (bucket_t::const_iterator j = i->live_nodes.begin();
|
||||
j != i->live_nodes.end(); ++j)
|
||||
for (auto const& j : i.live_nodes)
|
||||
{
|
||||
TORRENT_ASSERT(j->addr().is_v4() == i->live_nodes.begin()->addr().is_v4());
|
||||
all_ips.insert(j->addr());
|
||||
TORRENT_ASSERT(j.addr().is_v4() == i.live_nodes.begin()->addr().is_v4());
|
||||
all_ips.insert(j.addr());
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1411,4 +1223,3 @@ bool routing_table::is_full(int bucket) const
|
|||
}
|
||||
|
||||
} } // namespace libtorrent::dht
|
||||
|
||||
|
|
|
@@ -418,7 +418,7 @@ void rpc_manager::add_our_id(entry& e)
e["id"] = m_our_id.to_string();
}

bool rpc_manager::invoke(entry& e, udp::endpoint target_addr
bool rpc_manager::invoke(entry& e, udp::endpoint const& target_addr
, observer_ptr o)
{
INVARIANT_CHECK;

@@ -450,9 +450,12 @@ bool rpc_manager::invoke(entry& e, udp::endpoint target_addr
o->set_transaction_id(tid);

#ifndef TORRENT_DISABLE_LOGGING
if (m_log != nullptr && m_log->should_log(dht_logger::rpc_manager))
{
m_log->log(dht_logger::rpc_manager, "[%p] invoking %s -> %s"
, static_cast<void*>(o->algorithm()), e["q"].string().c_str()
, print_endpoint(target_addr).c_str());
}
#endif

if (m_sock->send_packet(e, target_addr))
@@ -296,7 +296,7 @@ void traversal_algorithm::finished(observer_ptr o)
// prevent request means that the total number of requests has
// overflown. This query failed because it was the oldest one.
// So, if this is true, don't make another request
void traversal_algorithm::failed(observer_ptr o, int flags)
void traversal_algorithm::failed(observer_ptr o, int const flags)
{
// don't tell the routing table about
// node ids that we just generated ourself

@@ -564,7 +564,7 @@ void traversal_observer::reply(msg const& m)
if (!r)
{
#ifndef TORRENT_DISABLE_LOGGING
if (get_observer())
if (get_observer() != nullptr)
{
get_observer()->log(dht_logger::traversal
, "[%p] missing response dict"

@@ -619,7 +619,7 @@ void traversal_observer::reply(msg const& m)
if (!id || id.string_length() != 20)
{
#ifndef TORRENT_DISABLE_LOGGING
if (get_observer())
if (get_observer() != nullptr)
{
get_observer()->log(dht_logger::traversal, "[%p] invalid id in response"
, static_cast<void*>(algorithm()));
@@ -576,8 +576,179 @@ dht::key_desc_t const put_mutable_item_desc[] = {
{"v", bdecode_node::none_t, 0, key_desc_t::last_child},
};

void print_state(std::ostream& os, routing_table const& table)
{
std::vector<char> buf(2048);
int cursor = 0;

} // annonymous namespace
cursor += std::snprintf(&buf[cursor], buf.size() - cursor
, "kademlia routing table state\n"
"bucket_size: %d\n"
"global node count: %" PRId64 "\n"
"node_id: %s\n\n"
"number of nodes per bucket:\n"
, table.bucket_size()
, table.num_global_nodes()
, aux::to_hex(table.id()).c_str());
if (cursor > int(buf.size()) - 500) buf.resize(buf.size() * 3 / 2);

int idx = 0;

for (auto i = table.buckets().begin(), end(table.buckets().end());
i != end; ++i, ++idx)
{
cursor += std::snprintf(&buf[cursor], buf.size() - cursor
, "%2d: ", idx);
for (int k = 0; k < int(i->live_nodes.size()); ++k)
cursor += std::snprintf(&buf[cursor], buf.size() - cursor, "#");
for (int k = 0; k < int(i->replacements.size()); ++k)
cursor += std::snprintf(&buf[cursor], buf.size() - cursor, "-");
cursor += std::snprintf(&buf[cursor], buf.size() - cursor, "\n");

if (cursor > int(buf.size()) - 500) buf.resize(buf.size() * 3 / 2);
}

time_point now = aux::time_now();

cursor += std::snprintf(&buf[cursor], buf.size() - cursor
, "\nnodes:");

int bucket_index = 0;
for (auto i = table.buckets().begin(), end(table.buckets().end());
i != end; ++i, ++bucket_index)
{
cursor += std::snprintf(&buf[cursor], buf.size() - cursor
, "\n=== BUCKET == %d == %d|%d ==== \n"
, bucket_index, int(i->live_nodes.size())
, int(i->replacements.size()));
if (cursor > int(buf.size()) - 500) buf.resize(buf.size() * 3 / 2);

int id_shift;
// the last bucket is special, since it hasn't been split yet, it
// includes that top bit as well
if (bucket_index + 1 == int(table.buckets().size()))
id_shift = bucket_index;
else
id_shift = bucket_index + 1;

for (bucket_t::const_iterator j = i->live_nodes.begin()
, end2(i->live_nodes.end()); j != end2; ++j)
{
int bucket_size_limit = table.bucket_limit(bucket_index);
std::uint32_t top_mask = bucket_size_limit - 1;
int mask_shift = 0;
TORRENT_ASSERT_VAL(bucket_size_limit > 0, bucket_size_limit);
while ((top_mask & 0x80) == 0)
{
top_mask <<= 1;
++mask_shift;
}
top_mask = (0xff << mask_shift) & 0xff;

node_id id = j->id;
id <<= id_shift;

cursor += std::snprintf(&buf[cursor], buf.size() - cursor
, " prefix: %2x id: %s"
, ((id[0] & top_mask) >> mask_shift)
, aux::to_hex(j->id).c_str());

if (j->rtt == 0xffff)
{
cursor += std::snprintf(&buf[cursor], buf.size() - cursor
, " rtt: ");
}
else
{
cursor += std::snprintf(&buf[cursor], buf.size() - cursor
, " rtt: %4d", j->rtt);
}

cursor += std::snprintf(&buf[cursor], buf.size() - cursor
, " fail: %4d ping: %d dist: %3d"
, j->fail_count()
, j->pinged()
, distance_exp(table.id(), j->id));

if (j->last_queried == min_time())
{
cursor += std::snprintf(&buf[cursor], buf.size() - cursor
, " query: ");
}
else
{
cursor += std::snprintf(&buf[cursor], buf.size() - cursor
, " query: %3d", int(total_seconds(now - j->last_queried)));
}

cursor += std::snprintf(&buf[cursor], buf.size() - cursor
, " ip: %s\n", print_endpoint(j->ep()).c_str());
if (cursor > int(buf.size()) - 500) buf.resize(buf.size() * 3 / 2);
}
}

cursor += std::snprintf(&buf[cursor], buf.size() - cursor
, "\nnode spread per bucket:\n");
bucket_index = 0;
for (auto i = table.buckets().begin(), end(table.buckets().end());
i != end; ++i, ++bucket_index)
{
int bucket_size_limit = table.bucket_limit(bucket_index);

// mask out the first 3 bits, or more depending
// on the bucket_size_limit
// we have all the lower bits set in (bucket_size_limit-1)
// but we want the left-most bits to be set. Shift it
// until the MSB is set
std::uint32_t top_mask = bucket_size_limit - 1;
int mask_shift = 0;
TORRENT_ASSERT_VAL(bucket_size_limit > 0, bucket_size_limit);
while ((top_mask & 0x80) == 0)
{
top_mask <<= 1;
++mask_shift;
}
top_mask = (0xff << mask_shift) & 0xff;
bucket_size_limit = (top_mask >> mask_shift) + 1;
TORRENT_ASSERT_VAL(bucket_size_limit <= 256, bucket_size_limit);
bool sub_buckets[256];
std::memset(sub_buckets, 0, sizeof(sub_buckets));

int id_shift;
// the last bucket is special, since it hasn't been split yet, it
// includes that top bit as well
if (bucket_index + 1 == int(table.buckets().size()))
id_shift = bucket_index;
else
id_shift = bucket_index + 1;

for (bucket_t::const_iterator j = i->live_nodes.begin()
, end2(i->live_nodes.end()); j != end2; ++j)
{
node_id id = j->id;
id <<= id_shift;
int b = (id[0] & top_mask) >> mask_shift;
TORRENT_ASSERT(b >= 0 && b < int(sizeof(sub_buckets)/sizeof(sub_buckets[0])));
sub_buckets[b] = true;
}

cursor += std::snprintf(&buf[cursor], buf.size() - cursor
, "%2d mask: %2x: [", bucket_index, (top_mask >> mask_shift));

for (int j = 0; j < bucket_size_limit; ++j)
{
cursor += std::snprintf(&buf[cursor], buf.size() - cursor
, (sub_buckets[j] ? "X" : " "));
}
cursor += std::snprintf(&buf[cursor], buf.size() - cursor
, "]\n");
if (cursor > int(buf.size()) - 500) buf.resize(buf.size() * 3 / 2);
}
buf[cursor] = '\0';
os << &buf[0];
}

} // anonymous namespace

TORRENT_TEST(ping)
{
@@ -1442,9 +1613,7 @@ void test_routing_table(address(&rand_addr)())
//TODO: 2 test num_global_nodes
//TODO: 2 test need_refresh

#ifndef TORRENT_DISABLE_LOGGING
table.print_state(std::cerr);
#endif
print_state(std::cerr, table);

table.for_each_node(node_push_back, nop, &nodes);

@@ -2666,9 +2835,7 @@ TORRENT_TEST(routing_table_uniform)
// i.e. no more than 5 levels
TEST_EQUAL(tbl.num_active_buckets(), 5);

#ifndef TORRENT_DISABLE_LOGGING
tbl.print_state(std::cerr);
#endif
print_state(std::cerr, tbl);
}

TORRENT_TEST(routing_table_balance)

@@ -2691,9 +2858,7 @@ TORRENT_TEST(routing_table_balance)
std::printf("num_active_buckets: %d\n", tbl.num_active_buckets());
TEST_EQUAL(tbl.num_active_buckets(), 2);

#ifndef TORRENT_DISABLE_LOGGING
tbl.print_state(std::cerr);
#endif
print_state(std::cerr, tbl);
}

TORRENT_TEST(routing_table_extended)

@@ -2720,9 +2885,7 @@ TORRENT_TEST(routing_table_extended)
}
TEST_EQUAL(tbl.num_active_buckets(), 6);

#ifndef TORRENT_DISABLE_LOGGING
tbl.print_state(std::cerr);
#endif
print_state(std::cerr, tbl);
}

void inserter(std::set<node_id>* nodes, node_entry const& ne)

@@ -2755,9 +2918,7 @@ TORRENT_TEST(routing_table_set_id)
std::set<node_id> original_nodes;
tbl.for_each_node(std::bind(&inserter, &original_nodes, _1));

#ifndef TORRENT_DISABLE_LOGGING
tbl.print_state(std::cerr);
#endif
print_state(std::cerr, tbl);

id = to_hash("ffffffffffffffffffffffffffffffffffffffff");

@@ -2775,9 +2936,7 @@ TORRENT_TEST(routing_table_set_id)
// all remaining nodes also exist in the original nodes
TEST_EQUAL(intersection.size(), remaining_nodes.size());

#ifndef TORRENT_DISABLE_LOGGING
tbl.print_state(std::cerr);
#endif
print_state(std::cerr, tbl);
}

TORRENT_TEST(node_set_id)
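The other half of the commit, visible in the last hunks above, deletes the debug-only routing_table::print_state() member (and the node::print_state() wrapper) from the library and re-creates it in the test source as a free function that only touches public accessors such as buckets(), id() and bucket_limit(). A minimal sketch of that pattern, using a made-up stand-in table class rather than the real routing_table:

```cpp
#include <iostream>
#include <vector>

class table
{
public:
	void add(int bucket, int node)
	{
		if (bucket >= int(m_buckets.size())) m_buckets.resize(bucket + 1);
		m_buckets[bucket].push_back(node);
	}
	// the public accessor the free function relies on
	std::vector<std::vector<int>> const& buckets() const { return m_buckets; }
private:
	std::vector<std::vector<int>> m_buckets;
	// previously something like: void print_state(std::ostream&) const;
};

// lives in the test harness now, not in the library
void print_state(std::ostream& os, table const& t)
{
	int idx = 0;
	for (auto const& b : t.buckets())
	{
		os << idx++ << ": ";
		for (int n : b) os << n << ' ';
		os << '\n';
	}
}

int main()
{
	table t;
	t.add(0, 42);
	t.add(1, 7);
	print_state(std::cout, t); // same call sites as before, new first argument
}
```

The upside of this split is that the library class no longer carries ostream-based debug printing, and the TORRENT_DISABLE_LOGGING guards around the old call sites in the tests become unnecessary.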