2006-08-01 17:27:08 +02:00
|
|
|
/*
|
|
|
|
|
2016-01-18 00:57:46 +01:00
|
|
|
Copyright (c) 2006-2016, Arvid Norberg
|
2006-08-01 17:27:08 +02:00
|
|
|
All rights reserved.
|
|
|
|
|
|
|
|
Redistribution and use in source and binary forms, with or without
|
|
|
|
modification, are permitted provided that the following conditions
|
|
|
|
are met:
|
|
|
|
|
|
|
|
* Redistributions of source code must retain the above copyright
|
|
|
|
notice, this list of conditions and the following disclaimer.
|
|
|
|
* Redistributions in binary form must reproduce the above copyright
|
|
|
|
notice, this list of conditions and the following disclaimer in
|
|
|
|
the documentation and/or other materials provided with the distribution.
|
|
|
|
* Neither the name of the author nor the names of its
|
|
|
|
contributors may be used to endorse or promote products derived
|
|
|
|
from this software without specific prior written permission.
|
|
|
|
|
|
|
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
|
|
|
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
|
|
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
|
|
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
|
|
|
|
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
|
|
|
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
|
|
|
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
|
|
|
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
|
|
|
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
|
|
|
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
|
|
|
POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <vector>
|
2010-03-07 07:00:12 +01:00
|
|
|
#include <iterator> // std::distance()
|
2012-09-23 05:49:04 +02:00
|
|
|
#include <algorithm> // std::copy, std::remove_copy_if
|
2006-08-01 17:27:08 +02:00
|
|
|
#include <functional>
|
|
|
|
#include <numeric>
|
2016-05-17 15:24:06 +02:00
|
|
|
#include <cstdio> // for snprintf
|
|
|
|
#include <cinttypes> // for PRId64 et.al.
|
2016-06-20 17:32:06 +02:00
|
|
|
#include <cstdint>
|
2015-05-10 20:38:10 +02:00
|
|
|
|
|
|
|
#include "libtorrent/config.hpp"
|
2006-08-01 17:27:08 +02:00
|
|
|
|
2016-06-04 16:01:43 +02:00
|
|
|
#include <libtorrent/hex.hpp> // to_hex
|
2006-08-01 17:27:08 +02:00
|
|
|
#include "libtorrent/kademlia/routing_table.hpp"
|
2009-01-23 11:36:07 +01:00
|
|
|
#include "libtorrent/session_status.hpp"
|
2006-08-01 17:27:08 +02:00
|
|
|
#include "libtorrent/kademlia/node_id.hpp"
|
2015-05-10 06:54:02 +02:00
|
|
|
#include "libtorrent/kademlia/dht_observer.hpp"
|
2015-09-18 06:23:45 +02:00
|
|
|
#include "libtorrent/aux_/time.hpp"
|
2015-01-17 18:02:58 +01:00
|
|
|
#include "libtorrent/alert_types.hpp" // for dht_routing_bucket
|
2015-05-10 06:54:02 +02:00
|
|
|
#include "libtorrent/socket_io.hpp" // for print_endpoint
|
2013-12-20 05:54:52 +01:00
|
|
|
#include "libtorrent/invariant_check.hpp"
|
2016-01-09 22:20:27 +01:00
|
|
|
#include "libtorrent/address.hpp"
|
2013-12-20 05:54:52 +01:00
|
|
|
|
2016-05-25 06:31:52 +02:00
|
|
|
using namespace std::placeholders;
|
2006-08-01 17:27:08 +02:00
|
|
|
|
|
|
|
namespace libtorrent { namespace dht
|
|
|
|
{
|
2016-01-09 22:20:27 +01:00
|
|
|
namespace
|
2014-09-20 21:27:29 +02:00
|
|
|
{
|
2016-01-09 22:20:27 +01:00
|
|
|
template <typename T, typename K>
// remove exactly one element matching `key` from an associative
// container. the element is asserted to exist.
void erase_one(T& container, K const& key)
{
	auto const it = container.find(key);
	TORRENT_ASSERT(it != container.end());
	container.erase(it);
}
|
|
|
|
|
|
|
|
// returns true if the node is acceptable for the routing table. When
// enforce_node_id is enabled, the node ID must pass verification
// against its IP address (BEP 42 style check via verify_id())
bool verify_node_address(dht_settings const& settings
	, node_id const& id, address const& addr)
{
	return !settings.enforce_node_id || verify_id(id, addr);
}
|
2014-09-20 21:27:29 +02:00
|
|
|
}
|
|
|
|
|
2016-09-12 15:20:15 +02:00
|
|
|
// record an IP address in the set, keeping IPv4 and IPv6
// addresses in their respective containers
void ip_set::insert(address const& addr)
{
#if TORRENT_USE_IPV6
	if (addr.is_v6())
	{
		m_ip6s.insert(addr.to_v6().to_bytes());
		return;
	}
#endif
	m_ip4s.insert(addr.to_v4().to_bytes());
}
|
|
|
|
|
2016-09-12 15:20:15 +02:00
|
|
|
// number of times `addr` occurs in the set, looked up in the
// container matching the address family
size_t ip_set::count(address const& addr)
{
#if TORRENT_USE_IPV6
	if (addr.is_v6())
		return m_ip6s.count(addr.to_v6().to_bytes());
#endif
	return m_ip4s.count(addr.to_v4().to_bytes());
}
|
|
|
|
|
2016-09-12 15:20:15 +02:00
|
|
|
// remove one occurrence of `addr` from the set. The address is
// asserted (via erase_one) to be present
void ip_set::erase(address const& addr)
{
#if TORRENT_USE_IPV6
	if (addr.is_v6())
	{
		erase_one(m_ip6s, addr.to_v6().to_bytes());
		return;
	}
#endif
	erase_one(m_ip4s, addr.to_v4().to_bytes());
}
|
|
|
|
|
2016-02-12 04:56:52 +01:00
|
|
|
// construct a routing table for a local node with ID `id`, for the
// given transport protocol (IPv4 or IPv6 UDP) and nominal bucket size.
// `log` may be null and is only stored when logging is compiled in
routing_table::routing_table(node_id const& id, udp proto, int bucket_size
	, dht_settings const& settings
	, dht_logger* log)
	:
#ifndef TORRENT_DISABLE_LOGGING
	m_log(log),
#endif
	m_settings(settings)
	, m_id(id)
	, m_protocol(proto)
	, m_depth(0)
	, m_last_self_refresh(min_time())
	, m_bucket_size(bucket_size)
{
	// silence unused-parameter warning when logging is compiled out
	TORRENT_UNUSED(log);
	// reserve up-front; routing tables deeper than 30 buckets are not
	// expected in practice (see the sanity check in add_node())
	m_buckets.reserve(30);
}
|
|
|
|
|
2012-09-22 23:40:16 +02:00
|
|
|
int routing_table::bucket_limit(int bucket) const
|
|
|
|
{
|
|
|
|
if (!m_settings.extended_routing_table) return m_bucket_size;
|
|
|
|
|
2015-03-14 01:42:27 +01:00
|
|
|
static const int size_exceptions[] = {16, 8, 4, 2};
|
2016-09-13 14:18:47 +02:00
|
|
|
if (bucket < int(sizeof(size_exceptions) / sizeof(size_exceptions[0])))
|
2012-09-22 23:40:16 +02:00
|
|
|
return m_bucket_size * size_exceptions[bucket];
|
|
|
|
return m_bucket_size;
|
|
|
|
}
|
|
|
|
|
2015-01-17 18:02:58 +01:00
|
|
|
void routing_table::status(std::vector<dht_routing_bucket>& s) const
|
|
|
|
{
|
2016-09-13 14:18:47 +02:00
|
|
|
for (auto const& i : m_buckets)
|
2015-01-17 18:02:58 +01:00
|
|
|
{
|
|
|
|
dht_routing_bucket b;
|
2016-09-13 14:18:47 +02:00
|
|
|
b.num_nodes = int(i.live_nodes.size());
|
|
|
|
b.num_replacements = int(i.replacements.size());
|
2015-01-17 18:02:58 +01:00
|
|
|
s.push_back(b);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-01-04 22:31:02 +01:00
|
|
|
#ifndef TORRENT_NO_DEPRECATE
|
2015-01-17 18:02:58 +01:00
|
|
|
// TODO: 2 use the non deprecated function instead of this one
|
2009-01-23 11:36:07 +01:00
|
|
|
// deprecated statistics interface: accumulate this table's node counts
// and per-bucket breakdown into the session-wide session_status object
void routing_table::status(session_status& s) const
{
	int dht_nodes;
	int dht_node_cache;
	int ignore;
	// size() returns (live nodes, replacement nodes, confirmed nodes);
	// the confirmed count is not reported through this interface
	std::tie(dht_nodes, dht_node_cache, ignore) = size();
	s.dht_nodes += dht_nodes;
	s.dht_node_cache += dht_node_cache;
	// TODO: arvidn note
	// when it's across IPv4 and IPv6, adding (dht_global_nodes) would
	// make sense. in the future though, where we may have one DHT node
	// per external interface (which may be multiple of the same address
	// family), then it becomes a bit trickier
	s.dht_global_nodes += num_global_nodes();

	for (auto const& i : m_buckets)
	{
		dht_routing_bucket b;
		b.num_nodes = int(i.live_nodes.size());
		b.num_replacements = int(i.replacements.size());
#ifndef TORRENT_NO_DEPRECATE
		// per-bucket activity is no longer tracked; report 0
		b.last_active = 0;
#endif
		s.dht_routing_table.push_back(b);
	}
}
|
2015-01-04 22:31:02 +01:00
|
|
|
#endif
|
2009-01-23 11:36:07 +01:00
|
|
|
|
2016-06-20 17:32:06 +02:00
|
|
|
std::tuple<int, int, int> routing_table::size() const
|
2006-08-01 17:27:08 +02:00
|
|
|
{
|
|
|
|
int nodes = 0;
|
|
|
|
int replacements = 0;
|
2014-11-08 17:58:18 +01:00
|
|
|
int confirmed = 0;
|
2016-09-13 14:18:47 +02:00
|
|
|
for (auto const& i : m_buckets)
|
2006-08-01 17:27:08 +02:00
|
|
|
{
|
2016-09-13 14:18:47 +02:00
|
|
|
nodes += int(i.live_nodes.size());
|
|
|
|
for (auto const& k : i.live_nodes)
|
2014-11-08 17:58:18 +01:00
|
|
|
{
|
2016-09-13 14:18:47 +02:00
|
|
|
if (k.confirmed()) ++confirmed;
|
2014-11-08 17:58:18 +01:00
|
|
|
}
|
|
|
|
|
2016-09-13 14:18:47 +02:00
|
|
|
replacements += int(i.replacements.size());
|
2006-08-01 17:27:08 +02:00
|
|
|
}
|
2016-06-20 17:32:06 +02:00
|
|
|
return std::make_tuple(nodes, replacements, confirmed);
|
2006-08-01 17:27:08 +02:00
|
|
|
}
|
|
|
|
|
2016-06-18 20:01:38 +02:00
|
|
|
// estimate the total number of nodes in the global DHT, derived from
// how deep our routing table is: each consecutive full bucket implies
// a doubling of the surrounding network size
std::int64_t routing_table::num_global_nodes() const
{
	int deepest_bucket = 0;
	int deepest_size = 0;
	// find the first bucket that is not full; its index is the depth
	// of the table and its size is the fill level at that depth
	for (auto const& i : m_buckets)
	{
		deepest_size = int(i.live_nodes.size()); // + i.replacements.size();
		if (deepest_size < m_bucket_size) break;
		// this bucket is full
		++deepest_bucket;
	}

	// the table never split; the best estimate is ourself plus the
	// nodes we can see directly
	if (deepest_bucket == 0) return 1 + deepest_size;

	// scale by 2^depth; the choice between the two formulas depends on
	// how full the deepest bucket is
	if (deepest_size < m_bucket_size / 2) return (std::int64_t(1) << deepest_bucket) * m_bucket_size;
	else return (std::int64_t(2) << deepest_bucket) * deepest_size;
}
|
|
|
|
|
2013-10-27 00:59:55 +02:00
|
|
|
// returns the number of levels of this routing table that are at least
// half full. The value is cached in the mutable m_depth member and
// adjusted incrementally on each call
int routing_table::depth() const
{
	// the cached depth may be stale if buckets were removed
	if (m_depth >= int(m_buckets.size()))
		m_depth = int(m_buckets.size()) - 1;

	// empty table; m_depth is -1 here
	if (m_depth < 0) return m_depth;

	// maybe the table is deeper now?
	while (m_depth < int(m_buckets.size())-1
		&& int(m_buckets[m_depth + 1].live_nodes.size()) >= m_bucket_size / 2)
	{
		++m_depth;
	}

	// maybe the table is more shallow now?
	while (m_depth > 0
		&& int(m_buckets[m_depth - 1].live_nodes.size()) < m_bucket_size / 2)
	{
		--m_depth;
	}

	return m_depth;
}
|
|
|
|
|
2014-11-02 10:41:29 +01:00
|
|
|
// returns the node that should be pinged/refreshed next: the one with
// the least recent 'last_queried' timestamp, preferring nodes that
// have never been queried at all. Returns nullptr if the table is
// empty. The chosen node's last_queried is bumped so the next call
// does not pick the same node again.
node_entry const* routing_table::next_refresh()
{
	node_entry* candidate = nullptr;

	// iterating the buckets in reverse gives a bias towards pinging
	// nodes close to our own node ID first
	bool found_never_queried = false;
	for (table_t::reverse_iterator i = m_buckets.rbegin()
		, end(m_buckets.rend()); i != end && !found_never_queried; ++i)
	{
		for (auto& n : i->live_nodes)
		{
			// this shouldn't happen
			TORRENT_ASSERT(m_id != n.id);
			if (n.id == m_id) continue;

			// a node that has never been queried takes precedence over
			// any timestamp comparison; stop searching entirely
			if (n.last_queried == min_time())
			{
				candidate = &n;
				found_never_queried = true;
				break;
			}

			if (candidate == nullptr || n.last_queried < candidate->last_queried)
				candidate = &n;
		}
	}

	// make sure we don't pick the same node again next time we want to refresh
	// the routing table
	if (candidate)
		candidate->last_queried = aux::time_now();

	return candidate;
}
|
2006-08-01 17:27:08 +02:00
|
|
|
|
|
|
|
// append all replacement-bucket nodes from every bucket to `nodes`
void routing_table::replacement_cache(bucket_t& nodes) const
{
	for (auto const& bucket : m_buckets)
	{
		nodes.insert(nodes.end()
			, bucket.replacements.begin(), bucket.replacements.end());
	}
}
|
|
|
|
|
2010-01-03 12:08:39 +01:00
|
|
|
// returns the bucket that node `id` belongs in, based on the XOR
// distance to our own node ID. Creates the first bucket on demand,
// so the returned iterator is always dereferenceable
routing_table::table_t::iterator routing_table::find_bucket(node_id const& id)
{
	// TORRENT_ASSERT(id != m_id);

	int num_buckets = int(m_buckets.size());
	if (num_buckets == 0)
	{
		m_buckets.push_back(routing_table_node());
		++num_buckets;
	}

	// IDs farther than our deepest bucket all land in the last bucket
	int const bucket_index = (std::min)(159 - distance_exp(m_id, id), num_buckets - 1);
	TORRENT_ASSERT(bucket_index < int(m_buckets.size()));
	TORRENT_ASSERT(bucket_index >= 0);

	return std::next(m_buckets.begin(), bucket_index);
}
|
|
|
|
|
2016-05-04 05:22:25 +02:00
|
|
|
// returns true if the two IPs are "too close" to each other to be allowed in
// the same DHT lookup. If they are, the last one to be found will be ignored
bool compare_ip_cidr(address const& lhs, address const& rhs)
{
	// both addresses must be of the same family
	TORRENT_ASSERT(lhs.is_v4() == rhs.is_v4());

#if TORRENT_USE_IPV6
	if (lhs.is_v6())
	{
		// if IPv6 addresses is in the same /64, they're too close and we won't
		// trust the second one
		std::uint64_t lhs_ip;
		std::memcpy(&lhs_ip, lhs.to_v6().to_bytes().data(), 8);
		std::uint64_t rhs_ip;
		std::memcpy(&rhs_ip, rhs.to_v6().to_bytes().data(), 8);

		// since the condition we're looking for is all the first bits being
		// zero, there's no need to byte-swap into host byte order here.
		std::uint64_t const mask = lhs_ip ^ rhs_ip;
		// mask == 0 means the leading 64 bits are identical, i.e. same /64
		return mask == 0;
	}
	else
#endif
	{
		// if IPv4 addresses is in the same /24, they're too close and we won't
		// trust the second one
		std::uint32_t const mask
			= lhs.to_v4().to_ulong() ^ rhs.to_v4().to_ulong();
		// only the low 8 (host) bits may differ within the same /24
		return mask <= 0x000000ff;
	}
}
|
2015-04-22 06:24:45 +02:00
|
|
|
|
2013-12-20 05:54:52 +01:00
|
|
|
// find the node entry (in either a live bucket or a replacement
// bucket) matching the exact endpoint `ep` (IP and port). On a hit,
// *bucket is set to the containing bucket; on a miss it is set to
// m_buckets.end() and nullptr is returned
node_entry* routing_table::find_node(udp::endpoint const& ep
	, routing_table::table_t::iterator* bucket)
{
	for (table_t::iterator i = m_buckets.begin()
		, end(m_buckets.end()); i != end; ++i)
	{
		for (auto& n : i->replacements)
		{
			if (n.addr() != ep.address() || n.port() != ep.port()) continue;
			*bucket = i;
			return &n;
		}
		for (auto& n : i->live_nodes)
		{
			if (n.addr() != ep.address() || n.port() != ep.port()) continue;
			*bucket = i;
			return &n;
		}
	}
	*bucket = m_buckets.end();
	return nullptr;
}
|
|
|
|
|
2016-07-24 03:57:04 +02:00
|
|
|
// promote nodes from the bucket's replacement list into its live list
// until the live list reaches its size limit (or the replacement list
// is exhausted). Pinged (verified) nodes with the lowest RTT are
// promoted first.
void routing_table::fill_from_replacements(table_t::iterator bucket)
{
	bucket_t& b = bucket->live_nodes;
	bucket_t& rb = bucket->replacements;
	// explicit narrowing: bucket indices comfortably fit in an int
	int const bucket_size = bucket_limit(int(std::distance(m_buckets.begin(), bucket)));

	if (int(b.size()) >= bucket_size) return;

	// sort by RTT first, to find the node with the lowest
	// RTT that is pinged
	std::sort(rb.begin(), rb.end()
		, [](node_entry const& lhs, node_entry const& rhs)
		{ return lhs.rtt < rhs.rtt; });

	while (int(b.size()) < bucket_size && !rb.empty())
	{
		// prefer a pinged node; fall back to the lowest-RTT unpinged one
		auto j = std::find_if(rb.begin(), rb.end()
			, [](node_entry const& ne) { return ne.pinged(); });
		if (j == rb.end()) j = rb.begin();
		b.push_back(*j);
		rb.erase(j);
	}
}
|
|
|
|
|
2014-01-17 05:49:04 +01:00
|
|
|
// remove the node pointed to by `n` from the given bucket. `n` must
// point into either the bucket's replacements or live_nodes vector;
// which one is determined by comparing the pointer against the
// vector's contiguous storage range
void routing_table::remove_node(node_entry* n
	, routing_table::table_t::iterator bucket)
{
	INVARIANT_CHECK;

	// does `n` point into the replacement vector?
	if (!bucket->replacements.empty()
		&& n >= &bucket->replacements[0]
		&& n < &bucket->replacements[0] + bucket->replacements.size())
	{
		// pointer arithmetic gives the element's index
		int idx = n - &bucket->replacements[0];
		TORRENT_ASSERT(m_ips.count(n->addr()) > 0);
		// keep the IP bookkeeping set in sync with the table
		m_ips.erase(n->addr());
		bucket->replacements.erase(bucket->replacements.begin() + idx);
	}

	// does `n` point into the live-node vector?
	if (!bucket->live_nodes.empty()
		&& n >= &bucket->live_nodes[0]
		&& n < &bucket->live_nodes[0] + bucket->live_nodes.size())
	{
		int idx = n - &bucket->live_nodes[0];
		TORRENT_ASSERT(m_ips.count(n->addr()) > 0);
		m_ips.erase(n->addr());
		bucket->live_nodes.erase(bucket->live_nodes.begin() + idx);
	}
}
|
|
|
|
|
2016-09-13 14:18:47 +02:00
|
|
|
// try to add node `e` to the routing table, splitting the last bucket
// as many times as needed to make room. Returns true if the node was
// added (or already present and refreshed), false otherwise
bool routing_table::add_node(node_entry const& e)
{
	add_node_status_t s = add_node_impl(e);
	if (s == failed_to_add) return false;
	if (s == node_added) return true;

	// the target bucket was full; keep splitting until the node fits
	// or splitting no longer helps
	while (s == need_bucket_split)
	{
		split_bucket();

		// if this assert triggers a lot in the wild, we should probably
		// harden our resistance towards this attack. Perhaps by never
		// splitting a bucket (and discard nodes) if the two buckets above it
		// are empty or close to empty
		// TORRENT_ASSERT(m_buckets.size() <= 50);
		if (m_buckets.size() > 50)
		{
			// this is a sanity check. In the wild, we shouldn't see routing
			// tables deeper than 26 or 27. If we get this deep, there might
			// be a bug in the bucket splitting logic, or there may be someone
			// playing a prank on us, spoofing node IDs.
			s = add_node_impl(e);
			if (s == node_added) return true;
			return false;
		}

		// if the new bucket still has too many nodes in it, we need to keep
		// splitting
		if (m_buckets.back().live_nodes.size() > bucket_limit(int(m_buckets.size()) - 1))
			continue;

		s = add_node_impl(e);
		if (s == failed_to_add) return false;
		if (s == node_added) return true;
	}
	return false;
}
|
|
|
|
|
|
|
|
routing_table::add_node_status_t routing_table::add_node_impl(node_entry e)
|
2010-01-03 12:08:39 +01:00
|
|
|
{
|
2014-12-10 08:13:57 +01:00
|
|
|
#ifdef TORRENT_EXPENSIVE_INVARIANT_CHECKS
|
2016-01-03 16:19:35 +01:00
|
|
|
// INVARIANT_CHECK;
|
2014-12-10 08:13:57 +01:00
|
|
|
#endif
|
2013-12-20 05:54:52 +01:00
|
|
|
|
2015-11-14 06:08:57 +01:00
|
|
|
// don't add if the address isn't the right type
|
2016-02-12 04:56:52 +01:00
|
|
|
if (!native_endpoint(e.ep()))
|
2015-11-14 06:08:57 +01:00
|
|
|
return failed_to_add;
|
|
|
|
|
2013-09-09 06:16:52 +02:00
|
|
|
// if we already have this (IP,port), don't do anything
|
2014-11-08 17:58:18 +01:00
|
|
|
if (m_router_nodes.find(e.ep()) != m_router_nodes.end())
|
|
|
|
return failed_to_add;
|
2010-01-03 12:08:39 +01:00
|
|
|
|
2011-01-17 08:49:44 +01:00
|
|
|
// do we already have this IP in the table?
|
2015-08-17 01:21:10 +02:00
|
|
|
if (m_ips.count(e.addr()) > 0)
|
2011-01-08 09:54:51 +01:00
|
|
|
{
|
2016-07-24 03:57:04 +02:00
|
|
|
// This exact IP already exists in the table. A node with the same IP and
|
|
|
|
// port but a different ID may be a sign of a malicious node. To be
|
|
|
|
// conservative in this case the node is removed.
|
2011-01-17 08:49:44 +01:00
|
|
|
// pinged means that we have sent a message to the IP, port and received
|
|
|
|
// a response with a correct transaction ID, i.e. it is verified to not
|
2014-01-07 09:52:53 +01:00
|
|
|
// be the result of a poisoned routing table
|
2011-01-17 08:49:44 +01:00
|
|
|
|
|
|
|
table_t::iterator existing_bucket;
|
2014-01-07 09:52:53 +01:00
|
|
|
node_entry* existing = find_node(e.ep(), &existing_bucket);
|
2016-07-09 22:26:26 +02:00
|
|
|
if (existing == nullptr)
|
2011-01-17 08:49:44 +01:00
|
|
|
{
|
2016-01-01 15:21:07 +01:00
|
|
|
// the node we're trying to add is not a match with an existing node. we
|
|
|
|
// should ignore it, unless we allow duplicate IPs in our routing
|
|
|
|
// table. There could be a node with the same IP, but with a different
|
|
|
|
// port. m_ips just contain IP addresses, whereas the lookup we just
|
|
|
|
// performed was for full endpoints (address, port).
|
2011-01-17 08:49:44 +01:00
|
|
|
if (m_settings.restrict_routing_ips)
|
|
|
|
{
|
2015-05-16 21:29:49 +02:00
|
|
|
#ifndef TORRENT_DISABLE_LOGGING
|
2016-09-13 14:18:47 +02:00
|
|
|
if (m_log != nullptr && m_log->should_log(dht_logger::routing_table))
|
2016-07-29 22:32:28 +02:00
|
|
|
{
|
|
|
|
char hex_id[41];
|
2016-07-30 00:47:05 +02:00
|
|
|
aux::to_hex(e.id, hex_id);
|
2016-07-29 22:32:28 +02:00
|
|
|
m_log->log(dht_logger::routing_table, "ignoring node (duplicate IP): %s %s"
|
|
|
|
, hex_id, print_address(e.addr()).c_str());
|
|
|
|
}
|
2011-01-17 08:49:44 +01:00
|
|
|
#endif
|
2014-11-08 17:58:18 +01:00
|
|
|
return failed_to_add;
|
2011-01-17 08:49:44 +01:00
|
|
|
}
|
|
|
|
}
|
2016-01-01 15:21:07 +01:00
|
|
|
else if (existing->id == e.id)
|
2014-01-17 05:49:04 +01:00
|
|
|
{
|
|
|
|
// if the node ID is the same, just update the failcount
|
2016-01-01 15:21:07 +01:00
|
|
|
// and be done with it.
|
2014-01-07 09:52:53 +01:00
|
|
|
existing->timeout_count = 0;
|
2016-01-01 15:21:07 +01:00
|
|
|
if (e.pinged())
|
|
|
|
{
|
|
|
|
existing->update_rtt(e.rtt);
|
|
|
|
existing->last_queried = e.last_queried;
|
|
|
|
}
|
2014-11-08 17:58:18 +01:00
|
|
|
return node_added;
|
2014-01-07 09:52:53 +01:00
|
|
|
}
|
2016-01-01 15:21:07 +01:00
|
|
|
else if (!e.pinged())
|
|
|
|
{
|
|
|
|
// this may be a routing table poison attack. If we haven't confirmed
|
|
|
|
// that this peer actually exist with this new node ID yet, ignore it.
|
|
|
|
// we definitely don't want to replace the existing entry with this one
|
|
|
|
if (m_settings.restrict_routing_ips)
|
|
|
|
return failed_to_add;
|
|
|
|
}
|
|
|
|
else
|
2014-01-07 09:52:53 +01:00
|
|
|
{
|
2014-01-17 05:49:04 +01:00
|
|
|
TORRENT_ASSERT(existing->id != e.id);
|
2016-07-24 03:57:04 +02:00
|
|
|
// This is the same IP and port, but with a new node ID.
|
|
|
|
// This may indicate a malicious node so remove the entry.
|
|
|
|
#ifndef TORRENT_DISABLE_LOGGING
|
2016-09-13 14:18:47 +02:00
|
|
|
if (m_log != nullptr && m_log->should_log(dht_logger::routing_table))
|
2016-07-29 22:32:28 +02:00
|
|
|
{
|
|
|
|
char hex_id_new[41];
|
|
|
|
char hex_id_old[41];
|
2016-07-30 00:47:05 +02:00
|
|
|
aux::to_hex(e.id, hex_id_new);
|
|
|
|
aux::to_hex(existing->id, hex_id_old);
|
2016-07-29 22:32:28 +02:00
|
|
|
m_log->log(dht_logger::routing_table, "evicting node (changed ID): old: %s new: %s %s"
|
|
|
|
, hex_id_old, hex_id_new, print_address(e.addr()).c_str());
|
|
|
|
}
|
2016-07-24 03:57:04 +02:00
|
|
|
#endif
|
|
|
|
|
2014-01-17 05:49:04 +01:00
|
|
|
remove_node(existing, existing_bucket);
|
2016-07-24 03:57:04 +02:00
|
|
|
fill_from_replacements(existing_bucket);
|
2016-07-25 00:41:40 +02:00
|
|
|
|
|
|
|
// when we detect possible malicious activity in a bucket,
|
|
|
|
// schedule the other nodes in the bucket to be pinged soon
|
|
|
|
// to clean out any other malicious nodes
|
2016-09-13 14:18:47 +02:00
|
|
|
auto const now = aux::time_now();
|
2016-07-25 00:41:40 +02:00
|
|
|
for (auto& node : existing_bucket->live_nodes)
|
|
|
|
{
|
2016-07-30 06:38:17 +02:00
|
|
|
if (node.last_queried + minutes(5) < now)
|
2016-07-25 00:41:40 +02:00
|
|
|
node.last_queried = min_time();
|
|
|
|
}
|
|
|
|
|
2016-07-24 03:57:04 +02:00
|
|
|
return failed_to_add;
|
2011-01-08 09:54:51 +01:00
|
|
|
}
|
|
|
|
}
|
2015-08-22 15:24:49 +02:00
|
|
|
|
2016-07-25 02:22:04 +02:00
|
|
|
// don't add ourself
|
|
|
|
if (e.id == m_id) return failed_to_add;
|
|
|
|
|
2012-06-25 16:17:51 +02:00
|
|
|
table_t::iterator i = find_bucket(e.id);
|
2010-01-03 12:08:39 +01:00
|
|
|
bucket_t& b = i->live_nodes;
|
|
|
|
bucket_t& rb = i->replacements;
|
2016-01-01 15:21:07 +01:00
|
|
|
int const bucket_index = std::distance(m_buckets.begin(), i);
|
2016-01-02 04:51:01 +01:00
|
|
|
// compare against the max size of the next bucket. Otherwise we may wait too
|
|
|
|
// long to split, and lose nodes (in the case where lower-numbered buckets
|
|
|
|
// are larger)
|
2016-01-01 15:21:07 +01:00
|
|
|
int const bucket_size_limit = bucket_limit(bucket_index);
|
2016-01-02 04:51:01 +01:00
|
|
|
int const next_bucket_size_limit = bucket_limit(bucket_index + 1);
|
2006-08-01 17:27:08 +02:00
|
|
|
|
2011-01-08 09:54:51 +01:00
|
|
|
bucket_t::iterator j;
|
|
|
|
|
2006-08-01 17:27:08 +02:00
|
|
|
// if the node already exists, we don't need it
|
2011-01-08 09:54:51 +01:00
|
|
|
j = std::find_if(b.begin(), b.end()
|
2016-05-25 06:31:52 +02:00
|
|
|
, [&e](node_entry const& ne) { return ne.id == e.id; });
|
2010-01-03 12:08:39 +01:00
|
|
|
|
|
|
|
if (j != b.end())
|
|
|
|
{
|
2011-01-08 09:54:51 +01:00
|
|
|
// a new IP address just claimed this node-ID
|
|
|
|
// ignore it
|
2014-11-08 17:58:18 +01:00
|
|
|
if (j->addr() != e.addr() || j->port() != e.port())
|
|
|
|
return failed_to_add;
|
2011-01-08 09:54:51 +01:00
|
|
|
|
2010-01-03 12:08:39 +01:00
|
|
|
// we already have the node in our bucket
|
2011-01-17 08:49:44 +01:00
|
|
|
TORRENT_ASSERT(j->id == e.id && j->ep() == e.ep());
|
|
|
|
j->timeout_count = 0;
|
2012-09-22 20:15:29 +02:00
|
|
|
j->update_rtt(e.rtt);
|
2014-11-08 17:58:18 +01:00
|
|
|
return node_added;
|
2010-01-03 12:08:39 +01:00
|
|
|
}
|
2006-08-01 17:27:08 +02:00
|
|
|
|
2012-09-22 20:15:29 +02:00
|
|
|
// if this node exists in the replacement bucket. update it and
|
|
|
|
// pull it out from there. We may add it back to the replacement
|
|
|
|
// bucket, but we may also replace a node in the main bucket, now
|
|
|
|
// that we have an updated RTT
|
2016-05-25 06:31:52 +02:00
|
|
|
j = std::find_if(rb.begin(), rb.end()
|
|
|
|
, [&e](node_entry const& ne) { return ne.id == e.id; });
|
2012-09-22 20:15:29 +02:00
|
|
|
if (j != rb.end())
|
|
|
|
{
|
|
|
|
// a new IP address just claimed this node-ID
|
|
|
|
// ignore it
|
2014-11-08 17:58:18 +01:00
|
|
|
if (j->addr() != e.addr() || j->port() != e.port())
|
|
|
|
return failed_to_add;
|
|
|
|
|
2012-09-22 20:15:29 +02:00
|
|
|
TORRENT_ASSERT(j->id == e.id && j->ep() == e.ep());
|
|
|
|
j->timeout_count = 0;
|
|
|
|
j->update_rtt(e.rtt);
|
|
|
|
e = *j;
|
2015-08-17 01:21:10 +02:00
|
|
|
m_ips.erase(j->addr());
|
2012-09-22 20:15:29 +02:00
|
|
|
rb.erase(j);
|
|
|
|
}
|
2006-08-01 17:27:08 +02:00
|
|
|
|
2011-01-08 09:54:51 +01:00
|
|
|
if (m_settings.restrict_routing_ips)
|
|
|
|
{
|
|
|
|
// don't allow multiple entries from IPs very close to each other
|
2016-09-13 14:18:47 +02:00
|
|
|
address const& cmp = e.addr();
|
2016-05-04 05:22:25 +02:00
|
|
|
j = std::find_if(b.begin(), b.end(), [&](node_entry const& a) { return compare_ip_cidr(a.addr(), cmp); });
|
2016-01-03 02:03:18 +01:00
|
|
|
if (j == b.end())
|
2011-01-08 09:54:51 +01:00
|
|
|
{
|
2016-05-04 05:22:25 +02:00
|
|
|
j = std::find_if(rb.begin(), rb.end(), [&](node_entry const& a) { return compare_ip_cidr(a.addr(), cmp); });
|
2016-01-03 02:03:18 +01:00
|
|
|
if (j == rb.end()) goto ip_ok;
|
2011-01-08 09:54:51 +01:00
|
|
|
}
|
|
|
|
|
2016-01-03 02:03:18 +01:00
|
|
|
// we already have a node in this bucket with an IP very
|
|
|
|
// close to this one. We know that it's not the same, because
|
|
|
|
// it claims a different node-ID. Ignore this to avoid attacks
|
2015-05-16 21:29:49 +02:00
|
|
|
#ifndef TORRENT_DISABLE_LOGGING
|
2016-09-13 14:18:47 +02:00
|
|
|
if (m_log != nullptr && m_log->should_log(dht_logger::routing_table))
|
2016-07-29 22:32:28 +02:00
|
|
|
{
|
|
|
|
char hex_id1[41];
|
2016-07-30 00:47:05 +02:00
|
|
|
aux::to_hex(e.id, hex_id1);
|
2016-07-29 22:32:28 +02:00
|
|
|
char hex_id2[41];
|
2016-07-30 00:47:05 +02:00
|
|
|
aux::to_hex(j->id, hex_id2);
|
2016-07-29 22:32:28 +02:00
|
|
|
m_log->log(dht_logger::routing_table, "ignoring node: %s %s existing node: %s %s"
|
|
|
|
, hex_id1, print_address(e.addr()).c_str()
|
|
|
|
, hex_id2, print_address(j->addr()).c_str());
|
|
|
|
}
|
2011-01-08 09:54:51 +01:00
|
|
|
#endif
|
2016-01-03 02:03:18 +01:00
|
|
|
return failed_to_add;
|
2011-01-08 09:54:51 +01:00
|
|
|
}
|
2016-01-03 02:03:18 +01:00
|
|
|
ip_ok:
|
2011-01-08 09:54:51 +01:00
|
|
|
|
2016-01-02 04:51:01 +01:00
|
|
|
// can we split the bucket?
|
|
|
|
// only nodes that haven't failed can split the bucket, and we can only
|
|
|
|
// split the last bucket
|
2016-06-20 17:32:06 +02:00
|
|
|
bool const can_split = (std::next(i) == m_buckets.end()
|
2016-01-02 04:51:01 +01:00
|
|
|
&& m_buckets.size() < 159)
|
|
|
|
&& e.fail_count() == 0
|
2016-06-20 17:32:06 +02:00
|
|
|
&& (i == m_buckets.begin() || std::prev(i)->live_nodes.size() > 1);
|
2016-01-02 04:51:01 +01:00
|
|
|
|
2012-09-22 20:15:29 +02:00
|
|
|
// if there's room in the main bucket, just insert it
|
2016-01-02 04:51:01 +01:00
|
|
|
// if we can split the bucket (i.e. it's the last bucket) use the next
|
|
|
|
// bucket's size limit. This makes use split the low-numbered buckets split
|
|
|
|
// earlier when we have larger low buckets, to make it less likely that we
|
|
|
|
// lose nodes
|
|
|
|
if (int(b.size()) < (can_split ? next_bucket_size_limit : bucket_size_limit))
|
2008-11-10 03:08:42 +01:00
|
|
|
{
|
2012-09-22 23:40:16 +02:00
|
|
|
if (b.empty()) b.reserve(bucket_size_limit);
|
2010-01-03 12:08:39 +01:00
|
|
|
b.push_back(e);
|
2015-08-17 01:21:10 +02:00
|
|
|
m_ips.insert(e.addr());
|
2014-11-08 17:58:18 +01:00
|
|
|
return node_added;
|
2010-01-03 12:08:39 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// if there is no room, we look for nodes that are not 'pinged',
|
|
|
|
// i.e. we haven't confirmed that they respond to messages.
|
|
|
|
// Then we look for nodes marked as stale
|
|
|
|
// in the k-bucket. If we find one, we can replace it.
|
2013-09-09 06:16:52 +02:00
|
|
|
// then we look for nodes with the same 3 bit prefix (or however
|
|
|
|
// many bits prefix the bucket size warrants). If there is no other
|
|
|
|
// node with this prefix, remove the duplicate with the highest RTT.
|
|
|
|
// as the last replacement strategy, if the node we found matching our
|
|
|
|
// bit prefix has higher RTT than the new node, replace it.
|
2010-01-03 12:08:39 +01:00
|
|
|
|
|
|
|
if (e.pinged() && e.fail_count() == 0)
|
|
|
|
{
|
|
|
|
// if the node we're trying to insert is considered pinged,
|
|
|
|
// we may replace other nodes that aren't pinged
|
|
|
|
|
2016-05-25 06:31:52 +02:00
|
|
|
j = std::find_if(b.begin(), b.end()
|
|
|
|
, [](node_entry const& ne) { return ne.pinged() == false; });
|
2010-01-03 12:08:39 +01:00
|
|
|
|
|
|
|
if (j != b.end() && !j->pinged())
|
|
|
|
{
|
|
|
|
// j points to a node that has not been pinged.
|
|
|
|
// Replace it with this new one
|
2015-08-17 01:21:10 +02:00
|
|
|
m_ips.erase(j->addr());
|
2012-09-22 20:15:29 +02:00
|
|
|
*j = e;
|
2015-08-17 01:21:10 +02:00
|
|
|
m_ips.insert(e.addr());
|
2014-11-08 17:58:18 +01:00
|
|
|
return node_added;
|
2010-01-03 12:08:39 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// A node is considered stale if it has failed at least one
|
|
|
|
// time. Here we choose the node that has failed most times.
|
|
|
|
// If we don't find one, place this node in the replacement-
|
|
|
|
// cache and replace any nodes that will fail in the future
|
|
|
|
// with nodes from that cache.
|
|
|
|
|
|
|
|
j = std::max_element(b.begin(), b.end()
|
2016-05-25 06:31:52 +02:00
|
|
|
, [](node_entry const& lhs, node_entry const& rhs)
|
|
|
|
{ return lhs.fail_count() < rhs.fail_count(); });
|
2013-09-09 06:16:52 +02:00
|
|
|
TORRENT_ASSERT(j != b.end());
|
2010-01-03 12:08:39 +01:00
|
|
|
|
2013-09-09 06:16:52 +02:00
|
|
|
if (j->fail_count() > 0)
|
2010-01-03 12:08:39 +01:00
|
|
|
{
|
|
|
|
// i points to a node that has been marked
|
|
|
|
// as stale. Replace it with this new one
|
2015-08-17 01:21:10 +02:00
|
|
|
m_ips.erase(j->addr());
|
2012-09-22 20:15:29 +02:00
|
|
|
*j = e;
|
2015-08-17 01:21:10 +02:00
|
|
|
m_ips.insert(e.addr());
|
2014-11-08 17:58:18 +01:00
|
|
|
return node_added;
|
2010-01-03 12:08:39 +01:00
|
|
|
}
|
2015-08-22 15:24:49 +02:00
|
|
|
|
2013-09-09 06:16:52 +02:00
|
|
|
// in order to provide as few lookups as possible before finding
|
|
|
|
// the data someone is looking for, make sure there is an affinity
|
|
|
|
// towards having a good spread of node IDs in each bucket
|
|
|
|
|
2016-06-18 20:01:38 +02:00
|
|
|
std::uint32_t mask = bucket_size_limit - 1;
|
2013-09-09 06:16:52 +02:00
|
|
|
int mask_shift = 0;
|
|
|
|
TORRENT_ASSERT_VAL(mask > 0, mask);
|
|
|
|
while ((mask & 0x80) == 0)
|
|
|
|
{
|
|
|
|
mask <<= 1;
|
|
|
|
++mask_shift;
|
|
|
|
}
|
2012-09-22 20:15:29 +02:00
|
|
|
|
2013-09-09 06:16:52 +02:00
|
|
|
// in case bucket_size_limit is not an even power of 2
|
|
|
|
mask = (0xff << mask_shift) & 0xff;
|
2012-09-22 20:15:29 +02:00
|
|
|
|
2013-09-14 23:49:08 +02:00
|
|
|
// pick out all nodes that have the same prefix as the new node
|
|
|
|
std::vector<bucket_t::iterator> nodes;
|
|
|
|
bool force_replace = false;
|
2015-08-18 13:55:50 +02:00
|
|
|
|
2016-08-26 01:26:48 +02:00
|
|
|
// the last bucket is special, since it hasn't been split yet, it
|
|
|
|
// includes that top bit as well
|
|
|
|
int const prefix_offset =
|
|
|
|
bucket_index + 1 == m_buckets.size() ? bucket_index : bucket_index + 1;
|
|
|
|
|
2013-09-14 23:49:08 +02:00
|
|
|
{
|
2015-08-18 13:55:50 +02:00
|
|
|
node_id id = e.id;
|
2016-08-26 01:26:48 +02:00
|
|
|
id <<= prefix_offset;
|
|
|
|
int const candidate_prefix = id[0] & mask;
|
2015-08-18 13:55:50 +02:00
|
|
|
|
|
|
|
for (j = b.begin(); j != b.end(); ++j)
|
|
|
|
{
|
2016-09-19 02:08:15 +02:00
|
|
|
if (!matching_prefix(j->id, mask, candidate_prefix, prefix_offset)) continue;
|
2015-08-18 13:55:50 +02:00
|
|
|
nodes.push_back(j);
|
|
|
|
}
|
2013-09-14 23:49:08 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
if (!nodes.empty())
|
2013-09-09 06:16:52 +02:00
|
|
|
{
|
2013-09-14 23:49:08 +02:00
|
|
|
j = *std::max_element(nodes.begin(), nodes.end()
|
2016-05-25 06:31:52 +02:00
|
|
|
, [](bucket_t::iterator lhs, bucket_t::iterator rhs)
|
|
|
|
{ return lhs->rtt < rhs->rtt; });
|
2013-09-14 23:49:08 +02:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
// there is no node in this prefix-slot, there may be some
|
|
|
|
// nodes sharing a prefix. Find all nodes that do not
|
2013-09-09 06:16:52 +02:00
|
|
|
// have a unique prefix
|
|
|
|
|
2013-10-20 09:06:42 +02:00
|
|
|
// find node entries with duplicate prefixes in O(1)
|
2016-04-25 23:22:09 +02:00
|
|
|
std::vector<bucket_t::iterator> prefix(int(1 << (8 - mask_shift)), b.end());
|
2014-07-05 16:10:25 +02:00
|
|
|
TORRENT_ASSERT(int(prefix.size()) >= bucket_size_limit);
|
2013-10-20 09:06:42 +02:00
|
|
|
|
|
|
|
// the begin iterator from this object is used as a placeholder
|
|
|
|
// for an occupied slot whose node has already been added to the
|
|
|
|
// duplicate nodes list.
|
|
|
|
bucket_t placeholder;
|
2013-09-09 06:16:52 +02:00
|
|
|
|
|
|
|
nodes.reserve(b.size());
|
|
|
|
for (j = b.begin(); j != b.end(); ++j)
|
|
|
|
{
|
|
|
|
node_id id = j->id;
|
2016-08-26 01:26:48 +02:00
|
|
|
id <<= prefix_offset;
|
2013-10-20 09:06:42 +02:00
|
|
|
int this_prefix = (id[0] & mask) >> mask_shift;
|
|
|
|
TORRENT_ASSERT(this_prefix >= 0);
|
2014-07-05 16:10:25 +02:00
|
|
|
TORRENT_ASSERT(this_prefix < int(prefix.size()));
|
2013-10-20 09:06:42 +02:00
|
|
|
if (prefix[this_prefix] != b.end())
|
2013-09-09 06:16:52 +02:00
|
|
|
{
|
2013-10-20 09:06:42 +02:00
|
|
|
// there's already a node with this prefix. Remember both
|
|
|
|
// duplicates.
|
|
|
|
nodes.push_back(j);
|
|
|
|
|
|
|
|
if (prefix[this_prefix] != placeholder.begin())
|
|
|
|
{
|
|
|
|
nodes.push_back(prefix[this_prefix]);
|
|
|
|
prefix[this_prefix] = placeholder.begin();
|
|
|
|
}
|
2013-09-09 06:16:52 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!nodes.empty())
|
|
|
|
{
|
|
|
|
// from these nodes, pick the one with the highest RTT
|
|
|
|
// and replace it
|
|
|
|
|
2016-09-13 14:18:47 +02:00
|
|
|
auto k = std::max_element(nodes.begin(), nodes.end()
|
2016-05-25 06:31:52 +02:00
|
|
|
, [](bucket_t::iterator lhs, bucket_t::iterator rhs)
|
|
|
|
{ return lhs->rtt < rhs->rtt; });
|
2013-09-09 06:16:52 +02:00
|
|
|
|
|
|
|
// in this case, we would really rather replace the node even if
|
2016-09-13 14:18:47 +02:00
|
|
|
// the new node has higher RTT, because it fills a new prefix that we otherwise
|
2013-09-14 23:49:08 +02:00
|
|
|
// don't have.
|
|
|
|
force_replace = true;
|
2013-09-09 06:16:52 +02:00
|
|
|
j = *k;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
j = std::max_element(b.begin(), b.end()
|
2016-05-25 06:31:52 +02:00
|
|
|
, [](node_entry const& lhs, node_entry const& rhs)
|
|
|
|
{ return lhs.rtt < rhs.rtt; });
|
2013-09-09 06:16:52 +02:00
|
|
|
}
|
|
|
|
}
|
2012-09-22 20:15:29 +02:00
|
|
|
|
2013-09-14 23:49:08 +02:00
|
|
|
if (j != b.end() && (force_replace || j->rtt > e.rtt))
|
2012-09-22 20:15:29 +02:00
|
|
|
{
|
2015-08-17 01:21:10 +02:00
|
|
|
m_ips.erase(j->addr());
|
2012-09-22 20:15:29 +02:00
|
|
|
*j = e;
|
2015-08-17 01:21:10 +02:00
|
|
|
m_ips.insert(e.addr());
|
2015-05-16 21:29:49 +02:00
|
|
|
#ifndef TORRENT_DISABLE_LOGGING
|
2016-09-13 14:18:47 +02:00
|
|
|
if (m_log != nullptr && m_log->should_log(dht_logger::routing_table))
|
2016-01-01 15:21:07 +01:00
|
|
|
{
|
|
|
|
char hex_id[41];
|
2016-07-29 08:36:15 +02:00
|
|
|
aux::to_hex(e.id, hex_id);
|
2016-01-01 15:21:07 +01:00
|
|
|
m_log->log(dht_logger::routing_table, "replacing node with higher RTT: %s %s"
|
|
|
|
, hex_id, print_address(e.addr()).c_str());
|
|
|
|
}
|
2013-09-14 23:49:08 +02:00
|
|
|
#endif
|
2014-11-08 17:58:18 +01:00
|
|
|
return node_added;
|
2012-09-22 20:15:29 +02:00
|
|
|
}
|
2013-09-09 06:16:52 +02:00
|
|
|
// in order to keep lookup times small, prefer nodes with low RTTs
|
|
|
|
|
2010-01-03 12:08:39 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// if we can't split, try to insert into the replacement bucket
|
|
|
|
|
|
|
|
if (!can_split)
|
|
|
|
{
|
|
|
|
// if we don't have any identified stale nodes in
|
|
|
|
// the bucket, and the bucket is full, we have to
|
|
|
|
// cache this node and wait until some node fails
|
|
|
|
// and then replace it.
|
|
|
|
|
|
|
|
j = std::find_if(rb.begin(), rb.end()
|
2016-05-25 06:31:52 +02:00
|
|
|
, [&e](node_entry const& ne) { return ne.id == e.id; });
|
2010-01-03 12:08:39 +01:00
|
|
|
|
|
|
|
// if the node is already in the replacement bucket
|
|
|
|
// just return.
|
|
|
|
if (j != rb.end())
|
|
|
|
{
|
2011-01-08 09:54:51 +01:00
|
|
|
// if the IP address matches, it's the same node
|
|
|
|
// make sure it's marked as pinged
|
|
|
|
if (j->ep() == e.ep()) j->set_pinged();
|
2014-11-08 17:58:18 +01:00
|
|
|
return node_added;
|
2010-01-03 12:08:39 +01:00
|
|
|
}
|
|
|
|
|
2015-08-02 05:57:11 +02:00
|
|
|
if (int(rb.size()) >= m_bucket_size)
|
2010-01-03 12:08:39 +01:00
|
|
|
{
|
|
|
|
// if the replacement bucket is full, remove the oldest entry
|
|
|
|
// but prefer nodes that haven't been pinged, since they are
|
|
|
|
// less reliable than this one, that has been pinged
|
2016-05-25 06:31:52 +02:00
|
|
|
j = std::find_if(rb.begin(), rb.end()
|
|
|
|
, [] (node_entry const& ne) { return ne.pinged() == false; });
|
2011-01-08 09:54:51 +01:00
|
|
|
if (j == rb.end()) j = rb.begin();
|
2015-08-17 01:21:10 +02:00
|
|
|
m_ips.erase(j->addr());
|
2011-01-08 09:54:51 +01:00
|
|
|
rb.erase(j);
|
2010-01-03 12:08:39 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
if (rb.empty()) rb.reserve(m_bucket_size);
|
|
|
|
rb.push_back(e);
|
2015-08-17 01:21:10 +02:00
|
|
|
m_ips.insert(e.addr());
|
2014-11-08 17:58:18 +01:00
|
|
|
return node_added;
|
2010-01-03 12:08:39 +01:00
|
|
|
}
|
|
|
|
|
2014-11-08 17:58:18 +01:00
|
|
|
return need_bucket_split;
|
2013-01-18 07:17:30 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
void routing_table::split_bucket()
|
|
|
|
{
|
2013-12-20 05:54:52 +01:00
|
|
|
INVARIANT_CHECK;
|
|
|
|
|
2016-04-25 23:22:09 +02:00
|
|
|
int const bucket_index = int(m_buckets.size()) - 1;
|
2016-01-01 15:21:07 +01:00
|
|
|
int const bucket_size_limit = bucket_limit(bucket_index);
|
2016-01-02 04:51:01 +01:00
|
|
|
TORRENT_ASSERT(int(m_buckets.back().live_nodes.size()) >= bucket_limit(bucket_index + 1));
|
2013-01-18 07:17:30 +01:00
|
|
|
|
2010-01-03 12:08:39 +01:00
|
|
|
// this is the last bucket, and it's full already. Split
|
|
|
|
// it by adding another bucket
|
|
|
|
m_buckets.push_back(routing_table_node());
|
|
|
|
bucket_t& new_bucket = m_buckets.back().live_nodes;
|
|
|
|
bucket_t& new_replacement_bucket = m_buckets.back().replacements;
|
|
|
|
|
2014-01-17 05:49:04 +01:00
|
|
|
bucket_t& b = m_buckets[bucket_index].live_nodes;
|
|
|
|
bucket_t& rb = m_buckets[bucket_index].replacements;
|
|
|
|
|
2016-09-13 14:18:47 +02:00
|
|
|
// move any node whose (160 - distance_exp(m_id, id)) >= (i - m_buckets.begin())
|
2010-01-03 12:08:39 +01:00
|
|
|
// to the new bucket
|
2016-01-01 15:21:07 +01:00
|
|
|
int const new_bucket_size = bucket_limit(bucket_index + 1);
|
2010-01-03 12:08:39 +01:00
|
|
|
for (bucket_t::iterator j = b.begin(); j != b.end();)
|
|
|
|
{
|
2016-01-01 15:21:07 +01:00
|
|
|
int const d = distance_exp(m_id, j->id);
|
|
|
|
if (d >= 159 - bucket_index)
|
2010-01-03 12:08:39 +01:00
|
|
|
{
|
|
|
|
++j;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
// this entry belongs in the new bucket
|
2013-01-18 07:17:30 +01:00
|
|
|
new_bucket.push_back(*j);
|
2010-01-03 12:08:39 +01:00
|
|
|
j = b.erase(j);
|
|
|
|
}
|
2010-12-12 21:36:42 +01:00
|
|
|
|
2014-11-08 17:58:18 +01:00
|
|
|
if (b.size() > bucket_size_limit)
|
|
|
|
{
|
2014-12-31 15:41:35 +01:00
|
|
|
// TODO: 2 move the lowest priority nodes to the replacement bucket
|
2014-11-08 17:58:18 +01:00
|
|
|
for (bucket_t::iterator i = b.begin() + bucket_size_limit
|
|
|
|
, end(b.end()); i != end; ++i)
|
|
|
|
{
|
|
|
|
rb.push_back(*i);
|
|
|
|
}
|
|
|
|
|
|
|
|
b.resize(bucket_size_limit);
|
|
|
|
}
|
|
|
|
|
2010-12-12 21:36:42 +01:00
|
|
|
// split the replacement bucket as well. If the live bucket
|
|
|
|
// is not full anymore, also move the replacement entries
|
|
|
|
// into the main bucket
|
2010-01-03 12:08:39 +01:00
|
|
|
for (bucket_t::iterator j = rb.begin(); j != rb.end();)
|
|
|
|
{
|
|
|
|
if (distance_exp(m_id, j->id) >= 159 - bucket_index)
|
|
|
|
{
|
2012-09-22 23:40:16 +02:00
|
|
|
if (int(b.size()) >= bucket_size_limit)
|
2010-12-12 21:36:42 +01:00
|
|
|
{
|
|
|
|
++j;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
b.push_back(*j);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
// this entry belongs in the new bucket
|
2012-09-22 23:40:16 +02:00
|
|
|
if (int(new_bucket.size()) < new_bucket_size)
|
2010-12-12 21:36:42 +01:00
|
|
|
new_bucket.push_back(*j);
|
2014-09-20 21:27:29 +02:00
|
|
|
else
|
2014-11-08 17:58:18 +01:00
|
|
|
new_replacement_bucket.push_back(*j);
|
2010-01-03 12:08:39 +01:00
|
|
|
}
|
|
|
|
j = rb.erase(j);
|
2008-11-10 03:08:42 +01:00
|
|
|
}
|
2010-01-03 12:08:39 +01:00
|
|
|
}
|
|
|
|
|
2016-09-12 15:20:15 +02:00
|
|
|
// Changes our own node ID. Since every bucket's contents depend on the
// distance to our ID, the whole table is rebuilt: all nodes are pulled
// out and re-inserted relative to the new ID. Nodes that no longer fit
// are naturally dropped by add_node().
void routing_table::update_node_id(node_id const& id)
{
	m_id = id;

	m_ips.clear();

	// pull all nodes out of the routing table, effectively emptying it
	table_t old_buckets;
	old_buckets.swap(m_buckets);

	// then add them all back. First add the main nodes, then the replacement
	// nodes. Range-for avoids the signed/unsigned index comparison of a
	// manual counting loop.
	for (auto const& bucket : old_buckets)
	{
		for (auto const& n : bucket.live_nodes)
		{
			add_node(n);
		}
	}

	// now add back the replacement nodes
	for (auto const& bucket : old_buckets)
	{
		for (auto const& n : bucket.replacements)
		{
			add_node(n);
		}
	}
}
|
|
|
|
|
2010-01-03 12:08:39 +01:00
|
|
|
void routing_table::for_each_node(
|
|
|
|
void (*fun1)(void*, node_entry const&)
|
|
|
|
, void (*fun2)(void*, node_entry const&)
|
|
|
|
, void* userdata) const
|
|
|
|
{
|
2016-09-13 14:18:47 +02:00
|
|
|
for (auto const& i : m_buckets)
|
2010-01-03 12:08:39 +01:00
|
|
|
{
|
|
|
|
if (fun1)
|
|
|
|
{
|
2016-09-13 14:18:47 +02:00
|
|
|
for (auto const& j : i.live_nodes)
|
|
|
|
fun1(userdata, j);
|
2010-01-03 12:08:39 +01:00
|
|
|
}
|
|
|
|
if (fun2)
|
|
|
|
{
|
2016-09-13 14:18:47 +02:00
|
|
|
for (auto const& j : i.replacements)
|
|
|
|
fun2(userdata, j);
|
2010-01-03 12:08:39 +01:00
|
|
|
}
|
|
|
|
}
|
2006-08-01 17:27:08 +02:00
|
|
|
}
|
|
|
|
|
2014-11-02 10:41:29 +01:00
|
|
|
// Records that a message to the node identified by (nid, ep) failed.
// Depending on where the node lives (live bucket vs. replacement bucket)
// and whether replacements are available, the node is either marked as
// timed out, removed, or swapped out for a replacement node.
void routing_table::node_failed(node_id const& nid, udp::endpoint const& ep)
{
#ifdef TORRENT_EXPENSIVE_INVARIANT_CHECKS
	INVARIANT_CHECK;
#endif

	// if messages to ourself fails, ignore it
	if (nid == m_id) return;

	table_t::iterator i = find_bucket(nid);
	bucket_t& b = i->live_nodes;
	bucket_t& rb = i->replacements;

	// look for the failed node among the live nodes first
	bucket_t::iterator j = std::find_if(b.begin(), b.end()
		, [&nid](node_entry const& ne) { return ne.id == nid; });

	if (j == b.end())
	{
		// not a live node; maybe it's in the replacement bucket
		j = std::find_if(rb.begin(), rb.end()
			, [&nid](node_entry const& ne) { return ne.id == nid; });

		// unknown node, or a different node claiming the same ID
		// (endpoint mismatch) — nothing to record
		if (j == rb.end()
			|| j->ep() != ep) return;

		// bump the fail counter on the replacement entry
		j->timed_out();

#ifndef TORRENT_DISABLE_LOGGING
		if (m_log != nullptr && m_log->should_log(dht_logger::routing_table))
		{
			char hex_id[41];
			aux::to_hex(nid, hex_id);
			m_log->log(dht_logger::routing_table, "NODE FAILED id: %s ip: %s fails: %d pinged: %d up-time: %d"
				, hex_id, print_endpoint(j->ep()).c_str()
				, j->fail_count()
				, int(j->pinged())
				, int(total_seconds(aux::time_now() - j->first_seen)));
		}
#endif
		return;
	}

	// if the endpoint doesn't match, it's a different node
	// claiming the same ID. The node we have in our routing
	// table is not necessarily stale
	if (j->ep() != ep) return;

	if (rb.empty())
	{
		// no replacements available: mark the live node as failed and
		// only drop it once it has failed too often (or never responded)
		j->timed_out();

#ifndef TORRENT_DISABLE_LOGGING
		if (m_log != nullptr && m_log->should_log(dht_logger::routing_table))
		{
			char hex_id[41];
			aux::to_hex(nid, hex_id);
			m_log->log(dht_logger::routing_table, "NODE FAILED id: %s ip: %s fails: %d pinged: %d up-time: %d"
				, hex_id, print_endpoint(j->ep()).c_str()
				, j->fail_count()
				, int(j->pinged())
				, int(total_seconds(aux::time_now() - j->first_seen)));
		}
#endif

		// if this node has failed too many times, or if this node
		// has never responded at all, remove it
		if (j->fail_count() >= m_settings.max_fail_count || !j->pinged())
		{
			m_ips.erase(j->addr());
			b.erase(j);
		}
		return;
	}

	// we do have replacements: evict the failed node immediately and
	// promote a replacement into the live bucket
	m_ips.erase(j->addr());
	b.erase(j);

	fill_from_replacements(i);
}
|
|
|
|
|
2016-09-12 15:20:15 +02:00
|
|
|
// Registers a router (bootstrap) endpoint. Router nodes are kept in a
// separate set rather than in the buckets themselves.
void routing_table::add_router_node(udp::endpoint const& router)
{
	m_router_nodes.insert(router);
}
|
|
|
|
|
2014-11-01 23:47:56 +01:00
|
|
|
// we heard from this node, but we don't know if it was spoofed or not (i.e.
|
|
|
|
// pinged == false)
|
2010-01-03 12:08:39 +01:00
|
|
|
// we heard from this node, but we don't know if it was spoofed or not (i.e.
// pinged == false)
void routing_table::heard_about(node_id const& id, udp::endpoint const& ep)
{
	// only consider the node if its address passes the sanity checks
	if (verify_node_address(m_settings, id, ep.address()))
		add_node(node_entry(id, ep));
}
|
|
|
|
|
2014-11-01 23:47:56 +01:00
|
|
|
// this function is called every time the node sees a sign of a node being
|
|
|
|
// alive. This node will either be inserted in the k-buckets or be moved to the
|
|
|
|
// top of its bucket. the return value indicates if the table needs a refresh.
|
|
|
|
// if true, the node should refresh the table (i.e. do a find_node on its own
|
|
|
|
// id)
|
2016-09-12 15:20:15 +02:00
|
|
|
// this function is called every time the node sees a sign of a node being
// alive. This node will either be inserted in the k-buckets or be moved to the
// top of its bucket. the return value indicates if the table needs a refresh.
// if true, the node should refresh the table (i.e. do a find_node on its own
// id)
bool routing_table::node_seen(node_id const& id, udp::endpoint const& ep, int rtt)
{
	// short-circuits: add_node() is only reached when the address
	// passes verification; otherwise the result is false
	return verify_node_address(m_settings, id, ep.address())
		&& add_node(node_entry(id, ep, rtt, true));
}
|
|
|
|
|
|
|
|
// fills the vector with the k nodes from our buckets that
|
|
|
|
// are nearest to the given id.
|
|
|
|
// fills the vector with the k nodes from our buckets that
// are nearest to the given id.
// `options` may contain include_failed, in which case unconfirmed nodes
// are included as well. `count` == 0 means "use the bucket size".
void routing_table::find_node(node_id const& target
	, std::vector<node_entry>& l, int const options, int count)
{
	l.clear();
	if (count == 0) count = m_bucket_size;

	// start at the bucket the target falls into; that's where the
	// closest nodes live
	table_t::iterator i = find_bucket(target);
	int const bucket_index = std::distance(m_buckets.begin(), i);
	int const bucket_size_limit = bucket_limit(bucket_index);

	l.reserve(bucket_size_limit);

	table_t::iterator j = i;

	// index into `l` of the first element appended by the current bucket;
	// elements before it are already known to be closer, so only the tail
	// needs sorting when we overshoot `count`
	int unsorted_start_idx = 0;
	// first walk towards buckets further from our own ID (they contain
	// nodes closer to the target)
	for (; j != m_buckets.end() && int(l.size()) < count; ++j)
	{
		bucket_t& b = j->live_nodes;
		if (options & include_failed)
		{
			std::copy(b.begin(), b.end(), std::back_inserter(l));
		}
		else
		{
			// skip nodes we haven't confirmed as responsive
			std::remove_copy_if(b.begin(), b.end(), std::back_inserter(l)
				, [](node_entry const& ne) { return ne.confirmed() == false; });
		}

		if (int(l.size()) == count) return;

		if (int(l.size()) > count)
		{
			// sort the nodes by how close they are to the target
			std::sort(l.begin() + unsorted_start_idx, l.end()
				, [&target](node_entry const& lhs, node_entry const& rhs)
				{ return compare_ref(lhs.id, rhs.id, target); });

			// keep only the `count` closest
			l.resize(count);
			return;
		}
		unsorted_start_idx = int(l.size());
	}

	// if we still don't have enough nodes, copy nodes
	// further away from us

	if (i == m_buckets.begin())
		return;

	j = i;

	// same procedure, but walking the buckets in the other direction
	// (towards our own ID, i.e. nodes further from the target)
	unsorted_start_idx = int(l.size());
	do
	{
		--j;
		bucket_t& b = j->live_nodes;

		if (options & include_failed)
		{
			std::copy(b.begin(), b.end(), std::back_inserter(l));
		}
		else
		{
			std::remove_copy_if(b.begin(), b.end(), std::back_inserter(l)
				, [](node_entry const& ne) { return ne.confirmed() == false; });
		}

		if (int(l.size()) == count) return;

		if (int(l.size()) > count)
		{
			// sort the nodes by how close they are to the target
			std::sort(l.begin() + unsorted_start_idx, l.end()
				, [&target](node_entry const& lhs, node_entry const& rhs)
				{ return compare_ref(lhs.id, rhs.id, target); });

			l.resize(count);
			return;
		}
		unsorted_start_idx = int(l.size());
	}
	while (j != m_buckets.begin() && int(l.size()) < count);

	TORRENT_ASSERT(int(l.size()) <= count);
}
|
|
|
|
|
2014-01-21 20:26:09 +01:00
|
|
|
#if TORRENT_USE_INVARIANT_CHECKS
|
2013-12-20 05:54:52 +01:00
|
|
|
void routing_table::check_invariant() const
|
|
|
|
{
|
2015-08-17 01:21:10 +02:00
|
|
|
ip_set all_ips;
|
2013-12-20 05:54:52 +01:00
|
|
|
|
2016-09-13 14:18:47 +02:00
|
|
|
for (auto const& i : m_buckets)
|
2013-12-20 05:54:52 +01:00
|
|
|
{
|
2016-09-13 14:18:47 +02:00
|
|
|
for (auto const& j : i.replacements)
|
2013-12-20 05:54:52 +01:00
|
|
|
{
|
2016-09-13 14:18:47 +02:00
|
|
|
all_ips.insert(j.addr());
|
2013-12-20 05:54:52 +01:00
|
|
|
}
|
2016-09-13 14:18:47 +02:00
|
|
|
for (auto const& j : i.live_nodes)
|
2013-12-20 05:54:52 +01:00
|
|
|
{
|
2016-09-13 14:18:47 +02:00
|
|
|
TORRENT_ASSERT(j.addr().is_v4() == i.live_nodes.begin()->addr().is_v4());
|
|
|
|
all_ips.insert(j.addr());
|
2013-12-20 05:54:52 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
TORRENT_ASSERT(all_ips == m_ips);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2015-09-11 22:43:21 +02:00
|
|
|
bool routing_table::is_full(int bucket) const
|
|
|
|
{
|
2016-04-25 23:22:09 +02:00
|
|
|
int num_buckets = int(m_buckets.size());
|
2015-09-11 22:43:21 +02:00
|
|
|
if (num_buckets == 0) return false;
|
|
|
|
if (bucket >= num_buckets) return false;
|
|
|
|
|
|
|
|
table_t::const_iterator i = m_buckets.begin();
|
|
|
|
std::advance(i, bucket);
|
|
|
|
return (i->live_nodes.size() >= bucket_limit(bucket)
|
|
|
|
&& i->replacements.size() >= m_bucket_size);
|
|
|
|
}
|
|
|
|
|
2006-08-01 17:27:08 +02:00
|
|
|
} } // namespace libtorrent::dht
|