2006-08-01 17:27:08 +02:00
|
|
|
/*
|
|
|
|
|
2012-10-02 05:16:33 +02:00
|
|
|
Copyright (c) 2006-2012, Arvid Norberg
|
2006-08-01 17:27:08 +02:00
|
|
|
All rights reserved.
|
|
|
|
|
|
|
|
Redistribution and use in source and binary forms, with or without
|
|
|
|
modification, are permitted provided that the following conditions
|
|
|
|
are met:
|
|
|
|
|
|
|
|
* Redistributions of source code must retain the above copyright
|
|
|
|
notice, this list of conditions and the following disclaimer.
|
|
|
|
* Redistributions in binary form must reproduce the above copyright
|
|
|
|
notice, this list of conditions and the following disclaimer in
|
|
|
|
the documentation and/or other materials provided with the distribution.
|
|
|
|
* Neither the name of the author nor the names of its
|
|
|
|
contributors may be used to endorse or promote products derived
|
|
|
|
from this software without specific prior written permission.
|
|
|
|
|
|
|
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
|
|
|
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
|
|
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
|
|
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
|
|
|
|
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
|
|
|
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
|
|
|
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
|
|
|
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
|
|
|
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
|
|
|
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
|
|
|
POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
|
|
|
|
*/
|
|
|
|
|
2007-03-17 18:15:16 +01:00
|
|
|
#include "libtorrent/pch.hpp"
|
|
|
|
|
2006-08-01 17:27:08 +02:00
|
|
|
#include <vector>
|
2010-03-07 07:00:12 +01:00
|
|
|
#include <iterator> // std::distance()
|
2012-09-23 05:49:04 +02:00
|
|
|
#include <algorithm> // std::copy, std::remove_copy_if
|
2006-08-01 17:27:08 +02:00
|
|
|
#include <functional>
|
|
|
|
#include <numeric>
|
|
|
|
#include <boost/cstdint.hpp>
|
|
|
|
#include <boost/bind.hpp>
|
|
|
|
|
|
|
|
#include "libtorrent/kademlia/routing_table.hpp"
|
2011-01-08 09:54:51 +01:00
|
|
|
#include "libtorrent/broadcast_socket.hpp" // for cidr_distance
|
2009-01-23 11:36:07 +01:00
|
|
|
#include "libtorrent/session_status.hpp"
|
2006-08-01 17:27:08 +02:00
|
|
|
#include "libtorrent/kademlia/node_id.hpp"
|
|
|
|
#include "libtorrent/session_settings.hpp"
|
2009-11-25 07:55:34 +01:00
|
|
|
#include "libtorrent/time.hpp"
|
2006-08-01 17:27:08 +02:00
|
|
|
|
|
|
|
using boost::uint8_t;
|
|
|
|
|
|
|
|
namespace libtorrent { namespace dht
|
|
|
|
{
|
|
|
|
|
2008-05-10 07:51:58 +02:00
|
|
|
#ifdef TORRENT_DHT_VERBOSE_LOGGING
|
|
|
|
TORRENT_DEFINE_LOG(table)
|
|
|
|
#endif
|
|
|
|
|
2006-08-01 17:27:08 +02:00
|
|
|
// construct a routing table for the node with ID 'id'.
// 'bucket_size' is the base number of live nodes each bucket may
// hold (the Kademlia 'k' parameter); 'settings' is held by reference
// for the table's lifetime.
routing_table::routing_table(node_id const& id, int bucket_size
	, dht_settings const& settings)
	: m_bucket_size(bucket_size)
	, m_settings(settings)
	, m_id(id)
	// all timestamps start at the epoch so the first bootstrap and
	// refresh checks trigger immediately
	, m_last_bootstrap(min_time())
	, m_last_refresh(min_time())
	, m_last_self_refresh(min_time())
{
}
|
|
|
|
|
2012-09-22 23:40:16 +02:00
|
|
|
int routing_table::bucket_limit(int bucket) const
|
|
|
|
{
|
|
|
|
if (!m_settings.extended_routing_table) return m_bucket_size;
|
|
|
|
|
|
|
|
int size_exceptions[] = {16, 8, 4, 2};
|
|
|
|
if (bucket < sizeof(size_exceptions)/sizeof(size_exceptions[0]))
|
|
|
|
return m_bucket_size * size_exceptions[bucket];
|
|
|
|
return m_bucket_size;
|
|
|
|
}
|
|
|
|
|
2009-01-23 11:36:07 +01:00
|
|
|
// export the routing table's state into the session_status
// structure: aggregate node counts, the global-DHT size estimate,
// and one dht_routing_bucket entry per bucket.
void routing_table::status(session_status& s) const
{
	boost::tie(s.dht_nodes, s.dht_node_cache) = size();
	s.dht_global_nodes = num_global_nodes();

	ptime now = time_now();

	for (table_t::const_iterator it = m_buckets.begin()
		, last(m_buckets.end()); it != last; ++it)
	{
		dht_routing_bucket entry;
		entry.num_nodes = it->live_nodes.size();
		entry.num_replacements = it->replacements.size();
		entry.last_active = total_seconds(now - it->last_active);
		s.dht_routing_table.push_back(entry);
	}
}
|
|
|
|
|
2006-08-01 17:27:08 +02:00
|
|
|
// returns (number of live nodes, number of replacement-cache nodes)
// summed over all buckets.
boost::tuple<int, int> routing_table::size() const
{
	int live_count = 0;
	int replacement_count = 0;
	for (table_t::const_iterator it = m_buckets.begin()
		, last(m_buckets.end()); it != last; ++it)
	{
		live_count += it->live_nodes.size();
		replacement_count += it->replacements.size();
	}
	return boost::make_tuple(live_count, replacement_count);
}
|
|
|
|
|
2007-05-12 03:52:25 +02:00
|
|
|
// rough estimate of the total number of nodes in the global DHT,
// extrapolated from how deep our routing table is and how full the
// deepest bucket is (each additional bucket level covers half the
// remaining ID space).
size_type routing_table::num_global_nodes() const
{
	int deepest_bucket = 0;
	int deepest_size = 0;
	// walk buckets from shallow to deep, counting how many are full;
	// stop at the first bucket that isn't
	for (table_t::const_iterator i = m_buckets.begin()
		, end(m_buckets.end()); i != end; ++i)
	{
		deepest_size = i->live_nodes.size(); // + i->replacements.size();
		if (deepest_size < m_bucket_size) break;
		// this bucket is full
		++deepest_bucket;
	}

	// the table hasn't been split yet: all we can see is ourselves
	// plus the nodes in the single bucket
	if (deepest_bucket == 0) return 1 + deepest_size;

	// extrapolate: deepest_bucket full levels imply ~2^deepest_bucket
	// subdivisions of the ID space; scale by bucket fill level
	if (deepest_size < m_bucket_size / 2) return (size_type(1) << deepest_bucket) * m_bucket_size;
	else return (size_type(2) << deepest_bucket) * deepest_size;
}
|
|
|
|
|
2010-01-20 04:37:34 +01:00
|
|
|
#if (defined TORRENT_DHT_VERBOSE_LOGGING || defined TORRENT_DEBUG) && TORRENT_USE_IOSTREAM
|
2007-05-12 03:52:25 +02:00
|
|
|
|
2006-08-01 17:27:08 +02:00
|
|
|
// dump a human-readable snapshot of the routing table to 'os':
// a header, an ASCII bar chart of live-node counts per bucket,
// a second chart of replacement-cache counts, and a per-node listing.
// debug/logging builds only (guarded by the enclosing #if).
void routing_table::print_state(std::ostream& os) const
{
	os << "kademlia routing table state\n"
		<< "bucket_size: " << m_bucket_size << "\n"
		<< "global node count: " << num_global_nodes() << "\n"
		<< "node_id: " << m_id << "\n\n";

	// header rule for the live-nodes chart (starts at column 8 to
	// leave room for the "-- live " label)
	os << "number of nodes per bucket:\n-- live ";
	for (int i = 8; i < 160; ++i)
		os << "-";
	os << "\n";

	// vertical bar chart: one column per bucket, one row per slot,
	// tallest possible bucket (bucket 0's limit) decides the height.
	// rows are printed top-down, hence the (max_size - 1 - k) test
	int max_size = bucket_limit(0);
	for (int k = 0; k < max_size; ++k)
	{
		for (table_t::const_iterator i = m_buckets.begin(), end(m_buckets.end());
			i != end; ++i)
		{
			os << (int(i->live_nodes.size()) > (max_size - 1 - k) ? "|" : " ");
		}
		os << "\n";
	}
	// baseline separating the two charts
	for (int i = 0; i < 160; ++i) os << "+";
	os << "\n";

	// replacement-cache chart, drawn downward from the baseline
	for (int k = 0; k < m_bucket_size; ++k)
	{
		for (table_t::const_iterator i = m_buckets.begin(), end(m_buckets.end());
			i != end; ++i)
		{
			os << (int(i->replacements.size()) > k ? "|" : " ");
		}
		os << "\n";
	}
	os << "-- cached ";
	for (int i = 10; i < 160; ++i)
		os << "-";
	os << "\n\n";

	// detailed per-node listing, grouped by bucket
	os << "nodes:\n";
	int bucket_index = 0;
	for (table_t::const_iterator i = m_buckets.begin(), end(m_buckets.end());
		i != end; ++i, ++bucket_index)
	{
//		if (i->live_nodes.empty()) continue;
		os << "=== BUCKET == " << bucket_index
			<< " == " << total_seconds(time_now() - i->last_active)
			<< " seconds ago ===== \n";
		for (bucket_t::const_iterator j = i->live_nodes.begin()
			, end(i->live_nodes.end()); j != end; ++j)
		{
			os << " id: " << j->id
				<< " rtt: " << j->rtt
				<< " ip: " << j->ep()
				<< " fails: " << j->fail_count()
				<< " pinged: " << j->pinged()
				<< " dist: " << distance_exp(m_id, j->id)
				<< "\n";
		}
	}
}
|
|
|
|
|
2007-05-12 03:52:25 +02:00
|
|
|
#endif
|
2010-02-20 17:37:50 +01:00
|
|
|
|
|
|
|
// record activity for the bucket covering 'target' by resetting its
// last-active timestamp, postponing its next refresh.
void routing_table::touch_bucket(node_id const& target)
{
	find_bucket(target)->last_active = time_now();
}
|
|
|
|
|
2010-12-12 20:18:23 +01:00
|
|
|
// returns true if lhs is in more need of a refresh than rhs
|
|
|
|
bool compare_bucket_refresh(routing_table_node const& lhs, routing_table_node const& rhs)
|
|
|
|
{
|
|
|
|
// add the number of nodes to prioritize buckets with few nodes in them
|
2010-12-12 21:36:42 +01:00
|
|
|
return lhs.last_active + seconds(lhs.live_nodes.size() * 5)
|
|
|
|
< rhs.last_active + seconds(rhs.live_nodes.size() * 5);
|
2010-12-12 20:18:23 +01:00
|
|
|
}
|
|
|
|
|
2010-01-03 12:08:39 +01:00
|
|
|
// decides whether any bucket is due for a refresh. If so, writes a
// suitable lookup target into 'target' and returns true; otherwise
// returns false. NOTE(review): this const method assigns
// m_last_self_refresh and m_last_refresh — presumably those members
// are declared mutable; confirm in the header.
bool routing_table::need_refresh(node_id& target) const
{
	ptime now = time_now();

	// refresh our own bucket once every 15 minutes
	if (now - m_last_self_refresh > minutes(15))
	{
		m_last_self_refresh = now;
		// a lookup for our own ID refreshes the bucket closest to us
		target = m_id;
#ifdef TORRENT_DHT_VERBOSE_LOGGING
		TORRENT_LOG(table) << "need_refresh [ bucket: self target: " << target << " ]";
#endif
		return true;
	}

	if (m_buckets.empty()) return false;

	// pick the bucket most in need of refreshing (oldest activity,
	// weighted by population — see compare_bucket_refresh)
	table_t::const_iterator i = std::min_element(m_buckets.begin(), m_buckets.end()
		, &compare_bucket_refresh);

	// rate limiting: a bucket is only stale after 15 minutes, and we
	// never issue refreshes more often than every 45 seconds
	if (now - i->last_active < minutes(15)) return false;
	if (now - m_last_refresh < seconds(45)) return false;

	// generate a random node_id within the given bucket
	target = generate_random_id();
	// bucket k holds IDs sharing a k-bit (well, num_bits-bit) prefix
	// with our own ID
	int num_bits = std::distance(m_buckets.begin(), i) + 1;
	// build a mask with the top num_bits bits set
	node_id mask(0);
	for (int i = 0; i < num_bits; ++i) mask[i/8] |= 0x80 >> (i&7);

	// target = (target & ~mask) | (root & mask)
	// i.e. copy our own ID's prefix onto the random ID
	node_id root = m_id;
	root &= mask;
	target &= ~mask;
	target |= root;

	// make sure this is in another subtree than m_id
	// clear the (num_bits - 1) bit and then set it to the
	// inverse of m_id's corresponding bit.
	target[(num_bits - 1) / 8] &= ~(0x80 >> ((num_bits - 1) % 8));
	target[(num_bits - 1) / 8] |=
		(~(m_id[(num_bits - 1) / 8])) & (0x80 >> ((num_bits - 1) % 8));

	// the constructed target must land exactly in bucket (num_bits-1)
	TORRENT_ASSERT(distance_exp(m_id, target) == 160 - num_bits);

#ifdef TORRENT_DHT_VERBOSE_LOGGING
	TORRENT_LOG(table) << "need_refresh [ bucket: " << num_bits << " target: " << target << " ]";
#endif
	m_last_refresh = now;
	return true;
}
|
2006-08-01 17:27:08 +02:00
|
|
|
|
|
|
|
// append every node held in the replacement caches of all buckets
// onto the back of 'nodes'.
void routing_table::replacement_cache(bucket_t& nodes) const
{
	for (table_t::const_iterator it = m_buckets.begin()
		, last(m_buckets.end()); it != last; ++it)
	{
		std::copy(it->replacements.begin(), it->replacements.end()
			, std::back_inserter(nodes));
	}
}
|
|
|
|
|
2010-01-03 12:08:39 +01:00
|
|
|
// maps a node ID onto the bucket responsible for it, creating the
// first bucket on demand. IDs deeper than the current table depth
// map to the last (deepest) bucket.
routing_table::table_t::iterator routing_table::find_bucket(node_id const& id)
{
//	TORRENT_ASSERT(id != m_id);

	// lazily create the first bucket
	if (m_buckets.empty())
	{
		m_buckets.push_back(routing_table_node());
		// add 160 seconds to prioritize higher buckets (i.e. buckets closer to us)
		m_buckets.back().last_active = min_time() + seconds(160);
	}

	int const table_depth = int(m_buckets.size());
	// the bucket index grows with the shared prefix length between
	// 'id' and our own ID; clamp to the deepest existing bucket
	int index = 159 - distance_exp(m_id, id);
	if (index > table_depth - 1) index = table_depth - 1;
	TORRENT_ASSERT(index < int(m_buckets.size()));
	TORRENT_ASSERT(index >= 0);

	table_t::iterator result = m_buckets.begin();
	std::advance(result, index);
	return result;
}
|
|
|
|
|
2011-01-08 09:54:51 +01:00
|
|
|
// returns true when the two nodes' IP addresses are suspiciously
// close to each other (sharing more prefix bits than the cutoff),
// in which case the second one should not be added to the table.
bool compare_ip_cidr(node_entry const& lhs, node_entry const& rhs)
{
	TORRENT_ASSERT(lhs.addr.is_v4() == rhs.addr.is_v4());
	// maximum number of matching prefix bits we tolerate: /8 for
	// IPv4, /64 for IPv6
	int const match_cutoff = rhs.addr.is_v4() ? 8 : 64;
	return cidr_distance(lhs.addr, rhs.addr) <= match_cutoff;
}
|
|
|
|
|
2011-01-17 08:49:44 +01:00
|
|
|
// find the node entry matching 'ep' exactly (address AND port),
// searching both the replacement cache and the live nodes of every
// bucket. On a hit, *bucket is set to the containing bucket and a
// pointer to the entry is returned; otherwise *bucket is set to
// m_buckets.end() and 0 is returned.
node_entry* routing_table::find_node(udp::endpoint const& ep, routing_table::table_t::iterator* bucket)
{
	for (table_t::iterator it = m_buckets.begin()
		, last(m_buckets.end()); it != last; ++it)
	{
		// replacement cache first, mirroring the insertion logic
		for (bucket_t::iterator n = it->replacements.begin();
			n != it->replacements.end(); ++n)
		{
			if (n->addr == ep.address() && n->port == ep.port())
			{
				*bucket = it;
				return &*n;
			}
		}
		for (bucket_t::iterator n = it->live_nodes.begin();
			n != it->live_nodes.end(); ++n)
		{
			if (n->addr == ep.address() && n->port == ep.port())
			{
				*bucket = it;
				return &*n;
			}
		}
	}
	// no entry with this endpoint anywhere in the table
	*bucket = m_buckets.end();
	return 0;
}
|
|
|
|
|
2012-09-22 20:15:29 +02:00
|
|
|
// attempt to insert node 'e' into the routing table. Handles
// duplicate-IP detection, updating existing entries, replacement
// strategies for full buckets, and splitting the deepest bucket.
// The return value is need_bootstrap() sampled on entry — i.e.
// whether the table was (nearly) empty before this call — not
// whether the node was actually inserted.
bool routing_table::add_node(node_entry e)
{
	// never add known bootstrap/router nodes to the table itself
	if (m_router_nodes.find(e.ep()) != m_router_nodes.end()) return false;

	bool ret = need_bootstrap();

	// don't add ourself
	if (e.id == m_id) return ret;

	// do we already have this IP in the table?
	if (m_ips.find(e.addr.to_v4().to_bytes()) != m_ips.end())
	{
		// this exact IP already exists in the table. It might be the case
		// that the node changed IP. If pinged is true, and the port also
		// matches the we assume it's in fact the same node, and just update
		// the routing table
		// pinged means that we have sent a message to the IP, port and received
		// a response with a correct transaction ID, i.e. it is verified to not
		// be the result of a poioned routing table

		node_entry* existing = 0;
		table_t::iterator existing_bucket;
		if (!e.pinged() || (existing = find_node(e.ep(), &existing_bucket)) == 0)
		{
			// the new node is not pinged, or it's not an existing node
			// we should ignore it, unless we allow duplicate IPs in our
			// routing table
			if (m_settings.restrict_routing_ips)
			{
#ifdef TORRENT_DHT_VERBOSE_LOGGING
				TORRENT_LOG(table) << "ignoring node (duplicate IP): "
					<< e.id << " " << e.addr;
#endif
				return ret;
			}
		}
		if (e.pinged() && existing)
		{
			// if the node ID is the same, just update the failcount
			// and be done with it
			if (existing->id == e.id)
			{
				existing->timeout_count = 0;
				existing->update_rtt(e.rtt);
				return ret;
			}

			// delete the current entry before we instert the new one
			bucket_t& b = existing_bucket->live_nodes;
			bucket_t& rb = existing_bucket->replacements;
			bool done = false;
			// look for the old entry among the live nodes first
			for (bucket_t::iterator i = b.begin(), end(b.end());
				i != end; ++i)
			{
				if (i->addr != e.addr || i->port != e.port) continue;
#ifdef TORRENT_DHT_VERBOSE_LOGGING
				TORRENT_LOG(table) << "node ID changed, deleting old entry: "
					<< i->id << " " << i->addr;
#endif
				b.erase(i);
				done = true;
				break;
			}
			if (!done)
			{
				// not a live node; it must be in the replacement cache
				for (bucket_t::iterator i = rb.begin(), end(rb.end());
					i != end; ++i)
				{
					if (i->addr != e.addr || i->port != e.port) continue;
#ifdef TORRENT_DHT_VERBOSE_LOGGING
					TORRENT_LOG(table) << "node ID changed, deleting old entry: "
						<< i->id << " " << i->addr;
#endif
					rb.erase(i);
					done = true;
					break;
				}
			}
			TORRENT_ASSERT(done);
			m_ips.erase(e.addr.to_v4().to_bytes());
		}
	}

	table_t::iterator i = find_bucket(e.id);
	bucket_t& b = i->live_nodes;
	bucket_t& rb = i->replacements;
	int bucket_index = std::distance(m_buckets.begin(), i);
	int bucket_size_limit = bucket_limit(bucket_index);

	bucket_t::iterator j;

	// if the node already exists, we don't need it
	j = std::find_if(b.begin(), b.end()
		, boost::bind(&node_entry::id, _1) == e.id);

	if (j != b.end())
	{
		// a new IP address just claimed this node-ID
		// ignore it
		if (j->addr != e.addr || j->port != e.port) return ret;

		// we already have the node in our bucket
		TORRENT_ASSERT(j->id == e.id && j->ep() == e.ep());
		j->timeout_count = 0;
		j->update_rtt(e.rtt);
//		TORRENT_LOG(table) << "updating node: " << i->id << " " << i->addr;
		return ret;
	}

	// if this node exists in the replacement bucket. update it and
	// pull it out from there. We may add it back to the replacement
	// bucket, but we may also replace a node in the main bucket, now
	// that we have an updated RTT
	j = std::find_if(rb.begin(), rb.end(), boost::bind(&node_entry::id, _1) == e.id);
	if (j != rb.end())
	{
		// a new IP address just claimed this node-ID
		// ignore it
		if (j->addr != e.addr || j->port != e.port) return ret;
		TORRENT_ASSERT(j->id == e.id && j->ep() == e.ep());
		j->timeout_count = 0;
		j->update_rtt(e.rtt);
		// carry the refreshed entry forward through the rest of the
		// insertion logic
		e = *j;
		m_ips.erase(j->addr.to_v4().to_bytes());
		rb.erase(j);
	}

	if (m_settings.restrict_routing_ips)
	{
		// don't allow multiple entries from IPs very close to each other
		j = std::find_if(b.begin(), b.end(), boost::bind(&compare_ip_cidr, _1, e));
		if (j != b.end())
		{
			// we already have a node in this bucket with an IP very
			// close to this one. We know that it's not the same, because
			// it claims a different node-ID. Ignore this to avoid attacks
#ifdef TORRENT_DHT_VERBOSE_LOGGING
			TORRENT_LOG(table) << "ignoring node: " << e.id << " " << e.addr
				<< " existing node: "
				<< j->id << " " << j->addr;
#endif
			return ret;
		}

		j = std::find_if(rb.begin(), rb.end(), boost::bind(&compare_ip_cidr, _1, e));
		if (j != rb.end())
		{
			// same thing but for the replacement bucket
#ifdef TORRENT_DHT_VERBOSE_LOGGING
			TORRENT_LOG(table) << "ignoring (replacement) node: " << e.id << " " << e.addr
				<< " existing node: "
				<< j->id << " " << j->addr;
#endif
			return ret;
		}
	}

	// if there's room in the main bucket, just insert it
	if (int(b.size()) < bucket_size_limit)
	{
		if (b.empty()) b.reserve(bucket_size_limit);
		b.push_back(e);
		m_ips.insert(e.addr.to_v4().to_bytes());
//		TORRENT_LOG(table) << "inserting node: " << e.id << " " << e.addr;
		return ret;
	}

	// if there is no room, we look for nodes that are not 'pinged',
	// i.e. we haven't confirmed that they respond to messages.
	// Then we look for nodes marked as stale
	// in the k-bucket. If we find one, we can replace it.
	// as the last replacement strategy, we look for nodes with the
	// highest RTT, and if it's higher than the new node, we replace it

	// can we split the bucket?
	bool can_split = false;

	if (e.pinged() && e.fail_count() == 0)
	{
		// only nodes that are pinged and haven't failed
		// can split the bucket, and we can only split
		// the last bucket
		can_split = (boost::next(i) == m_buckets.end() && m_buckets.size() < 159);

		// if the node we're trying to insert is considered pinged,
		// we may replace other nodes that aren't pinged

		j = std::find_if(b.begin(), b.end(), boost::bind(&node_entry::pinged, _1) == false);

		if (j != b.end() && !j->pinged())
		{
			// j points to a node that has not been pinged.
			// Replace it with this new one
			m_ips.erase(j->addr.to_v4().to_bytes());
			*j = e;
			m_ips.insert(e.addr.to_v4().to_bytes());
//			TORRENT_LOG(table) << "replacing unpinged node: " << e.id << " " << e.addr;
			return ret;
		}

		// A node is considered stale if it has failed at least one
		// time. Here we choose the node that has failed most times.
		// If we don't find one, place this node in the replacement-
		// cache and replace any nodes that will fail in the future
		// with nodes from that cache.

		j = std::max_element(b.begin(), b.end()
			, boost::bind(&node_entry::fail_count, _1)
				< boost::bind(&node_entry::fail_count, _2));

		if (j != b.end() && j->fail_count() > 0)
		{
			// i points to a node that has been marked
			// as stale. Replace it with this new one
			m_ips.erase(j->addr.to_v4().to_bytes());
			*j = e;
			m_ips.insert(e.addr.to_v4().to_bytes());
//			TORRENT_LOG(table) << "replacing stale node: " << e.id << " " << e.addr;
			return ret;
		}

		// in order to keep lookup times small, prefer nodes with low RTTs

		j = std::max_element(b.begin(), b.end()
			, boost::bind(&node_entry::rtt, _1)
				< boost::bind(&node_entry::rtt, _2));

		if (j != b.end() && j->rtt > e.rtt)
		{
			m_ips.erase(j->addr.to_v4().to_bytes());
			*j = e;
			m_ips.insert(e.addr.to_v4().to_bytes());
//			TORRENT_LOG(table) << "replacing node with higher RTT: " << e.id << " " << e.addr;
			return ret;
		}
	}

	// if we can't split, try to insert into the replacement bucket

	if (!can_split)
	{
		// if we don't have any identified stale nodes in
		// the bucket, and the bucket is full, we have to
		// cache this node and wait until some node fails
		// and then replace it.

		j = std::find_if(rb.begin(), rb.end()
			, boost::bind(&node_entry::id, _1) == e.id);

		// if the node is already in the replacement bucket
		// just return.
		if (j != rb.end())
		{
			// if the IP address matches, it's the same node
			// make sure it's marked as pinged
			if (j->ep() == e.ep()) j->set_pinged();
			return ret;
		}

		if ((int)rb.size() >= m_bucket_size)
		{
			// if the replacement bucket is full, remove the oldest entry
			// but prefer nodes that haven't been pinged, since they are
			// less reliable than this one, that has been pinged
			j = std::find_if(rb.begin(), rb.end(), boost::bind(&node_entry::pinged, _1) == false);
			if (j == rb.end()) j = rb.begin();
			m_ips.erase(j->addr.to_v4().to_bytes());
			rb.erase(j);
		}

		if (rb.empty()) rb.reserve(m_bucket_size);
		rb.push_back(e);
		m_ips.insert(e.addr.to_v4().to_bytes());
//		TORRENT_LOG(table) << "inserting node in replacement cache: " << e.id << " " << e.addr;
		return ret;
	}

	// this is the last bucket, and it's full already. Split
	// it by adding another bucket
	m_buckets.push_back(routing_table_node());
	// the extra seconds added to the end is to prioritize
	// buckets closer to us when refreshing
	m_buckets.back().last_active = min_time() + seconds(160 - m_buckets.size());
	bucket_t& new_bucket = m_buckets.back().live_nodes;
	bucket_t& new_replacement_bucket = m_buckets.back().replacements;

	// move any node whose (160 - distane_exp(m_id, id)) >= (i - m_buckets.begin())
	// to the new bucket
	int new_bucket_size = bucket_limit(bucket_index + 1);
	for (bucket_t::iterator j = b.begin(); j != b.end();)
	{
		// nodes still belonging to the old bucket stay put
		if (distance_exp(m_id, j->id) >= 159 - bucket_index)
		{
			++j;
			continue;
		}
		// this entry belongs in the new bucket
		if (int(new_bucket.size()) < bucket_size_limit)
			new_bucket.push_back(*j);
		else if (int(new_replacement_bucket.size()) < m_bucket_size)
			new_replacement_bucket.push_back(*j);
		j = b.erase(j);
	}

	// split the replacement bucket as well. If the live bucket
	// is not full anymore, also move the replacement entries
	// into the main bucket
	for (bucket_t::iterator j = rb.begin(); j != rb.end();)
	{
		if (distance_exp(m_id, j->id) >= 159 - bucket_index)
		{
			// still belongs to the old bucket; promote to live if
			// the split freed up room, otherwise keep it cached
			if (int(b.size()) >= bucket_size_limit)
			{
				++j;
				continue;
			}
			b.push_back(*j);
		}
		else
		{
			// this entry belongs in the new bucket
			if (int(new_bucket.size()) < new_bucket_size)
				new_bucket.push_back(*j);
			else if (int(new_replacement_bucket.size()) < m_bucket_size)
				new_replacement_bucket.push_back(*j);
		}
		j = rb.erase(j);
	}

	bool added = false;
	// now insert the new node in the appropriate bucket
	if (distance_exp(m_id, e.id) >= 159 - bucket_index)
	{
		// it belongs in the old (shallower) bucket
		if (int(b.size()) < bucket_size_limit)
		{
			b.push_back(e);
			added = true;
		}
		else if (int(rb.size()) < m_bucket_size)
		{
			rb.push_back(e);
			added = true;
		}
	}
	else
	{
		// it belongs in the freshly split (deeper) bucket
		if (int(new_bucket.size()) < new_bucket_size)
		{
			new_bucket.push_back(e);
			added = true;
		}
		else if (int(new_replacement_bucket.size()) < m_bucket_size)
		{
			new_replacement_bucket.push_back(e);
			added = true;
		}
	}
	if (added) m_ips.insert(e.addr.to_v4().to_bytes());
	return ret;
}
|
|
|
|
|
|
|
|
void routing_table::for_each_node(
|
|
|
|
void (*fun1)(void*, node_entry const&)
|
|
|
|
, void (*fun2)(void*, node_entry const&)
|
|
|
|
, void* userdata) const
|
|
|
|
{
|
|
|
|
for (table_t::const_iterator i = m_buckets.begin()
|
|
|
|
, end(m_buckets.end()); i != end; ++i)
|
|
|
|
{
|
|
|
|
if (fun1)
|
|
|
|
{
|
|
|
|
for (bucket_t::const_iterator j = i->live_nodes.begin()
|
|
|
|
, end(i->live_nodes.end()); j != end; ++j)
|
|
|
|
fun1(userdata, *j);
|
|
|
|
}
|
|
|
|
if (fun2)
|
|
|
|
{
|
|
|
|
for (bucket_t::const_iterator j = i->replacements.begin()
|
|
|
|
, end(i->replacements.end()); j != end; ++j)
|
|
|
|
fun2(userdata, *j);
|
|
|
|
}
|
|
|
|
}
|
2006-08-01 17:27:08 +02:00
|
|
|
}
|
|
|
|
|
2011-01-17 08:49:44 +01:00
|
|
|
// a message to the node with the given id, at endpoint ep, failed.
// update the routing table: record the failure on the entry, remove
// it, or swap in a node from the replacement bucket
void routing_table::node_failed(node_id const& id, udp::endpoint const& ep)
{
	// if messages to ourself fails, ignore it
	if (id == m_id) return;

	table_t::iterator i = find_bucket(id);
	bucket_t& b = i->live_nodes;
	bucket_t& rb = i->replacements;

	// look up the entry with this ID in the live bucket
	bucket_t::iterator j = std::find_if(b.begin(), b.end()
		, boost::bind(&node_entry::id, _1) == id);

	// unknown node, nothing to update
	if (j == b.end()) return;

	// if the endpoint doesn't match, it's a different node
	// claiming the same ID. The node we have in our routing
	// table is not necessarily stale
	if (j->ep() != ep) return;

	if (rb.empty())
	{
		// there is no replacement to promote; just record the
		// failure on the entry itself
		j->timed_out();

#ifdef TORRENT_DHT_VERBOSE_LOGGING
		TORRENT_LOG(table) << " NODE FAILED"
			" id: " << id <<
			" ip: " << j->ep() <<
			" fails: " << j->fail_count() <<
			" pinged: " << j->pinged() <<
			" up-time: " << total_seconds(time_now() - j->first_seen);
#endif

		// if this node has failed too many times, or if this node
		// has never responded at all, remove it
		if (j->fail_count() >= m_settings.max_fail_count || !j->pinged())
		{
			// drop its address from the tracked-IP set as well
			// NOTE(review): to_v4() assumes the stored address is
			// IPv4 — same assumption is made in add_node above
			m_ips.erase(j->addr.to_v4().to_bytes());
			b.erase(j);
		}
		return;
	}

	// we have replacement nodes: drop the failed entry
	// unconditionally and promote a replacement in its place
	m_ips.erase(j->addr.to_v4().to_bytes());
	b.erase(j);

	// sort by RTT first, to find the node with the lowest
	// RTT that is pinged
	std::sort(rb.begin(), rb.end()
		, boost::bind(&node_entry::rtt, _1) < boost::bind(&node_entry::rtt, _2));

	// prefer a replacement that has responded to us (pinged). If
	// none has, fall back to the lowest-RTT replacement
	j = std::find_if(rb.begin(), rb.end(), boost::bind(&node_entry::pinged, _1));
	if (j == rb.end()) j = rb.begin();
	b.push_back(*j);
	rb.erase(j);
}
|
|
|
|
|
2006-09-27 19:20:18 +02:00
|
|
|
// registers a router (bootstrap) node. Router nodes are kept in
// their own set, separate from the routing table buckets
void routing_table::add_router_node(udp::endpoint router)
{
	m_router_nodes.insert(router);
}
|
|
|
|
|
2010-01-03 12:08:39 +01:00
|
|
|
// we heard from this node, but we don't know if it
|
|
|
|
// was spoofed or not (i.e. pinged == false)
|
|
|
|
void routing_table::heard_about(node_id const& id, udp::endpoint const& ep)
|
|
|
|
{
|
2012-09-22 20:15:29 +02:00
|
|
|
add_node(node_entry(id, ep));
|
2010-01-03 12:08:39 +01:00
|
|
|
}
|
|
|
|
|
2006-08-01 17:27:08 +02:00
|
|
|
// this function is called every time the node sees
|
|
|
|
// a sign of a node being alive. This node will either
|
|
|
|
// be inserted in the k-buckets or be moved to the top
|
|
|
|
// of its bucket.
|
|
|
|
// the return value indicates if the table needs a refresh.
|
|
|
|
// if true, the node should refresh the table (i.e. do a find_node
|
|
|
|
// on its own id)
|
2012-09-22 20:15:29 +02:00
|
|
|
bool routing_table::node_seen(node_id const& id, udp::endpoint ep, int rtt)
|
2006-08-01 17:27:08 +02:00
|
|
|
{
|
2012-09-22 20:15:29 +02:00
|
|
|
return add_node(node_entry(id, ep, rtt, true));
|
2006-08-01 17:27:08 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
bool routing_table::need_bootstrap() const
|
|
|
|
{
|
2010-02-20 17:37:50 +01:00
|
|
|
ptime now = time_now();
|
|
|
|
if (now - m_last_bootstrap < seconds(30)) return false;
|
|
|
|
|
2010-01-03 12:08:39 +01:00
|
|
|
for (table_t::const_iterator i = m_buckets.begin()
|
|
|
|
, end(m_buckets.end()); i != end; ++i)
|
2006-08-01 17:27:08 +02:00
|
|
|
{
|
2010-01-03 12:08:39 +01:00
|
|
|
for (bucket_t::const_iterator j = i->live_nodes.begin()
|
2010-01-09 01:38:55 +01:00
|
|
|
, end(i->live_nodes.end()); j != end; ++j)
|
2010-01-03 12:08:39 +01:00
|
|
|
{
|
|
|
|
if (j->confirmed()) return false;
|
|
|
|
}
|
2006-08-01 17:27:08 +02:00
|
|
|
}
|
2010-02-20 17:37:50 +01:00
|
|
|
m_last_bootstrap = now;
|
2006-08-01 17:27:08 +02:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
// fills the vector with the k nodes from our buckets that
|
|
|
|
// are nearest to the given id.
|
|
|
|
void routing_table::find_node(node_id const& target
|
2008-11-10 03:08:42 +01:00
|
|
|
, std::vector<node_entry>& l, int options, int count)
|
2006-08-01 17:27:08 +02:00
|
|
|
{
|
|
|
|
l.clear();
|
|
|
|
if (count == 0) count = m_bucket_size;
|
|
|
|
|
2010-01-03 12:08:39 +01:00
|
|
|
table_t::iterator i = find_bucket(target);
|
2012-09-23 05:49:04 +02:00
|
|
|
int bucket_index = std::distance(m_buckets.begin(), i);
|
|
|
|
int bucket_size_limit = bucket_limit(bucket_index);
|
2006-08-01 17:27:08 +02:00
|
|
|
|
2012-09-23 05:49:04 +02:00
|
|
|
l.reserve(bucket_size_limit);
|
2006-08-01 17:27:08 +02:00
|
|
|
|
2010-01-03 12:08:39 +01:00
|
|
|
table_t::iterator j = i;
|
|
|
|
|
2011-02-21 06:24:41 +01:00
|
|
|
for (; j != m_buckets.end() && int(l.size()) < count; ++j)
|
2006-08-01 17:27:08 +02:00
|
|
|
{
|
2010-01-03 12:08:39 +01:00
|
|
|
bucket_t& b = j->live_nodes;
|
2008-11-10 03:08:42 +01:00
|
|
|
if (options & include_failed)
|
|
|
|
{
|
2012-09-23 05:49:04 +02:00
|
|
|
copy(b.begin(), b.end()
|
2010-01-03 12:08:39 +01:00
|
|
|
, std::back_inserter(l));
|
2008-11-10 03:08:42 +01:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2012-09-23 05:49:04 +02:00
|
|
|
std::remove_copy_if(b.begin(), b.end()
|
2010-01-03 12:08:39 +01:00
|
|
|
, std::back_inserter(l)
|
2010-04-30 21:08:16 +02:00
|
|
|
, !boost::bind(&node_entry::confirmed, _1));
|
2008-11-10 03:08:42 +01:00
|
|
|
}
|
2006-08-01 17:27:08 +02:00
|
|
|
|
2012-09-23 05:49:04 +02:00
|
|
|
if (int(l.size()) >= count)
|
|
|
|
{
|
|
|
|
// sort the nodes by how close they are to the target
|
|
|
|
std::sort(l.begin(), l.end(), boost::bind(&compare_ref
|
|
|
|
, boost::bind(&node_entry::id, _1)
|
|
|
|
, boost::bind(&node_entry::id, _2), target));
|
|
|
|
|
|
|
|
l.resize(count);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
2006-08-01 17:27:08 +02:00
|
|
|
|
2010-01-03 12:08:39 +01:00
|
|
|
// if we still don't have enough nodes, copy nodes
|
|
|
|
// further away from us
|
|
|
|
|
2012-09-23 05:49:04 +02:00
|
|
|
if (i == m_buckets.begin())
|
|
|
|
{
|
|
|
|
// sort the nodes by how close they are to the target
|
|
|
|
std::sort(l.begin(), l.end(), boost::bind(&compare_ref
|
|
|
|
, boost::bind(&node_entry::id, _1)
|
|
|
|
, boost::bind(&node_entry::id, _2), target));
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2010-01-03 12:08:39 +01:00
|
|
|
j = i;
|
2006-08-01 17:27:08 +02:00
|
|
|
|
2010-01-03 12:08:39 +01:00
|
|
|
do
|
2006-08-01 17:27:08 +02:00
|
|
|
{
|
2010-01-10 05:03:11 +01:00
|
|
|
--j;
|
2010-01-03 12:08:39 +01:00
|
|
|
bucket_t& b = j->live_nodes;
|
2006-08-01 17:27:08 +02:00
|
|
|
|
2008-11-10 03:08:42 +01:00
|
|
|
if (options & include_failed)
|
|
|
|
{
|
2012-09-23 05:49:04 +02:00
|
|
|
std::copy(b.begin(), b.end(), std::back_inserter(l));
|
2008-11-10 03:08:42 +01:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2012-09-23 05:49:04 +02:00
|
|
|
std::remove_copy_if(b.begin(), b.end(), std::back_inserter(l)
|
|
|
|
, !boost::bind(&node_entry::confirmed, _1));
|
|
|
|
}
|
|
|
|
|
|
|
|
if (int(l.size()) >= count)
|
|
|
|
{
|
|
|
|
// sort the nodes by how close they are to the target
|
|
|
|
std::sort(l.begin(), l.end(), boost::bind(&compare_ref
|
|
|
|
, boost::bind(&node_entry::id, _1)
|
|
|
|
, boost::bind(&node_entry::id, _2), target));
|
|
|
|
|
|
|
|
l.resize(count);
|
|
|
|
return;
|
2008-11-10 03:08:42 +01:00
|
|
|
}
|
2006-08-01 17:27:08 +02:00
|
|
|
}
|
2011-02-21 06:24:41 +01:00
|
|
|
while (j != m_buckets.begin() && int(l.size()) < count);
|
2012-09-23 05:49:04 +02:00
|
|
|
|
|
|
|
// sort the nodes by how close they are to the target
|
|
|
|
std::sort(l.begin(), l.end(), boost::bind(&compare_ref
|
|
|
|
, boost::bind(&node_entry::id, _1)
|
|
|
|
, boost::bind(&node_entry::id, _2), target));
|
|
|
|
|
|
|
|
if (int(l.size()) >= count)
|
|
|
|
l.resize(count);
|
2006-08-01 17:27:08 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
} } // namespace libtorrent::dht
|
|
|
|
|