/*

Copyright (c) 2014-2015, Arvid Norberg
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the distribution.
    * Neither the name of the author nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

*/

#include "libtorrent/kademlia/dht_settings.hpp"
#include "libtorrent/io_service.hpp"
#include "libtorrent/deadline_timer.hpp"
#include "libtorrent/address.hpp"
#include "libtorrent/time.hpp"
#include "libtorrent/kademlia/node.hpp"
#include "libtorrent/kademlia/dht_observer.hpp"
#include "libtorrent/aux_/session_impl.hpp"
#include "setup_transfer.hpp"
#include <memory> // for unique_ptr
#include <random>
#include "libtorrent/socket_io.hpp" // print_endpoint
#include "libtorrent/random.hpp"
#include "libtorrent/crc32c.hpp"
#include "libtorrent/alert_types.hpp" // for dht_routing_bucket
#include "libtorrent/aux_/listen_socket_handle.hpp"

#include "setup_dht.hpp"

using namespace sim;
using namespace lt;

#ifndef TORRENT_DISABLE_DHT

namespace {

	// this is the IP address assigned to node 'idx'
	asio::ip::address addr_from_int(int /* idx */)
	{
		return rand_v4();
	}

	asio::ip::address addr6_from_int(int /* idx */)
	{
		asio::ip::address_v6::bytes_type bytes;
		for (uint8_t& b : bytes) b = uint8_t(lt::random(0xff));
		return asio::ip::address_v6(bytes);
	}

	// this is the node ID assigned to node 'idx'
	dht::node_id id_from_addr(lt::address const& addr)
	{
		return dht::generate_id_impl(addr, 0);
	}

	std::shared_ptr<lt::aux::listen_socket_t> sim_listen_socket(tcp::endpoint ep)
	{
		auto ls = std::make_shared<lt::aux::listen_socket_t>();
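		// seed the socket's external-address voting with our own simulated IP,
		// so the node treats `ep` as its publicly visible address right away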
		ls->external_address.cast_vote(ep.address()
			, lt::aux::session_interface::source_dht, lt::address());
		ls->local_endpoint = ep;
		return ls;
	}

} // anonymous namespace

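// a single node in the simulated DHT network. It drives a lt::dht::node
// instance directly on top of a simulated UDP socket.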
struct dht_node final : lt::dht::socket_manager
{
	dht_node(sim::simulation& sim, lt::dht::settings const& sett, lt::counters& cnt
		, int const idx, std::uint32_t const flags)
		: m_io_service(sim, (flags & dht_network::bind_ipv6) ? addr6_from_int(idx) : addr_from_int(idx))
		, m_dht_storage(lt::dht::dht_default_storage_constructor(sett))
		, m_add_dead_nodes((flags & dht_network::add_dead_nodes) != 0)
		, m_ipv6((flags & dht_network::bind_ipv6) != 0)
		, m_socket(m_io_service)
		, m_ls(sim_listen_socket(tcp::endpoint(m_io_service.get_ips().front(), 6881)))
		, m_dht(m_ls, this, sett, id_from_addr(m_io_service.get_ips().front())
			, nullptr, cnt
			, [](lt::dht::node_id const&, std::string const&) -> lt::dht::node* { return nullptr; }
			, *m_dht_storage)
	{
		m_dht_storage->update_node_ids({id_from_addr(m_io_service.get_ips().front())});
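		// every simulated node has its own IP address, so they can all listen on
		// the same port (6881)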
		sock().open(m_ipv6 ? asio::ip::udp::v6() : asio::ip::udp::v4());
		sock().bind(asio::ip::udp::endpoint(
			m_ipv6 ? lt::address(lt::address_v6::any()) : lt::address(lt::address_v4::any()), 6881));

		sock().non_blocking(true);
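		// kick off the receive loop; on_read() re-issues this call after handling
		// each incoming packet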
		sock().async_receive_from(asio::mutable_buffers_1(m_buffer, sizeof(m_buffer))
			, m_ep, [&](lt::error_code const& ec, std::size_t bytes_transferred)
			{ this->on_read(ec, bytes_transferred); });
	}

	// This type is not copyable, because the socket and the dht node are not
	// copyable.
	dht_node(dht_node const&) = delete;
	dht_node& operator=(dht_node const&) = delete;

	// it's also not movable, because it passes its this-pointer to the async
	// receive function, which pins this object down. However, std::vector cannot
	// hold non-movable and non-copyable types.
	dht_node(dht_node&& n) = delete;
	dht_node& operator=(dht_node&&) = delete;

	void on_read(lt::error_code const& ec, std::size_t bytes_transferred)
	{
		if (ec) return;

		using lt::entry;
		using lt::bdecode;

		int pos;
		error_code err;

		// since the simulation is single threaded, we can get away with just
		// allocating a single one of these
		static bdecode_node msg;
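		// decode the incoming bencoded message; the last two arguments cap the
		// nesting depth (10) and the number of tokens (500) we accept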
		int const ret = bdecode(m_buffer, m_buffer + bytes_transferred, msg, err, &pos, 10, 500);
		if (ret != 0) return;

		if (msg.type() != bdecode_node::dict_t) return;

		lt::dht::msg m(msg, m_ep);
		dht().incoming(m_ls, m);

		sock().async_receive_from(asio::mutable_buffers_1(m_buffer, sizeof(m_buffer))
			, m_ep, [&](lt::error_code const& ec, std::size_t bytes_transferred)
			{ this->on_read(ec, bytes_transferred); });
	}

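	// the simulation never rate-limits outgoing DHT packets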
	bool has_quota() override { return true; }
	bool send_packet(lt::aux::listen_socket_handle const&, entry& e, udp::endpoint const& addr) override
	{
		// since the simulation is single threaded, we can get away with allocating
		// just a single send buffer
		static std::vector<char> send_buf;

		send_buf.clear();
		bencode(std::back_inserter(send_buf), e);
		sock().send_to(boost::asio::const_buffers_1(send_buf.data(), int(send_buf.size())), addr);
		return true;
	}

	// the node_id and IP address of this node
	std::pair<dht::node_id, lt::udp::endpoint> node_info() const
	{
		return std::make_pair(dht().nid(), lt::udp::endpoint(m_io_service.get_ips().front(), 6881));
	}

	void bootstrap(std::vector<std::pair<dht::node_id, lt::udp::endpoint>> const& nodes)
	{
		// we don't want to tell every node about every other node. That's way too
		// expensive. Instead, pick a random subset of nodes, proportionate to the
		// bucket it would fall into

		dht::node_id const id = dht().nid();

		// the number of slots left per bucket
		std::array<int, 160> nodes_per_bucket;
		nodes_per_bucket.fill(8);

		// when we use the larger routing table, the low buckets are larger
		nodes_per_bucket[0] = 128;
		nodes_per_bucket[1] = 64;
		nodes_per_bucket[2] = 32;
		nodes_per_bucket[3] = 16;

		// pick nodes in random order to provide good connectivity
		std::vector<std::size_t> order(nodes.size());
		for (size_t i = 0; i < order.size(); ++i) order[i] = i;

		while (!order.empty())
		{
			auto const idx = lt::random(static_cast<uint32_t>(order.size() - 1));
			assert(idx >= 0 && idx < order.size());
			auto const& n = nodes[order[idx]];
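			// swap-remove the picked entry so each node is only considered once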
			if (idx < order.size() - 1) order[idx] = order.back();
			order.pop_back();

			if (n.first == id) continue;
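			// map the XOR distance to a routing table bucket index; bucket 0 holds
			// the nodes farthest away from us (top bit of the distance set)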
			int const bucket = 159 - dht::distance_exp(id, n.first);

/*			std::printf("%s ^ %s = %s %d\n"
				, to_hex(id.to_string()).c_str()
				, to_hex(n.first.to_string()).c_str()
				, to_hex(dht::distance(id, n.first).to_string()).c_str()
				, bucket);
*/
			// there are no more slots in this bucket, just move on
			if (nodes_per_bucket[bucket] == 0) continue;
			--nodes_per_bucket[bucket];
			bool const added = dht().m_table.node_seen(n.first, n.second, lt::random(300) + 10);
			TEST_CHECK(added);
			if (m_add_dead_nodes)
			{
				// generate a random node ID that would fall in `bucket`
				dht::node_id const mask = dht::generate_prefix_mask(bucket + 1);
				udp::endpoint const ep = rand_udp_ep(m_ipv6 ? rand_v6 : rand_v4);
				dht::node_id target = dht::generate_id_impl(ep.address(), 0) & ~mask;
				target |= id & mask;
				dht().m_table.node_seen(target, ep, lt::random(300) + 10);
			}
		}

/*
		for (int i = 0; i < 40; ++i)
		{
			std::printf("%d ", nodes_per_bucket[i]);
		}
		std::printf("\n");
*/
//#error add invalid IPs as well, to simulate churn
	}

	void stop()
	{
		sock().close();
	}

	lt::dht::node& dht() { return m_dht; }
	lt::dht::node const& dht() const { return m_dht; }

private:
	asio::io_service m_io_service;
	std::shared_ptr<dht::dht_storage_interface> m_dht_storage;
	bool const m_add_dead_nodes;
	bool const m_ipv6;
	lt::udp::socket m_socket;
	lt::udp::socket& sock() { return m_socket; }
	std::shared_ptr<lt::aux::listen_socket_t> m_ls;
	lt::dht::node m_dht;
	lt::udp::endpoint m_ep;
	char m_buffer[1300];
};

dht_network::dht_network(sim::simulation& sim, int num_nodes, std::uint32_t flags)
{
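	// the simulated nodes use random IPs from all over the address space, so
	// disable the sanity filters that would otherwise reject many of them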
	m_sett.ignore_dark_internet = false;
	m_sett.restrict_routing_ips = false;

	// TODO: how can we introduce churn among peers?

	std::vector<std::pair<dht::node_id, lt::udp::endpoint>> all_nodes;
	all_nodes.reserve(num_nodes);

	for (int i = 0; i < num_nodes; ++i)
	{
		// node 0 is the one we log
		m_nodes.emplace_back(sim, m_sett, m_cnt, i, flags);
		all_nodes.push_back(m_nodes.back().node_info());
	}

	for (auto& n : m_nodes) n.bootstrap(all_nodes);
}

dht_network::~dht_network() = default;

void print_routing_table(std::vector<lt::dht_routing_bucket> const& rt)
{
	int bucket = 0;
	for (std::vector<lt::dht_routing_bucket>::const_iterator i = rt.begin()
		, end(rt.end()); i != end; ++i, ++bucket)
	{
		char const* progress_bar =
			"################################"
			"################################"
			"################################"
			"################################";
		char const* short_progress_bar = "--------";
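		// print one '#' per node by indexing that far from the end of the
		// 128-character bar, and one '-' per replacement node, capped at 8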
		std::printf("%3d [%3d, %d] %s%s\n"
			, bucket, i->num_nodes, i->num_replacements
			, progress_bar + (128 - i->num_nodes)
			, short_progress_bar + (8 - std::min(8, i->num_replacements)));
	}
}

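// the first 8 nodes in the network double as router (bootstrap) nodes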
std::vector<lt::udp::endpoint> dht_network::router_nodes() const
{
	int idx = 0;
	std::vector<lt::udp::endpoint> ret;
	ret.reserve(8);
	for (auto const& n : m_nodes)
	{
		if (idx >= 8) break;
		++idx;
		ret.push_back(n.node_info().second);
	}
	return ret;
}

void dht_network::stop()
{
	for (auto& n : m_nodes) n.stop();
}

#endif // TORRENT_DISABLE_DHT