fix test_dht to take into account the slightly more relaxed semantics of routing_table::find_node

Arvid Norberg 2013-10-22 05:20:00 +00:00
parent a627a4e156
commit 1a2226b8f0
4 changed files with 68 additions and 49 deletions

@@ -67,6 +67,7 @@ node_id TORRENT_EXTRA_EXPORT generate_id_impl(address const& ip_, boost::uint32_
 bool TORRENT_EXTRA_EXPORT verify_id(node_id const& nid, address const& source_ip);
 bool TORRENT_EXTRA_EXPORT matching_prefix(node_entry const& n, int mask, int prefix, int bucket_index);
+node_id TORRENT_EXTRA_EXPORT generate_prefix_mask(int bits);
 } } // namespace libtorrent::dht

@@ -181,5 +181,14 @@ bool matching_prefix(node_entry const& n, int mask, int prefix, int bucket_index
 	return (id[0] & mask) == prefix;
 }
 
+node_id generate_prefix_mask(int bits)
+{
+	node_id mask(0);
+	int b = 0;
+	for (; b < bits - 7; b += 8) mask[b/8] |= 0xff;
+	mask[b/8] |= 0xff << (8 - (bits&7));
+	return mask;
+}
+
 } } // namespace libtorrent::dht
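For reference, generate_prefix_mask() above builds an id whose first bits bits are set, counted from the most significant bit: whole leading bytes become 0xff and then the top (bits & 7) bits of the following byte are filled in. Below is a minimal standalone sketch of the same construction, using a plain byte array as a stand-in for node_id; the type alias and function name are illustrative assumptions, not libtorrent API. It reproduces the values that the TEST_EQUAL cases added to test_dht.cpp further down expect.

#include <array>
#include <cassert>
#include <cstdint>

// stand-in for a 160-bit node_id: 20 bytes, most significant byte first
typedef std::array<std::uint8_t, 20> id_bytes;

// same loop structure as generate_prefix_mask() above: whole leading
// bytes are filled with 0xff, then the top (bits & 7) bits of the next byte
id_bytes prefix_mask_sketch(int bits)
{
    assert(bits > 0 && bits < 160); // keep this sketch in-bounds
    id_bytes mask = {};
    int b = 0;
    for (; b < bits - 7; b += 8) mask[b / 8] |= 0xff;
    mask[b / 8] |= 0xff << (8 - (bits & 7));
    return mask;
}

int main()
{
    // mirrors the TEST_EQUAL cases this commit adds to test_dht.cpp
    assert(prefix_mask_sketch(1)[0] == 0x80);
    assert(prefix_mask_sketch(2)[0] == 0xc0);
    assert(prefix_mask_sketch(11)[0] == 0xff && prefix_mask_sketch(11)[1] == 0xe0);
    assert(prefix_mask_sketch(17)[1] == 0xff && prefix_mask_sketch(17)[2] == 0x80);
    return 0;
}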

@@ -290,8 +290,7 @@ bool routing_table::need_refresh(node_id& target) const
 	// generate a random node_id within the given bucket
 	target = generate_random_id();
 	int num_bits = std::distance(m_buckets.begin(), i) + 1;
-	node_id mask(0);
-	for (int i = 0; i < num_bits; ++i) mask[i/8] |= 0x80 >> (i&7);
+	node_id mask = generate_prefix_mask(num_bits);
 
 	// target = (target & ~mask) | (root & mask)
 	node_id root = m_id;
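need_refresh() then applies this mask exactly as the comment in the hunk above says: target = (target & ~mask) | (root & mask), so the first num_bits bits of the refresh target come from the node's own id and the remaining bits stay random. A byte-wise sketch of that combination on the same kind of stand-in type (again illustrative, not libtorrent's node_id API):

#include <array>
#include <cassert>
#include <cstdint>

// stand-in for a 160-bit node_id: 20 bytes, most significant byte first
typedef std::array<std::uint8_t, 20> id_bytes;

// target = (target & ~mask) | (root & mask), one byte at a time:
// where a mask bit is set, the bit is taken from root (our own id);
// everywhere else the random bit already in target is kept
id_bytes mix_target(id_bytes target, id_bytes const& root, id_bytes const& mask)
{
    for (std::size_t i = 0; i < target.size(); ++i)
        target[i] = (target[i] & ~mask[i]) | (root[i] & mask[i]);
    return target;
}

int main()
{
    id_bytes root = {};   // stands in for m_id
    id_bytes target = {}; // stands in for the random starting id
    root[0] = 0x31;
    target[0] = 0xff;

    id_bytes mask = {};   // a 4-bit prefix mask: f000...
    mask[0] = 0xf0;

    // the top 4 bits now come from root, the low 4 stay from target
    assert(mix_target(target, root, mask)[0] == 0x3f);
    return 0;
}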

@@ -41,7 +41,7 @@ POSSIBILITY OF SUCH DAMAGE.
 #include "libtorrent/kademlia/node_id.hpp"
 #include "libtorrent/kademlia/routing_table.hpp"
-#include <iostream>
+#include <numeric>
 #include "test.hpp"
 #include "setup_transfer.hpp"
@@ -50,6 +50,10 @@ POSSIBILITY OF SUCH DAMAGE.
 #include <valgrind/memcheck.h>
 #endif
 
+#if TORRENT_USE_IOSTREAM
+#include <iostream>
+#endif
+
 using namespace libtorrent;
 using namespace libtorrent::dht;
@@ -323,6 +327,11 @@ struct print_alert : alert_dispatcher
 	}
 };
 
+int sum_distance_exp(int s, node_entry const& e, node_id const& ref)
+{
+	return s + distance_exp(e.id, ref);
+}
+
 // TODO: 3 test find_data, obfuscated_get_peers and bootstrap
 int test_main()
 {
@@ -988,10 +997,16 @@ int test_main()
 		}
 	}
 
-	// test kademlia functions
+	// test node-id functions
 	using namespace libtorrent::dht;
 
+	TEST_EQUAL(generate_prefix_mask(1), to_hash("8000000000000000000000000000000000000000"));
+	TEST_EQUAL(generate_prefix_mask(2), to_hash("c000000000000000000000000000000000000000"));
+	TEST_EQUAL(generate_prefix_mask(11), to_hash("ffe0000000000000000000000000000000000000"));
+	TEST_EQUAL(generate_prefix_mask(17), to_hash("ffff800000000000000000000000000000000000"));
+
+	// test kademlia functions
 	// this is a bit too expensive to do under valgrind
 #ifndef TORRENT_USE_VALGRIND
 	for (int i = 0; i < 160; i += 8)
@@ -1022,6 +1037,7 @@ int test_main()
 	{
 		// test kademlia routing table
 		dht_settings s;
+		s.extended_routing_table = false;
 		//	s.restrict_routing_ips = false;
 		node_id id = to_hash("3123456789abcdef01232456789abcdef0123456");
 		const int bucket_size = 10;
@@ -1124,7 +1140,7 @@ int test_main()
 		}
 		printf("active buckets: %d\n", table.num_active_buckets());
 		TEST_EQUAL(table.num_active_buckets(), 10);
-		TEST_CHECK(table.size().get<0>() > 10 * 10);
+		TEST_CHECK(table.size().get<0>() >= 10 * 10);
 
 		//#error test num_global_nodes
 		//#error test need_refresh
@@ -1134,60 +1150,54 @@ int test_main()
 		table.for_each_node(node_push_back, nop, &nodes);
 
-		std::cout << "nodes: " << nodes.size() << std::endl;
+		printf("nodes: %d\n", int(nodes.size()));
 
 		std::vector<node_entry> temp;
 
 		std::generate(tmp.begin(), tmp.end(), &std::rand);
 		table.find_node(tmp, temp, 0, nodes.size() * 2);
-		std::cout << "returned: " << temp.size() << std::endl;
+		printf("returned-all: %d\n", int(temp.size()));
 		TEST_EQUAL(temp.size(), nodes.size());
 
-		std::generate(tmp.begin(), tmp.end(), &std::rand);
-		table.find_node(tmp, temp, 0, bucket_size);
-		std::cout << "returned: " << temp.size() << std::endl;
-		TEST_EQUAL(temp.size(), bucket_size);
-
-		std::sort(nodes.begin(), nodes.end(), boost::bind(&compare_ref
-			, boost::bind(&node_entry::id, _1)
-			, boost::bind(&node_entry::id, _2), tmp));
-
-		int hits = 0;
 		// This makes sure enough of the nodes returned are actually
 		// part of the closest nodes
-		for (std::vector<node_entry>::iterator i = temp.begin()
-			, end(temp.end()); i != end; ++i)
+		std::set<node_id> duplicates;
+
+#ifdef TORRENT_USE_VALGRIND
+		const int reps = 3;
+#else
+		const int reps = 50;
+#endif
+
+		for (int r = 0; r < reps; ++r)
 		{
-			int hit = std::find_if(nodes.begin(), nodes.end()
-				, boost::bind(&node_entry::id, _1) == i->id) - nodes.begin();
-			std::cerr << hit << std::endl;
-			if (hit < int(temp.size())) ++hits;
+			std::generate(tmp.begin(), tmp.end(), &std::rand);
+			table.find_node(tmp, temp, 0, bucket_size * 2);
+			printf("returned: %d\n", int(temp.size()));
+			TEST_EQUAL(int(temp.size()), (std::min)(bucket_size * 2, int(nodes.size())));
+
+			std::sort(nodes.begin(), nodes.end(), boost::bind(&compare_ref
+				, boost::bind(&node_entry::id, _1)
+				, boost::bind(&node_entry::id, _2), tmp));
+
+			int expected = std::accumulate(nodes.begin(), nodes.begin() + (bucket_size * 2)
+				, 0, boost::bind(&sum_distance_exp, _1, _2, tmp));
+			int sum_hits = std::accumulate(temp.begin(), temp.end()
+				, 0, boost::bind(&sum_distance_exp, _1, _2, tmp));
+			TEST_EQUAL(bucket_size * 2, int(temp.size()));
+			printf("expected: %d actual: %d\n", expected, sum_hits);
+			TEST_EQUAL(expected, sum_hits);
+
+			duplicates.clear();
+			// This makes sure enough of the nodes returned are actually
+			// part of the closest nodes
+			for (std::vector<node_entry>::iterator i = temp.begin()
+				, end(temp.end()); i != end; ++i)
+			{
+				TEST_CHECK(duplicates.count(i->id) == 0);
+				duplicates.insert(i->id);
+			}
 		}
-		std::cout << "hits: " << hits << std::endl;
-		TEST_EQUAL(hits, int(temp.size()));
-
-		std::generate(tmp.begin(), tmp.end(), &std::rand);
-		table.find_node(tmp, temp, 0, bucket_size * 2);
-		std::cout << "returned: " << temp.size() << std::endl;
-		TEST_EQUAL(int(temp.size()), (std::min)(bucket_size * 2, int(nodes.size())));
-
-		std::sort(nodes.begin(), nodes.end(), boost::bind(&compare_ref
-			, boost::bind(&node_entry::id, _1)
-			, boost::bind(&node_entry::id, _2), tmp));
-
-		hits = 0;
-		// This makes sure enough of the nodes returned are actually
-		// part of the closest nodes
-		for (std::vector<node_entry>::iterator i = temp.begin()
-			, end(temp.end()); i != end; ++i)
-		{
-			int hit = std::find_if(nodes.begin(), nodes.end()
-				, boost::bind(&node_entry::id, _1) == i->id) - nodes.begin();
-			std::cerr << hit << std::endl;
-			if (hit < int(temp.size())) ++hits;
-		}
-		std::cout << "hits: " << hits << std::endl;
-		TEST_EQUAL(hits, int(temp.size()));
 
 		using namespace libtorrent::dht;
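This last hunk is the actual fix the commit message describes: routing_table::find_node is now allowed to return a set that is not literally the closest nodes, so the old check, which required every returned entry to appear among the closest ones, no longer holds. The updated test instead sorts all table entries by XOR distance to the target and requires that the returned set has the same total distance exponent as the ideal closest set, and that it contains no duplicates. Below is a standalone sketch of that acceptance criterion; distance_exp_sketch is a simplified stand-in written for illustration (assumed to return the index of the most significant differing bit, and 0 for identical ids), not a copy of libtorrent's distance_exp.

#include <array>
#include <cstdint>
#include <vector>

// stand-in for a 160-bit node_id: 20 bytes, most significant byte first
typedef std::array<std::uint8_t, 20> id_bytes;

// simplified stand-in for distance_exp(): index of the most significant
// bit set in (a XOR b), where bit 159 is the top bit of the first byte;
// returns 0 when the ids are identical
int distance_exp_sketch(id_bytes const& a, id_bytes const& b)
{
    for (std::size_t i = 0; i < a.size(); ++i)
    {
        int x = a[i] ^ b[i];
        if (x == 0) continue;
        int base = int(a.size() - 1 - i) * 8;
        for (int bit = 7; bit >= 0; --bit)
            if (x & (1 << bit)) return base + bit;
    }
    return 0;
}

// the relaxed acceptance criterion used by the updated test: the returned
// set and the ideal closest set must be equally close to the target in
// aggregate, even if the individual entries differ
bool same_total_distance(std::vector<id_bytes> const& returned
    , std::vector<id_bytes> const& closest, id_bytes const& target)
{
    int sum_returned = 0;
    for (std::size_t i = 0; i < returned.size(); ++i)
        sum_returned += distance_exp_sketch(returned[i], target);
    int sum_closest = 0;
    for (std::size_t i = 0; i < closest.size(); ++i)
        sum_closest += distance_exp_sketch(closest[i], target);
    return sum_returned == sum_closest;
}

In the test itself the same sums are computed with std::accumulate and boost::bind over the new sum_distance_exp() helper, and a std::set<node_id> is used for the duplicate check.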