split buckets when exceeding the next bucket's size limit, to make sure we split before we risk discarding nodes because the next bucket is smaller

arvidn 2016-01-01 22:51:01 -05:00
parent c6b75fa79e
commit 07ddb010c5
2 changed files with 26 additions and 11 deletions
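
A minimal sketch of the rule this commit introduces, assuming a routing table whose low-numbered buckets may be larger than the buckets that follow them. The limits and helper names below are hypothetical, not libtorrent's actual code; they only mirror the shape of the decision.

// Hypothetical per-bucket size limits, largest first. Only the shape matters:
// low-numbered buckets may be bigger than the ones after them.
int bucket_limit(int index)
{
	static int const limits[] = {128, 64, 32, 16};
	return index < 4 ? limits[index] : 8;
}

// old rule: keep inserting into the bucket while it is below its own limit
bool fits_old(int bucket_index, int bucket_size)
{
	return bucket_size < bucket_limit(bucket_index);
}

// new rule: if the bucket could be split (it is the last bucket and the new
// node hasn't failed), compare against the *next* bucket's limit instead, so
// the split happens before the bucket outgrows what the next one can hold
bool fits_new(int bucket_index, int bucket_size, bool can_split)
{
	return bucket_size < (can_split
		? bucket_limit(bucket_index + 1)
		: bucket_limit(bucket_index));
}

The effect is that the last bucket is never allowed to grow past what the bucket created by the next split can hold, which is what makes the earlier split safe.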


@@ -634,7 +634,11 @@ routing_table::add_node_status_t routing_table::add_node_impl(node_entry e)
bucket_t& b = i->live_nodes;
bucket_t& rb = i->replacements;
int const bucket_index = std::distance(m_buckets.begin(), i);
// compare against the max size of the next bucket. Otherwise we may wait too
// long to split, and lose nodes (in the case where lower-numbered buckets
// are larger)
int const bucket_size_limit = bucket_limit(bucket_index);
int const next_bucket_size_limit = bucket_limit(bucket_index + 1);
bucket_t::iterator j;
@@ -714,8 +718,20 @@ routing_table::add_node_status_t routing_table::add_node_impl(node_entry e)
}
}
// can we split the bucket?
// only nodes that haven't failed can split the bucket, and we can only
// split the last bucket
bool const can_split = (boost::next(i) == m_buckets.end()
&& m_buckets.size() < 159)
&& e.fail_count() == 0
&& (i == m_buckets.begin() || boost::prior(i)->live_nodes.size() > 1);
// if there's room in the main bucket, just insert it
if (int(b.size()) < bucket_size_limit)
// if we can split the bucket (i.e. it's the last bucket) use the next
// bucket's size limit. This makes us split the low-numbered buckets
// earlier when they are larger, making it less likely that we
// lose nodes
if (int(b.size()) < (can_split ? next_bucket_size_limit : bucket_size_limit))
{
if (b.empty()) b.reserve(bucket_size_limit);
b.push_back(e);
@@ -733,14 +749,6 @@ routing_table::add_node_status_t routing_table::add_node_impl(node_entry e)
// as the last replacement strategy, if the node we found matching our
// bit prefix has higher RTT than the new node, replace it.
// can we split the bucket?
// only nodes that haven't failed can split the bucket, and we can only
// split the last bucket
bool const can_split = (boost::next(i) == m_buckets.end()
&& m_buckets.size() < 159)
&& e.fail_count() == 0
&& (i == m_buckets.begin() || boost::prior(i)->live_nodes.size() > 1);
if (e.pinged() && e.fail_count() == 0)
{
// if the node we're trying to insert is considered pinged,
@@ -949,7 +957,7 @@ void routing_table::split_bucket()
int const bucket_index = m_buckets.size()-1;
int const bucket_size_limit = bucket_limit(bucket_index);
TORRENT_ASSERT(int(m_buckets.back().live_nodes.size()) >= bucket_size_limit);
TORRENT_ASSERT(int(m_buckets.back().live_nodes.size()) >= bucket_limit(bucket_index + 1));
// this is the last bucket, and it's full already. Split
// it by adding another bucket

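The assertion change above tightens the precondition for splitting: the last bucket now only needs to reach the next bucket's size limit, not its own. A back-of-the-envelope illustration of why the old threshold could lose nodes; the numbers and the worst-case assumption are hypothetical, chosen to mirror a large low-numbered bucket followed by a smaller one.

#include <algorithm>
#include <cstdio>

int main()
{
	int const old_limit = 128;  // the last bucket's own limit, e.g. bucket_limit(bucket_index)
	int const next_limit = 64;  // the appended bucket's limit, e.g. bucket_limit(bucket_index + 1)

	// worst case: every node in the full last bucket shares one more prefix
	// bit with our ID, so all of them belong in the newly appended bucket
	int const movers_old_rule = old_limit;   // split only at 128 nodes -> 128 candidates to move
	int const movers_new_rule = next_limit;  // split already at 64 nodes -> at most 64 to move

	std::printf("discarded under old rule: %d\n", std::max(0, movers_old_rule - next_limit)); // 64
	std::printf("discarded under new rule: %d\n", std::max(0, movers_new_rule - next_limit)); // 0
}
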

@@ -2325,11 +2325,18 @@ TORRENT_TEST(routing_table_extended)
node_id id = to_hash("1234876923549721020394873245098347598635");
node_id diff = to_hash("15764f7459456a9453f8719b09547c11d5f34061");
// we can't add the nodes in straight 0,1,2,3 order. If we did, the routing
// table would become unbalanced and intermediate nodes would be dropped
std::vector<boost::uint8_t> node_id_prefix;
node_id_prefix.reserve(256);
for (int i = 0; i < 256; ++i) node_id_prefix.push_back(i);
std::random_shuffle(node_id_prefix.begin(), node_id_prefix.end());
routing_table tbl(id, 8, sett, &observer);
for (int i = 0; i < 256; ++i)
{
add_and_replace(id, diff);
id[0] = i;
id[0] = node_id_prefix[i];
tbl.node_seen(id, rand_udp_ep(), 20 + (id[19] & 0xff));
}
TEST_EQUAL(tbl.num_active_buckets(), 6);
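
For reference, the shuffled-prefix setup used in this test could be written standalone along these lines. This is only a sketch: it uses std::shuffle with an explicit engine (std::random_shuffle was deprecated in C++14 and removed in C++17), and the function name is illustrative.

#include <algorithm>
#include <cstdint>
#include <numeric>
#include <random>
#include <vector>

// produce the byte values 0..255 in a randomized order, to be used as id[0]
// before each node_seen() call, as in the test loop above
std::vector<std::uint8_t> shuffled_prefixes()
{
	std::vector<std::uint8_t> prefixes(256);
	std::iota(prefixes.begin(), prefixes.end(), 0);
	std::mt19937 rng(0x5eed);
	std::shuffle(prefixes.begin(), prefixes.end(), rng);
	return prefixes;
}

Inserting in this randomized order spreads the nodes across the ID space as the table grows, so the test exercises splitting without immediately overflowing and dropping the intermediate buckets.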