DHT optimizations and fixes. Fixed routing table bucket splitting

Arvid Norberg 2010-12-12 20:36:42 +00:00
parent f64679c297
commit 4fcfceb087
5 changed files with 44 additions and 23 deletions

include/libtorrent/kademlia/traversal_algorithm.hpp

@@ -78,21 +78,7 @@ struct traversal_algorithm : boost::noncopyable
 	void add_entry(node_id const& id, udp::endpoint addr, unsigned char flags);
 
-	traversal_algorithm(
-		node_impl& node
-		, node_id target)
-		: m_ref_count(0)
-		, m_node(node)
-		, m_target(target)
-		, m_invoke_count(0)
-		, m_branch_factor(3)
-		, m_responses(0)
-		, m_timeouts(0)
-	{
-#ifdef TORRENT_DHT_VERBOSE_LOGGING
-		TORRENT_LOG(traversal) << " [" << this << "] new traversal process. Target: " << target;
-#endif
-	}
+	traversal_algorithm(node_impl& node, node_id target);
 
 protected:
@@ -128,6 +114,7 @@ protected:
 	int m_branch_factor;
 	int m_responses;
 	int m_timeouts;
+	int m_num_target_nodes;
 };
 
 } } // namespace libtorrent::dht
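
The new m_num_target_nodes member parameterizes how many nodes a traversal tries to hear from before it considers itself done. It is initialized to twice the routing table's bucket size in the constructor later in this commit, and doubled again for bootstrap. A simplified sketch of the kind of termination check such a target feeds into, assuming a results list kept sorted by XOR distance to the target; the names here are illustrative, not libtorrent's:

#include <vector>

// illustration only: a lookup is finished once the num_target_nodes
// entries closest to the target have all been queried and responded
struct result_entry { bool queried; bool responded; };

bool lookup_done(std::vector<result_entry> const& results, int num_target_nodes)
{
	int checked = 0;
	for (std::vector<result_entry>::const_iterator i = results.begin()
		, end(results.end()); i != end && checked < num_target_nodes; ++i, ++checked)
	{
		// an entry that hasn't been queried or hasn't responded yet
		// keeps the lookup alive
		if (!i->queried || !i->responded) return false;
	}
	return checked == num_target_nodes;
}

Raising the target keeps requests outstanding for longer before the traversal gives up, which is what the bootstrap case below needs.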

src/kademlia/node.cpp

@@ -433,7 +433,7 @@ time_duration node_impl::connection_timeout()
 {
 	time_duration d = m_rpc.tick();
 	ptime now(time_now());
-	if (now - m_last_tracker_tick < minutes(10)) return d;
+	if (now - m_last_tracker_tick < minutes(2)) return d;
 	m_last_tracker_tick = now;
 
 	// look through all peers and see if any have timed out
src/kademlia/refresh.cpp

@@ -85,6 +85,9 @@ bootstrap::bootstrap(
 	, done_callback const& callback)
 	: refresh(node, target, callback)
 {
+	// make it more resilient to nodes not responding.
+	// we don't want to terminate early when we're bootstrapping
+	m_num_target_nodes *= 2;
 }
 
 char const* bootstrap::name() const { return "bootstrap"; }
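
With the default bucket size of 8 (an assumption here; m_bucket_size is configurable), an ordinary traversal now aims for 2 * 8 = 16 responding nodes while a bootstrap aims for 32, so a few unresponsive well-known bootstrap nodes no longer end the lookup prematurely.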

src/kademlia/routing_table.cpp

@@ -183,8 +183,8 @@ void routing_table::touch_bucket(node_id const& target)
 bool compare_bucket_refresh(routing_table_node const& lhs, routing_table_node const& rhs)
 {
 	// add the number of nodes to prioritize buckets with few nodes in them
-	return lhs.last_active + seconds(lhs.live_nodes.size())
-		< rhs.last_active + seconds(rhs.live_nodes.size());
+	return lhs.last_active + seconds(lhs.live_nodes.size() * 5)
+		< rhs.last_active + seconds(rhs.live_nodes.size() * 5);
 }
 
 bool routing_table::need_refresh(node_id& target) const
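
Adding seconds to last_active makes fuller buckets look more recently active, so sparse buckets win the refresh. Worked example: with the 5-second weight, a bucket holding 8 live nodes is treated as 40 seconds fresher than its timestamp while a bucket with 2 nodes gets only 10 seconds, a 30-second margin in favor of the emptier bucket; the old 1-second-per-node weight produced a margin of just 6 seconds, too small to outweigh ordinary differences in activity time.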
@@ -425,15 +425,29 @@ bool routing_table::add_node(node_entry const& e)
 		new_bucket.push_back(*j);
 		j = b.erase(j);
 	}
 
+	// split the replacement bucket as well. If the live bucket
+	// is not full anymore, also move the replacement entries
+	// into the main bucket
 	for (bucket_t::iterator j = rb.begin(); j != rb.end();)
 	{
 		if (distance_exp(m_id, j->id) >= 159 - bucket_index)
 		{
-			++j;
-			continue;
+			if (b.size() >= m_bucket_size)
+			{
+				++j;
+				continue;
+			}
+			b.push_back(*j);
 		}
+		else
+		{
+			// this entry belongs in the new bucket
+			if (new_bucket.size() < m_bucket_size)
+				new_bucket.push_back(*j);
+			else
+				new_replacement_bucket.push_back(*j);
+		}
-		// this entry belongs in the new bucket
-		new_replacement_bucket.push_back(*j);
 		j = rb.erase(j);
 	}
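
The old loop had two gaps: far-half entries stayed in the replacement bucket even though the split had just drained the live bucket, and entries destined for the new bucket went straight into its replacement list even when the new live bucket had room. A sketch of the resulting classification (illustration only, not libtorrent code), assuming distance_exp() returns the index of the most significant set bit of the XOR distance between the two IDs, in the range 0 to 159:

#include <cstddef>

// where a replacement-bucket entry goes when bucket `bucket_index`
// is split; mirrors the loop above (hypothetical helper)
enum split_dest
{
	stay_in_replacements,   // far half, live bucket still full
	promote_to_live,        // far half, live bucket has room now
	to_new_bucket,          // near half, new live bucket has room
	to_new_replacements     // near half, new live bucket full
};

split_dest classify_on_split(int dist_exp, int bucket_index
	, std::size_t live_size, std::size_t new_live_size
	, std::size_t bucket_size)
{
	if (dist_exp >= 159 - bucket_index)
	{
		// far half of this bucket's ID range: the entry stays at
		// this depth, promoted to the live bucket if there is room
		return live_size < bucket_size
			? promote_to_live : stay_in_replacements;
	}
	// shares a longer prefix with our ID: it belongs one level
	// deeper, in the newly split bucket
	return new_live_size < bucket_size
		? to_new_bucket : to_new_replacements;
}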

src/kademlia/traversal_algorithm.cpp

@@ -58,6 +58,23 @@ observer_ptr traversal_algorithm::new_observer(void* ptr
 	return o;
 }
 
+traversal_algorithm::traversal_algorithm(
+	node_impl& node
+	, node_id target)
+	: m_ref_count(0)
+	, m_node(node)
+	, m_target(target)
+	, m_invoke_count(0)
+	, m_branch_factor(3)
+	, m_responses(0)
+	, m_timeouts(0)
+	, m_num_target_nodes(m_node.m_table.bucket_size() * 2)
+{
+#ifdef TORRENT_DHT_VERBOSE_LOGGING
+	TORRENT_LOG(traversal) << " [" << this << "] new traversal process. Target: " << target;
+#endif
+}
+
 void traversal_algorithm::add_entry(node_id const& id, udp::endpoint addr, unsigned char flags)
 {
 	TORRENT_ASSERT(m_node.m_rpc.allocation_size() >= sizeof(find_data_observer));
@@ -237,7 +254,7 @@ namespace
 void traversal_algorithm::add_requests()
 {
-	int results_target = m_node.m_table.bucket_size();
+	int results_target = m_num_target_nodes;
 
 	// Find the first node that hasn't already been queried.
 	for (std::vector<observer_ptr>::iterator i = m_results.begin()