don't save excessive number of peers in the resume data

Arvid Norberg 2012-07-04 21:33:04 +00:00
parent 7c7b927e07
commit a348eae42d
1 changed file with 15 additions and 0 deletions

@@ -5346,6 +5346,8 @@ namespace libtorrent
// failcount is a 5 bit value
int max_failcount = (std::min)(settings().max_failcount, 31);
int num_saved_peers = 0;
for (policy::const_iterator i = m_policy.begin_peer()
, end(m_policy.end_peer()); i != end; ++i)
{
@@ -5381,6 +5383,18 @@ namespace libtorrent
// don't save peers that don't work
if (int(p->failcount) >= max_failcount) continue;
// the more peers we've saved, the more picky we get
// about which ones are worth saving
if (num_saved_peers > 10
&& int(p->failcount) > 0
&& int(p->failcount) > (40 - (num_saved_peers - 10)) * max_failcount / 40)
continue;
// if we have 40 peers, don't save any peers whom
// we've only heard from through the resume data
if (num_saved_peers > 40 && p->source == peer_info::resume_data)
continue;
#if TORRENT_USE_IPV6
if (addr.is_v6())
{
@@ -5393,6 +5407,7 @@ namespace libtorrent
write_address(addr, peers);
write_uint16(p->port, peers);
}
++num_saved_peers;
}
ret["upload_rate_limit"] = upload_limit();