merge fix from RC_0_16

Arvid Norberg 2013-07-26 16:59:10 +00:00
parent 7cbd54b327
commit 1cb0161f0e
3 changed files with 44 additions and 9 deletions

@@ -23,6 +23,7 @@
 * fix uTP edge case where udp socket buffer fills up
 * fix nagle implementation in uTP
+* introduce bandwidth exhaustion attack-mitigation in allowed-fast pieces
 * python binding fix issue where torrent_info objects where destructing when their torrents were deleted
 * added missing field to scrape_failed_alert in python bindings
 * GCC 4.8 fix
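
The new ChangeLog entry corresponds to the hunks below: a per-piece request counter is kept in lock-step with the allowed-fast set, and a choked peer that keeps requesting one allowed-fast piece after roughly three full copies' worth of blocks have been served is disconnected. A minimal standalone sketch of that idea (the struct and names here are illustrative, not libtorrent's API):

#include <cstddef>
#include <cstdint>
#include <vector>

// Illustrative guard against bandwidth exhaustion via allowed-fast pieces:
// one request counter per allowed-fast piece; while the peer is choked,
// requests for such a piece are honoured only until the counter reaches a
// small multiple of the piece's block count.
struct allowed_fast_guard
{
    std::vector<int> pieces;              // allowed-fast piece indices
    std::vector<std::uint16_t> counters;  // accepted requests per piece (parallel)

    void add_piece(int piece)
    {
        pieces.push_back(piece);
        counters.push_back(0);
    }

    // decide what to do with a request for 'piece' from a choked peer
    enum class verdict { reject, accept, disconnect };
    verdict on_choked_request(int piece, int blocks_per_piece)
    {
        for (std::size_t i = 0; i < pieces.size(); ++i)
        {
            if (pieces[i] != piece) continue;
            // the peer has already pulled roughly 3 copies of this piece: drop it
            if (counters[i] >= 3 * blocks_per_piece) return verdict::disconnect;
            ++counters[i];
            return verdict::accept;
        }
        // choked and the piece is not in the allowed-fast set: reject
        return verdict::reject;
    }
};

In the commit itself the counter is the new m_accept_fast_piece_cnt member of peer_connection, sized and grown together with m_accept_fast (note the size-equality assert added near the end of the diff), and the increment happens when a request is actually queued.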

@@ -915,6 +915,11 @@ namespace libtorrent
 // if requested (regardless of choke state)
 std::vector<int> m_accept_fast;
+// a sent-piece counter for the allowed fast set
+// to avoid exploitation. Each slot is a counter
+// for one of the pieces from the allowed-fast set
+std::vector<boost::uint16_t> m_accept_fast_piece_cnt;
 // the pieces the peer will send us if
 // requested (regardless of choke state)
 std::vector<int> m_allowed_fast;
@@ -1308,4 +1313,3 @@ namespace libtorrent
 }
 #endif // TORRENT_PEER_CONNECTION_HPP_INCLUDED

@@ -392,9 +392,10 @@ namespace libtorrent
 }
 else if (m_ses.settings().seed_choking_algorithm == session_settings::anti_leech)
 {
-// the anti-leech seeding algorithm ranks peers based on how many pieces
-// they have, prefering to unchoke peers that just started and peers that
-// are close to completing. Like this:
+// the anti-leech seeding algorithm is based on the paper "Improving
+// BitTorrent: A Simple Approach" from Chow et. al. and ranks peers based
+// on how many pieces they have, prefering to unchoke peers that just
+// started and peers that are close to completing. Like this:
 // ^
 // | \ / |
 // | \ / |
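
The V-shaped curve in the comment above amounts to ranking a peer by its distance from 50% completion, so peers that just started and peers that are nearly done are preferred over those in the middle. A hedged sketch of just that ranking term (function name and scaling are assumptions; the library's actual unchoke comparison is more involved):

// Illustrative anti-leech rank: peers far from 50% completion score higher.
// 'have' is how many pieces the peer reports having, 'total' is the
// torrent's piece count.
int anti_leech_rank(int have, int total)
{
    if (total <= 1) return 0;
    int const mid = total / 2;
    // distance from the midpoint of the download, scaled by the midpoint
    int const dist = have < mid ? mid - have : have - mid;
    return dist * 1000 / mid;
}
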
@@ -603,8 +604,13 @@ namespace libtorrent
 TORRENT_ASSERT(std::find(m_accept_fast.begin()
 , m_accept_fast.end(), i)
 == m_accept_fast.end());
-if (m_accept_fast.empty()) m_accept_fast.reserve(10);
+if (m_accept_fast.empty())
+{
+    m_accept_fast.reserve(10);
+    m_accept_fast_piece_cnt.reserve(10);
+}
 m_accept_fast.push_back(i);
+m_accept_fast_piece_cnt.push_back(0);
 }
 return;
 }
@@ -639,8 +645,13 @@ namespace libtorrent
 peer_log("==> ALLOWED_FAST [ %d ]", piece);
 #endif
 write_allow_fast(piece);
-if (m_accept_fast.empty()) m_accept_fast.reserve(10);
+if (m_accept_fast.empty())
+{
+    m_accept_fast.reserve(10);
+    m_accept_fast_piece_cnt.reserve(10);
+}
 m_accept_fast.push_back(piece);
+m_accept_fast_piece_cnt.push_back(0);
 if (int(m_accept_fast.size()) >= num_allowed_pieces
 || int(m_accept_fast.size()) == num_pieces) return;
 }
@@ -1981,6 +1992,11 @@
 return;
 }
+int fast_idx = -1;
+std::vector<int>::iterator fast_iter = std::find(m_accept_fast.begin()
+    , m_accept_fast.end(), r.piece);
+if (fast_iter != m_accept_fast.end()) fast_idx = fast_iter - m_accept_fast.begin();
 // make sure this request
 // is legal and that the peer
 // is not choked
@@ -1996,8 +2012,18 @@ namespace libtorrent
 {
 // if we have choked the client
 // ignore the request
-if (m_choked && std::find(m_accept_fast.begin(), m_accept_fast.end()
-    , r.piece) == m_accept_fast.end())
+const int blocks_per_piece = static_cast<int>(
+    (t->torrent_file().piece_length() + t->block_size() - 1) / t->block_size());
+// disconnect peers that downloads more than foo times an allowed
+// fast piece
+if (m_choked && fast_idx != -1 && m_accept_fast_piece_cnt[fast_idx] >= 3 * blocks_per_piece)
+{
+    disconnect(errors::too_many_requests_when_choked);
+    return;
+}
+if (m_choked && fast_idx == -1)
 {
 #if defined TORRENT_VERBOSE_LOGGING || defined TORRENT_ERROR_LOGGING
 peer_log("*** REJECTING REQUEST [ peer choked and piece not in allowed fast set ]");
@@ -2021,6 +2047,10 @@
 }
 else
 {
+// increase the allowed fast set counter
+if (fast_idx != -1)
+    ++m_accept_fast_piece_cnt[fast_idx];
 m_requests.push_back(r);
 #ifdef TORRENT_REQUEST_LOGGING
 if (m_ses.m_request_log)
@@ -5650,6 +5680,7 @@ namespace libtorrent
 {
 TORRENT_ASSERT(m_in_use == 1337);
 TORRENT_ASSERT(m_queued_time_critical <= int(m_request_queue.size()));
+TORRENT_ASSERT(m_accept_fast.size() == m_accept_fast_piece_cnt.size());
 TORRENT_ASSERT(bool(m_disk_recv_buffer) == (m_disk_recv_buffer_size > 0));
@@ -5972,4 +6003,3 @@ namespace libtorrent
 }
 }