merged time critical pieces improvement from RC_0_16

parent 1cb0161f0e
commit 2599acf451
@@ -23,6 +23,7 @@
 * fix uTP edge case where udp socket buffer fills up
 * fix nagle implementation in uTP
 
+* improve time-critical pieces feature (streaming)
 * introduce bandwidth exhaustion attack-mitigation in allowed-fast pieces
 * python binding fix issue where torrent_info objects were destructed when their torrents were deleted
 * added missing field to scrape_failed_alert in python bindings
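The "time-critical pieces" entry above refers to libtorrent's piece-deadline mechanism used for streaming. As a minimal usage sketch (the helper name, window size, and deadline spacing are illustrative assumptions, not part of this commit), an application streaming media might keep a rolling window of deadlines ahead of the playback position:

    #include <libtorrent/torrent_handle.hpp>

    // Ask for the next `window` pieces after the playhead, giving earlier pieces
    // tighter deadlines. set_piece_deadline() is the public API that feeds the
    // time-critical piece list this commit improves.
    void schedule_streaming_window(libtorrent::torrent_handle h
        , int playhead_piece, int window)
    {
        for (int i = 0; i < window; ++i)
        {
            int const deadline_ms = 500 * (i + 1);
            h.set_piece_deadline(playhead_piece + i, deadline_ms
                , libtorrent::torrent_handle::alert_when_available);
        }
    }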
@@ -2854,6 +2854,10 @@ namespace libtorrent
 		boost::shared_ptr<torrent> t = m_torrent.lock();
 		TORRENT_ASSERT(t);
 		if (t->upload_mode()) return false;
+
+		// ignore snubbed peers, since they're not likely to return pieces in a timely
+		// manner anyway
+		if (m_snubbed) return false;
 		return true;
 	}
 
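The hunk above adds a snubbed-peer check to the predicate that decides whether a peer may receive time-critical requests. A condensed sketch of that gating logic, with peer_state standing in for the relevant peer_connection members (an illustrative type, not libtorrent's):

    struct peer_state
    {
        bool torrent_upload_mode; // the torrent is not downloading at all
        bool snubbed;             // the peer recently stalled our requests
    };

    bool can_request_time_critical(peer_state const& p)
    {
        // a torrent in upload mode makes no requests
        if (p.torrent_upload_mode) return false;
        // snubbed peers are unlikely to return pieces in a timely manner
        if (p.snubbed) return false;
        return true;
    }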
@@ -8102,6 +8102,11 @@ namespace libtorrent
 			, boost::bind(&peer_connection::download_queue_time, _1, 16*1024)
 			< boost::bind(&peer_connection::download_queue_time, _2, 16*1024));
 
+		// remove the bottom 10% of peers from the candidate set
+		int new_size = (peers.size() * 9 + 9) / 10;
+		TORRENT_ASSERT(new_size <= peers.size());
+		peers.resize(new_size);
+
 		std::set<peer_connection*> peers_with_requests;
 
 		std::vector<piece_block> interesting_blocks;
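The added block above trims the slowest tenth of the candidate peers after they have been sorted by estimated download-queue drain time, so deadline requests only go to responsive peers. A standalone sketch of the same idea (peer and its millisecond field are illustrative, not libtorrent types):

    #include <algorithm>
    #include <cassert>
    #include <vector>

    struct peer { int download_queue_time_ms; };

    void trim_slowest_tenth(std::vector<peer>& peers)
    {
        // fastest-draining queues first, mirroring the boost::bind comparison above
        std::sort(peers.begin(), peers.end()
            , [](peer const& a, peer const& b)
            { return a.download_queue_time_ms < b.download_queue_time_ms; });

        // (n * 9 + 9) / 10 is the ceiling of 90% of n, so at most 10% of the
        // peers are dropped and a lone peer is never removed
        std::size_t const new_size = (peers.size() * 9 + 9) / 10;
        assert(new_size <= peers.size());
        peers.resize(new_size);
    }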
@@ -8125,7 +8130,7 @@ namespace libtorrent
 		{
 			if (peers.empty()) break;
 
-			// the +1000 is to compensate for the fact that we only call this functions
+			// the +1000 is to compensate for the fact that we only call this function
 			// once per second, so if we need to request it 500 ms from now, we should request
 			// it right away
 			if (i != m_time_critical_pieces.begin() && i->deadline > now
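The comment fix above ("this functions" to "this function") sits in the reasoning for the 1000 ms slack: the scheduler runs roughly once per second, so a deadline that falls within the next second must be treated as already due. A tiny sketch of that test (plain millisecond counters stand in for libtorrent's ptime):

    bool piece_is_due(int now_ms, int deadline_ms)
    {
        // look one scheduler tick (1000 ms) ahead so a piece due in, say,
        // 500 ms is requested now rather than on the next once-per-second pass
        return deadline_ms <= now_ms + 1000;
    }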
@@ -8140,6 +8145,8 @@ namespace libtorrent
 			piece_picker::downloading_piece pi;
 			m_picker->piece_info(i->piece, pi);
 
+			bool timed_out = false;
+
 			int free_to_request = m_picker->blocks_in_piece(i->piece) - pi.finished - pi.writing - pi.requested;
 			if (free_to_request == 0)
 			{
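free_to_request above counts the blocks of the deadline piece that are not yet finished, being written, or outstanding on some peer; the new timed_out flag tracks whether re-requesting already-outstanding blocks should be allowed. A sketch of the bookkeeping (piece_progress is an illustrative stand-in for the piece_picker::downloading_piece fields used):

    struct piece_progress
    {
        int blocks_in_piece; // total blocks in the piece
        int finished;        // blocks downloaded and verified
        int writing;         // blocks being flushed to disk
        int requested;       // blocks outstanding on some peer
    };

    int free_to_request(piece_progress const& pi)
    {
        return pi.blocks_in_piece - pi.finished - pi.writing - pi.requested;
    }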
@@ -8152,8 +8159,13 @@ namespace libtorrent
 					// we're just waiting for it to flush them to disk.
 					// if last_requested is recent enough, we should give it some
 					// more time
-					break;
+					// skip to the next piece
+					continue;
 				}
+
+				// it's been too long since we requested the last block from this piece. Allow re-requesting
+				// blocks from this piece
+				timed_out = true;
 			}
 
 			// loop until every block has been requested from this piece (i->piece)
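The rewritten branch above no longer abandons the deadline queue (break) when every block of a piece is already requested; it either skips to the next piece or, if the last request is old enough, marks the piece as timed out so blocks may be requested a second time. A sketch of that decision, under the assumption that the threshold is derived from a typical piece download time (the exact threshold libtorrent uses is not shown in this hunk):

    // returns true when duplicated requests should be allowed for this piece
    bool allow_re_request(int ms_since_last_request, int typical_piece_time_ms)
    {
        // give the outstanding requests at least one typical piece download
        // time (plus a second of scheduling slack) before doubling them up
        return ms_since_last_request > typical_piece_time_ms + 1000;
    }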
@@ -8181,6 +8193,14 @@ namespace libtorrent
 				std::vector<pending_block> const& dq = c.download_queue();
 
 				bool added_request = false;
+				bool busy_blocks = false;
+
+				if (timed_out && interesting_blocks.empty())
+				{
+					// if the piece has timed out, allow requesting back-up blocks
+					interesting_blocks.swap(backup1.empty() ? backup2 : backup1);
+					busy_blocks = true;
+				}
 
 				if (!interesting_blocks.empty())
 				{
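The block added above wires the timed-out state into block selection: if the picker produced no unrequested blocks for a timed-out piece, the backup lists (blocks already requested from other peers) are used instead, and busy_blocks records that these are duplicate, "busy" requests. A standalone sketch of the swap (block and the vectors are illustrative stand-ins for piece_block containers):

    #include <vector>

    struct block { int piece; int index; };

    // returns true when the blocks handed back are busy (already requested elsewhere)
    bool fall_back_to_busy_blocks(bool timed_out
        , std::vector<block>& interesting
        , std::vector<block>& backup1
        , std::vector<block>& backup2)
    {
        if (!timed_out || !interesting.empty()) return false;
        // prefer the first backup list; use the second only if the first is empty
        interesting.swap(backup1.empty() ? backup2 : backup1);
        return true;
    }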
@@ -8193,6 +8213,7 @@ namespace libtorrent
 					// simply disregard this peer from this piece, since this peer
 					// is likely to be causing the stall. We should request it
 					// from the next peer in the list
+					// the peer will be put back in the set for the next piece
 					ignore_peers.push_back(*p);
 					peers.erase(p);
 					continue;
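The comment added above documents the peer-rotation behaviour: a peer that looks like it is stalling the current piece is set aside for this piece only, and returned to the candidate set before the next piece is processed. A sketch of that bookkeeping with plain integer peer ids (illustrative; libtorrent moves peer_connection pointers between the two containers):

    #include <algorithm>
    #include <vector>

    void ignore_for_this_piece(std::vector<int>& peers
        , std::vector<int>& ignored, int slow_peer)
    {
        // remember the peer so it can be restored, then drop it from the
        // candidate set for the current piece
        ignored.push_back(slow_peer);
        peers.erase(std::remove(peers.begin(), peers.end(), slow_peer), peers.end());
    }

    void restore_for_next_piece(std::vector<int>& peers, std::vector<int>& ignored)
    {
        peers.insert(peers.end(), ignored.begin(), ignored.end());
        ignored.clear();
    }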
@@ -8208,7 +8229,8 @@ namespace libtorrent
 				}
 				else
 				{
-					if (!c.add_request(interesting_blocks.front(), peer_connection::req_time_critical))
+					if (!c.add_request(interesting_blocks.front(), peer_connection::req_time_critical
+						| (busy_blocks ? peer_connection::req_busy : 0)))
 					{
 						peers.erase(p);
 						continue;
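The final hunk threads the busy flag into the request itself: a time-critical request that duplicates an outstanding block is additionally tagged req_busy so the peer connection can account for it. A sketch of the flag combination (the enum values here are illustrative, not libtorrent's actual constants):

    enum request_flags { req_time_critical = 1, req_busy = 2 };

    int time_critical_request_flags(bool busy_blocks)
    {
        // always mark the request time-critical; add the busy bit only when the
        // block is already outstanding on another peer
        return req_time_critical | (busy_blocks ? req_busy : 0);
    }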