Merge pull request #526 from arvidn/rechecking-outstanding-1.1
always keep at least 2 async hash jobs outstanding
commit 3fa3004b85
@@ -573,7 +573,7 @@ namespace libtorrent
 	int piece_size = pe->storage->files()->piece_size(pe->piece);
 	TORRENT_PIECE_ASSERT(piece_size > 0, pe);

 	int iov_len = 0;
 	// the blocks we're flushing
 	int num_flushing = 0;
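For context: this hunk appears to come from build_iovec() in src/disk_io_thread.cpp, which gathers a piece's dirty cache blocks into an iovec array so they can be flushed in one scatter/gather write. A minimal sketch of that pattern, using hypothetical block_entry and iovec_t stand-ins rather than libtorrent's cached_block_entry and file::iov_t:

#include <cstddef>

// hypothetical stand-ins for libtorrent's cached_block_entry and file::iov_t
struct block_entry { char* buf; bool dirty; };
struct iovec_t { void* iov_base; std::size_t iov_len; };

// gather the dirty blocks of one piece into iov[], recording which block
// indices are being flushed in flushing[]. returns the number of iovec
// entries filled, mirroring build_iovec()'s iov_len/num_flushing bookkeeping
int build_iovec_sketch(block_entry* blocks, int num_blocks, int block_size
	, iovec_t* iov, int* flushing)
{
	int iov_len = 0;
	// the blocks we're flushing
	int num_flushing = 0;
	for (int i = 0; i < num_blocks; ++i)
	{
		if (!blocks[i].dirty) continue;
		iov[iov_len].iov_base = blocks[i].buf;
		iov[iov_len].iov_len = block_size;
		flushing[num_flushing++] = i;
		++iov_len;
	}
	return iov_len;
}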
@@ -2839,17 +2839,17 @@ namespace libtorrent
 	if (no_pieces == false)
 	{
 		int block_size = m_disk_cache.block_size();

 		if (storage)
 		{
 			ret->pieces.reserve(storage->num_pieces());

 			for (boost::unordered_set<cached_piece_entry*>::iterator i
 				= storage->cached_pieces().begin(), end(storage->cached_pieces().end());
 				i != end; ++i)
 			{
 				TORRENT_ASSERT((*i)->storage.get() == storage);

 				if ((*i)->cache_state == cached_piece_entry::read_lru2_ghost
 					|| (*i)->cache_state == cached_piece_entry::read_lru1_ghost)
 					continue;
@@ -2860,10 +2860,10 @@ namespace libtorrent
 	else
 	{
 		ret->pieces.reserve(m_disk_cache.num_pieces());

 		std::pair<block_cache::iterator, block_cache::iterator> range
 			= m_disk_cache.all_pieces();

 		for (block_cache::iterator i = range.first; i != range.second; ++i)
 		{
 			if (i->cache_state == cached_piece_entry::read_lru2_ghost
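Both branches above follow the same pattern (these hunks look like get_cache_info() in src/disk_io_thread.cpp): reserve the result vector up front, then walk the cached pieces and skip ghost entries, since pieces on the ARC ghost lists are eviction bookkeeping only and hold no blocks worth reporting. A minimal sketch of that filter, with a hypothetical piece_entry standing in for cached_piece_entry:

#include <vector>

// hypothetical stand-in for libtorrent's cached_piece_entry
struct piece_entry
{
	enum state_t { write_lru, read_lru1, read_lru1_ghost
		, read_lru2, read_lru2_ghost };
	int piece;
	state_t cache_state;
};

// snapshot the cache, skipping ghost entries: they track recently evicted
// pieces for the ARC policy and no longer hold any data in memory
std::vector<piece_entry> snapshot_cache(std::vector<piece_entry> const& cache)
{
	std::vector<piece_entry> out;
	out.reserve(cache.size()); // avoid reallocating while copying
	for (std::vector<piece_entry>::const_iterator i = cache.begin()
		, end(cache.end()); i != end; ++i)
	{
		if (i->cache_state == piece_entry::read_lru1_ghost
			|| i->cache_state == piece_entry::read_lru2_ghost)
			continue;
		out.push_back(*i);
	}
	return out;
}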
@@ -2682,7 +2682,10 @@ namespace libtorrent

 	int num_outstanding = settings().get_int(settings_pack::checking_mem_usage) * block_size()
 		/ m_torrent_file->piece_length();
-	if (num_outstanding <= 0) num_outstanding = 1;
+	// if we only keep a single read operation in-flight at a time, we suffer
+	// significant performance degradation. Always keep at least two jobs
+	// outstanding
+	if (num_outstanding < 2) num_outstanding = 2;

 	// we might already have some outstanding jobs, if we were paused and
 	// resumed quickly, before the outstanding jobs completed
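The floor matters because the integer division easily comes out to one for large pieces. A worked example with hypothetical numbers (16 KiB blocks, 4 MiB pieces, a checking_mem_usage budget of 256 blocks; these are illustrative values, not libtorrent defaults):

#include <cstdio>

int main()
{
	// hypothetical values, chosen for illustration
	int const block_size = 16 * 1024;         // 16 KiB cache blocks
	int const piece_length = 4 * 1024 * 1024; // 4 MiB pieces
	int const checking_mem_usage = 256;       // memory budget, in blocks

	// same formula as the patch: blocks of budget divided by piece size
	int num_outstanding = checking_mem_usage * block_size / piece_length;
	std::printf("raw: %d\n", num_outstanding); // 256*16384/4194304 = 1

	// a single in-flight hash job leaves the disk idle between jobs;
	// the patch enforces a floor of two outstanding jobs
	if (num_outstanding < 2) num_outstanding = 2;
	std::printf("clamped: %d\n", num_outstanding); // 2
	return 0;
}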