Set the minimum number of checking jobs based on the number of hasher threads

To effectively mask the latency of generating more hash jobs there need to be
at least two jobs in-flight for each hasher thread.
This commit is contained in:
Steven Siloti 2018-06-20 20:37:49 -07:00 committed by Arvid Norberg
parent 1e3a7cf0f4
commit b7f230316c
3 changed files with 13 additions and 5 deletions

View File

@ -388,6 +388,12 @@ namespace libtorrent
hasher_thread
};
enum
{
hasher_thread_mask = 3,
hasher_thread_divisor
};
void thread_fun(int thread_id, thread_type_t type
, boost::shared_ptr<io_service::work> w);

View File

@ -220,9 +220,8 @@ namespace libtorrent
boost::shared_ptr<io_service::work> work =
boost::make_shared<io_service::work>(boost::ref(m_ios));
// the hasher_thread_mask constant is also used in add_job()
// every 4th thread is a hasher thread
if ((thread_id & 0x3) == 3) type = hasher_thread;
if ((thread_id & hasher_thread_mask) == hasher_thread_mask) type = hasher_thread;
m_threads.push_back(boost::shared_ptr<thread>(
new thread(boost::bind(&disk_io_thread::thread_fun, this
, thread_id, type, work))));
@ -3245,7 +3244,7 @@ namespace libtorrent
// if there are at least 3 threads, there's a hasher thread
// and the hash jobs go into a separate queue
// see set_num_threads()
if (m_num_threads > 3 && j->action == disk_io_job::hash)
if (m_num_threads > hasher_thread_mask && j->action == disk_io_job::hash)
{
m_queued_hash_jobs.push_back(j);
}

View File

@ -2770,8 +2770,11 @@ namespace {
/ m_torrent_file->piece_length();
// if we only keep a single read operation in-flight at a time, we suffer
// significant performance degradation. Always keep at least two jobs
// outstanding
if (num_outstanding < 2) num_outstanding = 2;
// outstanding per hasher thread
int const min_outstanding = 2
* std::max(1, settings().get_int(settings_pack::aio_threads)
/ disk_io_thread::hasher_thread_divisor);
if (num_outstanding < min_outstanding) num_outstanding = min_outstanding;
// we might already have some outstanding jobs, if we were paused and
// resumed quickly, before the outstanding jobs completed