diff --git a/include/libtorrent/disk_io_thread.hpp b/include/libtorrent/disk_io_thread.hpp
index 37da2d310..d0c1d027e 100644
--- a/include/libtorrent/disk_io_thread.hpp
+++ b/include/libtorrent/disk_io_thread.hpp
@@ -388,6 +388,12 @@ namespace libtorrent
 			hasher_thread
 		};
 
+		enum
+		{
+			hasher_thread_mask = 3,
+			hasher_thread_divisor
+		};
+
 		void thread_fun(int thread_id, thread_type_t type
 			, boost::shared_ptr<io_service::work> w);
 
diff --git a/src/disk_io_thread.cpp b/src/disk_io_thread.cpp
index 79cf1c169..373d8e2dc 100644
--- a/src/disk_io_thread.cpp
+++ b/src/disk_io_thread.cpp
@@ -220,9 +220,8 @@ namespace libtorrent
 		boost::shared_ptr<io_service::work> work =
 			boost::make_shared<io_service::work>(boost::ref(m_ios));
 
-		// the magic number 3 is also used in add_job()
 		// every 4:th thread is a hasher thread
-		if ((thread_id & 0x3) == 3) type = hasher_thread;
+		if ((thread_id & hasher_thread_mask) == hasher_thread_mask) type = hasher_thread;
 		m_threads.push_back(boost::shared_ptr<thread>(
 			new thread(boost::bind(&disk_io_thread::thread_fun, this
 			, thread_id, type, work))));
@@ -3245,7 +3244,7 @@ namespace libtorrent
 		// if there are at least 3 threads, there's a hasher thread
 		// and the hash jobs go into a separate queue
 		// see set_num_threads()
-		if (m_num_threads > 3 && j->action == disk_io_job::hash)
+		if (m_num_threads > hasher_thread_mask && j->action == disk_io_job::hash)
 		{
 			m_queued_hash_jobs.push_back(j);
 		}
diff --git a/src/torrent.cpp b/src/torrent.cpp
index 2f7e02811..d7ff04bac 100644
--- a/src/torrent.cpp
+++ b/src/torrent.cpp
@@ -2770,8 +2770,11 @@ namespace {
 			/ m_torrent_file->piece_length();
 		// if we only keep a single read operation in-flight at a time, we suffer
 		// significant performance degradation. Always keep at least two jobs
-		// outstanding
-		if (num_outstanding < 2) num_outstanding = 2;
+		// outstanding per hasher thread
+		int const min_outstanding = 2
+			* std::max(1, settings().get_int(settings_pack::aio_threads)
+				/ disk_io_thread::hasher_thread_divisor);
+		if (num_outstanding < min_outstanding) num_outstanding = min_outstanding;
 
 		// we might already have some outstanding jobs, if we were paused and
 		// resumed quickly, before the outstanding jobs completed
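
For context, a minimal standalone sketch (not part of the patch) of how the two
new constants interact: hasher_thread_divisor implicitly takes the next enum
value after hasher_thread_mask = 3, i.e. 4, so one thread in four becomes a
hasher and the minimum number of outstanding jobs scales accordingly. The
main() harness and the sample thread counts here are illustrative only.

	#include <algorithm>
	#include <cstdio>
	#include <initializer_list>

	enum
	{
		hasher_thread_mask = 3,   // (thread_id & mask) == mask -> hasher thread
		hasher_thread_divisor     // implicitly 4: one hasher per 4 aio threads
	};

	int main()
	{
		for (int aio_threads : {1, 4, 8, 16})
		{
			// count the thread ids that the mask designates as hashers
			int hashers = 0;
			for (int thread_id = 0; thread_id < aio_threads; ++thread_id)
				if ((thread_id & hasher_thread_mask) == hasher_thread_mask)
					++hashers;

			// same arithmetic as the torrent.cpp hunk: two outstanding
			// jobs per hasher thread, never fewer than two overall
			int const min_outstanding = 2
				* std::max(1, aio_threads / hasher_thread_divisor);

			std::printf("aio_threads=%2d hashers=%d min_outstanding=%d\n"
				, aio_threads, hashers, min_outstanding);
		}
	}

Note the reliance on implicit enum increment: hasher_thread_divisor is 4 only
because it immediately follows hasher_thread_mask = 3, which keeps the two
values in sync if the mask ever changes to another 2^k - 1 value.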