From 0bcbe433ed7ae21f913f3c7009b03adb00b68a43 Mon Sep 17 00:00:00 2001
From: Arvid Norberg
Date: Sat, 25 Oct 2014 20:07:50 +0000
Subject: [PATCH] deprecate most of cache_status type (use session stats instead)
---
 include/libtorrent/block_cache.hpp | 2 +
 include/libtorrent/disk_io_thread.hpp | 25 ++--
 include/libtorrent/performance_counters.hpp | 1 +
 include/libtorrent/storage.hpp | 3 +-
 src/block_cache.cpp | 4 +-
 src/disk_io_thread.cpp | 131 ++++++++++----------
 src/session_stats.cpp | 1 +
 src/storage.cpp | 6 +-
 test/test_fence.cpp | 14 +--
 test/web_seed_suite.cpp | 22 ++--
 10 files changed, 109 insertions(+), 100 deletions(-)

diff --git a/include/libtorrent/block_cache.hpp b/include/libtorrent/block_cache.hpp
index ea3f6c0eb..d772d348e 100644
--- a/include/libtorrent/block_cache.hpp
+++ b/include/libtorrent/block_cache.hpp
@@ -451,7 +451,9 @@ namespace libtorrent
 void clear(tailqueue& jobs);
 void update_stats_counters(counters& c) const;
+#ifndef TORRENT_NO_DEPRECATE
 void get_stats(cache_status* ret) const;
+#endif
 void set_settings(aux::session_settings const& sett);
 enum reason_t { ref_hashing = 0, ref_reading = 1, ref_flushing = 2 };
diff --git a/include/libtorrent/disk_io_thread.hpp b/include/libtorrent/disk_io_thread.hpp
index ca380cb91..ada9f4457 100644
--- a/include/libtorrent/disk_io_thread.hpp
+++ b/include/libtorrent/disk_io_thread.hpp
@@ -109,10 +109,10 @@ namespace libtorrent
 , reads(0)
 , queued_bytes(0)
 , cache_size(0)
-#endif
 , write_cache_size(0)
 , read_cache_size(0)
 , pinned_blocks(0)
+#endif
 , total_used_buffers(0)
 #ifndef TORRENT_NO_DEPRECATE
 , average_read_time(0)
@@ -126,6 +126,7 @@ namespace libtorrent
 , total_read_back(0)
 #endif
 , read_queue_size(0)
+#ifndef TORRENT_NO_DEPRECATE
 , blocked_jobs(0)
 , queued_jobs(0)
 , peak_queued(0)
@@ -140,6 +141,7 @@ namespace libtorrent
 , arc_write_size(0)
 , arc_volatile_size(0)
 , num_writing_threads(0)
+#endif
 {
 memset(num_fence_jobs, 0, sizeof(num_fence_jobs));
 }
@@ -179,7 +181,7 @@ namespace libtorrent
 // the number of 16 KiB blocks currently in the disk cache (both read and write).
 // This includes both read and write cache.
 int cache_size;
-#endif
+
 // the number of blocks in the cache used for write cache
 int write_cache_size;
@@ -189,6 +191,7 @@ namespace libtorrent
 // the number of blocks with a refcount > 0, i.e.
 // they may not be evicted
 int pinned_blocks;
+#endif
 // the total number of buffers currently in use.
 // This includes the read/write disk cache as well as send and receive buffers
@@ -198,6 +201,7 @@ namespace libtorrent
 // the number of microseconds an average disk I/O job
 // has to wait in the job queue before it gets processed.
+#ifndef TORRENT_NO_DEPRECATE
 // the time read jobs take on average to complete
 // (not including the time in the queue), in microseconds. This only measures
 // read cache misses.
@@ -226,18 +230,22 @@ namespace libtorrent
 // they were flushed before the SHA-1 hash got to hash them.
 // If this is large, a larger cache could significantly improve performance
 int total_read_back;
+#endif
 // number of read jobs in the disk job queue
 int read_queue_size;
+#ifndef TORRENT_NO_DEPRECATE
 // number of jobs blocked because of a fence
 int blocked_jobs;
 // number of jobs waiting to be issued (m_to_issue)
 // average over 30 seconds
 int queued_jobs;
+
 // largest ever seen number of queued jobs
 int peak_queued;
+
 // number of jobs waiting to complete (m_pending)
 // average over 30 seconds
 int pending_jobs;
@@ -264,6 +272,7 @@ namespace libtorrent
 // the number of threads currently writing to disk
 int num_writing_threads;
+#endif
 // counts only fence jobs that are currently blocking jobs
 // not fences that are themselves blocked
@@ -550,23 +559,11 @@ namespace libtorrent
 // average time to serve a job (any job) in microseconds
 average_accumulator m_job_time;
- // the total number of outstanding jobs. This is used to
- // limit the number of jobs issued in parallel. It also creates
- // an opportunity to sort the jobs by physical offset before
- // issued to the AIO subsystem
- boost::atomic<int> m_outstanding_jobs;
-
 // this is the main thread io_service. Callbacks are
 // posted on this in order to have them execute in
 // the main thread.
 io_service& m_ios;
- // the number of jobs that have been blocked by a fence. These
- // jobs are queued up in their respective storage, waiting for
- // the fence to be lowered. This counter is just used to know
- // when it's OK to exit the main loop of the disk thread
- boost::atomic<int> m_num_blocked_jobs;
-
 // this keeps the io_service::run() call blocked from
 // returning. When shutting down, it's possible that
 // the event queue is drained before the disk_io_thread
diff --git a/include/libtorrent/performance_counters.hpp b/include/libtorrent/performance_counters.hpp
index eb7e4a457..09f64abe6 100644
--- a/include/libtorrent/performance_counters.hpp
+++ b/include/libtorrent/performance_counters.hpp
@@ -356,6 +356,7 @@ namespace libtorrent
 pinned_blocks,
 disk_blocks_in_use,
 queued_disk_jobs,
+ num_running_disk_jobs,
 num_read_jobs,
 num_write_jobs,
 num_jobs,
diff --git a/include/libtorrent/storage.hpp b/include/libtorrent/storage.hpp
index 91c5e93b3..7e1c05056 100644
--- a/include/libtorrent/storage.hpp
+++ b/include/libtorrent/storage.hpp
@@ -69,6 +69,7 @@ POSSIBILITY OF SUCH DAMAGE.
 #include "libtorrent/stat_cache.hpp"
 #include "libtorrent/lazy_entry.hpp"
 #include "libtorrent/bitfield.hpp"
+#include "libtorrent/performance_counters.hpp"
 // OVERVIEW
 //
@@ -573,7 +574,7 @@ namespace libtorrent
 // fence_post_none if both the fence and the flush jobs were queued.
 enum { fence_post_fence = 0, fence_post_flush = 1, fence_post_none = 2 };
 int raise_fence(disk_io_job* fence_job, disk_io_job* flush_job
- , boost::atomic<int>* blocked_counter);
+ , counters& cnt);
 bool has_fence() const;
 // called whenever a job completes and is posted back to the
diff --git a/src/block_cache.cpp b/src/block_cache.cpp
index 336afc0ec..03a4262a8 100644
--- a/src/block_cache.cpp
+++ b/src/block_cache.cpp
@@ -1467,14 +1467,13 @@ void block_cache::update_stats_counters(counters& c) const
 c.set_value(counters::arc_volatile_size, m_lru[cached_piece_entry::volatile_read_lru].size());
 }
+#ifndef TORRENT_NO_DEPRECATE
 void block_cache::get_stats(cache_status* ret) const
 {
 ret->write_cache_size = m_write_cache_size;
 ret->read_cache_size = m_read_cache_size;
 ret->pinned_blocks = m_pinned_blocks;
-#ifndef TORRENT_NO_DEPRECATE
 ret->cache_size = m_read_cache_size + m_write_cache_size;
-#endif
 ret->arc_mru_size = m_lru[cached_piece_entry::read_lru1].size();
 ret->arc_mru_ghost_size = m_lru[cached_piece_entry::read_lru1_ghost].size();
@@ -1483,6 +1482,7 @@ void block_cache::get_stats(cache_status* ret) const
 ret->arc_write_size = m_lru[cached_piece_entry::write_lru].size();
 ret->arc_volatile_size = m_lru[cached_piece_entry::volatile_read_lru].size();
 }
+#endif
 void block_cache::set_settings(aux::session_settings const& sett)
 {
diff --git a/src/disk_io_thread.cpp b/src/disk_io_thread.cpp
index 06b5b37fb..f56f6c5e7 100644
--- a/src/disk_io_thread.cpp
+++ b/src/disk_io_thread.cpp
@@ -174,9 +174,7 @@ namespace libtorrent
 , m_file_pool(40)
 , m_disk_cache(block_size, ios, boost::bind(&disk_io_thread::trigger_cache_trim, this), alert_disp)
 , m_stats_counters(cnt)
- , m_outstanding_jobs(0)
 , m_ios(ios)
- , m_num_blocked_jobs(0)
 , m_work(io_service::work(m_ios))
 , m_last_disk_aio_performance_warning(min_time())
 , m_post_alert(alert_disp)
@@ -1140,7 +1138,7 @@ namespace libtorrent
 ptime start_time = time_now_hires();
- ++m_outstanding_jobs;
+ m_stats_counters.inc_stats_counter(counters::num_running_disk_jobs, 1);
 // call disk function
 int ret = (this->*(job_functions[j->action]))(j, completed_jobs);
@@ -1148,7 +1146,7 @@ namespace libtorrent
 // note that -2 errors are OK
 TORRENT_ASSERT(ret != -1 || (j->error.ec && j->error.operation != 0));
- --m_outstanding_jobs;
+ m_stats_counters.inc_stats_counter(counters::num_running_disk_jobs, -1);
 if (ret == retry_job)
 {
@@ -1159,8 +1157,8 @@ namespace libtorrent
 // TODO: a potentially more efficient solution would be to have a special
 // queue for retry jobs, that's only ever run when a job completes, in
- // any thread. It would only work if m_outstanding_jobs > 0
-
+ // any thread. It would only work if counters::num_running_disk_jobs > 0
+
 TORRENT_ASSERT((j->flags & disk_io_job::in_progress) || !j->storage);
 bool need_sleep = m_queued_jobs.empty();
@@ -1637,10 +1635,10 @@ namespace libtorrent
 if (check_fence && j->storage->is_blocked(j))
 {
 // this means the job was queued up inside storage
- ++m_num_blocked_jobs;
+ m_stats_counters.inc_stats_counter(counters::blocked_disk_jobs);
 DLOG("blocked job: %s (torrent: %d total: %d)\n"
 , job_action_name[j->action], j->storage ?
 j->storage->num_blocked() : 0
- , int(m_num_blocked_jobs));
+ , int(m_stats_counters[counters::blocked_disk_jobs]));
 return 2;
 }
@@ -1738,10 +1736,10 @@ namespace libtorrent
 if (storage->is_blocked(j))
 {
 // this means the job was queued up inside storage
- ++m_num_blocked_jobs;
+ m_stats_counters.inc_stats_counter(counters::blocked_disk_jobs);
 DLOG("blocked job: %s (torrent: %d total: %d)\n"
 , job_action_name[j->action], j->storage ? j->storage->num_blocked() : 0
- , int(m_num_blocked_jobs));
+ , int(m_stats_counters[counters::blocked_disk_jobs]));
 // make the holder give up ownership of the buffer
 // since the job was successfully queued up
 buffer.release();
@@ -2784,16 +2782,13 @@ namespace libtorrent
 {
 // These are atomic_counts, so it's safe to access them from
 // a different thread
-
- c.set_value(counters::blocked_disk_jobs, m_num_blocked_jobs);
-
 mutex::scoped_lock jl(m_job_mutex);
- c.set_value(counters::queued_disk_jobs, m_num_blocked_jobs
- + m_queued_jobs.size() + m_queued_hash_jobs.size());
 c.set_value(counters::num_read_jobs, read_jobs_in_use());
 c.set_value(counters::num_write_jobs, write_jobs_in_use());
 c.set_value(counters::num_jobs, jobs_in_use());
+ c.set_value(counters::queued_disk_jobs, m_queued_jobs.size()
+ + m_queued_hash_jobs.size());
 jl.unlock();
@@ -2808,11 +2803,12 @@ namespace libtorrent
 void disk_io_thread::get_cache_info(cache_status* ret, bool no_pieces
 , piece_manager const* storage) const
 {
- mutex::scoped_lock jl(m_job_mutex);
- ret->queued_jobs = m_queued_jobs.size() + m_queued_hash_jobs.size();
- jl.unlock();
+ mutex::scoped_lock l(m_cache_mutex);
+ *ret = m_cache_stats;
 #ifndef TORRENT_NO_DEPRECATE
+ ret->total_used_buffers = m_disk_cache.in_use();
+
 ret->blocks_read_hit = m_stats_counters[counters::num_blocks_cache_hits];
 ret->blocks_read = m_stats_counters[counters::num_blocks_read];
 ret->blocks_written = m_stats_counters[counters::num_blocks_written];
@@ -2836,60 +2832,67 @@ namespace libtorrent
 ret->cumulative_write_time = m_stats_counters[counters::disk_write_time];
 ret->cumulative_hash_time = m_stats_counters[counters::disk_hash_time];
 ret->total_read_back = m_stats_counters[counters::num_read_back];
-#endif
- mutex::scoped_lock l(m_cache_mutex);
- *ret = m_cache_stats;
- ret->total_used_buffers = m_disk_cache.in_use();
- ret->blocked_jobs = m_num_blocked_jobs;
+ ret->blocked_jobs = m_stats_counters[counters::blocked_disk_jobs];
- ret->pending_jobs = m_outstanding_jobs;
 ret->num_jobs = jobs_in_use();
 ret->num_read_jobs = read_jobs_in_use();
 ret->num_write_jobs = write_jobs_in_use();
+ ret->pending_jobs = m_stats_counters[counters::num_running_disk_jobs];
 ret->num_writing_threads = m_stats_counters[counters::num_writing_threads];
 m_disk_cache.get_stats(ret);
+#endif
+
 ret->pieces.clear();
- if (no_pieces) return;
-
- int block_size = m_disk_cache.block_size();
-
- if (storage)
+ if (no_pieces == false)
 {
- ret->pieces.reserve(storage->num_pieces());
-
- for (boost::unordered_set<cached_piece_entry*>::iterator i =
- storage->cached_pieces().begin(), end(storage->cached_pieces().end());
- i != end; ++i)
- {
- TORRENT_ASSERT((*i)->storage.get() == storage);
-
- if ((*i)->cache_state == cached_piece_entry::read_lru2_ghost
- || (*i)->cache_state == cached_piece_entry::read_lru1_ghost)
- continue;
- ret->pieces.push_back(cached_piece_info());
- get_cache_info_impl(ret->pieces.back(), *i, block_size);
+ int block_size = m_disk_cache.block_size();
+
+ if (storage)
+ {
+ ret->pieces.reserve(storage->num_pieces());
+
+ for (boost::unordered_set<cached_piece_entry*>::iterator i
+ = storage->cached_pieces().begin(), end(storage->cached_pieces().end());
+ i != end; ++i)
+ {
+ TORRENT_ASSERT((*i)->storage.get() == storage);
+
+ if ((*i)->cache_state == cached_piece_entry::read_lru2_ghost
+ || (*i)->cache_state == cached_piece_entry::read_lru1_ghost)
+ continue;
+ ret->pieces.push_back(cached_piece_info());
+ get_cache_info_impl(ret->pieces.back(), *i, block_size);
+ }
+ }
+ else
+ {
+ ret->pieces.reserve(m_disk_cache.num_pieces());
+
+ std::pair<block_cache::iterator, block_cache::iterator> range
+ = m_disk_cache.all_pieces();
+
+ for (block_cache::iterator i = range.first; i != range.second; ++i)
+ {
+ if (i->cache_state == cached_piece_entry::read_lru2_ghost
+ || i->cache_state == cached_piece_entry::read_lru1_ghost)
+ continue;
+ ret->pieces.push_back(cached_piece_info());
+ get_cache_info_impl(ret->pieces.back(), &*i, block_size);
+ }
 }
 }
- else
- {
- ret->pieces.reserve(m_disk_cache.num_pieces());
- std::pair<block_cache::iterator, block_cache::iterator> range
- = m_disk_cache.all_pieces();
+ l.unlock();
- for (block_cache::iterator i = range.first; i != range.second; ++i)
- {
- if (i->cache_state == cached_piece_entry::read_lru2_ghost
- || i->cache_state == cached_piece_entry::read_lru1_ghost)
- continue;
- ret->pieces.push_back(cached_piece_info());
- get_cache_info_impl(ret->pieces.back(), &*i, block_size);
- }
- }
+#ifndef TORRENT_NO_DEPRECATE
+ mutex::scoped_lock jl(m_job_mutex);
+ ret->queued_jobs = m_queued_jobs.size() + m_queued_hash_jobs.size();
+ jl.unlock();
+#endif
 }
 int disk_io_thread::do_flush_piece(disk_io_job* j, tailqueue& completed_jobs)
@@ -2927,7 +2930,8 @@ namespace libtorrent
 #if TORRENT_USE_ASSERTS
 pe->piece_log.push_back(piece_log_t(j->action));
 #endif
- TORRENT_PIECE_ASSERT(pe->cache_state <= cached_piece_entry::read_lru1 || pe->cache_state == cached_piece_entry::read_lru2, pe);
+ TORRENT_PIECE_ASSERT(pe->cache_state <= cached_piece_entry::read_lru1
+ || pe->cache_state == cached_piece_entry::read_lru2, pe);
 ++pe->piece_refcount;
 if (!pe->hashing_done)
@@ -3063,7 +3067,7 @@ namespace libtorrent
 disk_io_job* fj = allocate_job(disk_io_job::flush_storage);
 fj->storage = j->storage;
- int ret = storage->raise_fence(j, fj, &m_num_blocked_jobs);
+ int ret = storage->raise_fence(j, fj, m_stats_counters);
 if (ret == disk_job_fence::fence_post_fence)
 {
 mutex::scoped_lock l(m_job_mutex);
@@ -3134,10 +3138,10 @@ namespace libtorrent
 // and should be scheduled
 if (j->storage && j->storage->is_blocked(j))
 {
- ++m_num_blocked_jobs;
+ m_stats_counters.inc_stats_counter(counters::blocked_disk_jobs);
 DLOG("blocked job: %s (torrent: %d total: %d)\n"
 , job_action_name[j->action], j->storage ?
 j->storage->num_blocked() : 0
- , int(m_num_blocked_jobs));
+ , int(m_stats_counters[counters::blocked_disk_jobs]));
 return;
 }
@@ -3211,7 +3215,8 @@ namespace libtorrent
 {
 mutex::scoped_lock l2(m_cache_mutex);
 DLOG("blocked_jobs: %d queued_jobs: %d num_threads %d\n"
- , int(m_num_blocked_jobs), m_queued_jobs.size(), int(m_num_threads));
+ , int(m_stats_counters[counters::blocked_disk_jobs])
+ , m_queued_jobs.size(), int(m_num_threads));
 m_last_cache_expiry = now;
 tailqueue completed_jobs;
 flush_expired_write_blocks(completed_jobs, l2);
@@ -3359,11 +3364,11 @@ namespace libtorrent
 #if DEBUG_DISK_THREAD
 if (ret) DLOG("unblocked %d jobs (%d left)\n", ret
- , int(m_num_blocked_jobs) - ret);
+ , int(m_stats_counters[counters::blocked_disk_jobs]) - ret);
 #endif
- m_num_blocked_jobs -= ret;
- TORRENT_ASSERT(m_num_blocked_jobs >= 0);
+ m_stats_counters.inc_stats_counter(counters::blocked_disk_jobs, -ret);
+ TORRENT_ASSERT(int(m_stats_counters[counters::blocked_disk_jobs]) >= 0);
 if (new_jobs.size() > 0)
 {
diff --git a/src/session_stats.cpp b/src/session_stats.cpp
index 244e6a3cc..31dac7328 100644
--- a/src/session_stats.cpp
+++ b/src/session_stats.cpp
@@ -320,6 +320,7 @@ namespace libtorrent
 METRIC(disk, pinned_blocks)
 METRIC(disk, disk_blocks_in_use)
 METRIC(disk, queued_disk_jobs)
+ METRIC(disk, num_running_disk_jobs)
 METRIC(disk, num_read_jobs)
 METRIC(disk, num_write_jobs)
 METRIC(disk, num_jobs)
diff --git a/src/storage.cpp b/src/storage.cpp
index a2037abc0..cb0e6f2dc 100644
--- a/src/storage.cpp
+++ b/src/storage.cpp
@@ -1612,7 +1612,7 @@ namespace libtorrent
 // fj is the flush job. If the job j is queued, we need to issue
 // this job
 int disk_job_fence::raise_fence(disk_io_job* j, disk_io_job* fj
- , boost::atomic<int>* blocked_counter)
+ , counters& cnt)
 {
 TORRENT_ASSERT((j->flags & disk_io_job::fence) == 0);
 j->flags |= disk_io_job::fence;
@@ -1645,7 +1645,7 @@ namespace libtorrent
 fj->blocked = true;
 #endif
 m_blocked_jobs.push_back(fj);
- ++*blocked_counter;
+ cnt.inc_stats_counter(counters::blocked_disk_jobs);
 }
 else
 {
@@ -1658,7 +1658,7 @@ namespace libtorrent
 j->blocked = true;
 #endif
 m_blocked_jobs.push_back(j);
- ++*blocked_counter;
+ cnt.inc_stats_counter(counters::blocked_disk_jobs);
 return m_has_fence > 1 ? fence_post_none : fence_post_flush;
 }
diff --git a/test/test_fence.cpp b/test/test_fence.cpp
index 7423f33a2..e4af164ce 100644
--- a/test/test_fence.cpp
+++ b/test/test_fence.cpp
@@ -9,14 +9,14 @@ using namespace libtorrent;
 void test_disk_job_empty_fence()
 {
 libtorrent::disk_job_fence fence;
- boost::atomic<int> counter(0);
+ counters cnt;
 disk_io_job test_job[10];
 // issue 5 jobs.
 // None of them should be blocked by a fence
 int ret = 0;
 // add a fence job
- ret = fence.raise_fence(&test_job[5], &test_job[6], &counter);
+ ret = fence.raise_fence(&test_job[5], &test_job[6], cnt);
 // since we don't have any outstanding jobs
 // we need to post this job
 TEST_CHECK(ret == disk_job_fence::fence_post_fence);
@@ -44,7 +44,7 @@ void test_disk_job_empty_fence()
 void test_disk_job_fence()
 {
- boost::atomic<int> counter(0);
+ counters cnt;
 libtorrent::disk_job_fence fence;
 disk_io_job test_job[10];
@@ -68,7 +68,7 @@ void test_disk_job_fence()
 TEST_CHECK(fence.num_blocked() == 0);
 // add a fence job
- ret = fence.raise_fence(&test_job[5], &test_job[6], &counter);
+ ret = fence.raise_fence(&test_job[5], &test_job[6], cnt);
 // since we have outstanding jobs, no need
 // to post anything
 TEST_CHECK(ret == disk_job_fence::fence_post_flush);
@@ -117,7 +117,7 @@ void test_disk_job_fence()
 void test_disk_job_double_fence()
 {
- boost::atomic<int> counter(0);
+ counters cnt;
 libtorrent::disk_job_fence fence;
 disk_io_job test_job[10];
@@ -141,12 +141,12 @@ void test_disk_job_double_fence()
 TEST_CHECK(fence.num_blocked() == 0);
 // add two fence jobs
- ret = fence.raise_fence(&test_job[5], &test_job[6], &counter);
+ ret = fence.raise_fence(&test_job[5], &test_job[6], cnt);
 // since we have outstanding jobs, no need
 // to post anything
 TEST_CHECK(ret == disk_job_fence::fence_post_flush);
- ret = fence.raise_fence(&test_job[7], &test_job[8], &counter);
+ ret = fence.raise_fence(&test_job[7], &test_job[8], cnt);
 // since we have outstanding jobs, no need
 // to post anything
 TEST_CHECK(ret == disk_job_fence::fence_post_none);
diff --git a/test/web_seed_suite.cpp b/test/web_seed_suite.cpp
index 03cc745a0..a6309a3b7 100644
--- a/test/web_seed_suite.cpp
+++ b/test/web_seed_suite.cpp
@@ -209,8 +209,7 @@ void test_transfer(lt::session& ses, boost::shared_ptr<torrent_info> torrent_fil
 // the url seed (i.e.
 // banned it)
 TEST_CHECK(!test_ban || (th.url_seeds().empty() && th.http_seeds().empty()));
- cache_status cs;
- ses.get_cache_info(&cs);
+ std::map<std::string, boost::int64_t> cnt = get_counters(ses);
 // if the web seed sent corrupt data and we banned it, we probably didn't
 // end up using all the cache anyway
@@ -223,21 +222,24 @@ void test_transfer(lt::session& ses, boost::shared_ptr<torrent_info> torrent_fil
 {
 for (int i = 0; i < 50; ++i)
 {
- ses.get_cache_info(&cs);
- if (cs.read_cache_size == (torrent_file->total_size() + 0x3fff) / 0x4000
- && cs.total_used_buffers == (torrent_file->total_size() + 0x3fff) / 0x4000)
+ cnt = get_counters(ses);
+ if (cnt["disk.read_cache_blocks"]
+ == (torrent_file->total_size() + 0x3fff) / 0x4000
+ && cnt["disk.disk_blocks_in_use"]
+ == (torrent_file->total_size() + 0x3fff) / 0x4000)
 break;
- fprintf(stderr, "cache_size: %d/%d\n", int(cs.read_cache_size), int(cs.total_used_buffers));
+ fprintf(stderr, "cache_size: %d/%d\n", int(cnt["disk.read_cache_blocks"])
+ , int(cnt["disk.disk_blocks_in_use"]));
 test_sleep(100);
 }
- TEST_EQUAL(cs.read_cache_size, (torrent_file->total_size() + 0x3fff) / 0x4000);
- TEST_EQUAL(cs.total_used_buffers, (torrent_file->total_size() + 0x3fff) / 0x4000);
+ TEST_EQUAL(cnt["disk.disk_blocks_in_use"]
+ , (torrent_file->total_size() + 0x3fff) / 0x4000);
 }
 }
 std::cerr << "total_size: " << total_size
- << " read cache size: " << cs.read_cache_size
- << " total used buffer: " << cs.total_used_buffers
+ << " read cache size: " << cnt["disk.disk_blocks_in_use"]
+ << " total used buffer: " << cnt["disk.disk_blocks_in_use"]
 << " rate_sum: " << rate_sum
 << " session_rate_sum: " << ses_rate_sum
 << " session total download: " << ses.status().total_payload_download
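
For context on what "use session stats instead" means for callers of the deprecated cache_status fields: the replacement is the session-stats counter mechanism, where an application posts a stats snapshot and looks counters up by metric name. The sketch below is illustrative only and is not part of this patch; it assumes the 1.1-era API (session_stats_metrics(), session::post_session_stats(), session_stats_alert with a values array, pop_alerts()) and uses the disk metric names touched by this change (disk.queued_disk_jobs, disk.num_running_disk_jobs, disk.blocked_disk_jobs). Exact alert handling differs between libtorrent versions.

// Illustrative sketch only -- not part of this patch. Assumes the
// libtorrent 1.1-era session-stats API.
#include <libtorrent/session.hpp>
#include <libtorrent/session_stats.hpp>
#include <libtorrent/alert_types.hpp>
#include <cstdio>
#include <cstring>
#include <vector>

// resolve a counter's index into session_stats_alert::values from its metric name
int metric_index(char const* name)
{
	std::vector<libtorrent::stats_metric> metrics = libtorrent::session_stats_metrics();
	for (std::size_t i = 0; i < metrics.size(); ++i)
		if (std::strcmp(metrics[i].name, name) == 0) return metrics[i].value_index;
	return -1;
}

void print_disk_job_counters(libtorrent::session& ses)
{
	using namespace libtorrent;

	int const queued = metric_index("disk.queued_disk_jobs");
	int const running = metric_index("disk.num_running_disk_jobs");
	int const blocked = metric_index("disk.blocked_disk_jobs");
	if (queued < 0 || running < 0 || blocked < 0) return;

	// ask the session to post a session_stats_alert carrying a snapshot of all counters
	ses.post_session_stats();
	ses.wait_for_alert(seconds(1));

	std::vector<alert*> alerts;
	ses.pop_alerts(&alerts);
	for (std::size_t i = 0; i < alerts.size(); ++i)
	{
		session_stats_alert* ss = alert_cast<session_stats_alert>(alerts[i]);
		if (ss == 0) continue;
		std::printf("queued: %lld running: %lld blocked: %lld\n"
			, (long long)ss->values[queued]
			, (long long)ss->values[running]
			, (long long)ss->values[blocked]);
	}
}

The patch's own tests follow the same idea through the get_counters() helper used in web_seed_suite.cpp, which returns the counters keyed by these metric names.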