From d6fbff13df7a8c25e23c7fb23549c8dc69e8cf8c Mon Sep 17 00:00:00 2001 From: Arvid Norberg Date: Tue, 15 Mar 2011 02:21:28 +0000 Subject: [PATCH] measure disk sort time (phys_offset) --- include/libtorrent/disk_io_thread.hpp | 6 ++++++ src/disk_io_thread.cpp | 11 ++++++++--- src/session.cpp | 3 +++ src/session_impl.cpp | 4 +++- 4 files changed, 20 insertions(+), 4 deletions(-) diff --git a/include/libtorrent/disk_io_thread.hpp b/include/libtorrent/disk_io_thread.hpp index d25498594..3bc09489e 100644 --- a/include/libtorrent/disk_io_thread.hpp +++ b/include/libtorrent/disk_io_thread.hpp @@ -172,6 +172,7 @@ namespace libtorrent , average_write_time(0) , average_hash_time(0) , average_cache_time(0) + , average_sort_time(0) , job_queue_length(0) {} @@ -208,6 +209,7 @@ namespace libtorrent int average_write_time; int average_hash_time; int average_cache_time; + int average_sort_time; int job_queue_length; }; @@ -448,6 +450,10 @@ namespace libtorrent // scanning the cache for pieces to flush sliding_average<512> m_cache_time; + // average time to ask for physical offset on disk + // and insert into queue + sliding_average<512> m_sort_time; + typedef std::multimap<size_type, disk_io_job> read_jobs_t; read_jobs_t m_sorted_read_jobs; diff --git a/src/disk_io_thread.cpp b/src/disk_io_thread.cpp index 4919b28cd..6564c3c9c 100644 --- a/src/disk_io_thread.cpp +++ b/src/disk_io_thread.cpp @@ -344,6 +344,7 @@ namespace libtorrent ret.average_write_time = m_write_time.mean(); ret.average_hash_time = m_hash_time.mean(); ret.average_cache_time = m_cache_time.mean(); + ret.average_sort_time = m_sort_time.mean(); ret.job_queue_length = m_jobs.size() + m_sorted_read_jobs.size(); return ret; @@ -1633,9 +1634,13 @@ namespace libtorrent #ifdef TORRENT_DISK_STATS m_log << log_time() << " sorting_job" << std::endl; #endif + ptime sort_start = time_now_hires(); + size_type phys_off = j.storage->physical_offset(j.piece, j.offset); need_update_elevator_pos = need_update_elevator_pos || m_sorted_read_jobs.empty(); 
m_sorted_read_jobs.insert(std::pair<size_type, disk_io_job>(phys_off, j)); + + m_sort_time.add_sample(total_microseconds(time_now_hires() - sort_start)); continue; } } @@ -1700,7 +1705,8 @@ namespace libtorrent flush_expired_pieces(); - m_cache_time.add_sample(total_microseconds(time_now_hires() - now)); + ptime operation_start = time_now_hires(); + m_cache_time.add_sample(total_microseconds(operation_start - now)); int ret = 0; @@ -1970,7 +1976,6 @@ namespace libtorrent break; } - ptime read_start = time_now_hires(); disk_buffer_holder read_holder(*this, j.buffer); bool hit; @@ -2011,7 +2016,7 @@ namespace libtorrent } if (!hit) { - m_read_time.add_sample(total_microseconds(time_now_hires() - read_start)); + m_read_time.add_sample(total_microseconds(time_now_hires() - operation_start)); } TORRENT_ASSERT(j.buffer == read_holder.get()); read_holder.release(); diff --git a/src/session.cpp b/src/session.cpp index fdcf9b565..993cd5818 100644 --- a/src/session.cpp +++ b/src/session.cpp @@ -192,6 +192,9 @@ namespace libtorrent // allow 500 files open at a time set.file_pool_size = 500; + // don't update access time for each read/write + set.no_atime_storage = true; + // as a seed box, we must accept multiple peers behind // the same NAT set.allow_multiple_connections_per_ip = true; diff --git a/src/session_impl.cpp b/src/session_impl.cpp index 2a15e31e3..f42313725 100644 --- a/src/session_impl.cpp +++ b/src/session_impl.cpp @@ -946,6 +946,7 @@ namespace aux { ":disk buffer allocations" ":disk hash time" ":disk cache time" + ":disk sort time" "\n\n", m_stats_logger); } #endif @@ -2715,7 +2716,7 @@ namespace aux { "%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t" "%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t" "%f\t%f\t%f\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t" - "%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n" + "%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n" , total_milliseconds(now - m_last_log_rotation) / 1000.f , int(m_stat.total_upload() - m_last_uploaded) , int(m_stat.total_download() - m_last_downloaded) @@ -2795,6 +2796,7 
@@ namespace aux { , cs.total_used_buffers , int(cs.average_hash_time) , int(cs.average_cache_time) + , int(cs.average_sort_time) ); m_last_cache_status = cs; m_last_failed = m_total_failed_bytes;