measure disk sort time (phys_offset)

This commit is contained in:
Arvid Norberg 2011-03-15 02:21:28 +00:00
parent f9f5e3138c
commit d6fbff13df
4 changed files with 20 additions and 4 deletions

View File

@@ -172,6 +172,7 @@ namespace libtorrent
 			, average_write_time(0)
 			, average_hash_time(0)
 			, average_cache_time(0)
+			, average_sort_time(0)
 			, job_queue_length(0)
 		{}
@@ -208,6 +209,7 @@ namespace libtorrent
 		int average_write_time;
 		int average_hash_time;
 		int average_cache_time;
+		int average_sort_time;
 		int job_queue_length;
 	};
@@ -448,6 +450,10 @@ namespace libtorrent
 		// scanning the cache for pieces to flush
 		sliding_average<512> m_cache_time;
+
+		// average time to ask for physical offset on disk
+		// and insert into queue
+		sliding_average<512> m_sort_time;
 
 		typedef std::multimap<size_type, disk_io_job> read_jobs_t;
 		read_jobs_t m_sorted_read_jobs;

View File

@@ -344,6 +344,7 @@ namespace libtorrent
 		ret.average_write_time = m_write_time.mean();
 		ret.average_hash_time = m_hash_time.mean();
 		ret.average_cache_time = m_cache_time.mean();
+		ret.average_sort_time = m_sort_time.mean();
 		ret.job_queue_length = m_jobs.size() + m_sorted_read_jobs.size();
 		return ret;
@@ -1633,9 +1634,13 @@ namespace libtorrent
 #ifdef TORRENT_DISK_STATS
 				m_log << log_time() << " sorting_job" << std::endl;
 #endif
+
+				ptime sort_start = time_now_hires();
 				size_type phys_off = j.storage->physical_offset(j.piece, j.offset);
 				need_update_elevator_pos = need_update_elevator_pos || m_sorted_read_jobs.empty();
 				m_sorted_read_jobs.insert(std::pair<size_type, disk_io_job>(phys_off, j));
+				m_sort_time.add_sample(total_microseconds(time_now_hires() - sort_start));
+
 				continue;
 			}
 		}
} }
@@ -1700,7 +1705,8 @@ namespace libtorrent
 			flush_expired_pieces();
-			m_cache_time.add_sample(total_microseconds(time_now_hires() - now));
+			ptime operation_start = time_now_hires();
+			m_cache_time.add_sample(total_microseconds(operation_start - now));
 			int ret = 0;
@@ -1970,7 +1976,6 @@ namespace libtorrent
 					break;
 				}
-				ptime read_start = time_now_hires();
 				disk_buffer_holder read_holder(*this, j.buffer);
 				bool hit;
@@ -2011,7 +2016,7 @@ namespace libtorrent
 			}
 			if (!hit)
 			{
-				m_read_time.add_sample(total_microseconds(time_now_hires() - read_start));
+				m_read_time.add_sample(total_microseconds(time_now_hires() - operation_start));
 			}
 			TORRENT_ASSERT(j.buffer == read_holder.get());
 			read_holder.release();

View File

@@ -192,6 +192,9 @@ namespace libtorrent
 		// allow 500 files open at a time
 		set.file_pool_size = 500;
+
+		// don't update access time for each read/write
+		set.no_atime_storage = true;
 		// as a seed box, we must accept multiple peers behind
 		// the same NAT
 		set.allow_multiple_connections_per_ip = true;

View File

@@ -946,6 +946,7 @@ namespace aux {
 			":disk buffer allocations"
 			":disk hash time"
 			":disk cache time"
+			":disk sort time"
 			"\n\n", m_stats_logger);
 	}
 #endif
@@ -2715,7 +2716,7 @@ namespace aux {
 			"%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t"
 			"%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t"
 			"%f\t%f\t%f\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t"
-			"%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n"
+			"%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n"
 			, total_milliseconds(now - m_last_log_rotation) / 1000.f
 			, int(m_stat.total_upload() - m_last_uploaded)
 			, int(m_stat.total_download() - m_last_downloaded)
@ -2795,6 +2796,7 @@ namespace aux {
, cs.total_used_buffers , cs.total_used_buffers
, int(cs.average_hash_time) , int(cs.average_hash_time)
, int(cs.average_cache_time) , int(cs.average_cache_time)
, int(cs.average_sort_time)
); );
m_last_cache_status = cs; m_last_cache_status = cs;
m_last_failed = m_total_failed_bytes; m_last_failed = m_total_failed_bytes;