change semantics of disk read time and add disk hash time

Arvid Norberg 2011-03-14 05:21:46 +00:00
parent ff7bbc6cd0
commit a08bcf7459
5 changed files with 34 additions and 9 deletions

View File

@@ -853,6 +853,8 @@ Returns status of the disk cache for this session.
int total_used_buffers;
int average_queue_time;
int average_read_time;
int average_write_time;
int average_hash_time;
int job_queue_length;
};
@@ -886,9 +888,18 @@ used in peer connections.
``average_queue_time`` is the number of microseconds an average disk I/O job
has to wait in the job queue before it gets processed.
``average_read_time`` is the number of microseconds a read job takes to
wait in the queue and complete. This only includes cache misses.
``average_read_time`` is the time read jobs take on average to complete
(not including the time spent in the queue), in microseconds. This only
measures read cache misses.
``average_write_time`` is the time write jobs take to complete, on average,
in microseconds. This does not include the time the job sits in the disk job
queue or in the write cache; it only measures blocks that are flushed to disk.
``average_hash_time`` is the time hash jobs take to complete on average, in
microseconds. Hash jobs include running SHA-1 on the data (which for the most
part is done incrementally) and sometimes reading back parts of the piece.
Hash jobs also include checking files without valid resume data.
``job_queue_length`` is the number of jobs in the job queue.
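
For context, these counters are read out of the session's ``cache_status``. A
minimal sketch of polling them, assuming the ``session::get_cache_status()``
accessor of this API generation (the accessor itself is not part of this
commit)::

    #include <libtorrent/session.hpp>
    #include <cstdio>

    void print_disk_stats(libtorrent::session& ses)
    {
        // all averages are reported in microseconds
        libtorrent::cache_status cs = ses.get_cache_status();
        std::printf("queue: %d  read: %d  write: %d  hash: %d  jobs: %d\n"
            , cs.average_queue_time
            , cs.average_read_time
            , cs.average_write_time
            , cs.average_hash_time
            , cs.job_queue_length);
    }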

View File

@@ -170,6 +170,7 @@ namespace libtorrent
, average_queue_time(0)
, average_read_time(0)
, average_write_time(0)
, average_hash_time(0)
, job_queue_length(0)
{}
@@ -204,6 +205,7 @@ namespace libtorrent
int average_queue_time;
int average_read_time;
int average_write_time;
int average_hash_time;
int job_queue_length;
};
@@ -429,13 +431,16 @@ namespace libtorrent
cache_status m_cache_stats;
// keeps average queue time for disk jobs (in microseconds)
sliding_average<4096> m_queue_time;
sliding_average<512> m_queue_time;
// average read time for cache misses (in microseconds)
sliding_average<4096> m_read_time;
sliding_average<512> m_read_time;
// average write time (in microseconds)
sliding_average<4096> m_write_time;
sliding_average<512> m_write_time;
// average hash time (in microseconds)
sliding_average<512> m_hash_time;
typedef std::multimap<size_type, disk_io_job> read_jobs_t;
read_jobs_t m_sorted_read_jobs;
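
Shrinking the window from 4096 to 512 samples makes the reported averages
react faster to changes in disk load, at the cost of a noisier signal. As a
rough illustration of the kind of running average these members keep (an
illustrative sketch only; libtorrent's actual sliding_average implementation
may differ):

    // illustrative running average with a fixed gain of 1/window_size
    template <int window_size>
    struct running_average
    {
        running_average() : m_mean(0), m_num_samples(0) {}

        void add_sample(int s)
        {
            // ramp the divisor up until the window is full, then keep it fixed
            if (m_num_samples < window_size) ++m_num_samples;
            // each new sample nudges the mean by 1/m_num_samples of the error
            m_mean += (s - m_mean) / m_num_samples;
        }

        int mean() const { return m_mean; }

    private:
        int m_mean;
        int m_num_samples;
    };

    // e.g.: running_average<512> read_time; read_time.add_sample(elapsed_us);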

View File

@@ -59,7 +59,7 @@ gen_report('peer_errors', ['error peers', 'peer disconnects', 'peers eof', 'peer
gen_report('piece_picker_end_game', ['end game piece picker blocks', 'strict end game piece picker blocks', 'piece picker blocks', 'piece picks', 'reject piece picks', 'unchoked piece picks', 'incoming redundant piece picks', 'incoming piece picks', 'end game piece picks', 'snubbed piece picks'])
gen_report('piece_picker', ['piece picks', 'reject piece picks', 'unchoked piece picks', 'incoming redundant piece picks', 'incoming piece picks', 'end game piece picks', 'snubbed piece picks'])
gen_report('bandwidth', ['% failed payload bytes', '% wasted payload bytes', '% protocol bytes'])
gen_report('disk_time', ['disk read time', 'disk write time', 'disk queue time'])
gen_report('disk_time', ['disk read time', 'disk write time', 'disk queue time', 'disk hash time'])
gen_report('disk_cache_hits', ['disk block read', 'read cache hits', 'disk block written'])
gen_report('disk_cache', ['read disk cache size', 'disk cache size', 'disk buffer allocations'])
gen_report('disk_queue', ['disk queue size', 'disk queued bytes'])

View File

@@ -1966,6 +1966,7 @@ namespace libtorrent
break;
}
ptime read_start = time_now_hires();
disk_buffer_holder read_holder(*this, j.buffer);
bool hit;
@@ -2006,8 +2007,7 @@
}
if (!hit)
{
ptime now = time_now_hires();
m_read_time.add_sample(total_microseconds(now - j.start_time));
m_read_time.add_sample(total_microseconds(time_now_hires() - read_start));
}
TORRENT_ASSERT(j.buffer == read_holder.get());
read_holder.release();
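
The pattern in this hunk is to start the clock right before the actual disk
work rather than when the job was queued, so the queue wait (already tracked
by m_queue_time) is not counted twice, and to sample only cache misses, since
hits never touch the disk. A stripped-down sketch of that pattern, assuming
libtorrent/time.hpp provides the ptime helpers used above and with do_read()
standing in for the real read path:

    #include "libtorrent/time.hpp" // ptime, time_now_hires(), total_microseconds()

    using namespace libtorrent;

    void do_read() {} // placeholder for the actual read work

    // returns the elapsed read time in microseconds, or -1 for a cache hit
    int time_read_job(bool cache_hit)
    {
        // start the clock just before the work, not when the job was queued
        ptime read_start = time_now_hires();

        do_read();

        if (cache_hit) return -1;
        // only cache misses touch the disk, so only they are sampled,
        // e.g. m_read_time.add_sample(elapsed)
        return int(total_microseconds(time_now_hires() - read_start));
    }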
@@ -2133,6 +2133,8 @@ namespace libtorrent
mutex::scoped_lock l(m_piece_mutex);
INVARIANT_CHECK;
ptime hash_start = time_now_hires();
cache_piece_index_t& idx = m_pieces.get<0>();
cache_piece_index_t::iterator i = find_cached_piece(m_pieces, j, l);
if (i != idx.end())
@@ -2163,6 +2165,8 @@
ret = (j.storage->info()->hash_for_piece(j.piece) == h)?0:-2;
if (ret == -2) j.storage->mark_failed(j.piece);
m_hash_time.add_sample(total_microseconds(time_now_hires() - hash_start));
break;
}
case disk_io_job::move_storage:
@@ -2313,10 +2317,13 @@
m_last_file_check = time_now_hires();
#endif
ptime hash_start = time_now_hires();
if (m_waiting_to_shutdown) break;
ret = j.storage->check_files(j.piece, j.offset, j.error);
m_hash_time.add_sample(total_microseconds(time_now_hires() - hash_start));
TORRENT_TRY {
TORRENT_ASSERT(j.callback);
if (j.callback && ret == piece_manager::need_full_check)

View File

@@ -939,6 +939,7 @@ namespace aux {
":read disk cache size"
":disk cache size"
":disk buffer allocations"
":disk hash time"
"\n\n", m_stats_logger);
}
#endif
@@ -2783,6 +2784,7 @@ namespace aux {
, cs.read_cache_size
, cs.cache_size
, cs.total_used_buffers
, int(cs.average_hash_time)
);
m_last_cache_status = cs;
m_last_failed = m_total_failed_bytes;
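
The stats logger writes one colon-separated header of column names followed by
rows of values, so a new counter such as the hash time has to be appended in
the same position in both the header and the value list. A minimal,
hypothetical sketch of that layout (file name and columns chosen for
illustration, not the actual session_impl code):

    #include <cstdio>

    int main()
    {
        std::FILE* log = std::fopen("stats_example.log", "w+");
        if (log == 0) return 1;

        // header: one name per column, in the same order as the values below
        std::fprintf(log, "second:disk read time:disk write time:disk hash time\n\n");

        // one sample row; the column order must match the header exactly
        int second = 1, read_us = 250, write_us = 400, hash_us = 900;
        std::fprintf(log, "%d\t%d\t%d\t%d\n", second, read_us, write_us, hash_us);

        std::fclose(log);
        return 0;
    }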