add disk cache stats to session stats

This commit is contained in:
Arvid Norberg 2011-03-11 07:37:12 +00:00
parent 44fe8c160a
commit 03a7442532
5 changed files with 41 additions and 1 deletion

View File

@ -892,6 +892,7 @@ namespace libtorrent
int m_connect_timeouts;
int m_uninteresting_peers;
int m_timeout_peers;
cache_status m_last_cache_status;
#endif
// each second tick the timer takes a little

View File

@ -169,6 +169,7 @@ namespace libtorrent
, total_used_buffers(0)
, average_queue_time(0)
, average_read_time(0)
, average_write_time(0)
, job_queue_length(0)
{}
@ -202,6 +203,7 @@ namespace libtorrent
// times in microseconds
int average_queue_time;
int average_read_time;
int average_write_time;
int job_queue_length;
};
@ -432,6 +434,9 @@ namespace libtorrent
// average read time for cache misses (in microseconds)
sliding_average<512> m_read_time;
// average write time (in microseconds)
sliding_average<512> m_write_time;
typedef std::multimap<size_type, disk_io_job> read_jobs_t;
read_jobs_t m_sorted_read_jobs;

View File

@ -59,4 +59,7 @@ gen_report('peer_errors', ['error peers', 'peer disconnects', 'peers eof', 'peer
gen_report('piece_picker_end_game', ['end game piece picker blocks', 'strict end game piece picker blocks', 'piece picker blocks', 'piece picks', 'reject piece picks', 'unchoked piece picks', 'incoming redundant piece picks', 'incoming piece picks', 'end game piece picks', 'snubbed piece picks'])
gen_report('piece_picker', ['piece picks', 'reject piece picks', 'unchoked piece picks', 'incoming redundant piece picks', 'incoming piece picks', 'end game piece picks', 'snubbed piece picks'])
gen_report('bandwidth', ['% failed payload bytes', '% wasted payload bytes', '% protocol bytes'])
gen_report('disk_time', ['disk read time', 'disk write time', 'disk queue time'])
gen_report('disk_cache', ['disk block read', 'read cache hits'])
gen_report('disk_queue', ['disk queue size', 'disk queued bytes'])

View File

@ -341,6 +341,7 @@ namespace libtorrent
ret.average_queue_time = m_queue_time.mean();
ret.average_read_time = m_read_time.mean();
ret.average_write_time = m_write_time.mean();
ret.job_queue_length = m_jobs.size() + m_sorted_read_jobs.size();
return ret;
@ -671,6 +672,8 @@ namespace libtorrent
else iov = TORRENT_ALLOCA(file::iovec_t, blocks_in_piece);
end = (std::min)(end, blocks_in_piece);
int num_write_calls = 0;
ptime write_start = time_now_hires();
for (int i = start; i <= end; ++i)
{
if (i == end || p.blocks[i].buf == 0)
@ -684,6 +687,7 @@ namespace libtorrent
p.storage->write_impl(iov, p.piece, (std::min)(
i * m_block_size, piece_size) - buffer_size, iov_counter);
iov_counter = 0;
++num_write_calls;
}
else
{
@ -691,6 +695,7 @@ namespace libtorrent
file::iovec_t b = { buf.get(), buffer_size };
p.storage->write_impl(&b, p.piece, (std::min)(
i * m_block_size, piece_size) - buffer_size, 1);
++num_write_calls;
}
l.lock();
++m_cache_stats.writes;
@ -723,6 +728,8 @@ namespace libtorrent
--m_cache_stats.cache_size;
}
ptime done = time_now_hires();
int ret = 0;
disk_io_job j;
j.storage = p.storage;
@ -745,6 +752,11 @@ namespace libtorrent
}
if (!buffers.empty()) free_multiple_buffers(&buffers[0], buffers.size());
if (num_write_calls > 0)
{
m_write_time.add_sample(total_microseconds(done - write_start) / num_write_calls);
}
TORRENT_ASSERT(buffer_size == 0);
// std::cerr << " flushing p: " << p.piece << " cached_blocks: " << m_cache_stats.cache_size << std::endl;
#ifdef TORRENT_DEBUG
@ -2058,6 +2070,7 @@ namespace libtorrent
if (cache_block(j, j.callback, j.cache_min_time, l) < 0)
{
l.unlock();
ptime start = time_now_hires();
file::iovec_t iov = {j.buffer, j.buffer_size};
ret = j.storage->write_impl(&iov, j.piece, j.offset, 1);
l.lock();
@ -2066,6 +2079,8 @@ namespace libtorrent
test_error(j);
break;
}
ptime done = time_now_hires();
m_write_time.add_sample(total_microseconds(done - start));
// we successfully wrote the block. Ignore previous errors
j.storage->clear_error();
break;

View File

@ -921,6 +921,13 @@ namespace aux {
":% failed payload bytes"
":% wasted payload bytes"
":% protocol bytes"
":disk read time"
":disk write time"
":disk queue time"
":disk queue size"
":disk queued bytes"
":read cache hits"
":disk block read"
"\n\n", m_stats_logger);
}
#endif
@ -2681,6 +2688,7 @@ namespace aux {
if (m_stats_logger)
{
cache_status cs = m_disk_thread.status();
fprintf(m_stats_logger
, "%f\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t"
"%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t"
@ -2688,7 +2696,7 @@ namespace aux {
"%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t"
"%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t"
"%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t"
"%f\t%f\t%f\n"
"%f\t%f\t%f\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n"
, total_milliseconds(now - m_last_log_rotation) / 1000.f
, int(upload_rate)
, int(download_rate)
@ -2752,7 +2760,15 @@ namespace aux {
, (float(m_total_failed_bytes) * 100.f / m_stat.total_payload_download())
, (float(m_total_redundant_bytes) * 100.f / m_stat.total_payload_download())
, (float(m_stat.total_protocol_download()) * 100.f / m_stat.total_download())
, int(cs.average_read_time)
, int(cs.average_write_time)
, int(cs.average_queue_time)
, int(cs.job_queue_length)
, int(cs.queued_bytes)
, int(cs.blocks_read_hit - m_last_cache_status.blocks_read_hit)
, int(cs.blocks_read - m_last_cache_status.blocks_read)
);
m_last_cache_status = cs;
}
m_error_peers = 0;