more disk thread timing

Arvid Norberg 2011-03-19 22:23:58 +00:00
parent a6745558e7
commit c015e28792
4 changed files with 41 additions and 6 deletions


@@ -174,6 +174,11 @@ namespace libtorrent
 , average_job_time(0)
 , average_sort_time(0)
 , job_queue_length(0)
+, cumulative_job_time(0)
+, cumulative_read_time(0)
+, cumulative_write_time(0)
+, cumulative_hash_time(0)
+, cumulative_sort_time(0)
 {}
 // the number of 16kB blocks written
@@ -211,6 +216,12 @@ namespace libtorrent
 int average_job_time;
 int average_sort_time;
 int job_queue_length;
+boost::uint32_t cumulative_job_time;
+boost::uint32_t cumulative_read_time;
+boost::uint32_t cumulative_write_time;
+boost::uint32_t cumulative_hash_time;
+boost::uint32_t cumulative_sort_time;
 };
 struct TORRENT_EXPORT disk_buffer_pool : boost::noncopyable
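
The new cumulative_* fields differ from the average_* fields next to them: the averages are sliding-window estimates of recent per-operation cost, while the cumulative counters are total milliseconds spent since the session started and only ever grow. A minimal sketch of how a client could poll them, assuming the session exposes this struct through a get_cache_status()-style accessor (the accessor name here is an assumption, not part of this commit):

#include <cstdio>
#include <libtorrent/session.hpp>

// prev holds the snapshot from the previous poll; the delta between
// two polls is the disk time spent in that interval, in milliseconds
void log_disk_time(libtorrent::session& ses, libtorrent::cache_status& prev)
{
	libtorrent::cache_status cs = ses.get_cache_status(); // assumed accessor
	std::printf("read: %u ms  write: %u ms  hash: %u ms\n"
		, cs.cumulative_read_time - prev.cumulative_read_time
		, cs.cumulative_write_time - prev.cumulative_write_time
		, cs.cumulative_hash_time - prev.cumulative_hash_time);
	prev = cs;
}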


@@ -62,6 +62,7 @@ gen_report('piece_picker_end_game', ['end game piece picker blocks', 'piece pick
 gen_report('piece_picker', ['piece picks', 'reject piece picks', 'unchoke piece picks', 'incoming redundant piece picks', 'incoming piece picks', 'end game piece picks', 'snubbed piece picks'])
 gen_report('bandwidth', ['% failed payload bytes', '% wasted payload bytes', '% protocol bytes'])
 gen_report('disk_time', ['disk read time', 'disk write time', 'disk queue time', 'disk hash time', 'disk job time', 'disk sort time'])
+gen_report('disk_time2', ['cumulative read time', 'cumulative write time', 'cumulative hash time', 'cumulative job time', 'cumulative sort time'])
 gen_report('disk_cache_hits', ['disk block read', 'read cache hits', 'disk block written'])
 gen_report('disk_cache', ['read disk cache size', 'disk cache size', 'disk buffer allocations', 'cache size'])
 gen_report('disk_queue', ['disk queue size', 'disk queued bytes'])


@@ -758,6 +758,7 @@ namespace libtorrent
 if (num_write_calls > 0)
 {
 m_write_time.add_sample(total_microseconds(done - write_start) / num_write_calls);
+m_cache_stats.cumulative_write_time += total_milliseconds(done - write_start);
 p.num_contiguous_blocks = contiguous_blocks(p);
 }
@@ -1646,7 +1647,9 @@ namespace libtorrent
 need_update_elevator_pos = need_update_elevator_pos || m_sorted_read_jobs.empty();
 m_sorted_read_jobs.insert(std::pair<size_type, disk_io_job>(phys_off, j));
-m_sort_time.add_sample(total_microseconds(time_now_hires() - sort_start));
+ptime now = time_now_hires();
+m_sort_time.add_sample(total_microseconds(now - sort_start));
+m_cache_stats.cumulative_sort_time += total_milliseconds(now - sort_start);
 continue;
 }
 }
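
Each of the timing hunks in this file follows the same pattern: the high-resolution clock is read once into a local variable, the microsecond delta feeds the sliding-average sampler, and the millisecond delta is added to the new cumulative counter. Reading the clock once keeps the two statistics consistent and saves a second clock call. A standalone sketch of the pattern, recast with std::chrono in place of libtorrent's ptime/time_now_hires() helpers (the sliding_average stub is a placeholder, not libtorrent's implementation):

#include <chrono>
#include <cstdint>

struct sliding_average { void add_sample(std::int64_t /*us*/) { /* ... */ } };

static sliding_average m_sort_time;
static std::uint32_t cumulative_sort_time = 0;

void timed_operation()
{
	using clock = std::chrono::high_resolution_clock;
	clock::time_point start = clock::now();
	// ... the work being measured ...
	clock::time_point now = clock::now(); // read the clock once
	// microseconds go to the recent-average estimator
	m_sort_time.add_sample(std::chrono::duration_cast<
		std::chrono::microseconds>(now - start).count());
	// whole milliseconds accumulate into the session-lifetime total
	cumulative_sort_time += static_cast<std::uint32_t>(
		std::chrono::duration_cast<std::chrono::milliseconds>(now - start).count());
}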
@@ -2021,7 +2024,9 @@ namespace libtorrent
 }
 if (!hit)
 {
-m_read_time.add_sample(total_microseconds(time_now_hires() - operation_start));
+ptime now = time_now_hires();
+m_read_time.add_sample(total_microseconds(now - operation_start));
+m_cache_stats.cumulative_read_time += total_milliseconds(now - operation_start);
 }
 TORRENT_ASSERT(j.buffer == read_holder.get());
 read_holder.release();
@ -2109,6 +2114,7 @@ namespace libtorrent
}
ptime done = time_now_hires();
m_write_time.add_sample(total_microseconds(done - start));
m_cache_stats.cumulative_write_time += total_milliseconds(done - start);
// we successfully wrote the block. Ignore previous errors
j.storage->clear_error();
break;
@@ -2194,7 +2200,9 @@ namespace libtorrent
 ret = (j.storage->info()->hash_for_piece(j.piece) == h)?0:-2;
 if (ret == -2) j.storage->mark_failed(j.piece);
-m_hash_time.add_sample(total_microseconds(time_now_hires() - hash_start));
+ptime done = time_now_hires();
+m_hash_time.add_sample(total_microseconds(done - hash_start));
+m_cache_stats.cumulative_hash_time += total_milliseconds(done - hash_start);
 break;
 }
 case disk_io_job::move_storage:
@@ -2350,7 +2358,9 @@ namespace libtorrent
 ret = j.storage->check_files(j.piece, j.offset, j.error);
-m_hash_time.add_sample(total_microseconds(time_now_hires() - hash_start));
+ptime done = time_now_hires();
+m_hash_time.add_sample(total_microseconds(done - hash_start));
+m_cache_stats.cumulative_hash_time += total_milliseconds(done - hash_start);
 TORRENT_TRY {
 TORRENT_ASSERT(j.callback);
@@ -2412,7 +2422,9 @@ namespace libtorrent
 TORRENT_ASSERT(!j.storage || !j.storage->error());
-m_job_time.add_sample(total_microseconds(time_now_hires() - operation_start));
+ptime done = time_now_hires();
+m_job_time.add_sample(total_microseconds(done - operation_start));
+m_cache_stats.cumulative_job_time += total_milliseconds(done - operation_start);
 // if (!j.callback) std::cerr << "DISK THREAD: no callback specified" << std::endl;
 // else std::cerr << "DISK THREAD: invoking callback" << std::endl;


@@ -959,6 +959,11 @@ namespace aux {
 ":connect candidates"
 ":disk queue limit"
 ":disk queue low watermark"
+":cumulative job time"
+":cumulative read time"
+":cumulative write time"
+":cumulative hash time"
+":cumulative sort time"
 "\n\n", m_stats_logger);
 }
 #endif
@@ -2733,7 +2738,8 @@ namespace aux {
 "%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t"
 "%f\t%f\t%f\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t"
 "%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t"
-"%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n"
+"%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t"
+"%d\t%d\t%d\n"
 , total_milliseconds(now - m_last_log_rotation) / 1000.f
 , int(m_stat.total_upload() - m_last_uploaded)
 , int(m_stat.total_download() - m_last_downloaded)
@@ -2822,6 +2828,11 @@ namespace aux {
 , connect_candidates
 , int(m_settings.max_queued_disk_bytes)
 , int(m_settings.max_queued_disk_bytes_low_watermark)
+, int(cs.cumulative_job_time)
+, int(cs.cumulative_read_time)
+, int(cs.cumulative_write_time)
+, int(cs.cumulative_hash_time)
+, int(cs.cumulative_sort_time)
 );
 m_last_cache_status = cs;
 m_last_failed = m_total_failed_bytes;
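
Since the cumulative columns are total milliseconds since session start, the new 'disk_time2' graphs plot monotonically increasing lines whose slope is the rate at which the disk thread accrues time. Dividing the delta between two log rows by the wall-clock interval (the first column of the log) gives a rough utilization figure; a minimal sketch of that post-processing step, with hypothetical names:

#include <cstdint>

// fraction of an interval the disk thread spent writing, given two
// consecutive cumulative_write_time samples (milliseconds) and the
// wall-clock length of the interval in seconds
double write_utilization(std::uint32_t prev_ms, std::uint32_t cur_ms,
	double interval_seconds)
{
	return double(cur_ms - prev_ms) / (interval_seconds * 1000.0);
}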