Make disk timing statistics an average over the last second. Also fixed some other bugs introduced by previous changes.

This commit is contained in:
Arvid Norberg 2011-06-26 19:45:33 +00:00
parent c647e95244
commit 209b9e4186
5 changed files with 58 additions and 15 deletions

View File

@ -454,29 +454,35 @@ namespace libtorrent
// read cache
cache_t m_read_pieces;
void flip_stats();
// total number of blocks in use by both the read
// and the write cache. This is not supposed to
// exceed m_cache_size
cache_status m_cache_stats;
// keeps average queue time for disk jobs (in microseconds)
sliding_average<512> m_queue_time;
average_accumulator m_queue_time;
// average read time for cache misses (in microseconds)
sliding_average<512> m_read_time;
average_accumulator m_read_time;
// average write time (in microseconds)
sliding_average<512> m_write_time;
average_accumulator m_write_time;
// average hash time (in microseconds)
sliding_average<512> m_hash_time;
average_accumulator m_hash_time;
// average time to serve a job (any job) in microseconds
sliding_average<512> m_job_time;
average_accumulator m_job_time;
// average time to ask for physical offset on disk
// and insert into queue
sliding_average<512> m_sort_time;
average_accumulator m_sort_time;
// the last time we reset the average time and store the
// latest value in m_cache_stats
ptime m_last_stats_flip;
typedef std::multimap<size_type, disk_io_job> read_jobs_t;
read_jobs_t m_sorted_read_jobs;

View File

@ -71,6 +71,33 @@ private:
int m_average_deviation;
};
// Accumulates integer samples (microsecond timings, in this file's usage)
// and produces their arithmetic mean on demand. Unlike sliding_average,
// this averages over an explicit window: the caller decides the window by
// deciding when to call mean(), which RESETS the accumulator. In
// disk_io_thread this is done once per second via flip_stats().
struct average_accumulator
{
	average_accumulator()
		: m_num_samples(0)
		, m_sample_sum(0)
	{}

	// add one sample to the running sum
	void add_sample(int s)
	{
		++m_num_samples;
		m_sample_sum += s;
	}

	// returns the mean of all samples added since the last call,
	// or 0 if no samples were added. NOTE: calling mean() resets
	// both the sample count and the sum.
	int mean()
	{
		int ret;
		if (m_num_samples == 0) ret = 0;
		// explicit cast: m_sample_sum is 64-bit (size_type) so the
		// quotient would otherwise be narrowed to int implicitly.
		// the mean of int samples always fits in an int.
		else ret = int(m_sample_sum / m_num_samples);
		m_num_samples = 0;
		m_sample_sum = 0;
		return ret;
	}

	// number of samples since the last mean() call
	int m_num_samples;
	// sum of samples since the last mean() call. 64 bits so that
	// many large microsecond samples don't overflow between flips
	size_type m_sample_sum;
};
}
#endif

View File

@ -120,6 +120,7 @@ reports = [
('disk_cache', 'blocks (16kiB)', '', 'disk cache size and usage', ['read disk cache size', 'disk cache size', 'disk buffer allocations', 'cache size']),
('disk_readback', '% of written blocks', '%%', 'portion of written blocks that had to be read back for hash verification', ['% read back']),
('disk_queue', 'number of queued disk jobs', '', 'queued disk jobs', ['disk queue size', 'disk read queue size', 'read job queue size limit']),
('disk_iops', 'operations/s', '', 'number of disk operations per second', ['read ops/s', 'write ops/s']),
('mixed mode', 'rate', 'B/s', 'rates by transport protocol', ['TCP up rate','TCP down rate','uTP up rate','uTP down rate','TCP up limit','TCP down limit']),
('uTP delay', 'buffer delay', 's', 'network delays measured by uTP', ['uTP peak send delay','uTP avg send delay']),
# ('absolute_waste', 'num', '', ['failed bytes', 'redundant bytes', 'download rate']),

View File

@ -251,6 +251,7 @@ namespace libtorrent
, m_waiting_to_shutdown(false)
, m_queue_buffer_size(0)
, m_last_file_check(time_now_hires())
, m_last_stats_flip(time_now())
, m_physical_ram(0)
, m_exceeded_write_queue(false)
, m_ios(ios)
@ -294,6 +295,18 @@ namespace libtorrent
return !m_exceeded_write_queue;
}
// copy the per-window averages from the accumulators into
// m_cache_stats (the snapshot handed out by status()) and start a
// new measurement window. called roughly once a second from the
// disk thread's job loop.
void disk_io_thread::flip_stats()
{
// calling mean() will actually reset the accumulators
m_cache_stats.average_queue_time = m_queue_time.mean();
m_cache_stats.average_read_time = m_read_time.mean();
m_cache_stats.average_write_time = m_write_time.mean();
m_cache_stats.average_hash_time = m_hash_time.mean();
m_cache_stats.average_job_time = m_job_time.mean();
m_cache_stats.average_sort_time = m_sort_time.mean();
// remember when this window started, so the job loop knows when
// a full second has elapsed and it's time to flip again
m_last_stats_flip = time_now();
}
void disk_io_thread::get_cache_info(sha1_hash const& ih, std::vector<cached_piece_info>& ret) const
{
mutex::scoped_lock l(m_piece_mutex);
@ -341,12 +354,6 @@ namespace libtorrent
cache_status ret = m_cache_stats;
ret.average_queue_time = m_queue_time.mean();
ret.average_read_time = m_read_time.mean();
ret.average_write_time = m_write_time.mean();
ret.average_hash_time = m_hash_time.mean();
ret.average_job_time = m_job_time.mean();
ret.average_sort_time = m_sort_time.mean();
ret.job_queue_length = m_jobs.size() + m_sorted_read_jobs.size();
ret.read_queue_size = m_sorted_read_jobs.size();
@ -1599,6 +1606,8 @@ namespace libtorrent
// flush_expired_pieces();
m_signal.wait(jl);
m_signal.clear(jl);
if (time_now() > m_last_stats_flip + seconds(1)) flip_stats();
}
if (m_abort && m_jobs.empty())

View File

@ -3244,7 +3244,7 @@ namespace aux {
#define STAT_LOG(type, val) fprintf(m_stats_logger, "%" #type "\t", val)
STAT_LOG(d, total_milliseconds(now - m_last_log_rotation) / 1000.f);
STAT_LOG(f, total_milliseconds(now - m_last_log_rotation) / 1000.f);
STAT_LOG(d, int(m_stat.total_upload() - m_last_uploaded));
STAT_LOG(d, int(m_stat.total_download() - m_last_downloaded));
STAT_LOG(d, downloading_torrents);
@ -3354,8 +3354,8 @@ namespace aux {
STAT_LOG(d, utp_down_rate);
STAT_LOG(f, float(utp_peak_send_delay) / 1000000.f);
STAT_LOG(f, float(utp_num_delay_sockets ? float(utp_send_delay_sum) / float(utp_num_delay_sockets) : 0) / 1000000.f);
STAT_LOG(f, float(cs.reads - m_last_cache_status.reads) / float(tick_interval_ms));
STAT_LOG(f, float(cs.writes - m_last_cache_status.writes) / float(tick_interval_ms));
STAT_LOG(f, float(cs.reads - m_last_cache_status.reads) * 1000.0 / float(tick_interval_ms));
STAT_LOG(f, float(cs.writes - m_last_cache_status.writes) * 1000.0 / float(tick_interval_ms));
fprintf(m_stats_logger, "\n");
#undef STAT_LOG