forked from premiere/premiere-libtorrent
measure disk cache flush times
This commit is contained in:
parent
9812626cf0
commit
7dde47b98a
|
@ -855,6 +855,7 @@ Returns status of the disk cache for this session.
|
||||||
int average_read_time;
|
int average_read_time;
|
||||||
int average_write_time;
|
int average_write_time;
|
||||||
int average_hash_time;
|
int average_hash_time;
|
||||||
|
int average_cache_time;
|
||||||
int job_queue_length;
|
int job_queue_length;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -901,6 +902,9 @@ microseconds. Hash jobs include running SHA-1 on the data (which for the most
|
||||||
part is done incrementally) and sometimes reading back parts of the piece. It
|
part is done incrementally) and sometimes reading back parts of the piece. It
|
||||||
also includes checking files without valid resume data.
|
also includes checking files without valid resume data.
|
||||||
|
|
||||||
|
``average_cache_time`` is the average amount of time spent evicting cached
|
||||||
|
blocks that have expired from the disk cache.
|
||||||
|
|
||||||
``job_queue_length`` is the number of jobs in the job queue.
|
``job_queue_length`` is the number of jobs in the job queue.
|
||||||
|
|
||||||
get_cache_info()
|
get_cache_info()
|
||||||
|
|
|
@ -171,6 +171,7 @@ namespace libtorrent
|
||||||
, average_read_time(0)
|
, average_read_time(0)
|
||||||
, average_write_time(0)
|
, average_write_time(0)
|
||||||
, average_hash_time(0)
|
, average_hash_time(0)
|
||||||
|
, average_cache_time(0)
|
||||||
, job_queue_length(0)
|
, job_queue_length(0)
|
||||||
{}
|
{}
|
||||||
|
|
||||||
|
@ -206,6 +207,7 @@ namespace libtorrent
|
||||||
int average_read_time;
|
int average_read_time;
|
||||||
int average_write_time;
|
int average_write_time;
|
||||||
int average_hash_time;
|
int average_hash_time;
|
||||||
|
int average_cache_time;
|
||||||
int job_queue_length;
|
int job_queue_length;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -442,6 +444,10 @@ namespace libtorrent
|
||||||
// average hash time (in microseconds)
|
// average hash time (in microseconds)
|
||||||
sliding_average<512> m_hash_time;
|
sliding_average<512> m_hash_time;
|
||||||
|
|
||||||
|
// average disk cache time (in microseconds)
|
||||||
|
// scanning the cache for pieces to flush
|
||||||
|
sliding_average<512> m_cache_time;
|
||||||
|
|
||||||
typedef std::multimap<size_type, disk_io_job> read_jobs_t;
|
typedef std::multimap<size_type, disk_io_job> read_jobs_t;
|
||||||
read_jobs_t m_sorted_read_jobs;
|
read_jobs_t m_sorted_read_jobs;
|
||||||
|
|
||||||
|
|
|
@ -342,6 +342,8 @@ namespace libtorrent
|
||||||
ret.average_queue_time = m_queue_time.mean();
|
ret.average_queue_time = m_queue_time.mean();
|
||||||
ret.average_read_time = m_read_time.mean();
|
ret.average_read_time = m_read_time.mean();
|
||||||
ret.average_write_time = m_write_time.mean();
|
ret.average_write_time = m_write_time.mean();
|
||||||
|
ret.average_hash_time = m_hash_time.mean();
|
||||||
|
ret.average_cache_time = m_cache_time.mean();
|
||||||
ret.job_queue_length = m_jobs.size() + m_sorted_read_jobs.size();
|
ret.job_queue_length = m_jobs.size() + m_sorted_read_jobs.size();
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
|
@ -1693,8 +1695,13 @@ namespace libtorrent
|
||||||
m_ios.post(m_queue_callback);
|
m_ios.post(m_queue_callback);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
ptime now = time_now_hires();
|
||||||
|
m_queue_time.add_sample(total_microseconds(now - j.start_time));
|
||||||
|
|
||||||
flush_expired_pieces();
|
flush_expired_pieces();
|
||||||
|
|
||||||
|
m_cache_time.add_sample(total_microseconds(time_now_hires() - now));
|
||||||
|
|
||||||
int ret = 0;
|
int ret = 0;
|
||||||
|
|
||||||
TORRENT_ASSERT(j.storage
|
TORRENT_ASSERT(j.storage
|
||||||
|
@ -1714,9 +1721,6 @@ namespace libtorrent
|
||||||
if (j.storage && j.storage->get_storage_impl()->m_settings == 0)
|
if (j.storage && j.storage->get_storage_impl()->m_settings == 0)
|
||||||
j.storage->get_storage_impl()->m_settings = &m_settings;
|
j.storage->get_storage_impl()->m_settings = &m_settings;
|
||||||
|
|
||||||
ptime now = time_now_hires();
|
|
||||||
m_queue_time.add_sample(total_microseconds(now - j.start_time));
|
|
||||||
|
|
||||||
switch (j.action)
|
switch (j.action)
|
||||||
{
|
{
|
||||||
case disk_io_job::update_settings:
|
case disk_io_job::update_settings:
|
||||||
|
|
|
@ -945,6 +945,7 @@ namespace aux {
|
||||||
":disk cache size"
|
":disk cache size"
|
||||||
":disk buffer allocations"
|
":disk buffer allocations"
|
||||||
":disk hash time"
|
":disk hash time"
|
||||||
|
":disk cache time"
|
||||||
"\n\n", m_stats_logger);
|
"\n\n", m_stats_logger);
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
@ -2714,7 +2715,7 @@ namespace aux {
|
||||||
"%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t"
|
"%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t"
|
||||||
"%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t"
|
"%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t"
|
||||||
"%f\t%f\t%f\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t"
|
"%f\t%f\t%f\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t"
|
||||||
"%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n"
|
"%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n"
|
||||||
, total_milliseconds(now - m_last_log_rotation) / 1000.f
|
, total_milliseconds(now - m_last_log_rotation) / 1000.f
|
||||||
, int(m_stat.total_upload() - m_last_uploaded)
|
, int(m_stat.total_upload() - m_last_uploaded)
|
||||||
, int(m_stat.total_download() - m_last_downloaded)
|
, int(m_stat.total_download() - m_last_downloaded)
|
||||||
|
@ -2793,6 +2794,7 @@ namespace aux {
|
||||||
, cs.cache_size
|
, cs.cache_size
|
||||||
, cs.total_used_buffers
|
, cs.total_used_buffers
|
||||||
, int(cs.average_hash_time)
|
, int(cs.average_hash_time)
|
||||||
|
, int(cs.average_cache_time)
|
||||||
);
|
);
|
||||||
m_last_cache_status = cs;
|
m_last_cache_status = cs;
|
||||||
m_last_failed = m_total_failed_bytes;
|
m_last_failed = m_total_failed_bytes;
|
||||||
|
|
Loading…
Reference in New Issue