another read job starvation fix + some more logging

Arvid Norberg 2011-03-27 08:21:26 +00:00
parent 311f7f0e57
commit 346a6fb175
3 changed files with 20 additions and 4 deletions

@@ -107,7 +107,7 @@ reports = [
('peers_list_size', 'num', 'number of known peers (not necessarily connected)', ['num list peers']),
('overall_rates', 'Bytes / second', 'download and upload rates', ['upload rate', 'download rate', 'smooth upload rate', 'smooth download rate']),
('disk_write_queue', 'Bytes', 'bytes queued up by peers, to be written to disk', ['disk write queued bytes', 'disk queue limit', 'disk queue low watermark']),
-('peers_upload', 'num', 'number of peers by state wrt. uploading', ['peers up interested', 'peers up unchoked', 'peers up requests', 'peers disk-up', 'peers bw-up']),
+('peers_upload', 'num', 'number of peers by state wrt. uploading', ['peers up interested', 'peers up unchoked', 'peers up requests', 'peers disk-up', 'peers bw-up', 'max unchoked']),
('peers_download', 'num', 'number of peers by state wrt. downloading', ['peers down interesting', 'peers down unchoked', 'peers down requests', 'peers disk-down', 'peers bw-down']),
('peer_errors', 'num', 'number of peers by error that disconnected them', ['error peers', 'peer disconnects', 'peers eof', 'peers connection reset', 'connect timeouts', 'uninteresting peers disconnect', 'banned for hash failure']),
('waste', '% of all downloaded bytes', 'proportion of all downloaded bytes that were wasted', ['% failed payload bytes', '% wasted payload bytes', '% protocol bytes']),
@@ -116,7 +116,7 @@ reports = [
('disk_cache_hits', 'blocks (16kiB)', '', ['disk block read', 'read cache hits', 'disk block written', 'disk read back']),
('disk_cache', 'blocks (16kiB)', 'disk cache size and usage', ['read disk cache size', 'disk cache size', 'disk buffer allocations', 'cache size']),
('disk_readback', '% of written blocks', 'portion of written blocks that had to be read back for hash verification', ['% read back']),
-('disk_queue', 'number of queued disk jobs', 'queued disk jobs', ['disk queue size', 'disk read queue size']),
+('disk_queue', 'number of queued disk jobs', 'queued disk jobs', ['disk queue size', 'disk read queue size', 'read job queue size limit']),
# ('absolute_waste', 'num', '', ['failed bytes', 'redundant bytes', 'download rate']),
('connect_candidates', 'num', 'number of peers we know of that we can connect to', ['connect candidates']),

@@ -1642,8 +1642,20 @@ namespace libtorrent
// make sure we don't starve out the read queue by just issuing
// write jobs constantly, mix in a read job every now and then
// with a configurable ratio
+// this rate must increase towards every other job if the queued-up
+// read jobs increase too far
+int read_job_every = m_settings.read_job_every;
+if (m_sorted_read_jobs.size() > m_settings.unchoke_slots_limit * 2)
+{
+int range = m_settings.unchoke_slots_limit;
+int exceed = m_sorted_read_jobs.size() - range * 2;
+read_job_every = (exceed * 1 + (range - exceed) * read_job_every) / 2;
+if (read_job_every < 1) read_job_every = 1;
+}
bool pick_read_job = m_jobs.empty()
-|| (immediate_jobs_in_row >= m_settings.read_job_every
+|| (immediate_jobs_in_row >= read_job_every
&& !m_sorted_read_jobs.empty());
if (!pick_read_job)
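
The added block reads as a linear interpolation: once the backlog of sorted read jobs exceeds twice the unchoke limit, the effective read_job_every is pulled from its configured value towards 1 in proportion to how far the backlog has overshot, so read jobs get mixed in more and more often. The sketch below restates that logic as a standalone function; the function name, the clamp on exceed and the division by range (rather than the literal 2 in the hunk) are my own reading of the intent, not libtorrent code.

```cpp
#include <algorithm>
#include <cstdio>
#include <initializer_list>

// Illustrative sketch (not libtorrent code): how often to mix a read job in
// between write jobs, given the configured ratio and the current backlog of
// sorted read jobs. Once the backlog exceeds 2x the unchoke limit, interpolate
// linearly from the configured ratio down to 1 as it approaches 3x the limit.
int adjusted_read_job_every(int configured_every, int unchoke_slots_limit
	, int sorted_read_jobs)
{
	int read_job_every = configured_every;
	if (sorted_read_jobs > unchoke_slots_limit * 2)
	{
		int const range = unchoke_slots_limit;
		int exceed = sorted_read_jobs - range * 2;
		// keep the interpolation weights within [0, range]
		exceed = std::min(exceed, range);
		// weighted average of 1 (full overshoot) and the configured ratio
		read_job_every = (exceed * 1 + (range - exceed) * configured_every) / range;
		if (read_job_every < 1) read_job_every = 1;
	}
	return read_job_every;
}

int main()
{
	// e.g. 8 unchoke slots, configured to issue a read job every 10th job
	for (int backlog : {10, 17, 20, 24, 30})
		std::printf("backlog=%d -> read_job_every=%d\n", backlog
			, adjusted_read_job_every(10, 8, backlog));
}
```

With those numbers the ratio falls from 10 down to 1 as the backlog grows from 16 towards 24 queued read jobs, which is the read-starvation behaviour the commit message refers to.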

@@ -969,6 +969,8 @@ namespace aux {
":disk read queue size"
":tick interval"
":tick residual"
+":max unchoked"
+":read job queue size limit"
"\n\n", m_stats_logger);
}
#endif
@@ -2934,7 +2936,7 @@ namespace aux {
"%f\t%f\t%f\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t"
"%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t"
"%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%f\t%f\t"
-"%f\t%f\t%d\t%f\t%d\t%d\t%d\n"
+"%f\t%f\t%d\t%f\t%d\t%d\t%d\t%d\t%d\n"
, total_milliseconds(now - m_last_log_rotation) / 1000.f
, int(m_stat.total_upload() - m_last_uploaded)
, int(m_stat.total_download() - m_last_downloaded)
@@ -3032,6 +3034,8 @@ namespace aux {
, cs.read_queue_size
, tick_interval_ms
, m_tick_residual
+, m_allowed_upload_slots
+, m_settings.unchoke_slots_limit * 2
);
m_last_cache_status = cs;
m_last_failed = m_total_failed_bytes;
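
Taken together, the last two hunks show the pattern for adding a column to the session stats log: the colon-separated header written at log rotation, the tab-separated format string and the trailing argument list all have to grow in lock-step, in the same positions. Below is a minimal self-contained sketch of that pattern; the log file name, the leading "second" column and the literal slot counts are placeholders of mine, and only the two new column names and the unchoke_slots_limit * 2 value are taken from the diff.

```cpp
#include <cstdio>

int main()
{
	// Illustrative sketch (not libtorrent code) of the tab-separated stats log.
	std::FILE* stats_logger = std::fopen("session_stats.log", "w+");
	if (stats_logger == nullptr) return 1;

	// 1) header line: one colon-separated column name per counter
	std::fprintf(stats_logger,
		"second"
		":max unchoked"
		":read job queue size limit"
		"\n\n");

	// 2) format string and 3) arguments: one %d per column, in the same order
	int const second = 1;
	int const allowed_upload_slots = 8;    // placeholder for m_allowed_upload_slots
	int const unchoke_slots_limit = 8;     // placeholder for m_settings.unchoke_slots_limit
	std::fprintf(stats_logger, "%d\t%d\t%d\n"
		, second
		, allowed_upload_slots
		, unchoke_slots_limit * 2);        // "read job queue size limit"

	std::fclose(stats_logger);
	return 0;
}
```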