add performance warning when disk write queue exceeds half of the write cache size

Arvid Norberg 2011-03-16 07:45:51 +00:00
parent 7e3f69ecbf
commit 6db11079c8
10 changed files with 37 additions and 12 deletions

View File

@@ -6753,7 +6753,8 @@ upload or download rate performance.
 upload_limit_too_low,
 download_limit_too_low,
 send_buffer_watermark_too_low,
-too_many_optimistic_unchoke_slots
+too_many_optimistic_unchoke_slots,
+too_high_disk_queue_limit
 };
 performance_warning_t warning_code;
@@ -6816,6 +6817,12 @@ too_many_optimistic_unchoke_slots
 If half (or more) of all upload slots are set as optimistic unchoke slots, this
 warning is issued. You probably want more regular (rate based) unchoke slots.
 
+too_high_disk_queue_limit
+If the disk write queue ever grows larger than half of the cache size, this warning
+is posted. The disk write queue eats into the total disk cache and leaves very little
+left for the actual cache. This causes the disk cache to oscillate, evicting large
+portions of the cache onto the disk write queue before peers are allowed to download
+any more. Either lower ``max_queued_disk_bytes`` or increase ``cache_size``.
+
 state_changed_alert
 -------------------
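The new documentation's advice (lower ``max_queued_disk_bytes`` or raise ``cache_size``) can be applied through the session_settings API of this era. Below is a minimal sketch, not part of this commit; the helper name and the factor-of-four headroom are illustrative only, and it assumes cache_size is specified in 16 KiB blocks while max_queued_disk_bytes is in bytes.

#include <libtorrent/session.hpp>
#include <libtorrent/session_settings.hpp>

// keep the disk write queue limit well below half of the disk cache, so the
// write queue cannot crowd out the read cache and trigger the new warning
void tune_disk_queue(libtorrent::session& ses)
{
	libtorrent::session_settings s = ses.settings();

	// max_queued_disk_bytes is in bytes, cache_size in 16 KiB blocks
	int const queue_limit_blocks = s.max_queued_disk_bytes / (16 * 1024);

	if (queue_limit_blocks > s.cache_size / 2)
	{
		// either shrink the queue limit to a quarter of the cache ...
		s.max_queued_disk_bytes = (s.cache_size / 4) * 16 * 1024;
		// ... or grow the cache instead, e.g.:
		// s.cache_size = queue_limit_blocks * 4;
	}

	ses.set_settings(s);
}

The factor of four is arbitrary headroom; the point is simply to keep the queue limit clearly below half of the cache, which is the threshold the new warning is based on.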

View File

@@ -220,7 +220,7 @@ namespace libtorrent
 send_buffer_watermark_too_low,
 too_many_optimistic_unchoke_slots,
 bittyrant_with_no_uplimit,
+too_high_disk_queue_limit,

View File

@@ -762,6 +762,7 @@ namespace libtorrent
 ptime m_last_second_tick;
 // used to limit how often disk warnings are generated
 ptime m_last_disk_performance_warning;
+ptime m_last_disk_queue_performance_warning;
 // the last time we went through the peers
 // to decide which ones to choke/unchoke

View File

@@ -300,7 +300,9 @@ namespace libtorrent
 // aborts read operations
 void stop(boost::intrusive_ptr<piece_manager> s);
-void add_job(disk_io_job const& j
+// returns the disk write queue size
+int add_job(disk_io_job const& j
 , boost::function<void(int, disk_io_job const&)> const& f
 = boost::function<void(int, disk_io_job const&)>());
@@ -367,7 +369,7 @@
 private:
-void add_job(disk_io_job const& j
+int add_job(disk_io_job const& j
 , mutex::scoped_lock& l
 , boost::function<void(int, disk_io_job const&)> const& f
 = boost::function<void(int, disk_io_job const&)>());

View File

@@ -234,7 +234,8 @@ namespace libtorrent
 , boost::function<void(int, disk_io_job const&)> const& handler
 , int cache_expiry = 0);
-void async_write(
+// returns the write queue size
+int async_write(
 peer_request const& r
 , disk_buffer_holder& buffer
 , boost::function<void(int, disk_io_job const&)> const& f);

View File

@@ -116,7 +116,8 @@ namespace libtorrent {
 "download limit too low (upload rate will suffer)",
 "send buffer watermark too low (upload rate will suffer)",
 "too many optimistic unchoke slots",
-"using bittyrant unchoker with no upload rate limit set"
+"using bittyrant unchoker with no upload rate limit set",
+"the disk queue limit is too high compared to the cache size. The disk queue eats into the cache size"
 };
 return torrent_alert::message() + ": performance warning: "

View File

@@ -1362,7 +1362,7 @@ namespace libtorrent
 return m_queue_buffer_size;
 }
-void disk_io_thread::add_job(disk_io_job const& j
+int disk_io_thread::add_job(disk_io_job const& j
 , mutex::scoped_lock& l
 , boost::function<void(int, disk_io_job const&)> const& f)
 {
@@ -1378,9 +1378,10 @@ namespace libtorrent
 m_exceeded_write_queue = true;
 }
 m_signal.signal(l);
+return m_queue_buffer_size;
 }
-void disk_io_thread::add_job(disk_io_job const& j
+int disk_io_thread::add_job(disk_io_job const& j
 , boost::function<void(int, disk_io_job const&)> const& f)
 {
 TORRENT_ASSERT(!m_abort);
@@ -1389,7 +1390,7 @@ namespace libtorrent
 || j.action == disk_io_job::update_settings);
 TORRENT_ASSERT(j.buffer_size <= m_block_size);
 mutex::scoped_lock l(m_queue_mutex);
-add_job(j, l, f);
+return add_job(j, l, f);
 }
 bool disk_io_thread::test_error(disk_io_job& j)

View File

@@ -2375,12 +2375,21 @@ namespace libtorrent
 }
 }
-fs.async_write(p, data, boost::bind(&peer_connection::on_disk_write_complete
+int write_queue_size = fs.async_write(p, data, boost::bind(&peer_connection::on_disk_write_complete
 , self(), _1, _2, p, t));
 m_outstanding_writing_bytes += p.length;
 TORRENT_ASSERT(m_channel_state[download_channel] == peer_info::bw_idle);
 m_download_queue.erase(b);
+if (write_queue_size > m_ses.m_settings.cache_size / 2
+&& (now - m_ses.m_last_disk_queue_performance_warning) > seconds(10)
+&& m_ses.m_alerts.should_post<performance_alert>())
+{
+m_ses.m_last_disk_queue_performance_warning = now;
+t->alerts().post_alert(performance_alert(t->get_handle()
+, performance_alert::too_high_disk_queue_limit));
+}
 if (!m_ses.can_write_to_disk()
 && m_ses.settings().max_queued_disk_bytes
 && t->alerts().should_post<performance_alert>()
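The hunk above posts the new alert from the disk write path, rate limited to one warning every ten seconds. On the receiving end, a client could surface it roughly as follows; a minimal sketch, not part of this commit, assuming the alert API of this era (pop_alert() returning std::auto_ptr<alert>) and a hypothetical helper name.

#include <libtorrent/session.hpp>
#include <libtorrent/alert_types.hpp>

#include <iostream>
#include <memory>

// drain the session's alert queue and report the new disk queue warning
void check_performance_alerts(libtorrent::session& ses)
{
	using namespace libtorrent;

	while (true)
	{
		std::auto_ptr<alert> a = ses.pop_alert();
		if (!a.get()) break;

		performance_alert const* pa = alert_cast<performance_alert>(a.get());
		if (pa == 0) continue;

		if (pa->warning_code == performance_alert::too_high_disk_queue_limit)
		{
			// the disk write queue exceeded half of the cache size;
			// lower max_queued_disk_bytes or raise cache_size
			std::cerr << pa->message() << std::endl;
		}
	}
}

Note that performance alerts are only delivered if the session's alert mask includes the performance_warning category (set via set_alert_mask()); the default mask only covers errors.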

View File

@@ -513,6 +513,7 @@ namespace aux {
 , m_last_tick(m_created)
 , m_last_second_tick(m_created - milliseconds(900))
 , m_last_disk_performance_warning(min_time())
+, m_last_disk_queue_performance_warning(min_time())
 , m_last_choke(m_created)
 , m_next_rss_update(min_time())
 #ifndef TORRENT_DISABLE_DHT

View File

@@ -1671,7 +1671,7 @@ ret:
 #endif
 }
-void piece_manager::async_write(
+int piece_manager::async_write(
 peer_request const& r
 , disk_buffer_holder& buffer
 , boost::function<void(int, disk_io_job const&)> const& handler)
@@ -1687,8 +1687,10 @@ ret:
 j.offset = r.start;
 j.buffer_size = r.length;
 j.buffer = buffer.get();
-m_io_thread.add_job(j, handler);
+int queue_size = m_io_thread.add_job(j, handler);
 buffer.release();
+return queue_size;
 }
 void piece_manager::async_hash(int piece