Try to evict a volatile piece before allocating a new one for a hash check. It may be faster to check files without growing the cache, reusing the same buffers instead.
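In outline: before the hash path pulls more blocks into the cache, it first asks the cache to shed one volatile piece. A condensed sketch of that shape (hypothetical framing; only block_cache::try_evict_one_volatile() comes from this commit, the surrounding function is a paraphrase of the hash path further down):

void hash_check_sketch(libtorrent::block_cache& cache, libtorrent::disk_io_job* j)
{
    // ... look up the cached piece and lock the blocks it already holds ...

    // evict one volatile piece first (if the volatile budget is
    // exhausted), so this hash check does not grow the cache
    cache.try_evict_one_volatile();

    // ... read the missing blocks and feed them to the hasher ...
}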

arvidn 2016-03-07 01:05:54 -05:00
parent d059ac563d
commit 0d0048d415
6 changed files with 218 additions and 31 deletions

View File

@ -449,6 +449,9 @@ namespace libtorrent
// that couldn't be
int try_evict_blocks(int num, cached_piece_entry* ignore = 0);
// try to evict a single volatile piece, if there is one.
void try_evict_one_volatile();
// if there are any dirty blocks
void clear(tailqueue<disk_io_job>& jobs);
@ -507,9 +510,18 @@ namespace libtorrent
// this is determined by being a fraction of the cache size
int m_ghost_size;
// this is the max number of volatile read cache blocks allowed in the
// cache. Once this limit is reached, other volatile blocks will start
// to be evicted.
int m_max_volatile_blocks;
// the number of blocks (buffers) allocated by volatile pieces.
boost::uint32_t m_volatile_size;
// the number of blocks in the cache
// that are in the read cache
boost::uint32_t m_read_cache_size;
// the number of blocks in the cache
// that are in the write cache
boost::uint32_t m_write_cache_size;
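Taken together, the accounting works like this: a block cached for a piece on the volatile_read_lru list is counted in both m_read_cache_size and m_volatile_size, and eviction of volatile pieces starts once the volatile count reaches m_max_volatile_blocks. A paraphrase of that invariant (illustrative, not code from this diff):

// a volatile block counts against both totals
void count_volatile_block_in()
{
    ++m_read_cache_size; // it is a read-cache block
    ++m_volatile_size;   // and it consumes the volatile budget
}

bool volatile_budget_exhausted() const
{ return m_volatile_size >= m_max_volatile_blocks; }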

View File

@ -1518,6 +1518,16 @@ namespace libtorrent
// .. _i2p: http://www.i2p2.de
i2p_port,
// this determines the max number of volatile disk cache blocks. If the
// number of volatile blocks exceeds this limit, other volatile blocks
// will start to be evicted. A disk cache block is volatile if it has
// low priority, and should be one of the first blocks to be evicted
// under pressure. For instance, blocks pulled into the cache as the
// result of calculating a piece hash are volatile. These blocks don't
// represent potential interest among peers, so the value of keeping
// them in the cache is limited.
cache_size_volatile,
max_int_setting_internal
};
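For reference, this is configured like any other integer setting. A minimal usage sketch (the values 1024 and 64 are arbitrary examples):

lt::session ses;
lt::settings_pack p;
// cap the cache at 1024 blocks, at most 64 of them volatile
// (e.g. blocks pulled in only to compute a piece hash)
p.set_int(lt::settings_pack::cache_size, 1024);
p.set_int(lt::settings_pack::cache_size_volatile, 64);
ses.apply_settings(p);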

View File

@ -113,3 +113,45 @@ TORRENT_TEST(checking_no_cache)
TEST_EQUAL(tor[0].status().is_seeding, true);
});
}
TORRENT_TEST(checking_limit_volatile)
{
run_test(
[](lt::add_torrent_params& atp, lt::settings_pack& p) {
atp.flags |= lt::add_torrent_params::flag_auto_managed;
p.set_int(lt::settings_pack::cache_size, 300);
p.set_int(lt::settings_pack::cache_size_volatile, 2);
},
[](lt::session& ses) {
int cache = get_cache_size(ses);
// the cache fits 300 blocks, but only allows two volatile blocks
TEST_EQUAL(cache, 2);
std::vector<lt::torrent_handle> tor = ses.get_torrents();
TEST_EQUAL(tor.size(), 1);
TEST_EQUAL(tor[0].status().is_seeding, true);
});
}
TORRENT_TEST(checking_volatile_limit_cache_size)
{
run_test(
[](lt::add_torrent_params& atp, lt::settings_pack& p) {
atp.flags |= lt::add_torrent_params::flag_auto_managed;
p.set_int(lt::settings_pack::cache_size, 10);
p.set_int(lt::settings_pack::cache_size_volatile, 300);
},
[](lt::session& ses) {
int cache = get_cache_size(ses);
// the cache allows 300 volatile blocks, but only fits 10 blocks
TEST_CHECK(cache > 0);
TEST_CHECK(cache <= 10);
std::vector<lt::torrent_handle> tor = ses.get_torrents();
TEST_EQUAL(tor.size(), 1);
TEST_EQUAL(tor[0].status().is_seeding, true);
});
}
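Between them, the two tests bound the checking-time cache footprint from both sides: the volatile budget caps it when the overall cache is large, and the overall cache size caps it when the volatile budget is large, i.e. roughly min(cache_size, cache_size_volatile).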

View File

@ -366,6 +366,8 @@ block_cache::block_cache(int block_size, io_service& ios
: disk_buffer_pool(block_size, ios, trigger_trim)
, m_last_cache_op(cache_miss)
, m_ghost_size(8)
, m_max_volatile_blocks(100)
, m_volatile_size(0)
, m_read_cache_size(0)
, m_write_cache_size(0)
, m_send_buffer_blocks(0)
@ -554,6 +556,84 @@ void block_cache::update_cache_state(cached_piece_entry* p)
#endif
}
void block_cache::try_evict_one_volatile()
{
INVARIANT_CHECK;
DLOG(stderr, "[%p] try_evict_one_volatile\n", static_cast<void*>(this));
if (m_volatile_size < m_max_volatile_blocks) return;
linked_list<cached_piece_entry>* piece_list = &m_lru[cached_piece_entry::volatile_read_lru];
for (list_iterator<cached_piece_entry> i = piece_list->iterate(); i.get();)
{
cached_piece_entry* pe = reinterpret_cast<cached_piece_entry*>(i.get());
TORRENT_PIECE_ASSERT(pe->in_use, pe);
i.next();
if (pe->ok_to_evict())
{
#ifdef TORRENT_DEBUG
for (int j = 0; j < pe->blocks_in_piece; ++j)
TORRENT_PIECE_ASSERT(pe->blocks[j].buf == 0, pe);
#endif
TORRENT_PIECE_ASSERT(pe->refcount == 0, pe);
move_to_ghost(pe);
continue;
}
TORRENT_PIECE_ASSERT(pe->num_dirty == 0, pe);
// someone else is using this piece
if (pe->refcount > 0) continue;
// some blocks are pinned in this piece, skip it
if (pe->pinned > 0) continue;
char** to_delete = TORRENT_ALLOCA(char*, pe->blocks_in_piece);
int num_to_delete = 0;
// go through the blocks and evict the ones that are not dirty and not
// referenced
for (int j = 0; j < pe->blocks_in_piece; ++j)
{
cached_block_entry& b = pe->blocks[j];
TORRENT_PIECE_ASSERT(b.dirty == false, pe);
TORRENT_PIECE_ASSERT(b.pending == false, pe);
if (b.buf == 0 || b.refcount > 0 || b.dirty || b.pending) continue;
to_delete[num_to_delete++] = b.buf;
b.buf = NULL;
TORRENT_PIECE_ASSERT(pe->num_blocks > 0, pe);
--pe->num_blocks;
TORRENT_PIECE_ASSERT(m_read_cache_size > 0, pe);
--m_read_cache_size;
TORRENT_PIECE_ASSERT(m_volatile_size > 0, pe);
--m_volatile_size;
}
if (pe->ok_to_evict())
{
#ifdef TORRENT_DEBUG
for (int j = 0; j < pe->blocks_in_piece; ++j)
TORRENT_PIECE_ASSERT(pe->blocks[j].buf == 0, pe);
#endif
move_to_ghost(pe);
}
if (num_to_delete == 0) return;
DLOG(stderr, "[%p] removed %d blocks\n", static_cast<void*>(this)
, num_to_delete);
free_multiple_buffers(to_delete, num_to_delete);
return;
}
}
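Note that the loop stops at the first piece that passes the refcount and pinned filters: that piece's evictable blocks are freed and the function returns, so each call adds only a bounded amount of work to the hash job. Pieces that are already fully drained are moved to the ghost list as the loop passes over them.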
cached_piece_entry* block_cache::allocate_piece(disk_io_job const* j, int cache_state)
{
#ifdef TORRENT_EXPENSIVE_INVARIANT_CHECKS
@ -570,8 +650,8 @@ cached_piece_entry* block_cache::allocate_piece(disk_io_job const* j, int cache_
cached_piece_entry* p = find_piece(j);
if (p == 0)
{
int piece_size = j->storage->files()->piece_size(j->piece);
int blocks_in_piece = (piece_size + block_size() - 1) / block_size();
int const piece_size = j->storage->files()->piece_size(j->piece);
int const blocks_in_piece = (piece_size + block_size() - 1) / block_size();
cached_piece_entry pe;
pe.piece = j->piece;
@ -818,7 +898,13 @@ void block_cache::free_block(cached_piece_entry* pe, int block)
{
TORRENT_PIECE_ASSERT(m_read_cache_size > 0, pe);
--m_read_cache_size;
if (pe->cache_state == cached_piece_entry::volatile_read_lru)
{
--m_volatile_size;
}
}
TORRENT_PIECE_ASSERT(pe->num_blocks > 0, pe);
--pe->num_blocks;
free_buffer(b.buf);
@ -858,6 +944,12 @@ bool block_cache::evict_piece(cached_piece_entry* pe, tailqueue<disk_io_job>& jo
}
if (pe->num_blocks == 0) break;
}
if (pe->cache_state == cached_piece_entry::volatile_read_lru)
{
m_volatile_size -= num_to_delete;
}
if (num_to_delete) free_multiple_buffers(to_delete, num_to_delete);
if (pe->ok_to_evict(true))
@ -1015,6 +1107,7 @@ int block_cache::try_evict_blocks(int num, cached_piece_entry* ignore)
// go through the blocks and evict the ones that are not dirty and not
// referenced
int removed = 0;
for (int j = 0; j < pe->blocks_in_piece && num > 0; ++j)
{
cached_block_entry& b = pe->blocks[j];
@ -1025,11 +1118,17 @@ int block_cache::try_evict_blocks(int num, cached_piece_entry* ignore)
b.buf = NULL;
TORRENT_PIECE_ASSERT(pe->num_blocks > 0, pe);
--pe->num_blocks;
TORRENT_PIECE_ASSERT(m_read_cache_size > 0, pe);
--m_read_cache_size;
++removed;
--num;
}
TORRENT_PIECE_ASSERT(m_read_cache_size >= removed, pe);
m_read_cache_size -= removed;
if (pe->cache_state == cached_piece_entry::volatile_read_lru)
{
m_volatile_size -= removed;
}
if (pe->ok_to_evict())
{
#ifdef TORRENT_DEBUG
@ -1087,6 +1186,7 @@ int block_cache::try_evict_blocks(int num, cached_piece_entry* ignore)
// go through the blocks and evict the ones
// that are not dirty and not referenced
int removed = 0;
for (int j = 0; j < end && num > 0; ++j)
{
cached_block_entry& b = pe->blocks[j];
@ -1097,11 +1197,17 @@ int block_cache::try_evict_blocks(int num, cached_piece_entry* ignore)
b.buf = NULL;
TORRENT_PIECE_ASSERT(pe->num_blocks > 0, pe);
--pe->num_blocks;
TORRENT_PIECE_ASSERT(m_read_cache_size > 0, pe);
--m_read_cache_size;
++removed;
--num;
}
TORRENT_PIECE_ASSERT(m_read_cache_size >= removed, pe);
m_read_cache_size -= removed;
if (pe->cache_state == cached_piece_entry::volatile_read_lru)
{
m_volatile_size -= removed;
}
if (pe->ok_to_evict())
{
#ifdef TORRENT_DEBUG
@ -1267,6 +1373,7 @@ void block_cache::insert_blocks(cached_piece_entry* pe, int block, file::iovec_t
TORRENT_PIECE_ASSERT(pe->blocks[block].dirty == false, pe);
++pe->num_blocks;
++m_read_cache_size;
if (j->flags & disk_io_job::volatile_read) ++m_volatile_size;
if (flags & blocks_inc_refcount)
{
@ -1296,6 +1403,7 @@ void block_cache::insert_blocks(cached_piece_entry* pe, int block, file::iovec_t
pe->blocks[block].buf = NULL;
--pe->num_blocks;
--m_read_cache_size;
if (j->flags & disk_io_job::volatile_read) --m_volatile_size;
}
#endif
}
@ -1347,6 +1455,8 @@ bool block_cache::inc_block_refcount(cached_piece_entry* pe, int block, int reas
pe->blocks[block].buf = NULL;
--pe->num_blocks;
--m_read_cache_size;
if (pe->cache_state == cached_piece_entry::volatile_read_lru)
--m_volatile_size;
return false;
}
}
@ -1413,6 +1523,8 @@ void block_cache::dec_block_refcount(cached_piece_entry* pe, int block, int reas
pe->blocks[block].buf = NULL;
--pe->num_blocks;
--m_read_cache_size;
if (pe->cache_state == cached_piece_entry::volatile_read_lru)
--m_volatile_size;
}
}
#endif
@ -1478,6 +1590,7 @@ void block_cache::free_piece(cached_piece_entry* pe)
// and free them all in one go
char** to_delete = TORRENT_ALLOCA(char*, pe->blocks_in_piece);
int num_to_delete = 0;
int removed_clean = 0;
for (int i = 0; i < pe->blocks_in_piece; ++i)
{
if (pe->blocks[i].buf == 0) continue;
@ -1497,22 +1610,29 @@ void block_cache::free_piece(cached_piece_entry* pe)
}
else
{
TORRENT_PIECE_ASSERT(m_read_cache_size > 0, pe);
--m_read_cache_size;
++removed_clean;
}
}
TORRENT_PIECE_ASSERT(m_read_cache_size >= removed_clean, pe);
m_read_cache_size -= removed_clean;
if (pe->cache_state == cached_piece_entry::volatile_read_lru)
{
m_volatile_size -= num_to_delete;
}
if (num_to_delete) free_multiple_buffers(to_delete, num_to_delete);
update_cache_state(pe);
}
int block_cache::drain_piece_bufs(cached_piece_entry& p, std::vector<char*>& buf)
{
int piece_size = p.storage->files()->piece_size(p.piece);
int blocks_in_piece = (piece_size + block_size() - 1) / block_size();
int const piece_size = p.storage->files()->piece_size(p.piece);
int const blocks_in_piece = (piece_size + block_size() - 1) / block_size();
int ret = 0;
TORRENT_PIECE_ASSERT(p.in_use, &p);
int removed_clean = 0;
for (int i = 0; i < blocks_in_piece; ++i)
{
if (p.blocks[i].buf == 0) continue;
@ -1532,10 +1652,17 @@ int block_cache::drain_piece_bufs(cached_piece_entry& p, std::vector<char*>& buf
}
else
{
TORRENT_ASSERT(m_read_cache_size > 0);
--m_read_cache_size;
++removed_clean;
}
}
TORRENT_ASSERT(m_read_cache_size >= removed_clean);
m_read_cache_size -= removed_clean;
if (p.cache_state == cached_piece_entry::volatile_read_lru)
{
m_volatile_size -= removed_clean;
}
update_cache_state(&p);
return ret;
}
@ -1580,6 +1707,8 @@ void block_cache::set_settings(aux::session_settings const& sett, error_code& ec
m_ghost_size = (std::max)(8, sett.get_int(settings_pack::cache_size)
/ (std::max)(sett.get_int(settings_pack::read_cache_line_size), 4) / 2);
m_max_volatile_blocks = sett.get_int(settings_pack::cache_size_volatile);
disk_buffer_pool::set_settings(sett, ec);
}
@ -1727,8 +1856,9 @@ void block_cache::check_invariant() const
// -1: block not in cache
// -2: out of memory
int block_cache::copy_from_piece(cached_piece_entry* pe, disk_io_job* j
, bool expect_no_fail)
int block_cache::copy_from_piece(cached_piece_entry* const pe
, disk_io_job* const j
, bool const expect_no_fail)
{
INVARIANT_CHECK;
TORRENT_UNUSED(expect_no_fail);
@ -1741,13 +1871,13 @@ int block_cache::copy_from_piece(cached_piece_entry* pe, disk_io_job* j
int block_offset = j->d.io.offset & (block_size()-1);
int buffer_offset = 0;
int size = j->d.io.buffer_size;
int blocks_to_read = block_offset > 0 && (size > block_size() - block_offset) ? 2 : 1;
int const blocks_to_read = block_offset > 0 && (size > block_size() - block_offset) ? 2 : 1;
TORRENT_PIECE_ASSERT(size <= block_size(), pe);
const int start_block = block;
int const start_block = block;
#if TORRENT_USE_ASSERTS
int piece_size = j->storage->files()->piece_size(j->piece);
int blocks_in_piece = (piece_size + block_size() - 1) / block_size();
int const piece_size = j->storage->files()->piece_size(j->piece);
int const blocks_in_piece = (piece_size + block_size() - 1) / block_size();
TORRENT_PIECE_ASSERT(start_block < blocks_in_piece, pe);
#endif
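As a worked example of the blocks_to_read expression above (assuming the default 16 KiB block size, 0x4000): a 16 KiB read at piece offset 0x6000 has block_offset = 0x6000 & 0x3fff = 0x2000, and since the size 0x4000 exceeds 0x4000 - 0x2000, the request straddles two cache blocks; the same read at offset 0x4000 has block_offset 0 and touches exactly one block.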

View File

@ -2328,16 +2328,6 @@ namespace libtorrent
}
}
if (pe == NULL && m_settings.get_bool(settings_pack::use_read_cache) == false)
{
l.unlock();
// if there's no piece in the cache, and the read cache is disabled
// it means it's already been flushed to disk, and there's no point
// in reading it into the cache, since we're not using read cache
// so just use the uncached version
return do_uncached_hash(j);
}
if (pe == NULL)
{
int cache_state = (j->flags & disk_io_job::volatile_read)
@ -2400,6 +2390,9 @@ namespace libtorrent
locked_blocks[num_locked_blocks++] = i;
}
// to keep the cache footprint low, try to evict a volatile piece
m_disk_cache.try_evict_one_volatile();
l.unlock();
int ret = 0;
@ -2670,7 +2663,7 @@ namespace libtorrent
int block_size = m_disk_cache.block_size();
int piece_size = j->storage->files()->piece_size(j->piece);
int blocks_in_piece = (piece_size + block_size - 1) / block_size;
file::iovec_t iov;
int ret = 0;
int offset = 0;
@ -3598,6 +3591,5 @@ namespace libtorrent
{
}
#endif
}

View File

@ -340,7 +340,8 @@ namespace libtorrent
SET(inactive_up_rate, 2048, 0),
SET_NOPREV(proxy_type, settings_pack::none, &session_impl::update_proxy),
SET_NOPREV(proxy_port, 0, &session_impl::update_proxy),
SET_NOPREV(i2p_port, 0, &session_impl::update_i2p_bridge)
SET_NOPREV(i2p_port, 0, &session_impl::update_i2p_bridge),
SET_NOPREV(cache_size_volatile, 256, 0)
};
#undef SET
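Note the two defaults: the block_cache constructor initializes m_max_volatile_blocks to 100, but set_settings overwrites it with this setting's default of 256 as soon as a settings pack is applied, so 256 is the effective default in practice.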