optimized disk I/O cache clearing

This commit is contained in:
Arvid Norberg 2010-05-13 15:01:20 +00:00
parent 4ddf87c53e
commit e07bad0686
4 changed files with 55 additions and 3 deletions

View File

@ -1,3 +1,4 @@
* optimized disk I/O cache clearing
* added feature to ask a torrent if it needs to save its resume data or not
* added setting to ignore file modification time when loading resume files
* support more fine-grained torrent states between which peer sources it

View File

@ -219,6 +219,7 @@ namespace libtorrent
char* allocate_buffer(char const* category);
void free_buffer(char* buf);
void free_multiple_buffers(char** bufvec, int numbufs);
char* allocate_buffers(int blocks, char const* category);
void free_buffers(char* buf, int blocks);
@ -240,6 +241,8 @@ namespace libtorrent
protected:
void free_buffer_impl(char* buf, mutex::scoped_lock& l);
// number of bytes per block. The BitTorrent
// protocol defines the block size to 16 KiB.
const int m_block_size;
@ -392,6 +395,8 @@ namespace libtorrent
, int options, int num_blocks, mutex::scoped_lock& l);
int cache_read_block(disk_io_job const& j, mutex::scoped_lock& l);
int free_piece(cached_piece_entry& p, mutex::scoped_lock& l);
void drain_piece_bufs(cached_piece_entry& p, std::vector<char*>& buf
, mutex::scoped_lock& l);
int try_read_from_cache(disk_io_job const& j, bool& hit);
int read_piece_from_cache_and_hash(disk_io_job const& j, sha1_hash& h);
int cache_piece(disk_io_job const& j, cache_piece_index_t::iterator& p

View File

@ -171,10 +171,30 @@ namespace libtorrent
}
#endif
// Free a batch of disk buffers in one go. Sorting the pointers first
// means the pool walks its internal structures in address order, which
// maximizes cache hits; taking the pool mutex once for the whole batch
// avoids per-buffer lock/unlock overhead.
//
// bufvec  - array of buffer pointers to free (each must be non-null)
// numbufs - number of entries in bufvec
void disk_buffer_pool::free_multiple_buffers(char** bufvec, int numbufs)
{
	char** end = bufvec + numbufs;
	// sort the pointers in order to maximize cache hits
	std::sort(bufvec, end);

	mutex::scoped_lock l(m_pool_mutex);
	for (; bufvec != end; ++bufvec)
	{
		char* buf = *bufvec;
		TORRENT_ASSERT(buf);
		// fixed: stray double semicolon after this call removed
		free_buffer_impl(buf, l);
	}
}
void disk_buffer_pool::free_buffer(char* buf)
{
TORRENT_ASSERT(buf);
mutex::scoped_lock l(m_pool_mutex);
free_buffer_impl(buf, l);
}
void disk_buffer_pool::free_buffer_impl(char* buf, mutex::scoped_lock& l)
{
TORRENT_ASSERT(buf);
TORRENT_ASSERT(m_magic == 0x1337);
TORRENT_ASSERT(is_disk_buffer(buf, l));
#if defined TORRENT_DISK_STATS || defined TORRENT_STATS
@ -491,13 +511,32 @@ namespace libtorrent
if (m_settings.explicit_read_cache) return;
// flush read cache
std::vector<char*> bufs;
cache_lru_index_t& ridx = m_read_pieces.get<1>();
i = ridx.begin();
while (i != ridx.end() && now - i->expire > cut_off)
{
free_piece(const_cast<cached_piece_entry&>(*i), l);
drain_piece_bufs(const_cast<cached_piece_entry&>(*i), bufs, l);
ridx.erase(i++);
}
if (!bufs.empty()) free_multiple_buffers(&bufs[0], bufs.size());
}
// Detach every allocated block buffer of piece `p` and collect the
// pointers into `buf` so the caller can release them all in a single
// free_multiple_buffers() call (cheaper than freeing one at a time).
// Cache accounting (per-piece block count and the global read-cache
// counters) is decremented for each block drained. `l` must hold the
// cache mutex for the duration of the call.
void disk_io_thread::drain_piece_bufs(cached_piece_entry& p, std::vector<char*>& buf
	, mutex::scoped_lock& l)
{
	int const piece_size = p.storage->info()->piece_size(p.piece);
	// round up: the last block of a piece may be shorter than m_block_size
	int const blocks_in_piece = (piece_size + m_block_size - 1) / m_block_size;

	for (int block = 0; block < blocks_in_piece; ++block)
	{
		char* b = p.blocks[block].buf;
		if (b == 0) continue;

		buf.push_back(b);
		p.blocks[block].buf = 0;
		--p.num_blocks;
		--m_cache_stats.cache_size;
		--m_cache_stats.read_cache_size;
	}
}
// returns the number of blocks that were freed
@ -1752,12 +1791,15 @@ namespace libtorrent
mutex::scoped_lock l(m_piece_mutex);
// build a vector of all the buffers we need to free
// and free them all in one go
std::vector<char*> buffers;
for (cache_t::iterator i = m_read_pieces.begin();
i != m_read_pieces.end();)
{
if (i->storage == j.storage)
{
free_piece(const_cast<cached_piece_entry&>(*i), l);
drain_piece_bufs(const_cast<cached_piece_entry&>(*i), buffers, l);
i = m_read_pieces.erase(i);
}
else
@ -1766,6 +1808,7 @@ namespace libtorrent
}
}
l.unlock();
if (!buffers.empty()) free_multiple_buffers(&buffers[0], buffers.size());
release_memory();
break;
}

View File

@ -177,6 +177,9 @@ namespace libtorrent
set.low_prio_disk = false;
// one hour expiration
set.cache_expiry = 60 * 60;
// this is expensive and could add significant
// delays when freeing a large number of buffers
set.lock_disk_cache = false;
// flush write cache based on largest contiguous block
set.disk_cache_algorithm = session_settings::largest_contiguous;