fix piece flushing in disk_io_thread (flush_piece / flush_cache) to also flush the read cache

This commit is contained in:
Arvid Norberg 2015-01-01 10:10:13 +00:00
parent 98372ff974
commit a1c1f9393d
2 changed files with 18 additions and 9 deletions

View File

@ -796,15 +796,18 @@ namespace libtorrent
} }
} }
void disk_io_thread::flush_piece(cached_piece_entry* pe, int flags, tailqueue& completed_jobs, mutex::scoped_lock& l) void disk_io_thread::flush_piece(cached_piece_entry* pe, int flags
, tailqueue& completed_jobs, mutex::scoped_lock& l)
{ {
TORRENT_ASSERT(l.locked()); TORRENT_ASSERT(l.locked());
if (flags & flush_delete_cache) if (flags & flush_delete_cache)
{ {
// delete dirty blocks and post handlers with // delete dirty blocks and post handlers with
// operation_aborted error code // operation_aborted error code
fail_jobs_impl(storage_error(boost::asio::error::operation_aborted), pe->jobs, completed_jobs); fail_jobs_impl(storage_error(boost::asio::error::operation_aborted)
fail_jobs_impl(storage_error(boost::asio::error::operation_aborted), pe->read_jobs, completed_jobs); , pe->jobs, completed_jobs);
fail_jobs_impl(storage_error(boost::asio::error::operation_aborted)
, pe->read_jobs, completed_jobs);
m_disk_cache.abort_dirty(pe); m_disk_cache.abort_dirty(pe);
} }
else if ((flags & flush_write_cache) && pe->num_dirty > 0) else if ((flags & flush_write_cache) && pe->num_dirty > 0)
@ -838,6 +841,7 @@ namespace libtorrent
for (boost::unordered_set<cached_piece_entry*>::const_iterator i = pieces.begin() for (boost::unordered_set<cached_piece_entry*>::const_iterator i = pieces.begin()
, end(pieces.end()); i != end; ++i) , end(pieces.end()); i != end; ++i)
{ {
if ((*i)->get_storage() != storage) continue;
piece_index.push_back((*i)->piece); piece_index.push_back((*i)->piece);
} }
@ -873,12 +877,19 @@ namespace libtorrent
std::pair<block_cache::iterator, block_cache::iterator> range = m_disk_cache.all_pieces(); std::pair<block_cache::iterator, block_cache::iterator> range = m_disk_cache.all_pieces();
while (range.first != range.second) while (range.first != range.second)
{ {
// TODO: 2 we're not flushing the read cache at all? // TODO: it would be nice to optimize this by having the cache
// pieces also ordered by
if ((flags & (flush_read_cache | flush_delete_cache)) == 0)
{
// if we're not flushing the read cache, and not deleting the
// cache, skip pieces with no dirty blocks, i.e. read cache
// pieces
while (range.first->num_dirty == 0) while (range.first->num_dirty == 0)
{ {
++range.first; ++range.first;
if (range.first == range.second) return; if (range.first == range.second) return;
} }
}
cached_piece_entry* pe = const_cast<cached_piece_entry*>(&*range.first); cached_piece_entry* pe = const_cast<cached_piece_entry*>(&*range.first);
flush_piece(pe, flags, completed_jobs, l); flush_piece(pe, flags, completed_jobs, l);
range = m_disk_cache.all_pieces(); range = m_disk_cache.all_pieces();

View File

@ -1498,7 +1498,6 @@ typedef struct _FILE_ALLOCATED_RANGE_BUFFER {
int offset = 0; int offset = 0;
for (int i = 0; i < num_bufs; ++i) for (int i = 0; i < num_bufs; ++i)
{ {
// TODO: 2 use vm_copy here, if available, and if buffers are aligned
memcpy(dst + offset, bufs[i].iov_base, bufs[i].iov_len); memcpy(dst + offset, bufs[i].iov_base, bufs[i].iov_len);
offset += bufs[i].iov_len; offset += bufs[i].iov_len;
} }
@ -1509,7 +1508,6 @@ typedef struct _FILE_ALLOCATED_RANGE_BUFFER {
int offset = 0; int offset = 0;
for (int i = 0; i < num_bufs; ++i) for (int i = 0; i < num_bufs; ++i)
{ {
// TODO: 2 use vm_copy here, if available, and if buffers are aligned
memcpy(bufs[i].iov_base, src + offset, bufs[i].iov_len); memcpy(bufs[i].iov_base, src + offset, bufs[i].iov_len);
offset += bufs[i].iov_len; offset += bufs[i].iov_len;
} }