minor code refactor in disk_io_thread and block_cache (#1238)

Alden Torres 2016-10-21 00:25:48 -04:00 committed by Arvid Norberg
parent 8c20f4e1fd
commit a12f62593e
2 changed files with 12 additions and 13 deletions

src/block_cache.cpp

@@ -984,7 +984,7 @@ void block_cache::erase_piece(cached_piece_entry* pe)
TORRENT_PIECE_ASSERT(pe->cache_state < cached_piece_entry::num_lrus, pe);
TORRENT_PIECE_ASSERT(pe->jobs.empty(), pe);
linked_list<cached_piece_entry>* lru_list = &m_lru[pe->cache_state];
- if (pe->hash)
+ if (pe->hash != nullptr)
{
TORRENT_PIECE_ASSERT(pe->hash->offset == 0, pe);
delete pe->hash;
@@ -1274,7 +1274,7 @@ void block_cache::move_to_ghost(cached_piece_entry* pe)
linked_list<cached_piece_entry>* ghost_list = &m_lru[pe->cache_state + 1];
while (ghost_list->size() >= m_ghost_size)
{
- cached_piece_entry* p = static_cast<cached_piece_entry*>(ghost_list->front());
+ cached_piece_entry* p = ghost_list->front();
TORRENT_PIECE_ASSERT(p != pe, p);
TORRENT_PIECE_ASSERT(p->num_blocks == 0, p);
TORRENT_PIECE_ASSERT(p->refcount == 0, p);
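Note: the block_cache changes above are purely stylistic. Pointer tests are spelled out against nullptr, and the static_cast on the ghost list's front() is dropped, presumably because linked_list<cached_piece_entry> already hands back a cached_piece_entry*. A minimal hypothetical sketch (not libtorrent's real linked_list) of why the cast adds nothing:

// Hypothetical sketch: a list templated on the element type already
// returns that type from front(), so no downcast is needed.
#include <cassert>

template <typename T>
struct linked_list
{
	T* front() const { return m_front; }    // already typed, no static_cast required
	void push_front(T* e) { m_front = e; }  // heavily simplified for the sketch
	T* m_front = nullptr;
};

struct cached_piece_entry {};

int main()
{
	linked_list<cached_piece_entry> ghost_list;
	cached_piece_entry pe;
	ghost_list.push_front(&pe);

	// before: static_cast<cached_piece_entry*>(ghost_list.front())
	cached_piece_entry* p = ghost_list.front();
	assert(p != nullptr);                    // explicit nullptr comparison, as in the patch
	return 0;
}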

src/disk_io_thread.cpp

@@ -2141,7 +2141,7 @@ namespace libtorrent
for (auto i = hash_jobs.iterate(); i.get(); i.next())
{
disk_io_job* hj = i.get();
- memcpy(hj->d.piece_hash, result.data(), 20);
+ std::memcpy(hj->d.piece_hash, result.data(), 20);
hj->ret = 0;
}
@@ -2203,7 +2203,7 @@ namespace libtorrent
m_disk_cache.free_buffer(static_cast<char*>(iov.iov_base));
sha1_hash piece_hash = h.final();
- memcpy(j->d.piece_hash, &piece_hash[0], 20);
+ std::memcpy(j->d.piece_hash, piece_hash.data(), 20);
return ret >= 0 ? 0 : -1;
}
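Note: the memcpy lines in this file change in two ways: the call is qualified as std::memcpy (from <cstring>), and the digest is read through sha1_hash's data() accessor rather than &piece_hash[0]. A small standalone sketch of the same pattern, using std::array as a stand-in for sha1_hash:

#include <array>
#include <cstring>                          // std::memcpy

int main()
{
	std::array<char, 20> piece_hash{};       // stand-in for the 20-byte SHA-1 digest
	char dest[20];

	// before: memcpy(dest, &piece_hash[0], 20);
	std::memcpy(dest, piece_hash.data(), 20); // data() states "raw buffer" directly
	return dest[0];
}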
@@ -2235,11 +2235,11 @@ namespace libtorrent
TORRENT_PIECE_ASSERT(pe->cache_state <= cached_piece_entry::read_lru1 || pe->cache_state == cached_piece_entry::read_lru2, pe);
// are we already done hashing?
- if (pe->hash && !pe->hashing && pe->hash->offset == piece_size)
+ if (pe->hash != nullptr && !pe->hashing && pe->hash->offset == piece_size)
{
DLOG("do_hash: (%d) (already done)\n", int(pe->piece));
sha1_hash piece_hash = pe->hash->h.final();
- memcpy(j->d.piece_hash, &piece_hash[0], 20);
+ std::memcpy(j->d.piece_hash, piece_hash.data(), 20);
delete pe->hash;
pe->hash = nullptr;
if (pe->cache_state != cached_piece_entry::volatile_read_lru)
@@ -2302,7 +2302,7 @@ namespace libtorrent
// their refcounts. This is used to decrement only these blocks
// later.
int* locked_blocks = TORRENT_ALLOCA(int, blocks_in_piece);
- memset(locked_blocks, 0, blocks_in_piece * sizeof(int));
+ std::memset(locked_blocks, 0, blocks_in_piece * sizeof(int));
int num_locked_blocks = 0;
// increment the refcounts of all
@@ -2386,8 +2386,7 @@ namespace libtorrent
if (ret != iov.iov_len)
{
ret = -1;
- j->error.ec.assign(boost::asio::error::eof
- 	, boost::asio::error::get_misc_category());
+ j->error.ec = boost::asio::error::eof;
j->error.operation = storage_error::read;
m_disk_cache.free_buffer(static_cast<char*>(iov.iov_base));
break;
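Note: the error-handling change works because boost::asio::error::eof is an error-code enum registered with Boost.System, so assigning it to an error_code selects the matching category (the misc category the old code named explicitly). A minimal sketch:

#include <boost/asio/error.hpp>
#include <boost/system/error_code.hpp>
#include <cassert>

int main()
{
	boost::system::error_code ec;

	// before: ec.assign(boost::asio::error::eof, boost::asio::error::get_misc_category());
	ec = boost::asio::error::eof;            // same code and category, less noise

	assert(ec == boost::asio::error::eof);
	assert(ec.category() == boost::asio::error::get_misc_category());
	return 0;
}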
@@ -2428,7 +2427,7 @@ namespace libtorrent
if (ret >= 0)
{
sha1_hash piece_hash = ph->h.final();
- memcpy(j->d.piece_hash, &piece_hash[0], 20);
+ std::memcpy(j->d.piece_hash, piece_hash.data(), 20);
delete pe->hash;
pe->hash = nullptr;
@@ -3111,7 +3110,7 @@ namespace libtorrent
std::unique_lock<std::mutex> l(m_cache_mutex);
DLOG("blocked_jobs: %d queued_jobs: %d num_threads %d\n"
, int(m_stats_counters[counters::blocked_disk_jobs])
- , m_generic_io_jobs.m_queued_jobs.size(), int(num_threads()));
+ , m_generic_io_jobs.m_queued_jobs.size(), num_threads());
m_last_cache_expiry = now;
jobqueue_t completed_jobs;
flush_expired_write_blocks(completed_jobs, l);
@@ -3224,7 +3223,7 @@ namespace libtorrent
if (--m_num_running_threads > 0 || !m_abort)
{
DLOG("exiting disk thread %s. num_threads: %d aborting: %d\n"
- , thread_id_str.str().c_str(), int(num_threads()), int(m_abort));
+ , thread_id_str.str().c_str(), num_threads(), int(m_abort));
TORRENT_ASSERT(m_magic == 0x1337);
return;
}
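Note: the two logging changes drop int() casts around num_threads(). Since DLOG is printf-style, %d needs an actual int in the variadic call, so dropping the cast is only safe if num_threads() already returns int; a hypothetical sketch of that assumption:

#include <cstdio>

struct disk_pool
{
	int num_threads() const { return 4; }    // hypothetical stand-in returning int
};

int main()
{
	disk_pool pool;
	// with an int return type, wrapping the call in int() adds nothing:
	std::printf("num_threads %d\n", pool.num_threads());
	return 0;
}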
@@ -3519,7 +3518,7 @@ namespace libtorrent
// This is run in the network thread
// TODO: 2 it would be nice to get rid of m_userdata and just have a function
// object to pass all the job completions to. It could in turn be responsible
- // for posting them to the correct io_servive
+ // for posting them to the correct io_service
void disk_io_thread::call_job_handlers(void* userdata)
{
std::unique_lock<std::mutex> l(m_completed_jobs_mutex);