more use of const modifier

This commit is contained in:
Alden Torres 2018-08-14 15:26:10 -04:00 committed by Arvid Norberg
parent 3d0e5fdec9
commit 6a13d14f11
3 changed files with 11 additions and 11 deletions

View File

@ -1320,7 +1320,7 @@ void block_cache::insert_blocks(cached_piece_entry* pe, int block, span<iovec_t
}
// return false if the memory was purged
bool block_cache::inc_block_refcount(cached_piece_entry* pe, int block, int reason)
bool block_cache::inc_block_refcount(cached_piece_entry* pe, int const block, int const reason)
{
TORRENT_PIECE_ASSERT(pe->in_use, pe);
TORRENT_PIECE_ASSERT(block < pe->blocks_in_piece, pe);

View File

@ -129,10 +129,10 @@ namespace libtorrent {
{
if (action != job_action_t::write) return false;
int block_offset = d.io.offset & (default_block_size - 1);
int size = d.io.buffer_size;
int start = d.io.offset / default_block_size;
int end = block_offset > 0 && (size > default_block_size - block_offset) ? start + 2 : start + 1;
int const block_offset = d.io.offset & (default_block_size - 1);
int const size = d.io.buffer_size;
int const start = d.io.offset / default_block_size;
int const end = block_offset > 0 && (size > default_block_size - block_offset) ? start + 2 : start + 1;
for (int i = start; i < end; ++i)
{

View File

@ -578,7 +578,7 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
// if the cache is under high pressure, we need to evict
// the blocks we just flushed to make room for more write pieces
int evict = m_disk_cache.num_to_evict(0);
int const evict = m_disk_cache.num_to_evict(0);
if (evict > 0) m_disk_cache.try_evict_blocks(evict);
return iov_len;
@ -631,7 +631,7 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
}
// if we fail to lock the block, it's no longer in the cache
bool locked = m_disk_cache.inc_block_refcount(pe, i, block_cache::ref_flushing);
bool const locked = m_disk_cache.inc_block_refcount(pe, i, block_cache::ref_flushing);
// it should always succeed, since it's a dirty block, and
// should never have been marked as volatile
@ -1775,7 +1775,7 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
}
void disk_io_thread::async_hash(storage_index_t const storage
, piece_index_t piece, disk_job_flags_t const flags
, piece_index_t const piece, disk_job_flags_t const flags
, std::function<void(piece_index_t, sha1_hash const&, storage_error const&)> handler)
{
disk_io_job* j = allocate_job(job_action_t::hash);
@ -1880,7 +1880,7 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
}
void disk_io_thread::async_rename_file(storage_index_t const storage
, file_index_t index, std::string name
, file_index_t const index, std::string name
, std::function<void(std::string const&, file_index_t, storage_error const&)> handler)
{
disk_io_job* j = allocate_job(job_action_t::rename_file);
@ -1970,7 +1970,7 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
// in fact, no jobs should really be hung on this piece
// at this point
jobqueue_t jobs;
bool ok = m_disk_cache.evict_piece(pe, jobs, block_cache::allow_ghost);
bool const ok = m_disk_cache.evict_piece(pe, jobs, block_cache::allow_ghost);
TORRENT_PIECE_ASSERT(ok, pe);
TORRENT_UNUSED(ok);
fail_jobs(storage_error(boost::asio::error::operation_aborted), jobs);
@ -2590,7 +2590,7 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
: i->cache_state == cached_piece_entry::volatile_read_lru
? cached_piece_info::volatile_read_cache
: cached_piece_info::read_cache;
int blocks_in_piece = i->blocks_in_piece;
int const blocks_in_piece = i->blocks_in_piece;
info.blocks.resize(aux::numeric_cast<std::size_t>(blocks_in_piece));
for (int b = 0; b < blocks_in_piece; ++b)
info.blocks[std::size_t(b)] = i->blocks[b].buf != nullptr;