From dd7179fb10f7056ca0d69f69d92c5f1ffa193a25 Mon Sep 17 00:00:00 2001
From: Alden Torres
Date: Sat, 10 Dec 2016 14:15:25 -0500
Subject: [PATCH] added a few consts to arguments and local variables

---
 src/disk_io_thread.cpp       | 46 ++++++++++++++++++------------------
 src/enum_net.cpp             |  2 +-
 src/file.cpp                 |  2 +-
 src/file_progress.cpp        |  4 ++--
 src/http_connection.cpp      |  2 +-
 src/kademlia/rpc_manager.cpp |  2 +-
 src/natpmp.cpp               |  4 ++--
 src/part_file.cpp            |  2 +-
 src/peer_class_set.cpp       |  2 +-
 src/peer_connection.cpp      |  2 +-
 src/peer_list.cpp            |  4 ++--
 src/storage.cpp              |  4 ++--
 src/torrent.cpp              |  6 ++---
 src/upnp.cpp                 |  2 +-
 src/ut_metadata.cpp          |  6 ++---
 src/utp_stream.cpp           |  6 ++---
 src/web_peer_connection.cpp  |  6 ++---
 17 files changed, 51 insertions(+), 51 deletions(-)

diff --git a/src/disk_io_thread.cpp b/src/disk_io_thread.cpp
index 7719d480c..a78f7d158 100644
--- a/src/disk_io_thread.cpp
+++ b/src/disk_io_thread.cpp
@@ -549,7 +549,7 @@ namespace libtorrent
 	// multiple pieces, the subsequent pieces after the first one, must have
 	// their block indices start where the previous one left off
 	int disk_io_thread::build_iovec(cached_piece_entry* pe, int start, int end
-		, span iov, span flushing, int block_base_index)
+		, span iov, span flushing, int const block_base_index)
 	{
 		DLOG("build_iovec: piece=%d [%d, %d)\n"
 			, int(pe->piece), start, end);
@@ -569,7 +569,7 @@ namespace libtorrent
 		for (int i = 0; i < start; ++i) DLOG(".");
 #endif
 
-		int block_size = m_disk_cache.block_size();
+		int const block_size = m_disk_cache.block_size();
 		int size_left = piece_size;
 		for (int i = start; i < end; ++i, size_left -= block_size)
 		{
@@ -618,8 +618,8 @@ namespace libtorrent
 		TORRENT_PIECE_ASSERT(num_blocks > 0, pe);
 		m_stats_counters.inc_stats_counter(counters::num_writing_threads, 1);
 
-		time_point start_time = clock_type::now();
-		int block_size = m_disk_cache.block_size();
+		time_point const start_time = clock_type::now();
+		int const block_size = m_disk_cache.block_size();
 
 #if DEBUG_DISK_THREAD
 		DLOG("flush_iovec: piece: %d [ ", int(pe->piece));
@@ -634,8 +634,8 @@ namespace libtorrent
 		// issue the actual write operation
 		auto iov_start = iov;
 		int flushing_start = 0;
-		int piece = pe->piece;
-		int blocks_in_piece = pe->blocks_in_piece;
+		int const piece = pe->piece;
+		int const blocks_in_piece = pe->blocks_in_piece;
 		bool failed = false;
 		for (int i = 1; i <= num_blocks; ++i)
 		{
@@ -698,7 +698,7 @@ namespace libtorrent
 #endif
 
 		m_disk_cache.blocks_flushed(pe, flushing, num_blocks);
-		int block_size = m_disk_cache.block_size();
+		int const block_size = m_disk_cache.block_size();
 
 		if (error)
 		{
@@ -1098,7 +1098,7 @@ namespace libtorrent
 
 		TORRENT_ASSERT(j->action < sizeof(job_functions) / sizeof(job_functions[0]));
 
-		time_point start_time = clock_type::now();
+		time_point const start_time = clock_type::now();
 
 		m_stats_counters.inc_stats_counter(counters::num_running_disk_jobs, 1);
 
@@ -1192,7 +1192,7 @@ namespace libtorrent
 			return status_t::fatal_disk_error;
 		}
 
-		time_point start_time = clock_type::now();
+		time_point const start_time = clock_type::now();
 
 		int const file_flags = file_flags_for_job(j
 			, m_settings.get_bool(settings_pack::coalesce_reads));
@@ -1272,7 +1272,7 @@ namespace libtorrent
 		int const file_flags = file_flags_for_job(j
 			, m_settings.get_bool(settings_pack::coalesce_reads));
 
-		time_point start_time = clock_type::now();
+		time_point const start_time = clock_type::now();
 
 		ret = j->storage->readv(iov
 			, j->piece, int(adjusted_offset), file_flags, j->error);
@@ -1426,7 +1426,7 @@ namespace libtorrent
 
 	status_t disk_io_thread::do_uncached_write(disk_io_job* j)
 	{
-		time_point start_time = clock_type::now();
+		time_point const start_time = clock_type::now();
 
 		file::iovec_t const b = { j->buffer.disk_block, size_t(j->d.io.buffer_size) };
 		int const file_flags = file_flags_for_job(j
@@ -1982,14 +1982,14 @@ namespace libtorrent
 		if (!pe->hash) return;
 		if (pe->hashing) return;
 
-		int piece_size = pe->storage->files()->piece_size(pe->piece);
+		int const piece_size = pe->storage->files()->piece_size(pe->piece);
 		partial_hash* ph = pe->hash.get();
 
 		// are we already done?
 		if (ph->offset >= piece_size) return;
 
-		int block_size = m_disk_cache.block_size();
-		int cursor = ph->offset / block_size;
+		int const block_size = m_disk_cache.block_size();
+		int const cursor = ph->offset / block_size;
 		int end = cursor;
 		TORRENT_PIECE_ASSERT(ph->offset % block_size == 0, pe);
 
@@ -2021,7 +2021,7 @@ namespace libtorrent
 
 		l.unlock();
 
-		time_point start_time = clock_type::now();
+		time_point const start_time = clock_type::now();
 
 		for (int i = cursor; i < end; ++i)
 		{
@@ -2031,7 +2031,7 @@ namespace libtorrent
 			offset += size;
 		}
 
-		std::int64_t hash_time = total_microseconds(clock_type::now() - start_time);
+		std::int64_t const hash_time = total_microseconds(clock_type::now() - start_time);
 
 		l.lock();
 
@@ -2113,7 +2113,7 @@ namespace libtorrent
 			DLOG("do_hash: (uncached) reading (piece: %d block: %d)\n"
 				, int(j->piece), i);
 
-			time_point start_time = clock_type::now();
+			time_point const start_time = clock_type::now();
 
 			iov.iov_len = (std::min)(block_size, piece_size - offset);
 			ret = j->storage->readv(iov, j->piece
@@ -2227,8 +2227,8 @@ namespace libtorrent
 		}
 
 		partial_hash* ph = pe->hash.get();
-		int block_size = m_disk_cache.block_size();
-		int blocks_in_piece = (piece_size + block_size - 1) / block_size;
+		int const block_size = m_disk_cache.block_size();
+		int const blocks_in_piece = (piece_size + block_size - 1) / block_size;
 
 		// keep track of which blocks we have locked by incrementing
 		// their refcounts. This is used to decrement only these blocks
@@ -2304,7 +2304,7 @@ namespace libtorrent
 
 			DLOG("do_hash: reading (piece: %d block: %d)\n", int(pe->piece), i);
 
-			time_point start_time = clock_type::now();
+			time_point const start_time = clock_type::now();
 
 			TORRENT_PIECE_ASSERT(offset == i * block_size, pe);
 			int read_ret = j->storage->readv(iov, j->piece
@@ -2565,7 +2565,7 @@ namespace libtorrent
 		m_disk_cache.update_stats_counters(c);
 	}
 
-	void disk_io_thread::get_cache_info(cache_status* ret, bool no_pieces
+	void disk_io_thread::get_cache_info(cache_status* ret, bool const no_pieces
 		, storage_interface const* storage) const
 	{
 		std::unique_lock l(m_cache_mutex);
@@ -2617,7 +2617,7 @@ namespace libtorrent
 
 		if (no_pieces == false)
 		{
-			int block_size = m_disk_cache.block_size();
+			int const block_size = m_disk_cache.block_size();
 
 			if (storage)
 			{
@@ -2948,7 +2948,7 @@ namespace libtorrent
 
 	void disk_io_thread::maybe_flush_write_blocks()
 	{
-		time_point now = clock_type::now();
+		time_point const now = clock_type::now();
 		if (now <= m_last_cache_expiry + seconds(5)) return;
 
 		std::unique_lock l(m_cache_mutex);
diff --git a/src/enum_net.cpp b/src/enum_net.cpp
index b44e2cd7a..d59b59f51 100644
--- a/src/enum_net.cpp
+++ b/src/enum_net.cpp
@@ -152,7 +152,7 @@ namespace libtorrent { namespace
 
 #if TORRENT_USE_NETLINK
 
-	int read_nl_sock(int sock, char *buf, int bufsize, int seq, int pid)
+	int read_nl_sock(int sock, char *buf, int bufsize, int const seq, int const pid)
 	{
 		nlmsghdr* nl_hdr;
 
diff --git a/src/file.cpp b/src/file.cpp
index cb87566c3..305c8f32d 100644
--- a/src/file.cpp
+++ b/src/file.cpp
@@ -1774,7 +1774,7 @@ typedef struct _FILE_ALLOCATED_RANGE_BUFFER {
 		std::int64_t ret = 0;
 		for (auto i : bufs)
 		{
-			std::int64_t tmp_ret = f(fd, i.iov_base, i.iov_len, file_offset);
+			std::int64_t const tmp_ret = f(fd, i.iov_base, i.iov_len, file_offset);
 			if (tmp_ret < 0)
 			{
 #ifdef TORRENT_WINDOWS
diff --git a/src/file_progress.cpp b/src/file_progress.cpp
index 03bca73c0..f16379c8a 100644
--- a/src/file_progress.cpp
+++ b/src/file_progress.cpp
@@ -154,10 +154,10 @@ namespace libtorrent { namespace aux
 			std::int64_t size = fs.piece_size(index);
 			for (; size > 0; ++file_index)
 			{
-				std::int64_t file_offset = off - fs.file_offset(file_index);
+				std::int64_t const file_offset = off - fs.file_offset(file_index);
 				TORRENT_ASSERT(file_index != fs.num_files());
 				TORRENT_ASSERT(file_offset <= fs.file_size(file_index));
-				std::int64_t add = (std::min)(fs.file_size(file_index)
+				std::int64_t const add = (std::min)(fs.file_size(file_index)
 					- file_offset, std::int64_t(size));
 				m_file_progress[file_index] += add;
 
diff --git a/src/http_connection.cpp b/src/http_connection.cpp
index 3671c3e06..97f52f41e 100644
--- a/src/http_connection.cpp
+++ b/src/http_connection.cpp
@@ -422,7 +422,7 @@ void http_connection::on_timeout(std::weak_ptr p
 
 	if (c->m_abort) return;
 
-	time_point now = clock_type::now();
+	time_point const now = clock_type::now();
 
 	if (c->m_start_time + c->m_completion_timeout <= now
 		|| c->m_last_receive + c->m_read_timeout <= now)
diff --git a/src/kademlia/rpc_manager.cpp b/src/kademlia/rpc_manager.cpp
index a2f16d265..2f7c749ae 100644
--- a/src/kademlia/rpc_manager.cpp
+++ b/src/kademlia/rpc_manager.cpp
@@ -281,7 +281,7 @@ bool rpc_manager::incoming(msg const& m, node_id* id)
 		return false;
 	}
 
-	time_point now = clock_type::now();
+	time_point const now = clock_type::now();
 
 #ifndef TORRENT_DISABLE_LOGGING
 	if (m_log->should_log(dht_logger::rpc_manager))
diff --git a/src/natpmp.cpp b/src/natpmp.cpp
index 1617de0f6..4a1abf49a 100644
--- a/src/natpmp.cpp
+++ b/src/natpmp.cpp
@@ -601,7 +601,7 @@ void natpmp::update_expiration_timer()
 	TORRENT_ASSERT(is_single_thread());
 	if (m_abort) return;
 
-	time_point now = aux::time_now() + milliseconds(100);
+	time_point const now = aux::time_now() + milliseconds(100);
 	time_point min_expire = now + seconds(3600);
 	int min_index = -1;
 	for (std::vector::iterator i = m_mappings.begin()
@@ -609,7 +609,7 @@ void natpmp::update_expiration_timer()
 	{
 		if (i->protocol == portmap_protocol::none
 			|| i->act != mapping_t::action::none) continue;
-		int index = int(i - m_mappings.begin());
+		int const index = int(i - m_mappings.begin());
 		if (i->expires < now)
 		{
 #ifndef TORRENT_DISABLE_LOGGING
diff --git a/src/part_file.cpp b/src/part_file.cpp
index 91b44e7ff..c6f72e4c3 100644
--- a/src/part_file.cpp
+++ b/src/part_file.cpp
@@ -297,7 +297,7 @@ namespace libtorrent
 			, boost::system::generic_category());
 	}
 
-	void part_file::export_file(file& f, std::int64_t offset, std::int64_t size, error_code& ec)
+	void part_file::export_file(file& f, std::int64_t const offset, std::int64_t size, error_code& ec)
 	{
 		std::unique_lock l(m_mutex);
 
diff --git a/src/peer_class_set.cpp b/src/peer_class_set.cpp
index 5939c7655..4fe3d019e 100644
--- a/src/peer_class_set.cpp
+++ b/src/peer_class_set.cpp
@@ -56,7 +56,7 @@ namespace libtorrent
 			!= m_class.begin() + m_size;
 	}
 
-	void peer_class_set::remove_class(peer_class_pool& pool, peer_class_t c)
+	void peer_class_set::remove_class(peer_class_pool& pool, peer_class_t const c)
 	{
 		std::array::iterator i = std::find(m_class.begin()
 			, m_class.begin() + m_size, c);
diff --git a/src/peer_connection.cpp b/src/peer_connection.cpp
index 54e79a9c1..93dc42f79 100644
--- a/src/peer_connection.cpp
+++ b/src/peer_connection.cpp
@@ -6180,7 +6180,7 @@ namespace libtorrent
 
 		m_send_buffer.pop_front(int(bytes_transferred));
 
-		time_point now = clock_type::now();
+		time_point const now = clock_type::now();
 
 		for (auto& block : m_download_queue)
 		{
diff --git a/src/peer_list.cpp b/src/peer_list.cpp
index 5dee15429..78e54d005 100644
--- a/src/peer_list.cpp
+++ b/src/peer_list.cpp
@@ -155,7 +155,7 @@ namespace libtorrent
 				continue;
 			}
 
-			int current = int(i - m_peers.begin());
+			int const current = int(i - m_peers.begin());
 			TORRENT_ASSERT(current >= 0);
 			TORRENT_ASSERT(m_peers.size() > 0);
 			TORRENT_ASSERT(i != m_peers.end());
@@ -216,7 +216,7 @@ namespace libtorrent
 				continue;
 			}
 
-			int current = int(i - m_peers.begin());
+			int const current = int(i - m_peers.begin());
 			TORRENT_ASSERT(current >= 0);
 			TORRENT_ASSERT(m_peers.size() > 0);
 			TORRENT_ASSERT(i != m_peers.end());
diff --git a/src/storage.cpp b/src/storage.cpp
index 28eb1da4e..9bd140c07 100644
--- a/src/storage.cpp
+++ b/src/storage.cpp
@@ -271,7 +271,7 @@ namespace libtorrent
 #endif
 
 		error_code e;
-		int ret = int(handle->writev(adjusted_offset
+		int const ret = int(handle->writev(adjusted_offset
 			, bufs, e, m_flags));
 
 		// set this unconditionally in case the upper layer would like to treat
@@ -356,7 +356,7 @@ namespace libtorrent
 #endif
 
 		error_code e;
-		int ret = int(handle->readv(adjusted_offset
+		int const ret = int(handle->readv(adjusted_offset
 			, bufs, e, m_flags));
 
 		// set this unconditionally in case the upper layer would like to treat
diff --git a/src/torrent.cpp b/src/torrent.cpp
index 10c531ce6..8f0c424d1 100644
--- a/src/torrent.cpp
+++ b/src/torrent.cpp
@@ -2665,7 +2665,7 @@ namespace libtorrent
 		req.num_want = (req.event == tracker_request::stopped)
 			? 0 : settings().get_int(settings_pack::num_want);
 
-		time_point now = clock_type::now();
+		time_point const now = clock_type::now();
 
 		// the tier is kept as INT_MAX until we find the first
 		// tracker that works, then it's set to that tracker's
@@ -9443,7 +9443,7 @@ namespace libtorrent
 	{
 		using namespace libtorrent;
 
-		time_point now = clock_type::now();
+		time_point const now = clock_type::now();
 
 		float deadline = 0.f;
 		float last_request = 0.f;
@@ -9810,7 +9810,7 @@ namespace libtorrent
 		// piece is done
 		std::vector ignore_peers;
 
-		time_point now = clock_type::now();
+		time_point const now = clock_type::now();
 
 		// now, iterate over all time critical pieces, in order of importance, and
 		// request them from the peers, in order of responsiveness. i.e. request
diff --git a/src/upnp.cpp b/src/upnp.cpp
index f32e06bc8..00585a513 100644
--- a/src/upnp.cpp
+++ b/src/upnp.cpp
@@ -770,7 +770,7 @@ void upnp::next(rootdevice& d, int i)
 	}
 }
 
-void upnp::update_map(rootdevice& d, int i)
+void upnp::update_map(rootdevice& d, int const i)
 {
 	TORRENT_ASSERT(is_single_thread());
 	TORRENT_ASSERT(d.magic == 1337);
diff --git a/src/ut_metadata.cpp b/src/ut_metadata.cpp
index b5f064684..4c0aae857 100644
--- a/src/ut_metadata.cpp
+++ b/src/ut_metadata.cpp
@@ -141,7 +141,7 @@ namespace libtorrent { namespace
 			metadata();
 		}
 
-		void metadata_size(int size)
+		void metadata_size(int const size)
 		{
 			if (m_metadata_size > 0 || size <= 0 || size > 4 * 1024 * 1024) return;
 			m_metadata_size = size;
@@ -225,7 +225,7 @@ namespace libtorrent { namespace
 			return true;
 		}
 
-		void write_metadata_packet(int type, int piece)
+		void write_metadata_packet(int const type, int const piece)
 		{
 			TORRENT_ASSERT(type >= 0 && type <= 2);
 			TORRENT_ASSERT(!m_pc.associated_torrent().expired());
@@ -486,7 +486,7 @@ namespace libtorrent { namespace
 	// has_metadata is false if the peer making the request has not announced
 	// that it has metadata. In this case, it shouldn't prevent other peers
 	// from requesting this block by setting a timeout on it.
-	int ut_metadata_plugin::metadata_request(bool has_metadata)
+	int ut_metadata_plugin::metadata_request(bool const has_metadata)
 	{
 		std::vector::iterator i = std::min_element(
 			m_requested_metadata.begin(), m_requested_metadata.end());
diff --git a/src/utp_stream.cpp b/src/utp_stream.cpp
index 4d23b6dae..62da846a0 100644
--- a/src/utp_stream.cpp
+++ b/src/utp_stream.cpp
@@ -1337,7 +1337,7 @@ void utp_socket_impl::send_syn()
 	h->seq_nr = m_seq_nr;
 	h->ack_nr = 0;
 
-	time_point now = clock_type::now();
+	time_point const now = clock_type::now();
 	p->send_time = now;
 	h->timestamp_microseconds = std::uint32_t(
 		total_microseconds(now.time_since_epoch()) & 0xffffffff);
@@ -1431,7 +1431,7 @@ void utp_socket_impl::send_reset(utp_header const* ph)
 	h.wnd_size = 0;
 	h.seq_nr = std::uint16_t(random(0xffff));
 	h.ack_nr = ph->seq_nr;
-	time_point now = clock_type::now();
+	time_point const now = clock_type::now();
 	h.timestamp_microseconds = std::uint32_t(
 		total_microseconds(now.time_since_epoch()) & 0xffffffff);
 
@@ -2008,7 +2008,7 @@ bool utp_socket_impl::send_pkt(int const flags)
 		h->type_ver = (ST_FIN << 4) | 1;
 
 	// fill in the timestamp as late as possible
-	time_point now = clock_type::now();
+	time_point const now = clock_type::now();
 	p->send_time = now;
 	h->timestamp_microseconds = std::uint32_t(
 		total_microseconds(now.time_since_epoch()) & 0xffffffff);
diff --git a/src/web_peer_connection.cpp b/src/web_peer_connection.cpp
index bff3a8640..9bb96069b 100644
--- a/src/web_peer_connection.cpp
+++ b/src/web_peer_connection.cpp
@@ -541,10 +541,10 @@ bool web_peer_connection::received_invalid_data(int index, bool single_peer)
 	{
 		// assume the web seed has a different copy of this specific file
 		// than what we expect, and pretend not to have it.
-		int fi = files[0].file_index;
-		int first_piece = int(fs.file_offset(fi) / fs.piece_length());
+		int const fi = files[0].file_index;
+		int const first_piece = int(fs.file_offset(fi) / fs.piece_length());
 		// one past last piece
-		int end_piece = int((fs.file_offset(fi) + fs.file_size(fi) + 1) / fs.piece_length());
+		int const end_piece = int((fs.file_offset(fi) + fs.file_size(fi) + 1) / fs.piece_length());
 		for (int i = first_piece; i < end_piece; ++i)
 			incoming_dont_have(i);
 	}