lint guided refactor in dht source code (#2516)
parent 1c278cc697
commit c771f16c5c
@@ -575,7 +575,7 @@ namespace libtorrent { namespace dht {
 		, dht_observer* observer, counters& cnt
 		, get_foreign_node_t get_foreign_node
 		, dht_storage_interface& storage)
-		: dht(s, sock, settings, nid, observer, cnt, get_foreign_node, storage)
+		: dht(s, sock, settings, nid, observer, cnt, std::move(get_foreign_node), storage)
 		, connection_timer(ios)
 	{}
 
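The hunk above applies the usual sink-parameter pattern: a callable taken by value is moved into the member it initializes instead of being copied a second time. A minimal standalone sketch of that pattern follows; the alias, signature, and member names are illustrative, not libtorrent's.

#include <functional>
#include <utility>

using get_foreign_node_t = std::function<int(int)>; // hypothetical signature

struct tracker_node
{
	// take the callable by value, then move it into the member; the caller can
	// pass an lvalue (one copy) or an rvalue (no copy at all)
	explicit tracker_node(get_foreign_node_t get_foreign_node)
		: m_get_foreign_node(std::move(get_foreign_node))
	{}

	get_foreign_node_t m_get_foreign_node;
};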
@@ -616,7 +616,7 @@ namespace libtorrent { namespace dht {
 		{
 			// use the local rather than external address because if the user is behind NAT
 			// we won't know the external IP on startup
-			ret.nids.push_back(std::make_pair(n.first.get_local_endpoint().address(), n.second.dht.nid()));
+			ret.nids.emplace_back(n.first.get_local_endpoint().address(), n.second.dht.nid());
 			auto nodes = save_nodes(n.second.dht);
 			ret.nodes.insert(ret.nodes.end(), nodes.begin(), nodes.end());
 		}
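emplace_back forwards its arguments straight to the element's constructor, so the temporary produced by std::make_pair, and the move out of it, disappear. A small self-contained illustration of the same rewrite, with placeholder types standing in for the real nids container:

#include <string>
#include <utility>
#include <vector>

int main()
{
	std::vector<std::pair<std::string, int>> nids;

	// before: std::make_pair builds a temporary pair that is then moved into the vector
	nids.push_back(std::make_pair(std::string("node"), 6881));

	// after: the pair is constructed directly in the vector's storage
	nids.emplace_back("node", 6881);
}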
@@ -50,7 +50,7 @@ namespace libtorrent { namespace dht {
 	}
 }
 
-bool dos_blocker::incoming(address const& addr, time_point now, dht_logger* logger)
+bool dos_blocker::incoming(address const& addr, time_point const now, dht_logger* logger)
 {
 	node_ban_entry* match = nullptr;
 	node_ban_entry* min = m_ban_nodes;
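Adding const to a by-value parameter such as `now` changes nothing for callers or for overload resolution; it only keeps the function body from reassigning the parameter. A tiny sketch of the idea, unrelated to the dos_blocker internals:

#include <chrono>

using time_point = std::chrono::steady_clock::time_point;

// 'now' is a local copy; const simply forbids reassigning it inside the body
bool expired(time_point const now, time_point const deadline)
{
	return now >= deadline;
}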
@@ -147,7 +147,7 @@ void find_data::done()
 
 	std::vector<std::pair<node_entry, std::string>> results;
 	int num_results = m_node.m_table.bucket_size();
-	for (std::vector<observer_ptr>::iterator i = m_results.begin()
+	for (auto i = m_results.begin()
 		, end(m_results.end()); i != end && num_results > 0; ++i)
 	{
 		observer_ptr const& o = *i;
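Replacing the spelled-out std::vector<observer_ptr>::iterator with auto is purely cosmetic; the deduced type is identical and the loop behaves the same. Roughly, the loop shape used in these hunks looks like this, with a placeholder element type and body:

#include <vector>

// 'auto' deduces std::vector<int>::const_iterator here
int sum_first_n(std::vector<int> const& results, int num_results)
{
	int sum = 0;
	for (auto i = results.begin(), end = results.end();
		i != end && num_results > 0; ++i, --num_results)
	{
		sum += *i;
	}
	return sum;
}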
@@ -174,7 +174,7 @@ void find_data::done()
 #endif
 			continue;
 		}
-		results.push_back(std::make_pair(node_entry(o->id(), o->target_ep()), j->second));
+		results.emplace_back(node_entry(o->id(), o->target_ep()), j->second);
 #ifndef TORRENT_DISABLE_LOGGING
 		if (logger != nullptr && logger->should_log(dht_logger::traversal))
 		{
@@ -275,7 +275,7 @@ void obfuscated_get_peers::done()
 #endif
 
 	int num_added = 0;
-	for (std::vector<observer_ptr>::iterator i = m_results.begin()
+	for (auto i = m_results.begin()
 		, end(m_results.end()); i != end && num_added < 16; ++i)
 	{
 		observer_ptr o = *i;
@@ -62,7 +62,7 @@ namespace {
 	char* ptr = out.data();
 
 	std::size_t left = out.size() - aux::numeric_cast<std::size_t>(ptr - out.data());
-	if (salt.size() > 0)
+	if (!salt.empty())
 	{
 		ptr += std::snprintf(ptr, left, "4:salt%d:", int(salt.size()));
 		left = out.size() - aux::numeric_cast<std::size_t>(ptr - out.data());
@@ -90,7 +90,7 @@ sha1_hash item_target_id(span<char const> salt
 	, public_key const& pk)
 {
 	hasher h(pk.bytes);
-	if (salt.size() > 0) h.update(salt);
+	if (!salt.empty()) h.update(salt);
 	return h.final();
 }
 
@@ -188,7 +188,7 @@ bool item::assign(bdecode_node const& v, span<char const> salt
 		return false;
 	m_pk = pk;
 	m_sig = sig;
-	if (salt.size() > 0)
+	if (!salt.empty())
 		m_salt.assign(salt.data(), salt.size());
 	else
 		m_salt.clear();
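The salt hunks above all swap `size() > 0` for `!empty()`, which states the question being asked rather than comparing a count. The equivalence in isolation, with a plain std::string standing in for the salt span:

#include <string>

bool has_salt(std::string const& salt)
{
	// return salt.size() > 0;  // old spelling
	return !salt.empty();       // same result, clearer intent
}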
@@ -96,8 +96,8 @@ void incoming_error(entry& e, char const* msg, int error_code = 203)
 {
 	e["y"] = "e";
 	entry::list_type& l = e["e"].list();
-	l.push_back(entry(error_code));
-	l.push_back(entry(msg));
+	l.emplace_back(error_code);
+	l.emplace_back(msg);
 }
 
 } // anonymous namespace
@@ -452,7 +452,7 @@ void node::announce(sha1_hash const& info_hash, int const listen_port, int const
 	}
 #endif
 
-	get_peers(info_hash, f
+	get_peers(info_hash, std::move(f)
 		, std::bind(&announce_fun, _1, std::ref(*this)
 		, listen_port, info_hash, flags), flags & node::flag_seed);
 }
@@ -995,7 +995,7 @@ void node::incoming_request(msg const& m, entry& e)
 
 		// pointer and length to the whole entry
 		span<char const> buf = msg_keys[1].data_section();
-		if (buf.size() > 1000 || buf.size() <= 0)
+		if (buf.size() > 1000 || buf.empty())
 		{
 			m_counters.inc_stats_counter(counters::dht_invalid_put);
 			incoming_error(e, "message too big", 205);
@@ -72,7 +72,7 @@ namespace libtorrent { namespace dht {
 #endif
 }
 
-void node_entry::update_rtt(int new_rtt)
+void node_entry::update_rtt(int const new_rtt)
 {
 	TORRENT_ASSERT(new_rtt <= 0xffff);
 	TORRENT_ASSERT(new_rtt >= 0);
@@ -36,8 +36,6 @@ POSSIBILITY OF SUCH DAMAGE.
 #include <libtorrent/kademlia/dht_observer.hpp>
 #include <libtorrent/performance_counters.hpp>
 
-#include <libtorrent/io.hpp>
-
 namespace libtorrent { namespace dht {
 
 observer_ptr bootstrap::new_observer(udp::endpoint const& ep
@@ -244,11 +244,9 @@ node_entry const* routing_table::next_refresh()
 	node_entry* candidate = nullptr;
 
 	// this will have a bias towards pinging nodes close to us first.
-	for (table_t::reverse_iterator i = m_buckets.rbegin()
-		, end(m_buckets.rend()); i != end; ++i)
+	for (auto i = m_buckets.rbegin(), end(m_buckets.rend()); i != end; ++i)
 	{
-		for (bucket_t::iterator j = i->live_nodes.begin()
-			, end2(i->live_nodes.end()); j != end2; ++j)
+		for (auto j = i->live_nodes.begin(), end2(i->live_nodes.end()); j != end2; ++j)
 		{
 			// this shouldn't happen
 			TORRENT_ASSERT(m_id != j->id);
@@ -1163,7 +1161,7 @@ bool routing_table::is_full(int const bucket) const
 	if (num_buckets == 0) return false;
 	if (bucket >= num_buckets) return false;
 
-	table_t::const_iterator i = m_buckets.begin();
+	auto i = m_buckets.cbegin();
 	std::advance(i, bucket);
 	return (int(i->live_nodes.size()) >= bucket_limit(bucket)
 		&& int(i->replacements.size()) >= m_bucket_size);
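cbegin() returns a const_iterator even when the container itself is non-const, so auto deduces the read-only iterator type and the traversal is documented as non-mutating. A minimal sketch with a stand-in list in place of the bucket table:

#include <iterator>
#include <list>

int nth_value(std::list<int>& buckets, int const n)
{
	auto i = buckets.cbegin();  // const_iterator despite the non-const container
	std::advance(i, n);         // std::list iterators are not random access
	return *i;
}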
@@ -480,7 +480,7 @@ bool rpc_manager::invoke(entry& e, udp::endpoint const& target_addr
 	node& n = o->algorithm()->get_node();
 	if (!n.native_address(o->target_addr()))
 	{
-		a["want"].list().push_back(entry(n.protocol_family_name()));
+		a["want"].list().emplace_back(n.protocol_family_name());
 	}
 
 	o->set_target(target_addr);
@@ -186,7 +186,7 @@ void traversal_algorithm::add_entry(node_id const& id
 		if (o->target_addr().is_v6())
 		{
 			address_v6::bytes_type addr_bytes = o->target_addr().to_v6().to_bytes();
-			address_v6::bytes_type::const_iterator prefix_it = addr_bytes.begin();
+			auto prefix_it = addr_bytes.cbegin();
 			std::uint64_t const prefix6 = detail::read_uint64(prefix_it);
 
 			if (m_peer6_prefixes.insert(prefix6).second)
@@ -483,7 +483,7 @@ bool traversal_algorithm::add_requests()
 	// limits the number of outstanding requests, this limits the
 	// number of good outstanding requests. It will use more traffic,
 	// but is intended to speed up lookups
-	for (std::vector<observer_ptr>::iterator i = m_results.begin()
+	for (auto i = m_results.begin()
 		, end(m_results.end()); i != end
 		&& results_target > 0
 		&& (agg ? outstanding < m_branch_factor