forked from premiere/premiere-libtorrent
fixed sign-compare warnings and some refactor (#1354)
fixed sign-compare warnings and some refactor. make sequence_number backed by std::int64_t
parent 0df9d1c641
commit d5c4cd7280
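
Most of the changes below follow one pattern: comparisons and loop indices that mix a signed int with the unsigned value returned by size() either get an explicit cast or are rewritten as range-for loops. A minimal illustrative sketch of that pattern (not code from this commit; the function and names are made up):

#include <cstddef>
#include <vector>

int count_nonzero(std::vector<int> const& v)
{
	// before: for (int i = 0; i < v.size(); ++i)   <- triggers -Wsign-compare
	// after: compare against an explicit int() and index with std::size_t
	int ret = 0;
	for (int i = 0; i < int(v.size()); ++i)
		if (v[std::size_t(i)] != 0) ++ret;
	return ret;
}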
Jamfile
@@ -245,7 +245,6 @@ rule warnings ( properties * )
# enable these warnings again, once the other ones are dealt with
result += <cflags>-Wno-weak-vtables ;
- result += <cflags>-Wno-sign-compare ;
result += <cflags>-Wno-sign-conversion ;
result += <cflags>-Wno-shorten-64-to-32 ;
}

@@ -446,7 +446,7 @@ namespace
ses.dht_get_item(public_key, salt);
}

- void put_string(entry& e, std::array<char, 64>& sig, std::uint64_t& seq
+ void put_string(entry& e, std::array<char, 64>& sig, std::int64_t& seq
, std::string const& salt, std::string pk, std::string sk
, std::string data)
{

@@ -470,7 +470,7 @@ namespace
TORRENT_ASSERT(public_key.size() == 32);
std::array<char, 32> key;
std::copy(public_key.begin(), public_key.end(), key.begin());
- ses.dht_put_item(key, [&](entry& e, std::array<char, 64>& sig, std::uint64_t& seq
+ ses.dht_put_item(key, [&](entry& e, std::array<char, 64>& sig, std::int64_t& seq
, std::string const& salt) { put_string(e, sig, seq, salt
, public_key, private_key, data); }
, salt);

@@ -391,7 +391,7 @@ namespace libtorrent
void dht_put_mutable_item(std::array<char, 32> key
, std::function<void(entry&, std::array<char,64>&
- , std::uint64_t&, std::string const&)> cb
+ , std::int64_t&, std::string const&)> cb
, std::string salt = std::string());

void dht_get_peers(sha1_hash const& info_hash);

@@ -312,7 +312,7 @@ namespace libtorrent
// ---- 32 bit boundary ---

// the sum of all refcounts in all blocks
- std::uint32_t refcount = 0;
+ std::int32_t refcount = 0;

#if TORRENT_USE_ASSERTS
// the number of times this piece has finished hashing

@@ -339,11 +339,11 @@ namespace libtorrent
std::size_t operator()(cached_piece_entry const& p) const
{ return std::size_t(p.storage.get()) + std::size_t(p.piece); }
};
- typedef std::unordered_set<cached_piece_entry, hash_value> cache_t;
+ using cache_t = std::unordered_set<cached_piece_entry, hash_value>;

public:

- typedef cache_t::const_iterator const_iterator;
+ using const_iterator = cache_t::const_iterator;

// returns the number of blocks this job would cause to be read in
int pad_job(disk_io_job const* j, int blocks_in_piece

@@ -359,7 +359,7 @@ namespace libtorrent
list_iterator<cached_piece_entry> write_lru_pieces() const
{ return m_lru[cached_piece_entry::write_lru].iterate(); }

- int num_write_lru_pieces() const { return int(m_lru[cached_piece_entry::write_lru].size()); }
+ int num_write_lru_pieces() const { return m_lru[cached_piece_entry::write_lru].size(); }

// mark this piece for deletion. If there are no outstanding
// requests to this piece, it's removed immediately, and the

@@ -507,21 +507,21 @@ namespace libtorrent
int m_max_volatile_blocks;

// the number of blocks (buffers) allocated by volatile pieces.
- std::uint32_t m_volatile_size;
+ std::int32_t m_volatile_size;

// the number of blocks in the cache
// that are in the read cache
- std::uint32_t m_read_cache_size;
+ std::int32_t m_read_cache_size;

// the number of blocks in the cache
// that are in the write cache
- std::uint32_t m_write_cache_size;
+ std::int32_t m_write_cache_size;

// the number of blocks that are currently sitting
// in peer's send buffers. If two peers are sending
// the same block, it counts as 2, even though there're
// no buffer duplication
- std::uint32_t m_send_buffer_blocks;
+ std::int32_t m_send_buffer_blocks;

// the number of blocks with a refcount > 0, i.e.
// they may not be evicted

@@ -87,12 +87,12 @@ namespace libtorrent
void release_memory();

- std::uint32_t in_use() const
+ int in_use() const
{
std::unique_lock<std::mutex> l(m_pool_mutex);
return m_in_use;
}
- std::uint32_t num_to_evict(int num_needed = 0);
+ int num_to_evict(int num_needed = 0);

void set_settings(aux::session_settings const& sett, error_code& ec);

@@ -354,7 +354,7 @@ namespace libtorrent
{
TORRENT_ASSERT(index >= 0);
TORRENT_ASSERT(index < int(m_files.size()));
- return m_files[index];
+ return m_files[std::size_t(index)];
}
TORRENT_DEPRECATED
file_entry at(iterator i) const;

@@ -140,7 +140,7 @@ namespace detail
m_access_list.insert(range(zero<Addr>(), 0));
}

- void add_rule(Addr first, Addr last, int flags)
+ void add_rule(Addr first, Addr last, std::uint32_t const flags)
{
TORRENT_ASSERT(!m_access_list.empty());
TORRENT_ASSERT(first < last || first == last);

@@ -212,7 +212,7 @@ namespace detail
std::vector<ip_range<ExternalAddressType>> ret;
ret.reserve(m_access_list.size());

- for (typename range_t::const_iterator i = m_access_list.begin()
+ for (auto i = m_access_list.begin()
, end(m_access_list.end()); i != end;)
{
ip_range<ExternalAddressType> r;

@@ -234,7 +234,7 @@ namespace detail
struct range
{
- range(Addr addr, int a = 0): start(addr), access(a) {} // NOLINT
+ range(Addr addr, std::uint32_t a = 0) : start(addr), access(a) {} // NOLINT
bool operator<(range const& r) const { return start < r.start; }
bool operator<(Addr const& a) const { return start < a; }
Addr start;

@@ -243,8 +243,7 @@ namespace detail
std::uint32_t access;
};

- using range_t = std::set<range>;
- range_t m_access_list;
+ std::set<range> m_access_list;
};

}

@@ -73,7 +73,7 @@ namespace libtorrent { namespace dht
struct sequence_number
{
sequence_number() : value(0) {}
- explicit sequence_number(std::uint64_t v) : value(v) {}
+ explicit sequence_number(std::int64_t v) : value(v) {}
sequence_number(sequence_number const& sqn) = default;
bool operator<(sequence_number rhs) const
{ return value < rhs.value; }

@@ -87,7 +87,7 @@ namespace libtorrent { namespace dht
{ return value == rhs.value; }
sequence_number& operator++()
{ ++value; return *this; }
- std::uint64_t value;
+ std::int64_t value;
};

}}

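Taken together, the two hunks above make the DHT sequence_number wrapper signed end to end, which is what the commit message refers to. A rough sketch of the resulting struct, reassembled from these hunks (the real header may declare more members):

#include <cstdint>

namespace libtorrent { namespace dht {

	struct sequence_number
	{
		sequence_number() : value(0) {}
		explicit sequence_number(std::int64_t v) : value(v) {}
		sequence_number(sequence_number const& sqn) = default;
		bool operator<(sequence_number rhs) const { return value < rhs.value; }
		// operator== signature assumed; only its body appears in the hunk above
		bool operator==(sequence_number rhs) const { return value == rhs.value; }
		sequence_number& operator++() { ++value; return *this; }
		std::int64_t value;
	};

}}
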
@@ -50,7 +50,7 @@ namespace libtorrent {
peer_class_t class_at(int i) const
{
TORRENT_ASSERT(i >= 0 && i < int(m_size));
- return m_class[i];
+ return m_class[std::size_t(i)];
}

private:

@@ -63,4 +63,3 @@ namespace libtorrent
};
}
#endif

@@ -125,7 +125,7 @@ namespace libtorrent
struct downloading_piece
{
- downloading_piece() : index((std::numeric_limits<std::uint32_t>::max)())
+ downloading_piece() : index(std::numeric_limits<std::int32_t>::max())
, info_idx((std::numeric_limits<std::uint16_t>::max)())
, finished(0)
, passed_hash_check(0)

@@ -137,7 +137,7 @@ namespace libtorrent
bool operator<(downloading_piece const& rhs) const { return index < rhs.index; }

// the index of the piece
- std::uint32_t index;
+ std::int32_t index;

// info about each block in this piece. this is an index into the
// m_block_info array, when multiplied by m_blocks_per_piece.

@@ -223,7 +223,7 @@ namespace libtorrent
TORRENT_ASSERT(index >= 0);
TORRENT_ASSERT(index < int(m_piece_map.size()));

- piece_pos const& p = m_piece_map[index];
+ piece_pos const& p = m_piece_map[std::size_t(index)];
return p.downloading();
}

@@ -418,10 +418,10 @@ namespace libtorrent
// functor that compares indices on downloading_pieces
struct has_index
{
- explicit has_index(int i): index(std::uint32_t(i)) { TORRENT_ASSERT(i >= 0); }
- bool operator()(const downloading_piece& p) const
+ explicit has_index(int i) : index(i) { TORRENT_ASSERT(i >= 0); }
+ bool operator()(downloading_piece const& p) const
{ return p.index == index; }
- std::uint32_t index;
+ std::int32_t index;
};

int blocks_in_last_piece() const

@@ -676,7 +676,7 @@ namespace libtorrent
#endif

bool partial_compare_rarest_first(downloading_piece const* lhs
- , downloading_piece const* rhs) const;
+ , downloading_piece const* rhs) const;

void break_one_seed();

@@ -697,9 +697,8 @@ namespace libtorrent
// shuffles the given piece inside it's priority range
void shuffle(int priority, int elem_index);

- typedef std::vector<downloading_piece>::iterator dlpiece_iter;
- dlpiece_iter add_download_piece(int index);
- void erase_download_piece(dlpiece_iter i);
+ std::vector<downloading_piece>::iterator add_download_piece(int index);
+ void erase_download_piece(std::vector<downloading_piece>::iterator i);

std::vector<downloading_piece>::const_iterator find_dl_piece(int queue, int index) const;
std::vector<downloading_piece>::iterator find_dl_piece(int queue, int index);

@@ -407,7 +407,7 @@ namespace libtorrent
// To generate the signature, you may want to use the
// ``sign_mutable_item`` function.
//
- // std::uint64_t& seq
+ // std::int64_t& seq
// current sequence number. May be zero if there is no current value.
// The function is expected to set this to the new sequence number of
// the value that is to be stored. Sequence numbers must be monotonically

@@ -429,7 +429,7 @@ namespace libtorrent
// calling the callback in between is convenient.
void dht_put_item(std::array<char, 32> key
, std::function<void(entry&, std::array<char, 64>&
- , std::uint64_t&, std::string const&)> cb
+ , std::int64_t&, std::string const&)> cb
, std::string salt = std::string());

void dht_get_peers(sha1_hash const& info_hash);

@@ -231,12 +231,12 @@ namespace libtorrent
}

// accessors for specific bytes
- std::uint8_t& operator[](size_t i)
+ std::uint8_t& operator[](std::size_t i)
{
TORRENT_ASSERT(i < size());
return reinterpret_cast<std::uint8_t*>(m_number)[i];
}
- std::uint8_t const& operator[](size_t i) const
+ std::uint8_t const& operator[](std::size_t i) const
{
TORRENT_ASSERT(i < size());
return reinterpret_cast<std::uint8_t const*>(m_number)[i];

@@ -103,14 +103,14 @@ struct average_accumulator
// let the average roll over, but only be worth a
// single sample
m_num_samples = 1;
- m_sample_sum = std::uint64_t(ret);
+ m_sample_sum = ret;
return ret;
}

private:

int m_num_samples = 0;
- std::uint64_t m_sample_sum = 0;
+ std::int64_t m_sample_sum = 0;
};

}

@@ -317,7 +317,7 @@ TORRENT_TEST(dht_dual_stack_mutable_item)
std::tie(pk, sk) = lt::dht::ed25519_create_keypair(seed);

ses.dht_put_item(pk.bytes, [&](lt::entry& item, std::array<char, 64>& sig
- , std::uint64_t& seq, std::string const& salt)
+ , std::int64_t& seq, std::string const& salt)
{
item = "mutable item";
seq = 1;

@@ -1778,13 +1778,12 @@ namespace libtorrent {
// this specific output is parsed by tools/parse_session_stats.py
// if this is changed, that parser should also be changed
char msg[50];
- std::snprintf(msg, sizeof(msg), "session stats (%d values): "
- , int(values.size()));
+ std::snprintf(msg, sizeof(msg), "session stats (%d values): " , int(values.size()));
std::string ret = msg;
bool first = true;
- for (int i = 0; i < values.size(); ++i)
+ for (auto v : values)
{
- std::snprintf(msg, sizeof(msg), first ? "%" PRIu64 : ", %" PRIu64, values[i]);
+ std::snprintf(msg, sizeof(msg), first ? "%" PRId64 : ", %" PRId64, v);
first = false;
ret += msg;
}

@@ -281,10 +281,12 @@ static_assert(sizeof(job_action_name)/sizeof(job_action_name[0])
, int(pe->num_blocks), int(pe->blocks_in_piece), int(pe->hashing_done)
, int(pe->marked_for_deletion), int(pe->need_readback), pe->hash_passes
, int(pe->read_jobs.size()), int(pe->jobs.size()));
- for (int i = 0; i < pe->piece_log.size(); ++i)
+ bool first = true;
+ for (auto const& log : pe->piece_log)
{
- assert_print("%s %s (%d)", (i==0?"":",")
- , job_name(pe->piece_log[i].job), pe->piece_log[i].block);
+ assert_print("%s %s (%d)", (first ? "" : ",")
+ , job_name(log.job), log.block);
+ first = false;
}
}
assert_print("\n");

@@ -1259,7 +1261,7 @@ int block_cache::pad_job(disk_io_job const* j, int blocks_in_piece
}

void block_cache::insert_blocks(cached_piece_entry* pe, int block, span<file::iovec_t const> iov
- , disk_io_job* j, int flags)
+ , disk_io_job* j, int const flags)
{
#ifdef TORRENT_EXPENSIVE_INVARIANT_CHECKS
INVARIANT_CHECK;

@@ -1273,17 +1275,17 @@ void block_cache::insert_blocks(cached_piece_entry* pe, int block, span<file::io
TORRENT_ASSERT(pe->in_use);

- for (int i = 0; i < iov.size(); ++i, ++block)
+ for (auto const& buf : iov)
{
// each iovec buffer has to be the size of a block (or the size of the last block)
- TORRENT_PIECE_ASSERT(iov[i].iov_len == (std::min)(block_size()
+ TORRENT_PIECE_ASSERT(int(buf.iov_len) == std::min(block_size()
, pe->storage->files()->piece_size(pe->piece) - block * block_size()), pe);

// no nullptrs allowed
- TORRENT_ASSERT(iov[i].iov_base);
+ TORRENT_ASSERT(buf.iov_base != nullptr);

#ifdef TORRENT_DEBUG_BUFFERS
- TORRENT_PIECE_ASSERT(is_disk_buffer(static_cast<char*>(iov[i].iov_base)), pe);
+ TORRENT_PIECE_ASSERT(is_disk_buffer(static_cast<char*>(buf.iov_base)), pe);
#endif

if (pe->blocks[block].buf && (flags & blocks_inc_refcount))

@@ -1294,13 +1296,13 @@ void block_cache::insert_blocks(cached_piece_entry* pe, int block, span<file::io
// either free the block or insert it. Never replace a block
if (pe->blocks[block].buf)
{
- free_buffer(static_cast<char*>(iov[i].iov_base));
+ free_buffer(static_cast<char*>(buf.iov_base));
}
else
{
- pe->blocks[block].buf = static_cast<char*>(iov[i].iov_base);
+ pe->blocks[block].buf = static_cast<char*>(buf.iov_base);

- TORRENT_PIECE_ASSERT(iov[i].iov_base != nullptr, pe);
+ TORRENT_PIECE_ASSERT(buf.iov_base != nullptr, pe);
TORRENT_PIECE_ASSERT(pe->blocks[block].dirty == false, pe);
++pe->num_blocks;
++m_read_cache_size;

@@ -1315,6 +1317,8 @@ void block_cache::insert_blocks(cached_piece_entry* pe, int block, span<file::io
}

TORRENT_ASSERT(pe->blocks[block].buf != nullptr);

+ block++;
}

TORRENT_PIECE_ASSERT(pe->cache_state != cached_piece_entry::read_lru1_ghost, pe);

@@ -1647,7 +1647,7 @@ namespace libtorrent
TORRENT_ASSERT(ptr <= buf + sizeof(buf));

- send_buffer(buf, ptr - buf);
+ send_buffer(buf, int(ptr - buf));

stats_counters().inc_stats_counter(counters::num_outgoing_extended);
}

@@ -1840,7 +1840,7 @@ namespace libtorrent
// there should be a version too
// but where do we put that info?

- int const last_seen_complete = root.dict_find_int_value("complete_ago", -1);
+ int const last_seen_complete = int(root.dict_find_int_value("complete_ago", -1));
if (last_seen_complete >= 0) set_last_seen_complete(last_seen_complete);

auto client_info = root.dict_find_string_value("v");

@@ -2204,7 +2204,7 @@ namespace libtorrent
#endif
m_sent_bitfield = true;

- send_buffer(reinterpret_cast<char const*>(msg.data()), msg.size());
+ send_buffer(reinterpret_cast<char const*>(msg.data()), int(msg.size()));

stats_counters().inc_stats_counter(counters::num_outgoing_bitfield);
}

@@ -2900,7 +2900,7 @@ namespace libtorrent
// TODO: 3 this is weird buffer handling
span<char> const buf = m_recv_buffer.mutable_buffer();
- TORRENT_ASSERT(buf.size() >= m_recv_buffer.packet_size());
+ TORRENT_ASSERT(int(buf.size()) >= m_recv_buffer.packet_size());
rc4_decrypt({buf.data(), size_t(m_recv_buffer.packet_size())});

recv_buffer = m_recv_buffer.get();

@@ -3007,7 +3007,7 @@ namespace libtorrent
// TODO: 3 this is weird buffer handling
span<char> const buf = m_recv_buffer.mutable_buffer();
- TORRENT_ASSERT(buf.size() >= m_recv_buffer.packet_size());
+ TORRENT_ASSERT(int(buf.size()) >= m_recv_buffer.packet_size());
rc4_decrypt({buf.data(), size_t(m_recv_buffer.packet_size())});

recv_buffer = m_recv_buffer.get();

@@ -3068,7 +3068,7 @@ namespace libtorrent
// ia is always rc4, so decrypt it
// TODO: 3 this is weird buffer handling
span<char> const buf = m_recv_buffer.mutable_buffer();
- TORRENT_ASSERT(buf.size() >= m_recv_buffer.packet_size());
+ TORRENT_ASSERT(int(buf.size()) >= m_recv_buffer.packet_size());
rc4_decrypt({buf.data(), size_t(m_recv_buffer.packet_size())});

#ifndef TORRENT_DISABLE_LOGGING

@@ -119,17 +119,17 @@ namespace libtorrent
#endif
}

- std::uint32_t disk_buffer_pool::num_to_evict(int num_needed)
+ int disk_buffer_pool::num_to_evict(int const num_needed)
{
int ret = 0;

std::unique_lock<std::mutex> l(m_pool_mutex);

if (m_exceeded_max_size)
- ret = m_in_use - (std::min)(m_low_watermark, int(m_max_use - m_observers.size()*2));
+ ret = m_in_use - std::min(m_low_watermark, int(m_max_use - m_observers.size() * 2));

if (m_in_use + num_needed > m_max_use)
- ret = (std::max)(ret, int(m_in_use + num_needed - m_max_use));
+ ret = std::max(ret, m_in_use + num_needed - m_max_use);

if (ret < 0) ret = 0;
else if (ret > m_in_use) ret = m_in_use;

@@ -271,14 +271,14 @@ namespace libtorrent
#if TORRENT_HAVE_MMAP && !defined TORRENT_NO_DEPRECATE
if (m_cache_pool)
{
- if (m_free_list.size() <= (m_max_use - m_low_watermark)
+ if (int(m_free_list.size()) <= (m_max_use - m_low_watermark)
/ 2 && !m_exceeded_max_size)
{
m_exceeded_max_size = true;
m_trigger_cache_trim();
}
if (m_free_list.empty()) return nullptr;
- std::uint64_t slot_index = m_free_list.back();
+ std::int64_t const slot_index = m_free_list.back();
m_free_list.pop_back();
ret = m_cache_pool + (slot_index * 0x4000);
TORRENT_ASSERT(is_disk_buffer(ret, l));

@@ -298,7 +298,7 @@ namespace libtorrent
? 20 // use small increments once we've exceeded the cache size
: m_cache_buffer_chunk_size
? m_cache_buffer_chunk_size
- : (std::max)(m_max_use / 10, 1);
+ : std::max(m_max_use / 10, 1);
m_pool.set_next_size(effective_block_size);
ret = static_cast<char*>(m_pool.malloc());
}

@@ -418,7 +418,7 @@ namespace libtorrent
// a 20th of everything exceeding 1 GiB
// and a 10th of everything below a GiB

- std::int64_t const gb = 1024 * 1024 * 1024;
+ constexpr std::int64_t gb = 1024 * 1024 * 1024;

std::int64_t result = 0;
if (phys_ram > 4 * gb)

@@ -447,7 +447,7 @@ namespace libtorrent
// 32 bit builds should capped below 2 GB of memory, even
// when more actual ram is available, because we're still
// constrained by the 32 bit virtual address space.
- m_max_use = (std::min)(2 * 1024 * 1024 * 3 / 4 * 1024
+ m_max_use = std::min(2 * 1024 * 1024 * 3 / 4 * 1024
/ m_block_size, m_max_use);
}
}

@@ -455,7 +455,7 @@ namespace libtorrent
{
m_max_use = cache_size;
}
- m_low_watermark = m_max_use - (std::max)(16, sett.get_int(settings_pack::max_queued_disk_bytes) / 0x4000);
+ m_low_watermark = m_max_use - std::max(16, sett.get_int(settings_pack::max_queued_disk_bytes) / 0x4000);
if (m_low_watermark < 0) m_low_watermark = 0;
if (m_in_use >= m_max_use && !m_exceeded_max_size)
{

@@ -483,7 +483,8 @@ namespace libtorrent
TORRENT_UNUSED(best_effort);
close(m_cache_fd);
m_cache_fd = -1;
- std::vector<int>().swap(m_free_list);
+ m_free_list.clear();
+ m_free_list.shrink_to_fit();
}
else if (m_cache_pool == nullptr && !sett.get_str(settings_pack::mmap_cache).empty())
{

@@ -528,7 +529,7 @@ namespace libtorrent
}
else
{
- TORRENT_ASSERT((size_t(m_cache_pool) & 0xfff) == 0);
+ TORRENT_ASSERT((std::size_t(m_cache_pool) & 0xfff) == 0);
m_free_list.reserve(m_max_use);
for (int i = 0; i < m_max_use; ++i)
m_free_list.push_back(i);

@@ -614,4 +615,3 @@ namespace libtorrent
}

}

@@ -2320,7 +2320,7 @@ namespace libtorrent
// treat a short read as an error. The hash will be invalid, the
// block cannot be cached and the main thread should skip the rest
// of this file
- if (read_ret != iov.iov_len)
+ if (read_ret != int(iov.iov_len))
{
ret = status_t::fatal_disk_error;
j->error.ec = boost::asio::error::eof;

@@ -63,7 +63,7 @@ namespace libtorrent
std::lock_guard<std::mutex> l(m_mutex);
if (i == m_max_threads) return;
m_max_threads = i;
- if (m_threads.size() < i) return;
+ if (int(m_threads.size()) < i) return;
stop_threads(int(m_threads.size()) - i);
}

@@ -309,7 +309,7 @@ namespace libtorrent
}

// write pad
- for (int j = 0; j < inbuf.size() - available_input; ++j)
+ for (int j = 0; j < int(inbuf.size()) - available_input; ++j)
{
ret += '=';
}

@@ -372,7 +372,7 @@ namespace libtorrent
if (0 == (flags & string::no_padding))
{
// write pad
- for (int j = 0; j < outbuf.size() - num_out; ++j)
+ for (int j = 0; j < int(outbuf.size()) - num_out; ++j)
{
ret += '=';
}

@@ -1365,10 +1365,10 @@ namespace libtorrent
std::uint32_t silly_hash(std::string const& str)
{
std::uint32_t ret = 1;
- for (int i = 0; i < str.size(); ++i)
+ for (auto const ch : str)
{
- if (str[i] == 0) continue;
- ret *= int(str[i]);
+ if (ch == 0) continue;
+ ret *= std::uint32_t(ch);
}
return ret;
}

@@ -130,7 +130,7 @@ namespace libtorrent
branch_len = leaf - path.c_str();

// trim trailing slashes
- if (branch_len > 0 && branch_path[branch_len-1] == TORRENT_SEPARATOR)
+ if (branch_len > 0 && branch_path[branch_len - 1] == TORRENT_SEPARATOR)
--branch_len;
}
if (branch_len <= 0)

@@ -140,12 +140,12 @@ namespace libtorrent
return;
}

- if (branch_len >= m_name.size()
+ if (branch_len >= int(m_name.size())
&& std::memcmp(branch_path, m_name.c_str(), m_name.size()) == 0)
{
// the +1 is to skip the trailing '/' (or '\')
int const offset = int(m_name.size())
- + (m_name.size() == branch_len ? 0 : 1);
+ + (int(m_name.size()) == branch_len ? 0 : 1);
branch_path += offset;
branch_len -= offset;
e.no_root_dir = false;

@@ -159,7 +159,7 @@ namespace libtorrent
auto p = std::find_if(m_paths.rbegin(), m_paths.rend()
, [&] (std::string const& str)
{
- if (str.size() != branch_len) return false;
+ if (int(str.size()) != branch_len) return false;
return std::memcmp(str.c_str(), branch_path, branch_len) == 0;
});

@@ -323,16 +323,16 @@ namespace libtorrent
void file_storage::apply_pointer_offset(ptrdiff_t const off)
{
- for (int i = 0; i < m_files.size(); ++i)
+ for (auto& f : m_files)
{
- if (m_files[i].name_len == internal_file_entry::name_is_owned) continue;
- m_files[i].name += off;
+ if (f.name_len == internal_file_entry::name_is_owned) continue;
+ f.name += off;
}

- for (int i = 0; i < m_file_hashes.size(); ++i)
+ for (auto& h : m_file_hashes)
{
- if (m_file_hashes[i] == nullptr) continue;
- m_file_hashes[i] += off;
+ if (h == nullptr) continue;
+ h += off;
}
}

@@ -1139,12 +1139,12 @@ namespace libtorrent
return std::make_tuple(begin_piece, end_piece);
}

- std::tuple<int, int> file_piece_range_inclusive(file_storage const& fs, int file)
+ std::tuple<int, int> file_piece_range_inclusive(file_storage const& fs, int const file)
{
peer_request const range = fs.map_file(file, 0, 1);
std::int64_t const file_size = fs.file_size(file);
std::int64_t const piece_size = fs.piece_length();
- int const end_piece = (range.piece * piece_size + range.start + file_size - 1) / piece_size + 1;
+ int const end_piece = int((range.piece * piece_size + range.start + file_size - 1) / piece_size + 1);
return std::make_tuple(range.piece, end_piece);
}

@@ -430,7 +430,7 @@ void http_connection::on_timeout(std::weak_ptr<http_connection> p
// the connection timed out. If we have more endpoints to try, just
// close this connection. The on_connect handler will try the next
// endpoint in the list.
- if (c->m_next_ep < c->m_endpoints.size())
+ if (c->m_next_ep < int(c->m_endpoints.size()))
{
error_code ec;
c->m_sock.close(ec);

@@ -544,7 +544,7 @@ void http_connection::on_resolve(error_code const& e

void http_connection::connect()
{
- TORRENT_ASSERT(m_next_ep < m_endpoints.size());
+ TORRENT_ASSERT(m_next_ep < int(m_endpoints.size()));

std::shared_ptr<http_connection> me(shared_from_this());

@@ -581,8 +581,8 @@ void http_connection::connect()
}
}

- TORRENT_ASSERT(m_next_ep < m_endpoints.size());
- if (m_next_ep >= m_endpoints.size()) return;
+ TORRENT_ASSERT(m_next_ep < int(m_endpoints.size()));
+ if (m_next_ep >= int(m_endpoints.size())) return;

tcp::endpoint target_address = m_endpoints[m_next_ep];
++m_next_ep;

@@ -609,7 +609,7 @@ void http_connection::on_connect(error_code const& e)
async_write(m_sock, boost::asio::buffer(m_sendbuffer)
, std::bind(&http_connection::on_write, shared_from_this(), _1));
}
- else if (m_next_ep < m_endpoints.size() && !m_abort)
+ else if (m_next_ep < int(m_endpoints.size()) && !m_abort)
{
// The connection failed. Try the next endpoint in the list.
error_code ec;

@@ -321,7 +321,7 @@ restart_response:
m_chunked_encoding = string_begins_no_case("chunked", value.c_str());
}

- TORRENT_ASSERT(m_recv_pos <= recv_buffer.size());
+ TORRENT_ASSERT(m_recv_pos <= int(recv_buffer.size()));
TORRENT_ASSERT(pos <= recv_buffer.end());
newline = std::find(pos, recv_buffer.end(), '\n');
}

@@ -384,7 +384,7 @@ namespace libtorrent
received_bytes(0, header_size - m_partial_chunk_header);
m_partial_chunk_header = 0;
- TORRENT_ASSERT(chunk_size != 0 || chunk_start.size() <= header_size || chunk_start[header_size] == 'H');
+ TORRENT_ASSERT(chunk_size != 0 || int(chunk_start.size()) <= header_size || chunk_start[header_size] == 'H');
// cut out the chunk header from the receive buffer
TORRENT_ASSERT(m_chunk_pos + m_body_start < INT_MAX);
m_recv_buffer.cut(header_size, t->block_size() + 1024, int(m_chunk_pos + m_body_start));

@@ -113,7 +113,7 @@ namespace
item.value.reset(new char[size]);
item.size = size;
}
- std::memcpy(item.value.get(), buf.data(), size);
+ std::memcpy(item.value.get(), buf.data(), buf.size());
}

void touch_item(dht_immutable_item& f, address const& addr)

@@ -36,6 +36,7 @@ POSSIBILITY OF SUCH DAMAGE.
#include <cinttypes> // for PRId64 et.al.
#include <functional>
#include <tuple>
+ #include <array>

#ifndef TORRENT_DISABLE_LOGGING
#include "libtorrent/hex.hpp" // to_hex

@@ -1062,7 +1063,7 @@ void node::incoming_request(msg const& m, entry& e)
// number matches the expected value before replacing it
// this is critical for avoiding race conditions when multiple
// writers are accessing the same slot
- if (msg_keys[5] && item_seq.value != std::uint64_t(msg_keys[5].int_value()))
+ if (msg_keys[5] && item_seq.value != msg_keys[5].int_value())
{
m_counters.inc_stats_counter(counters::dht_invalid_put);
incoming_error(e, "CAS mismatch", 301);

@@ -1188,14 +1189,16 @@ void node::write_nodes_entries(sha1_hash const& info_hash
node::protocol_descriptor const& node::map_protocol_to_descriptor(udp protocol)
{
- static protocol_descriptor descriptors[] =
- { {udp::v4(), "n4", "nodes"}
- , {udp::v6(), "n6", "nodes6"} };
+ static std::array<protocol_descriptor, 2> descriptors =
+ {{
+ {udp::v4(), "n4", "nodes"},
+ {udp::v6(), "n6", "nodes6"}
+ }};

- for (int i = 0; i < sizeof(descriptors) / sizeof(protocol_descriptor); ++i)
+ for (auto const& d : descriptors)
{
- if (descriptors[i].protocol == protocol)
- return descriptors[i];
+ if (d.protocol == protocol)
+ return d;
}

TORRENT_ASSERT_FAIL();

@@ -98,7 +98,7 @@ node_id generate_id_impl(address const& ip_, std::uint32_t r)
if (ip_.is_v6())
{
b6 = ip_.to_v6().to_bytes();
- ip = &b6[0];
+ ip = b6.data();
num_octets = 8;
mask = v6mask;
}

@@ -106,7 +106,7 @@ node_id generate_id_impl(address const& ip_, std::uint32_t r)
#endif
{
b4 = ip_.to_v4().to_bytes();
- ip = &b4[0];
+ ip = b4.data();
num_octets = 4;
mask = v4mask;
}

@@ -133,7 +133,7 @@ node_id generate_id_impl(address const& ip_, std::uint32_t r)
id[1] = (c >> 16) & 0xff;
id[2] = (((c >> 8) & 0xf8) | random(0x7)) & 0xff;

- for (int i = 3; i < 19; ++i) id[i] = std::uint8_t(random(0xff));
+ for (std::size_t i = 3; i < 19; ++i) id[i] = random(0xff) & 0xff;
id[19] = r & 0xff;

return id;

@@ -204,13 +204,13 @@ bool matching_prefix(node_id const& nid, int mask, int prefix, int offset)
return (id[0] & mask) == prefix;
}

- node_id generate_prefix_mask(int bits)
+ node_id generate_prefix_mask(int const bits)
{
TORRENT_ASSERT(bits >= 0);
TORRENT_ASSERT(bits <= 160);
- node_id mask(nullptr);
- int b = 0;
- for (; b < bits - 7; b += 8) mask[b / 8] |= 0xff;
+ node_id mask;
+ std::size_t b = 0;
+ for (; int(b) < bits - 7; b += 8) mask[b / 8] |= 0xff;
if (bits < 160) mask[b / 8] |= (0xff << (8 - (bits & 7))) & 0xff;
return mask;
}

@@ -960,24 +960,14 @@ void routing_table::update_node_id(node_id const& id)
// then add them all back. First add the main nodes, then the replacement
// nodes
- for (int i = 0; i < old_buckets.size(); ++i)
- {
- bucket_t const& bucket = old_buckets[i].live_nodes;
- for (int j = 0; j < bucket.size(); ++j)
- {
- add_node(bucket[j]);
- }
- }
+ for (auto const& b : old_buckets)
+ for (auto const& n : b.live_nodes)
+ add_node(n);

// now add back the replacement nodes
- for (int i = 0; i < old_buckets.size(); ++i)
- {
- bucket_t const& bucket = old_buckets[i].replacements;
- for (int j = 0; j < bucket.size(); ++j)
- {
- add_node(bucket[j]);
- }
- }
+ for (auto const& b : old_buckets)
+ for (auto const& n : b.replacements)
+ add_node(n);
}

void routing_table::for_each_node(

@@ -450,7 +450,7 @@ bool rpc_manager::invoke(entry& e, udp::endpoint const& target_addr
std::string transaction_id;
transaction_id.resize(2);
char* out = &transaction_id[0];
- std::uint16_t tid = std::uint16_t(random(0x7fff));
+ std::uint16_t const tid = std::uint16_t(random(0x7fff));
detail::write_uint16(tid, out);
e["t"] = transaction_id;

@@ -207,22 +207,23 @@ void traversal_algorithm::add_entry(node_id const& id
if (m_results.size() > 100)
{
- for (int i = 100; i < int(m_results.size()); ++i)
+ std::for_each(m_results.begin() + 100, m_results.end()
+ , [this](std::shared_ptr<observer> const& ptr)
{
- if ((m_results[i]->flags & (observer::flag_queried | observer::flag_failed | observer::flag_alive))
+ if ((ptr->flags & (observer::flag_queried | observer::flag_failed | observer::flag_alive))
== observer::flag_queried)
{
// set the done flag on any outstanding queries to prevent them from
// calling finished() or failed()
- m_results[i]->flags |= observer::flag_done;
+ ptr->flags |= observer::flag_done;
TORRENT_ASSERT(m_invoke_count > 0);
--m_invoke_count;
}

#if TORRENT_USE_ASSERTS
- m_results[i]->m_was_abandoned = true;
+ ptr->m_was_abandoned = true;
#endif
- }
+ });
m_results.resize(100);
}
}

@@ -118,8 +118,9 @@ namespace libtorrent
for (int i = 0; i < num_pieces; ++i)
{
- int slot = read_uint32(ptr);
- if (slot == 0xffffffff) continue;
+ std::uint32_t const uslot = read_uint32(ptr);
+ if (uslot == 0xffffffff) continue;
+ int const slot = int(uslot);

// invalid part-file
TORRENT_ASSERT(slot < num_pieces);

@@ -331,8 +332,8 @@ namespace libtorrent
if (ec || v.iov_len == 0) return;

std::int64_t ret = f.writev(file_offset, v, ec);
- TORRENT_ASSERT(ec || ret == v.iov_len);
- if (ec || ret != v.iov_len) return;
+ TORRENT_ASSERT(ec || ret == std::int64_t(v.iov_len));
+ if (ec || ret != std::int64_t(v.iov_len)) return;

// we're done with the disk I/O, grab the lock again to update
// the slot map

@@ -136,7 +136,7 @@ namespace libtorrent
bufs = abufs;
need_destruct = true;
size_t num_bufs = 0;
- for (int i = 0; to_process > 0 && i < iovec.size(); ++i)
+ for (std::size_t i = 0; to_process > 0 && i < iovec.size(); ++i)
{
++num_bufs;
int const size = int(iovec[i].size());

@@ -370,11 +370,11 @@ void rc4_init(const unsigned char* in, unsigned long len, rc4 *state)
keylen = state->x;

/* make RC4 perm and shuffle */
- for (x = 0; x < key_size; ++x) {
+ for (x = 0; x < int(key_size); ++x) {
s[x] = std::uint8_t(x);
}

- for (j = x = y = 0; x < key_size; x++) {
+ for (j = x = y = 0; x < int(key_size); x++) {
y = (y + state->buf[x] + key[j++]) & 255;
if (j == keylen) {
j = 0;

@@ -40,7 +40,7 @@ namespace libtorrent
{
if (std::find(m_class.begin(), m_class.begin() + m_size, c)
!= m_class.begin() + m_size) return;
- if (m_size >= m_class.size() - 1)
+ if (m_size >= int(m_class.size()) - 1)
{
TORRENT_ASSERT_FAIL();
return;

@@ -596,7 +596,7 @@ namespace libtorrent
for (;;)
{
char const* p = hash.data();
- for (int i = 0; i < hash.size() / sizeof(std::uint32_t); ++i)
+ for (int i = 0; i < int(hash.size() / sizeof(std::uint32_t)); ++i)
{
++loops;
int const piece = detail::read_uint32(p) % num_pieces;

@@ -2900,7 +2900,7 @@ namespace libtorrent
&& (bytes_left * 1000) / rate < m_settings.get_int(settings_pack::predictive_piece_announce))
{
// we predict we will complete this piece very soon.
- t->predicted_have_piece(piece, (bytes_left * 1000) / rate);
+ t->predicted_have_piece(piece, int((bytes_left * 1000) / rate));
}
}
}

@@ -3044,8 +3044,6 @@ namespace libtorrent
piece_picker& picker = t->picker();

- TORRENT_ASSERT(p.piece == p.piece);
- TORRENT_ASSERT(p.start == p.start);
TORRENT_ASSERT(picker.num_peers(block_finished) == 0);

// std::fprintf(stderr, "peer_connection mark_as_finished peer: %p piece: %d block: %d\n"

@@ -3593,8 +3591,8 @@ namespace libtorrent
TORRENT_ASSERT(block.block_index != piece_block::invalid.block_index);
TORRENT_ASSERT(block.piece_index != piece_block::invalid.piece_index);
- TORRENT_ASSERT(int(block.piece_index) < t->torrent_file().num_pieces());
- TORRENT_ASSERT(int(block.block_index) < t->torrent_file().piece_size(block.piece_index));
+ TORRENT_ASSERT(block.piece_index < t->torrent_file().num_pieces());
+ TORRENT_ASSERT(block.block_index < t->torrent_file().piece_size(block.piece_index));

// if all the peers that requested this block has been
// cancelled, then just ignore the cancel.

@@ -3805,7 +3803,7 @@ namespace libtorrent
send_suggest(*i);
}
int const max = m_settings.get_int(settings_pack::max_suggest_pieces);
- if (m_suggest_pieces.size() > max)
+ if (int(m_suggest_pieces.size()) > max)
{
int const to_erase = m_suggest_pieces.size() - max;
m_suggest_pieces.erase(m_suggest_pieces.begin()

@@ -3884,7 +3882,7 @@ namespace libtorrent
}

int block_offset = block.block.block_index * t->block_size();
- int block_size = (std::min)(t->torrent_file().piece_size(
+ int block_size = std::min(t->torrent_file().piece_size(
block.block.piece_index) - block_offset, t->block_size());
TORRENT_ASSERT(block_size > 0);
TORRENT_ASSERT(block_size <= t->block_size());

@@ -3937,7 +3935,7 @@ namespace libtorrent
#endif

block_offset = block.block.block_index * t->block_size();
- block_size = (std::min)(t->torrent_file().piece_size(
+ block_size = std::min(t->torrent_file().piece_size(
block.block.piece_index) - block_offset, t->block_size());
TORRENT_ASSERT(block_size > 0);
TORRENT_ASSERT(block_size <= t->block_size());

@@ -5064,7 +5062,7 @@ namespace libtorrent
// be blocked because we have to verify the hash first, so keep going with the
// next request. However, only let each peer have one hash verification outstanding
// at any given time
- for (int i = 0; i < m_requests.size()
+ for (int i = 0; i < int(m_requests.size())
&& (send_buffer_size() + m_reading_bytes < buffer_size_watermark); ++i)
{
TORRENT_ASSERT(t->ready_for_connections());

@@ -5828,12 +5826,12 @@ namespace libtorrent
// if we received exactly as many bytes as we provided a receive buffer
// for. There most likely are more bytes to read, and we should grow our
// receive buffer.
- TORRENT_ASSERT(bytes_transferred <= m_recv_buffer.max_receive());
- bool const grow_buffer = (bytes_transferred == m_recv_buffer.max_receive());
+ TORRENT_ASSERT(int(bytes_transferred) <= m_recv_buffer.max_receive());
+ bool const grow_buffer = (int(bytes_transferred) == m_recv_buffer.max_receive());
account_received_bytes(bytes_transferred);

if (m_extension_outstanding_bytes > 0)
- m_extension_outstanding_bytes -= (std::min)(m_extension_outstanding_bytes, int(bytes_transferred));
+ m_extension_outstanding_bytes -= std::min(m_extension_outstanding_bytes, int(bytes_transferred));

check_graceful_pause();
if (m_disconnecting) return;

@@ -5843,7 +5841,7 @@ namespace libtorrent
if (grow_buffer)
{
error_code ec;
- std::size_t buffer_size = m_socket->available(ec);
+ int buffer_size = int(m_socket->available(ec));
if (ec)
{
disconnect(ec, op_available);

@@ -5852,7 +5850,7 @@ namespace libtorrent
#ifndef TORRENT_DISABLE_LOGGING
peer_log(peer_log_alert::incoming, "AVAILABLE"
- , "%d bytes", int(buffer_size));
+ , "%d bytes", buffer_size);
#endif

request_bandwidth(download_channel, buffer_size);

@@ -5862,7 +5860,7 @@ namespace libtorrent
if (buffer_size > 0)
{
span<char> const vec = m_recv_buffer.reserve(buffer_size);
- size_t bytes = m_socket->read_some(
+ std::size_t bytes = m_socket->read_some(
boost::asio::mutable_buffers_1(vec.data(), vec.size()), ec);

// this is weird. You would imagine read_some() would do this

@@ -5872,7 +5870,7 @@ namespace libtorrent
if (should_log(peer_log_alert::incoming))
{
peer_log(peer_log_alert::incoming, "SYNC_READ", "max: %d ret: %d e: %s"
- , int(buffer_size), int(bytes), ec ? ec.message().c_str() : "");
+ , buffer_size, int(bytes), ec ? ec.message().c_str() : "");
}
#endif

@@ -40,8 +40,8 @@ namespace libtorrent {
counters::counters()
{
#ifdef ATOMIC_LLONG_LOCK_FREE
- for (int i = 0; i < m_stats_counter.size(); ++i)
- m_stats_counter[i].store(0, std::memory_order_relaxed);
+ for (auto& counter : m_stats_counter)
+ counter.store(0, std::memory_order_relaxed);
#else
std::memset(m_stats_counter, 0, sizeof(m_stats_counter));
#endif

@@ -50,7 +50,7 @@ namespace libtorrent {
counters::counters(counters const& c)
{
#ifdef ATOMIC_LLONG_LOCK_FREE
- for (int i = 0; i < m_stats_counter.size(); ++i)
+ for (std::size_t i = 0; i < m_stats_counter.size(); ++i)
m_stats_counter[i].store(
c.m_stats_counter[i].load(std::memory_order_relaxed)
, std::memory_order_relaxed);

@@ -63,7 +63,7 @@ namespace libtorrent {
counters& counters::operator=(counters const& c)
{
#ifdef ATOMIC_LLONG_LOCK_FREE
- for (int i = 0; i < m_stats_counter.size(); ++i)
+ for (std::size_t i = 0; i < m_stats_counter.size(); ++i)
m_stats_counter[i].store(
c.m_stats_counter[i].load(std::memory_order_relaxed)
, std::memory_order_relaxed);

@@ -170,7 +170,8 @@ namespace libtorrent
return ret;
}

- piece_picker::dlpiece_iter piece_picker::add_download_piece(int piece)
+ std::vector<piece_picker::downloading_piece>::iterator
+ piece_picker::add_download_piece(int const piece)
{
TORRENT_ASSERT(piece >= 0);
TORRENT_ASSERT(piece < int(m_piece_map.size()));

@@ -190,7 +191,7 @@ namespace libtorrent
else
{
// there is already free space in m_block_info, grab one range
- block_index = m_free_block_infos.back();
+ block_index = int(m_free_block_infos.back());
m_free_block_infos.pop_back();
}

@@ -249,7 +250,7 @@ namespace libtorrent
m_piece_map[i->index].download_state = piece_pos::piece_open;
m_downloads[download_state].erase(i);

- TORRENT_ASSERT(prev_size == m_downloads[download_state].size() + 1);
+ TORRENT_ASSERT(prev_size == int(m_downloads[download_state].size()) + 1);

#if TORRENT_USE_INVARIANT_CHECKS
check_piece_state();

@@ -289,7 +290,7 @@ namespace libtorrent
downloading_piece const& dp)
{
int idx = int(dp.info_idx) * m_blocks_per_piece;
- TORRENT_ASSERT(idx + m_blocks_per_piece <= m_block_info.size());
+ TORRENT_ASSERT(idx + m_blocks_per_piece <= int(m_block_info.size()));
return &m_block_info[idx];
}

@@ -334,7 +335,7 @@ namespace libtorrent
void piece_picker::verify_pick(std::vector<piece_block> const& picked
, bitfield const& bits) const
{
- TORRENT_ASSERT(bits.size() == m_piece_map.size());
+ TORRENT_ASSERT(bits.size() == int(m_piece_map.size()));
for (std::vector<piece_block>::const_iterator i = picked.begin()
, end(picked.end()); i != end; ++i)
{

@@ -412,8 +413,8 @@ namespace libtorrent
int num_pieces = have.size();
for (int i = 0; i < num_pieces; ++i)
{
- int h = have[i];
- TORRENT_ASSERT(m_piece_map[i].have_peers.count(p) == h);
+ bool h = have[i];
+ TORRENT_ASSERT(m_piece_map[std::size_t(i)].have_peers.count(p) == (h ? 1 : 0));
}
#else
TORRENT_UNUSED(have);

@@ -617,7 +618,7 @@ namespace libtorrent
}

#ifdef TORRENT_DEBUG_REFCOUNTS
- TORRENT_ASSERT(p.have_peers.size() == p.peer_count + m_seeds);
+ TORRENT_ASSERT(int(p.have_peers.size()) == p.peer_count + m_seeds);
#endif
if (p.index == piece_pos::we_have_index)
++num_have;

@@ -901,8 +902,8 @@ namespace libtorrent
// priority bucket. If it doesn't, it means this piece changed
// state without updating the corresponding entry in the pieces list
TORRENT_ASSERT(m_priority_boundaries[priority] >= elem_index);
- TORRENT_ASSERT(priority == 0 || m_priority_boundaries[priority-1] <= elem_index);
- TORRENT_ASSERT(priority + 1 == m_priority_boundaries.size() || m_priority_boundaries[priority+1] > elem_index);
+ TORRENT_ASSERT(priority == 0 || m_priority_boundaries[priority - 1] <= elem_index);
+ TORRENT_ASSERT(priority + 1 == int(m_priority_boundaries.size()) || m_priority_boundaries[priority + 1] > elem_index);

int index = m_pieces[elem_index];
// update the piece_map

@@ -1252,13 +1253,13 @@ namespace libtorrent
// nothing set, nothing to do here
if (bitmask.none_set()) return;

- if (bitmask.all_set() && bitmask.size() == m_piece_map.size())
+ if (bitmask.all_set() && bitmask.size() == int(m_piece_map.size()))
{
inc_refcount_all(peer);
return;
}

- const int size = (std::min)(50, int(bitmask.size()/2));
+ int const size = std::min(50, bitmask.size() / 2);

// this is an optimization where if just a few
// pieces end up changing, instead of making

@@ -1340,7 +1341,7 @@ namespace libtorrent
#ifdef TORRENT_EXPENSIVE_INVARIANT_CHECKS
INVARIANT_CHECK;
#endif
- TORRENT_ASSERT(bitmask.size() <= m_piece_map.size());
+ TORRENT_ASSERT(bitmask.size() <= int(m_piece_map.size()));

#ifdef TORRENT_PICKER_LOG
std::cerr << "[" << this << "] " << "dec_refcount(bitfield)" << std::endl;

@@ -1349,13 +1350,13 @@ namespace libtorrent
// nothing set, nothing to do here
if (bitmask.none_set()) return;

- if (bitmask.all_set() && bitmask.size() == m_piece_map.size())
+ if (bitmask.all_set() && bitmask.size() == int(m_piece_map.size()))
{
dec_refcount_all(peer);
return;
}

- const int size = (std::min)(50, int(bitmask.size()/2));
+ int const size = std::min(50, bitmask.size() / 2);

// this is an optimization where if just a few
// pieces end up changing, instead of making

@@ -1521,16 +1522,18 @@ namespace libtorrent
#endif
}

- void piece_picker::piece_passed(int index)
+ void piece_picker::piece_passed(int const index)
{
- piece_pos& p = m_piece_map[index];
+ TORRENT_ASSERT(index >= 0);
+ TORRENT_ASSERT(index < int(m_piece_map.size()));
+ piece_pos& p = m_piece_map[std::size_t(index)];
int download_state = p.download_queue();

// this is kind of odd. Could this happen?
TORRENT_ASSERT(download_state != piece_pos::piece_open);
if (download_state == piece_pos::piece_open) return;

- std::vector<downloading_piece>::iterator i = find_dl_piece(download_state, index);
+ auto const i = find_dl_piece(download_state, index);
TORRENT_ASSERT(i != m_downloads[download_state].end());

TORRENT_ASSERT(i->locked == false);

@@ -1545,13 +1548,13 @@ namespace libtorrent
we_have(index);
}

- void piece_picker::we_dont_have(int index)
+ void piece_picker::we_dont_have(int const index)
{
INVARIANT_CHECK;
TORRENT_ASSERT(index >= 0);
TORRENT_ASSERT(index < int(m_piece_map.size()));

- piece_pos& p = m_piece_map[index];
+ piece_pos& p = m_piece_map[std::size_t(index)];

#ifdef TORRENT_PICKER_LOG
std::cerr << "[" << this << "] " << "piece_picker::we_dont_have("

@@ -1565,8 +1568,7 @@ namespace libtorrent
int download_state = p.download_queue();
if (download_state == piece_pos::piece_open) return;

- std::vector<downloading_piece>::iterator i
- = find_dl_piece(download_state, index);
+ auto const i = find_dl_piece(download_state, index);
if (i->passed_hash_check)
{
i->passed_hash_check = false;

@@ -1609,7 +1611,7 @@ namespace libtorrent
// downloaded a piece, and that no further attempts
// to pick that piece should be made. The piece will
// be removed from the available piece list.
- void piece_picker::we_have(int index)
+ void piece_picker::we_have(int const index)
{
#ifdef TORRENT_EXPENSIVE_INVARIANT_CHECKS
INVARIANT_CHECK;

@@ -1621,7 +1623,7 @@ namespace libtorrent
std::cerr << "[" << this << "] " << "piece_picker::we_have("
<< index << ")" << std::endl;
#endif
- piece_pos& p = m_piece_map[index];
+ piece_pos& p = m_piece_map[std::size_t(index)];
int const info_index = p.index;
int const priority = p.priority(this);
TORRENT_ASSERT(priority < int(m_priority_boundaries.size()) || m_dirty);

@@ -1631,8 +1633,7 @@ namespace libtorrent
int state = p.download_queue();
if (state != piece_pos::piece_open)
{
- std::vector<downloading_piece>::iterator i
- = find_dl_piece(state, index);
+ auto const i = find_dl_piece(state, index);
TORRENT_ASSERT(i != m_downloads[state].end());
// decrement num_passed here to compensate
// for the unconditional increment further down

@@ -1929,7 +1930,7 @@ namespace libtorrent
INVARIANT_CHECK;
#endif
TORRENT_ASSERT(num_blocks > 0);
- TORRENT_ASSERT(pieces.size() == m_piece_map.size());
+ TORRENT_ASSERT(pieces.size() == int(m_piece_map.size()));

TORRENT_ASSERT(!m_priority_boundaries.empty() || m_dirty);

@@ -2290,7 +2291,7 @@ get_out:
// this peer has, and can pick from. Cap the stack allocation
// at 200 pieces.

- int partials_size = (std::min)(200, int(
+ int partials_size = std::min(200, int(
m_downloads[piece_pos::piece_downloading].size()
+ m_downloads[piece_pos::piece_full].size()));
if (partials_size == 0) return ret;

@@ -2340,14 +2341,10 @@ get_out:
}
#endif

- for (std::vector<downloading_piece>::const_iterator i
- = m_downloads[piece_pos::piece_full].begin()
- , end(m_downloads[piece_pos::piece_full].end());
- i != end; ++i)
+ for (auto const& dp : m_downloads[piece_pos::piece_full])
{
if (c == partials_size) break;

- downloading_piece const& dp = *i;
TORRENT_ASSERT(dp.requested > 0);
// this peer doesn't have this piece, try again
if (!pieces[dp.index]) continue;

@@ -2365,23 +2362,21 @@ get_out:
while (partials_size > 0)
{
pc.inc_stats_counter(counters::piece_picker_busy_loops);
- int piece = random(partials_size-1);
+ int piece = int(random(partials_size - 1));
downloading_piece const* dp = partials[piece];
TORRENT_ASSERT(pieces[dp->index]);
TORRENT_ASSERT(piece_priority(dp->index) > 0);
// fill in with blocks requested from other peers
// as backups
- const int num_blocks_in_piece = blocks_in_piece(dp->index);
+ int const num_blocks_in_piece = blocks_in_piece(dp->index);
TORRENT_ASSERT(dp->requested > 0);
block_info const* binfo = blocks_for_piece(*dp);
for (int j = 0; j < num_blocks_in_piece; ++j)
{
block_info const& info = binfo[j];
- TORRENT_ASSERT(info.peer == nullptr
- || static_cast<torrent_peer*>(info.peer)->in_use);
+ TORRENT_ASSERT(info.peer == nullptr || info.peer->in_use);
TORRENT_ASSERT(info.piece_index == dp->index);
- if (info.state != block_info::state_requested
- || info.peer == peer)
+ if (info.state != block_info::state_requested || info.peer == peer)
continue;
temp.push_back(piece_block(dp->index, j));
}

@@ -2396,7 +2391,7 @@ get_out:
// the piece we picked only had blocks outstanding requested
// by ourself. Remove it and pick another one.
- partials[piece] = partials[partials_size-1];
+ partials[piece] = partials[partials_size - 1];
--partials_size;
}

@@ -2477,7 +2472,7 @@ get_out:
{
TORRENT_ASSERT(index >= 0);
TORRENT_ASSERT(index < int(m_piece_map.size()));
- piece_pos const& p = m_piece_map[index];
+ piece_pos const& p = m_piece_map[std::size_t(index)];
return p.index == piece_pos::we_have_index;
}

@@ -2495,19 +2490,19 @@ get_out:
{
TORRENT_ASSERT(piece >= 0 && piece < int(m_piece_map.size()));
return bitmask[piece]
- && !m_piece_map[piece].have()
- && !m_piece_map[piece].filtered();
+ && !m_piece_map[std::size_t(piece)].have()
+ && !m_piece_map[std::size_t(piece)].filtered();
}

bool piece_picker::can_pick(int piece, bitfield const& bitmask) const
{
TORRENT_ASSERT(piece >= 0 && piece < int(m_piece_map.size()));
return bitmask[piece]
- && !m_piece_map[piece].have()
+ && !m_piece_map[std::size_t(piece)].have()
// TODO: when expanding pieces for cache stripe reasons,
// the !downloading condition doesn't make much sense
- && !m_piece_map[piece].downloading()
- && !m_piece_map[piece].filtered();
+ && !m_piece_map[std::size_t(piece)].downloading()
+ && !m_piece_map[std::size_t(piece)].filtered();
}

#if TORRENT_USE_INVARIANT_CHECKS

@@ -2538,7 +2533,7 @@ get_out:
// the first-fit range, which would be better
std::tuple<bool, bool, int, int> piece_picker::requested_from(
piece_picker::downloading_piece const& p
- , int num_blocks_in_piece, torrent_peer* peer) const
+ , int const num_blocks_in_piece, torrent_peer* peer) const
{
bool exclusive = true;
bool exclusive_active = true;

@@ -2549,7 +2544,7 @@ get_out:
for (int j = 0; j < num_blocks_in_piece; ++j)
{
piece_picker::block_info const& info = binfo[j];
- TORRENT_ASSERT(info.peer == nullptr || static_cast<torrent_peer*>(info.peer)->in_use);
+ TORRENT_ASSERT(info.peer == nullptr || info.peer->in_use);
TORRENT_ASSERT(info.piece_index == p.index);
if (info.state == piece_picker::block_info::state_none)
{

@@ -2858,7 +2853,7 @@ get_out:
}

std::vector<piece_picker::downloading_piece>::iterator piece_picker::find_dl_piece(
- int queue, int index)
+ int const queue, int const index)
{
TORRENT_ASSERT(queue >= 0 && queue < piece_pos::num_download_categories);
downloading_piece cmp;

@@ -2967,7 +2962,7 @@ get_out:
{
TORRENT_ASSERT(block.block_index != piece_block::invalid.block_index);
TORRENT_ASSERT(block.piece_index != piece_block::invalid.piece_index);
- TORRENT_ASSERT(block.piece_index < m_piece_map.size());
+ TORRENT_ASSERT(block.piece_index < int(m_piece_map.size()));

int state = m_piece_map[block.piece_index].download_queue();
if (state == piece_pos::piece_open) return false;

@@ -2985,7 +2980,7 @@ get_out:
{
TORRENT_ASSERT(block.block_index != piece_block::invalid.block_index);
TORRENT_ASSERT(block.piece_index != piece_block::invalid.piece_index);
- TORRENT_ASSERT(block.piece_index < m_piece_map.size());
+ TORRENT_ASSERT(block.piece_index < int(m_piece_map.size()));

if (m_piece_map[block.piece_index].index == piece_pos::we_have_index) return true;
int state = m_piece_map[block.piece_index].download_queue();

@@ -3004,7 +2999,7 @@ get_out:
{
TORRENT_ASSERT(block.block_index != piece_block::invalid.block_index);
TORRENT_ASSERT(block.piece_index != piece_block::invalid.piece_index);
- TORRENT_ASSERT(block.piece_index < m_piece_map.size());
+ TORRENT_ASSERT(block.piece_index < int(m_piece_map.size()));

piece_pos const& p = m_piece_map[block.piece_index];
if (p.index == piece_pos::we_have_index) return true;

@@ -3028,11 +3023,11 @@ get_out:
<< block.piece_index << ", " << block.block_index << "} )" << std::endl;
#endif

- TORRENT_ASSERT(peer == nullptr || static_cast<torrent_peer*>(peer)->in_use);
+ TORRENT_ASSERT(peer == nullptr || peer->in_use);
TORRENT_ASSERT(block.block_index != piece_block::invalid.block_index);
TORRENT_ASSERT(block.piece_index != piece_block::invalid.piece_index);
- TORRENT_ASSERT(block.piece_index < m_piece_map.size());
- TORRENT_ASSERT(int(block.block_index) < blocks_in_piece(block.piece_index));
+ TORRENT_ASSERT(block.piece_index < int(m_piece_map.size()));
+ TORRENT_ASSERT(block.block_index < blocks_in_piece(block.piece_index));
TORRENT_ASSERT(!m_piece_map[block.piece_index].have());

piece_pos& p = m_piece_map[block.piece_index];

@@ -3051,7 +3046,7 @@ get_out:
if (prio >= 0 && !m_dirty) update(prio, p.index);

- dlpiece_iter dp = add_download_piece(block.piece_index);
+ auto const dp = add_download_piece(block.piece_index);
block_info* binfo = blocks_for_piece(*dp);
block_info& info = binfo[block.block_index];
TORRENT_ASSERT(info.piece_index == block.piece_index);

@@ -3129,7 +3124,7 @@ get_out:
{
TORRENT_ASSERT(block.block_index != piece_block::invalid.block_index);
TORRENT_ASSERT(block.piece_index != piece_block::invalid.piece_index);
- TORRENT_ASSERT(block.piece_index < m_piece_map.size());
+ TORRENT_ASSERT(block.piece_index < int(m_piece_map.size()));
TORRENT_ASSERT(block.block_index < blocks_in_piece(block.piece_index));
||||
|
||||
piece_pos const& p = m_piece_map[block.piece_index];
|
||||
|
@ -3179,8 +3174,8 @@ get_out:
|
|||
|
||||
TORRENT_ASSERT(block.block_index != piece_block::invalid.block_index);
|
||||
TORRENT_ASSERT(block.piece_index != piece_block::invalid.piece_index);
|
||||
TORRENT_ASSERT(block.piece_index < m_piece_map.size());
|
||||
TORRENT_ASSERT(int(block.block_index) < blocks_in_piece(block.piece_index));
|
||||
TORRENT_ASSERT(block.piece_index < int(m_piece_map.size()));
|
||||
TORRENT_ASSERT(block.block_index < blocks_in_piece(block.piece_index));
|
||||
// this is not valid for web peers
|
||||
// TORRENT_ASSERT(peer != 0);
|
||||
|
||||
|
@ -3198,7 +3193,7 @@ get_out:
|
|||
// the piece priority was set to 0
|
||||
if (prio >= 0 && !m_dirty) update(prio, p.index);
|
||||
|
||||
dlpiece_iter dp = add_download_piece(block.piece_index);
|
||||
auto const dp = add_download_piece(block.piece_index);
|
||||
block_info* binfo = blocks_for_piece(*dp);
|
||||
block_info& info = binfo[block.block_index];
|
||||
TORRENT_ASSERT(&info >= &m_block_info[0]);
|
||||
|
@ -3367,8 +3362,8 @@ get_out:
|
|||
|
||||
TORRENT_ASSERT(block.piece_index >= 0);
|
||||
TORRENT_ASSERT(block.block_index >= 0);
|
||||
TORRENT_ASSERT(block.piece_index < m_piece_map.size());
|
||||
TORRENT_ASSERT(int(block.block_index) < blocks_in_piece(block.piece_index));
|
||||
TORRENT_ASSERT(block.piece_index < int(m_piece_map.size()));
|
||||
TORRENT_ASSERT(block.block_index < blocks_in_piece(block.piece_index));
|
||||
|
||||
piece_pos& p = m_piece_map[block.piece_index];
|
||||
|
||||
|
@ -3430,8 +3425,8 @@ get_out:
|
|||
TORRENT_ASSERT(peer == nullptr || static_cast<torrent_peer*>(peer)->in_use);
|
||||
TORRENT_ASSERT(block.piece_index >= 0);
|
||||
TORRENT_ASSERT(block.block_index >= 0);
|
||||
TORRENT_ASSERT(block.piece_index < m_piece_map.size());
|
||||
TORRENT_ASSERT(int(block.block_index) < blocks_in_piece(block.piece_index));
|
||||
TORRENT_ASSERT(block.piece_index < int(m_piece_map.size()));
|
||||
TORRENT_ASSERT(block.block_index < blocks_in_piece(block.piece_index));
|
||||
|
||||
piece_pos& p = m_piece_map[block.piece_index];
|
||||
|
||||
|
@ -3450,7 +3445,7 @@ get_out:
|
|||
p.download_state = piece_pos::piece_downloading;
|
||||
if (prio >= 0 && !m_dirty) update(prio, p.index);
|
||||
|
||||
dlpiece_iter dp = add_download_piece(block.piece_index);
|
||||
auto const dp = add_download_piece(block.piece_index);
|
||||
block_info* binfo = blocks_for_piece(*dp);
|
||||
block_info& info = binfo[block.block_index];
|
||||
TORRENT_ASSERT(&info >= &m_block_info[0]);
|
||||
|
@ -3576,8 +3571,8 @@ get_out:
|
|||
|
||||
TORRENT_ASSERT(block.block_index != piece_block::invalid.block_index);
|
||||
TORRENT_ASSERT(block.piece_index != piece_block::invalid.piece_index);
|
||||
TORRENT_ASSERT(block.piece_index < m_piece_map.size());
|
||||
TORRENT_ASSERT(int(block.block_index) < blocks_in_piece(block.piece_index));
|
||||
TORRENT_ASSERT(block.piece_index < int(m_piece_map.size()));
|
||||
TORRENT_ASSERT(block.block_index < blocks_in_piece(block.piece_index));
|
||||
|
||||
int state = m_piece_map[block.piece_index].download_queue();
|
||||
if (state == piece_pos::piece_open) return;
|
||||
|
@ -3607,7 +3602,7 @@ get_out:
|
|||
if (info.peer == peer) info.peer = nullptr;
|
||||
TORRENT_ASSERT(info.peers.size() == info.num_peers);
|
||||
|
||||
TORRENT_ASSERT(int(block.block_index) < blocks_in_piece(block.piece_index));
|
||||
TORRENT_ASSERT(block.block_index < blocks_in_piece(block.piece_index));
|
||||
|
||||
// if there are other peers, leave the block requested
|
||||
if (info.num_peers > 0) return;
|
||||
|
|
|
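
The asserts rewritten in the hunks above all resolve the same -Wsign-compare warning: a signed piece or block index compared against an unsigned container size. A minimal standalone sketch of that pattern (plain std::vector, not the real piece_picker types):

#include <cassert>
#include <vector>

int main()
{
	std::vector<int> piece_map(100); // stand-in for m_piece_map
	int const index = 42;

	// 'index < piece_map.size()' mixes int with std::size_t and warns
	// under -Wsign-compare; the diff narrows the size to int instead,
	// which is safe while the container stays far below INT_MAX.
	assert(index >= 0 && index < int(piece_map.size()));
	return 0;
}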

@@ -40,7 +40,7 @@ int receive_buffer::max_receive() const
return int(m_recv_buffer.size() - m_recv_end);
}

span<char> receive_buffer::reserve(int size)
span<char> receive_buffer::reserve(int const size)
{
INVARIANT_CHECK;
TORRENT_ASSERT(size > 0);

@@ -49,7 +49,7 @@ span<char> receive_buffer::reserve(int size)
// normalize() must be called before receiving more data
TORRENT_ASSERT(m_recv_start == 0);

if (m_recv_buffer.size() < m_recv_end + size)
if (int(m_recv_buffer.size()) < m_recv_end + size)
{
int const new_size = std::max(m_recv_end + size, m_packet_size);
buffer new_buffer(new_size

@@ -144,7 +144,7 @@ span<char const> receive_buffer::get() const
return span<char const>();
}

TORRENT_ASSERT(m_recv_start + m_recv_pos <= m_recv_buffer.size());
TORRENT_ASSERT(m_recv_start + m_recv_pos <= int(m_recv_buffer.size()));
return span<char const>(m_recv_buffer).subspan(m_recv_start, m_recv_pos);
}

@@ -169,7 +169,7 @@ span<char> receive_buffer::mutable_buffer(int const bytes)
// in the receive buffer that have been parsed and processed.
// it may also shrink the size of the buffer allocation if we haven't been using
// enough of it lately.
void receive_buffer::normalize(int force_shrink)
void receive_buffer::normalize(int const force_shrink)
{
INVARIANT_CHECK;
TORRENT_ASSERT(m_recv_end >= m_recv_start);

@@ -178,7 +178,7 @@ void receive_buffer::normalize(int force_shrink)

// if the running average drops below half of the current buffer size,
// reallocate a smaller one.
bool const shrink_buffer = m_recv_buffer.size() / 2 > m_watermark.mean()
bool const shrink_buffer = int(m_recv_buffer.size()) / 2 > m_watermark.mean()
&& m_watermark.mean() > (m_recv_end - m_recv_start);

span<char const> bytes_to_shift(

@@ -212,10 +212,10 @@ void receive_buffer::normalize(int force_shrink)
#endif
}

void receive_buffer::reset(int packet_size)
void receive_buffer::reset(int const packet_size)
{
INVARIANT_CHECK;
TORRENT_ASSERT(m_recv_buffer.size() >= m_recv_end);
TORRENT_ASSERT(int(m_recv_buffer.size()) >= m_recv_end);
TORRENT_ASSERT(packet_size > 0);
if (m_recv_end > m_packet_size)
{
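
The normalize() hunk above keeps a running average (m_watermark) of how much of the receive buffer is actually used and shrinks the allocation when that average falls below half of it. A rough standalone sketch of that decision, with made-up numbers standing in for the real members:

#include <cstdio>

int main()
{
	int const buffer_size = 64 * 1024;   // current allocation (m_recv_buffer.size())
	int const average_used = 12 * 1024;  // running average (m_watermark.mean())
	int const bytes_buffered = 4 * 1024; // unprocessed bytes (m_recv_end - m_recv_start)

	// shrink only when the average use is below half the allocation and
	// the currently buffered bytes are below that average, so they still
	// fit in a smaller buffer sized around the average
	bool const shrink_buffer = buffer_size / 2 > average_used
		&& average_used > bytes_buffered;

	std::printf("shrink_buffer = %s\n", shrink_buffer ? "true" : "false");
	return 0;
}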

@@ -103,7 +103,7 @@ void resolve_links::match(std::shared_ptr<const torrent_info> const& ti
int our_piece = m_torrent_file->files().map_file(
iter->second, 0, 0).piece;

int num_pieces = (file_size + piece_size - 1) / piece_size;
int num_pieces = int((file_size + piece_size - 1) / piece_size);

bool match = true;
for (int p = 0; p < num_pieces; ++p, ++their_piece, ++our_piece)
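
The resolve_links hunk computes a file's piece count with a rounding-up division in 64-bit arithmetic and then narrows the result explicitly, since a piece count fits in an int even when the file size does not. A standalone sketch with assumed example values:

#include <cstdint>
#include <cstdio>

int main()
{
	std::int64_t const file_size = 3000000000LL; // ~3 GB, larger than INT_MAX
	std::int64_t const piece_size = 16 * 1024;

	// ceiling division in 64 bits, then an explicit narrowing cast
	int const num_pieces = int((file_size + piece_size - 1) / piece_size);
	std::printf("num_pieces = %d\n", num_pieces);
	return 0;
}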

@@ -571,7 +571,7 @@ namespace libtorrent

void session_handle::dht_put_item(std::array<char, 32> key
, std::function<void(entry&, std::array<char,64>&
, std::uint64_t&, std::string const&)> cb
, std::int64_t&, std::string const&)> cb
, std::string salt)
{
#ifndef TORRENT_DISABLE_DHT

@@ -598,15 +598,17 @@ namespace aux {

// this specific output is parsed by tools/parse_session_stats.py
// if this is changed, that parser should also be changed
std::string stats_header = "session stats header: ";
std::vector<stats_metric> stats = session_stats_metrics();
std::sort(stats.begin(), stats.end()
, [] (stats_metric const& lhs, stats_metric const& rhs)
{ return lhs.value_index < rhs.value_index; });
for (int i = 0; i < stats.size(); ++i)
std::string stats_header = "session stats header: ";
bool first = true;
for (auto const& s : stats)
{
if (i > 0) stats_header += ", ";
stats_header += stats[i].name;
if (!first) stats_header += ", ";
stats_header += s.name;
first = false;
}
m_alerts.emplace_alert<log_alert>(stats_header.c_str());
}
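
The rewritten loop above drops the signed index over stats.size() in favour of a range-for plus a `first` flag when joining the metric names with commas. The same join pattern in isolation, with placeholder metric names instead of the session's stats_metric type:

#include <cstdio>
#include <string>
#include <vector>

int main()
{
	std::vector<std::string> const names = {"metric_a", "metric_b", "metric_c"};

	std::string header = "session stats header: ";
	bool first = true;
	for (auto const& n : names)
	{
		if (!first) header += ", ";
		header += n;
		first = false;
	}
	std::printf("%s\n", header.c_str());
	return 0;
}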

@@ -1756,11 +1758,11 @@ namespace aux {
// of a new socket failing to bind due to a conflict with a stale socket
std::vector<listen_endpoint_t> eps;

for (int i = 0; i < m_listen_interfaces.size(); ++i)
for (auto const& iface : m_listen_interfaces)
{
std::string const& device = m_listen_interfaces[i].device;
int const port = m_listen_interfaces[i].port;
bool const ssl = m_listen_interfaces[i].ssl;
std::string const& device = iface.device;
int const port = iface.port;
bool const ssl = iface.ssl;

#ifndef TORRENT_USE_OPENSSL
if (ssl)

@@ -1817,13 +1819,13 @@ namespace aux {
continue;
}

for (int k = 0; k < int(ifs.size()); ++k)
for (auto const& ipface : ifs)
{
// we're looking for a specific interface, and its address
// (which must be of the same family as the address we're
// connecting to)
if (device != ifs[k].name) continue;
eps.emplace_back(ifs[k].interface_address, port, device, ssl);
if (device != ipface.name) continue;
eps.emplace_back(ipface.interface_address, port, device, ssl);
}
}
}

@@ -1834,9 +1836,12 @@ namespace aux {
{
// TODO notify interested parties of this socket's demise
#ifndef TORRENT_DISABLE_LOGGING
session_log("Closing listen socket for %s on device \"%s\""
, print_endpoint(remove_iter->local_endpoint).c_str()
, remove_iter->device.c_str());
if (should_log())
{
session_log("Closing listen socket for %s on device \"%s\""
, print_endpoint(remove_iter->local_endpoint).c_str()
, remove_iter->device.c_str());
}
#endif
if (remove_iter->sock) remove_iter->sock->close(ec);
if (remove_iter->udp_sock) remove_iter->udp_sock->close();

@@ -5195,10 +5200,8 @@ namespace aux {
std::vector<std::pair<std::string, int>> nodes;
parse_comma_separated_string_port(node_list, nodes);

for (int i = 0; i < nodes.size(); ++i)
{
add_dht_router(nodes[i]);
}
for (auto const& n : nodes)
add_dht_router(n);
#endif
}

@@ -5691,7 +5694,7 @@ namespace aux {

void put_mutable_callback(dht::item& i
, std::function<void(entry&, std::array<char, 64>&
, std::uint64_t&, std::string const&)> cb)
, std::int64_t&, std::string const&)> cb)
{
entry value = i.value();
dht::signature sig = i.sig();

@@ -5727,7 +5730,7 @@ namespace aux {

void session_impl::dht_put_mutable_item(std::array<char, 32> key
, std::function<void(entry&, std::array<char,64>&
, std::uint64_t&, std::string const&)> cb
, std::int64_t&, std::string const&)> cb
, std::string salt)
{
if (!m_dht) return;

@@ -5960,7 +5963,7 @@ namespace aux {

void session_impl::update_queued_disk_bytes()
{
std::uint64_t cache_size = m_settings.get_int(settings_pack::cache_size);
int const cache_size = m_settings.get_int(settings_pack::cache_size);
if (m_settings.get_int(settings_pack::max_queued_disk_bytes) / 16 / 1024
> cache_size / 2
&& cache_size > 5

@@ -533,7 +533,7 @@ namespace libtorrent
std::vector<stats_metric> session_stats_metrics()
{
std::vector<stats_metric> stats;
const int num = sizeof(metrics)/sizeof(metrics[0]);
int const num = sizeof(metrics) / sizeof(metrics[0]);
stats.resize(num);
for (int i = 0; i < num; ++i)
{

@@ -37,6 +37,7 @@ POSSIBILITY OF SUCH DAMAGE.
#include "libtorrent/aux_/session_impl.hpp"

#include <algorithm>
#include <array>

namespace {

@@ -98,8 +99,8 @@ namespace libtorrent

using aux::session_impl;

str_setting_entry_t str_settings[settings_pack::num_string_settings] =
{
std::array<str_setting_entry_t, settings_pack::num_string_settings> str_settings =
{{
SET(user_agent, "libtorrent/" LIBTORRENT_VERSION, &session_impl::update_user_agent),
SET(announce_ip, nullptr, nullptr),
SET(mmap_cache, nullptr, nullptr),

@@ -116,10 +117,10 @@ namespace libtorrent
SET(i2p_hostname, "", &session_impl::update_i2p_bridge),
SET(peer_fingerprint, "-LT1200-", &session_impl::update_peer_fingerprint),
SET(dht_bootstrap_nodes, "dht.libtorrent.org:25401", &session_impl::update_dht_bootstrap_nodes)
};
}};

bool_setting_entry_t bool_settings[settings_pack::num_bool_settings] =
{
std::array<bool_setting_entry_t, settings_pack::num_bool_settings> bool_settings =
{{
SET(allow_multiple_connections_per_ip, false, nullptr),
DEPRECATED_SET(ignore_limits_on_local_network, true, &session_impl::update_ignore_rate_limits_on_local_network),
SET(send_redundant_have, true, nullptr),

@@ -188,10 +189,10 @@ namespace libtorrent
SET(proxy_peer_connections, true, nullptr),
SET(auto_sequential, true, &session_impl::update_auto_sequential),
SET(proxy_tracker_connections, true, nullptr),
};
}};

int_setting_entry_t int_settings[settings_pack::num_int_settings] =
{
std::array<int_setting_entry_t, settings_pack::num_int_settings> int_settings =
{{
SET(tracker_completion_timeout, 30, nullptr),
SET(tracker_receive_timeout, 10, nullptr),
SET(stop_tracker_timeout, 5, nullptr),

@@ -319,7 +320,7 @@ namespace libtorrent
SET(proxy_port, 0, &session_impl::update_proxy),
SET(i2p_port, 0, &session_impl::update_i2p_bridge),
SET(cache_size_volatile, 256, nullptr)
};
}};

#undef SET
#undef SET_DEPRECATED

@@ -328,20 +329,20 @@ namespace libtorrent

int setting_by_name(std::string const& key)
{
for (int k = 0; k < sizeof(str_settings)/sizeof(str_settings[0]); ++k)
for (std::size_t k = 0; k < str_settings.size(); ++k)
{
if (key != str_settings[k].name) continue;
return settings_pack::string_type_base + k;
return settings_pack::string_type_base + int(k);
}
for (int k = 0; k < sizeof(int_settings)/sizeof(int_settings[0]); ++k)
for (std::size_t k = 0; k < int_settings.size(); ++k)
{
if (key != int_settings[k].name) continue;
return settings_pack::int_type_base + k;
return settings_pack::int_type_base + int(k);
}
for (int k = 0; k < sizeof(bool_settings)/sizeof(bool_settings[0]); ++k)
for (std::size_t k = 0; k < bool_settings.size(); ++k)
{
if (key != bool_settings[k].name) continue;
return settings_pack::bool_type_base + k;
return settings_pack::bool_type_base + int(k);
}
return -1;
}

@@ -377,27 +378,27 @@ namespace libtorrent
case bdecode_node::int_t:
{
bool found = false;
for (int k = 0; k < sizeof(int_settings) / sizeof(int_settings[0]); ++k)
for (std::size_t k = 0; k < int_settings.size(); ++k)
{
if (key != int_settings[k].name) continue;
pack.set_int(settings_pack::int_type_base + k, val.int_value());
pack.set_int(settings_pack::int_type_base + int(k), int(val.int_value()));
found = true;
break;
}
if (found) continue;
for (int k = 0; k < sizeof(bool_settings) / sizeof(bool_settings[0]); ++k)
for (std::size_t k = 0; k < bool_settings.size(); ++k)
{
if (key != bool_settings[k].name) continue;
pack.set_bool(settings_pack::bool_type_base + k, val.int_value() != 0);
pack.set_bool(settings_pack::bool_type_base + int(k), val.int_value() != 0);
break;
}
}
break;
case bdecode_node::string_t:
for (int k = 0; k < sizeof(str_settings) / sizeof(str_settings[0]); ++k)
for (std::size_t k = 0; k < str_settings.size(); ++k)
{
if (key != str_settings[k].name) continue;
pack.set_str(settings_pack::string_type_base + k, val.string_value().to_string());
pack.set_str(settings_pack::string_type_base + int(k), val.string_value().to_string());
break;
}
break;
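
The settings tables above move from C arrays sized by the enum to std::array, so the sizeof(x)/sizeof(x[0]) loops become .size() loops over std::size_t, with an explicit int cast only where the index feeds a settings_pack base value. A self-contained sketch of the same shape (the entry type and names here are illustrative, not the real tables):

#include <array>
#include <cstdio>
#include <cstring>

struct setting_entry { char const* name; };

int main()
{
	// double braces: std::array is an aggregate wrapping a built-in array
	std::array<setting_entry, 3> const str_settings = {{
		{"user_agent"}, {"announce_ip"}, {"peer_fingerprint"}
	}};

	char const* const key = "announce_ip";
	for (std::size_t k = 0; k < str_settings.size(); ++k)
	{
		if (std::strcmp(str_settings[k].name, key) != 0) continue;
		std::printf("found %s at index %d\n", key, int(k));
		break;
	}
	return 0;
}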

@@ -1126,7 +1126,7 @@ namespace libtorrent
TORRENT_ASSERT(files.is_loaded());

// find the file iterator and file offset
std::uint64_t torrent_offset = piece * std::uint64_t(files.piece_length()) + offset;
std::int64_t const torrent_offset = piece * std::int64_t(files.piece_length()) + offset;
int file_index = files.file_index_at_offset(torrent_offset);
TORRENT_ASSERT(torrent_offset >= files.file_offset(file_index));
TORRENT_ASSERT(torrent_offset < files.file_offset(file_index) + files.file_size(file_index));

@@ -1143,7 +1143,7 @@ namespace libtorrent
// advance_bufs())
TORRENT_ALLOCA(current_buf, file::iovec_t, bufs.size());
copy_bufs(bufs, size, current_buf);
TORRENT_ASSERT(count_bufs(current_buf, size) == bufs.size());
TORRENT_ASSERT(count_bufs(current_buf, size) == int(bufs.size()));

TORRENT_ALLOCA(tmp_buf, file::iovec_t, bufs.size());

@@ -1188,7 +1188,7 @@ namespace libtorrent
bytes_left -= bytes_transferred;
file_offset += bytes_transferred;

TORRENT_ASSERT(count_bufs(current_buf, bytes_left) <= bufs.size());
TORRENT_ASSERT(count_bufs(current_buf, bytes_left) <= int(bufs.size()));

// if the file operation returned 0, we've hit end-of-file. We're done
if (bytes_transferred == 0)

@@ -206,7 +206,7 @@ namespace libtorrent
// the number of dots we've added
char num_dots = 0;
bool found_extension = false;
for (int i = 0; i < element.size(); ++i)
for (std::size_t i = 0; i < element.size(); ++i)
{
if (element[i] == '/'
|| element[i] == '\\'

@@ -251,7 +251,7 @@ namespace libtorrent
else
{
path += element[i];
path += element[i+1];
path += element[i + 1];
last_len = 2;
}
i += 1;

@@ -276,8 +276,8 @@ namespace libtorrent
else
{
path += element[i];
path += element[i+1];
path += element[i+2];
path += element[i + 1];
path += element[i + 2];
last_len = 3;
}
i += 2;

@@ -286,16 +286,16 @@ namespace libtorrent
{
// 4 bytes
if (element.size() - i < 4
|| (element[i+1] & 0xc0) != 0x80
|| (element[i+2] & 0xc0) != 0x80
|| (element[i+3] & 0xc0) != 0x80
|| (element[i + 1] & 0xc0) != 0x80
|| (element[i + 2] & 0xc0) != 0x80
|| (element[i + 3] & 0xc0) != 0x80
)
{
path += '_';
last_len = 1;
}
else if ((element[i] & 0x07) == 0
&& (element[i+1] & 0x3f) == 0)
&& (element[i + 1] & 0x3f) == 0)
{
// overlong sequences are invalid
path += '_';

@@ -304,9 +304,9 @@ namespace libtorrent
else
{
path += element[i];
path += element[i+1];
path += element[i+2];
path += element[i+3];
path += element[i + 1];
path += element[i + 2];
path += element[i + 3];
last_len = 4;
}
i += 3;

@@ -331,8 +331,8 @@ namespace libtorrent
#endif
{
int dot = -1;
for (int j = int(element.size())-1;
j > (std::max)(int(element.size() - 10), i); --j)
for (int j = int(element.size()) - 1;
j > std::max(int(element.size()) - 10, int(i)); --j)
{
if (element[j] != '.') continue;
dot = j;

@@ -341,14 +341,15 @@ namespace libtorrent
// there is no extension
if (dot == -1) break;
found_extension = true;
i = dot - 1;
TORRENT_ASSERT(dot > 0);
i = std::size_t(dot - 1);
}
}

if (added == num_dots && added <= 2)
{
// revert everything
path.erase(path.end()-added-added_separator, path.end());
path.erase(path.end() - added - added_separator, path.end());
return;
}

@@ -365,7 +366,7 @@ namespace libtorrent
if (added == 0 && added_separator)
{
// remove the separator added at the beginning
path.erase(path.end()-1);
path.erase(path.end() - 1);
return;
}
#endif
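
The byte checks in the path-sanitization hunks above validate UTF-8 multi-byte sequences: every byte after a multi-byte lead must be a continuation byte of the form 10xxxxxx, i.e. (byte & 0xc0) == 0x80. A small sketch of just that test (the helper name is illustrative, not the real sanitizer):

#include <cstdio>
#include <string>

bool is_continuation(unsigned char const b) { return (b & 0xc0) == 0x80; }

int main()
{
	std::string const element = "\xe2\x82\xac"; // U+20AC encoded as a 3-byte sequence
	std::size_t const i = 0;

	// a 3-byte lead at i must be followed by two continuation bytes
	bool const valid = element.size() - i >= 3
		&& is_continuation(element[i + 1])
		&& is_continuation(element[i + 2]);
	std::printf("valid 3-byte sequence: %s\n", valid ? "yes" : "no");
	return 0;
}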

@@ -528,7 +529,7 @@ namespace libtorrent
file_flags &= ~file_storage::flag_symlink;
}

if (filename_len > path.length()
if (filename_len > int(path.length())
|| path.compare(path.size() - filename_len, filename_len, filename
, filename_len) != 0)
{

@@ -666,11 +667,11 @@ namespace libtorrent
const_cast<file_storage&>(*m_orig_files).apply_pointer_offset(offset);

#ifndef TORRENT_DISABLE_MUTABLE_TORRENTS
for (int i = 0; i < m_collections.size(); ++i)
m_collections[i].first += offset;
for (auto& c : m_collections)
c.first += offset;

for (int i = 0; i < m_similar_torrents.size(); ++i)
m_similar_torrents[i] += offset;
for (auto& st : m_similar_torrents)
st += offset;
#endif

if (m_info_dict)

@@ -1615,11 +1616,11 @@ namespace libtorrent
#ifndef TORRENT_DISABLE_MUTABLE_TORRENTS
ret.reserve(m_similar_torrents.size() + m_owned_similar_torrents.size());

for (int i = 0; i < m_similar_torrents.size(); ++i)
ret.push_back(sha1_hash(m_similar_torrents[i]));
for (auto const& st : m_similar_torrents)
ret.push_back(sha1_hash(st));

for (int i = 0; i < m_owned_similar_torrents.size(); ++i)
ret.push_back(m_owned_similar_torrents[i]);
for (auto const& st : m_owned_similar_torrents)
ret.push_back(st);
#endif

return ret;

@@ -1631,11 +1632,11 @@ namespace libtorrent
#ifndef TORRENT_DISABLE_MUTABLE_TORRENTS
ret.reserve(m_collections.size() + m_owned_collections.size());

for (int i = 0; i < m_collections.size(); ++i)
ret.push_back(std::string(m_collections[i].first, m_collections[i].second));
for (auto const& c : m_collections)
ret.push_back(std::string(c.first, c.second));

for (int i = 0; i < m_owned_collections.size(); ++i)
ret.push_back(m_owned_collections[i]);
for (auto const& c : m_owned_collections)
ret.push_back(c);
#endif // TORRENT_DISABLE_MUTABLE_TORRENTS

return ret;

@@ -387,7 +387,7 @@ void web_peer_connection::write_request(peer_request const& r)
file_request_t file_req;
file_req.file_index = f.file_index;
file_req.start = f.offset;
file_req.length = f.size;
file_req.length = int(f.size);

if (info.orig_files().pad_file_at(f.file_index))
{

@@ -542,7 +542,7 @@ bool web_peer_connection::received_invalid_data(int index, bool single_peer)
// assume the web seed has a different copy of this specific file
// than what we expect, and pretend not to have it.
int fi = files[0].file_index;
int first_piece = fs.file_offset(fi) / fs.piece_length();
int first_piece = int(fs.file_offset(fi) / fs.piece_length());
// one past last piece
int end_piece = int((fs.file_offset(fi) + fs.file_size(fi) + 1) / fs.piece_length());
for (int i = first_piece; i < end_piece; ++i)

@@ -744,7 +744,7 @@ void web_peer_connection::on_receive(error_code const& error
}

TORRENT_ASSERT(recv_buffer.empty() || recv_buffer[0] == 'H');
TORRENT_ASSERT(recv_buffer.size() <= m_recv_buffer.packet_size());
TORRENT_ASSERT(int(recv_buffer.size()) <= m_recv_buffer.packet_size());

// this means the entire status line hasn't been received yet
if (m_parser.status_code() == -1)

@@ -915,7 +915,7 @@ void web_peer_connection::on_receive(error_code const& error
received_bytes(0, header_size - m_partial_chunk_header);
m_partial_chunk_header = 0;
TORRENT_ASSERT(chunk_size != 0
|| chunk_start.size() <= header_size || chunk_start[header_size] == 'H');
|| int(chunk_start.size()) <= header_size || chunk_start[header_size] == 'H');
TORRENT_ASSERT(m_body_start + m_chunk_pos < INT_MAX);
m_chunk_pos += chunk_size;
recv_buffer = recv_buffer.subspan(header_size);

@@ -1141,9 +1141,9 @@ void web_peer_connection::handle_padfile()
while (file_size > 0)
{
peer_request const front_request = m_requests.front();
TORRENT_ASSERT(m_piece.size() < front_request.length);
TORRENT_ASSERT(int(m_piece.size()) < front_request.length);

int pad_size = int((std::min)(file_size
int pad_size = int(std::min(file_size
, std::int64_t(front_request.length - m_piece.size())));
TORRENT_ASSERT(pad_size > 0);
file_size -= pad_size;
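
The received_invalid_data() hunk above converts a file's byte extent into a piece range, doing the division in 64-bit and narrowing the resulting piece indices to int. The same computation in isolation, with assumed sizes:

#include <cstdint>
#include <cstdio>

int main()
{
	std::int64_t const file_offset = 5LL * 1024 * 1024 * 1024; // file starts 5 GB into the torrent
	std::int64_t const file_size = 700LL * 1024 * 1024;
	std::int64_t const piece_length = 2 * 1024 * 1024;

	int const first_piece = int(file_offset / piece_length);
	// one past the last piece the file touches, mirroring the hunk above
	int const end_piece = int((file_offset + file_size + 1) / piece_length);
	std::printf("pieces [%d, %d)\n", first_piece, end_piece);
	return 0;
}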

@@ -115,7 +115,7 @@ alert* wait_for_alert(lt::session& s, int alert_type)
}

void put_string(entry& e, std::array<char, 64>& sig
, std::uint64_t& seq
, std::int64_t& seq
, std::string const& salt
, std::array<char, 32> const& pk
, std::array<char, 64> const& sk

@@ -411,4 +411,3 @@ int main(int argc, char* argv[])
}

#endif