code refactor related to the use of index_range and end index
parent 0659a929af
commit 6d2f804d9f

@@ -37,6 +37,7 @@ POSSIBILITY OF SUCH DAMAGE.
 #include "libtorrent/units.hpp"
 #include "libtorrent/assert.hpp"
+#include "libtorrent/index_range.hpp"
 
 namespace libtorrent { namespace aux {
 
 
@@ -80,6 +81,13 @@ namespace libtorrent { namespace aux {
     {
         return IndexType(static_cast<underlying_index>(Size));
     }
+
+    // returns an object that can be used in a range-for to iterate over all
+    // indices
+    constexpr index_range<IndexType> range() const noexcept
+    {
+        return {IndexType{0}, end_index()};
+    }
 };
 
 }}

@@ -38,6 +38,7 @@ POSSIBILITY OF SUCH DAMAGE.
 #include "libtorrent/units.hpp"
 #include "libtorrent/assert.hpp"
+#include "libtorrent/index_range.hpp"
 
 namespace libtorrent { namespace aux {
 
 
@@ -89,6 +90,13 @@ namespace libtorrent { namespace aux {
         return IndexType(static_cast<underlying_index>(this->size()));
     }
 
+    // returns an object that can be used in a range-for to iterate over all
+    // indices
+    index_range<IndexType> range() const noexcept
+    {
+        return {IndexType{0}, end_index()};
+    }
+
     template <typename U = underlying_index, typename Cond
         = typename std::enable_if<std::is_signed<U>::value>::type>
     void resize(underlying_index s)

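The two hunks above add the same range() helper to both aux container wrappers. As a rough illustration of the pattern (not code from this commit; the toy_* names below are made up and only mimic libtorrent's typed-index machinery), a container that exposes a strongly-typed end_index() can hand out a half-open [0, end_index()) range whose iterator yields indices, so call sites no longer spell out index_range<...>{...} by hand:

    // toy_index / toy_range / toy_vector are illustrative stand-ins for
    // libtorrent's typed index, index_range<> and aux:: container wrappers.
    #include <cstddef>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    struct toy_index
    {
        std::int32_t v;
        bool operator!=(toy_index const rhs) const { return v != rhs.v; }
        toy_index& operator++() { ++v; return *this; }
        toy_index operator*() const { return *this; } // the range yields the index itself
    };

    // half-open [begin, end) range of indices, usable directly in a range-for
    struct toy_range
    {
        toy_index m_begin;
        toy_index m_end;
        toy_index begin() const { return m_begin; }
        toy_index end() const { return m_end; }
    };

    struct toy_vector
    {
        std::vector<int> m_storage;

        // one-past-the-last valid index, as a typed index
        toy_index end_index() const
        { return toy_index{static_cast<std::int32_t>(m_storage.size())}; }

        // returns an object that can be used in a range-for over all indices
        toy_range range() const { return {toy_index{0}, end_index()}; }

        int operator[](toy_index const i) const
        { return m_storage[static_cast<std::size_t>(i.v)]; }
    };

    int main()
    {
        toy_vector const files{{10, 20, 30}};
        // before the refactor, call sites built the range by hand, e.g.
        //   index_range<file_index_t>{file_index_t{}, files.end_index()}
        // range() packages the same half-open interval
        for (toy_index const i : files.range())
            std::cout << i.v << " -> " << files[i] << "\n";
    }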
@@ -287,7 +287,7 @@ namespace libtorrent {
     // indices in the bitfield
     index_range<IndexType> range() const noexcept
     {
-        return index_range<IndexType>{IndexType{0}, end_index()};
+        return {IndexType{0}, end_index()};
     }
 
     bool operator[](IndexType const index) const

@@ -339,11 +339,11 @@ namespace libtorrent {
     // ``piece_range()`` returns an implementation-defined type that can be
     // used as the container in a range-for loop. Where the values are the
     // indices of all pieces in the file_storage.
-    piece_index_t last_piece() const { return piece_index_t(m_files.num_pieces() - 1); }
+    piece_index_t last_piece() const { return m_files.last_piece(); }
     piece_index_t end_piece() const
     {
         TORRENT_ASSERT(m_files.num_pieces() > 0);
-        return piece_index_t(m_files.num_pieces());
+        return m_files.end_piece();
     }
     index_range<piece_index_t> piece_range() const
     { return m_files.piece_range(); }

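For reference, the accessors this hunk forwards to follow the usual half-open convention: end_piece() is the one-past-the-last valid piece index (the sentinel), last_piece() is the last valid index, and piece_range() spans [piece_index_t{0}, end_piece()). A minimal sketch of that arithmetic with plain integers (illustrative only, not libtorrent code):

    // Plain-integer sketch of the index conventions behind last_piece(),
    // end_piece() and piece_range(); the variable names are illustrative.
    #include <cassert>

    int main()
    {
        int const num_pieces = 1000;

        int const end_piece  = num_pieces;      // one-past-the-last valid index (the sentinel)
        int const last_piece = num_pieces - 1;  // last valid index, requires num_pieces > 0

        // piece_range() models the half-open interval [0, end_piece): every
        // valid index is visited exactly once and the sentinel never is
        int visited = 0;
        for (int p = 0; p != end_piece; ++p) ++visited;

        assert(visited == num_pieces);
        assert(last_piece == end_piece - 1);
        return 0;
    }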
@@ -542,10 +542,10 @@ namespace {
     { return --m_files.end_index(); }
 
     index_range<file_index_t> file_storage::file_range() const noexcept
-    { return {file_index_t{0}, m_files.end_index()}; }
+    { return m_files.range(); }
 
     index_range<piece_index_t> file_storage::piece_range() const noexcept
-    { return {piece_index_t{0}, piece_index_t{m_num_pieces}}; }
+    { return {piece_index_t{0}, end_piece()}; }
 
     peer_request file_storage::map_file(file_index_t const file_index
         , std::int64_t const file_offset, int const size) const

@@ -556,7 +556,7 @@ namespace {
     peer_request ret{};
     if (file_index >= end_file())
     {
-        ret.piece = piece_index_t{m_num_pieces};
+        ret.piece = end_piece();
         ret.start = 0;
         ret.length = 0;
         return ret;

@@ -566,7 +566,7 @@ namespace {
 
     if (offset >= total_size())
     {
-        ret.piece = piece_index_t{m_num_pieces};
+        ret.piece = end_piece();
         ret.start = 0;
         ret.length = 0;
     }

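A side effect visible in the two map_file() hunks above is that out-of-range file indices and offsets now produce a peer_request whose piece is the end_piece() sentinel with a zero-length extent. A hedged caller-side sketch of how that could be detected, assuming the public file_storage and peer_request headers (the helper function itself is hypothetical, not part of this commit):

    // Hypothetical caller-side check, not code from this commit. map_file(),
    // end_piece() and the peer_request members used here all appear in the
    // hunks above.
    #include "libtorrent/file_storage.hpp"
    #include "libtorrent/peer_request.hpp"

    #include <cstdint>

    bool maps_to_valid_piece(libtorrent::file_storage const& fs
        , libtorrent::file_index_t const file, std::int64_t const offset)
    {
        libtorrent::peer_request const req = fs.map_file(file, offset, 0);
        // out-of-range file indices and offsets map to the end_piece()
        // sentinel with start == 0 and length == 0
        return req.piece != fs.end_piece();
    }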
@@ -5026,7 +5026,7 @@ bool is_downloading_state(int const st)
 
     if (fs) files.resize(fs->num_files(), default_priority);
 
-    for (file_index_t i : index_range<file_index_t>{file_index_t{}, files.end_index()})
+    for (file_index_t i : files.range())
     {
         // initialize pad files to priority 0
         if (files[i] > dont_download && fs && fs->pad_file_at(i))
