return a span from TORRENT_ALLOCA (#1244)
Return a span from TORRENT_ALLOCA. Unfortunately this requires moving the variable declaration inside the macro: due to alloca's unique properties, pretty much the only safe way to call it is in a simple assign-to-pointer expression. Therefore we need a temporary pointer to hold the value returned from alloca before we can call span's constructor. This also causes double evaluation of the size parameter, which is unfortunate, but no current caller of TORRENT_ALLOCA has a problem with that, and passing expressions with side effects to macros is bad mojo anyway.
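For illustration, here is a minimal, self-contained sketch of the pattern the message describes. This is a simplified stand-in, not code from this commit: the `span` type, `main`, `bufs`, and `num_bufs` are hypothetical, and only the POSIX `<alloca.h>` branch is shown; the real per-platform definitions are in the diff below.

// Sketch of the new TORRENT_ALLOCA shape: the macro declares the variable,
// alloca() is called in a plain assign-to-pointer statement, and a span is
// then constructed from the temporary pointer. Note that `n` is evaluated twice.
#include <alloca.h>
#include <cstddef>
#include <cstdio>

template <typename T>
struct span // bare-bones stand-in for ::libtorrent::span<T>
{
	span() : m_ptr(nullptr), m_len(0) {}
	span(T* p, std::size_t n) : m_ptr(p), m_len(n) {}
	T* data() const { return m_ptr; }
	std::size_t size() const { return m_len; }
	T* m_ptr;
	std::size_t m_len;
};

#define TORRENT_ALLOCA(v, t, n) span<t> v; { \
	t* TORRENT_ALLOCA_tmp = static_cast<t*>(alloca(sizeof(t) * (n))); \
	v = span<t>(TORRENT_ALLOCA_tmp, n); }

int main()
{
	int const num_bufs = 4; // must be side-effect free: evaluated twice by the macro
	TORRENT_ALLOCA(bufs, int, num_bufs); // declares `bufs` as span<int>
	for (std::size_t i = 0; i < bufs.size(); ++i)
		bufs.data()[i] = int(i);
	std::printf("allocated %zu ints on the stack\n", bufs.size());
}

The braces around the alloca call do not limit the allocation's lifetime; alloca memory is released when the enclosing function returns, which is what lets the macro hand the span back to the surrounding scope.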
This commit is contained in:
parent e9f9c3ca22
commit a6e5ba8804
@@ -33,21 +33,28 @@ POSSIBILITY OF SUCH DAMAGE.
 #ifndef TORRENT_ALLOCA

 #include "libtorrent/config.hpp"
+#include "libtorrent/span.hpp"

 #if defined TORRENT_WINDOWS || defined TORRENT_MINGW

 #include <malloc.h>
-#define TORRENT_ALLOCA(t, n) static_cast<t*>(_alloca(sizeof(t) * (n)))
+#define TORRENT_ALLOCA(v, t, n) ::libtorrent::span<t> v; { \
+t* TORRENT_ALLOCA_tmp = static_cast<t*>(_alloca(sizeof(t) * (n))); \
+v = ::libtorrent::span<t>(TORRENT_ALLOCA_tmp, n); }

 #elif defined TORRENT_BSD

 #include <stdlib.h>
-#define TORRENT_ALLOCA(t, n) static_cast<t*>(alloca(sizeof(t) * (n)))
+#define TORRENT_ALLOCA(v, t, n) ::libtorrent::span<t> v; { \
+t* TORRENT_ALLOCA_tmp = static_cast<t*>(alloca(sizeof(t) * (n))); \
+v = ::libtorrent::span<t>(TORRENT_ALLOCA_tmp, n); }

 #else

 #include <alloca.h>
-#define TORRENT_ALLOCA(t, n) static_cast<t*>(alloca(sizeof(t) * (n)))
+#define TORRENT_ALLOCA(v, t, n) ::libtorrent::span<t> v; { \
+t* TORRENT_ALLOCA_tmp = static_cast<t*>(alloca(sizeof(t) * (n))); \
+v = ::libtorrent::span<t>(TORRENT_ALLOCA_tmp, n); }

 #endif

@@ -54,6 +54,7 @@ POSSIBILITY OF SUCH DAMAGE.

 #include "libtorrent/io_service_fwd.hpp"
 #include "libtorrent/file.hpp" // for iovec_t
+#include "libtorrent/span.hpp"

 namespace libtorrent
 {
@@ -77,7 +78,7 @@ namespace libtorrent
 char* allocate_buffer(bool& exceeded, std::shared_ptr<disk_observer> o
 , char const* category);
 void free_buffer(char* buf);
-void free_multiple_buffers(char** bufvec, int numbufs);
+void free_multiple_buffers(span<char*> bufvec);

 int allocate_iovec(file::iovec_t* iov, int iov_len);
 void free_iovec(file::iovec_t* iov, int iov_len);
@@ -149,7 +149,7 @@ namespace libtorrent
 struct add_torrent_params;

 TORRENT_EXTRA_EXPORT int copy_bufs(file::iovec_t const* bufs, int bytes, file::iovec_t* target);
-TORRENT_EXTRA_EXPORT void advance_bufs(file::iovec_t*& bufs, int bytes);
+TORRENT_EXTRA_EXPORT span<file::iovec_t> advance_bufs(span<file::iovec_t> bufs, int bytes);
 TORRENT_EXTRA_EXPORT void clear_bufs(file::iovec_t const* bufs, int num_bufs);

 // flags for async_move_storage
@@ -653,7 +653,7 @@ namespace libtorrent
 // this is the stack of bdecode_token indices, into m_tokens.
 // sp is the stack pointer, as index into the array, stack
 int sp = 0;
-stack_frame* stack = TORRENT_ALLOCA(stack_frame, depth_limit);
+TORRENT_ALLOCA(stack, stack_frame, depth_limit);

 char const* const orig_start = start;

@@ -569,7 +569,7 @@ void block_cache::try_evict_one_volatile()
 // some blocks are pinned in this piece, skip it
 if (pe->pinned > 0) continue;

-char** to_delete = TORRENT_ALLOCA(char*, pe->blocks_in_piece);
+TORRENT_ALLOCA(to_delete, char*, pe->blocks_in_piece);
 int num_to_delete = 0;

 // go through the blocks and evict the ones that are not dirty and not
@@ -607,7 +607,7 @@ void block_cache::try_evict_one_volatile()
 DLOG(stderr, "[%p] removed %d blocks\n", static_cast<void*>(this)
 , num_to_delete);

-free_multiple_buffers(to_delete, num_to_delete);
+free_multiple_buffers(to_delete.first(num_to_delete));
 return;
 }
 }
@@ -895,7 +895,7 @@ bool block_cache::evict_piece(cached_piece_entry* pe, tailqueue<disk_io_job>& jo

 TORRENT_PIECE_ASSERT(pe->in_use, pe);

-char** to_delete = TORRENT_ALLOCA(char*, pe->blocks_in_piece);
+TORRENT_ALLOCA(to_delete, char*, pe->blocks_in_piece);
 int num_to_delete = 0;
 for (int i = 0; i < pe->blocks_in_piece; ++i)
 {
@@ -928,7 +928,7 @@ bool block_cache::evict_piece(cached_piece_entry* pe, tailqueue<disk_io_job>& jo
 m_volatile_size -= num_to_delete;
 }

-if (num_to_delete) free_multiple_buffers(to_delete, num_to_delete);
+if (num_to_delete) free_multiple_buffers(to_delete.first(num_to_delete));

 if (pe->ok_to_evict(true))
 {
@@ -998,7 +998,7 @@ int block_cache::try_evict_blocks(int num, cached_piece_entry* ignore)

 DLOG(stderr, "[%p] try_evict_blocks: %d\n", static_cast<void*>(this), num);

-char** to_delete = TORRENT_ALLOCA(char*, num);
+TORRENT_ALLOCA(to_delete, char*, num);
 int num_to_delete = 0;

 // There are two ends of the ARC cache we can evict from. There's L1 and L2.
@@ -1201,7 +1201,7 @@ int block_cache::try_evict_blocks(int num, cached_piece_entry* ignore)
 DLOG(stderr, "[%p] removed %d blocks\n", static_cast<void*>(this)
 , num_to_delete);

-free_multiple_buffers(to_delete, num_to_delete);
+free_multiple_buffers(to_delete.first(num_to_delete));

 return num;
 }
@@ -1230,7 +1230,7 @@ void block_cache::clear(tailqueue<disk_io_job>& jobs)
 drain_piece_bufs(pe, bufs);
 }

-if (!bufs.empty()) free_multiple_buffers(&bufs[0], int(bufs.size()));
+if (!bufs.empty()) free_multiple_buffers(bufs);

 // clear lru lists
 for (int i = 0; i < cached_piece_entry::num_lrus; ++i)
@@ -1433,7 +1433,7 @@ void block_cache::abort_dirty(cached_piece_entry* pe)

 TORRENT_PIECE_ASSERT(pe->in_use, pe);

-char** to_delete = TORRENT_ALLOCA(char*, pe->blocks_in_piece);
+TORRENT_ALLOCA(to_delete, char*, pe->blocks_in_piece);
 int num_to_delete = 0;
 for (int i = 0; i < pe->blocks_in_piece; ++i)
 {
@@ -1453,7 +1453,7 @@ void block_cache::abort_dirty(cached_piece_entry* pe)
 TORRENT_PIECE_ASSERT(pe->num_dirty > 0, pe);
 --pe->num_dirty;
 }
-if (num_to_delete) free_multiple_buffers(to_delete, num_to_delete);
+if (num_to_delete) free_multiple_buffers(to_delete.first(num_to_delete));

 update_cache_state(pe);
 }
@@ -1472,7 +1472,7 @@ void block_cache::free_piece(cached_piece_entry* pe)

 // build a vector of all the buffers we need to free
 // and free them all in one go
-char** to_delete = TORRENT_ALLOCA(char*, pe->blocks_in_piece);
+TORRENT_ALLOCA(to_delete, char*, pe->blocks_in_piece);
 int num_to_delete = 0;
 int removed_clean = 0;
 for (int i = 0; i < pe->blocks_in_piece; ++i)
@@ -1504,7 +1504,7 @@ void block_cache::free_piece(cached_piece_entry* pe)
 {
 m_volatile_size -= num_to_delete;
 }
-if (num_to_delete) free_multiple_buffers(to_delete, num_to_delete);
+if (num_to_delete) free_multiple_buffers(to_delete.first(num_to_delete));
 update_cache_state(pe);
 }

@@ -2153,20 +2153,19 @@ namespace libtorrent

 const int packet_size = (num_pieces + 7) / 8 + 5;

-std::uint8_t* msg = TORRENT_ALLOCA(std::uint8_t, packet_size);
-if (msg == nullptr) return; // out of memory
-unsigned char* ptr = msg;
+TORRENT_ALLOCA(msg, std::uint8_t, packet_size);
+if (msg.data() == nullptr) return; // out of memory
+auto ptr = msg.begin();

 detail::write_int32(packet_size - 4, ptr);
 detail::write_uint8(msg_bitfield, ptr);

 if (t->is_seed())
 {
-std::memset(ptr, 0xff, packet_size - 5);
+std::fill_n(ptr, packet_size - 5, 0xff);

 // Clear trailing bits
-unsigned char *p = msg + packet_size - 1;
-*p = (0xff << ((8 - (num_pieces & 7)) & 7)) & 0xff;
+msg.back() = (0xff << ((8 - (num_pieces & 7)) & 7)) & 0xff;
 }
 else
 {
@@ -2206,7 +2205,7 @@ namespace libtorrent
 #endif
 m_sent_bitfield = true;

-send_buffer(reinterpret_cast<char const*>(msg), packet_size);
+send_buffer(reinterpret_cast<char const*>(msg.data()), msg.size());

 stats_counters().inc_stats_counter(counters::num_outgoing_bitfield);
 }
@@ -340,16 +340,14 @@ namespace libtorrent
 return ret;
 }

-void disk_buffer_pool::free_multiple_buffers(char** bufvec, int numbufs)
+void disk_buffer_pool::free_multiple_buffers(span<char*> bufvec)
 {
-char** end = bufvec + numbufs;
 // sort the pointers in order to maximize cache hits
-std::sort(bufvec, end);
+std::sort(bufvec.begin(), bufvec.end());

 std::unique_lock<std::mutex> l(m_pool_mutex);
-for (; bufvec != end; ++bufvec)
+for (char* buf : bufvec)
 {
-char* buf = *bufvec;
 TORRENT_ASSERT(is_disk_buffer(buf, l));
 free_buffer_impl(buf, l);
 remove_buffer_in_use(buf);
@@ -445,16 +445,16 @@ namespace libtorrent

 cont_pieces = range_end - range_start;

-file::iovec_t* iov = TORRENT_ALLOCA(file::iovec_t, p->blocks_in_piece * cont_pieces);
-int* flushing = TORRENT_ALLOCA(int, p->blocks_in_piece * cont_pieces);
+TORRENT_ALLOCA(iov, file::iovec_t, p->blocks_in_piece * cont_pieces);
+TORRENT_ALLOCA(flushing, int, p->blocks_in_piece * cont_pieces);
 // this is the offset into iov and flushing for each piece
-int* iovec_offset = TORRENT_ALLOCA(int, cont_pieces + 1);
+TORRENT_ALLOCA(iovec_offset, int, cont_pieces + 1);
 int iov_len = 0;
 // this is the block index each piece starts at
 int block_start = 0;
 // keep track of the pieces that have had their refcount incremented
 // so we know to decrement them later
-int* refcount_pieces = TORRENT_ALLOCA(int, cont_pieces);
+TORRENT_ALLOCA(refcount_pieces, int, cont_pieces);
 for (int i = 0; i < cont_pieces; ++i)
 {
 cached_piece_entry* pe;
@@ -478,7 +478,7 @@ namespace libtorrent
 ++pe->piece_refcount;

 iov_len += build_iovec(pe, 0, p->blocks_in_piece
-, iov + iov_len, flushing + iov_len, block_start);
+, iov.subspan(iov_len).data(), flushing.subspan(iov_len).data(), block_start);

 block_start += p->blocks_in_piece;
 }
@@ -503,7 +503,7 @@ namespace libtorrent
 // unlock while we're performing the actual disk I/O
 // then lock again
 auto unlock = scoped_unlock(l);
-flush_iovec(first_piece, iov, flushing, iov_len, error);
+flush_iovec(first_piece, iov.data(), flushing.data(), iov_len, error);
 }

 block_start = 0;
@@ -526,7 +526,7 @@ namespace libtorrent
 m_disk_cache.maybe_free_piece(pe);
 }
 const int block_diff = iovec_offset[i+1] - iovec_offset[i];
-iovec_flushed(pe, flushing + iovec_offset[i], block_diff
+iovec_flushed(pe, flushing.subspan(iovec_offset[i]).data(), block_diff
 , block_start, error, completed_jobs);
 block_start += p->blocks_in_piece;
 }
@@ -742,9 +742,9 @@ namespace libtorrent
 TORRENT_PIECE_ASSERT(start >= 0, pe);
 TORRENT_PIECE_ASSERT(start < end, pe);

-file::iovec_t* iov = TORRENT_ALLOCA(file::iovec_t, pe->blocks_in_piece);
-int* flushing = TORRENT_ALLOCA(int, pe->blocks_in_piece);
-int iov_len = build_iovec(pe, start, end, iov, flushing, 0);
+TORRENT_ALLOCA(iov, file::iovec_t, pe->blocks_in_piece);
+TORRENT_ALLOCA(flushing, int, pe->blocks_in_piece);
+int iov_len = build_iovec(pe, start, end, iov.data(), flushing.data(), 0);
 if (iov_len == 0) return 0;

 TORRENT_PIECE_ASSERT(pe->cache_state <= cached_piece_entry::read_lru1 || pe->cache_state == cached_piece_entry::read_lru2, pe);
@@ -757,10 +757,10 @@ namespace libtorrent
 piece_refcount_holder refcount_holder(pe);
 auto unlocker = scoped_unlock(l);

-flush_iovec(pe, iov, flushing, iov_len, error);
+flush_iovec(pe, iov.data(), flushing.data(), iov_len, error);
 }

-iovec_flushed(pe, flushing, iov_len, 0, error, completed_jobs);
+iovec_flushed(pe, flushing.data(), iov_len, 0, error, completed_jobs);

 // if the cache is under high pressure, we need to evict
 // the blocks we just flushed to make room for more write pieces
@@ -979,7 +979,7 @@ namespace libtorrent
 time_point timeout = min_time();
 #endif

-cached_piece_entry** to_flush = TORRENT_ALLOCA(cached_piece_entry*, 200);
+TORRENT_ALLOCA(to_flush, cached_piece_entry*, 200);
 int num_flush = 0;

 for (list_iterator<cached_piece_entry> p = m_disk_cache.write_lru_pieces(); p.get(); p.next())
@@ -1235,7 +1235,7 @@ namespace libtorrent
 int const iov_len = m_disk_cache.pad_job(j, blocks_in_piece
 , m_settings.get_int(settings_pack::read_cache_line_size));

-file::iovec_t* iov = TORRENT_ALLOCA(file::iovec_t, iov_len);
+TORRENT_ALLOCA(iov, file::iovec_t, iov_len);

 std::unique_lock<std::mutex> l(m_cache_mutex);

@@ -1253,7 +1253,7 @@ namespace libtorrent
 l.unlock();

 // then we'll actually allocate the buffers
-int ret = m_disk_cache.allocate_iovec(iov, iov_len);
+int ret = m_disk_cache.allocate_iovec(iov.data(), iov_len);

 if (ret < 0)
 {
@@ -1283,7 +1283,7 @@ namespace libtorrent
 , m_settings.get_bool(settings_pack::coalesce_reads));
 time_point start_time = clock_type::now();

-ret = j->storage->get_storage_impl()->readv({iov, size_t(iov_len)}
+ret = j->storage->get_storage_impl()->readv(iov
 , j->piece, adjusted_offset, file_flags, j->error);

 if (!j->error.ec)
@@ -1302,7 +1302,7 @@ namespace libtorrent
 if (ret < 0)
 {
 // read failed. free buffers and return error
-m_disk_cache.free_iovec(iov, iov_len);
+m_disk_cache.free_iovec(iov.data(), iov_len);

 pe = m_disk_cache.find_piece(j);
 if (pe == nullptr)
@@ -1332,7 +1332,7 @@ namespace libtorrent
 // as soon we insert the blocks they may be evicted
 // (if using purgeable memory). In order to prevent that
 // until we can read from them, increment the refcounts
-m_disk_cache.insert_blocks(pe, block, iov, iov_len, j, block_cache::blocks_inc_refcount);
+m_disk_cache.insert_blocks(pe, block, iov.data(), iov_len, j, block_cache::blocks_inc_refcount);

 TORRENT_ASSERT(pe->blocks[block].buf);

@@ -2297,8 +2297,8 @@ namespace libtorrent
 // keep track of which blocks we have locked by incrementing
 // their refcounts. This is used to decrement only these blocks
 // later.
-int* locked_blocks = TORRENT_ALLOCA(int, blocks_in_piece);
-std::memset(locked_blocks, 0, blocks_in_piece * sizeof(int));
+TORRENT_ALLOCA(locked_blocks, int, blocks_in_piece);
+std::fill(locked_blocks.begin(), locked_blocks.end(), 0);
 int num_locked_blocks = 0;

 // increment the refcounts of all
src/file.cpp
@@ -173,10 +173,10 @@ namespace

 int preadv(HANDLE fd, libtorrent::file::iovec_t const* bufs, int num_bufs, std::int64_t file_offset)
 {
-OVERLAPPED* ol = TORRENT_ALLOCA(OVERLAPPED, num_bufs);
-std::memset(ol, 0, sizeof(OVERLAPPED) * num_bufs);
+TORRENT_ALLOCA(ol, OVERLAPPED, num_bufs);
+std::memset(ol.data(), 0, sizeof(OVERLAPPED) * num_bufs);

-HANDLE* h = TORRENT_ALLOCA(HANDLE, num_bufs);
+TORRENT_ALLOCA(h, HANDLE, num_bufs);

 for (int i = 0; i < num_bufs; ++i)
 {
@@ -209,7 +209,7 @@ namespace
 }
 }

-if (wait_for_multiple_objects(num_bufs, h) == WAIT_FAILED)
+if (wait_for_multiple_objects(num_bufs, h.data()) == WAIT_FAILED)
 {
 ret = -1;
 goto done;
@@ -243,10 +243,10 @@ done:

 int pwritev(HANDLE fd, libtorrent::file::iovec_t const* bufs, int num_bufs, std::int64_t file_offset)
 {
-OVERLAPPED* ol = TORRENT_ALLOCA(OVERLAPPED, num_bufs);
-std::memset(ol, 0, sizeof(OVERLAPPED) * num_bufs);
+TORRENT_ALLOCA(ol, OVERLAPPED, num_bufs);
+std::memset(ol.data(), 0, sizeof(OVERLAPPED) * num_bufs);

-HANDLE* h = TORRENT_ALLOCA(HANDLE, num_bufs);
+TORRENT_ALLOCA(h, HANDLE, num_bufs);

 for (int i = 0; i < num_bufs; ++i)
 {
@@ -279,7 +279,7 @@ done:
 }
 }

-if (wait_for_multiple_objects(num_bufs, h) == WAIT_FAILED)
+if (wait_for_multiple_objects(num_bufs, h.data()) == WAIT_FAILED)
 {
 ret = -1;
 goto done;
@@ -128,14 +128,14 @@ namespace libtorrent

 int to_process = m_send_barriers.front().next;

-span<char>* bufs;
-size_t num_bufs;
+span<span<char>> bufs;
 bool need_destruct = false;
 if (to_process != INT_MAX)
 {
-bufs = TORRENT_ALLOCA(span<char>, iovec.size());
+TORRENT_ALLOCA(abufs, span<char>, iovec.size());
+bufs = abufs;
 need_destruct = true;
-num_bufs = 0;
+size_t num_bufs = 0;
 for (int i = 0; to_process > 0 && i < iovec.size(); ++i)
 {
 ++num_bufs;
@@ -152,19 +152,19 @@ namespace libtorrent
 to_process -= size;
 }
 }
+bufs = bufs.first(num_bufs);
 }
 else
 {
-bufs = iovec.data();
-num_bufs = iovec.size();
+bufs = iovec;
 }

 int next_barrier = 0;
 span<span<char const>> out_iovec;
-if (num_bufs != 0)
+if (bufs.size() != 0)
 {
 std::tie(next_barrier, out_iovec)
-= m_send_barriers.front().enc_handler->encrypt({bufs, size_t(num_bufs)});
+= m_send_barriers.front().enc_handler->encrypt(bufs);
 }

 if (m_send_barriers.front().next != INT_MAX)
@@ -192,8 +192,8 @@ namespace libtorrent
 if (next_barrier != INT_MAX && next_barrier != 0)
 {
 int payload = 0;
-for (int i = 0; i < num_bufs; ++i)
-payload += int(bufs[i].size());
+for (auto buf : bufs)
+payload += int(buf.size());

 int overhead = 0;
 for (auto buf : out_iovec)
@@ -203,8 +203,8 @@ namespace libtorrent
 #endif
 if (need_destruct)
 {
-for (int i = 0; i < num_bufs; ++i)
-bufs[i].~span<char>();
+for (auto buf : bufs)
+buf.~span<char>();
 }
 return std::make_tuple(next_barrier, out_iovec);
 }
@@ -5373,18 +5373,18 @@ namespace libtorrent
 int priority = get_priority(channel);

 int max_channels = num_classes() + (t ? t->num_classes() : 0) + 2;
-bandwidth_channel** channels = TORRENT_ALLOCA(bandwidth_channel*, max_channels);
+TORRENT_ALLOCA(channels, bandwidth_channel*, max_channels);

 // collect the pointers to all bandwidth channels
 // that apply to this torrent
 int c = 0;

 c += m_ses.copy_pertinent_channels(*this, channel
-, channels + c, max_channels - c);
+, channels.subspan(c).data(), max_channels - c);
 if (t)
 {
 c += m_ses.copy_pertinent_channels(*t, channel
-, channels + c, max_channels - c);
+, channels.subspan(c).data(), max_channels - c);
 }

 #if TORRENT_USE_ASSERTS
@@ -5402,7 +5402,7 @@ namespace libtorrent
 bandwidth_manager* manager = m_ses.get_bandwidth_manager(channel);

 int ret = manager->request_bandwidth(self()
-, bytes, priority, channels, c);
+, bytes, priority, channels.data(), c);

 if (ret == 0)
 {
@@ -1275,7 +1275,7 @@ namespace libtorrent
 // pieces end up changing, instead of making
 // the piece list dirty, just update those pieces
 // instead
-int* incremented = TORRENT_ALLOCA(int, size);
+TORRENT_ALLOCA(incremented, int, size);
 int num_inc = 0;

 if (!m_dirty)
@@ -1372,7 +1372,7 @@ namespace libtorrent
 // pieces end up changing, instead of making
 // the piece list dirty, just update those pieces
 // instead
-int* decremented = TORRENT_ALLOCA(int, size);
+TORRENT_ALLOCA(decremented, int, size);
 int num_dec = 0;

 if (!m_dirty)
@@ -1964,8 +1964,8 @@ namespace libtorrent
 // lookups when finding a downloading_piece for a specific piece index.
 // this is important and needs to stay sorted that way, that's why
 // we're copying it here
-downloading_piece const** ordered_partials = TORRENT_ALLOCA(
-downloading_piece const*, m_downloads[piece_pos::piece_downloading].size());
+TORRENT_ALLOCA(ordered_partials, downloading_piece const*
+, m_downloads[piece_pos::piece_downloading].size());
 int num_ordered_partials = 0;

 // now, copy over the pointers. We also apply a filter here to not
@@ -2000,7 +2000,7 @@ namespace libtorrent
 // chances are that we'll just need a single piece, and once we've
 // picked from it we're done. Sorting the rest of the list in that
 // case is a waste of time.
-std::sort(ordered_partials, ordered_partials + num_ordered_partials
+std::sort(ordered_partials.begin(), ordered_partials.begin() + num_ordered_partials
 , std::bind(&piece_picker::partial_compare_rarest_first, this
 , _1, _2));
 }
@@ -2306,8 +2306,7 @@ get_out:
 + m_downloads[piece_pos::piece_full].size()));
 if (partials_size == 0) return ret;

-downloading_piece const** partials
-= TORRENT_ALLOCA(downloading_piece const*, partials_size);
+TORRENT_ALLOCA(partials, downloading_piece const*, partials_size);
 int c = 0;

 #if TORRENT_USE_INVARIANT_CHECKS
@@ -134,20 +134,20 @@ namespace libtorrent
 }
 }

-void advance_bufs(file::iovec_t*& bufs, int bytes)
+span<file::iovec_t> advance_bufs(span<file::iovec_t> bufs, int bytes)
 {
 int size = 0;
 for (;;)
 {
-size += int(bufs->iov_len);
+size += int(bufs.front().iov_len);
 if (size >= bytes)
 {
-bufs->iov_base = reinterpret_cast<char*>(bufs->iov_base)
-+ bufs->iov_len - (size - bytes);
-bufs->iov_len = size - bytes;
-return;
+bufs.front().iov_base = reinterpret_cast<char*>(bufs.front().iov_base)
++ bufs.front().iov_len - (size - bytes);
+bufs.front().iov_len = size - bytes;
+return bufs;
 }
-++bufs;
+bufs = bufs.subspan(1);
 }
 }

@@ -1148,11 +1148,11 @@ namespace libtorrent
 // copy the iovec array so we can use it to keep track of our current
 // location by updating the head base pointer and size. (see
 // advance_bufs())
-file::iovec_t* current_buf = TORRENT_ALLOCA(file::iovec_t, num_bufs);
-copy_bufs(bufs, size, current_buf);
-TORRENT_ASSERT(count_bufs(current_buf, size) == num_bufs);
+TORRENT_ALLOCA(current_buf, file::iovec_t, num_bufs);
+copy_bufs(bufs, size, current_buf.data());
+TORRENT_ASSERT(count_bufs(current_buf.data(), size) == num_bufs);

-file::iovec_t* tmp_buf = TORRENT_ALLOCA(file::iovec_t, num_bufs);
+TORRENT_ALLOCA(tmp_buf, file::iovec_t, num_bufs);

 // the number of bytes left to read in the current file (specified by
 // file_index). This is the minimum of (file_size - file_offset) and
@@ -1184,18 +1184,18 @@ namespace libtorrent

 // make a copy of the iovec array that _just_ covers the next
 // file_bytes_left bytes, i.e. just this one operation
-copy_bufs(current_buf, file_bytes_left, tmp_buf);
+copy_bufs(current_buf.data(), file_bytes_left, tmp_buf.data());

 int bytes_transferred = op.file_op(file_index, file_offset,
-file_bytes_left, tmp_buf, ec);
+file_bytes_left, tmp_buf.data(), ec);
 if (ec) return -1;

 // advance our position in the iovec array and the file offset.
-advance_bufs(current_buf, bytes_transferred);
+current_buf = advance_bufs(current_buf, bytes_transferred);
 bytes_left -= bytes_transferred;
 file_offset += bytes_transferred;

-TORRENT_ASSERT(count_bufs(current_buf, bytes_left) <= num_bufs);
+TORRENT_ASSERT(count_bufs(current_buf.data(), bytes_left) <= num_bufs);

 // if the file operation returned 0, we've hit end-of-file. We're done
 if (bytes_transferred == 0)
@@ -9604,8 +9604,7 @@ namespace libtorrent
 // busy blocks we may pick
 // first, figure out which blocks are eligible for picking
 // in "busy-mode"
-busy_block_t* busy_blocks
-= TORRENT_ALLOCA(busy_block_t, blocks_in_piece);
+TORRENT_ALLOCA(busy_blocks, busy_block_t, blocks_in_piece);
 int busy_count = 0;

 piece_picker::block_info const* info = picker->blocks_for_piece(pi);
@@ -9637,16 +9636,18 @@ namespace libtorrent
 std::printf("\n");
 #endif

+busy_blocks = busy_blocks.first(busy_count);
+
 // then sort blocks by the number of peers with requests
 // to the blocks (request the blocks with the fewest peers
 // first)
-std::sort(busy_blocks, busy_blocks + busy_count);
+std::sort(busy_blocks.begin(), busy_blocks.end());

 // then insert them into the interesting_blocks vector
-for (int k = 0; k < busy_count; ++k)
+for (auto block : busy_blocks)
 {
 interesting_blocks.push_back(
-piece_block(piece, busy_blocks[k].index));
+piece_block(piece, block.index));
 }
 }

@@ -1865,8 +1865,8 @@ bool utp_socket_impl::send_pkt(int const flags)
 // this alloca() statement won't necessarily produce
 // correctly aligned memory. That's why we ask for 7 more bytes
 // and adjust our pointer to be aligned later
-p = reinterpret_cast<packet*>(TORRENT_ALLOCA(char, sizeof(packet) + packet_size
-+ sizeof(packet*) - 1));
+TORRENT_ALLOCA(ps, char, sizeof(packet) + packet_size + sizeof(packet*) - 1);
+p = reinterpret_cast<packet*>(ps.data());
 p = reinterpret_cast<packet*>(align_pointer(p));
 UTP_LOGV("%8p: allocating %d bytes on the stack\n", static_cast<void*>(this), packet_size);
 p->allocated = packet_size;
@@ -221,9 +221,9 @@ void send_bitfield(tcp::socket& s, char const* bits)

 int num_pieces = int(strlen(bits));
 int packet_size = (num_pieces+7)/8 + 5;
-char* msg = (char*)TORRENT_ALLOCA(char, packet_size);
-memset(msg, 0, packet_size);
-char* ptr = msg;
+TORRENT_ALLOCA(msg, char, packet_size);
+std::fill(msg.begin(), msg.end(), 0);
+char* ptr = msg.data();
 write_int32(packet_size-4, ptr);
 write_int8(5, ptr);
 log("==> bitfield [%s]", bits);
@@ -232,7 +232,7 @@ void send_bitfield(tcp::socket& s, char const* bits)
 ptr[i/8] |= (bits[i] == '1' ? 1 : 0) << i % 8;
 }
 error_code ec;
-boost::asio::write(s, boost::asio::buffer(msg, packet_size)
+boost::asio::write(s, boost::asio::buffer(msg.data(), msg.size())
 , boost::asio::transfer_all(), ec);
 if (ec) TEST_ERROR(ec.message());
 }
@@ -978,21 +978,20 @@ TORRENT_TEST(iovec_advance_bufs)

 memcpy(iov2, iov1, sizeof(iov1));

-file::iovec_t* iov = iov2;
-file::iovec_t* end = iov2 + 10;
+span<file::iovec_t> iov = iov2;

 // advance iov 13 bytes. Make sure what's left fits pattern 1 shifted
 // 13 bytes
-advance_bufs(iov, 13);
+iov = advance_bufs(iov, 13);

 // make sure what's in
 int counter = 13;
-for (int i = 0; i < end - iov; ++i)
+for (auto buf : iov)
 {
-unsigned char* buf = (unsigned char*)iov[i].iov_base;
-for (int k = 0; k < int(iov[i].iov_len); ++k)
+unsigned char* buf_base = (unsigned char*)buf.iov_base;
+for (int k = 0; k < int(buf.iov_len); ++k)
 {
-TEST_EQUAL(int(buf[k]), (counter & 0xff));
+TEST_EQUAL(int(buf_base[k]), (counter & 0xff));
 ++counter;
 }
 }