use span for scatter/gather lists (#1260)

Use span for gather/scatter buffers and remove the explicit size parameter from fileop::file_op; the buffer count and total byte count are now derived from the span itself.
Steven Siloti 2016-10-26 17:40:56 -07:00 committed by Arvid Norberg
parent 6751a1eeb1
commit f5366bd816
20 changed files with 214 additions and 232 deletions
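The conversion follows one pattern throughout: functions that took a raw iovec pointer plus an explicit count now take a single span, and byte counts are recomputed from the buffers instead of being passed separately. A minimal sketch of that pattern, using C++20 std::span and a simplified iovec_t struct as stand-ins for libtorrent's own span<> and file::iovec_t:

#include <cstddef>
#include <cstdio>
#include <span>

// simplified stand-in for file::iovec_t
struct iovec_t { void* iov_base; std::size_t iov_len; };

// old shape: raw pointer plus an explicit count
int bufs_size_old(iovec_t const* bufs, int num_bufs)
{
    int size = 0;
    for (int i = 0; i < num_bufs; ++i) size += int(bufs[i].iov_len);
    return size;
}

// new shape: one span carries the pointer and the count together
int bufs_size(std::span<iovec_t const> bufs)
{
    int size = 0;
    for (auto const& b : bufs) size += int(b.iov_len);
    return size;
}

int main()
{
    char a[16], b[32];
    iovec_t iov[] = {{a, sizeof(a)}, {b, sizeof(b)}};
    // the caller passes the array directly; the span picks up its extent
    std::printf("%d == %d\n", bufs_size_old(iov, 2), bufs_size(iov));
}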

View File

@ -430,8 +430,8 @@ namespace libtorrent
cached_piece_entry* add_dirty_block(disk_io_job* j);
enum { blocks_inc_refcount = 1 };
void insert_blocks(cached_piece_entry* pe, int block, file::iovec_t *iov
, int iov_len, disk_io_job* j, int flags = 0);
void insert_blocks(cached_piece_entry* pe, int block, span<file::iovec_t const> iov
, disk_io_job* j, int flags = 0);
#if TORRENT_USE_INVARIANT_CHECKS
void check_invariant() const;

View File

@ -80,8 +80,8 @@ namespace libtorrent
void free_buffer(char* buf);
void free_multiple_buffers(span<char*> bufvec);
int allocate_iovec(file::iovec_t* iov, int iov_len);
void free_iovec(file::iovec_t* iov, int iov_len);
int allocate_iovec(span<file::iovec_t> iov);
void free_iovec(span<file::iovec_t const> iov);
int block_size() const { return m_block_size; }

View File

@ -487,8 +487,8 @@ namespace libtorrent
// low level flush operations, used by flush_range
int build_iovec(cached_piece_entry* pe, int start, int end
, file::iovec_t* iov, int* flushing, int block_base_index = 0);
void flush_iovec(cached_piece_entry* pe, file::iovec_t const* iov, int const* flushing
, span<file::iovec_t> iov, span<int> flushing, int block_base_index = 0);
void flush_iovec(cached_piece_entry* pe, span<file::iovec_t const> iov, span<int const> flushing
, int num_blocks, storage_error& error);
void iovec_flushed(cached_piece_entry* pe
, int* flushing, int num_blocks, int block_offset

View File

@ -39,6 +39,7 @@ POSSIBILITY OF SUCH DAMAGE.
#include "libtorrent/config.hpp"
#include "libtorrent/string_view.hpp"
#include "libtorrent/span.hpp"
#include "libtorrent/aux_/disable_warnings_push.hpp"
@ -314,9 +315,9 @@ namespace libtorrent
int open_mode() const { return m_open_mode; }
std::int64_t writev(std::int64_t file_offset, iovec_t const* bufs, int num_bufs
std::int64_t writev(std::int64_t file_offset, span<iovec_t const> bufs
, error_code& ec, int flags = 0);
std::int64_t readv(std::int64_t file_offset, iovec_t const* bufs, int num_bufs
std::int64_t readv(std::int64_t file_offset, span<iovec_t const> bufs
, error_code& ec, int flags = 0);
std::int64_t get_size(error_code& ec) const;
@ -352,7 +353,7 @@ namespace libtorrent
#endif
};
TORRENT_EXTRA_EXPORT int bufs_size(file::iovec_t const* bufs, int num_bufs);
TORRENT_EXTRA_EXPORT int bufs_size(span<file::iovec_t const> bufs);
}

View File

@ -49,8 +49,8 @@ namespace libtorrent
part_file(std::string const& path, std::string const& name, int num_pieces, int piece_size);
~part_file();
int writev(file::iovec_t const* bufs, int num_bufs, int piece, int offset, error_code& ec);
int readv(file::iovec_t const* bufs, int num_bufs, int piece, int offset, error_code& ec);
int writev(span<file::iovec_t const> bufs, int piece, int offset, error_code& ec);
int readv(span<file::iovec_t const> bufs, int piece, int offset, error_code& ec);
// free the slot the given piece is stored in. We no longer need to store this
// piece in the part file

View File

@ -148,9 +148,9 @@ namespace libtorrent
struct cached_piece_entry;
struct add_torrent_params;
TORRENT_EXTRA_EXPORT int copy_bufs(file::iovec_t const* bufs, int bytes, file::iovec_t* target);
TORRENT_EXTRA_EXPORT int copy_bufs(span<file::iovec_t const> bufs, int bytes, span<file::iovec_t> target);
TORRENT_EXTRA_EXPORT span<file::iovec_t> advance_bufs(span<file::iovec_t> bufs, int bytes);
TORRENT_EXTRA_EXPORT void clear_bufs(file::iovec_t const* bufs, int num_bufs);
TORRENT_EXTRA_EXPORT void clear_bufs(span<file::iovec_t const> bufs);
// flags for async_move_storage
enum move_flags_t
@ -612,15 +612,15 @@ namespace libtorrent
// what to do when it's actually touching the file
struct fileop
{
virtual int file_op(int const file_index, std::int64_t const file_offset, int const size
, file::iovec_t const* bufs, storage_error& ec) = 0;
virtual int file_op(int const file_index, std::int64_t const file_offset
, span<file::iovec_t const> bufs, storage_error& ec) = 0;
};
// this function is responsible for turning read and write operations in the
// torrent space (pieces) into read and write operations in the filesystem
// space (files on disk).
TORRENT_EXTRA_EXPORT int readwritev(file_storage const& files
, file::iovec_t const* bufs, int piece, int offset, int num_bufs
, span<file::iovec_t const> bufs, int piece, int offset
, fileop& op, storage_error& ec);
}
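With the explicit size parameter removed from fileop::file_op, implementations recover the byte count from the buffers themselves. A compilable sketch of a callback in that shape — zero_fill_op is a hypothetical type, with C++20 std::span and a simplified iovec_t standing in for libtorrent's span<> and file::iovec_t:

#include <cstdint>
#include <cstring>
#include <cstddef>
#include <span>

struct iovec_t { void* iov_base; std::size_t iov_len; };

// hypothetical fileop-style callback: with the size parameter gone,
// the byte count is recovered from the buffers themselves
struct zero_fill_op
{
    int file_op(int /*file_index*/, std::int64_t /*file_offset*/
        , std::span<iovec_t const> bufs)
    {
        int transferred = 0;
        for (auto const& b : bufs)
        {
            // e.g. a pad-file read yields zeroes
            std::memset(b.iov_base, 0, b.iov_len);
            transferred += int(b.iov_len);
        }
        return transferred; // what the old interface received as "size"
    }
};

int main()
{
    char block[32];
    iovec_t iov[] = {{block, sizeof(block)}};
    zero_fill_op op;
    return op.file_op(0, 0, iov) == int(sizeof(block)) ? 0 : 1;
}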

View File

@ -1291,8 +1291,8 @@ int block_cache::pad_job(disk_io_job const* j, int blocks_in_piece
return end - start;
}
void block_cache::insert_blocks(cached_piece_entry* pe, int block, file::iovec_t *iov
, int iov_len, disk_io_job* j, int flags)
void block_cache::insert_blocks(cached_piece_entry* pe, int block, span<file::iovec_t const> iov
, disk_io_job* j, int flags)
{
#ifdef TORRENT_EXPENSIVE_INVARIANT_CHECKS
INVARIANT_CHECK;
@ -1300,7 +1300,7 @@ void block_cache::insert_blocks(cached_piece_entry* pe, int block, file::iovec_t
TORRENT_ASSERT(pe);
TORRENT_ASSERT(pe->in_use);
TORRENT_PIECE_ASSERT(iov_len > 0, pe);
TORRENT_PIECE_ASSERT(iov.size() > 0, pe);
#if TORRENT_USE_ASSERTS
// we're not allowed to add dirty blocks
@ -1314,7 +1314,7 @@ void block_cache::insert_blocks(cached_piece_entry* pe, int block, file::iovec_t
TORRENT_ASSERT(pe->in_use);
for (int i = 0; i < iov_len; ++i, ++block)
for (int i = 0; i < iov.size(); ++i, ++block)
{
// each iovec buffer has to be the size of a block (or the size of the last block)
TORRENT_PIECE_ASSERT(iov[i].iov_len == (std::min)(block_size()

View File

@ -219,21 +219,22 @@ namespace libtorrent
// this function allocates buffers and
// fills in the iovec array with the buffers
int disk_buffer_pool::allocate_iovec(file::iovec_t* iov, int iov_len)
int disk_buffer_pool::allocate_iovec(span<file::iovec_t> iov)
{
std::unique_lock<std::mutex> l(m_pool_mutex);
for (int i = 0; i < iov_len; ++i)
for (auto& i : iov)
{
iov[i].iov_base = allocate_buffer_impl(l, "pending read");
iov[i].iov_len = block_size();
if (iov[i].iov_base == nullptr)
i.iov_base = allocate_buffer_impl(l, "pending read");
i.iov_len = block_size();
if (i.iov_base == nullptr)
{
// uh oh. We failed to allocate the buffer!
// we need to roll back and free all the buffers
// we've already allocated
for (int j = 0; j < i; ++j)
for (auto j : iov)
{
char* buf = static_cast<char*>(iov[j].iov_base);
if (j.iov_base == nullptr) break;
char* buf = static_cast<char*>(j.iov_base);
TORRENT_ASSERT(is_disk_buffer(buf, l));
free_buffer_impl(buf, l);
remove_buffer_in_use(buf);
@ -244,13 +245,13 @@ namespace libtorrent
return 0;
}
void disk_buffer_pool::free_iovec(file::iovec_t* iov, int iov_len)
void disk_buffer_pool::free_iovec(span<file::iovec_t const> iov)
{
// TODO: perhaps we should sort the buffers here?
std::unique_lock<std::mutex> l(m_pool_mutex);
for (int i = 0; i < iov_len; ++i)
for (auto i : iov)
{
char* buf = static_cast<char*>(iov[i].iov_base);
char* buf = static_cast<char*>(i.iov_base);
TORRENT_ASSERT(is_disk_buffer(buf, l));
free_buffer_impl(buf, l);
remove_buffer_in_use(buf);
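The allocate/rollback idiom above (fill each entry, and on failure free everything allocated so far, stopping at the first null entry) works the same against any span-like view. A standalone sketch of the idiom, assuming std::span and plain malloc/free rather than libtorrent's buffer pool:

#include <cstddef>
#include <cstdlib>
#include <span>

struct iovec_t { void* iov_base; std::size_t iov_len; };

// fill every entry of "iov" with a freshly allocated block; if one allocation
// fails, walk the same span again and free everything allocated so far,
// stopping at the first entry that was never filled in (the failed one)
int allocate_all(std::span<iovec_t> iov, std::size_t block_size)
{
    for (auto& i : iov)
    {
        i.iov_base = std::malloc(block_size);
        i.iov_len = block_size;
        if (i.iov_base != nullptr) continue;

        // roll back
        for (auto& j : iov)
        {
            if (j.iov_base == nullptr) break;
            std::free(j.iov_base);
            j.iov_base = nullptr;
        }
        return -1;
    }
    return 0;
}

int main()
{
    iovec_t iov[4];
    if (allocate_all(iov, 0x4000) != 0) return 1;
    for (auto& i : iov) std::free(i.iov_base);
}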

View File

@ -445,9 +445,9 @@ namespace libtorrent
// we keep the indices in the iovec_offset array
cont_pieces = range_end - range_start;
TORRENT_ALLOCA(iov, file::iovec_t, p->blocks_in_piece * cont_pieces);
TORRENT_ALLOCA(flushing, int, p->blocks_in_piece * cont_pieces);
int blocks_to_flush = p->blocks_in_piece * cont_pieces;
TORRENT_ALLOCA(iov, file::iovec_t, blocks_to_flush);
TORRENT_ALLOCA(flushing, int, blocks_to_flush);
// this is the offset into iov and flushing for each piece
TORRENT_ALLOCA(iovec_offset, int, cont_pieces + 1);
int iov_len = 0;
@ -479,7 +479,7 @@ namespace libtorrent
++pe->piece_refcount;
iov_len += build_iovec(pe, 0, p->blocks_in_piece
, iov.subspan(iov_len).data(), flushing.subspan(iov_len).data(), block_start);
, iov.subspan(iov_len), flushing.subspan(iov_len), block_start);
block_start += p->blocks_in_piece;
}
@ -504,7 +504,7 @@ namespace libtorrent
// unlock while we're performing the actual disk I/O
// then lock again
auto unlock = scoped_unlock(l);
flush_iovec(first_piece, iov.data(), flushing.data(), iov_len, error);
flush_iovec(first_piece, iov, flushing, iov_len, error);
}
block_start = 0;
@ -552,7 +552,7 @@ namespace libtorrent
// multiple pieces, the subsequent pieces after the first one, must have
// their block indices start where the previous one left off
int disk_io_thread::build_iovec(cached_piece_entry* pe, int start, int end
, file::iovec_t* iov, int* flushing, int block_base_index)
, span<file::iovec_t> iov, span<int> flushing, int block_base_index)
{
INVARIANT_CHECK;
@ -616,7 +616,7 @@ namespace libtorrent
// the cached_piece_entry is supposed to point to the
// first piece, if the iovec spans multiple pieces
void disk_io_thread::flush_iovec(cached_piece_entry* pe
, file::iovec_t const* iov, int const* flushing
, span<file::iovec_t const> iov, span<int const> flushing
, int num_blocks, storage_error& error)
{
TORRENT_PIECE_ASSERT(!error, pe);
@ -637,7 +637,7 @@ namespace libtorrent
? file::coalesce_buffers : 0;
// issue the actual write operation
file::iovec_t const* iov_start = iov;
auto iov_start = iov;
int flushing_start = 0;
int piece = pe->piece;
int blocks_in_piece = pe->blocks_in_piece;
@ -646,12 +646,12 @@ namespace libtorrent
{
if (i < num_blocks && flushing[i] == flushing[i - 1] + 1) continue;
int ret = pe->storage->get_storage_impl()->writev(
{iov_start, size_t(i - flushing_start)}
iov_start.first(i - flushing_start)
, piece + flushing[flushing_start] / blocks_in_piece
, (flushing[flushing_start] % blocks_in_piece) * block_size
, file_flags, error);
if (ret < 0 || error) failed = true;
iov_start = &iov[i];
iov_start = iov.subspan(i);
flushing_start = i;
}
@ -745,7 +745,7 @@ namespace libtorrent
TORRENT_ALLOCA(iov, file::iovec_t, pe->blocks_in_piece);
TORRENT_ALLOCA(flushing, int, pe->blocks_in_piece);
int iov_len = build_iovec(pe, start, end, iov.data(), flushing.data(), 0);
int iov_len = build_iovec(pe, start, end, iov, flushing, 0);
if (iov_len == 0) return 0;
TORRENT_PIECE_ASSERT(pe->cache_state <= cached_piece_entry::read_lru1 || pe->cache_state == cached_piece_entry::read_lru2, pe);
@ -758,7 +758,7 @@ namespace libtorrent
piece_refcount_holder refcount_holder(pe);
auto unlocker = scoped_unlock(l);
flush_iovec(pe, iov.data(), flushing.data(), iov_len, error);
flush_iovec(pe, iov, flushing, iov_len, error);
}
iovec_flushed(pe, flushing.data(), iov_len, 0, error, completed_jobs);
@ -1252,7 +1252,7 @@ namespace libtorrent
l.unlock();
// then we'll actually allocate the buffers
int ret = m_disk_cache.allocate_iovec(iov.data(), iov_len);
int ret = m_disk_cache.allocate_iovec(iov);
if (ret < 0)
{
@ -1301,7 +1301,7 @@ namespace libtorrent
if (ret < 0)
{
// read failed. free buffers and return error
m_disk_cache.free_iovec(iov.data(), iov_len);
m_disk_cache.free_iovec(iov);
pe = m_disk_cache.find_piece(j);
if (pe == nullptr)
@ -1331,7 +1331,7 @@ namespace libtorrent
// as soon as we insert the blocks they may be evicted
// (if using purgeable memory). In order to prevent that
// until we can read from them, increment the refcounts
m_disk_cache.insert_blocks(pe, block, iov.data(), iov_len, j, block_cache::blocks_inc_refcount);
m_disk_cache.insert_blocks(pe, block, iov, j, block_cache::blocks_inc_refcount);
TORRENT_ASSERT(pe->blocks[block].buf);
@ -2403,7 +2403,7 @@ namespace libtorrent
ph->h.update({static_cast<char const*>(iov.iov_base), iov.iov_len});
l.lock();
m_disk_cache.insert_blocks(pe, i, &iov, 1, j);
m_disk_cache.insert_blocks(pe, i, iov, j);
l.unlock();
}
}
@ -2623,7 +2623,7 @@ namespace libtorrent
offset += block_size;
l.lock();
m_disk_cache.insert_blocks(pe, i, &iov, 1, j);
m_disk_cache.insert_blocks(pe, i, iov, j);
}
refcount_holder.release();
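flush_iovec() above issues one writev() per contiguous run of blocks; with spans, each run is carved out with first() and the remainder advanced with subspan(). A standalone sketch of that slicing (for_each_run is illustrative; std::span stands in for libtorrent's span<>):

#include <cstdio>
#include <span>
#include <vector>

// split "flushing" (sorted block indices) into contiguous runs and take the
// matching slice of the buffer span for each run -- the same slicing the
// writev loop in flush_iovec() performs, with the disk I/O left out
void for_each_run(std::span<int const> flushing, std::span<char const> bufs)
{
    int run_start = 0;
    for (int i = 1; i <= int(flushing.size()); ++i)
    {
        // keep extending the run while the block indices stay consecutive
        if (i < int(flushing.size()) && flushing[i] == flushing[i - 1] + 1) continue;

        std::span<char const> run = bufs.subspan(run_start).first(i - run_start);
        std::printf("run of %zu buffers starting at block %d\n"
            , run.size(), flushing[run_start]);
        run_start = i;
    }
}

int main()
{
    std::vector<int> blocks = {0, 1, 2, 5, 6, 9};
    std::vector<char> bufs(blocks.size()); // one stand-in entry per block
    for_each_run(blocks, bufs);
}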

View File

@ -209,21 +209,21 @@ namespace
}
}
if (wait_for_multiple_objects(num_bufs, h.data()) == WAIT_FAILED)
if (wait_for_multiple_objects(int(h.size()), h.data()) == WAIT_FAILED)
{
ret = -1;
goto done;
}
for (int i = 0; i < num_bufs; ++i)
for (auto& o : ol)
{
if (WaitForSingleObject(ol[i].hEvent, INFINITE) == WAIT_FAILED)
if (WaitForSingleObject(o.hEvent, INFINITE) == WAIT_FAILED)
{
ret = -1;
break;
}
DWORD num_read;
if (GetOverlappedResult(fd, &ol[i], &num_read, FALSE) == FALSE)
if (GetOverlappedResult(fd, &o, &num_read, FALSE) == FALSE)
{
#ifdef ERROR_CANT_WAIT
TORRENT_ASSERT(GetLastError() != ERROR_CANT_WAIT);
@ -235,8 +235,8 @@ namespace
}
done:
for (int i = 0; i < num_bufs; ++i)
CloseHandle(h[i]);
for (auto hnd : h)
CloseHandle(hnd);
return ret;
}
@ -279,21 +279,21 @@ done:
}
}
if (wait_for_multiple_objects(num_bufs, h.data()) == WAIT_FAILED)
if (wait_for_multiple_objects(int(h.size()), h.data()) == WAIT_FAILED)
{
ret = -1;
goto done;
}
for (int i = 0; i < num_bufs; ++i)
for (auto& o : ol)
{
if (WaitForSingleObject(ol[i].hEvent, INFINITE) == WAIT_FAILED)
if (WaitForSingleObject(o.hEvent, INFINITE) == WAIT_FAILED)
{
ret = -1;
break;
}
DWORD num_written;
if (GetOverlappedResult(fd, &ol[i], &num_written, FALSE) == FALSE)
if (GetOverlappedResult(fd, &o, &num_written, FALSE) == FALSE)
{
#ifdef ERROR_CANT_WAIT
TORRENT_ASSERT(GetLastError() != ERROR_CANT_WAIT);
@ -305,8 +305,8 @@ done:
}
done:
for (int i = 0; i < num_bufs; ++i)
CloseHandle(h[i]);
for (auto hnd : h)
CloseHandle(hnd);
return ret;
}
@ -336,11 +336,11 @@ static_assert((libtorrent::file::sparse & libtorrent::file::attribute_mask) == 0
namespace libtorrent
{
int bufs_size(file::iovec_t const* bufs, int num_bufs)
int bufs_size(span<file::iovec_t const> bufs)
{
std::size_t size = 0;
for (file::iovec_t const* i = bufs, *end(bufs + num_bufs); i < end; ++i)
size += i->iov_len;
for (auto buf : bufs)
size += buf.iov_len;
return int(size);
}
@ -1676,81 +1676,76 @@ typedef struct _FILE_ALLOCATED_RANGE_BUFFER {
namespace {
#if !TORRENT_USE_PREADV
void gather_copy(file::iovec_t const* bufs, int num_bufs, char* dst)
void gather_copy(span<file::iovec_t const> bufs, char* dst)
{
std::size_t offset = 0;
for (int i = 0; i < num_bufs; ++i)
for (auto buf : bufs)
{
std::memcpy(dst + offset, bufs[i].iov_base, bufs[i].iov_len);
offset += bufs[i].iov_len;
std::memcpy(dst + offset, buf.iov_base, buf.iov_len);
offset += buf.iov_len;
}
}
void scatter_copy(file::iovec_t const* bufs, int num_bufs, char const* src)
void scatter_copy(span<file::iovec_t const> bufs, char const* src)
{
std::size_t offset = 0;
for (int i = 0; i < num_bufs; ++i)
for (auto buf : bufs)
{
std::memcpy(bufs[i].iov_base, src + offset, bufs[i].iov_len);
offset += bufs[i].iov_len;
std::memcpy(buf.iov_base, src + offset, buf.iov_len);
offset += buf.iov_len;
}
}
bool coalesce_read_buffers(file::iovec_t const*& bufs, int& num_bufs
, file::iovec_t* tmp)
bool coalesce_read_buffers(span<file::iovec_t const>& bufs
, file::iovec_t& tmp)
{
int const buf_size = bufs_size(bufs, num_bufs);
int const buf_size = bufs_size(bufs);
char* buf = static_cast<char*>(std::malloc(buf_size));
if (!buf) return false;
tmp->iov_base = buf;
tmp->iov_len = buf_size;
bufs = tmp;
num_bufs = 1;
tmp.iov_base = buf;
tmp.iov_len = buf_size;
bufs = span<file::iovec_t const>(tmp);
return true;
}
void coalesce_read_buffers_end(file::iovec_t const* bufs, int const num_bufs
void coalesce_read_buffers_end(span<file::iovec_t const> bufs
, char* const buf, bool const copy)
{
if (copy) scatter_copy(bufs, num_bufs, buf);
if (copy) scatter_copy(bufs, buf);
std::free(buf);
}
bool coalesce_write_buffers(file::iovec_t const*& bufs, int& num_bufs
, file::iovec_t* tmp)
bool coalesce_write_buffers(span<file::iovec_t const>& bufs
, file::iovec_t& tmp)
{
int const buf_size = bufs_size(bufs, num_bufs);
int const buf_size = bufs_size(bufs);
char* buf = static_cast<char*>(std::malloc(buf_size));
if (!buf) return false;
gather_copy(bufs, num_bufs, buf);
tmp->iov_base = buf;
tmp->iov_len = buf_size;
bufs = tmp;
num_bufs = 1;
gather_copy(bufs, buf);
tmp.iov_base = buf;
tmp.iov_len = buf_size;
bufs = span<file::iovec_t const>(tmp);
return true;
}
#endif // TORRENT_USE_PREADV
template <class Fun>
std::int64_t iov(Fun f, handle_type fd, std::int64_t file_offset, file::iovec_t const* bufs_in
, int num_bufs_in, error_code& ec)
std::int64_t iov(Fun f, handle_type fd, std::int64_t file_offset
, span<file::iovec_t const> bufs, error_code& ec)
{
file::iovec_t const* bufs = bufs_in;
int num_bufs = num_bufs_in;
#if TORRENT_USE_PREADV
int ret = 0;
while (num_bufs > 0)
while (!bufs.empty())
{
#ifdef IOV_MAX
int const nbufs = (std::min)(num_bufs, IOV_MAX);
auto const nbufs = bufs.first((std::min)(int(bufs.size()), IOV_MAX));
#else
int const nbufs = num_bufs;
auto const nbufs = bufs;
#endif
int tmp_ret = 0;
tmp_ret = f(fd, bufs, nbufs, file_offset);
tmp_ret = f(fd, nbufs.data(), int(nbufs.size()), file_offset);
if (tmp_ret < 0)
{
#ifdef TORRENT_WINDOWS
@ -1767,20 +1762,19 @@ typedef struct _FILE_ALLOCATED_RANGE_BUFFER {
// just need to issue the read/write operation again. In either case,
// punt that to the upper layer, as reissuing the operations is
// complicated here
const int expected_len = bufs_size(bufs, nbufs);
const int expected_len = bufs_size(nbufs);
if (tmp_ret < expected_len) break;
num_bufs -= nbufs;
bufs += nbufs;
bufs = bufs.subspan(nbufs.size());
}
return ret;
#elif TORRENT_USE_PREAD
int ret = 0;
for (file::iovec_t const* i = bufs, *end(bufs + num_bufs); i < end; ++i)
for (auto i : bufs)
{
int tmp_ret = f(fd, i->iov_base, i->iov_len, file_offset);
int tmp_ret = f(fd, i.iov_base, i.iov_len, file_offset);
if (tmp_ret < 0)
{
#ifdef TORRENT_WINDOWS
@ -1792,7 +1786,7 @@ typedef struct _FILE_ALLOCATED_RANGE_BUFFER {
}
file_offset += tmp_ret;
ret += tmp_ret;
if (tmp_ret < int(i->iov_len)) break;
if (tmp_ret < int(i.iov_len)) break;
}
return ret;
@ -1815,9 +1809,9 @@ typedef struct _FILE_ALLOCATED_RANGE_BUFFER {
}
#endif
for (file::iovec_t const* i = bufs, *end(bufs + num_bufs); i < end; ++i)
for (auto i : bufs)
{
int tmp_ret = f(fd, i->iov_base, i->iov_len);
int tmp_ret = f(fd, i.iov_base, i.iov_len);
if (tmp_ret < 0)
{
#ifdef TORRENT_WINDOWS
@ -1829,7 +1823,7 @@ typedef struct _FILE_ALLOCATED_RANGE_BUFFER {
}
file_offset += tmp_ret;
ret += tmp_ret;
if (tmp_ret < int(i->iov_len)) break;
if (tmp_ret < int(i.iov_len)) break;
}
return ret;
@ -1841,7 +1835,7 @@ typedef struct _FILE_ALLOCATED_RANGE_BUFFER {
// this has to be thread safe and atomic. i.e. on posix systems it has to be
// turned into a series of pread() calls
std::int64_t file::readv(std::int64_t file_offset, iovec_t const* bufs, int num_bufs
std::int64_t file::readv(std::int64_t file_offset, span<iovec_t const> bufs
, error_code& ec, int flags)
{
if (m_file_handle == INVALID_HANDLE_VALUE)
@ -1854,40 +1848,38 @@ typedef struct _FILE_ALLOCATED_RANGE_BUFFER {
return -1;
}
TORRENT_ASSERT((m_open_mode & rw_mask) == read_only || (m_open_mode & rw_mask) == read_write);
TORRENT_ASSERT(bufs);
TORRENT_ASSERT(num_bufs > 0);
TORRENT_ASSERT(!bufs.empty());
TORRENT_ASSERT(is_open());
#if TORRENT_USE_PREADV
TORRENT_UNUSED(flags);
int ret = iov(&::preadv, native_handle(), file_offset, bufs, num_bufs, ec);
int ret = iov(&::preadv, native_handle(), file_offset, bufs, ec);
#else
// there's no point in coalescing single buffer writes
if (num_bufs == 1)
if (bufs.size() == 1)
{
flags &= ~file::coalesce_buffers;
}
file::iovec_t tmp;
file::iovec_t const* const orig_bufs = bufs;
int const orig_num_bufs = num_bufs;
iovec_t tmp;
span<iovec_t const> tmp_bufs = bufs;
if ((flags & file::coalesce_buffers))
{
if (!coalesce_read_buffers(bufs, num_bufs, &tmp))
if (!coalesce_read_buffers(tmp_bufs, tmp))
// ok, that failed, don't coalesce this read
flags &= ~file::coalesce_buffers;
}
#if TORRENT_USE_PREAD
int ret = iov(&::pread, native_handle(), file_offset, bufs, num_bufs, ec);
int ret = iov(&::pread, native_handle(), file_offset, tmp_bufs, ec);
#else
int ret = iov(&::read, native_handle(), file_offset, bufs, num_bufs, ec);
int ret = iov(&::read, native_handle(), file_offset, tmp_bufs, ec);
#endif
if ((flags & file::coalesce_buffers))
coalesce_read_buffers_end(orig_bufs, orig_num_bufs
coalesce_read_buffers_end(bufs
, static_cast<char*>(tmp.iov_base), !ec);
#endif
@ -1897,7 +1889,7 @@ typedef struct _FILE_ALLOCATED_RANGE_BUFFER {
// This has to be thread safe, i.e. atomic.
// that means, on posix this has to be turned into a series of
// pwrite() calls
std::int64_t file::writev(std::int64_t file_offset, iovec_t const* bufs, int num_bufs
std::int64_t file::writev(std::int64_t file_offset, span<iovec_t const> bufs
, error_code& ec, int flags)
{
if (m_file_handle == INVALID_HANDLE_VALUE)
@ -1910,8 +1902,7 @@ typedef struct _FILE_ALLOCATED_RANGE_BUFFER {
return -1;
}
TORRENT_ASSERT((m_open_mode & rw_mask) == write_only || (m_open_mode & rw_mask) == read_write);
TORRENT_ASSERT(bufs);
TORRENT_ASSERT(num_bufs > 0);
TORRENT_ASSERT(!bufs.empty());
TORRENT_ASSERT(is_open());
ec.clear();
@ -1919,27 +1910,27 @@ typedef struct _FILE_ALLOCATED_RANGE_BUFFER {
#if TORRENT_USE_PREADV
TORRENT_UNUSED(flags);
int ret = iov(&::pwritev, native_handle(), file_offset, bufs, num_bufs, ec);
int ret = iov(&::pwritev, native_handle(), file_offset, bufs, ec);
#else
// there's no point in coalescing single buffer writes
if (num_bufs == 1)
if (bufs.size() == 1)
{
flags &= ~file::coalesce_buffers;
}
file::iovec_t tmp;
iovec_t tmp;
if (flags & file::coalesce_buffers)
{
if (!coalesce_write_buffers(bufs, num_bufs, &tmp))
if (!coalesce_write_buffers(bufs, tmp))
// ok, that failed, don't coalesce writes
flags &= ~file::coalesce_buffers;
}
#if TORRENT_USE_PREAD
int ret = iov(&::pwrite, native_handle(), file_offset, bufs, num_bufs, ec);
int ret = iov(&::pwrite, native_handle(), file_offset, bufs, ec);
#else
int ret = iov(&::write, native_handle(), file_offset, bufs, num_bufs, ec);
int ret = iov(&::write, native_handle(), file_offset, bufs, ec);
#endif
if (flags & file::coalesce_buffers)
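The iov() helper above walks the buffer span in IOV_MAX-sized chunks, taking the leading slice with first() and dropping it with subspan() once submitted. A sketch of that loop with the syscall replaced by a stub (submit, submit_all and fake_iov_max are illustrative names; std::span stands in for libtorrent's span<>):

#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <span>
#include <vector>

struct iovec_t { void* iov_base; std::size_t iov_len; };

constexpr std::size_t fake_iov_max = 4; // stands in for the platform's IOV_MAX

// pretend syscall: report the whole chunk as transferred
int submit(std::span<iovec_t const> chunk)
{
    int n = 0;
    for (auto const& b : chunk) n += int(b.iov_len);
    return n;
}

// walk the buffer list in IOV_MAX-sized chunks: first() takes the leading
// slice, subspan() drops it once it has been submitted
int submit_all(std::span<iovec_t const> bufs)
{
    int ret = 0;
    while (!bufs.empty())
    {
        auto const chunk = bufs.first(std::min(bufs.size(), fake_iov_max));
        int const transferred = submit(chunk);
        if (transferred < 0) return -1;
        ret += transferred;
        bufs = bufs.subspan(chunk.size());
    }
    return ret;
}

int main()
{
    char blocks[7][16];
    std::vector<iovec_t> iov;
    for (auto& b : blocks) iov.push_back({b, sizeof(b)});
    std::printf("%d bytes submitted\n", submit_all(iov));
}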

View File

@ -95,7 +95,7 @@ namespace libtorrent
// parse header
std::unique_ptr<std::uint32_t[]> header(new std::uint32_t[m_header_size]);
file::iovec_t b = {header.get(), size_t(m_header_size) };
int n = m_file.readv(0, &b, 1, ec);
int n = m_file.readv(0, b, ec);
if (ec) return;
// we don't have a full header. consider the file empty
@ -170,7 +170,7 @@ namespace libtorrent
return slot;
}
int part_file::writev(file::iovec_t const* bufs, int num_bufs, int piece, int offset, error_code& ec)
int part_file::writev(span<file::iovec_t const> bufs, int piece, int offset, error_code& ec)
{
TORRENT_ASSERT(offset >= 0);
std::unique_lock<std::mutex> l(m_mutex);
@ -188,10 +188,10 @@ namespace libtorrent
l.unlock();
std::int64_t slot_offset = std::int64_t(m_header_size) + std::int64_t(slot) * m_piece_size;
return m_file.writev(slot_offset + offset, bufs, num_bufs, ec);
return m_file.writev(slot_offset + offset, bufs, ec);
}
int part_file::readv(file::iovec_t const* bufs, int num_bufs
int part_file::readv(span<file::iovec_t const> bufs
, int piece, int offset, error_code& ec)
{
TORRENT_ASSERT(offset >= 0);
@ -213,7 +213,7 @@ namespace libtorrent
l.unlock();
std::int64_t slot_offset = std::int64_t(m_header_size) + std::int64_t(slot) * m_piece_size;
return m_file.readv(slot_offset + offset, bufs, num_bufs, ec);
return m_file.readv(slot_offset + offset, bufs, ec);
}
void part_file::open_file(int mode, error_code& ec)
@ -326,11 +326,11 @@ namespace libtorrent
l.unlock();
file::iovec_t v = { buf.get(), size_t(block_to_copy) };
v.iov_len = m_file.readv(slot_offset + piece_offset, &v, 1, ec);
v.iov_len = m_file.readv(slot_offset + piece_offset, v, ec);
TORRENT_ASSERT(!ec);
if (ec || v.iov_len == 0) return;
std::int64_t ret = f.writev(file_offset, &v, 1, ec);
std::int64_t ret = f.writev(file_offset, v, ec);
TORRENT_ASSERT(ec || ret == v.iov_len);
if (ec || ret != v.iov_len) return;
@ -412,7 +412,7 @@ namespace libtorrent
std::memset(ptr, 0, m_header_size - (ptr - reinterpret_cast<char*>(header.get())));
file::iovec_t b = {header.get(), size_t(m_header_size) };
m_file.writev(0, &b, 1, ec);
m_file.writev(0, b, ec);
if (ec) return;
}
}
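Several call sites above now pass a single file::iovec_t where a span is expected (e.g. m_file.readv(0, b, ec)), which suggests libtorrent's span can be built directly from a single object. std::span has no such implicit conversion, so an equivalent sketch spells the one-element view out:

#include <cstddef>
#include <cstdio>
#include <span>

struct iovec_t { void* iov_base; std::size_t iov_len; };

int total_len(std::span<iovec_t const> bufs)
{
    int n = 0;
    for (auto const& b : bufs) n += int(b.iov_len);
    return n;
}

int main()
{
    char header[64];
    iovec_t b = {header, sizeof(header)};
    // libtorrent's span accepts "b" directly at these call sites; with
    // std::span the one-element view is spelled out
    std::printf("%d\n", total_len(std::span<iovec_t const>(&b, 1)));
}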

View File

@ -115,22 +115,18 @@ POSSIBILITY OF SUCH DAMAGE.
namespace libtorrent
{
int copy_bufs(file::iovec_t const* bufs, int bytes, file::iovec_t* target)
int copy_bufs(span<file::iovec_t const> bufs, int bytes, span<file::iovec_t> target)
{
int size = 0;
int ret = 1;
for (;;)
for (int i = 0;; i++)
{
*target = *bufs;
size += int(bufs->iov_len);
target[i] = bufs[i];
size += int(bufs[i].iov_len);
if (size >= bytes)
{
target->iov_len -= size - bytes;
return ret;
target[i].iov_len -= size - bytes;
return i + 1;
}
++bufs;
++target;
++ret;
}
}
@ -151,20 +147,20 @@ namespace libtorrent
}
}
void clear_bufs(file::iovec_t const* bufs, int num_bufs)
void clear_bufs(span<file::iovec_t const> bufs)
{
for (file::iovec_t const* i = bufs, *end(bufs + num_bufs); i < end; ++i)
std::memset(i->iov_base, 0, i->iov_len);
for (auto buf : bufs)
std::memset(buf.iov_base, 0, buf.iov_len);
}
namespace {
int count_bufs(file::iovec_t const* bufs, int bytes)
int count_bufs(span<file::iovec_t const> bufs, int bytes)
{
int size = 0;
int count = 1;
if (bytes == 0) return 0;
for (file::iovec_t const* i = bufs;; ++i, ++count)
for (auto i = bufs.begin();; ++i, ++count)
{
size += int(i->iov_len);
if (size >= bytes) return count;
@ -225,18 +221,15 @@ namespace libtorrent
int file_op(int const file_index
, std::int64_t const file_offset
, int const size
, file::iovec_t const* bufs, storage_error& ec)
, span<file::iovec_t const> bufs, storage_error& ec)
final
{
if (m_storage.files().pad_file_at(file_index))
{
// writing to a pad-file is a no-op
return size;
return bufs_size(bufs);
}
int num_bufs = count_bufs(bufs, size);
if (file_index < int(m_storage.m_file_priority.size())
&& m_storage.m_file_priority[file_index] == 0)
{
@ -245,7 +238,7 @@ namespace libtorrent
error_code e;
peer_request map = m_storage.files().map_file(file_index
, file_offset, 0);
int ret = m_storage.m_part_file->writev(bufs, num_bufs
int ret = m_storage.m_part_file->writev(bufs
, map.piece, map.start, e);
if (e)
@ -279,7 +272,7 @@ namespace libtorrent
error_code e;
int ret = handle->writev(adjusted_offset
, bufs, num_bufs, e, m_flags);
, bufs, e, m_flags);
// set this unconditionally in case the upper layer would like to treat
// short reads as errors
@ -291,7 +284,7 @@ namespace libtorrent
#ifdef TORRENT_DISK_STATS
write_access_log(adjusted_offset + ret , handle->file_id(), op_end | op_write, clock_type::now());
#endif
TORRENT_ASSERT(ret <= bufs_size(bufs, num_bufs));
TORRENT_ASSERT(ret <= bufs_size(bufs));
if (e)
{
@ -316,17 +309,14 @@ namespace libtorrent
int file_op(int const file_index
, std::int64_t const file_offset
, int const size
, file::iovec_t const* bufs, storage_error& ec)
, span<file::iovec_t const> bufs, storage_error& ec)
final
{
int num_bufs = count_bufs(bufs, size);
if (m_storage.files().pad_file_at(file_index))
{
// reading from a pad file yields zeroes
clear_bufs(bufs, num_bufs);
return size;
clear_bufs(bufs);
return bufs_size(bufs);
}
if (file_index < int(m_storage.m_file_priority.size())
@ -337,7 +327,7 @@ namespace libtorrent
error_code e;
peer_request map = m_storage.files().map_file(file_index
, file_offset, 0);
int ret = m_storage.m_part_file->readv(bufs, num_bufs
int ret = m_storage.m_part_file->readv(bufs
, map.piece, map.start, e);
if (e)
@ -367,7 +357,7 @@ namespace libtorrent
error_code e;
int ret = handle->readv(adjusted_offset
, bufs, num_bufs, e, m_flags);
, bufs, e, m_flags);
// set this unconditionally in case the upper layer would like to treat
// short reads as errors
@ -379,7 +369,7 @@ namespace libtorrent
#ifdef TORRENT_DISK_STATS
write_access_log(adjusted_offset + ret , handle->file_id(), op_end | op_read, clock_type::now());
#endif
TORRENT_ASSERT(ret <= bufs_size(bufs, num_bufs));
TORRENT_ASSERT(ret <= bufs_size(bufs));
if (e)
{
@ -1104,31 +1094,30 @@ namespace libtorrent
#ifdef TORRENT_SIMULATE_SLOW_READ
std::this_thread::sleep_for(seconds(1));
#endif
return readwritev(files(), bufs.data(), piece, offset, int(bufs.size()), op, ec);
return readwritev(files(), bufs, piece, offset, op, ec);
}
int default_storage::writev(span<file::iovec_t const> bufs
, int piece, int offset, int flags, storage_error& ec)
{
write_fileop op(*this, flags);
return readwritev(files(), bufs.data(), piece, offset, int(bufs.size()), op, ec);
return readwritev(files(), bufs, piece, offset, op, ec);
}
// much of what needs to be done when reading and writing is buffer
// management and piece to file mapping. Most of that is the same for reading
// and writing. This function is a template, and the fileop decides what to
// do with the file and the buffers.
int readwritev(file_storage const& files, file::iovec_t const* const bufs
, const int piece, const int offset, const int num_bufs, fileop& op
int readwritev(file_storage const& files, span<file::iovec_t const> const bufs
, const int piece, const int offset, fileop& op
, storage_error& ec)
{
TORRENT_ASSERT(bufs != nullptr);
TORRENT_ASSERT(piece >= 0);
TORRENT_ASSERT(piece < files.num_pieces());
TORRENT_ASSERT(offset >= 0);
TORRENT_ASSERT(num_bufs > 0);
TORRENT_ASSERT(bufs.size() > 0);
const int size = bufs_size(bufs, num_bufs);
const int size = bufs_size(bufs);
TORRENT_ASSERT(size > 0);
TORRENT_ASSERT(files.is_loaded());
@ -1148,11 +1137,11 @@ namespace libtorrent
// copy the iovec array so we can use it to keep track of our current
// location by updating the head base pointer and size. (see
// advance_bufs())
TORRENT_ALLOCA(current_buf, file::iovec_t, num_bufs);
copy_bufs(bufs, size, current_buf.data());
TORRENT_ASSERT(count_bufs(current_buf.data(), size) == num_bufs);
TORRENT_ALLOCA(current_buf, file::iovec_t, bufs.size());
copy_bufs(bufs, size, current_buf);
TORRENT_ASSERT(count_bufs(current_buf, size) == bufs.size());
TORRENT_ALLOCA(tmp_buf, file::iovec_t, num_bufs);
TORRENT_ALLOCA(tmp_buf, file::iovec_t, bufs.size());
// the number of bytes left to read in the current file (specified by
// file_index). This is the minimum of (file_size - file_offset) and
@ -1184,10 +1173,10 @@ namespace libtorrent
// make a copy of the iovec array that _just_ covers the next
// file_bytes_left bytes, i.e. just this one operation
copy_bufs(current_buf.data(), file_bytes_left, tmp_buf.data());
int tmp_bufs_used = copy_bufs(current_buf, file_bytes_left, tmp_buf);
int bytes_transferred = op.file_op(file_index, file_offset,
file_bytes_left, tmp_buf.data(), ec);
int bytes_transferred = op.file_op(file_index, file_offset
, tmp_buf.first(tmp_bufs_used), ec);
if (ec) return -1;
// advance our position in the iovec array and the file offset.
@ -1195,7 +1184,7 @@ namespace libtorrent
bytes_left -= bytes_transferred;
file_offset += bytes_transferred;
TORRENT_ASSERT(count_bufs(current_buf.data(), bytes_left) <= num_bufs);
TORRENT_ASSERT(count_bufs(current_buf, bytes_left) <= bufs.size());
// if the file operation returned 0, we've hit end-of-file. We're done
if (bytes_transferred == 0)
@ -1373,12 +1362,12 @@ namespace libtorrent
int readv(span<file::iovec_t const> bufs
, int, int, int, storage_error&) override
{
return bufs_size(bufs.data(), int(bufs.size()));
return bufs_size(bufs);
}
int writev(span<file::iovec_t const> bufs
, int, int, int, storage_error&) override
{
return bufs_size(bufs.data(), int(bufs.size()));
return bufs_size(bufs);
}
bool verify_resume_data(add_torrent_params const&
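readwritev() relies on copy_bufs()/count_bufs() to carve out exactly the iovec entries that cover the bytes destined for one file. A standalone sketch of that bookkeeping (copy_bufs_sketch is an illustrative name; std::span and a simplified iovec_t stand in for the libtorrent types):

#include <cstddef>
#include <cstdio>
#include <span>

struct iovec_t { void* iov_base; std::size_t iov_len; };

// copy entries from "bufs" into "target" until exactly "bytes" bytes are
// covered, trimming the last copied entry -- the same bookkeeping the
// rewritten copy_bufs() above performs for readwritev()
int copy_bufs_sketch(std::span<iovec_t const> bufs, int bytes, std::span<iovec_t> target)
{
    int size = 0;
    for (int i = 0;; ++i)
    {
        target[i] = bufs[i];
        size += int(bufs[i].iov_len);
        if (size >= bytes)
        {
            target[i].iov_len -= std::size_t(size - bytes);
            return i + 1; // number of entries actually used
        }
    }
}

int main()
{
    char a[100], b[100], c[100];
    iovec_t const src[] = {{a, 100}, {b, 100}, {c, 100}};
    iovec_t dst[3];
    int const n = copy_bufs_sketch(src, 150, dst);
    // prints "2 entries, last one trimmed to 50 bytes"
    std::printf("%d entries, last one trimmed to %zu bytes\n", n, dst[n - 1].iov_len);
}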

View File

@ -616,7 +616,7 @@ namespace libtorrent
v.resize(std::size_t(s));
if (s == 0) return 0;
file::iovec_t b = {&v[0], size_t(s) };
std::int64_t read = f.readv(0, &b, 1, ec);
std::int64_t read = f.readv(0, b, ec);
if (read != s) return -3;
if (ec) return -3;
return 0;

View File

@ -277,7 +277,7 @@ void save_file(char const* filename, char const* data, int size)
return;
}
file::iovec_t b = { (void*)data, size_t(size) };
out.writev(0, &b, 1, ec);
out.writev(0, b, ec);
TEST_CHECK(!ec);
if (ec)
{
@ -692,7 +692,7 @@ void create_random_files(std::string const& path, const int file_sizes[], int nu
{
int s = (std::min)(to_write, 300000);
file::iovec_t b = { random_data, size_t(s)};
f.writev(offset, &b, 1, ec);
f.writev(offset, b, ec);
if (ec) std::printf("failed to write file \"%s\": (%d) %s\n"
, full_path.c_str(), ec.value(), ec.message().c_str());
offset += s;

View File

@ -51,12 +51,12 @@ struct test_storage_impl : storage_interface
int readv(span<file::iovec_t const> bufs
, int piece, int offset, int flags, storage_error& ec) override
{
return bufs_size(bufs.data(), int(bufs.size()));
return bufs_size(bufs);
}
int writev(span<file::iovec_t const> bufs
, int piece, int offset, int flags, storage_error& ec) override
{
return bufs_size(bufs.data(), int(bufs.size()));
return bufs_size(bufs);
}
bool has_any_file(storage_error& ec) override { return false; }
@ -112,8 +112,8 @@ static void nop() {}
wj.storage = pm; \
cached_piece_entry* pe = nullptr; \
int ret = 0; \
file::iovec_t iov[1]; \
(void)iov[0]; \
file::iovec_t iov; \
(void)iov; \
(void)ret; \
(void)pe
@ -153,9 +153,9 @@ static void nop() {}
wj.piece = p; \
wj.requester = (void*)1; \
pe = bc.allocate_piece(&wj, cached_piece_entry::read_lru1); \
ret = bc.allocate_iovec(iov, 1); \
ret = bc.allocate_iovec(iov); \
TEST_EQUAL(ret, 0); \
bc.insert_blocks(pe, b, iov, 1, &wj)
bc.insert_blocks(pe, b, iov, &wj)
void test_write()
{
@ -419,8 +419,8 @@ void test_iovec()
{
TEST_SETUP;
ret = bc.allocate_iovec(iov, 1);
bc.free_iovec(iov, 1);
ret = bc.allocate_iovec(iov);
bc.free_iovec(iov);
}
void test_unaligned_read()

View File

@ -55,7 +55,7 @@ int touch_file(std::string const& filename, int size)
if (!f.open(filename, file::write_only, ec)) return -1;
if (ec) return -1;
file::iovec_t b = {&v[0], v.size()};
std::int64_t written = f.writev(0, &b, 1, ec);
std::int64_t written = f.writev(0, b, ec);
if (written != int(v.size())) return -3;
if (ec) return -3;
return 0;
@ -295,14 +295,14 @@ TORRENT_TEST(file)
TEST_EQUAL(ec, error_code());
if (ec) std::printf("%s\n", ec.message().c_str());
file::iovec_t b = {(void*)"test", 4};
TEST_EQUAL(f.writev(0, &b, 1, ec), 4);
TEST_EQUAL(f.writev(0, b, ec), 4);
if (ec)
std::printf("writev failed: [%s] %s\n", ec.category().name(), ec.message().c_str());
TEST_CHECK(!ec);
char test_buf[5] = {0};
b.iov_base = test_buf;
b.iov_len = 4;
TEST_EQUAL(f.readv(0, &b, 1, ec), 4);
TEST_EQUAL(f.readv(0, b, ec), 4);
if (ec)
std::printf("readv failed: [%s] %s\n", ec.category().name(), ec.message().c_str());
TEST_EQUAL(ec, error_code());
@ -325,7 +325,7 @@ TORRENT_TEST(hard_link)
TEST_EQUAL(ec, error_code());
file::iovec_t b = {(void*)"abcdefghijklmnopqrstuvwxyz", 26};
TEST_EQUAL(f.writev(0, &b, 1, ec), 26);
TEST_EQUAL(f.writev(0, b, ec), 26);
if (ec)
std::printf("writev failed: [%s] %s\n", ec.category().name(), ec.message().c_str());
TEST_EQUAL(ec, error_code());
@ -346,7 +346,7 @@ TORRENT_TEST(hard_link)
char test_buf[27] = {0};
b.iov_base = test_buf;
b.iov_len = 27;
TEST_EQUAL(f.readv(0, &b, 1, ec), 26);
TEST_EQUAL(f.readv(0, b, ec), 26);
if (ec)
std::printf("readv failed: [%s] %s\n", ec.category().name(), ec.message().c_str());
TEST_EQUAL(ec, error_code());
@ -372,7 +372,7 @@ TORRENT_TEST(coalesce_buffer)
TEST_EQUAL(ec, error_code());
if (ec) std::printf("%s\n", ec.message().c_str());
file::iovec_t b[2] = {{(void*)"test", 4}, {(void*)"foobar", 6}};
TEST_EQUAL(f.writev(0, b, 2, ec, file::coalesce_buffers), 4 + 6);
TEST_EQUAL(f.writev(0, {b, 2}, ec, file::coalesce_buffers), 4 + 6);
if (ec)
std::printf("writev failed: [%s] %s\n", ec.category().name(), ec.message().c_str());
TEST_CHECK(!ec);
@ -382,7 +382,7 @@ TORRENT_TEST(coalesce_buffer)
b[0].iov_len = 4;
b[1].iov_base = test_buf2;
b[1].iov_len = 6;
TEST_EQUAL(f.readv(0, b, 2, ec), 4 + 6);
TEST_EQUAL(f.readv(0, {b, 2}, ec), 4 + 6);
if (ec)
{
std::printf("readv failed: [%s] %s\n"

View File

@ -150,7 +150,7 @@ void write_test_file()
TEST_CHECK(!ec);
if (ec) std::printf("file error: %s\n", ec.message().c_str());
file::iovec_t b = { data_buffer, 3216};
test_file.writev(0, &b, 1, ec);
test_file.writev(0, b, ec);
TEST_CHECK(!ec);
if (ec) std::printf("file error: %s\n", ec.message().c_str());
test_file.close();

View File

@ -70,7 +70,7 @@ TORRENT_TEST(part_file)
for (int i = 0; i < 1024; ++i) buf[i] = i;
file::iovec_t v = {&buf, 1024};
pf.writev(&v, 1, 10, 0, ec);
pf.writev(v, 10, 0, ec);
if (ec) std::printf("part_file::writev: %s\n", ec.message().c_str());
pf.flush_metadata(ec);
@ -88,7 +88,7 @@ TORRENT_TEST(part_file)
memset(buf, 0, sizeof(buf));
pf.readv(&v, 1, 10, 0, ec);
pf.readv(v, 10, 0, ec);
if (ec) std::printf("part_file::readv: %s\n", ec.message().c_str());
for (int i = 0; i < 1024; ++i)
@ -102,7 +102,7 @@ TORRENT_TEST(part_file)
memset(buf, 0, sizeof(buf));
file::iovec_t v = {&buf, 1024};
pf.readv(&v, 1, 10, 0, ec);
pf.readv(v, 10, 0, ec);
if (ec) std::printf("part_file::readv: %s\n", ec.message().c_str());
for (int i = 0; i < 1024; ++i)
@ -137,7 +137,7 @@ TORRENT_TEST(part_file)
memset(buf, 0, sizeof(buf));
output.readv(0, &v, 1, ec);
output.readv(0, v, ec);
if (ec) std::printf("exported file read: %s\n", ec.message().c_str());
for (int i = 0; i < 1024; ++i)

View File

@ -912,7 +912,7 @@ TORRENT_TEST(iovec_copy_bufs)
alloc_iov(iov1, 10);
fill_pattern(iov1, 10);
TEST_CHECK(bufs_size(iov1, 10) >= 106);
TEST_CHECK(bufs_size({iov1, 10}) >= 106);
// copy exactly 106 bytes from iov1 to iov2
int num_bufs = copy_bufs(iov1, 106, iov2);
@ -941,7 +941,7 @@ TORRENT_TEST(iovec_clear_bufs)
alloc_iov(iov, 10);
fill_pattern(iov, 10);
clear_bufs(iov, 10);
clear_bufs({iov, 10});
for (int i = 0; i < 10; ++i)
{
unsigned char* buf = (unsigned char*)iov[i].iov_base;
@ -963,7 +963,7 @@ TORRENT_TEST(iovec_bufs_size)
int expected_size = 0;
for (int k = 0; k < i; ++k) expected_size += i * (k + 1);
TEST_EQUAL(bufs_size(iov, i), expected_size);
TEST_EQUAL(bufs_size({iov, size_t(i)}), expected_size);
free_iov(iov, i);
}
@ -1018,8 +1018,8 @@ struct test_fileop : fileop
{
explicit test_fileop(int stripe_size) : m_stripe_size(stripe_size) {}
int file_op(int const file_index, std::int64_t const file_offset, int const size
, file::iovec_t const* bufs, storage_error& ec) override
int file_op(int const file_index, std::int64_t const file_offset
, span<file::iovec_t const> bufs, storage_error& ec) override
{
size_t offset = size_t(file_offset);
if (file_index >= int(m_file_data.size()))
@ -1027,7 +1027,7 @@ struct test_fileop : fileop
m_file_data.resize(file_index + 1);
}
const int write_size = (std::min)(m_stripe_size, size);
const int write_size = (std::min)(m_stripe_size, bufs_size(bufs));
std::vector<char>& file = m_file_data[file_index];
@ -1039,9 +1039,9 @@ struct test_fileop : fileop
int left = write_size;
while (left > 0)
{
const int copy_size = (std::min)(left, int(bufs->iov_len));
memcpy(&file[offset], bufs->iov_base, copy_size);
++bufs;
const int copy_size = (std::min)(left, int(bufs.front().iov_len));
memcpy(&file[offset], bufs.front().iov_base, copy_size);
bufs = bufs.subspan(1);
offset += copy_size;
left -= copy_size;
}
@ -1057,15 +1057,15 @@ struct test_read_fileop : fileop
// EOF after size bytes read
explicit test_read_fileop(int size) : m_size(size), m_counter(0) {}
int file_op(int const file_index, std::int64_t const file_offset, int const size
, file::iovec_t const* bufs, storage_error& ec) override
int file_op(int const file_index, std::int64_t const file_offset
, span<file::iovec_t const> bufs, storage_error& ec) override
{
int local_size = (std::min)(m_size, size);
int local_size = (std::min)(m_size, bufs_size(bufs));
const int read = local_size;
while (local_size > 0)
{
unsigned char* p = (unsigned char*)bufs->iov_base;
const int len = (std::min)(int(bufs->iov_len), local_size);
unsigned char* p = (unsigned char*)bufs.front().iov_base;
const int len = (std::min)(int(bufs.front().iov_len), local_size);
for (int i = 0; i < len; ++i)
{
p[i] = m_counter & 0xff;
@ -1073,7 +1073,7 @@ struct test_read_fileop : fileop
}
local_size -= len;
m_size -= len;
++bufs;
bufs = bufs.subspan(1);
}
return read;
}
@ -1088,8 +1088,8 @@ struct test_error_fileop : fileop
explicit test_error_fileop(int error_file)
: m_error_file(error_file) {}
int file_op(int const file_index, std::int64_t const file_offset, int const size
, file::iovec_t const* bufs, storage_error& ec) override
int file_op(int const file_index, std::int64_t const file_offset
, span<file::iovec_t const> bufs, storage_error& ec) override
{
if (m_error_file == file_index)
{
@ -1099,7 +1099,7 @@ struct test_error_fileop : fileop
ec.operation = storage_error::read;
return -1;
}
return size;
return bufs_size(bufs);
}
int m_error_file;
@ -1129,14 +1129,14 @@ TORRENT_TEST(readwritev_stripe_1)
test_fileop fop(1);
storage_error ec;
TEST_CHECK(bufs_size(iov, num_bufs) >= fs.total_size());
TEST_CHECK(bufs_size({iov, size_t(num_bufs)}) >= fs.total_size());
file::iovec_t iov2[num_bufs];
copy_bufs(iov, int(fs.total_size()), iov2);
int num_bufs2 = count_bufs(iov2, int(fs.total_size()));
TEST_CHECK(num_bufs2 <= num_bufs);
int ret = readwritev(fs, iov2, 0, 0, num_bufs2, fop, ec);
int ret = readwritev(fs, {iov2, size_t(num_bufs2)}, 0, 0, fop, ec);
TEST_EQUAL(ret, fs.total_size());
TEST_EQUAL(fop.m_file_data.size(), 4);
@ -1163,7 +1163,7 @@ TORRENT_TEST(readwritev_single_buffer)
file::iovec_t iov = { &buf[0], buf.size() };
fill_pattern(&iov, 1);
int ret = readwritev(fs, &iov, 0, 0, 1, fop, ec);
int ret = readwritev(fs, iov, 0, 0, fop, ec);
TEST_EQUAL(ret, fs.total_size());
TEST_EQUAL(fop.m_file_data.size(), 4);
@ -1188,7 +1188,7 @@ TORRENT_TEST(readwritev_read)
file::iovec_t iov = { &buf[0], buf.size() };
// read everything
int ret = readwritev(fs, &iov, 0, 0, 1, fop, ec);
int ret = readwritev(fs, iov, 0, 0, fop, ec);
TEST_EQUAL(ret, fs.total_size());
TEST_CHECK(check_pattern(buf, 0));
@ -1205,7 +1205,7 @@ TORRENT_TEST(readwritev_read_short)
, static_cast<size_t>(fs.total_size()) };
// read everything
int ret = readwritev(fs, &iov, 0, 0, 1, fop, ec);
int ret = readwritev(fs, iov, 0, 0, fop, ec);
TEST_EQUAL(ec.file, 3);
@ -1225,7 +1225,7 @@ TORRENT_TEST(readwritev_error)
, static_cast<size_t>(fs.total_size()) };
// read everything
int ret = readwritev(fs, &iov, 0, 0, 1, fop, ec);
int ret = readwritev(fs, iov, 0, 0, fop, ec);
TEST_EQUAL(ret, -1);
TEST_EQUAL(ec.file, 2);
@ -1252,7 +1252,7 @@ TORRENT_TEST(readwritev_zero_size_files)
, static_cast<size_t>(fs.total_size()) };
// read everything
int ret = readwritev(fs, &iov, 0, 0, 1, fop, ec);
int ret = readwritev(fs, iov, 0, 0, fop, ec);
TEST_EQUAL(ret, fs.total_size());
TEST_CHECK(check_pattern(buf, 0));
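The test fileops above consume the buffer list front to back with front() and subspan(1). A standalone sketch of that consumption loop (fill_buffers is an illustrative name; std::span and a simplified iovec_t stand in for the libtorrent types):

#include <algorithm>
#include <cstddef>
#include <span>
#include <vector>

struct iovec_t { void* iov_base; std::size_t iov_len; };

// consume the buffer list one entry at a time: front() is the current buffer
// and subspan(1) pops it -- the same shape as the test fileops above, here
// filling the buffers with a running counter
int fill_buffers(std::span<iovec_t const> bufs, int bytes, unsigned char& counter)
{
    int written = 0;
    while (bytes > 0 && !bufs.empty())
    {
        auto* p = static_cast<unsigned char*>(bufs.front().iov_base);
        int const len = std::min(int(bufs.front().iov_len), bytes);
        for (int i = 0; i < len; ++i) p[i] = counter++;
        bytes -= len;
        written += len;
        bufs = bufs.subspan(1); // move on to the next buffer
    }
    return written;
}

int main()
{
    std::vector<unsigned char> a(8), b(8);
    iovec_t iov[] = {{a.data(), a.size()}, {b.data(), b.size()}};
    unsigned char counter = 0;
    return fill_buffers(iov, 12, counter) == 12 ? 0 : 1;
}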

View File

@ -59,7 +59,7 @@ TORRENT_TEST(web_seed_redirect)
return;
}
file::iovec_t b = { random_data, size_t(16000)};
f.writev(0, &b, 1, ec);
f.writev(0, b, ec);
fs.add_file("test_file", 16000);
int port = start_web_server();