block_size is a constant, no need to pass it around as a variable

arvidn 2018-01-03 12:54:03 +01:00 committed by Arvid Norberg
parent 70448a2c3a
commit 0731200149
16 changed files with 116 additions and 167 deletions
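
In short: the 16 KiB block size defined by the BitTorrent protocol becomes a single constexpr in disk_interface.hpp, and the constructor parameter that used to thread it through disk_buffer_pool, block_cache, disk_io_thread and torrent is removed. A minimal sketch of the shape of the change (signatures abbreviated from the hunks below, not complete declarations):

// before: the block size was a runtime value handed down through every layer
//   disk_buffer_pool(int block_size, io_service& ios, ...);
//   block_cache(int block_size, io_service& ios, ...);
//   disk_io_thread(io_service& ios, counters& cnt, int block_size = 16 * 1024);
// after: a single protocol-wide constant
constexpr int default_block_size = 0x4000; // 16 KiB

// the recurring call-site pattern, rounding a piece up to whole blocks:
constexpr int blocks_in_piece(int piece_size)
{
    return (piece_size + default_block_size - 1) / default_block_size;
}
static_assert(blocks_in_piece(0x4001) == 2, "a partial last block still counts");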

View File

@ -343,8 +343,7 @@ namespace aux {
struct TORRENT_EXTRA_EXPORT block_cache : disk_buffer_pool
{
block_cache(int block_size, io_service& ios
, std::function<void()> const& trigger_trim);
block_cache(io_service& ios, std::function<void()> const& trigger_trim);
private:

View File

@ -55,8 +55,7 @@ namespace libtorrent {
struct TORRENT_EXTRA_EXPORT disk_buffer_pool
{
disk_buffer_pool(int block_size, io_service& ios
, std::function<void()> const& trigger_trim);
disk_buffer_pool(io_service& ios, std::function<void()> const& trigger_trim);
disk_buffer_pool(disk_buffer_pool const&) = delete;
disk_buffer_pool& operator=(disk_buffer_pool const&) = delete;
~disk_buffer_pool();
@ -76,8 +75,6 @@ namespace libtorrent {
int allocate_iovec(span<iovec_t> iov);
void free_iovec(span<iovec_t const> iov);
int block_size() const { return m_block_size; }
int in_use() const
{
std::unique_lock<std::mutex> l(m_pool_mutex);
@ -92,10 +89,6 @@ namespace libtorrent {
void free_buffer_impl(char* buf, std::unique_lock<std::mutex>& l);
char* allocate_buffer_impl(std::unique_lock<std::mutex>& l, char const* category);
// number of bytes per block. The BitTorrent
// protocol defines the block size to 16 KiB.
const int m_block_size;
// number of disk buffers currently allocated
int m_in_use;

View File

@ -67,6 +67,9 @@ namespace libtorrent {
struct file_open_mode_tag;
using file_open_mode_t = flags::bitfield_flag<std::uint8_t, file_open_mode_tag>;
// this is a bittorrent constant
constexpr int default_block_size = 0x4000;
namespace file_open_mode
{
// open the file for reading only
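
Because 0x4000 is a power of two, callers can take the intra-block offset with a mask and the block index with a plain division, as the hunks below do. A self-contained sketch of that arithmetic (the root2() helper removed from torrent.cpp used to assert the power-of-two property):

#include <cassert>
constexpr int default_block_size = 0x4000;
int main()
{
    int const offset = 0x6000; // 24 KiB into a piece
    int const block = offset / default_block_size;              // == 1
    int const block_offset = offset & (default_block_size - 1); // == 0x2000
    assert(block == 1 && block_offset == 0x2000);
    // a request of `size` bytes starting at `offset` touches one or two blocks
    int const size = 0x3000;
    int const blocks_to_read =
        block_offset > 0 && size > default_block_size - block_offset ? 2 : 1;
    assert(blocks_to_read == 2); // 0x2000 + 0x3000 crosses into the next block
}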

View File

@ -117,7 +117,7 @@ namespace libtorrent {
// for write jobs, returns true if its block
// is not dirty anymore
bool completed(cached_piece_entry const* pe, int block_size);
bool completed(cached_piece_entry const* pe);
// for read and write, this is the disk_buffer_holder
// for other jobs, it may point to other job-specific types

View File

@ -285,9 +285,7 @@ namespace aux {
, disk_interface
, buffer_allocator_interface
{
disk_io_thread(io_service& ios
, counters& cnt
, int block_size = 16 * 1024);
disk_io_thread(io_service& ios, counters& cnt);
~disk_io_thread();
void set_settings(settings_pack const* sett);

View File

@ -226,7 +226,7 @@ namespace libtorrent {
struct TORRENT_EXTRA_EXPORT torrent_hot_members
{
torrent_hot_members(aux::session_interface& ses
, add_torrent_params const& p, int block_size, bool session_paused);
, add_torrent_params const& p, bool session_paused);
protected:
// the piece picker. This is allocated lazily. When we don't
@ -305,12 +305,6 @@ namespace libtorrent {
// the maximum number of connections for this torrent
std::uint32_t m_max_connections:24;
// the size of a request block
// each piece is divided into these
// blocks when requested. The block size is
// 1 << m_block_size_shift
std::uint32_t m_block_size_shift:5;
// the state of this torrent (queued, checking, downloading, etc.)
std::uint32_t m_state:3;
@ -330,7 +324,7 @@ namespace libtorrent {
{
public:
torrent(aux::session_interface& ses, int block_size
torrent(aux::session_interface& ses
, bool session_paused, add_torrent_params const& p);
~torrent() override;
@ -853,7 +847,12 @@ namespace libtorrent {
void peer_lost(typed_bitfield<piece_index_t> const& bits
, peer_connection const* peer);
int block_size() const { TORRENT_ASSERT(m_block_size_shift > 0); return 1 << m_block_size_shift; }
int block_size() const
{
return m_torrent_file
? (std::min)(m_torrent_file->piece_length(), default_block_size)
: default_block_size;
}
peer_request to_req(piece_block const& p) const;
void disconnect_all(error_code const& ec, operation_t op);
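
With the 5-bit m_block_size_shift field gone, the block size is derived on demand: the protocol's 16 KiB, capped by the piece length for torrents whose pieces are smaller than one block. A standalone restatement of the rule above (an illustrative free function, not the member itself):

#include <algorithm>
constexpr int default_block_size = 0x4000;
int block_size_for(int piece_length)
{
    return std::min(piece_length, default_block_size);
}
// block_size_for(0x2000)  == 0x2000 (8 KiB pieces -> 8 KiB blocks)
// block_size_for(0x40000) == 0x4000 (256 KiB pieces -> the standard 16 KiB)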

View File

@ -340,9 +340,9 @@ cached_piece_entry::~cached_piece_entry()
#endif
}
block_cache::block_cache(int block_size, io_service& ios
block_cache::block_cache(io_service& ios
, std::function<void()> const& trigger_trim)
: disk_buffer_pool(block_size, ios, trigger_trim)
: disk_buffer_pool(ios, trigger_trim)
, m_last_cache_op(cache_miss)
, m_ghost_size(8)
, m_max_volatile_blocks(100)
@ -374,7 +374,7 @@ int block_cache::try_read(disk_io_job* j, buffer_allocator_interface& allocator
#if TORRENT_USE_ASSERTS
p->piece_log.push_back(piece_log_t(j->action, j->d.io.offset / 0x4000));
#endif
cache_hit(p, j->d.io.offset / block_size(), bool(j->flags & disk_interface::volatile_read));
cache_hit(p, j->d.io.offset / default_block_size, bool(j->flags & disk_interface::volatile_read));
ret = copy_from_piece(p, j, allocator, expect_no_fail);
if (ret < 0) return ret;
@ -609,7 +609,7 @@ cached_piece_entry* block_cache::allocate_piece(disk_io_job const* j, std::uint1
if (p == nullptr)
{
int const piece_size = j->storage->files().piece_size(j->piece);
int const blocks_in_piece = (piece_size + block_size() - 1) / block_size();
int const blocks_in_piece = (piece_size + default_block_size - 1) / default_block_size;
cached_piece_entry pe;
pe.piece = j->piece;
@ -706,8 +706,8 @@ cached_piece_entry* block_cache::add_dirty_block(disk_io_job* j)
TORRENT_PIECE_ASSERT(pe->in_use, pe);
int block = j->d.io.offset / block_size();
TORRENT_ASSERT((j->d.io.offset % block_size()) == 0);
int block = j->d.io.offset / default_block_size;
TORRENT_ASSERT((j->d.io.offset % default_block_size) == 0);
// we should never add a new dirty block on a piece
// that has checked the hash. Before we add it, the
@ -1108,7 +1108,7 @@ int block_cache::try_evict_blocks(int num, cached_piece_entry* ignore)
// the first pass, only evict blocks that have been
// hashed
if (pass == 0 && pe->hash)
end = pe->hash->offset / block_size();
end = pe->hash->offset / default_block_size;
// go through the blocks and evict the ones
// that are not dirty and not referenced
@ -1244,9 +1244,9 @@ void block_cache::move_to_ghost(cached_piece_entry* pe)
int block_cache::pad_job(disk_io_job const* j, int blocks_in_piece
, int read_ahead) const
{
int block_offset = j->d.io.offset & (block_size() - 1);
int start = j->d.io.offset / block_size();
int end = block_offset > 0 && (read_ahead > block_size() - block_offset) ? start + 2 : start + 1;
int block_offset = j->d.io.offset & (default_block_size - 1);
int start = j->d.io.offset / default_block_size;
int end = block_offset > 0 && (read_ahead > default_block_size - block_offset) ? start + 2 : start + 1;
// take the read-ahead into account
// make sure to not overflow in this case
@ -1267,15 +1267,15 @@ void block_cache::insert_blocks(cached_piece_entry* pe, int block, span<iovec_t
TORRENT_ASSERT(pe->in_use);
TORRENT_PIECE_ASSERT(iov.size() > 0, pe);
cache_hit(pe, j->d.io.offset / block_size(), bool(j->flags & disk_interface::volatile_read));
cache_hit(pe, j->d.io.offset / default_block_size, bool(j->flags & disk_interface::volatile_read));
TORRENT_ASSERT(pe->in_use);
for (auto const& buf : iov)
{
// each iovec buffer has to be the size of a block (or the size of the last block)
TORRENT_PIECE_ASSERT(int(buf.size()) == std::min(block_size()
, pe->storage->files().piece_size(pe->piece) - block * block_size()), pe);
TORRENT_PIECE_ASSERT(int(buf.size()) == std::min(default_block_size
, pe->storage->files().piece_size(pe->piece) - block * default_block_size), pe);
// no nullptrs allowed
TORRENT_ASSERT(buf.data() != nullptr);
@ -1417,7 +1417,7 @@ void block_cache::abort_dirty(cached_piece_entry* pe)
int block_cache::drain_piece_bufs(cached_piece_entry& p, std::vector<char*>& buf)
{
int const piece_size = p.storage->files().piece_size(p.piece);
int const blocks_in_piece = (piece_size + block_size() - 1) / block_size();
int const blocks_in_piece = (piece_size + default_block_size - 1) / default_block_size;
int ret = 0;
TORRENT_PIECE_ASSERT(p.in_use, &p);
@ -1628,17 +1628,17 @@ int block_cache::copy_from_piece(cached_piece_entry* const pe
TORRENT_PIECE_ASSERT(pe->in_use, pe);
// copy from the cache and update the last use timestamp
int block = j->d.io.offset / block_size();
int block_offset = j->d.io.offset & (block_size() - 1);
int block = j->d.io.offset / default_block_size;
int block_offset = j->d.io.offset & (default_block_size - 1);
int buffer_offset = 0;
int size = j->d.io.buffer_size;
int const blocks_to_read = block_offset > 0 && (size > block_size() - block_offset) ? 2 : 1;
TORRENT_PIECE_ASSERT(size <= block_size(), pe);
int const blocks_to_read = block_offset > 0 && (size > default_block_size - block_offset) ? 2 : 1;
TORRENT_PIECE_ASSERT(size <= default_block_size, pe);
int const start_block = block;
#if TORRENT_USE_ASSERTS
int const piece_size = j->storage->files().piece_size(j->piece);
int const blocks_in_piece = (piece_size + block_size() - 1) / block_size();
int const blocks_in_piece = (piece_size + default_block_size - 1) / default_block_size;
TORRENT_PIECE_ASSERT(start_block < blocks_in_piece, pe);
#endif
@ -1667,7 +1667,7 @@ int block_cache::copy_from_piece(cached_piece_entry* const pe
// make sure it didn't wrap
TORRENT_PIECE_ASSERT(pe->refcount > 0, pe);
int const blocks_per_piece = (j->storage->files().piece_length() + block_size() - 1) / block_size();
int const blocks_per_piece = (j->storage->files().piece_length() + default_block_size - 1) / default_block_size;
TORRENT_ASSERT(block_offset < 0x4000);
j->argument = disk_buffer_holder(allocator
, aux::block_cache_reference{ j->storage->storage_index()
@ -1695,7 +1695,7 @@ int block_cache::copy_from_piece(cached_piece_entry* const pe
while (size > 0)
{
TORRENT_PIECE_ASSERT(pe->blocks[block].buf, pe);
int to_copy = std::min(block_size() - block_offset, size);
int to_copy = std::min(default_block_size - block_offset, size);
std::memcpy(boost::get<disk_buffer_holder>(j->argument).get()
+ buffer_offset
, pe->blocks[block].buf + block_offset
@ -1718,7 +1718,7 @@ int block_cache::copy_from_piece(cached_piece_entry* const pe
void block_cache::reclaim_block(storage_interface* st, aux::block_cache_reference const& ref)
{
TORRENT_ASSERT(st != nullptr);
int const blocks_per_piece = (st->files().piece_length() + block_size() - 1) / block_size();
int const blocks_per_piece = (st->files().piece_length() + default_block_size - 1) / default_block_size;
piece_index_t const piece(ref.cookie / blocks_per_piece);
int const block(ref.cookie % blocks_per_piece);
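
reclaim_block recovers a (piece, block) pair from a single integer cookie; from the decode above, the encoding is evidently cookie = piece * blocks_per_piece + block. A worked example under assumed sizes:

#include <cassert>
constexpr int default_block_size = 0x4000;
int main()
{
    int const piece_length = 0x40000; // assumed 256 KiB pieces
    int const blocks_per_piece =
        (piece_length + default_block_size - 1) / default_block_size; // == 16
    int const cookie = 35; // hypothetical block_cache_reference cookie
    assert(cookie / blocks_per_piece == 2); // piece index
    assert(cookie % blocks_per_piece == 3); // block within that piece
}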

View File

@ -38,6 +38,7 @@ POSSIBILITY OF SUCH DAMAGE.
#include "libtorrent/io_service.hpp"
#include "libtorrent/disk_observer.hpp"
#include "libtorrent/platform_util.hpp" // for total_physical_ram
#include "libtorrent/disk_interface.hpp" // for default_block_size
#include "libtorrent/aux_/disable_warnings_push.hpp"
@ -67,10 +68,9 @@ namespace libtorrent {
} // anonymous namespace
disk_buffer_pool::disk_buffer_pool(int block_size, io_service& ios
disk_buffer_pool::disk_buffer_pool(io_service& ios
, std::function<void()> const& trigger_trim)
: m_block_size(block_size)
, m_in_use(0)
: m_in_use(0)
, m_max_use(64)
, m_low_watermark((std::max)(m_max_use - 32, 0))
, m_trigger_cache_trim(trigger_trim)
@ -185,7 +185,7 @@ namespace libtorrent {
std::unique_lock<std::mutex> l(m_pool_mutex);
for (auto& i : iov)
{
i = { allocate_buffer_impl(l, "pending read"), std::size_t(block_size())};
i = { allocate_buffer_impl(l, "pending read"), std::size_t(default_block_size)};
if (i.data() == nullptr)
{
// uh oh. We failed to allocate the buffer!
@ -227,7 +227,7 @@ namespace libtorrent {
TORRENT_ASSERT(l.owns_lock());
TORRENT_UNUSED(l);
char* ret = page_aligned_allocator::malloc(m_block_size);
char* ret = page_aligned_allocator::malloc(default_block_size);
if (ret == nullptr)
{
@ -321,7 +321,7 @@ namespace libtorrent {
phys_ram = 1 * gb;
}
result += phys_ram / 10;
m_max_use = int(result / m_block_size);
m_max_use = int(result / default_block_size);
}
#ifdef _MSC_VER
@ -337,7 +337,7 @@ namespace libtorrent {
// when more actual ram is available, because we're still
// constrained by the 32 bit virtual address space.
m_max_use = std::min(2 * 1024 * 1024 * 3 / 4 * 1024
/ m_block_size, m_max_use);
/ default_block_size, m_max_use);
}
}
else

View File

@ -117,14 +117,14 @@ namespace libtorrent {
boost::apply_visitor(caller_visitor(*this), callback);
}
bool disk_io_job::completed(cached_piece_entry const* pe, int block_size)
bool disk_io_job::completed(cached_piece_entry const* pe)
{
if (action != job_action_t::write) return false;
int block_offset = d.io.offset & (block_size - 1);
int block_offset = d.io.offset & (default_block_size - 1);
int size = d.io.buffer_size;
int start = d.io.offset / block_size;
int end = block_offset > 0 && (size > block_size - block_offset) ? start + 2 : start + 1;
int start = d.io.offset / default_block_size;
int end = block_offset > 0 && (size > default_block_size - block_offset) ? start + 2 : start + 1;
for (int i = start; i < end; ++i)
{

View File

@ -195,14 +195,12 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
// ------- disk_io_thread ------
disk_io_thread::disk_io_thread(io_service& ios
, counters& cnt
, int const block_size)
disk_io_thread::disk_io_thread(io_service& ios, counters& cnt)
: m_generic_io_jobs(*this)
, m_generic_threads(m_generic_io_jobs, ios)
, m_hash_io_jobs(*this)
, m_hash_threads(m_hash_io_jobs, ios)
, m_disk_cache(block_size, ios, std::bind(&disk_io_thread::trigger_cache_trim, this))
, m_disk_cache(ios, std::bind(&disk_io_thread::trigger_cache_trim, this))
, m_stats_counters(cnt)
, m_ios(ios)
{
@ -353,8 +351,7 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
// end is one past the end
// round offset up to include the last block, which might
// have an odd size
int const block_size = m_disk_cache.block_size();
int end = p->hashing_done ? int(p->blocks_in_piece) : (p->hash->offset + block_size - 1) / block_size;
int end = p->hashing_done ? int(p->blocks_in_piece) : (p->hash->offset + default_block_size - 1) / default_block_size;
// nothing has been hashed yet, don't flush anything
if (end == 0 && !p->need_readback) return 0;
@ -441,7 +438,7 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
DLOG("[%d read-cache] ", static_cast<int>(i));
continue;
}
int hash_cursor = pe->hash ? pe->hash->offset / block_size : 0;
int hash_cursor = pe->hash ? pe->hash->offset / default_block_size : 0;
// if the piece has all blocks, and they're all dirty, and they've
// all been hashed, then this piece is eligible for flushing
@ -621,9 +618,8 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
for (int i = 0; i < start; ++i) DLOG(".");
#endif
int const block_size = m_disk_cache.block_size();
int size_left = piece_size;
for (int i = start; i < end; ++i, size_left -= block_size)
for (int i = start; i < end; ++i, size_left -= default_block_size)
{
TORRENT_PIECE_ASSERT(size_left > 0, pe);
// don't flush blocks that are empty (buf == 0), not dirty
@ -646,7 +642,7 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
TORRENT_UNUSED(locked);
flushing[num_flushing++] = i + block_base_index;
iov[iov_len] = { pe->blocks[i].buf, aux::numeric_cast<std::size_t>(std::min(block_size, size_left)) };
iov[iov_len] = { pe->blocks[i].buf, aux::numeric_cast<std::size_t>(std::min(default_block_size, size_left)) };
++iov_len;
pe->blocks[i].pending = true;
@ -670,7 +666,6 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
m_stats_counters.inc_stats_counter(counters::num_writing_threads, 1);
time_point const start_time = clock_type::now();
int const block_size = m_disk_cache.block_size();
#if DEBUG_DISK_THREAD
DLOG("flush_iovec: piece: %d [ ", int(pe->piece));
@ -695,7 +690,7 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
int const ret = pe->storage->writev(
iov_start.first(i - flushing_start)
, piece_index_t(static_cast<int>(piece) + flushing[flushing_start] / blocks_in_piece)
, (flushing[flushing_start] % blocks_in_piece) * block_size
, (flushing[flushing_start] % blocks_in_piece) * default_block_size
, file_flags, error);
if (ret < 0 || error) failed = true;
iov_start = iov.subspan(i);
@ -752,8 +747,6 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
#endif
m_disk_cache.blocks_flushed(pe, flushing, num_blocks);
int const block_size = m_disk_cache.block_size();
if (error)
{
fail_jobs_impl(error, pe->jobs, completed_jobs);
@ -767,7 +760,7 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
j->next = nullptr;
TORRENT_PIECE_ASSERT((j->flags & disk_io_job::in_progress) || !j->storage, pe);
TORRENT_PIECE_ASSERT(j->piece == pe->piece, pe);
if (j->completed(pe, block_size))
if (j->completed(pe))
{
j->ret = status_t::no_error;
j->error = error;
@ -1273,9 +1266,8 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
status_t disk_io_thread::do_read(disk_io_job* j, jobqueue_t& completed_jobs)
{
int const block_size = m_disk_cache.block_size();
int const piece_size = j->storage->files().piece_size(j->piece);
int const blocks_in_piece = (piece_size + block_size - 1) / block_size;
int const blocks_in_piece = (piece_size + default_block_size - 1) / default_block_size;
int const iov_len = m_disk_cache.pad_job(j, blocks_in_piece
, m_settings.get_int(settings_pack::read_cache_line_size));
@ -1310,13 +1302,13 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
}
// this is the offset that's aligned to block boundaries
std::int64_t const adjusted_offset = j->d.io.offset & ~(block_size - 1);
std::int64_t const adjusted_offset = j->d.io.offset & ~(default_block_size - 1);
// if this is the last piece, adjust the size of the
// last buffer to match up
iov[iov_len - 1] = iov[iov_len - 1].first(aux::numeric_cast<std::size_t>(
std::min(int(piece_size - adjusted_offset)
- (iov_len - 1) * block_size, block_size)));
- (iov_len - 1) * default_block_size, default_block_size)));
TORRENT_ASSERT(iov[iov_len - 1].size() > 0);
// at this point, all the buffers are allocated and iov is initialized
@ -1369,7 +1361,7 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
return status_t::fatal_disk_error;
}
int block = j->d.io.offset / block_size;
int block = j->d.io.offset / default_block_size;
#if TORRENT_USE_ASSERTS
pe->piece_log.push_back(piece_log_t(j->action, block));
#endif
@ -1516,7 +1508,7 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
status_t disk_io_thread::do_write(disk_io_job* j, jobqueue_t& completed_jobs)
{
TORRENT_ASSERT(j->d.io.buffer_size <= m_disk_cache.block_size());
TORRENT_ASSERT(j->d.io.buffer_size <= default_block_size);
std::unique_lock<std::mutex> l(m_cache_mutex);
@ -1578,11 +1570,11 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
, std::function<void(disk_buffer_holder block, disk_job_flags_t const flags
, storage_error const& se)> handler, disk_job_flags_t const flags)
{
TORRENT_ASSERT(r.length <= m_disk_cache.block_size());
TORRENT_ASSERT(r.length <= default_block_size);
TORRENT_ASSERT(r.length <= 16 * 1024);
DLOG("do_read piece: %d block: %d\n", static_cast<int>(r.piece)
, r.start / m_disk_cache.block_size());
, r.start / default_block_size);
disk_io_job* j = allocate_job(job_action_t::read);
j->storage = m_torrents[storage]->shared_from_this();
@ -1690,7 +1682,7 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
, std::function<void(storage_error const&)> handler
, disk_job_flags_t const flags)
{
TORRENT_ASSERT(r.length <= m_disk_cache.block_size());
TORRENT_ASSERT(r.length <= default_block_size);
TORRENT_ASSERT(r.length <= 16 * 1024);
bool exceeded = false;
@ -1728,16 +1720,15 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
for (auto i = range.first; i != range.second; ++i)
{
cached_piece_entry const& p = *i;
int bs = m_disk_cache.block_size();
int piece_size = p.storage->files().piece_size(p.piece);
int blocks_in_piece = (piece_size + bs - 1) / bs;
int const piece_size = p.storage->files().piece_size(p.piece);
int const blocks_in_piece = (piece_size + default_block_size - 1) / default_block_size;
for (int k = 0; k < blocks_in_piece; ++k)
TORRENT_PIECE_ASSERT(p.blocks[k].buf != boost::get<disk_buffer_holder>(j->argument).get(), &p);
}
l2_.unlock();
#endif
TORRENT_ASSERT((r.start % m_disk_cache.block_size()) == 0);
TORRENT_ASSERT((r.start % default_block_size) == 0);
if (j->storage->is_blocked(j))
{
@ -2005,10 +1996,9 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
// are we already done?
if (ph->offset >= piece_size) return;
int const block_size = m_disk_cache.block_size();
int const cursor = ph->offset / block_size;
int const cursor = ph->offset / default_block_size;
int end = cursor;
TORRENT_PIECE_ASSERT(ph->offset % block_size == 0, pe);
TORRENT_PIECE_ASSERT(ph->offset % default_block_size == 0, pe);
for (int i = cursor; i < pe->blocks_in_piece; ++i)
{
@ -2043,7 +2033,7 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
for (int i = cursor; i < end; ++i)
{
cached_block_entry& bl = pe->blocks[i];
int const size = std::min(block_size, piece_size - offset);
int const size = std::min(default_block_size, piece_size - offset);
ph->h.update(bl.buf, size);
offset += size;
}
@ -2113,13 +2103,12 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
TORRENT_ASSERT(m_magic == 0x1337);
int const piece_size = j->storage->files().piece_size(j->piece);
int const block_size = m_disk_cache.block_size();
int const blocks_in_piece = (piece_size + block_size - 1) / block_size;
int const blocks_in_piece = (piece_size + default_block_size - 1) / default_block_size;
open_mode_t const file_flags = file_flags_for_job(j
, m_settings.get_bool(settings_pack::coalesce_reads));
iovec_t iov = { m_disk_cache.allocate_buffer("hashing")
, static_cast<std::size_t>(block_size) };
, static_cast<std::size_t>(default_block_size) };
hasher h;
int ret = 0;
int offset = 0;
@ -2130,7 +2119,7 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
time_point const start_time = clock_type::now();
iov = iov.first(aux::numeric_cast<std::size_t>(std::min(block_size, piece_size - offset)));
iov = iov.first(aux::numeric_cast<std::size_t>(std::min(default_block_size, piece_size - offset)));
ret = j->storage->readv(iov, j->piece
, offset, file_flags, j->error);
if (ret < 0) break;
@ -2146,7 +2135,7 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
m_stats_counters.inc_stats_counter(counters::disk_job_time, read_time);
}
offset += block_size;
offset += default_block_size;
h.update(iov);
}
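
The hashing loop above reads one block at a time into a scratch buffer and feeds it to the hasher, clamping the final read to the piece boundary. A minimal standalone analogue of the loop shape (the byte-counting stand-in for the hasher and storage is hypothetical):

#include <algorithm>
#include <cassert>
#include <cstddef>
constexpr int default_block_size = 0x4000;
struct counting_hasher { std::size_t fed = 0; void update(int n) { fed += std::size_t(n); } };
int main()
{
    int const piece_size = 0x9000; // 36 KiB: two full blocks plus a 4 KiB tail
    counting_hasher h;
    for (int offset = 0; offset < piece_size; offset += default_block_size)
    {
        // clamp the last block to the end of the piece, as the code above does
        int const len = std::min(default_block_size, piece_size - offset);
        h.update(len); // the real loop does storage->readv() then h.update(iov)
    }
    assert(h.fed == 0x9000);
}
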
@ -2172,8 +2161,7 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
#if TORRENT_USE_ASSERTS
pe->piece_log.push_back(piece_log_t(j->action));
#endif
int const block_size = m_disk_cache.block_size();
m_disk_cache.cache_hit(pe, j->d.io.offset / block_size
m_disk_cache.cache_hit(pe, j->d.io.offset / default_block_size
, bool(j->flags & disk_interface::volatile_read));
TORRENT_PIECE_ASSERT(pe->cache_state <= cached_piece_entry::read_lru1 || pe->cache_state == cached_piece_entry::read_lru2, pe);
@ -2244,8 +2232,7 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
}
partial_hash* ph = pe->hash.get();
int const block_size = m_disk_cache.block_size();
int const blocks_in_piece = (piece_size + block_size - 1) / block_size;
int const blocks_in_piece = (piece_size + default_block_size - 1) / default_block_size;
// keep track of which blocks we have locked by incrementing
// their refcounts. This is used to decrement only these blocks
@ -2256,8 +2243,8 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
// increment the refcounts of all
// blocks up front, and then hash them without holding the lock
TORRENT_PIECE_ASSERT(ph->offset % block_size == 0, pe);
for (int i = ph->offset / block_size; i < blocks_in_piece; ++i)
TORRENT_PIECE_ASSERT(ph->offset % default_block_size == 0, pe);
for (int i = ph->offset / default_block_size; i < blocks_in_piece; ++i)
{
// is the block not in the cache?
if (pe->blocks[i].buf == nullptr) continue;
@ -2282,22 +2269,22 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
status_t ret = status_t::no_error;
int next_locked_block = 0;
for (int i = offset / block_size; i < blocks_in_piece; ++i)
for (int i = offset / default_block_size; i < blocks_in_piece; ++i)
{
if (next_locked_block < num_locked_blocks
&& locked_blocks[next_locked_block] == i)
{
int const len = std::min(block_size, piece_size - offset);
int const len = std::min(default_block_size, piece_size - offset);
++next_locked_block;
TORRENT_PIECE_ASSERT(pe->blocks[i].buf, pe);
TORRENT_PIECE_ASSERT(offset == i * block_size, pe);
TORRENT_PIECE_ASSERT(offset == i * default_block_size, pe);
offset += len;
ph->h.update({pe->blocks[i].buf, aux::numeric_cast<std::size_t>(len)});
}
else
{
iovec_t const iov = { m_disk_cache.allocate_buffer("hashing")
, aux::numeric_cast<std::size_t>(std::min(block_size, piece_size - offset))};
, aux::numeric_cast<std::size_t>(std::min(default_block_size, piece_size - offset))};
if (iov.data() == nullptr)
{
@ -2323,7 +2310,7 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
time_point const start_time = clock_type::now();
TORRENT_PIECE_ASSERT(offset == i * block_size, pe);
TORRENT_PIECE_ASSERT(offset == i * default_block_size, pe);
int read_ret = j->storage->readv(iov, j->piece
, offset, file_flags, j->error);
@ -2358,7 +2345,7 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
m_stats_counters.inc_stats_counter(counters::disk_job_time, read_time);
}
TORRENT_PIECE_ASSERT(offset == i * block_size, pe);
TORRENT_PIECE_ASSERT(offset == i * default_block_size, pe);
offset += int(iov.size());
ph->h.update(iov);
@ -2536,14 +2523,13 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
namespace {
void get_cache_info_impl(cached_piece_info& info, cached_piece_entry const* i
, int block_size)
void get_cache_info_impl(cached_piece_info& info, cached_piece_entry const* i)
{
info.piece = i->piece;
info.storage = i->storage.get();
info.last_use = i->expire;
info.need_readback = i->need_readback;
info.next_to_hash = i->hash == nullptr ? -1 : (i->hash->offset + block_size - 1) / block_size;
info.next_to_hash = i->hash == nullptr ? -1 : (i->hash->offset + default_block_size - 1) / default_block_size;
info.kind = i->cache_state == cached_piece_entry::write_lru
? cached_piece_info::write_cache
: i->cache_state == cached_piece_entry::volatile_read_lru
@ -2631,8 +2617,6 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
if (no_pieces == false)
{
int const block_size = m_disk_cache.block_size();
if (!session)
{
std::shared_ptr<storage_interface> storage = m_torrents[st];
@ -2647,7 +2631,7 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
|| pe.cache_state == cached_piece_entry::read_lru1_ghost)
continue;
ret->pieces.emplace_back();
get_cache_info_impl(ret->pieces.back(), &pe, block_size);
get_cache_info_impl(ret->pieces.back(), &pe);
}
}
else
@ -2661,7 +2645,7 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
|| i->cache_state == cached_piece_entry::read_lru1_ghost)
continue;
ret->pieces.emplace_back();
get_cache_info_impl(ret->pieces.back(), &*i, block_size);
get_cache_info_impl(ret->pieces.back(), &*i);
}
}
}

View File

@ -149,14 +149,14 @@ namespace libtorrent {
request.reserve(400);
int size = r.length;
const int block_size = t->block_size();
const int bs = t->block_size();
const int piece_size = t->torrent_file().piece_length();
peer_request pr;
while (size > 0)
{
int request_offset = r.start + r.length - size;
pr.start = request_offset % piece_size;
pr.length = std::min(block_size, size);
pr.length = std::min(bs, size);
pr.piece = piece_index_t(static_cast<int>(r.piece) + request_offset / piece_size);
m_requests.push_back(pr);
size -= pr.length;
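
The loop above chops a (possibly multi-piece) web-seed byte range into block-sized peer_requests. A standalone trace with assumed sizes: 32 KiB pieces and a 32 KiB request starting one block into the first piece, so the second request lands in the next piece:

#include <algorithm>
#include <cstdio>
int main()
{
    int const piece_size = 0x8000; // assumed 32 KiB pieces
    int const bs = 0x4000;         // block size
    int const start = 0x4000, length = 0x8000;
    int size = length;
    while (size > 0)
    {
        int const request_offset = start + length - size;
        int const pr_start = request_offset % piece_size;
        int const pr_length = std::min(bs, size);
        std::printf("piece+%d start=0x%x len=0x%x\n"
            , request_offset / piece_size, pr_start, pr_length);
        size -= pr_length;
    }
    // prints: piece+0 start=0x4000 len=0x4000
    //         piece+1 start=0x0    len=0x4000
}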

View File

@ -3935,15 +3935,15 @@ namespace libtorrent {
}
int block_offset = block.block.block_index * t->block_size();
int block_size = std::min(t->torrent_file().piece_size(
int bs = std::min(t->torrent_file().piece_size(
block.block.piece_index) - block_offset, t->block_size());
TORRENT_ASSERT(block_size > 0);
TORRENT_ASSERT(block_size <= t->block_size());
TORRENT_ASSERT(bs > 0);
TORRENT_ASSERT(bs <= t->block_size());
peer_request r;
r.piece = block.block.piece_index;
r.start = block_offset;
r.length = block_size;
r.length = bs;
if (m_download_queue.empty())
m_counters.inc_stats_counter(counters::num_peers_down_requests);
@ -3951,7 +3951,7 @@ namespace libtorrent {
TORRENT_ASSERT(verify_piece(t->to_req(block.block)));
block.send_buffer_offset = aux::numeric_cast<std::uint32_t>(m_send_buffer.size());
m_download_queue.push_back(block);
m_outstanding_bytes += block_size;
m_outstanding_bytes += bs;
#if TORRENT_USE_INVARIANT_CHECKS
check_invariant();
#endif
@ -3989,13 +3989,13 @@ namespace libtorrent {
#endif
block_offset = block.block.block_index * t->block_size();
block_size = std::min(t->torrent_file().piece_size(
bs = std::min(t->torrent_file().piece_size(
block.block.piece_index) - block_offset, t->block_size());
TORRENT_ASSERT(block_size > 0);
TORRENT_ASSERT(block_size <= t->block_size());
TORRENT_ASSERT(bs > 0);
TORRENT_ASSERT(bs <= t->block_size());
r.length += block_size;
m_outstanding_bytes += block_size;
r.length += bs;
m_outstanding_bytes += bs;
#if TORRENT_USE_INVARIANT_CHECKS
check_invariant();
#endif
@ -4679,11 +4679,11 @@ namespace libtorrent {
// the block size doesn't have to be 16 kiB. So we first query the
// torrent for it
std::shared_ptr<torrent> t = m_torrent.lock();
int const block_size = t->block_size();
int const bs = t->block_size();
TORRENT_ASSERT(block_size > 0);
TORRENT_ASSERT(bs > 0);
m_desired_queue_size = std::uint16_t(queue_time * download_rate / block_size);
m_desired_queue_size = std::uint16_t(queue_time * download_rate / bs);
}
if (m_desired_queue_size > m_max_out_request_queue)
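
The sizing rule above scales the request queue with the download rate. Assuming queue_time is in seconds and download_rate in bytes per second (the units are implied but not shown in this hunk), a worked example:

#include <cstdint>
std::uint16_t desired_queue(int queue_time, int download_rate, int bs)
{
    return static_cast<std::uint16_t>(queue_time * download_rate / bs);
}
// a 3 s queue target at 1 MiB/s with 16 KiB blocks:
// desired_queue(3, 1048576, 16384) == 192 outstanding requests
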
@ -6338,9 +6338,9 @@ namespace libtorrent {
// download queue already
int outstanding_bytes = 0;
// bool in_download_queue = false;
int const block_size = t->block_size();
int const bs = t->block_size();
piece_block last_block(ti.last_piece()
, (ti.piece_size(ti.last_piece()) + block_size - 1) / block_size);
, (ti.piece_size(ti.last_piece()) + bs - 1) / bs);
for (std::vector<pending_block>::const_iterator i = m_download_queue.begin()
, end(m_download_queue.end()); i != end; ++i)
{

View File

@ -4975,9 +4975,7 @@ namespace {
l.reserve(num_torrents + 1);
}
torrent_ptr = std::make_shared<torrent>(*this
, 16 * 1024, m_paused
, params);
torrent_ptr = std::make_shared<torrent>(*this, m_paused, params);
torrent_ptr->set_queue_position(m_download_queue.end_index());
return std::make_pair(torrent_ptr, true);

View File

@ -109,25 +109,6 @@ using namespace std::placeholders;
namespace libtorrent {
namespace {
std::uint32_t root2(int x)
{
std::uint32_t ret = 0;
x >>= 1;
while (x > 0)
{
// if this assert triggers, the block size
// is not an even 2 exponent!
TORRENT_ASSERT(x == 1 || (x & 1) == 0);
++ret;
x >>= 1;
}
return ret;
}
} // anonymous namespace
web_seed_t::web_seed_t(web_seed_entry const& wse)
: web_seed_entry(wse)
{
@ -143,8 +124,7 @@ namespace libtorrent {
}
torrent_hot_members::torrent_hot_members(aux::session_interface& ses
, add_torrent_params const& p, int const block_size
, bool const session_paused)
, add_torrent_params const& p, bool const session_paused)
: m_ses(ses)
, m_complete(0xffffff)
, m_upload_mode(p.flags & torrent_flags::upload_mode)
@ -157,16 +137,14 @@ namespace libtorrent {
, m_graceful_pause_mode(false)
, m_state_subscription(p.flags & torrent_flags::update_subscribe)
, m_max_connections(0xffffff)
, m_block_size_shift(root2(block_size))
, m_state(torrent_status::checking_resume_data)
{}
torrent::torrent(
aux::session_interface& ses
, int const block_size
, bool const session_paused
, add_torrent_params const& p)
: torrent_hot_members(ses, p, block_size, session_paused)
: torrent_hot_members(ses, p, session_paused)
, m_tracker_timer(ses.get_io_service())
, m_inactivity_timer(ses.get_io_service())
, m_trackerid(p.trackerid)
@ -347,7 +325,6 @@ namespace libtorrent {
&& std::find(p.have_pieces.begin(), p.have_pieces.end(), false) == p.have_pieces.end();
m_connections_initialized = true;
m_block_size_shift = root2(std::min(block_size, m_torrent_file->piece_length()));
}
else
{
@ -1700,8 +1677,6 @@ namespace libtorrent {
#endif
}
m_block_size_shift = root2(std::min(block_size(), m_torrent_file->piece_length()));
if (m_torrent_file->num_pieces() > piece_picker::max_pieces)
{
set_error(errors::too_many_pieces_in_torrent, torrent_status::error_file_none);

View File

@ -103,7 +103,7 @@ static void nop() {}
#define TEST_SETUP \
io_service ios; \
block_cache bc(0x4000, ios, std::bind(&nop)); \
block_cache bc(ios, std::bind(&nop)); \
aux::session_settings sett; \
file_storage fs; \
fs.add_file("a/test0", 0x4000); \

View File

@ -233,7 +233,7 @@ void run_storage_tests(std::shared_ptr<torrent_info> info
{ // avoid having two storages use the same files
file_pool fp;
boost::asio::io_service ios;
disk_buffer_pool dp(16 * 1024, ios, std::bind(&nop));
disk_buffer_pool dp(ios, std::bind(&nop));
aux::vector<download_priority_t, file_index_t> priorities;
sha1_hash info_hash;
@ -323,7 +323,7 @@ void test_remove(std::string const& test_path, bool unbuffered)
std::vector<char> buf;
file_pool fp;
io_service ios;
disk_buffer_pool dp(16 * 1024, ios, std::bind(&nop));
disk_buffer_pool dp(ios, std::bind(&nop));
aux::session_settings set;
set.set_int(settings_pack::disk_io_write_mode
@ -397,7 +397,7 @@ void test_rename(std::string const& test_path)
std::vector<char> buf;
file_pool fp;
io_service ios;
disk_buffer_pool dp(16 * 1024, ios, std::bind(&nop));
disk_buffer_pool dp(ios, std::bind(&nop));
aux::session_settings set;
std::shared_ptr<default_storage> s = setup_torrent(fs, fp, buf, test_path
@ -476,7 +476,7 @@ void test_check_files(std::string const& test_path
sett.set_int(settings_pack::aio_threads, 1);
io.set_settings(&sett);
disk_buffer_pool dp(16 * 1024, ios, std::bind(&nop));
disk_buffer_pool dp(ios, std::bind(&nop));
aux::vector<download_priority_t, file_index_t> priorities;
sha1_hash info_hash;
@ -1379,7 +1379,7 @@ TORRENT_TEST(move_storage_to_self)
std::vector<char> buf;
file_pool fp;
io_service ios;
disk_buffer_pool dp(16 * 1024, ios, std::bind(&nop));
disk_buffer_pool dp(ios, std::bind(&nop));
std::shared_ptr<default_storage> s = setup_torrent(fs, fp, buf, save_path, set);
iovec_t const b = {&buf[0], 4};
@ -1408,7 +1408,7 @@ TORRENT_TEST(move_storage_into_self)
std::vector<char> buf;
file_pool fp;
io_service ios;
disk_buffer_pool dp(16 * 1024, ios, std::bind(&nop));
disk_buffer_pool dp(ios, std::bind(&nop));
std::shared_ptr<default_storage> s = setup_torrent(fs, fp, buf, save_path, set);
iovec_t const b = {&buf[0], 4};
@ -1454,7 +1454,7 @@ TORRENT_TEST(dont_move_intermingled_files)
std::vector<char> buf;
file_pool fp;
io_service ios;
disk_buffer_pool dp(16 * 1024, ios, std::bind(&nop));
disk_buffer_pool dp(ios, std::bind(&nop));
std::shared_ptr<default_storage> s = setup_torrent(fs, fp, buf, save_path, set);
iovec_t b = {&buf[0], 4};