Compact block_cache_reference by using a storage_index_t instead of a raw pointer. Make sure the storage object is kept alive as long as any block_cache_reference still refers to it.

This commit is contained in:
arvidn 2017-01-02 02:00:20 -05:00 committed by Arvid Norberg
parent a087a283de
commit 41dbb47123
12 changed files with 62 additions and 59 deletions

View File

@ -34,15 +34,19 @@ POSSIBILITY OF SUCH DAMAGE.
#define TORRENT_BLOCK_CACHE_REFERENCE_HPP
#include "libtorrent/units.hpp"
#include "libtorrent/storage_defs.hpp"
namespace libtorrent {
namespace aux {
struct block_cache_reference
{
void* storage;
piece_index_t piece;
int block;
storage_index_t storage;
std::uint32_t cookie;
// if the cookie is set to this value, it doesn't refer to anything in the
// cache (and the buffer is mutable)
static std::uint32_t const none = 0xffffffffu;
};
}

View File

@ -344,7 +344,7 @@ namespace libtorrent
int pad_job(disk_io_job const* j, int blocks_in_piece
, int read_ahead) const;
void reclaim_block(aux::block_cache_reference const& ref);
void reclaim_block(storage_interface* st, aux::block_cache_reference const& ref);
// returns a range of all pieces. This might be a very
// long list, use carefully
@ -406,7 +406,6 @@ namespace libtorrent
// looks for this piece in the cache. If it's there, returns a pointer
// to it, otherwise 0.
cached_piece_entry* find_piece(aux::block_cache_reference const& ref);
cached_piece_entry* find_piece(disk_io_job const* j);
cached_piece_entry* find_piece(storage_interface* st, piece_index_t piece);

View File

@ -111,7 +111,7 @@ namespace libtorrent
#if TORRENT_CPP98_DEQUE
move_construct_holder_fun move_holder;
#endif
std::aligned_storage<32>::type holder;
std::aligned_storage<24>::type holder;
char* buf; // the first byte of the buffer
int size; // the total size of the buffer
int used_size; // this is the number of bytes to send/receive

View File

@ -103,7 +103,7 @@ namespace libtorrent
}
// if this returns true, the buffer may not be modified in place
bool is_mutable() const noexcept { return m_ref.storage == nullptr; }
bool is_mutable() const noexcept { return m_ref.cookie == aux::block_cache_reference::none; }
// implicitly convertible to true if the object is currently holding a
// buffer

View File

@ -199,7 +199,7 @@ namespace libtorrent
// operations until all outstanding jobs have completed.
// at that point, the m_blocked_jobs are issued
// the count is the number of fence job currently in the queue
int m_has_fence;
int m_has_fence = 0;
// when there's a fence up, jobs are queued up in here
// until the fence is lowered
@ -209,7 +209,7 @@ namespace libtorrent
// to this torrent, currently pending, hanging off of
// cached_piece_entry objects. This is used to determine
// when the fence can be lowered
std::atomic<int> m_outstanding_jobs;
std::atomic<int> m_outstanding_jobs{0};
// must be held when accessing m_has_fence and
// m_blocked_jobs
@ -449,6 +449,13 @@ namespace libtorrent
storage_index_t storage_index() const { return m_storage_index; }
void set_storage_index(storage_index_t st) { m_storage_index = st; }
// Release one block_cache_reference held on this storage and return
// the number of references remaining. Callers destroy the storage
// when this returns 0.
int dec_refcount()
{
	TORRENT_ASSERT(m_references > 0);
	// Pre-decrement: callers (remove_torrent, reclaim_blocks) compare
	// the returned value against 0 to decide when to free the storage.
	// The post-decrement form (m_references--) returned the value
	// *before* the decrement, which the assert guarantees is > 0, so
	// the comparison could never succeed and the storage would leak.
	return --m_references;
}
// Take an additional block_cache_reference on this storage.
void inc_refcount() { ++m_references; }
private:
bool m_need_tick = false;
@ -459,10 +466,13 @@ namespace libtorrent
// torrent. This shared_ptr is here only
// to keep the torrent object alive until
// the storage_interface destructs. This is because
// the torrent_info object is owned by the torrent.
// the file_storage object is owned by the torrent.
std::shared_ptr<void> m_torrent;
storage_index_t m_storage_index;
// the number of block_cache_reference objects referencing this storage
std::atomic<int> m_references{1};
};
// The default implementation of storage_interface. Behaves as a normal

View File

@ -1744,10 +1744,12 @@ int block_cache::copy_from_piece(cached_piece_entry* const pe
// make sure it didn't wrap
TORRENT_PIECE_ASSERT(pe->refcount > 0, pe);
j->d.io.ref.storage = j->storage.get();
j->d.io.ref.piece = pe->piece;
j->d.io.ref.block = start_block;
int const blocks_per_piece = (j->storage->files()->piece_length() + block_size() - 1) / block_size();
j->d.io.ref.storage = j->storage->storage_index();
j->d.io.ref.cookie = static_cast<int>(pe->piece) * blocks_per_piece + start_block;
j->buffer.disk_block = bl.buf + (j->d.io.offset & (block_size()-1));
j->storage->inc_refcount();
++m_send_buffer_blocks;
return j->d.io.buffer_size;
}
@ -1784,16 +1786,20 @@ int block_cache::copy_from_piece(cached_piece_entry* const pe
return j->d.io.buffer_size;
}
void block_cache::reclaim_block(aux::block_cache_reference const& ref)
void block_cache::reclaim_block(storage_interface* st, aux::block_cache_reference const& ref)
{
cached_piece_entry* pe = find_piece(ref);
int const blocks_per_piece = (st->files()->piece_length() + block_size() - 1) / block_size();
piece_index_t const piece(ref.cookie / blocks_per_piece);
int const block(ref.cookie % blocks_per_piece);
cached_piece_entry* pe = find_piece(st, piece);
TORRENT_ASSERT(pe);
if (pe == nullptr) return;
TORRENT_PIECE_ASSERT(pe->in_use, pe);
TORRENT_PIECE_ASSERT(pe->blocks[ref.block].buf, pe);
dec_block_refcount(pe, ref.block, block_cache::ref_reading);
TORRENT_PIECE_ASSERT(pe->blocks[block].buf, pe);
dec_block_refcount(pe, block, block_cache::ref_reading);
TORRENT_PIECE_ASSERT(m_send_buffer_blocks > 0, pe);
--m_send_buffer_blocks;
@ -1822,11 +1828,6 @@ bool block_cache::maybe_free_piece(cached_piece_entry* pe)
return true;
}
// Resolve a block_cache_reference to its cached piece entry by
// delegating to the (storage, piece) overload. The reference's
// storage field is the raw storage_interface pointer it was
// created from.
cached_piece_entry* block_cache::find_piece(aux::block_cache_reference const& ref)
{
return find_piece(static_cast<storage_interface*>(ref.storage), ref.piece);
}
cached_piece_entry* block_cache::find_piece(disk_io_job const* j)
{
return find_piece(j->storage.get(), j->piece);

View File

@ -40,9 +40,8 @@ namespace libtorrent
disk_buffer_holder::disk_buffer_holder(buffer_allocator_interface& alloc, char* buf) noexcept
: m_allocator(&alloc), m_buf(buf)
{
m_ref.storage = nullptr;
m_ref.piece = piece_index_t(-1);
m_ref.block = -1;
m_ref.storage = storage_index_t{0};
m_ref.cookie = aux::block_cache_reference::none;
}
disk_buffer_holder& disk_buffer_holder::operator=(disk_buffer_holder&& h) noexcept
@ -61,44 +60,32 @@ namespace libtorrent
disk_buffer_holder::disk_buffer_holder(buffer_allocator_interface& alloc
, aux::block_cache_reference const& ref, char* buf) noexcept
: m_allocator(&alloc), m_buf(buf), m_ref(ref)
{
TORRENT_ASSERT(m_ref.storage == nullptr || m_ref.piece >= piece_index_t(0));
TORRENT_ASSERT(m_ref.storage == nullptr || m_ref.block >= 0);
TORRENT_ASSERT(m_ref.storage == nullptr
|| m_ref.piece < static_cast<storage_interface*>(m_ref.storage)->files()->end_piece());
TORRENT_ASSERT(m_ref.storage == nullptr
|| m_ref.block <= static_cast<storage_interface*>(m_ref.storage)->files()->piece_length() / 0x4000);
}
{}
void disk_buffer_holder::reset(aux::block_cache_reference const& ref, char* buf)
{
if (m_ref.storage) m_allocator->reclaim_blocks(m_ref);
if (m_ref.cookie != aux::block_cache_reference::none) m_allocator->reclaim_blocks(m_ref);
else if (m_buf) m_allocator->free_disk_buffer(m_buf);
m_buf = buf;
m_ref = ref;
TORRENT_ASSERT(m_ref.piece >= piece_index_t(0));
TORRENT_ASSERT(m_ref.storage != nullptr);
TORRENT_ASSERT(m_ref.block >= 0);
TORRENT_ASSERT(m_ref.piece < static_cast<storage_interface*>(m_ref.storage)->files()->end_piece());
TORRENT_ASSERT(m_ref.block <= static_cast<storage_interface*>(m_ref.storage)->files()->piece_length() / 0x4000);
}
void disk_buffer_holder::reset(char* const buf)
{
if (m_ref.storage) m_allocator->reclaim_blocks(m_ref);
if (m_ref.cookie != aux::block_cache_reference::none) m_allocator->reclaim_blocks(m_ref);
else if (m_buf) m_allocator->free_disk_buffer(m_buf);
m_buf = buf;
m_ref.storage = nullptr;
m_ref.cookie = aux::block_cache_reference::none;
}
char* disk_buffer_holder::release() noexcept
{
char* ret = m_buf;
m_buf = nullptr;
m_ref.storage = nullptr;
m_ref.cookie = aux::block_cache_reference::none;
return ret;
}
disk_buffer_holder::~disk_buffer_holder() { reset(); }
}

View File

@ -105,9 +105,8 @@ namespace libtorrent
buffer.disk_block = nullptr;
d.io.offset = 0;
d.io.buffer_size = 0;
d.io.ref.storage = nullptr;
d.io.ref.piece = piece_index_t{0};
d.io.ref.block = 0;
d.io.ref.storage = storage_index_t{0};
d.io.ref.cookie = aux::block_cache_reference::none;
}
disk_io_job::~disk_io_job()

View File

@ -232,7 +232,8 @@ namespace libtorrent
void disk_io_thread::remove_torrent(storage_index_t const idx)
{
m_torrents[idx].reset();
auto& pos = m_torrents[idx];
if (pos->dec_refcount() == 0) pos.reset();
}
disk_io_thread::~disk_io_thread()
@ -279,8 +280,10 @@ namespace libtorrent
std::unique_lock<std::mutex> l(m_cache_mutex);
for (auto ref : refs)
{
TORRENT_ASSERT(ref.storage);
m_disk_cache.reclaim_block(ref);
auto& pos = m_torrents[ref.storage];
storage_interface* st = pos.get();
m_disk_cache.reclaim_block(st, ref);
if (st->dec_refcount() == 0) pos.reset();
}
}

View File

@ -1477,10 +1477,7 @@ namespace libtorrent
// ====== disk_job_fence implementation ========
disk_job_fence::disk_job_fence()
: m_has_fence(0)
, m_outstanding_jobs(0)
{}
disk_job_fence::disk_job_fence() {}
int disk_job_fence::job_complete(disk_io_job* j, tailqueue<disk_io_job>& jobs)
{

View File

@ -4322,7 +4322,13 @@ namespace libtorrent
alerts().emplace_alert<cache_flushed_alert>(get_handle());
}
m_storage.reset();
// if this torrent still has connections, they may have buffers to return
// to the disk storage, in which case we cannot destroy the disk storage
// yet. wait until the last peer connection is removed
if (m_connections.empty())
{
m_storage.reset();
}
// TODO: 2 abort lookups this torrent has made via the
// session host resolver interface
@ -7127,9 +7133,6 @@ namespace libtorrent
void torrent::disconnect_all(error_code const& ec, operation_t op)
{
// doesn't work with the m_paused -> m_num_peers == 0 condition
// INVARIANT_CHECK;
while (!m_connections.empty())
{
peer_connection* p = *m_connections.begin();

View File

@ -134,9 +134,9 @@ static void nop() {}
ret = bc.try_read(&rj)
#define RETURN_BUFFER \
if (rj.d.io.ref.storage) bc.reclaim_block(rj.d.io.ref); \
if (rj.d.io.ref.cookie != aux::block_cache_reference::none) bc.reclaim_block(pm.get(), rj.d.io.ref); \
else if (rj.buffer.disk_block) bc.free_buffer(rj.buffer.disk_block); \
rj.d.io.ref.storage = 0
rj.d.io.ref.cookie = aux::block_cache_reference::none
#define FLUSH(flushing) \
for (int i = 0; i < int(sizeof(flushing)/sizeof((flushing)[0])); ++i) \