forked from premiere/premiere-libtorrent

return a reference instead of pointer to file_storage in storage_interface

commit 86f67bd9d9 (parent e4bdd14ae7)
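The change, in one line: storage_interface::files() now hands back file_storage const& instead of file_storage const*, so call sites drop the -> dereference (and any temptation to null-check a pointer that was never null). A minimal, compilable sketch of the before/after shape of the API; the stand-in types below are simplified placeholders, not libtorrent's real declarations:

#include <cassert>

// stand-in types for illustration only; libtorrent's real classes carry much more state
struct file_storage
{
	int piece_size(int /*piece*/) const { return 16 * 1024; }
	int piece_length() const { return 16 * 1024; }
};

struct storage_interface
{
	// before this commit (sketch): file_storage const* files() const { return &m_files; }
	// after this commit: a reference, which can never be null
	file_storage const& files() const { return m_files; }

private:
	file_storage m_files;
};

int main()
{
	storage_interface st;
	// old call-site shape: st.files()->piece_size(0)
	// new call-site shape: plain member access, no dereference
	int const piece_size = st.files().piece_size(0);
	assert(piece_size > 0);
}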
@@ -304,7 +304,7 @@ namespace libtorrent
  // off again.
  virtual bool tick() { return false; }

- file_storage const* files() const { return &m_files; }
+ file_storage const& files() const { return m_files; }

  bool set_need_tick()
  {
@@ -406,7 +406,7 @@ namespace libtorrent
  // file_storage, otherwise returns the original file_storage object.
  file_storage const& files() const
  {
- return m_mapped_files ? *m_mapped_files : *storage_interface::files();
+ return m_mapped_files ? *m_mapped_files : storage_interface::files();
  }

  private:
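The second hunk updates the overriding files() in the storage implementation that can hold a remapped file list. A hedged sketch of that pattern follows; the class and member names below (mapped_storage, remap) are placeholders, not libtorrent's actual ones. When a remapped file_storage has been set it wins, otherwise the base class's reference is forwarded unchanged, with no dereference of the base result needed any more.

#include <memory>
#include <utility>

struct file_storage {};  // placeholder

struct storage_interface
{
	file_storage const& files() const { return m_files; }
private:
	file_storage m_files;
};

// placeholder for the concrete storage that supports remapped/renamed files;
// its files() deliberately hides the non-virtual base version
struct mapped_storage : storage_interface
{
	file_storage const& files() const
	{
		// prefer the remapped file list when present, else the original one
		return m_mapped_files ? *m_mapped_files : storage_interface::files();
	}

	void remap(std::unique_ptr<file_storage> fs) { m_mapped_files = std::move(fs); }

private:
	std::unique_ptr<file_storage> m_mapped_files;
};

int main()
{
	mapped_storage st;
	file_storage const& original = st.files();   // falls through to the base storage
	st.remap(std::make_unique<file_storage>());
	file_storage const& remapped = st.files();   // now returns the remapped list
	(void)original; (void)remapped;
}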
@@ -614,7 +614,7 @@ cached_piece_entry* block_cache::allocate_piece(disk_io_job const* j, std::uint1
  cached_piece_entry* p = find_piece(j);
  if (p == nullptr)
  {
- int const piece_size = j->storage->files()->piece_size(j->piece);
+ int const piece_size = j->storage->files().piece_size(j->piece);
  int const blocks_in_piece = (piece_size + block_size() - 1) / block_size();

  cached_piece_entry pe;
@@ -1280,7 +1280,7 @@ void block_cache::insert_blocks(cached_piece_entry* pe, int block, span<iovec_t
  {
  // each iovec buffer has to be the size of a block (or the size of the last block)
  TORRENT_PIECE_ASSERT(int(buf.iov_len) == std::min(block_size()
- , pe->storage->files()->piece_size(pe->piece) - block * block_size()), pe);
+ , pe->storage->files().piece_size(pe->piece) - block * block_size()), pe);

  // no nullptrs allowed
  TORRENT_ASSERT(buf.iov_base != nullptr);
@@ -1471,7 +1471,7 @@ void block_cache::free_piece(cached_piece_entry* pe)

  int block_cache::drain_piece_bufs(cached_piece_entry& p, std::vector<char*>& buf)
  {
- int const piece_size = p.storage->files()->piece_size(p.piece);
+ int const piece_size = p.storage->files().piece_size(p.piece);
  int const blocks_in_piece = (piece_size + block_size() - 1) / block_size();
  int ret = 0;
@@ -1716,7 +1716,7 @@ int block_cache::copy_from_piece(cached_piece_entry* const pe
  int const start_block = block;

  #if TORRENT_USE_ASSERTS
- int const piece_size = j->storage->files()->piece_size(j->piece);
+ int const piece_size = j->storage->files().piece_size(j->piece);
  int const blocks_in_piece = (piece_size + block_size() - 1) / block_size();
  TORRENT_PIECE_ASSERT(start_block < blocks_in_piece, pe);
  #endif
@@ -1745,7 +1745,7 @@ int block_cache::copy_from_piece(cached_piece_entry* const pe

  // make sure it didn't wrap
  TORRENT_PIECE_ASSERT(pe->refcount > 0, pe);
- int const blocks_per_piece = (j->storage->files()->piece_length() + block_size() - 1) / block_size();
+ int const blocks_per_piece = (j->storage->files().piece_length() + block_size() - 1) / block_size();
  j->d.io.ref.storage = j->storage->storage_index();
  j->d.io.ref.cookie = static_cast<int>(pe->piece) * blocks_per_piece + start_block;
  j->buffer.disk_block = bl.buf + (j->d.io.offset & (block_size() - 1));
@@ -1789,7 +1789,7 @@ int block_cache::copy_from_piece(cached_piece_entry* const pe

  void block_cache::reclaim_block(storage_interface* st, aux::block_cache_reference const& ref)
  {
- int const blocks_per_piece = (st->files()->piece_length() + block_size() - 1) / block_size();
+ int const blocks_per_piece = (st->files().piece_length() + block_size() - 1) / block_size();
  piece_index_t const piece(ref.cookie / blocks_per_piece);
  int const block(ref.cookie % blocks_per_piece);
@@ -384,7 +384,7 @@ namespace libtorrent
  // piece range
  piece_index_t const range_start((static_cast<int>(p->piece) / cont_pieces) * cont_pieces);
  piece_index_t const range_end(std::min(static_cast<int>(range_start)
- + cont_pieces, p->storage->files()->num_pieces()));
+ + cont_pieces, p->storage->files().num_pieces()));

  // look through all the pieces in this range to see if
  // they are ready to be flushed. If so, flush them all,
@@ -582,7 +582,7 @@ namespace libtorrent
  TORRENT_PIECE_ASSERT(start < end, pe);
  end = (std::min)(end, int(pe->blocks_in_piece));

- int piece_size = pe->storage->files()->piece_size(pe->piece);
+ int piece_size = pe->storage->files().piece_size(pe->piece);
  TORRENT_PIECE_ASSERT(piece_size > 0, pe);

  std::size_t iov_len = 0;
@@ -1246,7 +1246,7 @@ namespace libtorrent
  status_t disk_io_thread::do_read(disk_io_job* j, jobqueue_t& completed_jobs)
  {
  int const block_size = m_disk_cache.block_size();
- int const piece_size = j->storage->files()->piece_size(j->piece);
+ int const piece_size = j->storage->files().piece_size(j->piece);
  int const blocks_in_piece = (piece_size + block_size - 1) / block_size;
  int const iov_len = m_disk_cache.pad_job(j, blocks_in_piece
  , m_settings.get_int(settings_pack::read_cache_line_size));
@@ -1701,7 +1701,7 @@ namespace libtorrent
  {
  cached_piece_entry const& p = *i;
  int bs = m_disk_cache.block_size();
- int piece_size = p.storage->files()->piece_size(p.piece);
+ int piece_size = p.storage->files().piece_size(p.piece);
  int blocks_in_piece = (piece_size + bs - 1) / bs;
  for (int k = 0; k < blocks_in_piece; ++k)
  TORRENT_PIECE_ASSERT(p.blocks[k].buf != j->buffer.disk_block, &p);
@@ -1777,7 +1777,7 @@ namespace libtorrent
  j->flags = flags;
  j->requester = requester;

- int piece_size = j->storage->files()->piece_size(piece);
+ int piece_size = j->storage->files().piece_size(piece);

  // first check to see if the hashing is already done
  std::unique_lock<std::mutex> l(m_cache_mutex);
@@ -2024,7 +2024,7 @@ namespace libtorrent
  if (!pe->hash) return;
  if (pe->hashing) return;

- int const piece_size = pe->storage->files()->piece_size(pe->piece);
+ int const piece_size = pe->storage->files().piece_size(pe->piece);
  partial_hash* ph = pe->hash.get();

  // are we already done?
@@ -2139,7 +2139,7 @@ namespace libtorrent
  // just read straight from the file
  TORRENT_ASSERT(m_magic == 0x1337);

- int const piece_size = j->storage->files()->piece_size(j->piece);
+ int const piece_size = j->storage->files().piece_size(j->piece);
  int const block_size = m_disk_cache.block_size();
  int const blocks_in_piece = (piece_size + block_size - 1) / block_size;
  int const file_flags = file_flags_for_job(j
@@ -2186,7 +2186,7 @@ namespace libtorrent

  status_t disk_io_thread::do_hash(disk_io_job* j, jobqueue_t& /* completed_jobs */ )
  {
- int const piece_size = j->storage->files()->piece_size(j->piece);
+ int const piece_size = j->storage->files().piece_size(j->piece);
  int const file_flags = file_flags_for_job(j
  , m_settings.get_bool(settings_pack::coalesce_reads));
@@ -2488,7 +2488,7 @@ namespace libtorrent
  // torrent. The storage must create hard links (or copy) those files. If
  // any file does not exist or is inaccessible, the disk job must fail.

- TORRENT_ASSERT(j->storage->files()->piece_length() > 0);
+ TORRENT_ASSERT(j->storage->files().piece_length() > 0);

  // if we don't have any resume data, return
  // or if error is set and return value is 'no_error' or 'need_full_check'
@@ -2895,7 +2895,7 @@ namespace libtorrent
  {
  TORRENT_ASSERT(m_magic == 0x1337);

- TORRENT_ASSERT(!j->storage || j->storage->files()->is_valid());
+ TORRENT_ASSERT(!j->storage || j->storage->files().is_valid());
  TORRENT_ASSERT(j->next == nullptr);
  // if this happens, it means we started to shut down
  // the disk threads too early. We have to post all jobs
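Everything from the third hunk onward is the same mechanical call-site update: files()-> becomes files()., and the surrounding round-up arithmetic for blocks per piece is untouched. For reference, a tiny standalone sketch of that recurring computation, with made-up sizes standing in for files().piece_size(piece) and block_size():

#include <cassert>

int main()
{
	// made-up values for illustration only
	int const piece_size = 1043;
	int const block_size = 256;

	// round up so the final, partial block still counts as a whole block
	int const blocks_in_piece = (piece_size + block_size - 1) / block_size;
	assert(blocks_in_piece == 5);
	return 0;
}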