minor block_cache cleanup. make hash be a unique_ptr (#1241)

Arvid Norberg 2016-10-21 07:37:15 -04:00 committed by GitHub
parent a12f62593e
commit 2d2874d252
3 changed files with 39 additions and 58 deletions


@@ -172,8 +172,8 @@ namespace libtorrent
 {
 cached_piece_entry();
 ~cached_piece_entry();
-cached_piece_entry(cached_piece_entry const&) = default;
-cached_piece_entry& operator=(cached_piece_entry const&) = default;
+cached_piece_entry(cached_piece_entry&&) = default;
+cached_piece_entry& operator=(cached_piece_entry&&) = default;
 bool ok_to_evict(bool ignore_hash = false) const
 {
@@ -205,21 +205,21 @@ namespace libtorrent
 // if this is set, we'll be calculating the hash
 // for this piece. This member stores the interim
 // state while we're calculating the hash.
-partial_hash* hash;
+std::unique_ptr<partial_hash> hash;
 // set to a unique identifier of a peer that last
 // requested from this piece.
-void* last_requester;
+void* last_requester = nullptr;
 // the pointers to the block data. If this is a ghost
 // cache entry, there won't be any data here
-boost::shared_array<cached_block_entry> blocks;
+std::unique_ptr<cached_block_entry[]> blocks;
 // the last time a block was written to this piece
 // plus the minimum amount of time the block is guaranteed
 // to stay in the cache
 //TODO: make this 32 bits and to count seconds since the block cache was created
-time_point expire;
+time_point expire = min_time();
 std::uint64_t piece:22;
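
For readers skimming the diff, here is a minimal sketch (simplified types, not libtorrent code) of the ownership change this hunk makes: an owning raw pointer that needed a manual delete becomes a std::unique_ptr member with an in-class initializer, and because unique_ptr (including the array form used for blocks) is move-only, the defaulted copy operations above give way to defaulted move operations.

    #include <memory>

    struct partial_hash_stub {};  // stand-in for libtorrent's partial_hash

    struct entry_sketch
    {
        // unique_ptr members suppress the implicit copy operations, so the
        // move operations are defaulted explicitly; no user-written
        // destructor or "delete hash" is needed any more.
        entry_sketch() = default;
        entry_sketch(entry_sketch&&) = default;
        entry_sketch& operator=(entry_sketch&&) = default;

        std::unique_ptr<partial_hash_stub> hash;  // empty until hashing starts
        std::unique_ptr<int[]> blocks;            // array form: delete[] is automatic
        void* last_requester = nullptr;           // in-class initializer
    };
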
@@ -238,21 +238,21 @@ namespace libtorrent
 // while we have an outstanding async hash operation
 // working on this piece, 'hashing' is set to 1
 // When the operation returns, this is set to 0.
-std::uint32_t hashing:1;
+std::uint16_t hashing:1;
 // if we've completed at least one hash job on this
 // piece, and returned it. This is set to one
-std::uint32_t hashing_done:1;
+std::uint16_t hashing_done:1;
 // if this is true, whenever refcount hits 0,
 // this piece should be deleted
-std::uint32_t marked_for_deletion:1;
+std::uint16_t marked_for_deletion:1;
 // this is set to true once we flush blocks past
 // the hash cursor. Once this happens, there's
 // no point in keeping cache blocks around for
 // it in avoid_readback mode
-std::uint32_t need_readback:1;
+std::uint16_t need_readback:1;
 // indicates which LRU list this piece is chained into
 enum cache_state_t
@@ -291,17 +291,17 @@ namespace libtorrent
 num_lrus
 };
-std::uint32_t cache_state:3;
+std::uint16_t cache_state:3;
 // this is the number of threads that are currently holding
 // a reference to this piece. A piece may not be removed from
 // the cache while this is > 0
-std::uint32_t piece_refcount:7;
+std::uint16_t piece_refcount:7;
 // if this is set to one, it means there is an outstanding
 // flush_hashed job for this piece, and there's no need to
 // issue another one.
-std::uint32_t outstanding_flush:1;
+std::uint16_t outstanding_flush:1;
 // as long as there is a read operation outstanding on this
 // piece, this is set to 1. Otherwise 0.
@@ -309,10 +309,10 @@
 // the same blocks at the same time. If a new read job is
 // added when this is 1, that new job should be hung on the
 // read job queue (read_jobs).
-std::uint32_t outstanding_read:1;
+std::uint16_t outstanding_read:1;
 // the number of blocks that have >= 1 refcount
-std::uint32_t pinned:16;
+std::uint16_t pinned = 0;
 // ---- 32 bit boundary ---
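
As an aside, a small sketch (layout simplified and hypothetical, not the real cached_piece_entry) of why std::uint16_t is wide enough here: once pinned stops being a 16-bit bit-field and becomes a plain 16-bit counter, the flag and counter bit-fields shown in these hunks add up to exactly 16 bits, so each fits comfortably in a 16-bit base type. Exact packing is implementation-defined, but mainstream compilers lay this out as two 16-bit words.

    #include <cstdint>
    #include <iostream>

    struct flags_sketch
    {
        // 1+1+1+1 flag bits, a 3-bit state, a 7-bit refcount and 1+1 more
        // flags: 16 bits in total, so uint16_t is a wide enough base type.
        std::uint16_t hashing:1;
        std::uint16_t hashing_done:1;
        std::uint16_t marked_for_deletion:1;
        std::uint16_t need_readback:1;
        std::uint16_t cache_state:3;
        std::uint16_t piece_refcount:7;
        std::uint16_t outstanding_flush:1;
        std::uint16_t outstanding_read:1;
        std::uint16_t pinned = 0;  // no longer a bit-field; a full 16-bit counter
    };

    int main()
    {
        std::cout << sizeof(flags_sketch) << "\n";  // typically 4 bytes
    }
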


@@ -282,7 +282,7 @@ static_assert(sizeof(job_action_name)/sizeof(job_action_name[0])
 "hash_passed: %d\nread_jobs: %d\njobs: %d\n"
 "piece_log:\n"
 , int(pe->piece), pe->refcount, pe->piece_refcount, int(pe->num_blocks)
-, int(pe->hashing), static_cast<void*>(pe->hash), pe->hash ? pe->hash->offset : -1
+, int(pe->hashing), static_cast<void*>(pe->hash.get()), pe->hash ? pe->hash->offset : -1
 , int(pe->cache_state)
 , pe->cache_state < cached_piece_entry::num_lrus ? cache_state[pe->cache_state] : ""
 , int(pe->outstanding_flush), int(pe->piece), int(pe->num_dirty)
@@ -307,12 +307,7 @@ static_assert(sizeof(job_action_name)/sizeof(job_action_name[0])
 #endif
 cached_piece_entry::cached_piece_entry()
-: storage()
-, hash(nullptr)
-, last_requester(nullptr)
-, blocks()
-, expire(min_time())
-, piece(0)
+: piece(0)
 , num_dirty(0)
 , num_blocks(0)
 , blocks_in_piece(0)
@@ -324,7 +319,6 @@ cached_piece_entry::cached_piece_entry()
 , piece_refcount(0)
 , outstanding_flush(0)
 , outstanding_read(0)
-, pinned(0)
 {}
 cached_piece_entry::~cached_piece_entry()
@@ -346,7 +340,6 @@ cached_piece_entry::~cached_piece_entry()
 }
 in_use = false;
 #endif
-delete hash;
 }
 block_cache::block_cache(int block_size, io_service& ios
@@ -509,7 +502,7 @@ void block_cache::update_cache_state(cached_piece_entry* p)
 {
 int state = p->cache_state;
 int desired_state = p->cache_state;
-if (p->num_dirty > 0 || p->hash != nullptr)
+if (p->num_dirty > 0 || p->hash)
 desired_state = cached_piece_entry::write_lru;
 else if (p->cache_state == cached_piece_entry::write_lru)
 desired_state = cached_piece_entry::read_lru1;
@@ -647,7 +640,7 @@ cached_piece_entry* block_cache::allocate_piece(disk_io_job const* j, int cache_
 pe.blocks.reset(new (std::nothrow) cached_block_entry[blocks_in_piece]);
 if (!pe.blocks) return nullptr;
 pe.last_requester = j->requester;
-p = const_cast<cached_piece_entry*>(&*m_pieces.insert(pe).first);
+p = const_cast<cached_piece_entry*>(&*m_pieces.insert(std::move(pe)).first);
 j->storage->add_piece(p);
 p->cache_state = cache_state;
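
A brief note on the pe.blocks line above (sketch with simplified types): the array form of std::unique_ptr releases with delete[], and because the allocation uses new (std::nothrow), a failure yields an empty pointer instead of a std::bad_alloc exception, which is exactly what the if (!pe.blocks) check relies on.

    #include <memory>
    #include <new>

    struct block_stub { char* buf = nullptr; };

    bool allocate_blocks(std::unique_ptr<block_stub[]>& blocks, int blocks_in_piece)
    {
        // nothrow new: returns nullptr on failure instead of throwing
        blocks.reset(new (std::nothrow) block_stub[blocks_in_piece]);
        if (!blocks) return false;   // allocation failed, pointer stays empty

        blocks[0].buf = nullptr;     // operator[] comes from the array specialization
        return true;                 // delete[] runs when blocks is reset or destroyed
    }
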
@@ -812,8 +805,8 @@ cached_piece_entry* block_cache::add_dirty_block(disk_io_job* j)
 TORRENT_PIECE_ASSERT(j->piece == pe->piece, pe);
 pe->jobs.push_back(j);
-if (block == 0 && pe->hash == nullptr && pe->hashing_done == false)
-pe->hash = new partial_hash;
+if (block == 0 && !pe->hash && pe->hashing_done == false)
+pe->hash.reset(new partial_hash);
 update_cache_state(pe);
@@ -939,9 +932,6 @@ bool block_cache::evict_piece(cached_piece_entry* pe, tailqueue<disk_io_job>& jo
 if (pe->ok_to_evict(true))
 {
-delete pe->hash;
-pe->hash = nullptr;
 // append will move the items from pe->jobs onto the end of jobs
 jobs.append(pe->jobs);
 TORRENT_ASSERT(pe->jobs.size() == 0);
@@ -984,11 +974,10 @@ void block_cache::erase_piece(cached_piece_entry* pe)
 TORRENT_PIECE_ASSERT(pe->cache_state < cached_piece_entry::num_lrus, pe);
 TORRENT_PIECE_ASSERT(pe->jobs.empty(), pe);
 linked_list<cached_piece_entry>* lru_list = &m_lru[pe->cache_state];
-if (pe->hash != nullptr)
+if (pe->hash)
 {
 TORRENT_PIECE_ASSERT(pe->hash->offset == 0, pe);
-delete pe->hash;
-pe->hash = nullptr;
+pe->hash.reset();
 }
 if (pe->cache_state != cached_piece_entry::read_lru1_ghost
 && pe->cache_state != cached_piece_entry::read_lru2_ghost)
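
The hunks in this file lean on two idioms worth spelling out; the sketch below (simplified container and types, not the real block_cache interface) shows both: a piece with a unique_ptr member is move-only, so it has to be moved into its container, and every delete/nullptr pair collapses into a single unique_ptr::reset().

    #include <memory>
    #include <set>

    struct partial_hash_stub { int offset = 0; };

    struct piece_stub
    {
        int piece = 0;
        std::unique_ptr<partial_hash_stub> hash;
        bool operator<(piece_stub const& rhs) const { return piece < rhs.piece; }
    };

    int main()
    {
        std::set<piece_stub> pieces;

        piece_stub pe;
        pe.piece = 42;
        pe.hash.reset(new partial_hash_stub);   // was: pe.hash = new partial_hash

        partial_hash_stub* ph = pe.hash.get();  // borrow a raw pointer; pe keeps ownership
        ph->offset = 16 * 1024;

        // move-only element: insert(pe) would not compile, insert(std::move(pe)) does
        auto it = pieces.insert(std::move(pe)).first;

        // was: "delete hash; hash = nullptr;" -- reset() does both
        const_cast<piece_stub&>(*it).hash.reset();
    }
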


@@ -1498,7 +1498,7 @@ namespace libtorrent
 && pe->hash == nullptr
 && !m_settings.get_bool(settings_pack::disable_hash_checks))
 {
-pe->hash = new partial_hash;
+pe->hash.reset(new partial_hash);
 m_disk_cache.update_cache_state(pe);
 }
@@ -1764,8 +1764,7 @@ namespace libtorrent
 sha1_hash result = pe->hash->h.final();
 memcpy(j->d.piece_hash, &result[0], 20);
-delete pe->hash;
-pe->hash = nullptr;
+pe->hash.reset();
 if (pe->cache_state != cached_piece_entry::volatile_read_lru)
 pe->hashing_done = 1;
@@ -2034,8 +2033,7 @@ namespace libtorrent
 if (pe == nullptr) return;
 TORRENT_PIECE_ASSERT(pe->hashing == false, pe);
 pe->hashing_done = 0;
-delete pe->hash;
-pe->hash = nullptr;
+pe->hash.reset();
 // evict_piece returns true if the piece was in fact
 // evicted. A piece may fail to be evicted if there
@@ -2056,7 +2054,7 @@ namespace libtorrent
 if (pe->hashing) return;
 int piece_size = pe->storage->files()->piece_size(pe->piece);
-partial_hash* ph = pe->hash;
+partial_hash* ph = pe->hash.get();
 // are we already done?
 if (ph->offset >= piece_size) return;
@@ -2145,8 +2143,7 @@ namespace libtorrent
 hj->ret = 0;
 }
-delete pe->hash;
-pe->hash = nullptr;
+pe->hash.reset();
 if (pe->cache_state != cached_piece_entry::volatile_read_lru)
 pe->hashing_done = 1;
 #if TORRENT_USE_ASSERTS
@@ -2240,8 +2237,7 @@ namespace libtorrent
 DLOG("do_hash: (%d) (already done)\n", int(pe->piece));
 sha1_hash piece_hash = pe->hash->h.final();
 std::memcpy(j->d.piece_hash, piece_hash.data(), 20);
-delete pe->hash;
-pe->hash = nullptr;
+pe->hash.reset();
 if (pe->cache_state != cached_piece_entry::volatile_read_lru)
 pe->hashing_done = 1;
 #if TORRENT_USE_ASSERTS
@@ -2288,12 +2284,12 @@ namespace libtorrent
 piece_refcount_holder refcount_holder(pe);
-if (pe->hash == nullptr)
+if (!pe->hash)
 {
 pe->hashing_done = 0;
-pe->hash = new partial_hash;
+pe->hash.reset(new partial_hash);
 }
-partial_hash* ph = pe->hash;
+partial_hash* ph = pe->hash.get();
 int block_size = m_disk_cache.block_size();
 int blocks_in_piece = (piece_size + block_size - 1) / block_size;
@@ -2355,8 +2351,7 @@ namespace libtorrent
 refcount_holder.release();
 pe->hashing = false;
-delete pe->hash;
-pe->hash = nullptr;
+pe->hash.reset();
 m_disk_cache.maybe_free_piece(pe);
@@ -2429,8 +2424,7 @@ namespace libtorrent
 sha1_hash piece_hash = ph->h.final();
 std::memcpy(j->d.piece_hash, piece_hash.data(), 20);
-delete pe->hash;
-pe->hash = nullptr;
+pe->hash.reset();
 if (pe->cache_state != cached_piece_entry::volatile_read_lru)
 pe->hashing_done = 1;
 #if TORRENT_USE_ASSERTS
@@ -2832,7 +2826,7 @@ namespace libtorrent
 {
 if (pe->hash == nullptr && !m_settings.get_bool(settings_pack::disable_hash_checks))
 {
-pe->hash = new partial_hash;
+pe->hash.reset(new partial_hash);
 m_disk_cache.update_cache_state(pe);
 }
@@ -2883,11 +2877,10 @@ namespace libtorrent
 add_torrent_params* params = reinterpret_cast<add_torrent_params*>(j->requester);
 std::string filename = resolve_file_url(params->url);
-torrent_info* t = new torrent_info(filename, j->error.ec);
+std::unique_ptr<torrent_info> t{new torrent_info(filename, j->error.ec)};
 if (j->error.ec)
 {
 j->buffer.torrent_file = nullptr;
-delete t;
 }
 else
 {
@@ -2895,7 +2888,7 @@ namespace libtorrent
 // than to have it be done in the network thread. It has enough to
 // do as it is.
 std::string cert = t->ssl_cert();
-j->buffer.torrent_file = t;
+j->buffer.torrent_file = t.release();
 }
 return 0;
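
The torrent_info hunk above follows a pattern worth noting; here is a hedged sketch (names are illustrative, not the disk_io_thread API): hold the freshly allocated object in a unique_ptr so the error path no longer needs an explicit delete, and call release() only at the point where ownership is handed off to a raw-pointer field that outlives the scope.

    #include <memory>
    #include <string>

    struct info_stub                // stand-in for torrent_info
    {
        explicit info_stub(std::string file) : filename(std::move(file)) {}
        std::string filename;
        bool ok = true;
    };

    struct job_stub { info_stub* torrent_file = nullptr; };

    void load_torrent(job_stub& j, std::string const& filename)
    {
        std::unique_ptr<info_stub> t{new info_stub(filename)};

        if (!t->ok)
        {
            // error path: the old "delete t;" disappears, the unique_ptr
            // cleans up when it goes out of scope
            j.torrent_file = nullptr;
            return;
        }

        // success path: release() hands the raw pointer over without
        // destroying the object; the receiver now owns it
        j.torrent_file = t.release();
    }
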
@@ -2912,8 +2905,7 @@ namespace libtorrent
 if (pe == nullptr) return 0;
 TORRENT_PIECE_ASSERT(pe->hashing == false, pe);
 pe->hashing_done = 0;
-delete pe->hash;
-pe->hash = nullptr;
+pe->hash.reset();
 pe->hashing_done = false;
 #if TORRENT_USE_ASSERTS
@@ -3464,7 +3456,7 @@ namespace libtorrent
 && pe->hash == nullptr
 && !m_settings.get_bool(settings_pack::disable_hash_checks))
 {
-pe->hash = new partial_hash;
+pe->hash.reset(new partial_hash);
 m_disk_cache.update_cache_state(pe);
 }