From 9a2e511ddadf703abb9c784c4ba7fd99ee152e65 Mon Sep 17 00:00:00 2001 From: Arvid Norberg Date: Sun, 14 Sep 2014 22:42:18 +0000 Subject: [PATCH] attempted fix for inconsistent debug refcounters on blocks. pinning pieces with outstanding reads in an attempt at fixing inconsistencies seen around that logic as well --- include/libtorrent/block_cache.hpp | 22 ++++++++++------------ src/block_cache.cpp | 14 ++++++++++---- 2 files changed, 20 insertions(+), 16 deletions(-) diff --git a/include/libtorrent/block_cache.hpp b/include/libtorrent/block_cache.hpp index b07fba256..ea3f6c0eb 100644 --- a/include/libtorrent/block_cache.hpp +++ b/include/libtorrent/block_cache.hpp @@ -115,7 +115,6 @@ namespace libtorrent : buf(0) , refcount(0) , dirty(false) - , hitcount(0) , pending(false) { #if TORRENT_USE_ASSERTS @@ -127,7 +126,7 @@ namespace libtorrent char* buf; - enum { max_refcount = (1 << 15) -1 }; + enum { max_refcount = (1 << 30) - 1 }; // the number of references to this buffer. These references // might be in outstanding asyncronous requests or in peer @@ -135,7 +134,7 @@ namespace libtorrent // all references are gone and refcount reaches 0. The buf // pointer in this struct doesn't count as a reference and // is always the last to be cleared - boost::uint16_t refcount:15; + boost::uint32_t refcount:30; // if this is true, this block needs to be written to // disk before it's freed. Typically all blocks in a piece @@ -143,17 +142,13 @@ namespace libtorrent // (read-ahead cache). Once blocks are written to disk, the // dirty flag is cleared and effectively turns the block // into a read cache block - boost::uint16_t dirty:1; - - // the number of times this block has been copied out of - // the cache, serving a request. - boost::uint16_t hitcount:15; + boost::uint32_t dirty:1; // pending means that this buffer has not yet been filled in // with valid data. There's an outstanding read job for this. 
// If the dirty flag is set, it means there's an outstanding // write job to write this block. - boost::uint16_t pending:1; + boost::uint32_t pending:1; #if TORRENT_USE_ASSERTS // this many of the references are held by hashing operations @@ -179,6 +174,7 @@ namespace libtorrent && num_blocks == 0 && !hashing && read_jobs.size() == 0 + && outstanding_read == 0 && (ignore_hash || !hash || hash->offset == 0); } @@ -209,9 +205,11 @@ namespace libtorrent // the pointers to the block data. If this is a ghost // cache entry, there won't be any data here - // TODO: 3 could this be a scoped_array instead? does cached_piece_entry really need to be copyable? - // cached_piece_entry does need to be copyable since it's part of a container, but it's possible - // it could be a raw pointer or boost::unique_ptr perhaps + + // TODO: 3 could this be a scoped_array instead? does cached_piece_entry + // really need to be copyable? cached_piece_entry does need to be + // copyable since it's part of a container, but it's possible it could be + // a raw pointer or boost::unique_ptr perhaps boost::shared_array blocks; // the last time a block was written to this piece diff --git a/src/block_cache.cpp b/src/block_cache.cpp index 98753db6f..336afc0ec 100644 --- a/src/block_cache.cpp +++ b/src/block_cache.cpp @@ -1281,6 +1281,8 @@ bool block_cache::inc_block_refcount(cached_piece_entry* pe, int block, int reas case ref_reading: ++pe->blocks[block].reading_count; break; case ref_flushing: ++pe->blocks[block].flushing_count; break; }; + TORRENT_ASSERT(pe->blocks[block].refcount >= pe->blocks[block].hashing_count + + pe->blocks[block].reading_count + pe->blocks[block].flushing_count); #endif return true; } @@ -1337,6 +1339,8 @@ void block_cache::dec_block_refcount(cached_piece_entry* pe, int block, int reas case ref_reading: --pe->blocks[block].reading_count; break; case ref_flushing: --pe->blocks[block].flushing_count; break; }; + TORRENT_PIECE_ASSERT(pe->blocks[block].refcount >= 
pe->blocks[block].hashing_count + + pe->blocks[block].reading_count + pe->blocks[block].flushing_count, pe); #endif } @@ -1603,6 +1607,12 @@ void block_cache::check_invariant() const } if (p.blocks[k].pending) ++num_pending; if (p.blocks[k].refcount > 0) ++num_pinned; + + TORRENT_PIECE_ASSERT(p.blocks[k].refcount >= + p.blocks[k].hashing_count + + p.blocks[k].reading_count + + p.blocks[k].flushing_count, &p); + } else { @@ -1678,9 +1688,6 @@ int block_cache::copy_from_piece(cached_piece_entry* pe, disk_io_job* j, bool ex j->d.io.ref.block = start_block; j->buffer = bl.buf + (j->d.io.offset & (block_size()-1)); ++m_send_buffer_blocks; -#if TORRENT_USE_ASSERTS - ++bl.reading_count; -#endif return j->d.io.buffer_size; } @@ -1703,7 +1710,6 @@ int block_cache::copy_from_piece(cached_piece_entry* pe, disk_io_job* j, bool ex std::memcpy(j->buffer + buffer_offset , pe->blocks[block].buf + block_offset , to_copy); - ++pe->blocks[block].hitcount; size -= to_copy; block_offset = 0; buffer_offset += to_copy;