fix issue where pieces would be hashed despite disable_hash_checks being set
This commit is contained in:
parent
d1e0e8701b
commit
d08fa164d2
|
@ -1,5 +1,6 @@
|
||||||
1.2.2 release
|
1.2.2 release
|
||||||
|
|
||||||
|
* fix cases where the disable_hash_checks setting was not honored
|
||||||
* fix updating of is_finished torrent status, when changing piece priorities
|
* fix updating of is_finished torrent status, when changing piece priorities
|
||||||
* fix regression in &left= reporting when adding a seeding torrent
|
* fix regression in &left= reporting when adding a seeding torrent
|
||||||
* fix integer overflow in http parser
|
* fix integer overflow in http parser
|
||||||
|
|
|
@ -450,7 +450,7 @@ namespace aux {
|
||||||
// adds a block to the cache, marks it as dirty and
|
// adds a block to the cache, marks it as dirty and
|
||||||
// associates the job with it. When the block is
|
// associates the job with it. When the block is
|
||||||
// flushed, the callback is posted
|
// flushed, the callback is posted
|
||||||
cached_piece_entry* add_dirty_block(disk_io_job* j);
|
cached_piece_entry* add_dirty_block(disk_io_job* j, bool add_hasher);
|
||||||
|
|
||||||
enum { blocks_inc_refcount = 1 };
|
enum { blocks_inc_refcount = 1 };
|
||||||
void insert_blocks(cached_piece_entry* pe, int block, span<iovec_t const> iov
|
void insert_blocks(cached_piece_entry* pe, int block, span<iovec_t const> iov
|
||||||
|
|
|
@ -695,7 +695,7 @@ cached_piece_entry* block_cache::allocate_piece(disk_io_job const* j, std::uint1
|
||||||
return p;
|
return p;
|
||||||
}
|
}
|
||||||
|
|
||||||
cached_piece_entry* block_cache::add_dirty_block(disk_io_job* j)
|
cached_piece_entry* block_cache::add_dirty_block(disk_io_job* j, bool const add_hasher)
|
||||||
{
|
{
|
||||||
#ifdef TORRENT_EXPENSIVE_INVARIANT_CHECKS
|
#ifdef TORRENT_EXPENSIVE_INVARIANT_CHECKS
|
||||||
INVARIANT_CHECK;
|
INVARIANT_CHECK;
|
||||||
|
@ -755,7 +755,7 @@ cached_piece_entry* block_cache::add_dirty_block(disk_io_job* j)
|
||||||
TORRENT_PIECE_ASSERT(j->piece == pe->piece, pe);
|
TORRENT_PIECE_ASSERT(j->piece == pe->piece, pe);
|
||||||
pe->jobs.push_back(j);
|
pe->jobs.push_back(j);
|
||||||
|
|
||||||
if (block == 0 && !pe->hash && pe->hashing_done == false)
|
if (block == 0 && !pe->hash && pe->hashing_done == false && add_hasher)
|
||||||
pe->hash.reset(new partial_hash);
|
pe->hash.reset(new partial_hash);
|
||||||
|
|
||||||
update_cache_state(pe);
|
update_cache_state(pe);
|
||||||
|
|
|
@ -1566,7 +1566,8 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
|
||||||
return status_t::fatal_disk_error;
|
return status_t::fatal_disk_error;
|
||||||
}
|
}
|
||||||
|
|
||||||
pe = m_disk_cache.add_dirty_block(j);
|
pe = m_disk_cache.add_dirty_block(j
|
||||||
|
, !m_settings.get_bool(settings_pack::disable_hash_checks));
|
||||||
|
|
||||||
if (pe)
|
if (pe)
|
||||||
{
|
{
|
||||||
|
@ -1786,7 +1787,8 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
|
||||||
std::unique_lock<std::mutex> l(m_cache_mutex);
|
std::unique_lock<std::mutex> l(m_cache_mutex);
|
||||||
// if we succeed in adding the block to the cache, the job will
|
// if we succeed in adding the block to the cache, the job will
|
||||||
// be added along with it. we may not free j if so
|
// be added along with it. we may not free j if so
|
||||||
cached_piece_entry* dpe = m_disk_cache.add_dirty_block(j);
|
cached_piece_entry* dpe = m_disk_cache.add_dirty_block(j
|
||||||
|
, !m_settings.get_bool(settings_pack::disable_hash_checks));
|
||||||
|
|
||||||
if (dpe != nullptr)
|
if (dpe != nullptr)
|
||||||
{
|
{
|
||||||
|
@ -2187,6 +2189,9 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
|
||||||
|
|
||||||
status_t disk_io_thread::do_hash(disk_io_job* j, jobqueue_t& /* completed_jobs */ )
|
status_t disk_io_thread::do_hash(disk_io_job* j, jobqueue_t& /* completed_jobs */ )
|
||||||
{
|
{
|
||||||
|
if (m_settings.get_bool(settings_pack::disable_hash_checks))
|
||||||
|
return status_t::no_error;
|
||||||
|
|
||||||
int const piece_size = j->storage->files().piece_size(j->piece);
|
int const piece_size = j->storage->files().piece_size(j->piece);
|
||||||
open_mode_t const file_flags = file_flags_for_job(j
|
open_mode_t const file_flags = file_flags_for_job(j
|
||||||
, m_settings.get_bool(settings_pack::coalesce_reads));
|
, m_settings.get_bool(settings_pack::coalesce_reads));
|
||||||
|
@ -3396,7 +3401,8 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
cached_piece_entry* pe = m_disk_cache.add_dirty_block(j);
|
cached_piece_entry* pe = m_disk_cache.add_dirty_block(j
|
||||||
|
, !m_settings.get_bool(settings_pack::disable_hash_checks));
|
||||||
|
|
||||||
if (pe == nullptr)
|
if (pe == nullptr)
|
||||||
{
|
{
|
||||||
|
|
|
@ -145,7 +145,7 @@ static void nop() {}
|
||||||
wj.d.io.buffer_size = 0x4000; \
|
wj.d.io.buffer_size = 0x4000; \
|
||||||
wj.piece = piece_index_t(p); \
|
wj.piece = piece_index_t(p); \
|
||||||
wj.argument = disk_buffer_holder(alloc, bc.allocate_buffer("write-test"), 0x4000); \
|
wj.argument = disk_buffer_holder(alloc, bc.allocate_buffer("write-test"), 0x4000); \
|
||||||
pe = bc.add_dirty_block(&wj)
|
pe = bc.add_dirty_block(&wj, true)
|
||||||
|
|
||||||
#define READ_BLOCK(p, b, r) \
|
#define READ_BLOCK(p, b, r) \
|
||||||
rj.action = job_action_t::read; \
|
rj.action = job_action_t::read; \
|
||||||
|
|
Loading…
Reference in New Issue