fix coverity warnings
commit 0d72163a56
parent a1dbba7cf4
@@ -265,8 +265,8 @@ const char* const job_action_name[] =
 		"piece_log:\n"
 		, int(pe->piece), pe->refcount, pe->piece_refcount, int(pe->num_blocks)
 		, int(pe->hashing), pe->hash, pe->hash ? pe->hash->offset : -1
-		, int(pe->cache_state), pe->cache_state >= 0 && pe->cache_state
-			< cached_piece_entry::num_lrus ? cache_state[pe->cache_state] : ""
+		, int(pe->cache_state)
+		, pe->cache_state < cached_piece_entry::num_lrus ? cache_state[pe->cache_state] : ""
 		, int(pe->outstanding_flush), int(pe->piece), int(pe->num_dirty)
 		, int(pe->num_blocks), int(pe->blocks_in_piece), int(pe->hashing_done)
 		, int(pe->marked_for_deletion), int(pe->need_readback), pe->hash_passes
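The change above splits the cache-state format arguments so that the `pe->cache_state >= 0` half of the condition goes away: `cache_state` is an unsigned field, so that test is always true, which is the constant-expression result Coverity reports; the `< cached_piece_entry::num_lrus` bound is the only check that does anything. A minimal stand-alone sketch of the pattern (the `entry` type below is a made-up stand-in, not libtorrent's `cached_piece_entry`):

#include <cstdint>
#include <cstdio>

// hypothetical stand-in for the cache entry; only the unsigned bitfield
// matters for the warning being illustrated
struct entry
{
	enum { num_lrus = 8 };
	std::uint16_t cache_state:3;
};

// maps the state to a printable name, the way the piece log does
const char* state_name(entry const& e)
{
	// "e.cache_state >= 0" would always be true here (unsigned), which is
	// what Coverity flags; the upper-bound check alone is sufficient
	return e.cache_state < entry::num_lrus ? "valid" : "";
}

int main()
{
	entry e;
	e.cache_state = 2;
	std::printf("%s\n", state_name(e));
	return 0;
}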
@@ -223,7 +223,7 @@ namespace libtorrent
 		TORRENT_ASSERT(block_index < (std::numeric_limits<boost::uint16_t>::max)());
 		ret.info_idx = block_index;
 		TORRENT_ASSERT(int(ret.info_idx) * m_blocks_per_piece
-			+ m_blocks_per_piece <= m_block_info.size());
+			+ m_blocks_per_piece <= int(m_block_info.size()));
 
 #ifdef TORRENT_USE_VALGRIND
 		VALGRIND_CHECK_VALUE_IS_DEFINED(piece);
@@ -343,7 +343,7 @@ namespace libtorrent
 		// TORRENT_ASSERT(dp.finished + dp.writing >= next.finished + next.writing);
 		TORRENT_ASSERT(dp.index < next.index);
 		TORRENT_ASSERT(int(dp.info_idx) * m_blocks_per_piece
-			+ m_blocks_per_piece <= m_block_info.size());
+			+ m_blocks_per_piece <= int(m_block_info.size()));
 		block_info const* info = blocks_for_piece(dp);
 		for (int k = 0; k < m_blocks_per_piece; ++k)
 		{
@@ -476,7 +476,7 @@ namespace libtorrent
 		// TORRENT_ASSERT(dp.finished + dp.writing >= next.finished + next.writing);
 		TORRENT_ASSERT(dp.index < next.index);
 		TORRENT_ASSERT(int(dp.info_idx) * m_blocks_per_piece
-			+ m_blocks_per_piece <= m_block_info.size());
+			+ m_blocks_per_piece <= int(m_block_info.size()));
 #if TORRENT_USE_ASSERTS
 		block_info const* info = blocks_for_piece(dp);
 		for (int k = 0; k < m_blocks_per_piece; ++k)
@@ -1109,7 +1109,7 @@ namespace libtorrent
 
 		TORRENT_ASSERT(i != m_downloads[download_state].end());
 		TORRENT_ASSERT(int(i->info_idx) * m_blocks_per_piece
-			+ m_blocks_per_piece <= m_block_info.size());
+			+ m_blocks_per_piece <= int(m_block_info.size()));
 
 		i->locked = false;
 
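The four piece_picker hunks above are the same one-line fix repeated: the asserted expression multiplies and adds plain `int`s on the left while `m_block_info.size()` returns an unsigned type, so the comparison implicitly converts the signed side — the sign-compare mismatch Coverity flags. Casting the size to `int` keeps the whole comparison in signed arithmetic. A small self-contained sketch, with made-up values standing in for `info_idx`, `m_blocks_per_piece` and `m_block_info`:

#include <cassert>
#include <vector>

int main()
{
	// made-up values; only the comparison below is the point
	std::vector<int> block_info(64);
	int const info_idx = 3;
	int const blocks_per_piece = 16;

	// before: mixed signed/unsigned comparison -- the signed left-hand
	// side is implicitly converted to the unsigned size type, which is
	// what the analyzer warns about
	// assert(info_idx * blocks_per_piece + blocks_per_piece <= block_info.size());

	// after: cast the size so the comparison stays in signed arithmetic
	assert(info_idx * blocks_per_piece + blocks_per_piece
		<= int(block_info.size()));
	return 0;
}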
@@ -1084,43 +1084,40 @@ namespace libtorrent
 		piece_block block_finished(j->piece, j->d.io.offset / block_size());
 
 		// we failed to write j->piece to disk tell the piece picker
 		if (j->piece >= 0)
-		// this will block any other peer from issuing requests
-		// to this piece, until we've cleared it.
-		if (j->error.ec == asio::error::operation_aborted)
-		{
-			if (has_picker())
-				picker().mark_as_canceled(block_finished, NULL);
-		}
-		else
-		{
-			// if any other peer has a busy request to this block, we need
-			// to cancel it too
-			cancel_block(block_finished);
-			if (has_picker())
-				picker().write_failed(block_finished);
-
-			if (m_storage)
-			{
-				if (has_picker())
-					picker().mark_as_canceled(block_finished, NULL);
-				// when this returns, all outstanding jobs to the
-				// piece are done, and we can restore it, allowing
-				// new requests to it
-				m_ses.disk_thread().async_clear_piece(m_storage.get(), j->piece
-					, boost::bind(&torrent::on_piece_fail_sync, shared_from_this(), _1, block_finished));
-			}
-			else
-			{
-				// is m_abort true? if so, we should probably just
-				// exit this function early, no need to keep the picker
-				// state up-to-date, right?
-				disk_io_job sj;
-				sj.piece = j->piece;
-				on_piece_fail_sync(&sj, block_finished);
-			}
-			update_gauge();
-		}
+		{
+			// this will block any other peer from issuing requests
+			// to this piece, until we've cleared it.
+			if (j->error.ec == asio::error::operation_aborted)
+			{
+				if (has_picker())
+					picker().mark_as_canceled(block_finished, NULL);
+			}
+			else
+			{
+				// if any other peer has a busy request to this block, we need
+				// to cancel it too
+				cancel_block(block_finished);
+				if (has_picker())
+					picker().write_failed(block_finished);
+
+				if (m_storage)
+				{
+					// when this returns, all outstanding jobs to the
+					// piece are done, and we can restore it, allowing
+					// new requests to it
+					m_ses.disk_thread().async_clear_piece(m_storage.get(), j->piece
+						, boost::bind(&torrent::on_piece_fail_sync, shared_from_this(), _1, block_finished));
+				}
+				else
+				{
+					// is m_abort true? if so, we should probably just
+					// exit this function early, no need to keep the picker
+					// state up-to-date, right?
+					disk_io_job sj;
+					sj.piece = j->piece;
+					on_piece_fail_sync(&sj, block_finished);
+				}
+				update_gauge();
+			}
+		}
 
 		if (j->error.ec == error_code(boost::system::errc::not_enough_memory, generic_category()))
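The hunk above (presumably the failed-write path in torrent.cpp) is a control-flow cleanup rather than a cast: as far as the hunk can be reconstructed, the multi-statement `if`/`else` chain under `if (j->piece >= 0)` was not wrapped in braces — the kind of nesting static analysis flags because an `else` can bind to a different `if` than the indentation suggests — and the new version braces the block explicitly (and, as reconstructed, drops a duplicated `picker().mark_as_canceled()` call from the `if (m_storage)` branch). A minimal illustration of the hazard and the braced form; the function and its arguments here are invented, not libtorrent code:

#include <cstdio>

// invented example; it only illustrates the brace/nesting pattern
void handle_write_failure(int piece, bool aborted)
{
	// Without braces, an outer "if" guarding a nested if/else chain is easy
	// to misread: the "else" binds to the nearest "if", not to the one the
	// indentation implies, which is what the analyzer warns about.
	if (piece >= 0)
	{
		// explicit braces make the intended nesting unambiguous
		if (aborted)
			std::printf("piece %d: write canceled\n", piece);
		else
			std::printf("piece %d: write failed, clearing piece\n", piece);
	}
}

int main()
{
	handle_write_failure(7, false);
	handle_write_failure(-1, true);
	return 0;
}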