fix indentation

This commit is contained in:
Steven Siloti 2016-03-12 19:45:20 -08:00
parent 826bbe35ee
commit e8f93df700
1 changed file with 129 additions and 129 deletions

View File

@ -1458,57 +1458,57 @@ namespace libtorrent
INVARIANT_CHECK; INVARIANT_CHECK;
TORRENT_ASSERT(j->d.io.buffer_size <= m_disk_cache.block_size()); TORRENT_ASSERT(j->d.io.buffer_size <= m_disk_cache.block_size());
mutex::scoped_lock l(m_cache_mutex); mutex::scoped_lock l(m_cache_mutex);
cached_piece_entry* pe = m_disk_cache.find_piece(j); cached_piece_entry* pe = m_disk_cache.find_piece(j);
if (pe && pe->hashing_done) if (pe && pe->hashing_done)
{ {
#if TORRENT_USE_ASSERTS #if TORRENT_USE_ASSERTS
print_piece_log(pe->piece_log); print_piece_log(pe->piece_log);
#endif #endif
TORRENT_ASSERT(pe->blocks[j->d.io.offset / 16 / 1024].buf != j->buffer.disk_block); TORRENT_ASSERT(pe->blocks[j->d.io.offset / 16 / 1024].buf != j->buffer.disk_block);
TORRENT_ASSERT(pe->blocks[j->d.io.offset / 16 / 1024].buf != NULL); TORRENT_ASSERT(pe->blocks[j->d.io.offset / 16 / 1024].buf != NULL);
j->error.ec = error::operation_aborted; j->error.ec = error::operation_aborted;
j->error.operation = storage_error::write; j->error.operation = storage_error::write;
return -1; return -1;
} }
pe = m_disk_cache.add_dirty_block(j); pe = m_disk_cache.add_dirty_block(j);
if (pe) if (pe)
{ {
#if TORRENT_USE_ASSERTS #if TORRENT_USE_ASSERTS
pe->piece_log.push_back(piece_log_t(j->action, j->d.io.offset / 0x4000)); pe->piece_log.push_back(piece_log_t(j->action, j->d.io.offset / 0x4000));
#endif #endif
if (!pe->hashing_done if (!pe->hashing_done
&& pe->hash == 0 && pe->hash == 0
&& !m_settings.get_bool(settings_pack::disable_hash_checks)) && !m_settings.get_bool(settings_pack::disable_hash_checks))
{ {
pe->hash = new partial_hash; pe->hash = new partial_hash;
m_disk_cache.update_cache_state(pe); m_disk_cache.update_cache_state(pe);
}
TORRENT_PIECE_ASSERT(pe->cache_state <= cached_piece_entry::read_lru1 || pe->cache_state == cached_piece_entry::read_lru2, pe);
++pe->piece_refcount;
// see if we can progress the hash cursor with this new block
kick_hasher(pe, l);
TORRENT_PIECE_ASSERT(pe->cache_state <= cached_piece_entry::read_lru1 || pe->cache_state == cached_piece_entry::read_lru2, pe);
// flushes the piece to disk in case
// it satisfies the condition for a write
// piece to be flushed
try_flush_hashed(pe, m_settings.get_int(
settings_pack::write_cache_line_size), completed_jobs, l);
--pe->piece_refcount;
m_disk_cache.maybe_free_piece(pe);
return defer_handler;
} }
TORRENT_PIECE_ASSERT(pe->cache_state <= cached_piece_entry::read_lru1 || pe->cache_state == cached_piece_entry::read_lru2, pe);
++pe->piece_refcount;
// see if we can progress the hash cursor with this new block
kick_hasher(pe, l);
TORRENT_PIECE_ASSERT(pe->cache_state <= cached_piece_entry::read_lru1 || pe->cache_state == cached_piece_entry::read_lru2, pe);
// flushes the piece to disk in case
// it satisfies the condition for a write
// piece to be flushed
try_flush_hashed(pe, m_settings.get_int(
settings_pack::write_cache_line_size), completed_jobs, l);
--pe->piece_refcount;
m_disk_cache.maybe_free_piece(pe);
return defer_handler;
}
// ok, we should just perform this job right now. // ok, we should just perform this job right now.
return do_uncached_write(j); return do_uncached_write(j);
} }
@ -1570,65 +1570,65 @@ namespace libtorrent
{ {
TORRENT_ASSERT(j->action == disk_io_job::read); TORRENT_ASSERT(j->action == disk_io_job::read);
int ret = m_disk_cache.try_read(j); int ret = m_disk_cache.try_read(j);
if (ret >= 0) if (ret >= 0)
{ {
m_stats_counters.inc_stats_counter(counters::num_blocks_cache_hits); m_stats_counters.inc_stats_counter(counters::num_blocks_cache_hits);
DLOG("do_read: cache hit\n"); DLOG("do_read: cache hit\n");
j->flags |= disk_io_job::cache_hit; j->flags |= disk_io_job::cache_hit;
j->ret = ret; j->ret = ret;
return 0; return 0;
} }
else if (ret == -2) else if (ret == -2)
{ {
j->error.ec = error::no_memory; j->error.ec = error::no_memory;
j->error.operation = storage_error::alloc_cache_piece; j->error.operation = storage_error::alloc_cache_piece;
j->ret = disk_io_job::operation_failed; j->ret = disk_io_job::operation_failed;
return 0; return 0;
} }
if (check_fence && j->storage->is_blocked(j)) if (check_fence && j->storage->is_blocked(j))
{ {
// this means the job was queued up inside storage // this means the job was queued up inside storage
m_stats_counters.inc_stats_counter(counters::blocked_disk_jobs); m_stats_counters.inc_stats_counter(counters::blocked_disk_jobs);
DLOG("blocked job: %s (torrent: %d total: %d)\n" DLOG("blocked job: %s (torrent: %d total: %d)\n"
, job_action_name[j->action], j->storage ? j->storage->num_blocked() : 0 , job_action_name[j->action], j->storage ? j->storage->num_blocked() : 0
, int(m_stats_counters[counters::blocked_disk_jobs])); , int(m_stats_counters[counters::blocked_disk_jobs]));
return 2; return 2;
} }
if (!m_settings.get_bool(settings_pack::use_read_cache) if (!m_settings.get_bool(settings_pack::use_read_cache)
|| m_settings.get_int(settings_pack::cache_size) == 0) || m_settings.get_int(settings_pack::cache_size) == 0)
{ {
// if the read cache is disabled then we can skip going through the cache // if the read cache is disabled then we can skip going through the cache
// but only if there is no existing piece entry. Otherwise there may be a // but only if there is no existing piece entry. Otherwise there may be a
// partial hit on one-or-more dirty buffers so we must use the cache // partial hit on one-or-more dirty buffers so we must use the cache
// to avoid reading bogus data from storage // to avoid reading bogus data from storage
if (m_disk_cache.find_piece(j) == NULL) if (m_disk_cache.find_piece(j) == NULL)
return 1; return 1;
} }
cached_piece_entry* pe = m_disk_cache.allocate_piece(j, cached_piece_entry::read_lru1); cached_piece_entry* pe = m_disk_cache.allocate_piece(j, cached_piece_entry::read_lru1);
if (pe == NULL) if (pe == NULL)
{ {
j->ret = -1; j->ret = -1;
j->error.ec = error::no_memory; j->error.ec = error::no_memory;
j->error.operation = storage_error::read; j->error.operation = storage_error::read;
return 0; return 0;
} }
j->flags |= disk_io_job::use_disk_cache; j->flags |= disk_io_job::use_disk_cache;
if (pe->outstanding_read) if (pe->outstanding_read)
{ {
TORRENT_PIECE_ASSERT(j->piece == pe->piece, pe); TORRENT_PIECE_ASSERT(j->piece == pe->piece, pe);
pe->read_jobs.push_back(j); pe->read_jobs.push_back(j);
return 2; return 2;
} }
#if TORRENT_USE_ASSERTS #if TORRENT_USE_ASSERTS
pe->piece_log.push_back(piece_log_t(piece_log_t::set_outstanding_jobs)); pe->piece_log.push_back(piece_log_t(piece_log_t::set_outstanding_jobs));
#endif #endif
pe->outstanding_read = 1; pe->outstanding_read = 1;
return 1; return 1;
} }
@ -1694,48 +1694,48 @@ namespace libtorrent
l_.unlock(); l_.unlock();
#endif #endif
TORRENT_ASSERT((r.start % m_disk_cache.block_size()) == 0); TORRENT_ASSERT((r.start % m_disk_cache.block_size()) == 0);
if (storage->is_blocked(j)) if (storage->is_blocked(j))
{ {
// this means the job was queued up inside storage // this means the job was queued up inside storage
m_stats_counters.inc_stats_counter(counters::blocked_disk_jobs); m_stats_counters.inc_stats_counter(counters::blocked_disk_jobs);
DLOG("blocked job: %s (torrent: %d total: %d)\n" DLOG("blocked job: %s (torrent: %d total: %d)\n"
, job_action_name[j->action], j->storage ? j->storage->num_blocked() : 0 , job_action_name[j->action], j->storage ? j->storage->num_blocked() : 0
, int(m_stats_counters[counters::blocked_disk_jobs])); , int(m_stats_counters[counters::blocked_disk_jobs]));
// make the holder give up ownership of the buffer // make the holder give up ownership of the buffer
// since the job was successfully queued up // since the job was successfully queued up
buffer.release(); buffer.release();
return; return;
} }
mutex::scoped_lock l(m_cache_mutex); mutex::scoped_lock l(m_cache_mutex);
// if we succeed in adding the block to the cache, the job will // if we succeed in adding the block to the cache, the job will
// be added along with it. we may not free j if so // be added along with it. we may not free j if so
cached_piece_entry* dpe = m_disk_cache.add_dirty_block(j); cached_piece_entry* dpe = m_disk_cache.add_dirty_block(j);
// if the buffer was successfully added to the cache // if the buffer was successfully added to the cache
// our holder should no longer own it // our holder should no longer own it
if (dpe) buffer.release(); if (dpe) buffer.release();
if (dpe && dpe->outstanding_flush == 0) if (dpe && dpe->outstanding_flush == 0)
{ {
dpe->outstanding_flush = 1; dpe->outstanding_flush = 1;
l.unlock();
// the block and write job were successfully inserted
// into the cache. Now, see if we should trigger a flush
j = allocate_job(disk_io_job::flush_hashed);
j->storage = storage->shared_from_this();
j->piece = r.piece;
j->flags = flags;
add_job(j);
}
// if we added the block (regardless of whether we also
// issued a flush job or not), we're done.
if (dpe) return;
l.unlock(); l.unlock();
// the block and write job were successfully inserted
// into the cache. Now, see if we should trigger a flush
j = allocate_job(disk_io_job::flush_hashed);
j->storage = storage->shared_from_this();
j->piece = r.piece;
j->flags = flags;
add_job(j);
}
// if we added the block (regardless of whether we also
// issued a flush job or not), we're done.
if (dpe) return;
l.unlock();
add_job(j); add_job(j);
buffer.release(); buffer.release();
} }