clean up disk_io_job a bit to avoid old-style casts

Arvid Norberg 2015-05-16 20:41:37 +00:00
parent 99de70604a
commit 58ae099cf8
11 changed files with 88 additions and 70 deletions
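The heart of the change is replacing disk_io_job's single char* buffer with a union whose active member is implied by the job's action, so each call site reads or writes a correctly typed pointer instead of casting. Below is a minimal, self-contained sketch of that pattern; the type and member names are simplified stand-ins for illustration, not libtorrent's actual declarations.

#include <cstdlib>
#include <cstring>
#include <vector>

// Simplified stand-ins for the libtorrent types named in the union
// (assumptions for illustration only).
struct entry {};
struct bdecode_node {};
struct torrent_info {};

struct disk_io_job_sketch
{
	enum action_t
	{
		read, write, move_storage, rename_file,
		save_resume_data, check_fastresume, file_priority, load_torrent
	};

	// which member is active is implied by 'action'; no casts are needed
	// at the call sites any more
	union
	{
		char* disk_block;                       // read/write payload
		char* string;                           // move_storage / rename_file path
		entry* resume_data;                     // save_resume_data output
		bdecode_node const* check_resume_data;  // check_fastresume input
		std::vector<unsigned char>* priorities; // file_priority input
		torrent_info* torrent_file;             // load_torrent output
	} buffer;

	action_t action;

	disk_io_job_sketch() : action(read) { buffer.disk_block = 0; }

	// ownership mirrors the diff: only some actions own their pointer
	~disk_io_job_sketch()
	{
		if (action == move_storage || action == rename_file)
			std::free(buffer.string);
		else if (action == save_resume_data)
			delete buffer.resume_data;
	}
};

int main()
{
	disk_io_job_sketch j;
	j.action = disk_io_job_sketch::move_storage;
	j.buffer.string = strdup("/tmp/save-path"); // freed by the destructor
}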

View File

@ -52,6 +52,8 @@ namespace libtorrent
class entry;
class piece_manager;
struct cached_piece_entry;
struct bdecode_node;
class torrent_info;
struct block_cache_reference
{
@ -152,7 +154,15 @@ namespace libtorrent
// for aiocb_complete this points to the aiocb that completed
// for get_cache_info this points to a cache_status object which
// is filled in
char* buffer;
union
{
char* disk_block;
char* string;
entry* resume_data;
bdecode_node const* check_resume_data;
std::vector<boost::uint8_t>* priorities;
torrent_info* torrent_file;
} buffer;
// the disk storage this job applies to (if applicable)
boost::shared_ptr<piece_manager> storage;
@ -218,7 +228,7 @@ namespace libtorrent
#if defined TORRENT_DEBUG || TORRENT_RELEASE_ASSERTS
bool in_use:1;
// set to true when the job is added to the completion queue.
// to make sure we don't add it twice
mutable bool job_posted:1;

View File

@ -358,7 +358,7 @@ int block_cache::try_read(disk_io_job* j, bool expect_no_fail)
{
INVARIANT_CHECK;
TORRENT_ASSERT(j->buffer == 0);
TORRENT_ASSERT(j->buffer.disk_block == 0);
#if TORRENT_USE_ASSERTS
// we're not allowed to add dirty blocks
@ -650,7 +650,7 @@ void block_cache::mark_deleted(file_storage const& fs)
cached_piece_entry* block_cache::add_dirty_block(disk_io_job* j)
{
#if !defined TORRENT_DISABLE_POOL_ALLOCATOR
TORRENT_ASSERT(is_disk_buffer(j->buffer));
TORRENT_ASSERT(is_disk_buffer(j->buffer.disk_block));
#endif
#ifdef TORRENT_EXPENSIVE_INVARIANT_CHECKS
INVARIANT_CHECK;
@ -664,7 +664,7 @@ cached_piece_entry* block_cache::add_dirty_block(disk_io_job* j)
== m_deleted_storages.end());
#endif
TORRENT_ASSERT(j->buffer);
TORRENT_ASSERT(j->buffer.disk_block);
TORRENT_ASSERT(m_write_cache_size + m_read_cache_size + 1 <= in_use());
cached_piece_entry* pe = allocate_piece(j, cached_piece_entry::write_lru);
@ -694,26 +694,26 @@ cached_piece_entry* block_cache::add_dirty_block(disk_io_job* j)
cached_block_entry& b = pe->blocks[block];
TORRENT_PIECE_ASSERT(b.buf != j->buffer, pe);
TORRENT_PIECE_ASSERT(b.buf != j->buffer.disk_block, pe);
// we might have a left-over read block from
// hash checking
// we might also have a previous dirty block which
// we're still waiting for to be written
if (b.buf != 0 && b.buf != j->buffer)
if (b.buf != 0 && b.buf != j->buffer.disk_block)
{
TORRENT_PIECE_ASSERT(b.refcount == 0 && !b.pending, pe);
free_block(pe, block);
TORRENT_PIECE_ASSERT(b.dirty == 0, pe);
}
b.buf = j->buffer;
b.buf = j->buffer.disk_block;
b.dirty = true;
++pe->num_blocks;
++pe->num_dirty;
++m_write_cache_size;
j->buffer = 0;
j->buffer.disk_block = 0;
TORRENT_PIECE_ASSERT(j->piece == pe->piece, pe);
TORRENT_PIECE_ASSERT(j->flags & disk_io_job::in_progress, pe);
TORRENT_PIECE_ASSERT(j->piece == pe->piece, pe);
@ -1698,7 +1698,7 @@ int block_cache::copy_from_piece(cached_piece_entry* pe, disk_io_job* j
INVARIANT_CHECK;
TORRENT_UNUSED(expect_no_fail);
TORRENT_PIECE_ASSERT(j->buffer == 0, pe);
TORRENT_PIECE_ASSERT(j->buffer.disk_block == 0, pe);
TORRENT_PIECE_ASSERT(pe->in_use, pe);
// copy from the cache and update the last use timestamp
@ -1741,7 +1741,7 @@ int block_cache::copy_from_piece(cached_piece_entry* pe, disk_io_job* j
j->d.io.ref.storage = j->storage.get();
j->d.io.ref.piece = pe->piece;
j->d.io.ref.block = start_block;
j->buffer = bl.buf + (j->d.io.offset & (block_size()-1));
j->buffer.disk_block = bl.buf + (j->d.io.offset & (block_size()-1));
++m_send_buffer_blocks;
return j->d.io.buffer_size;
}
@ -1754,15 +1754,15 @@ int block_cache::copy_from_piece(cached_piece_entry* pe, disk_io_job* j
return -1;
}
j->buffer = allocate_buffer("send buffer");
if (j->buffer == 0) return -2;
j->buffer.disk_block = allocate_buffer("send buffer");
if (j->buffer.disk_block == 0) return -2;
while (size > 0)
{
TORRENT_PIECE_ASSERT(pe->blocks[block].buf, pe);
int to_copy = (std::min)(block_size()
- block_offset, size);
std::memcpy(j->buffer + buffer_offset
std::memcpy(j->buffer.disk_block + buffer_offset
, pe->blocks[block].buf + block_offset
, to_copy);
size -= to_copy;
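One pattern worth noting in the block_cache changes above: when a dirty block is adopted by the cache, the cache entry takes the job's disk_block pointer and the job's member is immediately set to 0 so only one side ever frees it. A small hedged sketch of that hand-off follows (simplified types, not libtorrent's).

// Sketch of the ownership hand-off in add_dirty_block(): the cache entry
// takes the job's buffer (b.buf = j->buffer.disk_block) and the job's
// pointer is nulled (j->buffer.disk_block = 0). Simplified types only.
struct cached_block_sketch { char* buf; bool dirty; };

void adopt_block(cached_block_sketch& b, char*& job_disk_block)
{
	b.buf = job_disk_block;
	b.dirty = true;
	job_disk_block = 0; // the job no longer owns (or may free) this buffer
}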

View File

@ -46,19 +46,22 @@ namespace libtorrent
}
disk_buffer_holder::disk_buffer_holder(buffer_allocator_interface& alloc, disk_io_job const& j)
: m_allocator(alloc), m_buf(j.buffer), m_ref(j.d.io.ref)
: m_allocator(alloc), m_buf(j.buffer.disk_block), m_ref(j.d.io.ref)
{
TORRENT_ASSERT(m_ref.storage == 0 || m_ref.piece >= 0);
TORRENT_ASSERT(m_ref.storage == 0 || m_ref.block >= 0);
TORRENT_ASSERT(m_ref.storage == 0 || m_ref.piece < ((piece_manager*)m_ref.storage)->files()->num_pieces());
TORRENT_ASSERT(m_ref.storage == 0 || m_ref.block <= ((piece_manager*)m_ref.storage)->files()->piece_length() / 0x4000);
TORRENT_ASSERT(j.action != disk_io_job::save_resume_data);
TORRENT_ASSERT(j.action != disk_io_job::rename_file);
TORRENT_ASSERT(j.action != disk_io_job::move_storage);
}
void disk_buffer_holder::reset(disk_io_job const& j)
{
if (m_ref.storage) m_allocator.reclaim_block(m_ref);
else if (m_buf) m_allocator.free_disk_buffer(m_buf);
m_buf = j.buffer;
m_buf = j.buffer.disk_block;
m_ref = j.d.io.ref;
TORRENT_ASSERT(m_ref.piece >= 0);
@ -66,6 +69,9 @@ namespace libtorrent
TORRENT_ASSERT(m_ref.block >= 0);
TORRENT_ASSERT(m_ref.piece < ((piece_manager*)m_ref.storage)->files()->num_pieces());
TORRENT_ASSERT(m_ref.block <= ((piece_manager*)m_ref.storage)->files()->piece_length() / 0x4000);
TORRENT_ASSERT(j.action != disk_io_job::save_resume_data);
TORRENT_ASSERT(j.action != disk_io_job::rename_file);
TORRENT_ASSERT(j.action != disk_io_job::move_storage);
}
void disk_buffer_holder::reset(char* buf)

View File

@ -39,7 +39,6 @@ namespace libtorrent
{
disk_io_job::disk_io_job()
: requester(0)
, buffer(0)
, piece(0)
, action(read)
, ret(0)
@ -51,6 +50,7 @@ namespace libtorrent
, blocked(false)
#endif
{
buffer.disk_block = 0;
d.io.offset = 0;
d.io.buffer_size = 0;
d.io.ref.storage = 0;
@ -61,9 +61,9 @@ namespace libtorrent
disk_io_job::~disk_io_job()
{
if (action == rename_file || action == move_storage)
free(buffer);
if (action == save_resume_data)
delete (entry*)buffer;
free(buffer.string);
else if (action == save_resume_data)
delete buffer.resume_data;
}
bool disk_io_job::completed(cached_piece_entry const* pe, int block_size)
@ -74,7 +74,7 @@ namespace libtorrent
int size = d.io.buffer_size;
int start = d.io.offset / block_size;
int end = block_offset > 0 && (size > block_size - block_offset) ? start + 2 : start + 1;
for (int i = start; i < end; ++i)
if (pe->blocks[i].dirty || pe->blocks[i].pending) return false;
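The completed() loop above checks every cache block a job touches. As a worked example of the span arithmetic (start and end computed from the offset, buffer size, and 16 KiB block size), here is a standalone sketch with hypothetical values; block_offset is assumed to be the offset modulo the block size, as elsewhere in the cache code.

#include <cassert>

// A job touches one cache block, or two when it straddles a block boundary.
int blocks_touched(int offset, int size, int block_size)
{
	int const block_offset = offset & (block_size - 1); // block_size is a power of two
	int const start = offset / block_size;
	int const end = (block_offset > 0 && size > block_size - block_offset)
		? start + 2 : start + 1;
	return end - start;
}

int main()
{
	int const bs = 0x4000; // 16 KiB blocks, as used throughout the diff
	assert(blocks_touched(0x0000, 0x4000, bs) == 1); // exactly one block
	assert(blocks_touched(0x3000, 0x2000, bs) == 2); // straddles a boundary
}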

View File

@ -1143,8 +1143,8 @@ namespace libtorrent
int disk_io_thread::do_uncached_read(disk_io_job* j)
{
j->buffer = m_disk_cache.allocate_buffer("send buffer");
if (j->buffer == 0)
j->buffer.disk_block = m_disk_cache.allocate_buffer("send buffer");
if (j->buffer.disk_block == 0)
{
j->error.ec = error::no_memory;
j->error.operation = storage_error::alloc_cache_piece;
@ -1154,7 +1154,7 @@ namespace libtorrent
time_point start_time = clock_type::now();
int file_flags = file_flags_for_job(j);
file::iovec_t b = { j->buffer, size_t(j->d.io.buffer_size) };
file::iovec_t b = { j->buffer.disk_block, size_t(j->d.io.buffer_size) };
int ret = j->storage->get_storage_impl()->readv(&b, 1
, j->piece, j->d.io.offset, file_flags, j->error);
@ -1411,7 +1411,7 @@ namespace libtorrent
{
time_point start_time = clock_type::now();
file::iovec_t b = { j->buffer, size_t(j->d.io.buffer_size) };
file::iovec_t b = { j->buffer.disk_block, size_t(j->d.io.buffer_size) };
int file_flags = file_flags_for_job(j);
m_stats_counters.inc_stats_counter(counters::num_writing_threads, 1);
@ -1433,8 +1433,8 @@ namespace libtorrent
m_stats_counters.inc_stats_counter(counters::disk_job_time, write_time);
}
m_disk_cache.free_buffer(j->buffer);
j->buffer = NULL;
m_disk_cache.free_buffer(j->buffer.disk_block);
j->buffer.disk_block = NULL;
return ret;
}
@ -1457,7 +1457,7 @@ namespace libtorrent
#if TORRENT_USE_ASSERTS
print_piece_log(pe->piece_log);
#endif
TORRENT_ASSERT(pe->blocks[j->d.io.offset / 16 / 1024].buf != j->buffer);
TORRENT_ASSERT(pe->blocks[j->d.io.offset / 16 / 1024].buf != j->buffer.disk_block);
TORRENT_ASSERT(pe->blocks[j->d.io.offset / 16 / 1024].buf != NULL);
j->error.ec = error::operation_aborted;
j->error.operation = storage_error::write;
@ -1527,7 +1527,7 @@ namespace libtorrent
j->piece = r.piece;
j->d.io.offset = r.start;
j->d.io.buffer_size = r.length;
j->buffer = 0;
j->buffer.disk_block = 0;
j->flags = flags;
j->requester = requester;
j->callback = handler;
@ -1637,7 +1637,7 @@ namespace libtorrent
j->piece = r.piece;
j->d.io.offset = r.start;
j->d.io.buffer_size = r.length;
j->buffer = buffer.get();
j->buffer.disk_block = buffer.get();
j->callback = handler;
j->flags = flags;
@ -1666,14 +1666,14 @@ namespace libtorrent
int piece_size = p.storage->files()->piece_size(p.piece);
int blocks_in_piece = (piece_size + bs - 1) / bs;
for (int k = 0; k < blocks_in_piece; ++k)
TORRENT_PIECE_ASSERT(p.blocks[k].buf != j->buffer, &p);
TORRENT_PIECE_ASSERT(p.blocks[k].buf != j->buffer.disk_block, &p);
}
l2_.unlock();
#endif
#if !defined TORRENT_DISABLE_POOL_ALLOCATOR && TORRENT_USE_ASSERTS
mutex::scoped_lock l_(m_cache_mutex);
TORRENT_ASSERT(m_disk_cache.is_disk_buffer(j->buffer));
TORRENT_ASSERT(m_disk_cache.is_disk_buffer(j->buffer.disk_block));
l_.unlock();
#endif
if (m_settings.get_int(settings_pack::cache_size) > 0
@ -1784,7 +1784,7 @@ namespace libtorrent
disk_io_job* j = allocate_job(disk_io_job::move_storage);
j->storage = storage->shared_from_this();
j->buffer = strdup(p.c_str());
j->buffer.string = strdup(p.c_str());
j->callback = handler;
j->flags = flags;
@ -1864,7 +1864,7 @@ namespace libtorrent
disk_io_job* j = allocate_job(disk_io_job::check_fastresume);
j->storage = storage->shared_from_this();
j->buffer = (char*)resume_data;
j->buffer.check_resume_data = resume_data;
j->d.links = links.get();
j->callback = handler;
@ -1883,7 +1883,7 @@ namespace libtorrent
disk_io_job* j = allocate_job(disk_io_job::save_resume_data);
j->storage = storage->shared_from_this();
j->buffer = NULL;
j->buffer.resume_data = NULL;
j->callback = handler;
add_fence_job(storage, j);
@ -1901,7 +1901,7 @@ namespace libtorrent
disk_io_job* j = allocate_job(disk_io_job::rename_file);
j->storage = storage->shared_from_this();
j->piece = index;
j->buffer = strdup(name.c_str());
j->buffer.string = strdup(name.c_str());
j->callback = handler;
add_fence_job(storage, j);
}
@ -2016,7 +2016,7 @@ namespace libtorrent
disk_io_job* j = allocate_job(disk_io_job::file_priority);
j->storage = storage->shared_from_this();
j->buffer = (char*)p;
j->buffer.priorities = p;
j->callback = handler;
add_fence_job(storage, j);
@ -2513,7 +2513,8 @@ namespace libtorrent
TORRENT_ASSERT(j->storage->num_outstanding_jobs() == 1);
// if files have to be closed, that's the storage's responsibility
return j->storage->get_storage_impl()->move_storage(j->buffer, j->flags, j->error);
return j->storage->get_storage_impl()->move_storage(j->buffer.string
, j->flags, j->error);
}
int disk_io_thread::do_release_files(disk_io_job* j, tailqueue& completed_jobs)
@ -2533,7 +2534,7 @@ namespace libtorrent
int disk_io_thread::do_delete_files(disk_io_job* j, tailqueue& completed_jobs)
{
TORRENT_ASSERT(j->buffer == 0);
TORRENT_ASSERT(j->buffer.string == 0);
INVARIANT_CHECK;
// if this assert fails, something's wrong with the fence logic
@ -2557,7 +2558,7 @@ namespace libtorrent
// if this assert fails, something's wrong with the fence logic
TORRENT_ASSERT(j->storage->num_outstanding_jobs() == 1);
bdecode_node const* rd = (bdecode_node const*)j->buffer;
bdecode_node const* rd = j->buffer.check_resume_data;
bdecode_node tmp;
if (rd == NULL) rd = &tmp;
@ -2576,8 +2577,8 @@ namespace libtorrent
entry* resume_data = new entry(entry::dictionary_t);
j->storage->get_storage_impl()->write_resume_data(*resume_data, j->error);
TORRENT_ASSERT(j->buffer == 0);
j->buffer = (char*)resume_data;
TORRENT_ASSERT(j->buffer.resume_data == 0);
j->buffer.resume_data = resume_data;
return j->error ? -1 : 0;
}
@ -2587,7 +2588,8 @@ namespace libtorrent
TORRENT_ASSERT(j->storage->num_outstanding_jobs() == 1);
// if files need to be closed, that's the storage's responsibility
j->storage->get_storage_impl()->rename_file(j->piece, j->buffer, j->error);
j->storage->get_storage_impl()->rename_file(j->piece, j->buffer.string
, j->error);
return j->error ? -1 : 0;
}
@ -2612,7 +2614,7 @@ namespace libtorrent
int disk_io_thread::do_cache_piece(disk_io_job* j, tailqueue& /* completed_jobs */ )
{
INVARIANT_CHECK;
TORRENT_ASSERT(j->buffer == 0);
TORRENT_ASSERT(j->buffer.disk_block == 0);
if (m_settings.get_int(settings_pack::cache_size) == 0
|| m_settings.get_bool(settings_pack::use_read_cache) == false)
@ -2948,9 +2950,8 @@ namespace libtorrent
int disk_io_thread::do_file_priority(disk_io_job* j, tailqueue& /* completed_jobs */ )
{
std::vector<boost::uint8_t>* p = reinterpret_cast<std::vector<boost::uint8_t>*>(j->buffer);
boost::scoped_ptr<std::vector<boost::uint8_t> > p(j->buffer.priorities);
j->storage->get_storage_impl()->set_file_priority(*p, j->error);
delete p;
return 0;
}
@ -2962,7 +2963,7 @@ namespace libtorrent
torrent_info* t = new torrent_info(filename, j->error.ec);
if (j->error.ec)
{
j->buffer = NULL;
j->buffer.torrent_file = NULL;
delete t;
}
else
@ -2971,7 +2972,7 @@ namespace libtorrent
// than to have it be done in the network thread. It has enough to
// do as it is.
std::string cert = t->ssl_cert();
j->buffer = (char*)t;
j->buffer.torrent_file = t;
}
return 0;
@ -3361,7 +3362,7 @@ namespace libtorrent
cached_piece_entry* pe = m_disk_cache.find_piece(j);
if (pe)
{
TORRENT_ASSERT(pe->blocks[j->d.io.offset / 16 / 1024].buf != j->buffer);
TORRENT_ASSERT(pe->blocks[j->d.io.offset / 16 / 1024].buf != j->buffer.disk_block);
TORRENT_ASSERT(pe->blocks[j->d.io.offset / 16 / 1024].buf == NULL);
TORRENT_ASSERT(!pe->hashing_done);
}
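The disk_io_thread.cpp changes above all follow one pattern: the code that queues a job writes the union member matching the job's action, and the do_* handler reads that same member back, so both of the old casts ((char*) on the producer side, reinterpret_cast on the consumer side) disappear. A hedged, self-contained sketch of that pairing, using file_priority as the example (names are simplified stand-ins, not libtorrent's API):

#include <vector>

struct job_sketch
{
	enum action_t { file_priority } action;
	union
	{
		std::vector<unsigned char>* priorities;
	} buffer;
};

// producer side: previously j->buffer = (char*)p;
void queue_file_priority(job_sketch& j, std::vector<unsigned char>* p)
{
	j.action = job_sketch::file_priority;
	j.buffer.priorities = p;
}

// consumer side: previously reinterpret_cast<std::vector<boost::uint8_t>*>(j->buffer);
// the diff also switches to boost::scoped_ptr so the vector is freed on every return path
void handle_file_priority(job_sketch& j)
{
	std::vector<unsigned char>* p = j.buffer.priorities;
	// ... apply the priorities to storage here ...
	delete p;
	j.buffer.priorities = 0;
}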

View File

@ -5168,7 +5168,7 @@ namespace libtorrent
#ifndef TORRENT_DISABLE_LOGGING
peer_log(peer_log_alert::info, "FILE_ASYNC_READ_COMPLETE"
, "ret: %d piece: %d s: %x l: %x b: %p c: %s e: %s rtt: %d us"
, j->ret, r.piece, r.start, r.length, j->buffer
, j->ret, r.piece, r.start, r.length, j->buffer.disk_block
, (j->flags & disk_io_job::cache_hit ? "cache hit" : "cache miss")
, j->error.ec.message().c_str(), disk_rtt);
#endif
@ -5187,7 +5187,7 @@ namespace libtorrent
return;
}
TORRENT_ASSERT(j->buffer == 0);
TORRENT_ASSERT(j->buffer.disk_block == 0);
write_dont_have(r.piece);
write_reject_request(r);
if (t->alerts().should_post<file_error_alert>())

View File

@ -4461,7 +4461,7 @@ retry:
else
{
params->url.clear();
params->ti = boost::shared_ptr<torrent_info>((torrent_info*)j->buffer);
params->ti = boost::shared_ptr<torrent_info>(j->buffer.torrent_file);
handle = add_torrent(*params, ec);
}

View File

@ -242,7 +242,7 @@ namespace
if (j->ret != j->d.io.buffer_size) return;
hasher h;
h.update(j->buffer, j->d.io.buffer_size);
h.update(j->buffer.disk_block, j->d.io.buffer_size);
h.update((char const*)&m_salt, sizeof(m_salt));
std::pair<peer_list::iterator, peer_list::iterator> range
@ -256,7 +256,7 @@ namespace
#ifdef TORRENT_LOG_HASH_FAILURES
log_hash_block(&m_log_file, m_torrent, b.piece_index
, b.block_index, p->address(), j->buffer, j->buffer_size, true);
, b.block_index, p->address(), j->buffer.disk_block, j->buffer_size, true);
#endif
std::map<piece_block, block_entry>::iterator i = m_block_hashes.lower_bound(b);
@ -323,7 +323,7 @@ namespace
if (j->ret != j->d.io.buffer_size) return;
hasher h;
h.update(j->buffer, j->d.io.buffer_size);
h.update(j->buffer.disk_block, j->d.io.buffer_size);
h.update((char const*)&m_salt, sizeof(m_salt));
sha1_hash ok_digest = h.final();
@ -331,7 +331,7 @@ namespace
#ifdef TORRENT_LOG_HASH_FAILURES
log_hash_block(&m_log_file, m_torrent, b.first.piece_index
, b.first.block_index, a, j->buffer, j->buffer_size, false);
, b.first.block_index, a, j->buffer.disk_block, j->buffer_size, false);
#endif
// find the peer

View File

@ -1233,7 +1233,7 @@ namespace libtorrent
}
else
{
std::memcpy(rp->piece_data.get() + r.start, j->buffer, r.length);
std::memcpy(rp->piece_data.get() + r.start, j->buffer.disk_block, r.length);
}
if (rp->blocks_left == 0)
@ -4916,7 +4916,7 @@ namespace libtorrent
dec_refcount("save_resume");
m_ses.done_async_resume();
if (!j->buffer)
if (!j->buffer.resume_data)
{
alerts().emplace_alert<save_resume_data_failed_alert>(get_handle(), j->error.ec);
return;
@ -4924,10 +4924,10 @@ namespace libtorrent
m_need_save_resume_data = false;
m_last_saved_resume = m_ses.session_time();
write_resume_data(*((entry*)j->buffer));
write_resume_data(*j->buffer.resume_data);
alerts().emplace_alert<save_resume_data_alert>(
boost::shared_ptr<entry>((entry*)j->buffer), get_handle());
const_cast<disk_io_job*>(j)->buffer = 0;
boost::shared_ptr<entry>(j->buffer.resume_data), get_handle());
const_cast<disk_io_job*>(j)->buffer.resume_data = 0;
state_updated();
}
@ -4939,8 +4939,9 @@ namespace libtorrent
if (j->ret == 0)
{
if (alerts().should_post<file_renamed_alert>())
alerts().emplace_alert<file_renamed_alert>(get_handle(), j->buffer, j->piece);
m_torrent_file->rename_file(j->piece, j->buffer);
alerts().emplace_alert<file_renamed_alert>(get_handle()
, j->buffer.string, j->piece);
m_torrent_file->rename_file(j->piece, j->buffer.string);
}
else
{
@ -8421,8 +8422,8 @@ namespace libtorrent
if (j->ret == piece_manager::no_error || j->ret == piece_manager::need_full_check)
{
if (alerts().should_post<storage_moved_alert>())
alerts().emplace_alert<storage_moved_alert>(get_handle(), j->buffer);
m_save_path = j->buffer;
alerts().emplace_alert<storage_moved_alert>(get_handle(), j->buffer.string);
m_save_path = j->buffer.string;
m_need_save_resume_data = true;
if (j->ret == piece_manager::need_full_check)
force_recheck();
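The on_save_resume_data handler above transfers ownership of the resume data out of the job: the alert's shared_ptr adopts the entry, and the job's union member is cleared so ~disk_io_job() does not delete it a second time. A minimal sketch of that hand-off, assuming boost::shared_ptr as used in the diff (simplified types, not libtorrent's):

#include <boost/shared_ptr.hpp>

struct entry_sketch {};

struct job_sketch
{
	enum action_t { save_resume_data } action;
	union { entry_sketch* resume_data; } buffer;

	job_sketch() : action(save_resume_data) { buffer.resume_data = 0; }
	~job_sketch() { if (action == save_resume_data) delete buffer.resume_data; }
};

// the shared_ptr adopts the entry; clearing the job's pointer prevents a
// double delete in the job's destructor
boost::shared_ptr<entry_sketch> take_resume_data(job_sketch& j)
{
	boost::shared_ptr<entry_sketch> rd(j.buffer.resume_data);
	j.buffer.resume_data = 0;
	return rd;
}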

View File

@ -123,7 +123,7 @@ void nop() {}
wj.d.io.offset = b * 0x4000; \
wj.d.io.buffer_size = 0x4000; \
wj.piece = p; \
wj.buffer = bc.allocate_buffer("write-test"); \
wj.buffer.disk_block = bc.allocate_buffer("write-test"); \
pe = bc.add_dirty_block(&wj)
#define READ_BLOCK(p, b, r) \
@ -133,12 +133,12 @@ void nop() {}
rj.piece = p; \
rj.storage = pm; \
rj.requester = (void*)r; \
rj.buffer = 0; \
rj.buffer.disk_block = 0; \
ret = bc.try_read(&rj)
#define RETURN_BUFFER \
if (rj.d.io.ref.storage) bc.reclaim_block(rj.d.io.ref); \
else if (rj.buffer) bc.free_buffer(rj.buffer); \
else if (rj.buffer.disk_block) bc.free_buffer(rj.buffer.disk_block); \
rj.d.io.ref.storage = 0
#define FLUSH(flushing) \
@ -436,7 +436,7 @@ void test_unaligned_read()
rj.piece = 0;
rj.storage = pm;
rj.requester = (void*)1;
rj.buffer = 0;
rj.buffer.disk_block = 0;
ret = bc.try_read(&rj);
// unaligned reads copies the data into a new buffer

View File

@ -71,7 +71,7 @@ void on_read_piece(int ret, disk_io_job const& j, char const* data, int size)
{
std::cerr << time_now_string() << " on_read_piece piece: " << j.piece << std::endl;
TEST_EQUAL(ret, size);
if (ret > 0) TEST_CHECK(std::equal(j.buffer, j.buffer + ret, data));
if (ret > 0) TEST_CHECK(std::equal(j.buffer.disk_block, j.buffer.disk_block + ret, data));
}
void on_check_resume_data(disk_io_job const* j, bool* done)