make tailqueue a template, introduce type safety, remove old-style casts

This commit is contained in:
arvidn 2015-08-19 15:22:00 +02:00
parent a3608a39fe
commit bd177a857f
16 changed files with 268 additions and 270 deletions

View File

@ -77,7 +77,6 @@ set(sources
stat
stat_cache
storage
tailqueue
time
timestamp_history
torrent

View File

@ -656,7 +656,6 @@ SOURCES =
http_tracker_connection
udp_tracker_connection
sha1
tailqueue
timestamp_history
udp_socket
upnp

View File

@ -192,11 +192,11 @@ namespace libtorrent
boost::shared_ptr<piece_manager> storage;
// write jobs hanging off of this piece
tailqueue jobs;
tailqueue<disk_io_job> jobs;
// read jobs waiting for the read job currently outstanding
// on this piece to complete. These are executed at that point.
tailqueue read_jobs;
tailqueue<disk_io_job> read_jobs;
int get_piece() const { return piece; }
void* get_storage() const { return storage.get(); }
@ -375,7 +375,7 @@ namespace libtorrent
// similar to mark_for_deletion, except for actually marking the
// piece for deletion. If the piece was actually deleted,
// the function returns true
bool evict_piece(cached_piece_entry* p, tailqueue& jobs);
bool evict_piece(cached_piece_entry* p, tailqueue<disk_io_job>& jobs);
// if this piece is in L1 or L2 proper, move it to
// its respective ghost list
@ -449,8 +449,8 @@ namespace libtorrent
// that couldn't be
int try_evict_blocks(int num, cached_piece_entry* ignore = 0);
// if there are any dirty blocks
void clear(tailqueue& jobs);
// if there are any dirty blocks
void clear(tailqueue<disk_io_job>& jobs);
void update_stats_counters(counters& c) const;
#ifndef TORRENT_NO_DEPRECATE

View File

@ -74,7 +74,8 @@ namespace libtorrent
// pointers and chaining them back and forth into lists saves
// a lot of heap allocation churn of using general purpose
// containers.
struct TORRENT_EXTRA_EXPORT disk_io_job : tailqueue_node, boost::noncopyable
struct TORRENT_EXTRA_EXPORT disk_io_job : tailqueue_node<disk_io_job>
, boost::noncopyable
{
disk_io_job();
~disk_io_job();

View File

@ -98,6 +98,8 @@ namespace libtorrent
bool need_readback;
};
typedef tailqueue<disk_io_job> jobqueue_t;
// this struct holds a number of statistics counters
// relevant for the disk io thread and disk cache.
struct TORRENT_EXPORT cache_status
@ -397,37 +399,38 @@ namespace libtorrent
void check_invariant() const;
#endif
void maybe_issue_queued_read_jobs(cached_piece_entry* pe, tailqueue& completed_jobs);
int do_read(disk_io_job* j, tailqueue& completed_jobs);
void maybe_issue_queued_read_jobs(cached_piece_entry* pe,
jobqueue_t& completed_jobs);
int do_read(disk_io_job* j, jobqueue_t& completed_jobs);
int do_uncached_read(disk_io_job* j);
int do_write(disk_io_job* j, tailqueue& completed_jobs);
int do_write(disk_io_job* j, jobqueue_t& completed_jobs);
int do_uncached_write(disk_io_job* j);
int do_hash(disk_io_job* j, tailqueue& completed_jobs);
int do_hash(disk_io_job* j, jobqueue_t& completed_jobs);
int do_uncached_hash(disk_io_job* j);
int do_move_storage(disk_io_job* j, tailqueue& completed_jobs);
int do_release_files(disk_io_job* j, tailqueue& completed_jobs);
int do_delete_files(disk_io_job* j, tailqueue& completed_jobs);
int do_check_fastresume(disk_io_job* j, tailqueue& completed_jobs);
int do_save_resume_data(disk_io_job* j, tailqueue& completed_jobs);
int do_rename_file(disk_io_job* j, tailqueue& completed_jobs);
int do_stop_torrent(disk_io_job* j, tailqueue& completed_jobs);
int do_read_and_hash(disk_io_job* j, tailqueue& completed_jobs);
int do_cache_piece(disk_io_job* j, tailqueue& completed_jobs);
int do_move_storage(disk_io_job* j, jobqueue_t& completed_jobs);
int do_release_files(disk_io_job* j, jobqueue_t& completed_jobs);
int do_delete_files(disk_io_job* j, jobqueue_t& completed_jobs);
int do_check_fastresume(disk_io_job* j, jobqueue_t& completed_jobs);
int do_save_resume_data(disk_io_job* j, jobqueue_t& completed_jobs);
int do_rename_file(disk_io_job* j, jobqueue_t& completed_jobs);
int do_stop_torrent(disk_io_job* j, jobqueue_t& completed_jobs);
int do_read_and_hash(disk_io_job* j, jobqueue_t& completed_jobs);
int do_cache_piece(disk_io_job* j, jobqueue_t& completed_jobs);
#ifndef TORRENT_NO_DEPRECATE
int do_finalize_file(disk_io_job* j, tailqueue& completed_jobs);
int do_finalize_file(disk_io_job* j, jobqueue_t& completed_jobs);
#endif
int do_flush_piece(disk_io_job* j, tailqueue& completed_jobs);
int do_flush_hashed(disk_io_job* j, tailqueue& completed_jobs);
int do_flush_storage(disk_io_job* j, tailqueue& completed_jobs);
int do_trim_cache(disk_io_job* j, tailqueue& completed_jobs);
int do_file_priority(disk_io_job* j, tailqueue& completed_jobs);
int do_load_torrent(disk_io_job* j, tailqueue& completed_jobs);
int do_clear_piece(disk_io_job* j, tailqueue& completed_jobs);
int do_tick(disk_io_job* j, tailqueue& completed_jobs);
int do_resolve_links(disk_io_job* j, tailqueue& completed_jobs);
int do_flush_piece(disk_io_job* j, jobqueue_t& completed_jobs);
int do_flush_hashed(disk_io_job* j, jobqueue_t& completed_jobs);
int do_flush_storage(disk_io_job* j, jobqueue_t& completed_jobs);
int do_trim_cache(disk_io_job* j, jobqueue_t& completed_jobs);
int do_file_priority(disk_io_job* j, jobqueue_t& completed_jobs);
int do_load_torrent(disk_io_job* j, jobqueue_t& completed_jobs);
int do_clear_piece(disk_io_job* j, jobqueue_t& completed_jobs);
int do_tick(disk_io_job* j, jobqueue_t& completed_jobs);
int do_resolve_links(disk_io_job* j, jobqueue_t& completed_jobs);
void call_job_handlers(void* userdata);
@ -445,16 +448,16 @@ namespace libtorrent
};
void add_completed_job(disk_io_job* j);
void add_completed_jobs(tailqueue& jobs);
void add_completed_jobs_impl(tailqueue& jobs
, tailqueue& completed_jobs);
void add_completed_jobs(jobqueue_t& jobs);
void add_completed_jobs_impl(jobqueue_t& jobs
, jobqueue_t& completed_jobs);
void fail_jobs(storage_error const& e, tailqueue& jobs_);
void fail_jobs_impl(storage_error const& e, tailqueue& src, tailqueue& dst);
void fail_jobs(storage_error const& e, jobqueue_t& jobs_);
void fail_jobs_impl(storage_error const& e, jobqueue_t& src, jobqueue_t& dst);
void check_cache_level(mutex::scoped_lock& l, tailqueue& completed_jobs);
void check_cache_level(mutex::scoped_lock& l, jobqueue_t& completed_jobs);
void perform_job(disk_io_job* j, tailqueue& completed_jobs);
void perform_job(disk_io_job* j, jobqueue_t& completed_jobs);
// this queues up another job to be submitted
void add_job(disk_io_job* j, bool user_add = true);
@ -465,7 +468,7 @@ namespace libtorrent
// writes out the blocks [start, end) (releases the lock
// during the file operation)
int flush_range(cached_piece_entry* p, int start, int end
, tailqueue& completed_jobs, mutex::scoped_lock& l);
, jobqueue_t& completed_jobs, mutex::scoped_lock& l);
// low level flush operations, used by flush_range
int build_iovec(cached_piece_entry* pe, int start, int end
@ -475,7 +478,7 @@ namespace libtorrent
void iovec_flushed(cached_piece_entry* pe
, int* flushing, int num_blocks, int block_offset
, storage_error const& error
, tailqueue& completed_jobs);
, jobqueue_t& completed_jobs);
// assumes l is locked (the cache mutex).
// assumes pe->hash to be set.
@ -498,13 +501,13 @@ namespace libtorrent
// used for asserts and only applies for fence jobs
flush_expect_clear = 8
};
void flush_cache(piece_manager* storage, boost::uint32_t flags, tailqueue& completed_jobs, mutex::scoped_lock& l);
void flush_expired_write_blocks(tailqueue& completed_jobs, mutex::scoped_lock& l);
void flush_piece(cached_piece_entry* pe, int flags, tailqueue& completed_jobs, mutex::scoped_lock& l);
void flush_cache(piece_manager* storage, boost::uint32_t flags, jobqueue_t& completed_jobs, mutex::scoped_lock& l);
void flush_expired_write_blocks(jobqueue_t& completed_jobs, mutex::scoped_lock& l);
void flush_piece(cached_piece_entry* pe, int flags, jobqueue_t& completed_jobs, mutex::scoped_lock& l);
int try_flush_hashed(cached_piece_entry* p, int cont_blocks, tailqueue& completed_jobs, mutex::scoped_lock& l);
int try_flush_hashed(cached_piece_entry* p, int cont_blocks, jobqueue_t& completed_jobs, mutex::scoped_lock& l);
void try_flush_write_blocks(int num, tailqueue& completed_jobs, mutex::scoped_lock& l);
void try_flush_write_blocks(int num, jobqueue_t& completed_jobs, mutex::scoped_lock& l);
// used to batch reclaiming of blocks to once per cycle
void commit_reclaimed_blocks();
@ -579,13 +582,13 @@ namespace libtorrent
mutable mutex m_job_mutex;
// jobs queued for servicing
tailqueue m_queued_jobs;
jobqueue_t m_queued_jobs;
// when using more than 2 threads, this is
// used for just hashing jobs, just for threads
// dedicated to do hashing
condition_variable m_hash_job_cond;
tailqueue m_queued_hash_jobs;
jobqueue_t m_queued_hash_jobs;
// used to rate limit disk performance warnings
time_point m_last_disk_aio_performance_warning;
@ -596,7 +599,7 @@ namespace libtorrent
// will then drain the queue and execute the jobs'
// handler functions
mutex m_completed_jobs_mutex;
tailqueue m_completed_jobs;
jobqueue_t m_completed_jobs;
// these are blocks that have been returned by the main thread
// but they haven't been freed yet. This is used to batch

View File

@ -59,7 +59,7 @@ namespace libtorrent
list_node* m_current;
};
// TODO: 3 move the code into a .cpp file and add a unit test for linked_list
// TODO: 3 make this a template and add a unit test
struct linked_list
{
linked_list(): m_first(0), m_last(0), m_size(0) {}

View File

@ -598,7 +598,7 @@ namespace libtorrent
// main network thread. the tailqueue of jobs will have the
// backed-up jobs prepended to it in case this resulted in the
// fence being lowered.
int job_complete(disk_io_job* j, tailqueue& job_queue);
int job_complete(disk_io_job* j, tailqueue<disk_io_job>& job_queue);
int num_outstanding_jobs() const { return m_outstanding_jobs; }
// if there is a fence up, returns true and adds the job
@ -617,7 +617,7 @@ namespace libtorrent
// when there's a fence up, jobs are queued up in here
// until the fence is lowered
tailqueue m_blocked_jobs;
tailqueue<disk_io_job> m_blocked_jobs;
// the number of disk_io_job objects there are, belonging
// to this torrent, currently pending, hanging off of

View File

@ -37,10 +37,11 @@ POSSIBILITY OF SUCH DAMAGE.
namespace libtorrent
{
// base class for elements stored in a tailqueue<T>. The queue is
// intrusive: each element carries its own link pointer, so no heap
// allocation is needed per list node. T is the derived element type
// (CRTP), which makes the link pointer type-safe and removes the
// need for downcasts when traversing the list.
template <typename T>
struct tailqueue_node
{
	tailqueue_node() : next(0) {}
	// the next element in the queue, or NULL if this is the last one
	T* next;
};
template<class N>
@ -51,40 +52,130 @@ namespace libtorrent
return ret;
}
// forward iterator over a tailqueue. Instantiate with "const T" to
// get a const-iterator (as returned by tailqueue<T>::iterate() const).
// Only tailqueue can construct one, via the private constructor.
template <typename T>
struct tailqueue_iterator
{
	// grant every tailqueue instantiation access to the private ctor
	template <typename U> friend struct tailqueue;

	// the element currently pointed to, or NULL at end-of-queue
	T* get() const { return m_current; }
	// advance to the next element. precondition: get() != NULL
	void next() { m_current = m_current->next; }

private:
	tailqueue_iterator(T* cur)
		: m_current(cur) {}
	// the current element
	T* m_current;
};
template <typename T>
//#error boost::enable_if< is_base<T, tailqueue_node<T> > >
struct TORRENT_EXTRA_EXPORT tailqueue
{
tailqueue();
tailqueue(): m_first(NULL), m_last(NULL), m_size(0) {}
tailqueue_iterator iterate() const
{ return tailqueue_iterator(m_first); }
tailqueue_iterator<const T> iterate() const
{ return tailqueue_iterator<const T>(m_first); }
void append(tailqueue& rhs);
void prepend(tailqueue& rhs);
tailqueue_node* pop_front();
void push_front(tailqueue_node* e);
void push_back(tailqueue_node* e);
tailqueue_node* get_all();
tailqueue_iterator<T> iterate()
{ return tailqueue_iterator<T>(m_first); }
void append(tailqueue<T>& rhs)
{
TORRENT_ASSERT(m_last == 0 || m_last->next == 0);
TORRENT_ASSERT(rhs.m_last == 0 || rhs.m_last->next == 0);
if (rhs.m_first == 0) return;
if (m_first == 0)
{
swap(rhs);
return;
}
m_last->next = rhs.m_first;
m_last = rhs.m_last;
m_size += rhs.m_size;
rhs.m_first = 0;
rhs.m_last = 0;
rhs.m_size = 0;
TORRENT_ASSERT(m_last == 0 || m_last->next == 0);
}
void prepend(tailqueue<T>& rhs)
{
TORRENT_ASSERT(m_last == 0 || m_last->next == 0);
TORRENT_ASSERT(rhs.m_last == 0 || rhs.m_last->next == 0);
if (rhs.m_first == 0) return;
if (m_first == 0)
{
swap(rhs);
return;
}
swap(rhs);
append(rhs);
}
T* pop_front()
{
TORRENT_ASSERT(m_last == 0 || m_last->next == 0);
T* e = m_first;
m_first = m_first->next;
if (e == m_last) m_last = 0;
e->next = 0;
--m_size;
return e;
}
void push_front(T* e)
{
TORRENT_ASSERT(e->next == 0);
TORRENT_ASSERT(m_last == 0 || m_last->next == 0);
e->next = m_first;
m_first = e;
if (!m_last) m_last = e;
++m_size;
}
void push_back(T* e)
{
TORRENT_ASSERT(e->next == 0);
TORRENT_ASSERT(m_last == 0 || m_last->next == 0);
if (m_last) m_last->next = e;
else m_first = e;
m_last = e;
e->next = 0;
++m_size;
}
T* get_all()
{
TORRENT_ASSERT(m_last == 0 || m_last->next == 0);
T* e = m_first;
m_first = 0;
m_last = 0;
m_size = 0;
return e;
}
void swap(tailqueue<T>& rhs)
{
T* tmp = m_first;
m_first = rhs.m_first;
rhs.m_first = tmp;
tmp = m_last;
m_last = rhs.m_last;
rhs.m_last = tmp;
int tmp2 = m_size;
m_size = rhs.m_size;
rhs.m_size = tmp2;
}
int size() const { return m_size; }
bool empty() const { return m_size == 0; }
void swap(tailqueue& rhs);
tailqueue_node* first() const { TORRENT_ASSERT(m_size > 0); return m_first; }
tailqueue_node* last() const { TORRENT_ASSERT(m_size > 0); return m_last; }
T* first() const { TORRENT_ASSERT(m_size > 0); return m_first; }
T* last() const { TORRENT_ASSERT(m_size > 0); return m_last; }
private:
tailqueue_node* m_first;
tailqueue_node* m_last;
T* m_first;
T* m_last;
int m_size;
};
}

View File

@ -119,7 +119,6 @@ libtorrent_rasterbar_la_SOURCES = \
storage.cpp \
session_stats.cpp \
string_util.cpp \
tailqueue.cpp \
thread.cpp \
torrent.cpp \
torrent_handle.cpp \

View File

@ -816,7 +816,7 @@ void block_cache::free_block(cached_piece_entry* pe, int block)
b.buf = NULL;
}
bool block_cache::evict_piece(cached_piece_entry* pe, tailqueue& jobs)
bool block_cache::evict_piece(cached_piece_entry* pe, tailqueue<disk_io_job>& jobs)
{
INVARIANT_CHECK;
@ -883,7 +883,7 @@ void block_cache::mark_for_deletion(cached_piece_entry* p)
"piece: %d\n", this, int(p->piece));
TORRENT_PIECE_ASSERT(p->jobs.empty(), p);
tailqueue jobs;
tailqueue<disk_io_job> jobs;
if (!evict_piece(p, jobs))
{
p->marked_for_deletion = true;
@ -1114,7 +1114,7 @@ int block_cache::try_evict_blocks(int num, cached_piece_entry* ignore)
return num;
}
void block_cache::clear(tailqueue& jobs)
void block_cache::clear(tailqueue<disk_io_job>& jobs)
{
INVARIANT_CHECK;
@ -1127,9 +1127,9 @@ void block_cache::clear(tailqueue& jobs)
{
cached_piece_entry& pe = const_cast<cached_piece_entry&>(*p);
#if TORRENT_USE_ASSERTS
for (tailqueue_iterator i = pe.jobs.iterate(); i.get(); i.next())
for (tailqueue_iterator<disk_io_job> i = pe.jobs.iterate(); i.get(); i.next())
TORRENT_PIECE_ASSERT((static_cast<disk_io_job const*>(i.get()))->piece == pe.piece, &pe);
for (tailqueue_iterator i = pe.read_jobs.iterate(); i.get(); i.next())
for (tailqueue_iterator<disk_io_job> i = pe.read_jobs.iterate(); i.get(); i.next())
TORRENT_PIECE_ASSERT((static_cast<disk_io_job const*>(i.get()))->piece == pe.piece, &pe);
#endif
// this also removes the jobs from the piece
@ -1590,7 +1590,7 @@ void block_cache::check_invariant() const
// if (i == cached_piece_entry::write_lru)
// TORRENT_ASSERT(pe->num_dirty > 0);
for (tailqueue_iterator j = pe->jobs.iterate(); j.get(); j.next())
for (tailqueue_iterator<disk_io_job> j = pe->jobs.iterate(); j.get(); j.next())
{
disk_io_job const* job = static_cast<disk_io_job const*>(j.get());
TORRENT_PIECE_ASSERT(job->piece == pe->piece, pe);
@ -1830,7 +1830,7 @@ bool block_cache::maybe_free_piece(cached_piece_entry* pe)
"piece: %d refcount: %d marked_for_deletion: %d\n", this
, int(pe->piece), int(pe->refcount), int(pe->marked_for_deletion));
tailqueue jobs;
tailqueue<disk_io_job> jobs;
bool removed = evict_piece(pe, jobs);
TORRENT_UNUSED(removed); // suppress warning
TORRENT_PIECE_ASSERT(removed, pe);
@ -1860,7 +1860,7 @@ cached_piece_entry* block_cache::find_piece(piece_manager* st, int piece)
TORRENT_PIECE_ASSERT(i->in_use, &*i);
#if TORRENT_USE_ASSERTS
for (tailqueue_iterator j = i->jobs.iterate(); j.get(); j.next())
for (tailqueue_iterator<const disk_io_job> j = i->jobs.iterate(); j.get(); j.next())
{
disk_io_job const* job = static_cast<disk_io_job const*>(j.get());
TORRENT_PIECE_ASSERT(job->piece == piece, &*i);

View File

@ -298,7 +298,7 @@ namespace libtorrent
// flush all blocks that are below p->hash.offset, since we've
// already hashed those blocks, they won't cause any read-back
int disk_io_thread::try_flush_hashed(cached_piece_entry* p, int cont_block
, tailqueue& completed_jobs, mutex::scoped_lock& l)
, jobqueue_t& completed_jobs, mutex::scoped_lock& l)
{
TORRENT_ASSERT(m_magic == 0x1337);
TORRENT_ASSERT(l.locked());
@ -688,7 +688,7 @@ namespace libtorrent
void disk_io_thread::iovec_flushed(cached_piece_entry* pe
, int* flushing, int num_blocks, int block_offset
, storage_error const& error
, tailqueue& completed_jobs)
, jobqueue_t& completed_jobs)
{
for (int i = 0; i < num_blocks; ++i)
flushing[i] -= block_offset;
@ -710,10 +710,10 @@ namespace libtorrent
}
else
{
disk_io_job* j = (disk_io_job*)pe->jobs.get_all();
disk_io_job* j = pe->jobs.get_all();
while (j)
{
disk_io_job* next = (disk_io_job*)j->next;
disk_io_job* next = j->next;
j->next = NULL;
TORRENT_PIECE_ASSERT((j->flags & disk_io_job::in_progress) || !j->storage, pe);
TORRENT_PIECE_ASSERT(j->piece == pe->piece, pe);
@ -735,7 +735,7 @@ namespace libtorrent
// issues write operations for blocks in the given
// range on the given piece.
int disk_io_thread::flush_range(cached_piece_entry* pe, int start, int end
, tailqueue& completed_jobs, mutex::scoped_lock& l)
, jobqueue_t& completed_jobs, mutex::scoped_lock& l)
{
TORRENT_ASSERT(l.locked());
INVARIANT_CHECK;
@ -777,18 +777,18 @@ namespace libtorrent
return iov_len;
}
void disk_io_thread::fail_jobs(storage_error const& e, tailqueue& jobs_)
void disk_io_thread::fail_jobs(storage_error const& e, jobqueue_t& jobs_)
{
tailqueue jobs;
jobqueue_t jobs;
fail_jobs_impl(e, jobs_, jobs);
if (jobs.size()) add_completed_jobs(jobs);
}
void disk_io_thread::fail_jobs_impl(storage_error const& e, tailqueue& src, tailqueue& dst)
void disk_io_thread::fail_jobs_impl(storage_error const& e, jobqueue_t& src, jobqueue_t& dst)
{
while (src.size())
{
disk_io_job* j = (disk_io_job*)src.pop_front();
disk_io_job* j = src.pop_front();
TORRENT_ASSERT((j->flags & disk_io_job::in_progress) || !j->storage);
j->ret = -1;
j->error = e;
@ -797,7 +797,7 @@ namespace libtorrent
}
void disk_io_thread::flush_piece(cached_piece_entry* pe, int flags
, tailqueue& completed_jobs, mutex::scoped_lock& l)
, jobqueue_t& completed_jobs, mutex::scoped_lock& l)
{
TORRENT_ASSERT(l.locked());
if (flags & flush_delete_cache)
@ -830,7 +830,7 @@ namespace libtorrent
}
void disk_io_thread::flush_cache(piece_manager* storage, boost::uint32_t flags
, tailqueue& completed_jobs, mutex::scoped_lock& l)
, jobqueue_t& completed_jobs, mutex::scoped_lock& l)
{
if (storage)
{
@ -900,7 +900,7 @@ namespace libtorrent
// size limit. This means we should not restrict ourselves to contiguous
// blocks of write cache line size, but try to flush all old blocks
// this is why we pass in 1 as cont_block to the flushing functions
void disk_io_thread::try_flush_write_blocks(int num, tailqueue& completed_jobs
void disk_io_thread::try_flush_write_blocks(int num, jobqueue_t& completed_jobs
, mutex::scoped_lock& l)
{
DLOG("try_flush_write_blocks: %d\n", num);
@ -972,7 +972,7 @@ namespace libtorrent
}
}
void disk_io_thread::flush_expired_write_blocks(tailqueue& completed_jobs
void disk_io_thread::flush_expired_write_blocks(jobqueue_t& completed_jobs
, mutex::scoped_lock& l)
{
DLOG("flush_expired_write_blocks\n");
@ -1022,7 +1022,7 @@ namespace libtorrent
namespace {
typedef int (disk_io_thread::*disk_io_fun_t)(disk_io_job* j, tailqueue& completed_jobs);
typedef int (disk_io_thread::*disk_io_fun_t)(disk_io_job* j, jobqueue_t& completed_jobs);
// this is a jump-table for disk I/O jobs
const disk_io_fun_t job_functions[] =
@ -1061,7 +1061,7 @@ namespace libtorrent
// below the number of blocks we flushed by the time we're done flushing
// that's why we need to call this fairly often. Both before and after
// a disk job is executed
void disk_io_thread::check_cache_level(mutex::scoped_lock& l, tailqueue& completed_jobs)
void disk_io_thread::check_cache_level(mutex::scoped_lock& l, jobqueue_t& completed_jobs)
{
int evict = m_disk_cache.num_to_evict(0);
if (evict > 0)
@ -1077,7 +1077,7 @@ namespace libtorrent
}
}
void disk_io_thread::perform_job(disk_io_job* j, tailqueue& completed_jobs)
void disk_io_thread::perform_job(disk_io_job* j, jobqueue_t& completed_jobs)
{
INVARIANT_CHECK;
TORRENT_ASSERT(j->next == 0);
@ -1197,7 +1197,7 @@ namespace libtorrent
return ret;
}
int disk_io_thread::do_read(disk_io_job* j, tailqueue& completed_jobs)
int disk_io_thread::do_read(disk_io_job* j, jobqueue_t& completed_jobs)
{
if (!m_settings.get_bool(settings_pack::use_read_cache)
|| m_settings.get_int(settings_pack::cache_size) == 0)
@ -1352,7 +1352,7 @@ namespace libtorrent
}
void disk_io_thread::maybe_issue_queued_read_jobs(cached_piece_entry* pe
, tailqueue& completed_jobs)
, jobqueue_t& completed_jobs)
{
TORRENT_PIECE_ASSERT(pe->outstanding_read == 1, pe);
@ -1375,7 +1375,7 @@ namespace libtorrent
// Any job that is a cache hit now, complete it immediately.
// Then, issue the first non-cache-hit job. Once it completes
// it will keep working off this list
tailqueue stalled_jobs;
jobqueue_t stalled_jobs;
pe->read_jobs.swap(stalled_jobs);
// the next job to issue (i.e. this is a cache-miss)
@ -1383,7 +1383,7 @@ namespace libtorrent
while (stalled_jobs.size() > 0)
{
disk_io_job* j = (disk_io_job*)stalled_jobs.pop_front();
disk_io_job* j = stalled_jobs.pop_front();
TORRENT_ASSERT(j->flags & disk_io_job::in_progress);
int ret = m_disk_cache.try_read(j);
@ -1465,7 +1465,7 @@ namespace libtorrent
return ret;
}
int disk_io_thread::do_write(disk_io_job* j, tailqueue& completed_jobs)
int disk_io_thread::do_write(disk_io_job* j, jobqueue_t& completed_jobs)
{
INVARIANT_CHECK;
TORRENT_ASSERT(j->d.io.buffer_size <= m_disk_cache.block_size());
@ -1836,19 +1836,19 @@ namespace libtorrent
#endif
// remove cache blocks belonging to this torrent
tailqueue completed_jobs;
jobqueue_t completed_jobs;
// remove outstanding jobs belonging to this torrent
mutex::scoped_lock l2(m_job_mutex);
// TODO: maybe the tailqueue_iterator should contain a pointer-pointer
// TODO: maybe the tailqueue_iterator<disk_io_job> should contain a pointer-pointer
// instead and have an unlink function
disk_io_job* qj = (disk_io_job*)m_queued_jobs.get_all();
tailqueue to_abort;
disk_io_job* qj = m_queued_jobs.get_all();
jobqueue_t to_abort;
while (qj)
{
disk_io_job* next = (disk_io_job*)qj->next;
disk_io_job* next = qj->next;
#if TORRENT_USE_ASSERTS
qj->next = NULL;
#endif
@ -1937,12 +1937,12 @@ namespace libtorrent
// remove outstanding hash jobs belonging to this torrent
mutex::scoped_lock l2(m_job_mutex);
disk_io_job* qj = (disk_io_job*)m_queued_hash_jobs.get_all();
tailqueue to_abort;
disk_io_job* qj = m_queued_hash_jobs.get_all();
jobqueue_t to_abort;
while (qj)
{
disk_io_job* next = (disk_io_job*)qj->next;
disk_io_job* next = qj->next;
#if TORRENT_USE_ASSERTS
qj->next = NULL;
#endif
@ -1959,7 +1959,7 @@ namespace libtorrent
j->callback = handler;
add_fence_job(storage, j);
tailqueue completed_jobs;
jobqueue_t completed_jobs;
fail_jobs_impl(storage_error(boost::asio::error::operation_aborted)
, to_abort, completed_jobs);
if (completed_jobs.size())
@ -2071,14 +2071,14 @@ namespace libtorrent
{
mutex::scoped_lock l(m_cache_mutex);
tailqueue jobs;
jobqueue_t jobs;
boost::unordered_set<cached_piece_entry*> const& cache = storage->cached_pieces();
// note that i is incremented in the body!
for (boost::unordered_set<cached_piece_entry*>::const_iterator i = cache.begin()
, end(cache.end()); i != end; )
{
tailqueue temp;
jobqueue_t temp;
if (m_disk_cache.evict_piece(*(i++), temp))
jobs.append(temp);
}
@ -2125,7 +2125,7 @@ namespace libtorrent
// never be the case when this function is used
// in fact, no jobs should really be hung on this piece
// at this point
tailqueue jobs;
jobqueue_t jobs;
bool ok = m_disk_cache.evict_piece(pe, jobs);
TORRENT_PIECE_ASSERT(ok, pe);
TORRENT_UNUSED(ok);
@ -2204,12 +2204,12 @@ namespace libtorrent
// if there are any hash-jobs hanging off of this piece
// we should post them now
disk_io_job* j = (disk_io_job*)pe->jobs.get_all();
tailqueue hash_jobs;
disk_io_job* j = pe->jobs.get_all();
jobqueue_t hash_jobs;
while (j)
{
TORRENT_PIECE_ASSERT((j->flags & disk_io_job::in_progress) || !j->storage, pe);
disk_io_job* next = (disk_io_job*)j->next;
disk_io_job* next = j->next;
j->next = NULL;
TORRENT_PIECE_ASSERT(j->piece == pe->piece, pe);
if (j->action == disk_io_job::hash) hash_jobs.push_back(j);
@ -2220,9 +2220,9 @@ namespace libtorrent
{
sha1_hash result = pe->hash->h.final();
for (tailqueue_iterator i = hash_jobs.iterate(); i.get(); i.next())
for (tailqueue_iterator<disk_io_job> i = hash_jobs.iterate(); i.get(); i.next())
{
disk_io_job* hj = (disk_io_job*)i.get();
disk_io_job* hj = const_cast<disk_io_job*>(i.get());
memcpy(hj->d.piece_hash, result.data(), 20);
hj->ret = 0;
}
@ -2288,7 +2288,7 @@ namespace libtorrent
return ret >= 0 ? 0 : -1;
}
int disk_io_thread::do_hash(disk_io_job* j, tailqueue& /* completed_jobs */ )
int disk_io_thread::do_hash(disk_io_job* j, jobqueue_t& /* completed_jobs */ )
{
INVARIANT_CHECK;
@ -2533,7 +2533,7 @@ namespace libtorrent
return ret < 0 ? ret : 0;
}
int disk_io_thread::do_move_storage(disk_io_job* j, tailqueue& /* completed_jobs */ )
int disk_io_thread::do_move_storage(disk_io_job* j, jobqueue_t& /* completed_jobs */ )
{
// if this assert fails, something's wrong with the fence logic
TORRENT_ASSERT(j->storage->num_outstanding_jobs() == 1);
@ -2543,7 +2543,7 @@ namespace libtorrent
, j->flags, j->error);
}
int disk_io_thread::do_release_files(disk_io_job* j, tailqueue& completed_jobs)
int disk_io_thread::do_release_files(disk_io_job* j, jobqueue_t& completed_jobs)
{
INVARIANT_CHECK;
@ -2558,7 +2558,7 @@ namespace libtorrent
return j->error ? -1 : 0;
}
int disk_io_thread::do_delete_files(disk_io_job* j, tailqueue& completed_jobs)
int disk_io_thread::do_delete_files(disk_io_job* j, jobqueue_t& completed_jobs)
{
TORRENT_ASSERT(j->buffer.string == 0);
INVARIANT_CHECK;
@ -2579,7 +2579,7 @@ namespace libtorrent
return j->error ? -1 : 0;
}
int disk_io_thread::do_check_fastresume(disk_io_job* j, tailqueue& /* completed_jobs */ )
int disk_io_thread::do_check_fastresume(disk_io_job* j, jobqueue_t& /* completed_jobs */ )
{
// if this assert fails, something's wrong with the fence logic
TORRENT_ASSERT(j->storage->num_outstanding_jobs() == 1);
@ -2592,7 +2592,7 @@ namespace libtorrent
return j->storage->check_fastresume(*rd, links.get(), j->error);
}
int disk_io_thread::do_save_resume_data(disk_io_job* j, tailqueue& completed_jobs)
int disk_io_thread::do_save_resume_data(disk_io_job* j, jobqueue_t& completed_jobs)
{
// if this assert fails, something's wrong with the fence logic
TORRENT_ASSERT(j->storage->num_outstanding_jobs() == 1);
@ -2608,7 +2608,7 @@ namespace libtorrent
return j->error ? -1 : 0;
}
int disk_io_thread::do_rename_file(disk_io_job* j, tailqueue& /* completed_jobs */ )
int disk_io_thread::do_rename_file(disk_io_job* j, jobqueue_t& /* completed_jobs */ )
{
// if this assert fails, something's wrong with the fence logic
TORRENT_ASSERT(j->storage->num_outstanding_jobs() == 1);
@ -2619,7 +2619,7 @@ namespace libtorrent
return j->error ? -1 : 0;
}
int disk_io_thread::do_stop_torrent(disk_io_job* j, tailqueue& completed_jobs)
int disk_io_thread::do_stop_torrent(disk_io_job* j, jobqueue_t& completed_jobs)
{
// if this assert fails, something's wrong with the fence logic
TORRENT_ASSERT(j->storage->num_outstanding_jobs() == 1);
@ -2637,7 +2637,7 @@ namespace libtorrent
return j->error ? -1 : 0;
}
int disk_io_thread::do_cache_piece(disk_io_job* j, tailqueue& /* completed_jobs */ )
int disk_io_thread::do_cache_piece(disk_io_job* j, jobqueue_t& /* completed_jobs */ )
{
INVARIANT_CHECK;
TORRENT_ASSERT(j->buffer.disk_block == 0);
@ -2739,7 +2739,7 @@ namespace libtorrent
}
#ifndef TORRENT_NO_DEPRECATE
int disk_io_thread::do_finalize_file(disk_io_job* j, tailqueue& /* completed_jobs */)
int disk_io_thread::do_finalize_file(disk_io_job* j, jobqueue_t& /* completed_jobs */)
{
j->storage->get_storage_impl()->finalize_file(j->piece, j->error);
return j->error ? -1 : 0;
@ -2889,7 +2889,7 @@ namespace libtorrent
#endif
}
int disk_io_thread::do_flush_piece(disk_io_job* j, tailqueue& completed_jobs)
int disk_io_thread::do_flush_piece(disk_io_job* j, jobqueue_t& completed_jobs)
{
mutex::scoped_lock l(m_cache_mutex);
@ -2908,7 +2908,7 @@ namespace libtorrent
// this is triggered every time we insert a new dirty block in a piece
// by the time this gets executed, the block may already have been flushed
// triggered by another mechanism.
int disk_io_thread::do_flush_hashed(disk_io_job* j, tailqueue& completed_jobs)
int disk_io_thread::do_flush_hashed(disk_io_job* j, jobqueue_t& completed_jobs)
{
mutex::scoped_lock l(m_cache_mutex);
@ -2961,27 +2961,27 @@ namespace libtorrent
return 0;
}
int disk_io_thread::do_flush_storage(disk_io_job* j, tailqueue& completed_jobs)
int disk_io_thread::do_flush_storage(disk_io_job* j, jobqueue_t& completed_jobs)
{
mutex::scoped_lock l(m_cache_mutex);
flush_cache(j->storage.get(), flush_write_cache, completed_jobs, l);
return 0;
}
int disk_io_thread::do_trim_cache(disk_io_job*, tailqueue& /* completed_jobs */)
int disk_io_thread::do_trim_cache(disk_io_job*, jobqueue_t& /* completed_jobs */)
{
//#error implement
return 0;
}
int disk_io_thread::do_file_priority(disk_io_job* j, tailqueue& /* completed_jobs */ )
int disk_io_thread::do_file_priority(disk_io_job* j, jobqueue_t& /* completed_jobs */ )
{
boost::scoped_ptr<std::vector<boost::uint8_t> > p(j->buffer.priorities);
j->storage->get_storage_impl()->set_file_priority(*p, j->error);
return 0;
}
int disk_io_thread::do_load_torrent(disk_io_job* j, tailqueue& /* completed_jobs */ )
int disk_io_thread::do_load_torrent(disk_io_job* j, jobqueue_t& /* completed_jobs */ )
{
add_torrent_params* params = (add_torrent_params*)j->requester;
@ -3007,7 +3007,7 @@ namespace libtorrent
// this job won't return until all outstanding jobs on this
// piece are completed or cancelled and the buffers for it
// have been evicted
int disk_io_thread::do_clear_piece(disk_io_job* j, tailqueue& completed_jobs)
int disk_io_thread::do_clear_piece(disk_io_job* j, jobqueue_t& completed_jobs)
{
mutex::scoped_lock l(m_cache_mutex);
@ -3027,7 +3027,7 @@ namespace libtorrent
// evicted. A piece may fail to be evicted if there
// are still outstanding operations on it, in which case
// try again later
tailqueue jobs;
jobqueue_t jobs;
if (m_disk_cache.evict_piece(pe, jobs))
{
fail_jobs_impl(storage_error(boost::asio::error::operation_aborted)
@ -3044,7 +3044,7 @@ namespace libtorrent
return retry_job;
}
int disk_io_thread::do_tick(disk_io_job* j, tailqueue& /* completed_jobs */ )
int disk_io_thread::do_tick(disk_io_job* j, jobqueue_t& /* completed_jobs */ )
{
// true means this storage wants more ticks, false
// disables ticking (until it's enabled again)
@ -3193,7 +3193,7 @@ namespace libtorrent
{
while (!m_queued_jobs.empty())
{
disk_io_job* j = (disk_io_job*)m_queued_jobs.pop_front();
disk_io_job* j = m_queued_jobs.pop_front();
maybe_flush_write_blocks();
execute_job(j);
}
@ -3218,7 +3218,7 @@ namespace libtorrent
, int(m_stats_counters[counters::blocked_disk_jobs])
, m_queued_jobs.size(), int(m_num_threads));
m_last_cache_expiry = now;
tailqueue completed_jobs;
jobqueue_t completed_jobs;
flush_expired_write_blocks(completed_jobs, l);
l.unlock();
if (completed_jobs.size())
@ -3227,7 +3227,7 @@ namespace libtorrent
void disk_io_thread::execute_job(disk_io_job* j)
{
tailqueue completed_jobs;
jobqueue_t completed_jobs;
perform_job(j, completed_jobs);
if (completed_jobs.size())
add_completed_jobs(completed_jobs);
@ -3260,14 +3260,14 @@ namespace libtorrent
break;
}
j = (disk_io_job*)m_queued_jobs.pop_front();
j = m_queued_jobs.pop_front();
}
else if (type == hasher_thread)
{
TORRENT_ASSERT(l.locked());
while (m_queued_hash_jobs.empty() && thread_id < m_num_threads) m_hash_job_cond.wait(l);
if (m_queued_hash_jobs.empty() && thread_id >= m_num_threads) break;
j = (disk_io_job*)m_queued_hash_jobs.pop_front();
j = m_queued_hash_jobs.pop_front();
}
l.unlock();
@ -3337,7 +3337,7 @@ namespace libtorrent
{
TORRENT_ASSERT(m_magic == 0x1337);
tailqueue jobs;
jobqueue_t jobs;
m_disk_cache.clear(jobs);
fail_jobs(storage_error(boost::asio::error::operation_aborted), jobs);
@ -3378,14 +3378,14 @@ namespace libtorrent
void disk_io_thread::add_completed_job(disk_io_job* j)
{
tailqueue tmp;
jobqueue_t tmp;
tmp.push_back(j);
add_completed_jobs(tmp);
}
void disk_io_thread::add_completed_jobs(tailqueue& jobs)
void disk_io_thread::add_completed_jobs(jobqueue_t& jobs)
{
tailqueue new_completed_jobs;
jobqueue_t new_completed_jobs;
do
{
// when a job completes, it's possible for it to cause
@ -3400,14 +3400,14 @@ namespace libtorrent
} while (jobs.size() > 0);
}
void disk_io_thread::add_completed_jobs_impl(tailqueue& jobs
, tailqueue& completed_jobs)
void disk_io_thread::add_completed_jobs_impl(jobqueue_t& jobs
, jobqueue_t& completed_jobs)
{
tailqueue new_jobs;
jobqueue_t new_jobs;
int ret = 0;
for (tailqueue_iterator i = jobs.iterate(); i.get(); i.next())
for (tailqueue_iterator<disk_io_job> i = jobs.iterate(); i.get(); i.next())
{
disk_io_job* j = (disk_io_job*)i.get();
disk_io_job* j = i.get();
TORRENT_ASSERT((j->flags & disk_io_job::in_progress) || !j->storage);
// DLOG("job_complete %s outstanding: %d\n"
@ -3442,7 +3442,7 @@ namespace libtorrent
if (new_jobs.size() > 0)
{
#if TORRENT_USE_ASSERTS
for (tailqueue_iterator i = new_jobs.iterate(); i.get(); i.next())
for (tailqueue_iterator<disk_io_job> i = new_jobs.iterate(); i.get(); i.next())
{
disk_io_job const* j = static_cast<disk_io_job const*>(i.get());
TORRENT_ASSERT((j->flags & disk_io_job::in_progress) || !j->storage);
@ -3460,12 +3460,12 @@ namespace libtorrent
}
}
#endif
tailqueue other_jobs;
tailqueue flush_jobs;
jobqueue_t other_jobs;
jobqueue_t flush_jobs;
mutex::scoped_lock l_(m_cache_mutex);
while (new_jobs.size() > 0)
{
disk_io_job* j = (disk_io_job*)new_jobs.pop_front();
disk_io_job* j = new_jobs.pop_front();
if (j->action == disk_io_job::read
&& m_settings.get_bool(settings_pack::use_read_cache)
@ -3535,7 +3535,7 @@ namespace libtorrent
while (flush_jobs.size() > 0)
{
disk_io_job* j = (disk_io_job*)flush_jobs.pop_front();
disk_io_job* j = flush_jobs.pop_front();
add_job(j, false);
}
@ -3568,7 +3568,7 @@ namespace libtorrent
#endif
int num_jobs = m_completed_jobs.size();
disk_io_job* j = (disk_io_job*)m_completed_jobs.get_all();
disk_io_job* j = m_completed_jobs.get_all();
l.unlock();
uncork_interface* uncork = static_cast<uncork_interface*>(userdata);
@ -3580,7 +3580,7 @@ namespace libtorrent
TORRENT_ASSERT(j->job_posted == true);
TORRENT_ASSERT(j->callback_called == false);
// DLOG(" callback: %s\n", job_action_name[j->action]);
disk_io_job* next = (disk_io_job*)j->next;
disk_io_job* next = j->next;
#if TORRENT_USE_ASSERTS
j->callback_called = true;

View File

@ -1606,7 +1606,7 @@ namespace libtorrent
, m_outstanding_jobs(0)
{}
int disk_job_fence::job_complete(disk_io_job* j, tailqueue& jobs)
int disk_job_fence::job_complete(disk_io_job* j, tailqueue<disk_io_job>& jobs)
{
mutex::scoped_lock l(m_mutex);

View File

@ -32,98 +32,3 @@ POSSIBILITY OF SUCH DAMAGE.
#include "libtorrent/tailqueue.hpp"
namespace libtorrent
{
// construct an empty queue: no head, no tail, zero elements
tailqueue::tailqueue(): m_first(0), m_last(0), m_size(0) {}
// move every node of rhs onto the end of this queue. rhs is left
// empty. O(1): only the boundary pointers are touched.
void tailqueue::append(tailqueue& rhs)
{
	TORRENT_ASSERT(m_last == 0 || m_last->next == 0);
	TORRENT_ASSERT(rhs.m_last == 0 || rhs.m_last->next == 0);

	// nothing to transfer
	if (rhs.m_first == 0) return;

	// we're empty; simply adopt rhs' chain wholesale
	if (m_first == 0)
	{
		swap(rhs);
		return;
	}

	// splice rhs' chain onto our tail and absorb its count
	m_last->next = rhs.m_first;
	m_last = rhs.m_last;
	m_size += rhs.m_size;

	// reset rhs to the empty state
	rhs.m_size = 0;
	rhs.m_last = 0;
	rhs.m_first = 0;

	TORRENT_ASSERT(m_last == 0 || m_last->next == 0);
}
// move every node of rhs onto the front of this queue. rhs is left
// empty. implemented by exchanging contents with rhs and splicing our
// original chain back on the end.
void tailqueue::prepend(tailqueue& rhs)
{
	TORRENT_ASSERT(m_last == 0 || m_last->next == 0);
	TORRENT_ASSERT(rhs.m_last == 0 || rhs.m_last->next == 0);

	// nothing to transfer
	if (rhs.m_first == 0) return;

	// after the swap, rhs holds what used to be our chain (possibly
	// empty) and we hold rhs' chain; appending rhs restores our
	// elements behind the prepended ones. append() handles the
	// empty-rhs case, so no separate branch is needed here.
	swap(rhs);
	append(rhs);
}
// unlink and return the head node. The returned node's next pointer is
// cleared. Precondition: the queue must not be empty — popping an empty
// queue would dereference a null head, so assert it up front (debug
// builds only; release behavior is unchanged).
tailqueue_node* tailqueue::pop_front()
{
	TORRENT_ASSERT(m_first);
	TORRENT_ASSERT(m_last == 0 || m_last->next == 0);
	tailqueue_node* e = m_first;
	m_first = m_first->next;
	// popped the only element; the tail is gone too
	if (e == m_last) m_last = 0;
	e->next = 0;
	--m_size;
	return e;
}
// link e in as the new head. e must not already be a member of a
// queue (its next pointer must be cleared).
void tailqueue::push_front(tailqueue_node* e)
{
	TORRENT_ASSERT(e->next == 0);
	TORRENT_ASSERT(m_last == 0 || m_last->next == 0);

	// chain the old head behind e and make e the head
	e->next = m_first;
	m_first = e;

	// if the queue was empty, e is the tail as well
	if (m_last == 0) m_last = e;
	++m_size;
}
// link e in as the new tail. e must not already be a member of a
// queue (its next pointer must be cleared).
void tailqueue::push_back(tailqueue_node* e)
{
	TORRENT_ASSERT(e->next == 0);
	TORRENT_ASSERT(m_last == 0 || m_last->next == 0);

	e->next = 0; // e terminates the chain
	if (m_last == 0) m_first = e; // queue was empty; e is the head too
	else m_last->next = e;        // hang e off the current tail
	m_last = e;
	++m_size;
}
// detach and return the entire chain of nodes (head first), leaving
// the queue empty. The caller takes ownership of the chain.
tailqueue_node* tailqueue::get_all()
{
	TORRENT_ASSERT(m_last == 0 || m_last->next == 0);
	tailqueue_node* chain = m_first;
	// reset to the empty state
	m_first = 0;
	m_last = 0;
	m_size = 0;
	return chain;
}
// exchange the complete contents (head, tail and size) of this queue
// with rhs. O(1); no nodes are touched.
void tailqueue::swap(tailqueue& rhs)
{
	tailqueue_node* const f = m_first;
	tailqueue_node* const l = m_last;
	int const s = m_size;

	m_first = rhs.m_first;
	m_last = rhs.m_last;
	m_size = rhs.m_size;

	rhs.m_first = f;
	rhs.m_last = l;
	rhs.m_size = s;
}
}

View File

@ -204,7 +204,7 @@ void test_write()
// to the buffer
RETURN_BUFFER;
tailqueue jobs;
tailqueue<disk_io_job> jobs;
bc.clear(jobs);
}
@ -219,7 +219,7 @@ void test_flush()
int flushing[1] = {0};
FLUSH(flushing);
tailqueue jobs;
tailqueue<disk_io_job> jobs;
bc.clear(jobs);
}
@ -241,7 +241,7 @@ void test_insert()
TEST_EQUAL(c[counters::arc_write_size], 0);
TEST_EQUAL(c[counters::arc_volatile_size], 0);
tailqueue jobs;
tailqueue<disk_io_job> jobs;
bc.clear(jobs);
}
@ -263,7 +263,7 @@ void test_evict()
TEST_EQUAL(c[counters::arc_write_size], 0);
TEST_EQUAL(c[counters::arc_volatile_size], 0);
tailqueue jobs;
tailqueue<disk_io_job> jobs;
// this should make it not be evicted
// just free the buffers
++pe->piece_refcount;
@ -359,7 +359,7 @@ void test_arc_promote()
TEST_EQUAL(c[counters::arc_write_size], 0);
TEST_EQUAL(c[counters::arc_volatile_size], 0);
tailqueue jobs;
tailqueue<disk_io_job> jobs;
bc.clear(jobs);
}
@ -381,7 +381,7 @@ void test_arc_unghost()
TEST_EQUAL(c[counters::arc_write_size], 0);
TEST_EQUAL(c[counters::arc_volatile_size], 0);
tailqueue jobs;
tailqueue<disk_io_job> jobs;
bc.evict_piece(pe, jobs);
bc.update_stats_counters(c);
@ -451,7 +451,7 @@ void test_unaligned_read()
// return the reference to the buffer we just read
RETURN_BUFFER;
tailqueue jobs;
tailqueue<disk_io_job> jobs;
bc.clear(jobs);
}

View File

@ -26,7 +26,7 @@ TORRENT_TEST(empty_fence)
ret = fence.is_blocked(&test_job[8]);
TEST_CHECK(ret == true);
tailqueue jobs;
tailqueue<disk_io_job> jobs;
// complete the fence job
fence.job_complete(&test_job[5], jobs);
@ -78,7 +78,7 @@ TORRENT_TEST(job_fence)
ret = fence.is_blocked(&test_job[8]);
TEST_CHECK(ret == true);
tailqueue jobs;
tailqueue<disk_io_job> jobs;
fence.job_complete(&test_job[3], jobs);
TEST_CHECK(jobs.size() == 0);
@ -155,7 +155,7 @@ TORRENT_TEST(double_fence)
ret = fence.is_blocked(&test_job[9]);
TEST_CHECK(ret == true);
tailqueue jobs;
tailqueue<disk_io_job> jobs;
fence.job_complete(&test_job[3], jobs);
TEST_CHECK(jobs.size() == 0);

View File

@ -35,15 +35,16 @@ POSSIBILITY OF SUCH DAMAGE.
using namespace libtorrent;
struct test_node : tailqueue_node
struct test_node : tailqueue_node<test_node>
{
test_node(char n) : name(n) {}
char name;
};
void check_chain(tailqueue& chain, char const* expected)
void check_chain(tailqueue<test_node>& chain, char const* expected)
{
tailqueue_iterator i = chain.iterate();
tailqueue_iterator<test_node> i = chain.iterate();
while (i.get())
{
TEST_EQUAL(((test_node*)i.get())->name, *expected);
@ -53,7 +54,7 @@ void check_chain(tailqueue& chain, char const* expected)
TEST_EQUAL(expected[0], 0);
}
void free_chain(tailqueue& q)
void free_chain(tailqueue<test_node>& q)
{
test_node* chain = (test_node*)q.get_all();
while(chain)
@ -64,7 +65,7 @@ void free_chain(tailqueue& q)
}
}
void build_chain(tailqueue& q, char const* str)
void build_chain(tailqueue<test_node>& q, char const* str)
{
free_chain(q);
@ -80,8 +81,8 @@ void build_chain(tailqueue& q, char const* str)
TORRENT_TEST(tailqueue)
{
tailqueue t1;
tailqueue t2;
tailqueue<test_node> t1;
tailqueue<test_node> t2;
// test prepend
build_chain(t1, "abcdef");