forked from premiere/premiere-libtorrent

turn linked_list into a template to improve type safety and remove some casts

parent ffa870d280
commit 8cf8e65861

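In short: the intrusive list previously traded in untyped `list_node*`, so every call site had to downcast to the concrete element type. Parameterizing `list_node`, `list_iterator`, and `linked_list` on the element type moves that knowledge into the container. A minimal before/after sketch of a call site (illustrative only, mirroring the session_impl hunks below; `target` stands in for the torrent being searched for):

    // before: the list hands back untyped base pointers, callers downcast
    {
        torrent* t = static_cast<torrent*>(m_torrent_lru.front());
        while (t != NULL && t != target) t = static_cast<torrent*>(t->next);
    }
    // after: linked_list<torrent> is typed end to end, no casts
    {
        torrent* t = m_torrent_lru.front();
        while (t != NULL && t != target) t = t->next;
    }
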
@@ -773,7 +773,7 @@ namespace libtorrent
 		// which torrents should be loaded into RAM and which ones
 		// shouldn't. Each torrent that's loaded is part of this
 		// list.
-		linked_list m_torrent_lru;
+		linked_list<torrent> m_torrent_lru;
 
 		std::map<std::string, boost::shared_ptr<torrent> > m_uuids;
 

@@ -168,7 +168,7 @@ namespace libtorrent
 
 	// list_node is here to be able to link this cache entry
 	// into one of the LRU lists
-	struct TORRENT_EXTRA_EXPORT cached_piece_entry : list_node
+	struct TORRENT_EXTRA_EXPORT cached_piece_entry : list_node<cached_piece_entry>
 	{
 		cached_piece_entry();
 		~cached_piece_entry();

@@ -362,7 +362,7 @@ namespace libtorrent
 		std::pair<iterator, iterator> all_pieces() const;
 		int num_pieces() const { return m_pieces.size(); }
 
-		list_iterator write_lru_pieces() const
+		list_iterator<cached_piece_entry> write_lru_pieces() const
 		{ return m_lru[cached_piece_entry::write_lru].iterate(); }
 
 		int num_write_lru_pieces() const { return m_lru[cached_piece_entry::write_lru].size(); }

@@ -491,7 +491,7 @@ namespace libtorrent
 		// [2] = read-LRU1-ghost
 		// [3] = read-LRU2
 		// [4] = read-LRU2-ghost
-		linked_list m_lru[cached_piece_entry::num_lrus];
+		linked_list<cached_piece_entry> m_lru[cached_piece_entry::num_lrus];
 
 		// this is used to determine whether to evict blocks from
 		// L1 or L2.

@@ -37,40 +37,46 @@ POSSIBILITY OF SUCH DAMAGE.
 
 namespace libtorrent
 {
+	// TODO: 3 enable_if T derives from list_node<T>
+	template <typename T>
 	struct list_node
 	{
 		list_node() : prev(0), next(0) {}
-		list_node* prev;
-		list_node* next;
+		T* prev;
+		T* next;
 	};
 
+	template <typename T>
 	struct list_iterator
 	{
+		template <typename U>
 		friend struct linked_list;
-		list_node const* get() const { return m_current; }
-		list_node* get() { return m_current; }
+
+		T const* get() const { return m_current; }
+		T* get() { return m_current; }
 		void next() { m_current = m_current->next; }
 		void prev() { m_current = m_current->prev; }
 
 	private:
-		list_iterator(list_node* cur)
+		list_iterator(T* cur)
 			: m_current(cur) {}
 		// the current element
-		list_node* m_current;
+		T* m_current;
 	};
 
-	// TOOD: 3 make this a template and add a unit test
+	// TOOD: 3 add a unit test
+	template <typename T>
 	struct linked_list
 	{
-		linked_list(): m_first(0), m_last(0), m_size(0) {}
+		linked_list(): m_first(NULL), m_last(NULL), m_size(0) {}
 
-		list_iterator iterate() const
-		{ return list_iterator(m_first); }
+		list_iterator<T> iterate() const
+		{ return list_iterator<T>(m_first); }
 
-		void erase(list_node* e)
+		void erase(T* e)
 		{
 #if TORRENT_USE_ASSERTS
-			list_node* tmp = m_first;
+			T* tmp = m_first;
 			bool found = false;
 			while (tmp)
 			{

@@ -101,7 +107,7 @@ namespace libtorrent
 			--m_size;
 			TORRENT_ASSERT(m_last == 0 || m_last->next == 0);
 		}
-		void push_front(list_node* e)
+		void push_front(T* e)
 		{
 			TORRENT_ASSERT(e->next == 0);
 			TORRENT_ASSERT(e->prev== 0);

@@ -113,7 +119,7 @@ namespace libtorrent
 			m_first = e;
 			++m_size;
 		}
-		void push_back(list_node* e)
+		void push_back(T* e)
 		{
 			TORRENT_ASSERT(e->next == 0);
 			TORRENT_ASSERT(e->prev== 0);

@@ -125,25 +131,25 @@ namespace libtorrent
 			m_last = e;
 			++m_size;
 		}
-		list_node* get_all()
+		T* get_all()
 		{
 			TORRENT_ASSERT(m_last == 0 || m_last->next == 0);
 			TORRENT_ASSERT(m_first == 0 || m_first->prev == 0);
-			list_node* e = m_first;
+			T* e = m_first;
 			m_first = 0;
 			m_last = 0;
 			m_size = 0;
 			return e;
 		}
-		list_node* back() { return m_last; }
-		list_node* front() { return m_first; }
-		list_node const* back() const { return m_last; }
-		list_node const* front() const { return m_first; }
+		T* back() { return m_last; }
+		T* front() { return m_first; }
+		T const* back() const { return m_last; }
+		T const* front() const { return m_first; }
 		int size() const { return m_size; }
 		bool empty() const { return m_size == 0; }
 	private:
-		list_node* m_first;
-		list_node* m_last;
+		T* m_first;
+		T* m_last;
 		int m_size;
 	};
 }

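As an illustration of the templated interface, a minimal usage sketch (not part of the patch; it assumes the header above lives at libtorrent/linked_list.hpp):

    #include "libtorrent/linked_list.hpp"
    #include <cassert>

    using namespace libtorrent;

    // an element links itself into a list by deriving from list_node<T>,
    // with T being the element type itself, so prev/next are typed
    struct job : list_node<job>
    {
        explicit job(int i) : id(i) {}
        int id;
    };

    int main()
    {
        linked_list<job> queue;
        job a(1);
        job b(2);
        queue.push_back(&a);
        queue.push_back(&b);

        // get() returns job* directly; no cast at the call site
        for (list_iterator<job> it = queue.iterate(); it.get(); it.next())
            assert(it.get()->id > 0);

        assert(queue.front() == &a);
        assert(queue.back() == &b);

        // get_all() detaches the whole chain and empties the list;
        // the detached nodes still point at each other via prev/next
        job* head = queue.get_all();
        assert(head == &a);
        assert(queue.empty());
        return 0;
    }
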
@@ -275,7 +275,7 @@ namespace libtorrent
 		, public request_callback
 		, public peer_class_set
 		, public boost::enable_shared_from_this<torrent>
-		, public list_node // used for torrent activity LRU
+		, public list_node<torrent> // used for torrent activity LRU
 	{
 	public:
 

@@ -410,7 +410,7 @@ void block_cache::bump_lru(cached_piece_entry* p)
 {
 	// move to the top of the LRU list
 	TORRENT_PIECE_ASSERT(p->cache_state == cached_piece_entry::write_lru, p);
-	linked_list* lru_list = &m_lru[p->cache_state];
+	linked_list<cached_piece_entry>* lru_list = &m_lru[p->cache_state];
 
 	// move to the back (MRU) of the list
 	lru_list->erase(p);

@@ -522,8 +522,8 @@ void block_cache::update_cache_state(cached_piece_entry* p)
 
 	TORRENT_PIECE_ASSERT(state < cached_piece_entry::num_lrus, p);
 	TORRENT_PIECE_ASSERT(desired_state < cached_piece_entry::num_lrus, p);
-	linked_list* src = &m_lru[state];
-	linked_list* dst = &m_lru[desired_state];
+	linked_list<cached_piece_entry>* src = &m_lru[state];
+	linked_list<cached_piece_entry>* dst = &m_lru[desired_state];
 
 	src->erase(p);
 	dst->push_back(p);

@@ -579,7 +579,7 @@ cached_piece_entry* block_cache::allocate_piece(disk_io_job const* j, int cache_
 	j->storage->add_piece(p);
 
 	TORRENT_PIECE_ASSERT(p->cache_state < cached_piece_entry::num_lrus, p);
-	linked_list* lru_list = &m_lru[p->cache_state];
+	linked_list<cached_piece_entry>* lru_list = &m_lru[p->cache_state];
 	lru_list->push_back(p);
 
 	// this piece is part of the ARC cache (as opposed to

@@ -897,7 +897,7 @@ void block_cache::erase_piece(cached_piece_entry* pe)
 	TORRENT_PIECE_ASSERT(pe->ok_to_evict(), pe);
 	TORRENT_PIECE_ASSERT(pe->cache_state < cached_piece_entry::num_lrus, pe);
 	TORRENT_PIECE_ASSERT(pe->jobs.empty(), pe);
-	linked_list* lru_list = &m_lru[pe->cache_state];
+	linked_list<cached_piece_entry>* lru_list = &m_lru[pe->cache_state];
 	if (pe->hash)
 	{
 		TORRENT_PIECE_ASSERT(pe->hash->offset == 0, pe);

@@ -931,7 +931,7 @@ int block_cache::try_evict_blocks(int num, cached_piece_entry* ignore)
 	// lru_list is an array of two lists, these are the two ends to evict from,
 	// ordered by preference.
 
-	linked_list* lru_list[3];
+	linked_list<cached_piece_entry>* lru_list[3];
 
 	// however, before we consider any of the proper LRU lists, we evict pieces
 	// from the volatile list. These are low priority pieces that were

@@ -979,7 +979,7 @@ int block_cache::try_evict_blocks(int num, cached_piece_entry* ignore)
 		// to iterate over this linked list. Presumably because of the random
 		// access of memory. It would be nice if pieces with no evictable blocks
 		// weren't in this list
-		for (list_iterator i = lru_list[end]->iterate(); i.get() && num > 0;)
+		for (list_iterator<cached_piece_entry> i = lru_list[end]->iterate(); i.get() && num > 0;)
 		{
 			cached_piece_entry* pe = reinterpret_cast<cached_piece_entry*>(i.get());
 			TORRENT_PIECE_ASSERT(pe->in_use, pe);

@@ -1044,7 +1044,7 @@ int block_cache::try_evict_blocks(int num, cached_piece_entry* ignore)
 	{
 		for (int pass = 0; pass < 2 && num > 0; ++pass)
 		{
-			for (list_iterator i = m_lru[cached_piece_entry::write_lru].iterate(); i.get() && num > 0;)
+			for (list_iterator<cached_piece_entry> i = m_lru[cached_piece_entry::write_lru].iterate(); i.get() && num > 0;)
 			{
 				cached_piece_entry* pe = reinterpret_cast<cached_piece_entry*>(i.get());
 				TORRENT_PIECE_ASSERT(pe->in_use, pe);

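Note: both eviction loops above retain `reinterpret_cast<cached_piece_entry*>(i.get())` even though the templated iterator's `get()` now returns `cached_piece_entry*` directly, making the cast an identity conversion. A follow-up cleanup could presumably reduce those lines to:

    // hypothetical follow-up, not in this patch: get() is typed now
    cached_piece_entry* pe = i.get();
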
@@ -1171,7 +1171,7 @@ void block_cache::move_to_ghost(cached_piece_entry* pe)
 		return;
 
 	// if the ghost list is growing too big, remove the oldest entry
-	linked_list* ghost_list = &m_lru[pe->cache_state + 1];
+	linked_list<cached_piece_entry>* ghost_list = &m_lru[pe->cache_state + 1];
 	while (ghost_list->size() >= m_ghost_size)
 	{
 		cached_piece_entry* p = static_cast<cached_piece_entry*>(ghost_list->front());

@@ -1581,7 +1581,7 @@ void block_cache::check_invariant() const
 	{
 		time_point timeout = min_time();
 
-		for (list_iterator p = m_lru[i].iterate(); p.get(); p.next())
+		for (list_iterator<cached_piece_entry> p = m_lru[i].iterate(); p.get(); p.next())
 		{
 			cached_piece_entry* pe = static_cast<cached_piece_entry*>(p.get());
 			TORRENT_PIECE_ASSERT(pe->cache_state == i, pe);

@@ -905,13 +905,13 @@ namespace libtorrent
 	{
 		DLOG("try_flush_write_blocks: %d\n", num);
 
-		list_iterator range = m_disk_cache.write_lru_pieces();
+		list_iterator<cached_piece_entry> range = m_disk_cache.write_lru_pieces();
 		std::vector<std::pair<piece_manager*, int> > pieces;
 		pieces.reserve(m_disk_cache.num_write_lru_pieces());
 
-		for (list_iterator p = range; p.get() && num > 0; p.next())
+		for (list_iterator<cached_piece_entry> p = range; p.get() && num > 0; p.next())
 		{
-			cached_piece_entry* e = (cached_piece_entry*)p.get();
+			cached_piece_entry* e = p.get();
 			if (e->num_dirty == 0) continue;
 			pieces.push_back(std::make_pair(e->storage.get(), int(e->piece)));
 		}

@@ -987,9 +987,9 @@ namespace libtorrent
 		cached_piece_entry** to_flush = TORRENT_ALLOCA(cached_piece_entry*, 200);
 		int num_flush = 0;
 
-		for (list_iterator p = m_disk_cache.write_lru_pieces(); p.get(); p.next())
+		for (list_iterator<cached_piece_entry> p = m_disk_cache.write_lru_pieces(); p.get(); p.next())
 		{
-			cached_piece_entry* e = (cached_piece_entry*)p.get();
+			cached_piece_entry* e = p.get();
 #if TORRENT_USE_ASSERTS
 			TORRENT_PIECE_ASSERT(e->expire >= timeout, e);
 			timeout = e->expire;

@@ -2477,7 +2477,7 @@ namespace libtorrent
 				ret = -1;
 				j->error.ec.assign(boost::asio::error::eof
 					, boost::asio::error::get_misc_category());
-				m_disk_cache.free_buffer((char*)iov.iov_base);
+				m_disk_cache.free_buffer(static_cast<char*>(iov.iov_base));
 				l.lock();
 				break;
 			}

@@ -1372,8 +1372,8 @@ namespace aux {
 		if (t->next != NULL || t->prev != NULL || m_torrent_lru.front() == t)
 		{
 #ifdef TORRENT_DEBUG
-			torrent* i = static_cast<torrent*>(m_torrent_lru.front());
-			while (i != NULL && i != t) i = static_cast<torrent*>(i->next);
+			torrent* i = m_torrent_lru.front();
+			while (i != NULL && i != t) i = i->next;
 			TORRENT_ASSERT(i == t);
 #endif
 

@@ -1412,8 +1412,8 @@ namespace aux {
 		TORRENT_ASSERT(t->next != NULL || t->prev != NULL || m_torrent_lru.front() == t);
 
 #if defined TORRENT_DEBUG && defined TORRENT_EXPENSIVE_INVARIANT_CHECKS
-		torrent* i = static_cast<torrent*>(m_torrent_lru.front());
-		while (i != NULL && i != t) i = static_cast<torrent*>(i->next);
+		torrent* i = m_torrent_lru.front();
+		while (i != NULL && i != t) i = i->next;
 		TORRENT_ASSERT(i == t);
 #endif
 

@@ -1451,8 +1451,8 @@ namespace aux {
 		if (ignore->next != NULL || ignore->prev != NULL || m_torrent_lru.front() == ignore)
 		{
 #ifdef TORRENT_DEBUG
-			torrent* i = static_cast<torrent*>(m_torrent_lru.front());
-			while (i != NULL && i != ignore) i = static_cast<torrent*>(i->next);
+			torrent* i = m_torrent_lru.front();
+			while (i != NULL && i != ignore) i = i->next;
 			TORRENT_ASSERT(i == ignore);
 #endif
 			++loaded_limit;

@@ -1462,11 +1462,11 @@ namespace aux {
 		{
 			// we're at the limit of loaded torrents. Find the least important
 			// torrent and unload it. This is done with an LRU.
-			torrent* i = static_cast<torrent*>(m_torrent_lru.front());
+			torrent* i = m_torrent_lru.front();
 
 			if (i == ignore)
 			{
-				i = static_cast<torrent*>(i->next);
+				i = i->next;
 				if (i == NULL) break;
 			}
 			m_stats_counters.inc_stats_counter(counters::torrent_evicted_counter);

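For context, the policy the session_impl hunks streamline: the least recently used torrent sits at the front of `m_torrent_lru`, and eviction walks forward from there, skipping the torrent that must stay loaded. A condensed sketch of that step with the typed list (illustrative shape only; `unload(victim)` is a hypothetical stand-in for the real eviction work):

    // pick the LRU victim, skipping the torrent we must keep loaded
    torrent* victim = m_torrent_lru.front();
    if (victim == ignore)
    {
        victim = victim->next;      // typed traversal, no static_cast
        if (victim == NULL) return; // nothing else to evict
    }
    unload(victim); // hypothetical helper standing in for the eviction logic
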
@@ -5761,12 +5761,12 @@ retry:
 		// clear the torrent LRU. We do this to avoid having the torrent
 		// destructor assert because it's still linked into the lru list
 #if TORRENT_USE_ASSERTS
-		list_node* i = m_torrent_lru.get_all();
+		list_node<torrent>* i = m_torrent_lru.get_all();
 		// clear the prev and next pointers in all torrents
 		// to avoid the assert when destructing them
 		while (i)
 		{
-			list_node* tmp = i;
+			list_node<torrent>* tmp = i;
 			i = i->next;
 			tmp->next = NULL;
 			tmp->prev = NULL;

@@ -6727,9 +6727,9 @@ retry:
 #else
 		std::set<torrent*> unique_torrents;
 #endif
-		for (list_iterator i = m_torrent_lru.iterate(); i.get(); i.next())
+		for (list_iterator<torrent> i = m_torrent_lru.iterate(); i.get(); i.next())
 		{
-			torrent* t = static_cast<torrent*>(i.get());
+			torrent* t = i.get();
 			TORRENT_ASSERT(t->is_loaded());
 			TORRENT_ASSERT(unique_torrents.count(t) == 0);
 			unique_torrents.insert(t);