merged RC_1_1 into master

arvidn 2017-04-21 01:21:31 -04:00
commit 04c76249b6
15 changed files with 342 additions and 94 deletions

View File

@ -67,6 +67,8 @@
* require C++11 to build libtorrent
* fix memory leak in the disk cache
* fix double free in disk cache
* forward declaring libtorrent types is discouraged. a new fwd.hpp header is provided
1.1.3 release

View File

@ -173,7 +173,6 @@ namespace aux {
{
return refcount == 0
&& piece_refcount == 0
&& num_blocks == 0
&& !hashing
&& read_jobs.size() == 0
&& outstanding_read == 0
@ -239,7 +238,8 @@ namespace aux {
std::uint16_t hashing_done:1;
// if this is true, whenever refcount hits 0,
// this piece should be deleted
// this piece should be deleted from the cache
// (not just demoted)
std::uint16_t marked_for_deletion:1;
// this is set to true once we flush blocks past
@ -305,8 +305,13 @@ namespace aux {
// read job queue (read_jobs).
std::uint16_t outstanding_read:1;
// this is set when the piece should be evicted as soon as there
// no longer are any references to it. Evicted here means demoted
// to a ghost list
boost::uint32_t marked_for_eviction:1;
// the number of blocks that have >= 1 refcount
std::uint16_t pinned = 0;
boost::uint32_t pinned:15;
// ---- 32 bit boundary ---
@ -360,15 +365,22 @@ namespace aux {
int num_write_lru_pieces() const { return m_lru[cached_piece_entry::write_lru].size(); }
enum eviction_mode
{
allow_ghost,
disallow_ghost
};
// mark this piece for deletion. If there are no outstanding
// requests to this piece, it's removed immediately, and the
// passed in iterator will be invalidated
void mark_for_deletion(cached_piece_entry* p);
void mark_for_eviction(cached_piece_entry* p, eviction_mode mode);
// similar to mark_for_deletion, except for actually marking the
// similar to mark_for_eviction, except for actually marking the
// piece for deletion. If the piece was actually deleted,
// the function returns true
bool evict_piece(cached_piece_entry* p, tailqueue<disk_io_job>& jobs);
bool evict_piece(cached_piece_entry* p, tailqueue<disk_io_job>& jobs
, eviction_mode mode);
// if this piece is in L1 or L2 proper, move it to
// its respective ghost list
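
Note on this header hunk: the old single marked_for_deletion flag is split in two. marked_for_eviction means "evict as soon as the refcount drops to zero"; marked_for_deletion additionally means "when that eviction happens, erase the entry outright instead of demoting it to a ghost list", and the new eviction_mode argument carries that choice into mark_for_eviction() and evict_piece(). A minimal sketch of how the pieces are meant to combine, mirroring maybe_free_piece() later in this commit (the free-standing helper and its name are made up for illustration):

// sketch only: release a cached piece once nothing references it any more
bool release_if_marked(block_cache& cache, cached_piece_entry* pe
	, tailqueue<disk_io_job>& jobs)
{
	if (!pe->ok_to_evict() || !pe->marked_for_eviction || !pe->jobs.empty())
		return false;
	// disallow_ghost: the piece is going away for good (e.g. its torrent is
	// being removed), so leave no ghost entry behind.
	// allow_ghost: just demote the piece; its history stays in a ghost list.
	return cache.evict_piece(pe, jobs
		, pe->marked_for_deletion
			? block_cache::disallow_ghost
			: block_cache::allow_ghost);
}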

View File

@ -101,7 +101,7 @@ namespace libtorrent {
private:
void remove_oldest(std::unique_lock<std::mutex>& l);
file_handle remove_oldest(std::unique_lock<std::mutex>&);
int m_size;
bool m_low_prio_io = false;

View File

@ -336,7 +336,7 @@ namespace libtorrent {
int dec_refcount()
{
TORRENT_ASSERT(m_references > 0);
return m_references--;
return --m_references;
}
void inc_refcount() { ++m_references; }
private:
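
The dec_refcount() fix above matters because callers compare the returned value against zero (the reclaim hunk later in this commit does exactly that). With the post-decrement form the function returned the count before the decrement, so the "== 0" test never fired on the last release. A standalone illustration of the difference:

#include <cassert>

int main()
{
	int refs = 1;
	int const old_style = refs--;   // yields 1: the value before the decrement
	refs = 1;
	int const new_style = --refs;   // yields 0: the value after the decrement
	assert(old_style == 1 && new_style == 0);
}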

View File

@ -271,7 +271,7 @@ void test_stop_start_download(swarm_test type, bool graceful)
std::printf("tick: %d\n", ticks);
const int timeout = type == swarm_test::download ? 20 : 91;
const int timeout = type == swarm_test::download ? 20 : 100;
if (ticks > timeout)
{
TEST_ERROR("timeout");

View File

@ -282,7 +282,7 @@ static_assert(sizeof(job_action_name)/sizeof(job_action_name[0])
, pe->cache_state < cached_piece_entry::num_lrus ? cache_state[pe->cache_state] : ""
, int(pe->outstanding_flush), int(pe->piece), int(pe->num_dirty)
, int(pe->num_blocks), int(pe->blocks_in_piece), int(pe->hashing_done)
, int(pe->marked_for_deletion), int(pe->need_readback), pe->hash_passes
, int(pe->marked_for_eviction), int(pe->need_readback), pe->hash_passes
, int(pe->read_jobs.size()), int(pe->jobs.size()));
bool first = true;
for (auto const& log : pe->piece_log)
@ -316,6 +316,8 @@ cached_piece_entry::cached_piece_entry()
, piece_refcount(0)
, outstanding_flush(0)
, outstanding_read(0)
, marked_for_eviction(false)
, pinned(0)
{}
cached_piece_entry::~cached_piece_entry()
@ -361,8 +363,6 @@ int block_cache::try_read(disk_io_job* j, buffer_allocator_interface& allocator
{
INVARIANT_CHECK;
TORRENT_ASSERT(!boost::get<disk_buffer_holder>(j->argument));
cached_piece_entry* p = find_piece(j);
int ret = 0;
@ -455,12 +455,10 @@ void block_cache::cache_hit(cached_piece_entry* p, void* requester, bool volatil
if (p->cache_state == cached_piece_entry::read_lru1_ghost)
{
m_last_cache_op = ghost_hit_lru1;
p->storage->add_piece(p);
}
else if (p->cache_state == cached_piece_entry::read_lru2_ghost)
{
m_last_cache_op = ghost_hit_lru2;
p->storage->add_piece(p);
}
// move into L2 (frequently used)
@ -539,7 +537,7 @@ void block_cache::try_evict_one_volatile()
TORRENT_PIECE_ASSERT(pe->in_use, pe);
i.next();
if (pe->ok_to_evict())
if (pe->ok_to_evict() && pe->num_blocks == 0)
{
#if TORRENT_USE_INVARIANT_CHECKS
for (int j = 0; j < pe->blocks_in_piece; ++j)
@ -582,7 +580,7 @@ void block_cache::try_evict_one_volatile()
--m_volatile_size;
}
if (pe->ok_to_evict())
if (pe->ok_to_evict() && pe->num_blocks == 0)
{
#if TORRENT_USE_INVARIANT_CHECKS
for (int j = 0; j < pe->blocks_in_piece; ++j)
@ -666,7 +664,7 @@ cached_piece_entry* block_cache::allocate_piece(disk_io_job const* j, std::uint1
TORRENT_PIECE_ASSERT(p->in_use, p);
// we want to retain the piece now
p->marked_for_deletion = false;
p->marked_for_eviction = false;
// only allow changing the cache state downwards. i.e. turn a ghost
// piece into a non-ghost, or a read piece into a write piece
@ -677,16 +675,6 @@ cached_piece_entry* block_cache::allocate_piece(disk_io_job const* j, std::uint1
// into the read cache, but fails and is cleared (into the ghost list)
// then we want to add new dirty blocks to it and we need to move
// it back into the write cache
// it also happens when pulling a ghost piece back into the proper cache
if (p->cache_state == cached_piece_entry::read_lru1_ghost
|| p->cache_state == cached_piece_entry::read_lru2_ghost)
{
// since it used to be a ghost piece, but no more,
// we need to add it back to the storage
p->storage->add_piece(p);
}
m_lru[p->cache_state].erase(p);
p->cache_state = cache_state;
m_lru[p->cache_state].push_back(p);
@ -744,7 +732,7 @@ cached_piece_entry* block_cache::add_dirty_block(disk_io_job* j)
TORRENT_PIECE_ASSERT(block < pe->blocks_in_piece, pe);
TORRENT_PIECE_ASSERT(j->piece == pe->piece, pe);
TORRENT_PIECE_ASSERT(!pe->marked_for_deletion, pe);
TORRENT_PIECE_ASSERT(!pe->marked_for_eviction, pe);
TORRENT_PIECE_ASSERT(pe->blocks[block].refcount == 0, pe);
@ -814,6 +802,7 @@ void block_cache::blocks_flushed(cached_piece_entry* pe, int const* flushed, int
pe->num_dirty -= num_flushed;
update_cache_state(pe);
maybe_free_piece(pe);
}
std::pair<block_cache::const_iterator, block_cache::const_iterator> block_cache::all_pieces() const
@ -858,7 +847,8 @@ void block_cache::free_block(cached_piece_entry* pe, int block)
b.buf = nullptr;
}
bool block_cache::evict_piece(cached_piece_entry* pe, tailqueue<disk_io_job>& jobs)
bool block_cache::evict_piece(cached_piece_entry* pe, tailqueue<disk_io_job>& jobs
, eviction_mode const mode)
{
INVARIANT_CHECK;
@ -899,7 +889,7 @@ bool block_cache::evict_piece(cached_piece_entry* pe, tailqueue<disk_io_job>& jo
if (num_to_delete) free_multiple_buffers(to_delete.first(num_to_delete));
if (pe->ok_to_evict(true))
if (pe->ok_to_evict(true) && pe->num_blocks == 0)
{
pe->hash.reset();
@ -907,11 +897,13 @@ bool block_cache::evict_piece(cached_piece_entry* pe, tailqueue<disk_io_job>& jo
jobs.append(pe->jobs);
TORRENT_ASSERT(pe->jobs.size() == 0);
if (pe->cache_state == cached_piece_entry::read_lru1_ghost
|| pe->cache_state == cached_piece_entry::read_lru2_ghost)
if (mode == allow_ghost
&& (pe->cache_state == cached_piece_entry::read_lru1_ghost
|| pe->cache_state == cached_piece_entry::read_lru2_ghost))
return true;
if (pe->cache_state == cached_piece_entry::write_lru
if (mode == disallow_ghost
|| pe->cache_state == cached_piece_entry::write_lru
|| pe->cache_state == cached_piece_entry::volatile_read_lru)
erase_piece(pe);
else
@ -922,7 +914,8 @@ bool block_cache::evict_piece(cached_piece_entry* pe, tailqueue<disk_io_job>& jo
return false;
}
void block_cache::mark_for_deletion(cached_piece_entry* p)
void block_cache::mark_for_eviction(cached_piece_entry* p
, eviction_mode const mode)
{
INVARIANT_CHECK;
@ -931,9 +924,10 @@ void block_cache::mark_for_deletion(cached_piece_entry* p)
TORRENT_PIECE_ASSERT(p->jobs.empty(), p);
tailqueue<disk_io_job> jobs;
if (!evict_piece(p, jobs))
if (!evict_piece(p, jobs, mode))
{
p->marked_for_deletion = true;
p->marked_for_eviction = true;
p->marked_for_deletion = mode == disallow_ghost;
}
}
@ -950,9 +944,7 @@ void block_cache::erase_piece(cached_piece_entry* pe)
TORRENT_PIECE_ASSERT(pe->hash->offset == 0, pe);
pe->hash.reset();
}
if (pe->cache_state != cached_piece_entry::read_lru1_ghost
&& pe->cache_state != cached_piece_entry::read_lru2_ghost)
pe->storage->remove_piece(pe);
pe->storage->remove_piece(pe);
lru_list->erase(pe);
m_pieces.erase(*pe);
}
@ -1034,7 +1026,7 @@ int block_cache::try_evict_blocks(int num, cached_piece_entry* ignore)
if (pe == ignore)
continue;
if (pe->ok_to_evict())
if (pe->ok_to_evict() && pe->num_blocks == 0)
{
#if TORRENT_USE_INVARIANT_CHECKS
for (int j = 0; j < pe->blocks_in_piece; ++j)
@ -1074,7 +1066,7 @@ int block_cache::try_evict_blocks(int num, cached_piece_entry* ignore)
m_volatile_size -= removed;
}
if (pe->ok_to_evict())
if (pe->ok_to_evict() && pe->num_blocks == 0)
{
#if TORRENT_USE_INVARIANT_CHECKS
for (int j = 0; j < pe->blocks_in_piece; ++j)
@ -1107,7 +1099,7 @@ int block_cache::try_evict_blocks(int num, cached_piece_entry* ignore)
if (pe == ignore)
continue;
if (pe->ok_to_evict())
if (pe->ok_to_evict() && pe->num_blocks == 0)
{
#if TORRENT_USE_INVARIANT_CHECKS
for (int j = 0; j < pe->blocks_in_piece; ++j)
@ -1153,7 +1145,7 @@ int block_cache::try_evict_blocks(int num, cached_piece_entry* ignore)
m_volatile_size -= removed;
}
if (pe->ok_to_evict())
if (pe->ok_to_evict() && pe->num_blocks == 0)
{
#if TORRENT_USE_INVARIANT_CHECKS
for (int j = 0; j < pe->blocks_in_piece; ++j)
@ -1242,7 +1234,6 @@ void block_cache::move_to_ghost(cached_piece_entry* pe)
erase_piece(p);
}
pe->storage->remove_piece(pe);
m_lru[pe->cache_state].erase(pe);
pe->cache_state += 1;
ghost_list->push_back(pe);
@ -1574,7 +1565,7 @@ void block_cache::check_invariant() const
for (list_iterator<cached_piece_entry> p = m_lru[i].iterate(); p.get(); p.next())
{
cached_piece_entry* pe = static_cast<cached_piece_entry*>(p.get());
cached_piece_entry* pe = p.get();
TORRENT_PIECE_ASSERT(pe->cache_state == i, pe);
if (pe->num_dirty > 0)
TORRENT_PIECE_ASSERT(i == cached_piece_entry::write_lru, pe);
@ -1592,18 +1583,18 @@ void block_cache::check_invariant() const
if (i != cached_piece_entry::read_lru1_ghost
&& i != cached_piece_entry::read_lru2_ghost)
{
TORRENT_PIECE_ASSERT(pe->storage->has_piece(pe), pe);
TORRENT_PIECE_ASSERT(pe->expire >= timeout, pe);
timeout = pe->expire;
TORRENT_PIECE_ASSERT(pe->in_storage, pe);
TORRENT_PIECE_ASSERT(pe->storage->has_piece(pe), pe);
}
else
{
// pieces in the ghost lists should never have any blocks
TORRENT_PIECE_ASSERT(pe->num_blocks == 0, pe);
TORRENT_PIECE_ASSERT(pe->storage->has_piece(pe) == false, pe);
}
// pieces in the ghost list are still in the storage's list of pieces,
// because we need to be able to evict them when stopping a torrent
TORRENT_PIECE_ASSERT(pe->storage->has_piece(pe), pe);
storages.insert(pe->storage.get());
}
@ -1628,19 +1619,7 @@ void block_cache::check_invariant() const
int num_pending = 0;
int num_refcount = 0;
bool const in_storage = p.storage->has_piece(&p);
switch (p.cache_state)
{
case cached_piece_entry::write_lru:
case cached_piece_entry::volatile_read_lru:
case cached_piece_entry::read_lru1:
case cached_piece_entry::read_lru2:
TORRENT_ASSERT(in_storage == true);
break;
default:
TORRENT_ASSERT(in_storage == false);
break;
}
TORRENT_ASSERT(p.storage->has_piece(&p));
for (int k = 0; k < p.blocks_in_piece; ++k)
{
@ -1705,7 +1684,6 @@ int block_cache::copy_from_piece(cached_piece_entry* const pe
INVARIANT_CHECK;
TORRENT_UNUSED(expect_no_fail);
TORRENT_PIECE_ASSERT(!boost::get<disk_buffer_holder>(j->argument).get(), pe);
TORRENT_PIECE_ASSERT(pe->in_use, pe);
// copy from the cache and update the last use timestamp
@ -1763,6 +1741,7 @@ int block_cache::copy_from_piece(cached_piece_entry* const pe
{
TORRENT_ASSERT(!expect_no_fail);
dec_block_refcount(pe, start_block, ref_reading);
maybe_free_piece(pe);
return -1;
}
@ -1789,11 +1768,13 @@ int block_cache::copy_from_piece(cached_piece_entry* const pe
// TODO: create a holder for refcounts that automatically decrement
dec_block_refcount(pe, start_block, ref_reading);
if (blocks_to_read == 2) dec_block_refcount(pe, start_block + 1, ref_reading);
maybe_free_piece(pe);
return j->d.io.buffer_size;
}
void block_cache::reclaim_block(storage_interface* st, aux::block_cache_reference const& ref)
{
TORRENT_ASSERT(st != nullptr);
int const blocks_per_piece = (st->files().piece_length() + block_size() - 1) / block_size();
piece_index_t const piece(ref.cookie / blocks_per_piece);
int const block(ref.cookie % blocks_per_piece);
@ -1816,17 +1797,18 @@ void block_cache::reclaim_block(storage_interface* st, aux::block_cache_referenc
bool block_cache::maybe_free_piece(cached_piece_entry* pe)
{
if (!pe->ok_to_evict()
|| !pe->marked_for_deletion
|| !pe->marked_for_eviction
|| !pe->jobs.empty())
return false;
DLOG(stderr, "[%p] block_cache maybe_free_piece "
"piece: %d refcount: %d marked_for_deletion: %d\n"
"piece: %d refcount: %d marked_for_eviction: %d\n"
, static_cast<void*>(this)
, int(pe->piece), int(pe->refcount), int(pe->marked_for_deletion));
, int(pe->piece), int(pe->refcount), int(pe->marked_for_eviction));
tailqueue<disk_io_job> jobs;
bool removed = evict_piece(pe, jobs);
bool removed = evict_piece(pe, jobs
, pe->marked_for_deletion ? disallow_ghost : allow_ghost);
TORRENT_UNUSED(removed); // suppress warning
TORRENT_PIECE_ASSERT(removed, pe);
TORRENT_PIECE_ASSERT(jobs.empty(), pe);
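
A pattern that repeats through this file: ok_to_evict() lost its num_blocks check in the header hunk above, so it now means "no references, no hashing, no outstanding reads" rather than "empty". Call sites that genuinely need an empty piece therefore spell out num_blocks == 0 themselves. Reconstructed from that hunk (parameter list elided), the predicate's body now reads roughly:

	return refcount == 0
		&& piece_refcount == 0
		&& !hashing
		&& read_jobs.size() == 0
		&& outstanding_read == 0;

This appears to be what lets maybe_free_piece() evict a piece that still holds blocks: evict_piece() frees the remaining buffers itself, and the new eviction_mode only decides whether the entry survives as a ghost or is erased.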

View File

@ -50,7 +50,8 @@ namespace libtorrent {
: m_allocator(h.m_allocator), m_buf(h.m_buf), m_ref(h.m_ref)
{
// we own this buffer now
h.release();
h.m_buf = nullptr;
h.m_ref = aux::block_cache_reference();
}
disk_buffer_holder::disk_buffer_holder(buffer_allocator_interface& alloc
@ -76,6 +77,7 @@ namespace libtorrent {
char* disk_buffer_holder::release() noexcept
{
TORRENT_ASSERT(m_ref.cookie == aux::block_cache_reference::none);
char* ret = m_buf;
m_buf = nullptr;
m_ref = aux::block_cache_reference();
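
Context for the two hunks above: release() is now reserved for buffers that are not backed by a block_cache_reference (hence the new assert on the cookie), so the move constructor stops going through release() and takes over the members directly. In outline (a paraphrase of the constructor, not a verbatim copy):

disk_buffer_holder::disk_buffer_holder(disk_buffer_holder&& h) noexcept
	: m_allocator(h.m_allocator), m_buf(h.m_buf), m_ref(h.m_ref)
{
	// we own the buffer and its cache reference now; calling h.release()
	// here would trip the new assert for cache-backed buffers
	h.m_buf = nullptr;
	h.m_ref = aux::block_cache_reference();
}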

View File

@ -291,8 +291,13 @@ namespace libtorrent {
{
auto& pos = m_torrents[ref.storage];
storage_interface* st = pos.get();
TORRENT_ASSERT(st != nullptr);
m_disk_cache.reclaim_block(st, ref);
if (st->dec_refcount() == 0) pos.reset();
if (st->dec_refcount() == 0)
{
pos.reset();
m_free_slots.push_back(ref.storage);
}
}
}
@ -792,13 +797,13 @@ namespace libtorrent {
iovec_flushed(pe, flushing.data(), iov_len, 0, error, completed_jobs);
m_disk_cache.maybe_free_piece(pe);
// if the cache is under high pressure, we need to evict
// the blocks we just flushed to make room for more write pieces
int evict = m_disk_cache.num_to_evict(0);
if (evict > 0) m_disk_cache.try_evict_blocks(evict);
m_disk_cache.maybe_free_piece(pe);
return iov_len;
}
@ -845,12 +850,14 @@ namespace libtorrent {
// otherwise it will turn into a read piece
}
// mark_for_deletion may erase the piece from the cache, that's
// mark_for_eviction may erase the piece from the cache, that's
// why we don't have the 'i' iterator referencing it at this point
if (flags & (flush_read_cache | flush_delete_cache))
{
fail_jobs_impl(storage_error(boost::asio::error::operation_aborted), pe->jobs, completed_jobs);
m_disk_cache.mark_for_deletion(pe);
// we're removing the torrent, don't keep any entries around in the
// ghost list
m_disk_cache.mark_for_eviction(pe, block_cache::disallow_ghost);
}
}
@ -864,6 +871,7 @@ namespace libtorrent {
piece_index.reserve(pieces.size());
for (auto const& p : pieces)
{
TORRENT_ASSERT(p->get_storage() == storage);
if (p->get_storage() != storage) continue;
piece_index.push_back(p->piece);
}
@ -1835,13 +1843,23 @@ namespace libtorrent {
disk_io_job* qj = m_generic_io_jobs.m_queued_jobs.get_all();
jobqueue_t to_abort;
// if we encounter any read jobs in the queue, we need to clear the
// "outstanding_read" flag on its piece, as we abort the job
std::vector<std::pair<storage_interface*, piece_index_t> > pieces;
storage_interface* to_delete = m_torrents[storage].get();
while (qj)
{
disk_io_job* next = qj->next;
#if TORRENT_USE_ASSERTS
qj->next = nullptr;
#endif
if (qj->action == disk_io_job::read)
{
pieces.push_back(std::make_pair(qj->storage.get(), qj->piece));
}
if (qj->storage.get() == to_delete)
to_abort.push_back(qj);
else
@ -1851,6 +1869,14 @@ namespace libtorrent {
l2.unlock();
std::unique_lock<std::mutex> l(m_cache_mutex);
for (auto& p : pieces)
{
cached_piece_entry* pe = m_disk_cache.find_piece(p.first, p.second);
if (pe == NULL) continue;
TORRENT_ASSERT(pe->outstanding_read == 1);
pe->outstanding_read = 0;
}
flush_cache(to_delete, flush_delete_cache, completed_jobs, l);
l.unlock();
@ -2003,7 +2029,7 @@ namespace libtorrent {
// in fact, no jobs should really be hung on this piece
// at this point
jobqueue_t jobs;
bool ok = m_disk_cache.evict_piece(pe, jobs);
bool ok = m_disk_cache.evict_piece(pe, jobs, block_cache::allow_ghost);
TORRENT_PIECE_ASSERT(ok, pe);
TORRENT_UNUSED(ok);
fail_jobs(storage_error(boost::asio::error::operation_aborted), jobs);
@ -2451,7 +2477,8 @@ namespace libtorrent {
std::unique_lock<std::mutex> l(m_cache_mutex);
flush_cache(j->storage.get(), flush_delete_cache | flush_expect_clear
flush_cache(j->storage.get()
, flush_read_cache | flush_delete_cache | flush_expect_clear
, completed_jobs, l);
l.unlock();
@ -2812,14 +2839,14 @@ namespace libtorrent {
// are still outstanding operations on it, in which case
// try again later
jobqueue_t jobs;
if (m_disk_cache.evict_piece(pe, jobs))
if (m_disk_cache.evict_piece(pe, jobs, block_cache::allow_ghost))
{
fail_jobs_impl(storage_error(boost::asio::error::operation_aborted)
, jobs, completed_jobs);
return status_t::no_error;
}
m_disk_cache.mark_for_deletion(pe);
m_disk_cache.mark_for_eviction(pe, block_cache::allow_ghost);
if (pe->num_blocks == 0) return status_t::no_error;
// we should always be able to evict the piece, since
@ -2849,8 +2876,7 @@ namespace libtorrent {
{
std::unique_lock<std::mutex> l(m_job_mutex);
TORRENT_ASSERT((j->flags & disk_io_job::in_progress) || !j->storage);
// prioritize fence jobs since they're blocking other jobs
m_generic_io_jobs.m_queued_jobs.push_front(j);
m_generic_io_jobs.m_queued_jobs.push_back(j);
l.unlock();
// discard the flush job
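
The abort path added above is two-phase because two locks are involved: queued read jobs are identified while the job-queue lock is held, and only after switching to the cache mutex are their pieces' outstanding_read flags cleared, so the cache never waits for reads that will not be issued. Condensed, the shape is (local names are placeholders):

// phase 1: under the job-queue lock, remember which pieces had queued reads
std::vector<std::pair<storage_interface*, piece_index_t>> pieces;
for (disk_io_job* qj = queued_jobs; qj; qj = qj->next)
	if (qj->action == disk_io_job::read)
		pieces.emplace_back(qj->storage.get(), qj->piece);

// phase 2: under the cache mutex, clear the flag on those pieces
std::unique_lock<std::mutex> l(m_cache_mutex);
for (auto const& p : pieces)
	if (cached_piece_entry* pe = m_disk_cache.find_piece(p.first, p.second))
		pe->outstanding_read = 0;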

View File

@ -175,7 +175,7 @@ namespace libtorrent {
{
// the file cache is at its maximum size, close
// the least recently used (lru) file from it
remove_oldest(l);
defer_destruction = remove_oldest(l);
}
return file_ptr;
}
@ -226,21 +226,20 @@ namespace libtorrent {
return ret;
}
void file_pool::remove_oldest(std::unique_lock<std::mutex>& l)
file_handle file_pool::remove_oldest(std::unique_lock<std::mutex>&)
{
using value_type = decltype(m_files)::value_type;
auto const i = std::min_element(m_files.begin(), m_files.end()
, [] (value_type const& lhs, value_type const& rhs)
{ return lhs.second.last_use < rhs.second.last_use; });
if (i == m_files.end()) return;
if (i == m_files.end()) return file_handle();
file_handle file_ptr = i->second.file_ptr;
m_files.erase(i);
// closing a file may be long running operation (mac os x)
l.unlock();
file_ptr.reset();
l.lock();
// let the calling function destruct it after releasing the mutex
return file_ptr;
}
void file_pool::release(storage_index_t const st, file_index_t file_index)
@ -309,6 +308,9 @@ namespace libtorrent {
void file_pool::resize(int size)
{
// these are destructed _after_ the mutex is released
std::vector<file_handle> defer_destruction;
std::unique_lock<std::mutex> l(m_mutex);
TORRENT_ASSERT(size > 0);
@ -319,7 +321,7 @@ namespace libtorrent {
// close the least recently used files
while (int(m_files.size()) > m_size)
remove_oldest(l);
defer_destruction.push_back(remove_oldest(l));
}
void file_pool::close_oldest()
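
The file_pool change is a lock-scope refactor: rather than unlocking and re-locking inside remove_oldest() to close the file (closing can be a long-running operation, notably on mac os x), the helper now returns the evicted handle and the caller keeps it in a local that is destroyed only after the mutex has been released. The same pattern in a self-contained sketch (the types here are stand-ins, not libtorrent's):

#include <memory>
#include <mutex>
#include <vector>

struct file {};                              // stand-in for the real file type
using file_handle = std::shared_ptr<file>;

void shrink_pool(std::vector<file_handle>& pool, std::mutex& m, std::size_t target)
{
	std::vector<file_handle> defer_destruction; // destroyed after the unlock
	{
		std::lock_guard<std::mutex> l(m);
		while (pool.size() > target)
		{
			defer_destruction.push_back(std::move(pool.back()));
			pool.pop_back();
		}
	} // mutex released here; the handles (and files) are closed afterwards
}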

View File

@ -130,7 +130,7 @@ void natpmp::start()
for (std::vector<mapping_t>::iterator i = m_mappings.begin()
, end(m_mappings.end()); i != end; ++i)
{
if (i->protocol != portmap_protocol::none
if (i->protocol == portmap_protocol::none
|| i->act != mapping_t::action::none)
continue;
i->act = mapping_t::action::add;

View File

@ -171,6 +171,7 @@ test-suite libtorrent :
[ run test_session.cpp ]
[ run test_session_params.cpp ]
[ run test_read_piece.cpp ]
[ run test_remove_torrent.cpp ]
[ run test_file.cpp ]
[ run test_fast_extension.cpp ]

View File

@ -7,6 +7,7 @@ test_programs = \
test_file \
test_privacy \
test_priority \
test_remove_torrent \
test_auto_unchoke \
test_checking \
test_fast_extension \
@ -199,6 +200,7 @@ test_stat_cache_SOURCES = test_stat_cache.cpp
test_file_SOURCES = test_file.cpp
test_privacy_SOURCES = test_privacy.cpp
test_priority_SOURCES = test_priority.cpp
test_remove_torrent_SOURCES = test_remove_torrent.cpp
test_auto_unchoke_SOURCES = test_auto_unchoke.cpp
test_checking_SOURCES = test_checking.cpp
test_enum_net_SOURCES = test_enum_net.cpp

View File

@ -277,7 +277,7 @@ void test_evict()
// this should make it not be evicted
// just free the buffers
++pe->piece_refcount;
bc.evict_piece(pe, jobs);
bc.evict_piece(pe, jobs, block_cache::allow_ghost);
bc.update_stats_counters(c);
TEST_EQUAL(c[counters::write_cache_blocks], 0);
@ -291,7 +291,7 @@ void test_evict()
TEST_EQUAL(c[counters::arc_volatile_size], 0);
--pe->piece_refcount;
bc.evict_piece(pe, jobs);
bc.evict_piece(pe, jobs, block_cache::allow_ghost);
bc.update_stats_counters(c);
TEST_EQUAL(c[counters::write_cache_blocks], 0);
@ -392,7 +392,7 @@ void test_arc_unghost()
TEST_EQUAL(c[counters::arc_volatile_size], 0);
tailqueue<disk_io_job> jobs;
bc.evict_piece(pe, jobs);
bc.evict_piece(pe, jobs, block_cache::allow_ghost);
bc.update_stats_counters(c);
TEST_EQUAL(c[counters::write_cache_blocks], 0);
@ -484,3 +484,28 @@ TORRENT_TEST(block_cache)
// TODO: test unaligned reads
}
TORRENT_TEST(delete_piece)
{
TEST_SETUP;
TEST_CHECK(bc.num_pieces() == 0);
INSERT(0, 0);
TEST_CHECK(bc.num_pieces() == 1);
rj.action = disk_io_job::read;
rj.d.io.offset = 0x2000;
rj.d.io.buffer_size = 0x4000;
rj.piece = piece_index_t(0);
rj.storage = pm;
rj.requester = (void*)1;
rj.argument = 0;
ret = bc.try_read(&rj, alloc);
cached_piece_entry* pe_ = bc.find_piece(pm.get(), piece_index_t(0));
bc.mark_for_eviction(pe_, block_cache::disallow_ghost);
TEST_CHECK(bc.num_pieces() == 0);
}

View File

@ -32,7 +32,6 @@ POSSIBILITY OF SUCH DAMAGE.
#include "libtorrent/session.hpp"
#include "libtorrent/session_settings.hpp"
#include "libtorrent/hasher.hpp"
#include "libtorrent/alert_types.hpp"
#include "libtorrent/bencode.hpp"
#include "libtorrent/time.hpp"
@ -124,10 +123,6 @@ void test_transfer(settings_pack const& sett, bool test_deprecated = false)
std::shared_ptr<torrent_info> t = ::create_torrent(&file, "temporary", 16 * 1024, 13, false);
file.close();
add_torrent_params addp;
addp.flags &= ~add_torrent_params::flag_paused;
addp.flags &= ~add_torrent_params::flag_auto_managed;
wait_for_listen(ses1, "ses1");
wait_for_listen(ses2, "ses1");

View File

@ -0,0 +1,199 @@
/*
Copyright (c) 2017, Arvid Norberg
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#include "libtorrent/session.hpp"
#include "libtorrent/torrent_handle.hpp"
#include "libtorrent/torrent_status.hpp"
#include "libtorrent/session_settings.hpp"
#include "libtorrent/torrent_info.hpp"
#include "libtorrent/ip_filter.hpp"
#include "libtorrent/aux_/path.hpp"
#include "test.hpp"
#include "setup_transfer.hpp"
#include "settings.hpp"
#include <fstream>
#include <iostream>
#include <cstdint>
using namespace libtorrent;
namespace lt = libtorrent;
using std::ignore;
enum test_case {
complete_download,
partial_download,
mid_download
};
void test_remove_torrent(int const remove_options
, test_case const test = complete_download)
{
// this allows shutting down the sessions in parallel
std::vector<session_proxy> sp;
settings_pack pack = settings();
// we do this to force pieces to be evicted into the ghost lists
pack.set_int(settings_pack::cache_size, 10);
pack.set_str(settings_pack::listen_interfaces, "0.0.0.0:48075");
lt::session ses1(pack);
pack.set_str(settings_pack::listen_interfaces, "0.0.0.0:49075");
lt::session ses2(pack);
torrent_handle tor1;
torrent_handle tor2;
int const num_pieces = (test == mid_download) ? 500 : 100;
error_code ec;
remove_all("tmp1_remove", ec);
remove_all("tmp2_remove", ec);
create_directory("tmp1_remove", ec);
std::ofstream file("tmp1_remove/temporary");
std::shared_ptr<torrent_info> t = ::create_torrent(&file, "temporary"
, 16 * 1024, num_pieces, false);
file.close();
wait_for_listen(ses1, "ses1");
wait_for_listen(ses2, "ses1");
// test using piece sizes smaller than 16kB
std::tie(tor1, tor2, ignore) = setup_transfer(&ses1, &ses2, 0
, true, false, true, "_remove", 8 * 1024, &t, false, 0);
if (test == partial_download)
{
std::vector<int> priorities(num_pieces, 1);
// set half of the pieces to priority 0
std::fill(priorities.begin(), priorities.begin() + (num_pieces / 2), 0);
tor2.prioritize_pieces(priorities);
}
torrent_status st1;
torrent_status st2;
for (int i = 0; i < 200; ++i)
{
print_alerts(ses1, "ses1", true, true);
print_alerts(ses2, "ses2", true, true);
st1 = tor1.status();
st2 = tor2.status();
if (test == mid_download && st2.num_pieces > num_pieces / 2)
{
TEST_CHECK(st2.is_finished == false);
break;
}
if (st2.is_finished) break;
TEST_CHECK(st1.state == torrent_status::seeding
|| st1.state == torrent_status::checking_files);
TEST_CHECK(st2.state == torrent_status::downloading
|| st2.state == torrent_status::checking_resume_data);
// if nothing is being transferred after 2 seconds, we're failing the test
if (st1.upload_payload_rate == 0 && i > 20)
{
TEST_ERROR("no transfer");
return;
}
std::this_thread::sleep_for(lt::milliseconds(100));
}
TEST_CHECK(st1.num_pieces > 0);
TEST_CHECK(st2.num_pieces > 0);
ses2.remove_torrent(tor2, remove_options);
ses1.remove_torrent(tor1, remove_options);
std::cerr << "removed" << std::endl;
for (int i = 0; tor2.is_valid() || tor1.is_valid(); ++i)
{
std::this_thread::sleep_for(lt::milliseconds(100));
if (++i > 40)
{
std::cerr << "torrent handle(s) still valid: "
<< (tor1.is_valid() ? "tor1 " : "")
<< (tor2.is_valid() ? "tor2 " : "")
<< std::endl;
TEST_ERROR("handle did not become invalid");
return;
}
}
if (remove_options & session::delete_files)
{
TEST_CHECK(!exists("tmp1_remove/temporary"));
TEST_CHECK(!exists("tmp2_remove/temporary"));
}
sp.push_back(ses1.abort());
sp.push_back(ses2.abort());
}
TORRENT_TEST(remove_torrent)
{
test_remove_torrent(0);
}
TORRENT_TEST(remove_torrent_and_files)
{
test_remove_torrent(session::delete_files);
}
TORRENT_TEST(remove_torrent_partial)
{
test_remove_torrent(0, partial_download);
}
TORRENT_TEST(remove_torrent_and_files_partial)
{
test_remove_torrent(session::delete_files, partial_download);
}
TORRENT_TEST(remove_torrent_mid_download)
{
test_remove_torrent(0, mid_download);
}
TORRENT_TEST(remove_torrent_and_files_mid_download)
{
test_remove_torrent(session::delete_files, mid_download);
}