make all tests run cleanly with leak sanitizer

arvidn 2019-03-26 12:29:04 +01:00 committed by Arvid Norberg
parent 4dc2e6015d
commit 102387f4a8
9 changed files with 69 additions and 14 deletions

View File

@@ -284,8 +284,8 @@ namespace aux {
void call_abort()
{
auto ptr = shared_from_this();
m_io_service.dispatch(make_handler([ptr] { ptr->abort(); }
auto self = shared_from_this();
m_io_service.dispatch(make_handler([self] { self->abort(); }
, m_abort_handler_storage, *this));
}
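
The rename above keeps the usual asio keep-alive idiom: the dispatched handler captures a shared_ptr to the object so it cannot be destroyed before the handler runs. A minimal standalone sketch of that idiom (plain boost::asio, not libtorrent's session_impl or make_handler):

#include <boost/asio.hpp>
#include <cstdio>
#include <memory>

struct service : std::enable_shared_from_this<service>
{
	explicit service(boost::asio::io_context& ios) : m_ios(ios) {}

	void call_abort()
	{
		// capturing the shared_ptr keeps *this alive until the handler has
		// run, even if all external references are dropped in the meantime
		auto self = shared_from_this();
		boost::asio::post(m_ios, [self] { self->abort(); });
	}

	void abort() { std::puts("aborted"); }

private:
	boost::asio::io_context& m_ios;
};

int main()
{
	boost::asio::io_context ios;
	std::make_shared<service>(ios)->call_abort();
	ios.run();
}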

View File

@@ -343,6 +343,7 @@ namespace aux {
struct TORRENT_EXTRA_EXPORT block_cache : disk_buffer_pool
{
block_cache(io_service& ios, std::function<void()> const& trigger_trim);
~block_cache();
private:

View File

@@ -132,6 +132,11 @@ private:
std::int16_t m_responses = 0;
std::int16_t m_timeouts = 0;
// set to true when done() is called, and will prevent adding new results, as
// they would never be serviced and the whole traversal algorithm would stall
// and leak
bool m_done = false;
#ifndef TORRENT_DISABLE_LOGGING
// this is a unique ID for this specific traversal_algorithm instance,
// just used for logging
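
The m_done flag added above is a simple "closed" guard: once done() has run, no further results or requests may be queued, because nothing would ever service them. A toy sketch of the idea (not the real traversal_algorithm interface):

#include <memory>
#include <vector>

struct observer {};

struct traversal
{
	void add_entry(std::unique_ptr<observer> o)
	{
		// refuse new results once the algorithm has completed; they would
		// never be serviced and would only be leaked
		if (m_done) return;
		m_results.push_back(std::move(o));
	}

	void done()
	{
		m_done = true;
		// release everything that is still queued
		m_results.clear();
	}

private:
	std::vector<std::unique_ptr<observer>> m_results;
	bool m_done = false;
};

int main()
{
	traversal t;
	t.add_entry(std::make_unique<observer>());
	t.done();
	t.add_entry(std::make_unique<observer>()); // dropped: t is already done
}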

View File

@@ -100,8 +100,9 @@ void run_test(HandleAlerts const& on_alert, Test const& test)
pack.set_str(settings_pack::listen_interfaces, peer0.to_string() + ":6881");
// create session
std::shared_ptr<lt::session> ses[2];
ses[0] = std::make_shared<lt::session>(pack, ios0);
pack.set_str(settings_pack::listen_interfaces, peer1.to_string() + ":6881");
@@ -180,6 +181,13 @@ void operator delete(void* ptr) noexcept
std::free(ptr);
}
#ifdef __cpp_sized_deallocation
void operator delete(void* ptr, std::size_t) noexcept
{
std::free(ptr);
}
#endif
TORRENT_TEST(error_handling)
{
for (int i = 0; i < 8000; ++i)
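
The sized operator delete overload added above matters because a C++14 compiler may emit calls to the sized form directly; when a test replaces the global allocator, both forms should release memory the same way so the leak checker does not see mismatched allocations. A minimal standalone sketch of such a replacement (just counting allocations here; the real test harness does more, e.g. injecting failures):

#include <atomic>
#include <cstdio>
#include <cstdlib>
#include <new>

std::atomic<int> allocations{0};

void* operator new(std::size_t size)
{
	++allocations;
	if (size == 0) size = 1;
	void* ret = std::malloc(size);
	if (ret == nullptr) throw std::bad_alloc();
	return ret;
}

void operator delete(void* ptr) noexcept { std::free(ptr); }

#ifdef __cpp_sized_deallocation
// the compiler may call this form when it knows the object's size
void operator delete(void* ptr, std::size_t) noexcept { std::free(ptr); }
#endif

int main()
{
	auto* p = new int(42);
	delete p;
	std::printf("allocations: %d\n", allocations.load());
}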

View File

@@ -341,6 +341,23 @@ block_cache::block_cache(io_service& ios
{
}
block_cache::~block_cache()
{
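// collect any block buffers still owned by cached pieces and hand them
// back to the buffer pool in one batch, so they do not outlive the cache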
std::vector<char*> bufs;
for (auto const& pe : m_pieces)
{
if (!pe.blocks) continue;
int const num_blocks = int(pe.blocks_in_piece);
for (int i = 0; i < num_blocks; ++i)
{
if (pe.blocks[i].buf == nullptr) continue;
bufs.push_back(pe.blocks[i].buf);
}
}
free_multiple_buffers(bufs);
}
// returns:
// -1: not in cache
// -2: no memory

View File

@@ -51,6 +51,7 @@ POSSIBILITY OF SUCH DAMAGE.
#include "libtorrent/units.hpp"
#include "libtorrent/hasher.hpp"
#include "libtorrent/aux_/array.hpp"
#include "libtorrent/aux_/scope_end.hpp"
#include <functional>
@@ -1315,6 +1316,9 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
return s;
}
// free buffers at the end of the scope
auto iov_dealloc = aux::scope_end([&]{ m_disk_cache.free_iovec(iov); });
// this is the offset that's aligned to block boundaries
int const adjusted_offset = aux::numeric_cast<int>(j->d.io.offset & ~(default_block_size - 1));
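
The masking above rounds the byte offset down to the nearest block boundary; it only works because the block size is a power of two. A quick illustration (0x4000 is assumed here, matching libtorrent's 16 kiB default block size):

#include <cstdio>

int main()
{
	int const default_block_size = 0x4000; // 16 kiB, a power of two
	for (int const offset : {0, 1, 0x3fff, 0x4000, 0x7123})
	{
		// clearing the low bits rounds down to a multiple of the block size
		int const adjusted = offset & ~(default_block_size - 1);
		std::printf("%#8x -> %#8x\n", unsigned(offset), unsigned(adjusted));
	}
}
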
@@ -1353,9 +1357,6 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
if (ret < 0)
{
// read failed. free buffers and return error
m_disk_cache.free_iovec(iov);
pe = m_disk_cache.find_piece(j);
if (pe == nullptr)
{
@@ -1381,6 +1382,10 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
#if TORRENT_USE_ASSERTS
pe->piece_log.push_back(piece_log_t(j->action, block));
#endif
// we want to hold on to the iov now
iov_dealloc.disarm();
// as soon we insert the blocks they may be evicted
// (if using purgeable memory). In order to prevent that
// until we can read from them, increment the refcounts
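
A toy sketch of that pinning (not the real block_cache API): while a block's refcount is non-zero, eviction must leave it alone.

#include <cstdlib>

struct cached_block
{
	char* buf = nullptr;
	int refcount = 0; // number of readers currently relying on buf
};

// eviction may only reclaim blocks nobody holds a reference to
bool try_evict(cached_block& b)
{
	if (b.buf == nullptr) return false;
	if (b.refcount > 0) return false; // pinned, keep it
	std::free(b.buf);
	b.buf = nullptr;
	return true;
}

int main()
{
	cached_block b;
	b.buf = static_cast<char*>(std::malloc(0x4000));
	++b.refcount;  // pin while a read is in flight
	try_evict(b);  // refused: block is pinned
	--b.refcount;  // read done, unpin
	try_evict(b);  // now the buffer is reclaimed
}
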
@@ -2126,6 +2131,10 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
iovec_t iov = { m_disk_cache.allocate_buffer("hashing")
, static_cast<std::size_t>(default_block_size) };
// free at the end of the scope
auto iov_dealloc = aux::scope_end([&]{ m_disk_cache.free_buffer(iov.data()); });
hasher h;
int ret = 0;
int offset = 0;
@@ -2155,8 +2164,6 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
h.update(iov);
}
m_disk_cache.free_buffer(iov.data());
j->d.piece_hash = h.final();
return ret >= 0 ? status_t::no_error : status_t::fatal_disk_error;
}
@@ -2310,6 +2317,9 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
TORRENT_ALLOCA(iov, iovec_t, blocks_left);
if (m_disk_cache.allocate_iovec(iov) >= 0)
{
// free buffers at the end of the scope
auto iov_dealloc = aux::scope_end([&]{ m_disk_cache.free_iovec(iov); });
// if this is the last piece, adjust the size of the
// last buffer to match up
iov[blocks_left - 1] = iov[blocks_left - 1].first(
@@ -2342,14 +2352,13 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
TORRENT_ASSERT(offset == piece_size);
// we want to hold on to the buffers now, to insert them in the
// cache
iov_dealloc.disarm();
l.lock();
m_disk_cache.insert_blocks(pe, first_block, iov, j);
l.unlock();
}
else
{
m_disk_cache.free_iovec(iov);
}
}
}
@@ -2391,6 +2400,9 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
return status_t::fatal_disk_error;
}
// free buffers at the end of the scope
auto iov_dealloc = aux::scope_end([&]{ m_disk_cache.free_buffer(iov.data()); });
DLOG("do_hash: reading (piece: %d block: %d)\n"
, static_cast<int>(pe->piece), first_block + i);
@@ -2403,7 +2415,6 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
{
ret = status_t::fatal_disk_error;
TORRENT_ASSERT(j->error.ec && j->error.operation != operation_t::unknown);
m_disk_cache.free_buffer(iov.data());
break;
}
@@ -2415,7 +2426,6 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
ret = status_t::fatal_disk_error;
j->error.ec = boost::asio::error::eof;
j->error.operation = operation_t::file_read;
m_disk_cache.free_buffer(iov.data());
break;
}
@@ -2434,6 +2444,7 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
offset += int(iov.size());
ph->h.update(iov);
iov_dealloc.disarm();
l.lock();
m_disk_cache.insert_blocks(pe, first_block + i, iov, j);
l.unlock();
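
All of the free_iovec / free_buffer cleanups replaced in this file follow the same shape: a scope guard frees the buffers on every early return, and is disarmed once the cache takes ownership of them. A self-contained sketch of that pattern (the scope_end helper here is an assumption modelled on aux_/scope_end.hpp, and the read/cache functions are toy stand-ins):

#include <cstdlib>
#include <cstring>
#include <utility>

// scope guard: runs the function when the scope ends, unless disarm()ed
template <typename Fun>
struct scope_end_t
{
	explicit scope_end_t(Fun f) : m_fun(std::move(f)) {}
	~scope_end_t() { if (m_armed) m_fun(); }
	void disarm() { m_armed = false; }
private:
	Fun m_fun;
	bool m_armed = true;
};

template <typename Fun>
scope_end_t<Fun> scope_end(Fun f) { return scope_end_t<Fun>(std::move(f)); }

// toy stand-ins for reading a block from disk and inserting it into a cache
bool read_block(char* buf, bool fail)
{ if (fail) return false; std::memset(buf, 0, 0x4000); return true; }

char* g_cached = nullptr;
void cache_insert(char* buf) { g_cached = buf; }

bool read_and_cache(bool fail)
{
	char* buf = static_cast<char*>(std::malloc(0x4000));
	if (buf == nullptr) return false;

	// free the buffer on every exit path below...
	auto dealloc = scope_end([&] { std::free(buf); });

	if (!read_block(buf, fail)) return false; // the guard frees buf here

	// ...unless the cache takes ownership of it
	dealloc.disarm();
	cache_insert(buf);
	return true;
}

int main()
{
	read_and_cache(true);      // failure path: the guard frees the buffer
	if (read_and_cache(false)) // success path: the cache now owns it
		std::free(g_cached);
}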

View File

@@ -137,6 +137,7 @@ void sample_infohashes_observer::reply(msg const& m)
timeout();
}
// we deliberately do not call
traversal_observer::reply(m);
// this is necessary to play nice with
// observer::abort(), observer::done() and observer::timeout()

View File

@@ -127,6 +127,8 @@ void traversal_algorithm::resort_result(observer* o)
void traversal_algorithm::add_entry(node_id const& id
, udp::endpoint const& addr, observer_flags_t const flags)
{
if (m_done) return;
TORRENT_ASSERT(m_node.m_rpc.allocation_size() >= sizeof(find_data_observer));
auto o = new_observer(addr, id);
if (!o)
@@ -284,6 +286,8 @@ char const* traversal_algorithm::name() const
void traversal_algorithm::traverse(node_id const& id, udp::endpoint const& addr)
{
if (m_done) return;
#ifndef TORRENT_DISABLE_LOGGING
dht_observer* logger = get_node().observer();
if (logger != nullptr && logger->should_log(dht_logger::traversal) && id.is_all_zeros())
@@ -408,6 +412,8 @@ void traversal_algorithm::log_timeout(observer_ptr const& o, char const* prefix)
void traversal_algorithm::done()
{
TORRENT_ASSERT(m_done == false);
m_done = true;
#ifndef TORRENT_DISABLE_LOGGING
int results_target = m_node.m_table.bucket_size();
int closest_target = 160;
@@ -459,6 +465,8 @@ void traversal_algorithm::done()
bool traversal_algorithm::add_requests()
{
if (m_done) return true;
int results_target = m_node.m_table.bucket_size();
// this only counts outstanding requests at the top of the

View File

@@ -38,6 +38,7 @@ POSSIBILITY OF SUCH DAMAGE.
#include "libtorrent/torrent.hpp"
#include "libtorrent/peer_class.hpp"
#include "libtorrent/peer_class_type_filter.hpp"
#include "libtorrent/aux_/scope_end.hpp"
#if TORRENT_ABI_VERSION == 1
#include "libtorrent/read_resume_data.hpp"
@@ -411,7 +412,9 @@ namespace {
// we cannot capture a unique_ptr into a lambda in c++11, so we use a raw
// pointer for now. async_call uses a lambda expression to post the call
// to the main thread
// TODO: in C++14, use unique_ptr and move it into the lambda
auto* p = new add_torrent_params(std::move(params));
auto guard = aux::scope_end([p]{ delete p; });
p->save_path = complete(p->save_path);
#if TORRENT_ABI_VERSION == 1
@@ -419,6 +422,7 @@
#endif
async_call(&session_impl::async_add_torrent, p);
guard.disarm();
}
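
The guard above addresses the C++11 constraint spelled out in the comment; the TODO's C++14 alternative is to move the unique_ptr straight into the lambda with an init-capture, so no raw pointer exists to leak on any path. A toy sketch of that alternative (async_call here is a stand-in that simply runs the handler; the real one posts it to the network thread):

#include <cstdio>
#include <memory>
#include <string>
#include <utility>

struct add_torrent_params { std::string save_path; };

// stand-in for posting a handler to the network thread
template <typename Handler>
void async_call(Handler&& h) { std::forward<Handler>(h)(); }

void add_torrent(add_torrent_params params)
{
	// C++14 init-capture: ownership moves into the lambda, so the parameters
	// are released whether or not the handler ever runs
	auto p = std::make_unique<add_torrent_params>(std::move(params));
	async_call([p = std::move(p)]
	{
		std::printf("adding torrent, save_path: %s\n", p->save_path.c_str());
	});
}

int main()
{
	add_torrent({"/tmp/save"});
}
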
#ifndef BOOST_NO_EXCEPTIONS