From 102387f4a819763a2e576cad729f04acb17a6bd8 Mon Sep 17 00:00:00 2001
From: arvidn
Date: Tue, 26 Mar 2019 12:29:04 +0100
Subject: [PATCH] make all tests run cleanly with leak sanitizer

---
 include/libtorrent/aux_/session_impl.hpp |  4 +--
 include/libtorrent/block_cache.hpp       |  1 +
 .../kademlia/traversal_algorithm.hpp      |  5 +++
 simulation/test_error_handling.cpp        | 10 +++++-
 src/block_cache.cpp                       | 17 ++++++++++
 src/disk_io_thread.cpp                    | 33 ++++++++++++-------
 src/kademlia/sample_infohashes.cpp        |  1 +
 src/kademlia/traversal_algorithm.cpp      |  8 +++++
 src/session_handle.cpp                    |  4 +++
 9 files changed, 69 insertions(+), 14 deletions(-)

diff --git a/include/libtorrent/aux_/session_impl.hpp b/include/libtorrent/aux_/session_impl.hpp
index a7b97c742..b5f2a92e0 100644
--- a/include/libtorrent/aux_/session_impl.hpp
+++ b/include/libtorrent/aux_/session_impl.hpp
@@ -284,8 +284,8 @@ namespace aux {
 
 		void call_abort()
 		{
-			auto ptr = shared_from_this();
-			m_io_service.dispatch(make_handler([ptr] { ptr->abort(); }
+			auto self = shared_from_this();
+			m_io_service.dispatch(make_handler([self] { self->abort(); }
 				, m_abort_handler_storage, *this));
 		}
 
diff --git a/include/libtorrent/block_cache.hpp b/include/libtorrent/block_cache.hpp
index dd0236e92..d057d7b97 100644
--- a/include/libtorrent/block_cache.hpp
+++ b/include/libtorrent/block_cache.hpp
@@ -343,6 +343,7 @@ namespace aux {
 struct TORRENT_EXTRA_EXPORT block_cache : disk_buffer_pool
 {
 	block_cache(io_service& ios, std::function<void()> const& trigger_trim);
+	~block_cache();
 
 private:
 
diff --git a/include/libtorrent/kademlia/traversal_algorithm.hpp b/include/libtorrent/kademlia/traversal_algorithm.hpp
index 6b4ce53e0..aa6224614 100644
--- a/include/libtorrent/kademlia/traversal_algorithm.hpp
+++ b/include/libtorrent/kademlia/traversal_algorithm.hpp
@@ -132,6 +132,11 @@ private:
 	std::int16_t m_responses = 0;
 	std::int16_t m_timeouts = 0;
 
+	// set to true when done() is called, and will prevent adding new results, as
+	// they would never be serviced and the whole traversal algorithm would stall
+	// and leak
+	bool m_done = false;
+
 #ifndef TORRENT_DISABLE_LOGGING
 	// this is a unique ID for this specific traversal_algorithm instance,
 	// just used for logging
diff --git a/simulation/test_error_handling.cpp b/simulation/test_error_handling.cpp
index ef320ebac..9c8375452 100644
--- a/simulation/test_error_handling.cpp
+++ b/simulation/test_error_handling.cpp
@@ -100,8 +100,9 @@ void run_test(HandleAlerts const& on_alert, Test const& test)
 	pack.set_str(settings_pack::listen_interfaces, peer0.to_string() + ":6881");
 
-	// create session
 	std::shared_ptr<lt::session> ses[2];
+
+	// create session
 	ses[0] = std::make_shared<lt::session>(pack, ios0);
 
 	pack.set_str(settings_pack::listen_interfaces, peer1.to_string() + ":6881");
 
@@ -180,6 +181,13 @@ void operator delete(void* ptr) noexcept
 	std::free(ptr);
 }
 
+#ifdef __cpp_sized_deallocation
+void operator delete(void* ptr, std::size_t) noexcept
+{
+	std::free(ptr);
+}
+#endif
+
 TORRENT_TEST(error_handling)
 {
 	for (int i = 0; i < 8000; ++i)
diff --git a/src/block_cache.cpp b/src/block_cache.cpp
index de69d49ea..0f30ad985 100644
--- a/src/block_cache.cpp
+++ b/src/block_cache.cpp
@@ -341,6 +341,23 @@ block_cache::block_cache(io_service& ios
 {
 }
 
+block_cache::~block_cache()
+{
+	std::vector<char*> bufs;
+	for (auto const& pe : m_pieces)
+	{
+		if (!pe.blocks) continue;
+
+		int const num_blocks = int(pe.blocks_in_piece);
+		for (int i = 0; i < num_blocks; ++i)
+		{
+			if (pe.blocks[i].buf == nullptr) continue;
+			bufs.push_back(pe.blocks[i].buf);
+		}
+	}
+	free_multiple_buffers(bufs);
+}
+
 // returns:
 // -1: not in cache
 // -2: no memory
diff --git a/src/disk_io_thread.cpp b/src/disk_io_thread.cpp
index 2e884be45..c800055ea 100644
--- a/src/disk_io_thread.cpp
+++ b/src/disk_io_thread.cpp
@@ -51,6 +51,7 @@ POSSIBILITY OF SUCH DAMAGE.
 #include "libtorrent/units.hpp"
 #include "libtorrent/hasher.hpp"
 #include "libtorrent/aux_/array.hpp"
+#include "libtorrent/aux_/scope_end.hpp"
 
 #include
@@ -1315,6 +1316,9 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
 			return s;
 		}
 
+		// free buffers at the end of the scope
+		auto iov_dealloc = aux::scope_end([&]{ m_disk_cache.free_iovec(iov); });
+
 		// this is the offset that's aligned to block boundaries
 		int const adjusted_offset = aux::numeric_cast<int>(j->d.io.offset & ~(default_block_size - 1));
 
@@ -1353,9 +1357,6 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
 
 		if (ret < 0)
 		{
-			// read failed. free buffers and return error
-			m_disk_cache.free_iovec(iov);
-
 			pe = m_disk_cache.find_piece(j);
 			if (pe == nullptr)
 			{
@@ -1381,6 +1382,10 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
 #if TORRENT_USE_ASSERTS
 		pe->piece_log.push_back(piece_log_t(j->action, block));
 #endif
+
+		// we want to hold on to the iov now
+		iov_dealloc.disarm();
+
 		// as soon we insert the blocks they may be evicted
 		// (if using purgeable memory). In order to prevent that
 		// until we can read from them, increment the refcounts
@@ -2126,6 +2131,10 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
 
 		iovec_t iov = { m_disk_cache.allocate_buffer("hashing")
 			, static_cast(default_block_size) };
+
+		// free at the end of the scope
+		auto iov_dealloc = aux::scope_end([&]{ m_disk_cache.free_buffer(iov.data()); });
+
 		hasher h;
 		int ret = 0;
 		int offset = 0;
@@ -2155,8 +2164,6 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
 			h.update(iov);
 		}
 
-		m_disk_cache.free_buffer(iov.data());
-
 		j->d.piece_hash = h.final();
 		return ret >= 0 ? status_t::no_error : status_t::fatal_disk_error;
 	}
@@ -2310,6 +2317,9 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
 			TORRENT_ALLOCA(iov, iovec_t, blocks_left);
 			if (m_disk_cache.allocate_iovec(iov) >= 0)
 			{
+				// free buffers at the end of the scope
+				auto iov_dealloc = aux::scope_end([&]{ m_disk_cache.free_iovec(iov); });
+
 				// if this is the last piece, adjust the size of the
 				// last buffer to match up
 				iov[blocks_left - 1] = iov[blocks_left - 1].first(
@@ -2342,14 +2352,13 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
 
 				TORRENT_ASSERT(offset == piece_size);
 
+				// we want to hold on to the buffers now, to insert them in the
+				// cache
+				iov_dealloc.disarm();
 				l.lock();
 				m_disk_cache.insert_blocks(pe, first_block, iov, j);
 				l.unlock();
 			}
-			else
-			{
-				m_disk_cache.free_iovec(iov);
-			}
 		}
 	}
 
@@ -2391,6 +2400,9 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
 					return status_t::fatal_disk_error;
 				}
 
+				// free buffers at the end of the scope
+				auto iov_dealloc = aux::scope_end([&]{ m_disk_cache.free_buffer(iov.data()); });
+
 				DLOG("do_hash: reading (piece: %d block: %d)\n"
 					, static_cast<int>(pe->piece), first_block + i);
 
@@ -2403,7 +2415,6 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
 				{
 					ret = status_t::fatal_disk_error;
 					TORRENT_ASSERT(j->error.ec && j->error.operation != operation_t::unknown);
-					m_disk_cache.free_buffer(iov.data());
 					break;
 				}
 
@@ -2415,7 +2426,6 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
 					ret = status_t::fatal_disk_error;
 					j->error.ec = boost::asio::error::eof;
 					j->error.operation = operation_t::file_read;
-					m_disk_cache.free_buffer(iov.data());
 					break;
 				}
 
@@ -2434,6 +2444,7 @@ constexpr disk_job_flags_t disk_interface::cache_hit;
 				offset += int(iov.size());
 				ph->h.update(iov);
 
+				iov_dealloc.disarm();
 				l.lock();
 				m_disk_cache.insert_blocks(pe, first_block + i, iov, j);
 				l.unlock();
diff --git a/src/kademlia/sample_infohashes.cpp b/src/kademlia/sample_infohashes.cpp
index 58a36aee2..b911d5519 100644
--- a/src/kademlia/sample_infohashes.cpp
+++ b/src/kademlia/sample_infohashes.cpp
@@ -137,6 +137,7 @@ void sample_infohashes_observer::reply(msg const& m)
 		timeout();
 	}
 
+	// we deliberately do not call traversal_observer::reply(m);
 	// this is necessary to play nice with
 	// observer::abort(), observer::done() and observer::timeout()
 
diff --git a/src/kademlia/traversal_algorithm.cpp b/src/kademlia/traversal_algorithm.cpp
index 8fdf27495..a528af35d 100644
--- a/src/kademlia/traversal_algorithm.cpp
+++ b/src/kademlia/traversal_algorithm.cpp
@@ -127,6 +127,8 @@ void traversal_algorithm::resort_result(observer* o)
 void traversal_algorithm::add_entry(node_id const& id
 	, udp::endpoint const& addr, observer_flags_t const flags)
 {
+	if (m_done) return;
+
 	TORRENT_ASSERT(m_node.m_rpc.allocation_size() >= sizeof(find_data_observer));
 	auto o = new_observer(addr, id);
 	if (!o)
@@ -284,6 +286,8 @@ char const* traversal_algorithm::name() const
 
 void traversal_algorithm::traverse(node_id const& id, udp::endpoint const& addr)
 {
+	if (m_done) return;
+
 #ifndef TORRENT_DISABLE_LOGGING
 	dht_observer* logger = get_node().observer();
 	if (logger != nullptr && logger->should_log(dht_logger::traversal) && id.is_all_zeros())
@@ -408,6 +412,8 @@ void traversal_algorithm::log_timeout(observer_ptr const& o, char const* prefix)
 
 void traversal_algorithm::done()
 {
+	TORRENT_ASSERT(m_done == false);
+	m_done = true;
 #ifndef TORRENT_DISABLE_LOGGING
 	int results_target = m_node.m_table.bucket_size();
 	int closest_target = 160;
@@ -459,6 +465,8 @@ void traversal_algorithm::done()
 
 bool traversal_algorithm::add_requests()
 {
+	if (m_done) return true;
+
 	int results_target = m_node.m_table.bucket_size();
 
 	// this only counts outstanding requests at the top of the
diff --git a/src/session_handle.cpp b/src/session_handle.cpp
index 6057f3c0a..e315edd8f 100644
--- a/src/session_handle.cpp
+++ b/src/session_handle.cpp
@@ -38,6 +38,7 @@ POSSIBILITY OF SUCH DAMAGE.
 #include "libtorrent/torrent.hpp"
 #include "libtorrent/peer_class.hpp"
 #include "libtorrent/peer_class_type_filter.hpp"
+#include "libtorrent/aux_/scope_end.hpp"
 
 #if TORRENT_ABI_VERSION == 1
 #include "libtorrent/read_resume_data.hpp"
@@ -411,7 +412,9 @@ namespace {
 		// we cannot capture a unique_ptr into a lambda in c++11, so we use a raw
 		// pointer for now. async_call uses a lambda expression to post the call
 		// to the main thread
+		// TODO: in C++14, use unique_ptr and move it into the lambda
 		auto* p = new add_torrent_params(std::move(params));
+		auto guard = aux::scope_end([p]{ delete p; });
 		p->save_path = complete(p->save_path);
 
 #if TORRENT_ABI_VERSION == 1
@@ -419,6 +422,7 @@
 #endif
 
 		async_call(&session_impl::async_add_torrent, p);
+		guard.disarm();
 	}
 
 #ifndef BOOST_NO_EXCEPTIONS
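
Note on the pattern used above: most hunks replace per-error-path free calls with a scope guard from libtorrent/aux_/scope_end.hpp. The guard frees the buffers when the enclosing scope unwinds, and is disarmed on the success path once ownership has been handed off (blocks inserted into the block cache, or the raw add_torrent_params pointer posted to the io_service). The sketch below is only an illustration of that kind of guard under those assumptions; it is not the actual contents of scope_end.hpp, and the names scope_end_guard and make_scope_end are made up for this example.

// Illustrative sketch of a scope guard in the spirit of the patch's
// aux::scope_end() usage. Hypothetical names; not libtorrent's implementation.
#include <cstdio>
#include <utility>

template <typename Fun>
struct scope_end_guard
{
	explicit scope_end_guard(Fun f) : m_fun(std::move(f)) {}

	scope_end_guard(scope_end_guard&& rhs)
		: m_fun(std::move(rhs.m_fun)), m_armed(rhs.m_armed)
	{ rhs.m_armed = false; }

	scope_end_guard(scope_end_guard const&) = delete;
	scope_end_guard& operator=(scope_end_guard const&) = delete;

	// the cleanup runs on every exit path (return, break, exception) ...
	~scope_end_guard() { if (m_armed) m_fun(); }

	// ... unless ownership was handed off and the guard was disarmed
	void disarm() { m_armed = false; }

private:
	Fun m_fun;
	bool m_armed = true;
};

template <typename Fun>
scope_end_guard<Fun> make_scope_end(Fun f)
{ return scope_end_guard<Fun>(std::move(f)); }

int main()
{
	char* buf = new char[16384];
	auto dealloc = make_scope_end([&]{ delete[] buf; std::puts("buffer freed by guard"); });

	bool const inserted_into_cache = false; // pretend the success path was not taken
	if (inserted_into_cache)
		dealloc.disarm(); // the cache would own the buffer; do not free it here

	// leaving main() runs the guard, so the early-exit path cannot leak
	return 0;
}

With a guard like this in place, each early return or break in the disk I/O paths releases the iovec automatically, which is what lets the patch delete the individual free calls on the error paths without the leak sanitizer reporting the buffers.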