From 1ce408922991ad60fa7a5d63f6f9abb3d4f00132 Mon Sep 17 00:00:00 2001 From: arvidn Date: Thu, 24 Aug 2017 00:41:15 +0200 Subject: [PATCH 1/5] log failures to parse interface and node lists from settings --- src/session_impl.cpp | 24 ++++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/src/session_impl.cpp b/src/session_impl.cpp index 1078e918f..9969df21b 100644 --- a/src/session_impl.cpp +++ b/src/session_impl.cpp @@ -5054,10 +5054,18 @@ retry: void session_impl::update_outgoing_interfaces() { - std::string net_interfaces = m_settings.get_str(settings_pack::outgoing_interfaces); + std::string const net_interfaces = m_settings.get_str(settings_pack::outgoing_interfaces); // declared in string_util.hpp parse_comma_separated_string(net_interfaces, m_net_interfaces); + +#ifndef TORRENT_DISABLE_LOGGING + if (!net_interfaces.empty() && m_net_interfaces.empty()) + { + session_log("ERROR: failed to parse outgoing interface list: %s" + , net_interfaces.c_str()); + } +#endif } tcp::endpoint session_impl::bind_outgoing_socket(socket_type& s, address @@ -5250,13 +5258,18 @@ retry: void session_impl::update_listen_interfaces() { - std::string net_interfaces = m_settings.get_str(settings_pack::listen_interfaces); + std::string const net_interfaces = m_settings.get_str(settings_pack::listen_interfaces); std::vector > new_listen_interfaces; // declared in string_util.hpp parse_comma_separated_string_port(net_interfaces, new_listen_interfaces); #ifndef TORRENT_DISABLE_LOGGING + if (!net_interfaces.empty() && new_listen_interfaces.empty()) + { + session_log("ERROR: failed to parse listen_interfaces setting: %s" + , net_interfaces.c_str()); + } session_log("update listen interfaces: %s", net_interfaces.c_str()); #endif @@ -5437,6 +5450,13 @@ retry: std::vector > nodes; parse_comma_separated_string_port(node_list, nodes); +#ifndef TORRENT_DISABLE_LOGGING + if (!node_list.empty() && nodes.empty()) + { + session_log("ERROR: failed to parse DHT 
bootstrap list: %s", node_list.c_str()); } #endif + for (int i = 0; i < nodes.size(); ++i) { add_dht_router(nodes[i]); From 41bfd41cea66009c7a4811744022baa9218c9841 Mon Sep 17 00:00:00 2001 From: arvidn Date: Fri, 25 Aug 2017 00:34:20 +0200 Subject: [PATCH 2/5] fix python binding with new boost version --- bindings/python/src/alert.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/bindings/python/src/alert.cpp b/bindings/python/src/alert.cpp index 8572c7e63..c171cff2e 100644 --- a/bindings/python/src/alert.cpp +++ b/bindings/python/src/alert.cpp @@ -10,6 +10,8 @@ #include #include "bytes.hpp" +#include + using namespace boost::python; using namespace libtorrent; From 52ccad23b96ac0158890b4519212a990e5de2bf3 Mon Sep 17 00:00:00 2001 From: Steven Siloti Date: Mon, 28 Aug 2017 19:57:58 -0700 Subject: [PATCH 3/5] read_piece: handle failure to allocate piece buffer --- src/torrent.cpp | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/torrent.cpp b/src/torrent.cpp index cdeb09762..395f95103 100644 --- a/src/torrent.cpp +++ b/src/torrent.cpp @@ -1014,6 +1014,12 @@ namespace libtorrent boost::shared_ptr rp = boost::make_shared(); rp->piece_data.reset(new (std::nothrow) char[piece_size]); + if (!rp->piece_data) + { + m_ses.alerts().emplace_alert( + get_handle(), piece, error_code(boost::system::errc::not_enough_memory, generic_category())); + return; + } rp->blocks_left = 0; rp->fail = false; From 621da10e60b844a7d226aafc6d8014ace5b93863 Mon Sep 17 00:00:00 2001 From: Steven Siloti Date: Tue, 29 Aug 2017 13:29:00 -0700 Subject: [PATCH 4/5] hold an owning reference to storage objects in try_flush_write_blocks It is possible for all other references to a storage object to be destroyed while try_flush_write_blocks is running. If the storage is destroyed then find_piece will crash when trying to re-acquire the shared_ptr. To prevent this, keep the storage alive by holding a shared_ptr to it in try_flush_write_blocks. 
Normally the fence job when stopping a torrent would prevent the storage object from being destroyed until all flush jobs are complete. try_flush_write_blocks can be run after every disk job though so it has the potential to "straddle the fence". If the associated torrent does get unloaded then it is expected that find_piece will return NULL thus causing the entry to be ignored. --- src/disk_io_thread.cpp | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/disk_io_thread.cpp b/src/disk_io_thread.cpp index 1d277d0ba..5485f1916 100644 --- a/src/disk_io_thread.cpp +++ b/src/disk_io_thread.cpp @@ -899,22 +899,22 @@ namespace libtorrent DLOG("try_flush_write_blocks: %d\n", num); list_iterator range = m_disk_cache.write_lru_pieces(); - std::vector > pieces; + std::vector, int> > pieces; pieces.reserve(m_disk_cache.num_write_lru_pieces()); for (list_iterator p = range; p.get() && num > 0; p.next()) { cached_piece_entry* e = p.get(); if (e->num_dirty == 0) continue; - pieces.push_back(std::make_pair(e->storage.get(), int(e->piece))); + pieces.push_back(std::make_pair(e->storage, int(e->piece))); } - for (std::vector >::iterator i = pieces.begin() + for (std::vector, int> >::iterator i = pieces.begin() , end(pieces.end()); i != end; ++i) { // TODO: instead of doing a lookup each time through the loop, save // cached_piece_entry pointers with piece_refcount incremented to pin them - cached_piece_entry* pe = m_disk_cache.find_piece(i->first, i->second); + cached_piece_entry* pe = m_disk_cache.find_piece(i->first.get(), i->second); if (pe == NULL) continue; // another thread may flush this piece while we're looping and @@ -941,10 +941,10 @@ namespace libtorrent // if we still need to flush blocks, start over and flush // everything in LRU order (degrade to lru cache eviction) - for (std::vector >::iterator i = pieces.begin() + for (std::vector, int> >::iterator i = pieces.begin() , end(pieces.end()); i != end; ++i) { - cached_piece_entry* pe = 
m_disk_cache.find_piece(i->first, i->second); + cached_piece_entry* pe = m_disk_cache.find_piece(i->first.get(), i->second); if (pe == NULL) continue; if (pe->num_dirty == 0) continue; From 37ffe99a196e7afc57611639668be8d846fe56ef Mon Sep 17 00:00:00 2001 From: arvidn Date: Sun, 3 Sep 2017 12:28:34 +0200 Subject: [PATCH 5/5] fix gen_fwd.py to correctly put declarations in the dht namespace that belong there --- include/libtorrent/fwd.hpp | 12 ++++++++---- tools/gen_fwd.py | 16 +++++++++++++--- 2 files changed, 21 insertions(+), 7 deletions(-) diff --git a/include/libtorrent/fwd.hpp b/include/libtorrent/fwd.hpp index b9f44ddd2..9a5349556 100644 --- a/include/libtorrent/fwd.hpp +++ b/include/libtorrent/fwd.hpp @@ -179,10 +179,6 @@ class hasher; struct ip_filter; class port_filter; -// include/libtorrent/kademlia/dht_storage.hpp -struct dht_storage_counters; -struct dht_storage_interface; - // include/libtorrent/peer_connection_handle.hpp struct peer_connection_handle; struct bt_peer_connection_handle; @@ -244,6 +240,14 @@ class torrent_info; // include/libtorrent/torrent_status.hpp struct torrent_status; +namespace dht { + +// include/libtorrent/kademlia/dht_storage.hpp +struct dht_storage_counters; +struct dht_storage_interface; + +} + #ifndef TORRENT_NO_DEPRECATE // include/libtorrent/alert_types.hpp diff --git a/tools/gen_fwd.py b/tools/gen_fwd.py index 8febe76e8..aeb0137f6 100644 --- a/tools/gen_fwd.py +++ b/tools/gen_fwd.py @@ -57,6 +57,7 @@ deprecated_classes = os.popen('git grep TORRENT_DEPRECATED_EXPORT').read().split def filter_classes(classes, keyword): current_file = '' ret = '' + dht_ret = '' for c in classes: line = c.split(':', 1) if not line[0].endswith('.hpp'): continue @@ -71,13 +72,22 @@ def filter_classes(classes, keyword): # TODO: support TORRENT_DEPRECATED_EXPORT if decl[1].strip() != keyword: continue + content = '' if this_file != current_file: - ret += '\n// ' + this_file + '\n' + content += '\n// ' + this_file + '\n' current_file = this_file; 
decl = decl[0] + ' ' + decl[2] if not decl.endswith(';'): decl += ';' - ret += decl + '\n' - return ret + content += decl + '\n' + if 'kademlia' in this_file: + dht_ret += content + else: + ret += content + + if dht_ret == '': + return ret + else: + return ret + '\nnamespace dht {\n' + dht_ret + '\n}\n' os.remove('include/libtorrent/fwd.hpp') with open('include/libtorrent/fwd.hpp', 'w+') as f: