premiere-libtorrent/src/session.cpp

/*
Copyright (c) 2006-2018, Arvid Norberg, Magnus Jonsson
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#include "libtorrent/config.hpp"
#include "libtorrent/extensions/ut_pex.hpp"
#include "libtorrent/extensions/ut_metadata.hpp"
#include "libtorrent/extensions/smart_ban.hpp"
#include "libtorrent/session.hpp"
#include "libtorrent/aux_/session_impl.hpp"
#include "libtorrent/aux_/session_call.hpp"
#include "libtorrent/extensions.hpp" // for add_peer_flags_t
namespace libtorrent {
#ifndef TORRENT_DISABLE_EXTENSIONS
// declared in extensions.hpp
// remove this once C++17 is required
constexpr feature_flags_t plugin::optimistic_unchoke_feature;
constexpr feature_flags_t plugin::tick_feature;
constexpr feature_flags_t plugin::dht_request_feature;
constexpr feature_flags_t plugin::alert_feature;
#endif
namespace aux {
constexpr torrent_list_index_t session_interface::torrent_state_updates;
constexpr torrent_list_index_t session_interface::torrent_want_tick;
constexpr torrent_list_index_t session_interface::torrent_want_peers_download;
constexpr torrent_list_index_t session_interface::torrent_want_peers_finished;
constexpr torrent_list_index_t session_interface::torrent_want_scrape;
constexpr torrent_list_index_t session_interface::torrent_downloading_auto_managed;
constexpr torrent_list_index_t session_interface::torrent_seeding_auto_managed;
constexpr torrent_list_index_t session_interface::torrent_checking_auto_managed;
}
#ifndef TORRENT_DISABLE_EXTENSIONS
constexpr add_peer_flags_t torrent_plugin::first_time;
constexpr add_peer_flags_t torrent_plugin::filtered;
#endif
namespace {
#if defined TORRENT_ASIO_DEBUGGING
void wait_for_asio_handlers()
{
int counter = 0;
while (log_async())
{
std::this_thread::sleep_for(seconds(1));
++counter;
std::printf("\x1b[2J\x1b[0;0H\x1b[33m==== Waiting to shut down: %d ==== \x1b[0m\n\n"
, counter);
}
async_dec_threads();
std::fprintf(stderr, "\n\nEXPECTS NO MORE ASYNC OPS\n\n\n");
}
#endif
} // anonymous namespace
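// tunes a settings_pack to minimize memory footprint, trading throughput
// and system-call overhead for RAM. A minimal usage sketch (assumes an
// existing session "ses" and the "lt" namespace alias; illustrative, not
// part of this file):
//
//   lt::settings_pack p = lt::min_memory_usage();
//   ses.apply_settings(p);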
settings_pack min_memory_usage()
{
settings_pack set;
#if TORRENT_ABI_VERSION == 1
// receive data directly into disk buffers
// this yields more system calls to read() and
// kqueue(), but saves RAM.
set.set_bool(settings_pack::contiguous_recv_buffer, false);
#endif
set.set_int(settings_pack::max_peer_recv_buffer_size, 32 * 1024 + 200);
set.set_int(settings_pack::disk_io_write_mode, settings_pack::disable_os_cache);
set.set_int(settings_pack::disk_io_read_mode, settings_pack::disable_os_cache);
// keep 2 blocks outstanding when hashing
set.set_int(settings_pack::checking_mem_usage, 2);
// don't use any extra threads to do SHA-1 hashing
set.set_int(settings_pack::aio_threads, 1);
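// keep the alert queue and the outstanding block request queues small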
set.set_int(settings_pack::alert_queue_size, 100);
set.set_int(settings_pack::max_out_request_queue, 300);
set.set_int(settings_pack::max_allowed_in_request_queue, 100);
// setting this to a low limit makes peers more likely to request
// from the same piece, which means fewer partial pieces and
// fewer entries in the partial piece list
set.set_int(settings_pack::whole_pieces_threshold, 2);
set.set_bool(settings_pack::use_parole_mode, false);
set.set_bool(settings_pack::prioritize_partial_pieces, true);
// connect to 5 peers per second
set.set_int(settings_pack::connection_speed, 5);
// only have 4 files open at a time
set.set_int(settings_pack::file_pool_size, 4);
// we want to keep the peer list as small as possible
set.set_bool(settings_pack::allow_multiple_connections_per_ip, false);
set.set_int(settings_pack::max_failcount, 2);
set.set_int(settings_pack::inactivity_timeout, 120);
// whenever a peer has downloaded one block, write
// it to disk, and don't read anything from the
// socket until the disk write is complete
set.set_int(settings_pack::max_queued_disk_bytes, 1);
// don't keep track of all UPnP devices; keep
// the device list small
set.set_bool(settings_pack::upnp_ignore_nonrouters, true);
// never keep more than one 16kB block in
// the send buffer
set.set_int(settings_pack::send_buffer_watermark, 9);
// don't use any disk cache
set.set_int(settings_pack::cache_size, 0);
set.set_bool(settings_pack::use_read_cache, false);
set.set_bool(settings_pack::close_redundant_connections, true);
set.set_int(settings_pack::max_peerlist_size, 500);
set.set_int(settings_pack::max_paused_peerlist_size, 50);
// udp trackers are cheaper to talk to
set.set_bool(settings_pack::prefer_udp_trackers, true);
set.set_int(settings_pack::max_rejects, 10);
set.set_int(settings_pack::recv_socket_buffer_size, 16 * 1024);
set.set_int(settings_pack::send_socket_buffer_size, 16 * 1024);
// use less memory when reading and writing
// whole pieces
set.set_bool(settings_pack::coalesce_reads, false);
set.set_bool(settings_pack::coalesce_writes, false);
return set;
}
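// tunes a settings_pack for a dedicated seed box on a fast link: large
// caches, generous connection limits and short timeouts. A usage sketch
// (assumes an existing session "ses"; illustrative only):
//
//   ses.apply_settings(lt::high_performance_seed());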
settings_pack high_performance_seed()
{
settings_pack set;
// don't throttle TCP, assume there is
// plenty of bandwidth
set.set_int(settings_pack::mixed_mode_algorithm, settings_pack::prefer_tcp);
set.set_int(settings_pack::max_out_request_queue, 1500);
set.set_int(settings_pack::max_allowed_in_request_queue, 2000);
set.set_int(settings_pack::max_peer_recv_buffer_size, 5 * 1024 * 1024);
// we will probably see a high rate of alerts, so make it
// less likely to lose alerts
set.set_int(settings_pack::alert_queue_size, 10000);
// allow 500 files open at a time
set.set_int(settings_pack::file_pool_size, 500);
// don't update access time for each read/write
set.set_bool(settings_pack::no_atime_storage, true);
// as a seed box, we must accept multiple peers behind
// the same NAT
// set.set_bool(settings_pack::allow_multiple_connections_per_ip, true);
// connect to 500 peers per second
set.set_int(settings_pack::connection_speed, 500);
// allow 8000 peer connections
set.set_int(settings_pack::connections_limit, 8000);
// allow lots of peers to try to connect simultaneously
set.set_int(settings_pack::listen_queue_size, 3000);
// unchoke many peers
set.set_int(settings_pack::unchoke_slots_limit, 2000);
// use 1 GB of cache
set.set_int(settings_pack::cache_size, 32768 * 2);
set.set_bool(settings_pack::use_read_cache, true);
set.set_int(settings_pack::read_cache_line_size, 32);
set.set_int(settings_pack::write_cache_line_size, 256);
// 30 seconds expiration to save cache
// space for active pieces
set.set_int(settings_pack::cache_expiry, 30);
// coalescing would allocate contiguous buffers for reads and writes
// in case the OS we're running on doesn't support readv/writev, but
// it's disabled here since it uses a lot more RAM and a significant
// amount of CPU to copy the data around
set.set_bool(settings_pack::coalesce_reads, false);
set.set_bool(settings_pack::coalesce_writes, false);
// the max number of bytes pending write before we throttle
// download rate
set.set_int(settings_pack::max_queued_disk_bytes, 7 * 1024 * 1024);
// prevent fast pieces from interfering with suggested pieces.
// since we unchoke everyone, we don't need fast pieces anyway
set.set_int(settings_pack::allowed_fast_set_size, 0);
// suggest pieces in the read cache for higher cache hit rate
set.set_int(settings_pack::suggest_mode, settings_pack::suggest_read_cache);
set.set_bool(settings_pack::close_redundant_connections, true);
set.set_int(settings_pack::max_rejects, 10);
set.set_int(settings_pack::recv_socket_buffer_size, 1024 * 1024);
set.set_int(settings_pack::send_socket_buffer_size, 1024 * 1024);
// don't let connections linger for too long
set.set_int(settings_pack::request_timeout, 10);
set.set_int(settings_pack::peer_timeout, 20);
set.set_int(settings_pack::inactivity_timeout, 20);
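// allow a large number of torrents to be active, announcing
// and seeding simultaneously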
set.set_int(settings_pack::active_limit, 2000);
set.set_int(settings_pack::active_tracker_limit, 2000);
set.set_int(settings_pack::active_dht_limit, 600);
set.set_int(settings_pack::active_seeds, 2000);
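// use a fixed number of unchoke slots (see unchoke_slots_limit above)
// instead of adjusting the slot count based on upload rate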
set.set_int(settings_pack::choking_algorithm, settings_pack::fixed_slots_choker);
// with a disk read delay of 500 ms and a send rate of 4 MB/s,
// the upper limit should be 2 MB
set.set_int(settings_pack::send_buffer_watermark, 3 * 1024 * 1024);
// put 1.5 seconds worth of data in the send buffer
// this gives the disk I/O more heads-up on disk
// reads, and can maximize throughput
set.set_int(settings_pack::send_buffer_watermark_factor, 150);
// always stuff at least 1 MiB down each peer
// pipe, to quickly ramp up send rates
set.set_int(settings_pack::send_buffer_low_watermark, 1 * 1024 * 1024);
// don't retry peers if they fail once. Let them
// connect to us if they want to
set.set_int(settings_pack::max_failcount, 1);
// number of disk threads for low level file operations
set.set_int(settings_pack::aio_threads, 8);
// keep 5 MiB outstanding when checking hashes
// of a resumed file
set.set_int(settings_pack::checking_mem_usage, 320);
return set;
}
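// restores session state previously saved from a running session (e.g.
// via session_handle::save_state()), reading only the categories
// selected by "flags". A decoding sketch (assumes "buf" holds the
// bencoded state bytes; error handling abbreviated, names illustrative):
//
//   lt::error_code ec;
//   lt::bdecode_node e = lt::bdecode(buf, ec);
//   lt::session_params params = lt::read_session_params(
//       e, lt::session_handle::save_settings);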
session_params read_session_params(bdecode_node const& e, save_state_flags_t const flags)
{
session_params params;
bdecode_node settings;
if (e.type() != bdecode_node::dict_t) return params;
if (flags & session_handle::save_settings)
{
settings = e.dict_find_dict("settings");
if (settings)
{
params.settings = load_pack_from_dict(settings);
}
}
#ifndef TORRENT_DISABLE_DHT
if (flags & session_handle::save_dht_settings)
{
settings = e.dict_find_dict("dht");
if (settings)
{
params.dht_settings = dht::read_dht_settings(settings);
}
}
if (flags & session_handle::save_dht_state)
{
settings = e.dict_find_dict("dht state");
if (settings)
{
params.dht_state = dht::read_dht_state(settings);
}
}
#endif
return params;
}
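// wires up a new session. If the caller supplies an io_service, the
// session runs on it and the caller is responsible for driving the
// event loop; otherwise the session creates its own io_service and a
// dedicated thread to run it (the internal_executor branches below)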
void session::start(session_params params, io_service* ios)
{
bool const internal_executor = ios == nullptr;
if (internal_executor)
{
// the user did not provide an executor, we have to use our own
m_io_service = std::make_shared<io_service>();
ios = m_io_service.get();
}
m_impl = std::make_shared<aux::session_impl>(std::ref(*ios), std::ref(params.settings));
*static_cast<session_handle*>(this) = session_handle(m_impl);
#ifndef TORRENT_DISABLE_EXTENSIONS
for (auto const& ext : params.extensions)
{
m_impl->add_ses_extension(ext);
}
#endif
#ifndef TORRENT_DISABLE_DHT
m_impl->set_dht_settings(params.dht_settings);
m_impl->set_dht_state(std::move(params.dht_state));
m_impl->set_dht_storage(params.dht_storage_constructor);
#endif
m_impl->start_session();
if (internal_executor)
{
// start a thread for the message pump
m_thread = std::make_shared<std::thread>(
[&] { m_io_service->run(); });
}
}
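// returns the built-in session extensions (ut_pex, ut_metadata and
// smart_ban), or an empty list when "empty" is requested or extensions
// are disabled at compile time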
namespace {
std::vector<std::shared_ptr<plugin>> default_plugins(
bool empty = false)
{
#ifndef TORRENT_DISABLE_EXTENSIONS
if (empty) return {};
using wrapper = aux::session_impl::session_plugin_wrapper;
return {
std::make_shared<wrapper>(create_ut_pex_plugin),
std::make_shared<wrapper>(create_ut_metadata_plugin),
std::make_shared<wrapper>(create_smart_ban_plugin)
};
#else
TORRENT_UNUSED(empty);
return {};
#endif
}
}
void session::start(session_flags_t const flags, settings_pack sp, io_service* ios)
{
start({std::move(sp),
default_plugins(!(flags & add_default_plugins))}, ios);
}
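// the destructor blocks while the session shuts down: it initiates the
// abort and, when the session owns its network thread, joins it. To
// shut down without blocking, call abort() first and keep the returned
// session_proxy alive instead (see below)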
session::~session()
{
aux::dump_call_profile();
TORRENT_ASSERT(m_impl);
// capture the shared_ptr in the dispatched function
// to keep the session_impl alive
m_impl->call_abort();
if (m_thread && m_thread.unique())
{
#if defined TORRENT_ASIO_DEBUGGING
wait_for_asio_handlers();
#endif
m_thread->join();
}
}
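// hands ownership of the io_service, thread and implementation over to
// a session_proxy, so the session object itself can be destroyed
// without blocking. A shutdown sketch (assumes an existing session
// "ses"; illustrative only):
//
//   lt::session_proxy proxy = ses.abort();
//   // ... destroy "ses" and do other cleanup work here ...
//   // proxy's destructor joins the network thread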
session_proxy session::abort()
{
// stop calling the alert notify function now, to avoid it thinking the
// session is still alive
m_impl->alerts().set_notify_function({});
return session_proxy(m_io_service, m_thread, m_impl);
}
session_proxy::session_proxy() = default;
session_proxy::session_proxy(std::shared_ptr<io_service> ios
, std::shared_ptr<std::thread> t
, std::shared_ptr<aux::session_impl> impl)
: m_io_service(std::move(ios))
, m_thread(std::move(t))
, m_impl(std::move(impl))
{}
session_proxy::session_proxy(session_proxy const&) = default;
session_proxy& session_proxy::operator=(session_proxy const&) = default;
session_proxy::session_proxy(session_proxy&&) noexcept = default;
session_proxy& session_proxy::operator=(session_proxy&&) noexcept = default;
session_proxy::~session_proxy()
{
if (m_thread && m_thread.unique())
{
#if defined TORRENT_ASIO_DEBUGGING
wait_for_asio_handlers();
#endif
m_thread->join();
}
}
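// session_params bundles the settings, extensions and (when DHT is
// enabled) DHT state used to seed a new session. The single-argument
// constructor installs the default plugins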
session_params::session_params(settings_pack sp)
: session_params(std::move(sp), default_plugins())
{}
session_params::session_params(settings_pack sp
, std::vector<std::shared_ptr<plugin>> exts)
: settings(std::move(sp))
, extensions(std::move(exts))
#ifndef TORRENT_DISABLE_DHT
, dht_storage_constructor(dht::dht_default_storage_constructor)
#endif
{}
}