From 232b2e07588e9faa4efb1dec41b837ce93013b50 Mon Sep 17 00:00:00 2001
From: arvidn
Date: Sat, 9 May 2020 16:01:55 +0200
Subject: [PATCH] tweak rate based choker to increase rate threshold by 2
 kiB/s. Improve documentation

---
 ChangeLog                            |  1 +
 docs/gen_reference_doc.py            |  3 +-
 docs/manual.rst                      | 25 ++++++++++
 include/libtorrent/settings_pack.hpp | 68 ++++++++++++++++------------
 simulation/Jamfile                   | 54 +++++++++++-----------
 simulation/setup_swarm.cpp           | 29 +++++++++---
 simulation/setup_swarm.hpp           | 21 +++++++--
 simulation/test_swarm.cpp            |  8 ++--
 simulation/test_tracker.cpp          |  6 +--
 src/choker.cpp                       | 17 +++----
 src/session_impl.cpp                 |  2 -
 src/settings_pack.cpp                |  1 +
 12 files changed, 145 insertions(+), 90 deletions(-)

diff --git a/ChangeLog b/ChangeLog
index 051df979f..1cbfdbc3b 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,4 @@
+	* improve rate-based choker documentation, and minor tweak
 	* undeprecate upnp_ignore_nonrouters (but refering to devices on our subnet)
 	* increase default tracker timeout
 	* retry failed socks5 server connections

diff --git a/docs/gen_reference_doc.py b/docs/gen_reference_doc.py
index 44bc031e1..2acfbbc4c 100644
--- a/docs/gen_reference_doc.py
+++ b/docs/gen_reference_doc.py
@@ -81,7 +81,8 @@ static_links = \
     ".. _`BEP 3`: https://www.bittorrent.org/beps/bep_0003.html",
     ".. _`BEP 17`: https://www.bittorrent.org/beps/bep_0017.html",
     ".. _`BEP 19`: https://www.bittorrent.org/beps/bep_0019.html",
-    ".. _`BEP 42`: https://www.bittorrent.org/beps/bep_0042.html"
+    ".. _`BEP 42`: https://www.bittorrent.org/beps/bep_0042.html",
+    ".. _`rate based choking`: manual-ref.html#rate-based-choking",
 }
 
 anon_index = 0

diff --git a/docs/manual.rst b/docs/manual.rst
index e386ff290..a63c4c37d 100644
--- a/docs/manual.rst
+++ b/docs/manual.rst
@@ -841,6 +841,31 @@
 example, if a client has both IPv4 and IPv6 connectivity, but the socks5 proxy
 only resolves to IPv4, only the IPv4 address will have a UDP tunnel. In that
 case, the IPv6 connection will not be used, since it cannot use the proxy.
 
+rate based choking
+==================
+
+libtorrent supports a choking algorithm that automatically determines the number
+of upload slots (unchoke slots) based on the upload rate to peers. It is
+controlled by the settings_pack::choking_algorithm setting. The
+settings_pack::unchoke_slots_limit setting is ignored in this mode.
+
+The algorithm is designed to stay stable, and not oscillate the number of upload
+slots.
+
+The initial rate threshold is set by settings_pack::rate_choker_initial_threshold.
+
+It sorts all peers by the rate at which we are uploading to them, then:
+
+1. Compare the fastest peer against the initial threshold.
+2. Increment the threshold by 2 kiB/s.
+3. Compare the next fastest peer against the threshold.
+   If the peer's rate is higher than the threshold, go to step 2.
+4. Terminate. The number of peers visited is the number of unchoke slots, but
+   never less than 2.
+
+In other words, the more upload slots that are already open, the faster the
+slowest unchoked peer has to be uploading in order to open yet another slot.
+
 predictive piece announce
 =========================
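The numbered procedure added to manual.rst above boils down to a short loop. The following is a simplified, self-contained sketch of that slot-counting rule; the function name and the standalone form are illustrative only, and the patch's actual implementation is the src/choker.cpp hunk further down, which reads the threshold from the settings and measures rates over the unchoke interval.

```cpp
#include <algorithm>
#include <cstdint>
#include <functional>
#include <vector>

// Simplified illustration of the rate-based choker described above.
// "rates" holds the current upload rate (bytes/s) to each unchoke
// candidate. The default starting threshold of 1024 mirrors the
// rate_choker_initial_threshold default set at the end of this patch.
int count_upload_slots(std::vector<std::int64_t> rates, int rate_threshold = 1024)
{
	// visit peers in decreasing order of the rate we upload to them
	std::sort(rates.begin(), rates.end(), std::greater<>());

	int upload_slots = 0;
	for (std::int64_t const rate : rates)
	{
		// the fastest peer is compared against the initial threshold,
		// each subsequent peer against a threshold 2 kiB/s higher
		if (rate < rate_threshold) break;
		++upload_slots;
		rate_threshold += 2048;
	}
	// one extra slot is always opened beyond the peers that met the
	// threshold
	return ++upload_slots;
}
```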
diff --git a/include/libtorrent/settings_pack.hpp b/include/libtorrent/settings_pack.hpp
index a57ac4ebb..11cf0b10e 100644
--- a/include/libtorrent/settings_pack.hpp
+++ b/include/libtorrent/settings_pack.hpp
@@ -56,9 +56,6 @@ POSSIBILITY OF SUCH DAMAGE.
 //
 // Each configuration option is named with an enum value inside the
 // settings_pack class. These are the available settings:
-//
-// .. include:: settings-ref.rst
-//
 namespace libtorrent {
 
 namespace aux {
@@ -92,6 +89,9 @@
 	// enum values. These values are passed in to the ``set_str()``,
 	// ``set_int()``, ``set_bool()`` functions, to specify the setting to
 	// change.
+	//
+	// .. include:: settings-ref.rst
+	//
 	struct TORRENT_EXPORT settings_pack
 	{
 		friend TORRENT_EXTRA_EXPORT void apply_pack_impl(settings_pack const*
@@ -1043,36 +1043,14 @@
 		// downloading torrents is always "tit-for-tat", i.e. the peers we
 		// download the fastest from are unchoked.
 		//
-		// The options for choking algorithms are:
-		//
-		// * ``fixed_slots_choker`` is the traditional choker with a fixed
-		//   number of unchoke slots (as specified by
-		//   ``settings_pack::unchoke_slots_limit``).
-		//
-		// * ``rate_based_choker`` opens up unchoke slots based on the upload
-		//   rate achieved to peers. The more slots that are opened, the
-		//   marginal upload rate required to open up another slot increases.
+		// The options for choking algorithms are defined in the
+		// choking_algorithm_t enum.
 		//
 		// ``seed_choking_algorithm`` controls the seeding unchoke behavior.
 		// i.e. How we select which peers to unchoke for seeding torrents.
 		// Since a seeding torrent isn't downloading anything, the
-		// tit-for-tat mechanism cannot be used. The available options are:
-		//
-		// * ``round_robin`` which round-robins the peers that are unchoked
-		//   when seeding. This distributes the upload bandwidth uniformly and
-		//   fairly. It minimizes the ability for a peer to download everything
-		//   without redistributing it.
-		//
-		// * ``fastest_upload`` unchokes the peers we can send to the fastest.
-		//   This might be a bit more reliable in utilizing all available
-		//   capacity.
-		//
-		// * ``anti_leech`` prioritizes peers who have just started or are
-		//   just about to finish the download. The intention is to force
-		//   peers in the middle of the download to trade with each other.
-		//   This does not just take into account the pieces a peer is
-		//   reporting having downloaded, but also the pieces we have sent
-		//   to it.
+		// tit-for-tat mechanism cannot be used. The available options are
+		// defined in the seed_choking_algorithm_t enum.
 		choking_algorithm,
 		seed_choking_algorithm,
@@ -1770,6 +1748,15 @@
 		// option.
 		send_not_sent_low_watermark,
 
+		// The rate based choker compares the upload rate to peers against a
+		// threshold that increases by 2 kiB/s for every peer it visits,
+		// visiting peers in decreasing upload rate. The number of upload
+		// slots is determined by the number of peers whose upload rate
+		// exceeds the threshold. This option sets the start value for this
+		// threshold. A higher value leads to fewer unchoke slots, a lower
+		// value leads to more.
+		rate_choker_initial_threshold,
+
 		// The expiration time of UPnP port-mappings, specified in seconds. 0
 		// means permanent lease. Some routers do not support expiration times
 		// on port-maps (nor correctly returning an error indicating lack of
@@ -1792,7 +1779,16 @@
 	enum choking_algorithm_t : std::uint8_t
 	{
+		// This is the traditional choker with a fixed number of unchoke
+		// slots (as specified by settings_pack::unchoke_slots_limit).
 		fixed_slots_choker = 0,
+
+		// This opens up unchoke slots based on the upload rate achieved to
+		// peers. The more slots that are opened, the higher the marginal
+		// upload rate required to open up another slot. Configure the
+		// initial threshold with settings_pack::rate_choker_initial_threshold.
+		//
+		// For more information, see `rate based choking`_.
 		rate_based_choker = 2,
 #if TORRENT_ABI_VERSION == 1
 		bittyrant_choker TORRENT_DEPRECATED_ENUM = 3
 #endif
@@ -1803,8 +1799,22 @@
 	enum seed_choking_algorithm_t : std::uint8_t
 	{
+		// Round-robins the peers that are unchoked
+		// when seeding. This distributes the upload bandwidth uniformly and
+		// fairly. It minimizes the ability for a peer to download everything
+		// without redistributing it.
 		round_robin,
+
+		// Unchokes the peers we can send to the fastest. This might be a
+		// bit more reliable in utilizing all available capacity.
 		fastest_upload,
+
+		// Prioritizes peers who have just started or are
+		// just about to finish the download. The intention is to force
+		// peers in the middle of the download to trade with each other.
+		// This does not just take into account the pieces a peer is
+		// reporting having downloaded, but also the pieces we have sent
+		// to it.
 		anti_leech
 	};
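For applications that want to opt in to this behavior, the knobs introduced and documented above are plain integer settings. A minimal configuration sketch follows; the 4096 value is only an illustration of raising the initial threshold above its 1024 default, not a recommendation.

```cpp
#include <libtorrent/session.hpp>
#include <libtorrent/settings_pack.hpp>

int main()
{
	lt::settings_pack pack;

	// switch from the default fixed_slots_choker to the rate based choker
	pack.set_int(lt::settings_pack::choking_algorithm
		, lt::settings_pack::rate_based_choker);

	// raise the initial threshold from the 1024 B/s default, so the first
	// unchoke slot requires a little more upload rate before it opens
	pack.set_int(lt::settings_pack::rate_choker_initial_threshold, 4096);

	lt::session ses(pack);
	// ... add torrents and run as usual
}
```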
diff --git a/simulation/Jamfile b/simulation/Jamfile
index 13f5eab8d..a08f2ca52 100644
--- a/simulation/Jamfile
+++ b/simulation/Jamfile
@@ -26,32 +26,30 @@ project
 	on
 	;
 
-alias libtorrent-sims :
-	[ run test_pause.cpp ]
-	[ run test_socks5.cpp ]
-	[ run test_checking.cpp ]
-	[ run test_optimistic_unchoke.cpp ]
-	[ run test_transfer.cpp ]
-	[ run test_http_connection.cpp ]
-	[ run test_web_seed.cpp ]
-	[ run test_auto_manage.cpp ]
-	[ run test_torrent_status.cpp ]
-	[ run test_swarm.cpp ]
-	[ run test_session.cpp ]
-	[ run test_super_seeding.cpp ]
-	[ run test_utp.cpp ]
-	[ run test_dht.cpp ]
-	[ run test_dht_bootstrap.cpp ]
-	[ run test_dht_storage.cpp ]
-	[ run test_pe_crypto.cpp ]
-	[ run test_metadata_extension.cpp ]
-	[ run test_tracker.cpp ]
-	[ run test_thread_pool.cpp ]
-	[ run test_ip_filter.cpp ]
-	[ run test_dht_rate_limit.cpp ]
-	[ run test_fast_extensions.cpp ]
-	[ run test_file_pool.cpp ]
-	[ run test_save_resume.cpp ]
-	[ run test_error_handling.cpp ]
-	;
+run test_pause.cpp ;
+run test_socks5.cpp ;
+run test_checking.cpp ;
+run test_optimistic_unchoke.cpp ;
+run test_transfer.cpp ;
+run test_http_connection.cpp ;
+run test_web_seed.cpp ;
+run test_auto_manage.cpp ;
+run test_torrent_status.cpp ;
+run test_swarm.cpp ;
+run test_session.cpp ;
+run test_super_seeding.cpp ;
+run test_utp.cpp ;
+run test_dht.cpp ;
+run test_dht_bootstrap.cpp ;
+run test_dht_storage.cpp ;
+run test_pe_crypto.cpp ;
+run test_metadata_extension.cpp ;
+run test_tracker.cpp ;
+run test_thread_pool.cpp ;
+run test_ip_filter.cpp ;
+run test_dht_rate_limit.cpp ;
+run test_fast_extensions.cpp ;
+run test_file_pool.cpp ;
+run test_save_resume.cpp ;
+run test_error_handling.cpp ;
 
diff --git a/simulation/setup_swarm.cpp b/simulation/setup_swarm.cpp
index bb84c46a1..bc508455f 100644
--- a/simulation/setup_swarm.cpp
+++ b/simulation/setup_swarm.cpp
@@ -51,6 +51,13 @@ POSSIBILITY OF SUCH DAMAGE.
 
 using namespace sim;
 
+
+constexpr swarm_test_t swarm_test::download;
+constexpr swarm_test_t swarm_test::upload;
+constexpr swarm_test_t swarm_test::no_auto_stop;
+constexpr swarm_test_t swarm_test::large_torrent;
+constexpr swarm_test_t swarm_test::no_storage;
+
 namespace {
 
 int transfer_rate(lt::address ip)
@@ -207,7 +214,7 @@ void enable_enc(lt::settings_pack& p)
 }
 
 void setup_swarm(int num_nodes
-	, swarm_test type
+	, swarm_test_t type
 	, std::function new_session
 	, std::function add_torrent
 	, std::function on_alert
@@ -221,7 +228,7 @@ void setup_swarm(int num_nodes
 }
 
 void setup_swarm(int num_nodes
-	, swarm_test type
+	, swarm_test_t type
 	, sim::simulation& sim
 	, std::function new_session
 	, std::function add_torrent
@@ -239,7 +246,7 @@ void setup_swarm(int num_nodes
 }
 
 void setup_swarm(int num_nodes
-	, swarm_test type
+	, swarm_test_t type
 	, sim::simulation& sim
 	, lt::settings_pack const& default_settings
 	, lt::add_torrent_params const& default_add_torrent
@@ -259,7 +266,7 @@ void setup_swarm(int num_nodes
 }
 
 void setup_swarm(int num_nodes
-	, swarm_test type
+	, swarm_test_t const type
 	, sim::simulation& sim
 	, lt::settings_pack const& default_settings
 	, lt::add_torrent_params const& default_add_torrent
@@ -280,13 +287,21 @@ void setup_swarm(int num_nodes
 	lt::error_code ec;
 	int const swarm_id = test_counter();
 	std::string path = save_path(swarm_id, 0);
+
+	// #error implement a storage-free version! no_storage flag
+
 	lt::create_directory(path, ec);
 	if (ec) std::printf("failed to create directory: \"%s\": %s\n"
 		, path.c_str(), ec.message().c_str());
 	std::ofstream file(lt::combine_path(path, "temporary").c_str());
-	auto ti = ::create_torrent(&file, "temporary", 0x4000, 9, false);
+	auto ti = ::create_torrent(&file, "temporary", 0x4000, (type & swarm_test::large_torrent) ? 50 : 9, false);
 	file.close();
 
+	if (bool(type & swarm_test::download) && bool(type & swarm_test::upload))
+	{
+		TEST_ERROR("can only use one of upload or download test type");
+	}
+
 	// session 0 is the one we're testing. The others provide the scaffolding
 	// it's either a downloader or a seed
 	for (int i = 0; i < num_nodes; ++i)
@@ -323,7 +338,7 @@ void setup_swarm(int num_nodes
 		}
 
 		lt::add_torrent_params p = default_add_torrent;
-		if (type == swarm_test::download)
+		if (type & swarm_test::download)
 		{
 			// in download tests, session 0 is a downloader and every other session
 			// is a seed. save path 0 is where the files are, so that's for seeds
@@ -403,7 +418,7 @@ void setup_swarm(int num_nodes
 
 		bool shut_down = terminate(tick, *nodes[0]);
 
-		if (type == swarm_test::upload)
+		if ((type & swarm_test::upload) && !bool(type & swarm_test::no_auto_stop))
 		{
 			shut_down |= std::all_of(nodes.begin() + 1, nodes.end()
 				, [](std::shared_ptr const& s)
#include "simulator/simulator.hpp" #include "libtorrent/address.hpp" #include "libtorrent/fwd.hpp" +#include "libtorrent/flags.hpp" #include #ifndef TORRENT_SETUP_SWARM_HPP_INCLUDED #define TORRENT_SETUP_SWARM_HPP_INCLUDED -enum class swarm_test { download, upload, upload_no_auto_stop }; +using lt::operator""_bit; +using swarm_test_t = lt::flags::bitfield_flag; + +struct swarm_test +{ + constexpr static swarm_test_t download = 0_bit; + constexpr static swarm_test_t upload = 1_bit; + constexpr static swarm_test_t no_auto_stop = 2_bit; + constexpr static swarm_test_t large_torrent = 3_bit; + constexpr static swarm_test_t no_storage = 4_bit; +}; void setup_swarm(int num_nodes - , swarm_test type + , swarm_test_t type , std::function new_session , std::function add_torrent , std::function on_alert , std::function terminate); void setup_swarm(int num_nodes - , swarm_test type + , swarm_test_t type , sim::simulation& sim , std::function new_session , std::function add_torrent @@ -56,7 +67,7 @@ void setup_swarm(int num_nodes , std::function terminate); void setup_swarm(int num_nodes - , swarm_test type + , swarm_test_t type , sim::simulation& sim , lt::settings_pack const& default_settings , lt::add_torrent_params const& default_add_torrent @@ -66,7 +77,7 @@ void setup_swarm(int num_nodes , std::function terminate); void setup_swarm(int num_nodes - , swarm_test type + , swarm_test_t type , sim::simulation& sim , lt::settings_pack const& default_settings , lt::add_torrent_params const& default_add_torrent diff --git a/simulation/test_swarm.cpp b/simulation/test_swarm.cpp index 39db14dac..9c51717bc 100644 --- a/simulation/test_swarm.cpp +++ b/simulation/test_swarm.cpp @@ -248,7 +248,7 @@ TORRENT_TEST(utp_only) }); } -void test_stop_start_download(swarm_test type, bool graceful) +void test_stop_start_download(swarm_test_t type, bool graceful) { bool paused_once = false; bool resumed = false; @@ -285,7 +285,7 @@ void test_stop_start_download(swarm_test type, bool graceful) if (paused_once == false) { auto st = get_status(ses); - const bool limit_reached = (type == swarm_test::download) + const bool limit_reached = (type & swarm_test::download) ? st.total_wanted_done > st.total_wanted / 2 : st.total_payload_upload >= 3 * 16 * 1024; @@ -300,13 +300,13 @@ void test_stop_start_download(swarm_test type, bool graceful) std::printf("tick: %d\n", ticks); - const int timeout = type == swarm_test::download ? 21 : 100; + const int timeout = (type & swarm_test::download) ? 
diff --git a/simulation/test_swarm.cpp b/simulation/test_swarm.cpp
index 39db14dac..9c51717bc 100644
--- a/simulation/test_swarm.cpp
+++ b/simulation/test_swarm.cpp
@@ -248,7 +248,7 @@ TORRENT_TEST(utp_only)
 	});
 }
 
-void test_stop_start_download(swarm_test type, bool graceful)
+void test_stop_start_download(swarm_test_t type, bool graceful)
 {
 	bool paused_once = false;
 	bool resumed = false;
@@ -285,7 +285,7 @@ void test_stop_start_download(swarm_test type, bool graceful)
 			if (paused_once == false)
 			{
 				auto st = get_status(ses);
-				const bool limit_reached = (type == swarm_test::download)
+				const bool limit_reached = (type & swarm_test::download)
 					? st.total_wanted_done > st.total_wanted / 2
 					: st.total_payload_upload >= 3 * 16 * 1024;
 
@@ -300,13 +300,13 @@ void test_stop_start_download(swarm_test type, bool graceful)
 
 			std::printf("tick: %d\n", ticks);
 
-			const int timeout = type == swarm_test::download ? 21 : 100;
+			const int timeout = (type & swarm_test::download) ? 21 : 100;
 			if (ticks > timeout)
 			{
 				TEST_ERROR("timeout");
 				return true;
 			}
-			if (type == swarm_test::upload) return false;
+			if (type & swarm_test::upload) return false;
 			if (!is_seed(ses)) return false;
 			std::printf("completed in %d ticks\n", ticks);
 			return true;

diff --git a/simulation/test_tracker.cpp b/simulation/test_tracker.cpp
index b72945946..db12027d7 100644
--- a/simulation/test_tracker.cpp
+++ b/simulation/test_tracker.cpp
@@ -152,7 +152,7 @@ void test_interval(int interval)
 }
 
 template
-std::vector test_event(swarm_test const type
+std::vector test_event(swarm_test_t const type
 	, AddTorrent add_torrent
 	, OnAlert on_alert)
 {
@@ -246,7 +246,7 @@ TORRENT_TEST(event_completed_downloading_replace_trackers)
 
 TORRENT_TEST(event_completed_seeding)
 {
-	auto const announces = test_event(swarm_test::upload_no_auto_stop
+	auto const announces = test_event(swarm_test::upload | swarm_test::no_auto_stop
 		, [](lt::add_torrent_params& params) {
 			params.trackers.push_back("http://2.2.2.2:8080/announce");
 		}
@@ -262,7 +262,7 @@ TORRENT_TEST(event_completed_seeding)
 
 TORRENT_TEST(event_completed_seeding_replace_trackers)
 {
-	auto const announces = test_event(swarm_test::upload_no_auto_stop
+	auto const announces = test_event(swarm_test::upload | swarm_test::no_auto_stop
 		, [](lt::add_torrent_params& params) {}
 		, [&](lt::alert const* a, lt::session&) {
 			if (auto const* at = alert_cast(a))
diff --git a/src/choker.cpp b/src/choker.cpp
index 3af09ac43..d54e5ae78 100644
--- a/src/choker.cpp
+++ b/src/choker.cpp
@@ -307,8 +307,8 @@ namespace {
 	// intention is to not spread upload bandwidth too thin, but also to not
 	// unchoke few enough peers to not be able to saturate the up-link.
 	// this is done by traversing the peers sorted by our upload rate to
-	// them in decreasing rates. For each peer we increase our threshold
-	// by 1 kB/s. The first peer we get to whom we upload slower than
+	// them in decreasing rates. For each peer we increase the threshold by
+	// 2 kiB/s. The first peer we get to whom we upload slower than
 	// the threshold, we stop and that's the number of unchoke slots we have.
 	if (sett.get_int(settings_pack::choking_algorithm)
 		== settings_pack::rate_based_choker)
@@ -317,28 +317,23 @@
 		// it purely based on the current state of our peers.
 		upload_slots = 0;
 
-		// TODO: use an incremental partial_sort() here. We don't need
-		// to sort the entire list
+		int rate_threshold = sett.get_int(settings_pack::rate_choker_initial_threshold);
 
-		// TODO: make the comparison function a free function and move it
-		// into this cpp file
 		std::sort(peers.begin(), peers.end()
 			, std::bind(&upload_rate_compare, _1, _2));
 
-		// TODO: make configurable
-		int rate_threshold = 1024;
-
-		for (auto const p : peers)
+		for (auto const* p : peers)
 		{
 			int const rate = int(p->uploaded_in_last_round()
 				* 1000 / total_milliseconds(unchoke_interval));
 
+			// always have at least 1 unchoke slot
 			if (rate < rate_threshold) break;
 
 			++upload_slots;
 
 			// TODO: make configurable
-			rate_threshold += 1024;
+			rate_threshold += 2048;
 		}
 		++upload_slots;
 	}

diff --git a/src/session_impl.cpp b/src/session_impl.cpp
index 60c9cef3f..677ba81f1 100644
--- a/src/session_impl.cpp
+++ b/src/session_impl.cpp
@@ -4237,11 +4237,9 @@ namespace {
 		{
 			session_log("RECALCULATE UNCHOKE SLOTS: [ peers: %d "
 				"eligible-peers: %d"
-				" max_upload_rate: %d"
 				" allowed-slots: %d ]"
 				, int(m_connections.size())
 				, int(peers.size())
-				, max_upload_rate
 				, allowed_upload_slots);
 		}
 #endif

diff --git a/src/settings_pack.cpp b/src/settings_pack.cpp
index 7740dc94f..fe15b541c 100644
--- a/src/settings_pack.cpp
+++ b/src/settings_pack.cpp
@@ -350,6 +350,7 @@ constexpr int CLOSE_FILE_INTERVAL = 0;
 		SET(max_web_seed_connections, 3, nullptr),
 		SET(resolver_cache_timeout, 1200, &session_impl::update_resolver_cache_timeout),
 		SET(send_not_sent_low_watermark, 16384, nullptr),
+		SET(rate_choker_initial_threshold, 1024, nullptr),
 		SET(upnp_lease_duration, 3600, nullptr),
 	}});
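Taken together, the choker.cpp and settings_pack.cpp hunks mean that, with the default rate_choker_initial_threshold of 1024, the n-th unchoke slot opens only if the n-th fastest peer is being uploaded to at 1024 + (n - 1) * 2048 bytes/s or more: 1 kiB/s for the first slot, 3 kiB/s for the second, roughly 19 kiB/s for the tenth (plus the one extra slot the final ++upload_slots always adds). Under the old code the step was 1024, so the tenth slot needed only about 10 kiB/s; the larger step is what makes each additional slot progressively harder to open.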