fix recent settings_pack ABI regression (#1752)

Arvid Norberg 2017-02-24 08:06:59 -05:00 committed by GitHub
parent 3b359abc14
commit 3e83c4119f
4 changed files with 52 additions and 23 deletions

@@ -777,27 +777,9 @@ namespace libtorrent
// low number, like 5
urlseed_pipeline_size,
// The maximum request range of an url seed in bytes. This value
// defines the largest possible sequential web seed request. Default
// is 16 * 1024 * 1024. Lower values are possible but will be ignored
// if they are lower than the piece size.
// This value should be related to your download speed to prevent
// libtorrent from creating too many expensive http requests per
// second. You can select a value as high as you want but keep in mind
// that libtorrent can't create parallel requests if the first request
// has already selected the whole file.
// If you combine bittorrent seeds with web seeds and pick strategies
// like rarest first you may find your web seed requests split into
// smaller parts because we don't download already picked pieces
// twice.
urlseed_max_request_bytes,
// time to wait until a new retry of a web seed takes place
urlseed_wait_retry,
// time to wait until a new retry of a web seed name lookup
web_seed_name_lookup_retry,
// sets the upper limit on the total number of files this session will
// keep open. The reason why files are left open at all is that some
// anti virus software hooks on every file close, and scans the file
@@ -1594,6 +1576,24 @@ namespace libtorrent
// them in the cache is limited.
cache_size_volatile,
// The maximum request range of an url seed in bytes. This value
// defines the largest possible sequential web seed request. Default
// is 16 * 1024 * 1024. Lower values are possible but will be ignored
// if they are lower than the piece size.
// This value should be related to your download speed to prevent
// libtorrent from creating too many expensive http requests per
// second. You can select a value as high as you want but keep in mind
// that libtorrent can't create parallel requests if the first request
// has already selected the whole file.
// If you combine bittorrent seeds with web seeds and pick strategies
// like rarest first you may find your web seed requests split into
// smaller parts because we don't download already picked pieces
// twice.
urlseed_max_request_bytes,
// time to wait until a new retry of a web seed name lookup
web_seed_name_lookup_retry,
max_int_setting_internal
};

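The two hunks above move urlseed_max_request_bytes and web_seed_name_lookup_retry (new in this release) from the middle of the int-settings enum to the end, right before max_int_setting_internal. settings_pack identifies every setting by its enum value, so inserting entries in the middle shifts every later setting and breaks binary compatibility with code built against an older header, while appending leaves existing values untouched. A minimal sketch of that invariant, using hypothetical enum names rather than libtorrent's:

#include <cstdint>

// Hypothetical "old" enum, as shipped in an earlier release.
enum old_settings : std::uint16_t
{
    old_peer_timeout,     // 0
    old_urlseed_timeout,  // 1
    old_file_pool_size,   // 2
    old_max_internal      // 3 (sentinel, not a real setting)
};

// A later release with a new setting. Appending it just before the sentinel
// leaves every pre-existing value untouched; inserting it between
// old_urlseed_timeout and old_file_pool_size would have shifted
// old_file_pool_size from 2 to 3 and silently broken existing binaries.
enum new_settings : std::uint16_t
{
    new_peer_timeout,         // 0
    new_urlseed_timeout,      // 1
    new_file_pool_size,       // 2
    new_urlseed_max_request,  // 3 (appended; takes the old sentinel slot)
    new_max_internal          // 4 (sentinel moves down)
};

// The same invariant the settings_pack_abi test below spells out with TEST_EQUAL:
static_assert(int(old_peer_timeout) == int(new_peer_timeout), "ABI break");
static_assert(int(old_urlseed_timeout) == int(new_urlseed_timeout), "ABI break");
static_assert(int(old_file_pool_size) == int(new_file_pool_size), "ABI break");

int main() { return 0; }
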
@@ -234,9 +234,7 @@ namespace libtorrent
SET(peer_timeout, 120, 0),
SET(urlseed_timeout, 20, 0),
SET(urlseed_pipeline_size, 5, 0),
SET_NOPREV(urlseed_max_request_bytes, 16 * 1024 * 1024, 0),
SET(urlseed_wait_retry, 30, 0),
SET_NOPREV(web_seed_name_lookup_retry, 1800, 0),
SET(file_pool_size, 40, 0),
SET(max_failcount, 3, &session_impl::update_max_failcount),
SET(min_reconnect_time, 60, 0),
@@ -349,7 +347,9 @@ namespace libtorrent
SET_NOPREV(proxy_type, settings_pack::none, &session_impl::update_proxy),
SET_NOPREV(proxy_port, 0, &session_impl::update_proxy),
SET_NOPREV(i2p_port, 0, &session_impl::update_i2p_bridge),
SET_NOPREV(cache_size_volatile, 256, 0)
SET_NOPREV(cache_size_volatile, 256, 0),
SET_NOPREV(urlseed_max_request_bytes, 16 * 1024 * 1024, 0),
SET_NOPREV(web_seed_name_lookup_retry, 1800, 0),
};
#undef SET

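The defaults table built from the SET()/SET_NOPREV() macros is looked up by the setting's enum value, so its rows must stay in the same order as the enum; that is why the two entries are appended at the end of the int table here as well. A simplified, hypothetical sketch of that order dependency (not libtorrent's actual macros or table):

#include <cstddef>

// Hypothetical defaults table in the style of SET()/SET_NOPREV(): row i
// describes setting i, so rows must be listed in enum order and new
// settings must be appended, never inserted.
enum int_setting { peer_timeout, urlseed_timeout, urlseed_max_request_bytes, num_int_settings };

struct int_setting_entry { char const* name; int default_value; };

#define MY_SET(name, def) { #name, def }

int_setting_entry const int_settings[] = {
    MY_SET(peer_timeout, 120),
    MY_SET(urlseed_timeout, 20),
    // new rows go here, at the end, mirroring the enum
    MY_SET(urlseed_max_request_bytes, 16 * 1024 * 1024),
};
#undef MY_SET

// if the table and the enum ever fall out of step, fail at compile time
static_assert(sizeof(int_settings) / sizeof(int_settings[0]) == num_int_settings
    , "defaults table out of sync with enum");

// lookup is a plain array index, which is what makes the ordering matter
int default_for(int_setting const s) { return int_settings[s].default_value; }

int main() { return default_for(urlseed_timeout) == 20 ? 0 : 1; }
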
@@ -89,7 +89,8 @@ int print_failures()
fprintf(stderr, "\x1b[0m");
if (total_num_failures > 0)
fprintf(stderr, "\n\n\x1b[41m == %d TEST(S) FAILED ==\x1b[0m\n\n\n", _g_test_failures);
fprintf(stderr, "\n\n\x1b[41m == %d TEST(S) FAILED ==\x1b[0m\n\n\n"
, total_num_failures);
return total_num_failures;
}

@@ -196,5 +196,33 @@ TORRENT_TEST(duplicates)
TEST_EQUAL(p.get_str(settings_pack::peer_fingerprint), "hij");
}
TORRENT_TEST(settings_pack_abi)
{
// make sure enum values are preserved across libtorrent versions
// for ABI compatibility
// These values are only allowed to change across major versions
TEST_EQUAL(settings_pack::string_type_base, 0x0000);
TEST_EQUAL(settings_pack::int_type_base, 0x4000);
TEST_EQUAL(settings_pack::bool_type_base, 0x8000);
TEST_EQUAL(settings_pack::type_mask, 0xc000);
// strings
TEST_EQUAL(settings_pack::outgoing_interfaces, settings_pack::string_type_base + 4);
TEST_EQUAL(settings_pack::dht_bootstrap_nodes, settings_pack::string_type_base + 11);
// bool
TEST_EQUAL(settings_pack::lazy_bitfields, settings_pack::bool_type_base + 3);
TEST_EQUAL(settings_pack::use_read_cache, settings_pack::bool_type_base + 7);
TEST_EQUAL(settings_pack::proxy_tracker_connections, settings_pack::bool_type_base + 68);
// ints
TEST_EQUAL(settings_pack::max_suggest_pieces, settings_pack::int_type_base + 66);
TEST_EQUAL(settings_pack::connections_slack, settings_pack::int_type_base + 86);
TEST_EQUAL(settings_pack::aio_threads, settings_pack::int_type_base + 104);
TEST_EQUAL(settings_pack::max_http_recv_buffer_size, settings_pack::int_type_base + 115);
TEST_EQUAL(settings_pack::web_seed_name_lookup_retry, settings_pack::int_type_base + 128);
}
// TODO: load_pack_from_dict
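
The new settings_pack_abi test freezes a sample of enum values for each type (string, int, bool) so a regression like this one is caught by the test suite. The values matter outside the library too, because settings are often persisted or exchanged by their integer key. A sketch of a round-trip check an application could run, assuming the name_for_setting() and setting_by_name() helpers declared in settings_pack.hpp:

#include <libtorrent/settings_pack.hpp>
#include <cstdio>

// Sketch: a setting's numeric id should round-trip through its name. A key
// stored by an older build only stays meaningful if the enum values are
// frozen, which is what the ABI test above asserts.
int main()
{
    using namespace libtorrent;

    int const id = settings_pack::urlseed_max_request_bytes;
    char const* const name = name_for_setting(id);
    if (name == nullptr || setting_by_name(name) != id)
    {
        std::fprintf(stderr, "setting id/name round-trip failed\n");
        return 1;
    }
    std::printf("%s -> %d\n", name, id);
    return 0;
}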