add setting urlseed_max_request_bytes to handle large web seed requests #1405 (#1423)

add setting urlseed_max_request_bytes to handle large web seed requests. change default for urlseed max request to 16 MiB
Falcosc 2016-12-18 21:58:14 +01:00 committed by Arvid Norberg
parent d62b980278
commit 8adcbdd32b
5 changed files with 27 additions and 10 deletions

ChangeLog

@@ -1,3 +1,4 @@
+* add setting urlseed_max_request_bytes to handle large web seed requests
 * fix python build with CC/CXX environment
 * add trackers from add_torrent_params/magnet links to separate tiers
 * fix resumedata check issue with files with priority 0

include/libtorrent/peer_connection.hpp

@@ -365,7 +365,7 @@ namespace libtorrent
 		int picker_options() const;
 
 		void prefer_contiguous_blocks(int num)
-		{ m_prefer_contiguous_blocks = (std::min)(num, 255); }
+		{ m_prefer_contiguous_blocks = num; }
 
 		bool request_large_blocks() const
 		{ return m_request_large_blocks; }
@@ -1134,7 +1134,7 @@ namespace libtorrent
 		// if it is 0, the download rate limit setting
 		// will be used to determine if whole pieces
 		// are preferred.
-		boost::uint8_t m_prefer_contiguous_blocks;
+		int m_prefer_contiguous_blocks;
 
 		// this is the number of times this peer has had
 		// a request rejected because of a disk I/O failure.

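Side note (not part of the commit): the member widens from boost::uint8_t to int because the old type capped the contiguous-request window at 255 blocks. With libtorrent's 16 KiB block size that is just under 4 MiB, while the new 16 MiB default request size corresponds to 1024 blocks. A minimal standalone sketch of the arithmetic:

#include <iostream>

int main()
{
	int const block_size = 16 * 1024;          // libtorrent's block size
	int const new_default = 16 * 1024 * 1024;  // new urlseed_max_request_bytes default
	// the old member was boost::uint8_t, so at most 255 blocks (~4 MiB) could be stored
	std::cout << new_default / block_size << " blocks needed\n";  // prints 1024
}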
include/libtorrent/settings_pack.hpp

@@ -767,6 +767,21 @@ namespace libtorrent
 		// low number, like 5
 		urlseed_pipeline_size,
 
+		// The maximum request range of a URL seed in bytes. This value
+		// defines the largest possible sequential web seed request. Default
+		// is 16 * 1024 * 1024. Lower values are possible but will be ignored
+		// if they are lower than the piece size.
+		// This value should be related to your download speed to prevent
+		// libtorrent from creating too many expensive HTTP requests per
+		// second. You can select a value as high as you want, but keep in
+		// mind that libtorrent can't create parallel requests if the first
+		// request already covers the whole file.
+		// If you combine bittorrent seeds with web seeds and use picking
+		// strategies like rarest-first, you may find your web seed requests
+		// split into smaller parts, because already-picked pieces are not
+		// downloaded twice.
+		urlseed_max_request_bytes,
+
 		// time to wait until a new retry of a web seed takes place
 		urlseed_wait_retry,

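For illustration (not part of the commit), this is roughly how a client would raise the limit through the settings_pack API; the session-construction style here is an assumption based on the libtorrent 1.1 interface:

#include <libtorrent/session.hpp>
#include <libtorrent/settings_pack.hpp>

namespace lt = libtorrent;

int main()
{
	lt::settings_pack pack;
	// allow a single sequential web seed request of up to 64 MiB;
	// values below the piece size are ignored, per the docs above
	pack.set_int(lt::settings_pack::urlseed_max_request_bytes, 64 * 1024 * 1024);
	lt::session ses(pack);
	// ... add torrents with web seeds as usual
}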
src/settings_pack.cpp

@@ -229,6 +229,7 @@ namespace libtorrent
 		SET(peer_timeout, 120, 0),
 		SET(urlseed_timeout, 20, 0),
 		SET(urlseed_pipeline_size, 5, 0),
+		SET_NOPREV(urlseed_max_request_bytes, 16 * 1024 * 1024, 0),
 		SET(urlseed_wait_retry, 30, 0),
 		SET(file_pool_size, 40, 0),
 		SET(max_failcount, 3, &session_impl::update_max_failcount),

src/web_peer_connection.cpp

@@ -86,16 +86,16 @@ web_peer_connection::web_peer_connection(peer_connection_args const& pack
 	shared_ptr<torrent> tor = pack.tor.lock();
 	TORRENT_ASSERT(tor);
 
-	// we always prefer downloading 1 MiB chunks
-	// from web seeds, or whole pieces if pieces
-	// are larger than a MiB
-	int preferred_size = 1024 * 1024;
+	// if the web server is known not to support keep-alive, request 4 MiB,
+	// but at least the piece size, to prevent block-based requests
+	int const min_size = std::max((web.supports_keepalive ? 1 : 4) * 1024 * 1024,
+		tor->torrent_file().piece_length());
 
-	// if the web server is known not to support keep-alive.
-	// request even larger blocks at a time
-	if (!web.supports_keepalive) preferred_size *= 4;
+	// we prefer downloading large chunks from web seeds,
+	// but still want to be able to split requests
+	int const preferred_size = std::max(min_size, m_settings.get_int(settings_pack::urlseed_max_request_bytes));
 
-	prefer_contiguous_blocks((std::max)(preferred_size / tor->block_size(), 1));
+	prefer_contiguous_blocks(preferred_size / tor->block_size());
 
 	boost::shared_ptr<torrent> t = associated_torrent().lock();
 	bool const single_file_request = t->torrent_file().num_files() == 1;
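To make the effect of the new sizing logic concrete, here is a standalone sketch with assumed example inputs (a torrent with 4 MiB pieces on a server without keep-alive); the computation mirrors the committed code above:

#include <algorithm>
#include <iostream>

int main()
{
	// assumed example inputs
	bool const supports_keepalive = false;           // server closes after each request
	int const piece_length = 4 * 1024 * 1024;        // 4 MiB pieces
	int const block_size = 16 * 1024;                // libtorrent's block size
	int const max_request_bytes = 16 * 1024 * 1024;  // the new default

	// same computation as the committed code
	int const min_size = std::max((supports_keepalive ? 1 : 4) * 1024 * 1024,
		piece_length);
	int const preferred_size = std::max(min_size, max_request_bytes);

	// 16 MiB / 16 KiB = 1024 contiguous blocks per web seed request
	std::cout << preferred_size / block_size << "\n";
}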