add setting urlseed_max_request_bytes to handle large web seed requests. change default for urlseed max request to 16 MiB
commit 8adcbdd32b
parent d62b980278
@@ -1,3 +1,4 @@
+	* add setting urlseed_max_request_bytes to handle large web seed requests
 	* fix python build with CC/CXX environment
 	* add trackers from add_torrent_params/magnet links to separate tiers
 	* fix resumedata check issue with files with priority 0
@@ -365,7 +365,7 @@ namespace libtorrent
 		int picker_options() const;
 
 		void prefer_contiguous_blocks(int num)
-		{ m_prefer_contiguous_blocks = (std::min)(num, 255); }
+		{ m_prefer_contiguous_blocks = num; }
 
 		bool request_large_blocks() const
 		{ return m_request_large_blocks; }
@@ -1134,7 +1134,7 @@ namespace libtorrent
 		// if it is 0, the download rate limit setting
 		// will be used to determine if whole pieces
 		// are preferred.
-		boost::uint8_t m_prefer_contiguous_blocks;
+		int m_prefer_contiguous_blocks;
 
 		// this is the number of times this peer has had
 		// a request rejected because of a disk I/O failure.
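The switch from boost::uint8_t to int matters because the new 16 MiB default maps to far more than the 255 contiguous blocks the old field could hold. A standalone sketch of the arithmetic, assuming the common 16 KiB block size (the actual block size depends on the torrent, so the numbers are illustrative only):

```cpp
#include <algorithm>
#include <cassert>
#include <cstdint>

int main()
{
	// a 16 MiB web seed request split into 16 KiB blocks -> 1024 blocks
	int const num = (16 * 1024 * 1024) / (16 * 1024);

	// old storage: clamped and narrowed to uint8_t, losing most of the request size
	std::uint8_t const old_value = static_cast<std::uint8_t>((std::min)(num, 255));

	// new storage: plain int, keeps the full block count
	int const new_value = num;

	assert(old_value == 255 && new_value == 1024);
}
```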
@@ -767,6 +767,21 @@ namespace libtorrent
 		// low number, like 5
 		urlseed_pipeline_size,
 
+		// The maximum request range of a URL seed in bytes. This value
+		// defines the largest possible sequential web seed request. Default
+		// is 16 * 1024 * 1024. Lower values are possible but will be ignored
+		// if they are lower than the piece size.
+		// This value should be related to your download speed to prevent
+		// libtorrent from creating too many expensive HTTP requests per
+		// second. You can select a value as high as you want, but keep in
+		// mind that libtorrent can't create parallel requests if the first
+		// request already selected the whole file.
+		// If you combine bittorrent seeds with web seeds and pick strategies
+		// like rarest first, you may find your web seed requests split into
+		// smaller parts because we don't download already-picked pieces
+		// twice.
+		urlseed_max_request_bytes,
+
 		// time to wait until a new retry of a web seed takes place
 		urlseed_wait_retry,
 
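To show how the new setting is meant to be used from client code, here is a minimal sketch; the 64 MiB value and the lt namespace alias are illustrative and not part of this commit:

```cpp
#include <libtorrent/session.hpp>
#include <libtorrent/settings_pack.hpp>

namespace lt = libtorrent;

int main()
{
	lt::settings_pack pack;
	// allow web seed requests of up to 64 MiB at a time (illustrative value);
	// anything below the torrent's piece size is bumped up to the piece size
	pack.set_int(lt::settings_pack::urlseed_max_request_bytes, 64 * 1024 * 1024);

	lt::session ses(pack);
	// ... add torrents with web seeds as usual
}
```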
@@ -229,6 +229,7 @@ namespace libtorrent
 		SET(peer_timeout, 120, 0),
 		SET(urlseed_timeout, 20, 0),
 		SET(urlseed_pipeline_size, 5, 0),
+		SET_NOPREV(urlseed_max_request_bytes, 16 * 1024 * 1024, 0),
 		SET(urlseed_wait_retry, 30, 0),
 		SET(file_pool_size, 40, 0),
 		SET(max_failcount, 3, &session_impl::update_max_failcount),
@@ -86,16 +86,16 @@ web_peer_connection::web_peer_connection(peer_connection_args const& pack
 	shared_ptr<torrent> tor = pack.tor.lock();
 	TORRENT_ASSERT(tor);
 
-	// we always prefer downloading 1 MiB chunks
-	// from web seeds, or whole pieces if pieces
-	// are larger than a MiB
-	int preferred_size = 1024 * 1024;
+	// if the web server is known not to support keep-alive, request 4 MiB,
+	// but we want at least the piece size to prevent block-based requests
+	int const min_size = std::max((web.supports_keepalive ? 1 : 4) * 1024 * 1024,
+		tor->torrent_file().piece_length());
 
-	// if the web server is known not to support keep-alive.
-	// request even larger blocks at a time
-	if (!web.supports_keepalive) preferred_size *= 4;
+	// we prefer downloading large chunks from web seeds,
+	// but still want to be able to split requests
+	int const preferred_size = std::max(min_size, m_settings.get_int(settings_pack::urlseed_max_request_bytes));
 
-	prefer_contiguous_blocks((std::max)(preferred_size / tor->block_size(), 1));
+	prefer_contiguous_blocks(preferred_size / tor->block_size());
 
 	boost::shared_ptr<torrent> t = associated_torrent().lock();
 	bool const single_file_request = t->torrent_file().num_files() == 1;
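To make the new request-size computation concrete, here is a self-contained sketch of the same logic; the piece length, block size, and helper name are hypothetical, and only the formula mirrors the patched constructor:

```cpp
#include <algorithm>
#include <cstdio>

// standalone restatement of the request-size logic above
int preferred_request_size(bool supports_keepalive, int piece_length
	, int urlseed_max_request_bytes)
{
	// at least 1 MiB (4 MiB without keep-alive), and never below the piece size
	int const min_size = std::max((supports_keepalive ? 1 : 4) * 1024 * 1024
		, piece_length);
	return std::max(min_size, urlseed_max_request_bytes);
}

int main()
{
	int const piece_length = 4 * 1024 * 1024; // hypothetical 4 MiB pieces
	int const block_size = 16 * 1024;         // hypothetical 16 KiB blocks

	// default setting: 16 MiB requests -> 1024 contiguous blocks per request
	std::printf("%d blocks\n"
		, preferred_request_size(true, piece_length, 16 * 1024 * 1024) / block_size);

	// a setting below the piece size is bumped up to the piece size
	std::printf("%d blocks\n"
		, preferred_request_size(true, piece_length, 1 * 1024 * 1024) / block_size);
}
```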