fix support for web servers not supporting keepalive
parent 6d95b48afa
commit 2b7bef0f7b
@@ -1,5 +1,6 @@
 1.0 release

+	* fix support for web servers not supporting keepalive
 	* support storing save_path in resume data
 	* don't use full allocation on network drives (on windows)
 	* added clear_piece_deadlines() to remove all piece deadlines

@@ -291,6 +291,12 @@ namespace libtorrent
 		// it's also used to hold the peer_connection
 		// pointer, when the web seed is connected
 		policy::ipv4_peer peer_info;
+
+		// if the web server doesn't support keepalive or a block request was
+		// interrupted, the block received so far is kept here for the next
+		// connection to pick up
+		peer_request restart_request;
+		std::vector<char> restart_piece;
 	};

 #ifndef BOOST_NO_EXCEPTIONS

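The two members added above carry the pattern the comment describes: when a connection dies mid-block, the partially received data is parked on the web seed entry so the next connection can resume instead of re-downloading. Below is a rough, self-contained sketch of that hand-off; the names (pending_block, seed_state, save_partial, resume_partial) are hypothetical stand-ins used only to illustrate the idea, not libtorrent's interface.

// sketch only: hypothetical names, not libtorrent's real types
#include <cstdio>
#include <vector>

struct pending_block
{
	int piece;   // which piece the data belongs to
	int start;   // byte offset of the block within the piece
	int length;  // full length of the requested block
};

struct seed_state
{
	// block request that was cut short when the server closed the socket
	pending_block restart_request = {-1, 0, 0};
	// the bytes of that block received so far
	std::vector<char> restart_piece;
};

// called when a connection is torn down mid-block
void save_partial(seed_state& s, pending_block const& req, std::vector<char>& received)
{
	s.restart_request = req;
	s.restart_piece.swap(received);   // keep the data for the next connection
}

// called when a new connection is about to request the same block again
void resume_partial(seed_state& s, pending_block& req, std::vector<char>& buffer)
{
	if (s.restart_request.piece != req.piece) return;
	buffer.swap(s.restart_piece);      // pick up the saved bytes
	req.start += int(buffer.size());   // only ask for the remainder
	req.length -= int(buffer.size());
	s.restart_request.piece = -1;      // mark the restart slot as consumed
}

int main()
{
	seed_state s;
	pending_block req = {7, 0, 16384};
	std::vector<char> received(4096, 'x');   // 4 kiB arrived before the close
	save_partial(s, req, received);

	std::vector<char> buffer;
	pending_block retry = {7, 0, 16384};
	resume_partial(s, retry, buffer);
	// retry now asks only for bytes 4096..16383 of the block
	std::printf("resume at offset %d, %d bytes left\n", retry.start, retry.length);
}

The detail mirrored from the real code is that the resumed request is shrunk by the number of bytes already buffered, so only the remainder is fetched from the server.
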
@@ -86,6 +86,8 @@ namespace libtorrent
 			, tcp::endpoint const& remote
 			, web_seed_entry& web);

+		virtual void on_connected();
+
 		virtual int type() const { return peer_connection::url_seed_connection; }

 		// called from the main loop when this connection has any

@@ -98,7 +100,7 @@ namespace libtorrent
 		virtual void get_specific_peer_info(peer_info& p) const;
 		virtual void disconnect(error_code const& ec, int error = 0);

-		void write_request(peer_request const& r);
+		virtual void write_request(peer_request const& r);

 		virtual bool received_invalid_data(int index, bool single_peer);

@@ -113,6 +115,8 @@ namespace libtorrent
 		// will be invalid.
 		boost::optional<piece_block_progress> downloading_piece_progress() const;

+		void handle_padfile(buffer::const_interval& recv_buffer);
+
 		// this has one entry per http-request
 		// (might be more than the bt requests)
 		std::deque<int> m_file_requests;

@@ -1405,7 +1405,7 @@ namespace libtorrent
 		if (index >= int(m_have_piece.size()))
 		{
 #if defined TORRENT_VERBOSE_LOGGING || defined TORRENT_ERROR_LOGGING
-			peer_log("<== INVALID_ALLOWED_FAST [ %d | s: %d ]"
+			peer_log("<== INVALID_SUGGEST [ %d | s: %d ]"
 				, index, int(m_have_piece.size()));
 #endif
 			return;

@@ -8014,6 +8014,8 @@ namespace libtorrent

 	void torrent::maybe_connect_web_seeds()
 	{
+		if (m_abort) return;
+
 		// if we have everything we want we don't need to connect to any web-seed
 		if (!is_finished() && !m_web_seeds.empty() && m_files_checked
 			&& int(m_connections.size()) < m_max_connections

@@ -587,6 +587,7 @@ namespace libtorrent
 		, peer_info(tcp::endpoint(), true, 0)
 	{
 		peer_info.web_seed = true;
+		restart_request.piece = -1;
 	}

 	torrent_info::torrent_info(torrent_info const& t, int flags)

@@ -106,9 +106,57 @@ namespace libtorrent
 #endif
 	}

+	void web_peer_connection::on_connected()
+	{
+		incoming_have_all();
+		if (m_web.restart_request.piece != -1)
+		{
+			// increase the chances of requesting the block
+			// we have partial data for already, to finish it
+			incoming_suggest(m_web.restart_request.piece);
+		}
+		web_connection_base::on_connected();
+	}
+
 	void web_peer_connection::disconnect(error_code const& ec, int error)
 	{
+		if (is_disconnecting()) return;
+
 		boost::shared_ptr<torrent> t = associated_torrent().lock();
+
+		if (!m_requests.empty() && !m_file_requests.empty()
+			&& !m_piece.empty())
+		{
+#if 0
+			std::cerr << this << " SAVE-RESTART-DATA: data: " << m_piece.size()
+				<< " req: " << m_requests.front().piece
+				<< " off: " << m_requests.front().start
+				<< std::endl;
+#endif
+			m_web.restart_request = m_requests.front();
+			if (!m_web.restart_piece.empty())
+			{
+				// we're about to replace a different restart piece
+				// buffer. So it was wasted download
+				if (t) t->add_redundant_bytes(m_web.restart_piece.size()
+					, torrent::piece_closing);
+			}
+			m_web.restart_piece.swap(m_piece);
+
+			// we have to do this to not count this data as redundant. The
+			// upper layer will call downloading_piece_progress and assume
+			// it's all wasted download. Since we're saving it here, it isn't.
+			m_requests.clear();
+			m_block_pos = 0;
+		}
+
+		if (!m_web.supports_keepalive && error == 0)
+		{
+			// if the web server doesn't support keepalive and we were
+			// disconnected as a graceful EOF, reconnect right away
+			if (t) t->session().m_io_service.post(
+				boost::bind(&torrent::maybe_connect_web_seeds, t));
+		}
 		peer_connection::disconnect(ec, error);
 		if (t) t->disconnect_web_seed(this);
 	}

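The keepalive branch above posts torrent::maybe_connect_web_seeds back onto the io_service, so a server that closes the socket after every request gets a fresh connection immediately instead of waiting for the normal reconnect delay. Here is a minimal, stand-alone sketch of that post-on-graceful-EOF pattern using plain Boost.Asio; the downloader type and its members are hypothetical stand-ins, not libtorrent's classes.

// sketch only: the general "reconnect right away on graceful close" pattern
#include <boost/asio/io_service.hpp>
#include <boost/bind.hpp>
#include <cstdio>

struct downloader
{
	bool supports_keepalive;

	void maybe_connect_web_seeds()
	{
		std::printf("opening a fresh connection to the web seed\n");
	}

	void on_disconnect(boost::asio::io_service& ios, int error)
	{
		// a graceful EOF (error == 0) from a server without keepalive is
		// expected after every request; schedule an immediate reconnect
		// instead of waiting for a retry timer
		if (!supports_keepalive && error == 0)
			ios.post(boost::bind(&downloader::maybe_connect_web_seeds, this));
	}
};

int main()
{
	boost::asio::io_service ios;
	downloader d = { false };
	d.on_disconnect(ios, 0);
	ios.run();   // executes the posted reconnect handler
}

Posting the call, rather than reconnecting inline, defers the reconnect to the next event-loop iteration, after the old connection has finished tearing down.
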
@@ -183,6 +231,7 @@ namespace libtorrent
 		}

 		torrent_info const& info = t->torrent_file();
+		peer_request req = r;

 		std::string request;
 		request.reserve(400);

@@ -199,6 +248,32 @@ namespace libtorrent
 			pr.piece = r.piece + request_offset / piece_size;
 			m_requests.push_back(pr);
 			size -= pr.length;
+			if (m_web.restart_request == m_requests.front())
+			{
+				m_piece.swap(m_web.restart_piece);
+				m_block_pos += m_piece.size();
+				peer_request& front = m_requests.front();
+				TORRENT_ASSERT(front.length > m_piece.size());
+
+#if 0
+				std::cerr << this << " RESTART-DATA: data: " << m_piece.size()
+					<< " req: ( " << front.piece << ", " << front.start
+					<< ", " << (front.start + front.length - 1) << ")"
+					<< std::endl;
+#endif
+
+				req.start += m_piece.size();
+				req.length -= m_piece.size();
+
+				// just to keep the accounting straight for the upper layer.
+				// it doesn't know we just re-wrote the request
+				incoming_piece_fragment(m_piece.size());
+				m_web.restart_request.piece = -1;
+			}
+
+#if 0
+			std::cerr << this << " REQ: p: " << pr.piece << " " << pr.start << std::endl;
+#endif
 		}

 		proxy_settings const& ps = m_ses.proxy();

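The rewritten request above relies on the byte-range arithmetic used throughout this file: a block at (piece, start) lives at absolute offset piece * piece_length + start, and since HTTP ranges are inclusive the last byte is offset + length - 1. After the saved restart data is swapped back in, req.start grows and req.length shrinks by m_piece.size(), which is exactly what the Range header built in the next hunk encodes. A small illustrative program follows; the function name and values are made up for the example.

// sketch only: illustrates the byte-range arithmetic described above
#include <cstdint>
#include <cstdio>
#include <string>

std::string make_range_header(std::int64_t piece, std::int64_t piece_length
	, std::int64_t start, std::int64_t length)
{
	std::int64_t const first = piece * piece_length + start;
	std::int64_t const last = first + length - 1;   // HTTP ranges are inclusive
	char buf[100];
	std::snprintf(buf, sizeof(buf), "Range: bytes=%lld-%lld"
		, (long long)first, (long long)last);
	return buf;
}

int main()
{
	// e.g. resuming a 16 kiB block of piece 3 after 4 kiB were already buffered:
	// start moves from 0 to 4096 and length shrinks from 16384 to 12288
	std::printf("%s\n", make_range_header(3, 1 << 20, 4096, 12288).c_str());
}
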
@@ -214,17 +289,19 @@ namespace libtorrent
 		request += " HTTP/1.1\r\n";
 		add_headers(request, ps, using_proxy);
 		request += "\r\nRange: bytes=";
-		request += to_string(size_type(r.piece) * info.piece_length() + r.start).elems;
+		request += to_string(size_type(req.piece) * info.piece_length()
+			+ req.start).elems;
 		request += "-";
-		request += to_string(size_type(r.piece) * info.piece_length() + r.start + r.length - 1).elems;
+		request += to_string(size_type(req.piece) * info.piece_length()
+			+ req.start + req.length - 1).elems;
 		request += "\r\n\r\n";
 		m_first_request = false;
 		m_file_requests.push_back(0);
 	}
 	else
 	{
-		std::vector<file_slice> files = info.orig_files().map_block(r.piece, r.start
-			, r.length);
+		std::vector<file_slice> files = info.orig_files().map_block(
+			req.piece, req.start, req.length);

 		for (std::vector<file_slice>::iterator i = files.begin();
 			i != files.end(); ++i)

@@ -235,7 +312,6 @@ namespace libtorrent
 				m_file_requests.push_back(f.file_index);
 				continue;
 			}
-
 			request += "GET ";
 			if (using_proxy)
 			{

@@ -268,6 +344,12 @@ namespace libtorrent
 			request += to_string(f.offset + f.size - 1).elems;
 			request += "\r\n\r\n";
 			m_first_request = false;
+
+#if 0
+			std::cerr << this << " SEND-REQUEST: f: " << f.file_index
+				<< " s: " << f.offset
+				<< " e: " << (f.offset + f.size - 1) << std::endl;
+#endif
 			TORRENT_ASSERT(f.file_index >= 0);
 			m_file_requests.push_back(f.file_index);
 		}

@@ -277,6 +359,13 @@ namespace libtorrent
 		peer_log("==> %s", request.c_str());
 #endif

+		// in case the first file on this series of requests is a padfile
+		// we need to handle it right now, and pretend that we got a response
+		// with zeros.
+		buffer::const_interval recv_buffer = receive_buffer();
+		handle_padfile(recv_buffer);
+		if (associated_torrent().expired()) return;
+
 		send_buffer(request.c_str(), request.size(), message_type_request);
 	}

@@ -543,7 +632,9 @@ namespace libtorrent
 		TORRENT_ASSERT(!m_file_requests.empty());
 		int file_index = m_file_requests.front();

-		// TODO: 2 create a mapping of file-index to redirection URLs. Use that to form URLs instead. Support to reconnect to a new server without destructing this peer_connection
+		// TODO: 2 create a mapping of file-index to redirection URLs. Use that to form
+		// URLs instead. Support to reconnect to a new server without destructing this
+		// peer_connection
 		torrent_info const& info = t->torrent_file();
 		std::string path = info.orig_files().file_path(file_index);
 #ifdef TORRENT_WINDOWS

@@ -729,11 +820,6 @@ namespace libtorrent
 			m_block_pos += payload_transferred;
 			if (m_range_pos > range_end - range_start) m_range_pos = range_end - range_start;

-#if 0
-			std::cerr << "REQUESTS: m_requests: " << m_requests.size()
-				<< " file_requests: " << m_file_requests.size() << std::endl;
-#endif
-
 			int file_index = m_file_requests.front();
 			peer_request in_range = info.orig_files().map_file(file_index, range_start
 				, int(range_end - range_start));

@@ -744,13 +830,6 @@ namespace libtorrent
 			size_type re = rs + in_range.length;
 			// file start
 			size_type fs = size_type(front_request.piece) * info.piece_length() + front_request.start;
-#if 0
-			size_type fe = fs + front_request.length;
-
-			std::cerr << "RANGE: r = (" << rs << ", " << re << " ) "
-				"f = (" << fs << ", " << fe << ") "
-				"file_index = " << file_index << " received_body = " << m_received_body << std::endl;
-#endif

 			// the http response body consists of 3 parts
 			// 1. the middle of a block or the ending of a block

@@ -767,6 +846,7 @@ namespace libtorrent
 				m_statistics.received_bytes(0, bytes_transferred);
 				// this means the end of the incoming request ends _before_ the
 				// first expected byte (fs + m_piece.size())
+
 				disconnect(errors::invalid_range, 2);
 				return;
 			}

@@ -775,7 +855,8 @@ namespace libtorrent
 			// fits in the range) we should not start a partial piece, since we soon
 			// will receive enough to call incoming_piece() and pass the read buffer
 			// directly (in the next loop below).
-			if (range_overlaps_request && !range_contains(in_range, front_request, info.piece_length()))
+			if (range_overlaps_request
+				&& !range_contains(in_range, front_request, info.piece_length()))
 			{
 				// the start of the next block to receive is stored
 				// in m_piece. We need to append the rest of that

@@ -822,6 +903,7 @@ namespace libtorrent

 				incoming_piece_fragment(r.length);
 				incoming_piece(r, recv_buffer.begin);

 				m_requests.pop_front();
 				if (associated_torrent().expired()) return;
+				TORRENT_ASSERT(m_block_pos >= r.length);

@@ -890,31 +972,11 @@ namespace libtorrent
 				m_chunk_pos = 0;
 				m_partial_chunk_header = 0;

-				torrent_info const& info = t->torrent_file();
-				while (!m_file_requests.empty()
-					&& info.orig_files().pad_file_at(m_file_requests.front()))
-				{
-					// the next file is a pad file. We didn't actually send
-					// a request for this since it most likely doesn't exist on
-					// the web server anyway. Just pretend that we received a
-					// bunch of zeroes here and pop it again
-					int file_index = m_file_requests.front();
-					m_file_requests.pop_front();
-					size_type file_size = info.orig_files().file_size(file_index);
-					TORRENT_ASSERT(m_block_pos < front_request.length);
-					int pad_size = int((std::min)(file_size, size_type(front_request.length - m_block_pos)));
-
-					// insert zeroes to represent the pad file
-					m_piece.resize(m_piece.size() + size_t(pad_size), 0);
-					m_block_pos += pad_size;
-					incoming_piece_fragment(pad_size);
-
-					if (maybe_harvest_block())
-						recv_buffer = receive_buffer();
-					if (associated_torrent().expired()) return;
-				}
+				handle_padfile(recv_buffer);
+				if (associated_torrent().expired()) return;
 				continue;
 			}

 			if (bytes_transferred == 0 || payload_transferred == 0)
 			{
 #ifdef TORRENT_DEBUG

@@ -940,5 +1002,37 @@ namespace libtorrent
 		p.connection_type = peer_info::web_seed;
 	}

+	void web_peer_connection::handle_padfile(buffer::const_interval& recv_buffer)
+	{
+		boost::shared_ptr<torrent> t = associated_torrent().lock();
+		TORRENT_ASSERT(t);
+		torrent_info const& info = t->torrent_file();
+
+		while (!m_file_requests.empty()
+			&& info.orig_files().pad_file_at(m_file_requests.front()))
+		{
+			// the next file is a pad file. We didn't actually send
+			// a request for this since it most likely doesn't exist on
+			// the web server anyway. Just pretend that we received a
+			// bunch of zeroes here and pop it again
+			int file_index = m_file_requests.front();
+			m_file_requests.pop_front();
+			size_type file_size = info.orig_files().file_size(file_index);
+
+			peer_request front_request = m_requests.front();
+
+			TORRENT_ASSERT(m_block_pos < front_request.length);
+			int pad_size = int((std::min)(file_size, size_type(front_request.length - m_block_pos)));
+
+			// insert zeroes to represent the pad file
+			m_piece.resize(m_piece.size() + size_t(pad_size), 0);
+			m_block_pos += pad_size;
+			incoming_piece_fragment(pad_size);
+
+			if (maybe_harvest_block())
+				recv_buffer = receive_buffer();
+			if (associated_torrent().expired()) return;
+		}
+	}
 }

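handle_padfile() above never sends a request for pad files; it appends zero bytes to the block being assembled, capped so the padding never runs past the end of the current block, and pops the pad file from the request queue. A tiny stand-alone sketch of just that padding calculation (all values invented for illustration):

// sketch only: how a pad file becomes zero bytes in the receive buffer
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

int main()
{
	std::int64_t const file_size = 3000;   // size of the pad file
	int const block_length = 16384;        // length of the block being assembled
	int block_pos = 14000;                 // bytes of the block received so far
	std::vector<char> piece(block_pos, 'x');

	// never pad past the end of the current block
	int const pad_size = int(std::min<std::int64_t>(file_size, block_length - block_pos));
	piece.resize(piece.size() + pad_size, 0);   // pretend the zeroes arrived
	block_pos += pad_size;

	std::printf("appended %d zero bytes, block now at %d/%d\n"
		, pad_size, block_pos, block_length);
}
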
@@ -124,6 +124,7 @@ test-suite libtorrent :
 	[ run test_ssl.cpp ]
 	[ run test_tracker.cpp ]
 	[ run test_checking.cpp ]
+	[ run test_url_seed.cpp ]
 	[ run test_web_seed.cpp ]
 	[ run test_web_seed_socks4.cpp ]
 	[ run test_web_seed_socks5.cpp ]

@@ -44,6 +44,7 @@ test_programs = \
 	test_utp \
 	test_session \
 	test_web_seed \
+	test_url_seed \
 	test_remap_files \
 	test_gzip \
 	test_utf8 \

@@ -172,6 +173,7 @@ enum_if_SOURCES = enum_if.cpp
 test_utp_SOURCES = test_utp.cpp
 test_session_SOURCES = test_session.cpp
 test_web_seed_SOURCES = test_web_seed.cpp
+test_url_seed_SOURCES = test_url_seed.cpp
 test_remap_files_SOURCES = test_remap_files.cpp
 test_gzip_SOURCES = test_gzip.cpp
 test_utf8_SOURCES = test_utf8.cpp

@@ -777,7 +777,7 @@ setup_transfer(session* ses1, session* ses2, session* ses3

 pid_type web_server_pid = 0;

-int start_web_server(bool ssl, bool chunked_encoding)
+int start_web_server(bool ssl, bool chunked_encoding, bool keepalive)
 {
 	unsigned int seed = total_microseconds(time_now_hires() - min_time()) & 0xffffffff;
 	fprintf(stderr, "random seed: %u\n", seed);

@@ -785,8 +785,8 @@ int start_web_server(bool ssl, bool chunked_encoding)
 	int port = 5000 + (rand() % 55000);

 	char buf[200];
-	snprintf(buf, sizeof(buf), "python ../web_server.py %d %d %d"
-		, port, chunked_encoding , ssl);
+	snprintf(buf, sizeof(buf), "python ../web_server.py %d %d %d %d"
+		, port, chunked_encoding , ssl, keepalive);

 	fprintf(stderr, "%s starting web_server on port %d...\n", time_now_string(), port);

@@ -86,7 +86,9 @@ EXPORT setup_transfer(libtorrent::session* ses1, libtorrent::session* ses2
 	, libtorrent::add_torrent_params const* p = 0, bool stop_lsd = true, bool use_ssl_ports = false
 	, boost::intrusive_ptr<libtorrent::torrent_info>* torrent2 = 0);

-int EXPORT start_web_server(bool ssl = false, bool chunked = false);
+int EXPORT start_web_server(bool ssl = false, bool chunked = false
+	, bool keepalive = true);

 void EXPORT stop_web_server();
 int EXPORT start_proxy(int type);
 void EXPORT stop_proxy(int port);

@@ -0,0 +1,54 @@
+/*
+
+Copyright (c) 2008-2014, Arvid Norberg
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the distribution.
+    * Neither the name of the author nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#include "test.hpp"
+#include "setup_transfer.hpp"
+#include "web_seed_suite.hpp"
+
+using namespace libtorrent;
+
+const int proxy = libtorrent::proxy_settings::none;
+
+int test_main()
+{
+	for (int keepalive = 0; keepalive < 2; ++keepalive)
+	{
+#ifdef TORRENT_USE_OPENSSL
+		run_http_suite(proxy, "https", 1, 0, 0, keepalive);
+#endif
+		run_http_suite(proxy, "http", 1, 0, 0, keepalive);
+	}
+	run_http_suite(proxy, "http", 1, 0, 0, 1, 1);
+	return 0;
+}

@@ -1,6 +1,6 @@
 /*

-Copyright (c) 2008, Arvid Norberg
+Copyright (c) 2008-2014, Arvid Norberg
 All rights reserved.

 Redistribution and use in source and binary forms, with or without

@@ -40,14 +40,10 @@ const int proxy = libtorrent::proxy_settings::none;

 int test_main()
 {
-	int ret = 0;
-	for (int url_seed = 0; url_seed < 2; ++url_seed)
-	{
 #ifdef TORRENT_USE_OPENSSL
-		run_http_suite(proxy, "https", url_seed);
+	run_http_suite(proxy, "https", false);
 #endif
-		run_http_suite(proxy, "http", url_seed);
-	}
-	return ret;
+	run_http_suite(proxy, "http", false);
+	return 0;
 }

@@ -77,7 +77,8 @@ static char const* proxy_name[] = {"", "_socks4", "_socks5", "_socks5_pw", "_htt

 // proxy: 0=none, 1=socks4, 2=socks5, 3=socks5_pw 4=http 5=http_pw
 static void test_transfer(session& ses, boost::intrusive_ptr<torrent_info> torrent_file
-	, int proxy, int port, char const* protocol, bool url_seed, bool chunked_encoding, bool test_ban)
+	, int proxy, int port, char const* protocol, bool url_seed
+	, bool chunked_encoding, bool test_ban, bool keepalive)
 {
 	using namespace libtorrent;

@@ -89,8 +90,12 @@ static void test_transfer(session& ses, boost::intrusive_ptr<torrent_info> torre

 	static char const* test_name[] = {"no", "SOCKS4", "SOCKS5", "SOCKS5 password", "HTTP", "HTTP password"};

-	fprintf(stderr, "\n\n ==== TESTING === proxy: %s ==== protocol: %s ==== seed: %s === transfer-encoding: %s === corruption: %s\n\n\n"
-		, test_name[proxy], protocol, url_seed ? "URL seed" : "HTTP seed", chunked_encoding ? "chunked": "none", test_ban ? "yes" : "no");
+	fprintf(stderr, "\n\n ==== TESTING === proxy: %s ==== protocol: %s "
+		"==== seed: %s === transfer-encoding: %s === corruption: %s "
+		"==== keepalive: %s\n\n\n"
+		, test_name[proxy], protocol, url_seed ? "URL seed" : "HTTP seed"
+		, chunked_encoding ? "chunked": "none", test_ban ? "yes" : "no"
+		, keepalive ? "yes" : "no");

 	proxy_settings ps;

@@ -166,7 +171,7 @@ static void test_transfer(session& ses, boost::intrusive_ptr<torrent_info> torre

 		// if the web seed connection is disconnected, we're going to fail
 		// the test. make sure to do so quickly
-		if (peer_disconnects >= 1) break;
+		if (keepalive && peer_disconnects >= 1) break;

 		if (s.is_seeding /* && ss.download_rate == 0.f*/)
 		{

@@ -232,7 +237,8 @@ static void test_transfer(session& ses, boost::intrusive_ptr<torrent_info> torre
 // proxy: 0=none, 1=socks4, 2=socks5, 3=socks5_pw 4=http 5=http_pw
 // protocol: "http" or "https"
 // test_url_seed determines whether to use url-seed or http-seed
-int EXPORT run_http_suite(int proxy, char const* protocol, bool test_url_seed, bool chunked_encoding, bool test_ban)
+int EXPORT run_http_suite(int proxy, char const* protocol, bool test_url_seed
+	, bool chunked_encoding, bool test_ban, bool keepalive, bool test_rename)
 {
 	using namespace libtorrent;

@@ -272,7 +278,7 @@ int EXPORT run_http_suite(int proxy, char const* protocol, bool test_url_seed, b
 		free(random_data);
 	}

-	int port = start_web_server(strcmp(protocol, "https") == 0, chunked_encoding);
+	int port = start_web_server(strcmp(protocol, "https") == 0, chunked_encoding, keepalive);

 	// generate a torrent with pad files to make sure they
 	// are not requested web seeds

@@ -356,12 +362,14 @@ int EXPORT run_http_suite(int proxy, char const* protocol, bool test_url_seed, b
 		ses.listen_on(std::make_pair(51000, 52000), ec);
 		if (ec) fprintf(stderr, "listen_on failed: %s\n", ec.message().c_str());

-		test_transfer(ses, torrent_file, proxy, port, protocol, test_url_seed, chunked_encoding, test_ban);
+		test_transfer(ses, torrent_file, proxy, port, protocol, test_url_seed
+			, chunked_encoding, test_ban, keepalive);

-		if (test_url_seed)
+		if (test_url_seed && test_rename)
 		{
 			torrent_file->rename_file(0, combine_path(save_path, combine_path("torrent_dir", "renamed_test1")));
-			test_transfer(ses, torrent_file, 0, port, protocol, test_url_seed, chunked_encoding, test_ban);
+			test_transfer(ses, torrent_file, 0, port, protocol, test_url_seed
+				, chunked_encoding, test_ban, keepalive);
 		}
 	}

@@ -32,5 +32,6 @@ POSSIBILITY OF SUCH DAMAGE.
 #include "test.hpp"

 int EXPORT run_http_suite(int proxy, char const* protocol
-	, bool test_url_seed, bool chunked_encoding = false, bool test_ban = false);
+	, bool test_url_seed, bool chunked_encoding = false, bool test_ban = false
+	, bool keepalive = true, bool test_rename = false);

@@ -6,6 +6,7 @@ import ssl
 import gzip

 chunked_encoding = False
+keepalive = True

 try:
 	fin = open('test_file', 'rb')

@@ -29,6 +30,7 @@ class http_handler(SimpleHTTPServer.SimpleHTTPRequestHandler):

 		#print s.requestline
 		global chunked_encoding
+		global keepalive

 		# if the request contains the hostname and port. strip it
 		if s.path.startswith('http://') or s.path.startswith('https://'):

@@ -117,6 +119,9 @@ class http_handler(SimpleHTTPServer.SimpleHTTPRequestHandler):
 			s.send_header('Content-Length', end_range - start_range)
 			if filename.endswith('.gz'):
 				s.send_header('Content-Encoding', 'gzip')
+			if not keepalive:
+				s.send_header("Connection", "close")
+
 			s.end_headers()

 			f.seek(start_range)

@@ -143,6 +148,7 @@ if __name__ == '__main__':
 	port = int(sys.argv[1])
 	chunked_encoding = sys.argv[2] != '0'
 	use_ssl = sys.argv[3] != '0'
+	keepalive = sys.argv[4] != '0'

 	http_handler.protocol_version = 'HTTP/1.1'
 	httpd = http_server_with_timeout(('127.0.0.1', port), http_handler)
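
The test web server now takes a fourth command-line flag; when it is 0 the server adds a "Connection: close" header and the client has to open a new connection for every request, which is the case this commit exercises. As a general illustration (not libtorrent's parser), a client typically decides whether a connection may be reused from the HTTP version and the Connection header; the sketch below uses a hypothetical helper name.

// sketch only: deciding whether an HTTP connection may be reused
#include <cstdio>
#include <string>

bool connection_reusable(std::string const& http_version, std::string const& connection_header)
{
	// HTTP/1.1 defaults to keep-alive unless the server says "close";
	// HTTP/1.0 defaults to close unless it says "keep-alive"
	if (connection_header == "close") return false;
	if (http_version == "HTTP/1.1") return true;
	return connection_header == "keep-alive";
}

int main()
{
	std::printf("%d\n", connection_reusable("HTTP/1.1", "close"));      // 0
	std::printf("%d\n", connection_reusable("HTTP/1.1", ""));           // 1
	std::printf("%d\n", connection_reusable("HTTP/1.0", "keep-alive")); // 1
}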