Merge pull request #426 from arvidn/web-seed-padfiles-1.1

fix handling edge cases of padfiles in web seed peer connection
This commit is contained in:
Arvid Norberg 2016-01-29 00:33:54 -05:00
commit f18d2d5420
10 changed files with 948 additions and 715 deletions

View File

@ -398,7 +398,7 @@ private:
// creation of m_enc_handler. Cannot reinitialize once
// initialized.
boost::scoped_ptr<dh_key_exchange> m_dh_key_exchange;
// used during an encrypted handshake then moved
// into m_enc_handler if rc4 encryption is negotiated
// otherwise it is destroyed when the handshake completes

View File

@ -802,6 +802,8 @@ namespace libtorrent
virtual int timeout() const;
io_service& get_io_service() { return m_ios; }
private:
// explicitly disallow assignment, to silence msvc warning
peer_connection& operator=(peer_connection const&);

View File

@ -96,7 +96,12 @@ namespace libtorrent
private:
bool maybe_harvest_block();
void on_receive_padfile();
void incoming_payload(char const* buf, int len);
void incoming_zeroes(int len);
void handle_redirect(int bytes_left);
void handle_error(int bytes_left);
void maybe_harvest_piece();
// returns the block currently being
// downloaded. And the progress of that
@ -105,30 +110,33 @@ namespace libtorrent
// will be invalid.
boost::optional<piece_block_progress> downloading_piece_progress() const TORRENT_OVERRIDE;
void handle_padfile(buffer::const_interval& recv_buffer);
void handle_padfile();
// this has one entry per http-request
// (might be more than the bt requests)
std::deque<int> m_file_requests;
struct file_request_t
{
int file_index;
int length;
boost::int64_t start;
};
std::deque<file_request_t> m_file_requests;
std::string m_url;
web_seed_t* m_web;
// this is used for intermediate storage of pieces
// that are received in more than one HTTP response
// TODO: 1 if we make this be a disk_buffer_holder instead
// we would save a copy sometimes
// this is used for intermediate storage of pieces to be delivered to the
// bittorrent engine
// TODO: 3 if we make this be a disk_buffer_holder instead
// we would save a copy
// use allocate_disk_receive_buffer and release_disk_receive_buffer
std::vector<char> m_piece;
// the number of bytes received in the current HTTP
// response. used to know where in the buffer the
// the number of bytes we've forwarded to the incoming_payload() function
// in the current HTTP response. used to know where in the buffer the
// next response starts
boost::int64_t m_received_body;
// position in the current range response
boost::int64_t m_range_pos;
int m_received_body;
// this is the offset inside the current receive
// buffer where the next chunk header will be.
@ -136,10 +144,7 @@ namespace libtorrent
// parsed. It does not necessarily point to a valid
// offset in the receive buffer, if we haven't received
// it yet. This offset never includes the HTTP header
boost::int64_t m_chunk_pos;
// the position in the current block
int m_block_pos;
int m_chunk_pos;
// this is the number of bytes we've already received
// from the next chunk header we're waiting for

View File

@ -3937,7 +3937,7 @@ namespace libtorrent
// the verification will fail for coalesced blocks
TORRENT_ASSERT(verify_piece(r) || m_request_large_blocks);
#ifndef TORRENT_DISABLE_EXTENSIONS
bool handled = false;
for (extension_list_t::iterator i = m_extensions.begin()

File diff suppressed because it is too large Load Diff

View File

@ -60,6 +60,7 @@ lib libtorrent_test
swarm_suite.cpp
test_utils.cpp
settings.cpp
make_torrent.cpp
: # requirements
# this is used to determine whether

203
test/make_torrent.cpp Normal file
View File

@ -0,0 +1,203 @@
/*
Copyright (c) 2016, Arvid Norberg
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#include <boost/make_shared.hpp>
#include <deque>
#include "make_torrent.hpp"
#include "libtorrent/storage.hpp"
#include "libtorrent/hasher.hpp"
#include "libtorrent/entry.hpp"
#include "libtorrent/bencode.hpp"
#include "libtorrent/file_pool.hpp"
#include "libtorrent/storage_defs.hpp"
using namespace libtorrent;
// extract the value of a "name=<value>" field from a file descriptor
// string, or return the fallback when no name= field is present. The value
// extends up to the next comma, or to the end of the string.
static std::string file_name_from(std::string const& ent, std::string const& fallback)
{
	std::string::size_type pos = ent.find("name=");
	if (pos == std::string::npos) return fallback;
	pos += 5;
	std::string::size_type const end = ent.find(',', pos);
	// note: substr() takes a length as its second argument, not an end
	// offset, so the comma position must be converted to a count
	return ent.substr(pos, end == std::string::npos
		? std::string::npos : end - pos);
}

// build an in-memory .torrent from the declarative description in ``args``
// and return the parsed torrent_info. Each file descriptor string has the
// form "<size>[,padfile][,executable][,name=<filename>]". Piece hashes are
// computed over synthetic data: byte value == piece index, with zero bytes
// inside pad file ranges. The bencoded torrent is also written to
// "test.torrent" in the current directory as a debugging aid.
boost::shared_ptr<libtorrent::torrent_info> make_test_torrent(
	torrent_args const& args)
{
	entry e;
	entry::dictionary_type& info = e["info"].dict();
	int total_size = 0;

	if (args.m_priv)
	{
		info["priv"] = 1;
	}

	// torrent offset ranges where the pad files are
	// used when generating hashes
	std::deque<std::pair<int,int> > pad_files;

	int const piece_length = 32768;
	info["piece length"] = piece_length;

	if (args.m_files.size() == 1)
	{
		// single-file torrent: "length" at the top level, no "files" list
		std::string const& ent = args.m_files[0];
		info["name"] = file_name_from(ent, "test_file-1");
		int const file_size = atoi(ent.c_str());
		info["length"] = file_size;
		total_size = file_size;
	}
	else
	{
		// multi-file torrent: one dictionary per file in the "files" list
		info["name"] = args.m_name;

		entry::list_type& files = info["files"].list();
		for (int i = 0; i < int(args.m_files.size()); ++i)
		{
			int const file_size = atoi(args.m_files[i].c_str());

			files.push_back(entry());
			entry::dictionary_type& file_entry = files.back().dict();
			std::string const& ent = args.m_files[i];
			if (ent.find("padfile") != std::string::npos)
			{
				file_entry["attr"].string() += "p";
				// remember the torrent-offset range this pad file covers,
				// so the hashing loop below can substitute zero bytes
				pad_files.push_back(std::make_pair(total_size, total_size + file_size));
			}
			if (ent.find("executable") != std::string::npos)
				file_entry["attr"].string() += "x";

			char filename[100];
			snprintf(filename, sizeof(filename), "test_file-%d", i);
			file_entry["path"].list().push_back(
				file_name_from(ent, filename));
			file_entry["length"] = file_size;
			total_size += file_size;
		}
	}

	if (!args.m_url_seed.empty())
	{
		e["url-list"] = args.m_url_seed;
	}

	if (!args.m_http_seed.empty())
	{
		e["httpseeds"] = args.m_http_seed;
	}

	// hash the synthetic content one byte at a time, replacing bytes that
	// fall inside a pad file range with zeroes
	std::string piece_hashes;

	int const num_pieces = (total_size + piece_length - 1) / piece_length;
	int torrent_offset = 0;
	for (int i = 0; i < num_pieces; ++i)
	{
		hasher h;
		int const piece_size = (i < num_pieces - 1) ? piece_length : total_size - (num_pieces - 1) * piece_length;

		char const data = static_cast<char>(i);
		char const zero = 0;
		for (int o = 0; o < piece_size; ++o, ++torrent_offset)
		{
			// drop pad file ranges we have moved past
			while (!pad_files.empty() && torrent_offset >= pad_files.front().second)
				pad_files.pop_front();

			if (!pad_files.empty() && torrent_offset >= pad_files.front().first)
			{
				h.update(&zero, 1);
			}
			else
			{
				h.update(&data, 1);
			}
		}
		piece_hashes += h.final().to_string();
	}

	info["pieces"] = piece_hashes;

	std::vector<char> tmp;
	std::back_insert_iterator<std::vector<char> > out(tmp);
	bencode(out, e);

	// open in binary mode ("wb+"): the bencoded payload would be corrupted
	// by newline translation in text mode on windows. The file is only a
	// debugging artifact, so a failure to open it is not fatal
	FILE* f = fopen("test.torrent", "wb+");
	if (f != NULL)
	{
		fwrite(&tmp[0], 1, tmp.size(), f);
		fclose(f);
	}

	return boost::make_shared<torrent_info>(&tmp[0], tmp.size());
}
// write the on-disk files for torrent ``ti`` under ``path``, filled with
// the same synthetic pattern make_test_torrent() hashed (byte value ==
// piece index). When ``alternate_data`` is true the pattern is inverted
// (255 - piece index), producing files that deliberately fail the piece
// hash check.
void generate_files(libtorrent::torrent_info const& ti, std::string const& path
	, bool alternate_data)
{
	file_pool fp;

	storage_params params;
	params.files = &ti.files();
	params.path = path;
	params.pool = &fp;

	default_storage st(params);

	int const num_pieces = ti.num_pieces();

	std::vector<char> buffer;
	// the piece length is invariant across the loop; size the buffer once.
	// the last (possibly shorter) piece only writes its piece_size prefix
	buffer.resize(ti.piece_length());
	for (int i = 0; i < num_pieces; ++i)
	{
		int const piece_size = ti.piece_size(i);
		boost::uint8_t const data = alternate_data ? 255 - i : i;
		// fill the whole piece with one repeated byte value (replaces the
		// original byte-at-a-time memcpy loop)
		memset(&buffer[0], data, piece_size);

		file::iovec_t b = { &buffer[0], size_t(piece_size) };
		storage_error ec;
		int const ret = st.writev(&b, 1, i, 0, 0, ec);
		if (ret != piece_size || ec)
		{
			fprintf(stderr, "ERROR writing files: (%d expected %d) %s\n"
				, ret, piece_size, ec.ec.message().c_str());
		}
	}
}

62
test/make_torrent.hpp Normal file
View File

@ -0,0 +1,62 @@
/*
Copyright (c) 2016, Arvid Norberg
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#include "libtorrent/torrent_info.hpp"
#include <boost/shared_ptr.hpp>
#include <vector>
#include <string>
// flags controlling properties of a generated test torrent
enum flags_t
{
// generate a private torrent (sets the "priv" entry in the info dict)
private_torrent = 1
};
// fluent builder describing a synthetic test torrent. Every setter returns
// *this so calls chain, e.g.:
//   make_test_torrent(torrent_args().name("t").file("100").file("8,padfile"));
struct torrent_args
{
	torrent_args() : m_priv(false) {}

	// the torrent name (the "name" field of the info dictionary)
	torrent_args& name(char const* n)
	{ m_name = n; return *this; }

	// append one file descriptor string of the form
	// "<size>[,padfile][,executable][,name=<filename>]"
	torrent_args& file(char const* f)
	{ m_files.push_back(f); return *this; }

	// the web seed URL ("url-list" field)
	torrent_args& url_seed(char const* u)
	{ m_url_seed = u; return *this; }

	// the HTTP seed URL ("httpseeds" field)
	torrent_args& http_seed(char const* u)
	{ m_http_seed = u; return *this; }

	// mark the torrent as private
	torrent_args& priv()
	{ m_priv = true; return *this; }

	bool m_priv;
	std::string m_name;
	std::vector<std::string> m_files;
	std::string m_url_seed;
	std::string m_http_seed;
};
boost::shared_ptr<libtorrent::torrent_info> make_test_torrent(torrent_args const& args);
void generate_files(libtorrent::torrent_info const& ti, std::string const& path, bool random = false);

View File

@ -45,6 +45,7 @@ POSSIBILITY OF SUCH DAMAGE.
#include "test.hpp"
#include "setup_transfer.hpp"
#include "web_seed_suite.hpp"
#include "make_torrent.hpp"
#include <boost/tuple/tuple.hpp>
#include <boost/make_shared.hpp>
@ -70,18 +71,6 @@ bool on_alert(alert const* a)
return false;
}
const int num_pieces = 9;
/*
static sha1_hash file_hash(std::string const& name)
{
std::vector<char> buf;
error_code ec;
load_file(name, buf, ec);
if (buf.empty()) return sha1_hash(0);
hasher h(&buf[0], buf.size());
return h.final();
}
*/
static char const* proxy_name[] = {"", "_socks4", "_socks5", "_socks5_pw", "_http", "_http_pw", "_i2p"};
} // anonymous namespace
@ -143,6 +132,11 @@ void test_transfer(lt::session& ses, boost::shared_ptr<torrent_info> torrent_fil
add_torrent_params p;
p.flags &= ~add_torrent_params::flag_paused;
p.flags &= ~add_torrent_params::flag_auto_managed;
// the reason to set sequential download is to make sure that the order in
// which files are requested from the web server is consistent. Any specific
// scenario that needs testing should be an explicit test case
p.flags |= add_torrent_params::flag_sequential_download;
p.ti = torrent_file;
p.save_path = save_path;
#ifndef TORRENT_NO_DEPRECATE
@ -194,35 +188,41 @@ void test_transfer(lt::session& ses, boost::shared_ptr<torrent_info> torrent_fil
, int(s.total_payload_download), int(s.total_redundant_bytes));
TEST_EQUAL(s.total_payload_download - s.total_redundant_bytes, total_size - pad_file_size);
// we need to sleep here a bit to let the session sync with the torrent stats
// commented out because it takes such a long time
// TEST_EQUAL(ses.status().total_payload_download - ses.status().total_redundant_bytes
// , total_size - pad_file_size);
break;
}
// if the web seed connection is disconnected, we're going to fail
// the test. make sure to do so quickly
if (keepalive && peer_disconnects >= 1) break;
if (!test_ban && keepalive && peer_disconnects >= 1) break;
test_sleep(100);
}
// for test_ban tests, make sure we removed
// the url seed (i.e. banned it)
TEST_CHECK(!test_ban || (th.url_seeds().empty() && th.http_seeds().empty()));
cnt = get_counters(ses);
// if the web seed sent corrupt data and we banned it, we probably didn't
// end up using all the cache anyway
if (!test_ban)
if (test_ban)
{
// for test_ban tests, make sure we removed
// the url seed (i.e. banned it)
// torrents that don't have very many pieces will not ban the web seeds,
// since they won't have an opportunity to accrue enough negative points
if (torrent_file->files().num_pieces() > 3)
{
TEST_CHECK(th.url_seeds().empty());
TEST_CHECK(th.http_seeds().empty());
}
}
else
{
// if the web seed sent corrupt data and we banned it, we probably didn't
// end up using all the cache anyway
torrent_status st = th.status();
TEST_EQUAL(st.is_seeding, true);
if (st.is_seeding)
{
// we need to sleep here a bit to let the session sync with the torrent stats
// commented out because it takes such a long time
for (int i = 0; i < 50; ++i)
{
cnt = get_counters(ses);
@ -235,8 +235,8 @@ void test_transfer(lt::session& ses, boost::shared_ptr<torrent_info> torrent_fil
, int(cnt["disk.disk_blocks_in_use"]));
test_sleep(100);
}
TEST_EQUAL(cnt["disk.disk_blocks_in_use"]
, (torrent_file->total_size() + 0x3fff) / 0x4000);
TEST_CHECK(std::abs(cnt["disk.disk_blocks_in_use"]
- (torrent_file->total_size() + 0x3fff) / 0x4000) <= 2);
}
}
@ -263,14 +263,17 @@ void test_transfer(lt::session& ses, boost::shared_ptr<torrent_info> torrent_fil
if (!test_ban)
{
std::string first_file_path = combine_path(save_path, torrent_file->files().file_path(0));
fprintf(stderr, "checking file: %s\n", first_file_path.c_str());
TEST_CHECK(exists(first_file_path));
file_storage const& fs = torrent_file->files();
for (int i = 0; i < fs.num_files(); ++i)
{
bool const expect = !fs.pad_file_at(i);
std::string file_path = combine_path(save_path, fs.file_path(i));
fprintf(stderr, "checking file: %s\n", file_path.c_str());
TEST_EQUAL(exists(file_path), expect);
}
}
ses.remove_torrent(th);
remove_all(save_path, ec);
}
// proxy: 0=none, 1=socks4, 2=socks5, 3=socks5_pw 4=http 5=http_pw
@ -286,139 +289,143 @@ int EXPORT run_http_suite(int proxy, char const* protocol, bool test_url_seed
save_path += proxy_name[proxy];
error_code ec;
create_directories(combine_path(save_path, "torrent_dir"), ec);
int const port = start_web_server(strcmp(protocol, "https") == 0, chunked_encoding, keepalive);
file_storage fs;
std::srand(10);
int piece_size = 0x4000;
static const int file_sizes[] =
{ 5, 16 - 5, 16000, 17, 10, 8000, 8000, 1,1,1,1,1,100,1,1,1,1,100,1,1,1,1,1,1
,1,1,1,1,1,1,13,65000,34,75,2,30,400,500,23000,900,43000,400,4300,6, 4};
std::vector<torrent_args> test_cases;
if (test_url_seed)
{
create_random_files(combine_path(save_path, "torrent_dir"), file_sizes, sizeof(file_sizes)/sizeof(file_sizes[0]));
add_files(fs, combine_path(save_path, "torrent_dir"));
char url[512];
snprintf(url, sizeof(url), ("%s://127.0.0.1:%d/" + save_path).c_str(), protocol, port);
fprintf(stderr, "testing: %s\n", url);
create_directories(combine_path(save_path, "torrent_dir"), ec);
torrent_args args;
// test case 1
test_cases.push_back(torrent_args().file("0").file("5,padfile").file("11")
.file("16000").file("368,padfile")
.file("16384,padfile").file("16384,padfile").file("17").file("10")
.file("8000").file("8000").file("1").file("1").file("1").file("1")
.file("1").file("100").file("0").file("1").file("1").file("1")
.file("100").file("1").file("1").file("1").file("1").file("1,padfile")
.file("1,padfile").file("1,padfile").file("1").file("0").file("0")
.file("0").file("1").file("13").file("65000").file("34").file("75")
.file("2").file("30").file("400").file("500").file("23000")
.file("900").file("43000").file("400").file("4300").file("6")
.file("4,padfile")
.name("torrent_dir")
.url_seed(url));
// test case 2 (the end of the torrent are padfiles)
test_cases.push_back(torrent_args()
.file("0,padfile")
.file("11")
.file("5")
.file("16000")
.file("368,padfile")
.file("16384,padfile")
.name("torrent_dir")
.url_seed(url));
// test case 3 (misaligned)
test_cases.push_back(torrent_args()
.file("16383")
.file("11")
.file("5")
.file("16000")
.name("torrent_dir")
.url_seed(url));
// test case 4 (a full piece padfile)
test_cases.push_back(torrent_args()
.file("32768,padfile")
.file("16000")
.file("11")
.file("5")
.name("torrent_dir")
.url_seed(url));
// test case 5 (properly aligned padfile)
test_cases.push_back(torrent_args()
.file("32760")
.file("8,padfile")
.file("32760")
.file("8")
.file("32700")
.file("68,padfile")
.file("32000")
.name("torrent_dir")
.url_seed(url));
snprintf(url, sizeof(url), ("%s://127.0.0.1:%d/" + save_path + "/test-single-file").c_str(), protocol, port);
// test case 6 (single file torrent)
test_cases.push_back(torrent_args()
.file("199092,name=test-single-file")
.name("torrent_dir")
.url_seed(url));
}
else
{
piece_size = 64 * 1024;
char* random_data = (char*)malloc(64 * 1024 * num_pieces);
std::generate(random_data, random_data + 64 * 1024 * num_pieces, random_byte);
std::string seed_filename = combine_path(save_path, "seed");
fprintf(stderr, "creating file: %s %s\n"
, current_working_directory().c_str(), seed_filename.c_str());
save_file(seed_filename.c_str(), random_data, 64 * 1024 * num_pieces);
fs.add_file("seed", 64 * 1024 * num_pieces);
free(random_data);
char url[512];
snprintf(url, sizeof(url), "%s://127.0.0.1:%d/%s/seed", protocol, port, save_path.c_str());
fprintf(stderr, "testing: %s\n", url);
// there's really just one test case for http seeds
test_cases.push_back(torrent_args().file("589824,name=seed")
.http_seed(url));
}
int port = start_web_server(strcmp(protocol, "https") == 0, chunked_encoding, keepalive);
for (int a = 0; a < int(test_cases.size()); ++a)
{
fprintf(stderr, "\n\n ==== test case %d ====\n\n\n", a);
// generate a torrent with pad files to make sure they
// are not requested from web seeds
libtorrent::create_torrent t(fs, piece_size, 0x4000, libtorrent::create_torrent::optimize);
boost::shared_ptr<torrent_info> torrent_file = make_test_torrent(test_cases[a]);
char tmp[512];
if (test_url_seed)
{
snprintf(tmp, sizeof(tmp), ("%s://127.0.0.1:%d/" + save_path).c_str(), protocol, port);
t.add_url_seed(tmp);
}
else
{
snprintf(tmp, sizeof(tmp), "%s://127.0.0.1:%d/%s/seed", protocol, port, save_path.c_str());
t.add_http_seed(tmp);
}
fprintf(stderr, "testing: %s\n", tmp);
/*
for (int i = 0; i < fs.num_files(); ++i)
{
file_entry f = fs.at(i);
fprintf(stderr, " %04x: %d %s\n", int(f.offset), f.pad_file, f.path.c_str());
}
*/
// calculate the hash for all pieces
set_piece_hashes(t, save_path, ec);
// if test_ban is true, we create the files with alternate content (that
// doesn't match the hashes in the .torrent file)
generate_files(*torrent_file, save_path, test_ban);
if (ec)
{
fprintf(stderr, "error creating hashes for test torrent: %s\n"
, ec.message().c_str());
TEST_CHECK(false);
return 0;
}
if (test_ban)
{
// corrupt the files now, so that the web seed will be banned
if (test_url_seed)
if (ec)
{
create_random_files(combine_path(save_path, "torrent_dir"), file_sizes, sizeof(file_sizes)/sizeof(file_sizes[0]));
fprintf(stderr, "error creating hashes for test torrent: %s\n"
, ec.message().c_str());
TEST_CHECK(false);
return 0;
}
else
{
piece_size = 64 * 1024;
char* random_data = (char*)malloc(64 * 1024 * num_pieces);
std::generate(random_data, random_data + 64 * 1024 * num_pieces, random_byte);
save_file(combine_path(save_path, "seed").c_str(), random_data, 64 * 1024 * num_pieces);
free(random_data);
}
}
const int mask = alert::all_categories
& ~(alert::progress_notification
| alert::performance_warning
| alert::stats_notification);
std::vector<char> buf;
bencode(std::back_inserter(buf), t.generate());
boost::shared_ptr<torrent_info> torrent_file(boost::make_shared<torrent_info>(&buf[0], buf.size(), boost::ref(ec), 0));
settings_pack pack;
pack.set_int(settings_pack::max_queued_disk_bytes, 256 * 1024);
pack.set_str(settings_pack::listen_interfaces, "0.0.0.0:51000");
pack.set_int(settings_pack::max_retry_port_bind, 1000);
pack.set_int(settings_pack::alert_mask, mask);
pack.set_bool(settings_pack::enable_lsd, false);
pack.set_bool(settings_pack::enable_natpmp, false);
pack.set_bool(settings_pack::enable_upnp, false);
pack.set_bool(settings_pack::enable_dht, false);
libtorrent::session ses(pack, 0);
// TODO: file hashes don't work with the new torrent creator reading async
/*
// no point in testing the hashes since we know the data is corrupt
if (!test_ban)
{
// verify that the file hashes are correct
for (int i = 0; i < torrent_file->num_files(); ++i)
{
sha1_hash h1 = torrent_file->file_at(i).filehash;
sha1_hash h2 = file_hash(combine_path(save_path
, torrent_file->file_at(i).path));
// fprintf(stderr, "%s: %s == %s\n"
// , torrent_file->file_at(i).path.c_str()
// , to_hex(h1.to_string()).c_str(), to_hex(h2.to_string()).c_str());
TEST_EQUAL(h1, h2);
}
}
*/
{
const int mask = alert::all_categories
& ~(alert::progress_notification
| alert::performance_warning
| alert::stats_notification);
settings_pack pack;
pack.set_int(settings_pack::max_queued_disk_bytes, 256 * 1024);
pack.set_str(settings_pack::listen_interfaces, "0.0.0.0:51000");
pack.set_int(settings_pack::max_retry_port_bind, 1000);
pack.set_int(settings_pack::alert_mask, mask);
pack.set_bool(settings_pack::enable_lsd, false);
pack.set_bool(settings_pack::enable_natpmp, false);
pack.set_bool(settings_pack::enable_upnp, false);
pack.set_bool(settings_pack::enable_dht, false);
libtorrent::session ses(pack, 0);
test_transfer(ses, torrent_file, proxy, port, protocol, test_url_seed
, chunked_encoding, test_ban, keepalive, proxy_peers);
if (test_url_seed && test_rename)
{
torrent_file->rename_file(0, combine_path(save_path, combine_path("torrent_dir", "renamed_test1")));
test_transfer(ses, torrent_file, 0, port, protocol, test_url_seed
test_transfer(ses, torrent_file, proxy, port, protocol, test_url_seed
, chunked_encoding, test_ban, keepalive, proxy_peers);
if (test_url_seed && test_rename)
{
torrent_file->rename_file(0, combine_path(save_path, combine_path("torrent_dir", "renamed_test1")));
test_transfer(ses, torrent_file, 0, port, protocol, test_url_seed
, chunked_encoding, test_ban, keepalive, proxy_peers);
}
}
}
stop_web_server();
remove_all(save_path, ec);
return 0;
}

View File

@ -101,7 +101,7 @@ class http_handler(SimpleHTTPServer.SimpleHTTPRequestHandler):
filename = os.path.normpath(s.path[1:s.path.find('seed?') + 4])
print 'filename = %s' % filename
f = open(filename, 'rb')
f.seek(piece * 64 * 1024 + int(ranges[0]))
f.seek(piece * 32 * 1024 + int(ranges[0]))
data = f.read(int(ranges[1]) - int(ranges[0]) + 1)
f.close()