rewrite most of web_peer_connection::on_receive to make it a lot simpler and fix edge cases with pad-files

This commit is contained in:
arvidn 2016-01-21 23:32:20 -05:00 committed by arvidn
parent 4064f91f6d
commit efe32c6a0e
10 changed files with 948 additions and 715 deletions

View File

@ -802,6 +802,8 @@ namespace libtorrent
virtual int timeout() const; virtual int timeout() const;
io_service& get_io_service() { return m_ios; }
private: private:
// explicitly disallow assignment, to silence msvc warning // explicitly disallow assignment, to silence msvc warning
peer_connection& operator=(peer_connection const&); peer_connection& operator=(peer_connection const&);

View File

@ -96,7 +96,12 @@ namespace libtorrent
private: private:
bool maybe_harvest_block(); void on_receive_padfile();
void incoming_payload(char const* buf, int len);
void incoming_zeroes(int len);
void handle_redirect(int bytes_left);
void handle_error(int bytes_left);
void maybe_harvest_piece();
// returns the block currently being // returns the block currently being
// downloaded. And the progress of that // downloaded. And the progress of that
@ -105,30 +110,33 @@ namespace libtorrent
// will be invalid. // will be invalid.
boost::optional<piece_block_progress> downloading_piece_progress() const TORRENT_OVERRIDE; boost::optional<piece_block_progress> downloading_piece_progress() const TORRENT_OVERRIDE;
void handle_padfile(buffer::const_interval& recv_buffer); void handle_padfile();
// this has one entry per http-request // this has one entry per http-request
// (might be more than the bt requests) // (might be more than the bt requests)
std::deque<int> m_file_requests; struct file_request_t
{
int file_index;
int length;
boost::int64_t start;
};
std::deque<file_request_t> m_file_requests;
std::string m_url; std::string m_url;
web_seed_t* m_web; web_seed_t* m_web;
// this is used for intermediate storage of pieces // this is used for intermediate storage of pieces to be delivered to the
// that are received in more than one HTTP response // bittorrent engine
// TODO: 1 if we make this be a disk_buffer_holder instead // TODO: 3 if we make this be a disk_buffer_holder instead
// we would save a copy sometimes // we would save a copy
// use allocate_disk_receive_buffer and release_disk_receive_buffer // use allocate_disk_receive_buffer and release_disk_receive_buffer
std::vector<char> m_piece; std::vector<char> m_piece;
// the number of bytes received in the current HTTP // the number of bytes we've forwarded to the incoming_payload() function
// response. used to know where in the buffer the // in the current HTTP response. used to know where in the buffer the
// next response starts // next response starts
boost::int64_t m_received_body; int m_received_body;
// position in the current range response
boost::int64_t m_range_pos;
// this is the offset inside the current receive // this is the offset inside the current receive
// buffer where the next chunk header will be. // buffer where the next chunk header will be.
@ -136,10 +144,7 @@ namespace libtorrent
// parsed. It does not necessarily point to a valid // parsed. It does not necessarily point to a valid
// offset in the receive buffer, if we haven't received // offset in the receive buffer, if we haven't received
// it yet. This offset never includes the HTTP header // it yet. This offset never includes the HTTP header
boost::int64_t m_chunk_pos; int m_chunk_pos;
// the position in the current block
int m_block_pos;
// this is the number of bytes we've already received // this is the number of bytes we've already received
// from the next chunk header we're waiting for // from the next chunk header we're waiting for

File diff suppressed because it is too large Load Diff

View File

@ -60,6 +60,7 @@ lib libtorrent_test
swarm_suite.cpp swarm_suite.cpp
test_utils.cpp test_utils.cpp
settings.cpp settings.cpp
make_torrent.cpp
: # requirements : # requirements
# this is used to determine whether # this is used to determine whether

203
test/make_torrent.cpp Normal file
View File

@ -0,0 +1,203 @@
/*
Copyright (c) 2016, Arvid Norberg
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#include <boost/make_shared.hpp>
#include <deque>
#include "make_torrent.hpp"
#include "libtorrent/storage.hpp"
#include "libtorrent/hasher.hpp"
#include "libtorrent/entry.hpp"
#include "libtorrent/bencode.hpp"
#include "libtorrent/file_pool.hpp"
#include "libtorrent/storage_defs.hpp"
using namespace libtorrent;
namespace {

// extract the value following "name=" in a file description string
// (e.g. "16000,name=foo,padfile"), or return ``def`` when no name is
// specified. Note: the second argument to std::string::substr() is a
// *length*, not an end offset — the original passed the offset of the
// next ',' directly, which mangled any name followed by more options.
std::string parse_file_name(std::string const& ent, std::string const& def)
{
	std::string::size_type pos = ent.find("name=");
	if (pos == std::string::npos) return def;
	pos += 5;
	std::string::size_type const end = ent.find(',', pos);
	return ent.substr(pos
		, end == std::string::npos ? std::string::npos : end - pos);
}

} // anonymous namespace

// build an in-memory .torrent from the file list in ``args``, also
// writing it to "test.torrent" in the current directory. Every payload
// byte of piece i has the value i (mod 256); bytes falling inside pad
// files hash as zeroes, matching what generate_files() produces.
boost::shared_ptr<libtorrent::torrent_info> make_test_torrent(
	torrent_args const& args)
{
	entry e;
	entry::dictionary_type& info = e["info"].dict();
	int total_size = 0;

	if (args.m_priv)
	{
		// BEP 27 defines the private-flag key as "private" (not "priv")
		info["private"] = 1;
	}

	// torrent offset ranges where the pad files are.
	// used when generating hashes
	std::deque<std::pair<int,int> > pad_files;

	int const piece_length = 32768;
	info["piece length"] = piece_length;

	if (args.m_files.size() == 1)
	{
		// single-file torrent: "name" is the file name and "length" its size
		std::string const& ent = args.m_files[0];
		info["name"] = parse_file_name(ent, "test_file-1");
		int const file_size = atoi(ent.c_str());
		info["length"] = file_size;
		total_size = file_size;
	}
	else
	{
		// multi-file torrent: "name" is the root directory, each file
		// gets its own dictionary in the "files" list
		info["name"] = args.m_name;

		entry::list_type& files = info["files"].list();
		for (int i = 0; i < int(args.m_files.size()); ++i)
		{
			std::string const& ent = args.m_files[i];
			int const file_size = atoi(ent.c_str());

			files.push_back(entry());
			entry::dictionary_type& file_entry = files.back().dict();
			if (ent.find("padfile") != std::string::npos)
			{
				file_entry["attr"].string() += "p";
				// remember the torrent-offset range this pad file covers
				pad_files.push_back(std::make_pair(total_size, total_size + file_size));
			}
			if (ent.find("executable") != std::string::npos)
				file_entry["attr"].string() += "x";

			char filename[100];
			snprintf(filename, sizeof(filename), "test_file-%d", i);
			file_entry["path"].list().push_back(parse_file_name(ent, filename));
			file_entry["length"] = file_size;
			total_size += file_size;
		}
	}

	if (!args.m_url_seed.empty())
	{
		e["url-list"] = args.m_url_seed;
	}

	if (!args.m_http_seed.empty())
	{
		e["httpseeds"] = args.m_http_seed;
	}

	// compute the piece hashes. pad-file ranges hash as zero bytes,
	// everything else as the piece index (truncated to a char)
	std::string piece_hashes;
	int const num_pieces = (total_size + piece_length - 1) / piece_length;
	int torrent_offset = 0;
	for (int i = 0; i < num_pieces; ++i)
	{
		hasher h;
		int const piece_size = (i < num_pieces - 1)
			? piece_length : total_size - (num_pieces - 1) * piece_length;

		char const data = char(i);
		char const zero = 0;
		for (int o = 0; o < piece_size; ++o, ++torrent_offset)
		{
			// drop pad-file ranges we have passed
			while (!pad_files.empty() && torrent_offset >= pad_files.front().second)
				pad_files.pop_front();

			if (!pad_files.empty() && torrent_offset >= pad_files.front().first)
				h.update(&zero, 1);
			else
				h.update(&data, 1);
		}
		piece_hashes += h.final().to_string();
	}

	info["pieces"] = piece_hashes;

	std::vector<char> tmp;
	std::back_insert_iterator<std::vector<char> > out(tmp);
	bencode(out, e);

	// write in binary mode; in text mode (on windows) the bencoded
	// payload would be mangled by newline translation
	FILE* f = fopen("test.torrent", "wb+");
	if (f != NULL)
	{
		fwrite(&tmp[0], 1, tmp.size(), f);
		fclose(f);
	}
	else
	{
		fprintf(stderr, "failed to open \"test.torrent\" for writing\n");
	}

	return boost::make_shared<torrent_info>(&tmp[0], tmp.size());
}
// write the on-disk payload files described by ``ti`` under ``path``.
// Every byte of piece i has the value i — the same pattern
// make_test_torrent() hashes — unless ``alternate_data`` is set, in
// which case 255 - i is used, producing data that deliberately fails
// the hash check (used by the web-seed ban tests).
void generate_files(libtorrent::torrent_info const& ti, std::string const& path
	, bool alternate_data)
{
	file_pool fp;

	storage_params params;
	params.files = &ti.files();
	params.path = path;
	params.pool = &fp;

	default_storage st(params);

	int const num_pieces = ti.num_pieces();

	std::vector<char> buffer;
	for (int i = 0; i < num_pieces; ++i)
	{
		int const piece_size = ti.piece_size(i);
		buffer.resize(ti.piece_length());

		boost::uint8_t const data = alternate_data ? 255 - i : i;
		// fill the whole piece in one call instead of a 1-byte
		// memcpy per iteration
		memset(&buffer[0], data, piece_size);

		file::iovec_t b = { &buffer[0], size_t(piece_size) };
		storage_error ec;
		int const ret = st.writev(&b, 1, i, 0, 0, ec);
		if (ret != piece_size || ec)
		{
			fprintf(stderr, "ERROR writing files: (%d expected %d) %s\n"
				, ret, piece_size, ec.ec.message().c_str());
		}
	}
}

62
test/make_torrent.hpp Normal file
View File

@ -0,0 +1,62 @@
/*
Copyright (c) 2016, Arvid Norberg
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#include "libtorrent/torrent_info.hpp"
#include <boost/shared_ptr.hpp>
#include <vector>
#include <string>
// flags for tests that construct torrents (currently only privacy)
enum flags_t
{
private_torrent = 1
};
// builder-style description of a test torrent, passed to
// make_test_torrent(). Each file() entry is a string of the form
// "<size>[,padfile][,executable][,name=<filename>]".
struct torrent_args
{
torrent_args() : m_priv(false) {}
// root directory name (multi-file torrents only)
torrent_args& name(char const* n) { m_name = n; return *this; }
// append one file descriptor string (see struct comment for format)
torrent_args& file(char const* f) { m_files.push_back(f); return *this; }
// add a BEP 19 url seed ("url-list" key)
torrent_args& url_seed(char const* u) { m_url_seed = u; return *this; }
// add a BEP 17 http seed ("httpseeds" key)
torrent_args& http_seed(char const* u) { m_http_seed = u; return *this; }
// mark the torrent private (BEP 27)
torrent_args& priv() { m_priv = true; return *this; }
bool m_priv;
std::string m_name;
std::vector<std::string> m_files;
std::string m_url_seed;
std::string m_http_seed;
};
// build (and write to "test.torrent") a torrent matching ``args``
boost::shared_ptr<libtorrent::torrent_info> make_test_torrent(torrent_args const& args);
// create the payload files for ``ti`` on disk under ``path``
void generate_files(libtorrent::torrent_info const& ti, std::string const& path, bool random = false);

View File

@ -45,6 +45,7 @@ POSSIBILITY OF SUCH DAMAGE.
#include "test.hpp" #include "test.hpp"
#include "setup_transfer.hpp" #include "setup_transfer.hpp"
#include "web_seed_suite.hpp" #include "web_seed_suite.hpp"
#include "make_torrent.hpp"
#include <boost/tuple/tuple.hpp> #include <boost/tuple/tuple.hpp>
#include <boost/make_shared.hpp> #include <boost/make_shared.hpp>
@ -70,18 +71,6 @@ bool on_alert(alert const* a)
return false; return false;
} }
const int num_pieces = 9;
/*
static sha1_hash file_hash(std::string const& name)
{
std::vector<char> buf;
error_code ec;
load_file(name, buf, ec);
if (buf.empty()) return sha1_hash(0);
hasher h(&buf[0], buf.size());
return h.final();
}
*/
static char const* proxy_name[] = {"", "_socks4", "_socks5", "_socks5_pw", "_http", "_http_pw", "_i2p"}; static char const* proxy_name[] = {"", "_socks4", "_socks5", "_socks5_pw", "_http", "_http_pw", "_i2p"};
} // anonymous namespace } // anonymous namespace
@ -143,6 +132,11 @@ void test_transfer(lt::session& ses, boost::shared_ptr<torrent_info> torrent_fil
add_torrent_params p; add_torrent_params p;
p.flags &= ~add_torrent_params::flag_paused; p.flags &= ~add_torrent_params::flag_paused;
p.flags &= ~add_torrent_params::flag_auto_managed; p.flags &= ~add_torrent_params::flag_auto_managed;
// the reason to set sequential download is to make sure that the order in
// which files are requested from the web server is consistent. Any specific
// scenario that needs testing should be an explicit test case
p.flags |= add_torrent_params::flag_sequential_download;
p.ti = torrent_file; p.ti = torrent_file;
p.save_path = save_path; p.save_path = save_path;
#ifndef TORRENT_NO_DEPRECATE #ifndef TORRENT_NO_DEPRECATE
@ -194,35 +188,41 @@ void test_transfer(lt::session& ses, boost::shared_ptr<torrent_info> torrent_fil
, int(s.total_payload_download), int(s.total_redundant_bytes)); , int(s.total_payload_download), int(s.total_redundant_bytes));
TEST_EQUAL(s.total_payload_download - s.total_redundant_bytes, total_size - pad_file_size); TEST_EQUAL(s.total_payload_download - s.total_redundant_bytes, total_size - pad_file_size);
// we need to sleep here a bit to let the session sync with the torrent stats
// commented out because it takes such a long time
// TEST_EQUAL(ses.status().total_payload_download - ses.status().total_redundant_bytes
// , total_size - pad_file_size);
break; break;
} }
// if the web seed connection is disconnected, we're going to fail // if the web seed connection is disconnected, we're going to fail
// the test. make sure to do so quickly // the test. make sure to do so quickly
if (keepalive && peer_disconnects >= 1) break; if (!test_ban && keepalive && peer_disconnects >= 1) break;
test_sleep(100); test_sleep(100);
} }
// for test_ban tests, make sure we removed
// the url seed (i.e. banned it)
TEST_CHECK(!test_ban || (th.url_seeds().empty() && th.http_seeds().empty()));
cnt = get_counters(ses); cnt = get_counters(ses);
if (test_ban)
{
// for test_ban tests, make sure we removed
// the url seed (i.e. banned it)
// torrents that don't have very many pieces will not ban the web seeds,
// since they won't have an opportunity to accrue enough negative points
if (torrent_file->files().num_pieces() > 3)
{
TEST_CHECK(th.url_seeds().empty());
TEST_CHECK(th.http_seeds().empty());
}
}
else
{
// if the web seed sent corrupt data and we banned it, we probably didn't // end up using all the cache anyway
// end up using all the cache anyway // end up using all the cache anyway
if (!test_ban)
{
torrent_status st = th.status(); torrent_status st = th.status();
TEST_EQUAL(st.is_seeding, true); TEST_EQUAL(st.is_seeding, true);
if (st.is_seeding) if (st.is_seeding)
{ {
// we need to sleep here a bit to let the session sync with the torrent stats
// commented out because it takes such a long time
for (int i = 0; i < 50; ++i) for (int i = 0; i < 50; ++i)
{ {
cnt = get_counters(ses); cnt = get_counters(ses);
@ -235,8 +235,8 @@ void test_transfer(lt::session& ses, boost::shared_ptr<torrent_info> torrent_fil
, int(cnt["disk.disk_blocks_in_use"])); , int(cnt["disk.disk_blocks_in_use"]));
test_sleep(100); test_sleep(100);
} }
TEST_EQUAL(cnt["disk.disk_blocks_in_use"] TEST_CHECK(std::abs(cnt["disk.disk_blocks_in_use"]
, (torrent_file->total_size() + 0x3fff) / 0x4000); - (torrent_file->total_size() + 0x3fff) / 0x4000) <= 2);
} }
} }
@ -263,14 +263,17 @@ void test_transfer(lt::session& ses, boost::shared_ptr<torrent_info> torrent_fil
if (!test_ban) if (!test_ban)
{ {
std::string first_file_path = combine_path(save_path, torrent_file->files().file_path(0)); file_storage const& fs = torrent_file->files();
fprintf(stderr, "checking file: %s\n", first_file_path.c_str()); for (int i = 0; i < fs.num_files(); ++i)
TEST_CHECK(exists(first_file_path)); {
bool const expect = !fs.pad_file_at(i);
std::string file_path = combine_path(save_path, fs.file_path(i));
fprintf(stderr, "checking file: %s\n", file_path.c_str());
TEST_EQUAL(exists(file_path), expect);
}
} }
ses.remove_torrent(th); ses.remove_torrent(th);
remove_all(save_path, ec);
} }
// proxy: 0=none, 1=socks4, 2=socks5, 3=socks5_pw 4=http 5=http_pw // proxy: 0=none, 1=socks4, 2=socks5, 3=socks5_pw 4=http 5=http_pw
@ -286,60 +289,104 @@ int EXPORT run_http_suite(int proxy, char const* protocol, bool test_url_seed
save_path += proxy_name[proxy]; save_path += proxy_name[proxy];
error_code ec; error_code ec;
int const port = start_web_server(strcmp(protocol, "https") == 0, chunked_encoding, keepalive);
std::vector<torrent_args> test_cases;
if (test_url_seed)
{
char url[512];
snprintf(url, sizeof(url), ("%s://127.0.0.1:%d/" + save_path).c_str(), protocol, port);
fprintf(stderr, "testing: %s\n", url);
create_directories(combine_path(save_path, "torrent_dir"), ec); create_directories(combine_path(save_path, "torrent_dir"), ec);
file_storage fs; torrent_args args;
std::srand(10);
int piece_size = 0x4000;
static const int file_sizes[] =
{ 5, 16 - 5, 16000, 17, 10, 8000, 8000, 1,1,1,1,1,100,1,1,1,1,100,1,1,1,1,1,1
,1,1,1,1,1,1,13,65000,34,75,2,30,400,500,23000,900,43000,400,4300,6, 4};
if (test_url_seed) // test case 1
{ test_cases.push_back(torrent_args().file("0").file("5,padfile").file("11")
create_random_files(combine_path(save_path, "torrent_dir"), file_sizes, sizeof(file_sizes)/sizeof(file_sizes[0])); .file("16000").file("368,padfile")
add_files(fs, combine_path(save_path, "torrent_dir")); .file("16384,padfile").file("16384,padfile").file("17").file("10")
.file("8000").file("8000").file("1").file("1").file("1").file("1")
.file("1").file("100").file("0").file("1").file("1").file("1")
.file("100").file("1").file("1").file("1").file("1").file("1,padfile")
.file("1,padfile").file("1,padfile").file("1").file("0").file("0")
.file("0").file("1").file("13").file("65000").file("34").file("75")
.file("2").file("30").file("400").file("500").file("23000")
.file("900").file("43000").file("400").file("4300").file("6")
.file("4,padfile")
.name("torrent_dir")
.url_seed(url));
// test case 2 (the end of the torrent are padfiles)
test_cases.push_back(torrent_args()
.file("0,padfile")
.file("11")
.file("5")
.file("16000")
.file("368,padfile")
.file("16384,padfile")
.name("torrent_dir")
.url_seed(url));
// test case 3 (misaligned)
test_cases.push_back(torrent_args()
.file("16383")
.file("11")
.file("5")
.file("16000")
.name("torrent_dir")
.url_seed(url));
// test case 4 (a full piece padfile)
test_cases.push_back(torrent_args()
.file("32768,padfile")
.file("16000")
.file("11")
.file("5")
.name("torrent_dir")
.url_seed(url));
// test case 5 (properly aligned padfile)
test_cases.push_back(torrent_args()
.file("32760")
.file("8,padfile")
.file("32760")
.file("8")
.file("32700")
.file("68,padfile")
.file("32000")
.name("torrent_dir")
.url_seed(url));
snprintf(url, sizeof(url), ("%s://127.0.0.1:%d/" + save_path + "/test-single-file").c_str(), protocol, port);
// test case 6 (single file torrent)
test_cases.push_back(torrent_args()
.file("199092,name=test-single-file")
.name("torrent_dir")
.url_seed(url));
} }
else else
{ {
piece_size = 64 * 1024; char url[512];
char* random_data = (char*)malloc(64 * 1024 * num_pieces); snprintf(url, sizeof(url), "%s://127.0.0.1:%d/%s/seed", protocol, port, save_path.c_str());
std::generate(random_data, random_data + 64 * 1024 * num_pieces, random_byte); fprintf(stderr, "testing: %s\n", url);
std::string seed_filename = combine_path(save_path, "seed");
fprintf(stderr, "creating file: %s %s\n" // there's really just one test case for http seeds
, current_working_directory().c_str(), seed_filename.c_str()); test_cases.push_back(torrent_args().file("589824,name=seed")
save_file(seed_filename.c_str(), random_data, 64 * 1024 * num_pieces); .http_seed(url));
fs.add_file("seed", 64 * 1024 * num_pieces);
free(random_data);
} }
int port = start_web_server(strcmp(protocol, "https") == 0, chunked_encoding, keepalive); for (int a = 0; a < int(test_cases.size()); ++a)
{
fprintf(stderr, "\n\n ==== test case %d ====\n\n\n", a);
// generate a torrent with pad files to make sure they boost::shared_ptr<torrent_info> torrent_file = make_test_torrent(test_cases[a]);
// are not requested web seeds
libtorrent::create_torrent t(fs, piece_size, 0x4000, libtorrent::create_torrent::optimize);
char tmp[512]; // if test_ban is true, we create the files with alternate content (that
if (test_url_seed) // doesn't match the hashes in the .torrent file)
{ generate_files(*torrent_file, save_path, test_ban);
snprintf(tmp, sizeof(tmp), ("%s://127.0.0.1:%d/" + save_path).c_str(), protocol, port);
t.add_url_seed(tmp);
}
else
{
snprintf(tmp, sizeof(tmp), "%s://127.0.0.1:%d/%s/seed", protocol, port, save_path.c_str());
t.add_http_seed(tmp);
}
fprintf(stderr, "testing: %s\n", tmp);
/*
for (int i = 0; i < fs.num_files(); ++i)
{
file_entry f = fs.at(i);
fprintf(stderr, " %04x: %d %s\n", int(f.offset), f.pad_file, f.path.c_str());
}
*/
// calculate the hash for all pieces
set_piece_hashes(t, save_path, ec);
if (ec) if (ec)
{ {
@ -349,46 +396,6 @@ int EXPORT run_http_suite(int proxy, char const* protocol, bool test_url_seed
return 0; return 0;
} }
if (test_ban)
{
// corrupt the files now, so that the web seed will be banned
if (test_url_seed)
{
create_random_files(combine_path(save_path, "torrent_dir"), file_sizes, sizeof(file_sizes)/sizeof(file_sizes[0]));
}
else
{
piece_size = 64 * 1024;
char* random_data = (char*)malloc(64 * 1024 * num_pieces);
std::generate(random_data, random_data + 64 * 1024 * num_pieces, random_byte);
save_file(combine_path(save_path, "seed").c_str(), random_data, 64 * 1024 * num_pieces);
free(random_data);
}
}
std::vector<char> buf;
bencode(std::back_inserter(buf), t.generate());
boost::shared_ptr<torrent_info> torrent_file(boost::make_shared<torrent_info>(&buf[0], buf.size(), boost::ref(ec), 0));
// TODO: file hashes don't work with the new torrent creator reading async
/*
// no point in testing the hashes since we know the data is corrupt
if (!test_ban)
{
// verify that the file hashes are correct
for (int i = 0; i < torrent_file->num_files(); ++i)
{
sha1_hash h1 = torrent_file->file_at(i).filehash;
sha1_hash h2 = file_hash(combine_path(save_path
, torrent_file->file_at(i).path));
// fprintf(stderr, "%s: %s == %s\n"
// , torrent_file->file_at(i).path.c_str()
// , to_hex(h1.to_string()).c_str(), to_hex(h2.to_string()).c_str());
TEST_EQUAL(h1, h2);
}
}
*/
{ {
const int mask = alert::all_categories const int mask = alert::all_categories
& ~(alert::progress_notification & ~(alert::progress_notification
@ -416,9 +423,9 @@ int EXPORT run_http_suite(int proxy, char const* protocol, bool test_url_seed
, chunked_encoding, test_ban, keepalive, proxy_peers); , chunked_encoding, test_ban, keepalive, proxy_peers);
} }
} }
}
stop_web_server(); stop_web_server();
remove_all(save_path, ec);
return 0; return 0;
} }

View File

@ -101,7 +101,7 @@ class http_handler(SimpleHTTPServer.SimpleHTTPRequestHandler):
filename = os.path.normpath(s.path[1:s.path.find('seed?') + 4]) filename = os.path.normpath(s.path[1:s.path.find('seed?') + 4])
print 'filename = %s' % filename print 'filename = %s' % filename
f = open(filename, 'rb') f = open(filename, 'rb')
f.seek(piece * 64 * 1024 + int(ranges[0])) f.seek(piece * 32 * 1024 + int(ranges[0]))
data = f.read(int(ranges[1]) - int(ranges[0]) + 1) data = f.read(int(ranges[1]) - int(ranges[0]) + 1)
f.close() f.close()