add web seed support for torrents with pad files

Arvid Norberg 2011-11-26 20:48:31 +00:00
parent 12d94246df
commit c279870516
5 changed files with 96 additions and 26 deletions

View File

@ -1,3 +1,4 @@
* add web seed support for torrents with pad files
* introduced a more scalable API for torrent status updates (post_torrent_updates())
* updated the API to add_torrent_params turning all bools into flags of a flags field
* added async_add_torrent() function to significantly improve performance when
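Pad files are zero-filled placeholder entries that create_torrent can insert so that the files following them start at piece-aligned offsets. They exist only in the torrent metadata and on disk, not on a web server, which is why URL/web seeding needs the special handling added in this commit. A rough sketch of producing such a torrent, mirroring the flags the updated test further down passes (the helper name is made up):

#include "libtorrent/create_torrent.hpp"
#include "libtorrent/file_storage.hpp"

void make_padded_torrent(libtorrent::file_storage& fs)
{
    // 0x4000 is used both as the piece size and as the pad file limit here,
    // like in the updated test; the optimize flag lets create_torrent insert
    // pad files into fs so that large files start on piece boundaries
    libtorrent::create_torrent t(fs, 0x4000, 0x4000
        , libtorrent::create_torrent::optimize
        | libtorrent::create_torrent::calculate_file_hashes);
    // t.generate() now yields metadata that includes the pad file entries
}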

View File

@ -212,6 +212,12 @@ namespace libtorrent
file_entry at(int index) const;
file_entry at(iterator i) const;
internal_file_entry const& internal_at(int index) const
{
TORRENT_ASSERT(index >= 0);
TORRENT_ASSERT(index < int(m_files.size()));
return m_files[index];
}
size_type total_size() const { return m_total_size; }
void set_num_pieces(int n) { m_num_pieces = n; }
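The new internal_at() accessor appears to exist so that callers such as the web seed code further down can inspect per-file attributes (notably pad_file) by const reference, without going through at(), which builds a full file_entry including its path string. A minimal sketch of that intended use (the free function is hypothetical):

bool is_pad_file(libtorrent::file_storage const& fs, int index)
{
    // internal_file_entry exposes the pad_file flag directly; no file_entry
    // (and no path string) has to be constructed just to ask this question
    return fs.internal_at(index).pad_file;
}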

View File

@ -105,6 +105,8 @@ namespace libtorrent
private:

bool maybe_harvest_block();

// returns the block currently being
// downloaded. And the progress of that
// block. If the peer isn't downloading

View File

@ -204,12 +204,17 @@ namespace libtorrent
i != files.end(); ++i)
{
file_slice const& f = *i;
if (info.orig_files().internal_at(f.file_index).pad_file)
{
m_file_requests.push_back(f.file_index);
continue;
}
request += "GET ";
if (using_proxy)
{
request += m_url;
std::string path = info.orig_files().file_path(info.orig_files().at(f.file_index));
std::string path = info.orig_files().file_path(info.orig_files().internal_at(f.file_index));
#ifdef TORRENT_WINDOWS
convert_path_to_posix(path);
#endif
@ -218,7 +223,7 @@ namespace libtorrent
else
{
std::string path = m_path;
path += info.orig_files().file_path(info.orig_files().at(f.file_index));
path += info.orig_files().file_path(info.orig_files().internal_at(f.file_index));
#ifdef TORRENT_WINDOWS
convert_path_to_posix(path);
#endif
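The effect of the two hunks above: when the requested blocks map onto a pad file, the file index is still pushed onto m_file_requests (so the receive path knows a pad range is pending), but no HTTP GET is composed for it, since the pad file does not exist on the web server. A condensed sketch of that control flow, not the literal patch:

// for each file slice the requested blocks map to
for (std::vector<file_slice>::iterator i = files.begin(); i != files.end(); ++i)
{
    if (info.orig_files().internal_at(i->file_index).pad_file)
    {
        m_file_requests.push_back(i->file_index); // remember it for on_receive()
        continue;                                 // but do not ask the server for it
    }
    // ... build the "GET <path>" request for a real file as before ...
}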
@ -259,6 +264,36 @@ namespace libtorrent
}
}
bool web_peer_connection::maybe_harvest_block()
{
peer_request const& front_request = m_requests.front();
if (int(m_piece.size()) < front_request.length) return false;
TORRENT_ASSERT(int(m_piece.size()) == front_request.length);
// each call to incoming_piece() may result in us becoming
// a seed. If we become a seed, all seeds we're connected to
// will be disconnected, including this web seed. We need to
// check for the disconnect condition after the call.
boost::shared_ptr<torrent> t = associated_torrent().lock();
TORRENT_ASSERT(t);
buffer::const_interval recv_buffer = receive_buffer();
incoming_piece(front_request, &m_piece[0]);
m_requests.pop_front();
if (associated_torrent().expired()) return false;
TORRENT_ASSERT(m_block_pos >= front_request.length);
m_block_pos -= front_request.length;
cut_receive_buffer(m_body_start, t->block_size() + 1024);
m_body_start = 0;
recv_buffer = receive_buffer();
// TORRENT_ASSERT(m_received_body <= range_end - range_start);
m_piece.clear();
TORRENT_ASSERT(m_piece.empty());
return true;
}
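maybe_harvest_block() factors out logic that used to live inline in on_receive(): once m_piece has accumulated a full block, it hands the block to the engine via incoming_piece(), pops the request and trims the receive buffer, returning true so the caller can refresh its view of the buffer. The assumed call pattern (see the hunks below) looks like this:

// after appending downloaded bytes or pad-file zeroes to m_piece:
if (maybe_harvest_block())
    recv_buffer = receive_buffer();
// delivering a block can complete the torrent, which disconnects web seeds,
// so check that this connection still has a torrent before continuing
if (associated_torrent().expired()) return;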
void web_peer_connection::on_receive(error_code const& error
, std::size_t bytes_transferred)
{
@ -660,6 +695,7 @@ namespace libtorrent
int piece_size = int(m_piece.size());
int copy_size = (std::min)((std::min)(front_request.length - piece_size
, recv_buffer.left()), int(range_end - range_start - m_received_body));
if (copy_size > m_chunk_pos && m_chunk_pos > 0) copy_size = m_chunk_pos;
if (copy_size > 0)
{
m_piece.resize(piece_size + copy_size);
@ -678,25 +714,9 @@ namespace libtorrent
incoming_piece_fragment(copy_size);
}

if (int(m_piece.size()) == front_request.length)
{
// each call to incoming_piece() may result in us becoming
// a seed. If we become a seed, all seeds we're connected to
// will be disconnected, including this web seed. We need to
// check for the disconnect condition after the call.
incoming_piece(front_request, &m_piece[0]);
m_requests.pop_front();
if (associated_torrent().expired()) return;
TORRENT_ASSERT(m_block_pos >= front_request.length);
m_block_pos -= front_request.length;
cut_receive_buffer(m_body_start, t->block_size() + 1024);
m_body_start = 0;
recv_buffer = receive_buffer();
TORRENT_ASSERT(m_received_body <= range_end - range_start);
m_piece.clear();
TORRENT_ASSERT(m_piece.empty());
}

if (maybe_harvest_block())
recv_buffer = receive_buffer();
if (associated_torrent().expired()) return;
}

// report all received blocks to the bittorrent engine
@ -776,6 +796,30 @@ namespace libtorrent
m_received_body = 0;
m_chunk_pos = 0;
m_partial_chunk_header = 0;
torrent_info const& info = t->torrent_file();
while (!m_file_requests.empty()
&& info.orig_files().internal_at(m_file_requests.front()).pad_file)
{
// the next file is a pad file. We didn't actually send
// a request for this since it most likely doesn't exist on
// the web server anyway. Just pretend that we received a
// bunch of zeroes here and pop it again
int file_index = m_file_requests.front();
m_file_requests.pop_front();
size_type file_size = info.orig_files().file_size(info.orig_files().internal_at(file_index));
TORRENT_ASSERT(m_block_pos < front_request.length);
int pad_size = (std::min)(file_size, size_type(front_request.length - m_block_pos));
// insert zeroes to represent the pad file
m_piece.resize(m_piece.size() + pad_size, 0);
m_block_pos += pad_size;
incoming_piece_fragment(pad_size);
if (maybe_harvest_block())
recv_buffer = receive_buffer();
if (associated_torrent().expired()) return;
}
continue;
}
if (bytes_transferred == 0)
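The while loop added in this hunk synthesizes pad files locally: when the next entry in m_file_requests is a pad file, the connection never sent a request for it, so it simply appends that many zero bytes to m_piece and credits them as received payload. Zeroes are correct because pad files are created all-zero, so the piece still hashes to the value stored in the .torrent. An illustrative check of that property (not part of the patch):

#include "libtorrent/hasher.hpp"
#include <vector>

// hash a piece whose tail is covered by a pad file: real payload followed by zeroes
libtorrent::sha1_hash hash_piece_with_padding(char const* payload, int payload_len, int pad_len)
{
    libtorrent::hasher h;
    h.update(payload, payload_len);
    if (pad_len > 0)
    {
        std::vector<char> zeroes(pad_len, 0); // the pad file's entire contents
        h.update(&zeroes[0], pad_len);
    }
    return h.final();
}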

View File

@ -98,6 +98,14 @@ void test_transfer(boost::intrusive_ptr<torrent_info> torrent_file
cache_status cs;
file_storage const& fs = torrent_file->files();
uint pad_file_size = 0;
for (int i = 0; i < fs.num_files(); ++i)
{
file_entry f = fs.at(i);
if (f.pad_file) pad_file_size += f.size;
}
for (int i = 0; i < 30; ++i)
{
torrent_status s = th.status();
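The pad_file_size loop added in this hunk sums the sizes of all pad files so that the checks in the next hunk can subtract them: pad bytes are zero-filled locally and never downloaded, so the payload counters are expected to come out pad_file_size short of the torrent's total size. Roughly, as a hypothetical helper using the file_storage API shown earlier:

libtorrent::size_type expected_web_seed_payload(libtorrent::file_storage const& fs)
{
    libtorrent::size_type expected = fs.total_size();
    for (int i = 0; i < fs.num_files(); ++i)
    {
        libtorrent::file_entry f = fs.at(i);
        if (f.pad_file) expected -= f.size; // pad bytes are never fetched
    }
    return expected;
}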
@ -124,11 +132,11 @@ void test_transfer(boost::intrusive_ptr<torrent_info> torrent_file
if (s.is_seeding /* && ss.download_rate == 0.f*/)
{
TEST_EQUAL(s.total_payload_download - s.total_redundant_bytes, total_size);
TEST_EQUAL(s.total_payload_download - s.total_redundant_bytes, total_size - pad_file_size);
// we need to sleep here a bit to let the session sync with the torrent stats
test_sleep(1000);
TEST_EQUAL(ses.status().total_payload_download - ses.status().total_redundant_bytes
, total_size);
TEST_EQUAL(ses.status().total_payload_download - ses.status().total_redundant_bytes
, total_size - pad_file_size);
break;
}
test_sleep(500);
@ -199,12 +207,12 @@ int run_suite(char const* protocol, bool test_url_seed, bool chunked_encoding)
file_storage fs;
std::srand(10);
int piece_size = 16;
int piece_size = 0x4000;
if (test_url_seed)
{
int file_sizes[] =
{ 5, 16 - 5, 16, 17, 10, 30, 30, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1
,1,1,1,1,1,1,13,65,34,75,2,3,4,5,23,9,43,4,43,6, 4};
{ 5, 16 - 5, 16000, 17, 10, 8000, 8000, 1,1,1,1,1,100,1,1,1,1,100,1,1,1,1,1,1
,1,1,1,1,1,1,13,65000,34,75,2,30,400,500,23000,900,43000,400,4300,6, 4};
char* random_data = (char*)malloc(300000);
for (int i = 0; i != sizeof(file_sizes)/sizeof(file_sizes[0]); ++i)
@ -230,7 +238,10 @@ int run_suite(char const* protocol, bool test_url_seed, bool chunked_encoding)
int port = start_web_server(strcmp(protocol, "https") == 0, chunked_encoding);
libtorrent::create_torrent t(fs, piece_size, 0, libtorrent::create_torrent::calculate_file_hashes);
// generate a torrent with pad files to make sure they
// are not requested from web seeds
libtorrent::create_torrent t(fs, piece_size, 0x4000, libtorrent::create_torrent::optimize
| libtorrent::create_torrent::calculate_file_hashes);
char tmp[512];
if (test_url_seed)
{
@ -244,6 +255,12 @@ int run_suite(char const* protocol, bool test_url_seed, bool chunked_encoding)
}
fprintf(stderr, "testing: %s\n", tmp);
for (int i = 0; i < fs.num_files(); ++i)
{
file_entry f = fs.at(i);
fprintf(stderr, " %04x: %d %s\n", int(f.offset), f.pad_file, f.path.c_str());
}
// for (int i = 0; i < 1000; ++i) sleep(1000);
// calculate the hash for all pieces