diff --git a/ChangeLog b/ChangeLog index 7d763c471..252840655 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,6 +1,7 @@ 1.1.0 release + * allow specifying which tracker to scrape in scrape_tracker * tracker response alerts from user initiated announces/scrapes are now posted regardless of alert mask * improve DHT performance when changing external IP (primarily affects @@ -83,6 +84,7 @@ * almost completely changed the storage interface (for custom storage) * added support for hashing pieces in multiple threads + * support SNI in https web seeds and trackers * fix unhandled exception in DHT when receiving a DHT packet over IPv6 1.0.8 release diff --git a/include/libtorrent/bt_peer_connection.hpp b/include/libtorrent/bt_peer_connection.hpp index cc568cd6a..d86247ab5 100644 --- a/include/libtorrent/bt_peer_connection.hpp +++ b/include/libtorrent/bt_peer_connection.hpp @@ -398,7 +398,7 @@ private: // creation of m_enc_handler. Cannot reinitialize once // initialized. boost::scoped_ptr<dh_key_exchange> m_dh_key_exchange; - + // used during an encrypted handshake then moved // into m_enc_handler if rc4 encryption is negotiated // otherwise it is destroyed when the handshake completes diff --git a/include/libtorrent/file_storage.hpp b/include/libtorrent/file_storage.hpp index 6a415d828..53dc61c6c 100644 --- a/include/libtorrent/file_storage.hpp +++ b/include/libtorrent/file_storage.hpp @@ -334,6 +334,13 @@ namespace libtorrent // returns a list of file_slice objects representing the portions of // files the specified piece index, byte offset and size range overlaps. // this is the inverse mapping of map_file(). + // + // The precondition of this function is that the input range is within the + // torrent's address space. ``piece`` may not be negative and + // + // ``piece`` * piece_size + ``offset`` + ``size`` + // + // may not exceed the total size of the torrent.
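As a point of reference for the precondition documented above (the map_block() declaration follows below), here is a minimal sketch of a caller that validates the range before calling in. This is illustrative only and not part of the patch; it assumes the libtorrent 1.1 file_storage API, and the helper name map_block_checked is hypothetical.

#include "libtorrent/file_storage.hpp"
#include <vector>

// illustrative helper: only call map_block() when the requested range lies
// inside the torrent's address space, as the precondition above requires
std::vector<libtorrent::file_slice> map_block_checked(
	libtorrent::file_storage const& fs
	, int piece, boost::int64_t offset, int size)
{
	std::vector<libtorrent::file_slice> ret;
	if (piece < 0 || offset < 0 || size < 0) return ret;
	boost::int64_t const start = boost::int64_t(piece) * fs.piece_length() + offset;
	if (start + size > fs.total_size()) return ret;
	return fs.map_block(piece, offset, size);
}
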
std::vector<file_slice> map_block(int piece, boost::int64_t offset , int size) const; @@ -363,7 +370,7 @@ namespace libtorrent TORRENT_DEPRECATED reverse_iterator rend() const { return m_files.rend(); } TORRENT_DEPRECATED - internal_file_entry const& internal_at(int index) const + internal_file_entry const& internal_at(int index) const { TORRENT_ASSERT(index >= 0); TORRENT_ASSERT(index < int(m_files.size())); diff --git a/include/libtorrent/peer_connection.hpp b/include/libtorrent/peer_connection.hpp index 6b7edfd3a..1bdfaed0c 100644 --- a/include/libtorrent/peer_connection.hpp +++ b/include/libtorrent/peer_connection.hpp @@ -802,6 +802,8 @@ namespace libtorrent virtual int timeout() const; + io_service& get_io_service() { return m_ios; } + private: // explicitly disallow assignment, to silence msvc warning peer_connection& operator=(peer_connection const&); diff --git a/include/libtorrent/torrent.hpp b/include/libtorrent/torrent.hpp index ed766e86c..2c9639363 100644 --- a/include/libtorrent/torrent.hpp +++ b/include/libtorrent/torrent.hpp @@ -722,7 +722,7 @@ namespace libtorrent // forcefully sets next_announce to the current time void force_tracker_request(time_point, int tracker_idx); - void scrape_tracker(bool user_triggered); + void scrape_tracker(int idx, bool user_triggered); void announce_with_tracker(boost::uint8_t e = tracker_request::none , address const& bind_interface = address_v4::any()); diff --git a/include/libtorrent/torrent_handle.hpp b/include/libtorrent/torrent_handle.hpp index 36a9d3f63..5a1c6cd37 100644 --- a/include/libtorrent/torrent_handle.hpp +++ b/include/libtorrent/torrent_handle.hpp @@ -1085,15 +1085,18 @@ namespace libtorrent void force_reannounce(boost::posix_time::time_duration) const; #endif - // ``scrape_tracker()`` will send a scrape request to the tracker. A - // scrape request queries the tracker for statistics such as total number - // of incomplete peers, complete peers, number of downloads etc. + // ``scrape_tracker()`` will send a scrape request to a tracker. By + // default (``idx`` = -1) it will scrape the last working tracker. If + // ``idx`` is >= 0, the tracker with the specified index will be scraped. + // + // A scrape request queries the tracker for statistics such as total + // number of incomplete peers, complete peers, number of downloads etc. + // + // This request will specifically update the ``num_complete`` and + // ``num_incomplete`` fields in the torrent_status struct once it + // completes. When it completes, it will generate a scrape_reply_alert. + // If it fails, it will generate a scrape_failed_alert. - void scrape_tracker() const; + void scrape_tracker(int idx = -1) const; // ``set_upload_limit`` will limit the upload bandwidth used by this // particular torrent to the limit you set. It is given as the number of diff --git a/include/libtorrent/web_peer_connection.hpp b/include/libtorrent/web_peer_connection.hpp index 4bbb1dfc2..6168debe1 100644 --- a/include/libtorrent/web_peer_connection.hpp +++ b/include/libtorrent/web_peer_connection.hpp @@ -96,7 +96,12 @@ namespace libtorrent private: - bool maybe_harvest_block(); + void on_receive_padfile(); + void incoming_payload(char const* buf, int len); + void incoming_zeroes(int len); + void handle_redirect(int bytes_left); + void handle_error(int bytes_left); + void maybe_harvest_piece(); // returns the block currently being // downloaded. And the progress of that @@ -105,30 +110,33 @@ namespace libtorrent // will be invalid.
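The widened torrent_handle::scrape_tracker(idx) API documented above can be exercised roughly as follows. A minimal sketch, assuming a valid torrent_handle and libtorrent 1.1 headers (announce_entry lives in torrent_info.hpp in this version); alert handling is left to the caller.

#include "libtorrent/torrent_handle.hpp"
#include "libtorrent/torrent_info.hpp" // announce_entry
#include <vector>

// illustrative only: scrape a specific tracker if there is more than one,
// otherwise fall back to the default behaviour (last working tracker)
void scrape_second_tracker(libtorrent::torrent_handle h)
{
	std::vector<libtorrent::announce_entry> const trackers = h.trackers();
	if (trackers.size() > 1) h.scrape_tracker(1);
	else h.scrape_tracker(); // idx defaults to -1

	// the result arrives asynchronously as a scrape_reply_alert (or a
	// scrape_failed_alert). Once it does, the per-tracker counters
	// scrape_complete, scrape_incomplete and scrape_downloaded in
	// announce_entry are updated, as the new simulation test below verifies.
}
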
boost::optional downloading_piece_progress() const TORRENT_OVERRIDE; - void handle_padfile(buffer::const_interval& recv_buffer); + void handle_padfile(); // this has one entry per http-request // (might be more than the bt requests) - std::deque m_file_requests; + struct file_request_t + { + int file_index; + int length; + boost::int64_t start; + }; + std::deque m_file_requests; std::string m_url; web_seed_t* m_web; - // this is used for intermediate storage of pieces - // that are received in more than one HTTP response - // TODO: 1 if we make this be a disk_buffer_holder instead - // we would save a copy sometimes + // this is used for intermediate storage of pieces to be delivered to the + // bittorrent engine + // TODO: 3 if we make this be a disk_buffer_holder instead + // we would save a copy // use allocate_disk_receive_buffer and release_disk_receive_buffer std::vector m_piece; - // the number of bytes received in the current HTTP - // response. used to know where in the buffer the + // the number of bytes we've forwarded to the incoming_payload() function + // in the current HTTP response. used to know where in the buffer the // next response starts - boost::int64_t m_received_body; - - // position in the current range response - boost::int64_t m_range_pos; + int m_received_body; // this is the offset inside the current receive // buffer where the next chunk header will be. @@ -136,10 +144,7 @@ namespace libtorrent // parsed. It does not necessarily point to a valid // offset in the receive buffer, if we haven't received // it yet. This offset never includes the HTTP header - boost::int64_t m_chunk_pos; - - // the position in the current block - int m_block_pos; + int m_chunk_pos; // this is the number of bytes we've already received // from the next chunk header we're waiting for diff --git a/simulation/test_tracker.cpp b/simulation/test_tracker.cpp index 72fa2479d..e33659c45 100644 --- a/simulation/test_tracker.cpp +++ b/simulation/test_tracker.cpp @@ -363,8 +363,8 @@ TORRENT_TEST(ipv6_support) TEST_EQUAL(v6_announces, 2); } -template -void announce_entry_test(Announce a, Test t) +template +void tracker_test(Announce a, Test1 test1, Test2 test2, char const* url_path = "/announce") { using sim::asio::ip::address_v4; sim_config network_cfg; @@ -375,7 +375,7 @@ void announce_entry_test(Announce a, Test t) // listen on port 8080 sim::http_server http(tracker_ios, 8080); - http.register_handler("/announce", a); + http.register_handler(url_path, a); lt::session_proxy zombie; @@ -393,26 +393,31 @@ void announce_entry_test(Announce a, Test t) p.trackers.push_back("http://tracker.com:8080/announce"); ses->async_add_torrent(p); - // stop the torrent 5 seconds in + // run the test 5 seconds in asio::high_resolution_timer t1(ios); t1.expires_from_now(chrono::seconds(5)); - t1.async_wait([&ses,&t](boost::system::error_code const& ec) + t1.async_wait([&ses,&test1](boost::system::error_code const& ec) { std::vector torrents = ses->get_torrents(); TEST_EQUAL(torrents.size(), 1); torrent_handle h = torrents.front(); + test1(h); + }); - std::vector tr = h.trackers(); - - TEST_EQUAL(tr.size(), 1); - announce_entry const& ae = tr[0]; - t(ae); + asio::high_resolution_timer t2(ios); + t2.expires_from_now(chrono::seconds(9)); + t2.async_wait([&ses,&test2](boost::system::error_code const& ec) + { + std::vector torrents = ses->get_torrents(); + TEST_EQUAL(torrents.size(), 1); + torrent_handle h = torrents.front(); + test2(h); }); // then shut down 10 seconds in - asio::high_resolution_timer t2(ios); - 
t2.expires_from_now(chrono::seconds(10)); - t2.async_wait([&ses,&zombie](boost::system::error_code const& ec) + asio::high_resolution_timer t3(ios); + t3.expires_from_now(chrono::seconds(10)); + t3.async_wait([&ses,&zombie](boost::system::error_code const& ec) { zombie = ses->abort(); ses->set_alert_notify([]{}); @@ -422,6 +427,21 @@ void announce_entry_test(Announce a, Test t) sim.run(); } +template +void announce_entry_test(Announce a, Test t, char const* url_path = "/announce") +{ + tracker_test(a + , [&t] (torrent_handle h) { + std::vector tr = h.trackers(); + + TEST_EQUAL(tr.size(), 1); + announce_entry const& ae = tr[0]; + t(ae); + } + , [](torrent_handle){} + , url_path); +} + TORRENT_TEST(test_error) { announce_entry_test( @@ -467,7 +487,7 @@ TORRENT_TEST(test_warning) }); } -TORRENT_TEST(test_scrape) +TORRENT_TEST(test_scrape_data_in_announce) { announce_entry_test( [](std::string method, std::string req @@ -493,6 +513,36 @@ TORRENT_TEST(test_scrape) }); } +TORRENT_TEST(test_scrape) +{ + tracker_test( + [](std::string method, std::string req + , std::map& headers) + { + TEST_EQUAL(method, "GET"); + + char response[500]; + int size = snprintf(response, sizeof(response), + "d5:filesd20:ababababababababababd8:completei1e10:downloadedi3e10:incompletei2eeee"); + return sim::send_response(200, "OK", size) + response; + } + , [](torrent_handle h) + { + h.scrape_tracker(); + } + , [](torrent_handle h) + { + std::vector tr = h.trackers(); + + TEST_EQUAL(tr.size(), 1); + announce_entry const& ae = tr[0]; + TEST_EQUAL(ae.scrape_incomplete, 2); + TEST_EQUAL(ae.scrape_complete, 1); + TEST_EQUAL(ae.scrape_downloaded, 3); + } + , "/scrape"); +} + TORRENT_TEST(test_http_status) { announce_entry_test( diff --git a/src/file.cpp b/src/file.cpp index 6cc99de01..9f42c1bfe 100644 --- a/src/file.cpp +++ b/src/file.cpp @@ -2050,6 +2050,7 @@ typedef struct _FILE_ALLOCATED_RANGE_BUFFER { } } +#if _WIN32_WINNT >= 0x0600 // only if Windows Vista or newer if ((m_open_mode & sparse) == 0) { typedef DWORD (WINAPI *GetFileInformationByHandleEx_t)(HANDLE hFile @@ -2097,6 +2098,7 @@ typedef struct _FILE_ALLOCATED_RANGE_BUFFER { set_file_valid_data(m_file_handle, s); } } +#endif // if Windows Vista #else // NON-WINDOWS struct stat st; if (fstat(native_handle(), &st) != 0) diff --git a/src/file_storage.cpp b/src/file_storage.cpp index 987b2bb61..db11be4f0 100644 --- a/src/file_storage.cpp +++ b/src/file_storage.cpp @@ -419,7 +419,8 @@ namespace libtorrent return m_files[index].name_len; } - std::vector file_storage::map_block(int piece, boost::int64_t offset + std::vector file_storage::map_block(int const piece + , boost::int64_t const offset , int size) const { TORRENT_ASSERT_PRECOND(num_files() > 0); @@ -433,6 +434,10 @@ namespace libtorrent TORRENT_ASSERT_PRECOND(boost::int64_t(target.offset + size) <= m_total_size); TORRENT_ASSERT(!compare_file_offset(target, m_files.front())); + // in case the size is past the end, fix it up + if (boost::int64_t(target.offset + size) > m_total_size) + size = m_total_size - target.offset; + std::vector::const_iterator file_iter = std::upper_bound( m_files.begin(), m_files.end(), target, compare_file_offset); diff --git a/src/peer_connection.cpp b/src/peer_connection.cpp index 615f119e3..32eb5cac2 100644 --- a/src/peer_connection.cpp +++ b/src/peer_connection.cpp @@ -3937,7 +3937,7 @@ namespace libtorrent // the verification will fail for coalesced blocks TORRENT_ASSERT(verify_piece(r) || m_request_large_blocks); - + #ifndef TORRENT_DISABLE_EXTENSIONS bool handled = false; for 
(extension_list_t::iterator i = m_extensions.begin() diff --git a/src/session_impl.cpp b/src/session_impl.cpp index 3919640fb..dba5c94a9 100644 --- a/src/session_impl.cpp +++ b/src/session_impl.cpp @@ -3219,7 +3219,7 @@ retry: // false means it's not triggered by the user, but automatically // by libtorrent - t.scrape_tracker(false); + t.scrape_tracker(-1, false); ++m_next_scrape_torrent; if (m_next_scrape_torrent >= int(want_scrape.size())) diff --git a/src/socket_type.cpp b/src/socket_type.cpp index 580ae20ec..2da3327a5 100644 --- a/src/socket_type.cpp +++ b/src/socket_type.cpp @@ -98,9 +98,11 @@ namespace libtorrent #define CASE(t) case socket_type_int_impl >::value: \ s.get >()->set_verify_callback( \ boost::asio::ssl::rfc2818_verification(hostname), ec); \ - ctx = SSL_get_SSL_CTX(s.get >()->native_handle()); \ + ssl = s.get >()->native_handle(); \ + ctx = SSL_get_SSL_CTX(ssl); \ break; + SSL* ssl = 0; SSL_CTX* ctx = 0; switch(s.type()) @@ -119,6 +121,14 @@ namespace libtorrent aux::openssl_set_tlsext_servername_arg(ctx, 0); } #endif // OPENSSL_VERSION_NUMBER + +#ifdef SSL_CTRL_SET_TLSEXT_HOSTNAME + if (ssl) + { + aux::openssl_set_tlsext_hostname(ssl, hostname.c_str()); + } +#endif + #else TORRENT_UNUSED(ec); TORRENT_UNUSED(hostname); diff --git a/src/storage.cpp b/src/storage.cpp index 7f18bb6df..2ba8bbc6d 100644 --- a/src/storage.cpp +++ b/src/storage.cpp @@ -1017,8 +1017,6 @@ namespace libtorrent error_code error; std::string file_path = fs.file_path(i, m_save_path); stat_file(file_path, &s, error); - file_size = s.file_size; - file_time = s.mtime; if (error) { if (error != boost::system::errc::no_such_file_or_directory) @@ -1037,6 +1035,13 @@ namespace libtorrent ec.operation = storage_error::none; return false; } + file_size = 0; + file_time = 0; + } + else + { + file_size = s.file_size; + file_time = s.mtime; } } diff --git a/src/torrent.cpp b/src/torrent.cpp index 5239b841b..17c3c5fdd 100644 --- a/src/torrent.cpp +++ b/src/torrent.cpp @@ -3279,15 +3279,15 @@ namespace libtorrent update_tracker_timer(now); } - void torrent::scrape_tracker(bool user_triggered) + void torrent::scrape_tracker(int idx, bool user_triggered) { TORRENT_ASSERT(is_single_thread()); m_last_scrape = m_ses.session_time(); if (m_trackers.empty()) return; - int i = m_last_working_tracker; - if (i == -1) i = 0; + if (idx < 0 || idx >= int(m_trackers.size())) idx = m_last_working_tracker; + if (idx < 0) idx = 0; tracker_request req; if (settings().get_bool(settings_pack::apply_ip_filter_to_trackers) @@ -3296,7 +3296,7 @@ namespace libtorrent req.info_hash = m_torrent_file->info_hash(); req.kind |= tracker_request::scrape_request; - req.url = m_trackers[i].url; + req.url = m_trackers[idx].url; #ifndef TORRENT_NO_DEPRECATE req.auth = tracker_login(); #endif diff --git a/src/torrent_handle.cpp b/src/torrent_handle.cpp index 623e1792d..33feaa2e5 100644 --- a/src/torrent_handle.cpp +++ b/src/torrent_handle.cpp @@ -800,9 +800,9 @@ namespace libtorrent ses.disk_thread().files().get_status(&status, &t->storage()); } - void torrent_handle::scrape_tracker() const + void torrent_handle::scrape_tracker(int idx) const { - TORRENT_ASYNC_CALL1(scrape_tracker, true); + TORRENT_ASYNC_CALL2(scrape_tracker, idx, true); } void torrent_handle::super_seeding(bool on) const diff --git a/src/web_peer_connection.cpp b/src/web_peer_connection.cpp index 8726ee3ca..5f9a2b1fd 100644 --- a/src/web_peer_connection.cpp +++ b/src/web_peer_connection.cpp @@ -74,9 +74,7 @@ web_peer_connection::web_peer_connection(peer_connection_args const& pack 
, m_url(web.url) , m_web(&web) , m_received_body(0) - , m_range_pos(0) , m_chunk_pos(0) - , m_block_pos(0) , m_partial_chunk_header(0) , m_num_responses(0) { @@ -183,14 +181,13 @@ void web_peer_connection::disconnect(error_code const& ec // upper layer will call downloading_piece_progress and assume // it's all wasted download. Since we're saving it here, it isn't. m_requests.clear(); - m_block_pos = 0; } if (m_web && !m_web->supports_keepalive && error == 0) { // if the web server doesn't support keepalive and we were // disconnected as a graceful EOF, reconnect right away - if (t) t->session().get_io_service().post( + if (t) get_io_service().post( boost::bind(&torrent::maybe_connect_web_seeds, t)); } peer_connection::disconnect(ec, op, error); @@ -209,12 +206,12 @@ web_peer_connection::downloading_piece_progress() const piece_block_progress ret; ret.piece_index = m_requests.front().piece; - ret.bytes_downloaded = m_block_pos % t->block_size(); + ret.bytes_downloaded = m_piece.size(); // this is used to make sure that the block_index stays within // bounds. If the entire piece is downloaded, the block_index // would otherwise point to one past the end - int correction = m_block_pos ? -1 : 0; - ret.block_index = (m_requests.front().start + m_block_pos + correction) / t->block_size(); + int correction = m_piece.size() ? -1 : 0; + ret.block_index = (m_requests.front().start + m_piece.size() + correction) / t->block_size(); TORRENT_ASSERT(ret.block_index < int(piece_block::invalid.block_index)); TORRENT_ASSERT(ret.piece_index < int(piece_block::invalid.piece_index)); @@ -292,7 +289,6 @@ void web_peer_connection::write_request(peer_request const& r) if (m_web->restart_request == m_requests.front()) { m_piece.swap(m_web->restart_piece); - m_block_pos += m_piece.size(); peer_request& front = m_requests.front(); TORRENT_ASSERT(front.length > int(m_piece.size())); @@ -323,23 +319,36 @@ void web_peer_connection::write_request(peer_request const& r) bool using_proxy = (proxy_type == settings_pack::http || proxy_type == settings_pack::http_pw) && !m_ssl; + // the number of pad files that have been "requested". In case we _only_ + // request padfiles, we can't rely on handling them in the on_receive() + // callback (because we won't receive anything), instead we have to post a + // pretend read callback where we can deliver the zeroes for the partfile + int num_pad_files = 0; + + // TODO: 2 do we really need a special case here? wouldn't the multi-file + // case handle single file torrents correctly too? if (single_file_request) { + file_request_t file_req; + file_req.file_index = 0; + file_req.start = boost::int64_t(req.piece) * info.piece_length() + + req.start; + file_req.length = req.length; + request += "GET "; - // do not encode single file paths, they are + // do not encode single file paths, they are // assumed to be encoded in the torrent file request += using_proxy ? 
m_url : m_path; request += " HTTP/1.1\r\n"; add_headers(request, m_settings, using_proxy); request += "\r\nRange: bytes="; - request += to_string(boost::int64_t(req.piece) * info.piece_length() - + req.start).elems; + request += to_string(file_req.start).elems; request += "-"; - request += to_string(boost::int64_t(req.piece) * info.piece_length() - + req.start + req.length - 1).elems; + request += to_string(file_req.start + file_req.length - 1).elems; request += "\r\n\r\n"; m_first_request = false; - m_file_requests.push_back(0); + + m_file_requests.push_back(file_req); } else { @@ -356,9 +365,16 @@ void web_peer_connection::write_request(peer_request const& r) i != files.end(); ++i) { file_slice const& f = *i; + + file_request_t file_req; + file_req.file_index = f.file_index; + file_req.start = f.offset; + file_req.length = f.size; + if (info.orig_files().pad_file_at(f.file_index)) { - m_file_requests.push_back(f.file_index); + m_file_requests.push_back(file_req); + ++num_pad_files; continue; } @@ -401,74 +417,81 @@ void web_peer_connection::write_request(peer_request const& r) << " e: " << (f.offset + f.size - 1) << std::endl; #endif TORRENT_ASSERT(f.file_index >= 0); - m_file_requests.push_back(f.file_index); + + m_file_requests.push_back(file_req); } } + if (num_pad_files == int(m_file_requests.size())) + { + get_io_service().post(boost::bind( + &web_peer_connection::on_receive_padfile, + boost::static_pointer_cast(self()))); + return; + } + #ifndef TORRENT_DISABLE_LOGGING peer_log(peer_log_alert::outgoing_message, "REQUEST", "%s", request.c_str()); #endif - // in case the first file on this series of requests is a padfile - // we need to handle it right now, and pretend that we got a response - // with zeros. - buffer::const_interval recv_buffer = m_recv_buffer.get(); - handle_padfile(recv_buffer); - if (associated_torrent().expired()) return; - send_buffer(request.c_str(), request.size(), message_type_request); } +namespace { + + std::string get_peer_name(http_parser const& p, std::string const& host) + { + std::string ret = "URL seed @ "; + ret += host; + + std::string const& server_version = p.header("server"); + if (!server_version.empty()) + { + ret += " ("; + ret += server_version; + ret += ")"; + } + return ret; + } + + boost::tuple get_range( + http_parser const& parser, error_code& ec) + { + boost::int64_t range_start; + boost::int64_t range_end; + if (parser.status_code() == 206) + { + boost::tie(range_start, range_end) = parser.content_range(); + if (range_start < 0 || range_end < range_start) + { + ec = errors::invalid_range; + range_start = 0; + range_end = 0; + } + else + { + // the http range is inclusive + range_end++; + } + } + else + { + range_start = 0; + range_end = parser.content_length(); + if (range_end < 0) + { + range_end = 0; + ec = errors::no_content_length; + } + } + return boost::tuple(range_start, range_end); + } +} + // -------------------------- // RECEIVE DATA // -------------------------- -namespace -{ - bool range_contains(peer_request const& range, peer_request const& req, int piece_size) - { - boost::int64_t range_start = boost::int64_t(range.piece) * piece_size + range.start; - boost::int64_t req_start = boost::int64_t(req.piece) * piece_size + req.start; - return range_start <= req_start - && range_start + range.length >= req_start + req.length; - } -} - -bool web_peer_connection::maybe_harvest_block() -{ - peer_request const& front_request = m_requests.front(); - - if (int(m_piece.size()) < front_request.length) return false; - 
TORRENT_ASSERT(int(m_piece.size()) == front_request.length); - - // each call to incoming_piece() may result in us becoming - // a seed. If we become a seed, all seeds we're connected to - // will be disconnected, including this web seed. We need to - // check for the disconnect condition after the call. - - boost::shared_ptr t = associated_torrent().lock(); - TORRENT_ASSERT(t); - buffer::const_interval recv_buffer = m_recv_buffer.get(); - - incoming_piece(front_request, &m_piece[0]); -#ifndef TORRENT_DISABLE_LOGGING - peer_log(peer_log_alert::incoming_message, "POP_REQUEST" - , "piece: %d start: %d len: %d" - , front_request.piece, front_request.start, front_request.length); -#endif - m_requests.pop_front(); - if (associated_torrent().expired()) return false; - TORRENT_ASSERT(m_block_pos >= front_request.length); - m_block_pos -= front_request.length; - m_recv_buffer.cut(m_body_start, t->block_size() + request_size_overhead); - m_body_start = 0; - recv_buffer = m_recv_buffer.get(); -// TORRENT_ASSERT(m_received_body <= range_end - range_start); - m_piece.clear(); - TORRENT_ASSERT(m_piece.empty()); - return true; -} - bool web_peer_connection::received_invalid_data(int index, bool single_peer) { if (!single_peer) return peer_connection::received_invalid_data(index, single_peer); @@ -514,18 +537,108 @@ bool web_peer_connection::received_invalid_data(int index, bool single_peer) return false; } +void web_peer_connection::on_receive_padfile() +{ + handle_padfile(); +} + +void web_peer_connection::handle_error(int bytes_left) +{ + boost::shared_ptr t = associated_torrent().lock(); + TORRENT_ASSERT(t); + + // TODO: 2 just make this peer not have the pieces + // associated with the file we just requested. Only + // when it doesn't have any of the file do the following + int retry_time = atoi(m_parser.header("retry-after").c_str()); + if (retry_time <= 0) retry_time = m_settings.get_int(settings_pack::urlseed_wait_retry); + // temporarily unavailable, retry later + t->retry_web_seed(this, retry_time); + std::string error_msg = to_string(m_parser.status_code()).elems + + (" " + m_parser.message()); + if (t->alerts().should_post()) + { + t->alerts().emplace_alert(t->get_handle(), m_url + , error_msg); + } + received_bytes(0, bytes_left); + disconnect(error_code(m_parser.status_code(), get_http_category()), op_bittorrent, 1); + return; +} + +void web_peer_connection::handle_redirect(int bytes_left) +{ + // this means we got a redirection request + // look for the location header + std::string location = m_parser.header("location"); + received_bytes(0, bytes_left); + + boost::shared_ptr t = associated_torrent().lock(); + TORRENT_ASSERT(t); + + if (location.empty()) + { + // we should not try this server again. + t->remove_web_seed(this, errors::missing_location, op_bittorrent, 2); + m_web = NULL; + TORRENT_ASSERT(is_disconnecting()); + return; + } + + bool single_file_request = false; + if (!m_path.empty() && m_path[m_path.size() - 1] != '/') + single_file_request = true; + + // add the redirected url and remove the current one + if (!single_file_request) + { + TORRENT_ASSERT(!m_file_requests.empty()); + int const file_index = m_file_requests.front().file_index; + + if (!t->need_loaded()) + { + disconnect(errors::torrent_aborted, op_bittorrent); + return; + } + // TODO: 2 create a mapping of file-index to redirection URLs. Use that to form + // URLs instead. 
Support to reconnect to a new server without destructing this + // peer_connection + torrent_info const& info = t->torrent_file(); + std::string path = info.orig_files().file_path(file_index); +#ifdef TORRENT_WINDOWS + convert_path_to_posix(path); +#endif + path = escape_path(path.c_str(), path.length()); + size_t i = location.rfind(path); + if (i == std::string::npos) + { + t->remove_web_seed(this, errors::invalid_redirection, op_bittorrent, 2); + m_web = NULL; + TORRENT_ASSERT(is_disconnecting()); + return; + } + location.resize(i); + } + else + { + location = resolve_redirect_location(m_url, location); + } + +#ifndef TORRENT_DISABLE_LOGGING + peer_log(peer_log_alert::info, "LOCATION", "%s", location.c_str()); +#endif + t->add_web_seed(location, web_seed_entry::url_seed, m_external_auth, m_extra_headers); + t->remove_web_seed(this, errors::redirecting, op_bittorrent, 2); + m_web = NULL; + TORRENT_ASSERT(is_disconnecting()); + return; +} + void web_peer_connection::on_receive(error_code const& error , std::size_t bytes_transferred) { INVARIANT_CHECK; -#if TORRENT_USE_ASSERTS - TORRENT_ASSERT(statistics().last_payload_downloaded() - + statistics().last_protocol_downloaded() + bytes_transferred < size_t(INT_MAX)); - int dl_target = statistics().last_payload_downloaded() - + statistics().last_protocol_downloaded() + bytes_transferred; -#endif - if (error) { received_bytes(0, bytes_transferred); @@ -533,23 +646,20 @@ void web_peer_connection::on_receive(error_code const& error peer_log(peer_log_alert::info, "ERROR" , "web_peer_connection error: %s", error.message().c_str()); #endif - TORRENT_ASSERT(statistics().last_payload_downloaded() - + statistics().last_protocol_downloaded() - == dl_target); return; } boost::shared_ptr t = associated_torrent().lock(); TORRENT_ASSERT(t); + // in case the first file on this series of requests is a padfile + // we need to handle it right now + buffer::const_interval recv_buffer = m_recv_buffer.get(); + handle_padfile(); + if (associated_torrent().expired()) return; + for (;;) { - TORRENT_ASSERT(statistics().last_payload_downloaded() - + statistics().last_protocol_downloaded() + int(bytes_transferred) - == dl_target); - - buffer::const_interval recv_buffer = m_recv_buffer.get(); - int payload; int protocol; bool header_finished = m_parser.header_finished(); @@ -558,45 +668,32 @@ void web_peer_connection::on_receive(error_code const& error bool failed = false; boost::tie(payload, protocol) = m_parser.incoming(recv_buffer, failed); received_bytes(0, protocol); - TORRENT_ASSERT(int(bytes_transferred) >= protocol); - bytes_transferred -= protocol; + TORRENT_ASSERT(int(recv_buffer.left()) >= protocol); if (failed) { - received_bytes(0, bytes_transferred); + received_bytes(0, recv_buffer.left()); #ifndef TORRENT_DISABLE_LOGGING peer_log(peer_log_alert::info, "RECEIVE_BYTES" , "%s", std::string(recv_buffer.begin, recv_buffer.end).c_str()); #endif disconnect(errors::http_parse_error, op_bittorrent, 2); - TORRENT_ASSERT(statistics().last_payload_downloaded() - + statistics().last_protocol_downloaded() - == dl_target); return; } TORRENT_ASSERT(recv_buffer.left() == 0 || *recv_buffer.begin == 'H'); - TORRENT_ASSERT(recv_buffer.left() <= m_recv_buffer.packet_size()); // this means the entire status line hasn't been received yet if (m_parser.status_code() == -1) { TORRENT_ASSERT(payload == 0); - TORRENT_ASSERT(bytes_transferred == 0); - TORRENT_ASSERT(statistics().last_payload_downloaded() - + statistics().last_protocol_downloaded() + int(bytes_transferred) - == 
dl_target); break; } if (!m_parser.header_finished()) { TORRENT_ASSERT(payload == 0); - TORRENT_ASSERT(bytes_transferred == 0); - TORRENT_ASSERT(statistics().last_payload_downloaded() - + statistics().last_protocol_downloaded() + int(bytes_transferred) - == dl_target); break; } @@ -624,189 +721,109 @@ void web_peer_connection::on_receive(error_code const& error , end(headers.end()); i != end; ++i) peer_log(peer_log_alert::info, "STATUS", " %s: %s", i->first.c_str(), i->second.c_str()); #endif + // if the status code is not one of the accepted ones, abort if (!is_ok_status(m_parser.status_code())) { - // TODO: 2 just make this peer not have the pieces - // associated with the file we just requested. Only - // when it doesn't have any of the file do the following - int retry_time = atoi(m_parser.header("retry-after").c_str()); - if (retry_time <= 0) retry_time = m_settings.get_int(settings_pack::urlseed_wait_retry); - // temporarily unavailable, retry later - t->retry_web_seed(this, retry_time); - std::string error_msg = to_string(m_parser.status_code()).elems - + (" " + m_parser.message()); - if (t->alerts().should_post()) - { - t->alerts().emplace_alert(t->get_handle(), m_url - , error_msg); - } - received_bytes(0, bytes_transferred); - disconnect(error_code(m_parser.status_code(), get_http_category()), op_bittorrent, 1); -#ifdef TORRENT_DEBUG - TORRENT_ASSERT(statistics().last_payload_downloaded() - + statistics().last_protocol_downloaded() - == dl_target); -#endif + handle_error(recv_buffer.left()); return; } + if (is_redirect(m_parser.status_code())) { - // this means we got a redirection request - // look for the location header - std::string location = m_parser.header("location"); - received_bytes(0, bytes_transferred); - - if (location.empty()) - { - // we should not try this server again. - t->remove_web_seed(this, errors::missing_location, op_bittorrent, 2); - m_web = NULL; - TORRENT_ASSERT(is_disconnecting()); -#ifdef TORRENT_DEBUG - TORRENT_ASSERT(statistics().last_payload_downloaded() - + statistics().last_protocol_downloaded() - == dl_target); -#endif - return; - } - - bool single_file_request = false; - if (!m_path.empty() && m_path[m_path.size() - 1] != '/') - single_file_request = true; - - // add the redirected url and remove the current one - if (!single_file_request) - { - TORRENT_ASSERT(!m_file_requests.empty()); - int file_index = m_file_requests.front(); - - if (!t->need_loaded()) - { - disconnect(errors::torrent_aborted, op_bittorrent); - return; - } -// TODO: 2 create a mapping of file-index to redirection URLs. Use that to form -// URLs instead. 
Support to reconnect to a new server without destructing this -// peer_connection - torrent_info const& info = t->torrent_file(); - std::string path = info.orig_files().file_path(file_index); -#ifdef TORRENT_WINDOWS - convert_path_to_posix(path); -#endif - path = escape_path(path.c_str(), path.length()); - size_t i = location.rfind(path); - if (i == std::string::npos) - { - t->remove_web_seed(this, errors::invalid_redirection, op_bittorrent, 2); - m_web = NULL; - TORRENT_ASSERT(is_disconnecting()); -#ifdef TORRENT_DEBUG - TORRENT_ASSERT(statistics().last_payload_downloaded() - + statistics().last_protocol_downloaded() - == dl_target); -#endif - return; - } - location.resize(i); - } - else - { - location = resolve_redirect_location(m_url, location); - } - -#ifndef TORRENT_DISABLE_LOGGING - peer_log(peer_log_alert::info, "LOCATION", "%s", location.c_str()); -#endif - t->add_web_seed(location, web_seed_entry::url_seed, m_external_auth, m_extra_headers); - t->remove_web_seed(this, errors::redirecting, op_bittorrent, 2); - m_web = NULL; - TORRENT_ASSERT(is_disconnecting()); -#ifdef TORRENT_DEBUG - TORRENT_ASSERT(statistics().last_payload_downloaded() - + statistics().last_protocol_downloaded() - == dl_target); -#endif - return; - } - - std::string const& server_version = m_parser.header("server"); - if (!server_version.empty()) - { - m_server_string = "URL seed @ "; - m_server_string += m_host; - m_server_string += " ("; - m_server_string += server_version; - m_server_string += ")"; - } - - m_body_start = m_parser.body_start(); - m_received_body = 0; - m_range_pos = 0; + handle_redirect(recv_buffer.left()); + return; } + m_server_string = get_peer_name(m_parser, m_host); + recv_buffer.begin += m_body_start; - // we only received the header, no data - if (recv_buffer.left() == 0) - { -#ifdef TORRENT_DEBUG - TORRENT_ASSERT(statistics().last_payload_downloaded() - + statistics().last_protocol_downloaded() - == dl_target); -#endif - break; - } + m_body_start = m_parser.body_start(); + m_received_body = 0; + } - boost::int64_t range_start; - boost::int64_t range_end; - if (m_parser.status_code() == 206) - { - boost::tie(range_start, range_end) = m_parser.content_range(); - if (range_start < 0 || range_end < range_start) - { - received_bytes(0, bytes_transferred); - // we should not try this server again. - t->remove_web_seed(this, errors::invalid_range, op_bittorrent); - m_web = NULL; - TORRENT_ASSERT(is_disconnecting()); -#ifdef TORRENT_DEBUG - TORRENT_ASSERT(statistics().last_payload_downloaded() - + statistics().last_protocol_downloaded() - == dl_target); + // we only received the header, no data + if (recv_buffer.left() == 0) break; + + // =================================== + // ======= RESPONSE BYTE RANGE ======= + // =================================== + + // despite the HTTP range being inclusive, range_start and range_end are + // exclusive to fit better into C++. i.e. range_end points one byte past + // the end of the payload + boost::int64_t range_start; + boost::int64_t range_end; + error_code ec; + boost::tie(range_start, range_end) = get_range(m_parser, ec); + if (ec) + { + received_bytes(0, recv_buffer.left()); + // we should not try this server again. 
+ t->remove_web_seed(this, ec, op_bittorrent, 2); + m_web = NULL; + TORRENT_ASSERT(is_disconnecting()); + return; + } + + TORRENT_ASSERT(!m_file_requests.empty()); + file_request_t const& file_req = m_file_requests.front(); + if (range_start != file_req.start + || range_end != file_req.start + file_req.length) + { + // the byte range in the http response is different what we expected + received_bytes(0, recv_buffer.left()); + +#ifndef TORRENT_DISABLE_LOGGING + peer_log(peer_log_alert::incoming, "INVALID HTTP RESPONSE" + , "in=(%d, %" PRId64 "-%" PRId64 ") expected=(%d, %" PRId64 "-%" PRId64 ") ]" + , file_req.file_index, range_start, range_end + , file_req.file_index, file_req.start, file_req.start + file_req.length - 1); #endif - return; - } - // the http range is inclusive - range_end++; - } - else - { - range_start = 0; - range_end = m_parser.content_length(); - if (range_end == -1) - { - received_bytes(0, bytes_transferred); - // we should not try this server again. - t->remove_web_seed(this, errors::no_content_length, op_bittorrent, 2); - m_web = NULL; - TORRENT_ASSERT(is_disconnecting()); -#ifdef TORRENT_DEBUG - TORRENT_ASSERT(statistics().last_payload_downloaded() - + statistics().last_protocol_downloaded() - == dl_target); -#endif - return; - } - } + disconnect(errors::invalid_range, op_bittorrent, 2); + return; + } + + if (m_parser.chunked_encoding()) + { // ========================= // === CHUNKED ENCODING === // ========================= - while (m_parser.chunked_encoding() - && m_chunk_pos >= 0 - && m_chunk_pos < recv_buffer.left()) + + while (m_chunk_pos >= 0 && recv_buffer.left() > 0) { + // first deliver any payload we have in the buffer so far, ahead of + // the next chunk header. + if (m_chunk_pos > 0) + { + int const copy_size = (std::min)(m_chunk_pos, recv_buffer.left()); + TORRENT_ASSERT(copy_size > 0); + + if (m_received_body + copy_size > file_req.length) + { + // the byte range in the http response is different what we expected + received_bytes(0, recv_buffer.left()); + +#ifndef TORRENT_DISABLE_LOGGING + peer_log(peer_log_alert::incoming, "INVALID HTTP RESPONSE" + , "received body: %d request size: %d" + , m_received_body, file_req.length); +#endif + disconnect(errors::invalid_range, op_bittorrent, 2); + return; + } + incoming_payload(recv_buffer.begin, copy_size); + + recv_buffer.begin += copy_size; + m_chunk_pos -= copy_size; + + if (recv_buffer.left() == 0) goto done; + } + + TORRENT_ASSERT(m_chunk_pos == 0); + int header_size = 0; boost::int64_t chunk_size = 0; buffer::const_interval chunk_start = recv_buffer; @@ -816,245 +833,83 @@ void web_peer_connection::on_receive(error_code const& error bool ret = m_parser.parse_chunk_header(chunk_start, &chunk_size, &header_size); if (!ret) { - TORRENT_ASSERT(int(bytes_transferred) >= chunk_start.left() - m_partial_chunk_header); - bytes_transferred -= chunk_start.left() - m_partial_chunk_header; received_bytes(0, chunk_start.left() - m_partial_chunk_header); m_partial_chunk_header = chunk_start.left(); - if (bytes_transferred == 0) return; + goto done; + } +#ifndef TORRENT_DISABLE_LOGGING + peer_log(peer_log_alert::info, "CHUNKED_ENCODING" + , "parsed chunk: %" PRId64 " header_size: %d" + , chunk_size, header_size); +#endif + received_bytes(0, header_size - m_partial_chunk_header); + m_partial_chunk_header = 0; + TORRENT_ASSERT(chunk_size != 0 + || chunk_start.left() <= header_size || chunk_start.begin[header_size] == 'H'); + TORRENT_ASSERT(m_body_start + m_chunk_pos < INT_MAX); + m_chunk_pos += chunk_size; + 
recv_buffer.begin += header_size; + + // a chunk size of zero means the request is complete. Make sure the + // number of payload bytes we've received matches the number we + // requested. If that's not the case, we got an invalid response. + if (chunk_size == 0) + { + TORRENT_ASSERT_VAL(m_chunk_pos == 0, m_chunk_pos); + +#ifdef TORRENT_DEBUG + chunk_start = recv_buffer; + chunk_start.begin += m_chunk_pos; + TORRENT_ASSERT(chunk_start.left() == 0 || chunk_start.begin[0] == 'H'); +#endif + m_chunk_pos = -1; + + TORRENT_ASSERT(m_received_body <= file_req.length); + if (m_received_body != file_req.length) + { + // the byte range in the http response is different what we expected + received_bytes(0, recv_buffer.left()); + +#ifndef TORRENT_DISABLE_LOGGING + peer_log(peer_log_alert::incoming, "INVALID HTTP RESPONSE" + , "received body: %d request size: %d" + , m_received_body, file_req.length); +#endif + disconnect(errors::invalid_range, op_bittorrent, 2); + return; + } + // we just completed an HTTP file request. pop it from m_file_requests + m_file_requests.pop_front(); + m_parser.reset(); + m_body_start = 0; + m_received_body = 0; + m_chunk_pos = 0; + m_partial_chunk_header = 0; + + // in between each file request, there may be an implicit + // pad-file request + handle_padfile(); break; } - else - { -#ifndef TORRENT_DISABLE_LOGGING - peer_log(peer_log_alert::info, "PARSE" - , "parsed chunk: %" PRId64 " header_size: %d" - , chunk_size, header_size); -#endif - TORRENT_ASSERT(int(bytes_transferred) >= header_size - m_partial_chunk_header); - bytes_transferred -= header_size - m_partial_chunk_header; - received_bytes(0, header_size - m_partial_chunk_header); - m_partial_chunk_header = 0; - TORRENT_ASSERT(chunk_size != 0 || chunk_start.left() <= header_size || chunk_start.begin[header_size] == 'H'); - // cut out the chunk header from the receive buffer - TORRENT_ASSERT(m_body_start + m_chunk_pos < INT_MAX); - m_recv_buffer.cut(header_size, t->block_size() + request_size_overhead, int(m_body_start + m_chunk_pos)); - recv_buffer = m_recv_buffer.get(); - recv_buffer.begin += m_body_start; - m_chunk_pos += chunk_size; - if (chunk_size == 0) - { -#ifdef TORRENT_DEBUG - chunk_start = recv_buffer; - chunk_start.begin += m_chunk_pos; - TORRENT_ASSERT(chunk_start.left() == 0 || chunk_start.begin[0] == 'H'); -#endif - m_chunk_pos = -1; - } - // if all of hte receive buffer was just consumed as chunk - // header, we're done - if (bytes_transferred == 0) return; - } + + // if all of the receive buffer was just consumed as chunk + // header, we're done + if (recv_buffer.left() == 0) goto done; } + } + else + { + // this is the simple case, where we don't have chunked encoding + TORRENT_ASSERT(m_received_body <= file_req.length); + int const copy_size = (std::min)(file_req.length - m_received_body + , recv_buffer.left()); + incoming_payload(recv_buffer.begin, copy_size); + recv_buffer.begin += copy_size; - if (m_requests.empty() || m_file_requests.empty()) + TORRENT_ASSERT(m_received_body <= file_req.length); + if (m_received_body == file_req.length) { - received_bytes(0, bytes_transferred); - disconnect(errors::http_error, op_bittorrent, 2); -#ifdef TORRENT_DEBUG - TORRENT_ASSERT(statistics().last_payload_downloaded() - + statistics().last_protocol_downloaded() - == dl_target); -#endif - return; - } - - boost::int64_t left_in_response = range_end - range_start - m_range_pos; - int payload_transferred = int((std::min)(left_in_response, boost::int64_t(bytes_transferred))); - - torrent_info const& info = 
t->torrent_file(); - - peer_request front_request = m_requests.front(); - - TORRENT_ASSERT(m_block_pos >= 0); - -#ifndef TORRENT_DISABLE_LOGGING - peer_log(peer_log_alert::info, "TRANSFER" - , "payload_transferred: %d [ %d:%d = %d ]" - , payload_transferred, front_request.piece - , front_request.start, front_request.length); -#endif - received_bytes(payload_transferred, 0); - TORRENT_ASSERT(int(bytes_transferred) >= payload_transferred); - bytes_transferred -= payload_transferred; - m_range_pos += payload_transferred; - m_block_pos += payload_transferred; - if (m_range_pos > range_end - range_start) m_range_pos = range_end - range_start; - - if (!t->need_loaded()) - { - disconnect(errors::torrent_aborted, op_bittorrent); - return; - } - int file_index = m_file_requests.front(); - peer_request in_range = info.orig_files().map_file(file_index, range_start - , int(range_end - range_start)); - - // request start - boost::int64_t rs = boost::int64_t(in_range.piece) * info.piece_length() + in_range.start; - // request end - boost::int64_t re = rs + in_range.length; - // file start - boost::int64_t fs = boost::int64_t(front_request.piece) * info.piece_length() + front_request.start; - - // the http response body consists of 3 parts - // 1. the middle of a block or the ending of a block - // 2. a number of whole blocks - // 3. the start of a block - // in that order, these parts are parsed. - - bool range_overlaps_request = re >= fs + int(m_piece.size()); - - if (!range_overlaps_request) - { - // this means the end of the incoming request ends _before_ the - // first expected byte (fs + m_piece.size()) - - incoming_piece_fragment((std::min)(payload_transferred - , front_request.length - m_block_pos)); - received_bytes(0, bytes_transferred); - -#ifndef TORRENT_DISABLE_LOGGING - std::vector sl = info.orig_files().map_block( - front_request.piece, front_request.start, front_request.start - + front_request.length); - peer_log(peer_log_alert::incoming, "INVALID HTTP RESPONSE" - , "in=(%d, %" PRId64 "-%" PRId64 ") expected=(%d, %" PRId64 "-%" PRId64 ") piece: %d ]" - , file_index, range_start, range_end, sl[0].file_index - , sl[0].offset, sl[0].offset + sl[0].size, front_request.piece); -#endif - disconnect(errors::invalid_range, op_bittorrent, 2); - return; - } - - // if the request is contained in the range (i.e. the entire request - // fits in the range) we should not start a partial piece, since we soon - // will receive enough to call incoming_piece() and pass the read buffer - // directly (in the next loop below). - if (range_overlaps_request - && !range_contains(in_range, front_request, info.piece_length())) - { - // the start of the next block to receive is stored - // in m_piece. We need to append the rest of that - // block from the http receive buffer and then - // (if it completed) call incoming_piece() with - // m_piece as buffer. 
- - int piece_size = int(m_piece.size()); - int copy_size = (std::min)((std::min)(front_request.length - piece_size - , recv_buffer.left()), int(range_end - range_start - m_received_body)); - if (copy_size > m_chunk_pos && m_chunk_pos > 0) copy_size = m_chunk_pos; - if (copy_size > 0) - { - TORRENT_ASSERT(int(m_piece.size()) == m_received_in_piece); - m_piece.resize(piece_size + copy_size); - std::memcpy(&m_piece[0] + piece_size, recv_buffer.begin, copy_size); - TORRENT_ASSERT(int(m_piece.size()) <= front_request.length); - recv_buffer.begin += copy_size; - m_received_body += copy_size; - m_body_start += copy_size; - if (m_chunk_pos > 0) - { - TORRENT_ASSERT(m_chunk_pos >= copy_size); - m_chunk_pos -= copy_size; - } - TORRENT_ASSERT(m_received_body <= range_end - range_start); - TORRENT_ASSERT(int(m_piece.size()) <= front_request.length); - incoming_piece_fragment(copy_size); - TORRENT_ASSERT(int(m_piece.size()) == m_received_in_piece); - } - - if (maybe_harvest_block()) - recv_buffer = m_recv_buffer.get(); - if (associated_torrent().expired()) return; - } - - // report all received blocks to the bittorrent engine - while (!m_requests.empty() - && range_contains(in_range, m_requests.front(), info.piece_length()) - && m_block_pos >= m_requests.front().length) - { - peer_request r = m_requests.front(); - TORRENT_ASSERT(recv_buffer.left() >= r.length); - - incoming_piece_fragment(r.length); - incoming_piece(r, recv_buffer.begin); - -#ifndef TORRENT_DISABLE_LOGGING - peer_log(peer_log_alert::incoming_message, "POP_REQUEST" - , "piece: %d start: %d len: %d" - , r.piece, r.start, r.length); -#endif - m_requests.pop_front(); - if (associated_torrent().expired()) return; - TORRENT_ASSERT(m_block_pos >= r.length); - m_block_pos -= r.length; - m_received_body += r.length; - TORRENT_ASSERT(m_recv_buffer.get().begin + m_body_start == recv_buffer.begin); - TORRENT_ASSERT(m_received_body <= range_end - range_start); - m_recv_buffer.cut(m_body_start + r.length, t->block_size() + request_size_overhead); - if (m_chunk_pos > 0) - { - TORRENT_ASSERT(m_chunk_pos >= r.length); - m_chunk_pos -= r.length; - } - m_body_start = 0; - recv_buffer = m_recv_buffer.get(); - } - - if (!m_requests.empty()) - { - if (in_range.start + in_range.length < m_requests.front().start + m_requests.front().length - && (m_received_body + recv_buffer.left() >= range_end - range_start)) - { - int piece_size = int(m_piece.size()); - int copy_size = (std::min)((std::min)(m_requests.front().length - piece_size - , recv_buffer.left()), int(range_end - range_start - m_received_body)); - TORRENT_ASSERT(copy_size >= 0); - if (copy_size > 0) - { - TORRENT_ASSERT(int(m_piece.size()) == m_received_in_piece); - m_piece.resize(piece_size + copy_size); - std::memcpy(&m_piece[0] + piece_size, recv_buffer.begin, copy_size); - recv_buffer.begin += copy_size; - m_received_body += copy_size; - m_body_start += copy_size; - incoming_piece_fragment(copy_size); - TORRENT_ASSERT(int(m_piece.size()) == m_received_in_piece); - } - TORRENT_ASSERT(m_received_body == range_end - range_start); - } - } - - TORRENT_ASSERT(m_received_body <= range_end - range_start); - // if we're in chunked encoding mode, we have to wait for the complete - // tail header before we can consider have received the block, otherwise - // we'll get out of sync with the next http response. 
m_chunk_pos is set - // to -1 when the tail header has been received - if (m_received_body == range_end - range_start - && (!m_parser.chunked_encoding() || m_chunk_pos == -1)) - { - int size_to_cut = recv_buffer.begin - m_recv_buffer.get().begin; - - TORRENT_ASSERT(m_recv_buffer.get().left() < size_to_cut + 1 - || m_recv_buffer.get()[size_to_cut] == 'H'); - - m_recv_buffer.cut(size_to_cut, t->block_size() + request_size_overhead); - if (m_chunk_pos > 0) - { - TORRENT_ASSERT(m_chunk_pos >= size_to_cut); - m_chunk_pos -= size_to_cut; - } - recv_buffer = m_recv_buffer.get(); + // we just completed an HTTP file request. pop it from m_file_requests m_file_requests.pop_front(); m_parser.reset(); m_body_start = 0; @@ -1062,73 +917,171 @@ void web_peer_connection::on_receive(error_code const& error m_chunk_pos = 0; m_partial_chunk_header = 0; - if (!t->need_loaded()) - { - disconnect(errors::torrent_aborted, op_bittorrent); - return; - } - - handle_padfile(recv_buffer); - if (associated_torrent().expired()) return; - continue; + // in between each file request, there may be an implicit + // pad-file request + handle_padfile(); } - - if (bytes_transferred == 0 || payload_transferred == 0) - { -#ifdef TORRENT_DEBUG - TORRENT_ASSERT(statistics().last_payload_downloaded() - + statistics().last_protocol_downloaded() - == dl_target); -#endif - break; - } - TORRENT_ASSERT(payload_transferred > 0); } - TORRENT_ASSERT(bytes_transferred == 0); -#ifdef TORRENT_DEBUG - TORRENT_ASSERT(statistics().last_payload_downloaded() - + statistics().last_protocol_downloaded() == dl_target); + + if (recv_buffer.left() == 0) break; + } +done: + + // now, remove all the bytes we've processed from the receive buffer + m_recv_buffer.cut(recv_buffer.begin - m_recv_buffer.get().begin + , t->block_size() + request_size_overhead); +} + +void web_peer_connection::incoming_payload(char const* buf, int len) +{ + received_bytes(len, 0); + m_received_body += len; + +#ifndef TORRENT_DISABLE_LOGGING + peer_log(peer_log_alert::incoming_message, "INCOMING_PAYLOAD", "%d bytes", len); #endif - } - void web_peer_connection::get_specific_peer_info(peer_info& p) const + // deliver all complete bittorrent requests to the bittorrent engine + while (len > 0) { - web_connection_base::get_specific_peer_info(p); - p.flags |= peer_info::local_connection; - p.connection_type = peer_info::web_seed; - } + TORRENT_ASSERT(!m_requests.empty()); + peer_request const& front_request = m_requests.front(); + int const piece_size = int(m_piece.size()); + int const copy_size = (std::min)(front_request.length - piece_size, len); - void web_peer_connection::handle_padfile(buffer::const_interval& recv_buffer) - { - boost::shared_ptr t = associated_torrent().lock(); - TORRENT_ASSERT(t); - torrent_info const& info = t->torrent_file(); + // m_piece may not hold more than the response to the next BT request + TORRENT_ASSERT(front_request.length > piece_size); - while (!m_file_requests.empty() - && info.orig_files().pad_file_at(m_file_requests.front())) + // copy_size is the number of bytes we need to add to the end of m_piece + // to not exceed the size of the next bittorrent request to be delivered. 
+ // m_piece can only hold the response for a single BT request at a time + m_piece.resize(piece_size + copy_size); + std::memcpy(&m_piece[0] + piece_size, buf, copy_size); + len -= copy_size; + buf += copy_size; + + // keep peer stats up-to-date + incoming_piece_fragment(copy_size); + + TORRENT_ASSERT(front_request.length >= piece_size); + if (int(m_piece.size()) == front_request.length) { - // the next file is a pad file. We didn't actually send - // a request for this since it most likely doesn't exist on - // the web server anyway. Just pretend that we received a - // bunch of zeroes here and pop it again - int file_index = m_file_requests.front(); - m_file_requests.pop_front(); - boost::int64_t file_size = info.orig_files().file_size(file_index); + boost::shared_ptr t = associated_torrent().lock(); + TORRENT_ASSERT(t); - peer_request front_request = m_requests.front(); +#ifndef TORRENT_DISABLE_LOGGING + peer_log(peer_log_alert::incoming_message, "POP_REQUEST" + , "piece: %d start: %d len: %d" + , front_request.piece, front_request.start, front_request.length); +#endif + m_requests.pop_front(); - TORRENT_ASSERT(m_block_pos < front_request.length); - int pad_size = int((std::min)(file_size, boost::int64_t(front_request.length - m_block_pos))); - - // insert zeroes to represent the pad file - m_piece.resize(m_piece.size() + size_t(pad_size), 0); - m_block_pos += pad_size; - incoming_piece_fragment(pad_size); - - if (maybe_harvest_block()) - recv_buffer = m_recv_buffer.get(); - if (associated_torrent().expired()) return; + incoming_piece(front_request, &m_piece[0]); + m_piece.clear(); } } } +void web_peer_connection::incoming_zeroes(int len) +{ +#ifndef TORRENT_DISABLE_LOGGING + peer_log(peer_log_alert::incoming_message, "INCOMING_ZEROES", "%d bytes", len); +#endif + + // deliver all complete bittorrent requests to the bittorrent engine + while (len > 0) + { + TORRENT_ASSERT(!m_requests.empty()); + peer_request const& front_request = m_requests.front(); + int const piece_size = int(m_piece.size()); + int const copy_size = (std::min)(front_request.length - piece_size, len); + + // m_piece may not hold more than the response to the next BT request + TORRENT_ASSERT(front_request.length > piece_size); + + // copy_size is the number of bytes we need to add to the end of m_piece + // to not exceed the size of the next bittorrent request to be delivered. 
+ // m_piece can only hold the response for a single BT request at a time + m_piece.resize(piece_size + copy_size, 0); + len -= copy_size; + + // keep peer stats up-to-date + incoming_piece_fragment(copy_size); + + maybe_harvest_piece(); + } +} + +void web_peer_connection::maybe_harvest_piece() +{ + peer_request const& front_request = m_requests.front(); + TORRENT_ASSERT(front_request.length >= int(m_piece.size())); + if (int(m_piece.size()) != front_request.length) return; + + boost::shared_ptr t = associated_torrent().lock(); + TORRENT_ASSERT(t); + +#ifndef TORRENT_DISABLE_LOGGING + peer_log(peer_log_alert::incoming_message, "POP_REQUEST" + , "piece: %d start: %d len: %d" + , front_request.piece, front_request.start, front_request.length); +#endif + m_requests.pop_front(); + + incoming_piece(front_request, &m_piece[0]); + m_piece.clear(); +} + +void web_peer_connection::get_specific_peer_info(peer_info& p) const +{ + web_connection_base::get_specific_peer_info(p); + p.flags |= peer_info::local_connection; + p.connection_type = peer_info::web_seed; +} + +void web_peer_connection::handle_padfile() +{ + if (m_file_requests.empty()) return; + if (m_requests.empty()) return; + + boost::shared_ptr t = associated_torrent().lock(); + TORRENT_ASSERT(t); + torrent_info const& info = t->torrent_file(); + + while (!m_file_requests.empty() + && info.orig_files().pad_file_at(m_file_requests.front().file_index)) + { + // the next file is a pad file. We didn't actually send + // a request for this since it most likely doesn't exist on + // the web server anyway. Just pretend that we received a + // bunch of zeroes here and pop it again + boost::int64_t file_size = m_file_requests.front().length; + + // in theory the pad file can span multiple bocks, hence the loop + while (file_size > 0) + { + peer_request const front_request = m_requests.front(); + TORRENT_ASSERT(m_piece.size() < front_request.length); + + int pad_size = int((std::min)(file_size + , boost::int64_t(front_request.length - m_piece.size()))); + TORRENT_ASSERT(pad_size > 0); + file_size -= pad_size; + + incoming_zeroes(pad_size); + +#ifndef TORRENT_DISABLE_LOGGING + peer_log(peer_log_alert::info, "HANDLE_PADFILE" + , "file: %d start: %" PRId64 " len: %d" + , m_file_requests.front().file_index + , m_file_requests.front().start + , m_file_requests.front().length); +#endif + } + + m_file_requests.pop_front(); + } +} + +} // libtorrent namespace + diff --git a/test/Jamfile b/test/Jamfile index 3536a7d3b..89ab73a42 100644 --- a/test/Jamfile +++ b/test/Jamfile @@ -60,6 +60,7 @@ lib libtorrent_test swarm_suite.cpp test_utils.cpp settings.cpp + make_torrent.cpp : # requirements # this is used to determine whether diff --git a/test/make_torrent.cpp b/test/make_torrent.cpp new file mode 100644 index 000000000..03264037a --- /dev/null +++ b/test/make_torrent.cpp @@ -0,0 +1,203 @@ +/* + +Copyright (c) 2016, Arvid Norberg +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the distribution. 
+ * Neither the name of the author nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +*/ + +#include +#include + +#include "make_torrent.hpp" +#include "libtorrent/storage.hpp" +#include "libtorrent/hasher.hpp" +#include "libtorrent/entry.hpp" +#include "libtorrent/bencode.hpp" +#include "libtorrent/file_pool.hpp" +#include "libtorrent/storage_defs.hpp" + +using namespace libtorrent; + +boost::shared_ptr make_test_torrent( + torrent_args const& args) +{ + entry e; + + entry::dictionary_type& info = e["info"].dict(); + int total_size = 0; + + if (args.m_priv) + { + info["priv"] = 1; + } + + // torrent offset ranges where the pad files are + // used when generating hashes + std::deque > pad_files; + + int const piece_length = 32768; + info["piece length"] = piece_length; + + if (args.m_files.size() == 1) + { + std::string const& ent = args.m_files[0]; + std::string name = "test_file-1"; + if (ent.find("name=") != std::string::npos) + { + int pos = ent.find("name=") + 5; + name = ent.substr(pos, ent.find(',', pos)); + } + info["name"] = name; + int file_size = atoi(args.m_files[0].c_str()); + info["length"] = file_size; + total_size = file_size; + } + else + { + info["name"] = args.m_name; + + entry::list_type& files = info["files"].list(); + for (int i = 0; i < int(args.m_files.size()); ++i) + { + int file_size = atoi(args.m_files[i].c_str()); + + files.push_back(entry()); + entry::dictionary_type& file_entry = files.back().dict(); + std::string const& ent = args.m_files[i]; + if (ent.find("padfile") != std::string::npos) + { + file_entry["attr"].string() += "p"; + pad_files.push_back(std::make_pair(total_size, total_size + file_size)); + } + if (ent.find("executable") != std::string::npos) + file_entry["attr"].string() += "x"; + + char filename[100]; + snprintf(filename, sizeof(filename), "test_file-%d", i); + + std::string name = filename; + if (ent.find("name=") != std::string::npos) + { + int pos = ent.find("name=") + 5; + name = ent.substr(pos, ent.find(',', pos)); + } + file_entry["path"].list().push_back(name); + file_entry["length"] = file_size; + total_size += file_size; + } + } + + if (!args.m_url_seed.empty()) + { + e["url-list"] = args.m_url_seed; + } + + if (!args.m_http_seed.empty()) + { + e["httpseeds"] = args.m_http_seed; + } + + std::string piece_hashes; + + int num_pieces = (total_size + piece_length - 1) / piece_length; + int torrent_offset = 0; + for (int i = 0; i < num_pieces; ++i) + { + hasher h; + int const piece_size = (i < num_pieces - 1) ? 
piece_length : total_size - (num_pieces - 1) * piece_length; + + char const data = i; + char const zero = 0; + for (int o = 0; o < piece_size; ++o, ++torrent_offset) + { + while (!pad_files.empty() && torrent_offset >= pad_files.front().second) + pad_files.pop_front(); + + if (!pad_files.empty() && torrent_offset >= pad_files.front().first) + { + h.update(&zero, 1); + } + else + { + h.update(&data, 1); + } + } + piece_hashes += h.final().to_string(); + } + + info["pieces"] = piece_hashes; + + std::vector tmp; + std::back_insert_iterator > out(tmp); + bencode(out, e); + + FILE* f = fopen("test.torrent", "w+"); + fwrite(&tmp[0], 1, tmp.size(), f); + fclose(f); + + return boost::make_shared(&tmp[0], tmp.size()); +} + +void generate_files(libtorrent::torrent_info const& ti, std::string const& path + , bool alternate_data) +{ + file_pool fp; + + storage_params params; + params.files = &ti.files(); + params.path = path; + params.pool = &fp; + + default_storage st(params); + + int const num_pieces = ti.num_pieces(); + + std::vector buffer; + for (int i = 0; i < num_pieces; ++i) + { + int const piece_size = ti.piece_size(i); + buffer.resize(ti.piece_length()); + + boost::uint8_t const data = alternate_data ? 255 - i : i; + for (int o = 0; o < piece_size; ++o) + { + memcpy(&buffer[o], &data, 1); + } + + file::iovec_t b = { &buffer[0], size_t(piece_size) }; + storage_error ec; + int ret = st.writev(&b, 1, i, 0, 0, ec); + if (ret != piece_size || ec) + { + fprintf(stderr, "ERROR writing files: (%d expected %d) %s\n" + , ret, piece_size, ec.ec.message().c_str()); + } + } +} + + diff --git a/test/make_torrent.hpp b/test/make_torrent.hpp new file mode 100644 index 000000000..5d701f737 --- /dev/null +++ b/test/make_torrent.hpp @@ -0,0 +1,62 @@ +/* + +Copyright (c) 2016, Arvid Norberg +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the distribution. + * Neither the name of the author nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. 
+ +*/ + +#include "libtorrent/torrent_info.hpp" +#include +#include +#include + +enum flags_t +{ + private_torrent = 1 +}; + +struct torrent_args +{ + torrent_args() : m_priv(false) {} + torrent_args& name(char const* n) { m_name = n; return *this; } + torrent_args& file(char const* f) { m_files.push_back(f); return *this; } + torrent_args& url_seed(char const* u) { m_url_seed = u; return *this; } + torrent_args& http_seed(char const* u) { m_http_seed = u; return *this; } + torrent_args& priv() { m_priv = true; return *this; } + + bool m_priv; + std::string m_name; + std::vector m_files; + std::string m_url_seed; + std::string m_http_seed; +}; + +boost::shared_ptr make_test_torrent(torrent_args const& args); + +void generate_files(libtorrent::torrent_info const& ti, std::string const& path, bool random = false); + diff --git a/test/web_seed_suite.cpp b/test/web_seed_suite.cpp index 07f4b6870..cc2c7141b 100644 --- a/test/web_seed_suite.cpp +++ b/test/web_seed_suite.cpp @@ -45,6 +45,7 @@ POSSIBILITY OF SUCH DAMAGE. #include "test.hpp" #include "setup_transfer.hpp" #include "web_seed_suite.hpp" +#include "make_torrent.hpp" #include #include @@ -70,18 +71,6 @@ bool on_alert(alert const* a) return false; } -const int num_pieces = 9; -/* -static sha1_hash file_hash(std::string const& name) -{ - std::vector buf; - error_code ec; - load_file(name, buf, ec); - if (buf.empty()) return sha1_hash(0); - hasher h(&buf[0], buf.size()); - return h.final(); -} -*/ static char const* proxy_name[] = {"", "_socks4", "_socks5", "_socks5_pw", "_http", "_http_pw", "_i2p"}; } // anonymous namespace @@ -143,6 +132,11 @@ void test_transfer(lt::session& ses, boost::shared_ptr torrent_fil add_torrent_params p; p.flags &= ~add_torrent_params::flag_paused; p.flags &= ~add_torrent_params::flag_auto_managed; + + // the reason to set sequential download is to make sure that the order in + // which files are requested from the web server is consistent. Any specific + // scenario that needs testing should be an explicit test case + p.flags |= add_torrent_params::flag_sequential_download; p.ti = torrent_file; p.save_path = save_path; #ifndef TORRENT_NO_DEPRECATE @@ -194,35 +188,41 @@ void test_transfer(lt::session& ses, boost::shared_ptr torrent_fil , int(s.total_payload_download), int(s.total_redundant_bytes)); TEST_EQUAL(s.total_payload_download - s.total_redundant_bytes, total_size - pad_file_size); - // we need to sleep here a bit to let the session sync with the torrent stats - // commented out because it takes such a long time -// TEST_EQUAL(ses.status().total_payload_download - ses.status().total_redundant_bytes -// , total_size - pad_file_size); break; } // if the web seed connection is disconnected, we're going to fail // the test. make sure to do so quickly - if (keepalive && peer_disconnects >= 1) break; + if (!test_ban && keepalive && peer_disconnects >= 1) break; test_sleep(100); } - // for test_ban tests, make sure we removed - // the url seed (i.e. banned it) - TEST_CHECK(!test_ban || (th.url_seeds().empty() && th.http_seeds().empty())); - cnt = get_counters(ses); - // if the web seed senr corrupt data and we banned it, we probably didn't - // end up using all the cache anyway - if (!test_ban) + if (test_ban) { + // for test_ban tests, make sure we removed + // the url seed (i.e. 
banned it) + // torrents that don't have very many pieces will not ban the web seeds, + // since they won't have an opportunity to accrue enough negative points + if (torrent_file->files().num_pieces() > 3) + { + TEST_CHECK(th.url_seeds().empty()); + TEST_CHECK(th.http_seeds().empty()); + } + } + else + { + // if the web seed sent corrupt data and we banned it, we probably didn't + // end up using all the cache anyway torrent_status st = th.status(); TEST_EQUAL(st.is_seeding, true); if (st.is_seeding) { + // we need to sleep here a bit to let the session sync with the torrent stats + // commented out because it takes such a long time for (int i = 0; i < 50; ++i) { cnt = get_counters(ses); @@ -235,8 +235,8 @@ void test_transfer(lt::session& ses, boost::shared_ptr torrent_fil , int(cnt["disk.disk_blocks_in_use"])); test_sleep(100); } - TEST_EQUAL(cnt["disk.disk_blocks_in_use"] - , (torrent_file->total_size() + 0x3fff) / 0x4000); + TEST_CHECK(std::abs(cnt["disk.disk_blocks_in_use"] + - (torrent_file->total_size() + 0x3fff) / 0x4000) <= 2); } } @@ -263,14 +263,17 @@ void test_transfer(lt::session& ses, boost::shared_ptr torrent_fil if (!test_ban) { - std::string first_file_path = combine_path(save_path, torrent_file->files().file_path(0)); - fprintf(stderr, "checking file: %s\n", first_file_path.c_str()); - TEST_CHECK(exists(first_file_path)); + file_storage const& fs = torrent_file->files(); + for (int i = 0; i < fs.num_files(); ++i) + { + bool const expect = !fs.pad_file_at(i); + std::string file_path = combine_path(save_path, fs.file_path(i)); + fprintf(stderr, "checking file: %s\n", file_path.c_str()); + TEST_EQUAL(exists(file_path), expect); + } } ses.remove_torrent(th); - - remove_all(save_path, ec); } // proxy: 0=none, 1=socks4, 2=socks5, 3=socks5_pw 4=http 5=http_pw @@ -286,139 +289,143 @@ int EXPORT run_http_suite(int proxy, char const* protocol, bool test_url_seed save_path += proxy_name[proxy]; error_code ec; - create_directories(combine_path(save_path, "torrent_dir"), ec); + int const port = start_web_server(strcmp(protocol, "https") == 0, chunked_encoding, keepalive); - file_storage fs; - std::srand(10); - int piece_size = 0x4000; - static const int file_sizes[] = - { 5, 16 - 5, 16000, 17, 10, 8000, 8000, 1,1,1,1,1,100,1,1,1,1,100,1,1,1,1,1,1 ,1,1,1,1,1,1,13,65000,34,75,2,30,400,500,23000,900,43000,400,4300,6, 4}; + std::vector<torrent_args> test_cases; if (test_url_seed) { - create_random_files(combine_path(save_path, "torrent_dir"), file_sizes, sizeof(file_sizes)/sizeof(file_sizes[0])); - add_files(fs, combine_path(save_path, "torrent_dir")); + char url[512]; + snprintf(url, sizeof(url), ("%s://127.0.0.1:%d/" + save_path).c_str(), protocol, port); + fprintf(stderr, "testing: %s\n", url); + + create_directories(combine_path(save_path, "torrent_dir"), ec); + + torrent_args args; + + // test case 1 + test_cases.push_back(torrent_args().file("0").file("5,padfile").file("11") + .file("16000").file("368,padfile") + .file("16384,padfile").file("16384,padfile").file("17").file("10") + .file("8000").file("8000").file("1").file("1").file("1").file("1") + .file("1").file("100").file("0").file("1").file("1").file("1") + .file("100").file("1").file("1").file("1").file("1").file("1,padfile") + .file("1,padfile").file("1,padfile").file("1").file("0").file("0") + .file("0").file("1").file("13").file("65000").file("34").file("75") + .file("2").file("30").file("400").file("500").file("23000") + .file("900").file("43000").file("400").file("4300").file("6") + .file("4,padfile") + .name("torrent_dir") +
.url_seed(url)); + + // test case 2 (the end of the torrent are padfiles) + test_cases.push_back(torrent_args() + .file("0,padfile") + .file("11") + .file("5") + .file("16000") + .file("368,padfile") + .file("16384,padfile") + .name("torrent_dir") + .url_seed(url)); + + // test case 3 (misaligned) + test_cases.push_back(torrent_args() + .file("16383") + .file("11") + .file("5") + .file("16000") + .name("torrent_dir") + .url_seed(url)); + + // test case 4 (a full piece padfile) + test_cases.push_back(torrent_args() + .file("32768,padfile") + .file("16000") + .file("11") + .file("5") + .name("torrent_dir") + .url_seed(url)); + + // test case 5 (properly aligned padfile) + test_cases.push_back(torrent_args() + .file("32760") + .file("8,padfile") + .file("32760") + .file("8") + .file("32700") + .file("68,padfile") + .file("32000") + .name("torrent_dir") + .url_seed(url)); + + snprintf(url, sizeof(url), ("%s://127.0.0.1:%d/" + save_path + "/test-single-file").c_str(), protocol, port); + + // test case 6 (single file torrent) + test_cases.push_back(torrent_args() + .file("199092,name=test-single-file") + .name("torrent_dir") + .url_seed(url)); } else { - piece_size = 64 * 1024; - char* random_data = (char*)malloc(64 * 1024 * num_pieces); - std::generate(random_data, random_data + 64 * 1024 * num_pieces, random_byte); - std::string seed_filename = combine_path(save_path, "seed"); - fprintf(stderr, "creating file: %s %s\n" - , current_working_directory().c_str(), seed_filename.c_str()); - save_file(seed_filename.c_str(), random_data, 64 * 1024 * num_pieces); - fs.add_file("seed", 64 * 1024 * num_pieces); - free(random_data); + char url[512]; + snprintf(url, sizeof(url), "%s://127.0.0.1:%d/%s/seed", protocol, port, save_path.c_str()); + fprintf(stderr, "testing: %s\n", url); + + // there's really just one test case for http seeds + test_cases.push_back(torrent_args().file("589824,name=seed") + .http_seed(url)); } - int port = start_web_server(strcmp(protocol, "https") == 0, chunked_encoding, keepalive); + for (int a = 0; a < int(test_cases.size()); ++a) + { + fprintf(stderr, "\n\n ==== test case %d ====\n\n\n", a); - // generate a torrent with pad files to make sure they - // are not requested web seeds - libtorrent::create_torrent t(fs, piece_size, 0x4000, libtorrent::create_torrent::optimize); + boost::shared_ptr torrent_file = make_test_torrent(test_cases[a]); - char tmp[512]; - if (test_url_seed) - { - snprintf(tmp, sizeof(tmp), ("%s://127.0.0.1:%d/" + save_path).c_str(), protocol, port); - t.add_url_seed(tmp); - } - else - { - snprintf(tmp, sizeof(tmp), "%s://127.0.0.1:%d/%s/seed", protocol, port, save_path.c_str()); - t.add_http_seed(tmp); - } - fprintf(stderr, "testing: %s\n", tmp); -/* - for (int i = 0; i < fs.num_files(); ++i) - { - file_entry f = fs.at(i); - fprintf(stderr, " %04x: %d %s\n", int(f.offset), f.pad_file, f.path.c_str()); - } -*/ - // calculate the hash for all pieces - set_piece_hashes(t, save_path, ec); + // if test_ban is true, we create the files with alternate content (that + // doesn't match the hashes in the .torrent file) + generate_files(*torrent_file, save_path, test_ban); - if (ec) - { - fprintf(stderr, "error creating hashes for test torrent: %s\n" - , ec.message().c_str()); - TEST_CHECK(false); - return 0; - } - - if (test_ban) - { - // corrupt the files now, so that the web seed will be banned - if (test_url_seed) + if (ec) { - create_random_files(combine_path(save_path, "torrent_dir"), file_sizes, sizeof(file_sizes)/sizeof(file_sizes[0])); + fprintf(stderr, 
"error creating hashes for test torrent: %s\n" + , ec.message().c_str()); + TEST_CHECK(false); + return 0; } - else + { - piece_size = 64 * 1024; - char* random_data = (char*)malloc(64 * 1024 * num_pieces); - std::generate(random_data, random_data + 64 * 1024 * num_pieces, random_byte); - save_file(combine_path(save_path, "seed").c_str(), random_data, 64 * 1024 * num_pieces); - free(random_data); - } - } + const int mask = alert::all_categories + & ~(alert::progress_notification + | alert::performance_warning + | alert::stats_notification); - std::vector buf; - bencode(std::back_inserter(buf), t.generate()); - boost::shared_ptr torrent_file(boost::make_shared(&buf[0], buf.size(), boost::ref(ec), 0)); + settings_pack pack; + pack.set_int(settings_pack::max_queued_disk_bytes, 256 * 1024); + pack.set_str(settings_pack::listen_interfaces, "0.0.0.0:51000"); + pack.set_int(settings_pack::max_retry_port_bind, 1000); + pack.set_int(settings_pack::alert_mask, mask); + pack.set_bool(settings_pack::enable_lsd, false); + pack.set_bool(settings_pack::enable_natpmp, false); + pack.set_bool(settings_pack::enable_upnp, false); + pack.set_bool(settings_pack::enable_dht, false); + libtorrent::session ses(pack, 0); - - // TODO: file hashes don't work with the new torrent creator reading async -/* - // no point in testing the hashes since we know the data is corrupt - if (!test_ban) - { - // verify that the file hashes are correct - for (int i = 0; i < torrent_file->num_files(); ++i) - { - sha1_hash h1 = torrent_file->file_at(i).filehash; - sha1_hash h2 = file_hash(combine_path(save_path - , torrent_file->file_at(i).path)); -// fprintf(stderr, "%s: %s == %s\n" -// , torrent_file->file_at(i).path.c_str() -// , to_hex(h1.to_string()).c_str(), to_hex(h2.to_string()).c_str()); - TEST_EQUAL(h1, h2); - } - } -*/ - { - const int mask = alert::all_categories - & ~(alert::progress_notification - | alert::performance_warning - | alert::stats_notification); - - settings_pack pack; - pack.set_int(settings_pack::max_queued_disk_bytes, 256 * 1024); - pack.set_str(settings_pack::listen_interfaces, "0.0.0.0:51000"); - pack.set_int(settings_pack::max_retry_port_bind, 1000); - pack.set_int(settings_pack::alert_mask, mask); - pack.set_bool(settings_pack::enable_lsd, false); - pack.set_bool(settings_pack::enable_natpmp, false); - pack.set_bool(settings_pack::enable_upnp, false); - pack.set_bool(settings_pack::enable_dht, false); - libtorrent::session ses(pack, 0); - - test_transfer(ses, torrent_file, proxy, port, protocol, test_url_seed - , chunked_encoding, test_ban, keepalive, proxy_peers); - - if (test_url_seed && test_rename) - { - torrent_file->rename_file(0, combine_path(save_path, combine_path("torrent_dir", "renamed_test1"))); - test_transfer(ses, torrent_file, 0, port, protocol, test_url_seed + test_transfer(ses, torrent_file, proxy, port, protocol, test_url_seed , chunked_encoding, test_ban, keepalive, proxy_peers); + + if (test_url_seed && test_rename) + { + torrent_file->rename_file(0, combine_path(save_path, combine_path("torrent_dir", "renamed_test1"))); + test_transfer(ses, torrent_file, 0, port, protocol, test_url_seed + , chunked_encoding, test_ban, keepalive, proxy_peers); + } } } stop_web_server(); - remove_all(save_path, ec); return 0; } diff --git a/test/web_server.py b/test/web_server.py index 5f97d1b5b..6f920326a 100755 --- a/test/web_server.py +++ b/test/web_server.py @@ -101,7 +101,7 @@ class http_handler(SimpleHTTPServer.SimpleHTTPRequestHandler): filename = 
os.path.normpath(s.path[1:s.path.find('seed?') + 4]) print 'filename = %s' % filename f = open(filename, 'rb') - f.seek(piece * 64 * 1024 + int(ranges[0])) + f.seek(piece * 32 * 1024 + int(ranges[0])) data = f.read(int(ranges[1]) - int(ranges[0]) + 1) f.close()
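
Example usage of the new test helpers declared in test/make_torrent.hpp (a minimal sketch; the URL, file sizes, and save path below are illustrative placeholders, not taken from the patch):

#include "make_torrent.hpp"
#include "libtorrent/session.hpp"
#include "libtorrent/add_torrent_params.hpp"

using namespace libtorrent;

int main()
{
	// describe a small multi-file test torrent. Each file string is
	// "<size>[,padfile][,name=<name>]", the mini-format parsed by
	// make_test_torrent(); 16000 + 384 keeps the pad file piece-aligned
	// with the 32 kiB piece size used by the helper
	torrent_args args;
	args.name("torrent_dir")
		.file("16000")
		.file("384,padfile")
		.file("5,name=small_file")
		.url_seed("http://127.0.0.1:8080/web_seed_suite");

	boost::shared_ptr<torrent_info> ti = make_test_torrent(args);

	// write the matching payload to disk so a local web server can serve it.
	// passing true for the last argument writes alternate data that
	// deliberately mismatches the piece hashes (as the ban tests do)
	generate_files(*ti, "web_seed_suite", false);

	add_torrent_params p;
	p.ti = ti;
	p.save_path = "./downloads";

	session ses;
	ses.add_torrent(p);
	return 0;
}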