fix http restart-piece bug whose fix apparently failed to merge from RC_1_0 a long time ago. Also improve logging of restart pieces

Arvid Norberg 2015-01-03 13:09:09 +00:00
parent 8589a7b01c
commit 290260054d
3 changed files with 41 additions and 19 deletions
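
The substantive fix is the two-line change in web_peer_connection::write_request() below: when a web seed request is restarted with partially downloaded piece data kept in m_piece, the outgoing request is advanced past the bytes already held, so the file slices have to be mapped from that adjusted request (req) rather than from the original block request (r). Mapping from r would re-request bytes already restored and the HTTP range would no longer line up with the first byte the parser expects next. The rest of the diff adds peer_log() output (SAVE-RESTART-DATA, RESTART-DATA, REQUESTING, POP REQUEST, INVALID HTTP RESPONSE) under TORRENT_LOGGING so the restart path can be traced. As a rough illustration of the offset arithmetic only, here is a minimal standalone sketch; the peer_request/file_slice structs and the single-file map_block() are simplified stand-ins rather than libtorrent's real types, and the piece size and byte counts are made up.

#include <cstdio>
#include <vector>

// Simplified stand-ins for libtorrent's peer_request / file_slice, used only
// to illustrate the restart arithmetic; not the library's actual types.
struct peer_request { int piece; int start; int length; };
struct file_slice { int file_index; long long offset; long long size; };

// Hypothetical single-file torrent: map a (piece, start, length) request to a
// byte range in that one file. libtorrent's orig_files().map_block() is more
// general; this keeps the example self-contained.
std::vector<file_slice> map_block(peer_request const& r, int piece_size)
{
	file_slice s;
	s.file_index = 0;
	s.offset = (long long)r.piece * piece_size + r.start;
	s.size = r.length;
	return std::vector<file_slice>(1, s);
}

int main()
{
	int const piece_size = 16 * 0x4000;      // 256 kiB pieces (assumed for the example)
	peer_request r = { 10, 0, piece_size };  // the block originally asked for
	int const already_have = 0x9000;         // bytes restored into m_piece on restart (made up)

	// the restart adjustment performed in write_request(): skip what we already hold
	peer_request req = r;
	req.start += already_have;
	req.length -= already_have;

	// pre-fix behaviour: file ranges mapped from the original request r
	std::vector<file_slice> wrong = map_block(r, piece_size);
	// post-fix behaviour: file ranges mapped from the adjusted request req
	std::vector<file_slice> right = map_block(req, piece_size);

	std::printf("mapped from r:   offset %lld length %lld (re-requests restored bytes)\n"
		, wrong[0].offset, wrong[0].size);
	std::printf("mapped from req: offset %lld length %lld (starts after restored bytes)\n"
		, right[0].offset, right[0].size);
	return 0;
}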


@@ -133,11 +133,10 @@ void web_peer_connection::disconnect(error_code const& ec
 	if (!m_requests.empty() && !m_file_requests.empty()
 		&& !m_piece.empty() && m_web)
 	{
-#if 0
-		std::cerr << this << " SAVE-RESTART-DATA: data: " << m_piece.size()
-			<< " req: " << m_requests.front().piece
-			<< " off: " << m_requests.front().start
-			<< std::endl;
+#ifdef TORRENT_LOGGING
+		peer_log("*** SAVE-RESTART-DATA: [ data: %d req: %d off: %d ]"
+			, int(m_piece.size()), int(m_requests.front().piece)
+			, int(m_requests.front().start));
 #endif
 		m_web->restart_request = m_requests.front();
 		if (!m_web->restart_piece.empty())
@@ -254,6 +253,11 @@ void web_peer_connection::write_request(peer_request const& r)
 		pr.piece = r.piece + request_offset / piece_size;
 		m_requests.push_back(pr);
+#ifdef TORRENT_LOGGING
+		peer_log("==> REQUESTING [ piece: %d start: %d len: %d ]"
+			, pr.piece, pr.start, pr.length);
+#endif
 		if (m_web->restart_request == m_requests.front())
 		{
 			m_piece.swap(m_web->restart_piece);
@@ -261,11 +265,10 @@ void web_peer_connection::write_request(peer_request const& r)
 			peer_request& front = m_requests.front();
 			TORRENT_ASSERT(front.length > int(m_piece.size()));
-#if 0
-			std::cerr << this << " RESTART-DATA: data: " << m_piece.size()
-				<< " req: ( " << front.piece << ", " << front.start
-				<< ", " << (front.start + front.length - 1) << ")"
-				<< std::endl;
+#ifdef TORRENT_LOGGING
+			peer_log("*** RESTART-DATA: [ data: %d req: (%d, %d) ]"
+				, int(m_piece.size()), int(front.piece), int(front.start)
+				, int (front.start + front.length - 1));
 #endif
 			req.start += m_piece.size();
@@ -313,8 +316,8 @@ void web_peer_connection::write_request(peer_request const& r)
 			return;
 		}
-		std::vector<file_slice> files = info.orig_files().map_block(r.piece, r.start
-			, r.length);
+		std::vector<file_slice> files = info.orig_files().map_block(req.piece, req.start
+			, req.length);
 		for (std::vector<file_slice>::iterator i = files.begin();
 			i != files.end(); ++i)
@@ -415,6 +418,10 @@ bool web_peer_connection::maybe_harvest_block()
 	buffer::const_interval recv_buffer = m_recv_buffer.get();
 	incoming_piece(front_request, &m_piece[0]);
+#ifdef TORRENT_LOGGING
+	peer_log("<== POP REQUEST [ piece: %d start: %d len: %d ]"
+		, front_request.piece, front_request.start, front_request.length);
+#endif
 	m_requests.pop_front();
 	if (associated_torrent().expired()) return false;
 	TORRENT_ASSERT(m_block_pos >= front_request.length);
@@ -878,11 +885,21 @@ void web_peer_connection::on_receive(error_code const& error
 			if (!range_overlaps_request)
 			{
+				// this means the end of the incoming request ends _before_ the
+				// first expected byte (fs + m_piece.size())
 				incoming_piece_fragment((std::min)(payload_transferred
 					, front_request.length - m_block_pos));
 				received_bytes(0, bytes_transferred);
-				// this means the end of the incoming request ends _before_ the
-				// first expected byte (fs + m_piece.size())
+
+#ifdef TORRENT_LOGGING
+				std::vector<file_slice> sl = info.orig_files().map_block(
+					front_request.piece, front_request.start, front_request.start
+					+ front_request.length);
+				peer_log("INVALID HTTP RESPONSE [ in=(%d, %d-%d) expected=(%d, %d-%d) piece: %d ]"
+					, file_index, range_start, range_end, sl[0].file_index
+					, sl[0].offset, sl[0].offset + sl[0].size, front_request.piece);
+#endif
 				disconnect(errors::invalid_range, op_bittorrent, 2);
 				return;
 			}
@@ -940,6 +957,10 @@ void web_peer_connection::on_receive(error_code const& error
 			incoming_piece_fragment(r.length);
 			incoming_piece(r, recv_buffer.begin);
+#ifdef TORRENT_LOGGING
+			peer_log("<== POP REQUEST [ piece: %d start: %d len: %d ]"
+				, r.piece, r.start, r.length);
+#endif
 			m_requests.pop_front();
 			if (associated_torrent().expired()) return;
 			TORRENT_ASSERT(m_block_pos >= r.length);


@@ -390,14 +390,12 @@ int EXPORT run_http_suite(int proxy, char const* protocol, bool test_url_seed
 	}
 	*/
 	{
-		libtorrent::session ses(fingerprint(" ", 0,0,0,0), 0);
 		settings_pack pack;
 		pack.set_int(settings_pack::max_queued_disk_bytes, 256 * 1024);
 		pack.set_str(settings_pack::listen_interfaces, "0.0.0.0:51000");
 		pack.set_int(settings_pack::max_retry_port_bind, 1000);
 		pack.set_int(settings_pack::alert_mask, ~(alert::progress_notification | alert::stats_notification));
-		ses.apply_settings(pack);
+		libtorrent::session ses(pack, 0);
 		test_transfer(ses, torrent_file, proxy, port, protocol, test_url_seed
 			, chunked_encoding, test_ban, keepalive);


@@ -28,7 +28,9 @@ class http_handler(SimpleHTTPServer.SimpleHTTPRequestHandler):
 	def do_GET(s):
-		#print s.requestline
+		print 'INCOMING-REQUEST: ', s.requestline
+		print s.headers
 		global chunked_encoding
 		global keepalive
@@ -99,7 +101,6 @@ class http_handler(SimpleHTTPServer.SimpleHTTPRequestHandler):
 		end_range = size
 		if 'Range' in s.headers:
 			s.send_response(206)
-			s.send_header('Content-Range', 'bytes ' + str(start_range) + '-' + str(end_range - 1) + '/' + str(size))
 			st, e = s.headers['range'][6:].split('-', 1)
 			sl = len(st)
 			el = len(e)
@@ -111,6 +112,8 @@ class http_handler(SimpleHTTPServer.SimpleHTTPRequestHandler):
 				ei = int(e)
 				if ei < size:
 					start_range = size - ei
+			s.send_header('Content-Range', 'bytes ' + str(start_range) \
+				+ '-' + str(end_range - 1) + '/' + str(size))
 		else:
 			s.send_response(200)
 		s.send_header('Accept-Ranges', 'bytes')