made the metadata block requesting algorithm more robust against hash check failures

This commit is contained in:
Arvid Norberg 2011-01-27 06:49:32 +00:00
parent eb47f06ef9
commit a0338f6d24
2 changed files with 142 additions and 69 deletions

View File

@ -1,3 +1,4 @@
* made the metadata block requesting algorithm more robust against hash check failures
* support a separate option to use proxies for peers or not * support a separate option to use proxies for peers or not
* pausing the session now also pauses checking torrents * pausing the session now also pauses checking torrents
* moved alert queue size limit into session_settings * moved alert queue size limit into session_settings
@ -75,6 +76,7 @@
incoming connection incoming connection
* added more detailed instrumentation of the disk I/O thread * added more detailed instrumentation of the disk I/O thread
* fixed minor bug in metadata block requester (for magnet links)
* fixed race condition in iconv string converter * fixed race condition in iconv string converter
* fixed error handling in torrent_info constructor * fixed error handling in torrent_info constructor
* fixed bug in torrent_info::remap_files * fixed bug in torrent_info::remap_files

View File

@ -66,6 +66,8 @@ namespace libtorrent { namespace
return (numerator + denominator - 1) / denominator; return (numerator + denominator - 1) / denominator;
} }
struct ut_metadata_peer_plugin;
struct ut_metadata_plugin : torrent_plugin struct ut_metadata_plugin : torrent_plugin
{ {
ut_metadata_plugin(torrent& t) ut_metadata_plugin(torrent& t)
@ -100,57 +102,8 @@ namespace libtorrent { namespace
+ m_metadata_size); + m_metadata_size);
} }
bool received_metadata(char const* buf, int size, int piece, int total_size) bool received_metadata(boost::weak_ptr<ut_metadata_peer_plugin> const& source
{ , char const* buf, int size, int piece, int total_size);
if (m_torrent.valid_metadata()) return false;
if (!m_metadata)
{
// verify the total_size
if (total_size <= 0 || total_size > 500 * 1024) return false;
m_metadata.reset(new char[total_size]);
m_requested_metadata.resize(div_round_up(total_size, 16 * 1024), 0);
m_metadata_size = total_size;
}
if (piece < 0 || piece >= int(m_requested_metadata.size()))
return false;
if (total_size != m_metadata_size)
{
// they disagree about the size!
return false;
}
if (piece * 16 * 1024 + size > m_metadata_size)
{
// this piece is invalid
return false;
}
std::memcpy(&m_metadata[piece * 16 * 1024], buf, size);
// mark this piece has 'have'
m_requested_metadata[piece] = (std::numeric_limits<int>::max)();
bool have_all = std::count(m_requested_metadata.begin()
, m_requested_metadata.end(), (std::numeric_limits<int>::max)())
== int(m_requested_metadata.size());
if (!have_all) return false;
if (!m_torrent.set_metadata(&m_metadata[0], m_metadata_size))
{
if (!m_torrent.valid_metadata())
std::fill(m_requested_metadata.begin(), m_requested_metadata.end(), 0);
return false;
}
// clear the storage for the bitfield
std::vector<int>().swap(m_requested_metadata);
return true;
}
// returns a piece of the metadata that // returns a piece of the metadata that
// we should request. // we should request.
@ -178,7 +131,7 @@ namespace libtorrent { namespace
if (m_metadata_size > 0 || size <= 0 || size > 500 * 1024) return; if (m_metadata_size > 0 || size <= 0 || size > 500 * 1024) return;
m_metadata_size = size; m_metadata_size = size;
m_metadata.reset(new char[size]); m_metadata.reset(new char[size]);
m_requested_metadata.resize(div_round_up(size, 16 * 1024), 0); m_requested_metadata.resize(div_round_up(size, 16 * 1024));
} }
private: private:
@ -193,19 +146,28 @@ namespace libtorrent { namespace
int m_metadata_progress; int m_metadata_progress;
mutable int m_metadata_size; mutable int m_metadata_size;
// per-block book-keeping for one 16 kiB metadata block: how many
// times we have requested it, and which peer ended up supplying it
// (kept so that peer can be penalized if the assembled metadata
// later fails the info-hash check)
struct metadata_piece
{
	metadata_piece() : num_requests(0) {}

	// request counter; set to INT_MAX once the block is received
	int num_requests;

	// the peer we received this block from, if any
	boost::weak_ptr<ut_metadata_peer_plugin> source;

	// orders blocks by request count so min_element picks the
	// least-requested block to ask for next
	bool operator<(metadata_piece const& other) const
	{ return num_requests < other.num_requests; }
};
// this vector keeps track of how many times each metadata // this vector keeps track of how many times each metadata
// block has been requested // block has been requested and who we ended up getting it from
// std::numeric_limits<int>::max() means we have the piece // std::numeric_limits<int>::max() means we have the piece
std::vector<int> m_requested_metadata; std::vector<metadata_piece> m_requested_metadata;
}; };
struct ut_metadata_peer_plugin : peer_plugin struct ut_metadata_peer_plugin : peer_plugin, boost::enable_shared_from_this<ut_metadata_peer_plugin>
{ {
ut_metadata_peer_plugin(torrent& t, bt_peer_connection& pc ut_metadata_peer_plugin(torrent& t, bt_peer_connection& pc
, ut_metadata_plugin& tp) , ut_metadata_plugin& tp)
: m_message_index(0) : m_message_index(0)
, m_no_metadata(min_time()) , m_request_limit(min_time())
, m_torrent(t) , m_torrent(t)
, m_pc(pc) , m_pc(pc)
, m_tp(tp) , m_tp(tp)
@ -244,13 +206,13 @@ namespace libtorrent { namespace
void write_metadata_packet(int type, int piece) void write_metadata_packet(int type, int piece)
{ {
TORRENT_ASSERT(type >= 0 && type <= 2); TORRENT_ASSERT(type >= 0 && type <= 2);
TORRENT_ASSERT(piece >= 0);
TORRENT_ASSERT(!m_pc.associated_torrent().expired()); TORRENT_ASSERT(!m_pc.associated_torrent().expired());
#ifdef TORRENT_VERBOSE_LOGGING #ifdef TORRENT_VERBOSE_LOGGING
(*m_pc.m_logger) << time_now_string() << " ==> UT_METADATA [ " (*m_pc.m_logger) << time_now_string() << " ==> UT_METADATA [ "
"type: " << type << " | piece: " << piece << " ]\n"; "type: " << type << " | piece: " << piece << " ]\n";
#endif #endif
// abort if the peer doesn't support the metadata extension // abort if the peer doesn't support the metadata extension
if (m_message_index == 0) return; if (m_message_index == 0) return;
@ -263,6 +225,17 @@ namespace libtorrent { namespace
if (type == 1) if (type == 1)
{ {
if (piece < 0 || piece >= int(m_tp.metadata().left() + 16 * 1024 - 1)/(16*1024))
{
#ifdef TORRENT_VERBOSE_LOGGING
(*m_pc.m_logger) << time_now_string() << " <== UT_METADATA [ invalid piece "
<< piece << " metadata size: " << m_tp.metadata().left() << " ]\n";
#endif
m_pc.disconnect(errors::invalid_metadata_message, 2);
return;
}
TORRENT_ASSERT(m_pc.associated_torrent().lock()->valid_metadata()); TORRENT_ASSERT(m_pc.associated_torrent().lock()->valid_metadata());
e["total_size"] = m_tp.metadata().left(); e["total_size"] = m_tp.metadata().left();
int offset = piece * 16 * 1024; int offset = piece * 16 * 1024;
@ -297,6 +270,10 @@ namespace libtorrent { namespace
if (length > 17 * 1024) if (length > 17 * 1024)
{ {
#ifdef TORRENT_VERBOSE_LOGGING
(*m_pc.m_logger) << time_now_string() << " <== UT_METADATA [ packet too big "
<< length << " ]\n";
#endif
m_pc.disconnect(errors::invalid_metadata_message, 2); m_pc.disconnect(errors::invalid_metadata_message, 2);
return true; return true;
} }
@ -307,12 +284,26 @@ namespace libtorrent { namespace
entry msg = bdecode(body.begin, body.end, len); entry msg = bdecode(body.begin, body.end, len);
if (msg.type() == entry::undefined_t) if (msg.type() == entry::undefined_t)
{ {
#ifdef TORRENT_VERBOSE_LOGGING
(*m_pc.m_logger) << time_now_string() << " <== UT_METADATA [ not a dictionary ]\n";
#endif
m_pc.disconnect(errors::invalid_metadata_message, 2); m_pc.disconnect(errors::invalid_metadata_message, 2);
return true; return true;
} }
int type = msg["msg_type"].integer(); entry const* type_ent = msg.find_key("msg_type");
int piece = msg["piece"].integer(); entry const* piece_ent = msg.find_key("piece");
if (type_ent == 0 || type_ent->type() != entry::int_t
|| piece_ent == 0 || piece_ent->type() != entry::int_t)
{
#ifdef TORRENT_VERBOSE_LOGGING
(*m_pc.m_logger) << time_now_string() << " <== UT_METADATA [ missing or invalid keys ]\n";
#endif
m_pc.disconnect(errors::invalid_metadata_message, 2);
return true;
}
int type = type_ent->integer();
int piece = piece_ent->integer();
#ifdef TORRENT_VERBOSE_LOGGING #ifdef TORRENT_VERBOSE_LOGGING
(*m_pc.m_logger) << time_now_string() << " <== UT_METADATA [ " (*m_pc.m_logger) << time_now_string() << " <== UT_METADATA [ "
@ -342,14 +333,14 @@ namespace libtorrent { namespace
m_sent_requests.erase(i); m_sent_requests.erase(i);
entry const* total_size = msg.find_key("total_size"); entry const* total_size = msg.find_key("total_size");
m_tp.received_metadata(body.begin + len, body.left() - len, piece m_tp.received_metadata(shared_from_this(), body.begin + len, body.left() - len, piece
, (total_size && total_size->type() == entry::int_t) ? total_size->integer() : 0); , (total_size && total_size->type() == entry::int_t) ? total_size->integer() : 0);
maybe_send_request(); maybe_send_request();
} }
break; break;
case 2: // have no data case 2: // have no data
{ {
m_no_metadata = time_now(); m_request_limit = (std::min)(time_now() + minutes(1), m_request_limit);
std::vector<int>::iterator i = std::find(m_sent_requests.begin() std::vector<int>::iterator i = std::find(m_sent_requests.begin()
, m_sent_requests.end(), piece); , m_sent_requests.end(), piece);
// unwanted piece? // unwanted piece?
@ -388,7 +379,12 @@ namespace libtorrent { namespace
bool has_metadata() const bool has_metadata() const
{ {
return time_now() - m_no_metadata > minutes(1); return time_now() > m_request_limit;
}
void failed_hash_check(ptime const& now)
{
m_request_limit = now + seconds(20 + (rand() * 50) / RAND_MAX);
} }
private: private:
@ -397,9 +393,11 @@ namespace libtorrent { namespace
// for metadata extension messages. // for metadata extension messages.
int m_message_index; int m_message_index;
// this is set to the current time each time we get a // this is set to the next time we can request pieces
// "I don't have metadata" message. // again. It is updated every time we get a
ptime m_no_metadata; // "I don't have metadata" message, but also when
// we receive metadata that fails the infohash check
ptime m_request_limit;
// request queues // request queues
std::vector<int> m_sent_requests; std::vector<int> m_sent_requests;
@ -422,22 +420,95 @@ namespace libtorrent { namespace
int ut_metadata_plugin::metadata_request() int ut_metadata_plugin::metadata_request()
{ {
std::vector<int>::iterator i = std::min_element( std::vector<metadata_piece>::iterator i = std::min_element(
m_requested_metadata.begin(), m_requested_metadata.end()); m_requested_metadata.begin(), m_requested_metadata.end());
if (m_requested_metadata.empty()) if (m_requested_metadata.empty())
{ {
// if we don't know how many pieces there are // if we don't know how many pieces there are
// just ask for piece 0 // just ask for piece 0
m_requested_metadata.resize(1, 1); m_requested_metadata.resize(1);
return 0; i = m_requested_metadata.begin();
} }
int piece = i - m_requested_metadata.begin(); int piece = i - m_requested_metadata.begin();
m_requested_metadata[piece] = piece; ++m_requested_metadata[piece].num_requests;
return piece; return piece;
} }
// Called when a metadata block arrives from a peer. Stores the block
// and, once every block is present, hands the assembled buffer to the
// torrent for verification against the info-hash. On verification
// failure, each peer that contributed a block is given a randomized
// request-back-off so the next round of requests is spread over
// different peers (the robustness improvement this commit adds).
//
// source:     the peer plugin the block came from; remembered per
//             block so it can be penalized on a later hash failure
// buf, size:  the raw block payload
// piece:      the 16 kiB block index
// total_size: the total metadata size claimed by the peer
// returns true only when the metadata is complete and passes the
// info-hash check
inline bool ut_metadata_plugin::received_metadata(
	boost::weak_ptr<ut_metadata_peer_plugin> const& source
	, char const* buf, int size, int piece, int total_size)
{
	if (m_torrent.valid_metadata()) return false;

	if (!m_metadata)
	{
		// first block we see: sanity check the claimed total size
		// before allocating (capped at 500 kiB to bound the
		// allocation an attacker can trigger)
		if (total_size <= 0 || total_size > 500 * 1024) return false;
		m_metadata.reset(new char[total_size]);
		m_requested_metadata.resize(div_round_up(total_size, 16 * 1024));
		m_metadata_size = total_size;
	}

	if (piece < 0 || piece >= int(m_requested_metadata.size()))
		return false;

	if (total_size != m_metadata_size)
	{
		// they disagree about the size!
		return false;
	}

	if (piece * 16 * 1024 + size > m_metadata_size)
	{
		// this piece is invalid
		return false;
	}

	std::memcpy(&m_metadata[piece * 16 * 1024], buf, size);

	// mark this piece as 'have' (INT_MAX doubles as the have-flag in
	// num_requests) and remember who sent it
	m_requested_metadata[piece].num_requests = (std::numeric_limits<int>::max)();
	m_requested_metadata[piece].source = source;

	bool have_all = std::count_if(m_requested_metadata.begin()
		, m_requested_metadata.end(), boost::bind(&metadata_piece::num_requests, _1)
		== (std::numeric_limits<int>::max)()) == int(m_requested_metadata.size());

	if (!have_all) return false;

	if (!m_torrent.set_metadata(&m_metadata[0], m_metadata_size))
	{
		if (!m_torrent.valid_metadata())
		{
			ptime now = time_now();
			// any peer that we downloaded metadata from gets a random time
			// penalty, from 5 to 30 seconds or so. During this time we don't
			// make any metadata requests from those peers (to mix it up a bit
			// of which peers we use)
			// if we only have one block, and thus requested it from a single
			// peer, we bump up the retry time a lot more to try other peers
			bool single_peer = m_requested_metadata.size() == 1;
			// int(...) cast avoids a signed/unsigned comparison
			for (int i = 0; i < int(m_requested_metadata.size()); ++i)
			{
				m_requested_metadata[i].num_requests = 0;
				boost::shared_ptr<ut_metadata_peer_plugin> peer
					= m_requested_metadata[i].source.lock();
				if (!peer) continue;
				peer->failed_hash_check(single_peer ? now + minutes(5) : now);
			}
		}
		return false;
	}

	// metadata verified; clear the storage for the request book-keeping
	std::vector<metadata_piece>().swap(m_requested_metadata);
	return true;
}
} } } }
namespace libtorrent namespace libtorrent