Since the web seed downloader now calls incoming_piece with correct block sizes, incoming_piece can be reverted to not allow incorrect block sizes

Arvid Norberg 2006-12-21 11:37:03 +00:00
parent 1b46fb9e1b
commit 755d9f5967
2 changed files with 73 additions and 78 deletions
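
The asserts added to incoming_piece in the first hunk below encode the usual BitTorrent block-size rule: a request must start on a block boundary, and its length must be a full block except for the torrent's final block, whose length is the remainder of the total size. The following is a minimal standalone sketch of that rule, using hypothetical names and example sizes rather than libtorrent's API:

#include <cassert>

// Expected length of the block starting at 'offset' in a torrent of
// 'total_size' bytes, when every block is 'block_size' bytes except the
// final one (hypothetical free function, for illustration only).
int expected_block_length(long long offset, long long total_size, int block_size)
{
	assert(offset % block_size == 0);          // requests are block-aligned
	long long remaining = total_size - offset;
	return remaining < block_size ? int(remaining) : block_size;
}

int main()
{
	// e.g. a 41000-byte torrent with 16384-byte blocks: blocks at offsets 0 and
	// 16384 are full-sized, the last block (offset 32768) is 41000 % 16384 = 8232.
	assert(expected_block_length(0, 41000, 16384) == 16384);
	assert(expected_block_length(32768, 41000, 16384) == 8232);
	return 0;
}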


@@ -991,99 +991,94 @@ namespace libtorrent
 		std::vector<piece_block> finished_blocks;
 		piece_block block_finished(p.piece, p.start / t->block_size());
-		bool redundant = true;
-		for (;block_finished.block_index * t->block_size() < p.start + p.length;
-			++block_finished.block_index)
-		{
-			std::deque<piece_block>::iterator b
-				= std::find(
-					m_download_queue.begin()
-					, m_download_queue.end()
-					, block_finished);
-			std::deque<piece_block>::iterator i;
-			if (b != m_download_queue.end())
-			{
-				if (m_assume_fifo)
-				{
-					for (i = m_download_queue.begin();
-						i != b; ++i)
-					{
-#ifdef TORRENT_VERBOSE_LOGGING
-						(*m_logger) << to_simple_string(second_clock::universal_time())
-							<< " *** SKIPPED_PIECE [ piece: " << i->piece_index << " | "
-							"b: " << i->block_index << " ] ***\n";
-#endif
-						// since this piece was skipped, clear it and allow it to
-						// be requested from other peers
-						// TODO: send cancel?
-						picker.abort_download(*i);
-					}
-					// remove the request that just finished
-					// from the download queue plus the
-					// skipped blocks.
-					m_download_queue.erase(m_download_queue.begin()
-						, boost::next(b));
-				}
-				else
-				{
-					m_download_queue.erase(b);
-				}
-			}
-			else
-			{
-				// cancel the block from the
-				// peer that has taken over it.
-				boost::optional<tcp::endpoint> peer
-					= t->picker().get_downloader(block_finished);
-				if (peer)
-				{
-					assert(!t->picker().is_finished(block_finished));
-					peer_connection* pc = t->connection_for(*peer);
-					if (pc && pc != this)
-					{
-						pc->cancel_request(block_finished);
-					}
-				}
-				else
-				{
-					if (t->alerts().should_post(alert::debug))
-					{
-						t->alerts().post_alert(
-							peer_error_alert(
-								m_remote
-								, m_peer_id
-								, "got a block that was not requested"));
-					}
-#ifdef TORRENT_VERBOSE_LOGGING
-					(*m_logger) << " *** The block we just got was not in the "
-						"request queue ***\n";
-#endif
-				}
-			}
-			// if the block we got is already finished, then ignore it
-			if (picker.is_finished(block_finished))
-			{
-				t->received_redundant_data(t->block_size());
-			}
-			else
-			{
-				redundant = false;
-			}
-			picker.mark_as_finished(block_finished, m_remote);
-			t->get_policy().block_finished(*this, block_finished);
-			send_block_requests();
-		}
-		if (redundant) return;
+		assert(p.start % t->block_size() == 0);
+		assert(p.length == t->block_size()
+			|| p.length == t->torrent_file().total_size() % t->block_size());
+
+		std::deque<piece_block>::iterator b
+			= std::find(
+				m_download_queue.begin()
+				, m_download_queue.end()
+				, block_finished);
+		std::deque<piece_block>::iterator i;
+		if (b != m_download_queue.end())
+		{
+			if (m_assume_fifo)
+			{
+				for (i = m_download_queue.begin();
+					i != b; ++i)
+				{
+#ifdef TORRENT_VERBOSE_LOGGING
+					(*m_logger) << to_simple_string(second_clock::universal_time())
+						<< " *** SKIPPED_PIECE [ piece: " << i->piece_index << " | "
+						"b: " << i->block_index << " ] ***\n";
+#endif
+					// since this piece was skipped, clear it and allow it to
+					// be requested from other peers
+					// TODO: send cancel?
+					picker.abort_download(*i);
+				}
+				// remove the request that just finished
+				// from the download queue plus the
+				// skipped blocks.
+				m_download_queue.erase(m_download_queue.begin()
+					, boost::next(b));
+			}
+			else
+			{
+				m_download_queue.erase(b);
+			}
+		}
+		else
+		{
+			// cancel the block from the
+			// peer that has taken over it.
+			boost::optional<tcp::endpoint> peer
+				= t->picker().get_downloader(block_finished);
+			if (peer)
+			{
+				assert(!t->picker().is_finished(block_finished));
+				peer_connection* pc = t->connection_for(*peer);
+				if (pc && pc != this)
+				{
+					pc->cancel_request(block_finished);
+				}
+			}
+			else
+			{
+				if (t->alerts().should_post(alert::debug))
+				{
+					t->alerts().post_alert(
+						peer_error_alert(
+							m_remote
+							, m_peer_id
+							, "got a block that was not requested"));
+				}
+#ifdef TORRENT_VERBOSE_LOGGING
+				(*m_logger) << " *** The block we just got was not in the "
+					"request queue ***\n";
+#endif
+			}
+		}
+		// if the block we got is already finished, then ignore it
+		if (picker.is_finished(block_finished))
+		{
+			t->received_redundant_data(t->block_size());
+			send_block_requests();
+			return;
+		}
 		fs.write(data, p.piece, p.start, p.length);
+		picker.mark_as_finished(block_finished, m_remote);
+		t->get_policy().block_finished(*this, block_finished);
+		send_block_requests();
 		bool was_seed = t->is_seed();
 		bool was_finished = picker.num_filtered() + t->num_pieces()
 			== t->torrent_file().num_pieces();


@@ -395,7 +395,7 @@ namespace libtorrent
 			int copy_size = std::min(front_request.length - int(m_piece.size())
 				, http_body.left());
 			std::copy(http_body.begin, http_body.begin + copy_size, std::back_inserter(m_piece));
-			assert(int(m_piece.size() <= front_request.length));
+			assert(int(m_piece.size()) <= front_request.length);
 			http_body.begin += copy_size;
 			int piece_size = int(m_piece.size());
 			if (piece_size < front_request.length)
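
The second hunk only moves a misplaced parenthesis: the intent is to cast m_piece.size() to int and compare it against front_request.length, not to cast the whole comparison. A self-contained sketch of the surrounding buffering pattern, with hypothetical names standing in for the member variables:

#include <algorithm>
#include <cassert>
#include <iterator>
#include <vector>

// Hypothetical helper (illustration only): copy at most the bytes still
// missing for the current request into the block buffer, then check the
// invariant the corrected assert expresses.
void append_body_bytes(std::vector<char>& piece_buf, const char* body
	, int body_left, int request_length)
{
	int copy_size = std::min(request_length - int(piece_buf.size()), body_left);
	std::copy(body, body + copy_size, std::back_inserter(piece_buf));

	// the old assert cast the whole comparison to int; the fix casts only
	// the buffer size, which is what was intended
	assert(int(piece_buf.size()) <= request_length);
}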