forked from premiere/premiere-libtorrent
fixed incorrect sorting of pieces in piece_picker
parent 8c6869b4f2
commit 8d31bf442a
@@ -186,8 +186,7 @@ project (including this documentation). The current state includes the
 following features:</p>
 <ul class="simple">
 <li>multitracker extension support (as <a class="reference" href="http://home.elp.rr.com/tur/multitracker-spec.txt">specified by John Hoffman</a>)</li>
-<li>serves multiple torrents on a single port and a single thread</li>
-<li>supports http proxies and proxy authentication</li>
+<li>serves multiple torrents on a single port and in a single thread</li>
 <li>gzipped tracker-responses</li>
 <li><a class="reference" href="#http-seeding">HTTP seeding</a>, as <a class="reference" href="http://www.getright.com/seedtorrent.html">specified by Michael Burford of GetRight</a>.</li>
 <li>piece picking on block-level like in <a class="reference" href="http://azureus.sourceforge.net">Azureus</a> (as opposed to piece-level).
@@ -195,6 +194,7 @@ This means it can download parts of the same piece from different peers.
 It will also prefer to download whole pieces from single peers if the
 download speed is high enough from that particular peer.</li>
 <li>queues torrents for file check, instead of checking all of them in parallel.</li>
+<li>supports http proxies and proxy authentication</li>
 <li>uses separate threads for checking files and for main downloader, with a
 fool-proof thread-safe library interface. (i.e. There's no way for the
 user to cause a deadlock). (see <a class="reference" href="#threads">threads</a>)</li>
@@ -232,7 +232,7 @@ network APIs on the most popular platforms. I/O completion ports on windows,
 epoll on linux and kqueue on MacOS X and BSD.</p>
 <p>libtorrent has been successfully compiled and tested on:</p>
 <ul class="simple">
-<li>Windows 2000 vc7.1</li>
+<li>Windows 2000 vc7.1, vc8</li>
 <li>Linux x86 GCC 3.3, GCC 3.4.2</li>
 <li>MacOS X (darwin), (Apple's) GCC 3.3, (Apple's) GCC 4.0</li>
 <li>SunOS 5.8 GCC 3.1</li>
@@ -1958,6 +1958,8 @@ peers this client is connected to. The fractional part tells the share of
 pieces that have more copies than the rarest piece(s). For example: 2.5 would
 mean that the rarest pieces have only 2 copies among the peers this torrent is
 connected to, and that 50% of all the pieces have more than two copies.</p>
+<p>If sequenced download is activated (in <a class="reference" href="#session-settings">session_settings</a>), the distributed
+copies will be saturated at the <tt class="docutils literal"><span class="pre">sequenced_download_threshold</span></tt>.</p>
 <p><tt class="docutils literal"><span class="pre">block_size</span></tt> is the size of a block, in bytes. A block is a sub piece, it
 is the number of bytes that each piece request asks for and the number of
 bytes that each bit in the <tt class="docutils literal"><span class="pre">partial_piece_info</span></tt>'s bitset represents
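A minimal sketch (not part of this patch; the function name and inputs are illustrative) of how the distributed-copies figure described above can be derived from per-piece availability counts:

    #include <cstddef>
    #include <vector>

    // availability[i] = number of connected peers that have piece i.
    // The integer part of the result is the number of copies of the
    // rarest piece(s); the fraction is the share of pieces that have
    // more copies than that (e.g. 2.5 = rarest piece has 2 copies and
    // half of all pieces have more than 2).
    float distributed_copies(std::vector<int> const& availability)
    {
        if (availability.empty()) return 0.f;
        int rarest = availability[0];
        for (std::size_t i = 1; i < availability.size(); ++i)
            if (availability[i] < rarest) rarest = availability[i];
        std::size_t more_than_rarest = 0;
        for (std::size_t i = 0; i < availability.size(); ++i)
            if (availability[i] > rarest) ++more_than_rarest;
        return rarest + float(more_than_rarest) / availability.size();
    }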
@@ -2175,7 +2177,8 @@ actual number of requests depends on the download rate and this number.</p>
 random (rarest first) order. It can be used to tweak disk performance in
 settings where the random download property is less necessary. For example, if
 the threshold is 10, all pieces which 10 or more peers have, will be downloaded
-in index order.</p>
+in index order. This setting defaults to 100, which means that it is disabled
+in practice.</p>
 <p><tt class="docutils literal"><span class="pre">max_allowed_in_request_queue</span></tt> is the number of outstanding block requests
 a peer is allowed to queue up in the client. If a peer sends more requests
 than this (before the first one has been handled) the last request will be
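A rough usage sketch of the setting documented above, assuming the session_settings/set_settings API of this libtorrent version; exact details may differ:

    #include "libtorrent/session.hpp"
    #include "libtorrent/session_settings.hpp"

    // Request widely available pieces (seen on 15 or more peers here)
    // in index order; the new default of 100 effectively keeps
    // sequenced download disabled.
    void enable_sequenced_download(libtorrent::session& ses)
    {
        libtorrent::session_settings s;
        s.sequenced_download_threshold = 15;
        ses.set_settings(s);
    }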
@@ -28,8 +28,7 @@ project (including this documentation). The current state includes the
 following features:
 
 * multitracker extension support (as `specified by John Hoffman`__)
-* serves multiple torrents on a single port and a single thread
-* supports http proxies and proxy authentication
+* serves multiple torrents on a single port and in a single thread
 * gzipped tracker-responses
 * `HTTP seeding`_, as `specified by Michael Burford of GetRight`__.
 * piece picking on block-level like in Azureus_ (as opposed to piece-level).
@@ -37,6 +36,7 @@ following features:
 It will also prefer to download whole pieces from single peers if the
 download speed is high enough from that particular peer.
 * queues torrents for file check, instead of checking all of them in parallel.
+* supports http proxies and proxy authentication
 * uses separate threads for checking files and for main downloader, with a
 fool-proof thread-safe library interface. (i.e. There's no way for the
 user to cause a deadlock). (see threads_)
@@ -85,7 +85,7 @@ epoll on linux and kqueue on MacOS X and BSD.
 
 libtorrent has been successfully compiled and tested on:
 
-* Windows 2000 vc7.1
+* Windows 2000 vc7.1, vc8
 * Linux x86 GCC 3.3, GCC 3.4.2
 * MacOS X (darwin), (Apple's) GCC 3.3, (Apple's) GCC 4.0
 * SunOS 5.8 GCC 3.1
@@ -1953,6 +1953,9 @@ pieces that have more copies than the rarest piece(s). For example: 2.5 would
 mean that the rarest pieces have only 2 copies among the peers this torrent is
 connected to, and that 50% of all the pieces have more than two copies.
 
+If sequenced download is activated (in session_settings_), the distributed
+copies will be saturated at the ``sequenced_download_threshold``.
+
 ``block_size`` is the size of a block, in bytes. A block is a sub piece, it
 is the number of bytes that each piece request asks for and the number of
 bytes that each bit in the ``partial_piece_info``'s bitset represents
@@ -2187,7 +2190,8 @@ actual number of requests depends on the download rate and this number.
 random (rarest first) order. It can be used to tweak disk performance in
 settings where the random download property is less necessary. For example, if
 the threshold is 10, all pieces which 10 or more peers have, will be downloaded
-in index order.
+in index order. This setting defaults to 100, which means that it is disabled
+in practice.
 
 ``max_allowed_in_request_queue`` is the number of outstanding block requests
 a peer is allowed to queue up in the client. If a peer sends more requests
@@ -212,7 +212,7 @@ namespace libtorrent
 int num_have_filtered() const { return m_num_have_filtered; }
 #ifndef NDEBUG
 // used in debug mode
-void integrity_check(const torrent* t = 0) const;
+void check_invariant(const torrent* t = 0) const;
 #endif
 
 // functor that compares indices on downloading_pieces
@@ -50,7 +50,7 @@ namespace libtorrent
 , tracker_maximum_response_length(1024*1024)
 , piece_timeout(120)
 , request_queue_time(3.f)
-, sequenced_download_threshold(10)
+, sequenced_download_threshold(100)
 , max_allowed_in_request_queue(250)
 , max_out_request_queue(200)
 , whole_pieces_threshold(20)
@@ -35,6 +35,9 @@ POSSIBILITY OF SUCH DAMAGE.
 #include <algorithm>
 #include <numeric>
 
+// non-standard header, is_sorted()
+//#include <algo.h>
+
 #include "libtorrent/piece_picker.hpp"
 
 #ifndef NDEBUG
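Since is_sorted() only comes from the non-standard <algo.h> mentioned above, a small stand-alone helper along these lines could back the commented-out sanity asserts later in the file (illustrative, not part of the patch):

    // Returns true if [first, last) is in non-decreasing order.
    template <class ForwardIter>
    bool is_sorted(ForwardIter first, ForwardIter last)
    {
        if (first == last) return true;
        ForwardIter next = first;
        for (++next; next != last; ++first, ++next)
            if (*next < *first) return false;
        return true;
    }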
@@ -42,6 +45,9 @@ POSSIBILITY OF SUCH DAMAGE.
 #include "libtorrent/torrent.hpp"
 #endif
 
+//#define TORRENT_PIECE_PICKER_INVARIANT_CHECK INVARIANT_CHECK
+#define TORRENT_PIECE_PICKER_INVARIANT_CHECK
+
 namespace libtorrent
 {
 
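For context, an INVARIANT_CHECK-style macro (the commented-out variant above) normally expands to an RAII guard that runs check_invariant() when a member function is entered and again when it returns. A simplified illustration of that idea, not libtorrent's actual implementation:

    // Illustration only: re-checks a class invariant on scope entry and exit.
    template <class T>
    struct invariant_checker
    {
        explicit invariant_checker(T const& self) : m_self(self)
        { m_self.check_invariant(); }     // verify on entry
        ~invariant_checker()
        { m_self.check_invariant(); }     // verify again on every exit path
        T const& m_self;
    };

    struct example
    {
        void check_invariant() const {}   // hypothetical invariant
        void mutate()
        {
            invariant_checker<example> guard(*this);
            // ... modify state here ...
        }
    };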
@@ -85,10 +91,6 @@ namespace libtorrent
 std::vector<int> piece_list;
 piece_list.reserve(std::count(pieces.begin(), pieces.end(), false));
 
-#ifndef NDEBUG
-integrity_check();
-#endif
-
 for (std::vector<bool>::const_iterator i = pieces.begin();
 i != pieces.end(); ++i)
 {
@@ -121,10 +123,6 @@ namespace libtorrent
 assert(m_piece_map[index].index != piece_pos::we_have_index);
 }
 
-#ifndef NDEBUG
-integrity_check();
-#endif
-
 // if we have fast resume info
 // use it
 if (!unfinished.empty())
@@ -140,17 +138,12 @@ namespace libtorrent
 }
 }
 }
-#ifndef NDEBUG
-integrity_check();
-#endif
 }
 
 void piece_picker::set_sequenced_download_threshold(
 int sequenced_download_threshold)
 {
-#ifndef NDEBUG
-integrity_check();
-#endif
+TORRENT_PIECE_PICKER_INVARIANT_CHECK;
 
 if (sequenced_download_threshold == m_sequenced_download_threshold)
 return;
@@ -168,14 +161,11 @@ namespace libtorrent
 move(p.downloading, p.filtered, prev_priority, i->index);
 }
 }
-#ifndef NDEBUG
-integrity_check();
-#endif
 }
 
 #ifndef NDEBUG
 
-void piece_picker::integrity_check(const torrent* t) const
+void piece_picker::check_invariant(const torrent* t) const
 {
 assert(sizeof(piece_pos) == 4);
 
@@ -265,7 +255,10 @@ namespace libtorrent
 const std::vector<std::vector<int> >& c_vec = pick_piece_info_vector(i->downloading, i->filtered);
 assert(i->priority(m_sequenced_download_threshold) < (int)c_vec.size());
 const std::vector<int>& vec = c_vec[i->priority(m_sequenced_download_threshold)];
-assert(i->index < vec.size());
+if (i->index >= vec.size())
+{
+assert(false);
+}
 assert(vec[i->index] == index);
 }
 
@@ -339,8 +332,9 @@ namespace libtorrent
 {
 // the piece should be inserted ordered, not randomly
 std::vector<int>& v = dst_vec[priority];
+// assert(is_sorted(v.begin(), v.end()/*, std::greater<int>()*/));
 std::vector<int>::iterator i = std::lower_bound(v.begin(), v.end()
-, index, std::greater<int>());
+, index/*, std::greater<int>()*/);
 p.index = i - v.begin();
 v.insert(i, index);
 i = v.begin() + p.index + 1;
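The change above drops the std::greater<int>() comparator, so std::lower_bound now searches the bucket as an ascending sequence, which is how the rest of the picker indexes into it. A self-contained sketch of this ordered-insert pattern, with the piece map simplified to a plain position map (names are illustrative):

    #include <algorithm>
    #include <cassert>
    #include <map>
    #include <vector>

    // Insert `index` into an ascending vector, record its position, and
    // bump the recorded positions of every element that moved up by one.
    void ordered_insert(std::vector<int>& v, std::map<int, int>& pos, int index)
    {
        std::vector<int>::iterator i
            = std::lower_bound(v.begin(), v.end(), index);
        pos[index] = int(i - v.begin());
        i = v.insert(i, index);
        for (++i; i != v.end(); ++i)
        {
            ++pos[*i];                 // element slid one slot to the right
            assert(v[pos[*i]] == *i);  // position map stays consistent
        }
    }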
@@ -349,6 +343,7 @@ namespace libtorrent
 ++m_piece_map[*i].index;
 assert(v[m_piece_map[*i].index] == *i);
 }
+// assert(is_sorted(v.begin(), v.end()/*, std::greater<int>()*/));
 }
 else if (dst_vec[priority].size() < 2)
 {
@@ -394,13 +389,19 @@ namespace libtorrent
 piece_pos& p = m_piece_map[index];
 int new_priority = p.priority(m_sequenced_download_threshold);
 
-assert(p.downloading != downloading
-|| p.filtered != filtered
-|| (int)new_priority != priority);
+if (p.downloading == downloading
+&& p.filtered == filtered
+&& new_priority == priority)
+{
+assert(p.ordered(m_sequenced_download_threshold));
+return;
+}
 
 std::vector<std::vector<int> >& dst_vec(pick_piece_info_vector(
 p.downloading, p.filtered));
 
+assert(&dst_vec != &src_vec || new_priority != priority);
+
 if ((int)dst_vec.size() <= new_priority)
 {
 dst_vec.resize(new_priority + 1);
@@ -411,9 +412,9 @@ namespace libtorrent
 {
 // the piece should be inserted ordered, not randomly
 std::vector<int>& v = dst_vec[new_priority];
+// assert(is_sorted(v.begin(), v.end()/*, std::greater<int>()*/));
 std::vector<int>::iterator i = std::lower_bound(v.begin(), v.end()
-, index, std::greater<int>());
+, index/*, std::greater<int>()*/);
 p.index = i - v.begin();
 v.insert(i, index);
 i = v.begin() + p.index + 1;
@@ -422,6 +423,7 @@ namespace libtorrent
 ++m_piece_map[*i].index;
 assert(v[m_piece_map[*i].index] == *i);
 }
+// assert(is_sorted(v.begin(), v.end()/*, std::greater<int>()*/));
 }
 else if (dst_vec[new_priority].size() < 2)
 {
@@ -448,25 +450,40 @@ namespace libtorrent
 assert(p.index < dst_vec[p.priority(m_sequenced_download_threshold)].size());
 assert(dst_vec[p.priority(m_sequenced_download_threshold)][p.index] == index);
 
-// this will remove elem from the source vector without
-// preserving order, but the order is random anyway
-int replace_index = src_vec[priority][elem_index] = src_vec[priority].back();
-if (index != replace_index)
+if (priority >= m_sequenced_download_threshold)
 {
-// update the entry we moved from the back
-m_piece_map[replace_index].index = elem_index;
-
-assert((int)src_vec[priority].size() > elem_index);
-assert((int)m_piece_map[replace_index].priority(m_sequenced_download_threshold) == priority);
-assert((int)m_piece_map[replace_index].index == elem_index);
-assert(src_vec[priority][elem_index] == replace_index);
+// remove the element from the source vector and preserve the order
+std::vector<int>& v = src_vec[priority];
+v.erase(v.begin() + elem_index);
+for (std::vector<int>::iterator i = v.begin() + elem_index;
+i != v.end(); ++i)
+{
+--m_piece_map[*i].index;
+assert(v[m_piece_map[*i].index] == *i);
+}
 }
 else
 {
-assert((int)src_vec[priority].size() == elem_index+1);
-}
-
-src_vec[priority].pop_back();
+// this will remove elem from the source vector without
+// preserving order, but the order is random anyway
+int replace_index = src_vec[priority][elem_index] = src_vec[priority].back();
+if (index != replace_index)
+{
+// update the entry we moved from the back
+m_piece_map[replace_index].index = elem_index;
+
+assert((int)src_vec[priority].size() > elem_index);
+assert((int)m_piece_map[replace_index].priority(m_sequenced_download_threshold) == priority);
+assert((int)m_piece_map[replace_index].index == elem_index);
+assert(src_vec[priority][elem_index] == replace_index);
+}
+else
+{
+assert((int)src_vec[priority].size() == elem_index+1);
+}
+
+src_vec[priority].pop_back();
+}
 }
 
 void piece_picker::remove(bool downloading, bool filtered, int priority
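The branch introduced above distinguishes the ordered buckets (priority at or above the sequenced download threshold), where the element has to be erased in place and every later element's stored index decremented, from the unordered buckets, where the cheaper swap-with-back-and-pop is still sufficient. A reduced sketch of the two removal strategies, again with the piece map simplified to a position map:

    #include <cassert>
    #include <map>
    #include <vector>

    // Ordered bucket: erase in place and fix up the positions of the
    // elements that slid down by one slot.
    void ordered_erase(std::vector<int>& v, std::map<int, int>& pos, int elem_index)
    {
        v.erase(v.begin() + elem_index);
        for (std::vector<int>::iterator i = v.begin() + elem_index;
            i != v.end(); ++i)
        {
            --pos[*i];
            assert(v[pos[*i]] == *i);
        }
    }

    // Unordered bucket: overwrite the slot with the last element and pop
    // the back; only the element that was moved needs its position updated.
    void unordered_erase(std::vector<int>& v, std::map<int, int>& pos, int elem_index)
    {
        int moved = v[elem_index] = v.back();
        if (elem_index != int(v.size()) - 1) pos[moved] = elem_index;
        v.pop_back();
    }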
@@ -497,6 +514,7 @@ namespace libtorrent
 if (p.ordered(m_sequenced_download_threshold))
 {
 std::vector<int>& v = src_vec[priority];
+// assert(is_sorted(v.begin(), v.end()/*, std::greater<int>()*/));
 std::vector<int>::iterator i = v.begin() + elem_index;
 v.erase(i);
 i = v.begin() + elem_index;
@@ -505,6 +523,7 @@ namespace libtorrent
 --m_piece_map[*i].index;
 assert(v[m_piece_map[*i].index] == *i);
 }
+// assert(is_sorted(v.begin(), v.end()/*, std::greater<int>()*/));
 }
 else
 {
@@ -520,6 +539,8 @@ namespace libtorrent
 
 void piece_picker::restore_piece(int index)
 {
+TORRENT_PIECE_PICKER_INVARIANT_CHECK;
+
 assert(index >= 0);
 assert(index < (int)m_piece_map.size());
 
@@ -536,14 +557,11 @@ namespace libtorrent
 piece_pos& p = m_piece_map[index];
 if (p.filtered) return;
 move(true, p.filtered, p.priority(m_sequenced_download_threshold), p.index);
 
-#ifndef NDEBUG
-// integrity_check();
-#endif
 }
 
 void piece_picker::inc_refcount(int i)
 {
+TORRENT_PIECE_PICKER_INVARIANT_CHECK;
 assert(i >= 0);
 assert(i < (int)m_piece_map.size());
 
@@ -571,9 +589,7 @@ namespace libtorrent
 
 void piece_picker::dec_refcount(int i)
 {
-#ifndef NDEBUG
-// integrity_check();
-#endif
+TORRENT_PIECE_PICKER_INVARIANT_CHECK;
 
 assert(i >= 0);
 assert(i < (int)m_piece_map.size());
@@ -599,6 +615,7 @@ namespace libtorrent
 // be removed from the available piece list.
 void piece_picker::we_have(int index)
 {
+TORRENT_PIECE_PICKER_INVARIANT_CHECK;
 assert(index >= 0);
 assert(index < (int)m_piece_map.size());
 
@@ -618,20 +635,14 @@ namespace libtorrent
 if (info_index == piece_pos::we_have_index) return;
 remove(p.downloading, p.filtered, priority, info_index);
 p.index = piece_pos::we_have_index;
-#ifndef NDEBUG
-integrity_check();
-#endif
 }
 
 
 void piece_picker::mark_as_filtered(int index)
 {
+TORRENT_PIECE_PICKER_INVARIANT_CHECK;
 assert(index >= 0);
 assert(index < (int)m_piece_map.size());
 
-#ifndef NDEBUG
-// integrity_check();
-#endif
-
 piece_pos& p = m_piece_map[index];
 if (p.filtered == 1) return;
@@ -646,10 +657,6 @@ namespace libtorrent
 {
 ++m_num_have_filtered;
 }
 
-#ifndef NDEBUG
-integrity_check();
-#endif
 }
 
 // this function can be used for pieces that we don't
@@ -659,6 +666,7 @@ namespace libtorrent
 // be inserted in the available piece list again
 void piece_picker::mark_as_unfiltered(int index)
 {
+TORRENT_PIECE_PICKER_INVARIANT_CHECK;
 assert(index >= 0);
 assert(index < (int)m_piece_map.size());
 
@@ -676,10 +684,6 @@ namespace libtorrent
 --m_num_have_filtered;
 assert(m_num_have_filtered >= 0);
 }
 
-#ifndef NDEBUG
-integrity_check();
-#endif
 }
 
 bool piece_picker::is_filtered(int index) const
@@ -706,13 +710,10 @@ namespace libtorrent
 , int num_blocks, bool prefer_whole_pieces
 , tcp::endpoint peer) const
 {
+TORRENT_PIECE_PICKER_INVARIANT_CHECK;
 assert(num_blocks > 0);
 assert(pieces.size() == m_piece_map.size());
 
-#ifndef NDEBUG
-// integrity_check();
-#endif
-
 // free refers to pieces that are free to download, no one else
 // is downloading them.
 // partial is pieces that are partially being downloaded, and
@@ -973,9 +974,8 @@ namespace libtorrent
 
 void piece_picker::mark_as_downloading(piece_block block, const tcp::endpoint& peer)
 {
-#ifndef NDEBUG
-// integrity_check();
-#endif
+TORRENT_PIECE_PICKER_INVARIANT_CHECK;
 assert(block.piece_index >= 0);
 assert(block.block_index >= 0);
 assert(block.piece_index < (int)m_piece_map.size());
@@ -1002,16 +1002,12 @@ namespace libtorrent
 i->info[block.block_index].peer = peer;
 i->requested_blocks[block.block_index] = 1;
 }
-#ifndef NDEBUG
-// integrity_check();
-#endif
 }
 
 void piece_picker::mark_as_finished(piece_block block, const tcp::endpoint& peer)
 {
-#ifndef NDEBUG
-// integrity_check();
-#endif
+TORRENT_PIECE_PICKER_INVARIANT_CHECK;
 assert(block.piece_index >= 0);
 assert(block.block_index >= 0);
 assert(block.piece_index < (int)m_piece_map.size());
@@ -1041,9 +1037,6 @@ namespace libtorrent
 i->requested_blocks[block.block_index] = 1;
 i->finished_blocks[block.block_index] = 1;
 }
-#ifndef NDEBUG
-// integrity_check();
-#endif
 }
 /*
 void piece_picker::mark_as_finished(piece_block block, const peer_id& peer)
@@ -1110,9 +1103,7 @@ namespace libtorrent
 
 void piece_picker::abort_download(piece_block block)
 {
-#ifndef NDEBUG
-// integrity_check();
-#endif
+TORRENT_PIECE_PICKER_INVARIANT_CHECK;
 
 assert(block.piece_index >= 0);
 assert(block.block_index >= 0);
@@ -1151,9 +1142,6 @@ namespace libtorrent
 piece_pos& p = m_piece_map[block.piece_index];
 move(true, p.filtered, p.priority(m_sequenced_download_threshold), p.index);
 }
-#ifndef NDEBUG
-// integrity_check();
-#endif
 }
 
 int piece_picker::unverified_blocks() const
@@ -996,7 +996,20 @@ namespace libtorrent { namespace detail
 assert(m_abort);
 m_abort = true;
 
+for (connection_map::iterator i = m_connections.begin();
+i != m_connections.end(); ++i)
+{
+i->second->disconnect();
+}
+
 m_connections.clear();
 
+for (connection_map::iterator i = m_half_open.begin();
+i != m_half_open.end(); ++i)
+{
+i->second->disconnect();
+}
+
 m_half_open.clear();
 m_connection_queue.clear();
 
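The added lines make sure each peer connection is explicitly disconnected before the containers that own it are cleared. A generic sketch of that shutdown pattern, assuming a map of shared_ptr-managed connections with a disconnect() member (types and names are illustrative):

    #include <boost/shared_ptr.hpp>
    #include <map>

    struct peer_connection
    {
        void disconnect() { /* close socket, cancel outstanding I/O */ }
    };

    typedef std::map<int, boost::shared_ptr<peer_connection> > connection_map;

    // Disconnect every connection first so sockets are shut down
    // deterministically, then drop the owning references.
    void shutdown_all(connection_map& connections)
    {
        for (connection_map::iterator i = connections.begin();
            i != connections.end(); ++i)
        {
            i->second->disconnect();
        }
        connections.clear();
    }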