forked from premiere/premiere-libtorrent

commit fb34607313 (parent fe3a8139d7)

fix crash when loading a resume file where all files have priority 0 (and the
torrent finishes before having downloaded anything), and add a unit test for
this case
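Overview (my reading of the diff, not stated in the commit message): when the
resume data gives every file priority 0, no bytes are wanted, so the torrent
can reach its finished state without storage ever having been attached; the
completion path then asserted on and unconditionally used m_storage, crashing.
The first three hunks clamp the file_priority list rather than rejecting an
over-long one, guard the release-files call on m_storage, and assert the
storage precondition in the piece-verification path; the remaining hunks add
a regression test.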
@@ -6766,11 +6766,11 @@ namespace libtorrent
 		if (!m_override_resume_data || m_file_priority.empty())
 		{
 			bdecode_node file_priority = rd.dict_find_list("file_priority");
-			if (file_priority && file_priority.list_size()
-				<= m_torrent_file->num_files())
+			if (file_priority)
 			{
-				int num_files = file_priority.list_size();
-				m_file_priority.resize(num_files);
+				const int num_files = (std::min)(file_priority.list_size()
+					, m_torrent_file->num_files());
+				m_file_priority.resize(num_files, 4);
 				for (int i = 0; i < num_files; ++i)
 				{
 					m_file_priority[i] = file_priority.list_int_value_at(i, 1);
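Not part of the commit: the hunk above stops rejecting a file_priority list
whose length exceeds the torrent's file count and clamps it instead; 4 is
libtorrent's default file priority. A minimal, standalone sketch of that
clamping (hypothetical names, not the actual member function):

#include <algorithm>
#include <cstdio>
#include <vector>

// mirrors the member logic above: clamp the resume list to the torrent's
// real file count; any slots the vector grows by get the default priority 4
void apply_resume_priorities(std::vector<int>& file_priority
	, std::vector<int> const& from_resume, int files_in_torrent)
{
	const int num_files = (std::min)((int)from_resume.size(), files_in_torrent);
	file_priority.resize(num_files, 4);
	for (int i = 0; i < num_files; ++i)
		file_priority[i] = from_resume[i];
}

int main()
{
	std::vector<int> prio;
	std::vector<int> resume(100, 0); // 100 entries, torrent has only 3 files
	apply_resume_priorities(prio, resume, 3);
	std::printf("%d entries\n", (int)prio.size()); // prints "3 entries"
	return 0;
}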
@@ -8262,12 +8262,13 @@ namespace libtorrent
 
 		update_want_peers();
 
-		TORRENT_ASSERT(m_storage);
-
-		// we need to keep the object alive during this operation
-		inc_refcount("release_files");
-		m_ses.disk_thread().async_release_files(m_storage.get()
-			, boost::bind(&torrent::on_cache_flushed, shared_from_this(), _1));
+		if (m_storage)
+		{
+			// we need to keep the object alive during this operation
+			inc_refcount("release_files");
+			m_ses.disk_thread().async_release_files(m_storage.get()
+				, boost::bind(&torrent::on_cache_flushed, shared_from_this(), _1));
+		}
 
 		// this torrent just completed downloads, which means it will fall
 		// under a different limit with the auto-manager. Make sure we
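Not part of the commit: the new if (m_storage) guard replaces an assert that
does not hold when a torrent completes without storage ever having been
attached. The "keep the object alive" comment refers to binding
shared_from_this() into the completion handler; below is a toy model of that
ownership pattern (simplified; it ignores libtorrent's extra inc_refcount()
bookkeeping and its real disk thread):

#include <boost/bind.hpp>
#include <boost/enable_shared_from_this.hpp>
#include <boost/function.hpp>
#include <boost/shared_ptr.hpp>
#include <cstdio>

struct torrent_like : boost::enable_shared_from_this<torrent_like>
{
	// "queue" an async job; the shared_ptr bound into the handler keeps
	// this object alive until the handler has run
	void start(boost::function<void()>& job)
	{ job = boost::bind(&torrent_like::on_done, shared_from_this()); }
	void on_done() { std::printf("handler ran; object still alive\n"); }
};

int main()
{
	boost::function<void()> job;
	boost::shared_ptr<torrent_like> t(new torrent_like);
	t->start(job);
	t.reset(); // drop the last named reference
	job();     // still safe: the bound shared_ptr owns the object
	return 0;
}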
@@ -11172,6 +11173,8 @@ namespace libtorrent
 	{
 		// picker().mark_as_checking(piece);
 
+		TORRENT_ASSERT(m_storage.get());
+
 		inc_refcount("verify_piece");
 		m_ses.disk_thread().async_hash(m_storage.get(), piece, 0
 			, boost::bind(&torrent::on_piece_verified, shared_from_this(), _1)
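Not part of the commit: with the release path above now tolerating a null
m_storage, this assert records the precondition that must still hold in the
verification path: a piece is only queued for hashing once storage exists,
since async_hash() dereferences m_storage unconditionally.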
@@ -272,7 +272,45 @@ TORRENT_TEST(file_priorities_seed_mode)
 	TEST_EQUAL(file_priorities[2], 0);
 }
 
-void test_seed_mode(bool file_prio, bool pieces_have, bool piece_prio)
+TORRENT_TEST(zero_file_prio)
 {
 	fprintf(stderr, "test_file_prio\n");
 
+	session ses;
+	boost::shared_ptr<torrent_info> ti = generate_torrent();
+	add_torrent_params p;
+	p.ti = ti;
+	p.save_path = ".";
+
+	entry rd;
+
+	rd["file-format"] = "libtorrent resume file";
+	rd["file-version"] = 1;
+	rd["info-hash"] = ti->info_hash().to_string();
+	rd["blocks per piece"] = (std::max)(1, ti->piece_length() / 0x4000);
+
+	entry::list_type& file_prio = rd["file_priority"].list();
+	for (int i = 0; i < 100; ++i)
+	{
+		file_prio.push_back(entry(0));
+	}
+
+	std::string pieces(ti->num_pieces(), '\x01');
+	rd["pieces"] = pieces;
+
+	std::string pieces_prio(ti->num_pieces(), '\x01');
+	rd["piece_priority"] = pieces_prio;
+
+	bencode(back_inserter(p.resume_data), rd);
+
+	torrent_handle h = ses.add_torrent(p);
+
+	torrent_status s = h.status();
+	TEST_EQUAL(s.total_wanted, 0);
+}
+
+void test_seed_mode(bool file_prio, bool pieces_have, bool piece_prio
+	, bool all_files_zero = false)
+{
+	fprintf(stderr, "test_seed_mode file_prio: %d pieces_have: %d piece_prio: %d\n"
+		, file_prio, pieces_have, piece_prio);
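Not part of the commit: the new zero_file_prio test builds resume data by
hand: 100 zero file_priority entries (more than enough to cover every file,
now that the loader clamps the list), every piece marked as present, and all
piece priorities set. It then checks total_wanted == 0, i.e. that no bytes
are wanted once zero-priority files are excluded; before the fix, adding such
a torrent completed immediately and crashed in the release-files path.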
@@ -295,6 +333,13 @@ void test_seed_mode(bool file_prio, bool pieces_have, bool piece_prio)
 		// this should take it out of seed_mode
 		entry::list_type& file_prio = rd["file_priority"].list();
 		file_prio.push_back(entry(0));
+		if (all_files_zero)
+		{
+			for (int i = 0; i < 100; ++i)
+			{
+				file_prio.push_back(entry(0));
+			}
+		}
 	}
 
 	std::string pieces(ti->num_pieces(), '\x01');
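Not part of the commit: all_files_zero defaults to false, so the existing
seed-mode cases run unchanged; when true, the resume data carries nothing but
zero priorities, exercising the all-files-skipped path from inside the
seed-mode scenario as well.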