forked from premiere/premiere-libtorrent
merged RC_1_1 into master
commit 90ccd5180f
@@ -81,6 +81,7 @@
* resume data no longer has timestamps of files
* require C++11 to build libtorrent

* fix loading resume data when in seed mode
* fix part-file creation race condition
* fix issue with initializing settings on session construction
* fix issue with receiving interested before metadata
@@ -84,6 +84,8 @@ pread
preadv
pwrite
pwritev
readv
writev
ftruncate
iovec
uint8
@@ -170,10 +170,24 @@ namespace libtorrent {
	storage_interface(storage_interface const&) = delete;
	storage_interface& operator=(storage_interface const&) = delete;

	// This function is called when the storage is to be initialized. The
	// default storage will create directories and empty files at this point.
	// If ``allocate_files`` is true, it will also ``ftruncate`` all files to
	// their target size.
	// This function is called when the *storage* on disk is to be
	// initialized. The default storage will create directories and empty
	// files at this point. If ``allocate_files`` is true, it will also
	// ``ftruncate`` all files to their target size.
	//
	// This function may be called multiple times on a single instance. When a
	// torrent is force-rechecked, the storage is re-initialized to trigger
	// the re-check from scratch.
	//
	// The function is not necessarily called before other member functions.
	// For instance has_any_files() and verify_resume_data() are
	// called early to determine whether we may have to check all files or
	// not. If we're doing a full check of the files every piece will be
	// hashed, causing readv() to be called as well.
	//
	// Any required internals that need initialization should be done in the
	// constructor. This function is called before the torrent starts to
	// download.
	//
	// If an error occurs, ``storage_error`` should be set to reflect it.
	virtual void initialize(storage_error& ec) = 0;
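As an illustration of the contract documented above (create directories and empty files, optionally ``ftruncate`` them to full size when ``allocate_files`` is set, tolerate being called more than once, and report failures through an out-parameter rather than exceptions), here is a minimal standalone sketch. It is not libtorrent code and not part of this diff; the file_spec struct and initialize_files() name are made up for the example.

#include <cerrno>
#include <string>
#include <system_error>
#include <vector>

#include <fcntl.h>   // open
#include <unistd.h>  // close, ftruncate

// hypothetical description of one file belonging to a torrent
struct file_spec { std::string path; long long size; };

// Create (or re-open) every file; when allocate_files is set, grow each one
// to its target size with ftruncate(). Safe to call repeatedly, mirroring
// the "may be called multiple times" note above. Errors are reported via
// ec, the same way storage_error is used in the real interface.
void initialize_files(std::vector<file_spec> const& files
	, bool const allocate_files, std::error_code& ec)
{
	for (file_spec const& f : files)
	{
		int const fd = ::open(f.path.c_str(), O_RDWR | O_CREAT, 0644);
		if (fd < 0) { ec.assign(errno, std::generic_category()); return; }
		if (allocate_files && ::ftruncate(fd, f.size) != 0)
			ec.assign(errno, std::generic_category());
		::close(fd);
		if (ec) return;
	}
}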
src/torrent.cpp (148 changed lines)
@@ -573,7 +573,7 @@ namespace libtorrent {
m_seed_mode = false;
// seed is false if we turned out not
// to be a seed after all
if (!skip_checking)
if (!skip_checking && state() != torrent_status::checking_resume_data)
{
	m_have_all = false;
	set_state(torrent_status::downloading);
@@ -1787,81 +1787,79 @@ namespace libtorrent {
if (m_seed_mode)
{
	m_have_all = true;
	auto self = shared_from_this();
	m_ses.get_io_service().post([self] { self->wrap(&torrent::files_checked); });
	TORRENT_ASSERT(m_outstanding_check_files == false);
	m_add_torrent_params.reset();
	update_gauge();
	update_state_list();
	return;
}
else
{
	int num_pad_files = 0;
	TORRENT_ASSERT(block_size() > 0);

	for (file_index_t i(0); i < fs.end_file(); ++i)
	{
		if (fs.pad_file_at(i)) ++num_pad_files;

		if (!fs.pad_file_at(i) || fs.file_size(i) == 0) continue;
		m_padding += std::uint32_t(fs.file_size(i));

		// TODO: instead of creating the picker up front here,
		// maybe this whole section should move to need_picker()
		need_picker();

		peer_request pr = m_torrent_file->map_file(i, 0, int(fs.file_size(i)));
		int off = pr.start & (block_size() - 1);
		if (off != 0) { pr.length -= block_size() - off; pr.start += block_size() - off; }
		TORRENT_ASSERT((pr.start & (block_size() - 1)) == 0);

		int block = block_size();
		int blocks_per_piece = m_torrent_file->piece_length() / block;
		piece_block pb(pr.piece, pr.start / block);
		for (; pr.length >= block; pr.length -= block, ++pb.block_index)
		{
			if (pb.block_index == blocks_per_piece) { pb.block_index = 0; ++pb.piece_index; }
			m_picker->mark_as_finished(pb, nullptr);
		}
		// ugly edge case where padfiles are not used the way they're
		// supposed to be. i.e. added back-to-back or at the end
		if (pb.block_index == blocks_per_piece) { pb.block_index = 0; ++pb.piece_index; }
		if (pr.length > 0 && ((next(i) != fs.end_file() && fs.pad_file_at(next(i)))
			|| next(i) == fs.end_file()))
		{
			m_picker->mark_as_finished(pb, nullptr);
		}
	}

	if (m_padding > 0)
	{
		// if we marked an entire piece as finished, we actually
		// need to consider it finished

		std::vector<piece_picker::downloading_piece> dq
			= m_picker->get_download_queue();

		std::vector<piece_index_t> have_pieces;

		for (auto const& p : dq)
		{
			int const num_blocks = m_picker->blocks_in_piece(p.index);
			if (p.finished < num_blocks) continue;
			have_pieces.push_back(p.index);
		}

		for (auto i : have_pieces)
		{
			picker().piece_passed(i);
			TORRENT_ASSERT(picker().have_piece(i));
			we_have(i);
		}
	}

	if (num_pad_files > 0)
		m_picker->set_num_pad_files(num_pad_files);
}

set_state(torrent_status::checking_resume_data);

int num_pad_files = 0;
TORRENT_ASSERT(block_size() > 0);
for (file_index_t i(0); i < fs.end_file(); ++i)
{
	if (fs.pad_file_at(i)) ++num_pad_files;

	if (!fs.pad_file_at(i) || fs.file_size(i) == 0) continue;
	m_padding += std::uint32_t(fs.file_size(i));

	// TODO: instead of creating the picker up front here,
	// maybe this whole section should move to need_picker()
	need_picker();

	peer_request pr = m_torrent_file->map_file(i, 0, int(fs.file_size(i)));
	int off = pr.start & (block_size() - 1);
	if (off != 0) { pr.length -= block_size() - off; pr.start += block_size() - off; }
	TORRENT_ASSERT((pr.start & (block_size() - 1)) == 0);

	int block = block_size();
	int blocks_per_piece = m_torrent_file->piece_length() / block;
	piece_block pb(pr.piece, pr.start / block);
	for (; pr.length >= block; pr.length -= block, ++pb.block_index)
	{
		if (pb.block_index == blocks_per_piece) { pb.block_index = 0; ++pb.piece_index; }
		m_picker->mark_as_finished(pb, nullptr);
	}
	// ugly edge case where padfiles are not used the way they're
	// supposed to be. i.e. added back-to-back or at the end
	if (pb.block_index == blocks_per_piece) { pb.block_index = 0; ++pb.piece_index; }
	if (pr.length > 0 && ((next(i) != fs.end_file() && fs.pad_file_at(next(i)))
		|| next(i) == fs.end_file()))
	{
		m_picker->mark_as_finished(pb, nullptr);
	}
}

if (m_padding > 0)
{
	// if we marked an entire piece as finished, we actually
	// need to consider it finished

	std::vector<piece_picker::downloading_piece> dq
		= m_picker->get_download_queue();

	std::vector<piece_index_t> have_pieces;

	for (auto const& p : dq)
	{
		int const num_blocks = m_picker->blocks_in_piece(p.index);
		if (p.finished < num_blocks) continue;
		have_pieces.push_back(p.index);
	}

	for (auto i : have_pieces)
	{
		picker().piece_passed(i);
		TORRENT_ASSERT(picker().have_piece(i));
		we_have(i);
	}
}

if (num_pad_files > 0)
	m_picker->set_num_pad_files(num_pad_files);

aux::vector<std::string, file_index_t> links;
#ifndef TORRENT_DISABLE_MUTABLE_TORRENTS
if (!m_torrent_file->similar_torrents().empty()
@@ -2074,7 +2072,13 @@ namespace libtorrent {
// that when the resume data check fails. For instance, if the resume data
// is incorrect, but we don't have any files, we skip the check and initialize
// the storage to not have anything.
if (status == status_t::no_error)
if (m_seed_mode)
{
	m_have_all = true;
	update_gauge();
	update_state_list();
}
else if (status == status_t::no_error)
{
	// there are either no files for this torrent
	// or the resume_data was accepted
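As an aside (not part of this diff): a failed resume-data check is visible to client code as a fastresume_rejected_alert before the torrent falls back to a full recheck. A minimal sketch of watching for it, assuming a running session named ses, might look like this.

#include <libtorrent/alert_types.hpp>
#include <libtorrent/session.hpp>

#include <cstdio>
#include <vector>

// poll the session's alert queue and log any rejected resume data
void log_resume_rejections(lt::session& ses)
{
	std::vector<lt::alert*> alerts;
	ses.pop_alerts(&alerts);
	for (lt::alert* a : alerts)
	{
		if (auto const* rej = lt::alert_cast<lt::fastresume_rejected_alert>(a))
			std::printf("resume data rejected: %s\n", rej->message().c_str());
	}
}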
@@ -5244,7 +5248,7 @@ namespace libtorrent {
k = m_trackers.insert(k, url);
k->endpoints.clear();
if (k->source == 0) k->source = announce_entry::source_client;
if (!m_paused && !m_trackers.empty()) announce_with_tracker();
if (m_announcing && !m_trackers.empty()) announce_with_tracker();
return true;
}
@@ -441,10 +441,7 @@ std::shared_ptr<torrent_info> setup_peer(tcp::socket& s, sha1_hash& ih
if (th) *th = ret;

// wait for the torrent to be ready
if (!(flags & torrent_flags::seed_mode))
{
	wait_for_downloading(*ses, "ses");
}
wait_for_downloading(*ses, "ses");

if (incoming)
{
@@ -166,6 +166,7 @@ void test_transfer(settings_pack const& sett, bool test_deprecated = false)
}

TEST_CHECK(st1.state == torrent_status::seeding
	|| st1.state == torrent_status::checking_resume_data
	|| st1.state == torrent_status::checking_files);
TEST_CHECK(st2.state == torrent_status::downloading
	|| st2.state == torrent_status::checking_resume_data);
@@ -119,12 +119,13 @@ void test_remove_torrent(remove_flags_t const remove_options
if (st2.is_finished) break;

TEST_CHECK(st1.state == torrent_status::seeding
	|| st1.state == torrent_status::checking_resume_data
	|| st1.state == torrent_status::checking_files);
TEST_CHECK(st2.state == torrent_status::downloading
	|| st2.state == torrent_status::checking_resume_data);

// if nothing is being transferred after 2 seconds, we're failing the test
if (st1.upload_payload_rate == 0 && i > 20)
// if nothing is being transferred after 3 seconds, we're failing the test
if (st1.upload_payload_rate == 0 && i > 30)
{
	TEST_ERROR("no transfer");
	return;
@@ -215,7 +215,9 @@ void default_tests(torrent_status const& s)
TEST_CHECK(s.active_duration < seconds(1339 + 10));

TEST_CHECK(s.added_time < 1347 + 2);
TEST_CHECK(s.added_time >= 1347);
TEST_CHECK(s.completed_time < 1348 + 2);
TEST_CHECK(s.completed_time >= 1348);
}

void test_piece_priorities(bool test_deprecated = false)
@@ -1056,6 +1058,34 @@ TORRENT_TEST(seed_mode_preserve)
	test_seed_mode(test_mode_t{});
}

TORRENT_TEST(seed_mode_load_peers)
{
	lt::session ses(settings());
	std::shared_ptr<torrent_info> ti = generate_torrent();
	add_torrent_params p;
	p.ti = ti;
	p.save_path = ".";
	p.flags |= torrent_flags::seed_mode;
	p.peers.push_back(tcp::endpoint(address::from_string("1.2.3.4"), 12345));

	torrent_handle h = ses.add_torrent(p);

	wait_for_alert(ses, torrent_checked_alert::alert_type, "seed_mode_load_peers");

	h.save_resume_data();

	save_resume_data_alert const* a = alert_cast<save_resume_data_alert>(
		wait_for_alert(ses, save_resume_data_alert::alert_type
		, "seed_mode_load_peers"));

	TEST_CHECK(a);
	if (a == nullptr) return;

	auto const& peers = a->params.peers;
	TEST_EQUAL(peers.size(), 1);
	TEST_CHECK(peers[0] == tcp::endpoint(address::from_string("1.2.3.4"), 12345));
}

TORRENT_TEST(resume_save_load)
{
	lt::session ses(settings());
@@ -618,7 +618,7 @@ TORRENT_TEST(tracker_proxy)
}

#ifndef TORRENT_DISABLE_LOGGING
void test_stop_tracker_timeout(bool nostop)
void test_stop_tracker_timeout(int const timeout)
{
	// trick the min interval so that the stopped announce is permitted immediately
	// after the initial announce
@@ -658,8 +658,7 @@ void test_stop_tracker_timeout(bool nostop)
p.set_bool(settings_pack::announce_to_all_tiers, true);
p.set_int(settings_pack::alert_mask, alert::all_categories);
p.set_str(settings_pack::listen_interfaces, "0.0.0.0:6881");
if (nostop)
	p.set_int(settings_pack::stop_tracker_timeout, 0);
p.set_int(settings_pack::stop_tracker_timeout, timeout);

lt::session s(p);

@@ -683,28 +682,31 @@ void test_stop_tracker_timeout(bool nostop)
announce_entry ae{tracker_url};
h.add_tracker(ae);

while (true)
{
	std::vector<alert*> alerts;
	s.pop_alerts(&alerts);
	if (std::any_of(alerts.begin(), alerts.end()
		, [](alert* a) { return a->type() == tracker_reply_alert::alert_type; }))
		break;
}
// make sure it announced an event=started properly
wait_for_alert(s, tracker_reply_alert::alert_type, "s");

s.remove_torrent(h);

wait_for_alert(s, torrent_removed_alert::alert_type, "s");

// we remove and stop the torrent immediately after posting the alert, so we
// need some leeway here
std::this_thread::sleep_for(lt::seconds(2));

int const count = count_stopped_events(s);
TEST_EQUAL(count, nostop ? 0 : 1);
TEST_EQUAL(count, (timeout == 0) ? 0 : 1);
}

TORRENT_TEST(stop_tracker_timeout)
{
	std::printf("\n\nexpect to get ONE request with &event=stopped\n\n");
	test_stop_tracker_timeout(false);
	test_stop_tracker_timeout(1);
}

TORRENT_TEST(stop_tracker_timeout_zero_timeout)
{
	std::printf("\n\nexpect to NOT get a request with &event=stopped\n\n");
	test_stop_tracker_timeout(true);
	test_stop_tracker_timeout(0);
}
#endif