add write_resume_data() function (#1776)

add write_resume_data() function. Make resume data alert use an add_torrent_params object
Arvid Norberg 2017-03-05 09:31:28 -05:00 committed by GitHub
parent 8cf6e9e37c
commit a34ce0278e
16 changed files with 436 additions and 202 deletions

View File

@ -68,6 +68,7 @@ set(sources
random
receive_buffer
read_resume_data
write_resume_data
request_blocks
resolve_links
resolver

View File

@ -48,7 +48,9 @@
* added support for BEP 32, "IPv6 extension for DHT"
* overhauled listen socket and UDP socket handling, improving multi-home
support and bind-to-device
* added new read_resume_data() function, initializing add_torrent_params
* resume data is now communicated via add_torrent_params objects
* added new read_resume_data()/write_resume_data() functions for reading and
writing bencoded, backwards-compatible resume files (see the sketch below)
* removed deprecated fields from add_torrent_params
* deprecate "resume_data" field in add_torrent_params
* improved support for bind-to-device
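
As an illustration of the new resume-data flow described in the entries above (not part of this commit): a minimal handler that turns a save_resume_data_alert into a bencoded file via write_resume_data_buf(). It assumes a running libtorrent session named "ses"; the ".resume_file" name is made up.

#include <fstream>
#include <vector>

#include <libtorrent/session.hpp>
#include <libtorrent/alert_types.hpp>
#include <libtorrent/write_resume_data.hpp>

namespace lt = libtorrent;

void save_resume(lt::session& ses)
{
	std::vector<lt::alert*> alerts;
	ses.pop_alerts(&alerts);
	for (lt::alert* a : alerts)
	{
		if (auto* rd = lt::alert_cast<lt::save_resume_data_alert>(a))
		{
			// the alert now carries an add_torrent_params object; turn it
			// into a bencoded buffer and write it straight to disk
			std::vector<char> const buf = lt::write_resume_data_buf(rd->params);
			std::ofstream of(".resume_file", std::ios_base::binary);
			of.write(buf.data(), buf.size());
		}
	}
}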

View File

@ -606,6 +606,7 @@ SOURCES =
puff
random
read_resume_data
write_resume_data
receive_buffer
resolve_links
session

View File

@ -610,7 +610,10 @@ void bind_alert()
class_<save_resume_data_alert, bases<torrent_alert>, noncopyable>(
"save_resume_data_alert", no_init)
.def_readonly("params", &save_resume_data_alert::params)
#ifndef TORRENT_NO_DEPRECATE
.def_readonly("resume_data", &save_resume_data_alert::resume_data)
#endif
;
class_<file_completed_alert, bases<torrent_alert>, noncopyable>(

View File

@ -42,6 +42,7 @@ POSSIBILITY OF SUCH DAMAGE.
#include <libtorrent/bencode.hpp>
#include <libtorrent/torrent_status.hpp>
#include <libtorrent/read_resume_data.hpp>
#include <libtorrent/write_resume_data.hpp>
#include <libtorrent/error_code.hpp>
namespace lt = libtorrent;
@ -115,8 +116,8 @@ int main(int argc, char const* argv[])
if (auto rd = lt::alert_cast<lt::save_resume_data_alert>(a)) {
std::ofstream of(".resume_file", std::ios_base::binary);
of.unsetf(std::ios_base::skipws);
lt::bencode(std::ostream_iterator<char>(of)
, *rd->resume_data);
auto buf = write_resume_data_buf(rd->params);
of.write(buf.data(), buf.size());
}
if (auto st = lt::alert_cast<lt::state_update_alert>(a)) {

View File

@ -65,6 +65,7 @@ POSSIBILITY OF SUCH DAMAGE.
#include "libtorrent/time.hpp"
#include "libtorrent/create_torrent.hpp"
#include "libtorrent/read_resume_data.hpp"
#include "libtorrent/write_resume_data.hpp"
#include "torrent_view.hpp"
#include "session_view.hpp"
@ -826,7 +827,7 @@ void print_alert(libtorrent::alert const* a, std::string& str)
std::fprintf(g_log_file, "[%s] %s\n", timestamp(), a->message().c_str());
}
int save_file(std::string const& filename, std::vector<char>& v)
int save_file(std::string const& filename, std::vector<char> const& v)
{
FILE* f = std::fopen(filename.c_str(), "wb");
if (f == nullptr)
@ -1029,21 +1030,16 @@ bool handle_alert(libtorrent::session& ses, libtorrent::alert* a
{
--num_outstanding_resume_data;
torrent_handle h = p->handle;
TORRENT_ASSERT(p->resume_data);
if (p->resume_data)
auto const buf = write_resume_data_buf(p->params);
torrent_status st = h.status(torrent_handle::query_save_path);
save_file(path_append(st.save_path, path_append(".resume", leaf_path(
hash_to_filename[st.info_hash]) + ".resume")), buf);
if (h.is_valid()
&& non_files.find(h) == non_files.end()
&& std::none_of(files.begin(), files.end()
, [&h](handles_t::value_type const& hn) { return hn.second == h; }))
{
std::vector<char> out;
bencode(std::back_inserter(out), *p->resume_data);
torrent_status st = h.status(torrent_handle::query_save_path);
save_file(path_append(st.save_path, path_append(".resume", leaf_path(
hash_to_filename[st.info_hash]) + ".resume")), out);
if (h.is_valid()
&& non_files.find(h) == non_files.end()
&& std::none_of(files.begin(), files.end()
, [&h](handles_t::value_type const& hn) { return hn.second == h; }))
{
ses.remove_torrent(h);
}
ses.remove_torrent(h);
}
}
else if (save_resume_data_failed_alert* p = alert_cast<save_resume_data_failed_alert>(a))

View File

@ -104,6 +104,7 @@ nobase_include_HEADERS = \
puff.hpp \
random.hpp \
read_resume_data.hpp \
write_resume_data.hpp \
receive_buffer.hpp \
resolve_links.hpp \
resolver.hpp \

View File

@ -61,7 +61,7 @@ namespace libtorrent
// * info_hash - when all you have is an info-hash (this is similar to a
// magnet link)
//
// one of those fields need to be set. Another mandatory field is
// one of those fields must be set. Another mandatory field is
// ``save_path``. The add_torrent_params object is passed into one of the
// ``session::add_torrent()`` overloads or ``session::async_add_torrent()``.
//
@ -74,6 +74,11 @@ namespace libtorrent
// used for the torrent as long as it doesn't have metadata. See
// ``torrent_handle::name``.
//
// The ``add_torrent_params`` is also used when requesting resume data for a
// torrent. It can be saved to and restored from a file and added back to a
// new session. For serialization and deserialization of
// ``add_torrent_params`` objects, see read_resume_data() and
// write_resume_data().
struct TORRENT_EXPORT add_torrent_params
{
// The constructor can be used to initialize the storage constructor,
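
To complement the documentation above, a hedged sketch (not part of this commit) of the restore side: loading a resume file back into an add_torrent_params object and handing it to the session. The file name is made up, and the read_resume_data() overload taking a bdecode_node and error_code is assumed to match what read_resume_data.hpp declares at this revision.

#include <fstream>
#include <iterator>
#include <vector>

#include <libtorrent/session.hpp>
#include <libtorrent/add_torrent_params.hpp>
#include <libtorrent/bdecode.hpp>
#include <libtorrent/error_code.hpp>
#include <libtorrent/read_resume_data.hpp>

namespace lt = libtorrent;

void add_from_resume(lt::session& ses)
{
	// slurp the bencoded resume file back into memory
	std::ifstream in(".resume_file", std::ios_base::binary);
	std::vector<char> buf{std::istreambuf_iterator<char>(in)
		, std::istreambuf_iterator<char>()};

	lt::error_code ec;
	lt::bdecode_node rd;
	lt::bdecode(buf.data(), buf.data() + buf.size(), rd, ec);
	if (ec) return;

	// read_resume_data() fills in save_path, metadata/info-hash, piece
	// state and the other fields serialized by write_resume_data()
	lt::add_torrent_params p = lt::read_resume_data(rd, ec);
	if (ec) return;

	ses.async_add_torrent(std::move(p));
}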

View File

@ -975,7 +975,7 @@ namespace libtorrent
{
// internal
save_resume_data_alert(aux::stack_allocator& alloc
, std::shared_ptr<entry> const& rd
, add_torrent_params params
, torrent_handle const& h);
TORRENT_DEFINE_ALERT_PRIO(save_resume_data_alert, 37)
@ -983,8 +983,15 @@ namespace libtorrent
static const int static_category = alert::storage_notification;
virtual std::string message() const override;
// the ``params`` structure is populated with the fields to be passed to
// add_torrent() or async_add_torrent() to resume the torrent. To
// save the state to disk, you may pass it on to write_resume_data().
add_torrent_params params;
#ifndef TORRENT_NO_DEPRECATE
// points to the resume data.
std::shared_ptr<entry> const resume_data;
std::shared_ptr<entry> resume_data;
#endif
};
// This alert is generated instead of ``save_resume_data_alert`` if there was an error

View File

@ -959,7 +959,7 @@ namespace libtorrent
torrent_handle get_handle();
void write_resume_data(entry& rd) const;
void write_resume_data(add_torrent_params& atp) const;
void seen_complete() { m_last_seen_complete = time(0); }
int time_since_complete() const { return int(time(0) - m_last_seen_complete); }

View File

@ -0,0 +1,52 @@
/*
Copyright (c) 2017, Arvid Norberg
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TORRENT_WRITE_RESUME_DATA_HPP_INCLUDE
#define TORRENT_WRITE_RESUME_DATA_HPP_INCLUDE
#include "libtorrent/error_code.hpp"
#include "libtorrent/export.hpp"
#include "libtorrent/bencode.hpp"
namespace libtorrent
{
struct add_torrent_params;
class entry;
// this function turns the resume data in an ``add_torrent_params`` object
// into a bencoded structure
TORRENT_EXPORT entry write_resume_data(add_torrent_params const& atp);
TORRENT_EXPORT std::vector<char> write_resume_data_buf(add_torrent_params const& atp);
}
#endif
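
A hedged usage sketch (not part of this header): write_resume_data() returns an entry the caller can inspect or extend before bencoding it, while write_resume_data_buf() produces the bencoded bytes directly. The "my-client" key below is purely hypothetical.

#include <iterator>
#include <vector>

#include <libtorrent/add_torrent_params.hpp>
#include <libtorrent/bencode.hpp>
#include <libtorrent/entry.hpp>
#include <libtorrent/write_resume_data.hpp>

namespace lt = libtorrent;

std::vector<char> to_resume_bytes(lt::add_torrent_params const& atp)
{
	lt::entry e = lt::write_resume_data(atp);
	e["my-client"] = "example 1.0"; // hypothetical custom field

	// without the custom field, lt::write_resume_data_buf(atp) would
	// produce the same bytes in a single call
	std::vector<char> buf;
	lt::bencode(std::back_inserter(buf), e);
	return buf;
}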

View File

@ -113,6 +113,7 @@ libtorrent_rasterbar_la_SOURCES = \
random.cpp \
receive_buffer.cpp \
read_resume_data.cpp \
write_resume_data.cpp \
request_blocks.cpp \
resolve_links.cpp \
resolver.cpp \

View File

@ -45,6 +45,10 @@ POSSIBILITY OF SUCH DAMAGE.
#include "libtorrent/piece_block.hpp"
#include "libtorrent/hex.hpp" // to_hex
#ifndef TORRENT_NO_DEPRECATE
#include "libtorrent/write_resume_data.hpp"
#endif
#include "libtorrent/aux_/escape_string.hpp" // for convert_from_native
#include "libtorrent/aux_/max_path.hpp" // for TORRENT_MAX_PATH
@ -696,11 +700,15 @@ namespace libtorrent
}
save_resume_data_alert::save_resume_data_alert(aux::stack_allocator& alloc
, std::shared_ptr<entry> const& rd
, add_torrent_params p
, torrent_handle const& h)
: torrent_alert(alloc, h)
, resume_data(rd)
{}
, params(std::move(p))
{
#ifndef TORRENT_NO_DEPRECATE
resume_data = std::make_shared<entry>(write_resume_data(params));
#endif
}
std::string save_resume_data_alert::message() const
{

View File

@ -2100,7 +2100,7 @@ namespace libtorrent
need_picker();
const int num_bits = (std::min)(num_blocks_per_piece, blocks.size());
const int num_bits = std::min(num_blocks_per_piece, blocks.size());
for (int k = 0; k < num_bits; ++k)
{
if (blocks.get_bit(k))
@ -5927,73 +5927,56 @@ namespace libtorrent
return m_torrent_file;
}
void torrent::write_resume_data(entry& ret) const
void torrent::write_resume_data(add_torrent_params& ret) const
{
using namespace libtorrent::detail; // for write_*_endpoint()
ret["file-format"] = "libtorrent resume file";
ret["file-version"] = 1;
ret["libtorrent-version"] = LIBTORRENT_VERSION;
ret["allocation"] = m_storage_mode == storage_mode_allocate
? "allocate" : "sparse";
ret.version = LIBTORRENT_VERSION_NUM;
ret.storage_mode = storage_mode();
ret.total_uploaded = m_total_uploaded;
ret.total_downloaded = m_total_downloaded;
ret["total_uploaded"] = m_total_uploaded;
ret["total_downloaded"] = m_total_downloaded;
// cast to seconds in case the internal values don't have ratio<1>
ret.active_time = static_cast<int>(total_seconds(active_time()));
ret.finished_time = static_cast<int>(total_seconds(finished_time()));
ret.seeding_time = static_cast<int>(total_seconds(seeding_time()));
ret.last_seen_complete = m_last_seen_complete;
ret["active_time"] = total_seconds(active_time());
ret["finished_time"] = total_seconds(finished_time());
ret["seeding_time"] = total_seconds(seeding_time());
ret["last_seen_complete"] = m_last_seen_complete;
ret.num_complete = m_complete;
ret.num_incomplete = m_incomplete;
ret.num_downloaded = m_downloaded;
ret["num_complete"] = m_complete;
ret["num_incomplete"] = m_incomplete;
ret["num_downloaded"] = m_downloaded;
ret.flags = 0;
if (m_sequential_download) ret.flags |= add_torrent_params::flag_sequential_download;
if (m_seed_mode ) ret.flags |= add_torrent_params::flag_seed_mode;
if (m_super_seeding ) ret.flags |= add_torrent_params::flag_super_seeding;
if (is_torrent_paused()) ret.flags |= add_torrent_params::flag_paused;
if (m_auto_managed ) ret.flags |= add_torrent_params::flag_auto_managed;
ret["sequential_download"] = m_sequential_download;
ret.added_time = m_added_time;
ret.completed_time = m_completed_time;
ret["seed_mode"] = m_seed_mode;
ret["super_seeding"] = m_super_seeding;
ret["added_time"] = m_added_time;
ret["completed_time"] = m_completed_time;
ret["save_path"] = m_save_path;
ret.save_path = m_save_path;
#ifndef TORRENT_NO_DEPRECATE
// deprecated in 1.2
if (!m_url.empty()) ret["url"] = m_url;
if (!m_uuid.empty()) ret["uuid"] = m_uuid;
ret.url = m_url;
ret.uuid = m_uuid;
#endif
const sha1_hash& info_hash = torrent_file().info_hash();
ret["info-hash"] = info_hash.to_string();
ret.info_hash = torrent_file().info_hash();
if (valid_metadata())
{
if (m_magnet_link || (m_save_resume_flags & torrent_handle::save_info_dict))
{
ret["info"] = bdecode(&torrent_file().metadata()[0]
, &torrent_file().metadata()[0] + torrent_file().metadata_size());
// TODO: re-enable this code once there's a non-inlined encoder function. Or
// perhaps this should not be used until saving resume_data via
// add_torrent_params and a free function, similar to read_resume_data
// boost::shared_array<char> const info = torrent_file().metadata();
// int const size = torrent_file().metadata_size();
// ret["info"].preformatted().assign(&info[0], &info[0] + size);
ret.ti = m_torrent_file;
}
}
// blocks per piece
int const num_blocks_per_piece = torrent_file().piece_length() / block_size();
ret["blocks per piece"] = num_blocks_per_piece;
if (m_torrent_file->is_merkle_torrent())
{
// we need to save the whole merkle hash tree
// in order to resume
std::string& tree_str = ret["merkle tree"].string();
std::vector<sha1_hash> const& tree = m_torrent_file->merkle_tree();
tree_str.resize(tree.size() * 20);
std::memcpy(&tree_str[0], &tree[0], tree.size() * 20);
ret.merkle_tree = m_torrent_file->merkle_tree();
}
// if this torrent is a seed, we won't have a piece picker
@ -6001,74 +5984,46 @@ namespace libtorrent
// in either case; there will be no half-finished pieces.
if (has_picker())
{
std::vector<piece_picker::downloading_piece> q
= m_picker->get_download_queue();
int const num_blocks_per_piece = torrent_file().piece_length() / block_size();
// unfinished pieces
ret["unfinished"] = entry::list_type();
entry::list_type& up = ret["unfinished"].list();
std::vector<piece_picker::downloading_piece> const q
= m_picker->get_download_queue();
// info for each unfinished piece
for (piece_picker::downloading_piece const& dp : q)
{
if (dp.finished == 0) continue;
entry piece_struct(entry::dictionary_t);
// the unfinished piece's index
piece_struct["piece"] = static_cast<int>(dp.index);
std::string bitmask;
int const num_bitmask_bytes
= std::max(num_blocks_per_piece / 8, 1);
bitfield bitmask;
bitmask.resize(num_blocks_per_piece, false);
auto const info = m_picker->blocks_for_piece(dp);
for (int j = 0; j < num_bitmask_bytes; ++j)
for (int i = 0; i < num_blocks_per_piece; ++i)
{
char v = 0;
int bits = (std::min)(num_blocks_per_piece - j * 8, 8);
for (int k = 0; k < bits; ++k)
v |= (info[j * 8 + k].state == piece_picker::block_info::state_finished)
? (1 << k) : 0;
bitmask.append(1, v);
TORRENT_ASSERT(bits == 8 || j == num_bitmask_bytes - 1);
if (info[i].state == piece_picker::block_info::state_finished)
bitmask.set_bit(i);
}
piece_struct["bitmask"] = bitmask;
// push the struct onto the unfinished-piece list
up.push_back(piece_struct);
ret.unfinished_pieces.insert({dp.index, std::move(bitmask)});
}
}
// save trackers
entry::list_type& tr_list = ret["trackers"].list();
tr_list.push_back(entry::list_type());
int tier = 0;
for (announce_entry const& tr : m_trackers)
{
if (tr.tier == tier)
{
tr_list.back().list().push_back(tr.url);
}
else
{
tr_list.push_back(entry::list_t);
tr_list.back().list().push_back(tr.url);
tier = tr.tier;
}
ret.trackers.push_back(tr.url);
ret.tracker_tiers.push_back(tr.tier);
}
// save web seeds
if (!m_web_seeds.empty())
{
entry::list_type& url_list = ret["url-list"].list();
entry::list_type& httpseed_list = ret["httpseeds"].list();
for (web_seed_t const& ws : m_web_seeds)
{
if (ws.removed || ws.ephemeral) continue;
if (ws.type == web_seed_entry::url_seed)
url_list.push_back(ws.url);
ret.url_seeds.push_back(ws.url);
else if (ws.type == web_seed_entry::http_seed)
httpseed_list.push_back(ws.url);
ret.http_seeds.push_back(ws.url);
}
}
@ -6092,76 +6047,48 @@ namespace libtorrent
if (max_piece > piece_index_t(0))
{
entry::string_type& pieces = ret["pieces"].string();
pieces.resize(aux::numeric_cast<std::size_t>(static_cast<int>(max_piece)));
if (is_seed())
{
std::memset(&pieces[0], m_have_all, pieces.size());
ret.have_pieces.resize(static_cast<int>(max_piece), m_have_all);
}
else if (has_picker())
{
ret.have_pieces.resize(static_cast<int>(max_piece), false);
for (piece_index_t i(0); i < max_piece; ++i)
pieces[std::size_t(static_cast<int>(i))] = m_picker->have_piece(i) ? 1 : 0;
{
if (m_picker->have_piece(i)) ret.have_pieces.set_bit(i);
}
}
if (m_seed_mode)
{
TORRENT_ASSERT(m_verified.size() == int(pieces.size()));
TORRENT_ASSERT(m_verifying.size() == int(pieces.size()));
for (piece_index_t i(0); i < max_piece; ++i)
pieces[std::size_t(static_cast<int>(i))] |= m_verified[i] ? 2 : 0;
}
ret.verified_pieces = m_verified;
}
// write renamed files
if (&m_torrent_file->files() != &m_torrent_file->orig_files()
&& m_torrent_file->files().num_files() == m_torrent_file->orig_files().num_files())
{
entry::list_type& fl = ret["mapped_files"].list();
file_storage const& fs = m_torrent_file->files();
file_storage const& orig_fs = m_torrent_file->orig_files();
for (file_index_t i(0); i < fs.end_file(); ++i)
{
fl.push_back(fs.file_path(i));
if (fs.file_path(i) != orig_fs.file_path(i))
ret.renamed_files[i] = fs.file_path(i);
}
}
// write local peers
std::back_insert_iterator<entry::string_type> peers(ret["peers"].string());
std::back_insert_iterator<entry::string_type> banned_peers(ret["banned_peers"].string());
#if TORRENT_USE_IPV6
std::back_insert_iterator<entry::string_type> peers6(ret["peers6"].string());
std::back_insert_iterator<entry::string_type> banned_peers6(ret["banned_peers6"].string());
#endif
int num_saved_peers = 0;
std::vector<torrent_peer const*> deferred_peers;
if (m_peer_list)
{
for (auto p : *m_peer_list)
{
error_code ec;
address addr = p->address();
#if TORRENT_USE_I2P
if (p->is_i2p_addr)
continue;
if (p->is_i2p_addr) continue;
#endif
if (p->banned)
{
#if TORRENT_USE_IPV6
if (addr.is_v6())
{
write_address(addr, banned_peers6);
write_uint16(p->port, banned_peers6);
}
else
#endif
{
write_address(addr, banned_peers);
write_uint16(p->port, banned_peers);
}
ret.banned_peers.push_back(p->ip());
continue;
}
@ -6185,73 +6112,44 @@ namespace libtorrent
// we haven't connected to this peer. It might still
// be useful to save it, but only save it if we
// don't have enough peers that we actually did connect to
deferred_peers.push_back(p);
if (int(deferred_peers.size()) < 100)
deferred_peers.push_back(p);
continue;
}
#if TORRENT_USE_IPV6
if (addr.is_v6())
{
write_address(addr, peers6);
write_uint16(p->port, peers6);
}
else
#endif
{
write_address(addr, peers);
write_uint16(p->port, peers);
}
++num_saved_peers;
ret.peers.push_back(p->ip());
}
}
// if we didn't save 100 peers, fill in with second choice peers
if (num_saved_peers < 100)
if (int(ret.peers.size()) < 100)
{
aux::random_shuffle(deferred_peers.begin(), deferred_peers.end());
for (std::vector<torrent_peer const*>::const_iterator i = deferred_peers.begin()
, end(deferred_peers.end()); i != end && num_saved_peers < 100; ++i)
for (auto const p : deferred_peers)
{
torrent_peer const* p = *i;
address addr = p->address();
#if TORRENT_USE_IPV6
if (addr.is_v6())
{
write_address(addr, peers6);
write_uint16(p->port, peers6);
}
else
#endif
{
write_address(addr, peers);
write_uint16(p->port, peers);
}
++num_saved_peers;
ret.peers.push_back(p->ip());
if (int(ret.peers.size()) >= 100) break;
}
}
ret["upload_rate_limit"] = upload_limit();
ret["download_rate_limit"] = download_limit();
ret["max_connections"] = max_connections();
ret["max_uploads"] = max_uploads();
ret["paused"] = is_torrent_paused();
ret["auto_managed"] = m_auto_managed;
ret.upload_limit = upload_limit();
ret.download_limit = download_limit();
ret.max_connections = max_connections();
ret.max_uploads = max_uploads();
// piece priorities and file priorities are mutually exclusive. If there
// are file priorities set, don't save piece priorities.
if (!m_file_priority.empty())
{
// when in seed mode (i.e. the client promises that we have all files)
// it does not make sense to save file priorities.
if (!m_seed_mode)
{
// write file priorities
entry::list_type& file_priority = ret["file_priority"].list();
file_priority.clear();
ret.file_priorities.clear();
ret.file_priorities.reserve(m_file_priority.size());
for (auto const prio : m_file_priority)
file_priority.push_back(prio);
ret.file_priorities.push_back(prio);
}
}
else if (has_picker())
@ -6269,11 +6167,11 @@ namespace libtorrent
if (!default_prio)
{
entry::string_type& piece_priority = ret["piece_priority"].string();
piece_priority.resize(aux::numeric_cast<std::size_t>(m_torrent_file->num_pieces()));
ret.piece_priorities.clear();
ret.piece_priorities.reserve(static_cast<std::size_t>(m_torrent_file->num_pieces()));
for (piece_index_t i(0); i < fs.end_piece(); ++i)
piece_priority[std::size_t(static_cast<int>(i))] = entry::string_type::value_type(m_picker->piece_priority(i));
ret.piece_priorities.push_back(static_cast<std::uint8_t>(m_picker->piece_priority(i)));
}
}
}
@ -8347,9 +8245,9 @@ namespace libtorrent
state_updated();
auto rd = std::make_shared<entry>();
write_resume_data(*rd);
alerts().emplace_alert<save_resume_data_alert>(rd, get_handle());
add_torrent_params atp;
write_resume_data(atp);
alerts().emplace_alert<save_resume_data_alert>(std::move(atp), get_handle());
}
bool torrent::should_check_files() const

View File

@ -47,6 +47,7 @@ POSSIBILITY OF SUCH DAMAGE.
#include "libtorrent/invariant_check.hpp"
#include "libtorrent/utf8.hpp"
#include "libtorrent/announce_entry.hpp"
#include "libtorrent/write_resume_data.hpp"
#if TORRENT_COMPLETE_TYPES_REQUIRED
#include "libtorrent/peer_info.hpp" // for peer_list_entry
@ -625,10 +626,10 @@ namespace libtorrent
entry torrent_handle::write_resume_data() const
{
entry ret(entry::dictionary_t);
auto retr = std::ref(ret);
add_torrent_params params;
auto retr = std::ref(params);
sync_call(&torrent::write_resume_data, retr);
return ret;
return libtorrent::write_resume_data(params);
}
std::string torrent_handle::save_path() const

src/write_resume_data.cpp (new file, 257 lines)
View File

@ -0,0 +1,257 @@
/*
Copyright (c) 2017, Arvid Norberg
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#include <cstdint>
#include "libtorrent/bdecode.hpp"
#include "libtorrent/write_resume_data.hpp"
#include "libtorrent/add_torrent_params.hpp"
#include "libtorrent/socket_io.hpp" // for write_*_endpoint()
#include "libtorrent/hasher.hpp"
#include "libtorrent/torrent_info.hpp"
#include "libtorrent/aux_/numeric_cast.hpp"
#include "libtorrent/torrent.hpp" // for default_piece_priority
#include "libtorrent/aux_/numeric_cast.hpp" // for clamp
namespace libtorrent
{
entry write_resume_data(add_torrent_params const& atp)
{
entry ret;
using namespace libtorrent::detail; // for write_*_endpoint()
ret["file-format"] = "libtorrent resume file";
ret["file-version"] = 1;
ret["libtorrent-version"] = LIBTORRENT_VERSION;
ret["allocation"] = atp.storage_mode == storage_mode_allocate
? "allocate" : "sparse";
ret["total_uploaded"] = atp.total_uploaded;
ret["total_downloaded"] = atp.total_downloaded;
// cast to seconds in case the internal values don't have ratio<1>
ret["active_time"] = atp.active_time;
ret["finished_time"] = atp.finished_time;
ret["seeding_time"] = atp.seeding_time;
ret["last_seen_complete"] = atp.last_seen_complete;
ret["num_complete"] = atp.num_complete;
ret["num_incomplete"] = atp.num_incomplete;
ret["num_downloaded"] = atp.num_downloaded;
ret["sequential_download"] = atp.flags & add_torrent_params::flag_sequential_download;
ret["seed_mode"] = atp.flags & add_torrent_params::flag_seed_mode;
ret["super_seeding"] = atp.flags & add_torrent_params::flag_super_seeding;
ret["added_time"] = atp.added_time;
ret["completed_time"] = atp.completed_time;
ret["save_path"] = atp.save_path;
#ifndef TORRENT_NO_DEPRECATE
// deprecated in 1.2
if (!atp.url.empty()) ret["url"] = atp.url;
if (!atp.uuid.empty()) ret["uuid"] = atp.uuid;
#endif
ret["info-hash"] = atp.info_hash;
if (atp.ti)
{
boost::shared_array<char> const info = atp.ti->metadata();
int const size = atp.ti->metadata_size();
ret["info"].preformatted().assign(&info[0], &info[0] + size);
}
if (!atp.merkle_tree.empty())
{
// we need to save the whole merkle hash tree
// in order to resume
std::string& tree_str = ret["merkle tree"].string();
auto const& tree = atp.merkle_tree;
tree_str.resize(tree.size() * 20);
std::memcpy(&tree_str[0], &tree[0], tree.size() * 20);
}
if (!atp.unfinished_pieces.empty())
{
entry::list_type& up = ret["unfinished"].list();
// info for each unfinished piece
for (auto const& p : atp.unfinished_pieces)
{
entry piece_struct(entry::dictionary_t);
// the unfinished piece's index
piece_struct["piece"] = static_cast<int>(p.first);
std::string& bitmask = piece_struct["bitmask"].string();
for (auto bit : p.second)
bitmask.push_back(bit ? '1' : '0');
// push the struct onto the unfinished-piece list
up.push_back(std::move(piece_struct));
}
}
// save trackers
if (!atp.trackers.empty())
{
entry::list_type& tr_list = ret["trackers"].list();
tr_list.push_back(entry::list_type());
std::size_t tier = 0;
auto tier_it = atp.tracker_tiers.begin();
for (std::string const& tr : atp.trackers)
{
if (tier_it != atp.tracker_tiers.end())
tier = aux::clamp(std::size_t(*tier_it++), std::size_t{0}, std::size_t{1024});
if (tr_list.size() <= tier)
tr_list.resize(tier + 1);
tr_list[tier].list().push_back(tr);
}
}
// save web seeds
if (!atp.url_seeds.empty())
{
entry::list_type& url_list = ret["url-list"].list();
std::copy(atp.url_seeds.begin(), atp.url_seeds.end(), std::back_inserter(url_list));
}
if (!atp.http_seeds.empty())
{
entry::list_type& url_list = ret["httpseeds"].list();
std::copy(atp.http_seeds.begin(), atp.http_seeds.end(), std::back_inserter(url_list));
}
// write have bitmask
entry::string_type& pieces = ret["pieces"].string();
pieces.resize(aux::numeric_cast<std::size_t>(std::max(
atp.have_pieces.size(), atp.verified_pieces.size())));
std::size_t piece(0);
for (auto const bit : atp.have_pieces)
{
pieces[piece] = bit ? 1 : 0;
++piece;
}
piece = 0;
for (auto const bit : atp.verified_pieces)
{
pieces[piece] |= bit ? 2 : 0;
++piece;
}
// write renamed files
if (!atp.renamed_files.empty())
{
entry::list_type& fl = ret["mapped_files"].list();
for (auto const& ent : atp.renamed_files)
{
std::size_t const idx(static_cast<std::size_t>(static_cast<int>(ent.first)));
if (idx >= fl.size()) fl.resize(idx + 1);
fl[idx] = ent.second;
}
}
// write local peers
if (!atp.peers.empty())
{
std::back_insert_iterator<entry::string_type> ptr(ret["peers"].string());
#if TORRENT_USE_IPV6
std::back_insert_iterator<entry::string_type> ptr6(ret["peers6"].string());
#endif
for (auto const& p : atp.peers)
{
#if TORRENT_USE_IPV6
if (p.address().is_v6())
write_endpoint(p, ptr6);
else
#endif
write_endpoint(p, ptr);
}
}
if (!atp.banned_peers.empty())
{
std::back_insert_iterator<entry::string_type> ptr(ret["banned_peers"].string());
#if TORRENT_USE_IPV6
std::back_insert_iterator<entry::string_type> ptr6(ret["banned_peers6"].string());
#endif
for (auto const& p : atp.banned_peers)
{
#if TORRENT_USE_IPV6
if (p.address().is_v6())
write_endpoint(p, ptr6);
else
#endif
write_endpoint(p, ptr);
}
}
ret["upload_rate_limit"] = atp.upload_limit;
ret["download_rate_limit"] = atp.download_limit;
ret["max_connections"] = atp.max_connections;
ret["max_uploads"] = atp.upload_limit;
ret["paused"] = atp.flags & add_torrent_params::flag_paused;
ret["auto_managed"] = atp.flags & add_torrent_params::flag_auto_managed;
if (!atp.file_priorities.empty())
{
// write file priorities
entry::list_type& prio = ret["file_priority"].list();
for (auto const p : atp.file_priorities)
prio.push_back(p);
}
if (!atp.piece_priorities.empty())
{
// write piece priorities
entry::string_type& prio = ret["piece_priority"].string();
for (auto const p : atp.piece_priorities)
prio.push_back(static_cast<char>(p));
}
return ret;
}
std::vector<char> write_resume_data_buf(add_torrent_params const& atp)
{
std::vector<char> ret;
entry rd = write_resume_data(atp);
bencode(std::back_inserter(ret), rd);
return ret;
}
}