revamp the way resume data is read, by providing a function for the client to parse resume data, capturing it in add_torrent_params

This commit is contained in:
arvidn 2015-12-15 00:03:43 -05:00
parent 1710b7c750
commit 197ded5a0d
11 changed files with 492 additions and 394 deletions

View File

@ -1,3 +1,5 @@
* added new read_resume_data() function, initializing add_torrent_params
* removed deprecated fields from add_torrent_params
* improved support for bind-to-device
* deprecated ssl_listen, SSL sockets are specified in listen_interfaces now
* improved support for listening on multiple sockets and interfaces

View File

@ -606,6 +606,7 @@ SOURCES =
proxy_base
puff
random
read_resume_data
receive_buffer
resolve_links
rss

View File

@ -35,7 +35,10 @@ POSSIBILITY OF SUCH DAMAGE.
#include <string>
#include <vector>
#include "libtorrent/aux_/disable_warnings_push.hpp"
#include <boost/shared_ptr.hpp>
#include "libtorrent/aux_/disable_warnings_pop.hpp"
#include "libtorrent/storage_defs.hpp"
#include "libtorrent/peer_id.hpp" // sha1_hash
@ -75,53 +78,26 @@ namespace libtorrent
// data for the torrent. For more information, see the ``storage`` field.
add_torrent_params(storage_constructor_type sc = default_storage_constructor)
: version(LIBTORRENT_VERSION_NUM)
#ifndef TORRENT_NO_DEPRECATE
, tracker_url(0)
#endif
, storage_mode(storage_mode_sparse)
, storage(sc)
, userdata(0)
#ifndef TORRENT_NO_DEPRECATE
, flags(flag_ignore_flags | default_flags)
#else
, flags(default_flags)
#endif
, max_uploads(-1)
, max_connections(-1)
, upload_limit(-1)
, download_limit(-1)
#ifndef TORRENT_NO_DEPRECATE
, seed_mode(false)
, override_resume_data(false)
, upload_mode(false)
, share_mode(false)
, apply_ip_filter(true)
, paused(true)
, auto_managed(true)
, duplicate_is_error(false)
, merge_resume_trackers(false)
#endif
{
}
#ifndef TORRENT_NO_DEPRECATE
void update_flags() const
{
if (flags != (flag_ignore_flags | default_flags)) return;
boost::uint64_t& f = const_cast<boost::uint64_t&>(flags);
f = flag_update_subscribe;
if (seed_mode) f |= flag_seed_mode;
if (override_resume_data) f |= flag_override_resume_data;
if (upload_mode) f |= flag_upload_mode;
if (share_mode) f |= flag_share_mode;
if (apply_ip_filter) f |= flag_apply_ip_filter;
if (paused) f |= flag_paused;
if (auto_managed) f |= flag_auto_managed;
if (duplicate_is_error) f |= flag_duplicate_is_error;
if (merge_resume_trackers) f |= flag_merge_resume_trackers;
}
#endif
, total_uploaded(0)
, total_downloaded(0)
, active_time(0)
, finished_time(0)
, seeding_time(0)
, added_time(0)
, completed_time(0)
, last_seen_complete(0)
, num_complete(-1)
, num_incomplete(-1)
, num_downloaded(-1)
{}
// values for the ``flags`` field
enum flags_t
@ -230,6 +206,8 @@ namespace libtorrent
// replaced by any trackers in the resume data. The default behavior is
// to have the resume data override the .torrent file _and_ the
// trackers added in add_torrent_params.
//#error this needs to change slightly. Should trackers be extracted from the \
.torrent file on the client side? when in that case?
flag_merge_resume_trackers = 0x100,
// on by default and means that this torrent will be part of state
@ -247,10 +225,12 @@ namespace libtorrent
// the torrent handle immediately after adding it.
flag_sequential_download = 0x800,
#ifndef TORRENT_NO_DEPRECATE
// if this flag is set, the save path from the resume data file, if
// present, is honored. This defaults to not being set, in which
// case the save_path specified in add_torrent_params is always used.
flag_use_resume_save_path = 0x1000,
#endif
// indicates that this torrent should never be unloaded from RAM, even
// if unloading torrents are allowed in general. Setting this makes
@ -265,6 +245,8 @@ namespace libtorrent
// add_torrent_params are also replaced. The default behavior is to
// have any web seeds in the resume data take precedence over whatever
// is passed in here as well as the .torrent file.
//#error this needs to change slightly. Should web seeds be extracted from the \
.torrent file on the client side? when in that case?
flag_merge_resume_http_seeds = 0x2000,
// the stop when ready flag. Setting this flag is equivalent to calling
@ -273,11 +255,7 @@ namespace libtorrent
flag_stop_when_ready = 0x4000,
// internal
default_flags = flag_pinned | flag_update_subscribe | flag_auto_managed | flag_paused | flag_apply_ip_filter
#ifndef TORRENT_NO_DEPRECATE
, flag_ignore_flags = 0x80000000
#endif
default_flags = flag_merge_resume_http_seeds | flag_merge_resume_trackers | flag_pinned | flag_update_subscribe | flag_auto_managed | flag_paused | flag_apply_ip_filter
};
// filled in by the constructor and should be left untouched. It is used
@ -287,13 +265,17 @@ namespace libtorrent
// info_hash is set, this is required to be initialized.
boost::shared_ptr<torrent_info> ti;
#ifndef TORRENT_NO_DEPRECATE
char const* tracker_url;
#endif
// If the torrent doesn't have a tracker, but relies on the DHT to find
// peers, the ``trackers`` can specify tracker URLs for the torrent.
std::vector<std::string> trackers;
// the tiers the URLs in ``trackers`` belong to. Trackers belonging to
// different tiers may be treated differently, as defined by the multi
// tracker extension. This is optional, if not specified trackers are
// assumed to be part of tier 0, or whichever the last tier was as
// iterating over the trackers.
std::vector<int> tracker_tiers;
// url seeds to be added to the torrent (`BEP 17`_).
std::vector<std::string> url_seeds;
@ -302,10 +284,7 @@ namespace libtorrent
std::vector<std::pair<std::string, int> > dht_nodes;
std::string name;
// the path where the torrent is or will be stored. Note that this may
// also be stored in resume data. If you want the save path saved in
// the resume data to be used, you need to set the
// flag_use_resume_save_path flag.
// the path where the torrent is or will be stored.
//
// .. note::
// On windows this path (and other paths) are interpreted as UNC
@ -313,6 +292,9 @@ namespace libtorrent
// and may not contain the special directories "." or "..".
std::string save_path;
#error do we still need this? what's left in the resume data that isn't parsed \
out into add_torrent_params? should it be moved to add_torrent_params?
// The optional parameter, ``resume_data`` can be given if up to date
// fast-resume data is available. The fast-resume data can be acquired
// from a running torrent by calling save_resume_data() on
@ -398,18 +380,45 @@ namespace libtorrent
int upload_limit;
int download_limit;
#ifndef TORRENT_NO_DEPRECATE
bool seed_mode;
bool override_resume_data;
bool upload_mode;
bool share_mode;
bool apply_ip_filter;
bool paused;
bool auto_managed;
bool duplicate_is_error;
bool merge_resume_trackers;
#endif
// the total number of bytes uploaded and downloaded by this torrent so
// far.
boost::int64_t total_uploaded;
boost::int64_t total_downloaded;
// the number of seconds this torrent has spent in started, finished and
// seeding state so far, respectively.
int active_time;
int finished_time;
int seeding_time;
// if set to a non-zero value, this is the posix time of when this torrent
// was first added, including previous runs/sessions. If set to zero, the
// internal added_time will be set to the time of when add_torrent() is
// called.
time_t added_time;
time_t completed_time;
// if set to non-zero, initializes the time (expressed in posix time) when
// we last saw a seed or peers that together formed a complete copy of the
// torrent. If left set to zero, the internal counterpart to this field
// will be updated when we see a seed or a distributed copies >= 1.0.
time_t last_seen_complete;
// these field can be used to initialize the torrent's cached scrape data.
// The scrape data is high level metadata about the current state of the
// swarm, as returned by the tracker (either when announcing to it or by
// sending a specific scrape request). ``num_complete`` is the number of
// peers in the swarm that are seeds, or have every piece in the torrent.
// ``num_incomplete`` is the number of peers in the swarm that do not have
// every piece. ``num_downloaded`` is the number of times the torrent has
// been downloaded (not initiated, but the number of times a download has
// completed).
//
// Leaving any of these values set to -1 indicates we don't know, or we
// have not received any scrape data.
int num_complete;
int num_incomplete;
int num_downloaded;
};
}

View File

@ -411,7 +411,7 @@ TORRENT_EXPORT std::string print_entry(bdecode_node const& e
// produced by this function does not copy any data out of the buffer, but
// simply produces references back into it.
TORRENT_EXPORT int bdecode(char const* start, char const* end, bdecode_node& ret
, error_code& ec, int* error_pos = 0, int depth_limit = 100
, error_code& ec, int* error_pos = NULL, int depth_limit = 100
, int token_limit = 1000000);
}

View File

@ -0,0 +1,55 @@
/*
Copyright (c) 2015, Arvid Norberg
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TORRENT_READ_RESUME_DATA_HPP_INCLUDE
#define TORRENT_READ_RESUME_DATA_HPP_INCLUDE
#include "libtorrent/error_code.hpp"
#include "libtorrent/export.hpp"
namespace libtorrent
{
struct add_torrent_params;
struct bdecode_node;
// these functions are used to parse resume data and populate the appropriate
// fields in an add_torrent_param object. This object can then be used to add
// the actual torrent_info object to and pass to session::add_torrent() or
// session::async_add_torrent()
TORRENT_EXPORT add_torrent_params read_resume_data(bdecode_node const& rd
, error_code& ec);
TORRENT_EXPORT add_torrent_params read_resume_data(char const* buffer
, int size, error_code& ec);
}
#endif

View File

@ -989,7 +989,6 @@ namespace libtorrent
torrent_handle get_handle();
void write_resume_data(entry& rd) const;
void read_resume_data(bdecode_node const& rd);
void seen_complete() { m_last_seen_complete = time(0); }
int time_since_complete() const { return int(time(0) - m_last_seen_complete); }
@ -1306,6 +1305,7 @@ namespace libtorrent
error_code m_error;
// used if there is any resume data
#error do we still need this?
boost::scoped_ptr<resume_data_t> m_resume_data;
// if the torrent is started without metadata, it may
@ -1494,9 +1494,7 @@ namespace libtorrent
// torrent.
bool m_super_seeding:1;
// this is set when we don't want to load seed_mode,
// paused or auto_managed from the resume data
const bool m_override_resume_data:1;
#error a 1 bit hole here
#ifndef TORRENT_NO_DEPRECATE
#ifndef TORRENT_DISABLE_RESOLVE_COUNTRIES
@ -1689,14 +1687,7 @@ namespace libtorrent
// quarantine
bool m_pending_active_change:1;
// if this is set, accept the save path saved in the resume data, if
// present
bool m_use_resume_save_path:1;
// if set to true, add web seed URLs loaded from resume
// data into this torrent instead of replacing the ones from the .torrent
// file
bool m_merge_resume_http_seeds:1;
#error 2 missing bit here
// if this is set, whenever transitioning into a downloading/seeding state
// from a non-downloading/seeding state, the torrent is paused.

276
src/read_resume_data.cpp Normal file
View File

@ -0,0 +1,276 @@
/*
Copyright (c) 2015, Arvid Norberg
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#include "libtorrent/aux_/disable_warnings_push.hpp"
#include <boost/cstdint.hpp>
#include <boost/bind.hpp>
#include "libtorrent/aux_/disable_warnings_pop.hpp"
#include "libtorrent/bdecode.hpp"
#include "libtorrent/read_resume_data.hpp"
#include "libtorrent/add_torrent_params.hpp"
#include "libtorrent/announce_entry.hpp"
namespace libtorrent
{
namespace
{
// Translate a boolean field in the resume data into a flag bit in
// `current_flags`. The resume format stores booleans as integers where
// non-zero means "true". A missing entry or an explicit 0 clears the
// flag; any other value sets it.
//
// Fix: the previous code used a default of -1 and compared against -1,
// which meant a stored value of 0 (e.g. "paused": 0, the way
// write_resume_data records a false boolean) would incorrectly *set*
// the flag. Missing entries behave the same as before (flag cleared).
void apply_flag(boost::uint64_t& current_flags
	, bdecode_node const& n
	, char const* name
	, boost::uint64_t const flag)
{
	if (n.dict_find_int_value(name, 0) == 0)
	{
		current_flags &= ~flag;
	}
	else
	{
		current_flags |= flag;
	}
}
}
// Parse a bdecoded fast-resume document `rd` and return an
// add_torrent_params populated with the fields it contains. Integer
// fields that are absent fall back to the defaults passed to
// dict_find_int_value below.
//
// NOTE(review): this function is explicitly work-in-progress — the
// #error directives below mark unfinished areas, and several sections
// still reference torrent member state (m_torrent_file, m_trackers,
// m_seed_mode, m_verified, settings()) that does not exist in this
// free function. It will not compile until those are resolved.
// NOTE(review): `ec` is never set in this body — presumably validation
// errors are meant to be reported through it; confirm before shipping.
add_torrent_params read_resume_data(bdecode_node const& rd, error_code& ec)
{
	add_torrent_params ret;
	// transfer statistics accumulated over previous sessions
	ret.total_uploaded = rd.dict_find_int_value("total_uploaded");
	ret.total_downloaded = rd.dict_find_int_value("total_downloaded");
	// seconds spent in started/finished/seeding state, respectively
	ret.active_time = rd.dict_find_int_value("active_time");
	ret.finished_time = rd.dict_find_int_value("finished_time");
	ret.seeding_time = rd.dict_find_int_value("seeding_time");
	ret.last_seen_complete = rd.dict_find_int_value("last_seen_complete");
	// scrape data cache
	ret.num_complete = rd.dict_find_int_value("num_complete", -1);
	ret.num_incomplete = rd.dict_find_int_value("num_incomplete", -1);
	ret.num_downloaded = rd.dict_find_int_value("num_downloaded", -1);
	// torrent settings
	ret.max_uploads = rd.dict_find_int_value("max_uploads", -1);
	ret.max_connections = rd.dict_find_int_value("max_connections", -1);
	ret.upload_limit = rd.dict_find_int_value("upload_rate_limit", -1);
	ret.download_limit = rd.dict_find_int_value("download_rate_limit", -1);
	// torrent state
	apply_flag(ret.flags, rd, "seed_mode", add_torrent_params::flag_seed_mode);
	apply_flag(ret.flags, rd, "super_seeding", add_torrent_params::flag_super_seeding);
	apply_flag(ret.flags, rd, "auto_managed", add_torrent_params::flag_auto_managed);
	apply_flag(ret.flags, rd, "sequential_download", add_torrent_params::flag_sequential_download);
	apply_flag(ret.flags, rd, "paused", add_torrent_params::flag_paused);
	ret.save_path = rd.dict_find_string_value("save_path");
	ret.url = rd.dict_find_string_value("url");
	ret.uuid = rd.dict_find_string_value("uuid");
	ret.source_feed_url = rd.dict_find_string_value("feed");
#error add a field for this. The mapping has to happen in the torrent \
	constructor probably, and passed on to the storage. possibly, the \
	mapped_storage should be passed directly in when the storage is constructed
	// per-file rename overrides recorded in the resume data. This still
	// operates on m_torrent_file (torrent member) and must be ported to
	// an add_torrent_params field.
	bdecode_node mapped_files = rd.dict_find_list("mapped_files");
	if (mapped_files && mapped_files.list_size() == m_torrent_file->num_files())
	{
		for (int i = 0; i < m_torrent_file->num_files(); ++i)
		{
			std::string new_filename = mapped_files.list_string_value_at(i);
			if (new_filename.empty()) continue;
			m_torrent_file->rename_file(i, new_filename);
		}
	}
	ret.added_time = rd.dict_find_int_value("added_time", 0);
	ret.completed_time = rd.dict_find_int_value("completed_time", 0);
	// load file priorities except if the add_torrent_param file was set to
	// override resume data
	bdecode_node file_priority = rd.dict_find_list("file_priority");
	if (file_priority)
	{
		const int num_files = file_priority.list_size();
		// default priority 4 for any entries we don't overwrite below
		ret.file_priorities.resize(num_files, 4);
		for (int i = 0; i < num_files; ++i)
		{
			ret.file_priorities[i] = file_priority.list_int_value_at(i, 1);
			// this is suspicious, leave seed mode
			if (ret.file_priorities[i] == 0)
			{
				ret.flags &= ~add_torrent_params::flag_seed_mode;
			}
		}
	}
	bdecode_node trackers = rd.dict_find_list("trackers");
	if (trackers)
	{
		// it's possible to delete the trackers from a torrent and then save
		// resume data with an empty trackers list. Since we found a trackers
		// list here, these should replace whatever we find in the .torrent
		// file.
		ret.flags &= ~add_torrent_params::flag_merge_resume_trackers;
		// trackers are stored as a list of tiers, each tier being a list
		// of URLs. Flatten into parallel trackers/tracker_tiers vectors.
		int tier = 0;
		for (int i = 0; i < trackers.list_size(); ++i)
		{
			bdecode_node tier_list = trackers.list_at(i);
			if (!tier_list || tier_list.type() != bdecode_node::list_t)
				continue;
			for (int j = 0; j < tier_list.list_size(); ++j)
			{
				ret.trackers.push_back(tier_list.list_string_value_at(j));
				ret.tracker_tiers.push_back(tier);
			}
			++tier;
		}
		// NOTE(review): leftover torrent-member code (m_trackers,
		// settings()) — belongs in the torrent, not here
		std::sort(m_trackers.begin(), m_trackers.end(), boost::bind(&announce_entry::tier, _1)
			< boost::bind(&announce_entry::tier, _2));
		if (settings().get_bool(settings_pack::prefer_udp_trackers))
			prioritize_udp_trackers();
	}
	// if merge resume http seeds is not set, we need to clear whatever web
	// seeds we loaded from the .torrent file, because we want whatever's in
	// the resume file to take precedence. If there aren't even any fields in
	// the resume data though, keep the ones from the torrent
	bdecode_node url_list = rd.dict_find_list("url-list");
	bdecode_node httpseeds = rd.dict_find_list("httpseeds");
	if ((url_list || httpseeds) && !m_merge_resume_http_seeds)
	{
		// since we found http seeds in the resume data, they should replace
		// whatever web seeds are specified in the .torrent
		ret.flags &= ~add_torrent_params::flag_merge_resume_http_seeds;
	}
	if (url_list)
	{
		for (int i = 0; i < url_list.list_size(); ++i)
		{
			std::string url = url_list.list_string_value_at(i);
			if (url.empty()) continue;
			ret.url_seeds.push_back(url);
#error this correction logic has to be moved to the torrent constructor now
			if (m_torrent_file->num_files() > 1 && url[url.size()-1] != '/') url += '/';
		}
	}
	if (httpseeds)
	{
		for (int i = 0; i < httpseeds.list_size(); ++i)
		{
			std::string url = httpseeds.list_string_value_at(i);
			if (url.empty()) continue;
#error add this field (merge with url_seeds?)
			ret.http_seeds.push_back(url);
		}
	}
	// merkle torrents store the full hash tree in the resume data as one
	// flat string of 20-byte SHA-1 digests
	bdecode_node mt = rd.dict_find_string("merkle tree");
	if (mt)
	{
#error add field for this
		std::vector<sha1_hash> tree;
		tree.resize(m_torrent_file->merkle_tree().size());
		std::memcpy(&tree[0], mt.string_ptr()
			, (std::min)(mt.string_length(), int(tree.size()) * 20));
		// zero-fill any hashes the stored string was too short to cover
		if (mt.string_length() < int(tree.size()) * 20)
			std::memset(&tree[0] + mt.string_length() / 20, 0
				, tree.size() - mt.string_length() / 20);
		m_torrent_file->set_merkle_tree(tree);
	}
#error this is the case where the torrent is a merkle torrent but the resume \
	data does not contain the merkle tree, we need some kind of check in the \
	torrent constructor and error reporting
	{
		// TODO: 0 if this is a merkle torrent and we can't
		// restore the tree, we need to wipe all the
		// bits in the have array, but not necessarily
		// we might want to do a full check to see if we have
		// all the pieces. This is low priority since almost
		// no one uses merkle torrents
		TORRENT_ASSERT(false);
	}
#error add fields to add_torrent_params for these
	// some sanity checking. Maybe we shouldn't be in seed mode anymore
	bdecode_node pieces = rd.dict_find("pieces");
	if (pieces && pieces.type() == bdecode_node::string_t
		&& int(pieces.string_length()) == m_torrent_file->num_pieces())
	{
		char const* pieces_str = pieces.string_ptr();
		for (int i = 0, end(pieces.string_length()); i < end; ++i)
		{
			// being in seed mode and missing a piece is not compatible.
			// Leave seed mode if that happens
			if ((pieces_str[i] & 1)) continue;
			m_seed_mode = false;
			break;
		}
	}
	// likewise: a zero piece priority contradicts seed mode
	bdecode_node piece_priority = rd.dict_find_string("piece_priority");
	if (piece_priority && piece_priority.string_length()
		== m_torrent_file->num_pieces())
	{
		char const* p = piece_priority.string_ptr();
		for (int i = 0; i < piece_priority.string_length(); ++i)
		{
			if (p[i] > 0) continue;
			m_seed_mode = false;
			break;
		}
	}
	m_verified.resize(m_torrent_file->num_pieces(), false);
	return ret;
}
// Convenience overload: bdecode the raw resume buffer first, then
// delegate to the bdecode_node overload. On a decode failure `ec` is
// set by bdecode() and a default-constructed add_torrent_params is
// returned.
add_torrent_params read_resume_data(char const* buffer, int size, error_code& ec)
{
	bdecode_node node;
	bdecode(buffer, buffer + size, node, ec);
	return ec ? add_torrent_params() : read_resume_data(node, ec);
}
}

View File

@ -152,7 +152,7 @@ namespace libtorrent
return TORRENT_SYNC_CALL_RET(std::vector<torrent_handle>, get_torrents);
}
#ifndef BOOST_NO_EXCEPTIONS
#ifndef BOOST_NO_EXCEPTIONS
torrent_handle session_handle::add_torrent(add_torrent_params const& params)
{
error_code ec;
@ -171,13 +171,11 @@ namespace libtorrent
void session_handle::async_add_torrent(add_torrent_params const& params)
{
add_torrent_params* p = new add_torrent_params(params);
#ifndef TORRENT_NO_DEPRECATE
if (params.tracker_url)
{
p->trackers.push_back(params.tracker_url);
p->tracker_url = NULL;
}
#endif
#error all add_torrent() and async_add_torrent() function need to pass the \
add_torrent_params through a function that parses the resume vector and \
blends the results into the params object (unless deprecated functions \
are disabled)
TORRENT_ASYNC_CALL1(async_add_torrent, p);
}
@ -217,11 +215,14 @@ namespace libtorrent
, void* userdata)
{
add_torrent_params p(sc);
p.tracker_url = tracker_url;
p.trackers.push_back(tracker_url);
p.info_hash = info_hash;
p.save_path = save_path;
p.storage_mode = storage_mode;
p.paused = paused;
if (paused) p.flags |= add_torrent_params::flag_paused;
else p.flags &= ~add_torrent_params::flag_paused;
p.userdata = userdata;
p.name = name;
if (resume_data.type() != entry::undefined_t)

View File

@ -4615,10 +4615,6 @@ namespace aux {
{
TORRENT_ASSERT(!p.save_path.empty());
#ifndef TORRENT_NO_DEPRECATE
p.update_flags();
#endif
add_torrent_params params = p;
if (string_begins_no_case("magnet:", params.url.c_str()))
{

View File

@ -820,6 +820,18 @@ namespace libtorrent
#endif
}
namespace
{
// Returns true iff every byte of the resume-data "pieces" bitmask has
// its low (have) bit set, i.e. the resume data claims every piece is
// present. An empty bitmask (len == 0) vacuously counts as a seed.
bool is_seed(char const* pieces, int const len)
{
	char const* const end = pieces + len;
	for (char const* p = pieces; p != end; ++p)
	{
		if ((*p & 1) == 0) return false;
	}
	return true;
}
}
bool default_storage::verify_resume_data(bdecode_node const& rd
, std::vector<std::string> const* links
, storage_error& ec)
@ -868,6 +880,10 @@ namespace libtorrent
&& int(pieces.string_length()) == fs.num_pieces())
{
char const* pieces_str = pieces.string_ptr();
// TODO: this should just be a std::none_of()
bool const seed = is_seed(pieces_str, pieces.string_length());
// parse have bitmask. Verify that the files we expect to have
// actually do exist
for (int i = 0; i < fs.num_pieces(); ++i)
@ -900,6 +916,16 @@ namespace libtorrent
}
}
if (seed && size != fs.file_size(i))
{
// the resume data indicates we're a seed, but this file has
// the wrong size. Reject the resume data
ec.ec = errors::mismatching_file_size;
ec.file = i;
ec.operation = storage_error::check_resume;
return false;
}
// OK, this file existed, good. Now, skip all remaining pieces in
// this file. We're just sanity-checking whether the files exist
// or not.
@ -908,6 +934,13 @@ namespace libtorrent
i = (std::max)(i + 1, pr.piece);
}
}
else
{
ec.ec = errors::missing_pieces;
ec.file = -1;
ec.operation = storage_error::check_resume;
return false;
}
}
return true;

View File

@ -237,7 +237,6 @@ namespace libtorrent
, m_auto_sequential(false)
, m_seed_mode(false)
, m_super_seeding(false)
, m_override_resume_data((p.flags & add_torrent_params::flag_override_resume_data) != 0)
#ifndef TORRENT_NO_DEPRECATE
#ifndef TORRENT_DISABLE_RESOLVE_COUNTRIES
, m_resolving_country(false)
@ -278,8 +277,6 @@ namespace libtorrent
, m_last_scrape((std::numeric_limits<boost::int16_t>::min)())
, m_progress_ppm(0)
, m_pending_active_change(false)
, m_use_resume_save_path((p.flags & add_torrent_params::flag_use_resume_save_path) != 0)
, m_merge_resume_http_seeds((p.flags & add_torrent_params::flag_merge_resume_http_seeds) != 0)
, m_stop_when_ready((p.flags & add_torrent_params::flag_stop_when_ready) != 0)
{
// we cannot log in the constructor, because it relies on shared_from_this
@ -357,6 +354,40 @@ namespace libtorrent
m_resume_data.reset(new resume_data_t);
m_resume_data->buf = p.resume_data;
}
int tier = 0;
std::vector<int>::const_iterator tier_iter = p.tracker_tiers.begin();
for (std::vector<std::string>::const_iterator i = p.trackers.begin()
, end(p.trackers.end()); i != end; ++i)
{
if (tier_iter != p.tracker_tiers.end())
tier = *tier_iter++;
announce_entry e(*i);
e.fail_limit = 0;
e.source = announce_entry::source_magnet_link;
e.tier = tier;
m_trackers.push_back(e);
m_torrent_file->add_tracker(*i, tier);
}
if (settings().get_bool(settings_pack::prefer_udp_trackers))
prioritize_udp_trackers();
m_total_uploaded = p.total_uploaded;
m_total_downloaded = p.total_downloaded;
// the number of seconds this torrent has spent in started, finished and
// seeding state so far, respectively.
m_active_time = p.active_time;
m_finished_time = p.finished_time;
m_seeding_time = p.seeding_time;
m_added_time = p.added_time ? p.added_time : time(0);
m_completed_time = p.completed_time;
if (m_completed_time != 0 && m_completed_time < m_added_time)
m_completed_time = m_added_time;
}
void torrent::inc_stats_counter(int c, int value)
@ -718,6 +749,8 @@ namespace libtorrent
{
TORRENT_ASSERT(is_single_thread());
#error why isn't this done in the constructor?
#ifndef TORRENT_DISABLE_LOGGING
debug_log("creating torrent: %s max-uploads: %d max-connections: %d "
"upload-limit: %d download-limit: %d flags: %s%s%s%s%s%s%s%s%s%s%s%s"
@ -770,28 +803,6 @@ namespace libtorrent
if (!m_name && !m_url.empty()) m_name.reset(new std::string(m_url));
#ifndef TORRENT_NO_DEPRECATE
if (p.tracker_url && std::strlen(p.tracker_url) > 0)
{
m_trackers.push_back(announce_entry(p.tracker_url));
m_trackers.back().fail_limit = 0;
m_trackers.back().source = announce_entry::source_magnet_link;
m_torrent_file->add_tracker(p.tracker_url);
}
#endif
for (std::vector<std::string>::const_iterator i = p.trackers.begin()
, end(p.trackers.end()); i != end; ++i)
{
m_trackers.push_back(announce_entry(*i));
m_trackers.back().fail_limit = 0;
m_trackers.back().source = announce_entry::source_magnet_link;
m_torrent_file->add_tracker(*i);
}
if (settings().get_bool(settings_pack::prefer_udp_trackers))
prioritize_udp_trackers();
// if we don't have metadata, make this torrent pinned. The
// client may unpin it once we have metadata and it has had
// a chance to save it on the metadata_received_alert
@ -1808,7 +1819,8 @@ namespace libtorrent
}
// this may not be called from a constructor because of the call to
// shared_from_this()
// shared_from_this(). It's either called when we start() the torrent, or at a
// later time if it's a magnet link, once the metadata is downloaded
void torrent::init()
{
INVARIANT_CHECK;
@ -1883,10 +1895,6 @@ namespace libtorrent
#endif
m_resume_data.reset();
}
else
{
read_resume_data(m_resume_data->node);
}
}
#if TORRENT_USE_ASSERTS
@ -1927,6 +1935,7 @@ namespace libtorrent
// if we've already loaded file priorities, don't load piece priorities,
// they will interfere.
#error we should get the piece priority from add_torrent_params instead
if (!m_seed_mode && m_resume_data && m_file_priority.empty())
{
bdecode_node piece_priority = m_resume_data->node
@ -6784,278 +6793,6 @@ namespace libtorrent
#endif
#endif // TORRENT_NO_DEPRECATE
void torrent::read_resume_data(bdecode_node const& rd)
{
m_total_uploaded = rd.dict_find_int_value("total_uploaded");
m_total_downloaded = rd.dict_find_int_value("total_downloaded");
m_active_time = rd.dict_find_int_value("active_time");
m_finished_time = rd.dict_find_int_value("finished_time");
m_seeding_time = rd.dict_find_int_value("seeding_time");
m_last_seen_complete = rd.dict_find_int_value("last_seen_complete");
m_complete = rd.dict_find_int_value("num_complete", 0xffffff);
m_incomplete = rd.dict_find_int_value("num_incomplete", 0xffffff);
m_downloaded = rd.dict_find_int_value("num_downloaded", 0xffffff);
if (!m_override_resume_data)
{
int up_limit_ = rd.dict_find_int_value("upload_rate_limit", -1);
if (up_limit_ != -1) set_upload_limit(up_limit_);
int down_limit_ = rd.dict_find_int_value("download_rate_limit", -1);
if (down_limit_ != -1) set_download_limit(down_limit_);
int max_connections_ = rd.dict_find_int_value("max_connections", -1);
if (max_connections_ != -1) set_max_connections(max_connections_);
int max_uploads_ = rd.dict_find_int_value("max_uploads", -1);
if (max_uploads_ != -1) set_max_uploads(max_uploads_);
int seed_mode_ = rd.dict_find_int_value("seed_mode", -1);
if (seed_mode_ != -1) m_seed_mode = seed_mode_ && m_torrent_file->is_valid();
int super_seeding_ = rd.dict_find_int_value("super_seeding", -1);
if (super_seeding_ != -1) super_seeding(super_seeding_ != 0);
int auto_managed_ = rd.dict_find_int_value("auto_managed", -1);
if (auto_managed_ != -1)
{
m_auto_managed = auto_managed_ != 0;
update_want_scrape();
update_state_list();
}
// --- resume-data loading (tail of function; head is above this view) ---
// every field below is optional in the resume dict: a missing key (or the
// -1 sentinel from dict_find_int_value) leaves the torrent's current state
// untouched.
int sequential_ = rd.dict_find_int_value("sequential_download", -1);
if (sequential_ != -1) set_sequential_download(sequential_ != 0);
// "paused" implies a full announce state: being paused also suppresses
// DHT, tracker and LSD announces. The explicit announce_to_* keys below
// may then override the individual flags.
int paused_ = rd.dict_find_int_value("paused", -1);
if (paused_ != -1)
{
set_allow_peers(paused_ == 0);
m_announce_to_dht = (paused_ == 0);
m_announce_to_trackers = (paused_ == 0);
m_announce_to_lsd = (paused_ == 0);
// re-derive cached state that depends on the paused/announce flags
update_gauge();
update_want_peers();
update_want_scrape();
update_state_list();
}
// per-mechanism announce overrides (take precedence over "paused" above)
int dht_ = rd.dict_find_int_value("announce_to_dht", -1);
if (dht_ != -1) m_announce_to_dht = (dht_ != 0);
int lsd_ = rd.dict_find_int_value("announce_to_lsd", -1);
if (lsd_ != -1) m_announce_to_lsd = (lsd_ != 0);
int track_ = rd.dict_find_int_value("announce_to_trackers", -1);
if (track_ != -1) m_announce_to_trackers = (track_ != 0);
#ifndef TORRENT_DISABLE_LOGGING
// NOTE(review): max_uploads_, max_connections_, up_limit_, down_limit_,
// super_seeding_ and auto_managed_ are locals declared above this view —
// the "}" below closes their enclosing scope.
debug_log("loaded resume data: max-uploads: %d max-connections: %d "
"upload-limit: %d download-limit: %d paused: %d sequential-download: %d "
"super-seeding: %d auto-managed: %d"
, max_uploads_, max_connections_, up_limit_, down_limit_
, paused_, sequential_, super_seeding_, auto_managed_);
#endif
}
// timestamps are stored in the resume file as "seconds before the time it
// was saved"; convert back to session time. A missing key maps to the
// int16 minimum as a "never happened" sentinel.
int now = m_ses.session_time();
int tmp = rd.dict_find_int_value("last_scrape", -1);
m_last_scrape = tmp == -1 ? (std::numeric_limits<boost::int16_t>::min)() : now - tmp;
tmp = rd.dict_find_int_value("last_download", -1);
m_last_download = tmp == -1 ? (std::numeric_limits<boost::int16_t>::min)() : now - tmp;
tmp = rd.dict_find_int_value("last_upload", -1);
m_last_upload = tmp == -1 ? (std::numeric_limits<boost::int16_t>::min)() : now - tmp;
// only let the resume data override the save path when the client asked
// for it (m_use_resume_save_path)
if (m_use_resume_save_path)
{
std::string p = rd.dict_find_string_value("save_path");
if (!p.empty())
{
m_save_path = p;
#ifndef TORRENT_DISABLE_LOGGING
debug_log("loaded resume data: save-path: %s", m_save_path.c_str());
#endif
}
}
// torrents added by URL/UUID (e.g. via an RSS feed) are tracked in a
// session-wide index keyed by uuid, falling back to the url
m_url = rd.dict_find_string_value("url");
m_uuid = rd.dict_find_string_value("uuid");
m_source_feed_url = rd.dict_find_string_value("feed");
if (!m_uuid.empty() || !m_url.empty())
{
boost::shared_ptr<torrent> me(shared_from_this());
// insert this torrent in the uuid index
m_ses.insert_uuid_torrent(m_uuid.empty() ? m_url : m_uuid, me);
}
// The mapped_files needs to be read both in the network thread
// and in the disk thread, since they both have their own mapped files structures
// which are kept in sync
// the list is only honored when it covers every file in the torrent;
// empty entries mean "keep the original name" for that file
bdecode_node mapped_files = rd.dict_find_list("mapped_files");
if (mapped_files && mapped_files.list_size() == m_torrent_file->num_files())
{
for (int i = 0; i < m_torrent_file->num_files(); ++i)
{
std::string new_filename = mapped_files.list_string_value_at(i);
if (new_filename.empty()) continue;
m_torrent_file->rename_file(i, new_filename);
}
}
m_added_time = rd.dict_find_int_value("added_time", m_added_time);
m_completed_time = rd.dict_find_int_value("completed_time", m_completed_time);
// sanity: a torrent cannot have completed before it was added
if (m_completed_time != 0 && m_completed_time < m_added_time)
m_completed_time = m_added_time;
// load file priorities except if the add_torrent_param file was set to
// override resume data
if (!m_override_resume_data || m_file_priority.empty())
{
bdecode_node file_priority = rd.dict_find_list("file_priority");
if (file_priority)
{
// if the resume list is shorter than the file list, only the
// covered prefix is applied; missing entries default to priority 1
const int num_files = (std::min)(file_priority.list_size()
, m_torrent_file->num_files());
std::vector<int> file_prio(num_files);
for (int i = 0; i < num_files; ++i)
{
file_prio[i] = file_priority.list_int_value_at(i, 1);
// this is suspicious, leave seed mode
if (file_prio[i] == 0) m_seed_mode = false;
}
prioritize_files(file_prio);
}
}
// trackers are stored as a list of tiers, each tier a list of URLs
// (the multitracker layout of BEP 12). Unless m_merge_resume_trackers is
// set, the resume list replaces whatever came from the .torrent file.
bdecode_node trackers = rd.dict_find_list("trackers");
if (trackers)
{
if (!m_merge_resume_trackers) m_trackers.clear();
int tier = 0;
for (int i = 0; i < trackers.list_size(); ++i)
{
bdecode_node tier_list = trackers.list_at(i);
if (!tier_list || tier_list.type() != bdecode_node::list_t)
continue;
for (int j = 0; j < tier_list.list_size(); ++j)
{
announce_entry e(tier_list.list_string_value_at(j));
// skip duplicates already present (e.g. when merging)
if (std::find_if(m_trackers.begin(), m_trackers.end()
, boost::bind(&announce_entry::url, _1) == e.url) != m_trackers.end())
continue;
e.tier = tier;
e.fail_limit = 0;
m_trackers.push_back(e);
}
++tier;
}
// keep the tracker list ordered by tier, as announce logic expects
std::sort(m_trackers.begin(), m_trackers.end(), boost::bind(&announce_entry::tier, _1)
< boost::bind(&announce_entry::tier, _2));
if (settings().get_bool(settings_pack::prefer_udp_trackers))
prioritize_udp_trackers();
}
// if merge resume http seeds is not set, we need to clear whatever web
// seeds we loaded from the .torrent file, because we want whatever's in
// the resume file to take precedence. If there aren't even any fields in
// the resume data though, keep the ones from the torrent
// "url-list" carries BEP 19 url seeds, "httpseeds" carries BEP 17 seeds
bdecode_node url_list = rd.dict_find_list("url-list");
bdecode_node httpseeds = rd.dict_find_list("httpseeds");
if ((url_list || httpseeds) && !m_merge_resume_http_seeds)
{
m_web_seeds.clear();
}
if (url_list)
{
for (int i = 0; i < url_list.list_size(); ++i)
{
std::string url = url_list.list_string_value_at(i);
if (url.empty()) continue;
// multi-file torrents require the url seed to point at a
// directory, so make sure it ends with a slash
if (m_torrent_file->num_files() > 1 && url[url.size()-1] != '/') url += '/';
add_web_seed(url, web_seed_entry::url_seed);
}
}
if (httpseeds)
{
for (int i = 0; i < httpseeds.list_size(); ++i)
{
std::string url = httpseeds.list_string_value_at(i);
if (url.empty()) continue;
add_web_seed(url, web_seed_entry::http_seed);
}
}
// merkle torrents persist the full hash tree in the resume data, since
// the .torrent only contains the root hash
if (m_torrent_file->is_merkle_torrent())
{
bdecode_node mt = rd.dict_find_string("merkle tree");
if (mt)
{
std::vector<sha1_hash> tree;
tree.resize(m_torrent_file->merkle_tree().size());
// copy what we have; zero-fill any trailing nodes if the stored
// tree is shorter than expected (20 bytes per sha1 node)
std::memcpy(&tree[0], mt.string_ptr()
, (std::min)(mt.string_length(), int(tree.size()) * 20));
if (mt.string_length() < int(tree.size()) * 20)
std::memset(&tree[0] + mt.string_length() / 20, 0
, tree.size() - mt.string_length() / 20);
m_torrent_file->set_merkle_tree(tree);
}
else
{
// TODO: 0 if this is a merkle torrent and we can't
// restore the tree, we need to wipe all the
// bits in the have array, but not necessarily
// we might want to do a full check to see if we have
// all the pieces. This is low priority since almost
// no one uses merkle torrents
TORRENT_ASSERT(false);
}
}
// updating some of the torrent state may have set need_save_resume_data.
// clear it here since we've just restored the resume data we already
// have. Nothing has changed from that state yet.
m_need_save_resume_data = false;
if (m_seed_mode)
{
// some sanity checking. Maybe we shouldn't be in seed mode anymore
// "pieces" is one byte per piece; bit 0 set means we have the piece
bdecode_node pieces = rd.dict_find("pieces");
if (pieces && pieces.type() == bdecode_node::string_t
&& int(pieces.string_length()) == m_torrent_file->num_pieces())
{
char const* pieces_str = pieces.string_ptr();
for (int i = 0, end(pieces.string_length()); i < end; ++i)
{
// being in seed mode and missing a piece is not compatible.
// Leave seed mode if that happens
if ((pieces_str[i] & 1)) continue;
m_seed_mode = false;
break;
}
}
// likewise: any zero piece priority contradicts seed mode
bdecode_node piece_priority = rd.dict_find_string("piece_priority");
if (piece_priority && piece_priority.string_length()
== m_torrent_file->num_pieces())
{
char const* p = piece_priority.string_ptr();
for (int i = 0; i < piece_priority.string_length(); ++i)
{
if (p[i] > 0) continue;
m_seed_mode = false;
break;
}
}
m_verified.resize(m_torrent_file->num_pieces(), false);
}
}
// returns a shared handle to this torrent's metadata, or an empty pointer
// when no valid metadata has been received yet
// NOTE(review): function body continues beyond this excerpt
boost::shared_ptr<const torrent_info> torrent::get_torrent_copy()
{
if (!m_torrent_file->is_valid()) return boost::shared_ptr<const torrent_info>();
@ -7372,9 +7109,6 @@ namespace libtorrent
// (fragment of the resume-data writing side — mirrors the keys read by the
// loading code: limits, paused state, per-mechanism announce flags and
// auto-managed)
ret["max_connections"] = max_connections();
ret["max_uploads"] = max_uploads();
ret["paused"] = is_torrent_paused();
ret["announce_to_dht"] = m_announce_to_dht;
ret["announce_to_trackers"] = m_announce_to_trackers;
ret["announce_to_lsd"] = m_announce_to_lsd;
ret["auto_managed"] = m_auto_managed;
// piece priorities and file priorities are mutually exclusive. If there