introduce backwards-compatibility for loading resume data and merging it with add_torrent_params. add some more missing fields. deprecate some more flags in add_torrent_params

arvidn 2016-02-11 13:58:18 -05:00 committed by arvidn
parent e0ee12d4f3
commit 35b998f90d
4 changed files with 245 additions and 89 deletions

View File

@@ -119,6 +119,7 @@ namespace libtorrent
// in there will override the seed mode you set here.
flag_seed_mode = 0x001,
#ifndef TORRENT_NO_DEPRECATE
// If ``flag_override_resume_data`` is set, flags set for this torrent
// in this ``add_torrent_params`` object will take precedence over
// whatever states are saved in the resume data. For instance, the
@@ -132,6 +133,7 @@ namespace libtorrent
// If this flag is set, but file_priorities is empty, file priorities
// are still loaded from the resume data, if present.
flag_override_resume_data = 0x002,
#endif
// If ``flag_upload_mode`` is set, the torrent will be initialized in
// upload-mode, which means it will not make any piece requests. This
@@ -199,16 +201,16 @@ namespace libtorrent
flag_auto_managed = 0x040,
flag_duplicate_is_error = 0x080,
// defaults to off and specifies whether tracker URLs loaded from
#ifndef TORRENT_NO_DEPRECATE
// defaults to on and specifies whether tracker URLs loaded from
// resume data should be added to the trackers in the torrent or
// replace the trackers. When replacing trackers (i.e. this flag is not
// set), any trackers passed in via add_torrent_params are also
// replaced by any trackers in the resume data. The default behavior is
// to have the resume data override the .torrent file _and_ the
// trackers added in add_torrent_params.
//#error this needs to change slightly. Should trackers be extracted from the \
.torrent file on the client side? If so, when?
flag_merge_resume_trackers = 0x100,
#endif
// on by default and means that this torrent will be part of state
// updates when calling post_torrent_updates().
@@ -237,7 +239,8 @@ namespace libtorrent
// the torrent exempt from loading/unloading management.
flag_pinned = 0x2000,
// defaults to off and specifies whether web seed URLs loaded from
#ifndef TORRENT_NO_DEPRECATE
// defaults to on and specifies whether web seed URLs loaded from
// resume data should be added to the ones in the torrent file or
// replace them. No distinction is made between the two different kinds
// of web seeds (`BEP 17`_ and `BEP 19`_). When replacing web seeds
@@ -245,17 +248,33 @@ namespace libtorrent
// add_torrent_params are also replaced. The default behavior is to
// have any web seeds in the resume data take precedence over whatever
// is passed in here as well as the .torrent file.
//#error this needs to change slightly. Should web seeds be extracted from the \
.torrent file on the client side? If so, when?
flag_merge_resume_http_seeds = 0x2000,
#endif
// the stop when ready flag. Setting this flag is equivalent to calling
// torrent_handle::stop_when_ready() immediately after the torrent is
// added.
flag_stop_when_ready = 0x4000,
// when this flag is set, the tracker list in the add_torrent_params
// object overrides any trackers from the torrent file. If the flag is
// not set, the trackers from the add_torrent_params object will be
// added to the list of trackers used by the torrent.
flag_override_trackers = 0x8000,
// If this flag is set, the web seeds from the add_torrent_params
// object will override any web seeds in the torrent file. If it's not
// set, web seeds in the add_torrent_params object will be added to the
// list of web seeds used by the torrent.
flag_override_web_seeds = 0x10000,
// internal
default_flags = flag_merge_resume_http_seeds | flag_merge_resume_trackers | flag_pinned | flag_update_subscribe | flag_auto_managed | flag_paused | flag_apply_ip_filter
default_flags = flag_pinned | flag_update_subscribe
| flag_auto_managed | flag_paused | flag_apply_ip_filter
#ifndef TORRENT_NO_DEPRECATE
| flag_merge_resume_http_seeds
| flag_merge_resume_trackers
#endif
};
// filled in by the constructor and should be left untouched. It is used
@@ -292,17 +311,6 @@ namespace libtorrent
// and may not contain the special directories "." or "..".
std::string save_path;
#error do we still need this? what's left in the resume data that isn't parsed \
out into add_torrent_params? should it be moved to add_torrent_params?
// The optional parameter, ``resume_data`` can be given if up to date
// fast-resume data is available. The fast-resume data can be acquired
// from a running torrent by calling save_resume_data() on
// torrent_handle. See fast-resume_. The ``vector`` that is passed in
// will be swapped into the running torrent instance with
// ``std::vector::swap()``.
std::vector<char> resume_data;
// One of the values from storage_mode_t. For more information, see
// storage-allocation_.
storage_mode_t storage_mode;
@@ -419,6 +427,30 @@ namespace libtorrent
int num_complete;
int num_incomplete;
int num_downloaded;
// URLs can be added to these two lists to specify additional web
// seeds to be used by the torrent. If ``flag_override_web_seeds``
// is set, these will be the _only_ ones to be used, i.e. any web seeds
// found in the .torrent file will be overridden.
//
// http_seeds expects URLs to web servers implementing the original HTTP
// seed specification `BEP 17`_.
//
// url_seeds expects URLs to regular web servers, aka "GetRight" style,
// specified in `BEP 19`_.
std::vector<std::string> http_seeds;
std::vector<std::string> url_seeds;
#ifndef TORRENT_NO_DEPRECATE
// The optional parameter, ``resume_data`` can be given if up to date
// fast-resume data is available. The fast-resume data can be acquired
// from a running torrent by calling save_resume_data() on
// torrent_handle. See fast-resume_. The ``vector`` that is passed in
// will be swapped into the running torrent instance with
// ``std::vector::swap()``.
std::vector<char> resume_data;
#endif
};
}
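
As a rough usage sketch of the new fields and flags introduced in this header (not part of the commit; the helper name, tracker URL and mirror URL below are made up), a client that wants its own tracker list to win over the .torrent file, while still merging in an extra web seed, might do something like this:

#include <libtorrent/session.hpp>
#include <libtorrent/add_torrent_params.hpp>
#include <libtorrent/torrent_info.hpp>

#include <boost/shared_ptr.hpp>

namespace lt = libtorrent;

// hypothetical helper: add a torrent whose tracker list comes from the
// caller rather than from the .torrent file
lt::torrent_handle add_with_own_trackers(lt::session& ses
	, boost::shared_ptr<lt::torrent_info> ti)
{
	lt::add_torrent_params atp;
	atp.ti = ti;
	atp.save_path = ".";

	// with flag_override_trackers set, this list replaces whatever
	// trackers the .torrent file contains
	atp.trackers.push_back("http://tracker.example.com/announce");
	atp.flags |= lt::add_torrent_params::flag_override_trackers;

	// this BEP 19 web seed is added to the ones from the .torrent file,
	// since flag_override_web_seeds is left unset
	atp.url_seeds.push_back("http://mirror.example.com/files/");

	return ses.add_torrent(atp);
}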

View File

@@ -85,7 +85,7 @@ namespace libtorrent
can only be done reliably on the libtorrent side as the torrent is being \
added. i.e. the info_hash needs to be saved
ret.total_uploaded = rd.dict_find_int_value("total_uploaded");
ret.total_downloaded = rd.dict_find_int_value("total_downloaded");
ret.active_time = rd.dict_find_int_value("active_time");
ret.finished_time = rd.dict_find_int_value("finished_time");
@@ -160,7 +160,7 @@ namespace libtorrent
// resume data with an empty trackers list. Since we found a trackers
// list here, these should replace whatever we find in the .torrent
// file.
ret.flags &= ~add_torrent_params::flag_merge_resume_trackers;
ret.flags |= add_torrent_params::flag_override_trackers;
int tier = 0;
for (int i = 0; i < trackers.list_size(); ++i)
@@ -189,11 +189,11 @@ namespace libtorrent
// the resume data though, keep the ones from the torrent
bdecode_node url_list = rd.dict_find_list("url-list");
bdecode_node httpseeds = rd.dict_find_list("httpseeds");
if ((url_list || httpseeds) && !m_merge_resume_http_seeds)
if (url_list || httpseeds)
{
// since we found http seeds in the resume data, they should replace
// whatever web seeds are specified in the .torrent
ret.flags &= ~add_torrent_params::flag_merge_resume_http_seeds;
// whatever web seeds are specified in the .torrent, by default
ret.flags |= add_torrent_params::flag_override_web_seeds;
}
if (url_list)
@@ -203,9 +203,6 @@ namespace libtorrent
std::string url = url_list.list_string_value_at(i);
if (url.empty()) continue;
ret.url_seeds.push_back(url);
#error this correction logic has to be moved to the torrent constructor now
if (m_torrent_file->num_files() > 1 && url[url.size()-1] != '/') url += '/';
}
}
@@ -215,7 +212,6 @@ namespace libtorrent
{
std::string url = httpseeds.list_string_value_at(i);
if (url.empty()) continue;
#error add this field (merge with url_seeds?)
ret.http_seeds.push_back(url);
}
}
@@ -278,7 +274,11 @@ namespace libtorrent
}
}
m_verified.resize(m_torrent_file->num_pieces(), false);
#error read "unfinished" pieces
#error read "peers" list
#error read "peers6" list
#error read "banned_peers" list
#error read "banned_peers6" list
return ret;
}
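
For context, the direction this commit moves in is to parse resume data up front and hand the resulting add_torrent_params to the session directly. A minimal sketch, under the assumptions that read_resume_data() is the free function with the (char const*, int, error_code&) signature called elsewhere in this commit, and that the header path guessed below is where it ends up:

#include <libtorrent/session.hpp>
#include <libtorrent/add_torrent_params.hpp>
#include <libtorrent/error_code.hpp>
#include <libtorrent/read_resume_data.hpp> // assumed header for the new free function

#include <fstream>
#include <iterator>
#include <string>
#include <vector>

namespace lt = libtorrent;

// hypothetical helper: load a .fastresume file and add the torrent it describes
lt::torrent_handle add_from_resume_file(lt::session& ses, std::string const& file)
{
	std::ifstream in(file.c_str(), std::ios::binary);
	std::vector<char> buf((std::istreambuf_iterator<char>(in))
		, std::istreambuf_iterator<char>());
	if (buf.empty()) return lt::torrent_handle();

	lt::error_code ec;
	lt::add_torrent_params atp = lt::read_resume_data(&buf[0], int(buf.size()), ec);
	if (ec) return lt::torrent_handle(); // resume data failed to parse

	// the resume data carries save_path, trackers, web seeds, stats etc.;
	// the caller may still need to set atp.ti or the info-hash for the metadata
	return ses.add_torrent(atp);
}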

View File

@@ -152,11 +152,114 @@ namespace libtorrent
return TORRENT_SYNC_CALL_RET(std::vector<torrent_handle>, get_torrents);
}
#ifndef TORRENT_NO_DEPRECATE
namespace
{
void handle_backwards_compatible_resume_data(add_torrent_params& atp
, error_code& ec)
{
// if there's no resume data set, there's nothing to do. It's either
// using the previous API without resume data, or the resume data has
// already been parsed out into the add_torrent_params struct.
if (atp.resume_data.empty()) return;
add_torrent_params resume_data
= read_resume_data(&atp.resume_data[0], atp.resume_data.size(), ec);
if (ec) return;
// now, merge resume_data into atp according to the merge flags
if (atp.flags & add_torrent_params::flag_use_resume_save_path
&& !resume_data.save_path.empty())
{
atp.save_path = resume_data.save_path;
}
if (!resume_data.trackers.empty())
{
atp.tracker_tiers.resize(atp.trackers.size(), 0);
atp.trackers.insert(atp.trackers.end()
, resume_data.trackers.begin()
, resume_data.trackers.end());
atp.tracker_tiers.insert(atp.tracker_tiers.end()
, resume_data.tracker_tiers.begin()
, resume_data.tracker_tiers.end());
if ((resume_data.flags & add_torrent_params::flag_merge_resume_trackers) == 0)
atp.flags |= add_torrent_params::flag_override_trackers;
}
if (!resume_data.url_seeds.empty())
{
atp.url_seeds.insert(atp.url_seeds.end()
, resume_data.url_seeds.begin()
, resume_data.url_seeds.end());
if ((resume_data.flags & add_torrent_params::flag_merge_resume_http_seeds) == 0)
atp.flags |= add_torrent_params::flag_override_web_seeds;
}
if (!resume_data.http_seeds.empty())
{
atp.http_seeds.insert(atp.http_seeds.end()
, resume_data.http_seeds.begin()
, resume_data.http_seeds.end());
if ((resume_data.flags & add_torrent_params::flag_merge_resume_http_seeds) == 0)
atp.flags |= add_torrent_params::flag_override_web_seeds;
}
atp.total_uploaded = resume_data.total_uploaded;
atp.total_downloaded = resume_data.total_downloaded;
atp.num_complete = resume_data.num_complete;
atp.num_incomplete = resume_data.num_incomplete;
atp.num_downloaded = resume_data.num_downloaded;
atp.active_time = resume_data.active_time;
atp.finished_time = resume_data.finished_time;
atp.seeding_time = resume_data.seeding_time;
atp.last_seen_complete = resume_data.last_seen_complete;
atp.url = resume_data.url;
atp.uuid = resume_data.uuid;
atp.source_feed_url = resume_data.source_feed_url;
atp.added_time = resume_data.added_time;
atp.completed_time = resume_data.completed_time;
if ((atp.flags & add_torrent_params::flag_override_resume_data) == 0)
{
atp.download_limit = resume_data.download_limit;
atp.upload_limit = resume_data.upload_limit;
atp.max_connections = resume_data.max_connections;
atp.max_uploads = resume_data.max_uploads;
atp.trackerid = resume_data.trackerid;
atp.file_priorities = resume_data.file_priorities;
boost::uint64_t const mask =
add_torrent_params::flag_seed_mode
| add_torrent_params::flag_super_seeding
| add_torrent_params::flag_auto_managed
| add_torrent_params::flag_sequential_download
| add_torrent_params::flag_paused;
atp.flags &= ~mask;
atp.flags |= resume_data.flags & mask;
}
}
}
#endif
#ifndef BOOST_NO_EXCEPTIONS
torrent_handle session_handle::add_torrent(add_torrent_params const& params)
{
error_code ec;
torrent_handle r = TORRENT_SYNC_CALL_RET2(torrent_handle, add_torrent, params, boost::ref(ec));
#ifndef TORRENT_NO_DEPRECATE
add_torrent_params p = params;
handle_backwards_compatible_resume_data(p, ec);
if (ec) throw libtorrent_exception(ec);
#else
add_torrent_params const& p = params;
#endif
torrent_handle r = TORRENT_SYNC_CALL_RET2(torrent_handle, add_torrent, p, boost::ref(ec));
if (ec) throw libtorrent_exception(ec);
return r;
}
@@ -165,16 +268,26 @@ namespace libtorrent
torrent_handle session_handle::add_torrent(add_torrent_params const& params, error_code& ec)
{
ec.clear();
return TORRENT_SYNC_CALL_RET2(torrent_handle, add_torrent, params, boost::ref(ec));
#ifndef TORRENT_NO_DEPRECATE
add_torrent_params p = params;
handle_backwards_compatible_resume_data(p, ec);
if (ec) return torrent_handle();
#else
add_torrent_params const& p = params;
#endif
return TORRENT_SYNC_CALL_RET2(torrent_handle, add_torrent, p, boost::ref(ec));
}
void session_handle::async_add_torrent(add_torrent_params const& params)
{
add_torrent_params* p = new add_torrent_params(params);
#error all add_torrent() and async_add_torrent() functions need to pass the \
add_torrent_params through a function that parses the resume vector and \
blends the results into the params object (unless deprecated functions \
are disabled)
#ifndef TORRENT_NO_DEPRECATE
error_code ec;
handle_backwards_compatible_resume_data(*p, ec);
#error what should we do about error handling here?
if (ec) return;
#endif
TORRENT_ASYNC_CALL1(async_add_torrent, p);
}
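
The deprecated path that handle_backwards_compatible_resume_data() keeps alive looks roughly like the following on the client side (illustration only; the file name and helper are invented, and this only works while TORRENT_NO_DEPRECATE is not defined):

#include <libtorrent/session.hpp>
#include <libtorrent/add_torrent_params.hpp>
#include <libtorrent/torrent_info.hpp>
#include <libtorrent/error_code.hpp>

#include <boost/shared_ptr.hpp>
#include <fstream>
#include <iterator>

namespace lt = libtorrent;

// hypothetical old-style client code: the raw resume buffer is handed to
// add_torrent_params::resume_data and the session merges it into the params
// via handle_backwards_compatible_resume_data() before adding the torrent
void add_old_style(lt::session& ses, boost::shared_ptr<lt::torrent_info> ti)
{
	lt::add_torrent_params atp;
	atp.ti = ti;
	atp.save_path = ".";

	std::ifstream in("example.fastresume", std::ios::binary);
	atp.resume_data.assign(std::istreambuf_iterator<char>(in)
		, std::istreambuf_iterator<char>());

	lt::error_code ec;
	ses.add_torrent(atp, ec); // resume_data is parsed and merged internally
	// any parse error is reported through ec, just as before
}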

View File

@@ -289,6 +289,7 @@ namespace libtorrent
// if there is resume data already, we don't need to trigger the initial save
// resume data
#error maybe m_need_save_resume_data should be another flag in add_torrent_params
if (!p.resume_data.empty() && (p.flags & add_torrent_params::flag_override_resume_data) == 0)
m_need_save_resume_data = false;
@@ -313,17 +314,59 @@ namespace libtorrent
if (!m_torrent_file)
m_torrent_file = (p.ti ? p.ti : boost::make_shared<torrent_info>(info_hash));
std::vector<web_seed_entry> const& web_seeds = m_torrent_file->web_seeds();
m_web_seeds.insert(m_web_seeds.end(), web_seeds.begin(), web_seeds.end());
// --- WEB SEEDS ---
// if override web seed flag is set, don't load any web seeds from the
// torrent file.
if ((p.flags & add_torrent_params::flag_override_web_seeds) == 0)
{
std::vector<web_seed_entry> const& web_seeds = m_torrent_file->web_seeds();
m_web_seeds.insert(m_web_seeds.end(), web_seeds.begin(), web_seeds.end());
}
// add web seeds from add_torrent_params
bool const multi_file = m_torrent_file->is_valid()
&& m_torrent_file->num_files() > 1;
for (std::vector<std::string>::const_iterator i = p.url_seeds.begin()
, end(p.url_seeds.end()); i != end; ++i)
{
m_web_seeds.push_back(web_seed_t(*i, web_seed_entry::url_seed));
// correct URLs to end with a "/" for multi-file torrents
std::string& url = m_web_seeds.back().url;
if (multi_file && url[url.size()-1] != '/') url += '/';
}
for (std::vector<std::string>::const_iterator i = p.http_seeds.begin()
, end(p.http_seeds.end()); i != end; ++i)
{
m_web_seeds.push_back(web_seed_t(*i, web_seed_entry::http_seed));
}
// --- TRACKERS ---
// if override trackers flag is set, don't load trackers from torrent file
if ((p.flags & add_torrent_params::flag_override_trackers) == 0)
{
m_trackers = m_torrent_file->trackers();
}
int tier = 0;
std::vector<int>::const_iterator tier_iter = p.tracker_tiers.begin();
for (std::vector<std::string>::const_iterator i = p.trackers.begin()
, end(p.trackers.end()); i != end; ++i)
{
if (tier_iter != p.tracker_tiers.end())
tier = *tier_iter++;
announce_entry e(*i);
e.fail_limit = 0;
e.source = announce_entry::source_magnet_link;
e.tier = tier;
m_trackers.push_back(e);
m_torrent_file->add_tracker(*i, tier);
}
m_trackers = m_torrent_file->trackers();
if (m_torrent_file->is_valid())
{
m_seed_mode = (p.flags & add_torrent_params::flag_seed_mode) != 0;
@@ -346,26 +389,9 @@ namespace libtorrent
m_verifying.resize(m_torrent_file->num_pieces(), false);
}
int tier = 0;
std::vector<int>::const_iterator tier_iter = p.tracker_tiers.begin();
for (std::vector<std::string>::const_iterator i = p.trackers.begin()
, end(p.trackers.end()); i != end; ++i)
{
if (tier_iter != p.tracker_tiers.end())
tier = *tier_iter++;
announce_entry e(*i);
e.fail_limit = 0;
e.source = announce_entry::source_magnet_link;
e.tier = tier;
m_trackers.push_back(e);
m_torrent_file->add_tracker(*i, tier);
}
if (settings().get_bool(settings_pack::prefer_udp_trackers))
prioritize_udp_trackers();
m_total_uploaded = p.total_uploaded;
m_total_downloaded = p.total_downloaded;
@@ -744,37 +770,35 @@ namespace libtorrent
#ifndef TORRENT_DISABLE_LOGGING
debug_log("creating torrent: %s max-uploads: %d max-connections: %d "
"upload-limit: %d download-limit: %d flags: %s%s%s%s%s%s%s%s%s%s%s%s"
"upload-limit: %d download-limit: %d flags: %s%s%s%s%s%s%s%s%s%s%s"
"save-path: %s"
, torrent_file().name().c_str()
, p.max_uploads
, p.max_connections
, p.upload_limit
, p.download_limit
, (p.flags == add_torrent_params::flag_seed_mode)
, (p.flags & add_torrent_params::flag_seed_mode)
? "seed-mode " : ""
, (p.flags == add_torrent_params::flag_override_resume_data)
? "override-resume-data " : ""
, (p.flags == add_torrent_params::flag_upload_mode)
, (p.flags & add_torrent_params::flag_upload_mode)
? "upload-mode " : ""
, (p.flags == add_torrent_params::flag_share_mode)
, (p.flags & add_torrent_params::flag_share_mode)
? "share-mode " : ""
, (p.flags == add_torrent_params::flag_apply_ip_filter)
, (p.flags & add_torrent_params::flag_apply_ip_filter)
? "apply-ip-filter " : ""
, (p.flags == add_torrent_params::flag_paused)
, (p.flags & add_torrent_params::flag_paused)
? "paused " : ""
, (p.flags == add_torrent_params::flag_auto_managed)
, (p.flags & add_torrent_params::flag_auto_managed)
? "auto-managed " : ""
, (p.flags == add_torrent_params::flag_merge_resume_trackers)
? "merge-resume-trackers " : ""
, (p.flags == add_torrent_params::flag_update_subscribe)
, (p.flags & add_torrent_params::flag_update_subscribe)
? "update-subscribe " : ""
, (p.flags == add_torrent_params::flag_super_seeding)
, (p.flags & add_torrent_params::flag_super_seeding)
? "super-seeding " : ""
, (p.flags == add_torrent_params::flag_sequential_download)
, (p.flags & add_torrent_params::flag_sequential_download)
? "sequential-download " : ""
, (p.flags == add_torrent_params::flag_use_resume_save_path)
? "resume-save-path " : ""
, (p.flags & add_torrent_params::flag_override_trackers)
? "override-trackers" : ""
, (p.flags & add_torrent_params::flag_override_web_seeds)
? "override-web-seeds " : ""
, p.save_path.c_str()
);
#endif
@@ -2269,6 +2293,10 @@ namespace libtorrent
state_updated();
#error instead of keeping m_resume_data as a member of torrent, we'll need an \
add_torrent_params member to store resume_data until we need it. Unless \
we don't support resume data for magnet links and URLs that resolve later
if (m_resume_data && m_resume_data->node.type() == bdecode_node::dict_t)
{
using namespace libtorrent::detail; // for read_*_endpoint()
@@ -2404,6 +2432,7 @@ namespace libtorrent
if (!j->error && m_resume_data && m_resume_data->node.type() == bdecode_node::dict_t)
{
// parse have bitmask
#error get this from add_torrent_params
bdecode_node pieces = m_resume_data->node.dict_find("pieces");
if (pieces && pieces.type() == bdecode_node::string_t
&& int(pieces.string_length()) == m_torrent_file->num_pieces())
@@ -2423,30 +2452,12 @@ namespace libtorrent
if (m_seed_mode && (pieces_str[i] & 2)) m_verified.set_bit(i);
}
}
else
{
bdecode_node slots = m_resume_data->node.dict_find("slots");
if (slots && slots.type() == bdecode_node::list_t)
{
for (int i = 0; i < slots.list_size(); ++i)
{
int piece = slots.list_int_value_at(i, -1);
if (piece >= 0)
{
need_picker();
m_picker->we_have(piece);
update_gauge();
inc_stats_counter(counters::num_piece_passed);
we_have(piece);
}
}
}
}
// parse unfinished pieces
int num_blocks_per_piece =
static_cast<int>(torrent_file().piece_length()) / block_size();
#error get this from add_torrent_params
if (bdecode_node unfinished_ent
= m_resume_data->node.dict_find_list("unfinished"))
{
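
As a side note on the torrent constructor hunk above: the trailing-slash correction for BEP 19 web seed URLs of multi-file torrents, which used to live in the resume data parser, is now applied as the URLs from add_torrent_params are added. A standalone illustration of that correction (the helper name is made up):

#include <string>

// append a trailing '/' to a BEP 19 web seed URL when the torrent has more
// than one file, so the URL names a directory rather than a single file
std::string normalize_url_seed(std::string url, bool const multi_file)
{
	if (multi_file && !url.empty() && url[url.size() - 1] != '/')
		url += '/';
	return url;
}

// normalize_url_seed("http://mirror.example.com/pub", true)
//   -> "http://mirror.example.com/pub/"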