fix loading of peers and banned_peers from resume_data

arvidn 2016-02-12 15:42:39 -05:00 committed by arvidn
parent 35b998f90d
commit a60bbe0a1c
4 changed files with 93 additions and 132 deletions

View File

@ -38,11 +38,13 @@ POSSIBILITY OF SUCH DAMAGE.
#include "libtorrent/aux_/disable_warnings_push.hpp"
#include <boost/shared_ptr.hpp>
#include <boost/function.hpp>
#include "libtorrent/aux_/disable_warnings_pop.hpp"
#include "libtorrent/storage_defs.hpp"
#include "libtorrent/peer_id.hpp" // sha1_hash
#include "libtorrent/version.hpp"
#include "libtorrent/socket.hpp" // for tcp::endpoint
namespace libtorrent
{
@ -295,9 +297,6 @@ namespace libtorrent
// iterating over the trackers.
std::vector<int> tracker_tiers;
// url seeds to be added to the torrent (`BEP 17`_).
std::vector<std::string> url_seeds;
// a list of hostname and port pairs, representing DHT nodes to be added
// to the session (if DHT is enabled). The hostname may be an IP address.
std::vector<std::pair<std::string, int> > dht_nodes;
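
A small usage sketch for that field (hostname and port are placeholder values, not real bootstrap nodes):

#include <libtorrent/add_torrent_params.hpp>
#include <string>
#include <utility>

// hand an extra DHT node to the session along with the torrent
// (the hostname and port below are placeholders)
void add_dht_node(libtorrent::add_torrent_params& p)
{
	p.dht_nodes.push_back(std::make_pair(std::string("dht.example.com"), 6881));
}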
@ -441,6 +440,9 @@ namespace libtorrent
std::vector<std::string> http_seeds;
std::vector<std::string> url_seeds;
std::vector<tcp::endpoint> peers;
std::vector<tcp::endpoint> banned_peers;
#ifndef TORRENT_NO_DEPRECATE
// The optional parameter, ``resume_data`` can be given if up to date
// fast-resume data is available. The fast-resume data can be acquired
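
That concludes the header change: peers and banned_peers are now ordinary public members of add_torrent_params, so client code can seed them directly as well. A minimal sketch, with placeholder file names and addresses:

#include <libtorrent/session.hpp>
#include <libtorrent/add_torrent_params.hpp>
#include <libtorrent/torrent_info.hpp>
#include <boost/asio/ip/address.hpp>
#include <boost/make_shared.hpp>
#include <string>

int main()
{
	namespace lt = libtorrent;
	lt::session ses;

	lt::add_torrent_params p;
	p.ti = boost::make_shared<lt::torrent_info>(std::string("example.torrent"));
	p.save_path = ".";

	// a peer to try right away (placeholder address)
	p.peers.push_back(lt::tcp::endpoint(
		boost::asio::ip::address::from_string("192.0.2.1"), 6881));
	// a peer that should never be connected to (placeholder address)
	p.banned_peers.push_back(lt::tcp::endpoint(
		boost::asio::ip::address::from_string("192.0.2.2"), 6881));

	lt::torrent_handle h = ses.add_torrent(p);
	(void)h;
	return 0;
}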

View File

@ -41,6 +41,7 @@ POSSIBILITY OF SUCH DAMAGE.
#include "libtorrent/read_resume_data.hpp"
#include "libtorrent/add_torrent_params.hpp"
#include "libtorrent/announce_entry.hpp"
#include "libtorrent/socket_io.hpp" // for read_*_endpoint()
namespace libtorrent
{
@ -85,7 +86,7 @@ namespace libtorrent
can only be done reliably on the libtorrent side as the torrent is being \
added. i.e. the info_hash needs to be saved
ret.toal_uploaded = rd.dict_find_int_value("total_uploaded");
ret.total_uploaded = rd.dict_find_int_value("total_uploaded");
ret.total_downloaded = rd.dict_find_int_value("total_downloaded");
ret.active_time = rd.dict_find_int_value("active_time");
ret.finished_time = rd.dict_find_int_value("finished_time");
@ -176,11 +177,6 @@ namespace libtorrent
}
++tier;
}
std::sort(m_trackers.begin(), m_trackers.end(), boost::bind(&announce_entry::tier, _1)
< boost::bind(&announce_entry::tier, _2));
if (settings().get_bool(settings_pack::prefer_udp_trackers))
prioritize_udp_trackers();
}
// if merge resume http seeds is not set, we need to clear whatever web
@ -193,7 +189,7 @@ namespace libtorrent
{
// since we found http seeds in the resume data, they should replace
// whatever web seeds are specified in the .torrent, by default
ret.flags |= add_torrent_params::flag_override_http_seeds;
ret.flags |= add_torrent_params::flag_override_web_seeds;
}
if (url_list)
@ -274,11 +270,36 @@ namespace libtorrent
}
}
using namespace libtorrent::detail; // for read_*_endpoint()
if (bdecode_node peers_entry = rd.dict_find_string("peers"))
{
char const* ptr = peers_entry.string_ptr();
for (int i = 0; i < peers_entry.string_length(); i += 6)
ret.peers.push_back(read_v4_endpoint<tcp::endpoint>(ptr));
}
if (bdecode_node peers_entry = rd.dict_find_string("peers6"))
{
char const* ptr = peers_entry.string_ptr();
for (int i = 0; i < peers_entry.string_length(); i += 18)
ret.peers.push_back(read_v6_endpoint<tcp::endpoint>(ptr));
}
if (bdecode_node peers_entry = rd.dict_find_string("banned_peers"))
{
char const* ptr = peers_entry.string_ptr();
for (int i = 0; i < peers_entry.string_length(); i += 6)
ret.banned_peers.push_back(read_v4_endpoint<tcp::endpoint>(ptr));
}
if (bdecode_node peers_entry = rd.dict_find_string("banned_peers6"))
{
char const* ptr = peers_entry.string_ptr();
for (int i = 0; i < peers_entry.string_length(); i += 18)
ret.banned_peers.push_back(read_v6_endpoint<tcp::endpoint>(ptr));
}
#error read "unfinished" pieces
#error read "peers" list
#error read "peers6" list
#error read "banned_peers" list
#error read "banned_peers6" list
return ret;
}
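
The "peers", "peers6", "banned_peers" and "banned_peers6" strings parsed above use the compact encoding also used in tracker responses: 4 address bytes plus a 2-byte big-endian port for IPv4 (6 bytes per peer), and 16 plus 2 bytes for IPv6 (18 bytes per peer). A standalone sketch of the IPv4 case, independent of libtorrent's internal read_v4_endpoint() helper:

#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/ip/address_v4.hpp>
#include <boost/cstdint.hpp>
#include <cstring>
#include <vector>

// decode a compact IPv4 peer string: 4 address bytes + 2 port bytes per entry
std::vector<boost::asio::ip::tcp::endpoint>
decode_compact_v4(char const* buf, int len)
{
	using boost::asio::ip::tcp;
	using boost::asio::ip::address_v4;

	std::vector<tcp::endpoint> out;
	for (int i = 0; i + 6 <= len; i += 6)
	{
		address_v4::bytes_type b;
		std::memcpy(b.data(), buf + i, 4);
		// the port is stored in network (big-endian) byte order
		boost::uint16_t const port = boost::uint16_t(
			(boost::uint8_t(buf[i + 4]) << 8) | boost::uint8_t(buf[i + 5]));
		out.push_back(tcp::endpoint(address_v4(b), port));
	}
	return out;
}

Checking i + 6 <= len makes the sketch skip a truncated trailing entry rather than reading past the end of the string.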

View File

@ -36,6 +36,10 @@ POSSIBILITY OF SUCH DAMAGE.
#include "libtorrent/torrent.hpp"
#include "libtorrent/lazy_entry.hpp"
#ifndef TORRENT_NO_DEPRECATE
#include "libtorrent/read_resume_data.hpp"
#endif
#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-macros"
@ -190,20 +194,20 @@ namespace libtorrent
if (!resume_data.url_seeds.empty())
{
atp.urk_seeds.insert(atp.url_seeds.end()
atp.url_seeds.insert(atp.url_seeds.end()
, resume_data.url_seeds.begin()
, resume_data.url_seeds.end());
if ((resume_data.flags & add_torrent_params::flag_merge_resume_http_seeds) == 0)
atp.flags &= ~add_torrent_params::flag_override_web_seeds;
atp.flags |= add_torrent_params::flag_override_web_seeds;
}
if (!resume_data.http_seeds.empty())
{
atp.urk_seeds.insert(atp.http_seeds.end()
atp.url_seeds.insert(atp.http_seeds.end()
, resume_data.http_seeds.begin()
, resume_data.http_seeds.end());
if ((resume_data.flags & add_torrent_params::flag_merge_resume_http_seeds) == 0)
atp.flags &= ~add_torrent_params::flag_override_web_seeds;
atp.flags |= add_torrent_params::flag_override_web_seeds;
}
atp.total_uploaded = resume_data.total_uploaded;
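
The corrected branches above make web seeds found in the resume data replace the ones from the .torrent by default (flag_override_web_seeds), unless the caller asked for merging. From client code that choice is a single flag; a minimal sketch:

#include <libtorrent/add_torrent_params.hpp>

// keep the web seeds from the .torrent in addition to those found in the
// resume data, instead of letting the resume data replace them
void merge_web_seeds(libtorrent::add_torrent_params& atp)
{
	atp.flags |= libtorrent::add_torrent_params::flag_merge_resume_http_seeds;
}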
@ -211,11 +215,11 @@ namespace libtorrent
atp.num_complete = resume_data.num_complete;
atp.num_incomplete = resume_data.num_incomplete;
atp.num_downloaded = resume_data.num_downloaded;
atp.total_uploaded = resume_data.total_uploaded
atp.total_downloaded = resume_data.total_downloaded
atp.active_time = resume_data.active_time
atp.finished_time = resume_data.finished_time
atp.seeding_time = resume_data.seeding_time
atp.total_uploaded = resume_data.total_uploaded;
atp.total_downloaded = resume_data.total_downloaded;
atp.active_time = resume_data.active_time;
atp.finished_time = resume_data.finished_time;
atp.seeding_time = resume_data.seeding_time;
atp.last_seen_complete = resume_data.last_seen_complete;
atp.url = resume_data.url;
@ -225,10 +229,13 @@ namespace libtorrent
atp.added_time = resume_data.added_time;
atp.completed_time = resume_data.completed_time;
if ((atp.flags & flag_override_resume_data) == 0)
atp.peers.swap(resume_data.peers);
atp.banned_peers.swap(resume_data.banned_peers);
if ((atp.flags & add_torrent_params::flag_override_resume_data) == 0)
{
atp.download_limit = resume_data.download_limit;
atp.uoload_limit = resume_data.upload_limit;
atp.upload_limit = resume_data.upload_limit;
atp.max_connections = resume_data.max_connections;
atp.max_uploads = resume_data.max_uploads;
atp.trackerid = resume_data.trackerid;
@ -236,7 +243,7 @@ namespace libtorrent
boost::uint64_t const mask =
add_torrent_params::flag_seed_mode
| add_torrent_params::flagsuper_seeding
| add_torrent_params::flag_super_seeding
| add_torrent_params::flag_auto_managed
| add_torrent_params::flag_sequential_download
| add_torrent_params::flag_paused;
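
The mask above selects the state-like flags (seed mode, super seeding, auto managed, sequential download, paused), presumably so that only those bits, rather than the whole flags word, are carried over from the resume data. A generic sketch of that kind of masked merge, using hypothetical bit values:

#include <boost/cstdint.hpp>
#include <cassert>

// copy only the bits selected by `mask` from `src` into `dst`
boost::uint64_t merge_masked_flags(boost::uint64_t dst
	, boost::uint64_t src, boost::uint64_t mask)
{
	return (dst & ~mask) | (src & mask);
}

int main()
{
	boost::uint64_t const mask = 0x30; // hypothetical flag bits
	assert(merge_masked_flags(0x01, 0x20, mask) == 0x21);
	return 0;
}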
@ -254,7 +261,7 @@ namespace libtorrent
error_code ec;
#ifndef TORRENT_NO_DEPRECATE
add_torrent_params p = params;
handle_backwards_compatible_resume_data(params, ec);
handle_backwards_compatible_resume_data(p, ec);
if (ec) throw libtorrent_exception(ec);
#else
add_torrent_params const& p = params;
@ -286,7 +293,11 @@ namespace libtorrent
error_code ec;
handle_backwards_compatible_resume_data(*p, ec);
#error what should we do about error handling here?
if (ec) return;
if (ec)
{
delete p;
return;
}
#endif
TORRENT_ASYNC_CALL1(async_add_torrent, p);
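
Regarding the #error above: async_add_torrent() has no way to return an error_code directly, so failures are reported back to the client as an add_torrent_alert carrying the error. A sketch of picking that up on the client side:

#include <libtorrent/session.hpp>
#include <libtorrent/alert_types.hpp>
#include <cstdio>
#include <vector>

// after ses.async_add_torrent(params), the result arrives as an alert
void handle_add_results(libtorrent::session& ses)
{
	namespace lt = libtorrent;
	std::vector<lt::alert*> alerts;
	ses.pop_alerts(&alerts);
	for (std::size_t i = 0; i < alerts.size(); ++i)
	{
		lt::add_torrent_alert* a = lt::alert_cast<lt::add_torrent_alert>(alerts[i]);
		if (a == 0) continue;
		if (a->error)
			std::printf("async_add_torrent failed: %s\n", a->error.message().c_str());
	}
}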
@ -312,7 +323,8 @@ namespace libtorrent
bencode(std::back_inserter(p.resume_data), resume_data);
}
p.storage_mode = storage_mode;
p.paused = paused;
if (paused) p.flags |= add_torrent_params::flag_paused;
else p.flags &= ~add_torrent_params::flag_paused;
return add_torrent(p);
}
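
That wrapper, together with the deprecated resume handling above it, backs the old style of passing raw resume bytes in add_torrent_params::resume_data. A client-side sketch of that usage, with placeholder file names:

#include <libtorrent/session.hpp>
#include <libtorrent/add_torrent_params.hpp>
#include <libtorrent/torrent_info.hpp>
#include <libtorrent/error_code.hpp>
#include <boost/make_shared.hpp>
#include <cstdio>
#include <fstream>
#include <iterator>
#include <string>

void add_with_old_style_resume(libtorrent::session& ses)
{
	namespace lt = libtorrent;

	lt::add_torrent_params p;
	p.ti = boost::make_shared<lt::torrent_info>(std::string("example.torrent"));
	p.save_path = ".";

	// deprecated path: hand the raw .fastresume bytes to libtorrent and let
	// handle_backwards_compatible_resume_data() unpack them into the params
	std::ifstream in("example.fastresume", std::ios::binary);
	p.resume_data.assign(std::istreambuf_iterator<char>(in)
		, std::istreambuf_iterator<char>());

	lt::error_code ec;
	ses.add_torrent(p, ec);
	if (ec) std::printf("add_torrent failed: %s\n", ec.message().c_str());
}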

View File

@ -367,6 +367,35 @@ namespace libtorrent
m_torrent_file->add_tracker(*i, tier);
}
std::sort(m_trackers.begin(), m_trackers.end(), boost::bind(&announce_entry::tier, _1)
< boost::bind(&announce_entry::tier, _2));
if (settings().get_bool(settings_pack::prefer_udp_trackers))
prioritize_udp_trackers();
// --- PEERS ---
for (std::vector<tcp::endpoint>::const_iterator i = p.peers.begin()
, end(p.peers.end()); i != end; ++i)
{
add_peer(*i , peer_info::resume_data);
}
for (std::vector<tcp::endpoint>::const_iterator i = p.banned_peers.begin()
, end(p.banned_peers.end()); i != end; ++i)
{
torrent_peer* peer = add_peer(*i, peer_info::resume_data);
if (peer) ban_peer(peer);
}
if (!p.peers.empty() || !p.banned_peers.empty())
update_want_peers();
#ifndef TORRENT_DISABLE_LOGGING
if (m_peer_list && m_peer_list->num_peers() > 0)
debug_log("resume added peers (%d)", m_peer_list->num_peers());
#endif
if (m_torrent_file->is_valid())
{
m_seed_mode = (p.flags & add_torrent_params::flag_seed_mode) != 0;
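
The tier sort that moved here from the resume parsing path expresses its comparison with boost::bind; an equivalent standalone version, sorting a local vector of announce entries by their tier field, looks like this:

#include <libtorrent/announce_entry.hpp>
#include <algorithm>
#include <vector>

// order trackers by ascending tier, same as the boost::bind comparison above
bool tier_less(libtorrent::announce_entry const& lhs
	, libtorrent::announce_entry const& rhs)
{
	return lhs.tier < rhs.tier;
}

void sort_by_tier(std::vector<libtorrent::announce_entry>& trackers)
{
	std::sort(trackers.begin(), trackers.end(), &tier_less);
}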
@ -2297,109 +2326,6 @@ namespace libtorrent
add_torrent_params member, to store resume_data until we need it. Unless, \
we don't support resume data for magnet links and URLs, that resolve later
if (m_resume_data && m_resume_data->node.type() == bdecode_node::dict_t)
{
using namespace libtorrent::detail; // for read_*_endpoint()
if (bdecode_node peers_entry = m_resume_data->node.dict_find_string("peers"))
{
int num_peers = peers_entry.string_length() / (sizeof(address_v4::bytes_type) + 2);
char const* ptr = peers_entry.string_ptr();
for (int i = 0; i < num_peers; ++i)
{
add_peer(read_v4_endpoint<tcp::endpoint>(ptr)
, peer_info::resume_data);
}
update_want_peers();
}
if (bdecode_node banned_peers_entry
= m_resume_data->node.dict_find_string("banned_peers"))
{
int num_peers = banned_peers_entry.string_length() / (sizeof(address_v4::bytes_type) + 2);
char const* ptr = banned_peers_entry.string_ptr();
for (int i = 0; i < num_peers; ++i)
{
std::vector<torrent_peer*> peers;
torrent_peer* p = add_peer(read_v4_endpoint<tcp::endpoint>(ptr)
, peer_info::resume_data);
peers_erased(peers);
if (p) ban_peer(p);
}
update_want_peers();
}
#if TORRENT_USE_IPV6
if (bdecode_node peers6_entry = m_resume_data->node.dict_find_string("peers6"))
{
int num_peers = peers6_entry.string_length() / (sizeof(address_v6::bytes_type) + 2);
char const* ptr = peers6_entry.string_ptr();
for (int i = 0; i < num_peers; ++i)
{
add_peer(read_v6_endpoint<tcp::endpoint>(ptr)
, peer_info::resume_data);
}
update_want_peers();
}
if (bdecode_node banned_peers6_entry = m_resume_data->node.dict_find_string("banned_peers6"))
{
int num_peers = banned_peers6_entry.string_length() / (sizeof(address_v6::bytes_type) + 2);
char const* ptr = banned_peers6_entry.string_ptr();
for (int i = 0; i < num_peers; ++i)
{
torrent_peer* p = add_peer(read_v6_endpoint<tcp::endpoint>(ptr)
, peer_info::resume_data);
if (p) ban_peer(p);
}
update_want_peers();
}
#endif
// parse out "peers" from the resume data and add them to the peer list
if (bdecode_node peers_entry = m_resume_data->node.dict_find_list("peers"))
{
for (int i = 0; i < peers_entry.list_size(); ++i)
{
bdecode_node e = peers_entry.list_at(i);
if (e.type() != bdecode_node::dict_t) continue;
std::string ip = e.dict_find_string_value("ip");
int port = e.dict_find_int_value("port");
if (ip.empty() || port == 0) continue;
error_code ec;
tcp::endpoint a(address::from_string(ip, ec), boost::uint16_t(port));
if (ec) continue;
add_peer(a, peer_info::resume_data);
}
update_want_peers();
}
// parse out "banned_peers" and add them as banned
if (bdecode_node banned_peers_entry = m_resume_data->node.dict_find_list("banned_peers"))
{
for (int i = 0; i < banned_peers_entry.list_size(); ++i)
{
bdecode_node e = banned_peers_entry.list_at(i);
if (e.type() != bdecode_node::dict_t) continue;
std::string ip = e.dict_find_string_value("ip");
int port = e.dict_find_int_value("port");
if (ip.empty() || port == 0) continue;
error_code ec;
tcp::endpoint a(address::from_string(ip, ec)
, boost::uint16_t(port));
if (ec) continue;
torrent_peer* p = add_peer(a, peer_info::resume_data);
if (p) ban_peer(p);
}
update_want_peers();
}
}
#ifndef TORRENT_DISABLE_LOGGING
if (m_peer_list && m_peer_list->num_peers() > 0)
debug_log("resume added peers (%d)", m_peer_list->num_peers());
#endif
// only report this error if the user actually provided resume data
if ((j->error || j->ret != 0) && m_resume_data
&& m_ses.alerts().should_post<fastresume_rejected_alert>())
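
When that check fires, the failure reaches the client as a fastresume_rejected_alert. A minimal polling sketch for spotting it:

#include <libtorrent/session.hpp>
#include <libtorrent/alert_types.hpp>
#include <cstdio>
#include <vector>

// poll once for alerts and report any rejected fast-resume data
void report_rejected_resume(libtorrent::session& ses)
{
	namespace lt = libtorrent;
	std::vector<lt::alert*> alerts;
	ses.pop_alerts(&alerts);
	for (std::vector<lt::alert*>::const_iterator i = alerts.begin()
		, end(alerts.end()); i != end; ++i)
	{
		lt::fastresume_rejected_alert* a =
			lt::alert_cast<lt::fastresume_rejected_alert>(*i);
		if (a) std::printf("resume data rejected: %s\n", a->message().c_str());
	}
}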