merge RC_1_1 into master

This commit is contained in:
arvidn 2017-02-26 00:48:12 -05:00
commit 081365fbcf
13 changed files with 158 additions and 27 deletions

View File

@ -57,6 +57,9 @@
* resume data no longer has timestamps of files
* require C++11 to build libtorrent
1.1.2 release
* default TOS marking to 0x20
* fix invalid access when leaving seed-mode with outstanding hash jobs
* fix ABI compatibility issue introduced with preformatted entry type
* add web_seed_name_lookup_retry to session_settings

View File

@ -6,6 +6,7 @@
#include <libtorrent/create_torrent.hpp>
#include <libtorrent/file_storage.hpp>
#include "libtorrent/torrent_info.hpp"
#include <libtorrent/version.hpp>
#include "bytes.hpp"
using namespace boost::python;

View File

@ -45,12 +45,81 @@ namespace boost
}
}
#include <boost/asio/error.hpp>
#if defined TORRENT_USE_OPENSSL
#include <boost/asio/ssl/error.hpp>
#endif
#include "boost_python.hpp"
using namespace boost::python;
using namespace libtorrent;
using boost::system::error_category;
namespace {

// boost.python pickle support for error_code. An error_code is
// serialized as the 2-tuple (value, category-name) and restored by
// mapping the category name back to its category singleton.
struct ec_pickle_suite : boost::python::pickle_suite
{
    // error_code is default-constructible; no __init__ arguments needed
    static boost::python::tuple
    getinitargs(error_code const& ec)
    {
        return boost::python::tuple();
    }

    // capture the state that identifies an error_code: its integer
    // value and the name of its category
    static boost::python::tuple
    getstate(error_code const& ec)
    {
        return boost::python::make_tuple(ec.value(), ec.category().name());
    }

    // restore an error_code from the (value, category-name) tuple
    // produced by getstate(). Raises ValueError on a malformed tuple
    // or an unrecognized category name.
    static void
    setstate(error_code& ec, boost::python::tuple state)
    {
        using namespace boost::python;
        if (len(state) != 2)
        {
            PyErr_SetObject(PyExc_ValueError,
                ("expected 2-item tuple in call to __setstate__; got %s"
                    % state).ptr());
            throw_error_already_set();
        }

        int const value = extract<int>(state[0]);
        std::string const category = extract<std::string>(state[1]);
        // map the category name (as produced by category().name()) back
        // to the matching category object
        if (category == "system")
            ec.assign(value, libtorrent::system_category());
        else if (category == "generic")
            ec.assign(value, libtorrent::generic_category());
        else if (category == "libtorrent")
            ec.assign(value, libtorrent::libtorrent_category());
        else if (category == "http error")
            ec.assign(value, libtorrent::http_category());
        else if (category == "UPnP error")
            ec.assign(value, libtorrent::upnp_category());
        else if (category == "bdecode error")
            ec.assign(value, libtorrent::bdecode_category());
        else if (category == "asio.netdb")
            ec.assign(value, boost::asio::error::get_netdb_category());
        // boost.asio's addrinfo category names itself "asio.addrinfo"
        // (this was previously misspelled "asio.addinfo", so addrinfo
        // error codes could never be unpickled)
        else if (category == "asio.addrinfo")
            ec.assign(value, boost::asio::error::get_addrinfo_category());
        else if (category == "asio.misc")
            ec.assign(value, boost::asio::error::get_misc_category());
#if defined TORRENT_USE_OPENSSL
        else if (category == "asio.ssl")
            ec.assign(value, boost::asio::error::get_ssl_category());
#endif
        else
        {
            PyErr_SetObject(PyExc_ValueError,
                ("unexpected error_category passed to __setstate__; got '%s'"
                    % object(category)).ptr());
            throw_error_already_set();
        }
    }
};
}
void bind_error_code()
{
using boost::noncopyable;
@ -71,6 +140,7 @@ void bind_error_code()
.def("category", &error_code::category
, return_value_policy<reference_existing_object>())
.def("assign", &error_code::assign)
.def_pickle(ec_pickle_suite())
;
using return_existing = return_value_policy<reference_existing_object>;

View File

@ -247,7 +247,10 @@ list trackers(torrent_handle& h)
d["url"] = i->url;
d["trackerid"] = i->trackerid;
d["message"] = i->message;
d["last_error"] = i->last_error;
dict last_error;
last_error["value"] = i->last_error.value();
last_error["category"] = i->last_error.category().name();
d["last_error"] = last_error;
d["next_announce"] = i->next_announce;
d["min_announce"] = i->min_announce;
d["scrape_incomplete"] = i->scrape_incomplete;

View File

@ -14,6 +14,7 @@ import binascii
import subprocess as sub
import sys
import inspect
import pickle
# include terminal interface for travis parallel executions of scripts which use
# terminal features: fix multiple stdin assignment at termios.tcgetattr
@ -88,6 +89,20 @@ class test_torrent_handle(unittest.TestCase):
self.assertEqual(new_trackers[1]['tier'], 1)
self.assertEqual(new_trackers[1]['fail_limit'], 2)
def test_pickle_trackers(self):
"""Test lt object converters are working and trackers can be pickled"""
self.setup()
# install a single tracker in the highest-priority tier
tracker = lt.announce_entry('udp://tracker1.com')
tracker.tier = 0
tracker.fail_limit = 1
trackers = [tracker]
self.h.replace_trackers(trackers)
# trackers() returns plain dicts, so the list must survive a
# pickle round-trip without any libtorrent types leaking through
tracker_list = [tracker for tracker in self.h.trackers()]
pickled_trackers = pickle.dumps(tracker_list)
unpickled_trackers = pickle.loads(pickled_trackers)
self.assertEqual(unpickled_trackers[0]['url'], 'udp://tracker1.com')
# last_error is exposed as a {'value': ..., 'category': ...} dict;
# a freshly-added tracker has no error recorded, so value is 0
self.assertEqual(unpickled_trackers[0]['last_error']['value'], 0)
def test_file_status(self):
self.setup()
l = self.h.file_status()
@ -109,6 +124,13 @@ class test_torrent_handle(unittest.TestCase):
self.assertLessEqual(abs(st.last_upload - sessionStart), datetime.timedelta(seconds=1))
self.assertLessEqual(abs(st.last_download - sessionStart), datetime.timedelta(seconds=1))
def test_serialize_trackers(self):
"""Test to ensure the tracker dict contains only python built-in types"""
self.setup()
self.h.add_tracker({'url':'udp://tracker1.com'})
import json
# json.dumps() raises TypeError if any non-builtin type leaks
# through, so merely serializing the dict is the assertion here
print(json.dumps(self.h.trackers()[0]))
def test_torrent_status(self):
self.setup()
st = self.h.status()
@ -257,6 +279,7 @@ class test_torrent_info(unittest.TestCase):
self.assertEquals(ae.can_announce(False), True)
self.assertEquals(ae.scrape_incomplete, -1)
self.assertEquals(ae.next_announce, None)
self.assertEquals(ae.last_error.value(), 0)
class test_alerts(unittest.TestCase):

View File

@ -20,6 +20,8 @@ client_test_SOURCES = client_test.cpp print.cpp session_view.cpp torrent_view.cp
stats_counters_SOURCES = stats_counters.cpp
bt_get_SOURCES = bt-get.cpp
bt_get2_SOURCES = bt-get2.cpp
bt_get_CXXFLAGS = -std=c++11
bt_get2_CXXFLAGS = -std=c++11
dump_torrent_SOURCES = dump_torrent.cpp
make_torrent_SOURCES = make_torrent.cpp
simple_client_SOURCES = simple_client.cpp

View File

@ -41,7 +41,6 @@ POSSIBILITY OF SUCH DAMAGE.
#include "libtorrent/string_view.hpp"
#include "libtorrent/aux_/vector.hpp"
#include "libtorrent/file.hpp" // for combine_path etc.
#include "libtorrent/version.hpp"
#include <vector>
#include <string>

View File

@ -793,27 +793,9 @@ namespace libtorrent
// low number, like 5
urlseed_pipeline_size,
// The maximum request range of an url seed in bytes. This value
// defines the largest possible sequential web seed request. Default
// is 16 * 1024 * 1024. Lower values are possible but will be ignored
// if they are lower then piece size.
// This value should be related to your download speed to prevent
// libtorrent from creating too many expensive http requests per
// second. You can select a value as high as you want but keep in mind
// that libtorrent can't create parallel requests if the first request
// did already select the whole file.
// If you combine bittorrent seeds with web seeds and pick strategies
// like rarest first you may find your web seed requests split into
// smaller parts because we don't download already picked pieces
// twice.
urlseed_max_request_bytes,
// time to wait until a new retry of a web seed takes place
urlseed_wait_retry,
// time to wait until a new retry of a web seed name lookup
web_seed_name_lookup_retry,
// sets the upper limit on the total number of files this session will
// keep open. The reason why files are left open at all is that some
// anti virus software hooks on every file close, and scans the file
@ -1618,6 +1600,24 @@ namespace libtorrent
// them in the cache is limited.
cache_size_volatile,
// The maximum request range of an url seed in bytes. This value
// defines the largest possible sequential web seed request. Default
// is 16 * 1024 * 1024. Lower values are possible but will be ignored
// if they are lower then piece size.
// This value should be related to your download speed to prevent
// libtorrent from creating too many expensive http requests per
// second. You can select a value as high as you want but keep in mind
// that libtorrent can't create parallel requests if the first request
// did already select the whole file.
// If you combine bittorrent seeds with web seeds and pick strategies
// like rarest first you may find your web seed requests split into
// smaller parts because we don't download already picked pieces
// twice.
urlseed_max_request_bytes,
// time to wait until a new retry of a web seed name lookup
web_seed_name_lookup_retry,
max_int_setting_internal
};

View File

@ -211,9 +211,7 @@ namespace libtorrent
SET(peer_timeout, 120, nullptr),
SET(urlseed_timeout, 20, nullptr),
SET(urlseed_pipeline_size, 5, nullptr),
SET(urlseed_max_request_bytes, 16 * 1024 * 1024, 0),
SET(urlseed_wait_retry, 30, nullptr),
SET(web_seed_name_lookup_retry, 1800, nullptr),
SET(file_pool_size, 40, nullptr),
SET(max_failcount, 3, &session_impl::update_max_failcount),
SET(min_reconnect_time, 60, nullptr),
@ -240,7 +238,7 @@ namespace libtorrent
SET(disk_io_read_mode, settings_pack::enable_os_cache, nullptr),
SET(outgoing_port, 0, nullptr),
SET(num_outgoing_ports, 0, nullptr),
SET(peer_tos, 0, &session_impl::update_peer_tos),
SET(peer_tos, 0x20, &session_impl::update_peer_tos),
SET(active_downloads, 3, &session_impl::trigger_auto_manage),
SET(active_seeds, 5, &session_impl::trigger_auto_manage),
SET(active_checking, 1, &session_impl::trigger_auto_manage),
@ -326,7 +324,9 @@ namespace libtorrent
SET(proxy_type, settings_pack::none, &session_impl::update_proxy),
SET(proxy_port, 0, &session_impl::update_proxy),
SET(i2p_port, 0, &session_impl::update_i2p_bridge),
SET(cache_size_volatile, 256, nullptr)
SET(cache_size_volatile, 256, nullptr),
SET(urlseed_max_request_bytes, 16 * 1024 * 1024, 0),
SET(web_seed_name_lookup_retry, 1800, nullptr),
}});
#undef SET

View File

@ -10724,7 +10724,7 @@ namespace libtorrent
st->pieces.resize(num_pieces, false);
}
st->num_pieces = num_have();
st->num_seeds = num_seeds();
st->num_seeds = num_seeds() - int(m_num_connecting_seeds);
if ((flags & torrent_handle::query_distributed_copies) && m_picker.get())
{
std::tie(st->distributed_full_copies, st->distributed_fraction) =

View File

@ -136,7 +136,7 @@ namespace libtorrent
res = ConvertUTF32toUTF8(const_cast<const UTF32**>(&cp), cp + 1, &start, start + 5, lenientConversion);
TORRENT_ASSERT(res == conversionOK);
for (int i = 0; i < start - sequence; ++i)
for (int i = 0; i < std::min(5, int(start - sequence)); ++i)
tmp_path += char(sequence[i]);
}

View File

@ -89,7 +89,8 @@ int print_failures()
std::printf("\x1b[0m");
if (total_num_failures > 0)
std::printf("\n\n\x1b[41m == %d TEST(S) FAILED ==\x1b[0m\n\n\n", total_num_failures);
std::printf("\n\n\x1b[41m == %d TEST(S) FAILED ==\x1b[0m\n\n\n"
, total_num_failures);
return total_num_failures;
}

View File

@ -226,3 +226,32 @@ TORRENT_TEST(load_pack_from_dict)
TEST_EQUAL(p2.get_int(settings_pack::max_out_request_queue), 1337);
TEST_EQUAL(p2.get_bool(settings_pack::send_redundant_have), false);
}
TORRENT_TEST(settings_pack_abi)
{
// make sure enum values are preserved across libtorrent versions
// for ABI compatibility
// These values are only allowed to change across major versions
TEST_EQUAL(settings_pack::string_type_base, 0x0000);
TEST_EQUAL(settings_pack::int_type_base, 0x4000);
TEST_EQUAL(settings_pack::bool_type_base, 0x8000);
TEST_EQUAL(settings_pack::type_mask, 0xc000);

// each setting's enum value is its type base plus its position in the
// enum; inserting a new setting anywhere but at the end of its enum
// would shift the offsets below and break the ABI. The spot-checks
// cover entries spread across each enum's range.

// strings
TEST_EQUAL(settings_pack::outgoing_interfaces, settings_pack::string_type_base + 4);
TEST_EQUAL(settings_pack::dht_bootstrap_nodes, settings_pack::string_type_base + 11);

// bool
TEST_EQUAL(settings_pack::lazy_bitfields, settings_pack::bool_type_base + 3);
TEST_EQUAL(settings_pack::use_read_cache, settings_pack::bool_type_base + 7);
TEST_EQUAL(settings_pack::proxy_tracker_connections, settings_pack::bool_type_base + 67);

// ints
TEST_EQUAL(settings_pack::max_suggest_pieces, settings_pack::int_type_base + 66);
TEST_EQUAL(settings_pack::connections_slack, settings_pack::int_type_base + 86);
TEST_EQUAL(settings_pack::aio_threads, settings_pack::int_type_base + 104);
TEST_EQUAL(settings_pack::max_http_recv_buffer_size, settings_pack::int_type_base + 115);
// web_seed_name_lookup_retry was added at the end of the int enum in
// this release, so it must keep this offset from now on
TEST_EQUAL(settings_pack::web_seed_name_lookup_retry, settings_pack::int_type_base + 128);
}