merged RC_1_1 into master

arvidn 2017-02-17 00:35:49 -05:00
commit 3ffa3f2a08
15 changed files with 173 additions and 144 deletions

View File

@@ -56,6 +56,7 @@
 * resume data no longer has timestamps of files
 * require C++11 to build libtorrent
+* fix ABI compatibility issue introduced with preformatted entry type
 * add web_seed_name_lookup_retry to session_settings
 * slightly improve proxy settings backwards compatibility
 * add function to get default settings

View File

@@ -52,7 +52,7 @@ install:
 - if not defined linkflags ( set linkflags="" )
 - if not defined include ( set include="" )
 - cd %ROOT_DIRECTORY%
-- set BOOST_ROOT=c:\Libraries\boost_1_59_0
+- set BOOST_ROOT=c:\Libraries\boost_1_63_0
 - set BOOST_BUILD_PATH=%BOOST_ROOT%\tools\build
 - echo %BOOST_ROOT%
 - echo %BOOST_BUILD_PATH%

View File

@@ -13,6 +13,7 @@ import shutil
 import binascii
 import subprocess as sub
 import sys
+import inspect

 # include terminal interface for travis parallel executions of scripts which use
 # terminal features: fix multiple stdin assignment at termios.tcgetattr
@@ -365,11 +366,25 @@ class test_session(unittest.TestCase):
     def test_post_session_stats(self):
         s = lt.session({'alert_mask': lt.alert.category_t.stats_notification,
             'enable_dht': False})
-        s.post_session_stats()
-        a = s.wait_for_alert(1000)
-        self.assertTrue(isinstance(a, lt.session_stats_alert))
-        self.assertTrue(isinstance(a.values, dict))
-        self.assertTrue(len(a.values) > 0)
+
+    def test_post_session_stats(self):
+        s = lt.session({'alert_mask': lt.alert.category_t.stats_notification, 'enable_dht': False})
+        s.post_session_stats()
+        alerts = []
+        # first the stats headers log line. but not if logging is disabled
+        if 'log_alert' in [i[0] for i in inspect.getmembers(lt)]:
+            s.wait_for_alert(1000)
+            alerts = s.pop_alerts()
+            a = alerts.pop(0)
+            self.assertTrue(isinstance(a, lt.log_alert))
+        # then the actual stats values
+        if len(alerts) == 0:
+            s.wait_for_alert(1000)
+            alerts = s.pop_alerts()
+        a = alerts.pop(0)
+        self.assertTrue(isinstance(a, lt.session_stats_alert))
+        self.assertTrue(isinstance(a.values, dict))
+        self.assertTrue(len(a.values) > 0)

     def test_unknown_settings(self):
         try:

View File

@@ -23,6 +23,38 @@ computer.
 This document describes techniques to benchmark libtorrent performance
 and how parameters are likely to affect it.

+profiling
+=========
+
+libtorrent is instrumented with a number of counters and gauges you can have
+access to via the ``session_stats_alert``. First, enable these alerts in the
+alert mask::
+
+	settings_pack p;
+	p.set_int(settings_pack::alert_mask, alert::stats_notification);
+	ses.apply_settings(p);
+
+Then print alerts to a file::
+
+	std::vector<alert*> alerts;
+	ses.pop_alerts(&alerts);
+
+	for (auto* a : alerts) {
+		std::cout << a->message() << "\n";
+	}
+
+If you want to separate generic alerts from session stats, you can filter on the
+alert category in the alert, ``alert::category()``.
+
+The alerts with data will have the type ``session_stats_alert`` and there is one
+``log_alert`` that will be posted on startup containing the column names
+for all metrics. Logging this line will greatly simplify interpreting the output.
+
+The python script in ``tools/parse_session_stats.py`` can parse the resulting
+file and produce graphs of relevant stats. It requires gnuplot__.
+
+__ http://www.gnuplot.info
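
A minimal end-to-end sketch of the workflow described above: enable the stats
category, post the stats once a second, and append the header ``log_alert``
line plus every ``session_stats_alert`` row to a file that
``tools/parse_session_stats.py`` can read. The output file name and the fixed
loop length are arbitrary examples, not part of this change:

	#include <libtorrent/session.hpp>
	#include <libtorrent/settings_pack.hpp>
	#include <libtorrent/alert_types.hpp>
	#include <libtorrent/time.hpp>

	#include <fstream>
	#include <vector>

	int main()
	{
		namespace lt = libtorrent;

		lt::settings_pack p;
		p.set_int(lt::settings_pack::alert_mask
			, lt::alert::stats_notification | lt::alert::session_log_notification);
		lt::session ses(p);

		std::ofstream log("stats.log"); // example output file name
		for (int tick = 0; tick < 60; ++tick)
		{
			ses.post_session_stats();
			ses.wait_for_alert(lt::seconds(1));

			std::vector<lt::alert*> alerts;
			ses.pop_alerts(&alerts);
			for (lt::alert* a : alerts)
			{
				// keep only the column-header log line and the stats rows
				if (lt::alert_cast<lt::session_stats_alert>(a)
					|| lt::alert_cast<lt::log_alert>(a))
					log << a->message() << "\n";
			}
		}
	}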

 reducing memory footprint
 =========================
@@ -145,21 +177,6 @@ all peers. This is the least amount of memory possible for the send buffer.
 You should benchmark your max send rate when adjusting this setting. If you have
 a very fast disk, you are less likely see a performance hit.

-optimize hashing for memory usage
----------------------------------
-
-When libtorrent is doing hash checks of a file, or when it re-reads a piece that
-was just completed to verify its hash, there are two options. The default one
-is optimized for speed, which allocates buffers for the entire piece, reads in
-the whole piece in one read call, then hashes it.
-
-The second option is to optimize for memory usage instead, where a single buffer
-is allocated, and the piece is read one block at a time, hashing it as each
-block is read from the file. For low memory environments, this latter approach
-is recommended. Change this by settings ``settings_pack::optimize_hashing_for_speed``
-to false. This will significantly reduce peak memory usage, especially for
-torrents with very large pieces.

 reduce executable size
 ----------------------
@@ -185,28 +202,6 @@ For all available options, see the `building libtorrent`_ secion.

 .. _`building libtorrent`: building.html

-play nice with the disk
-=======================
-
-When checking a torrent, libtorrent will try to read as fast as possible from the disk.
-The only thing that might hold it back is a CPU that is slow at calculating SHA-1 hashes,
-but typically the file checking is limited by disk read speed. Most operating systems
-today do not prioritize disk access based on the importance of the operation, this means
-that checking a torrent might delay other disk accesses, such as virtual memory swapping
-or just loading file by other (interactive) applications.
-
-In order to play nicer with the disk, and leave some spare time for it to service other
-processes that might be of higher importance to the end-user, you can introduce a sleep
-between the disc accesses. This is a direct tradeoff between how fast you can check a
-torrent and how soft you will hit the disk.
-
-You control this by setting the ``settings_pack::file_checks_delay_per_block`` to greater
-than zero. This number is the number of milliseconds to sleep between each read of 16 kiB.
-
-The sleeps are not necessarily in between each 16 kiB block (it might be read in larger chunks),
-but the number will be multiplied by the number of blocks that were read, to maintain the
-same semantics.

 high performance seeding
 ========================
@@ -251,25 +246,6 @@ the read cache is removed immediately. This saves a significant amount of cache
 which can be used as read-ahead for other peers. To enable volatile read cache, set
 ``settings_pack::volatile_read_cache`` to true.

-SSD as level 2 cache
---------------------
-
-It is possible to introduce a second level of cache, below the RAM disk cache. This is done
-by setting ``settings_pack::mmap_cache`` to a file path pointing to the SSD drive, and
-increasing the ``settings_pack::cache_size`` to the number of 16 kiB blocks would fit
-on the drive (or less).
-
-This will allocate disk buffers (for reading and writing) from a memory region that has
-been mapped to the specified file. If the drive this file lives on is not significantly
-faster than the destination drive, performance will be degraded. The point is to take
-advantage primarily of the fast read speed from SSD drives and use it to extend the read
-cache, improving seed performance.
-
-Which parts of the cache that actually live in RAM is determined by the operating system.
-Note that when using this feature, any block which ends up being pulled from the mmapped
-file will be considered a cache hit.

 uTP-TCP mixed mode
 ------------------
@@ -305,8 +281,8 @@ peers
 -----

 First of all, in order to allow many connections, set the global connection limit
-high, ``session::set_max_connections()``. Also set the upload rate limit to
-infinite, ``session::set_upload_rate_limit()``, passing 0 means infinite.
+high, ``settings_pack::connections_limit``. Also set the upload rate limit to
+infinite, ``settings_pack::upload_rate_limit``, 0 means infinite.

 When dealing with a large number of peers, it might be a good idea to have slightly
 stricter timeouts, to get rid of lingering connections as soon as possible.
@@ -319,9 +295,10 @@ multiple connections from the same IP. That way two people from behind the same
 can use the service simultaneously. This is controlled by
 ``settings_pack::allow_multiple_connections_per_ip``.

-In order to always unchoke peers, turn off automatic unchoke
-``settings_pack::auto_upload_slots`` and set the number of upload slots to a large
-number via ``session::set_max_uploads()``, or use -1 (which means infinite).
+In order to always unchoke peers, turn off automatic unchoke by setting
+``settings_pack::choking_algorithm`` to ``fixed_slot_choker`` and set the number
+of upload slots to a large number via ``settings_pack::unchoke_slots_limit``,
+or use -1 (which means infinite).
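
A minimal sketch of these settings applied through a ``settings_pack``. The
connection count is an arbitrary example, and the choker enumerator is assumed
to be spelled ``settings_pack::fixed_slots_choker`` in settings_pack.hpp:

	#include <libtorrent/session.hpp>
	#include <libtorrent/settings_pack.hpp>

	void tune_for_many_peers(libtorrent::session& ses)
	{
		namespace lt = libtorrent;

		lt::settings_pack p;
		p.set_int(lt::settings_pack::connections_limit, 8000); // allow many connections
		p.set_int(lt::settings_pack::upload_rate_limit, 0);    // 0 means unlimited
		p.set_bool(lt::settings_pack::allow_multiple_connections_per_ip, true);

		// always unchoke: fixed slots instead of the automatic unchoker,
		// with -1 (unlimited) upload slots
		p.set_int(lt::settings_pack::choking_algorithm
			, lt::settings_pack::fixed_slots_choker);
		p.set_int(lt::settings_pack::unchoke_slots_limit, -1);

		ses.apply_settings(p);
	}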

 torrent limits
 --------------
@@ -356,12 +333,12 @@ the returned vector. If you have a lot of torrents, you might want to update the
 of only certain torrents. For instance, you might only be interested in torrents that
 are being downloaded.

-The intended use of these functions is to start off by calling ``get_torrent_status``
-to get a list of all torrents that match your criteria. Then call ``refresh_torrent_status``
+The intended use of these functions is to start off by calling ``get_torrent_status()``
+to get a list of all torrents that match your criteria. Then call ``refresh_torrent_status()``
 on that list. This will only refresh the status for the torrents in your list, and thus
 ignore all other torrents you might be running. This may save a significant amount of
 time, especially if the number of torrents you're interested in is small. In order to
-keep your list of interested torrents up to date, you can either call ``get_torrent_status``
+keep your list of interested torrents up to date, you can either call ``get_torrent_status()``
 from time to time, to include torrents you might have become interested in since the last
 time. In order to stop refreshing a certain torrent, simply remove it from the list.
@@ -370,6 +347,9 @@ update your list based on these alerts. There are alerts for when torrents are a
 paused, resumed, completed etc. Doing this ensures that you only query status for the
 minimal set of torrents you are actually interested in.

+To get an update with only the torrents that have changed since last time, call
+``session::post_torrent_updates()``.
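
A minimal sketch of that polling pattern, assuming the ``lt::session`` member
functions ``get_torrent_status()``, ``refresh_torrent_status()`` and
``post_torrent_updates()``; the downloading-only predicate is just an example
criterion:

	#include <libtorrent/session.hpp>
	#include <libtorrent/torrent_status.hpp>

	#include <vector>

	void poll_downloading_torrents(libtorrent::session& ses)
	{
		namespace lt = libtorrent;

		// build the list once, keeping only the torrents we care about
		std::vector<lt::torrent_status> status;
		ses.get_torrent_status(&status, [](lt::torrent_status const& st)
			{ return st.state == lt::torrent_status::downloading; });

		// later, refresh just those entries in place
		ses.refresh_torrent_status(&status);

		// or ask for deltas only; the changed statuses arrive in a
		// state_update_alert (see the sketch further down)
		ses.post_torrent_updates();
	}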

 benchmarking
 ============
@@ -428,7 +408,7 @@ covered here, or if you have improved any of the parser scrips, please consider
 contributing it back to the project.

 If you have run tests and found that some algorithm or default value in
-libtorrent is suboptimal, please contribute that knowledge back as well, to
+libtorrent are suboptimal, please contribute that knowledge back as well, to
 allow us to improve the library.

 If you have additional suggestions on how to tune libtorrent for any specific

View File

@@ -1558,6 +1558,8 @@ namespace libtorrent
	// This alert is posted approximately once every second, and it contains
	// byte counters of most statistics that's tracked for torrents. Each active
	// torrent posts these alerts regularly.
+	// This alert has been superseded by calling ``post_torrent_updates()``
+	// regularly on the session object. This alert will be removed
	struct TORRENT_EXPORT stats_alert final : torrent_alert
	{
		// internal
@@ -2440,7 +2442,7 @@ namespace libtorrent
	// this is posted when one or more blocks are picked by the piece picker,
	// assuming the verbose piece picker logging is enabled (see
	// picker_log_notification).
-	struct TORRENT_EXPORT picker_log_alert : peer_alert
+	struct TORRENT_EXPORT picker_log_alert final : peer_alert
	{
		// internal
		picker_log_alert(aux::stack_allocator& alloc, torrent_handle const& h
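
The deprecation note added to ``stats_alert`` above points at
``post_torrent_updates()`` as the replacement. A minimal sketch of that path,
assuming it runs as part of a regular alert-handling loop:

	#include <libtorrent/session.hpp>
	#include <libtorrent/alert_types.hpp>
	#include <libtorrent/time.hpp>

	#include <cstdio>
	#include <vector>

	void print_changed_torrents(libtorrent::session& ses)
	{
		namespace lt = libtorrent;

		// ask the session for one batch of statuses, covering only the
		// torrents that changed since the previous call
		ses.post_torrent_updates();
		ses.wait_for_alert(lt::seconds(1));

		std::vector<lt::alert*> alerts;
		ses.pop_alerts(&alerts);
		for (lt::alert* a : alerts)
		{
			auto* upd = lt::alert_cast<lt::state_update_alert>(a);
			if (upd == nullptr) continue;

			int total_rate = 0;
			for (lt::torrent_status const& st : upd->status)
				total_rate += st.download_payload_rate;
			std::printf("%d torrent(s) changed, combined download rate: %d B/s\n"
				, int(upd->status.size()), total_rate);
		}
	}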

View File

@@ -40,6 +40,8 @@ POSSIBILITY OF SUCH DAMAGE.
 #include "libtorrent/hasher.hpp"
 #include "libtorrent/string_view.hpp"
 #include "libtorrent/aux_/vector.hpp"
+#include "libtorrent/file.hpp" // for combine_path etc.
+#include "libtorrent/version.hpp"

 #include <vector>
 #include <string>

View File

@@ -251,10 +251,10 @@ namespace libtorrent
		// in a swarm has the same IP address.
		allow_multiple_connections_per_ip = bool_type_base,

+#ifndef TORRENT_NO_DEPRECATE
		// if set to true, upload, download and unchoke limits are ignored for
		// peers on the local network. This option is *DEPRECATED*, please use
		// set_peer_class_filter() instead.
-#ifndef TORRENT_NO_DEPRECATE
		ignore_limits_on_local_network,
 #else
		deprecated1,
@@ -946,7 +946,7 @@ namespace libtorrent
		// still allocates all upload capacity, but shuffles it around to
		// the best peers first. For this choker to be efficient, you need
		// to set a global upload rate limit
-		// (``session::set_upload_rate_limit()``). For more information
+		// (``settings_pack::upload_rate_limit``). For more information
		// about this choker, see the paper_. This choker is not fully
		// implemented nor tested.
		//
@@ -1179,6 +1179,7 @@ namespace libtorrent
		// allowed to grow to.
		max_peer_recv_buffer_size,

+#ifndef TORRENT_NO_DEPRECATE
		// ``file_checks_delay_per_block`` is the number of milliseconds to
		// sleep in between disk read operations when checking torrents. This
		// defaults to 0, but can be set to higher numbers to slow down the
@@ -1187,6 +1188,9 @@ namespace libtorrent
		// bit longer, as long as they leave disk I/O time for other
		// processes.
		file_checks_delay_per_block,
+#else
+		deprecated14,
+#endif

		// ``read_cache_line_size`` is the number of blocks to read into the
		// read cache when a read cache miss occurs. Setting this to 0 is
@@ -1300,20 +1304,11 @@ namespace libtorrent
		// share_mode_target is set to more than 3, nothing is downloaded.
		share_mode_target,

-		// ``upload_rate_limit``, ``download_rate_limit``,
-		// ``local_upload_rate_limit`` and ``local_download_rate_limit`` sets
+		// ``upload_rate_limit`` and ``download_rate_limit`` sets
		// the session-global limits of upload and download rate limits, in
-		// bytes per second. The local rates refer to peers on the local
-		// network. By default peers on the local network are not rate
+		// bytes per second. By default peers on the local network are not rate
		// limited.
		//
-		// These rate limits are only used for local peers (peers within the
-		// same subnet as the client itself) and it is only used when
-		// ``ignore_limits_on_local_network`` is set to true (which it is by
-		// default). These rate limits default to unthrottled, but can be
-		// useful in case you want to treat local peers preferentially, but
-		// not quite unthrottled.
-		//
		// A value of 0 means unlimited.
		upload_rate_limit,
		download_rate_limit,
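
A minimal sketch of applying these two settings to a running session; the
numbers are arbitrary examples, and 0 disables the limit:

	#include <libtorrent/session.hpp>
	#include <libtorrent/settings_pack.hpp>

	void set_rate_limits(libtorrent::session& ses)
	{
		namespace lt = libtorrent;

		lt::settings_pack p;
		// session-global limits, in bytes per second
		p.set_int(lt::settings_pack::download_rate_limit, 2 * 1024 * 1024);
		p.set_int(lt::settings_pack::upload_rate_limit, 0); // 0 means unlimited
		ses.apply_settings(p);
	}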

View File

@@ -368,7 +368,7 @@ namespace libtorrent
	create_torrent::create_torrent(torrent_info const& ti)
		: m_files(const_cast<file_storage&>(ti.files()))
-		, m_creation_date(time(nullptr))
+		, m_creation_date(time(0))
		, m_multifile(ti.num_files() > 1)
		, m_private(ti.priv())
		, m_merkle_torrent(ti.is_merkle_torrent())

View File

@@ -677,7 +677,7 @@ namespace libtorrent
		for (;i != udp::resolver_iterator(); ++i)
		{
			iface.interface_address = i->endpoint().address();
-			iface.name[0] = 0;
+			iface.name[0] = '\0';
			iface.mtu = 1500;
			if (iface.interface_address.is_v4())
				iface.netmask = address_v4::netmask(iface.interface_address.to_v4());

View File

@@ -79,11 +79,6 @@ namespace libtorrent
		// connect to 5 peers per second
		set.set_int(settings_pack::connection_speed, 5);

-		// be extra nice on the hard drive when running
-		// on embedded devices. This might slow down
-		// torrent checking
-		set.set_int(settings_pack::file_checks_delay_per_block, 5);
-
		// only have 4 files open at a time
		set.set_int(settings_pack::file_pool_size, 4);

View File

@@ -588,12 +588,11 @@ namespace aux {
		TORRENT_ASSERT(is_single_thread());

 #ifndef TORRENT_DISABLE_LOGGING
-		// this alert is a bit special. Since it's so verbose it's not only
-		// filtered by its own alert type (log_alert) but also whether session
-		// stats alerts are actually enabled. Without session_stats alerts the
-		// headers aren't very useful anyway
+		// this alert is a bit special. The stats headers aren't very useful
+		// unless session_stats is enabled, so it's posted in the session_stats
+		// category as well
		if (m_alerts.should_post<log_alert>()
-			&& m_alerts.should_post<session_stats_alert>())
+			|| m_alerts.should_post<session_stats_alert>())
		{
			session_log(" *** session thread init");

View File

@@ -262,7 +262,7 @@ namespace libtorrent
		SET(recv_socket_buffer_size, 0, &session_impl::update_socket_buffer_size),
		SET(send_socket_buffer_size, 0, &session_impl::update_socket_buffer_size),
		SET(max_peer_recv_buffer_size, 2 * 1024 * 1024, nullptr),
-		SET(file_checks_delay_per_block, 0, nullptr),
+		DEPRECATED_SET(file_checks_delay_per_block, 0, nullptr),
		SET(read_cache_line_size, 32, nullptr),
		SET(write_cache_line_size, 16, nullptr),
		SET(optimistic_disk_retry, 10 * 60, nullptr),

View File

@@ -5980,9 +5980,14 @@ namespace libtorrent
	{
		if (m_magnet_link || (m_save_resume_flags & torrent_handle::save_info_dict))
		{
-			boost::shared_array<char> const info = torrent_file().metadata();
-			int const size = torrent_file().metadata_size();
-			ret["info"].preformatted().assign(&info[0], &info[0] + size);
+			ret["info"] = bdecode(&torrent_file().metadata()[0]
+				, &torrent_file().metadata()[0] + torrent_file().metadata_size());
+			// TODO: re-enable this code once there's a non-inlined encoder function. Or
+			// perhaps this should not be used until saving resume_data via
+			// add_torrent_params and a free function, similar to read_resume_data
+			// boost::shared_array<char> const info = torrent_file().metadata();
+			// int const size = torrent_file().metadata_size();
+			// ret["info"].preformatted().assign(&info[0], &info[0] + size);
		}
	}

View File

@@ -47,6 +47,7 @@ POSSIBILITY OF SUCH DAMAGE.
 #include "libtorrent/assert.hpp"
 #include "libtorrent/file.hpp"
 #include "libtorrent/random.hpp"
+#include "libtorrent/aux_/escape_string.hpp"

 #include <csignal>

 #ifdef _WIN32
@@ -217,6 +218,51 @@ void print_usage(char const* executable)
		"by -l. If no test is specified, all tests are run\n", executable);
 }

+void change_directory(std::string const& f, error_code& ec)
+{
+	ec.clear();
+
+#ifdef TORRENT_WINDOWS
+#if TORRENT_USE_WSTRING
+#define SetCurrentDirectory_ SetCurrentDirectoryW
+	std::wstring n = convert_to_wstring(f);
+#else
+#define SetCurrentDirectory_ SetCurrentDirectoryA
+	std::string const& n = convert_to_native(f);
+#endif // TORRENT_USE_WSTRING
+
+	if (SetCurrentDirectory_(n.c_str()) == 0)
+		ec.assign(GetLastError(), system_category());
+#else
+	std::string n = convert_to_native(f);
+	int ret = ::chdir(n.c_str());
+	if (ret != 0)
+		ec.assign(errno, system_category());
+#endif
+}
+
+struct unit_directory_guard
+{
+	std::string dir;
+	unit_directory_guard(unit_directory_guard const&) = delete;
+	unit_directory_guard& operator=(unit_directory_guard const&) = delete;
+	~unit_directory_guard()
+	{
+		if (keep_files) return;
+		error_code ec;
+		std::string const parent_dir = parent_path(dir);
+		// windows will not allow to remove current dir, so let's change it to root
+		change_directory(parent_dir, ec);
+		if (ec)
+		{
+			TEST_ERROR("Failed to change directory: " + ec.message());
+			return;
+		}
+		remove_all(dir, ec);
+		if (ec) TEST_ERROR("Failed to remove unit test directory: " + ec.message());
+	}
+};
+
 EXPORT int main(int argc, char const* argv[])
 {
	char const* executable = argv[0];
@@ -315,32 +361,12 @@ EXPORT int main(int argc, char const* argv[])
 #else
	process_id = getpid();
 #endif
+	std::string root_dir = current_working_directory();
	char dir[40];
-	std::snprintf(dir, sizeof(dir), "test_tmp_%u", process_id);
-	std::string test_dir = complete(dir);
-	error_code ec;
-	create_directory(test_dir, ec);
-	if (ec)
-	{
-		std::printf("Failed to create test directory: %s\n", ec.message().c_str());
-		return 1;
-	}
-	int ret;
-#ifdef TORRENT_WINDOWS
-	SetCurrentDirectoryA(dir);
-#else
-	ret = chdir(dir);
-	if (ret != 0)
-	{
-		std::printf("failed to change directory to \"%s\": %s"
-			, dir, strerror(errno));
-		return 1;
-	}
-#endif
-	std::printf("test: %s\ncwd = \"%s\"\nrnd: %x\n"
-		, executable, test_dir.c_str(), libtorrent::random(0xffffffff));
-
-	int total_failures = 0;
+	snprintf(dir, sizeof(dir), "test_tmp_%u", process_id);
+	std::string unit_dir_prefix = combine_path(root_dir, dir);
+	std::printf("test: %s\ncwd_prefix = \"%s\"\nrnd = %x\n"
+		, executable, unit_dir_prefix.c_str(), libtorrent::random(0xffffffff));

	if (_g_num_unit_tests == 0)
	{
@@ -357,6 +383,25 @@ EXPORT int main(int argc, char const* argv[])
		if (filter && tests_to_run.count(_g_unit_tests[i].name) == 0)
			continue;

+		std::string unit_dir = unit_dir_prefix;
+		char i_str[40];
+		snprintf(i_str, sizeof(i_str), "%u", i);
+		unit_dir.append(i_str);
+		error_code ec;
+		create_directory(unit_dir, ec);
+		if (ec)
+		{
+			std::printf("Failed to create unit test directory: %s\n", ec.message().c_str());
+			return 1;
+		}
+		unit_directory_guard unit_dir_guard{unit_dir};
+		change_directory(unit_dir, ec);
+		if (ec)
+		{
+			std::printf("Failed to change unit test directory: %s\n", ec.message().c_str());
+			return 1;
+		}
+
		unit_test_t& t = _g_unit_tests[i];

		if (redirect_stdout || redirect_stderr)
@@ -399,7 +444,6 @@ EXPORT int main(int argc, char const* argv[])
		try
		{
 #endif
-
			_g_test_failures = 0;
			(*t.fun)();
 #ifndef BOOST_NO_EXCEPTIONS
@@ -434,7 +478,6 @@ EXPORT int main(int argc, char const* argv[])
		t.num_failures = _g_test_failures;
		t.run = true;
-		total_failures += _g_test_failures;
		++num_run;

		if (redirect_stdout && t.output)
@@ -471,16 +514,6 @@ EXPORT int main(int argc, char const* argv[])
	if (redirect_stdout) fflush(stdout);
	if (redirect_stderr) fflush(stderr);

-	ret = print_failures();
-#if !defined TORRENT_LOGGING
-	if (ret == 0 && !keep_files)
-	{
-		remove_all(test_dir, ec);
-		if (ec)
-			std::printf("failed to remove test dir: %s\n", ec.message().c_str());
-	}
-#endif
-
-	return total_failures ? 333 : 0;
+	return print_failures() ? 333 : 0;
 }

View File

@@ -36,7 +36,7 @@ POSSIBILITY OF SUCH DAMAGE.
 unit_test_t _g_unit_tests[1024];
 int _g_num_unit_tests = 0;
-int _g_test_failures = 0;
+int _g_test_failures = 0; // flushed at start of every unit
 int _g_test_idx = 0;

 static std::vector<std::string> failure_strings;
@@ -65,6 +65,7 @@ int print_failures()
	}

	std::printf("\n\n");

+	int total_num_failures = 0;
	for (int i = 0; i < _g_num_unit_tests; ++i)
	{
@@ -77,6 +78,7 @@ int print_failures()
		}
		else
		{
+			total_num_failures += _g_unit_tests[i].num_failures;
			std::printf("\x1b[31m[%-*s] %d FAILURES\n"
				, longest_name
				, _g_unit_tests[i].name
@@ -86,8 +88,8 @@ int print_failures()
	std::printf("\x1b[0m");

-	if (_g_test_failures > 0)
-		std::printf("\n\n\x1b[41m == %d TEST(S) FAILED ==\x1b[0m\n\n\n", _g_test_failures);
+	if (total_num_failures > 0)
+		std::printf("\n\n\x1b[41m == %d TEST(S) FAILED ==\x1b[0m\n\n\n", total_num_failures);

-	return _g_test_failures;
+	return total_num_failures;
 }