merged RC_1_1 into master

arvidn 2016-03-16 19:59:53 -04:00
commit f79a9c7adf
37 changed files with 884 additions and 525 deletions

View File

@ -7,6 +7,8 @@
1.1.0 release
* improve robustness and performance of uTP PMTU discovery
* fix duplicate ACK issue in uTP
* support filtering which parts of session state are loaded by load_state()
* deprecate support for adding torrents by HTTP URL
* allow specifying which tracker to scrape in scrape_tracker

View File

@ -174,6 +174,7 @@ bool print_trackers = false;
bool print_peers = false;
bool print_log = false;
bool print_downloads = false;
bool print_matrix = false;
bool print_file_progress = false;
bool show_pad_files = false;
bool show_dht_status = false;
@ -378,9 +379,10 @@ FILE* g_log_file = 0;
std::string const& piece_bar(libtorrent::bitfield const& p, int width)
{
#ifdef _WIN32
int const table_size = 2;
int const table_size = 5;
#else
int const table_size = 18;
width *= 2; // we only print one character for every two "slots"
#endif
double const piece_per_char = p.size() / double(width);
@ -397,6 +399,10 @@ std::string const& piece_bar(libtorrent::bitfield const& p, int width)
// the [piece, piece + pieces_per_char) range is the pieces that are represented by each character
double piece = 0;
// we print two blocks at a time, so calculate the colors in pairs
int color[2];
int last_color[2] = { -1, -1};
for (int i = 0; i < width; ++i, piece += piece_per_char)
{
int num_pieces = 0;
@ -404,15 +410,31 @@ std::string const& piece_bar(libtorrent::bitfield const& p, int width)
int end = (std::max)(int(piece + piece_per_char), int(piece) + 1);
for (int k = int(piece); k < end; ++k, ++num_pieces)
if (p[k]) ++num_have;
int color = int(std::ceil(num_have / float((std::max)(num_pieces, 1)) * (table_size - 1)));
int const c = int(std::ceil(num_have / float((std::max)(num_pieces, 1)) * (table_size - 1)));
char buf[40];
#ifdef _WIN32
snprintf(buf, sizeof(buf), "\x1b[4%dm", color ? 7 : 0);
color[i & 1] = c;
#ifndef _WIN32
if ((i & 1) == 1)
{
// now, print color[0] and [1]
// bg determines whether we're setting the foreground or background color
static int const bg[] = { 38, 48};
for (int i = 0; i < 2; ++i)
{
if (color[i] != last_color[i])
{
snprintf(buf, sizeof(buf), "\x1b[%d;5;%dm", bg[i & 1], 232 + color[i]);
last_color[i] = color[i];
bar += buf;
}
}
bar += "\u258C";
}
#else
snprintf(buf, sizeof(buf), "\x1b[48;5;%dm", 232 + color);
static char const table[] = {' ', '\xb0', '\xb1', '\xb2', '\xdb'};
bar += table[c];
#endif
bar += buf;
bar += " ";
}
bar += esc("0");
bar += "]";
@ -1924,6 +1946,7 @@ int main(int argc, char* argv[])
if (c == 'i') print_peers = !print_peers;
if (c == 'l') print_log = !print_log;
if (c == 'd') print_downloads = !print_downloads;
if (c == 'y') print_matrix = !print_matrix;
if (c == 'f') print_file_progress = !print_file_progress;
if (c == 'P') show_pad_files = !show_pad_files;
if (c == 'g') show_dht_status = !show_dht_status;
@ -1967,8 +1990,8 @@ int main(int argc, char* argv[])
"[i] toggle show peers [d] toggle show downloading pieces\n"
"[u] show uTP stats [f] toggle show files\n"
"[g] show DHT [x] toggle disk cache stats\n"
"[t] show trackers [l] show alert log\n"
"[P] show pad files (in file list)\n"
"[t] show trackers [l] toggle show log\n"
"[P] show pad files (in file list) [y] toggle show piece matrix\n"
"\n"
"COLUMN OPTIONS\n"
"[1] toggle IP column [2]\n"
@ -2107,6 +2130,13 @@ int main(int argc, char* argv[])
}
}
if (print_matrix)
{
int height = 0;
print(piece_matrix(s.pieces, terminal_width, &height).c_str());
pos += height;
}
if (print_downloads)
{
h.get_download_queue(queue);

View File

@ -128,6 +128,101 @@ std::string const& progress_bar(int progress, int width, color_code c
return bar;
}
bool get_piece(libtorrent::bitfield const& p, int index)
{
if (index < 0 || index >= p.size()) return false;
return p.get_bit(index);
}
#ifndef _WIN32
// this function uses the block characters that split the glyph into 4
// segments and provides all combinations of a segment lit or not. This
// allows us to print 4 pieces per character.
std::string piece_matrix(libtorrent::bitfield const& p, int width, int* height)
{
// print two rows of pieces at a time
int piece = 0;
++*height;
std::string ret;
ret.reserve((p.size() + width * 2 - 1) / width / 2 * 4);
while (piece < p.size())
{
for (int i = 0; i < width; ++i)
{
// each character has 4 pieces. store them in a byte to use for lookups
int const c = get_piece(p, piece)
| (get_piece(p, piece+1) << 1)
| (get_piece(p, width*2+piece) << 2)
| (get_piece(p, width*2+piece+1) << 3);
// we have 4 bits, 16 different combinations
static char const* const chars[] =
{
" ", // no bit is set 0000
"\u2598", // upper left 0001
"\u259d", // upper right 0010
"\u2580", // both top bits 0011
"\u2596", // lower left 0100
"\u258c", // both left bits 0101
"\u259e", // upper right, lower left 0110
"\u259b", // left and upper sides 0111
"\u2597", // lower right 1000
"\u259a", // lower right, upper left 1001
"\u2590", // right side 1010
"\u259c", // lower right, top side 1011
"\u2584", // both lower bits 1100
"\u2599", // both lower, top left 1101
"\u259f", // both lower, top right 1110
"\x1b[7m \x1b[27m" // all bits are set (full block)
};
ret += chars[c];
piece += 2;
}
ret += '\n';
++*height;
piece += width * 2; // skip another row, as we've already printed it
}
return ret;
}
#else
// on MS-DOS terminals, we only have block characters for upper half and lower
// half. This lets us print two pieces per character.
std::string piece_matrix(libtorrent::bitfield const& p, int width, int* height)
{
// print two rows of pieces at a time
int piece = 0;
++*height;
std::string ret;
ret.reserve((p.size() + width * 2 - 1) / width);
while (piece < p.size())
{
for (int i = 0; i < width; ++i)
{
// each character covers 2 pieces (one from the top row, one from the
// bottom row). store them in two bits to use for lookups
int const c = get_piece(p, piece)
| (get_piece(p, width*2+piece) << 1);
static char const* const chars[] =
{
" ", // no piece 00
"\xdf", // top piece 01
"\xdc", // bottom piece 10
"\xdb" // both pieces 11
};
ret += chars[c];
++piece;
}
ret += '\n';
++*height;
piece += width * 2; // skip another row, as we've already printed it
}
return ret;
}
#endif
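// a hypothetical usage sketch for piece_matrix() above, assuming it is
// linked together with this translation unit; the bitfield contents are
// made up. For example, a cell whose top-left and bottom-right pieces are
// set packs to c = 0b1001 and prints "\u259a".
#include "libtorrent/bitfield.hpp"
#include <cstdio>
#include <string>

std::string piece_matrix(libtorrent::bitfield const& p, int width, int* height);

int main()
{
	libtorrent::bitfield pieces(400, false);
	for (int i = 0; i < 400; i += 3) pieces.set_bit(i); // every third piece
	int height = 0;
	std::printf("%s", piece_matrix(pieces, 40, &height).c_str());
	std::printf("matrix is %d rows tall\n", height);
	return 0;
}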
void set_cursor_pos(int x, int y)
{
#ifdef _WIN32

View File

@ -2,6 +2,7 @@
#define PRINT_HPP_
#include <string>
#include "libtorrent/bitfield.hpp"
enum color_code
{
@ -36,6 +37,7 @@ void clear_screen();
void clear_rows(int y1, int y2);
void terminal_size(int* terminal_width, int* terminal_height);
std::string piece_matrix(libtorrent::bitfield const& p, int width, int* height);
void print(char const* str);

View File

@ -180,7 +180,7 @@ namespace libtorrent
}
#endif
// returns true if we can announec to this tracker now.
// returns true if we can announce to this tracker now.
// The current time is passed in as ``now``. The ``is_seed``
// argument is necessary because once we become a seed, we
// need to announce right away, even if the re-announce timer

View File

@ -135,7 +135,7 @@ namespace libtorrent
enum { max_refcount = (1 << 30) - 1 };
// the number of references to this buffer. These references
// might be in outstanding asyncronous requests or in peer
// might be in outstanding asynchronous requests or in peer
// connection send buffers. We can't free the buffer until
// all references are gone and refcount reaches 0. The buf
// pointer in this struct doesn't count as a reference and
@ -449,6 +449,9 @@ namespace libtorrent
// that couldn't be
int try_evict_blocks(int num, cached_piece_entry* ignore = 0);
// try to evict a single volatile piece, if there is one.
void try_evict_one_volatile();
// if there are any dirty blocks
void clear(tailqueue<disk_io_job>& jobs);
@ -463,6 +466,7 @@ namespace libtorrent
void dec_block_refcount(cached_piece_entry* pe, int block, int reason);
int pinned_blocks() const { return m_pinned_blocks; }
int read_cache_size() const { return m_read_cache_size; }
#if TORRENT_USE_ASSERTS
void mark_deleted(file_storage const& fs);
@ -507,9 +511,18 @@ namespace libtorrent
// this is determined by being a fraction of the cache size
int m_ghost_size;
// this is the max number of volatile read cache blocks allowed in the
// cache. Once this is reached, other volatile blocks will start to be
// evicted.
int m_max_volatile_blocks;
// the number of blocks (buffers) allocated by volatile pieces.
boost::uint32_t m_volatile_size;
// the number of blocks in the cache
// that are in the read cache
boost::uint32_t m_read_cache_size;
// the number of blocks in the cache
// that are in the write cache
boost::uint32_t m_write_cache_size;

View File

@ -179,8 +179,6 @@ POSSIBILITY OF SUCH DAMAGE.
# endif
#include <AvailabilityMacros.h>
#define TORRENT_USE_PURGABLE_CONTROL 1
#if MAC_OS_X_VERSION_MIN_REQUIRED >= 1070
// on OSX, use the built-in common crypto for built-in
# if !defined TORRENT_USE_OPENSSL && !defined TORRENT_USE_GCRYPT
@ -620,10 +618,6 @@ int snprintf(char* buf, int len, char const* fmt, ...)
#define TORRENT_HAS_BOOST_UNORDERED 1
#endif
#ifndef TORRENT_USE_PURGABLE_CONTROL
#define TORRENT_USE_PURGABLE_CONTROL 0
#endif
#if !defined TORRENT_IOV_MAX
#ifdef IOV_MAX
#define TORRENT_IOV_MAX IOV_MAX

View File

@ -135,7 +135,7 @@ namespace libtorrent
// of buffers in use drops below the low watermark,
// we start calling these functions back
// TODO: try to remove the observers, only using the async_allocate handlers
std::vector<boost::shared_ptr<disk_observer> > m_observers;
std::vector<boost::weak_ptr<disk_observer> > m_observers;
// these handlers are executed when a new buffer is available
std::vector<handler_t> m_handlers;

View File

@ -84,7 +84,7 @@ namespace libtorrent
, boost::function<void(disk_io_job const*)> const& handler)= 0;
virtual void async_rename_file(piece_manager* storage, int index, std::string const& name
, boost::function<void(disk_io_job const*)> const& handler) = 0;
virtual void async_delete_files(piece_manager* storage
virtual void async_delete_files(piece_manager* storage, int options
, boost::function<void(disk_io_job const*)> const& handler) = 0;
virtual void async_set_file_priority(piece_manager* storage
, std::vector<boost::uint8_t> const& prio

View File

@ -136,11 +136,7 @@ namespace libtorrent
in_progress = 0x20,
// turns into file::coalesce_buffers in the file operation
coalesce_buffers = 0x40,
// the disk cache was enabled when this job was issued, it should use
// the disk cache once it's handled by a disk thread
use_disk_cache = 0x80
coalesce_buffers = 0x40
};
// for write jobs, returns true if its block
@ -164,6 +160,7 @@ namespace libtorrent
add_torrent_params const* check_resume_data;
std::vector<boost::uint8_t>* priorities;
torrent_info* torrent_file;
int delete_options;
} buffer;
// the disk storage this job applies to (if applicable)

View File

@ -1,6 +1,6 @@
/*
Copyright (c) 2007-2016, Arvid Norberg
Copyright (c) 2007-2016, Arvid Norberg, Steven Siloti
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -311,7 +311,7 @@ namespace libtorrent
void async_release_files(piece_manager* storage
, boost::function<void(disk_io_job const*)> const& handler
= boost::function<void(disk_io_job const*)>()) TORRENT_OVERRIDE;
void async_delete_files(piece_manager* storage
void async_delete_files(piece_manager* storage, int options
, boost::function<void(disk_io_job const*)> const& handler) TORRENT_OVERRIDE;
void async_check_files(piece_manager* storage
, add_torrent_params const* resume_data
@ -547,6 +547,13 @@ namespace libtorrent
// disk cache
mutable mutex m_cache_mutex;
block_cache m_disk_cache;
enum
{
cache_check_idle,
cache_check_active,
cache_check_reinvoke
};
int m_cache_check_state;
// total number of blocks in use by both the read
// and the write cache. This is not supposed to
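// a standalone sketch (with illustrative names) of the cache_check_*
// protocol declared above, as driven by the loop added to
// disk_io_thread::perform_job later in this commit: a request arriving
// while a check is already running is coalesced into exactly one re-run
// instead of a recursive or queued invocation. In the real code the state
// is read and written under m_cache_mutex.
enum { check_idle, check_active, check_reinvoke };

void request_check(int& state)
{
	if (state == check_idle)
	{
		state = check_active;
		while (state != check_idle)
		{
			// run_check(); // the actual cache-level check would go here
			// another caller may have set check_reinvoke meanwhile;
			// decrementing takes reinvoke -> active (loop again) or
			// active -> idle (done)
			--state;
		}
	}
	else
	{
		state = check_reinvoke;
	}
}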

View File

@ -258,7 +258,7 @@ namespace libtorrent
// called once per second
virtual void on_tick() {}
// called when choosing peers to optimisticallly unchoke. peer's will be
// called when choosing peers to optimistically unchoke. peers will be
// unchoked in the order they appear in the given vector. if
// the plugin returns true then the ordering provided will be used and no
// other plugin will be allowed to change it. If your plugin expects this
@ -414,7 +414,7 @@ namespace libtorrent
virtual bool on_extension_handshake(bdecode_node const&) { return true; }
// returning true from any of the message handlers
// indicates that the plugin has handeled the message.
// indicates that the plugin has handled the message.
// it will break the plugin chain traversing and not let
// anyone else handle the message, including the default
// handler.
@ -470,7 +470,7 @@ namespace libtorrent
virtual void on_piece_pass(int /*index*/) {}
virtual void on_piece_failed(int /*index*/) {}
// called aproximately once every second
// called approximately once every second
virtual void tick() {}
// called each time a request message is to be sent. If true
@ -491,7 +491,7 @@ namespace libtorrent
// are now ready to be sent to the lower layer. This must be at least
// as large as the number of bytes passed in and may be larger if there
// is additional data to be inserted at the head of the send buffer.
// The additional data is retrived from the passed in vector. The
// The additional data is retrieved from the passed in vector. The
// vector must be cleared if no additional data is to be inserted.
virtual int encrypt(std::vector<boost::asio::mutable_buffer>& /*send_vec*/) = 0;

View File

@ -511,7 +511,7 @@ namespace libtorrent
// a block or piece boundary.
flag_pad_file = 1,
// this file is hiddent (sets the hidden attribute
// this file is hidden (sets the hidden attribute
// on windows)
flag_hidden = 2,
@ -534,7 +534,7 @@ namespace libtorrent
int file_index_at_offset(boost::int64_t offset) const;
// low-level function. returns a pointer to the internal storage for
// the filename. This string may not be null terinated!
// the filename. This string may not be null terminated!
// the ``file_name_len()`` function returns the length of the filename.
char const* file_name_ptr(int index) const;
int file_name_len(int index) const;
@ -637,7 +637,7 @@ namespace libtorrent
// this is always the root directory
std::string m_name;
// the sum of all filesizes
// the sum of all file sizes
boost::int64_t m_total_size;
// the number of files. This is used when

View File

@ -53,7 +53,7 @@ namespace libtorrent
namespace gzip_errors
{
// libtorrent uses boost.system's ``error_code`` class to represent errors. libtorrent has
// its own error category get_gzip_category() whith the error codes defined by error_code_enum.
// its own error category get_gzip_category() with the error codes defined by error_code_enum.
enum error_code_enum
{
// Not an error

View File

@ -937,6 +937,10 @@ namespace libtorrent
time_point m_last_receive;
time_point m_last_sent;
// the last time we filled our send buffer with payload
// this is used for timeouts
time_point m_last_sent_payload;
// the time when the first entry in the request queue was requested. Used
// for request timeout. it doesn't necessarily represent the time when a
// specific request was made. Since requests can be handled out-of-order,

View File

@ -335,7 +335,7 @@ namespace libtorrent
num_loaded_torrents,
num_pinned_torrents,
// these counter indices deliberatly
// these counter indices deliberately
// match the order of socket type IDs
// defined in socket_type.hpp.
num_tcp_peers,

View File

@ -394,7 +394,7 @@ namespace libtorrent
// store the given bencoded data as an immutable item in the DHT.
// the returned hash is the key that is to be used to look the item
// up agan. It's just the sha-1 hash of the bencoded form of the
// up again. It's just the sha-1 hash of the bencoded form of the
// structure.
sha1_hash dht_put_item(entry data);
@ -767,7 +767,11 @@ namespace libtorrent
enum options_t
{
// delete the files belonging to the torrent from disk.
delete_files = 1
// including the part-file, if there is one
delete_files = 1,
// delete just the part-file associated with this torrent
delete_partfile = 2
};
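// usage, as exercised by the new simulation tests later in this commit:
// ses.remove_torrent(h, session::delete_files);    // remove data + part-file
// ses.remove_torrent(h, session::delete_partfile); // keep data, drop part-file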
// flags to be passed in to the session constructor

View File

@ -268,14 +268,16 @@ namespace libtorrent
// passes the hash check, it is taken out of parole mode.
use_parole_mode,
// enable and disable caching of read blocks and blocks to be written
// to disk respsectively. the purpose of the read cache is partly
// read-ahead of requests but also to avoid reading blocks back from
// the disk multiple times for popular pieces. the write cache purpose
// is to hold off writing blocks to disk until they have been hashed,
// to avoid having to read them back in again.
// enable and disable caching of blocks read from disk. the purpose of
// the read cache is partly read-ahead of requests but also to avoid
// reading blocks back from the disk multiple times for popular
// pieces.
use_read_cache,
#ifndef TORRENT_NO_DEPRECATED
use_write_cache,
#else
deprecated7,
#endif
// this will make the disk cache never flush a write piece if it would
// cause it to have to re-read it once we want to calculate the piece
@ -1541,6 +1543,16 @@ namespace libtorrent
// .. _i2p: http://www.i2p2.de
i2p_port,
// this determines the max number of volatile disk cache blocks. If the
// number of volatile blocks exceed this limit, other volatile blocks
// will start to be evicted. A disk cache block is volatile if it has
// low priority, and should be one of the first blocks to be evicted
// under pressure. For instance, blocks pulled into the cache as the
// result of calculating a piece hash are volatile. These blocks don't
// represent potential interest among peers, so the value of keeping
// them in the cache is limited.
cache_size_volatile,
max_int_setting_internal
};
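// a short example of driving the new setting from client code; the values
// here are illustrative, and `ses` is assumed to be an existing session:
#include "libtorrent/session.hpp"
#include "libtorrent/settings_pack.hpp"
namespace lt = libtorrent;

void limit_volatile_cache(lt::session& ses)
{
	lt::settings_pack p;
	p.set_int(lt::settings_pack::cache_size, 1024);        // 16 MiB of 16 kiB blocks
	p.set_int(lt::settings_pack::cache_size_volatile, 32); // cap hash-read blocks
	ses.apply_settings(p);
}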

View File

@ -329,8 +329,10 @@ namespace libtorrent
virtual void rename_file(int index, std::string const& new_filename
, storage_error& ec) = 0;
// This function should delete all files and directories belonging to
// this storage.
// This function should delete some or all of the storage for this torrent.
// The ``options`` parameter specifies whether to delete all files or just
// the partfile. ``options`` are set to the same value as the options
// passed to session::remove_torrent().
//
// If an error occurs, ``storage_error`` should be set to reflect it.
//
@ -350,7 +352,7 @@ namespace libtorrent
// void release_memory();
// };
//
virtual void delete_files(storage_error& ec) = 0;
virtual void delete_files(int options, storage_error& ec) = 0;
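// a sketch of how a storage implementation might honor the new argument;
// my_storage, remove_all_payload_files() and remove_part_file() are
// hypothetical names, not libtorrent API:
void my_storage::delete_files(int const options, storage_error& ec)
{
	if (options & session::delete_files)
		remove_all_payload_files(ec); // hypothetical helper
	// the part-file goes away for both delete_files and delete_partfile
	remove_part_file(ec); // hypothetical helper
}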
#ifndef TORRENT_NO_DEPRECATE
// This function is called each time a file is completely downloaded. The
@ -416,7 +418,7 @@ namespace libtorrent
virtual void rename_file(int index, std::string const& new_filename
, storage_error& ec) TORRENT_OVERRIDE;
virtual void release_files(storage_error& ec) TORRENT_OVERRIDE;
virtual void delete_files(storage_error& ec) TORRENT_OVERRIDE;
virtual void delete_files(int options, storage_error& ec) TORRENT_OVERRIDE;
virtual void initialize(storage_error& ec) TORRENT_OVERRIDE;
virtual int move_storage(std::string const& save_path, int flags
, storage_error& ec) TORRENT_OVERRIDE;
@ -495,7 +497,7 @@ namespace libtorrent
, storage_error&) TORRENT_OVERRIDE {}
virtual void rename_file(int, std::string const&, storage_error&) TORRENT_OVERRIDE {}
virtual void release_files(storage_error&) TORRENT_OVERRIDE {}
virtual void delete_files(storage_error&) TORRENT_OVERRIDE {}
virtual void delete_files(int, storage_error&) TORRENT_OVERRIDE {}
virtual void initialize(storage_error&) TORRENT_OVERRIDE {}
virtual int move_storage(std::string const&, int, storage_error&) TORRENT_OVERRIDE { return 0; }
@ -532,7 +534,7 @@ namespace libtorrent
virtual void release_files(storage_error&) TORRENT_OVERRIDE {}
virtual void rename_file(int /* index */
, std::string const& /* new_filenamem */, storage_error&) TORRENT_OVERRIDE {}
virtual void delete_files(storage_error&) TORRENT_OVERRIDE {}
virtual void delete_files(int, storage_error&) TORRENT_OVERRIDE {}
};
struct disk_io_thread;

View File

@ -498,7 +498,7 @@ namespace libtorrent
bool should_check_files() const;
bool delete_files();
bool delete_files(int options);
void peers_erased(std::vector<torrent_peer*> const& peers);
// ============ start deprecation =============

View File

@ -95,6 +95,17 @@ namespace libtorrent
void defer_ack(utp_socket_impl* s);
void subscribe_drained(utp_socket_impl* s);
void restrict_mtu(int mtu)
{
m_restrict_mtu[m_mtu_idx] = mtu;
m_mtu_idx = (m_mtu_idx + 1) % m_restrict_mtu.size();
}
int restrict_mtu() const
{
return *std::max_element(m_restrict_mtu.begin(), m_restrict_mtu.end());
}
// used to keep stats of uTP events
// the counter is the enum from ``counters``.
void inc_stats_counter(int counter, int delta = 1);
@ -154,6 +165,9 @@ namespace libtorrent
// stats counters
counters& m_counters;
boost::array<int, 3> m_restrict_mtu;
int m_mtu_idx;
// this is passed on to the instantiate connection
// if this is non-null it will create SSL connections over uTP
void* m_ssl_context;
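// a standalone sketch of the MTU restriction window declared above: the
// last three reported ceilings live in a small ring buffer and the
// effective restriction is their maximum, so a single spuriously low
// measurement cannot depress the PMTU estimate for long. The initial
// value of 65536 (effectively "no restriction") is an assumption here.
#include <boost/array.hpp>
#include <algorithm>

struct mtu_window
{
	mtu_window() : m_mtu_idx(0) { m_restrict_mtu.assign(65536); }

	void restrict_mtu(int mtu)
	{
		m_restrict_mtu[m_mtu_idx] = mtu;
		m_mtu_idx = (m_mtu_idx + 1) % m_restrict_mtu.size();
	}

	int restrict_mtu() const
	{
		return *std::max_element(m_restrict_mtu.begin(), m_restrict_mtu.end());
	}

	boost::array<int, 3> m_restrict_mtu;
	int m_mtu_idx;
};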

View File

@ -113,3 +113,45 @@ TORRENT_TEST(checking_no_cache)
TEST_EQUAL(tor[0].status().is_seeding, true);
});
}
TORRENT_TEST(checking_limit_volatile)
{
run_test(
[](lt::add_torrent_params& atp, lt::settings_pack& p) {
atp.flags |= lt::add_torrent_params::flag_auto_managed;
p.set_int(lt::settings_pack::cache_size, 300);
p.set_int(lt::settings_pack::cache_size_volatile, 2);
},
[](lt::session& ses) {
int cache = get_cache_size(ses);
// the cache fits 300 blocks, but only allows two volatile blocks
TEST_EQUAL(cache, 2);
std::vector<lt::torrent_handle> tor = ses.get_torrents();
TEST_EQUAL(tor.size(), 1);
TEST_EQUAL(tor[0].status().is_seeding, true);
});
}
TORRENT_TEST(checking_volatile_limit_cache_size)
{
run_test(
[](lt::add_torrent_params& atp, lt::settings_pack& p) {
atp.flags |= lt::add_torrent_params::flag_auto_managed;
p.set_int(lt::settings_pack::cache_size, 10);
p.set_int(lt::settings_pack::cache_size_volatile, 300);
},
[](lt::session& ses) {
int cache = get_cache_size(ses);
// the cache allows 300 volatile blocks, but only fits 10 blocks
TEST_CHECK(cache > 0);
TEST_CHECK(cache <= 10);
std::vector<lt::torrent_handle> tor = ses.get_torrents();
TEST_EQUAL(tor.size(), 1);
TEST_EQUAL(tor[0].status().is_seeding, true);
});
}

View File

@ -36,6 +36,7 @@ POSSIBILITY OF SUCH DAMAGE.
#include "libtorrent/alert_types.hpp"
#include "libtorrent/session.hpp"
#include "libtorrent/session_stats.hpp"
#include "libtorrent/file.hpp"
using namespace libtorrent;
@ -68,7 +69,7 @@ TORRENT_TEST(plain)
// terminate
, [](int ticks, lt::session& ses) -> bool
{
if (ticks > 75)
if (ticks > 80)
{
TEST_ERROR("timeout");
return true;
@ -109,7 +110,7 @@ TORRENT_TEST(session_stats)
, [](int ticks, lt::session& ses) -> bool
{
ses.post_session_stats();
if (ticks > 75)
if (ticks > 80)
{
TEST_ERROR("timeout");
return true;
@ -135,7 +136,7 @@ TORRENT_TEST(suggest)
// terminate
, [](int ticks, lt::session& ses) -> bool
{
if (ticks > 75)
if (ticks > 80)
{
TEST_ERROR("timeout");
return true;
@ -163,7 +164,7 @@ TORRENT_TEST(utp_only)
// terminate
, [](int ticks, lt::session& ses) -> bool
{
if (ticks > 75)
if (ticks > 80)
{
TEST_ERROR("timeout");
return true;
@ -322,7 +323,7 @@ TORRENT_TEST(explicit_cache)
// terminate
, [](int ticks, lt::session& ses) -> bool
{
if (ticks > 75)
if (ticks > 80)
{
TEST_ERROR("timeout");
return true;
@ -350,6 +351,68 @@ TORRENT_TEST(shutdown)
});
}
TORRENT_TEST(delete_files)
{
std::string save_path;
setup_swarm(2, swarm_test::download
// add session
, [](lt::settings_pack& pack) {}
// add torrent
, [](lt::add_torrent_params& params) {}
// on alert
, [](lt::alert const* a, lt::session& ses) {}
// terminate
, [&save_path](int ticks, lt::session& ses) -> bool
{
if (completed_pieces(ses) == 0) return false;
auto h = ses.get_torrents()[0];
save_path = h.status().save_path;
ses.remove_torrent(h, session::delete_files);
return true;
});
// assert the file is no longer there
file_status st;
error_code ec;
stat_file(combine_path(save_path, "temporary"), &st, ec);
printf("expecting \"%s/temporary\" to NOT exist [%s | %s]\n"
, save_path.c_str()
, ec.category().name()
, ec.message().c_str());
TEST_EQUAL(ec, error_code(boost::system::errc::no_such_file_or_directory, system_category()));
}
TORRENT_TEST(delete_partfile)
{
std::string save_path;
setup_swarm(2, swarm_test::download
// add session
, [](lt::settings_pack& pack) {}
// add torrent
, [](lt::add_torrent_params& params) {}
// on alert
, [](lt::alert const* a, lt::session& ses) {}
// terminate
, [&save_path](int ticks, lt::session& ses) -> bool
{
if (completed_pieces(ses) == 0) return false;
auto h = ses.get_torrents()[0];
save_path = h.status().save_path;
ses.remove_torrent(h, session::delete_partfile);
return true;
});
// assert the file *is* still there
file_status st;
error_code ec;
stat_file(combine_path(save_path, "temporary"), &st, ec);
printf("expecting \"%s/temporary\" to exist [%s]\n", save_path.c_str()
, ec.message().c_str());
TEST_CHECK(!ec);
}
// TODO: add test that makes sure a torrent in graceful pause mode won't make
// outgoing connections
// TODO: add test that makes sure a torrent in graceful pause mode won't accept

View File

@ -49,20 +49,6 @@ POSSIBILITY OF SUCH DAMAGE.
#include "libtorrent/random.hpp"
#endif
#include "libtorrent/aux_/disable_warnings_push.hpp"
#if TORRENT_USE_PURGABLE_CONTROL
#include <mach/mach.h>
// see comments at:
// http://www.opensource.apple.com/source/xnu/xnu-792.13.8/osfmk/vm/vm_object.c
const vm_purgable_t vm_purgable_set_state = VM_PURGABLE_SET_STATE;
const vm_purgable_t vm_purgable_nonvolatile = VM_PURGABLE_NONVOLATILE;
#endif
#include "libtorrent/aux_/disable_warnings_pop.hpp"
/*
The disk cache mimics ARC (adaptive replacement cache).
@ -365,6 +351,8 @@ block_cache::block_cache(int block_size, io_service& ios
: disk_buffer_pool(block_size, ios, trigger_trim)
, m_last_cache_op(cache_miss)
, m_ghost_size(8)
, m_max_volatile_blocks(100)
, m_volatile_size(0)
, m_read_cache_size(0)
, m_write_cache_size(0)
, m_send_buffer_blocks(0)
@ -553,6 +541,84 @@ void block_cache::update_cache_state(cached_piece_entry* p)
#endif
}
void block_cache::try_evict_one_volatile()
{
INVARIANT_CHECK;
DLOG(stderr, "[%p] try_evict_one_volatile\n", static_cast<void*>(this));
if (m_volatile_size < m_max_volatile_blocks) return;
linked_list<cached_piece_entry>* piece_list = &m_lru[cached_piece_entry::volatile_read_lru];
for (list_iterator<cached_piece_entry> i = piece_list->iterate(); i.get();)
{
cached_piece_entry* pe = reinterpret_cast<cached_piece_entry*>(i.get());
TORRENT_PIECE_ASSERT(pe->in_use, pe);
i.next();
if (pe->ok_to_evict())
{
#ifdef TORRENT_DEBUG
for (int j = 0; j < pe->blocks_in_piece; ++j)
TORRENT_PIECE_ASSERT(pe->blocks[j].buf == 0, pe);
#endif
TORRENT_PIECE_ASSERT(pe->refcount == 0, pe);
move_to_ghost(pe);
continue;
}
TORRENT_PIECE_ASSERT(pe->num_dirty == 0, pe);
// someone else is using this piece
if (pe->refcount > 0) continue;
// some blocks are pinned in this piece, skip it
if (pe->pinned > 0) continue;
char** to_delete = TORRENT_ALLOCA(char*, pe->blocks_in_piece);
int num_to_delete = 0;
// go through the blocks and evict the ones that are not dirty and not
// referenced
for (int j = 0; j < pe->blocks_in_piece; ++j)
{
cached_block_entry& b = pe->blocks[j];
TORRENT_PIECE_ASSERT(b.dirty == false, pe);
TORRENT_PIECE_ASSERT(b.pending == false, pe);
if (b.buf == 0 || b.refcount > 0 || b.dirty || b.pending) continue;
to_delete[num_to_delete++] = b.buf;
b.buf = NULL;
TORRENT_PIECE_ASSERT(pe->num_blocks > 0, pe);
--pe->num_blocks;
TORRENT_PIECE_ASSERT(m_read_cache_size > 0, pe);
--m_read_cache_size;
TORRENT_PIECE_ASSERT(m_volatile_size > 0, pe);
--m_volatile_size;
}
if (pe->ok_to_evict())
{
#ifdef TORRENT_DEBUG
for (int j = 0; j < pe->blocks_in_piece; ++j)
TORRENT_PIECE_ASSERT(pe->blocks[j].buf == 0, pe);
#endif
move_to_ghost(pe);
}
if (num_to_delete == 0) return;
DLOG(stderr, "[%p] removed %d blocks\n", static_cast<void*>(this)
, num_to_delete);
free_multiple_buffers(to_delete, num_to_delete);
return;
}
}
cached_piece_entry* block_cache::allocate_piece(disk_io_job const* j, int cache_state)
{
#ifdef TORRENT_EXPENSIVE_INVARIANT_CHECKS
@ -569,8 +635,8 @@ cached_piece_entry* block_cache::allocate_piece(disk_io_job const* j, int cache_
cached_piece_entry* p = find_piece(j);
if (p == 0)
{
int piece_size = j->storage->files()->piece_size(j->piece);
int blocks_in_piece = (piece_size + block_size() - 1) / block_size();
int const piece_size = j->storage->files()->piece_size(j->piece);
int const blocks_in_piece = (piece_size + block_size() - 1) / block_size();
cached_piece_entry pe;
pe.piece = j->piece;
@ -817,7 +883,13 @@ void block_cache::free_block(cached_piece_entry* pe, int block)
{
TORRENT_PIECE_ASSERT(m_read_cache_size > 0, pe);
--m_read_cache_size;
if (pe->cache_state == cached_piece_entry::volatile_read_lru)
{
--m_volatile_size;
}
}
TORRENT_PIECE_ASSERT(pe->num_blocks > 0, pe);
--pe->num_blocks;
free_buffer(b.buf);
@ -857,6 +929,12 @@ bool block_cache::evict_piece(cached_piece_entry* pe, tailqueue<disk_io_job>& jo
}
if (pe->num_blocks == 0) break;
}
if (pe->cache_state == cached_piece_entry::volatile_read_lru)
{
m_volatile_size -= num_to_delete;
}
if (num_to_delete) free_multiple_buffers(to_delete, num_to_delete);
if (pe->ok_to_evict(true))
@ -1014,6 +1092,7 @@ int block_cache::try_evict_blocks(int num, cached_piece_entry* ignore)
// go through the blocks and evict the ones that are not dirty and not
// referenced
int removed = 0;
for (int j = 0; j < pe->blocks_in_piece && num > 0; ++j)
{
cached_block_entry& b = pe->blocks[j];
@ -1024,11 +1103,17 @@ int block_cache::try_evict_blocks(int num, cached_piece_entry* ignore)
b.buf = NULL;
TORRENT_PIECE_ASSERT(pe->num_blocks > 0, pe);
--pe->num_blocks;
TORRENT_PIECE_ASSERT(m_read_cache_size > 0, pe);
--m_read_cache_size;
++removed;
--num;
}
TORRENT_PIECE_ASSERT(m_read_cache_size >= removed, pe);
m_read_cache_size -= removed;
if (pe->cache_state == cached_piece_entry::volatile_read_lru)
{
m_volatile_size -= removed;
}
if (pe->ok_to_evict())
{
#ifdef TORRENT_DEBUG
@ -1086,6 +1171,7 @@ int block_cache::try_evict_blocks(int num, cached_piece_entry* ignore)
// go through the blocks and evict the ones
// that are not dirty and not referenced
int removed = 0;
for (int j = 0; j < end && num > 0; ++j)
{
cached_block_entry& b = pe->blocks[j];
@ -1096,11 +1182,17 @@ int block_cache::try_evict_blocks(int num, cached_piece_entry* ignore)
b.buf = NULL;
TORRENT_PIECE_ASSERT(pe->num_blocks > 0, pe);
--pe->num_blocks;
TORRENT_PIECE_ASSERT(m_read_cache_size > 0, pe);
--m_read_cache_size;
++removed;
--num;
}
TORRENT_PIECE_ASSERT(m_read_cache_size >= removed, pe);
m_read_cache_size -= removed;
if (pe->cache_state == cached_piece_entry::volatile_read_lru)
{
m_volatile_size -= removed;
}
if (pe->ok_to_evict())
{
#ifdef TORRENT_DEBUG
@ -1215,7 +1307,9 @@ int block_cache::pad_job(disk_io_job const* j, int blocks_in_piece
void block_cache::insert_blocks(cached_piece_entry* pe, int block, file::iovec_t *iov
, int iov_len, disk_io_job* j, int flags)
{
#ifdef TORRENT_EXPENSIVE_INVARIANT_CHECKS
INVARIANT_CHECK;
#endif
TORRENT_ASSERT(pe);
TORRENT_ASSERT(pe->in_use);
@ -1264,6 +1358,7 @@ void block_cache::insert_blocks(cached_piece_entry* pe, int block, file::iovec_t
TORRENT_PIECE_ASSERT(pe->blocks[block].dirty == false, pe);
++pe->num_blocks;
++m_read_cache_size;
if (j->flags & disk_io_job::volatile_read) ++m_volatile_size;
if (flags & blocks_inc_refcount)
{
@ -1271,39 +1366,9 @@ void block_cache::insert_blocks(cached_piece_entry* pe, int block, file::iovec_t
TORRENT_UNUSED(ret); // suppress warning
TORRENT_ASSERT(ret);
}
else
{
#if TORRENT_USE_PURGABLE_CONTROL && defined TORRENT_DISABLE_POOL_ALLOCATOR
// volatile read blocks are group 0, regular reads are group 1
int state = VM_PURGABLE_VOLATILE | ((j->flags & disk_io_job::volatile_read) ? VM_VOLATILE_GROUP_0 : VM_VOLATILE_GROUP_1);
kern_return_t ret = vm_purgable_control(
mach_task_self(),
reinterpret_cast<vm_address_t>(pe->blocks[block].buf),
vm_purgable_set_state,
&state);
#ifdef TORRENT_DEBUG
// if ((random() % 200) == 0) ret = 1;
#endif
if (ret != KERN_SUCCESS || (state & VM_PURGABLE_EMPTY))
{
fprintf(stderr, "insert_blocks(piece=%d block=%d): "
"vm_purgable_control failed: %d state & VM_PURGABLE_EMPTY: %d\n"
, pe->piece, block, ret, state & VM_PURGABLE_EMPTY);
free_buffer(pe->blocks[block].buf);
pe->blocks[block].buf = NULL;
--pe->num_blocks;
--m_read_cache_size;
}
#endif
}
}
#if TORRENT_USE_PURGABLE_CONTROL && defined TORRENT_DISABLE_POOL_ALLOCATOR
TORRENT_ASSERT(pe->blocks[block].buf != NULL
|| (flags & blocks_inc_refcount) == 0);
#else
TORRENT_ASSERT(pe->blocks[block].buf != NULL);
#endif
}
TORRENT_PIECE_ASSERT(pe->cache_state != cached_piece_entry::read_lru1_ghost, pe);
@ -1320,34 +1385,6 @@ bool block_cache::inc_block_refcount(cached_piece_entry* pe, int block, int reas
TORRENT_PIECE_ASSERT(pe->blocks[block].refcount < cached_block_entry::max_refcount, pe);
if (pe->blocks[block].refcount == 0)
{
#if TORRENT_USE_PURGABLE_CONTROL && defined TORRENT_DISABLE_POOL_ALLOCATOR
// we're adding the first refcount to this block, first make sure
// its still here. It's only volatile if it's not dirty and has refcount == 0
if (!pe->blocks[block].dirty)
{
int state = vm_purgable_nonvolatile;
kern_return_t ret = vm_purgable_control(
mach_task_self(),
reinterpret_cast<vm_address_t>(pe->blocks[block].buf),
vm_purgable_set_state,
&state);
#ifdef TORRENT_DEBUG
// if ((random() % 200) == 0) ret = 1;
#endif
if (ret != KERN_SUCCESS || (state & VM_PURGABLE_EMPTY))
{
fprintf(stderr, "inc_block_refcount(piece=%d block=%d): "
"vm_purgable_control failed: %d state & VM_PURGABLE_EMPTY: %d\n"
, pe->piece, block, ret, state & VM_PURGABLE_EMPTY);
free_buffer(pe->blocks[block].buf);
pe->blocks[block].buf = NULL;
--pe->num_blocks;
--m_read_cache_size;
return false;
}
}
#endif
++pe->pinned;
++m_pinned_blocks;
}
@ -1385,34 +1422,6 @@ void block_cache::dec_block_refcount(cached_piece_entry* pe, int block, int reas
--pe->pinned;
TORRENT_PIECE_ASSERT(m_pinned_blocks > 0, pe);
--m_pinned_blocks;
#if TORRENT_USE_PURGABLE_CONTROL && defined TORRENT_DISABLE_POOL_ALLOCATOR
// we're removing the last refcount to this block, first make sure
// its still here. It's only volatile if it's not dirty and has refcount == 0
if (!pe->blocks[block].dirty)
{
// group 0 is the first one to be reclaimed
int state = VM_PURGABLE_VOLATILE | VM_VOLATILE_GROUP_1;
kern_return_t ret = vm_purgable_control(
mach_task_self(),
reinterpret_cast<vm_address_t>(pe->blocks[block].buf),
vm_purgable_set_state,
&state);
#ifdef TORRENT_DEBUG
// if ((random() % 200) == 0) ret = 1;
#endif
if (ret != KERN_SUCCESS || (state & VM_PURGABLE_EMPTY))
{
fprintf(stderr, "dec_block_refcount(piece=%d block=%d): "
"vm_purgable_control failed: %d state & VM_PURGABLE_EMPTY: %d\n"
, pe->piece, block, ret, state & VM_PURGABLE_EMPTY);
free_buffer(pe->blocks[block].buf);
pe->blocks[block].buf = NULL;
--pe->num_blocks;
--m_read_cache_size;
}
}
#endif
}
#if TORRENT_USE_ASSERTS
switch (reason)
@ -1475,6 +1484,7 @@ void block_cache::free_piece(cached_piece_entry* pe)
// and free them all in one go
char** to_delete = TORRENT_ALLOCA(char*, pe->blocks_in_piece);
int num_to_delete = 0;
int removed_clean = 0;
for (int i = 0; i < pe->blocks_in_piece; ++i)
{
if (pe->blocks[i].buf == 0) continue;
@ -1494,22 +1504,29 @@ void block_cache::free_piece(cached_piece_entry* pe)
}
else
{
TORRENT_PIECE_ASSERT(m_read_cache_size > 0, pe);
--m_read_cache_size;
++removed_clean;
}
}
TORRENT_PIECE_ASSERT(m_read_cache_size >= removed_clean, pe);
m_read_cache_size -= removed_clean;
if (pe->cache_state == cached_piece_entry::volatile_read_lru)
{
m_volatile_size -= num_to_delete;
}
if (num_to_delete) free_multiple_buffers(to_delete, num_to_delete);
update_cache_state(pe);
}
int block_cache::drain_piece_bufs(cached_piece_entry& p, std::vector<char*>& buf)
{
int piece_size = p.storage->files()->piece_size(p.piece);
int blocks_in_piece = (piece_size + block_size() - 1) / block_size();
int const piece_size = p.storage->files()->piece_size(p.piece);
int const blocks_in_piece = (piece_size + block_size() - 1) / block_size();
int ret = 0;
TORRENT_PIECE_ASSERT(p.in_use, &p);
int removed_clean = 0;
for (int i = 0; i < blocks_in_piece; ++i)
{
if (p.blocks[i].buf == 0) continue;
@ -1529,10 +1546,17 @@ int block_cache::drain_piece_bufs(cached_piece_entry& p, std::vector<char*>& buf
}
else
{
TORRENT_ASSERT(m_read_cache_size > 0);
--m_read_cache_size;
++removed_clean;
}
}
TORRENT_ASSERT(m_read_cache_size >= removed_clean);
m_read_cache_size -= removed_clean;
if (p.cache_state == cached_piece_entry::volatile_read_lru)
{
m_volatile_size -= removed_clean;
}
update_cache_state(&p);
return ret;
}
@ -1577,6 +1601,8 @@ void block_cache::set_settings(aux::session_settings const& sett, error_code& ec
m_ghost_size = (std::max)(8, sett.get_int(settings_pack::cache_size)
/ (std::max)(sett.get_int(settings_pack::read_cache_line_size), 4) / 2);
m_max_volatile_blocks = sett.get_int(settings_pack::cache_size_volatile);
disk_buffer_pool::set_settings(sett, ec);
}
@ -1724,8 +1750,9 @@ void block_cache::check_invariant() const
// -1: block not in cache
// -2: out of memory
int block_cache::copy_from_piece(cached_piece_entry* pe, disk_io_job* j
, bool expect_no_fail)
int block_cache::copy_from_piece(cached_piece_entry* const pe
, disk_io_job* const j
, bool const expect_no_fail)
{
INVARIANT_CHECK;
TORRENT_UNUSED(expect_no_fail);
@ -1738,13 +1765,13 @@ int block_cache::copy_from_piece(cached_piece_entry* pe, disk_io_job* j
int block_offset = j->d.io.offset & (block_size()-1);
int buffer_offset = 0;
int size = j->d.io.buffer_size;
int blocks_to_read = block_offset > 0 && (size > block_size() - block_offset) ? 2 : 1;
int const blocks_to_read = block_offset > 0 && (size > block_size() - block_offset) ? 2 : 1;
TORRENT_PIECE_ASSERT(size <= block_size(), pe);
const int start_block = block;
int const start_block = block;
#if TORRENT_USE_ASSERTS
int piece_size = j->storage->files()->piece_size(j->piece);
int blocks_in_piece = (piece_size + block_size() - 1) / block_size();
int const piece_size = j->storage->files()->piece_size(j->piece);
int const blocks_in_piece = (piece_size + block_size() - 1) / block_size();
TORRENT_PIECE_ASSERT(start_block < blocks_in_piece, pe);
#endif
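// worked example for blocks_to_read above, assuming 16 kiB (0x4000) cache
// blocks: a read of size = 0x4000 at offset 0x6000 has block_offset =
// 0x2000 and spans bytes [0x6000, 0xA000), crossing the block boundary at
// 0x8000, so two cached blocks must be copied from. A block-aligned read
// of the same size touches exactly one.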

View File

@ -3578,7 +3578,7 @@ namespace libtorrent
int next_barrier = m_enc_handler.encrypt(iovec);
#ifndef TORRENT_DISABLE_LOGGING
if (next_barrier != 0)
peer_log(peer_log_alert::outgoing_message, "SEND_BARRIER"
peer_log(peer_log_alert::outgoing, "SEND_BARRIER"
, "encrypted block s = %d", next_barrier);
#endif
return next_barrier;

View File

@ -60,18 +60,14 @@ POSSIBILITY OF SUCH DAMAGE.
#include <linux/unistd.h>
#endif
#if TORRENT_USE_PURGABLE_CONTROL
#include <mach/mach.h>
// see comments at:
// http://www.opensource.apple.com/source/xnu/xnu-792.13.8/osfmk/vm/vm_object.c
#endif
#include "libtorrent/aux_/disable_warnings_pop.hpp"
namespace libtorrent
{
namespace {
// this is posted to the network thread
static void watermark_callback(std::vector<boost::shared_ptr<disk_observer> >* cbs
void watermark_callback(std::vector<boost::weak_ptr<disk_observer> >* cbs
, std::vector<disk_buffer_pool::handler_t>* handlers)
{
if (handlers)
@ -84,13 +80,18 @@ namespace libtorrent
if (cbs != NULL)
{
for (std::vector<boost::shared_ptr<disk_observer> >::iterator i = cbs->begin()
for (std::vector<boost::weak_ptr<disk_observer> >::iterator i = cbs->begin()
, end(cbs->end()); i != end; ++i)
(*i)->on_disk();
{
boost::shared_ptr<disk_observer> o = i->lock();
if (o) o->on_disk();
}
delete cbs;
}
}
} // anonymous namespace
disk_buffer_pool::disk_buffer_pool(int block_size, io_service& ios
, boost::function<void()> const& trigger_trim)
: m_block_size(block_size)
@ -189,7 +190,7 @@ namespace libtorrent
{
l.unlock();
m_ios.post(boost::bind(&watermark_callback
, static_cast<std::vector<boost::shared_ptr<disk_observer> >*>(NULL)
, static_cast<std::vector<boost::weak_ptr<disk_observer> >*>(NULL)
, slice));
return;
}
@ -201,13 +202,13 @@ namespace libtorrent
{
l.unlock();
m_ios.post(boost::bind(&watermark_callback
, static_cast<std::vector<boost::shared_ptr<disk_observer> >*>(NULL)
, static_cast<std::vector<boost::weak_ptr<disk_observer> >*>(NULL)
, handlers));
return;
}
std::vector<boost::shared_ptr<disk_observer> >* cbs
= new std::vector<boost::shared_ptr<disk_observer> >();
std::vector<boost::weak_ptr<disk_observer> >* cbs
= new std::vector<boost::weak_ptr<disk_observer> >();
m_observers.swap(*cbs);
l.unlock();
m_ios.post(boost::bind(&watermark_callback, cbs, handlers));
@ -355,18 +356,7 @@ namespace libtorrent
{
#if defined TORRENT_DISABLE_POOL_ALLOCATOR
#if TORRENT_USE_PURGABLE_CONTROL
kern_return_t res = vm_allocate(
mach_task_self(),
reinterpret_cast<vm_address_t*>(&ret),
0x4000,
VM_FLAGS_PURGABLE |
VM_FLAGS_ANYWHERE);
if (res != KERN_SUCCESS)
ret = NULL;
#else
ret = page_aligned_allocator::malloc(m_block_size);
#endif // TORRENT_USE_PURGABLE_CONTROL
#else
if (m_using_pool_allocator)
@ -641,15 +631,7 @@ namespace libtorrent
{
#if defined TORRENT_DISABLE_POOL_ALLOCATOR
#if TORRENT_USE_PURGABLE_CONTROL
vm_deallocate(
mach_task_self(),
reinterpret_cast<vm_address_t>(buf),
0x4000
);
#else
page_aligned_allocator::free(buf);
#endif // TORRENT_USE_PURGABLE_CONTROL
#else
if (m_using_pool_allocator)

View File

@ -1,6 +1,6 @@
/*
Copyright (c) 2007-2016, Arvid Norberg
Copyright (c) 2007-2016, Arvid Norberg, Steven Siloti
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -160,6 +160,7 @@ namespace libtorrent
, m_last_file_check(clock_type::now())
, m_file_pool(40)
, m_disk_cache(block_size, ios, boost::bind(&disk_io_thread::trigger_cache_trim, this))
, m_cache_check_state(cache_check_idle)
, m_stats_counters(cnt)
, m_ios(ios)
, m_last_disk_aio_performance_warning(min_time())
@ -939,6 +940,8 @@ namespace libtorrent
kick_hasher(pe, l);
num -= try_flush_hashed(pe, 1, completed_jobs, l);
--pe->piece_refcount;
m_disk_cache.maybe_free_piece(pe);
}
// when the write cache is under high pressure, it is likely
@ -1066,6 +1069,14 @@ namespace libtorrent
// a disk job is executed
void disk_io_thread::check_cache_level(mutex::scoped_lock& l, jobqueue_t& completed_jobs)
{
// when the read cache is disabled, always try to evict all read cache
// blocks
if (!m_settings.get_bool(settings_pack::use_read_cache))
{
int const evict = m_disk_cache.read_cache_size();
m_disk_cache.try_evict_blocks(evict);
}
int evict = m_disk_cache.num_to_evict(0);
if (evict > 0)
{
@ -1086,18 +1097,18 @@ namespace libtorrent
TORRENT_ASSERT(j->next == 0);
TORRENT_ASSERT((j->flags & disk_io_job::in_progress) || !j->storage);
mutex::scoped_lock l(m_cache_mutex);
#if DEBUG_DISK_THREAD
{
mutex::scoped_lock l(m_cache_mutex);
check_cache_level(l, completed_jobs);
DLOG("perform_job job: %s ( %s%s) piece: %d offset: %d outstanding: %d\n"
, job_action_name[j->action]
, (j->flags & disk_io_job::fence) ? "fence ": ""
, (j->flags & disk_io_job::force_copy) ? "force_copy ": ""
, j->piece, j->d.io.offset
, j->storage ? j->storage->num_outstanding_jobs() : -1);
l.unlock();
DLOG("perform_job job: %s ( %s%s) piece: %d offset: %d outstanding: %d\n"
, job_action_name[j->action]
, (j->flags & disk_io_job::fence) ? "fence ": ""
, (j->flags & disk_io_job::force_copy) ? "force_copy ": ""
, j->piece, j->d.io.offset
, j->storage ? j->storage->num_outstanding_jobs() : -1);
}
#endif
boost::shared_ptr<piece_manager> storage = j->storage;
@ -1122,6 +1133,23 @@ namespace libtorrent
m_stats_counters.inc_stats_counter(counters::num_running_disk_jobs, -1);
mutex::scoped_lock l(m_cache_mutex);
if (m_cache_check_state == cache_check_idle)
{
m_cache_check_state = cache_check_active;
while (m_cache_check_state != cache_check_idle)
{
check_cache_level(l, completed_jobs);
TORRENT_ASSERT(l.locked());
--m_cache_check_state;
}
}
else
{
m_cache_check_state = cache_check_reinvoke;
}
l.unlock();
if (ret == retry_job)
{
mutex::scoped_lock l2(m_job_mutex);
@ -1187,14 +1215,6 @@ namespace libtorrent
int disk_io_thread::do_read(disk_io_job* j, jobqueue_t& completed_jobs)
{
if ((j->flags & disk_io_job::use_disk_cache) == 0)
{
// we're not using a cache. This is the simple path
// just read straight from the file
int ret = do_uncached_read(j);
return ret;
}
int block_size = m_disk_cache.block_size();
int piece_size = j->storage->files()->piece_size(j->piece);
int blocks_in_piece = (piece_size + block_size - 1) / block_size;
@ -1211,26 +1231,8 @@ namespace libtorrent
cached_piece_entry* pe = m_disk_cache.find_piece(j);
if (pe == NULL)
{
// this isn't supposed to happen. The piece is supposed
// to be allocated when the read job is posted to the
// queue, and have 'outstanding_read' set to 1
TORRENT_ASSERT(false);
int cache_state = (j->flags & disk_io_job::volatile_read)
? cached_piece_entry::volatile_read_lru
: cached_piece_entry::read_lru1;
pe = m_disk_cache.allocate_piece(j, cache_state);
if (pe == NULL)
{
j->error.ec = error::no_memory;
j->error.operation = storage_error::alloc_cache_piece;
m_disk_cache.free_iovec(iov, iov_len);
return -1;
}
#if TORRENT_USE_ASSERTS
pe->piece_log.push_back(piece_log_t(piece_log_t::set_outstanding_jobs));
#endif
pe->outstanding_read = 1;
l.unlock();
return do_uncached_read(j);
}
TORRENT_PIECE_ASSERT(pe->outstanding_read == 1, pe);
@ -1453,60 +1455,55 @@ namespace libtorrent
INVARIANT_CHECK;
TORRENT_ASSERT(j->d.io.buffer_size <= m_disk_cache.block_size());
// should we put this write job in the cache?
// if we don't use the cache we shouldn't.
if (j->flags & disk_io_job::use_disk_cache)
mutex::scoped_lock l(m_cache_mutex);
cached_piece_entry* pe = m_disk_cache.find_piece(j);
if (pe && pe->hashing_done)
{
mutex::scoped_lock l(m_cache_mutex);
cached_piece_entry* pe = m_disk_cache.find_piece(j);
if (pe && pe->hashing_done)
{
#if TORRENT_USE_ASSERTS
print_piece_log(pe->piece_log);
print_piece_log(pe->piece_log);
#endif
TORRENT_ASSERT(pe->blocks[j->d.io.offset / 16 / 1024].buf != j->buffer.disk_block);
TORRENT_ASSERT(pe->blocks[j->d.io.offset / 16 / 1024].buf != NULL);
j->error.ec = error::operation_aborted;
j->error.operation = storage_error::write;
return -1;
}
TORRENT_ASSERT(pe->blocks[j->d.io.offset / 16 / 1024].buf != j->buffer.disk_block);
TORRENT_ASSERT(pe->blocks[j->d.io.offset / 16 / 1024].buf != NULL);
j->error.ec = error::operation_aborted;
j->error.operation = storage_error::write;
return -1;
}
pe = m_disk_cache.add_dirty_block(j);
pe = m_disk_cache.add_dirty_block(j);
if (pe)
{
if (pe)
{
#if TORRENT_USE_ASSERTS
pe->piece_log.push_back(piece_log_t(j->action, j->d.io.offset / 0x4000));
pe->piece_log.push_back(piece_log_t(j->action, j->d.io.offset / 0x4000));
#endif
if (!pe->hashing_done
&& pe->hash == 0
&& !m_settings.get_bool(settings_pack::disable_hash_checks))
{
pe->hash = new partial_hash;
m_disk_cache.update_cache_state(pe);
}
TORRENT_PIECE_ASSERT(pe->cache_state <= cached_piece_entry::read_lru1 || pe->cache_state == cached_piece_entry::read_lru2, pe);
++pe->piece_refcount;
// see if we can progress the hash cursor with this new block
kick_hasher(pe, l);
TORRENT_PIECE_ASSERT(pe->cache_state <= cached_piece_entry::read_lru1 || pe->cache_state == cached_piece_entry::read_lru2, pe);
// flushes the piece to disk in case
// it satisfies the condition for a write
// piece to be flushed
try_flush_hashed(pe, m_settings.get_int(
settings_pack::write_cache_line_size), completed_jobs, l);
--pe->piece_refcount;
m_disk_cache.maybe_free_piece(pe);
return defer_handler;
if (!pe->hashing_done
&& pe->hash == 0
&& !m_settings.get_bool(settings_pack::disable_hash_checks))
{
pe->hash = new partial_hash;
m_disk_cache.update_cache_state(pe);
}
TORRENT_PIECE_ASSERT(pe->cache_state <= cached_piece_entry::read_lru1 || pe->cache_state == cached_piece_entry::read_lru2, pe);
++pe->piece_refcount;
// see if we can progress the hash cursor with this new block
kick_hasher(pe, l);
TORRENT_PIECE_ASSERT(pe->cache_state <= cached_piece_entry::read_lru1 || pe->cache_state == cached_piece_entry::read_lru2, pe);
// flushes the piece to disk in case
// it satisfies the condition for a write
// piece to be flushed
try_flush_hashed(pe, m_settings.get_int(
settings_pack::write_cache_line_size), completed_jobs, l);
--pe->piece_refcount;
m_disk_cache.maybe_free_piece(pe);
return defer_handler;
}
// ok, we should just perform this job right now.
@ -1570,57 +1567,65 @@ namespace libtorrent
{
TORRENT_ASSERT(j->action == disk_io_job::read);
if (m_settings.get_bool(settings_pack::use_read_cache)
&& m_settings.get_int(settings_pack::cache_size) != 0)
int ret = m_disk_cache.try_read(j);
if (ret >= 0)
{
int ret = m_disk_cache.try_read(j);
if (ret >= 0)
{
m_stats_counters.inc_stats_counter(counters::num_blocks_cache_hits);
DLOG("do_read: cache hit\n");
j->flags |= disk_io_job::cache_hit;
j->ret = ret;
return 0;
}
else if (ret == -2)
{
j->error.ec = error::no_memory;
j->error.operation = storage_error::alloc_cache_piece;
j->ret = disk_io_job::operation_failed;
return 0;
}
m_stats_counters.inc_stats_counter(counters::num_blocks_cache_hits);
DLOG("do_read: cache hit\n");
j->flags |= disk_io_job::cache_hit;
j->ret = ret;
return 0;
}
else if (ret == -2)
{
j->error.ec = error::no_memory;
j->error.operation = storage_error::alloc_cache_piece;
j->ret = disk_io_job::operation_failed;
return 0;
}
if (check_fence && j->storage->is_blocked(j))
{
// this means the job was queued up inside storage
m_stats_counters.inc_stats_counter(counters::blocked_disk_jobs);
DLOG("blocked job: %s (torrent: %d total: %d)\n"
, job_action_name[j->action], j->storage ? j->storage->num_blocked() : 0
, int(m_stats_counters[counters::blocked_disk_jobs]));
return 2;
}
if (check_fence && j->storage->is_blocked(j))
{
// this means the job was queued up inside storage
m_stats_counters.inc_stats_counter(counters::blocked_disk_jobs);
DLOG("blocked job: %s (torrent: %d total: %d)\n"
, job_action_name[j->action], j->storage ? j->storage->num_blocked() : 0
, int(m_stats_counters[counters::blocked_disk_jobs]));
return 2;
}
cached_piece_entry* pe = m_disk_cache.allocate_piece(j, cached_piece_entry::read_lru1);
if (pe == NULL)
{
j->ret = -1;
j->error.ec = error::no_memory;
j->error.operation = storage_error::read;
return 0;
}
j->flags |= disk_io_job::use_disk_cache;
if (pe->outstanding_read)
{
TORRENT_PIECE_ASSERT(j->piece == pe->piece, pe);
pe->read_jobs.push_back(j);
return 2;
}
if (!m_settings.get_bool(settings_pack::use_read_cache)
|| m_settings.get_int(settings_pack::cache_size) == 0)
{
// if the read cache is disabled then we can skip going through the cache
// but only if there is no existing piece entry. Otherwise there may be a
// partial hit on one-or-more dirty buffers so we must use the cache
// to avoid reading bogus data from storage
if (m_disk_cache.find_piece(j) == NULL)
return 1;
}
cached_piece_entry* pe = m_disk_cache.allocate_piece(j, cached_piece_entry::read_lru1);
if (pe == NULL)
{
j->ret = -1;
j->error.ec = error::no_memory;
j->error.operation = storage_error::read;
return 0;
}
if (pe->outstanding_read)
{
TORRENT_PIECE_ASSERT(j->piece == pe->piece, pe);
pe->read_jobs.push_back(j);
return 2;
}
#if TORRENT_USE_ASSERTS
pe->piece_log.push_back(piece_log_t(piece_log_t::set_outstanding_jobs));
pe->piece_log.push_back(piece_log_t(piece_log_t::set_outstanding_jobs));
#endif
pe->outstanding_read = 1;
}
pe->outstanding_read = 1;
return 1;
}
@ -1684,53 +1689,49 @@ namespace libtorrent
TORRENT_ASSERT(m_disk_cache.is_disk_buffer(j->buffer.disk_block));
l_.unlock();
#endif
if (m_settings.get_int(settings_pack::cache_size) != 0
&& m_settings.get_bool(settings_pack::use_write_cache))
TORRENT_ASSERT((r.start % m_disk_cache.block_size()) == 0);
if (storage->is_blocked(j))
{
TORRENT_ASSERT((r.start % m_disk_cache.block_size()) == 0);
j->flags |= disk_io_job::use_disk_cache;
if (storage->is_blocked(j))
{
// this means the job was queued up inside storage
m_stats_counters.inc_stats_counter(counters::blocked_disk_jobs);
DLOG("blocked job: %s (torrent: %d total: %d)\n"
, job_action_name[j->action], j->storage ? j->storage->num_blocked() : 0
, int(m_stats_counters[counters::blocked_disk_jobs]));
// make the holder give up ownership of the buffer
// since the job was successfully queued up
buffer.release();
return;
}
mutex::scoped_lock l(m_cache_mutex);
// if we succeed in adding the block to the cache, the job will
// be added along with it. we may not free j if so
cached_piece_entry* dpe = m_disk_cache.add_dirty_block(j);
// if the buffer was successfully added to the cache
// our holder should no longer own it
if (dpe) buffer.release();
if (dpe && dpe->outstanding_flush == 0)
{
dpe->outstanding_flush = 1;
l.unlock();
// the block and write job were successfully inserted
// into the cache. Now, see if we should trigger a flush
j = allocate_job(disk_io_job::flush_hashed);
j->storage = storage->shared_from_this();
j->piece = r.piece;
j->flags = flags;
add_job(j);
}
// if we added the block (regardless of whether we also
// issued a flush job or not), we're done.
if (dpe) return;
l.unlock();
// this means the job was queued up inside storage
m_stats_counters.inc_stats_counter(counters::blocked_disk_jobs);
DLOG("blocked job: %s (torrent: %d total: %d)\n"
, job_action_name[j->action], j->storage ? j->storage->num_blocked() : 0
, int(m_stats_counters[counters::blocked_disk_jobs]));
// make the holder give up ownership of the buffer
// since the job was successfully queued up
buffer.release();
return;
}
mutex::scoped_lock l(m_cache_mutex);
// if we succeed in adding the block to the cache, the job will
// be added along with it. we may not free j if so
cached_piece_entry* dpe = m_disk_cache.add_dirty_block(j);
// if the buffer was successfully added to the cache
// our holder should no longer own it
if (dpe) buffer.release();
if (dpe && dpe->outstanding_flush == 0)
{
dpe->outstanding_flush = 1;
l.unlock();
// the block and write job were successfully inserted
// into the cache. Now, see if we should trigger a flush
j = allocate_job(disk_io_job::flush_hashed);
j->storage = storage->shared_from_this();
j->piece = r.piece;
j->flags = flags;
add_job(j);
}
// if we added the block (regardless of whether we also
// issued a flush job or not), we're done.
if (dpe) return;
l.unlock();
add_job(j);
buffer.release();
}
@ -1777,13 +1778,6 @@ namespace libtorrent
return;
}
l.unlock();
if (m_settings.get_bool(settings_pack::use_read_cache)
&& m_settings.get_int(settings_pack::cache_size) != 0)
{
j->flags |= disk_io_job::use_disk_cache;
}
add_job(j);
}
@ -1816,6 +1810,7 @@ namespace libtorrent
}
void disk_io_thread::async_delete_files(piece_manager* storage
, int const options
, boost::function<void(disk_io_job const*)> const& handler)
{
#ifdef TORRENT_DEBUG
@ -1856,6 +1851,7 @@ namespace libtorrent
disk_io_job* j = allocate_job(disk_io_job::delete_files);
j->storage = storage->shared_from_this();
j->callback = handler;
j->buffer.delete_options = options;
add_fence_job(storage, j);
fail_jobs_impl(storage_error(boost::asio::error::operation_aborted)
@ -2267,9 +2263,6 @@ namespace libtorrent
{
INVARIANT_CHECK;
if ((j->flags & disk_io_job::use_disk_cache) == 0)
return do_uncached_hash(j);
int const piece_size = j->storage->files()->piece_size(j->piece);
int const file_flags = file_flags_for_job(j);
@ -2309,14 +2302,8 @@ namespace libtorrent
return 0;
}
}
if (pe == NULL && m_settings.get_bool(settings_pack::use_read_cache) == false)
else if (m_settings.get_bool(settings_pack::use_read_cache) == false)
{
l.unlock();
// if there's no piece in the cache, and the read cache is disabled
// it means it's already been flushed to disk, and there's no point
// in reading it into the cache, since we're not using read cache
// so just use the uncached version
return do_uncached_hash(j);
}
@ -2382,6 +2369,9 @@ namespace libtorrent
locked_blocks[num_locked_blocks++] = i;
}
// to keep the cache footprint low, try to evict a volatile piece
m_disk_cache.try_evict_one_volatile();
l.unlock();
int ret = 0;
@ -2537,7 +2527,7 @@ namespace libtorrent
int disk_io_thread::do_delete_files(disk_io_job* j, jobqueue_t& completed_jobs)
{
TORRENT_ASSERT(j->buffer.string == 0);
TORRENT_ASSERT(j->buffer.delete_options != 0);
INVARIANT_CHECK;
// if this assert fails, something's wrong with the fence logic
@ -2552,7 +2542,7 @@ namespace libtorrent
, completed_jobs, l);
l.unlock();
j->storage->get_storage_impl()->delete_files(j->error);
j->storage->get_storage_impl()->delete_files(j->buffer.delete_options, j->error);
return j->error ? -1 : 0;
}
@ -2636,7 +2626,7 @@ namespace libtorrent
int block_size = m_disk_cache.block_size();
int piece_size = j->storage->files()->piece_size(j->piece);
int blocks_in_piece = (piece_size + block_size - 1) / block_size;
file::iovec_t iov;
int ret = 0;
int offset = 0;
@ -2876,6 +2866,9 @@ namespace libtorrent
cached_piece_entry* pe = m_disk_cache.find_piece(j);
if (pe == NULL) return 0;
pe->outstanding_flush = 0;
if (pe->num_dirty == 0) return 0;
// if multiple threads are flushing this piece, this assert may fire
@ -2913,8 +2906,6 @@ namespace libtorrent
TORRENT_ASSERT(l.locked());
// TORRENT_PIECE_ASSERT(pe->outstanding_flush == 1, pe);
pe->outstanding_flush = 0;
--pe->piece_refcount;
m_disk_cache.maybe_free_piece(pe);
@ -3428,9 +3419,7 @@ namespace libtorrent
{
disk_io_job* j = new_jobs.pop_front();
if (j->action == disk_io_job::read
&& m_settings.get_bool(settings_pack::use_read_cache)
&& m_settings.get_int(settings_pack::cache_size) != 0)
if (j->action == disk_io_job::read)
{
int state = prep_read_job_impl(j, false);
switch (state)
@ -3564,6 +3553,5 @@ namespace libtorrent
{
}
#endif
}

View File

@ -137,6 +137,7 @@ namespace libtorrent
, m_last_choke(min_time())
, m_last_receive(aux::time_now())
, m_last_sent(aux::time_now())
, m_last_sent_payload(aux::time_now())
, m_requested(min_time())
, m_remote_dl_update(aux::time_now())
, m_connect(aux::time_now())
@ -1709,15 +1710,7 @@ namespace libtorrent
else if (m_ses.preemptive_unchoke())
{
// if the peer is choked and we have upload slots left,
// then unchoke it. Another condition that has to be met
// is that the torrent doesn't keep track of the individual
// up/down ratio for each peer (ratio == 0) or (if it does
// keep track) this particular connection isn't a leecher.
// If the peer was choked because it was leeching, don't
// unchoke it again.
// The exception to this last condition is if we're a seed.
// In that case we don't care if people are leeching, they
// can't pay for their downloads anyway.
// then unchoke it.
boost::shared_ptr<torrent> t = m_torrent.lock();
TORRENT_ASSERT(t);
@ -2618,7 +2611,13 @@ namespace libtorrent
return;
}
if (exceeded)
// every peer is entitled to have two disk blocks allocated at any given
// time, regardless of whether the cache size is exceeded or not. If this
// was not the case, when the cache size setting is very small, most peers
// would be blocked most of the time, because the disk cache would
continuously be in an exceeded state. Only rarely would it actually drop
// down to 0 and unblock all peers.
if (exceeded && m_outstanding_writing_bytes > 0)
{
if ((m_channel_state[download_channel] & peer_info::bw_disk) == 0)
m_counters.inc_stats_counter(counters::num_peers_down_disk);
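Read as a predicate, the admission rule above is simply the conjunction of the two conditions; a hedged sketch (not the actual member function):

// block a peer's downloads on the disk only if the cache is exceeded AND
// the peer already has write bytes in flight; a peer with nothing
// outstanding may always allocate, so a tiny cache cannot starve every
// peer at once
bool should_block_on_disk(bool const exceeded, int const outstanding_writing_bytes)
{
	return exceeded && outstanding_writing_bytes > 0;
}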
@ -4548,7 +4547,9 @@ namespace libtorrent
return false;
}
if (exceeded)
// to understand why m_outstanding_writing_bytes is here, see comment by
// the other call to allocate_disk_buffer()
if (exceeded && m_outstanding_writing_bytes > 0)
{
#ifndef TORRENT_DISABLE_LOGGING
peer_log(peer_log_alert::info, "DISK", "exceeded disk buffer watermark");
@ -4832,10 +4833,13 @@ namespace libtorrent
return;
}
// disconnect peers that we unchoked, but
// they didn't send a request within 60 seconds.
// disconnect peers that we unchoked, but they didn't send a request in
// the last 60 seconds, and we haven't been working on servicing a request
// for more than 60 seconds, but only if we're a seed
d = now - (std::max)(m_last_unchoke, m_last_incoming_request);
d = now - (std::max)((std::max)(m_last_unchoke, m_last_incoming_request)
, m_last_sent_payload);
if (may_timeout
&& !m_connecting
&& m_requests.empty()
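The idle duration above takes the most recent of three timestamps. A minimal sketch of the same computation, using std::chrono in place of libtorrent's clock (the helper name is hypothetical):

#include <algorithm>
#include <chrono>

using clock_type = std::chrono::steady_clock;

// a seed measures a peer's idleness from whichever happened last:
// unchoking the peer, receiving a request from it, or sending it payload
clock_type::duration seed_idle_time(clock_type::time_point const now
	, clock_type::time_point const last_unchoke
	, clock_type::time_point const last_incoming_request
	, clock_type::time_point const last_sent_payload)
{
	return now - std::max({ last_unchoke
		, last_incoming_request, last_sent_payload });
}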
@ -5228,6 +5232,7 @@ namespace libtorrent
, boost::bind(&peer_connection::on_disk_read_complete
, self(), _1, r, clock_type::now()), this);
}
m_last_sent_payload = clock_type::now();
m_requests.erase(m_requests.begin() + i);
if (m_requests.empty())

View File

@ -1095,17 +1095,6 @@ namespace aux {
TORRENT_ASSERT_VAL(conn == int(m_connections.size()) + 1, conn);
}
m_download_rate.close();
m_upload_rate.close();
// TODO: 3 closing the udp socket here means that
// the uTP connections cannot be closed gracefully
m_udp_socket.close();
m_external_udp_port = 0;
#ifdef TORRENT_USE_OPENSSL
m_ssl_udp_socket.close();
#endif
// we need to give all the sockets an opportunity to actually have their handlers
// called and cancelled before we continue the shutdown. This is a bit
// complicated: if there are no "undead" peers, it's safe to resume the
@ -1121,6 +1110,15 @@ namespace aux {
void session_impl::abort_stage2()
{
m_download_rate.close();
m_upload_rate.close();
m_udp_socket.close();
m_external_udp_port = 0;
#ifdef TORRENT_USE_OPENSSL
m_ssl_udp_socket.close();
#endif
// it's OK to detach the threads here. The disk_io_thread
// has an internal counter and won't release the network
// thread until they're all dead (via m_work).
@ -5007,7 +5005,8 @@ namespace aux {
boost::shared_ptr<torrent> tptr = h.m_torrent.lock();
if (!tptr) return;
m_alerts.emplace_alert<torrent_removed_alert>(tptr->get_handle(), tptr->info_hash());
m_alerts.emplace_alert<torrent_removed_alert>(tptr->get_handle()
, tptr->info_hash());
remove_torrent_impl(tptr, options);
@ -5015,7 +5014,8 @@ namespace aux {
tptr->set_queue_position(-1);
}
void session_impl::remove_torrent_impl(boost::shared_ptr<torrent> tptr, int options)
void session_impl::remove_torrent_impl(boost::shared_ptr<torrent> tptr
, int options)
{
#ifndef TORRENT_NO_DEPRECATE
// deprecated in 1.2
@ -5045,9 +5045,9 @@ namespace aux {
if (i == m_torrents.end()) return;
torrent& t = *i->second;
if (options & session::delete_files)
if (options)
{
if (!t.delete_files())
if (!t.delete_files(options))
{
if (m_alerts.should_post<torrent_delete_failed_alert>())
m_alerts.emplace_alert<torrent_delete_failed_alert>(t.get_handle()

View File

@ -154,7 +154,7 @@ namespace libtorrent
SET(upnp_ignore_nonrouters, false, 0),
SET(use_parole_mode, true, 0),
SET(use_read_cache, true, 0),
SET(use_write_cache, true, 0),
DEPRECATED_SET(use_write_cache, true, 0),
SET(dont_flush_write_cache, false, 0),
SET(explicit_read_cache, false, 0),
SET(coalesce_reads, false, 0),
@ -344,7 +344,8 @@ namespace libtorrent
SET(inactive_up_rate, 2048, 0),
SET_NOPREV(proxy_type, settings_pack::none, &session_impl::update_proxy),
SET_NOPREV(proxy_port, 0, &session_impl::update_proxy),
SET_NOPREV(i2p_port, 0, &session_impl::update_i2p_bridge)
SET_NOPREV(i2p_port, 0, &session_impl::update_i2p_bridge),
SET_NOPREV(cache_size_volatile, 256, 0)
};
#undef SET
@ -606,14 +607,14 @@ namespace libtorrent
&& std::find(callbacks.begin(), callbacks.end(), sa.fun) == callbacks.end())
callbacks.push_back(sa.fun);
}
for (std::vector<std::pair<boost::uint16_t, int> >::const_iterator i = pack->m_ints.begin()
, end(pack->m_ints.end()); i != end; ++i)
{
// disregard setting indices that are not int types
if ((i->first & settings_pack::type_mask) != settings_pack::int_type_base)
continue;
// ignore settings that are out of bounds
int index = i->first & settings_pack::index_mask;
if (index < 0 || index >= settings_pack::num_int_settings)
@ -632,7 +633,7 @@ namespace libtorrent
// disregard setting indices that are not bool types
if ((i->first & settings_pack::type_mask) != settings_pack::bool_type_base)
continue;
// ignore settings that are out of bounds
int index = i->first & settings_pack::index_mask;
if (index < 0 || index >= settings_pack::num_bool_settings)
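The type_mask / index_mask tests above imply that each setting key packs its value type into the high bits and its index into the low bits. An illustrative sketch of such a layout (the constants here are assumptions, not copied from settings_pack):

enum : unsigned
{
	index_mask     = 0x3fff,
	type_mask      = 0xc000,
	int_type_base  = 0x4000,
	bool_type_base = 0x8000
};

bool is_int_setting(unsigned const key)
{ return (key & type_mask) == int_type_base; }

bool is_valid_index(unsigned const key, int const num_settings)
{
	int const index = int(key & index_mask);
	return index >= 0 && index < num_settings;
}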

View File

@ -743,9 +743,10 @@ namespace libtorrent
ec.clear();
}
void default_storage::delete_files(storage_error& ec)
void default_storage::delete_files(int const options, storage_error& ec)
{
DFLOG(stderr, "[%p] delete_files\n", static_cast<void*>(this));
DFLOG(stderr, "[%p] delete_files [%x]\n", static_cast<void*>(this)
, options);
#if TORRENT_USE_ASSERTS
// this is a fence job, we expect no other
@ -764,54 +765,70 @@ namespace libtorrent
// make sure we don't have the files open
m_pool.release(this);
// if there's a part file open, make sure to destruct it, so that it
// releases the underlying file. Otherwise we may not be able to
// delete it
if (m_part_file) m_part_file.reset();
#if defined TORRENT_DEBUG_FILE_LEAKS
print_open_files("release files", m_files.name().c_str());
#endif
#if TORRENT_USE_ASSERTS
m_pool.mark_deleted(m_files);
#endif
// delete the files from disk
std::set<std::string> directories;
typedef std::set<std::string>::iterator iter_t;
for (int i = 0; i < files().num_files(); ++i)
if (options == session::delete_files)
{
std::string fp = files().file_path(i);
bool complete = is_complete(fp);
std::string p = complete ? fp : combine_path(m_save_path, fp);
if (!complete)
#if TORRENT_USE_ASSERTS
m_pool.mark_deleted(m_files);
#endif
// delete the files from disk
std::set<std::string> directories;
typedef std::set<std::string>::iterator iter_t;
for (int i = 0; i < files().num_files(); ++i)
{
std::string bp = parent_path(fp);
std::pair<iter_t, bool> ret;
ret.second = true;
while (ret.second && !bp.empty())
std::string fp = files().file_path(i);
bool complete = is_complete(fp);
std::string p = complete ? fp : combine_path(m_save_path, fp);
if (!complete)
{
ret = directories.insert(combine_path(m_save_path, bp));
bp = parent_path(bp);
std::string bp = parent_path(fp);
std::pair<iter_t, bool> ret;
ret.second = true;
while (ret.second && !bp.empty())
{
ret = directories.insert(combine_path(m_save_path, bp));
bp = parent_path(bp);
}
}
delete_one_file(p, ec.ec);
if (ec) { ec.file = i; ec.operation = storage_error::remove; }
}
// remove the directories. Reverse order to delete
// subdirectories first
for (std::set<std::string>::reverse_iterator i = directories.rbegin()
, end(directories.rend()); i != end; ++i)
{
error_code error;
delete_one_file(*i, error);
if (error && !ec) { ec.file = -1; ec.ec = error; ec.operation = storage_error::remove; }
}
delete_one_file(p, ec.ec);
if (ec) { ec.file = i; ec.operation = storage_error::remove; }
}
// remove the directories. Reverse order to delete
// subdirectories first
for (std::set<std::string>::reverse_iterator i = directories.rbegin()
, end(directories.rend()); i != end; ++i)
if (options == session::delete_files
|| options == session::delete_partfile)
{
error_code error;
delete_one_file(*i, error);
if (error && !ec) { ec.file = -1; ec.ec = error; ec.operation = storage_error::remove; }
remove(combine_path(m_save_path, m_part_file_name), error);
DFLOG(stderr, "[%p] delete partfile %s/%s [%s]\n", static_cast<void*>(this)
, m_save_path.c_str(), m_part_file_name.c_str(), error.message().c_str());
if (error && error != boost::system::errc::no_such_file_or_directory)
{
ec.file = -1;
ec.ec = error;
ec.operation = storage_error::remove;
}
}
error_code error;
remove(combine_path(m_save_path, m_part_file_name), error);
DFLOG(stderr, "[%p] delete partfile %s/%s [%s]\n", static_cast<void*>(this)
, m_save_path.c_str(), m_part_file_name.c_str(), error.message().c_str());
if (error != boost::system::errc::no_such_file_or_directory && !error)
{ ec.file = -1; ec.ec = error; ec.operation = storage_error::remove; }
DFLOG(stderr, "[%p] delete_files result: %s\n", static_cast<void*>(this)
, ec.ec.message().c_str());
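The control flow of the new delete_files() reduces to a two-branch dispatch: full deletion removes the payload files, their directories and the part file, while partfile-only deletion removes just the part file. A self-contained sketch (the option values are stand-ins for session::delete_files / session::delete_partfile):

enum delete_option { opt_delete_files = 1, opt_delete_partfile = 2 };

void delete_files_sketch(int const options)
{
	if (options == opt_delete_files)
	{
		// remove every payload file, then the directories that contained
		// them, deepest directories first
	}
	if (options == opt_delete_files || options == opt_delete_partfile)
	{
		// remove the part file in both modes; "no such file" is not an
		// error, since the part file may never have been created
	}
}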

View File

@ -8797,7 +8797,7 @@ namespace libtorrent
return limit_impl(peer_connection::download_channel);
}
bool torrent::delete_files()
bool torrent::delete_files(int const options)
{
TORRENT_ASSERT(is_single_thread());
@ -8813,7 +8813,7 @@ namespace libtorrent
{
TORRENT_ASSERT(m_storage);
inc_refcount("delete_files");
m_ses.disk_thread().async_delete_files(m_storage.get()
m_ses.disk_thread().async_delete_files(m_storage.get(), options
, boost::bind(&torrent::on_files_deleted, shared_from_this(), _1));
m_deleted = true;
return true;

View File

@ -35,6 +35,7 @@ POSSIBILITY OF SUCH DAMAGE.
#include "libtorrent/utp_socket_manager.hpp"
#include "libtorrent/instantiate_connection.hpp"
#include "libtorrent/socket_io.hpp"
#include "libtorrent/socket.hpp" // for TORRENT_HAS_DONT_FRAGMENT
#include "libtorrent/broadcast_socket.hpp" // for is_teredo
#include "libtorrent/random.hpp"
#include "libtorrent/performance_counters.hpp"
@ -59,8 +60,11 @@ namespace libtorrent
, m_last_if_update(min_time())
, m_sock_buf_size(0)
, m_counters(cnt)
, m_mtu_idx(0)
, m_ssl_context(ssl_context)
{}
{
m_restrict_mtu.fill(65536);
}
utp_socket_manager::~utp_socket_manager()
{
@ -128,7 +132,6 @@ namespace libtorrent
// the address field in the SOCKS header
if (addr.is_v4()) mtu -= 4;
else mtu -= 16;
}
else
{
@ -136,7 +139,7 @@ namespace libtorrent
else mtu -= TORRENT_IPV6_HEADER;
}
utp_mtu = mtu;
utp_mtu = (std::min)(mtu, restrict_mtu());
}
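Together with m_restrict_mtu.fill(65536) in the constructor, restrict_mtu() suggests a small rolling window of recently failed MTUs, capped at the largest of them so that one pathological path eventually ages out. One plausible shape for that state (the window size of 3 is an assumption):

#include <algorithm>
#include <array>
#include <cstddef>

struct mtu_restriction
{
	std::array<int, 3> history;
	std::size_t idx = 0;

	mtu_restriction() { history.fill(65536); }

	// record an MTU that just caused a connection to time out
	void restrict_mtu(int const mtu)
	{
		history[idx] = mtu;
		idx = (idx + 1) % history.size();
	}

	// the cap applied to new sockets
	int restrict_mtu() const
	{ return *std::max_element(history.begin(), history.end()); }
};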
void utp_socket_manager::send_packet(udp::endpoint const& ep, char const* p
@ -159,12 +162,18 @@ namespace libtorrent
#ifdef TORRENT_HAS_DONT_FRAGMENT
error_code tmp;
if (flags & utp_socket_manager::dont_fragment)
{
m_sock.set_option(libtorrent::dont_fragment(true), tmp);
TORRENT_ASSERT_VAL(!tmp, tmp.message());
}
#endif
m_sock.send(ep, p, len, ec);
#ifdef TORRENT_HAS_DONT_FRAGMENT
if (flags & utp_socket_manager::dont_fragment)
{
m_sock.set_option(libtorrent::dont_fragment(false), tmp);
TORRENT_ASSERT_VAL(!tmp, tmp.message());
}
#endif
}

View File

@ -65,9 +65,8 @@ static struct utp_logger
FILE* utp_log_file;
mutex utp_log_mutex;
utp_logger() : utp_log_file(0)
{
utp_log_file = NULL;
utp_logger() : utp_log_file(NULL) {
utp_log_file = fopen("utp.log", "w+");
}
~utp_logger()
{
@ -806,7 +805,8 @@ void utp_socket_impl::update_mtu_limits()
{
INVARIANT_CHECK;
TORRENT_ASSERT(m_mtu_floor <= m_mtu_ceiling);
if (m_mtu_floor > m_mtu_ceiling) m_mtu_floor = m_mtu_ceiling;
m_mtu = (m_mtu_floor + m_mtu_ceiling) / 2;
if ((m_cwnd >> 16) < m_mtu) m_cwnd = boost::int64_t(m_mtu) << 16;
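update_mtu_limits() is the contraction step of a binary search over packet sizes: the call sites raise the floor when a probe is acked and lower the ceiling when a probe is lost, and this function picks the midpoint as the next probe size. A folded-together sketch of one search round (the real code updates the floor and ceiling at the call sites):

// one verdict on a probe of size `mtu` halves the remaining search window
void bisect_mtu(int& mtu, int& floor, int& ceiling, bool const probe_arrived)
{
	if (probe_arrived) floor = mtu;
	else ceiling = mtu;
	if (floor > ceiling) floor = ceiling;
	mtu = (floor + ceiling) / 2;
}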
@ -1370,6 +1370,7 @@ void utp_socket_impl::send_syn()
p->size = sizeof(utp_header);
p->header_size = sizeof(utp_header);
p->num_transmissions = 0;
p->mtu_probe = false;
#ifdef TORRENT_DEBUG
p->num_fast_resend = 0;
#endif
@ -1492,6 +1493,12 @@ void utp_socket_impl::send_reset(utp_header const* ph)
error_code ec;
m_sm->send_packet(udp::endpoint(m_remote_address, m_port)
, reinterpret_cast<char const*>(&h), sizeof(h), ec);
if (ec)
{
UTP_LOGV("%8p: socket error: %s\n"
, static_cast<void*>(this)
, ec.message().c_str());
}
}
std::size_t utp_socket_impl::available() const
@ -1755,7 +1762,7 @@ bool utp_socket_impl::send_pkt(int flags)
INVARIANT_CHECK;
#endif
bool force = (flags & pkt_ack) || (flags & pkt_fin);
bool const force = (flags & pkt_ack) || (flags & pkt_fin);
// TORRENT_ASSERT(m_state != UTP_STATE_FIN_SENT || (flags & pkt_ack));
@ -1794,14 +1801,29 @@ bool utp_socket_impl::send_pkt(int flags)
if (sack > 32) sack = 32;
}
boost::uint32_t close_reason = m_close_reason;
boost::uint32_t const close_reason = m_close_reason;
int header_size = sizeof(utp_header)
// MTU DISCOVERY
// under these conditions, the next packet we send should be an MTU probe.
// MTU probes get to use the mid-point packet size, whereas other packets
// use a conservative packet size of the largest known to work. The reason
// for the cwnd condition is to make sure the probe is surrounded by non-
// probes, to be able to distinguish a loss of the probe vs. just loss in
// general.
bool const mtu_probe = (m_mtu_seq == 0
&& m_write_buffer_size >= m_mtu_floor * 3
&& m_seq_nr != 0
&& (m_cwnd >> 16) > m_mtu_floor * 3);
int const header_size = sizeof(utp_header)
+ (sack ? sack + 2 : 0)
+ (close_reason ? 6 : 0);
int payload_size = m_write_buffer_size;
if (m_mtu - header_size < payload_size)
payload_size = m_mtu - header_size;
// for non MTU-probes, use the conservative packet size
int const effective_mtu = mtu_probe ? m_mtu : m_mtu_floor;
int payload_size = (std::min)(m_write_buffer_size
, effective_mtu - header_size);
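The probe gate above can be written out as a self-contained predicate (the parameter names mirror the fields in the surrounding code; this is a sketch, not the actual function):

#include <cstdint>

// probe only when no probe is outstanding (m_mtu_seq == 0), we're past
// the SYN (m_seq_nr != 0), and both the send buffer and the congestion
// window leave room for regular packets on either side of the probe
bool should_send_mtu_probe(int const mtu_seq, int const seq_nr
	, int const write_buffer_size, int const mtu_floor
	, std::int64_t const cwnd_fixed_point)
{
	return mtu_seq == 0
		&& write_buffer_size >= mtu_floor * 3
		&& seq_nr != 0
		&& (cwnd_fixed_point >> 16) > mtu_floor * 3;
}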
// if we have one MSS worth of data, make sure it fits in our
// congestion window and the advertized receive window from
@ -1828,11 +1850,11 @@ bool utp_socket_impl::send_pkt(int flags)
#if TORRENT_UTP_LOG
UTP_LOGV("%8p: skipping send seq_nr:%d ack_nr:%d "
"id:%d target:%s header_size:%d error:%s send_buffer_size:%d cwnd:%d "
"adv_wnd:%d in-flight:%d mtu:%d\n"
"adv_wnd:%d in-flight:%d mtu:%d effective-mtu:%d\n"
, static_cast<void*>(this), int(m_seq_nr), int(m_ack_nr)
, m_send_id, print_endpoint(udp::endpoint(m_remote_address, m_port)).c_str()
, header_size, m_error.message().c_str(), m_write_buffer_size, int(m_cwnd >> 16)
, m_adv_wnd, m_bytes_in_flight, m_mtu);
, m_adv_wnd, m_bytes_in_flight, m_mtu, effective_mtu);
#endif
return false;
}
@ -1876,8 +1898,8 @@ bool utp_socket_impl::send_pkt(int flags)
// need to keep the packet around (in the outbuf)
if (payload_size)
{
p = static_cast<packet*>(malloc(sizeof(packet) + m_mtu));
p->allocated = m_mtu;
p = static_cast<packet*>(malloc(sizeof(packet) + effective_mtu));
p->allocated = effective_mtu;
buf_holder.reset(reinterpret_cast<char*>(p));
m_sm->inc_stats_counter(counters::utp_payload_pkts_out);
@ -1946,9 +1968,9 @@ bool utp_socket_impl::send_pkt(int flags)
else
sack = 0;
boost::int32_t size_left = p->allocated - p->size;
TORRENT_ASSERT(size_left > 0);
size_left = (std::min)(size_left, m_write_buffer_size);
boost::int32_t const size_left = (std::min)(p->allocated - p->size
, m_write_buffer_size);
write_payload(p->buf + p->size, size_left);
p->size += size_left;
@ -2001,9 +2023,9 @@ bool utp_socket_impl::send_pkt(int flags)
// outstanding packet is acked, we'll send this
// payload
UTP_LOGV("%8p: NAGLE not enough payload send_buffer_size:%d cwnd:%d "
"adv_wnd:%d in-flight:%d mtu:%d\n"
"adv_wnd:%d in-flight:%d mtu:%d effective_mtu:%d\n"
, static_cast<void*>(this), m_write_buffer_size, int(m_cwnd >> 16)
, m_adv_wnd, m_bytes_in_flight, m_mtu);
, m_adv_wnd, m_bytes_in_flight, m_mtu, effective_mtu);
TORRENT_ASSERT(m_nagle_packet == NULL);
TORRENT_ASSERT(h->seq_nr == m_seq_nr);
m_nagle_packet = p;
@ -2011,10 +2033,9 @@ bool utp_socket_impl::send_pkt(int flags)
return false;
}
// MTU DISCOVERY
if (m_mtu_seq == 0
&& p->size > m_mtu_floor
&& m_seq_nr != 0)
// for ST_STATE packets, payload size is 0. Such packets do not have unique
// sequence numbers and should never be used as mtu probes
if ((mtu_probe || p->mtu_probe) && payload_size > 0)
{
p->mtu_probe = true;
m_mtu_seq = m_seq_nr;
@ -2063,7 +2084,9 @@ bool utp_socket_impl::send_pkt(int flags)
if (ec == error::message_size)
{
#if TORRENT_UTP_LOG
UTP_LOGV("%8p: error sending packet: %s\n", static_cast<void*>(this), ec.message().c_str());
UTP_LOGV("%8p: error sending packet: %s\n"
, static_cast<void*>(this)
, ec.message().c_str());
#endif
// if we fail even though this is not a probe, we're screwed
// since we'd have to repacketize
@ -2074,8 +2097,7 @@ bool utp_socket_impl::send_pkt(int flags)
// resend the packet immediately without
// it being an MTU probe
p->mtu_probe = false;
if (m_mtu_seq == m_ack_nr)
m_mtu_seq = 0;
m_mtu_seq = 0;
ec.clear();
#if TORRENT_UTP_LOG
@ -2124,6 +2146,11 @@ bool utp_socket_impl::send_pkt(int flags)
#endif
TORRENT_ASSERT(h->seq_nr == m_seq_nr);
// 0 is a special sequence number, since it's also used as "uninitialized".
// we never send an mtu probe for sequence number 0
TORRENT_ASSERT(p->mtu_probe == (m_seq_nr == m_mtu_seq)
|| m_seq_nr == 0);
// release the buffer, we're saving it in the circular
// buffer of outgoing packets
buf_holder.release();
@ -2290,10 +2317,15 @@ bool utp_socket_impl::resend_packet(packet* p, bool fast_resend)
return !m_stalled;
}
void utp_socket_impl::experienced_loss(int seq_nr)
void utp_socket_impl::experienced_loss(int const seq_nr)
{
INVARIANT_CHECK;
// the window size could go below one MSS here; if it does,
// we'll get a timeout in about one second
m_sm->inc_stats_counter(counters::utp_packet_loss);
// since loss often comes in bursts, we only cut the
// window in half once per RTT. This is implemented
// by limiting which packets can cause us to cut the
@ -2319,11 +2351,6 @@ void utp_socket_impl::experienced_loss(int seq_nr)
m_slow_start = false;
UTP_LOGV("%8p: experienced loss, slow_start -> 0\n", static_cast<void*>(this));
}
// the window size could go below one MSS here; if it does,
// we'll get a timeout in about one second
m_sm->inc_stats_counter(counters::utp_packet_loss);
}
void utp_socket_impl::set_state(int s)
@ -2484,7 +2511,7 @@ bool utp_socket_impl::cancel_handlers(error_code const& ec, bool kill)
TORRENT_ASSERT(ec);
bool ret = m_read_handler || m_write_handler || m_connect_handler;
// calling the callbacks with m_userdata being 0 will just crash
TORRENT_ASSERT((ret && bool(m_userdata)) || !ret);
@ -2665,11 +2692,9 @@ void utp_socket_impl::init_mtu(int link_mtu, int utp_mtu)
// set the ceiling to what we found out from the interface
m_mtu_ceiling = utp_mtu;
// however, start the search from a more conservative MTU
int overhead = link_mtu - utp_mtu;
m_mtu = TORRENT_ETHERNET_MTU - overhead;
// start in the middle of the PMTU search space
m_mtu = (m_mtu_ceiling + m_mtu_floor) / 2;
if (m_mtu > m_mtu_ceiling) m_mtu = m_mtu_ceiling;
if (m_mtu_floor > utp_mtu) m_mtu_floor = utp_mtu;
// if the window size is smaller than one packet size
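A worked example of where this midpoint start leads, using the bisect_mtu sketch from the update_mtu_limits() hunk above (all numbers assumed):

// floor = 576, ceiling = 1452 -> first probe: (576 + 1452) / 2 = 1014
// probe acked -> floor = 1014, next probe: (1014 + 1452) / 2 = 1233
// probe lost  -> ceiling = 1233, next probe: (1014 + 1233) / 2 = 1123
// each verdict halves the window, so the search converges in O(log n)
// probes instead of creeping up from an ethernet-sized first guess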
@ -2886,8 +2911,13 @@ bool utp_socket_impl::incoming_packet(boost::uint8_t const* buf, int size
// if we get an ack for the same sequence number as
// was last ACKed, and we have outstanding packets,
// it counts as a duplicate ack
if (ph->ack_nr == m_acked_seq_nr && m_outbuf.size())
// it counts as a duplicate ack. The reason not to count ST_DATA packets
// as duplicate ACKs is that we may be receiving a stream of them
// regardless of our outgoing traffic, which makes their ACK number not
// indicative of a dropped packet
if (ph->ack_nr == m_acked_seq_nr
&& m_outbuf.size()
&& ph->get_type() == ST_STATE)
{
++m_duplicate_acks;
}
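The duplicate-ACK test now has three legs; as a self-contained predicate (a sketch mirroring the condition above, not the actual code):

#include <cstddef>

// only pure ST_STATE packets count as duplicate ACKs: ST_DATA arrivals
// are driven by the remote's send rate, so their ACK numbers say nothing
// about whether one of our packets was dropped
bool is_duplicate_ack(int const ack_nr, int const acked_seq_nr
	, std::size_t const outstanding_packets, bool const is_state_packet)
{
	return ack_nr == acked_seq_nr
		&& outstanding_packets > 0
		&& is_state_packet;
}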
@ -3548,6 +3578,15 @@ void utp_socket_impl::tick(time_point now)
if (m_outbuf.size()) ++m_num_timeouts;
UTP_LOGV("%8p: timeout num-timeouts: %d max-resends: %d confirmed: %d "
" acked-seq-num: %d mtu-seq: %d\n"
, static_cast<void*>(this)
, m_num_timeouts
, m_sm->num_resends()
, m_confirmed
, m_acked_seq_nr
, m_mtu_seq);
// a socket that has not been confirmed to actually have a live remote end
// (the IP may have been spoofed) fails on the first timeout. If we had
// heard anything from this peer, it would have been confirmed.
@ -3590,7 +3629,7 @@ void utp_socket_impl::tick(time_point now)
m_timeout = now + milliseconds(packet_timeout());
UTP_LOGV("%8p: timeout resetting cwnd:%d\n"
UTP_LOGV("%8p: resetting cwnd:%d\n"
, static_cast<void*>(this), int(m_cwnd >> 16));
// we dropped all packets, that includes the mtu probe
@ -3606,7 +3645,7 @@ void utp_socket_impl::tick(time_point now)
// we're very likely to have an ssthres set, which will make us leave
// slow start before inducing more delay or loss.
m_slow_start = true;
UTP_LOGV("%8p: timeout slow_start -> 1\n", static_cast<void*>(this));
UTP_LOGV("%8p: slow_start -> 1\n", static_cast<void*>(this));
// we need to go one past m_seq_nr to cover the case
// where we just sent a SYN packet and then adjusted for
@ -3639,6 +3678,16 @@ void utp_socket_impl::tick(time_point now)
, static_cast<void*>(this), p->num_transmissions, socket_state_names[m_state]);
#endif
if (p->size > m_mtu_floor)
{
// the packet that caused the connection to fail was an mtu probe
// (note that the mtu_probe field won't be set at this point because
// it's cleared when the packet is re-sent). This suggests that
// perhaps our network throws away oversized packets without
// fragmenting them. Tell the socket manager to be more conservative
// about the MTU ceiling in the future
m_sm->restrict_mtu(m_mtu);
}
// the connection is dead
m_error = boost::asio::error::timed_out;
set_state(UTP_STATE_ERROR_WAIT);

View File

@ -363,9 +363,9 @@ void wait_for_downloading(lt::session& ses, char const* name)
} while (a);
if (!downloading_done)
{
fprintf(stderr, "did not receive a state_changed_alert indicating "
fprintf(stderr, "%s: did not receive a state_changed_alert indicating "
"the torrent is downloading. waited: %d ms\n"
, int(total_milliseconds(clock_type::now() - start)));
, name, int(total_milliseconds(clock_type::now() - start)));
}
}

View File

@ -71,7 +71,7 @@ struct test_storage_impl : storage_interface
virtual void release_files(storage_error& ec) TORRENT_OVERRIDE {}
virtual void rename_file(int index, std::string const& new_filename
, storage_error& ec) TORRENT_OVERRIDE {}
virtual void delete_files(storage_error& ec) TORRENT_OVERRIDE {}
virtual void delete_files(int, storage_error& ec) TORRENT_OVERRIDE {}
#ifndef TORRENT_NO_DEPRECATE
virtual void finalize_file(int, storage_error&) TORRENT_OVERRIDE {}
#endif

View File

@ -356,7 +356,7 @@ void test_remove(std::string const& test_path, bool unbuffered)
, combine_path("_folder3", "test5.tmp"))), &st, ec);
TEST_EQUAL(st.file_size, 8);
s->delete_files(se);
s->delete_files(session::delete_files, se);
if (se) print_error("delete_files", 0, se.ec);
if (se)