Merge pull request #545 from arvidn/zero-cache-perf-1.1

allow each peer to have at least 2 allocated disk blocks at any given time
Arvid Norberg 2016-03-16 08:14:02 -04:00
commit 4343bb911c
9 changed files with 51 additions and 67 deletions

View File

@@ -135,7 +135,7 @@ namespace libtorrent
 	// of buffers in use drops below the low watermark,
 	// we start calling these functions back
 	// TODO: try to remove the observers, only using the async_allocate handlers
-	std::vector<boost::shared_ptr<disk_observer> > m_observers;
+	std::vector<boost::weak_ptr<disk_observer> > m_observers;
 	// these handlers are executed when a new buffer is available
 	std::vector<handler_t> m_handlers;
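
The point of the shared_ptr to weak_ptr switch is that the buffer pool no longer keeps its observers (peer connections, in practice) alive just because they are waiting for a buffer. A minimal sketch of the pattern, using std:: smart pointers for brevity and made-up names (the real code uses boost::weak_ptr and libtorrent's disk_observer interface):

#include <iostream>
#include <memory>
#include <vector>

// stand-in for the disk_observer interface
struct observer
{
	virtual void on_disk() = 0;
	virtual ~observer() {}
};

struct pool
{
	// weak_ptr: the pool can notify observers that are still alive,
	// but it does not extend their lifetime
	std::vector<std::weak_ptr<observer>> m_observers;

	void subscribe(std::shared_ptr<observer> o)
	{ m_observers.push_back(o); }

	void notify()
	{
		for (auto const& w : m_observers)
		{
			// lock() yields an empty pointer if the observer is gone
			if (auto o = w.lock()) o->on_disk();
		}
		m_observers.clear();
	}
};

struct peer : observer
{
	void on_disk() override { std::cout << "unblocked\n"; }
};

int main()
{
	pool p;
	auto pr = std::make_shared<peer>();
	p.subscribe(pr);
	pr.reset();  // the peer goes away before the callback fires
	p.notify();  // safe: the dead observer is simply skipped
}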

View File

@@ -136,11 +136,7 @@ namespace libtorrent
 	in_progress = 0x20,
 	// turns into file::coalesce_buffers in the file operation
-	coalesce_buffers = 0x40,
-	// the disk cache was enabled when this job was issued, it should use
-	// the disk cache once it's handled by a disk thread
-	use_disk_cache = 0x80
+	coalesce_buffers = 0x40
 };
 // for write jobs, returns true if its block

View File

@ -1,6 +1,6 @@
/* /*
Copyright (c) 2007-2016, Arvid Norberg Copyright (c) 2007-2016, Arvid Norberg, Steven Siloti
All rights reserved. All rights reserved.
Redistribution and use in source and binary forms, with or without Redistribution and use in source and binary forms, with or without

View File

@@ -259,14 +259,16 @@ namespace libtorrent
 	// passes the hash check, it is taken out of parole mode.
 	use_parole_mode,
-	// enable and disable caching of read blocks and blocks to be written
-	// to disk respsectively. the purpose of the read cache is partly
-	// read-ahead of requests but also to avoid reading blocks back from
-	// the disk multiple times for popular pieces. the write cache purpose
-	// is to hold off writing blocks to disk until they have been hashed,
-	// to avoid having to read them back in again.
+	// enable and disable caching of blocks read from disk. the purpose of
+	// the read cache is partly read-ahead of requests but also to avoid
+	// reading blocks back from the disk multiple times for popular
+	// pieces.
 	use_read_cache,
+#ifndef TORRENT_NO_DEPRECATED
 	use_write_cache,
+#else
+	deprecated7,
+#endif
 	// this will make the disk cache never flush a write piece if it would
 	// cause is to have to re-read it once we want to calculate the piece
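
From the client side, the read cache is still controlled through settings_pack, while use_write_cache only remains visible in builds without TORRENT_NO_DEPRECATED. A rough usage sketch against the 1.1-era API (error handling and session configuration details omitted):

#include <libtorrent/session.hpp>
#include <libtorrent/settings_pack.hpp>

int main()
{
	libtorrent::settings_pack pack;
	pack.set_bool(libtorrent::settings_pack::use_read_cache, true);
#ifndef TORRENT_NO_DEPRECATED
	// deprecated by this change; blocks are no longer held back in a
	// write cache, so this setting has no effect going forward
	pack.set_bool(libtorrent::settings_pack::use_write_cache, false);
#endif
	libtorrent::session ses(pack);
}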

View File

@@ -64,8 +64,10 @@ POSSIBILITY OF SUCH DAMAGE.
 namespace libtorrent
 {
+namespace {
 	// this is posted to the network thread
-	static void watermark_callback(std::vector<boost::shared_ptr<disk_observer> >* cbs
+	void watermark_callback(std::vector<boost::weak_ptr<disk_observer> >* cbs
 		, std::vector<disk_buffer_pool::handler_t>* handlers)
 	{
 		if (handlers)
@@ -78,13 +80,18 @@ namespace libtorrent
 		if (cbs != NULL)
 		{
-			for (std::vector<boost::shared_ptr<disk_observer> >::iterator i = cbs->begin()
+			for (std::vector<boost::weak_ptr<disk_observer> >::iterator i = cbs->begin()
 				, end(cbs->end()); i != end; ++i)
-				(*i)->on_disk();
+			{
+				boost::shared_ptr<disk_observer> o = i->lock();
+				if (o) o->on_disk();
+			}
 			delete cbs;
 		}
 	}
+} // anonymous namespace
 	disk_buffer_pool::disk_buffer_pool(int block_size, io_service& ios
 		, boost::function<void()> const& trigger_trim)
 		: m_block_size(block_size)
@@ -183,7 +190,7 @@ namespace libtorrent
 	{
 		l.unlock();
 		m_ios.post(boost::bind(&watermark_callback
-			, static_cast<std::vector<boost::shared_ptr<disk_observer> >*>(NULL)
+			, static_cast<std::vector<boost::weak_ptr<disk_observer> >*>(NULL)
 			, slice));
 		return;
 	}
@@ -195,13 +202,13 @@ namespace libtorrent
 	{
 		l.unlock();
 		m_ios.post(boost::bind(&watermark_callback
-			, static_cast<std::vector<boost::shared_ptr<disk_observer> >*>(NULL)
+			, static_cast<std::vector<boost::weak_ptr<disk_observer> >*>(NULL)
 			, handlers));
 		return;
 	}
-	std::vector<boost::shared_ptr<disk_observer> >* cbs
-		= new std::vector<boost::shared_ptr<disk_observer> >();
+	std::vector<boost::weak_ptr<disk_observer> >* cbs
+		= new std::vector<boost::weak_ptr<disk_observer> >();
 	m_observers.swap(*cbs);
 	l.unlock();
 	m_ios.post(boost::bind(&watermark_callback, cbs, handlers));
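
The handoff of the observer list to the network thread is unchanged: the vector is allocated on the heap, swapped out while the mutex is held, posted to the io_service, and deleted by the handler that consumes it. A generic sketch of that ownership-transfer pattern, with hypothetical names and standalone Asio (not the real disk_buffer_pool types):

#include <boost/asio/io_service.hpp>
#include <boost/bind.hpp>
#include <iostream>
#include <vector>

// runs on the "network thread": consumes the list, then frees it
void drain(std::vector<int>* items)
{
	for (std::size_t i = 0; i < items->size(); ++i)
		std::cout << (*items)[i] << "\n";
	delete items;  // ownership was transferred by post() below
}

int main()
{
	boost::asio::io_service ios;

	std::vector<int> pending;  // normally only touched under a mutex
	pending.push_back(1);
	pending.push_back(2);

	// swap into a heap-allocated copy so the lock can be released
	// before posting, and the producer can keep accumulating entries
	std::vector<int>* handoff = new std::vector<int>();
	handoff->swap(pending);

	ios.post(boost::bind(&drain, handoff));
	ios.run();
}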

View File

@@ -1,6 +1,6 @@
 /*
-Copyright (c) 2007-2016, Arvid Norberg
+Copyright (c) 2007-2016, Arvid Norberg, Steven Siloti
 All rights reserved.
 Redistribution and use in source and binary forms, with or without
@@ -1208,14 +1208,6 @@ namespace libtorrent
 	int disk_io_thread::do_read(disk_io_job* j, jobqueue_t& completed_jobs)
 	{
-		if ((j->flags & disk_io_job::use_disk_cache) == 0)
-		{
-			// we're not using a cache. This is the simple path
-			// just read straight from the file
-			int ret = do_uncached_read(j);
-			return ret;
-		}
 		int block_size = m_disk_cache.block_size();
 		int piece_size = j->storage->files()->piece_size(j->piece);
 		int blocks_in_piece = (piece_size + block_size - 1) / block_size;
@@ -1232,26 +1224,8 @@ namespace libtorrent
 		cached_piece_entry* pe = m_disk_cache.find_piece(j);
 		if (pe == NULL)
 		{
-			// this isn't supposed to happen. The piece is supposed
-			// to be allocated when the read job is posted to the
-			// queue, and have 'outstanding_read' set to 1
-			TORRENT_ASSERT(false);
-			int cache_state = (j->flags & disk_io_job::volatile_read)
-				? cached_piece_entry::volatile_read_lru
-				: cached_piece_entry::read_lru1;
-			pe = m_disk_cache.allocate_piece(j, cache_state);
-			if (pe == NULL)
-			{
-				j->error.ec = error::no_memory;
-				j->error.operation = storage_error::alloc_cache_piece;
-				m_disk_cache.free_iovec(iov, iov_len);
-				return -1;
-			}
-#if TORRENT_USE_ASSERTS
-			pe->piece_log.push_back(piece_log_t(piece_log_t::set_outstanding_jobs));
-#endif
-			pe->outstanding_read = 1;
+			l.unlock();
+			return do_uncached_read(j);
 		}
 		TORRENT_PIECE_ASSERT(pe->outstanding_read == 1, pe);
@@ -1633,7 +1607,6 @@ namespace libtorrent
 			j->error.operation = storage_error::read;
 			return 0;
 		}
-		j->flags |= disk_io_job::use_disk_cache;
 		if (pe->outstanding_read)
 		{
 			TORRENT_PIECE_ASSERT(j->piece == pe->piece, pe);
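
The net effect on the read path: with the use_disk_cache flag gone, a read job that finds no piece in the cache now falls back to an uncached read instead of allocating a cache entry on the spot. A simplified, hypothetical outline of that control flow (not the real do_read signature or cache types):

#include <map>
#include <vector>

// hypothetical stand-ins for the cache entry and the uncached path
struct piece_data { std::vector<char> blocks; };
using cache = std::map<int, piece_data>;

std::vector<char> read_uncached(int piece)
{
	// pretend to read straight from the file on disk
	return std::vector<char>(16 * 1024, char(piece));
}

std::vector<char> read_piece(cache& c, int piece)
{
	auto it = c.find(piece);
	if (it == c.end())
	{
		// cache miss: no cache entry is allocated any more,
		// the block is simply read directly
		return read_uncached(piece);
	}
	// cache hit: serve from the cached blocks
	return it->second.blocks;
}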

View File

@@ -2611,7 +2611,13 @@ namespace libtorrent
 			return;
 		}
-		if (exceeded)
+		// every peer is entitled to have two disk blocks allocated at any given
+		// time, regardless of whether the cache size is exceeded or not. If this
+		// was not the case, when the cache size setting is very small, most peers
+		// would be blocked most of the time, because the disk cache would
+		// continously be in exceeded state. Only rarely would it actually drop
+		// down to 0 and unblock all peers.
+		if (exceeded && m_outstanding_writing_bytes > 0)
 		{
 			if ((m_channel_state[download_channel] & peer_info::bw_disk) == 0)
 				m_counters.inc_stats_counter(counters::num_peers_down_disk);
@@ -4541,7 +4547,9 @@ namespace libtorrent
 			return false;
 		}
-		if (exceeded)
+		// to understand why m_outstanding_writing_bytes is here, see comment by
+		// the other call to allocate_disk_buffer()
+		if (exceeded && m_outstanding_writing_bytes > 0)
 		{
 #ifndef TORRENT_DISABLE_LOGGING
 			peer_log(peer_log_alert::info, "DISK", "exceeded disk buffer watermark");
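
The new condition only throttles a peer on the disk channel while it actually has unwritten bytes in flight; a peer with nothing outstanding may always take a buffer, even when the cache is over its limit. A small sketch of that decision, with hypothetical function and parameter names:

#include <iostream>

// returns true if the peer should be blocked from requesting more disk
// buffers. 'exceeded' comes from the buffer pool; the byte count is the
// peer's own unflushed write bytes
bool should_block_on_disk(bool exceeded, int outstanding_writing_bytes)
{
	// even when the pool is exceeded, a peer with no outstanding
	// writes keeps its minimal allowance and is not blocked
	return exceeded && outstanding_writing_bytes > 0;
}

int main()
{
	std::cout << should_block_on_disk(true, 0) << "\n";          // 0: not blocked
	std::cout << should_block_on_disk(true, 16 * 1024) << "\n";  // 1: blocked
}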

View File

@@ -1153,17 +1153,6 @@ namespace aux {
 			TORRENT_ASSERT_VAL(conn == int(m_connections.size()) + 1, conn);
 		}
-		m_download_rate.close();
-		m_upload_rate.close();
-		// #error closing the udp socket here means that
-		// the uTP connections cannot be closed gracefully
-		m_udp_socket.close();
-		m_external_udp_port = 0;
-#ifdef TORRENT_USE_OPENSSL
-		m_ssl_udp_socket.close();
-#endif
 		// we need to give all the sockets an opportunity to actually have their handlers
 		// called and cancelled before we continue the shutdown. This is a bit
 		// complicated, if there are no "undead" peers, it's safe tor resume the
@@ -1179,6 +1168,15 @@ namespace aux {
 	void session_impl::abort_stage2()
 	{
+		m_download_rate.close();
+		m_upload_rate.close();
+		m_udp_socket.close();
+		m_external_udp_port = 0;
+#ifdef TORRENT_USE_OPENSSL
+		m_ssl_udp_socket.close();
+#endif
 		// it's OK to detach the threads here. The disk_io_thread
 		// has an internal counter and won't release the network
 		// thread until they're all dead (via m_work).
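
Moving the rate-limiter and UDP socket teardown into abort_stage2() keeps the sockets usable during the first stage, so in-flight exchanges (such as graceful uTP closes) can still complete before the resources go away. A generic two-stage sketch of that ordering with standalone Asio and a hypothetical structure (not session_impl itself):

#include <boost/asio.hpp>
#include <iostream>

// hypothetical two-stage shutdown: stage 1 stops initiating new work,
// stage 2 (posted to run later) closes the socket once pending handlers
// have had a chance to execute
struct service
{
	boost::asio::io_service& ios;
	boost::asio::ip::udp::socket sock;

	explicit service(boost::asio::io_service& i)
		: ios(i)
		, sock(i, boost::asio::ip::udp::endpoint(boost::asio::ip::udp::v4(), 0))
	{}

	void abort_stage1()
	{
		// stage 1: stop accepting new work, but keep the socket open so
		// outstanding operations can finish gracefully
		std::cout << "stage 1: draining\n";
		ios.post([this] { abort_stage2(); });
	}

	void abort_stage2()
	{
		// stage 2: now it is safe to tear the socket down
		std::cout << "stage 2: closing\n";
		boost::system::error_code ec;
		sock.close(ec);
	}
};

int main()
{
	boost::asio::io_service ios;
	service s(ios);
	s.abort_stage1();
	ios.run();
}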

View File

@@ -150,7 +150,7 @@ namespace libtorrent
 		SET(upnp_ignore_nonrouters, false, 0),
 		SET(use_parole_mode, true, 0),
 		SET(use_read_cache, true, 0),
-		SET(use_write_cache, true, 0),
+		DEPRECATED_SET(use_write_cache, true, 0),
 		SET(dont_flush_write_cache, false, 0),
 		SET(explicit_read_cache, false, 0),
 		SET(coalesce_reads, false, 0),
@@ -603,14 +603,14 @@ namespace libtorrent
 			&& std::find(callbacks.begin(), callbacks.end(), sa.fun) == callbacks.end())
 				callbacks.push_back(sa.fun);
 		}
 		for (std::vector<std::pair<boost::uint16_t, int> >::const_iterator i = pack->m_ints.begin()
 			, end(pack->m_ints.end()); i != end; ++i)
 		{
 			// disregard setting indices that are not int types
 			if ((i->first & settings_pack::type_mask) != settings_pack::int_type_base)
 				continue;
 			// ignore settings that are out of bounds
 			int index = i->first & settings_pack::index_mask;
 			if (index < 0 || index >= settings_pack::num_int_settings)
@@ -629,7 +629,7 @@ namespace libtorrent
 			// disregard setting indices that are not bool types
 			if ((i->first & settings_pack::type_mask) != settings_pack::bool_type_base)
 				continue;
 			// ignore settings that are out of bounds
 			int index = i->first & settings_pack::index_mask;
 			if (index < 0 || index >= settings_pack::num_bool_settings)
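
The loops above rely on each setting enum value packing a type tag and an index into one integer; the bounds check then guards against indices coming from a different build of the library. A small sketch of that encoding scheme, using mask values chosen for illustration rather than quoting libtorrent's actual constants:

#include <cstdint>
#include <iostream>

// illustrative encoding: high bits carry the type, low bits the index
// (the same idea as settings_pack::type_mask / index_mask)
enum : std::uint16_t
{
	type_mask      = 0xc000,
	index_mask     = 0x3fff,
	int_type_base  = 0x4000,
	bool_type_base = 0x8000
};

int main()
{
	std::uint16_t key = bool_type_base | 7;  // "bool setting number 7"

	if ((key & type_mask) == bool_type_base)
	{
		int index = key & index_mask;
		std::cout << "bool setting, index " << index << "\n";
	}
}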