/*

Copyright (c) 2007-2018, Arvid Norberg
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the distribution.
    * Neither the name of the author nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

*/

#include "libtorrent/config.hpp"
#include "libtorrent/disk_buffer_pool.hpp"
#include "libtorrent/assert.hpp"
#include "libtorrent/allocator.hpp"
#include "libtorrent/aux_/session_settings.hpp"
#include "libtorrent/io_service.hpp"
#include "libtorrent/disk_observer.hpp"
#include "libtorrent/platform_util.hpp" // for total_physical_ram
#include "libtorrent/disk_interface.hpp" // for default_block_size

#include "libtorrent/aux_/disable_warnings_push.hpp"

#ifdef TORRENT_BSD
#include <sys/sysctl.h>
#endif

#ifdef TORRENT_LINUX
#include <linux/unistd.h>
#endif

#include "libtorrent/aux_/disable_warnings_pop.hpp"

namespace libtorrent {

namespace {

	// this is posted to the network thread
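	// (via io_service::post) rather than invoked directly;
	// check_buffer_level() releases m_pool_mutex before posting, so the
	// observers' on_disk() handlers never run with the pool mutex held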
	void watermark_callback(std::vector<std::weak_ptr<disk_observer>> const& cbs)
	{
		for (auto const& i : cbs)
		{
			std::shared_ptr<disk_observer> o = i.lock();
			if (o) o->on_disk();
		}
	}

} // anonymous namespace

	disk_buffer_pool::disk_buffer_pool(io_service& ios
		, std::function<void()> const& trigger_trim)
		: m_in_use(0)
		, m_max_use(64)
		, m_low_watermark(std::max(m_max_use - 32, 0))
		, m_trigger_cache_trim(trigger_trim)
		, m_exceeded_max_size(false)
		, m_ios(ios)
	{
#if TORRENT_USE_ASSERTS
		m_magic = 0x1337;
		m_settings_set = false;
#endif
	}

	disk_buffer_pool::~disk_buffer_pool()
	{
		TORRENT_ASSERT(m_magic == 0x1337);
#if TORRENT_USE_ASSERTS
		m_magic = 0;
#endif
	}

	int disk_buffer_pool::num_to_evict(int const num_needed)
	{
		int ret = 0;

		std::unique_lock<std::mutex> l(m_pool_mutex);

		if (m_exceeded_max_size)
			ret = m_in_use - std::min(m_low_watermark, m_max_use - int(m_observers.size()) * 2);

		if (m_in_use + num_needed > m_max_use)
			ret = std::max(ret, m_in_use + num_needed - m_max_use);

		if (ret < 0) ret = 0;
		else if (ret > m_in_use) ret = m_in_use;
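
		// example: with m_exceeded_max_size set, m_in_use == 1024,
		// m_low_watermark == 992 and no registered observers, calling
		// num_to_evict(0) returns 1024 - 992 = 32 blocks to evict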

		return ret;
	}

	// checks to see if we're no longer exceeding the high watermark,
	// and if we're in fact below the low watermark. If so, we need to
	// post the notification messages to the peers that are waiting for
	// more buffers to receive data into
	void disk_buffer_pool::check_buffer_level(std::unique_lock<std::mutex>& l)
	{
		TORRENT_ASSERT(l.owns_lock());
		if (!m_exceeded_max_size || m_in_use > m_low_watermark) return;

		m_exceeded_max_size = false;

		std::vector<std::weak_ptr<disk_observer>> cbs;
		m_observers.swap(cbs);
		l.unlock();
		m_ios.post(std::bind(&watermark_callback, std::move(cbs)));
	}

#if TORRENT_USE_ASSERTS
	bool disk_buffer_pool::is_disk_buffer(char* buffer
		, std::unique_lock<std::mutex>& l) const
	{
		TORRENT_ASSERT(m_magic == 0x1337);
		TORRENT_ASSERT(l.owns_lock());
		TORRENT_UNUSED(l);

#if TORRENT_USE_INVARIANT_CHECKS
		return m_buffers_in_use.count(buffer) == 1;
#elif defined TORRENT_DEBUG_BUFFERS
		return page_in_use(buffer);
#else
		TORRENT_UNUSED(buffer);
		return true;
#endif
	}

	bool disk_buffer_pool::is_disk_buffer(char* buffer) const
	{
		std::unique_lock<std::mutex> l(m_pool_mutex);
		return is_disk_buffer(buffer, l);
	}
#endif

	char* disk_buffer_pool::allocate_buffer(char const* category)
	{
		std::unique_lock<std::mutex> l(m_pool_mutex);
		return allocate_buffer_impl(l, category);
	}

	// we allow allocating more blocks even after we exceed the max size,
	// but communicate back to the allocator (typically the peer_connection)
	// that we have exceeded the limit via the out-parameter "exceeded". The
	// caller is expected to honor this by not allocating any more buffers
	// until the disk_observer object (passed in as "o") is invoked, indicating
	// that there's more room in the pool now. This caps the amount of over-
	// allocation to one block per peer connection.
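	//
	// a minimal sketch of the expected caller-side protocol (the names
	// "obs" and "request_more" are hypothetical, for illustration only):
	//
	//   bool exceeded = false;
	//   char* buf = pool.allocate_buffer(exceeded, obs, "recv buffer");
	//   if (buf == nullptr) { /* hard failure, back off entirely */ }
	//   else if (exceeded) { /* use buf, but request no more data
	//       until obs->on_disk() is called */ }
	//   else { request_more(); }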
	char* disk_buffer_pool::allocate_buffer(bool& exceeded
		, std::shared_ptr<disk_observer> o, char const* category)
	{
		std::unique_lock<std::mutex> l(m_pool_mutex);
		char* ret = allocate_buffer_impl(l, category);
		if (m_exceeded_max_size)
		{
			exceeded = true;
			if (o) m_observers.push_back(o);
		}
		return ret;
	}

	// this function allocates buffers and
	// fills in the iovec array with the buffers
	int disk_buffer_pool::allocate_iovec(span<iovec_t> iov)
	{
		std::unique_lock<std::mutex> l(m_pool_mutex);
		for (auto& i : iov)
		{
			i = { allocate_buffer_impl(l, "pending read"), std::size_t(default_block_size)};
			if (i.data() == nullptr)
			{
				// uh oh. We failed to allocate the buffer!
				// we need to roll back and free all the buffers
				// we've already allocated
				for (auto j : iov)
				{
					if (j.data() == nullptr) break;
					char* buf = j.data();
					TORRENT_ASSERT(is_disk_buffer(buf, l));
					free_buffer_impl(buf, l);
					remove_buffer_in_use(buf);
				}
				return -1;
			}
		}
		return 0;
	}

	void disk_buffer_pool::free_iovec(span<iovec_t const> iov)
	{
		// TODO: perhaps we should sort the buffers here?
		std::unique_lock<std::mutex> l(m_pool_mutex);
		for (auto i : iov)
		{
			char* buf = i.data();
			TORRENT_ASSERT(is_disk_buffer(buf, l));
			free_buffer_impl(buf, l);
			remove_buffer_in_use(buf);
		}
		check_buffer_level(l);
	}

	char* disk_buffer_pool::allocate_buffer_impl(std::unique_lock<std::mutex>& l
		, char const*)
	{
		TORRENT_ASSERT(m_settings_set);
		TORRENT_ASSERT(m_magic == 0x1337);
		TORRENT_ASSERT(l.owns_lock());
		TORRENT_UNUSED(l);

		char* ret = page_malloc(default_block_size);

		if (ret == nullptr)
		{
			m_exceeded_max_size = true;
			m_trigger_cache_trim();
			return nullptr;
		}

		++m_in_use;

#if TORRENT_USE_INVARIANT_CHECKS
		try
		{
			TORRENT_ASSERT(m_buffers_in_use.count(ret) == 0);
			m_buffers_in_use.insert(ret);
		}
		catch (...)
		{
			free_buffer_impl(ret, l);
			return nullptr;
		}
#endif
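
		// once usage reaches the midpoint between the low watermark and
		// the hard ceiling, flag the pool as exceeded and ask the cache
		// to trim, so eviction starts before allocation actually fails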
		if (m_in_use >= m_low_watermark + (m_max_use - m_low_watermark)
			/ 2 && !m_exceeded_max_size)
		{
			m_exceeded_max_size = true;
			m_trigger_cache_trim();
		}

		TORRENT_ASSERT(is_disk_buffer(ret, l));
		return ret;
	}

	void disk_buffer_pool::free_multiple_buffers(span<char*> bufvec)
	{
		// sort the pointers in order to maximize cache hits
		std::sort(bufvec.begin(), bufvec.end());

		std::unique_lock<std::mutex> l(m_pool_mutex);
		for (char* buf : bufvec)
		{
			TORRENT_ASSERT(is_disk_buffer(buf, l));
			free_buffer_impl(buf, l);
			remove_buffer_in_use(buf);
		}

		check_buffer_level(l);
	}

	void disk_buffer_pool::free_buffer(char* buf)
	{
		std::unique_lock<std::mutex> l(m_pool_mutex);
		TORRENT_ASSERT(is_disk_buffer(buf, l));
		free_buffer_impl(buf, l);
		remove_buffer_in_use(buf);
		check_buffer_level(l);
	}

	void disk_buffer_pool::set_settings(aux::session_settings const& sett)
	{
		std::unique_lock<std::mutex> l(m_pool_mutex);

		int const cache_size = sett.get_int(settings_pack::cache_size);
		if (cache_size < 0)
		{
			std::int64_t phys_ram = total_physical_ram();
			if (phys_ram == 0) m_max_use = 1024;
			else
			{
				// this is the logic to calculate the automatic disk cache size
				// based on the amount of physical RAM.
				// The more physical RAM, the smaller portion of it is allocated
				// for the cache.

				// we take a 40th of everything exceeding 4 GiB
				// a 30th of everything exceeding 1 GiB
				// and a 20th of everything below a GiB

				constexpr std::int64_t gb = 1024 * 1024 * 1024;

				std::int64_t result = 0;
				if (phys_ram > 4 * gb)
				{
					result += (phys_ram - 4 * gb) / 40;
					phys_ram = 4 * gb;
				}
				if (phys_ram > 1 * gb)
				{
					result += (phys_ram - 1 * gb) / 30;
					phys_ram = 1 * gb;
				}
				result += phys_ram / 20;
				m_max_use = int(result / default_block_size);
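
				// worked example, assuming the default 16 KiB block size:
				// with 8 GiB of physical RAM this comes out to
				// 4 GiB / 40 + 3 GiB / 30 + 1 GiB / 20 ~= 256 MiB,
				// i.e. m_max_use ~= 16384 blocks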
			}

#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4127 ) /* warning C4127: conditional expression is constant */
#endif // _MSC_VER
			if (sizeof(void*) == 4)
#ifdef _MSC_VER
#pragma warning(pop)
#endif // _MSC_VER
			{
				// 32 bit builds should be capped below 2 GB of memory, even
				// when more actual ram is available, because we're still
				// constrained by the 32 bit virtual address space.
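				// the expression below works out to 1.5 GiB:
				// 2 * 1024 * 1024 * 3 / 4 * 1024 = 1610612736 bytes,
				// i.e. 98304 blocks of 16 KiB each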
				m_max_use = std::min(2 * 1024 * 1024 * 3 / 4 * 1024
					/ default_block_size, m_max_use);
			}
		}
		else
		{
			m_max_use = cache_size;
		}
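
		// the low watermark leaves headroom below the ceiling for
		// max_queued_disk_bytes' worth of blocks (at 0x4000 bytes,
		// i.e. 16 KiB per block), and always at least 16 blocks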
		m_low_watermark = m_max_use - std::max(16, sett.get_int(settings_pack::max_queued_disk_bytes) / 0x4000);
		if (m_low_watermark < 0) m_low_watermark = 0;

		if (m_in_use >= m_max_use && !m_exceeded_max_size)
		{
			m_exceeded_max_size = true;
			m_trigger_cache_trim();
		}

#if TORRENT_USE_ASSERTS
		m_settings_set = true;
#endif
	}

	void disk_buffer_pool::remove_buffer_in_use(char* buf)
	{
		TORRENT_UNUSED(buf);
#if TORRENT_USE_INVARIANT_CHECKS
		std::set<char*>::iterator i = m_buffers_in_use.find(buf);
		TORRENT_ASSERT(i != m_buffers_in_use.end());
		m_buffers_in_use.erase(i);
#endif
	}

	void disk_buffer_pool::free_buffer_impl(char* buf, std::unique_lock<std::mutex>& l)
	{
		TORRENT_ASSERT(buf);
		TORRENT_ASSERT(m_magic == 0x1337);
		TORRENT_ASSERT(m_settings_set);
		TORRENT_ASSERT(l.owns_lock());
		TORRENT_UNUSED(l);

		page_free(buf);

		--m_in_use;
	}

}