/*

Copyright (c) 2007-2018, Arvid Norberg, Steven Siloti
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the distribution.
    * Neither the name of the author nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

*/

#include "libtorrent/config.hpp"
#include "libtorrent/storage.hpp"
#include "libtorrent/disk_io_thread.hpp"
#include "libtorrent/disk_buffer_holder.hpp"
#include "libtorrent/aux_/alloca.hpp"
#include "libtorrent/aux_/throw.hpp"
#include "libtorrent/error_code.hpp"
#include "libtorrent/error.hpp"
#include "libtorrent/file_pool.hpp"
#include "libtorrent/torrent_info.hpp"
#include "libtorrent/platform_util.hpp"
#include "libtorrent/time.hpp"
#include "libtorrent/disk_buffer_pool.hpp"
#include "libtorrent/disk_io_job.hpp"
#include "libtorrent/alert_types.hpp"
#include "libtorrent/performance_counters.hpp"
#include "libtorrent/alert_manager.hpp"
#include "libtorrent/debug.hpp"
#include "libtorrent/units.hpp"
#include "libtorrent/hasher.hpp"
#include "libtorrent/aux_/array.hpp"
#include "libtorrent/aux_/scope_end.hpp"

#include <functional>

#include "libtorrent/aux_/disable_warnings_push.hpp"
#include <boost/variant/get.hpp>
#include "libtorrent/aux_/disable_warnings_pop.hpp"

#define DEBUG_DISK_THREAD 0

namespace libtorrent {

	char const* job_name(job_action_t const job);

}

#if DEBUG_DISK_THREAD
#include <cstdarg> // for va_list
#include <sstream>
#include <cstdio> // for vsnprintf

#define DLOG(...) debug_log(__VA_ARGS__)
#else
#define DLOG(...) do {} while(false)
#endif

namespace libtorrent {

#if TORRENT_USE_ASSERTS

#define TORRENT_PIECE_ASSERT(cond, piece) \
	do { if (!(cond)) { assert_print_piece(piece); assert_fail(#cond, __LINE__, __FILE__, __func__, nullptr); } } TORRENT_WHILE_0

#define TORRENT_PIECE_ASSERT_FAIL(piece) \
	do { assert_print_piece(piece); assert_fail("<unconditional>", __LINE__, __FILE__, __func__, nullptr); } TORRENT_WHILE_0

#else
#define TORRENT_PIECE_ASSERT(cond, piece) do {} TORRENT_WHILE_0
#define TORRENT_PIECE_ASSERT_FAIL(piece) do {} TORRENT_WHILE_0
#endif // TORRENT_USE_ASSERTS

namespace {

#if DEBUG_DISK_THREAD

	void debug_log(char const* fmt, ...)
	{
		static std::mutex log_mutex;
		static const time_point start = clock_type::now();

		// map thread IDs to low numbers
		static std::unordered_map<std::thread::id, int> thread_ids;

		std::thread::id const self = std::this_thread::get_id();

		std::unique_lock<std::mutex> l(log_mutex);
		auto it = thread_ids.insert({self, int(thread_ids.size())}).first;

		va_list v;
		va_start(v, fmt);

		char usr[2048];
		int len = std::vsnprintf(usr, sizeof(usr), fmt, v);

		static bool prepend_time = true;
		if (!prepend_time)
		{
			prepend_time = (usr[len-1] == '\n');
			fputs(usr, stderr);
			return;
		}
		va_end(v);
		char buf[2300];
		int const t = int(total_milliseconds(clock_type::now() - start));
		std::snprintf(buf, sizeof(buf), "\x1b[3%dm%05d: [%d] %s\x1b[0m"
			, (it->second % 7) + 1, t, it->second, usr);
		prepend_time = (usr[len-1] == '\n');
		fputs(buf, stderr);
	}

#endif // DEBUG_DISK_THREAD
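
	// translate a job's flags into the flags used when opening the file:
	// files are opened for random access unless the job asked for sequential
	// access, and buffer coalescing is passed through from the caller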
	open_mode_t file_flags_for_job(disk_io_job* j
		, bool const coalesce_buffers)
	{
		open_mode_t ret = open_mode_t{};
		if (!(j->flags & disk_interface::sequential_access)) ret |= open_mode::random_access;
		if (coalesce_buffers) ret |= open_mode::coalesce_buffers;
		return ret;
	}

	// the do_* functions can return this to indicate the disk
	// job did not complete immediately, and shouldn't be posted yet
	constexpr status_t defer_handler = static_cast<status_t>(200);

	// the job cannot be completed right now, put it back in the
	// queue and try again later
	constexpr status_t retry_job = static_cast<status_t>(201);
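
	// RAII guard that pins a cached_piece_entry by holding one piece_refcount
	// for the duration of a scope, so the piece is not freed while a thread is
	// still working on it. release() drops the reference early.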
	struct piece_refcount_holder
	{
		explicit piece_refcount_holder(cached_piece_entry* p) : m_pe(p)
		{ ++m_pe->piece_refcount; }
		~piece_refcount_holder()
		{
			if (!m_executed)
			{
				TORRENT_PIECE_ASSERT(m_pe->piece_refcount > 0, m_pe);
				--m_pe->piece_refcount;
			}
		}
		piece_refcount_holder(piece_refcount_holder const&) = delete;
		piece_refcount_holder& operator=(piece_refcount_holder const&) = delete;
		void release()
		{
			TORRENT_ASSERT(!m_executed);
			m_executed = true;
			TORRENT_PIECE_ASSERT(m_pe->piece_refcount > 0, m_pe);
			--m_pe->piece_refcount;
		}
	private:
		cached_piece_entry* m_pe;
		bool m_executed = false;
	};
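
	// RAII helper that unlocks a held lock for the duration of a scope and
	// re-acquires it on destruction. It is used to drop the cache mutex while
	// performing the actual (blocking) disk I/O.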
	template <typename Lock>
	struct scoped_unlocker_impl
	{
		explicit scoped_unlocker_impl(Lock& l) : m_lock(&l) { m_lock->unlock(); }
		~scoped_unlocker_impl() { if (m_lock) m_lock->lock(); }
		scoped_unlocker_impl(scoped_unlocker_impl&& rhs) noexcept : m_lock(rhs.m_lock)
		{ rhs.m_lock = nullptr; }
		scoped_unlocker_impl& operator=(scoped_unlocker_impl&& rhs) noexcept
		{
			if (&rhs == this) return *this;
			if (m_lock) m_lock->lock();
			m_lock = rhs.m_lock;
			rhs.m_lock = nullptr;
			return *this;
		}
	private:
		Lock* m_lock;
	};

	template <typename Lock>
	scoped_unlocker_impl<Lock> scoped_unlock(Lock& l)
	{ return scoped_unlocker_impl<Lock>(l); }

} // anonymous namespace

	constexpr disk_job_flags_t disk_interface::force_copy;
	constexpr disk_job_flags_t disk_interface::sequential_access;
	constexpr disk_job_flags_t disk_interface::volatile_read;
	constexpr disk_job_flags_t disk_interface::cache_hit;

	// ------- disk_io_thread ------
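
	// the disk thread drives two thread pools that share this object: one for
	// generic disk jobs and one dedicated to hashing. The block cache and the
	// stats counters passed in by the caller are wired up here as well.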
	disk_io_thread::disk_io_thread(io_service& ios, aux::session_settings const& sett, counters& cnt)
		: m_generic_io_jobs(*this)
		, m_generic_threads(m_generic_io_jobs, ios)
		, m_hash_io_jobs(*this)
		, m_hash_threads(m_hash_io_jobs, ios)
		, m_settings(sett)
		, m_disk_cache(ios, std::bind(&disk_io_thread::trigger_cache_trim, this))
		, m_stats_counters(cnt)
		, m_ios(ios)
	{
		settings_updated();
	}

	storage_interface* disk_io_thread::get_torrent(storage_index_t const storage)
	{
		TORRENT_ASSERT(m_magic == 0x1337);
		return m_torrents[storage].get();
	}

	std::vector<open_file_state> disk_io_thread::get_status(storage_index_t const st) const
	{
		return m_file_pool.get_status(st);
	}
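
	// torrent storages live in m_torrents and are addressed by index. Slots of
	// removed torrents are recycled via m_free_slots, so an index stays valid
	// for as long as its storage is alive.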
	storage_holder disk_io_thread::new_torrent(storage_constructor_type sc
		, storage_params p, std::shared_ptr<void> const& owner)
	{
		std::unique_ptr<storage_interface> storage(sc(p, m_file_pool));
		storage->set_owner(owner);

		TORRENT_ASSERT(storage);
		if (m_free_slots.empty())
		{
			// make sure there's always space in here to add another free slot.
			// stopping a torrent should never fail because it needs to allocate memory
			m_free_slots.reserve(m_torrents.size() + 1);
			storage_index_t const idx = m_torrents.end_index();
			m_torrents.emplace_back(std::move(storage));
			m_torrents.back()->set_storage_index(idx);
			return storage_holder(idx, *this);
		}
		else
		{
			storage_index_t const idx = m_free_slots.back();
			m_free_slots.pop_back();
			(m_torrents[idx] = std::move(storage))->set_storage_index(idx);
			return storage_holder(idx, *this);
		}
	}

	void disk_io_thread::remove_torrent(storage_index_t const idx)
	{
		auto& pos = m_torrents[idx];
		if (pos->dec_refcount() == 0)
		{
			pos.reset();
			m_free_slots.push_back(idx);
		}
	}

#if TORRENT_USE_ASSERTS
	disk_io_thread::~disk_io_thread()
	{
		DLOG("destructing disk_io_thread\n");

		TORRENT_ASSERT(m_magic == 0x1337);
		m_magic = 0xdead;
		TORRENT_ASSERT(m_generic_io_jobs.m_queued_jobs.empty());
		TORRENT_ASSERT(m_hash_io_jobs.m_queued_jobs.empty());
	}
#endif

	void disk_io_thread::abort(bool const wait)
	{
		DLOG("disk_io_thread::abort: (%d)\n", int(wait));

		// first make sure queued jobs have been submitted
		// otherwise the queue may not get processed
		submit_jobs();

		// abuse the job mutex to make setting m_abort and checking the thread count atomic
		// see also the comment in thread_fun
		std::unique_lock<std::mutex> l(m_job_mutex);
		if (m_abort.exchange(true)) return;
		bool const no_threads = m_generic_threads.num_threads() == 0
			&& m_hash_threads.num_threads() == 0;
		// abort outstanding jobs

		DLOG("aborting hash jobs\n");
		for (auto i = m_hash_io_jobs.m_queued_jobs.iterate(); i.get(); i.next())
			i.get()->flags |= disk_io_job::aborted;
		l.unlock();

		// if there are no disk threads, we can't wait for the jobs here, because
		// we'd stall indefinitely
		if (no_threads)
		{
			abort_jobs();
		}

		DLOG("aborting thread pools\n");
		// even if there are no threads it doesn't hurt to abort the pools
		// it prevents threads from being started after an abort which is a good
		// defensive programming measure
		m_generic_threads.abort(wait);
		m_hash_threads.abort(wait);
	}

	void disk_io_thread::reclaim_blocks(span<aux::block_cache_reference> refs)
	{
		TORRENT_ASSERT(m_magic == 0x1337);

		std::unique_lock<std::mutex> l(m_cache_mutex);
		for (auto ref : refs)
		{
			auto& pos = m_torrents[ref.storage];
			storage_interface* st = pos.get();
			TORRENT_ASSERT(st != nullptr);
			m_disk_cache.reclaim_block(st, ref);
			if (st->dec_refcount() == 0)
			{
				pos.reset();
				m_free_slots.push_back(ref.storage);
			}
		}
	}
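
	// apply the current session settings: update the block cache, resize the
	// file pool and split the configured number of aio_threads between the
	// generic and the hash thread pools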
	void disk_io_thread::settings_updated()
	{
		TORRENT_ASSERT(m_magic == 0x1337);
		std::unique_lock<std::mutex> l(m_cache_mutex);
		m_disk_cache.set_settings(m_settings);
		m_file_pool.resize(m_settings.get_int(settings_pack::file_pool_size));

		int const num_threads = m_settings.get_int(settings_pack::aio_threads);
		// add one hasher thread for every three generic threads
		int const num_hash_threads = num_threads / hasher_thread_divisor;

		DLOG("set_max_threads(%d, %d)\n", num_threads - num_hash_threads
			, num_hash_threads);
		m_generic_threads.set_max_threads(num_threads - num_hash_threads);
		m_hash_threads.set_max_threads(num_hash_threads);
	}

	// flush all blocks that are below p->hash.offset, since we've
	// already hashed those blocks, they won't cause any read-back
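	// for example (a sketch, assuming the default 16 kiB block size): once the
	// incremental hash has consumed the first 49152 bytes of a piece, the first
	// three blocks can be written out without risking a read-back later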
	int disk_io_thread::try_flush_hashed(cached_piece_entry* p, int const cont_block
		, jobqueue_t& completed_jobs, std::unique_lock<std::mutex>& l)
	{
		TORRENT_ASSERT(m_magic == 0x1337);
		TORRENT_ASSERT(l.owns_lock());
		TORRENT_ASSERT(cont_block > 0);
		if (p->hash == nullptr && !p->hashing_done)
		{
			DLOG("try_flush_hashed: (%d) no hash\n", int(p->piece));
			return 0;
		}

		if (p->num_dirty == 0)
		{
			DLOG("try_flush_hashed: no dirty blocks\n");
			return 0;
		}

		// end is one past the end
		// round offset up to include the last block, which might
		// have an odd size
		int end = p->hashing_done ? int(p->blocks_in_piece) : (p->hash->offset + default_block_size - 1) / default_block_size;

		// nothing has been hashed yet, don't flush anything
		if (end == 0 && !p->need_readback) return 0;

		// the number of contiguous blocks we need to be allowed to flush
		int block_limit = std::min(cont_block, int(p->blocks_in_piece));

		// if everything has been hashed, we might as well flush everything
		// regardless of the contiguous block restriction
		if (end == int(p->blocks_in_piece)) block_limit = 1;

		if (p->need_readback)
		{
			// if this piece needs a read-back already, don't
			// try to keep it from being flushed, since we'll
			// need to read it back regardless. Flushing will
			// save blocks that can be used to "save" other
			// pieces from being flushed prematurely
			end = int(p->blocks_in_piece);
		}

		TORRENT_ASSERT(end <= p->blocks_in_piece);

		// count number of blocks that would be flushed
		int num_blocks = 0;
		for (int i = end - 1; i >= 0; --i)
			num_blocks += (p->blocks[i].dirty && !p->blocks[i].pending);

		// we did not satisfy the block_limit requirement
		// i.e. too few blocks would be flushed at this point, put it off
		if (block_limit > num_blocks) return 0;

		// if the cache line size is larger than a whole piece, hold
		// off flushing this piece until enough adjacent pieces are
		// full as well.
		int cont_pieces = int(cont_block / p->blocks_in_piece);

		// at this point, we may enforce flushing full cache stripes even when
		// they span multiple pieces. This won't necessarily work in the general
		// case, because it assumes that the piece picker will have an affinity
		// to download whole stripes at a time. This is why this setting is turned
		// off by default, flushing only one piece at a time

		if (cont_pieces <= 1 || m_settings.get_bool(settings_pack::allow_partial_disk_writes))
		{
			DLOG("try_flush_hashed: (%d) blocks_in_piece: %d end: %d\n"
				, int(p->piece), int(p->blocks_in_piece), end);

			return flush_range(p, 0, end, completed_jobs, l);
		}

		// piece range
		piece_index_t const range_start((static_cast<int>(p->piece) / cont_pieces) * cont_pieces);
		piece_index_t const range_end(std::min(static_cast<int>(range_start)
			+ cont_pieces, p->storage->files().num_pieces()));

		// look through all the pieces in this range to see if
		// they are ready to be flushed. If so, flush them all,
		// otherwise, hold off
		bool range_full = true;

		cached_piece_entry* first_piece = nullptr;
		DLOG("try_flush_hashed: multi-piece: ");
		for (piece_index_t i = range_start; i != range_end; ++i)
		{
			if (i == p->piece)
			{
				if (i == range_start) first_piece = p;
				DLOG("[%d self] ", static_cast<int>(i));
				continue;
			}
			cached_piece_entry* pe = m_disk_cache.find_piece(p->storage.get(), i);
			if (pe == nullptr)
			{
				DLOG("[%d nullptr] ", static_cast<int>(i));
				range_full = false;
				break;
			}
			if (i == range_start) first_piece = pe;

			// if this is a read-cache piece, it has already been flushed
			if (pe->cache_state != cached_piece_entry::write_lru)
			{
				DLOG("[%d read-cache] ", static_cast<int>(i));
				continue;
			}
			int hash_cursor = pe->hash ? pe->hash->offset / default_block_size : 0;

			// if the piece has all blocks, and they're all dirty, and they've
			// all been hashed, then this piece is eligible for flushing
			if (pe->num_dirty == pe->blocks_in_piece
				&& (pe->hashing_done
					|| hash_cursor == pe->blocks_in_piece
					|| m_settings.get_bool(settings_pack::disable_hash_checks)))
			{
				DLOG("[%d hash-done] ", static_cast<int>(i));
				continue;
			}

#if DEBUG_DISK_THREAD
			if (pe->num_dirty < pe->blocks_in_piece)
			{
				DLOG("[%d dirty:%d] ", static_cast<int>(i), int(pe->num_dirty));
			}
			else if (pe->hashing_done == 0 && hash_cursor < pe->blocks_in_piece)
			{
				DLOG("[%d cursor:%d] ", static_cast<int>(i), hash_cursor);
			}
			else
			{
				DLOG("[%d xx] ", static_cast<int>(i));
			}
#endif

			// TODO: in this case, the piece should probably not be flushed yet. are there
			// any more cases where it should?

			range_full = false;
			break;
		}

		if (!range_full)
		{
			DLOG("not flushing\n");
			return 0;
		}
		DLOG("\n");

		// now, build an iovec for all pieces that we want to flush, so that they
		// can be flushed in a single atomic operation. This is especially important
		// when there is more than one disk thread, to make sure they don't
		// interleave in undesired places.
		// in order to remember where each piece boundary ended up in the iovec,
		// we keep the indices in the iovec_offset array

		cont_pieces = static_cast<int>(range_end) - static_cast<int>(range_start);
		int const blocks_to_flush = int(p->blocks_in_piece * cont_pieces);
		TORRENT_ALLOCA(iov, iovec_t, blocks_to_flush);
		TORRENT_ALLOCA(flushing, int, blocks_to_flush);
		// this is the offset into iov and flushing for each piece
		TORRENT_ALLOCA(iovec_offset, int, cont_pieces + 1);
		int iov_len = 0;
		// this is the block index each piece starts at
		int block_start = 0;
		// keep track of the pieces that have had their refcount incremented
		// so we know to decrement them later
		TORRENT_ALLOCA(refcount_pieces, int, cont_pieces);
		piece_index_t piece = range_start;
		for (int i = 0; i < cont_pieces; ++i, ++piece)
		{
			cached_piece_entry* pe;
			if (piece == p->piece) pe = p;
			else pe = m_disk_cache.find_piece(p->storage.get(), piece);
			if (pe == nullptr
				|| pe->cache_state != cached_piece_entry::write_lru)
			{
				refcount_pieces[i] = 0;
				iovec_offset[i] = iov_len;
				block_start += int(p->blocks_in_piece);
				continue;
			}

			iovec_offset[i] = iov_len;
			refcount_pieces[i] = 1;
			TORRENT_ASSERT_VAL(pe->cache_state <= cached_piece_entry::read_lru1 || pe->cache_state == cached_piece_entry::read_lru2, pe);
#if TORRENT_USE_ASSERTS
			pe->piece_log.push_back(piece_log_t(piece_log_t::flushing, -1));
#endif
			++pe->piece_refcount;

			iov_len += build_iovec(pe, 0, p->blocks_in_piece
				, iov.subspan(iov_len), flushing.subspan(iov_len), block_start);

			block_start += int(p->blocks_in_piece);
		}
		iovec_offset[cont_pieces] = iov_len;

		// ok, now we have one (or more, but hopefully one) contiguous
		// iovec array. Now, flush it to disk

		TORRENT_ASSERT(first_piece != nullptr);

		if (iov_len == 0)
		{
			// we may not exit here if we incremented any piece refcounters
			TORRENT_ASSERT(cont_pieces == 0);
			DLOG(" iov_len: 0 cont_pieces: %d range_start: %d range_end: %d\n"
				, cont_pieces, static_cast<int>(range_start), static_cast<int>(range_end));
			return 0;
		}

		storage_error error;
		{
			// unlock while we're performing the actual disk I/O
			// then lock again
			auto unlock = scoped_unlock(l);
			flush_iovec(first_piece, iov, flushing, iov_len, error);
		}

		block_start = 0;

		piece = range_start;
		for (int i = 0; i < cont_pieces; ++i, ++piece)
		{
			cached_piece_entry* pe;
			if (piece == p->piece) pe = p;
			else pe = m_disk_cache.find_piece(p->storage.get(), piece);
			if (pe == nullptr)
			{
				DLOG("iovec_flushed: piece %d gone!\n", static_cast<int>(piece));
				TORRENT_PIECE_ASSERT(refcount_pieces[i] == 0, pe);
				block_start += int(p->blocks_in_piece);
				continue;
			}
			if (refcount_pieces[i])
			{
				TORRENT_PIECE_ASSERT(pe->piece_refcount > 0, pe);
				--pe->piece_refcount;
				m_disk_cache.maybe_free_piece(pe);
			}
			const int block_diff = iovec_offset[i + 1] - iovec_offset[i];
			iovec_flushed(pe, flushing.subspan(iovec_offset[i]).data(), block_diff
				, block_start, error, completed_jobs);
			block_start += int(p->blocks_in_piece);
		}

		// if the cache is under high pressure, we need to evict
		// the blocks we just flushed to make room for more write pieces
		int const evict = m_disk_cache.num_to_evict(0);
		if (evict > 0) m_disk_cache.try_evict_blocks(evict);

		return iov_len;
	}

	// iov and flushing are expected to be arrays with at least pe->blocks_in_piece
	// items in them. Returns the number of iovecs written to the iov array.
	// The same number of block indices are written to the flushing array. These
	// are the block indices the respective iovec structures refer to. Since
	// we might not be able to flush everything as a single contiguous block,
	// the block indices indicate where the block run is broken.
	// the cache needs to be locked when calling this function
	// block_base_index is the offset added to every block index written to
	// the flushing array. This can be used when building iovecs spanning
	// multiple pieces; the subsequent pieces after the first one must have
	// their block indices start where the previous one left off
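	// for example (a sketch): if blocks 0, 1 and 3 of a piece are dirty and not
	// pending, build_iovec() fills iov[0..2] with those three buffers and sets
	// flushing to {0, 1, 3}; flush_iovec() then issues two separate writev()
	// calls, one for the contiguous run {0, 1} and one for block {3}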
	int disk_io_thread::build_iovec(cached_piece_entry* pe, int const start, int end
		, span<iovec_t> iov, span<int> flushing, int const block_base_index)
	{
		DLOG("build_iovec: piece=%d [%d, %d)\n"
			, int(pe->piece), start, end);
		TORRENT_PIECE_ASSERT(start >= 0, pe);
		TORRENT_PIECE_ASSERT(start < end, pe);
		end = std::min(end, int(pe->blocks_in_piece));

		int const piece_size = pe->storage->files().piece_size(pe->piece);
		TORRENT_PIECE_ASSERT(piece_size > 0, pe);

		int iov_len = 0;
		// the blocks we're flushing
		int num_flushing = 0;

#if DEBUG_DISK_THREAD
		DLOG("build_iov: piece: %d [", int(pe->piece));
		for (int i = 0; i < start; ++i) DLOG(".");
#endif

		int size_left = piece_size;
		for (int i = start; i < end; ++i, size_left -= default_block_size)
		{
			TORRENT_PIECE_ASSERT(size_left > 0, pe);
			// don't flush blocks that are empty (buf == 0), not dirty
			// (read cache blocks), or pending (already being written)
			if (pe->blocks[i].buf == nullptr
				|| pe->blocks[i].pending
				|| !pe->blocks[i].dirty)
			{
				DLOG("-");
				continue;
			}

			// if we fail to lock the block, it's no longer in the cache
			bool const locked = m_disk_cache.inc_block_refcount(pe, i, block_cache::ref_flushing);

			// it should always succeed, since it's a dirty block, and
			// should never have been marked as volatile
			TORRENT_ASSERT(locked);
			TORRENT_ASSERT(pe->cache_state != cached_piece_entry::volatile_read_lru);
			TORRENT_UNUSED(locked);

			flushing[num_flushing++] = i + block_base_index;
			iov[iov_len] = { pe->blocks[i].buf, std::min(default_block_size, size_left) };
			++iov_len;
			pe->blocks[i].pending = true;

			DLOG("x");
		}
		DLOG("]\n");

		TORRENT_PIECE_ASSERT(iov_len == num_flushing, pe);
		return aux::numeric_cast<int>(iov_len);
	}

	// does the actual writing to disk
	// the cached_piece_entry is supposed to point to the
	// first piece, if the iovec spans multiple pieces
	void disk_io_thread::flush_iovec(cached_piece_entry* pe
		, span<iovec_t const> iov, span<int const> flushing
		, int const num_blocks, storage_error& error)
	{
		TORRENT_PIECE_ASSERT(!error, pe);
		TORRENT_PIECE_ASSERT(num_blocks > 0, pe);
		m_stats_counters.inc_stats_counter(counters::num_writing_threads, 1);

		time_point const start_time = clock_type::now();

#if DEBUG_DISK_THREAD
		DLOG("flush_iovec: piece: %d [ ", int(pe->piece));
		for (int i = 0; i < num_blocks; ++i)
			DLOG("%d ", flushing[i]);
		DLOG("]\n");
#endif

		open_mode_t const file_flags = m_settings.get_bool(settings_pack::coalesce_writes)
			? open_mode::coalesce_buffers : open_mode_t{};

		// issue the actual write operation
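		// note that the loop deliberately runs one step past the last block
		// (i == num_blocks) so that the final contiguous run is written as well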
		auto iov_start = iov;
		int flushing_start = 0;
		piece_index_t const piece = pe->piece;
		int const blocks_in_piece = int(pe->blocks_in_piece);
		bool failed = false;
		for (int i = 1; i <= num_blocks; ++i)
		{
			if (i < num_blocks && flushing[i] == flushing[i - 1] + 1) continue;
			int const ret = pe->storage->writev(
				iov_start.first(i - flushing_start)
				, piece_index_t(static_cast<int>(piece) + flushing[flushing_start] / blocks_in_piece)
				, (flushing[flushing_start] % blocks_in_piece) * default_block_size
				, file_flags, error);
			if (ret < 0 || error) failed = true;
			iov_start = iov.subspan(i);
			flushing_start = i;
		}

		m_stats_counters.inc_stats_counter(counters::num_writing_threads, -1);

		{
			std::lock_guard<std::mutex> l(m_need_tick_mutex);
			if (!pe->storage->set_need_tick())
				m_need_tick.emplace_back(aux::time_now() + minutes(2), pe->storage);
		}

		if (!failed)
		{
			TORRENT_PIECE_ASSERT(!error, pe);
			std::int64_t const write_time = total_microseconds(clock_type::now() - start_time);

			m_stats_counters.inc_stats_counter(counters::num_blocks_written, num_blocks);
			m_stats_counters.inc_stats_counter(counters::num_write_ops);
			m_stats_counters.inc_stats_counter(counters::disk_write_time, write_time);
			m_stats_counters.inc_stats_counter(counters::disk_job_time, write_time);
#if DEBUG_DISK_THREAD
			DLOG("flush_iovec: %d\n", num_blocks);
#endif
		}
#if DEBUG_DISK_THREAD
		else
		{
			DLOG("flush_iovec: error: (%d) %s\n"
				, error.ec.value(), error.ec.message().c_str());
		}
#endif
	}

	// It is necessary to call this function with the blocks produced by
	// build_iovec, to reset their state to not being flushed anymore
	// the cache needs to be locked when calling this function
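	// the return value tells the caller whether the piece entry has already
	// been taken care of by the cache; flush_range() only calls
	// maybe_free_piece() when this returns false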
	bool disk_io_thread::iovec_flushed(cached_piece_entry* pe
		, int* flushing, int const num_blocks, int const block_offset
		, storage_error const& error
		, jobqueue_t& completed_jobs)
	{
		for (int i = 0; i < num_blocks; ++i)
			flushing[i] -= block_offset;

#if DEBUG_DISK_THREAD
		DLOG("iovec_flushed: piece: %d block_offset: %d [ "
			, static_cast<int>(pe->piece), block_offset);
		for (int i = 0; i < num_blocks; ++i)
			DLOG("%d ", flushing[i]);
		DLOG("]\n");
#endif
		if (m_disk_cache.blocks_flushed(pe, flushing, num_blocks))
			return true;

		if (error)
		{
			fail_jobs_impl(error, pe->jobs, completed_jobs);
		}
		else
		{
			disk_io_job* j = pe->jobs.get_all();
			while (j)
			{
				disk_io_job* next = j->next;
				j->next = nullptr;
				TORRENT_PIECE_ASSERT((j->flags & disk_io_job::in_progress) || !j->storage, pe);
				TORRENT_PIECE_ASSERT(j->piece == pe->piece, pe);
				if (j->completed(pe))
				{
					j->ret = status_t::no_error;
					j->error = error;
					completed_jobs.push_back(j);
				}
				else
				{
					pe->jobs.push_back(j);
				}
				j = next;
			}
		}

		return false;
	}

	// issues write operations for blocks in the given
	// range on the given piece.
	int disk_io_thread::flush_range(cached_piece_entry* pe, int const start, int const end
		, jobqueue_t& completed_jobs, std::unique_lock<std::mutex>& l)
	{
		TORRENT_ASSERT(l.owns_lock());

		DLOG("flush_range: piece=%d [%d, %d)\n"
			, static_cast<int>(pe->piece), start, end);
		TORRENT_PIECE_ASSERT(start >= 0, pe);
		TORRENT_PIECE_ASSERT(start < end, pe);

		TORRENT_ALLOCA(iov, iovec_t, pe->blocks_in_piece);
		TORRENT_ALLOCA(flushing, int, pe->blocks_in_piece);
		int const iov_len = build_iovec(pe, start, end, iov, flushing, 0);
		if (iov_len == 0) return 0;

		TORRENT_PIECE_ASSERT(pe->cache_state <= cached_piece_entry::read_lru1 || pe->cache_state == cached_piece_entry::read_lru2, pe);
#if TORRENT_USE_ASSERTS
		pe->piece_log.push_back(piece_log_t(piece_log_t::flush_range, -1));
#endif

		storage_error error;
		{
			piece_refcount_holder refcount_holder(pe);
			auto unlocker = scoped_unlock(l);

			flush_iovec(pe, iov, flushing, iov_len, error);
		}

		if (!iovec_flushed(pe, flushing.data(), iov_len, 0, error, completed_jobs))
			m_disk_cache.maybe_free_piece(pe);

		// if the cache is under high pressure, we need to evict
		// the blocks we just flushed to make room for more write pieces
		int const evict = m_disk_cache.num_to_evict(0);
		if (evict > 0) m_disk_cache.try_evict_blocks(evict);

		return iov_len;
	}

	void disk_io_thread::fail_jobs(storage_error const& e, jobqueue_t& jobs_)
	{
		jobqueue_t jobs;
		fail_jobs_impl(e, jobs_, jobs);
		if (!jobs.empty()) add_completed_jobs(jobs);
	}

	void disk_io_thread::fail_jobs_impl(storage_error const& e, jobqueue_t& src, jobqueue_t& dst)
	{
		while (!src.empty())
		{
			disk_io_job* j = src.pop_front();
			TORRENT_ASSERT((j->flags & disk_io_job::in_progress) || !j->storage);
			j->ret = status_t::fatal_disk_error;
			j->error = e;
			dst.push_back(j);
		}
	}

	void disk_io_thread::flush_piece(cached_piece_entry* pe, std::uint32_t const flags
		, jobqueue_t& completed_jobs, std::unique_lock<std::mutex>& l)
	{
		TORRENT_ASSERT(l.owns_lock());
		if (flags & flush_delete_cache)
		{
			// delete dirty blocks and post handlers with
			// operation_aborted error code
			fail_jobs_impl(storage_error(boost::asio::error::operation_aborted)
				, pe->jobs, completed_jobs);
			fail_jobs_impl(storage_error(boost::asio::error::operation_aborted)
				, pe->read_jobs, completed_jobs);
			m_disk_cache.abort_dirty(pe);
		}
		else if ((flags & flush_write_cache) && pe->num_dirty > 0)
		{
			// issue write commands
			flush_range(pe, 0, INT_MAX, completed_jobs, l);

			// if we're also flushing the read cache, this piece
			// should be removed as soon as all write jobs finish
			// otherwise it will turn into a read piece
		}

		// mark_for_eviction may erase the piece from the cache, that's
		// why we don't have the 'i' iterator referencing it at this point
		if (flags & (flush_read_cache | flush_delete_cache))
		{
			fail_jobs_impl(storage_error(boost::asio::error::operation_aborted), pe->jobs, completed_jobs);
			// we're removing the torrent, don't keep any entries around in the
			// ghost list
			m_disk_cache.mark_for_eviction(pe, block_cache::disallow_ghost);
		}
	}
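
	// flush (and possibly evict) cached pieces. When a storage is given, only
	// pieces belonging to that storage are visited, otherwise the whole cache
	// is walked.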
	void disk_io_thread::flush_cache(storage_interface* storage, std::uint32_t const flags
		, jobqueue_t& completed_jobs, std::unique_lock<std::mutex>& l)
	{
		if (storage != nullptr)
		{
			auto const& pieces = storage->cached_pieces();
			std::vector<piece_index_t> piece_index;
			piece_index.reserve(pieces.size());
			for (auto const& p : pieces)
			{
				TORRENT_ASSERT(p.get_storage() == storage);
				if (p.get_storage() != storage) continue;
				piece_index.push_back(p.piece);
			}

			for (auto idx : piece_index)
			{
				cached_piece_entry* pe = m_disk_cache.find_piece(storage, idx);
				if (pe == nullptr) continue;
				TORRENT_PIECE_ASSERT(pe->storage.get() == storage, pe);
				flush_piece(pe, flags, completed_jobs, l);
			}
#if TORRENT_USE_ASSERTS
			TORRENT_ASSERT(l.owns_lock());
			// if the user asked to delete the cache for this storage
			// we really should not have any pieces left. This is only called
			// from disk_io_thread::do_delete, which is a fence job and should
			// not have any other jobs active, i.e. there should not be any
			// references keeping pieces or blocks alive
			if ((flags & flush_delete_cache) && (flags & flush_expect_clear))
			{
				auto const& storage_pieces = storage->cached_pieces();
				for (auto const& p : storage_pieces)
				{
					cached_piece_entry* pe = m_disk_cache.find_piece(storage, p.piece);
					TORRENT_PIECE_ASSERT(pe->num_dirty == 0, pe);
				}
			}
#endif
		}
		else
		{
			auto range = m_disk_cache.all_pieces();
			while (range.first != range.second)
			{
				// TODO: it would be nice to optimize this by having the cache
				// pieces also ordered by
				if ((flags & (flush_read_cache | flush_delete_cache)) == 0)
				{
					// if we're not flushing the read cache, and not deleting the
					// cache, skip pieces with no dirty blocks, i.e. read cache
					// pieces
					while (range.first->num_dirty == 0)
					{
						++range.first;
						if (range.first == range.second) return;
					}
				}
				cached_piece_entry* pe = const_cast<cached_piece_entry*>(&*range.first);
				flush_piece(pe, flags, completed_jobs, l);
				range = m_disk_cache.all_pieces();
			}
		}
	}

	// this is called if we're exceeding (or about to exceed) the cache
	// size limit. This means we should not restrict ourselves to contiguous
	// blocks of write cache line size, but try to flush all old blocks
	// this is why we pass in 1 as cont_block to the flushing functions
	void disk_io_thread::try_flush_write_blocks(int num, jobqueue_t& completed_jobs
		, std::unique_lock<std::mutex>& l)
	{
		DLOG("try_flush_write_blocks: %d\n", num);

		auto const range = m_disk_cache.write_lru_pieces();
		aux::vector<std::pair<std::shared_ptr<storage_interface>, piece_index_t>> pieces;
		pieces.reserve(m_disk_cache.num_write_lru_pieces());

		for (auto p = range; p.get() && num > 0; p.next())
		{
			cached_piece_entry* e = p.get();
			if (e->num_dirty == 0) continue;
			pieces.emplace_back(e->storage, e->piece);
		}

		for (auto const& p : pieces)
		{
			// TODO: instead of doing a lookup each time through the loop, save
			// cached_piece_entry pointers with piece_refcount incremented to pin them
			cached_piece_entry* pe = m_disk_cache.find_piece(p.first.get(), p.second);
			if (pe == nullptr) continue;

			// another thread may flush this piece while we're looping and
			// evict it into a read piece and then also evict it to ghost
			if (pe->cache_state != cached_piece_entry::write_lru) continue;

#if TORRENT_USE_ASSERTS
			pe->piece_log.push_back(piece_log_t(piece_log_t::try_flush_write_blocks, -1));
#endif
			++pe->piece_refcount;
			kick_hasher(pe, l);
			num -= try_flush_hashed(pe, 1, completed_jobs, l);
			--pe->piece_refcount;

			m_disk_cache.maybe_free_piece(pe);
		}

		// when the write cache is under high pressure, it is likely
		// counterproductive to actually do this, since a piece may
		// not have had its flush_hashed job run on it
		// so only do it if no other thread is currently flushing

		if (num == 0 || m_stats_counters[counters::num_writing_threads] > 0) return;

		// if we still need to flush blocks, start over and flush
		// everything in LRU order (degrade to lru cache eviction)
		for (auto const& p : pieces)
		{
			cached_piece_entry* pe = m_disk_cache.find_piece(p.first.get(), p.second);
			if (pe == nullptr) continue;
			if (pe->num_dirty == 0) continue;

			// another thread may flush this piece while we're looping and
			// evict it into a read piece and then also evict it to ghost
			if (pe->cache_state != cached_piece_entry::write_lru) continue;

			// don't flush blocks that are being hashed by another thread
			if (pe->num_dirty == 0 || pe->hashing) continue;

#if TORRENT_USE_ASSERTS
			pe->piece_log.push_back(piece_log_t(piece_log_t::try_flush_write_blocks2, -1));
#endif
			++pe->piece_refcount;

			num -= flush_range(pe, 0, INT_MAX, completed_jobs, l);
			--pe->piece_refcount;

			m_disk_cache.maybe_free_piece(pe);
		}
	}
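
	// write out dirty pieces whose blocks have been sitting in the write cache
	// for longer than the cache_expiry setting allows, in least-recently-used
	// order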
|
|
|
|
|
2015-08-19 15:22:00 +02:00
|
|
|
void disk_io_thread::flush_expired_write_blocks(jobqueue_t& completed_jobs
|
2016-05-01 00:54:23 +02:00
|
|
|
, std::unique_lock<std::mutex>& l)
|
2008-02-22 05:11:04 +01:00
|
|
|
{
|
2014-07-06 21:18:00 +02:00
|
|
|
DLOG("flush_expired_write_blocks\n");
|
2009-11-28 04:14:08 +01:00
|
|
|
|
2017-11-20 18:10:12 +01:00
|
|
|
time_point const now = aux::time_now();
|
|
|
|
time_duration const expiration_limit = seconds(m_settings.get_int(settings_pack::cache_expiry));
|
2009-11-28 04:14:08 +01:00
|
|
|
|
2014-07-06 21:18:00 +02:00
|
|
|
#if TORRENT_USE_ASSERTS
|
2015-03-12 05:34:54 +01:00
|
|
|
time_point timeout = min_time();
|
2014-07-06 21:18:00 +02:00
|
|
|
#endif
|
2009-11-28 04:14:08 +01:00
|
|
|
|
2016-10-22 20:43:40 +02:00
|
|
|
TORRENT_ALLOCA(to_flush, cached_piece_entry*, 200);
|
2014-07-06 21:18:00 +02:00
|
|
|
int num_flush = 0;
|
2009-11-28 04:14:08 +01:00
|
|
|
|
2015-08-20 02:02:46 +02:00
|
|
|
for (list_iterator<cached_piece_entry> p = m_disk_cache.write_lru_pieces(); p.get(); p.next())
|
2008-02-22 05:11:04 +01:00
|
|
|
{
|
2015-08-20 02:02:46 +02:00
|
|
|
cached_piece_entry* e = p.get();
|
2014-07-06 21:18:00 +02:00
|
|
|
#if TORRENT_USE_ASSERTS
|
|
|
|
TORRENT_PIECE_ASSERT(e->expire >= timeout, e);
|
|
|
|
timeout = e->expire;
|
|
|
|
#endif
|
2009-11-28 04:14:08 +01:00
|
|
|
|
2014-07-06 21:18:00 +02:00
|
|
|
// since we're iterating in order of last use, if this piece
|
|
|
|
// shouldn't be evicted, none of the following ones will either
|
|
|
|
if (now - e->expire < expiration_limit) break;
|
|
|
|
if (e->num_dirty == 0) continue;
|
2008-02-22 05:11:04 +01:00
|
|
|
|
2014-07-06 21:18:00 +02:00
|
|
|
TORRENT_PIECE_ASSERT(e->cache_state <= cached_piece_entry::read_lru1 || e->cache_state == cached_piece_entry::read_lru2, e);
|
|
|
|
#if TORRENT_USE_ASSERTS
|
|
|
|
e->piece_log.push_back(piece_log_t(piece_log_t::flush_expired, -1));
|
|
|
|
#endif
|
|
|
|
++e->piece_refcount;
|
|
|
|
			// We can rely on the piece entry not being removed, since
|
|
|
|
			// we incremented the piece_refcount
|
|
|
|
to_flush[num_flush++] = e;
|
|
|
|
if (num_flush == 200) break;
|
2008-02-22 05:11:04 +01:00
|
|
|
}
|
|
|
|
|
2014-07-06 21:18:00 +02:00
|
|
|
for (int i = 0; i < num_flush; ++i)
|
2009-11-28 04:14:08 +01:00
|
|
|
{
|
2015-05-18 07:32:18 +02:00
|
|
|
flush_range(to_flush[i], 0, INT_MAX, completed_jobs, l);
|
2014-07-06 21:18:00 +02:00
|
|
|
TORRENT_ASSERT(to_flush[i]->piece_refcount > 0);
|
|
|
|
--to_flush[i]->piece_refcount;
|
|
|
|
m_disk_cache.maybe_free_piece(to_flush[i]);
|
2009-11-28 04:14:08 +01:00
|
|
|
}
|
2014-07-06 21:18:00 +02:00
|
|
|
}
|
|
|
|
|
2015-04-20 06:52:49 +02:00
|
|
|
namespace {
|
|
|
|
|
2018-03-22 17:01:38 +01:00
|
|
|
using disk_io_fun_t = status_t (disk_io_thread::*)(disk_io_job*, jobqueue_t&);
|
2014-07-06 21:18:00 +02:00
|
|
|
|
|
|
|
// this is a jump-table for disk I/O jobs
|
2017-06-11 19:53:15 +02:00
|
|
|
std::array<disk_io_fun_t, 15> const job_functions =
|
|
|
|
{{
|
2014-07-06 21:18:00 +02:00
|
|
|
&disk_io_thread::do_read,
|
|
|
|
&disk_io_thread::do_write,
|
|
|
|
&disk_io_thread::do_hash,
|
|
|
|
&disk_io_thread::do_move_storage,
|
|
|
|
&disk_io_thread::do_release_files,
|
|
|
|
&disk_io_thread::do_delete_files,
|
|
|
|
&disk_io_thread::do_check_fastresume,
|
|
|
|
&disk_io_thread::do_rename_file,
|
|
|
|
&disk_io_thread::do_stop_torrent,
|
|
|
|
&disk_io_thread::do_flush_piece,
|
|
|
|
&disk_io_thread::do_flush_hashed,
|
|
|
|
&disk_io_thread::do_flush_storage,
|
|
|
|
&disk_io_thread::do_trim_cache,
|
|
|
|
&disk_io_thread::do_file_priority,
|
2016-11-06 07:39:41 +01:00
|
|
|
&disk_io_thread::do_clear_piece
|
2017-06-11 19:53:15 +02:00
|
|
|
}};
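	// note: the order of entries above must match the job_action_t enum,
	// since perform_job() below dispatches by indexing this table directly
	// with static_cast<int>(j->action)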
|
2009-11-28 04:14:08 +01:00
|
|
|
|
2015-04-20 06:52:49 +02:00
|
|
|
} // anonymous namespace
|
|
|
|
|
2014-07-06 21:18:00 +02:00
|
|
|
// evict and/or flush blocks if we're exceeding the cache size
|
|
|
|
// or used to exceed it and haven't dropped below the low watermark yet
|
|
|
|
// the low watermark is dynamic, based on the number of peers waiting
|
|
|
|
// on buffers to free up. The more waiters, the lower the low watermark
|
|
|
|
// is. Because of this, the target for flushing jobs may have dropped
|
|
|
|
	// below the number of blocks we flushed by the time we're done flushing;
|
|
|
|
	// that's why we need to call this fairly often, both before and after
|
|
|
|
// a disk job is executed
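	// (illustrative: if many peers are blocked waiting on disk buffers, the
	// low watermark shrinks, so a flush target computed at the start of a
	// pass can already be stale by the end of it)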
|
2016-05-01 00:54:23 +02:00
|
|
|
void disk_io_thread::check_cache_level(std::unique_lock<std::mutex>& l, jobqueue_t& completed_jobs)
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
2016-03-16 07:22:18 +01:00
|
|
|
// when the read cache is disabled, always try to evict all read cache
|
|
|
|
// blocks
|
|
|
|
if (!m_settings.get_bool(settings_pack::use_read_cache))
|
|
|
|
{
|
|
|
|
int const evict = m_disk_cache.read_cache_size();
|
|
|
|
m_disk_cache.try_evict_blocks(evict);
|
|
|
|
}
|
|
|
|
|
2014-07-06 21:18:00 +02:00
|
|
|
int evict = m_disk_cache.num_to_evict(0);
|
|
|
|
if (evict > 0)
|
2009-01-03 09:11:31 +01:00
|
|
|
{
|
2014-07-06 21:18:00 +02:00
|
|
|
evict = m_disk_cache.try_evict_blocks(evict);
|
|
|
|
// don't evict write jobs if at least one other thread
|
|
|
|
// is flushing right now. Doing so could result in
|
|
|
|
// unnecessary flushing of the wrong pieces
|
2014-08-01 08:07:48 +02:00
|
|
|
if (evict > 0 && m_stats_counters[counters::num_writing_threads] == 0)
|
2009-11-28 04:14:08 +01:00
|
|
|
{
|
2014-07-06 21:18:00 +02:00
|
|
|
try_flush_write_blocks(evict, completed_jobs, l);
|
2009-05-21 18:15:05 +02:00
|
|
|
}
|
2009-01-03 09:11:31 +01:00
|
|
|
}
|
2008-02-22 05:11:04 +01:00
|
|
|
}
|
2009-02-03 08:46:24 +01:00
|
|
|
|
2015-08-19 15:22:00 +02:00
|
|
|
void disk_io_thread::perform_job(disk_io_job* j, jobqueue_t& completed_jobs)
|
2008-02-22 05:11:04 +01:00
|
|
|
{
|
2016-07-09 22:26:26 +02:00
|
|
|
TORRENT_ASSERT(j->next == nullptr);
|
2014-07-06 21:18:00 +02:00
|
|
|
TORRENT_ASSERT((j->flags & disk_io_job::in_progress) || !j->storage);
|
2008-02-22 05:11:04 +01:00
|
|
|
|
2016-03-13 04:06:01 +01:00
|
|
|
#if DEBUG_DISK_THREAD
|
|
|
|
{
|
2016-05-01 00:54:23 +02:00
|
|
|
std::unique_lock<std::mutex> l(m_cache_mutex);
|
2008-02-22 05:11:04 +01:00
|
|
|
|
2019-03-24 17:30:22 +01:00
|
|
|
DLOG("perform_job job: %s ( %s) piece: %d offset: %d outstanding: %d\n"
|
|
|
|
, job_name(j->action)
|
2016-03-13 04:06:01 +01:00
|
|
|
, (j->flags & disk_io_job::fence) ? "fence ": ""
|
2016-12-22 16:42:33 +01:00
|
|
|
, static_cast<int>(j->piece), j->d.io.offset
|
2016-03-13 04:06:01 +01:00
|
|
|
, j->storage ? j->storage->num_outstanding_jobs() : -1);
|
|
|
|
}
|
|
|
|
#endif
|
2008-02-22 05:11:04 +01:00
|
|
|
|
2016-11-13 03:45:30 +01:00
|
|
|
std::shared_ptr<storage_interface> storage = j->storage;
|
2009-05-24 02:12:53 +02:00
|
|
|
|
2018-09-03 09:54:03 +02:00
|
|
|
#ifdef TORRENT_EXPENSIVE_INVARIANT_CHECKS
|
2018-09-04 14:31:05 +02:00
|
|
|
if (j->storage)
|
2018-09-03 09:54:03 +02:00
|
|
|
{
|
2018-09-04 14:31:05 +02:00
|
|
|
std::unique_lock<std::mutex> l(m_cache_mutex);
|
|
|
|
auto const& pieces = j->storage->cached_pieces();
|
|
|
|
for (auto const& p : pieces)
|
|
|
|
TORRENT_ASSERT(p.storage == j->storage);
|
2018-09-03 09:54:03 +02:00
|
|
|
}
|
|
|
|
#endif
|
2017-05-18 14:50:22 +02:00
|
|
|
		// TODO: 4 instead of doing this, pass in the settings to each storage_interface
|
2014-07-06 21:18:00 +02:00
|
|
|
// call. Each disk thread could hold its most recent understanding of the settings
|
|
|
|
// in a shared_ptr, and update it every time it wakes up from a job. That way
|
2016-05-01 00:54:23 +02:00
|
|
|
// each access to the settings won't require a std::mutex to be held.
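		// (one possible shape of that TODO, purely a sketch: each disk thread
		// holds a std::shared_ptr to an immutable copy of the settings,
		// refreshes it whenever it wakes up for a job, and reads settings
		// through it without taking a mutex)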
|
2016-11-13 03:45:30 +01:00
|
|
|
if (storage && storage->m_settings == nullptr)
|
|
|
|
storage->m_settings = &m_settings;
|
2008-02-22 05:11:04 +01:00
|
|
|
|
2017-06-11 19:53:15 +02:00
|
|
|
TORRENT_ASSERT(static_cast<int>(j->action) < int(job_functions.size()));
|
2010-01-27 05:25:45 +01:00
|
|
|
|
2014-10-25 22:07:50 +02:00
|
|
|
m_stats_counters.inc_stats_counter(counters::num_running_disk_jobs, 1);
|
2008-02-22 05:11:04 +01:00
|
|
|
|
2014-07-06 21:18:00 +02:00
|
|
|
// call disk function
|
2016-10-19 07:18:05 +02:00
|
|
|
// TODO: in the future, propagate exceptions back to the handlers
|
2016-11-26 07:51:47 +01:00
|
|
|
status_t ret = status_t::no_error;
|
2016-10-19 07:18:05 +02:00
|
|
|
try
|
|
|
|
{
|
2017-06-11 19:53:15 +02:00
|
|
|
int const idx = static_cast<int>(j->action);
|
|
|
|
ret = (this->*(job_functions[static_cast<std::size_t>(idx)]))(j, completed_jobs);
|
2016-10-19 07:18:05 +02:00
|
|
|
}
|
|
|
|
catch (boost::system::system_error const& err)
|
|
|
|
{
|
2016-11-26 07:51:47 +01:00
|
|
|
ret = status_t::fatal_disk_error;
|
2016-10-19 07:18:05 +02:00
|
|
|
j->error.ec = err.code();
|
2017-06-18 00:18:19 +02:00
|
|
|
j->error.operation = operation_t::exception;
|
2016-10-19 07:18:05 +02:00
|
|
|
}
|
|
|
|
catch (std::bad_alloc const&)
|
|
|
|
{
|
2016-11-26 07:51:47 +01:00
|
|
|
ret = status_t::fatal_disk_error;
|
2016-10-19 07:18:05 +02:00
|
|
|
j->error.ec = errors::no_memory;
|
2017-06-18 00:18:19 +02:00
|
|
|
j->error.operation = operation_t::exception;
|
2016-10-19 07:18:05 +02:00
|
|
|
}
|
|
|
|
catch (std::exception const&)
|
|
|
|
{
|
2016-11-26 07:51:47 +01:00
|
|
|
ret = status_t::fatal_disk_error;
|
2016-10-19 07:18:05 +02:00
|
|
|
j->error.ec = boost::asio::error::fault;
|
2017-06-18 00:18:19 +02:00
|
|
|
j->error.operation = operation_t::exception;
|
2016-10-19 07:18:05 +02:00
|
|
|
}
|
2014-07-06 21:18:00 +02:00
|
|
|
|
2016-08-15 01:48:31 +02:00
|
|
|
// note that -2 errors are OK
|
2016-11-26 07:51:47 +01:00
|
|
|
TORRENT_ASSERT(ret != status_t::fatal_disk_error
|
2017-06-18 00:18:19 +02:00
|
|
|
|| (j->error.ec && j->error.operation != operation_t::unknown));
|
2014-10-13 01:49:51 +02:00
|
|
|
|
2014-10-25 22:07:50 +02:00
|
|
|
m_stats_counters.inc_stats_counter(counters::num_running_disk_jobs, -1);
|
2014-07-06 21:18:00 +02:00
|
|
|
|
2016-05-01 00:54:23 +02:00
|
|
|
std::unique_lock<std::mutex> l(m_cache_mutex);
|
2016-03-14 03:36:04 +01:00
|
|
|
if (m_cache_check_state == cache_check_idle)
|
|
|
|
{
|
|
|
|
m_cache_check_state = cache_check_active;
|
|
|
|
while (m_cache_check_state != cache_check_idle)
|
|
|
|
{
|
|
|
|
check_cache_level(l, completed_jobs);
|
2016-05-01 00:54:23 +02:00
|
|
|
TORRENT_ASSERT(l.owns_lock());
|
2016-03-14 03:36:04 +01:00
|
|
|
--m_cache_check_state;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
m_cache_check_state = cache_check_reinvoke;
|
|
|
|
}
|
2016-03-13 04:06:01 +01:00
|
|
|
l.unlock();
|
|
|
|
|
2014-07-06 21:18:00 +02:00
|
|
|
if (ret == retry_job)
|
2008-02-22 05:11:04 +01:00
|
|
|
{
|
2016-06-16 02:49:28 +02:00
|
|
|
job_queue& q = queue_for_job(j);
|
|
|
|
|
2016-05-01 00:54:23 +02:00
|
|
|
std::unique_lock<std::mutex> l2(m_job_mutex);
|
2014-07-06 21:18:00 +02:00
|
|
|
// to avoid busy looping here, give up
|
|
|
|
		// our quantum in case there aren't any other
|
|
|
|
// jobs to run in between
|
|
|
|
|
|
|
|
// TODO: a potentially more efficient solution would be to have a special
|
|
|
|
// queue for retry jobs, that's only ever run when a job completes, in
|
2014-10-25 22:07:50 +02:00
|
|
|
// any thread. It would only work if counters::num_running_disk_jobs > 0
|
2015-08-19 01:39:01 +02:00
|
|
|
|
2014-07-06 21:18:00 +02:00
|
|
|
TORRENT_ASSERT((j->flags & disk_io_job::in_progress) || !j->storage);
|
2015-08-19 01:39:01 +02:00
|
|
|
|
2016-06-16 02:49:28 +02:00
|
|
|
bool const need_sleep = q.m_queued_jobs.empty();
|
|
|
|
q.m_queued_jobs.push_back(j);
|
2015-08-19 01:39:01 +02:00
|
|
|
l2.unlock();
|
2016-05-01 00:54:23 +02:00
|
|
|
if (need_sleep) std::this_thread::yield();
|
2014-07-06 21:18:00 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (ret == defer_handler) return;
|
2008-02-22 05:11:04 +01:00
|
|
|
|
2014-07-06 21:18:00 +02:00
|
|
|
j->ret = ret;
|
2009-05-19 09:00:05 +02:00
|
|
|
|
2014-07-06 21:18:00 +02:00
|
|
|
completed_jobs.push_back(j);
|
2008-02-22 05:11:04 +01:00
|
|
|
}
|
|
|
|
|
2016-11-26 07:51:47 +01:00
|
|
|
status_t disk_io_thread::do_uncached_read(disk_io_job* j)
|
2008-02-22 05:11:04 +01:00
|
|
|
{
|
2017-10-13 01:34:24 +02:00
|
|
|
j->argument = disk_buffer_holder(*this, m_disk_cache.allocate_buffer("send buffer"), 0x4000);
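		// 0x4000 bytes = 16 kiB, the block size used throughout this file
		// (default_block_size)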
|
2017-04-16 22:37:39 +02:00
|
|
|
auto& buffer = boost::get<disk_buffer_holder>(j->argument);
|
|
|
|
if (buffer.get() == nullptr)
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
|
|
|
j->error.ec = error::no_memory;
|
2017-06-18 00:18:19 +02:00
|
|
|
j->error.operation = operation_t::alloc_cache_piece;
|
2016-11-26 07:51:47 +01:00
|
|
|
return status_t::fatal_disk_error;
|
2014-07-06 21:18:00 +02:00
|
|
|
}
|
2008-02-22 05:11:04 +01:00
|
|
|
|
2016-12-10 20:15:25 +01:00
|
|
|
time_point const start_time = clock_type::now();
|
2008-02-22 05:11:04 +01:00
|
|
|
|
2017-05-26 20:49:21 +02:00
|
|
|
open_mode_t const file_flags = file_flags_for_job(j
|
2016-03-20 16:38:55 +01:00
|
|
|
, m_settings.get_bool(settings_pack::coalesce_reads));
|
2018-11-01 23:05:30 +01:00
|
|
|
iovec_t b = {buffer.get(), j->d.io.buffer_size};
|
2015-08-19 01:39:01 +02:00
|
|
|
|
2017-11-20 18:10:12 +01:00
|
|
|
int const ret = j->storage->readv(b
|
2014-07-28 03:06:00 +02:00
|
|
|
, j->piece, j->d.io.offset, file_flags, j->error);
|
2015-08-19 01:39:01 +02:00
|
|
|
|
2018-08-25 14:13:51 +02:00
|
|
|
TORRENT_ASSERT(ret >= 0 || (j->error.ec && j->error.operation != operation_t::unknown));
|
2016-12-05 02:15:49 +01:00
|
|
|
TORRENT_UNUSED(ret);
|
2014-10-13 01:49:51 +02:00
|
|
|
|
2014-07-06 21:18:00 +02:00
|
|
|
if (!j->error.ec)
|
2010-01-27 05:25:45 +01:00
|
|
|
{
|
2017-11-20 18:10:12 +01:00
|
|
|
std::int64_t const read_time = total_microseconds(clock_type::now() - start_time);
|
2014-08-01 08:07:48 +02:00
|
|
|
|
|
|
|
m_stats_counters.inc_stats_counter(counters::num_read_back);
|
|
|
|
m_stats_counters.inc_stats_counter(counters::num_blocks_read);
|
|
|
|
m_stats_counters.inc_stats_counter(counters::num_read_ops);
|
|
|
|
m_stats_counters.inc_stats_counter(counters::disk_read_time, read_time);
|
|
|
|
m_stats_counters.inc_stats_counter(counters::disk_job_time, read_time);
|
2008-02-22 05:11:04 +01:00
|
|
|
}
|
2016-11-26 07:51:47 +01:00
|
|
|
return status_t::no_error;
|
2010-01-15 17:45:42 +01:00
|
|
|
}
|
|
|
|
|
2016-11-26 07:51:47 +01:00
|
|
|
status_t disk_io_thread::do_read(disk_io_job* j, jobqueue_t& completed_jobs)
|
2010-01-15 17:45:42 +01:00
|
|
|
{
|
2017-04-07 15:15:54 +02:00
|
|
|
int const piece_size = j->storage->files().piece_size(j->piece);
|
2018-01-03 12:54:03 +01:00
|
|
|
int const blocks_in_piece = (piece_size + default_block_size - 1) / default_block_size;
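		// rounds up to whole blocks, e.g. a piece of 1 MiB + 1 byte with
		// 16 kiB blocks yields (1048577 + 16383) / 16384 = 65 blocks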
|
2016-09-27 02:05:04 +02:00
|
|
|
int const iov_len = m_disk_cache.pad_job(j, blocks_in_piece
|
2014-07-06 21:18:00 +02:00
|
|
|
, m_settings.get_int(settings_pack::read_cache_line_size));
|
2010-01-31 20:14:00 +01:00
|
|
|
|
2017-01-11 06:42:10 +01:00
|
|
|
TORRENT_ALLOCA(iov, iovec_t, iov_len);
|
2011-08-07 09:27:38 +02:00
|
|
|
|
2016-05-01 00:54:23 +02:00
|
|
|
std::unique_lock<std::mutex> l(m_cache_mutex);
|
2011-08-07 09:27:38 +02:00
|
|
|
|
2017-11-20 18:10:12 +01:00
|
|
|
int const evict = m_disk_cache.num_to_evict(iov_len);
|
2014-07-06 21:18:00 +02:00
|
|
|
if (evict > 0) m_disk_cache.try_evict_blocks(evict);
|
2010-01-15 17:45:42 +01:00
|
|
|
|
2014-07-06 21:18:00 +02:00
|
|
|
cached_piece_entry* pe = m_disk_cache.find_piece(j);
|
2016-06-20 17:32:06 +02:00
|
|
|
if (pe == nullptr)
|
2009-02-03 08:46:24 +01:00
|
|
|
{
|
2016-03-15 06:55:36 +01:00
|
|
|
l.unlock();
|
|
|
|
return do_uncached_read(j);
|
2009-02-03 08:46:24 +01:00
|
|
|
}
|
2014-07-06 21:18:00 +02:00
|
|
|
TORRENT_PIECE_ASSERT(pe->outstanding_read == 1, pe);
|
2009-02-03 08:46:24 +01:00
|
|
|
|
2014-07-06 21:18:00 +02:00
|
|
|
l.unlock();
|
2009-02-03 08:46:24 +01:00
|
|
|
|
2014-07-06 21:18:00 +02:00
|
|
|
// then we'll actually allocate the buffers
|
2016-10-27 02:40:56 +02:00
|
|
|
int ret = m_disk_cache.allocate_iovec(iov);
|
2009-05-24 02:12:53 +02:00
|
|
|
|
2014-07-06 21:18:00 +02:00
|
|
|
if (ret < 0)
|
|
|
|
{
|
2016-11-26 07:51:47 +01:00
|
|
|
status_t const s = do_uncached_read(j);
|
2014-07-06 21:18:00 +02:00
|
|
|
|
2016-05-01 00:54:23 +02:00
|
|
|
std::unique_lock<std::mutex> l2(m_cache_mutex);
|
2015-08-19 01:39:01 +02:00
|
|
|
pe = m_disk_cache.find_piece(j);
|
2016-12-12 02:24:26 +01:00
|
|
|
if (pe != nullptr) maybe_issue_queued_read_jobs(pe, completed_jobs);
|
2016-11-26 07:51:47 +01:00
|
|
|
return s;
|
2008-02-22 05:11:04 +01:00
|
|
|
}
|
2009-02-03 08:46:24 +01:00
|
|
|
|
2019-03-26 12:29:04 +01:00
|
|
|
// free buffers at the end of the scope
|
|
|
|
auto iov_dealloc = aux::scope_end([&]{ m_disk_cache.free_iovec(iov); });
|
|
|
|
|
2014-07-06 21:18:00 +02:00
|
|
|
// this is the offset that's aligned to block boundaries
|
2018-11-01 23:05:30 +01:00
|
|
|
int const adjusted_offset = aux::numeric_cast<int>(j->d.io.offset & ~(default_block_size - 1));
|
2014-07-06 21:18:00 +02:00
|
|
|
|
|
|
|
// if this is the last piece, adjust the size of the
|
|
|
|
// last buffer to match up
|
2018-11-01 23:05:30 +01:00
|
|
|
iov[iov_len - 1] = iov[iov_len - 1].first(
|
|
|
|
std::min(piece_size - adjusted_offset - (iov_len - 1)
|
|
|
|
* default_block_size, default_block_size));
|
2017-04-29 06:27:55 +02:00
|
|
|
TORRENT_ASSERT(iov[iov_len - 1].size() > 0);
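		// (worked example: with piece_size = 1000000, adjusted_offset = 983040
		// and iov_len = 2, the last buffer is trimmed to
		// min(1000000 - 983040 - 1 * 16384, 16384) = 576 bytes)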
|
2014-07-06 21:18:00 +02:00
|
|
|
|
2016-08-15 01:48:31 +02:00
|
|
|
// at this point, all the buffers are allocated and iov is initialized
|
2014-07-06 21:18:00 +02:00
|
|
|
// and the blocks have their refcounters incremented, so no other thread
|
2016-05-01 00:54:23 +02:00
|
|
|
// can remove them. We can now release the cache std::mutex and dive into the
|
2014-07-06 21:18:00 +02:00
|
|
|
// disk operations.
|
|
|
|
|
2017-05-26 20:49:21 +02:00
|
|
|
open_mode_t const file_flags = file_flags_for_job(j
|
2016-03-20 16:38:55 +01:00
|
|
|
, m_settings.get_bool(settings_pack::coalesce_reads));
|
2016-12-10 20:15:25 +01:00
|
|
|
time_point const start_time = clock_type::now();
|
2014-02-03 02:55:26 +01:00
|
|
|
|
2016-11-13 03:45:30 +01:00
|
|
|
ret = j->storage->readv(iov
|
2016-12-05 02:15:49 +01:00
|
|
|
, j->piece, int(adjusted_offset), file_flags, j->error);
|
2009-02-03 08:46:24 +01:00
|
|
|
|
2018-08-25 14:13:51 +02:00
|
|
|
TORRENT_ASSERT(ret >= 0 || (j->error.ec && j->error.operation != operation_t::unknown));
|
2018-08-19 21:55:31 +02:00
|
|
|
|
2014-07-06 21:18:00 +02:00
|
|
|
if (!j->error.ec)
|
2013-09-04 06:09:33 +02:00
|
|
|
{
|
2016-12-05 02:15:49 +01:00
|
|
|
std::int64_t const read_time = total_microseconds(clock_type::now() - start_time);
|
2014-08-01 08:07:48 +02:00
|
|
|
|
|
|
|
m_stats_counters.inc_stats_counter(counters::num_blocks_read, iov_len);
|
|
|
|
m_stats_counters.inc_stats_counter(counters::num_read_ops);
|
|
|
|
m_stats_counters.inc_stats_counter(counters::disk_read_time, read_time);
|
|
|
|
m_stats_counters.inc_stats_counter(counters::disk_job_time, read_time);
|
2013-09-04 06:09:33 +02:00
|
|
|
}
|
2009-02-03 08:46:24 +01:00
|
|
|
|
2014-07-06 21:18:00 +02:00
|
|
|
l.lock();
|
2009-02-03 08:46:24 +01:00
|
|
|
|
2014-07-06 21:18:00 +02:00
|
|
|
if (ret < 0)
|
2009-02-03 08:46:24 +01:00
|
|
|
{
|
2015-08-19 01:39:01 +02:00
|
|
|
pe = m_disk_cache.find_piece(j);
|
2016-06-20 17:32:06 +02:00
|
|
|
if (pe == nullptr)
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
|
|
|
// the piece is supposed to be allocated when the
|
|
|
|
// disk job is allocated
|
2016-05-02 18:36:21 +02:00
|
|
|
TORRENT_ASSERT_FAIL();
|
2016-11-26 07:51:47 +01:00
|
|
|
return status_t::fatal_disk_error;
|
2014-07-06 21:18:00 +02:00
|
|
|
}
|
|
|
|
TORRENT_PIECE_ASSERT(pe->outstanding_read == 1, pe);
|
2010-01-27 05:25:45 +01:00
|
|
|
|
2018-06-24 00:11:36 +02:00
|
|
|
if (!pe->read_jobs.empty())
|
2014-07-06 21:18:00 +02:00
|
|
|
fail_jobs_impl(j->error, pe->read_jobs, completed_jobs);
|
2018-06-24 00:11:36 +02:00
|
|
|
TORRENT_PIECE_ASSERT(pe->read_jobs.empty(), pe);
|
2014-07-06 21:18:00 +02:00
|
|
|
pe->outstanding_read = 0;
|
|
|
|
#if TORRENT_USE_ASSERTS
|
2018-06-24 00:11:36 +02:00
|
|
|
pe->piece_log.emplace_back(piece_log_t::clear_outstanding_jobs);
|
2014-07-06 21:18:00 +02:00
|
|
|
#endif
|
|
|
|
m_disk_cache.maybe_free_piece(pe);
|
2016-11-26 07:51:47 +01:00
|
|
|
return status_t::fatal_disk_error;
|
2009-02-03 08:46:24 +01:00
|
|
|
}
|
|
|
|
|
2018-01-03 12:54:03 +01:00
|
|
|
int block = j->d.io.offset / default_block_size;
|
2015-08-18 13:55:50 +02:00
|
|
|
#if TORRENT_USE_ASSERTS
|
2014-07-06 21:18:00 +02:00
|
|
|
pe->piece_log.push_back(piece_log_t(j->action, block));
|
|
|
|
#endif
|
2019-03-26 12:29:04 +01:00
|
|
|
|
|
|
|
// we want to hold on to the iov now
|
|
|
|
iov_dealloc.disarm();
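		// ownership of the buffers passes to the block cache below
		// (insert_blocks), so the scope guard must not free them anymore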
|
|
|
|
|
2014-07-06 21:18:00 +02:00
|
|
|
		// as soon as we insert the blocks they may be evicted
|
|
|
|
// (if using purgeable memory). In order to prevent that
|
|
|
|
// until we can read from them, increment the refcounts
|
2016-10-27 02:40:56 +02:00
|
|
|
m_disk_cache.insert_blocks(pe, block, iov, j, block_cache::blocks_inc_refcount);
|
2009-02-03 08:46:24 +01:00
|
|
|
|
2014-07-06 21:18:00 +02:00
|
|
|
TORRENT_ASSERT(pe->blocks[block].buf);
|
2009-02-03 08:46:24 +01:00
|
|
|
|
2017-11-20 18:10:12 +01:00
|
|
|
int const tmp = m_disk_cache.try_read(j, *this, true);
|
2015-05-19 05:13:49 +02:00
|
|
|
|
|
|
|
		// This should always succeed because we just checked that there is a
|
|
|
|
// buffer for this block
|
2014-07-06 21:18:00 +02:00
|
|
|
TORRENT_ASSERT(tmp >= 0);
|
2015-05-19 05:13:49 +02:00
|
|
|
TORRENT_UNUSED(tmp);
|
2008-02-22 05:11:04 +01:00
|
|
|
|
2014-07-06 21:18:00 +02:00
|
|
|
maybe_issue_queued_read_jobs(pe, completed_jobs);
|
|
|
|
|
|
|
|
for (int i = 0; i < iov_len; ++i, ++block)
|
|
|
|
m_disk_cache.dec_block_refcount(pe, block, block_cache::ref_reading);
|
2014-02-03 02:55:26 +01:00
|
|
|
|
2016-11-26 07:51:47 +01:00
|
|
|
return status_t::no_error;
|
2009-06-10 10:30:55 +02:00
|
|
|
}
|
|
|
|
|
2015-04-20 06:52:49 +02:00
|
|
|
void disk_io_thread::maybe_issue_queued_read_jobs(cached_piece_entry* pe
|
2015-08-19 15:22:00 +02:00
|
|
|
, jobqueue_t& completed_jobs)
|
2011-11-16 08:09:12 +01:00
|
|
|
{
|
2014-07-06 21:18:00 +02:00
|
|
|
TORRENT_PIECE_ASSERT(pe->outstanding_read == 1, pe);
|
2011-11-16 08:09:12 +01:00
|
|
|
|
2014-07-06 21:18:00 +02:00
|
|
|
// if we're shutting down, just cancel the jobs
|
2015-06-14 22:00:04 +02:00
|
|
|
if (m_abort)
|
2011-11-16 08:09:12 +01:00
|
|
|
{
|
2015-04-20 06:52:49 +02:00
|
|
|
fail_jobs_impl(storage_error(boost::asio::error::operation_aborted)
|
|
|
|
, pe->read_jobs, completed_jobs);
|
2018-06-24 00:11:36 +02:00
|
|
|
TORRENT_PIECE_ASSERT(pe->read_jobs.empty(), pe);
|
2014-07-06 21:18:00 +02:00
|
|
|
pe->outstanding_read = 0;
|
|
|
|
#if TORRENT_USE_ASSERTS
|
2018-06-24 00:11:36 +02:00
|
|
|
pe->piece_log.emplace_back(piece_log_t::clear_outstanding_jobs);
|
2014-07-06 21:18:00 +02:00
|
|
|
#endif
|
|
|
|
m_disk_cache.maybe_free_piece(pe);
|
|
|
|
return;
|
2011-11-16 08:09:12 +01:00
|
|
|
}
|
|
|
|
|
2014-07-06 21:18:00 +02:00
|
|
|
// while we were reading, there may have been a few jobs
|
|
|
|
// that got queued up also wanting to read from this piece.
|
|
|
|
// Any job that is a cache hit now, complete it immediately.
|
|
|
|
		// Then, issue the first non-cache-hit job. Once it completes
|
|
|
|
// it will keep working off this list
|
2015-08-19 15:22:00 +02:00
|
|
|
jobqueue_t stalled_jobs;
|
2014-07-06 21:18:00 +02:00
|
|
|
pe->read_jobs.swap(stalled_jobs);
|
2014-02-03 02:55:26 +01:00
|
|
|
|
2014-07-06 21:18:00 +02:00
|
|
|
// the next job to issue (i.e. this is a cache-miss)
|
2016-06-20 17:32:06 +02:00
|
|
|
disk_io_job* next_job = nullptr;
|
2009-10-20 04:49:56 +02:00
|
|
|
|
2018-06-24 00:11:36 +02:00
|
|
|
while (!stalled_jobs.empty())
|
2011-11-16 08:09:12 +01:00
|
|
|
{
|
2015-08-19 15:22:00 +02:00
|
|
|
disk_io_job* j = stalled_jobs.pop_front();
|
2014-07-06 21:18:00 +02:00
|
|
|
TORRENT_ASSERT(j->flags & disk_io_job::in_progress);
|
|
|
|
|
2017-04-16 22:37:39 +02:00
|
|
|
int ret = m_disk_cache.try_read(j, *this);
|
2014-07-06 21:18:00 +02:00
|
|
|
if (ret >= 0)
|
|
|
|
{
|
|
|
|
// cache-hit
|
2014-08-01 08:07:48 +02:00
|
|
|
m_stats_counters.inc_stats_counter(counters::num_blocks_cache_hits);
|
2014-07-06 21:18:00 +02:00
|
|
|
DLOG("do_read: cache hit\n");
|
2016-11-23 07:43:57 +01:00
|
|
|
j->flags |= disk_interface::cache_hit;
|
2016-11-26 07:51:47 +01:00
|
|
|
j->ret = status_t::no_error;
|
2014-07-06 21:18:00 +02:00
|
|
|
completed_jobs.push_back(j);
|
|
|
|
}
|
|
|
|
else if (ret == -2)
|
2011-11-16 08:09:12 +01:00
|
|
|
{
|
2014-07-06 21:18:00 +02:00
|
|
|
// error
|
2016-11-26 07:51:47 +01:00
|
|
|
j->ret = status_t::fatal_disk_error;
|
2014-07-06 21:18:00 +02:00
|
|
|
completed_jobs.push_back(j);
|
2011-11-16 08:09:12 +01:00
|
|
|
}
|
2014-07-06 21:18:00 +02:00
|
|
|
else
|
2011-11-16 08:09:12 +01:00
|
|
|
{
|
2014-07-06 21:18:00 +02:00
|
|
|
// cache-miss, issue the first one
|
|
|
|
// put back the rest
|
2016-06-20 17:32:06 +02:00
|
|
|
if (next_job == nullptr)
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
|
|
|
next_job = j;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
TORRENT_PIECE_ASSERT(j->piece == pe->piece, pe);
|
|
|
|
pe->read_jobs.push_back(j);
|
|
|
|
}
|
2011-11-16 08:09:12 +01:00
|
|
|
}
|
|
|
|
}
|
2014-02-03 02:55:26 +01:00
|
|
|
|
2014-07-06 21:18:00 +02:00
|
|
|
if (next_job)
|
|
|
|
{
|
2015-06-14 22:00:04 +02:00
|
|
|
add_job(next_job, false);
|
2014-07-06 21:18:00 +02:00
|
|
|
}
|
|
|
|
else
|
2008-07-18 01:41:46 +02:00
|
|
|
{
|
2018-06-24 00:11:36 +02:00
|
|
|
TORRENT_PIECE_ASSERT(pe->read_jobs.empty(), pe);
|
2014-07-06 21:18:00 +02:00
|
|
|
pe->outstanding_read = 0;
|
|
|
|
#if TORRENT_USE_ASSERTS
|
2018-06-24 00:11:36 +02:00
|
|
|
pe->piece_log.emplace_back(piece_log_t::clear_outstanding_jobs);
|
2014-07-06 21:18:00 +02:00
|
|
|
#endif
|
|
|
|
m_disk_cache.maybe_free_piece(pe);
|
2008-07-18 01:41:46 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-11-26 07:51:47 +01:00
|
|
|
status_t disk_io_thread::do_uncached_write(disk_io_job* j)
|
2011-10-19 07:46:49 +02:00
|
|
|
{
|
2016-12-10 20:15:25 +01:00
|
|
|
time_point const start_time = clock_type::now();
|
2017-04-16 22:37:39 +02:00
|
|
|
auto buffer = std::move(boost::get<disk_buffer_holder>(j->argument));
|
2009-03-31 10:05:46 +02:00
|
|
|
|
2018-11-01 23:05:30 +01:00
|
|
|
iovec_t const b = { buffer.get(), j->d.io.buffer_size};
|
2017-05-26 20:49:21 +02:00
|
|
|
open_mode_t const file_flags = file_flags_for_job(j
|
2016-03-20 16:38:55 +01:00
|
|
|
, m_settings.get_bool(settings_pack::coalesce_writes));
|
2015-06-14 22:00:04 +02:00
|
|
|
|
2014-08-01 08:07:48 +02:00
|
|
|
m_stats_counters.inc_stats_counter(counters::num_writing_threads, 1);
|
2009-09-05 09:21:10 +02:00
|
|
|
|
2014-07-06 21:18:00 +02:00
|
|
|
// the actual write operation
|
2016-11-13 03:45:30 +01:00
|
|
|
int const ret = j->storage->writev(b
|
2014-07-28 03:06:00 +02:00
|
|
|
, j->piece, j->d.io.offset, file_flags, j->error);
|
2015-06-14 22:00:04 +02:00
|
|
|
|
2018-08-25 14:13:51 +02:00
|
|
|
TORRENT_ASSERT(ret >= 0 || (j->error.ec && j->error.operation != operation_t::unknown));
|
2018-08-19 21:55:31 +02:00
|
|
|
|
2014-08-01 08:07:48 +02:00
|
|
|
m_stats_counters.inc_stats_counter(counters::num_writing_threads, -1);
|
2009-09-05 09:21:10 +02:00
|
|
|
|
2014-07-06 21:18:00 +02:00
|
|
|
if (!j->error.ec)
|
|
|
|
{
|
2017-05-23 07:13:36 +02:00
|
|
|
std::int64_t const write_time = total_microseconds(clock_type::now() - start_time);
|
2014-08-01 08:07:48 +02:00
|
|
|
|
|
|
|
m_stats_counters.inc_stats_counter(counters::num_blocks_written);
|
|
|
|
m_stats_counters.inc_stats_counter(counters::num_write_ops);
|
|
|
|
m_stats_counters.inc_stats_counter(counters::disk_write_time, write_time);
|
|
|
|
m_stats_counters.inc_stats_counter(counters::disk_job_time, write_time);
|
2014-07-06 21:18:00 +02:00
|
|
|
}
|
2009-09-05 09:21:10 +02:00
|
|
|
|
2017-05-18 14:50:22 +02:00
|
|
|
{
|
|
|
|
std::lock_guard<std::mutex> l(m_need_tick_mutex);
|
|
|
|
if (!j->storage->set_need_tick())
|
2017-11-20 18:10:12 +01:00
|
|
|
m_need_tick.emplace_back(aux::time_now() + minutes(2), j->storage);
|
2017-05-18 14:50:22 +02:00
|
|
|
}
|
2016-11-06 07:39:41 +01:00
|
|
|
|
2016-11-26 07:51:47 +01:00
|
|
|
return ret != j->d.io.buffer_size
|
|
|
|
? status_t::fatal_disk_error : status_t::no_error;
|
2009-09-05 09:21:10 +02:00
|
|
|
}
|
|
|
|
|
2016-11-26 07:51:47 +01:00
|
|
|
status_t disk_io_thread::do_write(disk_io_job* j, jobqueue_t& completed_jobs)
|
2007-06-10 22:46:09 +02:00
|
|
|
{
|
2018-01-03 12:54:03 +01:00
|
|
|
TORRENT_ASSERT(j->d.io.buffer_size <= default_block_size);
|
2011-02-11 08:20:11 +01:00
|
|
|
|
2016-05-01 00:54:23 +02:00
|
|
|
std::unique_lock<std::mutex> l(m_cache_mutex);
|
2014-07-06 21:18:00 +02:00
|
|
|
|
2016-03-13 04:45:20 +01:00
|
|
|
cached_piece_entry* pe = m_disk_cache.find_piece(j);
|
2017-11-20 18:10:12 +01:00
|
|
|
if (pe != nullptr && pe->hashing_done)
|
2016-03-13 04:45:20 +01:00
|
|
|
{
|
2015-08-18 13:55:50 +02:00
|
|
|
#if TORRENT_USE_ASSERTS
|
2016-03-13 04:45:20 +01:00
|
|
|
print_piece_log(pe->piece_log);
|
2011-02-11 08:20:11 +01:00
|
|
|
#endif
|
2017-04-16 22:37:39 +02:00
|
|
|
TORRENT_ASSERT(pe->blocks[j->d.io.offset / 16 / 1024].buf
|
|
|
|
!= boost::get<disk_buffer_holder>(j->argument).get());
|
2016-06-20 17:32:06 +02:00
|
|
|
TORRENT_ASSERT(pe->blocks[j->d.io.offset / 16 / 1024].buf != nullptr);
|
2016-03-13 04:45:20 +01:00
|
|
|
j->error.ec = error::operation_aborted;
|
2017-06-18 00:18:19 +02:00
|
|
|
j->error.operation = operation_t::file_write;
|
2016-11-26 07:51:47 +01:00
|
|
|
return status_t::fatal_disk_error;
|
2016-03-13 04:45:20 +01:00
|
|
|
}
|
2009-09-05 09:21:10 +02:00
|
|
|
|
2019-09-13 14:17:10 +02:00
|
|
|
pe = m_disk_cache.add_dirty_block(j
|
|
|
|
, !m_settings.get_bool(settings_pack::disable_hash_checks));
|
2010-01-12 02:56:48 +01:00
|
|
|
|
2016-03-13 04:45:20 +01:00
|
|
|
if (pe)
|
|
|
|
{
|
2015-08-18 13:55:50 +02:00
|
|
|
#if TORRENT_USE_ASSERTS
|
2016-03-13 04:45:20 +01:00
|
|
|
pe->piece_log.push_back(piece_log_t(j->action, j->d.io.offset / 0x4000));
|
2007-09-17 10:15:54 +02:00
|
|
|
#endif
|
2011-10-19 07:46:49 +02:00
|
|
|
|
2016-03-13 04:45:20 +01:00
|
|
|
if (!pe->hashing_done
|
2016-07-09 22:26:26 +02:00
|
|
|
&& pe->hash == nullptr
|
2016-03-13 04:45:20 +01:00
|
|
|
&& !m_settings.get_bool(settings_pack::disable_hash_checks))
|
|
|
|
{
|
2016-10-21 13:37:15 +02:00
|
|
|
pe->hash.reset(new partial_hash);
|
2016-03-13 04:45:20 +01:00
|
|
|
m_disk_cache.update_cache_state(pe);
|
|
|
|
}
|
2011-10-19 07:46:49 +02:00
|
|
|
|
2016-03-13 04:45:20 +01:00
|
|
|
TORRENT_PIECE_ASSERT(pe->cache_state <= cached_piece_entry::read_lru1 || pe->cache_state == cached_piece_entry::read_lru2, pe);
|
|
|
|
++pe->piece_refcount;
|
2008-03-08 07:06:31 +01:00
|
|
|
|
2016-03-13 04:45:20 +01:00
|
|
|
// see if we can progress the hash cursor with this new block
|
|
|
|
kick_hasher(pe, l);
|
2011-10-19 07:46:49 +02:00
|
|
|
|
2016-03-13 04:45:20 +01:00
|
|
|
TORRENT_PIECE_ASSERT(pe->cache_state <= cached_piece_entry::read_lru1 || pe->cache_state == cached_piece_entry::read_lru2, pe);
|
2011-10-19 07:46:49 +02:00
|
|
|
|
2016-03-13 04:45:20 +01:00
|
|
|
// flushes the piece to disk in case
|
|
|
|
// it satisfies the condition for a write
|
|
|
|
// piece to be flushed
|
|
|
|
try_flush_hashed(pe, m_settings.get_int(
|
|
|
|
settings_pack::write_cache_line_size), completed_jobs, l);
|
2009-03-14 10:24:58 +01:00
|
|
|
|
2016-03-13 04:45:20 +01:00
|
|
|
--pe->piece_refcount;
|
|
|
|
m_disk_cache.maybe_free_piece(pe);
|
2014-02-03 02:55:26 +01:00
|
|
|
|
2016-03-13 04:45:20 +01:00
|
|
|
return defer_handler;
|
2014-07-06 21:18:00 +02:00
|
|
|
}
|
2007-06-10 22:46:09 +02:00
|
|
|
|
2014-07-06 21:18:00 +02:00
|
|
|
// ok, we should just perform this job right now.
|
|
|
|
return do_uncached_write(j);
|
|
|
|
}
|
|
|
|
|
2016-12-31 18:35:10 +01:00
|
|
|
void disk_io_thread::async_read(storage_index_t storage, peer_request const& r
|
2017-09-09 19:43:54 +02:00
|
|
|
, std::function<void(disk_buffer_holder block, disk_job_flags_t const flags
|
|
|
|
, storage_error const& se)> handler, disk_job_flags_t const flags)
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
2018-01-03 12:54:03 +01:00
|
|
|
TORRENT_ASSERT(r.length <= default_block_size);
|
2009-09-05 09:21:10 +02:00
|
|
|
|
2018-08-06 09:59:42 +02:00
|
|
|
DLOG("async_read piece: %d block: %d\n", static_cast<int>(r.piece)
|
2018-01-03 12:54:03 +01:00
|
|
|
, r.start / default_block_size);
|
2011-03-20 07:37:19 +01:00
|
|
|
|
2017-06-11 19:53:15 +02:00
|
|
|
disk_io_job* j = allocate_job(job_action_t::read);
|
2016-12-31 18:35:10 +01:00
|
|
|
j->storage = m_torrents[storage]->shared_from_this();
|
2014-07-06 21:18:00 +02:00
|
|
|
j->piece = r.piece;
|
|
|
|
j->d.io.offset = r.start;
|
2016-11-25 17:17:25 +01:00
|
|
|
j->d.io.buffer_size = std::uint16_t(r.length);
|
2017-10-13 01:34:24 +02:00
|
|
|
j->argument = disk_buffer_holder(*this, nullptr, 0);
|
2014-07-06 21:18:00 +02:00
|
|
|
j->flags = flags;
|
2016-06-27 07:02:00 +02:00
|
|
|
j->callback = std::move(handler);
|
2011-03-27 10:21:26 +02:00
|
|
|
|
2018-08-06 09:59:42 +02:00
|
|
|
TORRENT_ASSERT(static_cast<int>(r.piece) * static_cast<std::int64_t>(j->storage->files().piece_length())
|
|
|
|
+ r.start + r.length <= j->storage->files().total_size());
|
|
|
|
|
2016-05-01 00:54:23 +02:00
|
|
|
std::unique_lock<std::mutex> l(m_cache_mutex);
|
2017-10-22 14:03:49 +02:00
|
|
|
int const ret = prep_read_job_impl(j);
|
2014-07-06 21:18:00 +02:00
|
|
|
l.unlock();
|
2011-12-19 06:53:11 +01:00
|
|
|
|
2014-07-06 21:18:00 +02:00
|
|
|
switch (ret)
|
|
|
|
{
|
|
|
|
case 0:
|
2017-04-16 22:37:39 +02:00
|
|
|
j->call_callback();
|
2014-07-06 21:18:00 +02:00
|
|
|
free_job(j);
|
|
|
|
break;
|
|
|
|
case 1:
|
|
|
|
add_job(j);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// this function checks to see if a read job is a cache hit,
|
2016-07-31 03:53:11 +02:00
|
|
|
// and if it doesn't have a piece allocated, it allocates
|
2014-07-06 21:18:00 +02:00
|
|
|
	// one and sets the outstanding_read flag and possibly queues
|
|
|
|
// up the job in the piece read job list
|
2016-05-01 00:54:23 +02:00
|
|
|
// the cache std::mutex must be held when calling this
|
2016-10-22 17:47:24 +02:00
|
|
|
//
|
2014-07-06 21:18:00 +02:00
|
|
|
// returns 0 if the job succeeded immediately
|
|
|
|
// 1 if it needs to be added to the job queue
|
|
|
|
// 2 if it was deferred and will be performed later (no need to
|
|
|
|
// add it to the queue)
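	// for reference, async_read() above acts on these values: 0 -> invoke the
	// callback and free the job, 1 -> add_job(), 2 -> nothing further to do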
|
2017-10-22 14:03:49 +02:00
|
|
|
int disk_io_thread::prep_read_job_impl(disk_io_job* j, bool const check_fence)
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
2017-06-11 19:53:15 +02:00
|
|
|
TORRENT_ASSERT(j->action == job_action_t::read);
|
2014-07-06 21:18:00 +02:00
|
|
|
|
2017-10-22 14:03:49 +02:00
|
|
|
int const ret = m_disk_cache.try_read(j, *this);
|
2016-03-13 04:45:20 +01:00
|
|
|
if (ret >= 0)
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
2016-03-13 04:45:20 +01:00
|
|
|
m_stats_counters.inc_stats_counter(counters::num_blocks_cache_hits);
|
|
|
|
DLOG("do_read: cache hit\n");
|
2016-11-23 07:43:57 +01:00
|
|
|
j->flags |= disk_interface::cache_hit;
|
2016-11-26 07:51:47 +01:00
|
|
|
j->ret = status_t::no_error;
|
2016-03-13 04:45:20 +01:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
else if (ret == -2)
|
|
|
|
{
|
|
|
|
j->error.ec = error::no_memory;
|
2017-06-18 00:18:19 +02:00
|
|
|
j->error.operation = operation_t::alloc_cache_piece;
|
2016-11-26 07:51:47 +01:00
|
|
|
j->ret = status_t::fatal_disk_error;
|
2016-03-13 04:45:20 +01:00
|
|
|
return 0;
|
|
|
|
}
|
2011-03-27 10:21:26 +02:00
|
|
|
|
2016-03-13 04:45:20 +01:00
|
|
|
if (check_fence && j->storage->is_blocked(j))
|
|
|
|
{
|
|
|
|
// this means the job was queued up inside storage
|
|
|
|
m_stats_counters.inc_stats_counter(counters::blocked_disk_jobs);
|
|
|
|
DLOG("blocked job: %s (torrent: %d total: %d)\n"
|
2019-03-24 17:30:22 +01:00
|
|
|
, job_name(j->action), j->storage ? j->storage->num_blocked() : 0
|
2016-03-13 04:45:20 +01:00
|
|
|
, int(m_stats_counters[counters::blocked_disk_jobs]));
|
|
|
|
return 2;
|
|
|
|
}
|
2011-03-20 20:17:59 +01:00
|
|
|
|
2016-03-13 04:45:20 +01:00
|
|
|
if (!m_settings.get_bool(settings_pack::use_read_cache)
|
|
|
|
|| m_settings.get_int(settings_pack::cache_size) == 0)
|
|
|
|
{
|
|
|
|
// if the read cache is disabled then we can skip going through the cache
|
|
|
|
// but only if there is no existing piece entry. Otherwise there may be a
|
|
|
|
			// partial hit on one or more dirty buffers, so we must use the cache
|
|
|
|
// to avoid reading bogus data from storage
|
2016-06-20 17:32:06 +02:00
|
|
|
if (m_disk_cache.find_piece(j) == nullptr)
|
2016-03-13 04:45:20 +01:00
|
|
|
return 1;
|
|
|
|
}
|
2016-03-12 22:16:39 +01:00
|
|
|
|
2016-03-13 04:45:20 +01:00
|
|
|
cached_piece_entry* pe = m_disk_cache.allocate_piece(j, cached_piece_entry::read_lru1);
|
2016-03-12 22:16:39 +01:00
|
|
|
|
2016-06-20 17:32:06 +02:00
|
|
|
if (pe == nullptr)
|
2016-03-13 04:45:20 +01:00
|
|
|
{
|
2016-11-26 07:51:47 +01:00
|
|
|
j->ret = status_t::fatal_disk_error;
|
2016-03-13 04:45:20 +01:00
|
|
|
j->error.ec = error::no_memory;
|
2017-06-18 00:18:19 +02:00
|
|
|
j->error.operation = operation_t::file_read;
|
2016-03-13 04:45:20 +01:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
if (pe->outstanding_read)
|
|
|
|
{
|
|
|
|
TORRENT_PIECE_ASSERT(j->piece == pe->piece, pe);
|
|
|
|
pe->read_jobs.push_back(j);
|
|
|
|
return 2;
|
|
|
|
}
|
2009-09-05 09:21:10 +02:00
|
|
|
|
2014-07-06 21:18:00 +02:00
|
|
|
#if TORRENT_USE_ASSERTS
|
2016-03-13 04:45:20 +01:00
|
|
|
pe->piece_log.push_back(piece_log_t(piece_log_t::set_outstanding_jobs));
|
2014-07-06 21:18:00 +02:00
|
|
|
#endif
|
2016-03-13 04:45:20 +01:00
|
|
|
pe->outstanding_read = 1;
|
2016-03-12 22:16:39 +01:00
|
|
|
|
2014-07-06 21:18:00 +02:00
|
|
|
return 1;
|
|
|
|
}
|
2009-09-05 09:21:10 +02:00
|
|
|
|
2016-12-31 18:35:10 +01:00
|
|
|
bool disk_io_thread::async_write(storage_index_t const storage, peer_request const& r
|
2016-12-26 08:38:05 +01:00
|
|
|
, char const* buf, std::shared_ptr<disk_observer> o
|
2016-11-21 05:58:48 +01:00
|
|
|
, std::function<void(storage_error const&)> handler
|
2017-09-09 19:43:54 +02:00
|
|
|
, disk_job_flags_t const flags)
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
2018-01-03 12:54:03 +01:00
|
|
|
TORRENT_ASSERT(r.length <= default_block_size);
|
2014-07-06 21:18:00 +02:00
|
|
|
TORRENT_ASSERT(r.length <= 16 * 1024);
|
2018-02-04 01:57:54 +01:00
|
|
|
TORRENT_ASSERT(buf != nullptr);
|
2014-07-06 21:18:00 +02:00
|
|
|
|
2016-12-26 08:38:05 +01:00
|
|
|
bool exceeded = false;
|
2017-10-13 01:34:24 +02:00
|
|
|
disk_buffer_holder buffer(*this, m_disk_cache.allocate_buffer(exceeded, o, "receive buffer"), 0x4000);
|
2017-01-29 21:37:42 +01:00
|
|
|
if (!buffer) aux::throw_ex<std::bad_alloc>();
|
2017-01-31 02:31:32 +01:00
|
|
|
std::memcpy(buffer.get(), buf, aux::numeric_cast<std::size_t>(r.length));
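		// the peer's block is copied into a cache-owned 16 kiB buffer here. If
		// the buffer pool is above its watermark, `exceeded` is set (and
		// eventually returned), which, together with the disk_observer,
		// presumably lets the caller throttle further writes until buffers
		// are freed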
|
2016-12-26 08:38:05 +01:00
|
|
|
|
2017-06-11 19:53:15 +02:00
|
|
|
disk_io_job* j = allocate_job(job_action_t::write);
|
2016-12-31 18:35:10 +01:00
|
|
|
j->storage = m_torrents[storage]->shared_from_this();
|
2014-07-06 21:18:00 +02:00
|
|
|
j->piece = r.piece;
|
|
|
|
j->d.io.offset = r.start;
|
2016-11-25 17:17:25 +01:00
|
|
|
j->d.io.buffer_size = std::uint16_t(r.length);
|
2017-04-16 22:37:39 +02:00
|
|
|
j->argument = std::move(buffer);
|
2016-06-27 07:02:00 +02:00
|
|
|
j->callback = std::move(handler);
|
2014-07-06 21:18:00 +02:00
|
|
|
j->flags = flags;
|
|
|
|
|
2015-08-18 13:55:50 +02:00
|
|
|
#if TORRENT_USE_ASSERTS
|
2016-05-01 00:54:23 +02:00
|
|
|
std::unique_lock<std::mutex> l3_(m_cache_mutex);
|
2014-07-06 21:18:00 +02:00
|
|
|
cached_piece_entry* pe = m_disk_cache.find_piece(j);
|
|
|
|
if (pe)
|
|
|
|
{
|
|
|
|
// we should never add a new dirty block to a piece
|
|
|
|
// whose hash we have calculated. The piece needs
|
|
|
|
		// to be cleared first (async_clear_piece).
|
|
|
|
TORRENT_ASSERT(pe->hashing_done == 0);
|
2011-04-26 09:03:05 +02:00
|
|
|
|
2016-06-20 17:32:06 +02:00
|
|
|
TORRENT_ASSERT(pe->blocks[r.start / 0x4000].refcount == 0 || pe->blocks[r.start / 0x4000].buf == nullptr);
|
2014-07-06 21:18:00 +02:00
|
|
|
}
|
|
|
|
l3_.unlock();
|
|
|
|
#endif
|
|
|
|
|
2015-08-18 13:55:50 +02:00
|
|
|
#if TORRENT_USE_ASSERTS && defined TORRENT_EXPENSIVE_INVARIANT_CHECKS
|
2016-05-01 00:54:23 +02:00
|
|
|
std::unique_lock<std::mutex> l2_(m_cache_mutex);
|
2016-09-06 04:25:20 +02:00
|
|
|
auto range = m_disk_cache.all_pieces();
|
|
|
|
for (auto i = range.first; i != range.second; ++i)
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
|
|
|
cached_piece_entry const& p = *i;
|
2018-01-03 12:54:03 +01:00
|
|
|
int const piece_size = p.storage->files().piece_size(p.piece);
|
|
|
|
int const blocks_in_piece = (piece_size + default_block_size - 1) / default_block_size;
|
2014-07-06 21:18:00 +02:00
|
|
|
for (int k = 0; k < blocks_in_piece; ++k)
|
2017-04-16 22:37:39 +02:00
|
|
|
TORRENT_PIECE_ASSERT(p.blocks[k].buf != boost::get<disk_buffer_holder>(j->argument).get(), &p);
|
2014-07-06 21:18:00 +02:00
|
|
|
}
|
|
|
|
l2_.unlock();
|
2010-01-12 02:56:48 +01:00
|
|
|
#endif
|
2011-03-15 03:21:28 +01:00
|
|
|
|
2018-01-03 12:54:03 +01:00
|
|
|
TORRENT_ASSERT((r.start % default_block_size) == 0);
|
2011-03-20 20:17:59 +01:00
|
|
|
|
2016-12-31 18:35:10 +01:00
|
|
|
if (j->storage->is_blocked(j))
|
2016-03-13 04:45:20 +01:00
|
|
|
{
|
|
|
|
// this means the job was queued up inside storage
|
|
|
|
m_stats_counters.inc_stats_counter(counters::blocked_disk_jobs);
|
|
|
|
DLOG("blocked job: %s (torrent: %d total: %d)\n"
|
2019-03-24 17:30:22 +01:00
|
|
|
, job_name(j->action), j->storage ? j->storage->num_blocked() : 0
|
2016-03-13 04:45:20 +01:00
|
|
|
, int(m_stats_counters[counters::blocked_disk_jobs]));
|
2016-12-26 08:38:05 +01:00
|
|
|
return exceeded;
|
2016-03-13 04:45:20 +01:00
|
|
|
}
|
2009-12-16 11:49:15 +01:00
|
|
|
|
2016-05-01 00:54:23 +02:00
|
|
|
std::unique_lock<std::mutex> l(m_cache_mutex);
|
2016-03-13 04:45:20 +01:00
|
|
|
// if we succeed in adding the block to the cache, the job will
|
|
|
|
// be added along with it. we may not free j if so
|
2019-09-13 14:17:10 +02:00
|
|
|
cached_piece_entry* dpe = m_disk_cache.add_dirty_block(j
|
|
|
|
, !m_settings.get_bool(settings_pack::disable_hash_checks));
|
2010-04-24 23:53:45 +02:00
|
|
|
|
2017-11-20 18:10:12 +01:00
|
|
|
if (dpe != nullptr)
|
2016-03-13 04:45:20 +01:00
|
|
|
{
|
2016-06-16 14:24:41 +02:00
|
|
|
if (dpe->outstanding_flush == 0)
|
|
|
|
{
|
|
|
|
dpe->outstanding_flush = 1;
|
|
|
|
l.unlock();
|
2009-09-05 09:21:10 +02:00
|
|
|
|
2016-06-16 14:24:41 +02:00
|
|
|
// the block and write job were successfully inserted
|
|
|
|
// into the cache. Now, see if we should trigger a flush
|
2017-06-11 19:53:15 +02:00
|
|
|
j = allocate_job(job_action_t::flush_hashed);
|
2016-12-31 18:35:10 +01:00
|
|
|
j->storage = m_torrents[storage]->shared_from_this();
|
2016-06-16 14:24:41 +02:00
|
|
|
j->piece = r.piece;
|
|
|
|
j->flags = flags;
|
|
|
|
add_job(j);
|
|
|
|
}
|
|
|
|
|
|
|
|
// if we added the block (regardless of whether we also
|
|
|
|
// issued a flush job or not), we're done.
|
2016-12-26 08:38:05 +01:00
|
|
|
return exceeded;
|
2016-06-16 14:24:41 +02:00
|
|
|
}
|
2016-03-13 04:45:20 +01:00
|
|
|
l.unlock();
|
2009-09-05 09:21:10 +02:00
|
|
|
|
2014-07-06 21:18:00 +02:00
|
|
|
add_job(j);
|
2016-12-26 08:38:05 +01:00
|
|
|
return exceeded;
|
2014-07-06 21:18:00 +02:00
|
|
|
}
|
2009-09-05 09:21:10 +02:00
|
|
|
|
2016-12-31 18:35:10 +01:00
|
|
|
void disk_io_thread::async_hash(storage_index_t const storage
|
2018-08-14 21:26:10 +02:00
|
|
|
, piece_index_t const piece, disk_job_flags_t const flags
|
2017-06-08 00:46:49 +02:00
|
|
|
, std::function<void(piece_index_t, sha1_hash const&, storage_error const&)> handler)
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
2017-06-11 19:53:15 +02:00
|
|
|
disk_io_job* j = allocate_job(job_action_t::hash);
|
2016-12-31 18:35:10 +01:00
|
|
|
j->storage = m_torrents[storage]->shared_from_this();
|
2014-07-06 21:18:00 +02:00
|
|
|
j->piece = piece;
|
2016-06-27 07:02:00 +02:00
|
|
|
j->callback = std::move(handler);
|
2014-07-06 21:18:00 +02:00
|
|
|
j->flags = flags;
|
2009-09-05 09:21:10 +02:00
|
|
|
|
2017-11-20 18:10:12 +01:00
|
|
|
int const piece_size = j->storage->files().piece_size(piece);
|
2011-10-19 07:46:49 +02:00
|
|
|
|
2014-07-06 21:18:00 +02:00
|
|
|
// first check to see if the hashing is already done
|
2016-05-01 00:54:23 +02:00
|
|
|
std::unique_lock<std::mutex> l(m_cache_mutex);
|
2014-07-06 21:18:00 +02:00
|
|
|
cached_piece_entry* pe = m_disk_cache.find_piece(j);
|
2016-10-28 18:28:27 +02:00
|
|
|
if (pe != nullptr && !pe->hashing && pe->hash && pe->hash->offset == piece_size)
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
2018-06-24 00:11:36 +02:00
|
|
|
j->d.piece_hash = pe->hash->h.final();
|
2008-04-13 00:08:07 +02:00
|
|
|
|
2016-10-21 13:37:15 +02:00
|
|
|
pe->hash.reset();
|
2007-06-10 22:46:09 +02:00
|
|
|
|
2014-07-06 21:18:00 +02:00
|
|
|
if (pe->cache_state != cached_piece_entry::volatile_read_lru)
|
|
|
|
pe->hashing_done = 1;
|
2007-06-10 22:46:09 +02:00
|
|
|
|
2014-07-06 21:18:00 +02:00
|
|
|
#if TORRENT_USE_ASSERTS
|
|
|
|
++pe->hash_passes;
|
2008-02-14 04:48:20 +01:00
|
|
|
#endif
|
2015-06-14 22:00:04 +02:00
|
|
|
|
2014-07-06 21:18:00 +02:00
|
|
|
l.unlock();
|
2017-04-16 22:37:39 +02:00
|
|
|
j->call_callback();
|
2014-07-06 21:18:00 +02:00
|
|
|
free_job(j);
|
|
|
|
return;
|
|
|
|
}
|
2015-06-14 22:00:04 +02:00
|
|
|
l.unlock();
|
2014-07-06 21:18:00 +02:00
|
|
|
add_job(j);
|
|
|
|
}
|
2009-01-21 08:31:49 +01:00
|
|
|
|
2016-12-31 18:35:10 +01:00
|
|
|
void disk_io_thread::async_move_storage(storage_index_t const storage
|
2017-06-01 10:15:15 +02:00
|
|
|
, std::string p, move_flags_t const flags
|
2016-11-26 07:51:47 +01:00
|
|
|
, std::function<void(status_t, std::string const&, storage_error const&)> handler)
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
2017-06-11 19:53:15 +02:00
|
|
|
disk_io_job* j = allocate_job(job_action_t::move_storage);
|
2016-12-31 18:35:10 +01:00
|
|
|
j->storage = m_torrents[storage]->shared_from_this();
|
2017-04-16 22:37:39 +02:00
|
|
|
j->argument = std::move(p);
|
2016-06-27 07:02:00 +02:00
|
|
|
j->callback = std::move(handler);
|
2017-06-01 10:15:15 +02:00
|
|
|
j->move_flags = flags;
|
2012-04-06 05:02:50 +02:00
|
|
|
|
2016-12-31 18:35:10 +01:00
|
|
|
add_fence_job(j);
|
2014-07-06 21:18:00 +02:00
|
|
|
}
|
2010-02-21 09:52:26 +01:00
|
|
|
|
2016-12-31 18:35:10 +01:00
|
|
|
void disk_io_thread::async_release_files(storage_index_t const storage
|
2016-11-22 07:48:14 +01:00
|
|
|
, std::function<void()> handler)
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
2017-06-11 19:53:15 +02:00
|
|
|
disk_io_job* j = allocate_job(job_action_t::release_files);
|
2016-12-31 18:35:10 +01:00
|
|
|
j->storage = m_torrents[storage]->shared_from_this();
|
2016-06-27 07:02:00 +02:00
|
|
|
j->callback = std::move(handler);
|
2010-02-21 09:52:26 +01:00
|
|
|
|
2018-09-03 09:54:03 +02:00
|
|
|
#ifdef TORRENT_EXPENSIVE_INVARIANT_CHECKS
|
|
|
|
{
|
2018-09-04 14:31:05 +02:00
|
|
|
std::unique_lock<std::mutex> l(m_cache_mutex);
|
|
|
|
auto const& pieces = j->storage->cached_pieces();
|
|
|
|
for (auto const& p : pieces)
|
|
|
|
TORRENT_ASSERT(p.storage == j->storage);
|
2018-09-03 09:54:03 +02:00
|
|
|
}
|
|
|
|
#endif
|
2016-12-31 18:35:10 +01:00
|
|
|
add_fence_job(j);
|
2014-07-06 21:18:00 +02:00
|
|
|
}
|
|
|
|
|
2018-04-05 16:20:54 +02:00
|
|
|
void disk_io_thread::abort_hash_jobs(storage_index_t const storage)
|
|
|
|
{
|
2018-04-05 07:55:23 +02:00
|
|
|
// abort outstanding hash jobs belonging to this torrent
|
|
|
|
std::unique_lock<std::mutex> l(m_job_mutex);
|
|
|
|
|
|
|
|
std::shared_ptr<storage_interface> st
|
2018-04-05 16:20:54 +02:00
|
|
|
= m_torrents[storage]->shared_from_this();
|
2018-04-05 07:55:23 +02:00
|
|
|
// hash jobs
|
2018-04-05 16:20:54 +02:00
|
|
|
for (auto i = m_hash_io_jobs.m_queued_jobs.iterate(); i.get(); i.next())
|
|
|
|
{
|
2018-04-05 07:55:23 +02:00
|
|
|
disk_io_job *j = i.get();
|
|
|
|
if (j->storage != st) continue;
|
|
|
|
j->flags |= disk_io_job::aborted;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-12-31 18:35:10 +01:00
|
|
|
void disk_io_thread::async_delete_files(storage_index_t const storage
|
2017-07-27 22:26:12 +02:00
|
|
|
, remove_flags_t const options
|
2016-11-23 07:43:57 +01:00
|
|
|
, std::function<void(storage_error const&)> handler)
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
2018-04-05 07:55:23 +02:00
|
|
|
abort_hash_jobs(storage);
|
2017-06-11 19:53:15 +02:00
|
|
|
disk_io_job* j = allocate_job(job_action_t::delete_files);
|
2016-12-31 18:35:10 +01:00
|
|
|
j->storage = m_torrents[storage]->shared_from_this();
|
2016-06-27 07:02:00 +02:00
|
|
|
j->callback = std::move(handler);
|
2017-04-16 22:37:39 +02:00
|
|
|
j->argument = options;
|
2016-12-31 18:35:10 +01:00
|
|
|
add_fence_job(j);
|
2014-07-06 21:18:00 +02:00
|
|
|
}
|
|
|
|
|
2016-12-31 18:35:10 +01:00
|
|
|
void disk_io_thread::async_check_files(storage_index_t const storage
|
2016-02-15 00:17:32 +01:00
|
|
|
, add_torrent_params const* resume_data
|
2016-12-22 16:42:33 +01:00
|
|
|
, aux::vector<std::string, file_index_t>& links
|
2016-11-26 07:51:47 +01:00
|
|
|
, std::function<void(status_t, storage_error const&)> handler)
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
2017-11-20 18:10:12 +01:00
|
|
|
auto links_vector = new aux::vector<std::string, file_index_t>();
|
2015-08-20 01:33:20 +02:00
|
|
|
links_vector->swap(links);
|
|
|
|
|
2017-06-11 19:53:15 +02:00
|
|
|
disk_io_job* j = allocate_job(job_action_t::check_fastresume);
|
2016-12-31 18:35:10 +01:00
|
|
|
j->storage = m_torrents[storage]->shared_from_this();
|
2017-04-16 22:37:39 +02:00
|
|
|
j->argument = resume_data;
|
2015-08-20 01:33:20 +02:00
|
|
|
j->d.links = links_vector;
|
2016-06-27 07:02:00 +02:00
|
|
|
j->callback = std::move(handler);
|
2009-05-23 05:05:21 +02:00
|
|
|
|
2016-12-31 18:35:10 +01:00
|
|
|
add_fence_job(j);
|
2014-07-06 21:18:00 +02:00
|
|
|
}
|
2008-04-13 23:26:57 +02:00
|
|
|
|
2016-12-31 18:35:10 +01:00
|
|
|
void disk_io_thread::async_rename_file(storage_index_t const storage
|
2018-08-14 21:26:10 +02:00
|
|
|
, file_index_t const index, std::string name
|
2016-12-22 16:42:33 +01:00
|
|
|
, std::function<void(std::string const&, file_index_t, storage_error const&)> handler)
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
2017-06-11 19:53:15 +02:00
|
|
|
disk_io_job* j = allocate_job(job_action_t::rename_file);
|
2016-12-31 18:35:10 +01:00
|
|
|
j->storage = m_torrents[storage]->shared_from_this();
|
2016-12-22 16:42:33 +01:00
|
|
|
j->file_index = index;
|
2017-04-16 22:37:39 +02:00
|
|
|
j->argument = std::move(name);
|
2016-06-27 07:02:00 +02:00
|
|
|
j->callback = std::move(handler);
|
2016-12-31 18:35:10 +01:00
|
|
|
add_fence_job(j);
|
2014-07-06 21:18:00 +02:00
|
|
|
}
|
2010-01-15 17:45:42 +01:00
|
|
|
|
2016-12-31 18:35:10 +01:00
|
|
|
void disk_io_thread::async_stop_torrent(storage_index_t const storage
|
2016-11-23 07:43:57 +01:00
|
|
|
, std::function<void()> handler)
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
2018-04-05 07:55:23 +02:00
|
|
|
abort_hash_jobs(storage);
|
2017-06-11 19:53:15 +02:00
|
|
|
disk_io_job* j = allocate_job(job_action_t::stop_torrent);
|
2017-10-22 14:03:49 +02:00
|
|
|
j->storage = m_torrents[storage]->shared_from_this();
|
2016-06-27 07:02:00 +02:00
|
|
|
j->callback = std::move(handler);
|
2016-12-31 18:35:10 +01:00
|
|
|
add_fence_job(j);
|
2014-07-06 21:18:00 +02:00
|
|
|
}
|
2011-03-19 23:52:29 +01:00
|
|
|
|
2016-12-31 18:35:10 +01:00
|
|
|
void disk_io_thread::async_flush_piece(storage_index_t const storage
|
2016-12-22 16:42:33 +01:00
|
|
|
, piece_index_t const piece
|
2016-11-23 01:03:27 +01:00
|
|
|
, std::function<void()> handler)
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
2017-06-11 19:53:15 +02:00
|
|
|
disk_io_job* j = allocate_job(job_action_t::flush_piece);
|
2016-12-31 18:35:10 +01:00
|
|
|
j->storage = m_torrents[storage]->shared_from_this();
|
2014-07-06 21:18:00 +02:00
|
|
|
j->piece = piece;
|
2016-06-27 07:02:00 +02:00
|
|
|
j->callback = std::move(handler);
|
2008-07-18 17:31:22 +02:00
|
|
|
|
2015-06-14 22:00:04 +02:00
|
|
|
if (m_abort)
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
2015-06-06 07:22:53 +02:00
|
|
|
j->error.ec = boost::asio::error::operation_aborted;
|
2017-04-16 22:37:39 +02:00
|
|
|
j->call_callback();
|
2014-07-06 21:18:00 +02:00
|
|
|
free_job(j);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
add_job(j);
|
|
|
|
}
|
|
|
|
|
2016-12-31 18:35:10 +01:00
|
|
|
void disk_io_thread::async_set_file_priority(storage_index_t const storage
|
2017-10-29 00:44:40 +02:00
|
|
|
, aux::vector<download_priority_t, file_index_t> prios
|
2018-07-08 14:36:32 +02:00
|
|
|
, std::function<void(storage_error const&, aux::vector<download_priority_t, file_index_t>)> handler)
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
2017-06-11 19:53:15 +02:00
|
|
|
disk_io_job* j = allocate_job(job_action_t::file_priority);
|
2016-12-31 18:35:10 +01:00
|
|
|
j->storage = m_torrents[storage]->shared_from_this();
|
2017-04-16 22:37:39 +02:00
|
|
|
j->argument = std::move(prios);
|
2016-06-27 07:02:00 +02:00
|
|
|
j->callback = std::move(handler);
|
2008-02-08 11:22:05 +01:00
|
|
|
|
2016-12-31 18:35:10 +01:00
|
|
|
add_fence_job(j);
|
2014-07-06 21:18:00 +02:00
|
|
|
}
|
|
|
|
|
2016-12-31 18:35:10 +01:00
|
|
|
void disk_io_thread::async_clear_piece(storage_index_t const storage
|
2016-12-22 16:42:33 +01:00
|
|
|
, piece_index_t const index, std::function<void(piece_index_t)> handler)
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
2017-06-11 19:53:15 +02:00
|
|
|
disk_io_job* j = allocate_job(job_action_t::clear_piece);
|
2016-12-31 18:35:10 +01:00
|
|
|
j->storage = m_torrents[storage]->shared_from_this();
|
2014-07-06 21:18:00 +02:00
|
|
|
j->piece = index;
|
2016-06-27 07:02:00 +02:00
|
|
|
j->callback = std::move(handler);
|
2014-07-06 21:18:00 +02:00
|
|
|
|
|
|
|
		// this is a fence job because regular jobs are not guaranteed to be
|
|
|
|
		// executed in order, and clear_piece must guarantee that all write
|
|
|
|
		// jobs that have been issued finish before the clear piece job completes
|
|
|
|
|
|
|
|
// TODO: this is potentially very expensive. One way to solve
|
|
|
|
// it would be to have a fence for just this one piece.
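		// (a fence job holds back every subsequent job for this storage until
		// it completes, which is what makes this potentially expensive)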
|
2016-12-31 18:35:10 +01:00
|
|
|
add_fence_job(j);
|
2014-07-06 21:18:00 +02:00
|
|
|
}
|
|
|
|
|
2016-12-31 18:35:10 +01:00
|
|
|
void disk_io_thread::clear_piece(storage_index_t const storage
|
2016-12-22 16:42:33 +01:00
|
|
|
, piece_index_t const index)
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
2016-12-31 18:35:10 +01:00
|
|
|
|
|
|
|
storage_interface* st = m_torrents[storage].get();
|
2016-05-01 00:54:23 +02:00
|
|
|
std::unique_lock<std::mutex> l(m_cache_mutex);
|
2014-07-06 21:18:00 +02:00
|
|
|
|
2016-12-31 18:35:10 +01:00
|
|
|
cached_piece_entry* pe = m_disk_cache.find_piece(st, index);
|
2016-07-09 22:26:26 +02:00
|
|
|
if (pe == nullptr) return;
|
2014-07-06 21:18:00 +02:00
|
|
|
TORRENT_PIECE_ASSERT(pe->hashing == false, pe);
|
|
|
|
pe->hashing_done = 0;
|
2016-10-21 13:37:15 +02:00
|
|
|
pe->hash.reset();
|
2014-07-06 21:18:00 +02:00
|
|
|
|
|
|
|
// evict_piece returns true if the piece was in fact
|
|
|
|
// evicted. A piece may fail to be evicted if there
|
|
|
|
// are still outstanding operations on it, which should
|
|
|
|
		// never be the case when this function is used.
|
|
|
|
		// In fact, no jobs should really be hung on this piece
|
|
|
|
// at this point
|
2015-08-19 15:22:00 +02:00
|
|
|
jobqueue_t jobs;
|
2018-08-14 21:26:10 +02:00
|
|
|
bool const ok = m_disk_cache.evict_piece(pe, jobs, block_cache::allow_ghost);
|
2014-07-06 21:18:00 +02:00
|
|
|
TORRENT_PIECE_ASSERT(ok, pe);
|
2015-05-19 05:13:49 +02:00
|
|
|
TORRENT_UNUSED(ok);
|
2014-07-06 21:18:00 +02:00
|
|
|
fail_jobs(storage_error(boost::asio::error::operation_aborted), jobs);
|
|
|
|
}
|
|
|
|
|
2016-05-01 00:54:23 +02:00
|
|
|
void disk_io_thread::kick_hasher(cached_piece_entry* pe, std::unique_lock<std::mutex>& l)
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
|
|
|
if (!pe->hash) return;
|
|
|
|
if (pe->hashing) return;
|
|
|
|
|
2017-04-07 15:15:54 +02:00
|
|
|
int const piece_size = pe->storage->files().piece_size(pe->piece);
|
2016-10-21 13:37:15 +02:00
|
|
|
partial_hash* ph = pe->hash.get();
|
2014-07-06 21:18:00 +02:00
|
|
|
|
|
|
|
// are we already done?
|
|
|
|
if (ph->offset >= piece_size) return;
|
|
|
|
|
2018-01-03 12:54:03 +01:00
|
|
|
int const cursor = ph->offset / default_block_size;
|
2014-07-06 21:18:00 +02:00
|
|
|
int end = cursor;
|
2018-01-03 12:54:03 +01:00
|
|
|
TORRENT_PIECE_ASSERT(ph->offset % default_block_size == 0, pe);
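		// e.g. if 5 blocks (80 kiB) of the piece have been hashed so far,
		// ph->offset is 81920 and cursor is block index 5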
|
2014-07-06 21:18:00 +02:00
|
|
|
|
|
|
|
for (int i = cursor; i < pe->blocks_in_piece; ++i)
|
|
|
|
{
|
|
|
|
cached_block_entry& bl = pe->blocks[i];
|
2016-07-09 22:26:26 +02:00
|
|
|
if (bl.buf == nullptr) break;
|
2014-07-06 21:18:00 +02:00
|
|
|
|
|
|
|
			// if we fail to lock the block, it's no longer in the cache
|
|
|
|
if (m_disk_cache.inc_block_refcount(pe, i, block_cache::ref_hashing) == false)
|
|
|
|
break;
|
|
|
|
|
|
|
|
++end;
|
|
|
|
}
|
|
|
|
|
|
|
|
// no blocks to hash?
|
|
|
|
if (end == cursor) return;
|
|
|
|
|
|
|
|
pe->hashing = 1;
|
|
|
|
|
|
|
|
DLOG("kick_hasher: %d - %d (piece: %d offset: %d)\n"
|
|
|
|
, cursor, end, int(pe->piece), ph->offset);
|
|
|
|
|
2016-10-25 06:13:35 +02:00
|
|
|
// save a local copy of offset to avoid concurrent access
|
|
|
|
int offset = ph->offset;
|
2016-10-25 07:02:44 +02:00
|
|
|
#if TORRENT_USE_ASSERTS
|
|
|
|
int old_offset = offset;
|
|
|
|
#endif
|
2016-10-25 06:13:35 +02:00
|
|
|
|
2014-07-06 21:18:00 +02:00
|
|
|
l.unlock();
|
|
|
|
|
2016-12-10 20:15:25 +01:00
|
|
|
time_point const start_time = clock_type::now();
|
2014-07-06 21:18:00 +02:00
|
|
|
|
|
|
|
for (int i = cursor; i < end; ++i)
|
|
|
|
{
|
|
|
|
cached_block_entry& bl = pe->blocks[i];
|
2018-01-03 12:54:03 +01:00
|
|
|
int const size = std::min(default_block_size, piece_size - offset);
|
2014-07-06 21:18:00 +02:00
|
|
|
ph->h.update(bl.buf, size);
|
2016-10-25 06:13:35 +02:00
|
|
|
offset += size;
|
2014-07-06 21:18:00 +02:00
|
|
|
}
|
|
|
|
|
2016-12-10 20:15:25 +01:00
|
|
|
std::int64_t const hash_time = total_microseconds(clock_type::now() - start_time);
|
2014-07-06 21:18:00 +02:00
|
|
|
|
|
|
|
l.lock();
|
|
|
|
|
2016-10-25 07:02:44 +02:00
|
|
|
TORRENT_ASSERT(old_offset == ph->offset);
|
2016-10-25 06:13:35 +02:00
|
|
|
ph->offset = offset;
|
|
|
|
|
2014-07-06 21:18:00 +02:00
|
|
|
TORRENT_PIECE_ASSERT(pe->hashing, pe);
|
|
|
|
TORRENT_PIECE_ASSERT(pe->hash, pe);
|
|
|
|
|
2014-08-01 08:07:48 +02:00
|
|
|
m_stats_counters.inc_stats_counter(counters::num_blocks_hashed, end - cursor);
|
|
|
|
m_stats_counters.inc_stats_counter(counters::disk_hash_time, hash_time);
|
|
|
|
m_stats_counters.inc_stats_counter(counters::disk_job_time, hash_time);
|
2014-07-06 21:18:00 +02:00
|
|
|
|
|
|
|
pe->hashing = 0;
|
|
|
|
|
|
|
|
// decrement the block refcounters
|
|
|
|
for (int i = cursor; i < end; ++i)
|
|
|
|
m_disk_cache.dec_block_refcount(pe, i, block_cache::ref_hashing);
|
|
|
|
|
|
|
|
// did we complete the hash?
|
|
|
|
if (pe->hash->offset != piece_size) return;
|
|
|
|
|
|
|
|
// if there are any hash-jobs hanging off of this piece
|
|
|
|
// we should post them now
|
2015-08-19 15:22:00 +02:00
|
|
|
disk_io_job* j = pe->jobs.get_all();
|
|
|
|
jobqueue_t hash_jobs;
|
2014-07-06 21:18:00 +02:00
|
|
|
while (j)
|
|
|
|
{
|
|
|
|
TORRENT_PIECE_ASSERT((j->flags & disk_io_job::in_progress) || !j->storage, pe);
|
2015-08-19 15:22:00 +02:00
|
|
|
disk_io_job* next = j->next;
|
2016-06-20 17:32:06 +02:00
|
|
|
j->next = nullptr;
|
2014-07-06 21:18:00 +02:00
|
|
|
TORRENT_PIECE_ASSERT(j->piece == pe->piece, pe);
|
2017-06-11 19:53:15 +02:00
|
|
|
if (j->action == job_action_t::hash) hash_jobs.push_back(j);
|
2014-07-06 21:18:00 +02:00
|
|
|
else pe->jobs.push_back(j);
|
|
|
|
j = next;
|
|
|
|
}
|
2018-06-24 00:11:36 +02:00
|
|
|
if (!hash_jobs.empty())
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
2018-06-24 00:11:36 +02:00
|
|
|
sha1_hash const result = pe->hash->h.final();
|
2014-07-06 21:18:00 +02:00
|
|
|
|
2016-07-31 03:53:11 +02:00
|
|
|
for (auto i = hash_jobs.iterate(); i.get(); i.next())
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
2016-07-31 03:53:11 +02:00
|
|
|
disk_io_job* hj = i.get();
|
2017-10-22 15:24:12 +02:00
|
|
|
hj->d.piece_hash = result;
|
2016-11-26 07:51:47 +01:00
|
|
|
hj->ret = status_t::no_error;
|
2014-07-06 21:18:00 +02:00
|
|
|
}
|
|
|
|
|
2016-10-21 13:37:15 +02:00
|
|
|
pe->hash.reset();
|
2014-07-06 21:18:00 +02:00
|
|
|
if (pe->cache_state != cached_piece_entry::volatile_read_lru)
|
|
|
|
pe->hashing_done = 1;
|
|
|
|
#if TORRENT_USE_ASSERTS
|
|
|
|
++pe->hash_passes;
|
|
|
|
#endif
|
|
|
|
add_completed_jobs(hash_jobs);
|
|
|
|
}
|
|
|
|
}
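// kick_hasher() above follows a pin / unlock / work / re-lock pattern: the
// blocks are pinned (their refcounts incremented) while the cache mutex is
// held, the mutex is dropped for the expensive SHA-1 work, and the pins are
// released once the lock has been re-acquired. A minimal illustrative sketch
// of that idiom follows (disabled; blk_t, pin() and unpin() are hypothetical
// stand-ins, not the actual block_cache API):
#if 0
std::unique_lock<std::mutex> l(cache_mutex);
std::vector<blk_t*> pinned;
for (blk_t* b : candidate_blocks)
	if (pin(b)) pinned.push_back(b); // only hash blocks we managed to pin
l.unlock(); // don't hold the cache mutex while hashing
hasher h;
for (blk_t* b : pinned) h.update({b->buf, b->size});
l.lock();
for (blk_t* b : pinned) unpin(b); // drop the refcounts under the lock again
#endif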
|
|
|
|
|
2016-11-26 07:51:47 +01:00
|
|
|
status_t disk_io_thread::do_uncached_hash(disk_io_job* j)
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
|
|
|
// we're not using a cache. This is the simple path
|
|
|
|
// just read straight from the file
|
|
|
|
TORRENT_ASSERT(m_magic == 0x1337);
|
|
|
|
|
2017-04-07 15:15:54 +02:00
|
|
|
int const piece_size = j->storage->files().piece_size(j->piece);
|
2018-01-03 12:54:03 +01:00
|
|
|
int const blocks_in_piece = (piece_size + default_block_size - 1) / default_block_size;
|
2017-05-26 20:49:21 +02:00
|
|
|
open_mode_t const file_flags = file_flags_for_job(j
|
2016-03-20 16:38:55 +01:00
|
|
|
, m_settings.get_bool(settings_pack::coalesce_reads));
|
2014-07-06 21:18:00 +02:00
|
|
|
|
2017-05-02 06:07:50 +02:00
|
|
|
iovec_t iov = { m_disk_cache.allocate_buffer("hashing")
|
2018-01-03 12:54:03 +01:00
|
|
|
, static_cast<std::size_t>(default_block_size) };
|
2019-03-26 12:29:04 +01:00
|
|
|
|
|
|
|
// free at the end of the scope
|
|
|
|
auto iov_dealloc = aux::scope_end([&]{ m_disk_cache.free_buffer(iov.data()); });
|
|
|
|
|
2014-07-06 21:18:00 +02:00
|
|
|
hasher h;
|
|
|
|
int ret = 0;
|
|
|
|
int offset = 0;
|
|
|
|
for (int i = 0; i < blocks_in_piece; ++i)
|
|
|
|
{
|
|
|
|
DLOG("do_hash: (uncached) reading (piece: %d block: %d)\n"
|
|
|
|
, int(j->piece), i);
|
|
|
|
|
2016-12-10 20:15:25 +01:00
|
|
|
time_point const start_time = clock_type::now();
|
2014-07-06 21:18:00 +02:00
|
|
|
|
2018-11-01 23:05:30 +01:00
|
|
|
iov = iov.first(std::min(default_block_size, piece_size - offset));
|
|
|
|
ret = j->storage->readv(iov, j->piece, offset, file_flags, j->error);
|
2019-02-10 20:01:48 +01:00
|
|
|
if (ret <= 0) break;
|
2018-11-01 23:05:30 +01:00
|
|
|
iov = iov.first(ret);
|
2014-07-06 21:18:00 +02:00
|
|
|
|
|
|
|
if (!j->error.ec)
|
|
|
|
{
|
2016-12-05 02:15:49 +01:00
|
|
|
std::int64_t const read_time = total_microseconds(clock_type::now() - start_time);
|
2014-08-01 08:07:48 +02:00
|
|
|
|
|
|
|
m_stats_counters.inc_stats_counter(counters::num_blocks_read);
|
|
|
|
m_stats_counters.inc_stats_counter(counters::num_read_ops);
|
|
|
|
m_stats_counters.inc_stats_counter(counters::disk_read_time, read_time);
|
|
|
|
m_stats_counters.inc_stats_counter(counters::disk_job_time, read_time);
|
2014-07-06 21:18:00 +02:00
|
|
|
}
|
|
|
|
|
2018-01-03 12:54:03 +01:00
|
|
|
offset += default_block_size;
|
2017-04-29 06:27:55 +02:00
|
|
|
h.update(iov);
|
2014-07-06 21:18:00 +02:00
|
|
|
}
|
|
|
|
|
2018-06-24 00:11:36 +02:00
|
|
|
j->d.piece_hash = h.final();
|
2016-11-26 07:51:47 +01:00
|
|
|
return ret >= 0 ? status_t::no_error : status_t::fatal_disk_error;
|
2014-07-06 21:18:00 +02:00
|
|
|
}
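// do_uncached_hash() above boils down to: read the piece back from disk one
// default_block_size chunk at a time and feed each chunk into a SHA-1 hasher,
// bailing out on a failed or short read. A stand-alone sketch of that loop
// (disabled; read_block() is a hypothetical stand-in for
// storage_interface::readv()):
#if 0
sha1_hash hash_piece_from_disk(int const piece_size, int const block_size)
{
	hasher h;
	std::vector<char> buf(std::size_t(block_size));
	for (int offset = 0; offset < piece_size; offset += block_size)
	{
		int const len = std::min(block_size, piece_size - offset);
		if (read_block(buf.data(), offset, len) != len) break; // error or short read
		h.update({buf.data(), len});
	}
	return h.final();
}
#endif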
|
|
|
|
|
2016-11-26 07:51:47 +01:00
|
|
|
status_t disk_io_thread::do_hash(disk_io_job* j, jobqueue_t& /* completed_jobs */ )
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
2019-09-13 14:17:10 +02:00
|
|
|
if (m_settings.get_bool(settings_pack::disable_hash_checks))
|
|
|
|
return status_t::no_error;
|
|
|
|
|
2017-04-07 15:15:54 +02:00
|
|
|
int const piece_size = j->storage->files().piece_size(j->piece);
|
2017-05-26 20:49:21 +02:00
|
|
|
open_mode_t const file_flags = file_flags_for_job(j
|
2016-03-20 16:38:55 +01:00
|
|
|
, m_settings.get_bool(settings_pack::coalesce_reads));
|
2014-07-06 21:18:00 +02:00
|
|
|
|
2016-05-01 00:54:23 +02:00
|
|
|
std::unique_lock<std::mutex> l(m_cache_mutex);
|
2014-07-06 21:18:00 +02:00
|
|
|
|
|
|
|
cached_piece_entry* pe = m_disk_cache.find_piece(j);
|
2016-10-28 18:28:27 +02:00
|
|
|
if (pe != nullptr)
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
2014-07-12 21:23:31 +02:00
|
|
|
TORRENT_ASSERT(pe->in_use);
|
2014-07-06 21:18:00 +02:00
|
|
|
#if TORRENT_USE_ASSERTS
|
|
|
|
pe->piece_log.push_back(piece_log_t(j->action));
|
|
|
|
#endif
|
2018-01-03 12:54:03 +01:00
|
|
|
m_disk_cache.cache_hit(pe, j->d.io.offset / default_block_size
|
2017-09-09 19:43:54 +02:00
|
|
|
, bool(j->flags & disk_interface::volatile_read));
|
2014-07-06 21:18:00 +02:00
|
|
|
|
|
|
|
TORRENT_PIECE_ASSERT(pe->cache_state <= cached_piece_entry::read_lru1 || pe->cache_state == cached_piece_entry::read_lru2, pe);
|
2016-10-19 07:18:05 +02:00
|
|
|
{
|
|
|
|
piece_refcount_holder h(pe);
|
|
|
|
kick_hasher(pe, l);
|
|
|
|
}
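// note: the scope above holds a piece_refcount_holder across kick_hasher()
// because kick_hasher() drops and re-acquires the cache mutex while it
// hashes; the refcount keeps the piece entry alive over that window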
|
2014-07-06 21:18:00 +02:00
|
|
|
|
|
|
|
TORRENT_PIECE_ASSERT(pe->cache_state <= cached_piece_entry::read_lru1 || pe->cache_state == cached_piece_entry::read_lru2, pe);
|
|
|
|
|
|
|
|
// are we already done hashing?
|
2016-10-26 02:46:23 +02:00
|
|
|
if (pe->hash && !pe->hashing && pe->hash->offset == piece_size)
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
|
|
|
DLOG("do_hash: (%d) (already done)\n", int(pe->piece));
|
2018-06-24 00:11:36 +02:00
|
|
|
j->d.piece_hash = pe->hash->h.final();
|
2016-10-21 13:37:15 +02:00
|
|
|
pe->hash.reset();
|
2014-07-06 21:18:00 +02:00
|
|
|
if (pe->cache_state != cached_piece_entry::volatile_read_lru)
|
|
|
|
pe->hashing_done = 1;
|
|
|
|
#if TORRENT_USE_ASSERTS
|
|
|
|
++pe->hash_passes;
|
|
|
|
#endif
|
|
|
|
m_disk_cache.update_cache_state(pe);
|
|
|
|
m_disk_cache.maybe_free_piece(pe);
|
2016-11-26 07:51:47 +01:00
|
|
|
return status_t::no_error;
|
2014-07-06 21:18:00 +02:00
|
|
|
}
|
|
|
|
}
|
2016-03-16 07:22:18 +01:00
|
|
|
else if (m_settings.get_bool(settings_pack::use_read_cache) == false)
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
|
|
|
return do_uncached_hash(j);
|
|
|
|
}
|
|
|
|
|
2016-06-20 17:32:06 +02:00
|
|
|
if (pe == nullptr)
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
2016-12-13 16:30:36 +01:00
|
|
|
std::uint16_t const cache_state = std::uint16_t((j->flags & disk_interface::volatile_read)
|
2014-07-06 21:18:00 +02:00
|
|
|
? cached_piece_entry::volatile_read_lru
|
2016-12-13 16:30:36 +01:00
|
|
|
: cached_piece_entry::read_lru1);
|
2014-07-06 21:18:00 +02:00
|
|
|
pe = m_disk_cache.allocate_piece(j, cache_state);
|
|
|
|
}
|
2016-06-20 17:32:06 +02:00
|
|
|
if (pe == nullptr)
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
|
|
|
j->error.ec = error::no_memory;
|
2017-06-18 00:18:19 +02:00
|
|
|
j->error.operation = operation_t::alloc_cache_piece;
|
2016-11-26 07:51:47 +01:00
|
|
|
return status_t::fatal_disk_error;
|
2014-07-06 21:18:00 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
if (pe->hashing)
|
|
|
|
{
|
|
|
|
TORRENT_PIECE_ASSERT(pe->hash, pe);
|
|
|
|
// another thread is hashing this piece right now
|
|
|
|
// try again in a little bit
|
|
|
|
DLOG("do_hash: retry\n");
|
|
|
|
// TODO: we should probably just hang the job on the piece and make sure the hasher gets kicked
|
|
|
|
return retry_job;
|
|
|
|
}
|
|
|
|
|
|
|
|
pe->hashing = 1;
|
|
|
|
|
2014-07-20 10:59:02 +02:00
|
|
|
TORRENT_PIECE_ASSERT(pe->cache_state <= cached_piece_entry::read_lru1
|
|
|
|
|| pe->cache_state == cached_piece_entry::read_lru2, pe);
|
2016-10-19 07:18:05 +02:00
|
|
|
|
|
|
|
piece_refcount_holder refcount_holder(pe);
|
2014-07-06 21:18:00 +02:00
|
|
|
|
2016-10-21 13:37:15 +02:00
|
|
|
if (!pe->hash)
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
|
|
|
pe->hashing_done = 0;
|
2016-10-21 13:37:15 +02:00
|
|
|
pe->hash.reset(new partial_hash);
|
2014-07-06 21:18:00 +02:00
|
|
|
}
|
2016-10-21 13:37:15 +02:00
|
|
|
partial_hash* ph = pe->hash.get();
|
2014-07-06 21:18:00 +02:00
|
|
|
|
2018-01-03 12:54:03 +01:00
|
|
|
int const blocks_in_piece = (piece_size + default_block_size - 1) / default_block_size;
|
2016-03-05 02:20:49 +01:00
|
|
|
|
2018-06-16 15:02:13 +02:00
|
|
|
// we don't care about anything to the left of ph->offset
|
|
|
|
// since those blocks have already been hashed.
|
|
|
|
// we just care about [first_block, first_block + blocks_left]
|
2018-06-20 10:47:27 +02:00
|
|
|
int const first_block = ph->offset / default_block_size;
|
2018-06-16 15:02:13 +02:00
|
|
|
int const blocks_left = blocks_in_piece - first_block;
|
|
|
|
|
|
|
|
// ph->offset
|
|
|
|
// | first_block
|
|
|
|
// | |
|
|
|
|
// v v
|
|
|
|
// +---+---+---+---+---+---+
|
|
|
|
// | | | | | | |
|
|
|
|
// +---+---+---+---+---+---+
|
|
|
|
//
|
|
|
|
// \-----------/
|
|
|
|
// blocks_left
|
|
|
|
//
|
|
|
|
// \-----------------------/
|
|
|
|
// blocks_in_piece
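// worked example (assuming a 16 kiB default_block_size and a 96 kiB piece,
// i.e. blocks_in_piece == 6): with ph->offset == 32768 we get
// first_block == 32768 / 16384 == 2 and blocks_left == 6 - 2 == 4, so only
// blocks 2..5 are considered below; blocks 0 and 1 were hashed earlier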
|
2016-03-05 02:20:49 +01:00
|
|
|
|
2014-07-06 21:18:00 +02:00
|
|
|
// keep track of which blocks we have locked by incrementing
|
|
|
|
// their refcounts. This is used to decrement only these blocks
|
|
|
|
// later.
|
2016-10-22 20:43:40 +02:00
|
|
|
TORRENT_ALLOCA(locked_blocks, int, blocks_in_piece);
|
|
|
|
std::fill(locked_blocks.begin(), locked_blocks.end(), 0);
|
2014-07-06 21:18:00 +02:00
|
|
|
int num_locked_blocks = 0;
|
|
|
|
|
|
|
|
// increment the refcounts of all
|
|
|
|
// blocks up front, and then hash them without holding the lock
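// for example, if only the blocks at relative indices 1 and 3 (counting
// from first_block) are present and lockable, the loop below leaves
// locked_blocks == {1, 3, 0, ...} and num_locked_blocks == 2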
|
2018-01-03 12:54:03 +01:00
|
|
|
TORRENT_PIECE_ASSERT(ph->offset % default_block_size == 0, pe);
|
2018-06-16 15:02:13 +02:00
|
|
|
for (int i = 0; i < blocks_left; ++i)
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
|
|
|
// is the block not in the cache?
|
2018-06-20 10:47:27 +02:00
|
|
|
if (pe->blocks[first_block + i].buf == nullptr) continue;
|
2014-07-06 21:18:00 +02:00
|
|
|
|
2018-06-20 10:47:27 +02:00
|
|
|
// if we fail to lock the block, it's no longer in the cache
|
2018-06-16 15:02:13 +02:00
|
|
|
if (m_disk_cache.inc_block_refcount(pe, first_block + i, block_cache::ref_hashing) == false)
|
2014-07-06 21:18:00 +02:00
|
|
|
continue;
|
|
|
|
|
|
|
|
locked_blocks[num_locked_blocks++] = i;
|
|
|
|
}
|
|
|
|
|
2016-03-07 07:05:54 +01:00
|
|
|
// to keep the cache footprint low, try to evict a volatile piece
|
|
|
|
m_disk_cache.try_evict_one_volatile();
|
|
|
|
|
2016-10-25 06:13:35 +02:00
|
|
|
// save a local copy of offset to avoid concurrent access
|
|
|
|
int offset = ph->offset;
|
2016-10-25 07:02:44 +02:00
|
|
|
#if TORRENT_USE_ASSERTS
|
|
|
|
int old_offset = offset;
|
|
|
|
#endif
|
2016-10-25 06:13:35 +02:00
|
|
|
|
2014-07-06 21:18:00 +02:00
|
|
|
l.unlock();
|
|
|
|
|
2018-06-16 15:02:13 +02:00
|
|
|
bool slow_path = true;
|
2014-07-06 21:18:00 +02:00
|
|
|
|
2018-06-16 15:02:13 +02:00
|
|
|
if (num_locked_blocks == 0)
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
2018-06-16 15:02:13 +02:00
|
|
|
// this is the fast path where we don't have any blocks in the cache.
|
|
|
|
// We'll need to read all the remaining blocks from disk
|
2018-06-20 10:47:27 +02:00
|
|
|
TORRENT_ALLOCA(iov, iovec_t, blocks_left);
|
|
|
|
if (m_disk_cache.allocate_iovec(iov) >= 0)
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
2019-03-26 12:29:04 +01:00
|
|
|
// free buffers at the end of the scope
|
|
|
|
auto iov_dealloc = aux::scope_end([&]{ m_disk_cache.free_iovec(iov); });
|
|
|
|
|
2018-06-16 15:02:13 +02:00
|
|
|
// if this is the last piece, adjust the size of the
|
|
|
|
// last buffer to match up
|
2018-11-01 23:05:30 +01:00
|
|
|
iov[blocks_left - 1] = iov[blocks_left - 1].first(
|
|
|
|
piece_size - (blocks_in_piece - 1) * default_block_size);
|
2018-06-20 10:47:27 +02:00
|
|
|
TORRENT_ASSERT(iov[blocks_left - 1].size() > 0);
|
|
|
|
TORRENT_ASSERT(iov[blocks_left - 1].size() <= default_block_size);
|
2014-07-06 21:18:00 +02:00
|
|
|
|
2018-06-16 15:02:13 +02:00
|
|
|
time_point const start_time = clock_type::now();
|
2018-06-20 10:47:27 +02:00
|
|
|
int const read_ret = j->storage->readv(iov
|
2018-06-16 15:02:13 +02:00
|
|
|
, j->piece, offset, file_flags, j->error);
|
2014-07-06 21:18:00 +02:00
|
|
|
|
2018-06-20 10:47:27 +02:00
|
|
|
if (read_ret == piece_size - offset)
|
2018-06-16 15:02:13 +02:00
|
|
|
{
|
2018-06-20 10:47:27 +02:00
|
|
|
std::int64_t const read_time = total_microseconds(clock_type::now() - start_time);
|
2014-07-06 21:18:00 +02:00
|
|
|
|
2018-08-02 11:13:08 +02:00
|
|
|
m_stats_counters.inc_stats_counter(counters::num_blocks_hashed, blocks_left);
|
2018-06-16 15:02:13 +02:00
|
|
|
m_stats_counters.inc_stats_counter(counters::num_read_back, blocks_left);
|
|
|
|
m_stats_counters.inc_stats_counter(counters::num_blocks_read, blocks_left);
|
|
|
|
m_stats_counters.inc_stats_counter(counters::num_read_ops);
|
|
|
|
m_stats_counters.inc_stats_counter(counters::disk_read_time, read_time);
|
|
|
|
m_stats_counters.inc_stats_counter(counters::disk_job_time, read_time);
|
2014-07-06 21:18:00 +02:00
|
|
|
|
2018-06-20 10:47:27 +02:00
|
|
|
for (auto const& v : iov)
|
2018-06-16 15:02:13 +02:00
|
|
|
{
|
2018-06-20 10:47:27 +02:00
|
|
|
offset += int(v.size());
|
|
|
|
ph->h.update(v);
|
2018-06-16 15:02:13 +02:00
|
|
|
}
|
2014-07-06 21:18:00 +02:00
|
|
|
|
2018-06-16 15:02:13 +02:00
|
|
|
slow_path = false;
|
2014-07-06 21:18:00 +02:00
|
|
|
|
2018-06-20 10:47:27 +02:00
|
|
|
TORRENT_ASSERT(offset == piece_size);
|
2014-07-06 21:18:00 +02:00
|
|
|
|
2019-03-26 12:29:04 +01:00
|
|
|
// we want to hold on to the buffers now, to insert them in the
|
|
|
|
// cache
|
|
|
|
iov_dealloc.disarm();
|
2018-06-16 15:02:13 +02:00
|
|
|
l.lock();
|
2018-06-20 10:47:27 +02:00
|
|
|
m_disk_cache.insert_blocks(pe, first_block, iov, j);
|
2018-06-16 15:02:13 +02:00
|
|
|
l.unlock();
|
|
|
|
}
|
2014-07-06 21:18:00 +02:00
|
|
|
}
|
2018-06-16 15:02:13 +02:00
|
|
|
}
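// if the fast path above didn't run (some blocks were already in the cache)
// or its single readv failed, slow_path is still true and the loop below
// walks the remaining blocks one at a time, hashing cached blocks in place
// and reading the missing ones from disk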
|
2014-07-06 21:18:00 +02:00
|
|
|
|
2018-06-20 10:47:27 +02:00
|
|
|
status_t ret = status_t::no_error;
|
2018-06-16 15:02:13 +02:00
|
|
|
if (slow_path)
|
|
|
|
{
|
|
|
|
int next_locked_block = 0;
|
|
|
|
for (int i = 0; i < blocks_left; ++i)
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
2018-06-16 15:02:13 +02:00
|
|
|
if (next_locked_block < num_locked_blocks
|
|
|
|
&& locked_blocks[next_locked_block] == i)
|
2014-07-20 10:59:02 +02:00
|
|
|
{
|
2018-06-20 10:47:27 +02:00
|
|
|
int const len = std::min(default_block_size, piece_size - offset);
|
2018-06-16 15:02:13 +02:00
|
|
|
++next_locked_block;
|
|
|
|
TORRENT_PIECE_ASSERT(pe->blocks[first_block + i].buf, pe);
|
2018-06-20 10:47:27 +02:00
|
|
|
TORRENT_PIECE_ASSERT(offset == (first_block + i) * default_block_size, pe);
|
|
|
|
offset += len;
|
2018-11-01 23:05:30 +01:00
|
|
|
ph->h.update({pe->blocks[first_block + i].buf, len});
|
2014-07-20 10:59:02 +02:00
|
|
|
}
|
2018-06-16 15:02:13 +02:00
|
|
|
else
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
2018-06-20 10:47:27 +02:00
|
|
|
iovec_t const iov = { m_disk_cache.allocate_buffer("hashing")
|
2018-11-01 23:05:30 +01:00
|
|
|
, std::min(default_block_size, piece_size - offset)};
|
2014-08-01 08:07:48 +02:00
|
|
|
|
2018-06-20 10:47:27 +02:00
|
|
|
if (iov.data() == nullptr)
|
2018-06-16 15:02:13 +02:00
|
|
|
{
|
|
|
|
l.lock();
|
|
|
|
// decrement the refcounts of the blocks we just hashed
|
|
|
|
for (int k = 0; k < num_locked_blocks; ++k)
|
|
|
|
m_disk_cache.dec_block_refcount(pe, first_block + locked_blocks[k], block_cache::ref_hashing);
|
2014-07-06 21:18:00 +02:00
|
|
|
|
2018-06-20 10:47:27 +02:00
|
|
|
refcount_holder.release();
|
2018-06-16 15:02:13 +02:00
|
|
|
pe->hashing = false;
|
2018-06-20 10:47:27 +02:00
|
|
|
pe->hash.reset();
|
2018-06-16 15:02:13 +02:00
|
|
|
m_disk_cache.maybe_free_piece(pe);
|
2014-07-06 21:18:00 +02:00
|
|
|
|
2018-06-16 15:02:13 +02:00
|
|
|
j->error.ec = errors::no_memory;
|
2018-06-20 10:47:27 +02:00
|
|
|
j->error.operation = operation_t::alloc_cache_piece;
|
|
|
|
return status_t::fatal_disk_error;
|
2018-06-16 15:02:13 +02:00
|
|
|
}
|
2014-07-06 21:18:00 +02:00
|
|
|
|
2019-03-26 12:29:04 +01:00
|
|
|
// free buffers at the end of the scope
|
|
|
|
auto iov_dealloc = aux::scope_end([&]{ m_disk_cache.free_buffer(iov.data()); });
|
|
|
|
|
2018-06-20 10:47:27 +02:00
|
|
|
DLOG("do_hash: reading (piece: %d block: %d)\n"
|
|
|
|
, static_cast<int>(pe->piece), first_block + i);
|
2014-07-06 21:18:00 +02:00
|
|
|
|
2018-06-16 15:02:13 +02:00
|
|
|
time_point const start_time = clock_type::now();
|
2018-06-20 10:47:27 +02:00
|
|
|
TORRENT_PIECE_ASSERT(offset == (first_block + i) * default_block_size, pe);
|
|
|
|
int const read_ret = j->storage->readv(iov, j->piece
|
|
|
|
, offset, file_flags, j->error);
|
2014-07-06 21:18:00 +02:00
|
|
|
|
2018-06-20 10:47:27 +02:00
|
|
|
if (read_ret < 0)
|
2018-06-16 15:02:13 +02:00
|
|
|
{
|
2018-06-20 10:47:27 +02:00
|
|
|
ret = status_t::fatal_disk_error;
|
|
|
|
TORRENT_ASSERT(j->error.ec && j->error.operation != operation_t::unknown);
|
2018-06-16 15:02:13 +02:00
|
|
|
break;
|
|
|
|
}
|
2014-07-20 10:59:02 +02:00
|
|
|
|
2018-06-16 15:02:13 +02:00
|
|
|
// treat a short read as an error. The hash will be invalid, the
|
|
|
|
// block cannot be cached and the main thread should skip the rest
|
|
|
|
// of this file
|
2018-06-20 10:47:27 +02:00
|
|
|
if (read_ret != int(iov.size()))
|
2018-06-16 15:02:13 +02:00
|
|
|
{
|
2018-06-20 10:47:27 +02:00
|
|
|
ret = status_t::fatal_disk_error;
|
|
|
|
j->error.ec = boost::asio::error::eof;
|
|
|
|
j->error.operation = operation_t::file_read;
|
2018-06-16 15:02:13 +02:00
|
|
|
break;
|
|
|
|
}
|
2014-08-01 08:07:48 +02:00
|
|
|
|
2018-06-16 15:02:13 +02:00
|
|
|
if (!j->error.ec)
|
|
|
|
{
|
2018-06-20 10:47:27 +02:00
|
|
|
std::int64_t const read_time = total_microseconds(clock_type::now() - start_time);
|
2014-07-06 21:18:00 +02:00
|
|
|
|
2018-06-16 15:02:13 +02:00
|
|
|
m_stats_counters.inc_stats_counter(counters::num_read_back);
|
|
|
|
m_stats_counters.inc_stats_counter(counters::num_blocks_read);
|
|
|
|
m_stats_counters.inc_stats_counter(counters::num_read_ops);
|
|
|
|
m_stats_counters.inc_stats_counter(counters::disk_read_time, read_time);
|
|
|
|
m_stats_counters.inc_stats_counter(counters::disk_job_time, read_time);
|
|
|
|
}
|
|
|
|
|
2018-06-20 10:47:27 +02:00
|
|
|
TORRENT_PIECE_ASSERT(offset == (first_block + i) * default_block_size, pe);
|
|
|
|
offset += int(iov.size());
|
|
|
|
ph->h.update(iov);
|
2014-07-06 21:18:00 +02:00
|
|
|
|
2019-03-26 12:29:04 +01:00
|
|
|
iov_dealloc.disarm();
|
2018-06-16 15:02:13 +02:00
|
|
|
l.lock();
|
2018-06-20 10:47:27 +02:00
|
|
|
m_disk_cache.insert_blocks(pe, first_block + i, iov, j);
|
2018-06-16 15:02:13 +02:00
|
|
|
l.unlock();
|
|
|
|
}
|
2014-07-06 21:18:00 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
l.lock();
|
|
|
|
|
2016-10-25 07:02:44 +02:00
|
|
|
TORRENT_ASSERT(old_offset == ph->offset);
|
2016-10-25 06:13:35 +02:00
|
|
|
ph->offset = offset;
|
|
|
|
|
2014-07-06 21:18:00 +02:00
|
|
|
// decrement the refcounts of the blocks we just hashed
|
|
|
|
for (int i = 0; i < num_locked_blocks; ++i)
|
2018-06-16 15:02:13 +02:00
|
|
|
m_disk_cache.dec_block_refcount(pe, first_block + locked_blocks[i], block_cache::ref_hashing);
|
2014-07-06 21:18:00 +02:00
|
|
|
|
2016-10-19 07:18:05 +02:00
|
|
|
refcount_holder.release();
|
2014-07-06 21:18:00 +02:00
|
|
|
|
|
|
|
pe->hashing = 0;
|
|
|
|
|
2016-11-26 07:51:47 +01:00
|
|
|
if (ret == status_t::no_error)
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
2017-11-20 18:10:12 +01:00
|
|
|
j->d.piece_hash = ph->h.final();
|
2014-07-06 21:18:00 +02:00
|
|
|
|
2016-10-21 13:37:15 +02:00
|
|
|
pe->hash.reset();
|
2014-07-06 21:18:00 +02:00
|
|
|
if (pe->cache_state != cached_piece_entry::volatile_read_lru)
|
|
|
|
pe->hashing_done = 1;
|
|
|
|
#if TORRENT_USE_ASSERTS
|
|
|
|
++pe->hash_passes;
|
|
|
|
#endif
|
|
|
|
m_disk_cache.update_cache_state(pe);
|
|
|
|
}
|
|
|
|
|
|
|
|
m_disk_cache.maybe_free_piece(pe);
|
|
|
|
|
2017-06-18 00:18:19 +02:00
|
|
|
TORRENT_ASSERT(ret == status_t::no_error || (j->error.ec && j->error.operation != operation_t::unknown));
|
2014-10-13 01:49:51 +02:00
|
|
|
|
2016-11-26 07:51:47 +01:00
|
|
|
return ret;
|
2014-07-06 21:18:00 +02:00
|
|
|
}
|
|
|
|
|
2016-11-26 07:51:47 +01:00
|
|
|
status_t disk_io_thread::do_move_storage(disk_io_job* j, jobqueue_t& /* completed_jobs */ )
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
|
|
|
// if this assert fails, something's wrong with the fence logic
|
|
|
|
TORRENT_ASSERT(j->storage->num_outstanding_jobs() == 1);
|
|
|
|
|
|
|
|
// if files have to be closed, that's the storage's responsibility
|
2017-04-16 22:37:39 +02:00
|
|
|
return j->storage->move_storage(boost::get<std::string>(j->argument)
|
2017-06-01 10:15:15 +02:00
|
|
|
, j->move_flags, j->error);
|
2014-07-06 21:18:00 +02:00
|
|
|
}
|
|
|
|
|
2016-11-26 07:51:47 +01:00
|
|
|
status_t disk_io_thread::do_release_files(disk_io_job* j, jobqueue_t& completed_jobs)
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
|
|
|
// if this assert fails, something's wrong with the fence logic
|
|
|
|
TORRENT_ASSERT(j->storage->num_outstanding_jobs() == 1);
|
|
|
|
|
2016-05-01 00:54:23 +02:00
|
|
|
std::unique_lock<std::mutex> l(m_cache_mutex);
|
2014-07-06 21:18:00 +02:00
|
|
|
flush_cache(j->storage.get(), flush_write_cache, completed_jobs, l);
|
|
|
|
l.unlock();
|
|
|
|
|
2016-11-13 03:45:30 +01:00
|
|
|
j->storage->release_files(j->error);
|
2016-11-26 07:51:47 +01:00
|
|
|
return j->error ? status_t::fatal_disk_error : status_t::no_error;
|
2014-07-06 21:18:00 +02:00
|
|
|
}
|
|
|
|
|
2016-11-26 07:51:47 +01:00
|
|
|
status_t disk_io_thread::do_delete_files(disk_io_job* j, jobqueue_t& completed_jobs)
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
2017-07-27 22:26:12 +02:00
|
|
|
TORRENT_ASSERT(boost::get<remove_flags_t>(j->argument));
|
2014-07-06 21:18:00 +02:00
|
|
|
|
|
|
|
// if this assert fails, something's wrong with the fence logic
|
|
|
|
TORRENT_ASSERT(j->storage->num_outstanding_jobs() == 1);
|
|
|
|
|
2016-05-01 00:54:23 +02:00
|
|
|
std::unique_lock<std::mutex> l(m_cache_mutex);
|
2015-08-20 01:33:20 +02:00
|
|
|
|
2017-04-15 00:51:11 +02:00
|
|
|
flush_cache(j->storage.get()
|
|
|
|
, flush_read_cache | flush_delete_cache | flush_expect_clear
|
2015-04-20 06:52:49 +02:00
|
|
|
, completed_jobs, l);
|
2014-07-06 21:18:00 +02:00
|
|
|
l.unlock();
|
|
|
|
|
2017-07-27 22:26:12 +02:00
|
|
|
j->storage->delete_files(boost::get<remove_flags_t>(j->argument), j->error);
|
2016-11-26 07:51:47 +01:00
|
|
|
return j->error ? status_t::fatal_disk_error : status_t::no_error;
|
2014-07-06 21:18:00 +02:00
|
|
|
}
|
|
|
|
|
2016-11-26 07:51:47 +01:00
|
|
|
status_t disk_io_thread::do_check_fastresume(disk_io_job* j, jobqueue_t& /* completed_jobs */ )
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
|
|
|
// if this assert fails, something's wrong with the fence logic
|
|
|
|
TORRENT_ASSERT(j->storage->num_outstanding_jobs() == 1);
|
|
|
|
|
2017-04-16 22:37:39 +02:00
|
|
|
add_torrent_params const* rd = boost::get<add_torrent_params const*>(j->argument);
|
2016-02-15 00:17:32 +01:00
|
|
|
add_torrent_params tmp;
|
2016-06-20 17:32:06 +02:00
|
|
|
if (rd == nullptr) rd = &tmp;
|
2014-07-06 21:18:00 +02:00
|
|
|
|
2016-12-22 16:42:33 +01:00
|
|
|
std::unique_ptr<aux::vector<std::string, file_index_t>> links(j->d.links);
|
2016-11-12 04:01:18 +01:00
|
|
|
// check if the fastresume data is up to date
|
|
|
|
// if it is, use it and return true. If it
|
|
|
|
// isn't return false and the full check
|
|
|
|
// will be run. If the links pointer is non-empty, it has the same number
|
|
|
|
// of elements as there are files. Each element is either empty or contains
|
|
|
|
// the absolute path to a file identical to the corresponding file in this
|
|
|
|
// torrent. The storage must create hard links (or copy) those files. If
|
|
|
|
// any file does not exist or is inaccessible, the disk job must fail.
|
|
|
|
|
2017-04-07 15:15:54 +02:00
|
|
|
TORRENT_ASSERT(j->storage->files().piece_length() > 0);
|
2016-11-12 04:01:18 +01:00
|
|
|
|
2019-02-14 14:12:50 +01:00
|
|
|
bool const verify_success = j->storage->verify_resume_data(*rd
|
|
|
|
, links ? *links : aux::vector<std::string, file_index_t>(), j->error);
|
|
|
|
|
2016-11-12 04:01:18 +01:00
|
|
|
// if we don't have any resume data, return
|
|
|
|
// or if error is set and return value is 'no_error' or 'need_full_check'
|
|
|
|
// the error message indicates that the fast resume data was rejected
|
|
|
|
// if 'fatal_disk_error' is returned, the error message indicates what
|
|
|
|
// went wrong in the disk access
|
2019-02-14 14:12:50 +01:00
|
|
|
if ((rd->have_pieces.empty() || !verify_success)
|
2016-11-12 04:01:18 +01:00
|
|
|
&& !m_settings.get_bool(settings_pack::no_recheck_incomplete_resume))
|
|
|
|
{
|
|
|
|
// j->error may have been set at this point, by verify_resume_data()
|
|
|
|
// it's important to not have it cleared out subsequent calls, as long
|
|
|
|
// as they succeed.
|
2018-04-24 16:51:39 +02:00
|
|
|
storage_error ignore;
|
|
|
|
if (j->storage->has_any_file(ignore))
|
2016-11-12 04:01:18 +01:00
|
|
|
{
|
|
|
|
// always initialize the storage
|
2018-04-24 16:51:39 +02:00
|
|
|
storage_error se;
|
|
|
|
j->storage->initialize(se);
|
2016-11-12 04:01:18 +01:00
|
|
|
if (se)
|
|
|
|
{
|
|
|
|
j->error = se;
|
2016-11-26 07:51:47 +01:00
|
|
|
return status_t::fatal_disk_error;
|
2016-11-12 04:01:18 +01:00
|
|
|
}
|
2016-11-26 07:51:47 +01:00
|
|
|
return status_t::need_full_check;
|
2016-11-12 04:01:18 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-04-24 16:51:39 +02:00
|
|
|
storage_error se;
|
2016-11-13 03:45:30 +01:00
|
|
|
j->storage->initialize(se);
|
2016-11-12 04:01:18 +01:00
|
|
|
if (se)
|
|
|
|
{
|
|
|
|
j->error = se;
|
2016-11-26 07:51:47 +01:00
|
|
|
return status_t::fatal_disk_error;
|
2016-11-12 04:01:18 +01:00
|
|
|
}
|
2016-11-26 07:51:47 +01:00
|
|
|
return status_t::no_error;
|
2014-07-06 21:18:00 +02:00
|
|
|
}
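// the overall decision in do_check_fastresume() above, ignoring the
// hard-link handling, is roughly:
//
//   verify_ok = verify_resume_data(*rd, links, error)
//   if ((rd->have_pieces is empty || !verify_ok)
//       && !no_recheck_incomplete_resume)
//       if (storage has any file on disk)
//           initialize storage -> need_full_check (or fatal_disk_error)
//   initialize storage -> no_error (or fatal_disk_error)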
|
|
|
|
|
2016-11-26 07:51:47 +01:00
|
|
|
status_t disk_io_thread::do_rename_file(disk_io_job* j, jobqueue_t& /* completed_jobs */ )
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
|
|
|
// if this assert fails, something's wrong with the fence logic
|
|
|
|
TORRENT_ASSERT(j->storage->num_outstanding_jobs() == 1);
|
|
|
|
|
|
|
|
// if files need to be closed, that's the storage's responsibility
|
2017-04-16 22:37:39 +02:00
|
|
|
j->storage->rename_file(j->file_index, boost::get<std::string>(j->argument)
|
2015-05-16 22:41:37 +02:00
|
|
|
, j->error);
|
2016-11-26 07:51:47 +01:00
|
|
|
return j->error ? status_t::fatal_disk_error : status_t::no_error;
|
2014-07-06 21:18:00 +02:00
|
|
|
}
|
|
|
|
|
2016-11-26 07:51:47 +01:00
|
|
|
status_t disk_io_thread::do_stop_torrent(disk_io_job* j, jobqueue_t& completed_jobs)
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
|
|
|
// if this assert fails, something's wrong with the fence logic
|
|
|
|
TORRENT_ASSERT(j->storage->num_outstanding_jobs() == 1);
|
|
|
|
|
|
|
|
// issue write commands for all dirty blocks
|
|
|
|
// and clear all read jobs
|
2016-05-01 00:54:23 +02:00
|
|
|
std::unique_lock<std::mutex> l(m_cache_mutex);
|
2015-04-20 06:52:49 +02:00
|
|
|
flush_cache(j->storage.get(), flush_read_cache | flush_write_cache
|
|
|
|
, completed_jobs, l);
|
2014-07-06 21:18:00 +02:00
|
|
|
l.unlock();
|
|
|
|
|
2016-11-13 03:45:30 +01:00
|
|
|
j->storage->release_files(j->error);
|
2016-11-26 07:51:47 +01:00
|
|
|
return j->error ? status_t::fatal_disk_error : status_t::no_error;
|
2014-07-06 21:18:00 +02:00
|
|
|
}
|
|
|
|
|
2015-04-21 02:23:00 +02:00
|
|
|
namespace {
|
|
|
|
|
2018-01-03 12:54:03 +01:00
|
|
|
void get_cache_info_impl(cached_piece_info& info, cached_piece_entry const* i)
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
|
|
|
info.piece = i->piece;
|
|
|
|
info.storage = i->storage.get();
|
|
|
|
info.last_use = i->expire;
|
|
|
|
info.need_readback = i->need_readback;
|
2018-01-03 12:54:03 +01:00
|
|
|
info.next_to_hash = i->hash == nullptr ? -1 : (i->hash->offset + default_block_size - 1) / default_block_size;
|
2014-07-06 21:18:00 +02:00
|
|
|
info.kind = i->cache_state == cached_piece_entry::write_lru
|
|
|
|
? cached_piece_info::write_cache
|
|
|
|
: i->cache_state == cached_piece_entry::volatile_read_lru
|
|
|
|
? cached_piece_info::volatile_read_cache
|
|
|
|
: cached_piece_info::read_cache;
|
2018-08-14 21:26:10 +02:00
|
|
|
int const blocks_in_piece = i->blocks_in_piece;
|
2017-01-31 02:31:32 +01:00
|
|
|
info.blocks.resize(aux::numeric_cast<std::size_t>(blocks_in_piece));
|
2014-07-06 21:18:00 +02:00
|
|
|
for (int b = 0; b < blocks_in_piece; ++b)
|
2017-01-31 02:31:32 +01:00
|
|
|
info.blocks[std::size_t(b)] = i->blocks[b].buf != nullptr;
|
2014-07-06 21:18:00 +02:00
|
|
|
}
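// note on next_to_hash above: it is the ceiling of hash->offset divided by
// default_block_size. E.g. with 16 kiB blocks and offset == 32768 (blocks 0
// and 1 fully hashed), (32768 + 16383) / 16384 == 2, i.e. block 2 is the
// next block the hash cursor will consume; -1 means no hash is in progress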
|
|
|
|
|
2015-04-21 02:23:00 +02:00
|
|
|
} // anonymous namespace
|
|
|
|
|
2014-07-06 21:18:00 +02:00
|
|
|
void disk_io_thread::update_stats_counters(counters& c) const
|
|
|
|
{
|
|
|
|
// These are atomic counters, so it's safe to access them from
|
|
|
|
// a different thread
|
2016-05-01 00:54:23 +02:00
|
|
|
std::unique_lock<std::mutex> jl(m_job_mutex);
|
2014-07-06 21:18:00 +02:00
|
|
|
|
|
|
|
c.set_value(counters::num_read_jobs, read_jobs_in_use());
|
|
|
|
c.set_value(counters::num_write_jobs, write_jobs_in_use());
|
|
|
|
c.set_value(counters::num_jobs, jobs_in_use());
|
2016-06-16 02:49:28 +02:00
|
|
|
c.set_value(counters::queued_disk_jobs, m_generic_io_jobs.m_queued_jobs.size()
|
|
|
|
+ m_hash_io_jobs.m_queued_jobs.size());
|
2014-07-06 21:18:00 +02:00
|
|
|
|
|
|
|
jl.unlock();
|
|
|
|
|
2016-05-01 00:54:23 +02:00
|
|
|
std::unique_lock<std::mutex> l(m_cache_mutex);
|
2014-07-06 21:18:00 +02:00
|
|
|
|
|
|
|
// gauges
|
|
|
|
c.set_value(counters::disk_blocks_in_use, m_disk_cache.in_use());
|
|
|
|
|
|
|
|
m_disk_cache.update_stats_counters(c);
|
|
|
|
}
|
|
|
|
|
2018-06-24 00:11:36 +02:00
|
|
|
void disk_io_thread::get_cache_info(cache_status* ret, storage_index_t const st
|
2016-12-31 18:35:10 +01:00
|
|
|
, bool const no_pieces, bool const session) const
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
2016-05-01 00:54:23 +02:00
|
|
|
std::unique_lock<std::mutex> l(m_cache_mutex);
|
2014-07-06 21:18:00 +02:00
|
|
|
|
2018-04-26 09:01:14 +02:00
|
|
|
#if TORRENT_ABI_VERSION == 1
|
2014-10-25 22:07:50 +02:00
|
|
|
ret->total_used_buffers = m_disk_cache.in_use();
|
|
|
|
|
2016-12-05 02:15:49 +01:00
|
|
|
ret->blocks_read_hit = int(m_stats_counters[counters::num_blocks_cache_hits]);
|
|
|
|
ret->blocks_read = int(m_stats_counters[counters::num_blocks_read]);
|
|
|
|
ret->blocks_written = int(m_stats_counters[counters::num_blocks_written]);
|
|
|
|
ret->writes = int(m_stats_counters[counters::num_write_ops]);
|
|
|
|
ret->reads = int(m_stats_counters[counters::num_read_ops]);
|
|
|
|
|
2017-11-20 18:10:12 +01:00
|
|
|
int num_read_jobs = int(std::max(std::int64_t(1)
|
2016-12-05 02:15:49 +01:00
|
|
|
, m_stats_counters[counters::num_read_ops]));
|
2017-11-20 18:10:12 +01:00
|
|
|
int num_write_jobs = int(std::max(std::int64_t(1)
|
2016-12-05 02:15:49 +01:00
|
|
|
, m_stats_counters[counters::num_write_ops]));
|
2017-11-20 18:10:12 +01:00
|
|
|
int num_hash_jobs = int(std::max(std::int64_t(1)
|
2016-12-05 02:15:49 +01:00
|
|
|
, m_stats_counters[counters::num_blocks_hashed]));
|
|
|
|
|
|
|
|
ret->average_read_time = int(m_stats_counters[counters::disk_read_time] / num_read_jobs);
|
|
|
|
ret->average_write_time = int(m_stats_counters[counters::disk_write_time] / num_write_jobs);
|
|
|
|
ret->average_hash_time = int(m_stats_counters[counters::disk_hash_time] / num_hash_jobs);
|
|
|
|
ret->average_job_time = int(m_stats_counters[counters::disk_job_time]
|
|
|
|
/ (num_read_jobs + num_write_jobs + num_hash_jobs));
|
|
|
|
ret->cumulative_job_time = int(m_stats_counters[counters::disk_job_time]);
|
|
|
|
ret->cumulative_read_time = int(m_stats_counters[counters::disk_read_time]);
|
|
|
|
ret->cumulative_write_time = int(m_stats_counters[counters::disk_write_time]);
|
|
|
|
ret->cumulative_hash_time = int(m_stats_counters[counters::disk_hash_time]);
|
|
|
|
ret->total_read_back = int(m_stats_counters[counters::num_read_back]);
|
|
|
|
|
|
|
|
ret->blocked_jobs = int(m_stats_counters[counters::blocked_disk_jobs]);
|
2014-07-06 21:18:00 +02:00
|
|
|
|
|
|
|
ret->num_jobs = jobs_in_use();
|
|
|
|
ret->num_read_jobs = read_jobs_in_use();
|
2014-10-25 23:28:57 +02:00
|
|
|
ret->read_queue_size = read_jobs_in_use();
|
2014-07-06 21:18:00 +02:00
|
|
|
ret->num_write_jobs = write_jobs_in_use();
|
2016-12-05 02:15:49 +01:00
|
|
|
ret->pending_jobs = int(m_stats_counters[counters::num_running_disk_jobs]);
|
|
|
|
ret->num_writing_threads = int(m_stats_counters[counters::num_writing_threads]);
|
2014-07-06 21:18:00 +02:00
|
|
|
|
2017-06-11 19:53:15 +02:00
|
|
|
for (int i = 0; i < static_cast<int>(job_action_t::num_job_ids); ++i)
|
2016-12-05 02:15:49 +01:00
|
|
|
ret->num_fence_jobs[i] = int(m_stats_counters[counters::num_fenced_read + i]);
|
2014-10-25 23:28:57 +02:00
|
|
|
|
2014-07-06 21:18:00 +02:00
|
|
|
m_disk_cache.get_stats(ret);
|
|
|
|
|
2014-10-25 22:07:50 +02:00
|
|
|
#endif
|
2014-07-06 21:18:00 +02:00
|
|
|
|
2014-10-25 22:07:50 +02:00
|
|
|
ret->pieces.clear();
|
2014-07-06 21:18:00 +02:00
|
|
|
|
2014-10-25 22:07:50 +02:00
|
|
|
if (no_pieces == false)
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
2016-12-31 18:35:10 +01:00
|
|
|
if (!session)
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
2017-01-02 06:01:43 +01:00
|
|
|
std::shared_ptr<storage_interface> storage = m_torrents[st];
|
2016-12-31 18:35:10 +01:00
|
|
|
TORRENT_ASSERT(storage);
|
2017-01-31 02:31:32 +01:00
|
|
|
ret->pieces.reserve(aux::numeric_cast<std::size_t>(storage->num_pieces()));
|
2016-03-06 22:31:18 +01:00
|
|
|
|
2017-12-25 09:17:53 +01:00
|
|
|
for (auto const& pe : storage->cached_pieces())
|
2014-10-25 22:07:50 +02:00
|
|
|
{
|
2017-12-25 09:17:53 +01:00
|
|
|
TORRENT_ASSERT(pe.storage.get() == storage.get());
|
2016-03-06 22:31:18 +01:00
|
|
|
|
2017-12-25 09:17:53 +01:00
|
|
|
if (pe.cache_state == cached_piece_entry::read_lru2_ghost
|
|
|
|
|| pe.cache_state == cached_piece_entry::read_lru1_ghost)
|
2014-10-25 22:07:50 +02:00
|
|
|
continue;
|
2017-11-20 18:10:12 +01:00
|
|
|
ret->pieces.emplace_back();
|
2018-01-03 12:54:03 +01:00
|
|
|
get_cache_info_impl(ret->pieces.back(), &pe);
|
2014-10-25 22:07:50 +02:00
|
|
|
}
|
2014-07-06 21:18:00 +02:00
|
|
|
}
|
2014-10-25 22:07:50 +02:00
|
|
|
else
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
2017-01-31 02:31:32 +01:00
|
|
|
ret->pieces.reserve(aux::numeric_cast<std::size_t>(m_disk_cache.num_pieces()));
|
2016-03-06 22:31:18 +01:00
|
|
|
|
2016-09-06 04:25:20 +02:00
|
|
|
auto range = m_disk_cache.all_pieces();
|
|
|
|
for (auto i = range.first; i != range.second; ++i)
|
2014-10-25 22:07:50 +02:00
|
|
|
{
|
|
|
|
if (i->cache_state == cached_piece_entry::read_lru2_ghost
|
|
|
|
|| i->cache_state == cached_piece_entry::read_lru1_ghost)
|
|
|
|
continue;
|
2017-11-20 18:10:12 +01:00
|
|
|
ret->pieces.emplace_back();
|
2018-01-03 12:54:03 +01:00
|
|
|
get_cache_info_impl(ret->pieces.back(), &*i);
|
2014-10-25 22:07:50 +02:00
|
|
|
}
|
2014-07-06 21:18:00 +02:00
|
|
|
}
|
|
|
|
}
|
2014-10-25 22:07:50 +02:00
|
|
|
|
|
|
|
l.unlock();
|
|
|
|
|
2018-04-26 09:01:14 +02:00
|
|
|
#if TORRENT_ABI_VERSION == 1
|
2016-05-01 00:54:23 +02:00
|
|
|
std::unique_lock<std::mutex> jl(m_job_mutex);
|
2016-06-16 02:49:28 +02:00
|
|
|
ret->queued_jobs = m_generic_io_jobs.m_queued_jobs.size() + m_hash_io_jobs.m_queued_jobs.size();
|
2014-10-25 22:07:50 +02:00
|
|
|
jl.unlock();
|
|
|
|
#endif
|
2014-07-06 21:18:00 +02:00
|
|
|
}
|
|
|
|
|
2016-11-26 07:51:47 +01:00
|
|
|
status_t disk_io_thread::do_flush_piece(disk_io_job* j, jobqueue_t& completed_jobs)
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
2016-05-01 00:54:23 +02:00
|
|
|
std::unique_lock<std::mutex> l(m_cache_mutex);
|
2014-07-06 21:18:00 +02:00
|
|
|
|
|
|
|
cached_piece_entry* pe = m_disk_cache.find_piece(j);
|
2016-11-26 07:51:47 +01:00
|
|
|
if (pe == nullptr) return status_t::no_error;
|
2014-07-06 21:18:00 +02:00
|
|
|
|
|
|
|
#if TORRENT_USE_ASSERTS
|
2018-06-24 00:11:36 +02:00
|
|
|
pe->piece_log.emplace_back(j->action);
|
2014-07-06 21:18:00 +02:00
|
|
|
#endif
|
2015-04-20 06:52:49 +02:00
|
|
|
try_flush_hashed(pe, m_settings.get_int(
|
|
|
|
settings_pack::write_cache_line_size), completed_jobs, l);
|
2014-07-06 21:18:00 +02:00
|
|
|
|
2016-11-26 07:51:47 +01:00
|
|
|
return status_t::no_error;
|
2014-07-06 21:18:00 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// this is triggered every time we insert a new dirty block in a piece
|
|
|
|
// by the time this gets executed, the block may already have been flushed,
|
|
|
|
// triggered by another mechanism.
|
2016-11-26 07:51:47 +01:00
|
|
|
status_t disk_io_thread::do_flush_hashed(disk_io_job* j, jobqueue_t& completed_jobs)
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
2016-05-01 00:54:23 +02:00
|
|
|
std::unique_lock<std::mutex> l(m_cache_mutex);
|
2014-07-06 21:18:00 +02:00
|
|
|
|
|
|
|
cached_piece_entry* pe = m_disk_cache.find_piece(j);
|
|
|
|
|
2016-11-26 07:51:47 +01:00
|
|
|
if (pe == nullptr) return status_t::no_error;
|
2016-03-12 19:03:20 +01:00
|
|
|
|
|
|
|
pe->outstanding_flush = 0;
|
|
|
|
|
2016-11-26 07:51:47 +01:00
|
|
|
if (pe->num_dirty == 0) return status_t::no_error;
|
2014-07-06 21:18:00 +02:00
|
|
|
|
|
|
|
// if multiple threads are flushing this piece, this assert may fire
|
|
|
|
// this happens if the cache is running full and pieces start to
|
|
|
|
// get flushed
|
|
|
|
// TORRENT_PIECE_ASSERT(pe->outstanding_flush == 1, pe);
|
|
|
|
|
|
|
|
#if TORRENT_USE_ASSERTS
|
2018-06-24 00:11:36 +02:00
|
|
|
pe->piece_log.emplace_back(j->action);
|
2014-07-06 21:18:00 +02:00
|
|
|
#endif
|
2014-10-25 22:07:50 +02:00
|
|
|
TORRENT_PIECE_ASSERT(pe->cache_state <= cached_piece_entry::read_lru1
|
|
|
|
|| pe->cache_state == cached_piece_entry::read_lru2, pe);
|
2016-10-19 07:18:05 +02:00
|
|
|
|
|
|
|
piece_refcount_holder refcount_holder(pe);
|
2014-07-06 21:18:00 +02:00
|
|
|
|
|
|
|
if (!pe->hashing_done)
|
|
|
|
{
|
2016-07-09 22:26:26 +02:00
|
|
|
if (pe->hash == nullptr && !m_settings.get_bool(settings_pack::disable_hash_checks))
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
2016-10-21 13:37:15 +02:00
|
|
|
pe->hash.reset(new partial_hash);
|
2014-07-06 21:18:00 +02:00
|
|
|
m_disk_cache.update_cache_state(pe);
|
|
|
|
}
|
|
|
|
|
|
|
|
// see if we can progress the hash cursor with this new block
|
|
|
|
kick_hasher(pe, l);
|
|
|
|
|
|
|
|
TORRENT_PIECE_ASSERT(pe->cache_state <= cached_piece_entry::read_lru1 || pe->cache_state == cached_piece_entry::read_lru2, pe);
|
|
|
|
}
|
|
|
|
|
|
|
|
// flushes the piece to disk in case
|
|
|
|
// it satisfies the condition for a write
|
|
|
|
// piece to be flushed
|
|
|
|
// #error if hash checks are disabled, always just flush
|
2015-04-20 06:52:49 +02:00
|
|
|
try_flush_hashed(pe, m_settings.get_int(
|
|
|
|
settings_pack::write_cache_line_size), completed_jobs, l);
|
2014-07-06 21:18:00 +02:00
|
|
|
|
2016-05-01 00:54:23 +02:00
|
|
|
TORRENT_ASSERT(l.owns_lock());
|
2014-07-06 21:18:00 +02:00
|
|
|
|
2016-10-19 07:18:05 +02:00
|
|
|
refcount_holder.release();
|
2014-07-06 21:18:00 +02:00
|
|
|
|
|
|
|
m_disk_cache.maybe_free_piece(pe);
|
|
|
|
|
2016-11-26 07:51:47 +01:00
|
|
|
return status_t::no_error;
|
2014-07-06 21:18:00 +02:00
|
|
|
}
|
|
|
|
|
2016-11-26 07:51:47 +01:00
|
|
|
status_t disk_io_thread::do_flush_storage(disk_io_job* j, jobqueue_t& completed_jobs)
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
2016-05-01 00:54:23 +02:00
|
|
|
std::unique_lock<std::mutex> l(m_cache_mutex);
|
2014-07-06 21:18:00 +02:00
|
|
|
flush_cache(j->storage.get(), flush_write_cache, completed_jobs, l);
|
2016-11-26 07:51:47 +01:00
|
|
|
return status_t::no_error;
|
2014-07-06 21:18:00 +02:00
|
|
|
}
|
|
|
|
|
2016-11-26 07:51:47 +01:00
|
|
|
status_t disk_io_thread::do_trim_cache(disk_io_job*, jobqueue_t& /* completed_jobs */)
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
|
|
|
//#error implement
|
2016-11-26 07:51:47 +01:00
|
|
|
return status_t::no_error;
|
2014-07-06 21:18:00 +02:00
|
|
|
}
|
|
|
|
|
2016-11-26 07:51:47 +01:00
|
|
|
status_t disk_io_thread::do_file_priority(disk_io_job* j, jobqueue_t& /* completed_jobs */ )
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
2017-04-16 22:37:39 +02:00
|
|
|
j->storage->set_file_priority(
|
2018-06-09 21:53:22 +02:00
|
|
|
boost::get<aux::vector<download_priority_t, file_index_t>>(j->argument)
|
2017-04-16 22:37:39 +02:00
|
|
|
, j->error);
|
2016-11-26 07:51:47 +01:00
|
|
|
return status_t::no_error;
|
2014-07-06 21:18:00 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// this job won't return until all outstanding jobs on this
|
|
|
|
// piece are completed or cancelled and the buffers for it
|
|
|
|
// have been evicted
|
2016-11-26 07:51:47 +01:00
|
|
|
status_t disk_io_thread::do_clear_piece(disk_io_job* j, jobqueue_t& completed_jobs)
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
2016-05-01 00:54:23 +02:00
|
|
|
std::unique_lock<std::mutex> l(m_cache_mutex);
|
2014-07-06 21:18:00 +02:00
|
|
|
|
|
|
|
cached_piece_entry* pe = m_disk_cache.find_piece(j);
|
2016-11-26 07:51:47 +01:00
|
|
|
if (pe == nullptr) return status_t::no_error;
|
2014-07-06 21:18:00 +02:00
|
|
|
TORRENT_PIECE_ASSERT(pe->hashing == false, pe);
|
|
|
|
pe->hashing_done = 0;
|
2016-10-21 13:37:15 +02:00
|
|
|
pe->hash.reset();
|
2014-07-06 21:18:00 +02:00
|
|
|
pe->hashing_done = false;
|
|
|
|
|
|
|
|
#if TORRENT_USE_ASSERTS
|
2018-06-24 00:11:36 +02:00
|
|
|
pe->piece_log.emplace_back(j->action);
|
2008-03-08 07:06:31 +01:00
|
|
|
#endif
|
2014-07-06 21:18:00 +02:00
|
|
|
|
|
|
|
// evict_piece returns true if the piece was in fact
|
|
|
|
// evicted. A piece may fail to be evicted if there
|
|
|
|
// are still outstanding operations on it, in which case
|
|
|
|
// try again later
|
2015-08-19 15:22:00 +02:00
|
|
|
jobqueue_t jobs;
|
2017-04-15 00:51:11 +02:00
|
|
|
if (m_disk_cache.evict_piece(pe, jobs, block_cache::allow_ghost))
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
2015-04-20 06:52:49 +02:00
|
|
|
fail_jobs_impl(storage_error(boost::asio::error::operation_aborted)
|
|
|
|
, jobs, completed_jobs);
|
2016-11-26 07:51:47 +01:00
|
|
|
return status_t::no_error;
|
2014-07-06 21:18:00 +02:00
|
|
|
}
|
|
|
|
|
2017-04-15 00:51:11 +02:00
|
|
|
m_disk_cache.mark_for_eviction(pe, block_cache::allow_ghost);
|
2016-11-26 07:51:47 +01:00
|
|
|
if (pe->num_blocks == 0) return status_t::no_error;
|
2014-07-06 21:18:00 +02:00
|
|
|
|
|
|
|
// we should always be able to evict the piece, since
|
|
|
|
// this is a fence job
|
2016-05-02 18:36:21 +02:00
|
|
|
TORRENT_PIECE_ASSERT_FAIL(pe);
|
2014-07-06 21:18:00 +02:00
|
|
|
return retry_job;
|
|
|
|
}
|
|
|
|
|
2016-12-31 18:35:10 +01:00
|
|
|
void disk_io_thread::add_fence_job(disk_io_job* j, bool const user_add)
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
|
|
|
// if this happens, it means we started to shut down
|
|
|
|
// the disk threads too early. We have to post all jobs
|
|
|
|
// before the disk threads are shut down
|
2015-06-14 22:00:04 +02:00
|
|
|
TORRENT_ASSERT(!m_abort);
|
2014-07-06 21:18:00 +02:00
|
|
|
|
|
|
|
DLOG("add_fence:job: %s (outstanding: %d)\n"
|
2019-03-24 17:30:22 +01:00
|
|
|
, job_name(j->action)
|
2014-07-06 21:18:00 +02:00
|
|
|
, j->storage->num_outstanding_jobs());
|
|
|
|
|
2017-06-11 19:53:15 +02:00
|
|
|
m_stats_counters.inc_stats_counter(counters::num_fenced_read + static_cast<int>(j->action));
|
2014-07-06 21:18:00 +02:00
|
|
|
|
2017-06-11 19:53:15 +02:00
|
|
|
disk_io_job* fj = allocate_job(job_action_t::flush_storage);
|
2014-07-06 21:18:00 +02:00
|
|
|
fj->storage = j->storage;
|
2018-12-06 10:55:26 +01:00
|
|
|
TORRENT_ASSERT(fj->flags == disk_job_flags_t{});
|
2014-07-06 21:18:00 +02:00
|
|
|
|
2016-12-31 18:35:10 +01:00
|
|
|
int ret = j->storage->raise_fence(j, fj, m_stats_counters);
|
2017-01-22 00:36:51 +01:00
|
|
|
if (ret == aux::disk_job_fence::fence_post_fence)
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
2016-05-01 00:54:23 +02:00
|
|
|
std::unique_lock<std::mutex> l(m_job_mutex);
|
2014-07-06 21:18:00 +02:00
|
|
|
TORRENT_ASSERT((j->flags & disk_io_job::in_progress) || !j->storage);
|
2017-04-21 07:21:31 +02:00
|
|
|
m_generic_io_jobs.m_queued_jobs.push_back(j);
|
2014-07-06 21:18:00 +02:00
|
|
|
l.unlock();
|
|
|
|
|
|
|
|
// discard the flush job
|
|
|
|
free_job(fj);
|
2015-06-14 22:00:04 +02:00
|
|
|
|
2016-06-16 02:49:28 +02:00
|
|
|
if (num_threads() == 0 && user_add)
|
2015-06-14 22:00:04 +02:00
|
|
|
immediate_execute();
|
|
|
|
|
2014-07-06 21:18:00 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2017-01-22 00:36:51 +01:00
|
|
|
if (ret == aux::disk_job_fence::fence_post_flush)
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
|
|
|
// now, we have to make sure that all outstanding jobs on this
|
|
|
|
// storage actually get flushed, in order for the fence job to
|
|
|
|
// be executed
|
2016-05-01 00:54:23 +02:00
|
|
|
std::unique_lock<std::mutex> l(m_job_mutex);
|
2014-07-06 21:18:00 +02:00
|
|
|
TORRENT_ASSERT((fj->flags & disk_io_job::in_progress) || !fj->storage);
|
|
|
|
|
2016-06-16 02:49:28 +02:00
|
|
|
m_generic_io_jobs.m_queued_jobs.push_front(fj);
|
2014-07-06 21:18:00 +02:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2017-09-09 19:43:54 +02:00
|
|
|
TORRENT_ASSERT(!(fj->flags & disk_io_job::in_progress));
|
2014-07-06 21:18:00 +02:00
|
|
|
TORRENT_ASSERT(fj->blocked);
|
|
|
|
}
|
2015-06-14 22:00:04 +02:00
|
|
|
|
2016-06-16 02:49:28 +02:00
|
|
|
if (num_threads() == 0 && user_add)
|
2015-06-14 22:00:04 +02:00
|
|
|
immediate_execute();
|
2014-07-06 21:18:00 +02:00
|
|
|
}
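// raise_fence() above reports one of three outcomes, which the branches
// handle:
//
//  * fence_post_fence: nothing is outstanding on the storage, so the fence
//    job itself is queued immediately and the pre-allocated flush job is
//    discarded
//  * fence_post_flush: there are outstanding jobs, so the flush job is
//    pushed to the front of the queue to drive them to disk before the
//    fence job can run
//  * anything else: both jobs stay blocked on the storage until the
//    outstanding jobs drain on their own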
|
|
|
|
|
2016-12-31 18:35:10 +01:00
|
|
|
void disk_io_thread::add_job(disk_io_job* j, bool const user_add)
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
|
|
|
TORRENT_ASSERT(m_magic == 0x1337);
|
|
|
|
|
2017-04-07 15:15:54 +02:00
|
|
|
TORRENT_ASSERT(!j->storage || j->storage->files().is_valid());
|
2016-06-20 17:32:06 +02:00
|
|
|
TORRENT_ASSERT(j->next == nullptr);
|
2014-07-06 21:18:00 +02:00
|
|
|
// if this happens, it means we started to shut down
|
|
|
|
// the disk threads too early. We have to post all jobs
|
|
|
|
// before the disk threads are shut down
|
2015-06-14 22:00:04 +02:00
|
|
|
TORRENT_ASSERT(!m_abort
|
2017-06-11 19:53:15 +02:00
|
|
|
|| j->action == job_action_t::flush_piece
|
|
|
|
|| j->action == job_action_t::trim_cache);
|
2014-07-06 21:18:00 +02:00
|
|
|
|
|
|
|
// this happens for read jobs that get hung on pieces in the
|
|
|
|
// block cache, and then get issued
|
|
|
|
if (j->flags & disk_io_job::in_progress)
|
|
|
|
{
|
2016-05-01 00:54:23 +02:00
|
|
|
std::unique_lock<std::mutex> l(m_job_mutex);
|
2014-07-06 21:18:00 +02:00
|
|
|
TORRENT_ASSERT((j->flags & disk_io_job::in_progress) || !j->storage);
|
2016-06-16 02:49:28 +02:00
|
|
|
m_generic_io_jobs.m_queued_jobs.push_back(j);
|
2015-06-14 22:00:04 +02:00
|
|
|
|
|
|
|
// if we literally have 0 disk threads, we have to execute the jobs
|
|
|
|
// immediately. If add_job() is called internally by the disk_io_thread,
|
|
|
|
// we need to defer executing it. We only want the top level to loop
|
|
|
|
// over the job queue (as is done below)
|
2016-06-16 02:49:28 +02:00
|
|
|
if (num_threads() == 0 && user_add)
|
2015-06-14 22:00:04 +02:00
|
|
|
{
|
|
|
|
l.unlock();
|
|
|
|
immediate_execute();
|
|
|
|
}
|
2014-07-06 21:18:00 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
DLOG("add_job: %s (outstanding: %d)\n"
|
2019-03-24 17:30:22 +01:00
|
|
|
, job_name(j->action)
|
2014-07-06 21:18:00 +02:00
|
|
|
, j->storage ? j->storage->num_outstanding_jobs() : 0);
|
|
|
|
|
|
|
|
// is the fence up for this storage?
|
|
|
|
// jobs that are instantaneous are not affected by the fence, is_blocked()
|
|
|
|
// will take ownership of the job and queue it up, in case the fence is up
|
|
|
|
// if the fence flag is set, this job just raised the fence on the storage
|
|
|
|
// and should be scheduled
|
|
|
|
if (j->storage && j->storage->is_blocked(j))
|
|
|
|
{
|
2014-10-25 22:07:50 +02:00
|
|
|
m_stats_counters.inc_stats_counter(counters::blocked_disk_jobs);
|
2014-07-06 21:18:00 +02:00
|
|
|
DLOG("blocked job: %s (torrent: %d total: %d)\n"
|
2019-03-24 17:30:22 +01:00
|
|
|
, job_name(j->action), j->storage ? j->storage->num_blocked() : 0
|
2014-10-25 22:07:50 +02:00
|
|
|
, int(m_stats_counters[counters::blocked_disk_jobs]));
|
2014-07-06 21:18:00 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2016-05-01 00:54:23 +02:00
|
|
|
std::unique_lock<std::mutex> l(m_job_mutex);
|
2014-07-06 21:18:00 +02:00
|
|
|
|
|
|
|
TORRENT_ASSERT((j->flags & disk_io_job::in_progress) || !j->storage);
|
2015-06-14 22:00:04 +02:00
|
|
|
|
2016-06-16 02:49:28 +02:00
|
|
|
job_queue& q = queue_for_job(j);
|
|
|
|
q.m_queued_jobs.push_back(j);
|
|
|
|
// if we literally have 0 disk threads, we have to execute the jobs
|
|
|
|
// immediately. If add_job() is called internally by the disk_io_thread,
|
|
|
|
// we need to defer executing it. We only want the top level to loop
|
|
|
|
// over the job queue (as is done below)
|
|
|
|
if (pool_for_job(j).max_threads() == 0 && user_add)
|
2015-06-14 22:00:04 +02:00
|
|
|
{
|
2016-06-16 02:49:28 +02:00
|
|
|
l.unlock();
|
|
|
|
immediate_execute();
|
2015-06-14 22:00:04 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void disk_io_thread::immediate_execute()
|
|
|
|
{
|
2016-06-16 02:49:28 +02:00
|
|
|
while (!m_generic_io_jobs.m_queued_jobs.empty())
|
2015-06-14 22:00:04 +02:00
|
|
|
{
|
2016-06-16 02:49:28 +02:00
|
|
|
disk_io_job* j = m_generic_io_jobs.m_queued_jobs.pop_front();
|
2015-06-14 22:00:04 +02:00
|
|
|
maybe_flush_write_blocks();
|
|
|
|
execute_job(j);
|
|
|
|
}
|
2014-07-06 21:18:00 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
void disk_io_thread::submit_jobs()
|
|
|
|
{
|
2016-05-01 00:54:23 +02:00
|
|
|
std::unique_lock<std::mutex> l(m_job_mutex);
|
2016-06-16 02:49:28 +02:00
|
|
|
if (!m_generic_io_jobs.m_queued_jobs.empty())
|
|
|
|
{
|
|
|
|
m_generic_io_jobs.m_job_cond.notify_all();
|
|
|
|
m_generic_threads.job_queued(m_generic_io_jobs.m_queued_jobs.size());
|
|
|
|
}
|
|
|
|
if (!m_hash_io_jobs.m_queued_jobs.empty())
|
|
|
|
{
|
|
|
|
m_hash_io_jobs.m_job_cond.notify_all();
|
|
|
|
m_hash_threads.job_queued(m_hash_io_jobs.m_queued_jobs.size());
|
|
|
|
}
|
2014-07-06 21:18:00 +02:00
|
|
|
}
|
|
|
|
|
2015-06-14 22:00:04 +02:00
|
|
|
void disk_io_thread::maybe_flush_write_blocks()
|
|
|
|
{
|
2016-12-10 20:15:25 +01:00
|
|
|
time_point const now = clock_type::now();
|
2015-06-14 22:00:04 +02:00
|
|
|
if (now <= m_last_cache_expiry + seconds(5)) return;
|
|
|
|
|
2016-05-01 00:54:23 +02:00
|
|
|
std::unique_lock<std::mutex> l(m_cache_mutex);
|
2015-06-14 22:00:04 +02:00
|
|
|
DLOG("blocked_jobs: %d queued_jobs: %d num_threads %d\n"
|
|
|
|
, int(m_stats_counters[counters::blocked_disk_jobs])
|
2016-10-21 06:25:48 +02:00
|
|
|
, m_generic_io_jobs.m_queued_jobs.size(), num_threads());
|
2015-06-14 22:00:04 +02:00
|
|
|
m_last_cache_expiry = now;
|
2015-08-19 15:22:00 +02:00
|
|
|
jobqueue_t completed_jobs;
|
2015-06-14 22:00:04 +02:00
|
|
|
flush_expired_write_blocks(completed_jobs, l);
|
|
|
|
l.unlock();
|
2018-06-24 00:11:36 +02:00
|
|
|
if (!completed_jobs.empty())
|
2015-06-14 22:00:04 +02:00
|
|
|
add_completed_jobs(completed_jobs);
|
|
|
|
}
|
|
|
|
|
|
|
|
void disk_io_thread::execute_job(disk_io_job* j)
|
|
|
|
{
|
2015-08-19 15:22:00 +02:00
|
|
|
jobqueue_t completed_jobs;
|
2017-10-22 14:03:49 +02:00
|
|
|
if (j->flags & disk_io_job::aborted)
|
|
|
|
{
|
|
|
|
j->ret = status_t::fatal_disk_error;
|
|
|
|
j->error = storage_error(boost::asio::error::operation_aborted);
|
|
|
|
completed_jobs.push_back(j);
|
|
|
|
add_completed_jobs(completed_jobs);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2015-06-14 22:00:04 +02:00
|
|
|
perform_job(j, completed_jobs);
|
2018-06-24 00:11:36 +02:00
|
|
|
if (!completed_jobs.empty())
|
2015-06-14 22:00:04 +02:00
|
|
|
add_completed_jobs(completed_jobs);
|
|
|
|
}
|
|
|
|
|
2016-06-16 02:49:28 +02:00
|
|
|
bool disk_io_thread::wait_for_job(job_queue& jobq, disk_io_thread_pool& threads
|
|
|
|
, std::unique_lock<std::mutex>& l)
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
2016-06-16 02:49:28 +02:00
|
|
|
TORRENT_ASSERT(l.owns_lock());
|
2014-07-06 21:18:00 +02:00
|
|
|
|
2016-06-16 02:49:28 +02:00
|
|
|
// the thread should only go active if it is exiting or there is work to do
|
|
|
|
// if the thread goes active on every wakeup it causes the minimum idle thread
|
|
|
|
// count to be lower than it should be
|
|
|
|
// for performance reasons we also want to avoid going idle and active again
|
|
|
|
// if there is already work to do
|
|
|
|
if (jobq.m_queued_jobs.empty())
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
2016-06-16 02:49:28 +02:00
|
|
|
threads.thread_idle();
|
2014-07-06 21:18:00 +02:00
|
|
|
|
2016-06-16 02:49:28 +02:00
|
|
|
do
|
|
|
|
{
|
2014-07-06 21:18:00 +02:00
|
|
|
// if the number of wanted threads is decreased,
|
|
|
|
// we may stop this thread
|
2016-06-16 02:49:28 +02:00
|
|
|
// when we're terminating the last thread, make sure
|
2014-07-06 21:18:00 +02:00
|
|
|
// we finish up all queued jobs first
|
2016-06-16 02:49:28 +02:00
|
|
|
if (threads.should_exit()
|
|
|
|
&& (jobq.m_queued_jobs.empty()
|
|
|
|
|| threads.num_threads() > 1)
|
|
|
|
// try_thread_exit must be the last condition
|
|
|
|
&& threads.try_thread_exit(std::this_thread::get_id()))
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
|
|
|
// time to exit this thread.
|
2016-06-16 02:49:28 +02:00
|
|
|
threads.thread_active();
|
|
|
|
return true;
|
2008-04-13 23:26:57 +02:00
|
|
|
}
|
2014-07-06 21:18:00 +02:00
|
|
|
|
2016-06-16 02:49:28 +02:00
|
|
|
jobq.m_job_cond.wait(l);
|
|
|
|
} while (jobq.m_queued_jobs.empty());
|
|
|
|
|
|
|
|
threads.thread_active();
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2017-01-09 01:22:59 +01:00
|
|
|
void disk_io_thread::thread_fun(job_queue& queue
|
|
|
|
, disk_io_thread_pool& pool)
|
2016-06-16 02:49:28 +02:00
|
|
|
{
|
2017-01-09 01:22:59 +01:00
|
|
|
std::thread::id const thread_id = std::this_thread::get_id();
|
2016-06-16 02:49:28 +02:00
|
|
|
|
2019-03-24 17:30:22 +01:00
|
|
|
DLOG("started disk thread\n");
|
2016-06-16 02:49:28 +02:00
|
|
|
|
2016-06-18 22:53:23 +02:00
|
|
|
std::unique_lock<std::mutex> l(m_job_mutex);
|
|
|
|
|
2016-06-16 02:49:28 +02:00
|
|
|
++m_num_running_threads;
|
|
|
|
m_stats_counters.inc_stats_counter(counters::num_running_threads, 1);
|
|
|
|
|
|
|
|
for (;;)
|
|
|
|
{
|
2016-07-09 22:26:26 +02:00
|
|
|
disk_io_job* j = nullptr;
|
2017-01-09 01:22:59 +01:00
|
|
|
bool const should_exit = wait_for_job(queue, pool, l);
|
|
|
|
if (should_exit) break;
|
|
|
|
j = queue.m_queued_jobs.pop_front();
|
2014-07-06 21:18:00 +02:00
|
|
|
l.unlock();
|
|
|
|
|
|
|
|
TORRENT_ASSERT((j->flags & disk_io_job::in_progress) || !j->storage);
|
|
|
|
|
2017-01-09 01:22:59 +01:00
|
|
|
if (&pool == &m_generic_threads && thread_id == pool.first_thread_id())
|
2014-07-06 21:18:00 +02:00
|
|
|
{
|
|
|
|
// there's no need for all threads to be doing this
|
2015-06-14 22:00:04 +02:00
|
|
|
maybe_flush_write_blocks();
				time_point const now = aux::time_now();
				{
					std::unique_lock<std::mutex> l2(m_need_tick_mutex);
					while (!m_need_tick.empty() && m_need_tick.front().first < now)
					{
						std::shared_ptr<storage_interface> st = m_need_tick.front().second.lock();
						m_need_tick.erase(m_need_tick.begin());
						if (st)
						{
							l2.unlock();
							st->tick();
							l2.lock();
						}
					}
				}
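
				// if close_file_interval is configured, periodically ask the
				// file pool to close its oldest file handle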
				if (now > m_next_close_oldest_file)
				{
					seconds const interval(m_settings.get_int(settings_pack::close_file_interval));
					if (interval <= seconds(0))
					{
						m_next_close_oldest_file = max_time();
					}
					else
					{
						m_next_close_oldest_file = now + interval;
						m_file_pool.close_oldest();
					}
				}
			}

			execute_job(j);

			l.lock();
		}

		// do cleanup in the last running thread
		// if we're not aborting, that means we just configured the thread pool to
		// not have any threads (i.e. perform all disk operations in the network
		// thread). In this case, the cleanup will happen in abort().
		m_stats_counters.inc_stats_counter(counters::num_running_threads, -1);
		if (--m_num_running_threads > 0 || !m_abort)
		{
			DLOG("exiting disk thread. num_threads: %d aborting: %d\n"
				, num_threads(), int(m_abort));
			TORRENT_ASSERT(m_magic == 0x1337);
			return;
		}

		// it is important to hold the job mutex while calling try_thread_exit()
		// and continue to hold it until checking m_abort above so that abort()
		// doesn't inadvertently trigger the code below when it thinks there are no
		// more disk I/O threads running
		l.unlock();

		// at this point, there are no queued jobs left. However, main
		// thread is still running and may still have peer_connections
		// that haven't fully destructed yet, reclaiming their references
		// to read blocks in the disk cache. We need to wait until all
		// references are removed from other threads before we can go
		// ahead with the cleanup.
		// This is not supposed to happen because the disk thread is now scheduled
		// for shut down after all peers have shut down (see
		// session_impl::abort_stage2()).
		std::unique_lock<std::mutex> l2(m_cache_mutex);
		TORRENT_ASSERT_VAL(m_disk_cache.pinned_blocks() == 0
			, m_disk_cache.pinned_blocks());
		while (m_disk_cache.pinned_blocks() > 0)
		{
			l2.unlock();
			std::this_thread::sleep_for(milliseconds(100));
			l2.lock();
		}
		l2.unlock();

		DLOG("the last disk thread alive. cleaning up\n");

		abort_jobs();

		TORRENT_ASSERT(m_magic == 0x1337);
	}
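
	// fail any remaining jobs with operation_aborted and release all open file
	// handles. The m_jobs_aborted flag ensures this only runs once, whether it
	// is reached from the last exiting disk thread or during abort().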
	void disk_io_thread::abort_jobs()
	{
		DLOG("disk_io_thread::abort_jobs\n");

		TORRENT_ASSERT(m_magic == 0x1337);
		if (m_jobs_aborted.test_and_set()) return;

		jobqueue_t jobs;
		m_disk_cache.clear(jobs);
		fail_jobs(storage_error(boost::asio::error::operation_aborted), jobs);

		// close all files. This may take a long
		// time on certain OSes (e.g. Mac OS)
		// that's why it's important to do this in
		// the disk thread in parallel with stopping
		// trackers.
		m_file_pool.release();

#if TORRENT_USE_ASSERTS
		// by now, all pieces should have been evicted
		auto pieces = m_disk_cache.all_pieces();
		TORRENT_ASSERT(pieces.first == pieces.second);
#endif

		TORRENT_ASSERT(m_magic == 0x1337);
	}
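
	// the total number of threads configured across the generic and hash pools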
	int disk_io_thread::num_threads() const
	{
		return m_generic_threads.max_threads() + m_hash_threads.max_threads();
	}
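
	// hash jobs are routed to the dedicated hash pool when it has any threads
	// configured; all other jobs (and every job when the hash pool is empty)
	// go to the generic pool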
	disk_io_thread::job_queue& disk_io_thread::queue_for_job(disk_io_job* j)
	{
		if (m_hash_threads.max_threads() > 0 && j->action == job_action_t::hash)
			return m_hash_io_jobs;
		else
			return m_generic_io_jobs;
	}

	disk_io_thread_pool& disk_io_thread::pool_for_job(disk_io_job* j)
	{
		if (m_hash_threads.max_threads() > 0 && j->action == job_action_t::hash)
			return m_hash_threads;
		else
			return m_generic_threads;
	}

	// this is a callback called by the block_cache when
	// it's exceeding the disk cache size.
	void disk_io_thread::trigger_cache_trim()
	{
		// we just exceeded the cache size limit. Trigger a trim job
		disk_io_job* j = allocate_job(job_action_t::trim_cache);
		add_job(j, false);
		submit_jobs();
	}
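
	// take a batch of jobs that just completed and hand their completion
	// handlers over to the network thread. Completing a job can lower a
	// storage fence and release more jobs, some of which complete
	// immediately, so keep going until no new completions are produced.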
	void disk_io_thread::add_completed_jobs(jobqueue_t& jobs)
	{
		jobqueue_t new_completed_jobs;
		do
		{
			// when a job completes, it's possible for it to cause
			// a fence to be lowered, issuing the jobs queued up
			// behind the fence. It's also possible for some of these
			// jobs to be cache-hits, completing immediately. Those
			// jobs are added to the new_completed_jobs queue and
			// we need to re-issue those
			add_completed_jobs_impl(jobs, new_completed_jobs);
			TORRENT_ASSERT(jobs.empty());
			jobs.swap(new_completed_jobs);
		} while (!jobs.empty());
	}
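
	// process one batch of completed jobs: update fence and blocked-job
	// counters, re-queue any jobs released by lowered fences (reads are
	// prepared, writes are inserted into the block cache and may spawn
	// flush jobs) and append the batch to m_completed_jobs for the network
	// thread to pick up.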
	void disk_io_thread::add_completed_jobs_impl(jobqueue_t& jobs
		, jobqueue_t& completed_jobs)
	{
		jobqueue_t new_jobs;
		int ret = 0;
		for (auto i = jobs.iterate(); i.get(); i.next())
		{
			disk_io_job* j = i.get();
			TORRENT_ASSERT((j->flags & disk_io_job::in_progress) || !j->storage);

			// DLOG("job_complete %s outstanding: %d\n"
			//	, job_name(j->action), j->storage ? j->storage->num_outstanding_jobs() : 0);

			if (j->storage)
			{
				if (j->flags & disk_io_job::fence)
				{
					m_stats_counters.inc_stats_counter(
						counters::num_fenced_read + static_cast<int>(j->action), -1);
				}

				ret += j->storage->job_complete(j, new_jobs);
			}
			TORRENT_ASSERT(ret == new_jobs.size());
			TORRENT_ASSERT(!(j->flags & disk_io_job::in_progress));
#if TORRENT_USE_ASSERTS
			TORRENT_ASSERT(j->job_posted == false);
			j->job_posted = true;
#endif
		}

		if (ret)
		{
			DLOG("unblocked %d jobs (%d left)\n", ret
				, int(m_stats_counters[counters::blocked_disk_jobs]) - ret);
		}

		m_stats_counters.inc_stats_counter(counters::blocked_disk_jobs, -ret);
		TORRENT_ASSERT(int(m_stats_counters[counters::blocked_disk_jobs]) >= 0);

		if (!new_jobs.empty())
		{
#if TORRENT_USE_ASSERTS
			for (auto i = new_jobs.iterate(); i.get(); i.next())
			{
				disk_io_job const* j = static_cast<disk_io_job const*>(i.get());
				TORRENT_ASSERT((j->flags & disk_io_job::in_progress) || !j->storage);

				if (j->action != job_action_t::write) continue;

				std::unique_lock<std::mutex> l(m_cache_mutex);
				cached_piece_entry* pe = m_disk_cache.find_piece(j);
				if (!pe) continue;

				TORRENT_ASSERT(pe->blocks[j->d.io.offset / 16 / 1024].buf
					!= boost::get<disk_buffer_holder>(j->argument).get());
				TORRENT_ASSERT(pe->blocks[j->d.io.offset / 16 / 1024].buf == nullptr);
				TORRENT_ASSERT(!pe->hashing_done);
			}
#endif
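
			// sort the newly released jobs: reads are prepared right away,
			// writes go into the block cache as dirty blocks (possibly
			// triggering a flush job), everything else is re-queued for the
			// generic threads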
			jobqueue_t other_jobs;
			jobqueue_t flush_jobs;
			std::unique_lock<std::mutex> l_(m_cache_mutex);
			while (!new_jobs.empty())
			{
				disk_io_job* j = new_jobs.pop_front();

				if (j->action == job_action_t::read)
				{
					int const state = prep_read_job_impl(j, false);
					switch (state)
					{
						case 0:
							completed_jobs.push_back(j);
							break;
						case 1:
							other_jobs.push_back(j);
							break;
					}
					continue;
				}

				// write jobs should be put straight into the cache
				if (j->action != job_action_t::write)
				{
					other_jobs.push_back(j);
					continue;
				}

				cached_piece_entry* pe = m_disk_cache.add_dirty_block(j
					, !m_settings.get_bool(settings_pack::disable_hash_checks));

				if (pe == nullptr)
				{
					// this isn't correct, since jobs in the jobs
					// queue aren't ordered
					other_jobs.push_back(j);
					continue;
				}

#if TORRENT_USE_ASSERTS
				pe->piece_log.push_back(piece_log_t(j->action, j->d.io.offset / 0x4000));
#endif

				if (!pe->hashing_done
					&& pe->hash == nullptr
					&& !m_settings.get_bool(settings_pack::disable_hash_checks))
				{
					pe->hash.reset(new partial_hash);
					m_disk_cache.update_cache_state(pe);
				}

				TORRENT_PIECE_ASSERT(pe->cache_state <= cached_piece_entry::read_lru1 || pe->cache_state == cached_piece_entry::read_lru2, pe);

				if (pe->outstanding_flush == 0)
				{
					pe->outstanding_flush = 1;

					// the block and write job were successfully inserted
					// into the cache. Now, see if we should trigger a flush
					disk_io_job* fj = allocate_job(job_action_t::flush_hashed);
					fj->storage = j->storage;
					fj->piece = j->piece;
					flush_jobs.push_back(fj);
				}
			}
			l_.unlock();

			{
				std::lock_guard<std::mutex> l(m_job_mutex);
				m_generic_io_jobs.m_queued_jobs.append(other_jobs);
			}

			while (!flush_jobs.empty())
			{
				disk_io_job* j = flush_jobs.pop_front();
				add_job(j, false);
			}

			{
				std::lock_guard<std::mutex> l(m_job_mutex);
				m_generic_io_jobs.m_job_cond.notify_all();
				m_generic_threads.job_queued(m_generic_io_jobs.m_queued_jobs.size());
			}
		}

		std::lock_guard<std::mutex> l(m_completed_jobs_mutex);
		m_completed_jobs.append(jobs);

		if (!m_job_completions_in_flight)
		{
			// we take this lock just to make the logging prettier (non-interleaved)
			DLOG("posting job handlers (%d)\n", m_completed_jobs.size());

			m_ios.post(std::bind(&disk_io_thread::call_job_handlers, this));
			m_job_completions_in_flight = true;
		}
	}
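
	// posted from add_completed_jobs_impl(); invokes every queued completion
	// callback and recycles the job objects in batches of 64.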
	// This is run in the network thread
	void disk_io_thread::call_job_handlers()
	{
		m_stats_counters.inc_stats_counter(counters::on_disk_counter);
		std::unique_lock<std::mutex> l(m_completed_jobs_mutex);

		DLOG("call_job_handlers (%d)\n", m_completed_jobs.size());

		TORRENT_ASSERT(m_job_completions_in_flight);
		m_job_completions_in_flight = false;

		disk_io_job* j = m_completed_jobs.get_all();
		l.unlock();

		aux::array<disk_io_job*, 64> to_delete;
		int cnt = 0;

		while (j)
		{
			TORRENT_ASSERT(j->job_posted == true);
			TORRENT_ASSERT(j->callback_called == false);
			// DLOG("  callback: %s\n", job_name(j->action));
			disk_io_job* next = j->next;

#if TORRENT_USE_ASSERTS
			j->callback_called = true;
#endif
			j->call_callback();
			to_delete[cnt++] = j;
			j = next;
			if (cnt == int(to_delete.size()))
			{
				cnt = 0;
				free_jobs(to_delete.data(), int(to_delete.size()));
			}
		}

		if (cnt > 0) free_jobs(to_delete.data(), cnt);
	}
}
|