2007-06-10 22:46:09 +02:00
|
|
|
/*
|
|
|
|
|
|
|
|
Copyright (c) 2007, Arvid Norberg
|
|
|
|
All rights reserved.
|
|
|
|
|
|
|
|
Redistribution and use in source and binary forms, with or without
|
|
|
|
modification, are permitted provided that the following conditions
|
|
|
|
are met:
|
|
|
|
|
|
|
|
* Redistributions of source code must retain the above copyright
|
|
|
|
notice, this list of conditions and the following disclaimer.
|
|
|
|
* Redistributions in binary form must reproduce the above copyright
|
|
|
|
notice, this list of conditions and the following disclaimer in
|
|
|
|
the documentation and/or other materials provided with the distribution.
|
|
|
|
* Neither the name of the author nor the names of its
|
|
|
|
contributors may be used to endorse or promote products derived
|
|
|
|
from this software without specific prior written permission.
|
|
|
|
|
|
|
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
|
|
|
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
|
|
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
|
|
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
|
|
|
|
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
|
|
|
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
|
|
|
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
|
|
|
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
|
|
|
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
|
|
|
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
|
|
|
POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
|
|
|
|
*/
|
|
|
|
|
2009-09-05 09:21:10 +02:00
|
|
|
/*
|
|
|
|
Disk queue elevator patch by Morten Husveit
|
|
|
|
*/
|
|
|
|
|
2007-06-10 22:46:09 +02:00
|
|
|
#include "libtorrent/storage.hpp"
|
|
|
|
#include "libtorrent/disk_io_thread.hpp"
|
2008-04-13 00:08:07 +02:00
|
|
|
#include "libtorrent/disk_buffer_holder.hpp"
|
2009-01-11 03:02:34 +01:00
|
|
|
#include "libtorrent/alloca.hpp"
|
|
|
|
#include "libtorrent/invariant_check.hpp"
|
2009-02-03 08:46:24 +01:00
|
|
|
#include "libtorrent/error_code.hpp"
|
2009-11-23 09:38:50 +01:00
|
|
|
#include "libtorrent/error.hpp"
|
2010-01-23 04:02:32 +01:00
|
|
|
#include "libtorrent/file_pool.hpp"
|
2008-02-22 05:11:04 +01:00
|
|
|
#include <boost/scoped_array.hpp>
|
2009-11-23 09:38:50 +01:00
|
|
|
#include <boost/bind.hpp>
|
2007-06-10 22:46:09 +02:00
|
|
|
|
2007-09-17 10:15:54 +02:00
|
|
|
#include "libtorrent/time.hpp"
|
|
|
|
|
2009-03-31 10:15:21 +02:00
|
|
|
#if TORRENT_USE_MLOCK && !defined TORRENT_WINDOWS
|
2009-02-06 10:46:13 +01:00
|
|
|
#include <sys/mman.h>
|
|
|
|
#endif
|
|
|
|
|
2010-03-10 08:14:10 +01:00
|
|
|
#ifdef TORRENT_BSD
|
|
|
|
#include <sys/sysctl.h>
|
|
|
|
#endif
|
|
|
|
|
2010-03-12 04:17:25 +01:00
|
|
|
#if TORRENT_USE_RLIMIT
|
|
|
|
#include <sys/resource.h>
|
|
|
|
#endif
|
|
|
|
|
2010-07-24 04:54:42 +02:00
|
|
|
#ifdef TORRENT_LINUX
|
|
|
|
#include <linux/unistd.h>
|
|
|
|
#endif
|
|
|
|
|
2007-06-10 22:46:09 +02:00
|
|
|
namespace libtorrent
|
|
|
|
{
|
2010-01-12 02:56:48 +01:00
|
|
|
bool should_cancel_on_abort(disk_io_job const& j);
|
|
|
|
bool is_read_operation(disk_io_job const& j);
|
|
|
|
bool operation_has_buffer(disk_io_job const& j);
|
|
|
|
|
2009-01-21 08:31:49 +01:00
|
|
|
	// Constructs the pool that hands out fixed-size disk cache blocks.
	// block_size is the size in bytes of every buffer returned by
	// allocate_buffer().
	disk_buffer_pool::disk_buffer_pool(int block_size)
		: m_block_size(block_size)
		, m_in_use(0)
#ifndef TORRENT_DISABLE_POOL_ALLOCATOR
		// the boost pool grows in increments of cache_buffer_chunk_size
		// blocks at a time
		, m_pool(block_size, m_settings.cache_buffer_chunk_size)
#endif
	{
#if defined TORRENT_DISK_STATS || defined TORRENT_STATS
		m_allocations = 0;
#endif
#ifdef TORRENT_DISK_STATS
		// per-category buffer accounting, logged to disk_buffers.log
		m_log.open("disk_buffers.log", std::ios::trunc);
		m_categories["read cache"] = 0;
		m_categories["write cache"] = 0;

		m_disk_access_log.open("disk_access.log", std::ios::trunc);
#endif
#ifdef TORRENT_DEBUG
		// magic cookie used to detect use-after-free of this object
		m_magic = 0x1337;
#endif
	}
|
|
|
|
|
2009-05-03 05:16:15 +02:00
|
|
|
#ifdef TORRENT_DEBUG
|
|
|
|
disk_buffer_pool::~disk_buffer_pool()
|
|
|
|
{
|
|
|
|
TORRENT_ASSERT(m_magic == 0x1337);
|
|
|
|
m_magic = 0;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2009-08-05 21:02:51 +02:00
|
|
|
#if defined TORRENT_DEBUG || defined TORRENT_DISK_STATS
	// Returns true if 'buffer' was handed out by this pool. The pool
	// mutex must already be held; 'l' is passed in to document that.
	bool disk_buffer_pool::is_disk_buffer(char* buffer
		, mutex::scoped_lock& l) const
	{
		TORRENT_ASSERT(m_magic == 0x1337);
#ifdef TORRENT_DISK_STATS
		// every live buffer has an entry in the category map; a pointer
		// without one did not come from allocate_buffer()
		if (m_buf_to_category.find(buffer)
			== m_buf_to_category.end()) return false;
#endif
#ifdef TORRENT_DISABLE_POOL_ALLOCATOR
		// with plain page-aligned malloc there is no cheap ownership test
		return true;
#else
		return m_pool.is_from(buffer);
#endif
	}
|
2009-05-23 17:50:38 +02:00
|
|
|
|
|
|
|
bool disk_buffer_pool::is_disk_buffer(char* buffer) const
|
|
|
|
{
|
2009-10-20 04:49:56 +02:00
|
|
|
mutex::scoped_lock l(m_pool_mutex);
|
2009-05-23 17:50:38 +02:00
|
|
|
return is_disk_buffer(buffer, l);
|
|
|
|
}
|
2009-01-21 08:31:49 +01:00
|
|
|
#endif
|
|
|
|
|
2009-01-23 10:13:31 +01:00
|
|
|
char* disk_buffer_pool::allocate_buffer(char const* category)
|
2009-01-21 08:31:49 +01:00
|
|
|
{
|
2009-10-20 04:49:56 +02:00
|
|
|
mutex::scoped_lock l(m_pool_mutex);
|
2009-05-03 05:16:15 +02:00
|
|
|
TORRENT_ASSERT(m_magic == 0x1337);
|
2009-01-21 08:31:49 +01:00
|
|
|
#ifdef TORRENT_DISABLE_POOL_ALLOCATOR
|
2009-01-23 10:13:31 +01:00
|
|
|
char* ret = page_aligned_allocator::malloc(m_block_size);
|
2009-01-21 08:31:49 +01:00
|
|
|
#else
|
2009-01-23 10:13:31 +01:00
|
|
|
char* ret = (char*)m_pool.ordered_malloc();
|
2009-05-12 20:52:05 +02:00
|
|
|
m_pool.set_next_size(m_settings.cache_buffer_chunk_size);
|
2009-01-23 10:13:31 +01:00
|
|
|
#endif
|
2009-05-01 10:00:58 +02:00
|
|
|
++m_in_use;
|
2009-03-31 10:15:21 +02:00
|
|
|
#if TORRENT_USE_MLOCK
|
2009-02-06 10:46:13 +01:00
|
|
|
if (m_settings.lock_disk_cache)
|
|
|
|
{
|
|
|
|
#ifdef TORRENT_WINDOWS
|
|
|
|
VirtualLock(ret, m_block_size);
|
|
|
|
#else
|
|
|
|
mlock(ret, m_block_size);
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2009-05-04 08:42:24 +02:00
|
|
|
#if defined TORRENT_DISK_STATS || defined TORRENT_STATS
|
2009-01-23 10:13:31 +01:00
|
|
|
++m_allocations;
|
2009-05-04 08:42:24 +02:00
|
|
|
#endif
|
|
|
|
#ifdef TORRENT_DISK_STATS
|
2009-01-23 10:13:31 +01:00
|
|
|
++m_categories[category];
|
|
|
|
m_buf_to_category[ret] = category;
|
|
|
|
m_log << log_time() << " " << category << ": " << m_categories[category] << "\n";
|
2009-01-21 08:31:49 +01:00
|
|
|
#endif
|
2009-05-23 19:17:47 +02:00
|
|
|
TORRENT_ASSERT(ret == 0 || is_disk_buffer(ret, l));
|
2009-01-23 10:13:31 +01:00
|
|
|
return ret;
|
2009-01-21 08:31:49 +01:00
|
|
|
}
|
|
|
|
|
2009-05-19 09:00:05 +02:00
|
|
|
#ifdef TORRENT_DISK_STATS
|
|
|
|
void disk_buffer_pool::rename_buffer(char* buf, char const* category)
|
|
|
|
{
|
2009-10-20 04:49:56 +02:00
|
|
|
mutex::scoped_lock l(m_pool_mutex);
|
2009-05-23 19:17:47 +02:00
|
|
|
TORRENT_ASSERT(is_disk_buffer(buf, l));
|
2009-05-19 09:00:05 +02:00
|
|
|
TORRENT_ASSERT(m_categories.find(m_buf_to_category[buf])
|
|
|
|
!= m_categories.end());
|
|
|
|
std::string const& prev_category = m_buf_to_category[buf];
|
|
|
|
--m_categories[prev_category];
|
|
|
|
m_log << log_time() << " " << prev_category << ": " << m_categories[prev_category] << "\n";
|
|
|
|
|
|
|
|
++m_categories[category];
|
|
|
|
m_buf_to_category[buf] = category;
|
|
|
|
m_log << log_time() << " " << category << ": " << m_categories[category] << "\n";
|
2009-05-23 19:17:47 +02:00
|
|
|
TORRENT_ASSERT(m_categories.find(m_buf_to_category[buf])
|
|
|
|
!= m_categories.end());
|
2009-05-19 09:00:05 +02:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2010-05-13 17:01:20 +02:00
|
|
|
void disk_buffer_pool::free_multiple_buffers(char** bufvec, int numbufs)
|
|
|
|
{
|
|
|
|
char** end = bufvec + numbufs;
|
|
|
|
// sort the pointers in order to maximize cache hits
|
|
|
|
std::sort(bufvec, end);
|
|
|
|
|
|
|
|
mutex::scoped_lock l(m_pool_mutex);
|
|
|
|
for (; bufvec != end; ++bufvec)
|
|
|
|
{
|
|
|
|
char* buf = *bufvec;
|
|
|
|
TORRENT_ASSERT(buf);
|
|
|
|
free_buffer_impl(buf, l);;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-01-21 08:31:49 +01:00
|
|
|
void disk_buffer_pool::free_buffer(char* buf)
|
|
|
|
{
|
2009-10-20 04:49:56 +02:00
|
|
|
mutex::scoped_lock l(m_pool_mutex);
|
2010-05-13 17:01:20 +02:00
|
|
|
free_buffer_impl(buf, l);
|
|
|
|
}
|
|
|
|
|
|
|
|
	// Actually returns 'buf' to the underlying allocator and updates
	// the usage counters. The pool mutex must be held by the caller;
	// 'l' is passed in to document that.
	void disk_buffer_pool::free_buffer_impl(char* buf, mutex::scoped_lock& l)
	{
		TORRENT_ASSERT(buf);
		TORRENT_ASSERT(m_magic == 0x1337);
		TORRENT_ASSERT(is_disk_buffer(buf, l));
#if defined TORRENT_DISK_STATS || defined TORRENT_STATS
		--m_allocations;
#endif
#ifdef TORRENT_DISK_STATS
		// update and log the per-category counter, then drop the buffer
		// from the tracking map
		TORRENT_ASSERT(m_categories.find(m_buf_to_category[buf])
			!= m_categories.end());
		std::string const& category = m_buf_to_category[buf];
		--m_categories[category];
		m_log << log_time() << " " << category << ": " << m_categories[category] << "\n";
		m_buf_to_category.erase(buf);
#endif
#if TORRENT_USE_MLOCK
		if (m_settings.lock_disk_cache)
		{
			// unpin the pages that allocate_buffer() locked
#ifdef TORRENT_WINDOWS
			VirtualUnlock(buf, m_block_size);
#else
			munlock(buf, m_block_size);
#endif
		}
#endif
#ifdef TORRENT_DISABLE_POOL_ALLOCATOR
		page_aligned_allocator::free(buf);
#else
		m_pool.free(buf);
#endif
		--m_in_use;
	}
|
|
|
|
|
|
|
|
	// Returns any completely unused chunks held by the boost pool back
	// to the system. No-op when the pool allocator is disabled (plain
	// malloc frees eagerly).
	void disk_buffer_pool::release_memory()
	{
		TORRENT_ASSERT(m_magic == 0x1337);
#ifndef TORRENT_DISABLE_POOL_ALLOCATOR
		mutex::scoped_lock l(m_pool_mutex);
		m_pool.release_memory();
#endif
	}
|
|
|
|
|
|
|
|
// ------- disk_io_thread ------
|
|
|
|
|
|
|
|
|
2009-11-23 09:38:50 +01:00
|
|
|
	// Constructs the disk I/O subsystem and spawns its worker thread.
	// 'queue_callback' is posted to 'ios' by the disk thread
	// (presumably when the write queue has drained — confirm in
	// thread_fun), 'fp' is the shared file handle cache, and
	// 'block_size' is forwarded to the disk_buffer_pool base.
	disk_io_thread::disk_io_thread(io_service& ios
		, boost::function<void()> const& queue_callback
		, file_pool& fp
		, int block_size)
		: disk_buffer_pool(block_size)
		, m_abort(false)
		, m_waiting_to_shutdown(false)
		, m_queue_buffer_size(0)
		, m_last_file_check(time_now_hires())
		, m_physical_ram(0)
		, m_exceeded_write_queue(false)
		, m_ios(ios)
		, m_queue_callback(queue_callback)
		// keep the io_service alive while this object exists
		, m_work(io_service::work(m_ios))
		, m_file_pool(fp)
		// initialized last: this member starts the worker thread
		, m_disk_io_thread(boost::bind(&disk_io_thread::thread_fun, this))
	{
		// don't do anything in here. Essentially all members
		// of this object are owned by the newly created thread.
		// initialize stuff in thread_fun().
	}
|
2007-06-10 22:46:09 +02:00
|
|
|
|
|
|
|
disk_io_thread::~disk_io_thread()
|
|
|
|
{
|
2007-12-24 09:15:10 +01:00
|
|
|
TORRENT_ASSERT(m_abort == true);
|
2007-06-10 22:46:09 +02:00
|
|
|
}
|
|
|
|
|
2010-07-14 06:16:38 +02:00
|
|
|
	// Requests shutdown of the disk thread by inserting an
	// abort_thread job at the front of the job queue and waking the
	// thread. Returns immediately; call join() to wait for exit.
	void disk_io_thread::abort()
	{
		mutex::scoped_lock l(m_queue_mutex);
		disk_io_job j;
		m_waiting_to_shutdown = true;
		j.action = disk_io_job::abort_thread;
		j.start_time = time_now_hires();
		// front of the queue, so it's seen before any pending jobs
		m_jobs.insert(m_jobs.begin(), j);
		m_signal.signal(l);
	}
|
2007-12-24 09:15:10 +01:00
|
|
|
|
2010-07-14 06:16:38 +02:00
|
|
|
	// Blocks until the disk thread has exited, then discards whatever
	// jobs are still queued. abort() must have been called first.
	void disk_io_thread::join()
	{
		m_disk_io_thread.join();
		mutex::scoped_lock l(m_queue_mutex);
		TORRENT_ASSERT(m_abort == true);
		m_jobs.clear();
	}
|
|
|
|
|
2011-02-13 23:27:02 +01:00
|
|
|
bool disk_io_thread::can_write() const
|
|
|
|
{
|
|
|
|
mutex::scoped_lock l(m_queue_mutex);
|
|
|
|
return !m_exceeded_write_queue;
|
|
|
|
}
|
|
|
|
|
2008-02-08 11:22:05 +01:00
|
|
|
	// Fills 'ret' with one cached_piece_info per cached piece (write
	// cache first, then read cache) belonging to the torrent whose
	// info-hash is 'ih'.
	void disk_io_thread::get_cache_info(sha1_hash const& ih, std::vector<cached_piece_info>& ret) const
	{
		mutex::scoped_lock l(m_piece_mutex);
		ret.clear();
		ret.reserve(m_pieces.size());
		// write-cache entries
		for (cache_t::const_iterator i = m_pieces.begin()
			, end(m_pieces.end()); i != end; ++i)
		{
			torrent_info const& ti = *i->storage->info();
			if (ti.info_hash() != ih) continue;
			cached_piece_info info;
			info.next_to_hash = i->next_block_to_hash;
			info.piece = i->piece;
			info.last_use = i->expire;
			info.kind = cached_piece_info::write_cache;
			// round up: the last block of a piece may be short
			int blocks_in_piece = (ti.piece_size(i->piece) + (m_block_size) - 1) / m_block_size;
			info.blocks.resize(blocks_in_piece);
			for (int b = 0; b < blocks_in_piece; ++b)
				if (i->blocks[b].buf) info.blocks[b] = true;
			ret.push_back(info);
		}
		// read-cache entries (same fields, different 'kind')
		for (cache_t::const_iterator i = m_read_pieces.begin()
			, end(m_read_pieces.end()); i != end; ++i)
		{
			torrent_info const& ti = *i->storage->info();
			if (ti.info_hash() != ih) continue;
			cached_piece_info info;
			info.next_to_hash = i->next_block_to_hash;
			info.piece = i->piece;
			info.last_use = i->expire;
			info.kind = cached_piece_info::read_cache;
			int blocks_in_piece = (ti.piece_size(i->piece) + (m_block_size) - 1) / m_block_size;
			info.blocks.resize(blocks_in_piece);
			for (int b = 0; b < blocks_in_piece; ++b)
				if (i->blocks[b].buf) info.blocks[b] = true;
			ret.push_back(info);
		}
	}
|
|
|
|
|
|
|
|
	// Returns a snapshot of the cache statistics, augmented with the
	// current queue lengths and the rolling means of the job timers.
	cache_status disk_io_thread::status() const
	{
		mutex::scoped_lock l(m_piece_mutex);
		m_cache_stats.total_used_buffers = in_use();
		// NOTE(review): m_queue_buffer_size is elsewhere modified under
		// m_queue_mutex; reading it under m_piece_mutex here may race —
		// confirm intended
		m_cache_stats.queued_bytes = m_queue_buffer_size;

		cache_status ret = m_cache_stats;

		ret.average_queue_time = m_queue_time.mean();
		ret.average_read_time = m_read_time.mean();
		ret.average_write_time = m_write_time.mean();
		ret.average_hash_time = m_hash_time.mean();
		ret.average_job_time = m_job_time.mean();
		ret.average_sort_time = m_sort_time.mean();
		ret.job_queue_length = m_jobs.size() + m_sorted_read_jobs.size();
		ret.read_queue_size = m_sorted_read_jobs.size();

		return ret;
	}
|
|
|
|
|
2007-06-10 22:46:09 +02:00
|
|
|
	// aborts read operations
	// Cancels every cancellable job queued for storage 's' (completing
	// each callback with error -3) and queues an abort_torrent job so
	// the thread can release the storage.
	void disk_io_thread::stop(boost::intrusive_ptr<piece_manager> s)
	{
		mutex::scoped_lock l(m_queue_mutex);
		// read jobs are aborted, write and move jobs are syncronized
		for (std::list<disk_io_job>::iterator i = m_jobs.begin();
			i != m_jobs.end();)
		{
			if (i->storage != s)
			{
				++i;
				continue;
			}
			if (should_cancel_on_abort(*i))
			{
				if (i->action == disk_io_job::write)
				{
					// keep the queued-bytes accounting in sync with the
					// write job we're about to drop
					TORRENT_ASSERT(m_queue_buffer_size >= i->buffer_size);
					m_queue_buffer_size -= i->buffer_size;
				}
				post_callback(i->callback, *i, -3);
				m_jobs.erase(i++);
				continue;
			}
			++i;
		}
		disk_io_job j;
		j.action = disk_io_job::abort_torrent;
		j.storage = s;
		add_job(j, l);
	}
|
|
|
|
|
2010-01-27 05:25:45 +01:00
|
|
|
	// Functor for use with the multi_index cache: pushes a cached
	// piece's expiry time 'exp' seconds into the future, i.e. marks it
	// as recently used.
	struct update_last_use
	{
		update_last_use(int exp): expire(exp) {}
		void operator()(disk_io_thread::cached_piece_entry& p)
		{
			TORRENT_ASSERT(p.storage);
			p.expire = time_now() + seconds(expire);
		}
		// number of seconds of lifetime granted on each touch
		int expire;
	};
|
|
|
|
|
|
|
|
	// Looks up the cache entry for the (storage, piece) pair referenced
	// by job 'j' in 'cache' (either the write or the read cache).
	// Returns end() of index 0 when the piece isn't cached. The cache
	// mutex must be held; 'l' documents that.
	disk_io_thread::cache_piece_index_t::iterator disk_io_thread::find_cached_piece(
		disk_io_thread::cache_t& cache
		, disk_io_job const& j, mutex::scoped_lock& l)
	{
		// index 0 is keyed on the (storage pointer, piece index) pair
		cache_piece_index_t& idx = cache.get<0>();
		cache_piece_index_t::iterator i
			= idx.find(std::pair<void*, int>(j.storage.get(), j.piece));
		TORRENT_ASSERT(i == idx.end() || (i->storage == j.storage && i->piece == j.piece));
		return i;
	}
|
2007-06-10 22:46:09 +02:00
|
|
|
|
2008-06-12 06:40:37 +02:00
|
|
|
	// Flushes write-cache entries and evicts read-cache entries whose
	// expiry time lies more than cache_expiry seconds in the past.
	void disk_io_thread::flush_expired_pieces()
	{
		ptime now = time_now();

		mutex::scoped_lock l(m_piece_mutex);

		INVARIANT_CHECK;
		// flush write cache
		cache_lru_index_t& widx = m_pieces.get<1>();
		cache_lru_index_t::iterator i = widx.begin();
		time_duration cut_off = seconds(m_settings.cache_expiry);
		// index 1 is ordered by expiry, so we can stop at the first
		// entry that is not yet past the cut-off
		while (i != widx.end() && now - i->expire > cut_off)
		{
			TORRENT_ASSERT(i->storage);
			flush_range(const_cast<cached_piece_entry&>(*i), 0, INT_MAX, l);
			// we want to keep the piece in here to have an accurate
			// number for next_block_to_hash, if we're in avoid_readback mode
			if (m_settings.disk_cache_algorithm != session_settings::avoid_readback)
				widx.erase(i++);
			else
				++i;
		}

		// an explicit read cache is managed by the client; never expire it
		if (m_settings.explicit_read_cache) return;

		// flush read cache
		std::vector<char*> bufs;
		cache_lru_index_t& ridx = m_read_pieces.get<1>();
		i = ridx.begin();
		while (i != ridx.end() && now - i->expire > cut_off)
		{
			drain_piece_bufs(const_cast<cached_piece_entry&>(*i), bufs, l);
			ridx.erase(i++);
		}
		// free all drained buffers in one batch (takes the pool lock once)
		if (!bufs.empty()) free_multiple_buffers(&bufs[0], bufs.size());
	}
|
|
|
|
|
2010-05-17 01:14:47 +02:00
|
|
|
int disk_io_thread::drain_piece_bufs(cached_piece_entry& p, std::vector<char*>& buf
|
2010-05-13 17:01:20 +02:00
|
|
|
, mutex::scoped_lock& l)
|
|
|
|
{
|
|
|
|
int piece_size = p.storage->info()->piece_size(p.piece);
|
|
|
|
int blocks_in_piece = (piece_size + m_block_size - 1) / m_block_size;
|
2010-05-17 01:14:47 +02:00
|
|
|
int ret = 0;
|
2010-05-13 17:01:20 +02:00
|
|
|
|
|
|
|
for (int i = 0; i < blocks_in_piece; ++i)
|
|
|
|
{
|
|
|
|
if (p.blocks[i].buf == 0) continue;
|
|
|
|
buf.push_back(p.blocks[i].buf);
|
2010-05-17 01:14:47 +02:00
|
|
|
++ret;
|
2010-05-13 17:01:20 +02:00
|
|
|
p.blocks[i].buf = 0;
|
|
|
|
--p.num_blocks;
|
|
|
|
--m_cache_stats.cache_size;
|
|
|
|
--m_cache_stats.read_cache_size;
|
|
|
|
}
|
2010-05-17 01:14:47 +02:00
|
|
|
return ret;
|
2008-02-10 01:58:25 +01:00
|
|
|
}
|
|
|
|
|
2009-05-23 09:35:45 +02:00
|
|
|
// returns the number of blocks that were freed
|
2009-10-20 04:49:56 +02:00
|
|
|
int disk_io_thread::free_piece(cached_piece_entry& p, mutex::scoped_lock& l)
|
2008-02-22 05:11:04 +01:00
|
|
|
{
|
|
|
|
int piece_size = p.storage->info()->piece_size(p.piece);
|
|
|
|
int blocks_in_piece = (piece_size + m_block_size - 1) / m_block_size;
|
2009-05-23 09:35:45 +02:00
|
|
|
int ret = 0;
|
2008-02-22 05:11:04 +01:00
|
|
|
|
2010-05-17 01:14:47 +02:00
|
|
|
// build a vector of all the buffers we need to free
|
|
|
|
// and free them all in one go
|
|
|
|
std::vector<char*> buffers;
|
2008-02-22 05:11:04 +01:00
|
|
|
for (int i = 0; i < blocks_in_piece; ++i)
|
|
|
|
{
|
2009-06-10 10:30:55 +02:00
|
|
|
if (p.blocks[i].buf == 0) continue;
|
2010-05-17 01:14:47 +02:00
|
|
|
buffers.push_back(p.blocks[i].buf);
|
2009-05-23 09:35:45 +02:00
|
|
|
++ret;
|
2009-06-10 10:30:55 +02:00
|
|
|
p.blocks[i].buf = 0;
|
2008-02-22 05:11:04 +01:00
|
|
|
--p.num_blocks;
|
|
|
|
--m_cache_stats.cache_size;
|
|
|
|
--m_cache_stats.read_cache_size;
|
|
|
|
}
|
2010-05-17 01:14:47 +02:00
|
|
|
if (!buffers.empty()) free_multiple_buffers(&buffers[0], buffers.size());
|
2009-05-23 09:35:45 +02:00
|
|
|
return ret;
|
2008-02-22 05:11:04 +01:00
|
|
|
}
|
|
|
|
|
2009-05-23 09:35:45 +02:00
|
|
|
	// returns the number of blocks that were freed
	// Evicts up to 'num_blocks' blocks from the least recently used
	// read-cache piece, skipping the piece with index 'ignore'. The
	// entry is removed from the cache if it ends up empty. The piece
	// mutex must be held; 'l' documents that.
	int disk_io_thread::clear_oldest_read_piece(
		int num_blocks, int ignore, mutex::scoped_lock& l)
	{
		INVARIANT_CHECK;

		// index 1 is the LRU (expiry-ordered) view of the read cache
		cache_lru_index_t& idx = m_read_pieces.get<1>();
		if (idx.empty()) return 0;

		cache_lru_index_t::iterator i = idx.begin();
		if (i->piece == ignore)
		{
			++i;
			if (i == idx.end()) return 0;
		}

		// don't replace an entry that hasn't expired yet
		if (time_now() < i->expire) return 0;
		int blocks = 0;

		// build a vector of all the buffers we need to free
		// and free them all in one go
		std::vector<char*> buffers;
		if (num_blocks >= i->num_blocks)
		{
			// the whole piece fits in the budget: drain it entirely
			blocks = drain_piece_bufs(const_cast<cached_piece_entry&>(*i), buffers, l);
		}
		else
		{
			// delete blocks from the start and from the end
			// until num_blocks have been freed
			int end = (i->storage->info()->piece_size(i->piece) + m_block_size - 1) / m_block_size - 1;
			int start = 0;

			while (num_blocks)
			{
				// if we have a volatile read cache, only clear
				// from the end, since we're already clearing
				// from the start as blocks are read
				if (!m_settings.volatile_read_cache)
				{
					// skip over already-empty slots from the front
					while (i->blocks[start].buf == 0 && start <= end) ++start;
					if (start > end) break;
					buffers.push_back(i->blocks[start].buf);
					i->blocks[start].buf = 0;
					++blocks;
					// const_cast needed because the LRU index only hands
					// out const references
					--const_cast<cached_piece_entry&>(*i).num_blocks;
					--m_cache_stats.cache_size;
					--m_cache_stats.read_cache_size;
					--num_blocks;
					if (!num_blocks) break;
				}

				// skip over already-empty slots from the back
				while (i->blocks[end].buf == 0 && start <= end) --end;
				if (start > end) break;
				buffers.push_back(i->blocks[end].buf);
				i->blocks[end].buf = 0;
				++blocks;
				--const_cast<cached_piece_entry&>(*i).num_blocks;
				--m_cache_stats.cache_size;
				--m_cache_stats.read_cache_size;
				--num_blocks;
			}
		}
		// fully drained entries are removed from the cache
		if (i->num_blocks == 0) idx.erase(i);

		if (!buffers.empty()) free_multiple_buffers(&buffers[0], buffers.size());
		return blocks;
	}
|
|
|
|
|
2009-05-23 09:35:45 +02:00
|
|
|
int contiguous_blocks(disk_io_thread::cached_piece_entry const& b)
|
|
|
|
{
|
|
|
|
int ret = 0;
|
|
|
|
int current = 0;
|
|
|
|
int blocks_in_piece = (b.storage->info()->piece_size(b.piece) + 16 * 1024 - 1) / (16 * 1024);
|
|
|
|
for (int i = 0; i < blocks_in_piece; ++i)
|
|
|
|
{
|
2009-06-10 10:30:55 +02:00
|
|
|
if (b.blocks[i].buf) ++current;
|
2009-05-23 09:35:45 +02:00
|
|
|
else
|
|
|
|
{
|
|
|
|
if (current > ret) ret = current;
|
|
|
|
current = 0;
|
|
|
|
}
|
|
|
|
}
|
2009-05-27 18:50:46 +02:00
|
|
|
if (current > ret) ret = current;
|
2009-05-23 09:35:45 +02:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2010-01-27 05:25:45 +01:00
|
|
|
	// Finds the largest run of contiguous cached blocks in 'p' and
	// flushes that range to disk via flush_range(). Runs shorter than
	// 'lower_limit' blocks are left alone. Returns the number of
	// blocks flushed. The piece mutex must be held; 'l' documents that.
	int disk_io_thread::flush_contiguous_blocks(cached_piece_entry& p
		, mutex::scoped_lock& l, int lower_limit)
	{
		// first find the largest range of contiguous blocks
		int len = 0;
		int current = 0;
		int pos = 0;
		int start = 0;
		int blocks_in_piece = (p.storage->info()->piece_size(p.piece)
			+ m_block_size - 1) / m_block_size;
		for (int i = 0; i < blocks_in_piece; ++i)
		{
			if (p.blocks[i].buf) ++current;
			else
			{
				// run ended; remember it if it's the longest so far
				if (current > len)
				{
					len = current;
					pos = start;
				}
				current = 0;
				start = i + 1;
			}
		}
		// the longest run may extend to the end of the piece
		if (current > len)
		{
			len = current;
			pos = start;
		}

		if (len < lower_limit || len <= 0) return 0;
		len = flush_range(p, pos, pos + len, l);
		return len;
	}
|
|
|
|
|
2011-03-21 07:31:48 +01:00
|
|
|
	// Ordering predicate (used with std::max_element) comparing two
	// cache entries by their longest run of contiguous cached blocks.
	bool cmp_contiguous(disk_io_thread::cached_piece_entry const& lhs
		, disk_io_thread::cached_piece_entry const& rhs)
	{
		return lhs.num_contiguous_blocks < rhs.num_contiguous_blocks;
	}
|
|
|
|
|
2009-05-23 09:35:45 +02:00
|
|
|
	// flushes 'blocks' blocks from the cache
	// First frees blocks by clearing expired read-cache pieces
	// (skipping piece 'ignore'); then, unless 'options' contains
	// dont_flush_write_blocks, flushes write-cache pieces according to
	// the configured disk_cache_algorithm. Returns the number of
	// blocks freed/flushed. The piece mutex must be held; 'l'
	// documents that.
	int disk_io_thread::flush_cache_blocks(mutex::scoped_lock& l
		, int blocks, int ignore, int options)
	{
		// first look if there are any read cache entries that can
		// be cleared
		int ret = 0;
		int tmp = 0;
		do {
			tmp = clear_oldest_read_piece(blocks, ignore, l);
			blocks -= tmp;
			ret += tmp;
		} while (tmp > 0 && blocks > 0);

		if (options & dont_flush_write_blocks) return ret;

		if (m_settings.disk_cache_algorithm == session_settings::lru)
		{
			// flush whole pieces, least recently used first
			cache_lru_index_t& idx = m_pieces.get<1>();
			while (blocks > 0)
			{
				cache_lru_index_t::iterator i = idx.begin();
				if (i == idx.end()) return ret;
				tmp = flush_range(const_cast<cached_piece_entry&>(*i), 0, INT_MAX, l);
				idx.erase(i);
				blocks -= tmp;
				ret += tmp;
			}
		}
		else if (m_settings.disk_cache_algorithm == session_settings::largest_contiguous)
		{
			// always pick the piece with the longest contiguous run,
			// to maximize the size of each write
			cache_lru_index_t& idx = m_pieces.get<1>();
			while (blocks > 0)
			{
				cache_lru_index_t::iterator i = std::max_element(idx.begin(), idx.end(), &cmp_contiguous);
				if (i == idx.end()) return ret;
				tmp = flush_contiguous_blocks(const_cast<cached_piece_entry&>(*i), l);
				if (i->num_blocks == 0) idx.erase(i);
				blocks -= tmp;
				ret += tmp;
			}
		}
		else if (m_settings.disk_cache_algorithm == session_settings::avoid_readback)
		{
			cache_lru_index_t& idx = m_pieces.get<1>();
			for (cache_lru_index_t::iterator i = idx.begin(); i != idx.end(); ++i)
			{
				cached_piece_entry& p = const_cast<cached_piece_entry&>(*i);
				// flush the contiguous run starting at the next block
				// to be hashed — presumably so these blocks never need
				// to be read back just for hashing (confirm in
				// flush_range/hashing code)
				if (!i->blocks[i->next_block_to_hash].buf) continue;
				int piece_size = i->storage->info()->piece_size(i->piece);
				int blocks_in_piece = (piece_size + m_block_size - 1) / m_block_size;
				int start = i->next_block_to_hash;
				int end = start + 1;
				while (end < blocks_in_piece && i->blocks[end].buf) ++end;
				tmp = flush_range(p, start, end, l);
				p.num_contiguous_blocks = contiguous_blocks(p);
				blocks -= tmp;
				ret += tmp;
				if (blocks <= 0) break;
			}

			// if we still need to flush blocks, flush the largest contiguous blocks
			// regardless of if we'll have to read them back later
			// NOTE(review): unlike the largest_contiguous branch above,
			// this loop never erases empty entries — if
			// flush_contiguous_blocks() returns 0 for every remaining
			// piece it could spin; confirm that can't happen here
			while (blocks > 0)
			{
				cache_lru_index_t::iterator i = std::max_element(idx.begin(), idx.end(), &cmp_contiguous);
				if (i == idx.end()) return ret;
				tmp = flush_contiguous_blocks(const_cast<cached_piece_entry&>(*i), l);
				blocks -= tmp;
				ret += tmp;
			}
		}
		return ret;
	}
|
|
|
|
|
2010-01-27 05:25:45 +01:00
|
|
|
// flushes the blocks [start, end) of cached piece p to disk, posts the
// write-completion callback for every flushed block, and frees the block
// buffers in one batch. Returns the number of blocks flushed. The cache
// mutex l must be held on entry; it is released around the actual
// write_impl() calls and re-acquired afterwards.
int disk_io_thread::flush_range(cached_piece_entry& p
	, int start, int end, mutex::scoped_lock& l)
{
	INVARIANT_CHECK;

	TORRENT_ASSERT(start < end);

	int piece_size = p.storage->info()->piece_size(p.piece);
#ifdef TORRENT_DISK_STATS
	m_log << log_time() << " flushing " << piece_size << std::endl;
#endif
	TORRENT_ASSERT(piece_size > 0);

	int blocks_in_piece = (piece_size + m_block_size - 1) / m_block_size;
	// number of bytes accumulated in the current contiguous run of blocks
	int buffer_size = 0;
	// write cursor into the coalesce buffer (buf), only used on that path
	int offset = 0;

	// two flush strategies: with coalesce_writes all blocks of a run are
	// copied into one large buffer and written with a single-buffer call;
	// otherwise the block buffers are chained into an iovec array and
	// written with one scatter/gather call per run
	boost::scoped_array<char> buf;
	file::iovec_t* iov = 0;
	int iov_counter = 0;
	if (m_settings.coalesce_writes) buf.reset(new (std::nothrow) char[piece_size]);
	else iov = TORRENT_ALLOCA(file::iovec_t, blocks_in_piece);

	end = (std::min)(end, blocks_in_piece);
	int num_write_calls = 0;
	ptime write_start = time_now_hires();
	// note: the loop deliberately runs one past the last block (i == end)
	// so the final contiguous run is flushed by the branch below
	for (int i = start; i <= end; ++i)
	{
		// a gap in the cached blocks (or the end of the range) terminates
		// the current contiguous run: write it out now
		if (i == end || p.blocks[i].buf == 0)
		{
			if (buffer_size == 0) continue;

			TORRENT_ASSERT(buffer_size <= i * m_block_size);
			// drop the lock while performing disk I/O
			l.unlock();
			if (iov)
			{
				// the file offset of the run is its end minus its length
				int ret = p.storage->write_impl(iov, p.piece, (std::min)(
					i * m_block_size, piece_size) - buffer_size, iov_counter);
				iov_counter = 0;
				if (ret > 0) ++num_write_calls;
			}
			else
			{
				// NOTE(review): if the nothrow coalesce-buffer allocation
				// above failed, buf is null here in release builds — the
				// assert only covers debug. Confirm callers/settings make
				// that impossible.
				TORRENT_ASSERT(buf);
				file::iovec_t b = { buf.get(), buffer_size };
				int ret = p.storage->write_impl(&b, p.piece, (std::min)(
					i * m_block_size, piece_size) - buffer_size, 1);
				if (ret > 0) ++num_write_calls;
			}
			l.lock();
			++m_cache_stats.writes;
//			std::cerr << " flushing p: " << p.piece << " bytes: " << buffer_size << std::endl;
			buffer_size = 0;
			offset = 0;
			continue;
		}
		// the last block of a piece may be shorter than m_block_size
		int block_size = (std::min)(piece_size - i * m_block_size, m_block_size);
		TORRENT_ASSERT(offset + block_size <= piece_size);
		TORRENT_ASSERT(offset + block_size > 0);
		if (iov)
		{
			TORRENT_ASSERT(!buf);
			iov[iov_counter].iov_base = p.blocks[i].buf;
			iov[iov_counter].iov_len = block_size;
			++iov_counter;
		}
		else
		{
			TORRENT_ASSERT(buf);
			TORRENT_ASSERT(iov == 0);
			std::memcpy(buf.get() + offset, p.blocks[i].buf, block_size);
			offset += m_block_size;
		}
		buffer_size += block_size;
		TORRENT_ASSERT(p.num_blocks > 0);
		--p.num_blocks;
		++m_cache_stats.blocks_written;
		--m_cache_stats.cache_size;
		// advance the incremental-hash cursor past blocks flushed in order
		if (i == p.next_block_to_hash) ++p.next_block_to_hash;
	}

	ptime done = time_now_hires();

	// post the completion callback for every flushed block and collect the
	// buffers so they can all be freed with a single call below
	int ret = 0;
	disk_io_job j;
	j.storage = p.storage;
	j.action = disk_io_job::write;
	j.buffer = 0;
	j.piece = p.piece;
	// pick up any error the storage reported during the writes above
	test_error(j);
	std::vector<char*> buffers;
	for (int i = start; i < end; ++i)
	{
		if (p.blocks[i].buf == 0) continue;
		j.buffer_size = (std::min)(piece_size - i * m_block_size, m_block_size);
		int result = j.error ? -1 : j.buffer_size;
		j.offset = i * m_block_size;
		buffers.push_back(p.blocks[i].buf);
		post_callback(p.blocks[i].callback, j, result);
		p.blocks[i].callback.clear();
		p.blocks[i].buf = 0;
		++ret;
	}
	if (!buffers.empty()) free_multiple_buffers(&buffers[0], buffers.size());

	// record write timing, averaged per write_impl() call
	if (num_write_calls > 0)
	{
		m_write_time.add_sample(total_microseconds(done - write_start) / num_write_calls);
		m_cache_stats.cumulative_write_time += total_milliseconds(done - write_start);
		p.num_contiguous_blocks = contiguous_blocks(p);
	}

	TORRENT_ASSERT(buffer_size == 0);
//	std::cerr << " flushing p: " << p.piece << " cached_blocks: " << m_cache_stats.cache_size << std::endl;
#ifdef TORRENT_DEBUG
	for (int i = start; i < end; ++i)
		TORRENT_ASSERT(p.blocks[i].buf == 0);
#endif
	return ret;
}
|
|
|
|
|
2009-05-23 20:39:55 +02:00
|
|
|
// returns -1 on failure
|
2009-06-10 10:30:55 +02:00
|
|
|
int disk_io_thread::cache_block(disk_io_job& j
|
|
|
|
, boost::function<void(int,disk_io_job const&)>& handler
|
2010-01-31 20:14:00 +01:00
|
|
|
, int cache_expire
|
2009-10-20 04:49:56 +02:00
|
|
|
, mutex::scoped_lock& l)
|
2008-02-08 11:22:05 +01:00
|
|
|
{
|
2008-02-22 05:11:04 +01:00
|
|
|
INVARIANT_CHECK;
|
|
|
|
TORRENT_ASSERT(find_cached_piece(m_pieces, j, l) == m_pieces.end());
|
2009-05-31 21:33:54 +02:00
|
|
|
TORRENT_ASSERT((j.offset & (m_block_size-1)) == 0);
|
2010-01-31 20:14:00 +01:00
|
|
|
TORRENT_ASSERT(j.cache_min_time >= 0);
|
2008-02-08 11:22:05 +01:00
|
|
|
cached_piece_entry p;
|
|
|
|
|
2009-06-10 10:30:55 +02:00
|
|
|
int piece_size = j.storage->info()->piece_size(j.piece);
|
|
|
|
int blocks_in_piece = (piece_size + m_block_size - 1) / m_block_size;
|
|
|
|
// there's no point in caching the piece if
|
|
|
|
// there's only one block in it
|
|
|
|
if (blocks_in_piece <= 1) return -1;
|
|
|
|
|
2009-05-19 09:00:05 +02:00
|
|
|
#ifdef TORRENT_DISK_STATS
|
|
|
|
rename_buffer(j.buffer, "write cache");
|
|
|
|
#endif
|
|
|
|
|
2008-02-08 11:22:05 +01:00
|
|
|
p.piece = j.piece;
|
|
|
|
p.storage = j.storage;
|
2010-01-31 20:14:00 +01:00
|
|
|
p.expire = time_now() + seconds(j.cache_min_time);
|
2008-02-08 11:22:05 +01:00
|
|
|
p.num_blocks = 1;
|
2011-03-16 08:21:58 +01:00
|
|
|
p.num_contiguous_blocks = 1;
|
2011-03-20 06:47:27 +01:00
|
|
|
p.next_block_to_hash = 0;
|
2009-06-10 10:30:55 +02:00
|
|
|
p.blocks.reset(new (std::nothrow) cached_block_entry[blocks_in_piece]);
|
2009-05-23 20:39:55 +02:00
|
|
|
if (!p.blocks) return -1;
|
2008-02-22 05:11:04 +01:00
|
|
|
int block = j.offset / m_block_size;
|
|
|
|
// std::cerr << " adding cache entry for p: " << j.piece << " block: " << block << " cached_blocks: " << m_cache_stats.cache_size << std::endl;
|
2009-06-10 10:30:55 +02:00
|
|
|
p.blocks[block].buf = j.buffer;
|
|
|
|
p.blocks[block].callback.swap(handler);
|
2008-02-22 05:11:04 +01:00
|
|
|
++m_cache_stats.cache_size;
|
2010-01-27 05:25:45 +01:00
|
|
|
cache_lru_index_t& idx = m_pieces.get<1>();
|
|
|
|
TORRENT_ASSERT(p.storage);
|
|
|
|
idx.insert(p);
|
2009-05-23 20:39:55 +02:00
|
|
|
return 0;
|
2008-02-08 11:22:05 +01:00
|
|
|
}
|
|
|
|
|
2008-02-22 05:11:04 +01:00
|
|
|
// fills a piece with data from disk, returns the total number of bytes
// read, -1 on read/allocation error or -2 if no block could be
// allocated at all. Up to num_blocks blocks, starting at start_block,
// are read into freshly allocated cache buffers. On any error the whole
// piece's buffers are released via free_piece(). The cache mutex l must
// be held on entry; it is released around the read_impl() calls.
int disk_io_thread::read_into_piece(cached_piece_entry& p, int start_block
	, int options, int num_blocks, mutex::scoped_lock& l)
{
	TORRENT_ASSERT(num_blocks > 0);
	int piece_size = p.storage->info()->piece_size(p.piece);
	int blocks_in_piece = (piece_size + m_block_size - 1) / m_block_size;

	// one past the last block we managed to allocate a buffer for
	int end_block = start_block;
	int num_read = 0;

	int iov_counter = 0;
	file::iovec_t* iov = TORRENT_ALLOCA(file::iovec_t, (std::min)(blocks_in_piece - start_block, num_blocks));

	int piece_offset = start_block * m_block_size;

	int ret = 0;

	boost::scoped_array<char> buf;
	// allocate a cache buffer per block; stop early if the cache is full
	// (unless the caller asked to ignore the cache size limit)
	for (int i = start_block; i < blocks_in_piece
		&& ((options & ignore_cache_size)
		|| in_use() < m_settings.cache_size); ++i)
	{
		// the last block of the piece may be shorter than m_block_size
		int block_size = (std::min)(piece_size - piece_offset, m_block_size);
		TORRENT_ASSERT(piece_offset <= piece_size);

		// this is a block that is already allocated
		// free it and allocate a new one
		if (p.blocks[i].buf)
		{
			free_buffer(p.blocks[i].buf);
			--p.num_blocks;
			--m_cache_stats.cache_size;
			--m_cache_stats.read_cache_size;
		}
		p.blocks[i].buf = allocate_buffer("read cache");

		// the allocation failed, bail out and release the piece
		if (p.blocks[i].buf == 0)
		{
			free_piece(p, l);
			return -1;
		}
		++p.num_blocks;
		++m_cache_stats.cache_size;
		++m_cache_stats.read_cache_size;
		++end_block;
		++num_read;
		iov[iov_counter].iov_base = p.blocks[i].buf;
		iov[iov_counter].iov_len = block_size;
		++iov_counter;
		piece_offset += m_block_size;
		if (num_read >= num_blocks) break;
	}

	if (end_block == start_block)
	{
		// something failed. Free all buffers
		// we just allocated
		free_piece(p, l);
		return -2;
	}

	TORRENT_ASSERT(iov_counter <= (std::min)(blocks_in_piece - start_block, num_blocks));

	// the buffer_size is the size of the buffer we need to read
	// all these blocks.
	const int buffer_size = (std::min)((end_block - start_block) * m_block_size
		, piece_size - start_block * m_block_size);
	TORRENT_ASSERT(buffer_size > 0);
	TORRENT_ASSERT(buffer_size <= piece_size);
	TORRENT_ASSERT(buffer_size + start_block * m_block_size <= piece_size);

	// with coalesce_reads the whole range is read into one temporary
	// buffer and then scattered into the block buffers; otherwise the
	// block buffers are filled directly via a vectored read
	if (m_settings.coalesce_reads)
		buf.reset(new (std::nothrow) char[buffer_size]);

	if (buf)
	{
		// single contiguous read, performed without holding the lock
		l.unlock();
		file::iovec_t b = { buf.get(), buffer_size };
		ret = p.storage->read_impl(&b, p.piece, start_block * m_block_size, 1);
		l.lock();
		++m_cache_stats.reads;
		if (p.storage->error())
		{
			free_piece(p, l);
			return -1;
		}

		if (ret != buffer_size)
		{
			// this means the file wasn't big enough for this read
			p.storage->get_storage_impl()->set_error(""
				, errors::file_too_short);
			free_piece(p, l);
			return -1;
		}

		// scatter the coalesced read into the individual block buffers
		int offset = 0;
		for (int i = 0; i < iov_counter; ++i)
		{
			TORRENT_ASSERT(iov[i].iov_base);
			TORRENT_ASSERT(iov[i].iov_len > 0);
			TORRENT_ASSERT(int(offset + iov[i].iov_len) <= buffer_size);
			std::memcpy(iov[i].iov_base, buf.get() + offset, iov[i].iov_len);
			offset += iov[i].iov_len;
		}
	}
	else
	{
		// vectored read straight into the block buffers, lock released
		l.unlock();
		ret = p.storage->read_impl(iov, p.piece, start_block * m_block_size, iov_counter);
		l.lock();
		++m_cache_stats.reads;
		if (p.storage->error())
		{
			free_piece(p, l);
			return -1;
		}

		if (ret != buffer_size)
		{
			// this means the file wasn't big enough for this read
			p.storage->get_storage_impl()->set_error(""
				, errors::file_too_short);
			free_piece(p, l);
			return -1;
		}
	}

	TORRENT_ASSERT(ret == buffer_size);
	return ret;
}
|
2009-02-03 08:46:24 +01:00
|
|
|
|
2008-02-22 05:11:04 +01:00
|
|
|
// returns -1 on read error, -2 if there isn't any space in the cache
// or the number of bytes read. Creates a brand-new read-cache entry for
// the piece referenced by j and fills it, starting at the requested
// block, with a cache line whose length is bounded by the available
// cache space, read_cache_line_size and j.max_cache_line. Called with
// the cache mutex l held.
int disk_io_thread::cache_read_block(disk_io_job const& j, mutex::scoped_lock& l)
{
	INVARIANT_CHECK;

	TORRENT_ASSERT(j.cache_min_time >= 0);

	// this function will create a new cached_piece_entry
	// and requires that it doesn't already exist
	cache_piece_index_t& idx = m_read_pieces.get<0>();
	TORRENT_ASSERT(find_cached_piece(m_read_pieces, j, l) == idx.end());

	int piece_size = j.storage->info()->piece_size(j.piece);
	int blocks_in_piece = (piece_size + m_block_size - 1) / m_block_size;

	int start_block = j.offset / m_block_size;

	// size the read-ahead: never past the end of the piece, never more
	// than half the free cache space (but at least 3 blocks), and capped
	// by the configured cache line size and the job's own limit
	int blocks_to_read = blocks_in_piece - start_block;
	blocks_to_read = (std::min)(blocks_to_read, (std::max)((m_settings.cache_size
		+ m_cache_stats.read_cache_size - in_use())/2, 3));
	blocks_to_read = (std::min)(blocks_to_read, m_settings.read_cache_line_size);
	if (j.max_cache_line > 0) blocks_to_read = (std::min)(blocks_to_read, j.max_cache_line);

	// try to make room by flushing other cache blocks; give up with -2
	// if not enough could be evicted
	if (in_use() + blocks_to_read > m_settings.cache_size)
	{
		int clear = in_use() + blocks_to_read - m_settings.cache_size;
		if (flush_cache_blocks(l, clear, j.piece, dont_flush_write_blocks) < clear)
			return -2;
	}

	cached_piece_entry p;
	p.piece = j.piece;
	p.storage = j.storage;
	p.expire = time_now() + seconds(j.cache_min_time);
	p.num_blocks = 0;
	p.num_contiguous_blocks = 0;
	p.next_block_to_hash = 0;
	p.blocks.reset(new (std::nothrow) cached_block_entry[blocks_in_piece]);
	if (!p.blocks) return -1;

	int ret = read_into_piece(p, start_block, 0, blocks_to_read, l);

	// only publish the entry in the cache if the read succeeded
	TORRENT_ASSERT(p.storage);
	if (ret >= 0) idx.insert(p);

	return ret;
}
|
|
|
|
|
2008-11-29 22:33:21 +01:00
|
|
|
#ifdef TORRENT_DEBUG
|
2008-02-22 05:11:04 +01:00
|
|
|
// debug-only invariant check (compiled under TORRENT_DEBUG): walks both
// the write cache (m_pieces) and the read cache (m_read_pieces),
// recounts the allocated block buffers and verifies they agree with the
// bookkeeping in m_cache_stats and, when TORRENT_DISK_STATS is on, with
// the per-category allocation counters.
void disk_io_thread::check_invariant() const
{
	int cached_write_blocks = 0;
	cache_piece_index_t const& idx = m_pieces.get<0>();
	for (cache_piece_index_t::const_iterator i = idx.begin()
		, end(idx.end()); i != end; ++i)
	{
		cached_piece_entry const& p = *i;
		TORRENT_ASSERT(p.blocks);
		// the cached contiguous-run counter must match a recount
		TORRENT_ASSERT(p.num_contiguous_blocks == contiguous_blocks(p));

		TORRENT_ASSERT(p.storage);
		int piece_size = p.storage->info()->piece_size(p.piece);
		int blocks_in_piece = (piece_size + m_block_size - 1) / m_block_size;
		int blocks = 0;
		for (int k = 0; k < blocks_in_piece; ++k)
		{
			if (p.blocks[k].buf)
			{
#ifndef TORRENT_DISABLE_POOL_ALLOCATOR
				TORRENT_ASSERT(is_disk_buffer(p.blocks[k].buf));
#endif
				++blocks;
			}
		}
//		TORRENT_ASSERT(blocks == p.num_blocks);
		cached_write_blocks += blocks;
	}

	int cached_read_blocks = 0;
	for (cache_t::const_iterator i = m_read_pieces.begin()
		, end(m_read_pieces.end()); i != end; ++i)
	{
		cached_piece_entry const& p = *i;
		TORRENT_ASSERT(p.blocks);

		int piece_size = p.storage->info()->piece_size(p.piece);
		int blocks_in_piece = (piece_size + m_block_size - 1) / m_block_size;
		int blocks = 0;
		for (int k = 0; k < blocks_in_piece; ++k)
		{
			if (p.blocks[k].buf)
			{
#ifndef TORRENT_DISABLE_POOL_ALLOCATOR
				TORRENT_ASSERT(is_disk_buffer(p.blocks[k].buf));
#endif
				++blocks;
			}
		}
//		TORRENT_ASSERT(blocks == p.num_blocks);
		cached_read_blocks += blocks;
	}

	TORRENT_ASSERT(cached_read_blocks == m_cache_stats.read_cache_size);
	TORRENT_ASSERT(cached_read_blocks + cached_write_blocks == m_cache_stats.cache_size);

#ifdef TORRENT_DISK_STATS
	// cross-check against the buffer-allocator's per-category counters
	int read_allocs = m_categories.find(std::string("read cache"))->second;
	int write_allocs = m_categories.find(std::string("write cache"))->second;
	TORRENT_ASSERT(cached_read_blocks == read_allocs);
	TORRENT_ASSERT(cached_write_blocks == write_allocs);
#endif

	// when writing, there may be a one block difference, right before an old piece
	// is flushed
	TORRENT_ASSERT(m_cache_stats.cache_size <= m_settings.cache_size + 1);
}
|
|
|
|
#endif
|
|
|
|
|
2010-01-15 17:45:42 +01:00
|
|
|
// reads the full piece specified by j into the read cache
// returns the iterator to it and whether or not it already
// was in the cache (hit). Three cases: the piece is cached but
// incomplete (fill in the missing blocks, hit = false), the piece is
// absent (create a new entry and read everything, hit = false), or the
// piece is fully cached (just refresh its timestamp, hit = true).
// Returns the result of the underlying read, or 0 on a full hit.
// Called with the cache mutex l held.
int disk_io_thread::cache_piece(disk_io_job const& j, cache_piece_index_t::iterator& p
	, bool& hit, int options, mutex::scoped_lock& l)
{
	INVARIANT_CHECK;

	TORRENT_ASSERT(j.cache_min_time >= 0);

	cache_piece_index_t& idx = m_read_pieces.get<0>();
	p = find_cached_piece(m_read_pieces, j, l);

	hit = true;
	int ret = 0;

	int piece_size = j.storage->info()->piece_size(j.piece);
	int blocks_in_piece = (piece_size + m_block_size - 1) / m_block_size;

	if (p != m_read_pieces.end() && p->num_blocks != blocks_in_piece)
	{
		INVARIANT_CHECK;
		// we have the piece in the cache, but not all of the blocks
		ret = read_into_piece(const_cast<cached_piece_entry&>(*p), 0
			, options, blocks_in_piece, l);
		hit = false;
		if (ret < 0) return ret;
		// refresh the entry's expiry so it stays in the cache
		idx.modify(p, update_last_use(j.cache_min_time));
	}
	else if (p == m_read_pieces.end())
	{
		INVARIANT_CHECK;
		// if the piece cannot be found in the cache,
		// read the whole piece starting at the block
		// we got a request for.

		cached_piece_entry pe;
		pe.piece = j.piece;
		pe.storage = j.storage;
		pe.expire = time_now() + seconds(j.cache_min_time);
		pe.num_blocks = 0;
		pe.num_contiguous_blocks = 0;
		pe.next_block_to_hash = 0;
		pe.blocks.reset(new (std::nothrow) cached_block_entry[blocks_in_piece]);
		if (!pe.blocks) return -1;
		ret = read_into_piece(pe, 0, options, INT_MAX, l);

		hit = false;
		if (ret < 0) return ret;
		TORRENT_ASSERT(pe.storage);
		p = idx.insert(pe).first;
	}
	else
	{
		// full cache hit: just refresh the expiry timestamp
		idx.modify(p, update_last_use(j.cache_min_time));
	}
	TORRENT_ASSERT(!m_read_pieces.empty());
	TORRENT_ASSERT(p->piece == j.piece);
	TORRENT_ASSERT(p->storage == j.storage);
	return ret;
}
|
|
|
|
|
|
|
|
// cache the entire piece and hash it. Reads the whole piece into the
// read cache (via cache_piece), computes its SHA-1 into h (unless hash
// checks are disabled), then copies the requested range into j.buffer.
// Returns j.buffer_size on success or a negative error code from the
// underlying read/copy. Takes m_piece_mutex itself.
int disk_io_thread::read_piece_from_cache_and_hash(disk_io_job const& j, sha1_hash& h)
{
	TORRENT_ASSERT(j.buffer);

	TORRENT_ASSERT(j.cache_min_time >= 0);

	mutex::scoped_lock l(m_piece_mutex);

	cache_piece_index_t::iterator p;
	bool hit;
	// ignore_cache_size: hashing needs the whole piece resident even if
	// that temporarily exceeds the configured cache size
	int ret = cache_piece(j, p, hit, ignore_cache_size, l);
	if (ret < 0) return ret;

	int piece_size = j.storage->info()->piece_size(j.piece);
	int blocks_in_piece = (piece_size + m_block_size - 1) / m_block_size;

	if (!m_settings.disable_hash_checks)
	{
		hasher ctx;

		for (int i = 0; i < blocks_in_piece; ++i)
		{
			TORRENT_ASSERT(p->blocks[i].buf);
			// the last block may be shorter than m_block_size; piece_size
			// counts down the bytes remaining to hash
			ctx.update((char const*)p->blocks[i].buf, (std::min)(piece_size, m_block_size));
			piece_size -= m_block_size;
		}
		h = ctx.final();
	}

	ret = copy_from_piece(const_cast<cached_piece_entry&>(*p), hit, j, l);
	TORRENT_ASSERT(ret > 0);
	if (ret < 0) return ret;
	cache_piece_index_t& idx = m_read_pieces.get<0>();
	// NOTE(review): if the entry is erased here, p is invalidated but is
	// still compared/dereferenced in the eviction branch below — confirm
	// this path cannot be taken with num_blocks == 0
	if (p->num_blocks == 0) idx.erase(p);
	else idx.modify(p, update_last_use(j.cache_min_time));

	// if read cache is disabled or we exceeded the
	// limit, remove this piece from the cache
	// also, if the piece wasn't in the cache when
	// the function was called, and we're using an
	// explicit read cache, remove it again
	if (in_use() >= m_settings.cache_size
		|| !m_settings.use_read_cache
		|| (m_settings.explicit_read_cache && !hit))
	{
		TORRENT_ASSERT(!m_read_pieces.empty());
		TORRENT_ASSERT(p->piece == j.piece);
		TORRENT_ASSERT(p->storage == j.storage);
		if (p != m_read_pieces.end())
		{
			free_piece(const_cast<cached_piece_entry&>(*p), l);
			m_read_pieces.erase(p);
		}
	}

	ret = j.buffer_size;
	++m_cache_stats.blocks_read;
	if (hit) ++m_cache_stats.blocks_read_hit;
	return ret;
}
|
|
|
|
|
2009-12-16 11:49:15 +01:00
|
|
|
// this doesn't modify the read cache, it only
|
|
|
|
// checks to see if the given read request can
|
|
|
|
// be fully satisfied from the given cached piece
|
|
|
|
// this is similar to copy_from_piece() but it
|
|
|
|
// doesn't do anything but determining if it's a
|
|
|
|
// cache hit or not
|
2010-01-27 05:25:45 +01:00
|
|
|
bool disk_io_thread::is_cache_hit(cached_piece_entry& p
|
2009-12-16 11:49:15 +01:00
|
|
|
, disk_io_job const& j, mutex::scoped_lock& l)
|
|
|
|
{
|
|
|
|
int block = j.offset / m_block_size;
|
|
|
|
int block_offset = j.offset & (m_block_size-1);
|
|
|
|
int size = j.buffer_size;
|
2010-12-24 23:53:00 +01:00
|
|
|
int min_blocks_to_read = block_offset > 0 && (size > m_block_size - block_offset) ? 2 : 1;
|
2009-12-16 11:49:15 +01:00
|
|
|
TORRENT_ASSERT(size <= m_block_size);
|
|
|
|
int start_block = block;
|
|
|
|
// if we have to read more than one block, and
|
|
|
|
// the first block is there, make sure we test
|
|
|
|
// for the second block
|
2010-01-27 05:25:45 +01:00
|
|
|
if (p.blocks[start_block].buf != 0 && min_blocks_to_read > 1)
|
2009-12-16 11:49:15 +01:00
|
|
|
++start_block;
|
|
|
|
|
2010-12-24 23:53:00 +01:00
|
|
|
#ifdef TORRENT_DEBUG
|
|
|
|
int piece_size = j.storage->info()->piece_size(j.piece);
|
|
|
|
int blocks_in_piece = (piece_size + m_block_size - 1) / m_block_size;
|
|
|
|
TORRENT_ASSERT(start_block < blocks_in_piece);
|
|
|
|
#endif
|
|
|
|
|
2010-01-27 05:25:45 +01:00
|
|
|
return p.blocks[start_block].buf != 0;
|
2009-12-16 11:49:15 +01:00
|
|
|
}
|
|
|
|
|
2010-01-27 05:25:45 +01:00
|
|
|
// copies the range requested by j out of cached piece p into j.buffer,
// reading the missing blocks from disk first if necessary (setting
// hit = false in that case). Returns j.buffer_size on success, -2 when
// the cache cannot hold the required blocks (or explicit_read_cache
// forces a miss), or a negative error from read_into_piece(). Called
// with the cache mutex l held.
int disk_io_thread::copy_from_piece(cached_piece_entry& p, bool& hit
	, disk_io_job const& j, mutex::scoped_lock& l)
{
	TORRENT_ASSERT(j.buffer);

	// copy from the cache and update the last use timestamp
	int block = j.offset / m_block_size;
	int block_offset = j.offset & (m_block_size-1);
	int buffer_offset = 0;
	int size = j.buffer_size;
	// an unaligned request that runs past its first block needs two blocks
	int min_blocks_to_read = block_offset > 0 && (size > m_block_size - block_offset) ? 2 : 1;
	TORRENT_ASSERT(size <= m_block_size);
	int start_block = block;
	// if the first block is present but two are needed, the second block
	// decides whether this is a hit
	if (p.blocks[start_block].buf != 0 && min_blocks_to_read > 1)
		++start_block;

	int piece_size = j.storage->info()->piece_size(j.piece);
	int blocks_in_piece = (piece_size + m_block_size - 1) / m_block_size;
	TORRENT_ASSERT(start_block < blocks_in_piece);

	// if block_offset > 0, we need to read two blocks, and then
	// copy parts of both, because it's not aligned to the block
	// boundaries
	if (p.blocks[start_block].buf == 0)
	{
		// if we use an explicit read cache, pretend there's no
		// space to force hitting disk without caching anything
		if (m_settings.explicit_read_cache) return -2;

		// extend the read up to the next block already in cache
		int end_block = start_block;
		while (end_block < blocks_in_piece && p.blocks[end_block].buf == 0) ++end_block;

		// size the read-ahead: at most half the free cache space (but at
		// least 3 blocks), capped by the cache line size and the job's
		// limit, and never less than what this request itself needs
		int blocks_to_read = end_block - block;
		blocks_to_read = (std::min)(blocks_to_read, (std::max)((m_settings.cache_size
			+ m_cache_stats.read_cache_size - in_use())/2, 3));
		blocks_to_read = (std::min)(blocks_to_read, m_settings.read_cache_line_size);
		blocks_to_read = (std::max)(blocks_to_read, min_blocks_to_read);
		if (j.max_cache_line > 0) blocks_to_read = (std::min)(blocks_to_read, j.max_cache_line);

		// if we don't have enough space for the new piece, try flushing something else
		if (in_use() + blocks_to_read > m_settings.cache_size)
		{
			int clear = in_use() + blocks_to_read - m_settings.cache_size;
			if (flush_cache_blocks(l, clear, p.piece, dont_flush_write_blocks) < clear)
				return -2;
		}

		int ret = read_into_piece(p, block, 0, blocks_to_read, l);
		hit = false;
		if (ret < 0) return ret;
		// not enough bytes came back to satisfy the request
		if (ret < size + block_offset) return -2;
		TORRENT_ASSERT(p.blocks[block].buf);
	}

	// build a vector of all the buffers we need to free
	// and free them all in one go
	std::vector<char*> buffers;
	while (size > 0)
	{
		TORRENT_ASSERT(p.blocks[block].buf);
		int to_copy = (std::min)(m_block_size
			- block_offset, size);
		std::memcpy(j.buffer + buffer_offset
			, p.blocks[block].buf + block_offset
			, to_copy);
		size -= to_copy;
		// only the first copied block can have an intra-block offset
		block_offset = 0;
		buffer_offset += to_copy;
		if (m_settings.volatile_read_cache)
		{
			// if volatile read cache is set, the assumption is
			// that no other peer is likely to request the same
			// piece. Therefore, for each request out of the cache
			// we clear the block that was requested and any blocks
			// the peer skipped
			for (int i = block; i >= 0 && p.blocks[i].buf; --i)
			{
				buffers.push_back(p.blocks[i].buf);
				p.blocks[i].buf = 0;
				--p.num_blocks;
				--m_cache_stats.cache_size;
				--m_cache_stats.read_cache_size;
			}
		}
		++block;
	}
	if (!buffers.empty()) free_multiple_buffers(&buffers[0], buffers.size());
	return j.buffer_size;
}
|
|
|
|
|
// attempts to service the read job 'j' from the in-memory read cache.
// on a miss (and when allowed), the piece is pulled into the cache first
// and then copied out. returns the number of bytes copied on success,
// -1 on a disk error (j.error is set by the callee), or -2 when the
// cache cannot or may not be used (caching disabled, explicit cache
// miss, or no room). 'hit' is set to true iff the data was already
// cached. must be called without m_piece_mutex held; it is taken here.
int disk_io_thread::try_read_from_cache(disk_io_job const& j, bool& hit)
{
	TORRENT_ASSERT(j.buffer);
	TORRENT_ASSERT(j.cache_min_time >= 0);

	mutex::scoped_lock l(m_piece_mutex);
	// a return of -2 tells the caller to read straight from disk
	if (!m_settings.use_read_cache) return -2;

	cache_piece_index_t& idx = m_read_pieces.get<0>();
	cache_piece_index_t::iterator p
		= find_cached_piece(m_read_pieces, j, l);

	hit = true;
	int ret = 0;

	// if the piece cannot be found in the cache,
	// read the whole piece starting at the block
	// we got a request for.
	if (p == idx.end())
	{
		// if we use an explicit read cache and we
		// couldn't find the block in the cache,
		// pretend that there's not enough space
		// to cache it, to force the read operation
		// go go straight to disk
		if (m_settings.explicit_read_cache) return -2;

		ret = cache_read_block(j, l);
		hit = false;
		if (ret < 0) return ret;

		// the block was just read into the cache; look the piece up
		// again so we can copy out of it below
		p = find_cached_piece(m_read_pieces, j, l);
		TORRENT_ASSERT(!m_read_pieces.empty());
		TORRENT_ASSERT(p->piece == j.piece);
		TORRENT_ASSERT(p->storage == j.storage);
	}

	TORRENT_ASSERT(p != idx.end());

	// const_cast is required because multi_index iterators are const;
	// copy_from_piece may free blocks (e.g. volatile read cache)
	ret = copy_from_piece(const_cast<cached_piece_entry&>(*p), hit, j, l);
	if (ret < 0) return ret;
	// if the copy drained the piece entirely, drop the cache entry;
	// otherwise refresh its expiry time so it isn't flushed early
	if (p->num_blocks == 0) idx.erase(p);
	else idx.modify(p, update_last_use(j.cache_min_time));

	ret = j.buffer_size;
	++m_cache_stats.blocks_read;
	if (hit) ++m_cache_stats.blocks_read_hit;
	return ret;
}
2009-07-21 06:32:27 +02:00
|
|
|
size_type disk_io_thread::queue_buffer_size() const
|
2009-06-10 10:30:55 +02:00
|
|
|
{
|
2009-10-20 04:49:56 +02:00
|
|
|
mutex::scoped_lock l(m_queue_mutex);
|
2009-07-21 06:32:27 +02:00
|
|
|
return m_queue_buffer_size;
|
2009-06-10 10:30:55 +02:00
|
|
|
}
|
|
|
|
|
// enqueues a job on the disk thread's job queue. the caller must
// already hold m_queue_mutex (passed in as 'l'). for write jobs the
// queued byte counter is updated and, when it crosses the configured
// high watermark, the exceeded flag is raised so the session can
// throttle peers. returns the number of queued write bytes.
int disk_io_thread::add_job(disk_io_job const& j
	, mutex::scoped_lock& l
	, boost::function<void(int, disk_io_job const&)> const& f)
{
	m_jobs.push_back(j);
	// swap the callback into the queued copy instead of copying it,
	// to avoid a potentially expensive boost::function copy. the
	// const_cast is deliberate; the caller's 'f' is consumed here
	m_jobs.back().callback.swap(const_cast<boost::function<void(int, disk_io_job const&)>&>(f));
	// timestamp for queue-latency statistics
	m_jobs.back().start_time = time_now_hires();

	if (j.action == disk_io_job::write)
	{
		m_queue_buffer_size += j.buffer_size;
		// max_queued_disk_bytes == 0 means "no limit"
		if (m_queue_buffer_size >= m_settings.max_queued_disk_bytes
			&& m_settings.max_queued_disk_bytes > 0)
			m_exceeded_write_queue = true;
	}
	// wake up the disk thread; the signal is issued while the lock is held
	m_signal.signal(l);
	return m_queue_buffer_size;
}
2011-03-16 08:45:51 +01:00
|
|
|
int disk_io_thread::add_job(disk_io_job const& j
|
2007-06-10 22:46:09 +02:00
|
|
|
, boost::function<void(int, disk_io_job const&)> const& f)
|
|
|
|
{
|
2009-05-13 19:17:33 +02:00
|
|
|
TORRENT_ASSERT(!m_abort);
|
2009-01-28 08:09:10 +01:00
|
|
|
TORRENT_ASSERT(j.storage
|
|
|
|
|| j.action == disk_io_job::abort_thread
|
|
|
|
|| j.action == disk_io_job::update_settings);
|
2008-02-22 05:11:04 +01:00
|
|
|
TORRENT_ASSERT(j.buffer_size <= m_block_size);
|
2009-10-20 04:49:56 +02:00
|
|
|
mutex::scoped_lock l(m_queue_mutex);
|
2011-03-16 08:45:51 +01:00
|
|
|
return add_job(j, l, f);
|
2007-06-10 22:46:09 +02:00
|
|
|
}
|
|
|
|
|
2008-07-18 01:41:46 +02:00
|
|
|
bool disk_io_thread::test_error(disk_io_job& j)
|
|
|
|
{
|
2009-01-28 08:09:10 +01:00
|
|
|
TORRENT_ASSERT(j.storage);
|
2008-07-18 01:41:46 +02:00
|
|
|
error_code const& ec = j.storage->error();
|
|
|
|
if (ec)
|
|
|
|
{
|
2009-05-23 05:05:21 +02:00
|
|
|
j.buffer = 0;
|
2009-06-10 10:30:55 +02:00
|
|
|
j.str.clear();
|
2008-07-18 01:41:46 +02:00
|
|
|
j.error = ec;
|
|
|
|
j.error_file = j.storage->error_file();
|
2008-11-29 22:33:21 +01:00
|
|
|
#ifdef TORRENT_DEBUG
|
2009-10-20 04:49:56 +02:00
|
|
|
printf("ERROR: '%s' in %s\n", ec.message().c_str(), j.error_file.c_str());
|
2008-07-18 01:41:46 +02:00
|
|
|
#endif
|
2009-06-10 10:30:55 +02:00
|
|
|
j.storage->clear_error();
|
2008-07-18 01:41:46 +02:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2009-03-31 10:05:46 +02:00
|
|
|
void disk_io_thread::post_callback(
|
|
|
|
boost::function<void(int, disk_io_job const&)> const& handler
|
|
|
|
, disk_io_job const& j, int ret)
|
|
|
|
{
|
|
|
|
if (!handler) return;
|
|
|
|
|
2009-11-23 00:55:54 +01:00
|
|
|
m_ios.post(boost::bind(handler, ret, j));
|
2009-03-31 10:05:46 +02:00
|
|
|
}
|
|
|
|
|
// per-action classification bits, indexed via the action_flags table
enum action_flags_t
{
	// the job reads from disk (candidate for elevator reordering)
	read_operation = 1
	// the job owns a disk buffer that must be freed when done
	, buffer_operation = 2
	// the job should be cancelled when its torrent/thread aborts
	, cancel_on_abort = 4
};
2009-09-06 02:57:01 +02:00
|
|
|
static const boost::uint8_t action_flags[] =
|
2009-09-05 09:21:10 +02:00
|
|
|
{
|
2010-01-12 02:56:48 +01:00
|
|
|
read_operation + buffer_operation + cancel_on_abort // read
|
2009-09-05 09:21:10 +02:00
|
|
|
, buffer_operation // write
|
|
|
|
, 0 // hash
|
2010-01-12 02:56:48 +01:00
|
|
|
, 0 // move_storage
|
|
|
|
, 0 // release_files
|
|
|
|
, 0 // delete_files
|
|
|
|
, 0 // check_fastresume
|
|
|
|
, read_operation + cancel_on_abort // check_files
|
|
|
|
, 0 // save_resume_data
|
|
|
|
, 0 // rename_file
|
|
|
|
, 0 // abort_thread
|
|
|
|
, 0 // clear_read_cache
|
|
|
|
, 0 // abort_torrent
|
|
|
|
, cancel_on_abort // update_settings
|
|
|
|
, read_operation + cancel_on_abort // read_and_hash
|
2010-01-15 17:45:42 +01:00
|
|
|
, read_operation + cancel_on_abort // cache_piece
|
2010-01-09 19:40:05 +01:00
|
|
|
, 0 // finalize_file
|
2009-09-05 09:21:10 +02:00
|
|
|
};
|
|
|
|
|
2010-01-12 02:56:48 +01:00
|
|
|
bool should_cancel_on_abort(disk_io_job const& j)
|
2009-09-05 09:21:10 +02:00
|
|
|
{
|
2011-02-21 06:24:41 +01:00
|
|
|
TORRENT_ASSERT(j.action >= 0 && j.action < int(sizeof(action_flags)));
|
2010-01-12 02:56:48 +01:00
|
|
|
return action_flags[j.action] & cancel_on_abort;
|
2009-09-05 09:21:10 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
bool is_read_operation(disk_io_job const& j)
|
|
|
|
{
|
2011-02-21 06:24:41 +01:00
|
|
|
TORRENT_ASSERT(j.action >= 0 && j.action < int(sizeof(action_flags)));
|
2009-09-05 09:21:10 +02:00
|
|
|
return action_flags[j.action] & read_operation;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool operation_has_buffer(disk_io_job const& j)
|
|
|
|
{
|
2011-02-21 06:24:41 +01:00
|
|
|
TORRENT_ASSERT(j.action >= 0 && j.action < int(sizeof(action_flags)));
|
2009-09-05 09:21:10 +02:00
|
|
|
return action_flags[j.action] & buffer_operation;
|
|
|
|
}
|
|
|
|
|
2009-10-20 04:49:56 +02:00
|
|
|
void disk_io_thread::thread_fun()
|
2007-06-10 22:46:09 +02:00
|
|
|
{
|
2011-02-11 08:20:11 +01:00
|
|
|
#ifdef TORRENT_DISK_STATS
|
|
|
|
m_log.open("disk_io_thread.log", std::ios::trunc);
|
|
|
|
#endif
|
|
|
|
|
|
|
|
// figure out how much physical RAM there is in
|
|
|
|
// this machine. This is used for automatically
|
|
|
|
// sizing the disk cache size when it's set to
|
|
|
|
// automatic.
|
|
|
|
#ifdef TORRENT_BSD
|
|
|
|
int mib[2] = { CTL_HW, HW_MEMSIZE };
|
|
|
|
size_t len = sizeof(m_physical_ram);
|
|
|
|
if (sysctl(mib, 2, &m_physical_ram, &len, NULL, 0) != 0)
|
|
|
|
m_physical_ram = 0;
|
|
|
|
#elif defined TORRENT_WINDOWS
|
|
|
|
MEMORYSTATUSEX ms;
|
|
|
|
ms.dwLength = sizeof(MEMORYSTATUSEX);
|
|
|
|
if (GlobalMemoryStatusEx(&ms))
|
|
|
|
m_physical_ram = ms.ullTotalPhys;
|
|
|
|
else
|
|
|
|
m_physical_ram = 0;
|
|
|
|
#elif defined TORRENT_LINUX
|
|
|
|
m_physical_ram = sysconf(_SC_PHYS_PAGES);
|
|
|
|
m_physical_ram *= sysconf(_SC_PAGESIZE);
|
|
|
|
#elif defined TORRENT_AMIGA
|
|
|
|
m_physical_ram = AvailMem(MEMF_PUBLIC);
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#if TORRENT_USE_RLIMIT
|
|
|
|
if (m_physical_ram > 0)
|
|
|
|
{
|
|
|
|
struct rlimit r;
|
|
|
|
if (getrlimit(RLIMIT_AS, &r) == 0 && r.rlim_cur != RLIM_INFINITY)
|
|
|
|
{
|
|
|
|
if (m_physical_ram > r.rlim_cur)
|
|
|
|
m_physical_ram = r.rlim_cur;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif
|
2010-01-12 02:56:48 +01:00
|
|
|
// 1 = forward in list, -1 = backwards in list
|
2009-09-05 09:21:10 +02:00
|
|
|
int elevator_direction = 1;
|
|
|
|
|
2010-07-23 18:15:14 +02:00
|
|
|
read_jobs_t::iterator elevator_job_pos = m_sorted_read_jobs.begin();
|
2010-04-24 23:53:45 +02:00
|
|
|
size_type last_elevator_pos = 0;
|
|
|
|
bool need_update_elevator_pos = false;
|
2011-03-20 20:17:59 +01:00
|
|
|
int immediate_jobs_in_row = 0;
|
2010-01-12 02:56:48 +01:00
|
|
|
|
2007-06-10 22:46:09 +02:00
|
|
|
for (;;)
|
|
|
|
{
|
2011-02-04 04:02:23 +01:00
|
|
|
// used to indicate whether or not we should post the
|
|
|
|
// 'restart download' event or not.
|
|
|
|
bool post = false;
|
|
|
|
|
2007-09-17 10:15:54 +02:00
|
|
|
#ifdef TORRENT_DISK_STATS
|
|
|
|
m_log << log_time() << " idle" << std::endl;
|
|
|
|
#endif
|
2009-10-20 04:49:56 +02:00
|
|
|
mutex::scoped_lock jl(m_queue_mutex);
|
2008-03-08 07:06:31 +01:00
|
|
|
|
2010-07-23 18:15:14 +02:00
|
|
|
while (m_jobs.empty() && m_sorted_read_jobs.empty() && !m_abort)
|
2009-03-14 10:24:58 +01:00
|
|
|
{
|
|
|
|
// if there hasn't been an event in one second
|
|
|
|
// see if we should flush the cache
|
2009-04-15 18:32:05 +02:00
|
|
|
// if (!m_signal.timed_wait(jl, boost::posix_time::seconds(1)))
|
|
|
|
// flush_expired_pieces();
|
|
|
|
m_signal.wait(jl);
|
2009-10-20 04:49:56 +02:00
|
|
|
m_signal.clear(jl);
|
2009-03-14 10:24:58 +01:00
|
|
|
}
|
|
|
|
|
2008-06-09 06:46:34 +02:00
|
|
|
if (m_abort && m_jobs.empty())
|
|
|
|
{
|
2008-06-12 06:40:37 +02:00
|
|
|
jl.unlock();
|
|
|
|
|
2009-10-20 04:49:56 +02:00
|
|
|
mutex::scoped_lock l(m_piece_mutex);
|
2008-06-09 06:46:34 +02:00
|
|
|
// flush all disk caches
|
2010-01-27 05:25:45 +01:00
|
|
|
cache_piece_index_t& widx = m_pieces.get<0>();
|
|
|
|
for (cache_piece_index_t::iterator i = widx.begin()
|
|
|
|
, end(widx.end()); i != end; ++i)
|
|
|
|
flush_range(const_cast<cached_piece_entry&>(*i), 0, INT_MAX, l);
|
|
|
|
|
2010-05-04 04:58:23 +02:00
|
|
|
#ifdef TORRENT_DISABLE_POOL_ALLOCATOR
|
|
|
|
// since we're aborting the thread, we don't actually
|
|
|
|
// need to free all the blocks individually. We can just
|
|
|
|
// clear the piece list and the memory will be freed when we
|
|
|
|
// destruct the m_pool. If we're not using a pool, we actually
|
|
|
|
// have to free everything individually though
|
2010-01-27 05:25:45 +01:00
|
|
|
cache_piece_index_t& idx = m_read_pieces.get<0>();
|
|
|
|
for (cache_piece_index_t::iterator i = idx.begin()
|
|
|
|
, end(idx.end()); i != end; ++i)
|
|
|
|
free_piece(const_cast<cached_piece_entry&>(*i), l);
|
2010-05-04 04:58:23 +02:00
|
|
|
#endif
|
2010-01-27 05:25:45 +01:00
|
|
|
|
2008-06-09 06:46:34 +02:00
|
|
|
m_pieces.clear();
|
|
|
|
m_read_pieces.clear();
|
2009-05-13 19:17:33 +02:00
|
|
|
// release the io_service to allow the run() call to return
|
|
|
|
// we do this once we stop posting new callbacks to it.
|
|
|
|
m_work.reset();
|
2008-06-09 06:46:34 +02:00
|
|
|
return;
|
|
|
|
}
|
2007-06-10 22:46:09 +02:00
|
|
|
|
2010-01-12 02:56:48 +01:00
|
|
|
disk_io_job j;
|
2009-09-05 09:21:10 +02:00
|
|
|
|
2011-03-20 07:37:19 +01:00
|
|
|
ptime now = time_now_hires();
|
|
|
|
m_queue_time.add_sample(total_microseconds(now - j.start_time));
|
|
|
|
ptime operation_start = now;
|
|
|
|
|
2011-03-20 20:17:59 +01:00
|
|
|
// make sure we don't starve out the read queue by just issuing
|
|
|
|
// write jobs constantly, mix in a read job every now and then
|
|
|
|
// with a configurable ratio
|
|
|
|
bool pick_read_job = m_jobs.empty()
|
|
|
|
|| (immediate_jobs_in_row >= m_settings.read_job_every
|
|
|
|
&& !m_sorted_read_jobs.empty());
|
|
|
|
|
|
|
|
if (!pick_read_job)
|
2009-09-05 09:21:10 +02:00
|
|
|
{
|
2010-01-12 02:56:48 +01:00
|
|
|
// we have a job in the job queue. If it's
|
|
|
|
// a read operation and we are allowed to
|
|
|
|
// reorder jobs, sort it into the read job
|
|
|
|
// list and continue, otherwise just pop it
|
|
|
|
// and use it later
|
|
|
|
j = m_jobs.front();
|
|
|
|
m_jobs.pop_front();
|
2010-03-03 05:32:06 +01:00
|
|
|
if (j.action == disk_io_job::write)
|
|
|
|
{
|
|
|
|
TORRENT_ASSERT(m_queue_buffer_size >= j.buffer_size);
|
|
|
|
m_queue_buffer_size -= j.buffer_size;
|
2011-02-04 04:02:23 +01:00
|
|
|
|
|
|
|
if (m_exceeded_write_queue)
|
|
|
|
{
|
|
|
|
int low_watermark = m_settings.max_queued_disk_bytes_low_watermark == 0
|
2011-02-13 23:27:02 +01:00
|
|
|
? m_settings.max_queued_disk_bytes / 2
|
2011-02-04 04:02:23 +01:00
|
|
|
: m_settings.max_queued_disk_bytes_low_watermark;
|
|
|
|
if (low_watermark >= m_settings.max_queued_disk_bytes)
|
2011-02-13 23:27:02 +01:00
|
|
|
low_watermark = m_settings.max_queued_disk_bytes / 2;
|
2011-02-04 04:02:23 +01:00
|
|
|
|
|
|
|
if (m_queue_buffer_size < low_watermark
|
2011-02-13 23:27:02 +01:00
|
|
|
|| m_settings.max_queued_disk_bytes == 0)
|
2011-02-04 04:02:23 +01:00
|
|
|
{
|
2011-02-13 23:27:02 +01:00
|
|
|
m_exceeded_write_queue = false;
|
2011-02-04 04:02:23 +01:00
|
|
|
// we just dropped below the high watermark of number of bytes
|
|
|
|
// queued for writing to the disk. Notify the session so that it
|
|
|
|
// can trigger all the connections waiting for this event
|
|
|
|
post = true;
|
|
|
|
}
|
|
|
|
}
|
2010-03-03 05:32:06 +01:00
|
|
|
}
|
|
|
|
|
2010-01-12 02:56:48 +01:00
|
|
|
jl.unlock();
|
2009-09-05 09:21:10 +02:00
|
|
|
|
2010-01-12 02:56:48 +01:00
|
|
|
bool defer = false;
|
2009-09-05 09:21:10 +02:00
|
|
|
|
2010-01-12 02:56:48 +01:00
|
|
|
if (is_read_operation(j))
|
2009-09-05 09:21:10 +02:00
|
|
|
{
|
2010-01-12 02:56:48 +01:00
|
|
|
defer = true;
|
|
|
|
|
|
|
|
// at this point the operation we're looking
|
|
|
|
// at is a read operation. If this read operation
|
|
|
|
// can be fully satisfied by the read cache, handle
|
|
|
|
// it immediately
|
|
|
|
if (m_settings.use_read_cache)
|
2009-09-05 09:21:10 +02:00
|
|
|
{
|
2010-01-12 02:56:48 +01:00
|
|
|
#ifdef TORRENT_DISK_STATS
|
|
|
|
m_log << log_time() << " check_cache_hit" << std::endl;
|
|
|
|
#endif
|
|
|
|
// unfortunately we need to lock the cache
|
|
|
|
// if the cache querying function would be
|
|
|
|
// made asyncronous, this would not be
|
|
|
|
// necessary anymore
|
|
|
|
mutex::scoped_lock l(m_piece_mutex);
|
2010-01-27 05:25:45 +01:00
|
|
|
cache_piece_index_t::iterator p
|
2010-01-12 02:56:48 +01:00
|
|
|
= find_cached_piece(m_read_pieces, j, l);
|
|
|
|
|
2010-01-27 05:25:45 +01:00
|
|
|
cache_piece_index_t& idx = m_read_pieces.get<0>();
|
2010-01-12 02:56:48 +01:00
|
|
|
// if it's a cache hit, process the job immediately
|
2010-01-27 05:25:45 +01:00
|
|
|
if (p != idx.end() && is_cache_hit(const_cast<cached_piece_entry&>(*p), j, l))
|
2010-01-12 02:56:48 +01:00
|
|
|
defer = false;
|
|
|
|
}
|
|
|
|
}
|
2009-09-05 09:21:10 +02:00
|
|
|
|
2010-01-14 00:37:23 +01:00
|
|
|
TORRENT_ASSERT(j.offset >= 0);
|
2010-01-12 02:56:48 +01:00
|
|
|
if (m_settings.allow_reordered_disk_operations && defer)
|
|
|
|
{
|
|
|
|
#ifdef TORRENT_DISK_STATS
|
|
|
|
m_log << log_time() << " sorting_job" << std::endl;
|
|
|
|
#endif
|
2011-03-15 03:21:28 +01:00
|
|
|
ptime sort_start = time_now_hires();
|
|
|
|
|
2010-01-12 02:56:48 +01:00
|
|
|
size_type phys_off = j.storage->physical_offset(j.piece, j.offset);
|
2010-07-23 18:15:14 +02:00
|
|
|
need_update_elevator_pos = need_update_elevator_pos || m_sorted_read_jobs.empty();
|
|
|
|
m_sorted_read_jobs.insert(std::pair<size_type, disk_io_job>(phys_off, j));
|
2011-03-15 03:21:28 +01:00
|
|
|
|
2011-03-19 23:23:58 +01:00
|
|
|
ptime now = time_now_hires();
|
|
|
|
m_sort_time.add_sample(total_microseconds(now - sort_start));
|
|
|
|
m_cache_stats.cumulative_sort_time += total_milliseconds(now - sort_start);
|
2011-03-20 09:29:18 +01:00
|
|
|
m_cache_stats.cumulative_job_time += total_milliseconds(now - operation_start);
|
2010-01-12 02:56:48 +01:00
|
|
|
continue;
|
|
|
|
}
|
2011-03-20 20:17:59 +01:00
|
|
|
|
|
|
|
++immediate_jobs_in_row;
|
2010-01-12 02:56:48 +01:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
// the job queue is empty, pick the next read job
|
|
|
|
// from the sorted job list. So we don't need the
|
|
|
|
// job queue lock anymore
|
|
|
|
jl.unlock();
|
2009-09-05 09:21:10 +02:00
|
|
|
|
2011-03-20 20:17:59 +01:00
|
|
|
immediate_jobs_in_row = 0;
|
|
|
|
|
2010-07-23 18:15:14 +02:00
|
|
|
TORRENT_ASSERT(!m_sorted_read_jobs.empty());
|
2009-12-16 11:49:15 +01:00
|
|
|
|
2010-07-23 18:15:14 +02:00
|
|
|
// if m_sorted_read_jobs used to be empty,
|
2010-04-24 23:53:45 +02:00
|
|
|
// we need to update the elevator position
|
|
|
|
if (need_update_elevator_pos)
|
|
|
|
{
|
2010-07-23 18:15:14 +02:00
|
|
|
elevator_job_pos = m_sorted_read_jobs.lower_bound(last_elevator_pos);
|
2010-04-24 23:53:45 +02:00
|
|
|
need_update_elevator_pos = false;
|
|
|
|
}
|
|
|
|
|
2010-01-12 02:56:48 +01:00
|
|
|
// if we've reached the end, change the elevator direction
|
2010-07-23 18:15:14 +02:00
|
|
|
if (elevator_job_pos == m_sorted_read_jobs.end())
|
2010-01-12 02:56:48 +01:00
|
|
|
{
|
|
|
|
elevator_direction = -1;
|
|
|
|
--elevator_job_pos;
|
|
|
|
}
|
2010-07-23 18:15:14 +02:00
|
|
|
TORRENT_ASSERT(!m_sorted_read_jobs.empty());
|
2009-09-05 09:21:10 +02:00
|
|
|
|
2010-07-23 18:15:14 +02:00
|
|
|
TORRENT_ASSERT(elevator_job_pos != m_sorted_read_jobs.end());
|
2010-01-12 02:56:48 +01:00
|
|
|
j = elevator_job_pos->second;
|
|
|
|
read_jobs_t::iterator to_erase = elevator_job_pos;
|
2009-09-05 09:21:10 +02:00
|
|
|
|
2010-01-12 02:56:48 +01:00
|
|
|
// if we've reached the begining of the sorted list,
|
|
|
|
// change the elvator direction
|
2010-07-23 18:15:14 +02:00
|
|
|
if (elevator_job_pos == m_sorted_read_jobs.begin())
|
2010-01-12 02:56:48 +01:00
|
|
|
elevator_direction = 1;
|
2009-09-05 09:21:10 +02:00
|
|
|
|
2010-01-12 02:56:48 +01:00
|
|
|
// move the elevator before erasing the job we're processing
|
|
|
|
// to keep the iterator valid
|
|
|
|
if (elevator_direction > 0) ++elevator_job_pos;
|
|
|
|
else --elevator_job_pos;
|
2009-09-05 09:21:10 +02:00
|
|
|
|
2010-04-14 08:26:38 +02:00
|
|
|
TORRENT_ASSERT(to_erase != elevator_job_pos);
|
2010-04-24 23:53:45 +02:00
|
|
|
last_elevator_pos = to_erase->first;
|
2010-07-23 18:15:14 +02:00
|
|
|
m_sorted_read_jobs.erase(to_erase);
|
2009-09-05 09:21:10 +02:00
|
|
|
}
|
|
|
|
|
2008-04-13 00:08:07 +02:00
|
|
|
// if there's a buffer in this job, it will be freed
|
|
|
|
// when this holder is destructed, unless it has been
|
|
|
|
// released.
|
|
|
|
disk_buffer_holder holder(*this
|
2010-01-12 02:56:48 +01:00
|
|
|
, operation_has_buffer(j) ? j.buffer : 0);
|
2008-04-13 00:08:07 +02:00
|
|
|
|
2011-02-04 04:02:23 +01:00
|
|
|
if (post && m_queue_callback)
|
2011-02-13 23:27:02 +01:00
|
|
|
{
|
|
|
|
TORRENT_ASSERT(m_exceeded_write_queue == false);
|
2011-02-04 04:02:23 +01:00
|
|
|
m_ios.post(m_queue_callback);
|
2011-02-13 23:27:02 +01:00
|
|
|
}
|
2009-06-10 10:30:55 +02:00
|
|
|
|
2008-06-12 06:40:37 +02:00
|
|
|
flush_expired_pieces();
|
2007-06-10 22:46:09 +02:00
|
|
|
|
|
|
|
int ret = 0;
|
|
|
|
|
2009-01-28 08:09:10 +01:00
|
|
|
TORRENT_ASSERT(j.storage
|
|
|
|
|| j.action == disk_io_job::abort_thread
|
|
|
|
|| j.action == disk_io_job::update_settings);
|
2007-09-17 10:15:54 +02:00
|
|
|
#ifdef TORRENT_DISK_STATS
|
2008-02-14 04:48:20 +01:00
|
|
|
ptime start = time_now();
|
|
|
|
#endif
|
2010-01-31 20:14:00 +01:00
|
|
|
|
|
|
|
if (j.cache_min_time < 0)
|
|
|
|
j.cache_min_time = j.cache_min_time == 0 ? m_settings.default_cache_min_age
|
|
|
|
: (std::max)(m_settings.default_cache_min_age, j.cache_min_time);
|
|
|
|
|
2011-03-10 06:01:36 +01:00
|
|
|
TORRENT_TRY
|
|
|
|
{
|
2008-04-13 23:26:57 +02:00
|
|
|
|
2009-01-21 08:31:49 +01:00
|
|
|
if (j.storage && j.storage->get_storage_impl()->m_settings == 0)
|
|
|
|
j.storage->get_storage_impl()->m_settings = &m_settings;
|
|
|
|
|
2008-04-13 23:26:57 +02:00
|
|
|
switch (j.action)
|
2008-02-14 04:48:20 +01:00
|
|
|
{
|
2009-01-10 06:46:02 +01:00
|
|
|
case disk_io_job::update_settings:
|
|
|
|
{
|
|
|
|
#ifdef TORRENT_DISK_STATS
|
|
|
|
m_log << log_time() << " update_settings " << std::endl;
|
|
|
|
#endif
|
|
|
|
TORRENT_ASSERT(j.buffer);
|
|
|
|
session_settings const& s = *((session_settings*)j.buffer);
|
|
|
|
TORRENT_ASSERT(s.cache_size >= 0);
|
|
|
|
TORRENT_ASSERT(s.cache_expiry > 0);
|
|
|
|
|
2010-01-23 04:02:32 +01:00
|
|
|
#if defined TORRENT_WINDOWS
|
|
|
|
if (m_settings.low_prio_disk != s.low_prio_disk)
|
|
|
|
{
|
|
|
|
m_file_pool.set_low_prio_io(s.low_prio_disk);
|
|
|
|
// we need to close all files, since the prio
|
|
|
|
// only takes affect when files are opened
|
|
|
|
m_file_pool.release(0);
|
|
|
|
}
|
|
|
|
#endif
|
2009-01-21 08:31:49 +01:00
|
|
|
m_settings = s;
|
2010-01-23 04:02:32 +01:00
|
|
|
m_file_pool.resize(m_settings.file_pool_size);
|
|
|
|
#if defined __APPLE__ && defined __MACH__ && MAC_OS_X_VERSION_MIN_REQUIRED >= 1050
|
|
|
|
setiopolicy_np(IOPOL_TYPE_DISK, IOPOL_SCOPE_THREAD
|
|
|
|
, m_settings.low_prio_disk ? IOPOL_THROTTLE : IOPOL_DEFAULT);
|
2010-07-24 04:54:42 +02:00
|
|
|
#elif defined IOPRIO_WHO_PROCESS
|
|
|
|
syscall(ioprio_set, IOPRIO_WHO_PROCESS, getpid());
|
2010-01-23 04:02:32 +01:00
|
|
|
#endif
|
2010-03-10 08:14:10 +01:00
|
|
|
if (m_settings.cache_size == -1)
|
|
|
|
{
|
|
|
|
// the cache size is set to automatic. Make it
|
|
|
|
// depend on the amount of physical RAM
|
|
|
|
// if we don't know how much RAM we have, just set the
|
|
|
|
// cache size to 16 MiB (1024 blocks)
|
|
|
|
if (m_physical_ram == 0)
|
|
|
|
m_settings.cache_size = 1024;
|
|
|
|
else
|
|
|
|
m_settings.cache_size = m_physical_ram / 8 / m_block_size;
|
|
|
|
}
|
2009-09-02 18:42:33 +02:00
|
|
|
break;
|
2009-01-10 06:46:02 +01:00
|
|
|
}
|
2008-11-17 02:19:46 +01:00
|
|
|
case disk_io_job::abort_torrent:
|
|
|
|
{
|
2008-12-30 09:20:25 +01:00
|
|
|
#ifdef TORRENT_DISK_STATS
|
|
|
|
m_log << log_time() << " abort_torrent " << std::endl;
|
|
|
|
#endif
|
2009-10-20 04:49:56 +02:00
|
|
|
mutex::scoped_lock jl(m_queue_mutex);
|
2008-11-17 02:19:46 +01:00
|
|
|
for (std::list<disk_io_job>::iterator i = m_jobs.begin();
|
|
|
|
i != m_jobs.end();)
|
|
|
|
{
|
|
|
|
if (i->storage != j.storage)
|
|
|
|
{
|
|
|
|
++i;
|
|
|
|
continue;
|
|
|
|
}
|
2010-01-12 02:56:48 +01:00
|
|
|
if (should_cancel_on_abort(*i))
|
2008-11-17 02:19:46 +01:00
|
|
|
{
|
2010-03-03 05:32:06 +01:00
|
|
|
if (i->action == disk_io_job::write)
|
|
|
|
{
|
|
|
|
TORRENT_ASSERT(m_queue_buffer_size >= i->buffer_size);
|
|
|
|
m_queue_buffer_size -= i->buffer_size;
|
|
|
|
}
|
2010-01-12 02:56:48 +01:00
|
|
|
post_callback(i->callback, *i, -3);
|
2008-11-17 02:19:46 +01:00
|
|
|
m_jobs.erase(i++);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
++i;
|
|
|
|
}
|
2010-01-12 02:56:48 +01:00
|
|
|
// now clear all the read jobs
|
2010-07-23 18:15:14 +02:00
|
|
|
for (read_jobs_t::iterator i = m_sorted_read_jobs.begin();
|
|
|
|
i != m_sorted_read_jobs.end();)
|
2010-01-12 02:56:48 +01:00
|
|
|
{
|
|
|
|
if (i->second.storage != j.storage)
|
|
|
|
{
|
|
|
|
++i;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
post_callback(i->second.callback, i->second, -3);
|
2010-04-15 00:02:49 +02:00
|
|
|
if (elevator_job_pos == i) ++elevator_job_pos;
|
2010-07-23 18:15:14 +02:00
|
|
|
m_sorted_read_jobs.erase(i++);
|
2010-01-12 02:56:48 +01:00
|
|
|
}
|
2010-02-21 09:52:26 +01:00
|
|
|
jl.unlock();
|
|
|
|
|
2010-02-21 10:03:56 +01:00
|
|
|
mutex::scoped_lock l(m_piece_mutex);
|
2010-02-21 09:52:26 +01:00
|
|
|
|
2010-05-13 17:01:20 +02:00
|
|
|
// build a vector of all the buffers we need to free
|
|
|
|
// and free them all in one go
|
|
|
|
std::vector<char*> buffers;
|
2010-02-21 09:52:26 +01:00
|
|
|
for (cache_t::iterator i = m_read_pieces.begin();
|
|
|
|
i != m_read_pieces.end();)
|
|
|
|
{
|
|
|
|
if (i->storage == j.storage)
|
|
|
|
{
|
2010-05-13 17:01:20 +02:00
|
|
|
drain_piece_bufs(const_cast<cached_piece_entry&>(*i), buffers, l);
|
2010-02-21 09:52:26 +01:00
|
|
|
i = m_read_pieces.erase(i);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
++i;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
l.unlock();
|
2010-05-13 17:01:20 +02:00
|
|
|
if (!buffers.empty()) free_multiple_buffers(&buffers[0], buffers.size());
|
2010-02-21 09:52:26 +01:00
|
|
|
release_memory();
|
2008-11-17 02:19:46 +01:00
|
|
|
break;
|
|
|
|
}
|
2008-06-12 06:40:37 +02:00
|
|
|
case disk_io_job::abort_thread:
|
|
|
|
{
|
2008-12-30 09:20:25 +01:00
|
|
|
#ifdef TORRENT_DISK_STATS
|
|
|
|
m_log << log_time() << " abort_thread " << std::endl;
|
|
|
|
#endif
|
2010-01-12 02:56:48 +01:00
|
|
|
// clear all read jobs
|
2009-10-20 04:49:56 +02:00
|
|
|
mutex::scoped_lock jl(m_queue_mutex);
|
2008-06-12 06:40:37 +02:00
|
|
|
|
|
|
|
for (std::list<disk_io_job>::iterator i = m_jobs.begin();
|
2009-05-22 08:32:39 +02:00
|
|
|
i != m_jobs.end();)
|
2008-06-12 06:40:37 +02:00
|
|
|
{
|
2010-01-12 02:56:48 +01:00
|
|
|
if (should_cancel_on_abort(*i))
|
2008-06-12 06:40:37 +02:00
|
|
|
{
|
2010-03-03 05:32:06 +01:00
|
|
|
if (i->action == disk_io_job::write)
|
|
|
|
{
|
|
|
|
TORRENT_ASSERT(m_queue_buffer_size >= i->buffer_size);
|
|
|
|
m_queue_buffer_size -= i->buffer_size;
|
|
|
|
}
|
2010-01-12 02:56:48 +01:00
|
|
|
post_callback(i->callback, *i, -3);
|
2008-06-12 06:40:37 +02:00
|
|
|
m_jobs.erase(i++);
|
|
|
|
continue;
|
|
|
|
}
|
2010-01-12 02:56:48 +01:00
|
|
|
++i;
|
|
|
|
}
|
|
|
|
jl.unlock();
|
|
|
|
|
2010-07-23 18:15:14 +02:00
|
|
|
for (read_jobs_t::iterator i = m_sorted_read_jobs.begin();
|
|
|
|
i != m_sorted_read_jobs.end();)
|
2010-01-12 02:56:48 +01:00
|
|
|
{
|
|
|
|
if (i->second.storage != j.storage)
|
2008-06-12 06:40:37 +02:00
|
|
|
{
|
2010-01-12 02:56:48 +01:00
|
|
|
++i;
|
2008-06-12 06:40:37 +02:00
|
|
|
continue;
|
|
|
|
}
|
2010-01-12 02:56:48 +01:00
|
|
|
post_callback(i->second.callback, i->second, -3);
|
2010-04-15 00:02:49 +02:00
|
|
|
if (elevator_job_pos == i) ++elevator_job_pos;
|
2010-07-23 18:15:14 +02:00
|
|
|
m_sorted_read_jobs.erase(i++);
|
2008-06-12 06:40:37 +02:00
|
|
|
}
|
2009-05-13 19:17:33 +02:00
|
|
|
|
|
|
|
m_abort = true;
|
2008-06-12 06:40:37 +02:00
|
|
|
break;
|
|
|
|
}
|
2009-02-03 08:46:24 +01:00
|
|
|
case disk_io_job::read_and_hash:
|
|
|
|
{
|
|
|
|
#ifdef TORRENT_DISK_STATS
|
|
|
|
m_log << log_time() << " read_and_hash " << j.buffer_size << std::endl;
|
|
|
|
#endif
|
|
|
|
INVARIANT_CHECK;
|
|
|
|
TORRENT_ASSERT(j.buffer == 0);
|
|
|
|
j.buffer = allocate_buffer("send buffer");
|
|
|
|
TORRENT_ASSERT(j.buffer_size <= m_block_size);
|
|
|
|
if (j.buffer == 0)
|
|
|
|
{
|
|
|
|
ret = -1;
|
2009-06-19 18:42:33 +02:00
|
|
|
#if BOOST_VERSION == 103500
|
|
|
|
j.error = error_code(boost::system::posix_error::not_enough_memory
|
|
|
|
, get_posix_category());
|
|
|
|
#elif BOOST_VERSION > 103500
|
2009-06-01 00:41:53 +02:00
|
|
|
j.error = error_code(boost::system::errc::not_enough_memory
|
|
|
|
, get_posix_category());
|
2009-06-03 09:46:50 +02:00
|
|
|
#else
|
2009-11-23 09:38:50 +01:00
|
|
|
j.error = error::no_memory;
|
2009-06-03 09:46:50 +02:00
|
|
|
#endif
|
2009-06-10 10:30:55 +02:00
|
|
|
j.str.clear();
|
2009-02-03 08:46:24 +01:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
disk_buffer_holder read_holder(*this, j.buffer);
|
|
|
|
|
|
|
|
// read the entire piece and verify the piece hash
|
|
|
|
// since we need to check the hash, this function
|
|
|
|
// will ignore the cache size limit (at least for
|
|
|
|
// reading and hashing, not for keeping it around)
|
|
|
|
sha1_hash h;
|
|
|
|
ret = read_piece_from_cache_and_hash(j, h);
|
2009-05-28 03:19:48 +02:00
|
|
|
|
|
|
|
// -2 means there's no space in the read cache
|
|
|
|
// or that the read cache is disabled
|
2009-02-03 08:46:24 +01:00
|
|
|
if (ret == -1)
|
|
|
|
{
|
|
|
|
test_error(j);
|
|
|
|
break;
|
|
|
|
}
|
2009-08-02 08:40:45 +02:00
|
|
|
if (!m_settings.disable_hash_checks)
|
|
|
|
ret = (j.storage->info()->hash_for_piece(j.piece) == h)?ret:-3;
|
2009-02-03 08:46:24 +01:00
|
|
|
if (ret == -3)
|
|
|
|
{
|
|
|
|
j.storage->mark_failed(j.piece);
|
2009-11-29 08:06:38 +01:00
|
|
|
j.error = errors::failed_hash_check;
|
2009-06-10 10:30:55 +02:00
|
|
|
j.str.clear();
|
2009-05-23 05:05:21 +02:00
|
|
|
j.buffer = 0;
|
|
|
|
break;
|
2009-02-03 08:46:24 +01:00
|
|
|
}
|
|
|
|
|
2009-05-23 05:05:21 +02:00
|
|
|
TORRENT_ASSERT(j.buffer == read_holder.get());
|
2009-02-03 08:46:24 +01:00
|
|
|
read_holder.release();
|
2009-06-01 00:38:49 +02:00
|
|
|
#if TORRENT_DISK_STATS
|
|
|
|
rename_buffer(j.buffer, "released send buffer");
|
|
|
|
#endif
|
2009-02-03 08:46:24 +01:00
|
|
|
break;
|
|
|
|
}
|
2010-01-09 19:40:05 +01:00
|
|
|
case disk_io_job::finalize_file:
|
|
|
|
{
|
|
|
|
#ifdef TORRENT_DISK_STATS
|
|
|
|
m_log << log_time() << " finalize_file " << j.piece << std::endl;
|
|
|
|
#endif
|
|
|
|
j.storage->finalize_file(j.piece);
|
|
|
|
break;
|
|
|
|
}
|
2008-04-13 23:26:57 +02:00
|
|
|
case disk_io_job::read:
|
2007-06-10 22:46:09 +02:00
|
|
|
{
|
2008-07-18 01:41:46 +02:00
|
|
|
if (test_error(j))
|
2008-02-14 04:48:20 +01:00
|
|
|
{
|
2008-04-13 23:26:57 +02:00
|
|
|
ret = -1;
|
2008-11-29 22:38:34 +01:00
|
|
|
break;
|
2008-04-13 23:26:57 +02:00
|
|
|
}
|
2007-09-17 10:15:54 +02:00
|
|
|
#ifdef TORRENT_DISK_STATS
|
2010-01-31 22:13:52 +01:00
|
|
|
m_log << log_time();
|
2007-09-17 10:15:54 +02:00
|
|
|
#endif
|
2008-04-13 23:26:57 +02:00
|
|
|
INVARIANT_CHECK;
|
|
|
|
TORRENT_ASSERT(j.buffer == 0);
|
2009-01-23 10:13:31 +01:00
|
|
|
j.buffer = allocate_buffer("send buffer");
|
2008-04-13 23:26:57 +02:00
|
|
|
TORRENT_ASSERT(j.buffer_size <= m_block_size);
|
|
|
|
if (j.buffer == 0)
|
|
|
|
{
|
2010-01-31 22:13:52 +01:00
|
|
|
#ifdef TORRENT_DISK_STATS
|
|
|
|
m_log << " read 0" << std::endl;
|
|
|
|
#endif
|
2008-04-13 23:26:57 +02:00
|
|
|
ret = -1;
|
2009-06-19 18:42:33 +02:00
|
|
|
#if BOOST_VERSION == 103500
|
|
|
|
j.error = error_code(boost::system::posix_error::not_enough_memory
|
|
|
|
, get_posix_category());
|
|
|
|
#elif BOOST_VERSION > 103500
|
2009-06-01 00:41:53 +02:00
|
|
|
j.error = error_code(boost::system::errc::not_enough_memory
|
|
|
|
, get_posix_category());
|
2009-06-03 09:46:50 +02:00
|
|
|
#else
|
2009-11-23 09:38:50 +01:00
|
|
|
j.error = error::no_memory;
|
2009-06-03 09:46:50 +02:00
|
|
|
#endif
|
2009-06-10 10:30:55 +02:00
|
|
|
j.str.clear();
|
2008-04-13 23:26:57 +02:00
|
|
|
break;
|
|
|
|
}
|
2008-02-22 05:11:04 +01:00
|
|
|
|
2008-04-13 23:26:57 +02:00
|
|
|
disk_buffer_holder read_holder(*this, j.buffer);
|
2009-05-23 05:05:21 +02:00
|
|
|
|
2010-01-31 22:13:52 +01:00
|
|
|
bool hit;
|
|
|
|
ret = try_read_from_cache(j, hit);
|
2008-04-13 23:26:57 +02:00
|
|
|
|
2010-01-31 22:13:52 +01:00
|
|
|
#ifdef TORRENT_DISK_STATS
|
|
|
|
m_log << (hit?" read-cache-hit ":" read ") << j.buffer_size << std::endl;
|
|
|
|
#endif
|
2008-04-13 23:26:57 +02:00
|
|
|
// -2 means there's no space in the read cache
|
|
|
|
// or that the read cache is disabled
|
|
|
|
if (ret == -1)
|
|
|
|
{
|
|
|
|
j.buffer = 0;
|
2008-07-18 01:41:46 +02:00
|
|
|
test_error(j);
|
2008-04-13 23:26:57 +02:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
else if (ret == -2)
|
|
|
|
{
|
2009-01-03 09:11:31 +01:00
|
|
|
file::iovec_t b = { j.buffer, j.buffer_size };
|
|
|
|
ret = j.storage->read_impl(&b, j.piece, j.offset, 1);
|
2008-04-13 23:26:57 +02:00
|
|
|
if (ret < 0)
|
2008-02-14 04:48:20 +01:00
|
|
|
{
|
2008-07-18 01:41:46 +02:00
|
|
|
test_error(j);
|
2008-02-22 05:11:04 +01:00
|
|
|
break;
|
|
|
|
}
|
2009-05-23 05:05:21 +02:00
|
|
|
if (ret != j.buffer_size)
|
2009-05-21 18:15:05 +02:00
|
|
|
{
|
|
|
|
// this means the file wasn't big enough for this read
|
2009-05-23 05:05:21 +02:00
|
|
|
j.buffer = 0;
|
2009-11-29 08:06:38 +01:00
|
|
|
j.error = errors::file_too_short;
|
2009-05-21 18:15:05 +02:00
|
|
|
j.error_file.clear();
|
2009-06-10 10:30:55 +02:00
|
|
|
j.str.clear();
|
2009-05-21 18:15:05 +02:00
|
|
|
ret = -1;
|
|
|
|
break;
|
|
|
|
}
|
2008-04-13 23:26:57 +02:00
|
|
|
++m_cache_stats.blocks_read;
|
2010-03-04 04:49:06 +01:00
|
|
|
hit = false;
|
|
|
|
}
|
|
|
|
if (!hit)
|
|
|
|
{
|
2011-03-19 23:23:58 +01:00
|
|
|
ptime now = time_now_hires();
|
|
|
|
m_read_time.add_sample(total_microseconds(now - operation_start));
|
|
|
|
m_cache_stats.cumulative_read_time += total_milliseconds(now - operation_start);
|
2008-02-14 04:48:20 +01:00
|
|
|
}
|
2009-05-23 05:05:21 +02:00
|
|
|
TORRENT_ASSERT(j.buffer == read_holder.get());
|
2008-04-13 23:26:57 +02:00
|
|
|
read_holder.release();
|
2009-06-01 00:38:49 +02:00
|
|
|
#if TORRENT_DISK_STATS
|
|
|
|
rename_buffer(j.buffer, "released send buffer");
|
|
|
|
#endif
|
2008-04-13 23:26:57 +02:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
case disk_io_job::write:
|
|
|
|
{
|
2007-09-17 10:15:54 +02:00
|
|
|
#ifdef TORRENT_DISK_STATS
|
2008-04-13 23:26:57 +02:00
|
|
|
m_log << log_time() << " write " << j.buffer_size << std::endl;
|
2007-09-17 10:15:54 +02:00
|
|
|
#endif
|
2009-10-20 04:49:56 +02:00
|
|
|
mutex::scoped_lock l(m_piece_mutex);
|
2008-04-13 23:26:57 +02:00
|
|
|
INVARIANT_CHECK;
|
2009-05-23 09:35:45 +02:00
|
|
|
|
2010-10-18 09:38:14 +02:00
|
|
|
TORRENT_ASSERT(!j.storage->error());
|
2010-01-31 20:14:00 +01:00
|
|
|
TORRENT_ASSERT(j.cache_min_time >= 0);
|
|
|
|
|
2009-05-23 09:35:45 +02:00
|
|
|
if (in_use() >= m_settings.cache_size)
|
2010-10-18 09:38:14 +02:00
|
|
|
{
|
2010-01-27 05:25:45 +01:00
|
|
|
flush_cache_blocks(l, in_use() - m_settings.cache_size + 1);
|
2010-10-18 09:38:14 +02:00
|
|
|
if (test_error(j)) break;
|
|
|
|
}
|
|
|
|
TORRENT_ASSERT(!j.storage->error());
|
2009-05-23 09:35:45 +02:00
|
|
|
|
2010-01-27 05:25:45 +01:00
|
|
|
cache_piece_index_t& idx = m_pieces.get<0>();
|
|
|
|
cache_piece_index_t::iterator p = find_cached_piece(m_pieces, j, l);
|
2008-04-13 23:26:57 +02:00
|
|
|
int block = j.offset / m_block_size;
|
|
|
|
TORRENT_ASSERT(j.buffer);
|
|
|
|
TORRENT_ASSERT(j.buffer_size <= m_block_size);
|
2011-03-16 08:21:58 +01:00
|
|
|
int piece_size = j.storage->info()->piece_size(j.piece);
|
|
|
|
int blocks_in_piece = (piece_size + m_block_size - 1) / m_block_size;
|
2010-01-27 05:25:45 +01:00
|
|
|
if (p != idx.end())
|
2008-04-13 23:26:57 +02:00
|
|
|
{
|
2011-03-16 08:21:58 +01:00
|
|
|
bool recalc_contiguous = false;
|
2009-06-10 10:30:55 +02:00
|
|
|
TORRENT_ASSERT(p->blocks[block].buf == 0);
|
|
|
|
if (p->blocks[block].buf)
|
2008-02-08 11:22:05 +01:00
|
|
|
{
|
2009-06-10 10:30:55 +02:00
|
|
|
free_buffer(p->blocks[block].buf);
|
2009-11-28 04:14:08 +01:00
|
|
|
--m_cache_stats.cache_size;
|
2010-01-27 05:25:45 +01:00
|
|
|
--const_cast<cached_piece_entry&>(*p).num_blocks;
|
2008-02-08 11:22:05 +01:00
|
|
|
}
|
2011-03-20 06:47:27 +01:00
|
|
|
else if ((block > 0 && p->blocks[block-1].buf)
|
|
|
|
|| (block < blocks_in_piece-1 && p->blocks[block+1].buf)
|
|
|
|
|| p->num_blocks == 0)
|
2011-03-16 08:21:58 +01:00
|
|
|
{
|
|
|
|
// update the contiguous blocks counter for this piece. Only if it has
|
|
|
|
// an adjacent block. If it doesn't, we already know it couldn't have
|
|
|
|
// increased the largest contiguous block span in this piece
|
|
|
|
recalc_contiguous = true;
|
|
|
|
}
|
2009-06-10 10:30:55 +02:00
|
|
|
p->blocks[block].buf = j.buffer;
|
2010-01-12 02:56:48 +01:00
|
|
|
p->blocks[block].callback.swap(j.callback);
|
2009-05-19 09:00:05 +02:00
|
|
|
#ifdef TORRENT_DISK_STATS
|
|
|
|
rename_buffer(j.buffer, "write cache");
|
|
|
|
#endif
|
2008-04-13 23:26:57 +02:00
|
|
|
++m_cache_stats.cache_size;
|
2010-01-27 05:25:45 +01:00
|
|
|
++const_cast<cached_piece_entry&>(*p).num_blocks;
|
2011-03-16 08:21:58 +01:00
|
|
|
if (recalc_contiguous)
|
|
|
|
{
|
|
|
|
const_cast<cached_piece_entry&>(*p).num_contiguous_blocks = contiguous_blocks(*p);
|
|
|
|
}
|
2010-01-31 20:14:00 +01:00
|
|
|
idx.modify(p, update_last_use(j.cache_min_time));
|
2009-05-24 02:12:53 +02:00
|
|
|
// we might just have created a contiguous range
|
|
|
|
// that meets the requirement to be flushed. try it
|
2011-03-20 06:47:27 +01:00
|
|
|
// if we're in avoid_readback mode, don't do this. Only flush
|
|
|
|
// pieces when we need more space in the cache (which will avoid
|
|
|
|
// flushing blocks out-of-order) or when we issue a hash job,
|
|
|
|
							// which indicates the piece is completely downloaded
|
|
|
|
if (m_settings.disk_cache_algorithm != session_settings::avoid_readback)
|
|
|
|
flush_contiguous_blocks(const_cast<cached_piece_entry&>(*p)
|
|
|
|
, l, m_settings.write_cache_line_size);
|
2010-01-27 05:25:45 +01:00
|
|
|
if (p->num_blocks == 0) idx.erase(p);
|
2010-10-18 09:38:14 +02:00
|
|
|
test_error(j);
|
|
|
|
TORRENT_ASSERT(!j.storage->error());
|
2008-02-08 11:22:05 +01:00
|
|
|
}
|
2008-04-13 23:26:57 +02:00
|
|
|
else
|
2008-02-08 11:22:05 +01:00
|
|
|
{
|
2010-10-18 09:38:14 +02:00
|
|
|
TORRENT_ASSERT(!j.storage->error());
|
2010-01-31 20:14:00 +01:00
|
|
|
if (cache_block(j, j.callback, j.cache_min_time, l) < 0)
|
2009-05-23 20:39:55 +02:00
|
|
|
{
|
2009-12-25 17:13:35 +01:00
|
|
|
l.unlock();
|
2011-03-11 08:37:12 +01:00
|
|
|
ptime start = time_now_hires();
|
2009-05-23 20:39:55 +02:00
|
|
|
file::iovec_t iov = {j.buffer, j.buffer_size};
|
|
|
|
ret = j.storage->write_impl(&iov, j.piece, j.offset, 1);
|
2009-12-25 17:13:35 +01:00
|
|
|
l.lock();
|
2009-05-23 20:39:55 +02:00
|
|
|
if (ret < 0)
|
|
|
|
{
|
|
|
|
test_error(j);
|
|
|
|
break;
|
|
|
|
}
|
2011-03-11 08:37:12 +01:00
|
|
|
ptime done = time_now_hires();
|
|
|
|
m_write_time.add_sample(total_microseconds(done - start));
|
2011-03-19 23:23:58 +01:00
|
|
|
m_cache_stats.cumulative_write_time += total_milliseconds(done - start);
|
2010-10-18 09:38:14 +02:00
|
|
|
// we successfully wrote the block. Ignore previous errors
|
|
|
|
j.storage->clear_error();
|
2009-06-15 00:48:07 +02:00
|
|
|
break;
|
2009-05-23 20:39:55 +02:00
|
|
|
}
|
2010-10-18 09:38:14 +02:00
|
|
|
TORRENT_ASSERT(!j.storage->error());
|
2008-04-13 23:26:57 +02:00
|
|
|
}
|
|
|
|
// we've now inserted the buffer
|
|
|
|
// in the cache, we should not
|
|
|
|
// free it at the end
|
|
|
|
holder.release();
|
2009-05-23 09:35:45 +02:00
|
|
|
|
|
|
|
if (in_use() > m_settings.cache_size)
|
2010-10-18 09:38:14 +02:00
|
|
|
{
|
2010-01-27 05:25:45 +01:00
|
|
|
flush_cache_blocks(l, in_use() - m_settings.cache_size);
|
2010-10-18 09:38:14 +02:00
|
|
|
test_error(j);
|
|
|
|
}
|
|
|
|
TORRENT_ASSERT(!j.storage->error());
|
2009-05-23 09:35:45 +02:00
|
|
|
|
2008-04-13 23:26:57 +02:00
|
|
|
break;
|
|
|
|
}
|
2010-01-15 17:45:42 +01:00
|
|
|
case disk_io_job::cache_piece:
|
|
|
|
{
|
|
|
|
mutex::scoped_lock l(m_piece_mutex);
|
|
|
|
|
|
|
|
if (test_error(j))
|
|
|
|
{
|
|
|
|
ret = -1;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
#ifdef TORRENT_DISK_STATS
|
|
|
|
m_log << log_time() << " cache " << j.piece << std::endl;
|
|
|
|
#endif
|
|
|
|
INVARIANT_CHECK;
|
|
|
|
TORRENT_ASSERT(j.buffer == 0);
|
|
|
|
|
2010-01-27 05:25:45 +01:00
|
|
|
cache_piece_index_t::iterator p;
|
2010-01-15 17:45:42 +01:00
|
|
|
bool hit;
|
|
|
|
ret = cache_piece(j, p, hit, 0, l);
|
|
|
|
if (ret == -2) ret = -1;
|
|
|
|
|
|
|
|
if (ret < 0) test_error(j);
|
|
|
|
break;
|
|
|
|
}
|
2008-04-13 23:26:57 +02:00
|
|
|
case disk_io_job::hash:
|
|
|
|
{
|
2007-09-17 10:15:54 +02:00
|
|
|
#ifdef TORRENT_DISK_STATS
|
2008-04-13 23:26:57 +02:00
|
|
|
m_log << log_time() << " hash" << std::endl;
|
2007-09-17 10:15:54 +02:00
|
|
|
#endif
|
2010-10-18 09:38:14 +02:00
|
|
|
TORRENT_ASSERT(!j.storage->error());
|
2009-10-20 04:49:56 +02:00
|
|
|
mutex::scoped_lock l(m_piece_mutex);
|
2008-04-13 23:26:57 +02:00
|
|
|
INVARIANT_CHECK;
|
2008-03-10 09:19:31 +01:00
|
|
|
|
2010-01-27 05:25:45 +01:00
|
|
|
cache_piece_index_t& idx = m_pieces.get<0>();
|
|
|
|
cache_piece_index_t::iterator i = find_cached_piece(m_pieces, j, l);
|
|
|
|
if (i != idx.end())
|
2008-04-13 23:26:57 +02:00
|
|
|
{
|
2010-01-27 05:25:45 +01:00
|
|
|
TORRENT_ASSERT(i->storage);
|
|
|
|
int ret = flush_range(const_cast<cached_piece_entry&>(*i), 0, INT_MAX, l);
|
|
|
|
idx.erase(i);
|
2008-07-18 01:41:46 +02:00
|
|
|
if (test_error(j))
|
2008-02-14 04:48:20 +01:00
|
|
|
{
|
|
|
|
ret = -1;
|
2008-04-13 00:08:07 +02:00
|
|
|
j.storage->mark_failed(j.piece);
|
2008-02-14 04:48:20 +01:00
|
|
|
break;
|
|
|
|
}
|
2008-02-08 11:22:05 +01:00
|
|
|
}
|
2008-04-13 23:26:57 +02:00
|
|
|
l.unlock();
|
2009-08-02 08:40:45 +02:00
|
|
|
if (m_settings.disable_hash_checks)
|
|
|
|
{
|
|
|
|
ret = 0;
|
|
|
|
break;
|
|
|
|
}
|
2011-03-19 23:52:29 +01:00
|
|
|
|
|
|
|
ptime hash_start = time_now_hires();
|
|
|
|
|
2011-03-20 02:19:14 +01:00
|
|
|
int readback = 0;
|
|
|
|
sha1_hash h = j.storage->hash_for_piece_impl(j.piece, &readback);
|
2008-07-18 01:41:46 +02:00
|
|
|
if (test_error(j))
|
2008-02-14 04:48:20 +01:00
|
|
|
{
|
2008-04-13 23:26:57 +02:00
|
|
|
ret = -1;
|
|
|
|
j.storage->mark_failed(j.piece);
|
|
|
|
break;
|
|
|
|
}
|
2009-08-02 08:40:45 +02:00
|
|
|
|
2011-03-20 02:19:14 +01:00
|
|
|
m_cache_stats.total_read_back += readback / m_block_size;
|
|
|
|
|
2008-04-13 23:26:57 +02:00
|
|
|
ret = (j.storage->info()->hash_for_piece(j.piece) == h)?0:-2;
|
|
|
|
if (ret == -2) j.storage->mark_failed(j.piece);
|
2011-03-14 06:21:46 +01:00
|
|
|
|
2011-03-19 23:23:58 +01:00
|
|
|
ptime done = time_now_hires();
|
|
|
|
m_hash_time.add_sample(total_microseconds(done - hash_start));
|
|
|
|
m_cache_stats.cumulative_hash_time += total_milliseconds(done - hash_start);
|
2008-04-13 23:26:57 +02:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
case disk_io_job::move_storage:
|
|
|
|
{
|
2007-09-17 10:15:54 +02:00
|
|
|
#ifdef TORRENT_DISK_STATS
|
2008-04-13 23:26:57 +02:00
|
|
|
m_log << log_time() << " move" << std::endl;
|
2007-09-17 10:15:54 +02:00
|
|
|
#endif
|
2008-04-13 23:26:57 +02:00
|
|
|
TORRENT_ASSERT(j.buffer == 0);
|
2009-03-31 10:05:46 +02:00
|
|
|
ret = j.storage->move_storage_impl(j.str);
|
2008-04-13 23:26:57 +02:00
|
|
|
if (ret != 0)
|
|
|
|
{
|
2008-07-18 01:41:46 +02:00
|
|
|
test_error(j);
|
2007-06-10 22:46:09 +02:00
|
|
|
break;
|
2008-02-14 04:48:20 +01:00
|
|
|
}
|
2009-10-26 02:29:39 +01:00
|
|
|
j.str = j.storage->save_path();
|
2008-04-13 23:26:57 +02:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
case disk_io_job::release_files:
|
|
|
|
{
|
2007-09-17 10:15:54 +02:00
|
|
|
#ifdef TORRENT_DISK_STATS
|
2008-04-13 23:26:57 +02:00
|
|
|
m_log << log_time() << " release" << std::endl;
|
2007-09-17 10:15:54 +02:00
|
|
|
#endif
|
2008-04-13 23:26:57 +02:00
|
|
|
TORRENT_ASSERT(j.buffer == 0);
|
2008-02-08 11:22:05 +01:00
|
|
|
|
2009-10-20 04:49:56 +02:00
|
|
|
mutex::scoped_lock l(m_piece_mutex);
|
2008-04-13 23:26:57 +02:00
|
|
|
INVARIANT_CHECK;
|
2008-03-10 09:19:31 +01:00
|
|
|
|
2008-04-13 23:26:57 +02:00
|
|
|
for (cache_t::iterator i = m_pieces.begin(); i != m_pieces.end();)
|
|
|
|
{
|
|
|
|
if (i->storage == j.storage)
|
2008-03-16 11:51:25 +01:00
|
|
|
{
|
2010-01-27 05:25:45 +01:00
|
|
|
flush_range(const_cast<cached_piece_entry&>(*i), 0, INT_MAX, l);
|
2008-04-13 23:26:57 +02:00
|
|
|
i = m_pieces.erase(i);
|
2008-03-16 11:51:25 +01:00
|
|
|
}
|
2008-04-13 23:26:57 +02:00
|
|
|
else
|
2008-02-14 04:48:20 +01:00
|
|
|
{
|
2008-04-13 23:26:57 +02:00
|
|
|
++i;
|
2008-02-14 04:48:20 +01:00
|
|
|
}
|
2008-02-08 11:22:05 +01:00
|
|
|
}
|
2008-06-12 06:40:37 +02:00
|
|
|
l.unlock();
|
2009-01-21 08:31:49 +01:00
|
|
|
release_memory();
|
|
|
|
|
2008-04-13 23:26:57 +02:00
|
|
|
ret = j.storage->release_files_impl();
|
2008-07-18 01:41:46 +02:00
|
|
|
if (ret != 0) test_error(j);
|
2008-04-13 23:26:57 +02:00
|
|
|
break;
|
|
|
|
}
|
2008-07-18 17:31:22 +02:00
|
|
|
case disk_io_job::clear_read_cache:
|
|
|
|
{
|
|
|
|
#ifdef TORRENT_DISK_STATS
|
|
|
|
m_log << log_time() << " clear-cache" << std::endl;
|
|
|
|
#endif
|
|
|
|
TORRENT_ASSERT(j.buffer == 0);
|
|
|
|
|
2009-10-20 04:49:56 +02:00
|
|
|
mutex::scoped_lock l(m_piece_mutex);
|
2008-07-18 17:31:22 +02:00
|
|
|
INVARIANT_CHECK;
|
|
|
|
|
|
|
|
for (cache_t::iterator i = m_read_pieces.begin();
|
|
|
|
i != m_read_pieces.end();)
|
|
|
|
{
|
|
|
|
if (i->storage == j.storage)
|
|
|
|
{
|
2010-01-27 05:25:45 +01:00
|
|
|
free_piece(const_cast<cached_piece_entry&>(*i), l);
|
2008-07-18 17:31:22 +02:00
|
|
|
i = m_read_pieces.erase(i);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
++i;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
l.unlock();
|
2009-01-21 08:31:49 +01:00
|
|
|
release_memory();
|
2008-07-18 17:31:22 +02:00
|
|
|
ret = 0;
|
|
|
|
break;
|
|
|
|
}
|
2008-04-13 23:26:57 +02:00
|
|
|
case disk_io_job::delete_files:
|
|
|
|
{
|
2007-10-13 05:33:33 +02:00
|
|
|
#ifdef TORRENT_DISK_STATS
|
2008-04-13 23:26:57 +02:00
|
|
|
m_log << log_time() << " delete" << std::endl;
|
2007-10-13 05:33:33 +02:00
|
|
|
#endif
|
2008-04-13 23:26:57 +02:00
|
|
|
TORRENT_ASSERT(j.buffer == 0);
|
2008-02-08 11:22:05 +01:00
|
|
|
|
2009-10-20 04:49:56 +02:00
|
|
|
mutex::scoped_lock l(m_piece_mutex);
|
2008-04-13 23:26:57 +02:00
|
|
|
INVARIANT_CHECK;
|
2008-03-10 09:19:31 +01:00
|
|
|
|
2010-01-27 05:25:45 +01:00
|
|
|
cache_piece_index_t& idx = m_pieces.get<0>();
|
|
|
|
cache_piece_index_t::iterator start = idx.lower_bound(std::pair<void*, int>(j.storage.get(), 0));
|
|
|
|
cache_piece_index_t::iterator end = idx.upper_bound(std::pair<void*, int>(j.storage.get(), INT_MAX));
|
2008-02-08 11:22:05 +01:00
|
|
|
|
2010-05-17 01:14:47 +02:00
|
|
|
// build a vector of all the buffers we need to free
|
|
|
|
// and free them all in one go
|
|
|
|
std::vector<char*> buffers;
|
2010-01-27 05:25:45 +01:00
|
|
|
for (cache_piece_index_t::iterator i = start; i != end; ++i)
|
2008-04-13 23:26:57 +02:00
|
|
|
{
|
2010-01-27 05:25:45 +01:00
|
|
|
torrent_info const& ti = *i->storage->info();
|
|
|
|
int blocks_in_piece = (ti.piece_size(i->piece) + m_block_size - 1) / m_block_size;
|
2008-04-13 23:26:57 +02:00
|
|
|
for (int j = 0; j < blocks_in_piece; ++j)
|
2008-02-08 11:22:05 +01:00
|
|
|
{
|
2010-01-27 05:25:45 +01:00
|
|
|
if (i->blocks[j].buf == 0) continue;
|
2010-05-17 01:14:47 +02:00
|
|
|
buffers.push_back(i->blocks[j].buf);
|
2010-01-27 05:25:45 +01:00
|
|
|
i->blocks[j].buf = 0;
|
2008-09-17 04:29:05 +02:00
|
|
|
--m_cache_stats.cache_size;
|
2008-02-08 11:22:05 +01:00
|
|
|
}
|
2008-04-13 23:26:57 +02:00
|
|
|
}
|
2010-01-27 05:25:45 +01:00
|
|
|
idx.erase(start, end);
|
2008-06-12 06:40:37 +02:00
|
|
|
l.unlock();
|
2010-05-17 01:14:47 +02:00
|
|
|
if (!buffers.empty()) free_multiple_buffers(&buffers[0], buffers.size());
|
2009-01-21 08:31:49 +01:00
|
|
|
release_memory();
|
|
|
|
|
2008-04-13 23:26:57 +02:00
|
|
|
ret = j.storage->delete_files_impl();
|
2008-07-18 01:41:46 +02:00
|
|
|
if (ret != 0) test_error(j);
|
2008-04-13 23:26:57 +02:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
case disk_io_job::check_fastresume:
|
|
|
|
{
|
2008-03-08 07:06:31 +01:00
|
|
|
#ifdef TORRENT_DISK_STATS
|
2008-12-30 09:20:25 +01:00
|
|
|
m_log << log_time() << " check_fastresume" << std::endl;
|
2008-03-08 07:06:31 +01:00
|
|
|
#endif
|
2008-07-01 01:14:31 +02:00
|
|
|
lazy_entry const* rd = (lazy_entry const*)j.buffer;
|
2008-04-13 23:26:57 +02:00
|
|
|
TORRENT_ASSERT(rd != 0);
|
2009-06-28 02:36:41 +02:00
|
|
|
ret = j.storage->check_fastresume(*rd, j.error);
|
2011-01-19 11:07:51 +01:00
|
|
|
test_error(j);
|
2008-04-13 23:26:57 +02:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
case disk_io_job::check_files:
|
|
|
|
{
|
2008-03-08 07:06:31 +01:00
|
|
|
#ifdef TORRENT_DISK_STATS
|
2008-12-30 09:20:25 +01:00
|
|
|
m_log << log_time() << " check_files" << std::endl;
|
2008-03-08 07:06:31 +01:00
|
|
|
#endif
|
2008-04-13 23:26:57 +02:00
|
|
|
int piece_size = j.storage->info()->piece_length();
|
|
|
|
for (int processed = 0; processed < 4 * 1024 * 1024; processed += piece_size)
|
|
|
|
{
|
2009-05-27 19:06:50 +02:00
|
|
|
ptime now = time_now_hires();
|
2009-05-30 06:02:03 +02:00
|
|
|
TORRENT_ASSERT(now >= m_last_file_check);
|
2010-06-21 07:45:45 +02:00
|
|
|
// this happens sometimes on windows for some reason
|
|
|
|
if (now < m_last_file_check) now = m_last_file_check;
|
|
|
|
|
2009-06-03 09:22:43 +02:00
|
|
|
#if BOOST_VERSION > 103600
|
2009-05-22 08:32:39 +02:00
|
|
|
if (now - m_last_file_check < milliseconds(m_settings.file_checks_delay_per_block))
|
|
|
|
{
|
|
|
|
int sleep_time = m_settings.file_checks_delay_per_block
|
|
|
|
* (piece_size / (16 * 1024))
|
|
|
|
- total_milliseconds(now - m_last_file_check);
|
2009-05-27 19:06:50 +02:00
|
|
|
if (sleep_time < 0) sleep_time = 0;
|
2009-06-01 00:38:49 +02:00
|
|
|
TORRENT_ASSERT(sleep_time < 5 * 1000);
|
2009-05-22 08:32:39 +02:00
|
|
|
|
2009-10-20 04:49:56 +02:00
|
|
|
sleep(sleep_time);
|
2009-05-22 08:32:39 +02:00
|
|
|
}
|
2009-05-27 19:06:50 +02:00
|
|
|
m_last_file_check = time_now_hires();
|
2009-06-03 09:22:43 +02:00
|
|
|
#endif
|
2009-05-22 08:32:39 +02:00
|
|
|
|
2011-03-14 06:21:46 +01:00
|
|
|
ptime hash_start = time_now_hires();
|
2009-05-22 08:32:39 +02:00
|
|
|
if (m_waiting_to_shutdown) break;
|
|
|
|
|
2009-06-28 02:36:41 +02:00
|
|
|
ret = j.storage->check_files(j.piece, j.offset, j.error);
|
2008-03-08 07:06:31 +01:00
|
|
|
|
2011-03-19 23:23:58 +01:00
|
|
|
ptime done = time_now_hires();
|
|
|
|
m_hash_time.add_sample(total_microseconds(done - hash_start));
|
|
|
|
m_cache_stats.cumulative_hash_time += total_milliseconds(done - hash_start);
|
2011-03-14 06:21:46 +01:00
|
|
|
|
2011-02-25 18:00:36 +01:00
|
|
|
TORRENT_TRY {
|
2010-01-12 02:56:48 +01:00
|
|
|
TORRENT_ASSERT(j.callback);
|
|
|
|
if (j.callback && ret == piece_manager::need_full_check)
|
|
|
|
post_callback(j.callback, j, ret);
|
2011-02-25 18:00:36 +01:00
|
|
|
} TORRENT_CATCH(std::exception&) {}
|
2008-04-13 23:26:57 +02:00
|
|
|
if (ret != piece_manager::need_full_check) break;
|
2008-03-08 07:06:31 +01:00
|
|
|
}
|
2008-07-18 01:41:46 +02:00
|
|
|
if (test_error(j))
|
|
|
|
{
|
|
|
|
ret = piece_manager::fatal_disk_error;
|
|
|
|
break;
|
|
|
|
}
|
2010-11-26 21:38:31 +01:00
|
|
|
TORRENT_ASSERT(ret != -2 || j.error);
|
2008-07-18 01:41:46 +02:00
|
|
|
|
2008-04-13 23:26:57 +02:00
|
|
|
// if the check is not done, add it at the end of the job queue
|
|
|
|
if (ret == piece_manager::need_full_check)
|
2008-04-13 20:54:36 +02:00
|
|
|
{
|
2010-01-14 00:37:23 +01:00
|
|
|
// offset needs to be reset to 0 so that the disk
|
|
|
|
// job sorting can be done correctly
|
|
|
|
j.offset = 0;
|
2010-01-12 02:56:48 +01:00
|
|
|
add_job(j, j.callback);
|
2008-04-13 23:26:57 +02:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
case disk_io_job::save_resume_data:
|
|
|
|
{
|
2008-04-13 20:54:36 +02:00
|
|
|
#ifdef TORRENT_DISK_STATS
|
2008-12-30 09:20:25 +01:00
|
|
|
m_log << log_time() << " save_resume_data" << std::endl;
|
2008-04-13 20:54:36 +02:00
|
|
|
#endif
|
2008-04-13 23:26:57 +02:00
|
|
|
j.resume_data.reset(new entry(entry::dictionary_t));
|
|
|
|
j.storage->write_resume_data(*j.resume_data);
|
|
|
|
ret = 0;
|
|
|
|
break;
|
2007-06-10 22:46:09 +02:00
|
|
|
}
|
2008-05-28 10:44:40 +02:00
|
|
|
case disk_io_job::rename_file:
|
|
|
|
{
|
|
|
|
#ifdef TORRENT_DISK_STATS
|
2008-12-30 09:20:25 +01:00
|
|
|
m_log << log_time() << " rename_file" << std::endl;
|
2008-05-28 10:44:40 +02:00
|
|
|
#endif
|
|
|
|
ret = j.storage->rename_file_impl(j.piece, j.str);
|
2009-05-07 08:41:41 +02:00
|
|
|
if (ret != 0)
|
|
|
|
{
|
|
|
|
test_error(j);
|
|
|
|
break;
|
|
|
|
}
|
2008-05-28 10:44:40 +02:00
|
|
|
}
|
2011-02-25 18:00:36 +01:00
|
|
|
}
|
2007-06-10 22:46:09 +02:00
|
|
|
}
|
2011-02-25 18:00:36 +01:00
|
|
|
TORRENT_CATCH(std::exception& e)
|
2007-06-10 22:46:09 +02:00
|
|
|
{
|
2011-03-10 06:01:36 +01:00
|
|
|
TORRENT_DECLARE_DUMMY(std::exception, e);
|
2008-02-14 04:48:20 +01:00
|
|
|
ret = -1;
|
2011-02-25 18:00:36 +01:00
|
|
|
TORRENT_TRY {
|
2008-01-15 00:51:04 +01:00
|
|
|
j.str = e.what();
|
2011-02-25 18:00:36 +01:00
|
|
|
} TORRENT_CATCH(std::exception&) {}
|
2007-06-10 22:46:09 +02:00
|
|
|
}
|
|
|
|
|
2010-10-18 09:38:14 +02:00
|
|
|
TORRENT_ASSERT(!j.storage || !j.storage->error());
|
|
|
|
|
2011-03-19 23:23:58 +01:00
|
|
|
ptime done = time_now_hires();
|
|
|
|
m_job_time.add_sample(total_microseconds(done - operation_start));
|
|
|
|
m_cache_stats.cumulative_job_time += total_milliseconds(done - operation_start);
|
2011-03-18 04:07:10 +01:00
|
|
|
|
2010-01-12 02:56:48 +01:00
|
|
|
// if (!j.callback) std::cerr << "DISK THREAD: no callback specified" << std::endl;
|
2007-06-10 22:46:09 +02:00
|
|
|
// else std::cerr << "DISK THREAD: invoking callback" << std::endl;
|
2011-02-25 18:00:36 +01:00
|
|
|
TORRENT_TRY {
|
2010-11-26 21:38:31 +01:00
|
|
|
TORRENT_ASSERT(ret != -2 || j.error
|
2008-07-30 10:05:04 +02:00
|
|
|
|| j.action == disk_io_job::hash);
|
2009-06-01 00:38:49 +02:00
|
|
|
#if TORRENT_DISK_STATS
|
|
|
|
if ((j.action == disk_io_job::read || j.action == disk_io_job::read_and_hash)
|
|
|
|
&& j.buffer != 0)
|
|
|
|
rename_buffer(j.buffer, "posted send buffer");
|
|
|
|
#endif
|
2010-01-12 02:56:48 +01:00
|
|
|
post_callback(j.callback, j, ret);
|
2011-02-25 18:00:36 +01:00
|
|
|
} TORRENT_CATCH(std::exception&) {
|
2008-04-13 00:08:07 +02:00
|
|
|
TORRENT_ASSERT(false);
|
2008-04-10 11:11:54 +02:00
|
|
|
}
|
2007-06-10 22:46:09 +02:00
|
|
|
}
|
2008-02-14 04:48:20 +01:00
|
|
|
TORRENT_ASSERT(false);
|
2007-06-10 22:46:09 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|