2007-06-10 22:46:09 +02:00
|
|
|
/*
|
|
|
|
|
|
|
|
Copyright (c) 2007, Arvid Norberg
|
|
|
|
All rights reserved.
|
|
|
|
|
|
|
|
Redistribution and use in source and binary forms, with or without
|
|
|
|
modification, are permitted provided that the following conditions
|
|
|
|
are met:
|
|
|
|
|
|
|
|
* Redistributions of source code must retain the above copyright
|
|
|
|
notice, this list of conditions and the following disclaimer.
|
|
|
|
* Redistributions in binary form must reproduce the above copyright
|
|
|
|
notice, this list of conditions and the following disclaimer in
|
|
|
|
the documentation and/or other materials provided with the distribution.
|
|
|
|
* Neither the name of the author nor the names of its
|
|
|
|
contributors may be used to endorse or promote products derived
|
|
|
|
from this software without specific prior written permission.
|
|
|
|
|
|
|
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
|
|
|
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
|
|
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
|
|
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
|
|
|
|
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
|
|
|
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
|
|
|
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
|
|
|
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
|
|
|
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
|
|
|
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
|
|
|
POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include "libtorrent/storage.hpp"
|
|
|
|
#include "libtorrent/disk_io_thread.hpp"
|
2008-04-13 00:08:07 +02:00
|
|
|
#include "libtorrent/disk_buffer_holder.hpp"
|
2009-01-11 03:02:34 +01:00
|
|
|
#include "libtorrent/alloca.hpp"
|
|
|
|
#include "libtorrent/invariant_check.hpp"
|
2009-02-03 08:46:24 +01:00
|
|
|
#include "libtorrent/error_code.hpp"
|
2008-02-22 05:11:04 +01:00
|
|
|
#include <boost/scoped_array.hpp>
|
2007-06-10 22:46:09 +02:00
|
|
|
|
2008-02-08 11:22:05 +01:00
|
|
|
#ifdef TORRENT_DISK_STATS
|
2007-09-17 10:15:54 +02:00
|
|
|
#include "libtorrent/time.hpp"
|
|
|
|
#endif
|
|
|
|
|
2009-03-31 10:15:21 +02:00
|
|
|
#if TORRENT_USE_MLOCK && !defined TORRENT_WINDOWS
|
2009-02-06 10:46:13 +01:00
|
|
|
#include <sys/mman.h>
|
|
|
|
#endif
|
|
|
|
|
2007-06-10 22:46:09 +02:00
|
|
|
namespace libtorrent
|
|
|
|
{
|
2009-01-21 08:31:49 +01:00
|
|
|
	// constructs the pool. block_size is the size, in bytes, of each
	// buffer handed out by allocate_buffer()
	disk_buffer_pool::disk_buffer_pool(int block_size)
		: m_block_size(block_size)
		, m_in_use(0)
#ifndef TORRENT_DISABLE_POOL_ALLOCATOR
		// the boost pool grows in chunks of cache_buffer_chunk_size blocks
		, m_pool(block_size, m_settings.cache_buffer_chunk_size)
#endif
	{
#if defined TORRENT_DISK_STATS || defined TORRENT_STATS
		m_allocations = 0;
#endif
#ifdef TORRENT_DISK_STATS
		// per-category buffer accounting, appended to disk_buffers.log
		m_log.open("disk_buffers.log", std::ios::trunc);
		m_categories["read cache"] = 0;
		m_categories["write cache"] = 0;

		m_disk_access_log.open("disk_access.log", std::ios::trunc);
#endif
#ifdef TORRENT_DEBUG
		// magic cookie; checked by every member function to catch
		// use of a destructed pool
		m_magic = 0x1337;
#endif
	}
|
|
|
|
|
2009-05-03 05:16:15 +02:00
|
|
|
#ifdef TORRENT_DEBUG
|
|
|
|
disk_buffer_pool::~disk_buffer_pool()
|
|
|
|
{
|
|
|
|
TORRENT_ASSERT(m_magic == 0x1337);
|
|
|
|
m_magic = 0;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2009-08-05 21:02:51 +02:00
|
|
|
#if defined TORRENT_DEBUG || defined TORRENT_DISK_STATS
	// returns true if 'buffer' was allocated from this pool. the
	// caller must already hold the pool mutex (passed in as 'l').
	// (spelled mutex_t::scoped_lock for consistency with the rest of
	// this file — mutex_t is the same boost::mutex type)
	bool disk_buffer_pool::is_disk_buffer(char* buffer
		, mutex_t::scoped_lock& l) const
	{
		TORRENT_ASSERT(m_magic == 0x1337);
#ifdef TORRENT_DISK_STATS
		// every live buffer has a category entry; an unknown pointer
		// is not one of ours
		if (m_buf_to_category.find(buffer)
			== m_buf_to_category.end()) return false;
#endif
#ifdef TORRENT_DISABLE_POOL_ALLOCATOR
		// without the pool allocator there is no cheap ownership
		// test; the category-map check above is the best we can do
		return true;
#else
		return m_pool.is_from(buffer);
#endif
	}

	// convenience overload that acquires the pool mutex itself
	bool disk_buffer_pool::is_disk_buffer(char* buffer) const
	{
		mutex_t::scoped_lock l(m_pool_mutex);
		return is_disk_buffer(buffer, l);
	}
#endif
|
|
|
|
|
2009-01-23 10:13:31 +01:00
|
|
|
	// hands out a single m_block_size byte buffer. 'category' is only
	// used for accounting when TORRENT_DISK_STATS is enabled. returns
	// 0 on allocation failure
	char* disk_buffer_pool::allocate_buffer(char const* category)
	{
		mutex_t::scoped_lock l(m_pool_mutex);
		TORRENT_ASSERT(m_magic == 0x1337);
#ifdef TORRENT_DISABLE_POOL_ALLOCATOR
		char* ret = page_aligned_allocator::malloc(m_block_size);
#else
		char* ret = (char*)m_pool.ordered_malloc();
		// keep the pool growing in chunks of the configured size
		m_pool.set_next_size(m_settings.cache_buffer_chunk_size);
#endif
		// NOTE(review): m_in_use is incremented even when ret == 0 —
		// confirm callers treat a failed allocation accordingly
		++m_in_use;
#if TORRENT_USE_MLOCK
		if (m_settings.lock_disk_cache)
		{
			// pin the buffer in physical memory so the disk cache is
			// never swapped out.
			// NOTE(review): ret is not checked for 0 before locking
#ifdef TORRENT_WINDOWS
			VirtualLock(ret, m_block_size);
#else
			mlock(ret, m_block_size);
#endif
		}
#endif

#if defined TORRENT_DISK_STATS || defined TORRENT_STATS
		++m_allocations;
#endif
#ifdef TORRENT_DISK_STATS
		++m_categories[category];
		m_buf_to_category[ret] = category;
		m_log << log_time() << " " << category << ": " << m_categories[category] << "\n";
#endif
		TORRENT_ASSERT(ret == 0 || is_disk_buffer(ret, l));
		return ret;
	}
|
|
|
|
|
2009-05-19 09:00:05 +02:00
|
|
|
#ifdef TORRENT_DISK_STATS
|
|
|
|
void disk_buffer_pool::rename_buffer(char* buf, char const* category)
|
|
|
|
{
|
2009-05-23 19:17:47 +02:00
|
|
|
mutex_t::scoped_lock l(m_pool_mutex);
|
|
|
|
TORRENT_ASSERT(is_disk_buffer(buf, l));
|
2009-05-19 09:00:05 +02:00
|
|
|
TORRENT_ASSERT(m_categories.find(m_buf_to_category[buf])
|
|
|
|
!= m_categories.end());
|
|
|
|
std::string const& prev_category = m_buf_to_category[buf];
|
|
|
|
--m_categories[prev_category];
|
|
|
|
m_log << log_time() << " " << prev_category << ": " << m_categories[prev_category] << "\n";
|
|
|
|
|
|
|
|
++m_categories[category];
|
|
|
|
m_buf_to_category[buf] = category;
|
|
|
|
m_log << log_time() << " " << category << ": " << m_categories[category] << "\n";
|
2009-05-23 19:17:47 +02:00
|
|
|
TORRENT_ASSERT(m_categories.find(m_buf_to_category[buf])
|
|
|
|
!= m_categories.end());
|
2009-05-19 09:00:05 +02:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2009-01-21 08:31:49 +01:00
|
|
|
	// returns a single buffer, previously handed out by
	// allocate_buffer(), to the pool. buf must not be 0
	void disk_buffer_pool::free_buffer(char* buf)
	{
		TORRENT_ASSERT(buf);
		mutex_t::scoped_lock l(m_pool_mutex);
		TORRENT_ASSERT(m_magic == 0x1337);
		TORRENT_ASSERT(is_disk_buffer(buf, l));
#if defined TORRENT_DISK_STATS || defined TORRENT_STATS
		--m_allocations;
#endif
#ifdef TORRENT_DISK_STATS
		// undo the per-category accounting done in allocate_buffer()
		TORRENT_ASSERT(m_categories.find(m_buf_to_category[buf])
			!= m_categories.end());
		std::string const& category = m_buf_to_category[buf];
		--m_categories[category];
		m_log << log_time() << " " << category << ": " << m_categories[category] << "\n";
		m_buf_to_category.erase(buf);
#endif
#if TORRENT_USE_MLOCK
		if (m_settings.lock_disk_cache)
		{
			// unpin the pages locked in allocate_buffer()
#ifdef TORRENT_WINDOWS
			VirtualUnlock(buf, m_block_size);
#else
			munlock(buf, m_block_size);
#endif
		}
#endif
#ifdef TORRENT_DISABLE_POOL_ALLOCATOR
		page_aligned_allocator::free(buf);
#else
		m_pool.ordered_free(buf);
#endif
		--m_in_use;
	}
|
|
|
|
|
2009-01-23 10:13:31 +01:00
|
|
|
	// allocates num_blocks contiguous blocks (num_blocks *
	// m_block_size bytes). 'category' is only used for accounting when
	// TORRENT_DISK_STATS is enabled. returns 0 on failure
	char* disk_buffer_pool::allocate_buffers(int num_blocks, char const* category)
	{
		mutex_t::scoped_lock l(m_pool_mutex);
		TORRENT_ASSERT(m_magic == 0x1337);
#ifdef TORRENT_DISABLE_POOL_ALLOCATOR
		char* ret = page_aligned_allocator::malloc(m_block_size * num_blocks);
#else
		char* ret = (char*)m_pool.ordered_malloc(num_blocks);
		// keep the pool growing in chunks of the configured size
		m_pool.set_next_size(m_settings.cache_buffer_chunk_size);
#endif
		// NOTE(review): m_in_use is bumped even when ret == 0 —
		// confirm callers treat a failed allocation accordingly
		m_in_use += num_blocks;
#if TORRENT_USE_MLOCK
		if (m_settings.lock_disk_cache)
		{
			// pin the whole range in physical memory.
			// NOTE(review): ret is not checked for 0 before locking
#ifdef TORRENT_WINDOWS
			VirtualLock(ret, m_block_size * num_blocks);
#else
			mlock(ret, m_block_size * num_blocks);
#endif
		}
#endif
#if defined TORRENT_DISK_STATS || defined TORRENT_STATS
		m_allocations += num_blocks;
#endif
#ifdef TORRENT_DISK_STATS
		m_categories[category] += num_blocks;
		m_buf_to_category[ret] = category;
		m_log << log_time() << " " << category << ": " << m_categories[category] << "\n";
#endif
		TORRENT_ASSERT(ret == 0 || is_disk_buffer(ret, l));
		return ret;
	}
|
|
|
|
|
|
|
|
	// returns a contiguous run of num_blocks blocks, previously handed
	// out by allocate_buffers(), to the pool
	void disk_buffer_pool::free_buffers(char* buf, int num_blocks)
	{
		TORRENT_ASSERT(buf);
		TORRENT_ASSERT(num_blocks >= 1);
		mutex_t::scoped_lock l(m_pool_mutex);
		TORRENT_ASSERT(m_magic == 0x1337);
		TORRENT_ASSERT(is_disk_buffer(buf, l));
#if defined TORRENT_DISK_STATS || defined TORRENT_STATS
		m_allocations -= num_blocks;
#endif
#ifdef TORRENT_DISK_STATS
		// undo the per-category accounting done in allocate_buffers()
		TORRENT_ASSERT(m_categories.find(m_buf_to_category[buf])
			!= m_categories.end());
		std::string const& category = m_buf_to_category[buf];
		m_categories[category] -= num_blocks;
		m_log << log_time() << " " << category << ": " << m_categories[category] << "\n";
		m_buf_to_category.erase(buf);
#endif
#if TORRENT_USE_MLOCK
		if (m_settings.lock_disk_cache)
		{
			// unpin the pages locked in allocate_buffers()
#ifdef TORRENT_WINDOWS
			VirtualUnlock(buf, m_block_size * num_blocks);
#else
			munlock(buf, m_block_size * num_blocks);
#endif
		}
#endif
#ifdef TORRENT_DISABLE_POOL_ALLOCATOR
		page_aligned_allocator::free(buf);
#else
		m_pool.ordered_free(buf, num_blocks);
#endif
		m_in_use -= num_blocks;
	}
|
|
|
|
|
|
|
|
	// returns unused memory held by the boost pool back to the system.
	// no-op when the pool allocator is disabled
	void disk_buffer_pool::release_memory()
	{
		// NOTE(review): m_magic is read before the mutex is taken —
		// harmless for a debug cookie, but worth confirming
		TORRENT_ASSERT(m_magic == 0x1337);
#ifndef TORRENT_DISABLE_POOL_ALLOCATOR
		mutex_t::scoped_lock l(m_pool_mutex);
		m_pool.release_memory();
#endif
	}
|
|
|
|
|
|
|
|
// ------- disk_io_thread ------
|
|
|
|
|
|
|
|
|
2009-06-10 10:30:55 +02:00
|
|
|
	// constructs the disk I/O thread. 'queue_callback' is a hook
	// stored for later invocation; 'block_size' is forwarded to the
	// buffer pool base class
	disk_io_thread::disk_io_thread(asio::io_service& ios
		, boost::function<void()> const& queue_callback
		, int block_size)
		: disk_buffer_pool(block_size)
		, m_abort(false)
		, m_waiting_to_shutdown(false)
		, m_queue_buffer_size(0)
		, m_last_file_check(time_now_hires())
		, m_ios(ios)
		, m_queue_callback(queue_callback)
		// keep the io_service alive while this object exists
		, m_work(io_service::work(m_ios))
		// initialized last: this spawns the worker thread, which may
		// immediately start reading the members initialized above
		, m_disk_io_thread(boost::ref(*this))
	{
#ifdef TORRENT_DISK_STATS
		m_log.open("disk_io_thread.log", std::ios::trunc);
#endif
	}
|
2007-06-10 22:46:09 +02:00
|
|
|
|
|
|
|
	disk_io_thread::~disk_io_thread()
	{
		// join() must have been called before destruction; it is what
		// causes m_abort to be set and the worker thread to exit
		TORRENT_ASSERT(m_abort == true);
	}
|
|
|
|
|
2007-12-24 09:15:10 +01:00
|
|
|
void disk_io_thread::join()
|
|
|
|
{
|
2008-06-12 06:40:37 +02:00
|
|
|
mutex_t::scoped_lock l(m_queue_mutex);
|
2008-06-09 06:46:34 +02:00
|
|
|
disk_io_job j;
|
2009-05-22 08:32:39 +02:00
|
|
|
m_waiting_to_shutdown = true;
|
2008-06-09 06:46:34 +02:00
|
|
|
j.action = disk_io_job::abort_thread;
|
|
|
|
m_jobs.insert(m_jobs.begin(), j);
|
2007-12-24 09:15:10 +01:00
|
|
|
m_signal.notify_all();
|
|
|
|
l.unlock();
|
|
|
|
|
|
|
|
m_disk_io_thread.join();
|
2008-11-11 10:32:51 +01:00
|
|
|
l.lock();
|
|
|
|
TORRENT_ASSERT(m_abort == true);
|
|
|
|
m_jobs.clear();
|
2007-12-24 09:15:10 +01:00
|
|
|
}
|
|
|
|
|
2008-02-08 11:22:05 +01:00
|
|
|
	// fills 'ret' with a description of every cached piece (write
	// cache first, then read cache) belonging to the torrent whose
	// info-hash is 'ih'
	void disk_io_thread::get_cache_info(sha1_hash const& ih, std::vector<cached_piece_info>& ret) const
	{
		mutex_t::scoped_lock l(m_piece_mutex);
		ret.clear();
		ret.reserve(m_pieces.size());
		// write-cache entries
		for (cache_t::const_iterator i = m_pieces.begin()
			, end(m_pieces.end()); i != end; ++i)
		{
			torrent_info const& ti = *i->storage->info();
			if (ti.info_hash() != ih) continue;
			cached_piece_info info;
			info.piece = i->piece;
			info.last_use = i->last_use;
			info.kind = cached_piece_info::write_cache;
			// number of blocks in this piece, rounding the last
			// (possibly short) block up
			int blocks_in_piece = (ti.piece_size(i->piece) + (m_block_size) - 1) / m_block_size;
			info.blocks.resize(blocks_in_piece);
			for (int b = 0; b < blocks_in_piece; ++b)
				if (i->blocks[b].buf) info.blocks[b] = true;
			ret.push_back(info);
		}
		// read-cache entries (same layout, different 'kind')
		for (cache_t::const_iterator i = m_read_pieces.begin()
			, end(m_read_pieces.end()); i != end; ++i)
		{
			torrent_info const& ti = *i->storage->info();
			if (ti.info_hash() != ih) continue;
			cached_piece_info info;
			info.piece = i->piece;
			info.last_use = i->last_use;
			info.kind = cached_piece_info::read_cache;
			int blocks_in_piece = (ti.piece_size(i->piece) + (m_block_size) - 1) / m_block_size;
			info.blocks.resize(blocks_in_piece);
			for (int b = 0; b < blocks_in_piece; ++b)
				if (i->blocks[b].buf) info.blocks[b] = true;
			ret.push_back(info);
		}
	}
|
|
|
|
|
|
|
|
	// snapshots the cache statistics. the two derived counters are
	// refreshed from their live sources before the copy is returned
	cache_status disk_io_thread::status() const
	{
		mutex_t::scoped_lock l(m_piece_mutex);
		m_cache_stats.total_used_buffers = in_use();
		m_cache_stats.queued_bytes = m_queue_buffer_size;
		return m_cache_stats;
	}
|
|
|
|
|
2007-06-10 22:46:09 +02:00
|
|
|
	// aborts all queued read and check_files jobs for storage 's'
	// (their callbacks are invoked with an error result), then queues
	// an abort_torrent job so the disk thread winds down the rest
	void disk_io_thread::stop(boost::intrusive_ptr<piece_manager> s)
	{
		mutex_t::scoped_lock l(m_queue_mutex);
		// read jobs are aborted, write and move jobs are synchronized
		for (std::list<disk_io_job>::iterator i = m_jobs.begin();
			i != m_jobs.end();)
		{
			// only touch jobs belonging to this storage
			if (i->storage != s)
			{
				++i;
				continue;
			}
			if (i->action == disk_io_job::read)
			{
				// -1 signals failure/abort to the waiting caller
				post_callback(i->callback, *i, -1);
				m_jobs.erase(i++);
				continue;
			}
			if (i->action == disk_io_job::check_files)
			{
				post_callback(i->callback, *i, piece_manager::disk_check_aborted);
				m_jobs.erase(i++);
				continue;
			}
			++i;
		}
		// NOTE(review): add_job() is called while m_queue_mutex is
		// still held by 'l' — confirm add_job does not lock it again
		disk_io_job j;
		j.action = disk_io_job::abort_torrent;
		j.storage = s;
		add_job(j);
	}
|
|
|
|
|
|
|
|
bool range_overlap(int start1, int length1, int start2, int length2)
|
|
|
|
{
|
|
|
|
return (start1 <= start2 && start1 + length1 > start2)
|
|
|
|
|| (start2 <= start1 && start2 + length2 > start1);
|
|
|
|
}
|
|
|
|
|
|
|
|
namespace
|
|
|
|
{
|
2007-09-16 03:34:06 +02:00
|
|
|
// The semantic of this operator is:
|
2008-02-22 05:11:04 +01:00
|
|
|
// should lhs come before rhs in the job queue
|
2007-06-10 22:46:09 +02:00
|
|
|
bool operator<(disk_io_job const& lhs, disk_io_job const& rhs)
|
|
|
|
{
|
2007-09-16 03:34:06 +02:00
|
|
|
// NOTE: comparison inverted to make higher priority
|
|
|
|
// skip _in_front_of_ lower priority
|
|
|
|
if (lhs.priority > rhs.priority) return true;
|
|
|
|
if (lhs.priority < rhs.priority) return false;
|
|
|
|
|
2007-06-10 22:46:09 +02:00
|
|
|
if (lhs.storage.get() < rhs.storage.get()) return true;
|
|
|
|
if (lhs.storage.get() > rhs.storage.get()) return false;
|
|
|
|
if (lhs.piece < rhs.piece) return true;
|
|
|
|
if (lhs.piece > rhs.piece) return false;
|
|
|
|
if (lhs.offset < rhs.offset) return true;
|
|
|
|
// if (lhs.offset > rhs.offset) return false;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
2008-02-08 11:22:05 +01:00
|
|
|
|
2008-02-25 00:14:10 +01:00
|
|
|
disk_io_thread::cache_t::iterator disk_io_thread::find_cached_piece(
|
|
|
|
disk_io_thread::cache_t& cache
|
2008-02-22 05:11:04 +01:00
|
|
|
, disk_io_job const& j, mutex_t::scoped_lock& l)
|
2008-02-08 11:22:05 +01:00
|
|
|
{
|
2008-02-25 00:14:10 +01:00
|
|
|
for (cache_t::iterator i = cache.begin()
|
2008-02-22 05:11:04 +01:00
|
|
|
, end(cache.end()); i != end; ++i)
|
2008-02-08 11:22:05 +01:00
|
|
|
{
|
|
|
|
if (i->storage != j.storage || i->piece != j.piece) continue;
|
|
|
|
return i;
|
|
|
|
}
|
2008-02-22 05:11:04 +01:00
|
|
|
return cache.end();
|
2008-02-08 11:22:05 +01:00
|
|
|
}
|
2007-06-10 22:46:09 +02:00
|
|
|
|
2008-06-12 06:40:37 +02:00
|
|
|
	// writes out and removes write-cache pieces, and frees read-cache
	// pieces, that have been idle for at least
	// m_settings.cache_expiry seconds
	void disk_io_thread::flush_expired_pieces()
	{
		ptime now = time_now();

		mutex_t::scoped_lock l(m_piece_mutex);

		INVARIANT_CHECK;
		// flush write cache
		for (;;)
		{
			// pick the least recently used piece
			cache_t::iterator i = std::min_element(
				m_pieces.begin(), m_pieces.end()
				, bind(&cached_piece_entry::last_use, _1)
				< bind(&cached_piece_entry::last_use, _2));
			if (i == m_pieces.end()) break;
			int age = total_seconds(now - i->last_use);
			// entries are evicted oldest-first; once the oldest is
			// fresh enough, all remaining ones are too
			if (age < m_settings.cache_expiry) break;
			flush_and_remove(i, l);
		}

		// flush read cache
		for (;;)
		{
			cache_t::iterator i = std::min_element(
				m_read_pieces.begin(), m_read_pieces.end()
				, bind(&cached_piece_entry::last_use, _1)
				< bind(&cached_piece_entry::last_use, _2));
			if (i == m_read_pieces.end()) break;
			int age = total_seconds(now - i->last_use);
			if (age < m_settings.cache_expiry) break;
			// read-cache blocks are clean; just free them
			free_piece(*i, l);
			m_read_pieces.erase(i);
		}
	}
|
|
|
|
|
2009-05-23 09:35:45 +02:00
|
|
|
// returns the number of blocks that were freed
|
|
|
|
int disk_io_thread::free_piece(cached_piece_entry& p, mutex_t::scoped_lock& l)
|
2008-02-22 05:11:04 +01:00
|
|
|
{
|
|
|
|
int piece_size = p.storage->info()->piece_size(p.piece);
|
|
|
|
int blocks_in_piece = (piece_size + m_block_size - 1) / m_block_size;
|
2009-05-23 09:35:45 +02:00
|
|
|
int ret = 0;
|
2008-02-22 05:11:04 +01:00
|
|
|
|
|
|
|
for (int i = 0; i < blocks_in_piece; ++i)
|
|
|
|
{
|
2009-06-10 10:30:55 +02:00
|
|
|
if (p.blocks[i].buf == 0) continue;
|
|
|
|
free_buffer(p.blocks[i].buf);
|
2009-05-23 09:35:45 +02:00
|
|
|
++ret;
|
2009-06-10 10:30:55 +02:00
|
|
|
p.blocks[i].buf = 0;
|
2008-02-22 05:11:04 +01:00
|
|
|
--p.num_blocks;
|
|
|
|
--m_cache_stats.cache_size;
|
|
|
|
--m_cache_stats.read_cache_size;
|
|
|
|
}
|
2009-05-23 09:35:45 +02:00
|
|
|
return ret;
|
2008-02-22 05:11:04 +01:00
|
|
|
}
|
|
|
|
|
2009-05-23 09:35:45 +02:00
|
|
|
// returns the number of blocks that were freed
|
|
|
|
int disk_io_thread::clear_oldest_read_piece(
|
2009-05-24 17:32:14 +02:00
|
|
|
int num_blocks
|
|
|
|
, cache_t::iterator ignore
|
2008-02-25 00:14:10 +01:00
|
|
|
, mutex_t::scoped_lock& l)
|
2008-02-22 05:11:04 +01:00
|
|
|
{
|
|
|
|
INVARIANT_CHECK;
|
|
|
|
|
2008-02-25 00:14:10 +01:00
|
|
|
cache_t::iterator i = std::min_element(
|
2008-02-22 05:11:04 +01:00
|
|
|
m_read_pieces.begin(), m_read_pieces.end()
|
|
|
|
, bind(&cached_piece_entry::last_use, _1)
|
|
|
|
< bind(&cached_piece_entry::last_use, _2));
|
2008-02-25 00:14:10 +01:00
|
|
|
if (i != m_read_pieces.end() && i != ignore)
|
2008-02-22 05:11:04 +01:00
|
|
|
{
|
2008-02-25 00:14:10 +01:00
|
|
|
// don't replace an entry that is less than one second old
|
2009-05-23 09:35:45 +02:00
|
|
|
if (time_now() - i->last_use < seconds(1)) return 0;
|
2009-05-24 17:32:14 +02:00
|
|
|
int blocks = 0;
|
|
|
|
if (num_blocks >= i->num_blocks)
|
|
|
|
{
|
|
|
|
blocks = free_piece(*i, l);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
// delete blocks from the start and from the end
|
|
|
|
// until num_blocks have been freed
|
|
|
|
int end = (i->storage->info()->piece_size(i->piece) + m_block_size - 1) / m_block_size - 1;
|
|
|
|
int start = 0;
|
|
|
|
|
|
|
|
while (num_blocks)
|
|
|
|
{
|
2009-06-10 10:30:55 +02:00
|
|
|
while (i->blocks[start].buf == 0 && start <= end) ++start;
|
2009-05-24 17:32:14 +02:00
|
|
|
if (start > end) break;
|
2009-06-10 10:30:55 +02:00
|
|
|
free_buffer(i->blocks[start].buf);
|
|
|
|
i->blocks[start].buf = 0;
|
2009-05-24 17:32:14 +02:00
|
|
|
++blocks;
|
|
|
|
--i->num_blocks;
|
|
|
|
--m_cache_stats.cache_size;
|
|
|
|
--m_cache_stats.read_cache_size;
|
|
|
|
--num_blocks;
|
|
|
|
if (!num_blocks) break;
|
|
|
|
|
2009-06-10 10:30:55 +02:00
|
|
|
while (i->blocks[end].buf == 0 && start <= end) --end;
|
2009-05-24 17:32:14 +02:00
|
|
|
if (start > end) break;
|
2009-06-10 10:30:55 +02:00
|
|
|
free_buffer(i->blocks[end].buf);
|
|
|
|
i->blocks[end].buf = 0;
|
2009-05-24 17:32:14 +02:00
|
|
|
++blocks;
|
|
|
|
--i->num_blocks;
|
|
|
|
--m_cache_stats.cache_size;
|
|
|
|
--m_cache_stats.read_cache_size;
|
|
|
|
--num_blocks;
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|
|
|
|
if (i->num_blocks == 0) m_read_pieces.erase(i);
|
2009-05-23 09:35:45 +02:00
|
|
|
return blocks;
|
2008-02-22 05:11:04 +01:00
|
|
|
}
|
2009-05-23 09:35:45 +02:00
|
|
|
return 0;
|
2008-02-22 05:11:04 +01:00
|
|
|
}
|
|
|
|
|
2009-05-23 09:35:45 +02:00
|
|
|
int contiguous_blocks(disk_io_thread::cached_piece_entry const& b)
|
|
|
|
{
|
|
|
|
int ret = 0;
|
|
|
|
int current = 0;
|
|
|
|
int blocks_in_piece = (b.storage->info()->piece_size(b.piece) + 16 * 1024 - 1) / (16 * 1024);
|
|
|
|
for (int i = 0; i < blocks_in_piece; ++i)
|
|
|
|
{
|
2009-06-10 10:30:55 +02:00
|
|
|
if (b.blocks[i].buf) ++current;
|
2009-05-23 09:35:45 +02:00
|
|
|
else
|
|
|
|
{
|
|
|
|
if (current > ret) ret = current;
|
|
|
|
current = 0;
|
|
|
|
}
|
|
|
|
}
|
2009-05-27 18:50:46 +02:00
|
|
|
if (current > ret) ret = current;
|
2009-05-23 09:35:45 +02:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
	// flushes the largest contiguous run of dirty blocks in piece 'e',
	// but only if that run is at least 'lower_limit' blocks long.
	// returns the number of blocks written
	int disk_io_thread::flush_contiguous_blocks(disk_io_thread::cache_t::iterator e
		, mutex_t::scoped_lock& l, int lower_limit)
	{
		// first find the largest range of contiguous blocks
		int len = 0;
		int current = 0;
		int pos = 0;
		int start = 0;
		int blocks_in_piece = (e->storage->info()->piece_size(e->piece)
			+ m_block_size - 1) / m_block_size;
		for (int i = 0; i < blocks_in_piece; ++i)
		{
			if (e->blocks[i].buf) ++current;
			else
			{
				// run ended; remember it if it is the longest so far
				if (current > len)
				{
					len = current;
					pos = start;
				}
				current = 0;
				start = i + 1;
			}
		}
		// a run extending to the last block is only counted here
		if (current > len)
		{
			len = current;
			pos = start;
		}

		if (len < lower_limit || len <= 0) return 0;
		len = flush_range(e, pos, pos + len, l);
		// drop the cache entry entirely once it holds no blocks
		if (e->num_blocks == 0) m_pieces.erase(e);
		return len;
	}
|
|
|
|
|
|
|
|
	// flushes 'blocks' blocks from the cache: read-cache blocks are
	// dropped first (they are clean); if more space is needed and
	// 'options' allows it, write-cache blocks are flushed to disk
	// using the configured eviction algorithm. returns the number of
	// blocks released
	int disk_io_thread::flush_cache_blocks(mutex_t::scoped_lock& l
		, int blocks, cache_t::iterator ignore, int options)
	{
		// first look if there are any read cache entries that can
		// be cleared
		int ret = 0;
		int tmp = 0;
		do {
			tmp = clear_oldest_read_piece(blocks, ignore, l);
			blocks -= tmp;
			ret += tmp;
		} while (tmp > 0 && blocks > 0);

		if (options & dont_flush_write_blocks) return ret;

		if (m_settings.disk_cache_algorithm == session_settings::lru)
		{
			while (blocks > 0)
			{
				// flush the least recently used piece in its entirety
				cache_t::iterator i = std::min_element(
					m_pieces.begin(), m_pieces.end()
					, bind(&cached_piece_entry::last_use, _1)
					< bind(&cached_piece_entry::last_use, _2));
				if (i == m_pieces.end()) return ret;
				tmp = flush_and_remove(i, l);
				blocks -= tmp;
				ret += tmp;
			}
		}
		else if (m_settings.disk_cache_algorithm == session_settings::largest_contiguous)
		{
			while (blocks > 0)
			{
				// flush the piece with the longest contiguous run of
				// dirty blocks, favoring large sequential writes
				cache_t::iterator i = std::max_element(
					m_pieces.begin(), m_pieces.end()
					, bind(&contiguous_blocks, _1)
					< bind(&contiguous_blocks, _2));
				if (i == m_pieces.end()) return ret;
				tmp = flush_contiguous_blocks(i, l);
				blocks -= tmp;
				ret += tmp;
			}
		}
		return ret;
	}
|
|
|
|
|
2009-05-24 02:12:53 +02:00
|
|
|
	// flushes every dirty block in piece 'e' and removes the entry
	// from the write cache. returns the number of blocks written
	int disk_io_thread::flush_and_remove(disk_io_thread::cache_t::iterator e
		, mutex_t::scoped_lock& l)
	{
		// INT_MAX upper bound is clamped to blocks_in_piece inside
		// flush_range, so the whole piece is flushed
		int ret = flush_range(e, 0, INT_MAX, l);
		m_pieces.erase(e);
		return ret;
	}
|
|
|
|
|
2009-05-24 02:12:53 +02:00
|
|
|
int disk_io_thread::flush_range(disk_io_thread::cache_t::iterator e
|
2009-05-23 09:35:45 +02:00
|
|
|
, int start, int end, mutex_t::scoped_lock& l)
|
2008-02-08 11:22:05 +01:00
|
|
|
{
|
2008-02-22 05:11:04 +01:00
|
|
|
INVARIANT_CHECK;
|
2009-05-27 18:50:46 +02:00
|
|
|
|
|
|
|
TORRENT_ASSERT(start < end);
|
|
|
|
|
2009-01-03 09:11:31 +01:00
|
|
|
// TODO: copy *e and unlink it before unlocking
|
2008-02-08 11:22:05 +01:00
|
|
|
cached_piece_entry& p = *e;
|
|
|
|
int piece_size = p.storage->info()->piece_size(p.piece);
|
2008-02-10 01:58:25 +01:00
|
|
|
#ifdef TORRENT_DISK_STATS
|
|
|
|
m_log << log_time() << " flushing " << piece_size << std::endl;
|
|
|
|
#endif
|
2008-02-08 11:22:05 +01:00
|
|
|
TORRENT_ASSERT(piece_size > 0);
|
|
|
|
|
2008-02-22 05:11:04 +01:00
|
|
|
int blocks_in_piece = (piece_size + m_block_size - 1) / m_block_size;
|
2008-02-08 11:22:05 +01:00
|
|
|
int buffer_size = 0;
|
|
|
|
int offset = 0;
|
2009-01-03 09:11:31 +01:00
|
|
|
|
|
|
|
boost::scoped_array<char> buf;
|
2009-01-11 03:02:34 +01:00
|
|
|
file::iovec_t* iov = 0;
|
2009-01-03 09:11:31 +01:00
|
|
|
int iov_counter = 0;
|
2009-01-21 08:31:49 +01:00
|
|
|
if (m_settings.coalesce_writes) buf.reset(new (std::nothrow) char[piece_size]);
|
2009-01-11 03:02:34 +01:00
|
|
|
else iov = TORRENT_ALLOCA(file::iovec_t, blocks_in_piece);
|
2009-01-03 09:11:31 +01:00
|
|
|
|
2009-05-23 09:35:45 +02:00
|
|
|
end = (std::min)(end, blocks_in_piece);
|
|
|
|
for (int i = start; i <= end; ++i)
|
2008-02-08 11:22:05 +01:00
|
|
|
{
|
2009-06-10 10:30:55 +02:00
|
|
|
if (i == end || p.blocks[i].buf == 0)
|
2008-02-08 11:22:05 +01:00
|
|
|
{
|
|
|
|
if (buffer_size == 0) continue;
|
|
|
|
|
2008-02-22 05:11:04 +01:00
|
|
|
TORRENT_ASSERT(buffer_size <= i * m_block_size);
|
2008-02-08 11:22:05 +01:00
|
|
|
l.unlock();
|
2009-01-03 09:11:31 +01:00
|
|
|
if (iov)
|
|
|
|
{
|
2009-01-11 03:02:34 +01:00
|
|
|
p.storage->write_impl(iov, p.piece, (std::min)(
|
2009-01-03 09:11:31 +01:00
|
|
|
i * m_block_size, piece_size) - buffer_size, iov_counter);
|
|
|
|
iov_counter = 0;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
TORRENT_ASSERT(buf);
|
|
|
|
file::iovec_t b = { buf.get(), buffer_size };
|
|
|
|
p.storage->write_impl(&b, p.piece, (std::min)(
|
|
|
|
i * m_block_size, piece_size) - buffer_size, 1);
|
|
|
|
}
|
2008-02-08 11:22:05 +01:00
|
|
|
l.lock();
|
2008-02-22 05:11:04 +01:00
|
|
|
++m_cache_stats.writes;
|
2008-02-08 11:22:05 +01:00
|
|
|
// std::cerr << " flushing p: " << p.piece << " bytes: " << buffer_size << std::endl;
|
|
|
|
buffer_size = 0;
|
|
|
|
offset = 0;
|
|
|
|
continue;
|
|
|
|
}
|
2008-02-22 05:11:04 +01:00
|
|
|
int block_size = (std::min)(piece_size - i * m_block_size, m_block_size);
|
2008-02-08 11:22:05 +01:00
|
|
|
TORRENT_ASSERT(offset + block_size <= piece_size);
|
|
|
|
TORRENT_ASSERT(offset + block_size > 0);
|
2008-02-22 05:11:04 +01:00
|
|
|
if (!buf)
|
|
|
|
{
|
2009-06-10 10:30:55 +02:00
|
|
|
iov[iov_counter].iov_base = p.blocks[i].buf;
|
2009-01-03 09:11:31 +01:00
|
|
|
iov[iov_counter].iov_len = block_size;
|
|
|
|
++iov_counter;
|
2008-02-22 05:11:04 +01:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2009-06-10 10:30:55 +02:00
|
|
|
std::memcpy(buf.get() + offset, p.blocks[i].buf, block_size);
|
2008-02-22 05:11:04 +01:00
|
|
|
offset += m_block_size;
|
|
|
|
}
|
2009-01-03 09:11:31 +01:00
|
|
|
buffer_size += block_size;
|
2008-02-22 05:11:04 +01:00
|
|
|
TORRENT_ASSERT(p.num_blocks > 0);
|
|
|
|
--p.num_blocks;
|
|
|
|
++m_cache_stats.blocks_written;
|
|
|
|
--m_cache_stats.cache_size;
|
2008-02-08 11:22:05 +01:00
|
|
|
}
|
2009-01-03 09:11:31 +01:00
|
|
|
|
2009-05-24 02:12:53 +02:00
|
|
|
int ret = 0;
|
2009-06-10 10:30:55 +02:00
|
|
|
disk_io_job j;
|
|
|
|
j.storage = p.storage;
|
|
|
|
j.action = disk_io_job::write;
|
|
|
|
j.buffer = 0;
|
|
|
|
j.piece = p.piece;
|
|
|
|
test_error(j);
|
2009-05-23 09:35:45 +02:00
|
|
|
for (int i = start; i < end; ++i)
|
2009-01-03 09:11:31 +01:00
|
|
|
{
|
2009-06-10 10:30:55 +02:00
|
|
|
if (p.blocks[i].buf == 0) continue;
|
|
|
|
j.buffer_size = (std::min)(piece_size - i * m_block_size, m_block_size);
|
|
|
|
int result = j.error ? -1 : j.buffer_size;
|
|
|
|
j.offset = i * m_block_size;
|
|
|
|
free_buffer(p.blocks[i].buf);
|
|
|
|
post_callback(p.blocks[i].callback, j, result);
|
|
|
|
p.blocks[i].callback.clear();
|
|
|
|
p.blocks[i].buf = 0;
|
2009-05-24 02:12:53 +02:00
|
|
|
++ret;
|
2009-01-03 09:11:31 +01:00
|
|
|
}
|
|
|
|
|
2008-02-08 11:22:05 +01:00
|
|
|
TORRENT_ASSERT(buffer_size == 0);
|
2008-02-22 05:11:04 +01:00
|
|
|
// std::cerr << " flushing p: " << p.piece << " cached_blocks: " << m_cache_stats.cache_size << std::endl;
|
2008-11-29 22:33:21 +01:00
|
|
|
#ifdef TORRENT_DEBUG
|
2009-05-23 09:35:45 +02:00
|
|
|
for (int i = start; i < end; ++i)
|
2009-06-10 10:30:55 +02:00
|
|
|
TORRENT_ASSERT(p.blocks[i].buf == 0);
|
2008-02-08 11:22:05 +01:00
|
|
|
#endif
|
2009-05-24 02:12:53 +02:00
|
|
|
return ret;
|
2008-02-08 11:22:05 +01:00
|
|
|
}
|
|
|
|
|
2009-05-23 20:39:55 +02:00
|
|
|
	// returns -1 on failure
	// creates a new write-cache entry for the piece referenced by j and
	// stores j.buffer as the cached block at j.offset. The block's
	// completion handler is moved into the cache entry and invoked later,
	// when the block is flushed to disk.
	// l is the lock passed down from the caller (presumably holding
	// m_piece_mutex -- confirm at call sites); it is only forwarded to
	// find_cached_piece here, never released.
	int disk_io_thread::cache_block(disk_io_job& j
		, boost::function<void(int,disk_io_job const&)>& handler
		, mutex_t::scoped_lock& l)
	{
		INVARIANT_CHECK;
		// the piece must not already have a write-cache entry
		TORRENT_ASSERT(find_cached_piece(m_pieces, j, l) == m_pieces.end());
		// cached writes are expected to be aligned to block boundaries
		TORRENT_ASSERT((j.offset & (m_block_size-1)) == 0);
		cached_piece_entry p;

		int piece_size = j.storage->info()->piece_size(j.piece);
		int blocks_in_piece = (piece_size + m_block_size - 1) / m_block_size;
		// there's no point in caching the piece if
		// there's only one block in it
		if (blocks_in_piece <= 1) return -1;

#ifdef TORRENT_DISK_STATS
		rename_buffer(j.buffer, "write cache");
#endif

		p.piece = j.piece;
		p.storage = j.storage;
		p.last_use = time_now();
		p.num_blocks = 1;
		// nothrow new: an allocation failure is reported as -1
		// instead of throwing from the disk thread
		p.blocks.reset(new (std::nothrow) cached_block_entry[blocks_in_piece]);
		if (!p.blocks) return -1;
		int block = j.offset / m_block_size;
//		std::cerr << " adding cache entry for p: " << j.piece << " block: " << block << " cached_blocks: " << m_cache_stats.cache_size << std::endl;
		// the cache entry takes over the job's buffer; swap avoids
		// copying the boost::function handler
		p.blocks[block].buf = j.buffer;
		p.blocks[block].callback.swap(handler);
		++m_cache_stats.cache_size;
		m_pieces.push_back(p);
		return 0;
	}
|
|
|
|
|
2008-02-22 05:11:04 +01:00
|
|
|
	// fills a piece with data from disk, returns the total number of bytes
	// read or -1 if there was an error
	// returns -2 if no new blocks could be allocated (cache full or
	// buffer allocation failed before any block was read)
	//
	// p           cache entry to fill; p.blocks must already be allocated
	// start_block index of the first block to read into
	// options     ignore_cache_size allows reading past the cache limit
	// num_blocks  upper bound on the number of blocks to read
	// l           lock held on entry; it is released around the actual
	//             disk I/O calls and re-acquired before returning
	int disk_io_thread::read_into_piece(cached_piece_entry& p, int start_block
		, int options, int num_blocks, mutex_t::scoped_lock& l)
	{
		int piece_size = p.storage->info()->piece_size(p.piece);
		int blocks_in_piece = (piece_size + m_block_size - 1) / m_block_size;

		// first allocate cache buffers for the contiguous run of
		// missing blocks, starting at start_block
		int end_block = start_block;
		int num_read = 0;
		for (int i = start_block; i < blocks_in_piece
			&& (in_use() < m_settings.cache_size
			|| (options & ignore_cache_size)); ++i)
		{
			// this is a block that is already allocated
			// stop allocating and don't read more than
			// what we've allocated now
			if (p.blocks[i].buf) break;
			p.blocks[i].buf = allocate_buffer("read cache");

			// the allocation failed, break
			if (p.blocks[i].buf == 0) break;
			++p.num_blocks;
			++m_cache_stats.cache_size;
			++m_cache_stats.read_cache_size;
			++end_block;
			++num_read;
			if (num_read >= num_blocks) break;
		}

		// nothing could be allocated at all
		if (end_block == start_block) return -2;

		// the buffer_size is the size of the buffer we need to read
		// all these blocks.
		const int buffer_size = (std::min)((end_block - start_block) * m_block_size
			, piece_size - start_block * m_block_size);
		TORRENT_ASSERT(buffer_size <= piece_size);
		TORRENT_ASSERT(buffer_size + start_block * m_block_size <= piece_size);
		boost::scoped_array<char> buf;
		file::iovec_t* iov = 0;
		int iov_counter = 0;
		// either read everything into one contiguous buffer (coalesced)
		// or scatter directly into the individual cache blocks via iovecs
		if (m_settings.coalesce_reads) buf.reset(new (std::nothrow) char[buffer_size]);
		if (!buf) iov = TORRENT_ALLOCA(file::iovec_t, end_block - start_block);

		int ret = 0;
		if (buf)
		{
			// drop the lock while performing the blocking disk read
			l.unlock();
			file::iovec_t b = { buf.get(), buffer_size };
			ret = p.storage->read_impl(&b, p.piece, start_block * m_block_size, 1);
			l.lock();
			if (p.storage->error()) return -1;
			if (ret != buffer_size)
			{
				// this means the file wasn't big enough for this read
				p.storage->get_storage_impl()->set_error(""
					, error_code(errors::file_too_short, libtorrent_category));
				return -1;
			}
			++m_cache_stats.reads;
		}

		// copy the coalesced buffer into the cache blocks, or fill in
		// the iovec array pointing at the cache blocks
		int piece_offset = start_block * m_block_size;
		int offset = 0;
		for (int i = start_block; i < end_block; ++i)
		{
			// the last block of the piece may be shorter than m_block_size
			int block_size = (std::min)(piece_size - piece_offset, m_block_size);
			if (p.blocks[i].buf == 0) break;
			TORRENT_ASSERT(offset <= buffer_size);
			TORRENT_ASSERT(piece_offset <= piece_size);
			TORRENT_ASSERT(offset + block_size <= buffer_size);
			if (buf)
			{
				std::memcpy(p.blocks[i].buf, buf.get() + offset, block_size);
			}
			else
			{
				iov[iov_counter].iov_base = p.blocks[i].buf;
				iov[iov_counter].iov_len = block_size;
				++iov_counter;
			}
			offset += m_block_size;
			piece_offset += m_block_size;
		}

		if (iov)
		{
			// scatter-read straight into the cache blocks, again with
			// the lock released around the disk I/O
			l.unlock();
			ret = p.storage->read_impl(iov, p.piece, start_block * m_block_size, iov_counter);
			l.lock();
			if (p.storage->error()) return -1;
			if (ret != buffer_size)
			{
				// this means the file wasn't big enough for this read
				p.storage->get_storage_impl()->set_error(""
					, error_code(errors::file_too_short, libtorrent_category));
				return -1;
			}
			++m_cache_stats.reads;
		}

		TORRENT_ASSERT(ret <= buffer_size);
		TORRENT_ASSERT(ret == buffer_size || p.storage->error());
		return (ret != buffer_size) ? -1 : ret;
	}
|
|
|
|
|
2009-02-03 08:46:24 +01:00
|
|
|
	// returns -1 on read error, -2 on out of memory error or the number of bytes read
	// this function ignores the cache size limit, it will read the entire
	// piece regardless of the offset in j
	// this is used for seed-mode, where we need to read the entire piece to calculate
	// the hash
	// l is the lock forwarded to flush_cache_blocks/read_into_piece
	// (presumably holding m_piece_mutex -- confirm at call sites)
	int disk_io_thread::cache_read_piece(disk_io_job const& j, mutex_t::scoped_lock& l)
	{
		INVARIANT_CHECK;

		int piece_size = j.storage->info()->piece_size(j.piece);
		int blocks_in_piece = (piece_size + m_block_size - 1) / m_block_size;

		// make room in the cache for the whole piece before reading it
		if (in_use() + blocks_in_piece > m_settings.cache_size)
			flush_cache_blocks(l, in_use() + blocks_in_piece - m_settings.cache_size, m_read_pieces.end());

		cached_piece_entry p;
		p.piece = j.piece;
		p.storage = j.storage;
		p.last_use = time_now();
		p.num_blocks = 0;
		// nothrow new: report allocation failure as -1 instead of throwing
		p.blocks.reset(new (std::nothrow) cached_block_entry[blocks_in_piece]);
		if (!p.blocks) return -1;
		// read the whole piece (INT_MAX blocks) from offset 0,
		// bypassing the cache size limit
		int ret = read_into_piece(p, 0, ignore_cache_size, INT_MAX, l);

		// only keep the entry in the read cache if the read succeeded
		if (ret < 0)
			free_piece(p, l);
		else
			m_read_pieces.push_back(p);

		return ret;
	}
|
|
|
|
|
2008-02-22 05:11:04 +01:00
|
|
|
	// returns -1 on read error, -2 if there isn't any space in the cache
	// or the number of bytes read
	// reads a cache line (a limited run of blocks) starting at the block
	// containing j.offset, creating a new read-cache entry for the piece
	int disk_io_thread::cache_read_block(disk_io_job const& j, mutex_t::scoped_lock& l)
	{
		INVARIANT_CHECK;

		int piece_size = j.storage->info()->piece_size(j.piece);
		int blocks_in_piece = (piece_size + m_block_size - 1) / m_block_size;

		int start_block = j.offset / m_block_size;

		// limit the read-ahead to: the remainder of the piece, half the
		// free cache space (but at least 3 blocks), and the configured
		// read cache line size
		int blocks_to_read = blocks_in_piece - start_block;
		blocks_to_read = (std::min)(blocks_to_read, (std::max)((m_settings.cache_size
			+ m_cache_stats.read_cache_size - in_use())/2, 3));
		blocks_to_read = (std::min)(blocks_to_read, m_settings.read_cache_line_size);

		// try to make room; only read blocks may be evicted here. if
		// nothing could be flushed, give up rather than overflow the cache
		if (in_use() + blocks_to_read > m_settings.cache_size)
			if (flush_cache_blocks(l, in_use() + blocks_to_read - m_settings.cache_size
				, m_read_pieces.end(), dont_flush_write_blocks) == 0)
				return -2;

		cached_piece_entry p;
		p.piece = j.piece;
		p.storage = j.storage;
		p.last_use = time_now();
		p.num_blocks = 0;
		// nothrow new: report allocation failure as -1 instead of throwing
		p.blocks.reset(new (std::nothrow) cached_block_entry[blocks_in_piece]);
		if (!p.blocks) return -1;
		int ret = read_into_piece(p, start_block, 0, blocks_to_read, l);

		// only keep the entry in the read cache if the read succeeded
		if (ret < 0)
			free_piece(p, l);
		else
			m_read_pieces.push_back(p);

		return ret;
	}
|
|
|
|
|
2008-11-29 22:33:21 +01:00
|
|
|
#ifdef TORRENT_DEBUG
|
2008-02-22 05:11:04 +01:00
|
|
|
void disk_io_thread::check_invariant() const
|
|
|
|
{
|
|
|
|
int cached_write_blocks = 0;
|
2008-02-25 00:14:10 +01:00
|
|
|
for (cache_t::const_iterator i = m_pieces.begin()
|
2008-02-22 05:11:04 +01:00
|
|
|
, end(m_pieces.end()); i != end; ++i)
|
|
|
|
{
|
|
|
|
cached_piece_entry const& p = *i;
|
|
|
|
TORRENT_ASSERT(p.blocks);
|
|
|
|
|
2008-06-09 06:46:34 +02:00
|
|
|
if (!p.storage) continue;
|
2008-02-22 05:11:04 +01:00
|
|
|
int piece_size = p.storage->info()->piece_size(p.piece);
|
|
|
|
int blocks_in_piece = (piece_size + m_block_size - 1) / m_block_size;
|
|
|
|
int blocks = 0;
|
|
|
|
for (int k = 0; k < blocks_in_piece; ++k)
|
|
|
|
{
|
2009-06-10 10:30:55 +02:00
|
|
|
if (p.blocks[k].buf)
|
2008-02-22 05:11:04 +01:00
|
|
|
{
|
2008-04-09 07:19:11 +02:00
|
|
|
#ifndef TORRENT_DISABLE_POOL_ALLOCATOR
|
2009-06-10 10:30:55 +02:00
|
|
|
TORRENT_ASSERT(is_disk_buffer(p.blocks[k].buf));
|
2008-04-09 07:19:11 +02:00
|
|
|
#endif
|
2008-02-22 05:11:04 +01:00
|
|
|
++blocks;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// TORRENT_ASSERT(blocks == p.num_blocks);
|
|
|
|
cached_write_blocks += blocks;
|
|
|
|
}
|
|
|
|
|
|
|
|
int cached_read_blocks = 0;
|
2008-02-25 00:14:10 +01:00
|
|
|
for (cache_t::const_iterator i = m_read_pieces.begin()
|
2008-02-22 05:11:04 +01:00
|
|
|
, end(m_read_pieces.end()); i != end; ++i)
|
|
|
|
{
|
|
|
|
cached_piece_entry const& p = *i;
|
|
|
|
TORRENT_ASSERT(p.blocks);
|
|
|
|
|
|
|
|
int piece_size = p.storage->info()->piece_size(p.piece);
|
|
|
|
int blocks_in_piece = (piece_size + m_block_size - 1) / m_block_size;
|
|
|
|
int blocks = 0;
|
|
|
|
for (int k = 0; k < blocks_in_piece; ++k)
|
|
|
|
{
|
2009-06-10 10:30:55 +02:00
|
|
|
if (p.blocks[k].buf)
|
2008-02-22 05:11:04 +01:00
|
|
|
{
|
2008-04-09 07:19:11 +02:00
|
|
|
#ifndef TORRENT_DISABLE_POOL_ALLOCATOR
|
2009-06-10 10:30:55 +02:00
|
|
|
TORRENT_ASSERT(is_disk_buffer(p.blocks[k].buf));
|
2008-04-09 07:19:11 +02:00
|
|
|
#endif
|
2008-02-22 05:11:04 +01:00
|
|
|
++blocks;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// TORRENT_ASSERT(blocks == p.num_blocks);
|
|
|
|
cached_read_blocks += blocks;
|
|
|
|
}
|
|
|
|
|
|
|
|
TORRENT_ASSERT(cached_read_blocks + cached_write_blocks == m_cache_stats.cache_size);
|
|
|
|
TORRENT_ASSERT(cached_read_blocks == m_cache_stats.read_cache_size);
|
|
|
|
|
2009-05-19 09:00:05 +02:00
|
|
|
#ifdef TORRENT_DISK_STATS
|
|
|
|
int read_allocs = m_categories.find(std::string("read cache"))->second;
|
|
|
|
int write_allocs = m_categories.find(std::string("write cache"))->second;
|
|
|
|
TORRENT_ASSERT(cached_read_blocks == read_allocs);
|
|
|
|
TORRENT_ASSERT(cached_write_blocks == write_allocs);
|
|
|
|
#endif
|
|
|
|
|
2008-02-22 05:11:04 +01:00
|
|
|
// when writing, there may be a one block difference, right before an old piece
|
|
|
|
// is flushed
|
2009-01-21 08:31:49 +01:00
|
|
|
TORRENT_ASSERT(m_cache_stats.cache_size <= m_settings.cache_size + 1);
|
2008-02-22 05:11:04 +01:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2009-02-03 08:46:24 +01:00
|
|
|
	// makes sure the entire piece referenced by j is in the read cache
	// (reading it from disk if necessary), computes its SHA-1 hash into h,
	// and copies the requested range into j.buffer.
	// returns the number of bytes copied (j.buffer_size) or a negative
	// error code propagated from cache_read_piece/copy_from_piece
	int disk_io_thread::read_piece_from_cache_and_hash(disk_io_job const& j, sha1_hash& h)
	{
		TORRENT_ASSERT(j.buffer);

		mutex_t::scoped_lock l(m_piece_mutex);

		cache_t::iterator p
			= find_cached_piece(m_read_pieces, j, l);

		bool hit = true;
		int ret = 0;

		// if the piece cannot be found in the cache,
		// read the whole piece starting at the block
		// we got a request for.
		if (p == m_read_pieces.end())
		{
			ret = cache_read_piece(j, l);
			hit = false;
			if (ret < 0) return ret;
			// cache_read_piece appends the new entry at the back
			p = m_read_pieces.end();
			--p;
			TORRENT_ASSERT(!m_read_pieces.empty());
			TORRENT_ASSERT(p->piece == j.piece);
			TORRENT_ASSERT(p->storage == j.storage);
		}

		hasher ctx;

		int piece_size = j.storage->info()->piece_size(j.piece);
		int blocks_in_piece = (piece_size + m_block_size - 1) / m_block_size;

		// hash all blocks in order; piece_size counts down so the last
		// (possibly partial) block is hashed with its true length
		for (int i = 0; i < blocks_in_piece; ++i)
		{
			TORRENT_ASSERT(p->blocks[i].buf);
			ctx.update((char const*)p->blocks[i].buf, (std::min)(piece_size, m_block_size));
			piece_size -= m_block_size;
		}
		h = ctx.final();

		ret = copy_from_piece(p, hit, j, l);
		TORRENT_ASSERT(ret > 0);
		if (ret < 0) return ret;

		// if read cache is disabled or we exceeded the
		// limit, remove this piece from the cache
		if (in_use() >= m_settings.cache_size
			|| !m_settings.use_read_cache)
		{
			TORRENT_ASSERT(!m_read_pieces.empty());
			TORRENT_ASSERT(p->piece == j.piece);
			TORRENT_ASSERT(p->storage == j.storage);
			if (p != m_read_pieces.end())
			{
				free_piece(*p, l);
				m_read_pieces.erase(p);
			}
		}

		ret = j.buffer_size;
		++m_cache_stats.blocks_read;
		if (hit) ++m_cache_stats.blocks_read_hit;
		return ret;
	}
|
|
|
|
|
|
|
|
	// copies j.buffer_size bytes starting at j.offset from the cached
	// piece *p into j.buffer, reading any missing blocks from disk first.
	// sets hit to false if a disk read was required.
	// returns j.buffer_size on success, -1 on read error, -2 if the
	// missing blocks could not be brought into the cache
	int disk_io_thread::copy_from_piece(cache_t::iterator p, bool& hit
		, disk_io_job const& j, mutex_t::scoped_lock& l)
	{
		TORRENT_ASSERT(j.buffer);

		// copy from the cache and update the last use timestamp
		int block = j.offset / m_block_size;
		int block_offset = j.offset & (m_block_size-1);
		int buffer_offset = 0;
		int size = j.buffer_size;
		// an unaligned request straddles two blocks
		int min_blocks_to_read = block_offset > 0 ? 2 : 1;
		TORRENT_ASSERT(size <= m_block_size);
		int start_block = block;
		// if the first of the two blocks is already cached, only the
		// second one may need to be read
		if (p->blocks[start_block].buf != 0 && min_blocks_to_read > 1)
			++start_block;
		// if block_offset > 0, we need to read two blocks, and then
		// copy parts of both, because it's not aligned to the block
		// boundaries
		if (p->blocks[start_block].buf == 0)
		{
			int piece_size = j.storage->info()->piece_size(j.piece);
			int blocks_in_piece = (piece_size + m_block_size - 1) / m_block_size;
			// extend the read over the contiguous run of missing blocks
			int end_block = start_block;
			while (end_block < blocks_in_piece && p->blocks[end_block].buf == 0) ++end_block;

			// limit read-ahead the same way cache_read_block does, but
			// never below what this request strictly needs
			int blocks_to_read = end_block - block;
			blocks_to_read = (std::min)(blocks_to_read, (std::max)((m_settings.cache_size
				+ m_cache_stats.read_cache_size - in_use())/2, 3));
			blocks_to_read = (std::min)(blocks_to_read, m_settings.read_cache_line_size);
			blocks_to_read = (std::max)(blocks_to_read, min_blocks_to_read);
			// make room by flushing read blocks only; if nothing could
			// be flushed, fail rather than overflow the cache
			if (in_use() + blocks_to_read > m_settings.cache_size)
				if (flush_cache_blocks(l, in_use() + blocks_to_read - m_settings.cache_size
					, p, dont_flush_write_blocks) == 0)
					return -2;

			int ret = read_into_piece(*p, block, 0, blocks_to_read, l);
			hit = false;
			if (ret < 0) return ret;
			// not enough was read to satisfy this request
			if (ret < size + block_offset) return -2;
			TORRENT_ASSERT(p->blocks[block].buf);
		}

		p->last_use = time_now();
		// copy out the requested range, possibly spanning two blocks.
		// after the first iteration block_offset is reset to 0
		while (size > 0)
		{
			TORRENT_ASSERT(p->blocks[block].buf);
			int to_copy = (std::min)(m_block_size
				- block_offset, size);
			std::memcpy(j.buffer + buffer_offset
				, p->blocks[block].buf + block_offset
				, to_copy);
			size -= to_copy;
			block_offset = 0;
			buffer_offset += to_copy;
			++block;
		}
		return j.buffer_size;
	}
|
|
|
|
|
|
|
|
	// serves the read request j from the read cache, pulling a cache
	// line in from disk on a miss.
	// returns the number of bytes copied (j.buffer_size), -1 on read
	// error, or -2 if the read cache is disabled or the blocks could
	// not be cached
	int disk_io_thread::try_read_from_cache(disk_io_job const& j)
	{
		TORRENT_ASSERT(j.buffer);

		mutex_t::scoped_lock l(m_piece_mutex);
		if (!m_settings.use_read_cache) return -2;

		cache_t::iterator p
			= find_cached_piece(m_read_pieces, j, l);

		bool hit = true;
		int ret = 0;

		// if the piece cannot be found in the cache,
		// read the whole piece starting at the block
		// we got a request for.
		if (p == m_read_pieces.end())
		{
			ret = cache_read_block(j, l);
			hit = false;
			if (ret < 0) return ret;
			// cache_read_block appends the new entry at the back
			p = m_read_pieces.end();
			--p;
			TORRENT_ASSERT(!m_read_pieces.empty());
			TORRENT_ASSERT(p->piece == j.piece);
			TORRENT_ASSERT(p->storage == j.storage);
		}

		if (p == m_read_pieces.end()) return ret;

		ret = copy_from_piece(p, hit, j, l);
		if (ret < 0) return ret;

		ret = j.buffer_size;
		++m_cache_stats.blocks_read;
		if (hit) ++m_cache_stats.blocks_read_hit;
		return ret;
	}
|
|
|
|
|
2009-07-21 06:32:27 +02:00
|
|
|
size_type disk_io_thread::queue_buffer_size() const
|
2009-06-10 10:30:55 +02:00
|
|
|
{
|
|
|
|
mutex_t::scoped_lock l(m_queue_mutex);
|
2009-07-21 06:32:27 +02:00
|
|
|
return m_queue_buffer_size;
|
2009-06-10 10:30:55 +02:00
|
|
|
}
|
|
|
|
|
2007-06-10 22:46:09 +02:00
|
|
|
void disk_io_thread::add_job(disk_io_job const& j
|
|
|
|
, boost::function<void(int, disk_io_job const&)> const& f)
|
|
|
|
{
|
2009-05-13 19:17:33 +02:00
|
|
|
TORRENT_ASSERT(!m_abort);
|
2007-10-05 02:30:00 +02:00
|
|
|
TORRENT_ASSERT(!j.callback);
|
2009-01-28 08:09:10 +01:00
|
|
|
TORRENT_ASSERT(j.storage
|
|
|
|
|| j.action == disk_io_job::abort_thread
|
|
|
|
|| j.action == disk_io_job::update_settings);
|
2008-02-22 05:11:04 +01:00
|
|
|
TORRENT_ASSERT(j.buffer_size <= m_block_size);
|
2008-06-12 06:40:37 +02:00
|
|
|
mutex_t::scoped_lock l(m_queue_mutex);
|
2008-02-08 11:22:05 +01:00
|
|
|
|
2008-02-26 21:08:33 +01:00
|
|
|
std::list<disk_io_job>::reverse_iterator i = m_jobs.rbegin();
|
2007-06-10 22:46:09 +02:00
|
|
|
if (j.action == disk_io_job::read)
|
|
|
|
{
|
|
|
|
// when we're reading, we may not skip
|
|
|
|
// ahead of any write operation that overlaps
|
|
|
|
// the region we're reading
|
2007-10-04 00:30:40 +02:00
|
|
|
for (; i != m_jobs.rend(); i++)
|
2007-06-10 22:46:09 +02:00
|
|
|
{
|
2007-10-04 00:30:40 +02:00
|
|
|
// if *i should come before j, stop
|
|
|
|
// and insert j before i
|
|
|
|
if (*i < j) break;
|
|
|
|
// if we come across a write operation that
|
|
|
|
// overlaps the region we're reading, we need
|
|
|
|
// to stop
|
2007-06-10 22:46:09 +02:00
|
|
|
if (i->action == disk_io_job::write
|
|
|
|
&& i->storage == j.storage
|
|
|
|
&& i->piece == j.piece
|
|
|
|
&& range_overlap(i->offset, i->buffer_size
|
|
|
|
, j.offset, j.buffer_size))
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else if (j.action == disk_io_job::write)
|
|
|
|
{
|
|
|
|
for (; i != m_jobs.rend(); ++i)
|
|
|
|
{
|
2007-10-04 00:30:40 +02:00
|
|
|
if (*i < j)
|
2007-06-10 22:46:09 +02:00
|
|
|
{
|
|
|
|
if (i != m_jobs.rbegin()
|
|
|
|
&& i.base()->storage.get() != j.storage.get())
|
|
|
|
i = m_jobs.rbegin();
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2007-10-04 00:30:40 +02:00
|
|
|
// if we are placed in front of all other jobs, put it on the back of
|
|
|
|
// the queue, to sweep the disk in the same direction, and to avoid
|
|
|
|
// starvation. The exception is if the priority is higher than the
|
|
|
|
// job at the front of the queue
|
|
|
|
if (i == m_jobs.rend() && (m_jobs.empty() || j.priority <= m_jobs.back().priority))
|
|
|
|
i = m_jobs.rbegin();
|
2007-06-10 22:46:09 +02:00
|
|
|
|
2008-02-26 21:08:33 +01:00
|
|
|
std::list<disk_io_job>::iterator k = m_jobs.insert(i.base(), j);
|
2007-06-10 22:46:09 +02:00
|
|
|
k->callback.swap(const_cast<boost::function<void(int, disk_io_job const&)>&>(f));
|
|
|
|
if (j.action == disk_io_job::write)
|
|
|
|
m_queue_buffer_size += j.buffer_size;
|
|
|
|
m_signal.notify_all();
|
|
|
|
}
|
|
|
|
|
2008-07-18 01:41:46 +02:00
|
|
|
bool disk_io_thread::test_error(disk_io_job& j)
|
|
|
|
{
|
2009-01-28 08:09:10 +01:00
|
|
|
TORRENT_ASSERT(j.storage);
|
2008-07-18 01:41:46 +02:00
|
|
|
error_code const& ec = j.storage->error();
|
|
|
|
if (ec)
|
|
|
|
{
|
2009-05-23 05:05:21 +02:00
|
|
|
j.buffer = 0;
|
2009-06-10 10:30:55 +02:00
|
|
|
j.str.clear();
|
2008-07-18 01:41:46 +02:00
|
|
|
j.error = ec;
|
|
|
|
j.error_file = j.storage->error_file();
|
2008-11-29 22:33:21 +01:00
|
|
|
#ifdef TORRENT_DEBUG
|
2009-06-10 10:30:55 +02:00
|
|
|
std::cout << "ERROR: '" << ec.message() << " in "
|
2009-06-01 00:41:53 +02:00
|
|
|
<< j.error_file << std::endl;
|
2008-07-18 01:41:46 +02:00
|
|
|
#endif
|
2009-06-10 10:30:55 +02:00
|
|
|
j.storage->clear_error();
|
2008-07-18 01:41:46 +02:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2009-03-31 10:05:46 +02:00
|
|
|
void disk_io_thread::post_callback(
|
|
|
|
boost::function<void(int, disk_io_job const&)> const& handler
|
|
|
|
, disk_io_job const& j, int ret)
|
|
|
|
{
|
|
|
|
if (!handler) return;
|
|
|
|
|
|
|
|
m_ios.post(bind(handler, ret, j));
|
|
|
|
}
|
|
|
|
|
2007-06-10 22:46:09 +02:00
|
|
|
void disk_io_thread::operator()()
|
|
|
|
{
|
|
|
|
for (;;)
|
|
|
|
{
|
2007-09-17 10:15:54 +02:00
|
|
|
#ifdef TORRENT_DISK_STATS
|
|
|
|
m_log << log_time() << " idle" << std::endl;
|
|
|
|
#endif
|
2008-06-12 06:40:37 +02:00
|
|
|
mutex_t::scoped_lock jl(m_queue_mutex);
|
2008-03-08 07:06:31 +01:00
|
|
|
|
2007-06-10 22:46:09 +02:00
|
|
|
while (m_jobs.empty() && !m_abort)
|
2009-03-14 10:24:58 +01:00
|
|
|
{
|
|
|
|
// if there hasn't been an event in one second
|
|
|
|
// see if we should flush the cache
|
2009-04-15 18:32:05 +02:00
|
|
|
// if (!m_signal.timed_wait(jl, boost::posix_time::seconds(1)))
|
|
|
|
// flush_expired_pieces();
|
|
|
|
m_signal.wait(jl);
|
2009-03-14 10:24:58 +01:00
|
|
|
}
|
|
|
|
|
2008-06-09 06:46:34 +02:00
|
|
|
if (m_abort && m_jobs.empty())
|
|
|
|
{
|
2008-06-12 06:40:37 +02:00
|
|
|
jl.unlock();
|
|
|
|
|
|
|
|
mutex_t::scoped_lock l(m_piece_mutex);
|
2008-06-09 06:46:34 +02:00
|
|
|
// flush all disk caches
|
|
|
|
for (cache_t::iterator i = m_pieces.begin()
|
|
|
|
, end(m_pieces.end()); i != end; ++i)
|
2009-05-23 09:35:45 +02:00
|
|
|
flush_range(i, 0, INT_MAX, l);
|
2008-06-09 06:46:34 +02:00
|
|
|
for (cache_t::iterator i = m_read_pieces.begin()
|
|
|
|
, end(m_read_pieces.end()); i != end; ++i)
|
|
|
|
free_piece(*i, l);
|
|
|
|
m_pieces.clear();
|
|
|
|
m_read_pieces.clear();
|
2009-05-13 19:17:33 +02:00
|
|
|
// release the io_service to allow the run() call to return
|
|
|
|
// we do this once we stop posting new callbacks to it.
|
|
|
|
m_work.reset();
|
2008-06-09 06:46:34 +02:00
|
|
|
return;
|
|
|
|
}
|
2007-06-10 22:46:09 +02:00
|
|
|
|
2008-04-13 00:08:07 +02:00
|
|
|
// if there's a buffer in this job, it will be freed
|
|
|
|
// when this holder is destructed, unless it has been
|
|
|
|
// released.
|
|
|
|
disk_buffer_holder holder(*this
|
|
|
|
, m_jobs.front().action != disk_io_job::check_fastresume
|
2009-01-28 08:09:10 +01:00
|
|
|
&& m_jobs.front().action != disk_io_job::update_settings
|
2008-04-13 00:08:07 +02:00
|
|
|
? m_jobs.front().buffer : 0);
|
|
|
|
|
2007-06-10 22:46:09 +02:00
|
|
|
boost::function<void(int, disk_io_job const&)> handler;
|
|
|
|
handler.swap(m_jobs.front().callback);
|
2008-03-08 07:06:31 +01:00
|
|
|
|
2007-06-10 22:46:09 +02:00
|
|
|
disk_io_job j = m_jobs.front();
|
|
|
|
m_jobs.pop_front();
|
2009-07-26 21:54:16 +02:00
|
|
|
if (j.action == disk_io_job::write)
|
|
|
|
{
|
|
|
|
TORRENT_ASSERT(m_queue_buffer_size >= j.buffer_size);
|
|
|
|
m_queue_buffer_size -= j.buffer_size;
|
|
|
|
}
|
2008-02-10 01:58:25 +01:00
|
|
|
|
2009-07-26 21:54:16 +02:00
|
|
|
bool post = false;
|
2009-06-10 10:30:55 +02:00
|
|
|
if (m_queue_buffer_size + j.buffer_size >= m_settings.max_queued_disk_bytes
|
|
|
|
&& m_queue_buffer_size < m_settings.max_queued_disk_bytes
|
2009-07-21 06:32:27 +02:00
|
|
|
&& m_queue_callback
|
|
|
|
&& m_settings.max_queued_disk_bytes > 0)
|
2009-06-10 10:30:55 +02:00
|
|
|
{
|
|
|
|
// we just dropped below the high watermark of number of bytes
|
|
|
|
// queued for writing to the disk. Notify the session so that it
|
|
|
|
// can trigger all the connections waiting for this event
|
2009-07-26 21:54:16 +02:00
|
|
|
post = true;
|
2009-06-10 10:30:55 +02:00
|
|
|
}
|
2009-07-26 21:54:16 +02:00
|
|
|
jl.unlock();
|
|
|
|
|
|
|
|
if (post) m_ios.post(m_queue_callback);
|
2009-06-10 10:30:55 +02:00
|
|
|
|
2008-06-12 06:40:37 +02:00
|
|
|
flush_expired_pieces();
|
2007-06-10 22:46:09 +02:00
|
|
|
|
|
|
|
int ret = 0;
|
|
|
|
|
2009-01-28 08:09:10 +01:00
|
|
|
TORRENT_ASSERT(j.storage
|
|
|
|
|| j.action == disk_io_job::abort_thread
|
|
|
|
|| j.action == disk_io_job::update_settings);
|
2007-09-17 10:15:54 +02:00
|
|
|
#ifdef TORRENT_DISK_STATS
|
2008-02-14 04:48:20 +01:00
|
|
|
ptime start = time_now();
|
|
|
|
#endif
|
|
|
|
#ifndef BOOST_NO_EXCEPTIONS
|
|
|
|
try {
|
2007-09-17 10:15:54 +02:00
|
|
|
#endif
|
2008-04-13 23:26:57 +02:00
|
|
|
|
2009-01-21 08:31:49 +01:00
|
|
|
if (j.storage && j.storage->get_storage_impl()->m_settings == 0)
|
|
|
|
j.storage->get_storage_impl()->m_settings = &m_settings;
|
|
|
|
|
2008-04-13 23:26:57 +02:00
|
|
|
switch (j.action)
|
2008-02-14 04:48:20 +01:00
|
|
|
{
|
2009-01-10 06:46:02 +01:00
|
|
|
case disk_io_job::update_settings:
|
|
|
|
{
|
|
|
|
#ifdef TORRENT_DISK_STATS
|
|
|
|
m_log << log_time() << " update_settings " << std::endl;
|
|
|
|
#endif
|
|
|
|
TORRENT_ASSERT(j.buffer);
|
|
|
|
session_settings const& s = *((session_settings*)j.buffer);
|
|
|
|
TORRENT_ASSERT(s.cache_size >= 0);
|
|
|
|
TORRENT_ASSERT(s.cache_expiry > 0);
|
|
|
|
|
2009-01-21 08:31:49 +01:00
|
|
|
m_settings = s;
|
2009-09-02 18:42:33 +02:00
|
|
|
break;
|
2009-01-10 06:46:02 +01:00
|
|
|
}
|
2008-11-17 02:19:46 +01:00
|
|
|
case disk_io_job::abort_torrent:
|
|
|
|
{
|
2008-12-30 09:20:25 +01:00
|
|
|
#ifdef TORRENT_DISK_STATS
|
|
|
|
m_log << log_time() << " abort_torrent " << std::endl;
|
|
|
|
#endif
|
2008-11-17 02:19:46 +01:00
|
|
|
mutex_t::scoped_lock jl(m_queue_mutex);
|
|
|
|
for (std::list<disk_io_job>::iterator i = m_jobs.begin();
|
|
|
|
i != m_jobs.end();)
|
|
|
|
{
|
|
|
|
if (i->storage != j.storage)
|
|
|
|
{
|
|
|
|
++i;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
if (i->action == disk_io_job::check_files)
|
|
|
|
{
|
2009-03-31 10:05:46 +02:00
|
|
|
post_callback(i->callback, *i, piece_manager::disk_check_aborted);
|
2008-11-17 02:19:46 +01:00
|
|
|
m_jobs.erase(i++);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
++i;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
2008-06-12 06:40:37 +02:00
|
|
|
case disk_io_job::abort_thread:
|
|
|
|
{
|
2008-12-30 09:20:25 +01:00
|
|
|
#ifdef TORRENT_DISK_STATS
|
|
|
|
m_log << log_time() << " abort_thread " << std::endl;
|
|
|
|
#endif
|
2008-06-12 06:40:37 +02:00
|
|
|
mutex_t::scoped_lock jl(m_queue_mutex);
|
|
|
|
|
|
|
|
for (std::list<disk_io_job>::iterator i = m_jobs.begin();
|
2009-05-22 08:32:39 +02:00
|
|
|
i != m_jobs.end();)
|
2008-06-12 06:40:37 +02:00
|
|
|
{
|
|
|
|
if (i->action == disk_io_job::read)
|
|
|
|
{
|
2009-03-31 10:05:46 +02:00
|
|
|
post_callback(i->callback, *i, -1);
|
2008-06-12 06:40:37 +02:00
|
|
|
m_jobs.erase(i++);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
if (i->action == disk_io_job::check_files)
|
|
|
|
{
|
2009-03-31 10:05:46 +02:00
|
|
|
post_callback(i->callback, *i, piece_manager::disk_check_aborted);
|
2008-06-12 06:40:37 +02:00
|
|
|
m_jobs.erase(i++);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
++i;
|
|
|
|
}
|
2009-05-13 19:17:33 +02:00
|
|
|
|
|
|
|
m_abort = true;
|
2008-06-12 06:40:37 +02:00
|
|
|
break;
|
|
|
|
}
|
2009-02-03 08:46:24 +01:00
|
|
|
case disk_io_job::read_and_hash:
|
|
|
|
{
|
|
|
|
#ifdef TORRENT_DISK_STATS
|
|
|
|
m_log << log_time() << " read_and_hash " << j.buffer_size << std::endl;
|
|
|
|
#endif
|
|
|
|
INVARIANT_CHECK;
|
|
|
|
TORRENT_ASSERT(j.buffer == 0);
|
|
|
|
j.buffer = allocate_buffer("send buffer");
|
|
|
|
TORRENT_ASSERT(j.buffer_size <= m_block_size);
|
|
|
|
if (j.buffer == 0)
|
|
|
|
{
|
|
|
|
ret = -1;
|
2009-06-19 18:42:33 +02:00
|
|
|
#if BOOST_VERSION == 103500
|
|
|
|
j.error = error_code(boost::system::posix_error::not_enough_memory
|
|
|
|
, get_posix_category());
|
|
|
|
#elif BOOST_VERSION > 103500
|
2009-06-01 00:41:53 +02:00
|
|
|
j.error = error_code(boost::system::errc::not_enough_memory
|
|
|
|
, get_posix_category());
|
2009-06-03 09:46:50 +02:00
|
|
|
#else
|
|
|
|
j.error = asio::error::no_memory;
|
|
|
|
#endif
|
2009-06-10 10:30:55 +02:00
|
|
|
j.str.clear();
|
2009-02-03 08:46:24 +01:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
disk_buffer_holder read_holder(*this, j.buffer);
|
|
|
|
|
|
|
|
// read the entire piece and verify the piece hash
|
|
|
|
// since we need to check the hash, this function
|
|
|
|
// will ignore the cache size limit (at least for
|
|
|
|
// reading and hashing, not for keeping it around)
|
|
|
|
sha1_hash h;
|
|
|
|
ret = read_piece_from_cache_and_hash(j, h);
|
2009-05-28 03:19:48 +02:00
|
|
|
|
|
|
|
// -2 means there's no space in the read cache
|
|
|
|
// or that the read cache is disabled
|
2009-02-03 08:46:24 +01:00
|
|
|
if (ret == -1)
|
|
|
|
{
|
|
|
|
test_error(j);
|
|
|
|
break;
|
|
|
|
}
|
2009-08-02 08:40:45 +02:00
|
|
|
if (!m_settings.disable_hash_checks)
|
|
|
|
ret = (j.storage->info()->hash_for_piece(j.piece) == h)?ret:-3;
|
2009-02-03 08:46:24 +01:00
|
|
|
if (ret == -3)
|
|
|
|
{
|
|
|
|
j.storage->mark_failed(j.piece);
|
|
|
|
j.error = error_code(errors::failed_hash_check, libtorrent_category);
|
2009-06-10 10:30:55 +02:00
|
|
|
j.str.clear();
|
2009-05-23 05:05:21 +02:00
|
|
|
j.buffer = 0;
|
|
|
|
break;
|
2009-02-03 08:46:24 +01:00
|
|
|
}
|
|
|
|
|
2009-05-23 05:05:21 +02:00
|
|
|
TORRENT_ASSERT(j.buffer == read_holder.get());
|
2009-02-03 08:46:24 +01:00
|
|
|
read_holder.release();
|
2009-06-01 00:38:49 +02:00
|
|
|
#if TORRENT_DISK_STATS
|
|
|
|
rename_buffer(j.buffer, "released send buffer");
|
|
|
|
#endif
|
2009-02-03 08:46:24 +01:00
|
|
|
break;
|
|
|
|
}
|
2008-04-13 23:26:57 +02:00
|
|
|
case disk_io_job::read:
|
2007-06-10 22:46:09 +02:00
|
|
|
{
|
2008-07-18 01:41:46 +02:00
|
|
|
if (test_error(j))
|
2008-02-14 04:48:20 +01:00
|
|
|
{
|
2008-04-13 23:26:57 +02:00
|
|
|
ret = -1;
|
2008-11-29 22:38:34 +01:00
|
|
|
break;
|
2008-04-13 23:26:57 +02:00
|
|
|
}
|
2007-09-17 10:15:54 +02:00
|
|
|
#ifdef TORRENT_DISK_STATS
|
2008-04-13 23:26:57 +02:00
|
|
|
m_log << log_time() << " read " << j.buffer_size << std::endl;
|
2007-09-17 10:15:54 +02:00
|
|
|
#endif
|
2008-04-13 23:26:57 +02:00
|
|
|
INVARIANT_CHECK;
|
|
|
|
TORRENT_ASSERT(j.buffer == 0);
|
2009-01-23 10:13:31 +01:00
|
|
|
j.buffer = allocate_buffer("send buffer");
|
2008-04-13 23:26:57 +02:00
|
|
|
TORRENT_ASSERT(j.buffer_size <= m_block_size);
|
|
|
|
if (j.buffer == 0)
|
|
|
|
{
|
|
|
|
ret = -1;
|
2009-06-19 18:42:33 +02:00
|
|
|
#if BOOST_VERSION == 103500
|
|
|
|
j.error = error_code(boost::system::posix_error::not_enough_memory
|
|
|
|
, get_posix_category());
|
|
|
|
#elif BOOST_VERSION > 103500
|
2009-06-01 00:41:53 +02:00
|
|
|
j.error = error_code(boost::system::errc::not_enough_memory
|
|
|
|
, get_posix_category());
|
2009-06-03 09:46:50 +02:00
|
|
|
#else
|
|
|
|
j.error = asio::error::no_memory;
|
|
|
|
#endif
|
2009-06-10 10:30:55 +02:00
|
|
|
j.str.clear();
|
2008-04-13 23:26:57 +02:00
|
|
|
break;
|
|
|
|
}
|
2008-02-22 05:11:04 +01:00
|
|
|
|
2008-04-13 23:26:57 +02:00
|
|
|
disk_buffer_holder read_holder(*this, j.buffer);
|
2009-05-23 05:05:21 +02:00
|
|
|
|
2008-06-12 06:40:37 +02:00
|
|
|
ret = try_read_from_cache(j);
|
2008-04-13 23:26:57 +02:00
|
|
|
|
|
|
|
// -2 means there's no space in the read cache
|
|
|
|
// or that the read cache is disabled
|
|
|
|
if (ret == -1)
|
|
|
|
{
|
|
|
|
j.buffer = 0;
|
2008-07-18 01:41:46 +02:00
|
|
|
test_error(j);
|
2008-04-13 23:26:57 +02:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
else if (ret == -2)
|
|
|
|
{
|
2009-01-03 09:11:31 +01:00
|
|
|
file::iovec_t b = { j.buffer, j.buffer_size };
|
|
|
|
ret = j.storage->read_impl(&b, j.piece, j.offset, 1);
|
2008-04-13 23:26:57 +02:00
|
|
|
if (ret < 0)
|
2008-02-14 04:48:20 +01:00
|
|
|
{
|
2008-07-18 01:41:46 +02:00
|
|
|
test_error(j);
|
2008-02-22 05:11:04 +01:00
|
|
|
break;
|
|
|
|
}
|
2009-05-23 05:05:21 +02:00
|
|
|
if (ret != j.buffer_size)
|
2009-05-21 18:15:05 +02:00
|
|
|
{
|
|
|
|
// this means the file wasn't big enough for this read
|
2009-05-23 05:05:21 +02:00
|
|
|
j.buffer = 0;
|
2009-05-21 18:15:05 +02:00
|
|
|
j.error = error_code(errors::file_too_short, libtorrent_category);
|
|
|
|
j.error_file.clear();
|
2009-06-10 10:30:55 +02:00
|
|
|
j.str.clear();
|
2009-05-21 18:15:05 +02:00
|
|
|
ret = -1;
|
|
|
|
break;
|
|
|
|
}
|
2008-04-13 23:26:57 +02:00
|
|
|
++m_cache_stats.blocks_read;
|
2008-02-14 04:48:20 +01:00
|
|
|
}
|
2009-05-23 05:05:21 +02:00
|
|
|
TORRENT_ASSERT(j.buffer == read_holder.get());
|
2008-04-13 23:26:57 +02:00
|
|
|
read_holder.release();
|
2009-06-01 00:38:49 +02:00
|
|
|
#if TORRENT_DISK_STATS
|
|
|
|
rename_buffer(j.buffer, "released send buffer");
|
|
|
|
#endif
|
2008-04-13 23:26:57 +02:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
case disk_io_job::write:
|
|
|
|
{
|
2007-09-17 10:15:54 +02:00
|
|
|
#ifdef TORRENT_DISK_STATS
|
2008-04-13 23:26:57 +02:00
|
|
|
m_log << log_time() << " write " << j.buffer_size << std::endl;
|
2007-09-17 10:15:54 +02:00
|
|
|
#endif
|
2008-06-12 06:40:37 +02:00
|
|
|
mutex_t::scoped_lock l(m_piece_mutex);
|
2008-04-13 23:26:57 +02:00
|
|
|
INVARIANT_CHECK;
|
2009-05-23 09:35:45 +02:00
|
|
|
|
|
|
|
if (in_use() >= m_settings.cache_size)
|
2009-05-24 02:12:53 +02:00
|
|
|
flush_cache_blocks(l, in_use() - m_settings.cache_size + 1, m_read_pieces.end());
|
2009-05-23 09:35:45 +02:00
|
|
|
|
2008-04-13 23:26:57 +02:00
|
|
|
cache_t::iterator p
|
|
|
|
= find_cached_piece(m_pieces, j, l);
|
|
|
|
int block = j.offset / m_block_size;
|
|
|
|
TORRENT_ASSERT(j.buffer);
|
|
|
|
TORRENT_ASSERT(j.buffer_size <= m_block_size);
|
|
|
|
if (p != m_pieces.end())
|
|
|
|
{
|
2009-06-10 10:30:55 +02:00
|
|
|
TORRENT_ASSERT(p->blocks[block].buf == 0);
|
|
|
|
if (p->blocks[block].buf)
|
2008-02-08 11:22:05 +01:00
|
|
|
{
|
2009-06-10 10:30:55 +02:00
|
|
|
free_buffer(p->blocks[block].buf);
|
2008-04-13 23:26:57 +02:00
|
|
|
--p->num_blocks;
|
2008-02-08 11:22:05 +01:00
|
|
|
}
|
2009-06-10 10:30:55 +02:00
|
|
|
p->blocks[block].buf = j.buffer;
|
|
|
|
p->blocks[block].callback.swap(handler);
|
2009-05-19 09:00:05 +02:00
|
|
|
#ifdef TORRENT_DISK_STATS
|
|
|
|
rename_buffer(j.buffer, "write cache");
|
|
|
|
#endif
|
2008-04-13 23:26:57 +02:00
|
|
|
++m_cache_stats.cache_size;
|
|
|
|
++p->num_blocks;
|
|
|
|
p->last_use = time_now();
|
2009-05-24 02:12:53 +02:00
|
|
|
// we might just have created a contiguous range
|
|
|
|
// that meets the requirement to be flushed. try it
|
|
|
|
flush_contiguous_blocks(p, l, m_settings.write_cache_line_size);
|
2008-02-08 11:22:05 +01:00
|
|
|
}
|
2008-04-13 23:26:57 +02:00
|
|
|
else
|
2008-02-08 11:22:05 +01:00
|
|
|
{
|
2009-06-10 10:30:55 +02:00
|
|
|
if (cache_block(j, handler, l) < 0)
|
2009-05-23 20:39:55 +02:00
|
|
|
{
|
|
|
|
file::iovec_t iov = {j.buffer, j.buffer_size};
|
|
|
|
ret = j.storage->write_impl(&iov, j.piece, j.offset, 1);
|
|
|
|
if (ret < 0)
|
|
|
|
{
|
|
|
|
test_error(j);
|
|
|
|
break;
|
|
|
|
}
|
2009-06-15 00:48:07 +02:00
|
|
|
break;
|
2009-05-23 20:39:55 +02:00
|
|
|
}
|
2008-04-13 23:26:57 +02:00
|
|
|
}
|
|
|
|
// we've now inserted the buffer
|
|
|
|
// in the cache, we should not
|
|
|
|
// free it at the end
|
|
|
|
holder.release();
|
2009-05-23 09:35:45 +02:00
|
|
|
|
|
|
|
if (in_use() > m_settings.cache_size)
|
2009-05-24 02:12:53 +02:00
|
|
|
flush_cache_blocks(l, in_use() - m_settings.cache_size, m_read_pieces.end());
|
2009-05-23 09:35:45 +02:00
|
|
|
|
2008-04-13 23:26:57 +02:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
case disk_io_job::hash:
|
|
|
|
{
|
2007-09-17 10:15:54 +02:00
|
|
|
#ifdef TORRENT_DISK_STATS
|
2008-04-13 23:26:57 +02:00
|
|
|
m_log << log_time() << " hash" << std::endl;
|
2007-09-17 10:15:54 +02:00
|
|
|
#endif
|
2008-06-12 06:40:37 +02:00
|
|
|
mutex_t::scoped_lock l(m_piece_mutex);
|
2008-04-13 23:26:57 +02:00
|
|
|
INVARIANT_CHECK;
|
2008-03-10 09:19:31 +01:00
|
|
|
|
2008-04-13 23:26:57 +02:00
|
|
|
cache_t::iterator i
|
|
|
|
= find_cached_piece(m_pieces, j, l);
|
|
|
|
if (i != m_pieces.end())
|
|
|
|
{
|
|
|
|
flush_and_remove(i, l);
|
2008-07-18 01:41:46 +02:00
|
|
|
if (test_error(j))
|
2008-02-14 04:48:20 +01:00
|
|
|
{
|
|
|
|
ret = -1;
|
2008-04-13 00:08:07 +02:00
|
|
|
j.storage->mark_failed(j.piece);
|
2008-02-14 04:48:20 +01:00
|
|
|
break;
|
|
|
|
}
|
2008-02-08 11:22:05 +01:00
|
|
|
}
|
2008-04-13 23:26:57 +02:00
|
|
|
l.unlock();
|
2009-08-02 08:40:45 +02:00
|
|
|
if (m_settings.disable_hash_checks)
|
|
|
|
{
|
|
|
|
ret = 0;
|
|
|
|
break;
|
|
|
|
}
|
2008-04-13 23:26:57 +02:00
|
|
|
sha1_hash h = j.storage->hash_for_piece_impl(j.piece);
|
2008-07-18 01:41:46 +02:00
|
|
|
if (test_error(j))
|
2008-02-14 04:48:20 +01:00
|
|
|
{
|
2008-04-13 23:26:57 +02:00
|
|
|
ret = -1;
|
|
|
|
j.storage->mark_failed(j.piece);
|
|
|
|
break;
|
|
|
|
}
|
2009-08-02 08:40:45 +02:00
|
|
|
|
2008-04-13 23:26:57 +02:00
|
|
|
ret = (j.storage->info()->hash_for_piece(j.piece) == h)?0:-2;
|
|
|
|
if (ret == -2) j.storage->mark_failed(j.piece);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
case disk_io_job::move_storage:
|
|
|
|
{
|
2007-09-17 10:15:54 +02:00
|
|
|
#ifdef TORRENT_DISK_STATS
|
2008-04-13 23:26:57 +02:00
|
|
|
m_log << log_time() << " move" << std::endl;
|
2007-09-17 10:15:54 +02:00
|
|
|
#endif
|
2008-04-13 23:26:57 +02:00
|
|
|
TORRENT_ASSERT(j.buffer == 0);
|
2009-03-31 10:05:46 +02:00
|
|
|
ret = j.storage->move_storage_impl(j.str);
|
2008-04-13 23:26:57 +02:00
|
|
|
if (ret != 0)
|
|
|
|
{
|
2008-07-18 01:41:46 +02:00
|
|
|
test_error(j);
|
2007-06-10 22:46:09 +02:00
|
|
|
break;
|
2008-02-14 04:48:20 +01:00
|
|
|
}
|
2008-04-13 23:26:57 +02:00
|
|
|
j.str = j.storage->save_path().string();
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
case disk_io_job::release_files:
|
|
|
|
{
|
2007-09-17 10:15:54 +02:00
|
|
|
#ifdef TORRENT_DISK_STATS
|
2008-04-13 23:26:57 +02:00
|
|
|
m_log << log_time() << " release" << std::endl;
|
2007-09-17 10:15:54 +02:00
|
|
|
#endif
|
2008-04-13 23:26:57 +02:00
|
|
|
TORRENT_ASSERT(j.buffer == 0);
|
2008-02-08 11:22:05 +01:00
|
|
|
|
2008-06-12 06:40:37 +02:00
|
|
|
mutex_t::scoped_lock l(m_piece_mutex);
|
2008-04-13 23:26:57 +02:00
|
|
|
INVARIANT_CHECK;
|
2008-03-10 09:19:31 +01:00
|
|
|
|
2008-04-13 23:26:57 +02:00
|
|
|
for (cache_t::iterator i = m_pieces.begin(); i != m_pieces.end();)
|
|
|
|
{
|
|
|
|
if (i->storage == j.storage)
|
2008-03-16 11:51:25 +01:00
|
|
|
{
|
2009-05-23 09:35:45 +02:00
|
|
|
flush_range(i, 0, INT_MAX, l);
|
2008-04-13 23:26:57 +02:00
|
|
|
i = m_pieces.erase(i);
|
2008-03-16 11:51:25 +01:00
|
|
|
}
|
2008-04-13 23:26:57 +02:00
|
|
|
else
|
2008-02-14 04:48:20 +01:00
|
|
|
{
|
2008-04-13 23:26:57 +02:00
|
|
|
++i;
|
2008-02-14 04:48:20 +01:00
|
|
|
}
|
2008-02-08 11:22:05 +01:00
|
|
|
}
|
2008-06-12 06:40:37 +02:00
|
|
|
l.unlock();
|
2009-01-21 08:31:49 +01:00
|
|
|
release_memory();
|
|
|
|
|
2008-04-13 23:26:57 +02:00
|
|
|
ret = j.storage->release_files_impl();
|
2008-07-18 01:41:46 +02:00
|
|
|
if (ret != 0) test_error(j);
|
2008-04-13 23:26:57 +02:00
|
|
|
break;
|
|
|
|
}
|
2008-07-18 17:31:22 +02:00
|
|
|
case disk_io_job::clear_read_cache:
|
|
|
|
{
|
|
|
|
#ifdef TORRENT_DISK_STATS
|
|
|
|
m_log << log_time() << " clear-cache" << std::endl;
|
|
|
|
#endif
|
|
|
|
TORRENT_ASSERT(j.buffer == 0);
|
|
|
|
|
|
|
|
mutex_t::scoped_lock l(m_piece_mutex);
|
|
|
|
INVARIANT_CHECK;
|
|
|
|
|
|
|
|
for (cache_t::iterator i = m_read_pieces.begin();
|
|
|
|
i != m_read_pieces.end();)
|
|
|
|
{
|
|
|
|
if (i->storage == j.storage)
|
|
|
|
{
|
|
|
|
free_piece(*i, l);
|
|
|
|
i = m_read_pieces.erase(i);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
++i;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
l.unlock();
|
2009-01-21 08:31:49 +01:00
|
|
|
release_memory();
|
2008-07-18 17:31:22 +02:00
|
|
|
ret = 0;
|
|
|
|
break;
|
|
|
|
}
|
2008-04-13 23:26:57 +02:00
|
|
|
case disk_io_job::delete_files:
|
|
|
|
{
|
2007-10-13 05:33:33 +02:00
|
|
|
#ifdef TORRENT_DISK_STATS
|
2008-04-13 23:26:57 +02:00
|
|
|
m_log << log_time() << " delete" << std::endl;
|
2007-10-13 05:33:33 +02:00
|
|
|
#endif
|
2008-04-13 23:26:57 +02:00
|
|
|
TORRENT_ASSERT(j.buffer == 0);
|
2008-02-08 11:22:05 +01:00
|
|
|
|
2008-06-12 06:40:37 +02:00
|
|
|
mutex_t::scoped_lock l(m_piece_mutex);
|
2008-04-13 23:26:57 +02:00
|
|
|
INVARIANT_CHECK;
|
2008-03-10 09:19:31 +01:00
|
|
|
|
2008-04-13 23:26:57 +02:00
|
|
|
cache_t::iterator i = std::remove_if(
|
|
|
|
m_pieces.begin(), m_pieces.end(), bind(&cached_piece_entry::storage, _1) == j.storage);
|
2008-02-08 11:22:05 +01:00
|
|
|
|
2008-04-13 23:26:57 +02:00
|
|
|
for (cache_t::iterator k = i; k != m_pieces.end(); ++k)
|
|
|
|
{
|
|
|
|
torrent_info const& ti = *k->storage->info();
|
|
|
|
int blocks_in_piece = (ti.piece_size(k->piece) + m_block_size - 1) / m_block_size;
|
|
|
|
for (int j = 0; j < blocks_in_piece; ++j)
|
2008-02-08 11:22:05 +01:00
|
|
|
{
|
2009-06-10 10:30:55 +02:00
|
|
|
if (k->blocks[j].buf == 0) continue;
|
|
|
|
free_buffer(k->blocks[j].buf);
|
|
|
|
k->blocks[j].buf = 0;
|
2008-09-17 04:29:05 +02:00
|
|
|
--m_cache_stats.cache_size;
|
2008-02-08 11:22:05 +01:00
|
|
|
}
|
2008-04-13 23:26:57 +02:00
|
|
|
}
|
|
|
|
m_pieces.erase(i, m_pieces.end());
|
2008-06-12 06:40:37 +02:00
|
|
|
l.unlock();
|
2009-01-21 08:31:49 +01:00
|
|
|
release_memory();
|
|
|
|
|
2008-04-13 23:26:57 +02:00
|
|
|
ret = j.storage->delete_files_impl();
|
2008-07-18 01:41:46 +02:00
|
|
|
if (ret != 0) test_error(j);
|
2008-04-13 23:26:57 +02:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
case disk_io_job::check_fastresume:
|
|
|
|
{
|
2008-03-08 07:06:31 +01:00
|
|
|
#ifdef TORRENT_DISK_STATS
|
2008-12-30 09:20:25 +01:00
|
|
|
m_log << log_time() << " check_fastresume" << std::endl;
|
2008-03-08 07:06:31 +01:00
|
|
|
#endif
|
2008-07-01 01:14:31 +02:00
|
|
|
lazy_entry const* rd = (lazy_entry const*)j.buffer;
|
2008-04-13 23:26:57 +02:00
|
|
|
TORRENT_ASSERT(rd != 0);
|
2009-06-28 02:36:41 +02:00
|
|
|
ret = j.storage->check_fastresume(*rd, j.error);
|
2008-04-13 23:26:57 +02:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
case disk_io_job::check_files:
|
|
|
|
{
|
2008-03-08 07:06:31 +01:00
|
|
|
#ifdef TORRENT_DISK_STATS
|
2008-12-30 09:20:25 +01:00
|
|
|
m_log << log_time() << " check_files" << std::endl;
|
2008-03-08 07:06:31 +01:00
|
|
|
#endif
|
2008-04-13 23:26:57 +02:00
|
|
|
int piece_size = j.storage->info()->piece_length();
|
|
|
|
for (int processed = 0; processed < 4 * 1024 * 1024; processed += piece_size)
|
|
|
|
{
|
2009-05-27 19:06:50 +02:00
|
|
|
ptime now = time_now_hires();
|
2009-05-30 06:02:03 +02:00
|
|
|
TORRENT_ASSERT(now >= m_last_file_check);
|
2009-06-03 09:22:43 +02:00
|
|
|
#if BOOST_VERSION > 103600
|
2009-05-22 08:32:39 +02:00
|
|
|
if (now - m_last_file_check < milliseconds(m_settings.file_checks_delay_per_block))
|
|
|
|
{
|
|
|
|
int sleep_time = m_settings.file_checks_delay_per_block
|
|
|
|
* (piece_size / (16 * 1024))
|
|
|
|
- total_milliseconds(now - m_last_file_check);
|
2009-05-27 19:06:50 +02:00
|
|
|
if (sleep_time < 0) sleep_time = 0;
|
2009-06-01 00:38:49 +02:00
|
|
|
TORRENT_ASSERT(sleep_time < 5 * 1000);
|
2009-05-22 08:32:39 +02:00
|
|
|
|
|
|
|
boost::thread::sleep(boost::get_system_time()
|
|
|
|
+ boost::posix_time::milliseconds(sleep_time));
|
|
|
|
}
|
2009-05-27 19:06:50 +02:00
|
|
|
m_last_file_check = time_now_hires();
|
2009-06-03 09:22:43 +02:00
|
|
|
#endif
|
2009-05-22 08:32:39 +02:00
|
|
|
|
|
|
|
if (m_waiting_to_shutdown) break;
|
|
|
|
|
2009-06-28 02:36:41 +02:00
|
|
|
ret = j.storage->check_files(j.piece, j.offset, j.error);
|
2008-03-08 07:06:31 +01:00
|
|
|
|
|
|
|
#ifndef BOOST_NO_EXCEPTIONS
|
2008-04-13 23:26:57 +02:00
|
|
|
try {
|
2008-03-08 07:06:31 +01:00
|
|
|
#endif
|
2008-04-13 23:26:57 +02:00
|
|
|
TORRENT_ASSERT(handler);
|
|
|
|
if (handler && ret == piece_manager::need_full_check)
|
2009-03-31 10:05:46 +02:00
|
|
|
post_callback(handler, j, ret);
|
2008-03-08 07:06:31 +01:00
|
|
|
#ifndef BOOST_NO_EXCEPTIONS
|
2008-04-13 23:26:57 +02:00
|
|
|
} catch (std::exception&) {}
|
2008-03-08 07:06:31 +01:00
|
|
|
#endif
|
2008-04-13 23:26:57 +02:00
|
|
|
if (ret != piece_manager::need_full_check) break;
|
2008-03-08 07:06:31 +01:00
|
|
|
}
|
2008-07-18 01:41:46 +02:00
|
|
|
if (test_error(j))
|
|
|
|
{
|
|
|
|
ret = piece_manager::fatal_disk_error;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
TORRENT_ASSERT(ret != -2 || !j.str.empty());
|
|
|
|
|
2008-04-13 23:26:57 +02:00
|
|
|
// if the check is not done, add it at the end of the job queue
|
|
|
|
if (ret == piece_manager::need_full_check)
|
2008-04-13 20:54:36 +02:00
|
|
|
{
|
2008-06-12 06:40:37 +02:00
|
|
|
add_job(j, handler);
|
2008-04-13 23:26:57 +02:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
case disk_io_job::save_resume_data:
|
|
|
|
{
|
2008-04-13 20:54:36 +02:00
|
|
|
#ifdef TORRENT_DISK_STATS
|
2008-12-30 09:20:25 +01:00
|
|
|
m_log << log_time() << " save_resume_data" << std::endl;
|
2008-04-13 20:54:36 +02:00
|
|
|
#endif
|
2008-04-13 23:26:57 +02:00
|
|
|
j.resume_data.reset(new entry(entry::dictionary_t));
|
|
|
|
j.storage->write_resume_data(*j.resume_data);
|
|
|
|
ret = 0;
|
|
|
|
break;
|
2007-06-10 22:46:09 +02:00
|
|
|
}
|
2008-05-28 10:44:40 +02:00
|
|
|
case disk_io_job::rename_file:
|
|
|
|
{
|
|
|
|
#ifdef TORRENT_DISK_STATS
|
2008-12-30 09:20:25 +01:00
|
|
|
m_log << log_time() << " rename_file" << std::endl;
|
2008-05-28 10:44:40 +02:00
|
|
|
#endif
|
|
|
|
ret = j.storage->rename_file_impl(j.piece, j.str);
|
2009-05-07 08:41:41 +02:00
|
|
|
if (ret != 0)
|
|
|
|
{
|
|
|
|
test_error(j);
|
|
|
|
break;
|
|
|
|
}
|
2008-05-28 10:44:40 +02:00
|
|
|
}
|
2007-06-10 22:46:09 +02:00
|
|
|
}
|
2008-02-14 04:48:20 +01:00
|
|
|
#ifndef BOOST_NO_EXCEPTIONS
|
2009-01-03 09:11:31 +01:00
|
|
|
}
|
|
|
|
catch (std::exception& e)
|
2007-06-10 22:46:09 +02:00
|
|
|
{
|
2008-02-14 04:48:20 +01:00
|
|
|
ret = -1;
|
2008-01-15 00:51:04 +01:00
|
|
|
try
|
|
|
|
{
|
|
|
|
j.str = e.what();
|
|
|
|
}
|
|
|
|
catch (std::exception&) {}
|
2007-06-10 22:46:09 +02:00
|
|
|
}
|
2008-02-14 04:48:20 +01:00
|
|
|
#endif
|
2007-06-10 22:46:09 +02:00
|
|
|
|
|
|
|
// if (!handler) std::cerr << "DISK THREAD: no callback specified" << std::endl;
|
|
|
|
// else std::cerr << "DISK THREAD: invoking callback" << std::endl;
|
2008-02-14 04:48:20 +01:00
|
|
|
#ifndef BOOST_NO_EXCEPTIONS
|
|
|
|
try {
|
|
|
|
#endif
|
2008-07-30 10:05:04 +02:00
|
|
|
TORRENT_ASSERT(ret != -2 || !j.str.empty()
|
|
|
|
|| j.action == disk_io_job::hash);
|
2009-06-01 00:38:49 +02:00
|
|
|
#if TORRENT_DISK_STATS
|
|
|
|
if ((j.action == disk_io_job::read || j.action == disk_io_job::read_and_hash)
|
|
|
|
&& j.buffer != 0)
|
|
|
|
rename_buffer(j.buffer, "posted send buffer");
|
|
|
|
#endif
|
2009-03-31 10:05:46 +02:00
|
|
|
post_callback(handler, j, ret);
|
2008-02-14 04:48:20 +01:00
|
|
|
#ifndef BOOST_NO_EXCEPTIONS
|
2008-04-10 11:11:54 +02:00
|
|
|
} catch (std::exception&)
|
|
|
|
{
|
2008-04-13 00:08:07 +02:00
|
|
|
TORRENT_ASSERT(false);
|
2008-04-10 11:11:54 +02:00
|
|
|
}
|
2008-02-14 04:48:20 +01:00
|
|
|
#endif
|
2007-06-10 22:46:09 +02:00
|
|
|
}
|
2008-02-14 04:48:20 +01:00
|
|
|
TORRENT_ASSERT(false);
|
2007-06-10 22:46:09 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|