/*

Copyright (c) 2012, Arvid Norberg
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the distribution.
    * Neither the name of the author nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

*/

#include "test.hpp"
#include "libtorrent/block_cache.hpp"
#include "libtorrent/io_service.hpp"
#include "libtorrent/alert.hpp"
#include "libtorrent/alert_types.hpp"
#include "libtorrent/disk_io_thread.hpp"
#include "libtorrent/storage.hpp"
#include "libtorrent/session.hpp"
#include "libtorrent/aux_/path.hpp" // for bufs_size

#include <functional>
#include <memory>

using namespace lt;

namespace {
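
// stub storage backend for these tests: nothing ever touches disk,
// readv()/writev() simply report the full buffer size as transferred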
struct test_storage_impl : storage_interface
{
	explicit test_storage_impl(file_storage const& fs) : storage_interface(fs) {}
	void initialize(storage_error&) override {}

	int readv(span<iovec_t const> bufs
		, piece_index_t, int /*offset*/, open_mode_t, storage_error&) override
	{
		return bufs_size(bufs);
	}
	int writev(span<iovec_t const> bufs
		, piece_index_t, int /*offset*/, open_mode_t, storage_error&) override
	{
		return bufs_size(bufs);
	}

	bool has_any_file(storage_error&) override { return false; }
	void set_file_priority(aux::vector<download_priority_t, file_index_t> const&
		, storage_error&) override {}
	status_t move_storage(std::string const&, move_flags_t
		, storage_error&) override { return status_t::no_error; }
	bool verify_resume_data(add_torrent_params const&
		, aux::vector<std::string, file_index_t> const&
		, storage_error&) override { return true; }
	void release_files(storage_error&) override {}
	void rename_file(file_index_t, std::string const&
		, storage_error&) override {}
	void delete_files(remove_flags_t, storage_error&) override {}
};
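
// adapter that lets buffers and block references be handed back to the
// block_cache under test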
struct allocator : buffer_allocator_interface
{
	allocator(block_cache& bc, storage_interface* st)
		: m_cache(bc), m_storage(st) {}

	void free_disk_buffer(char* b) override
	{ m_cache.free_buffer(b); }

	void reclaim_blocks(span<aux::block_cache_reference> refs) override
	{
		for (auto ref : refs)
			m_cache.reclaim_block(m_storage, ref);
	}

	virtual ~allocator() = default;
private:
	block_cache& m_cache;
	storage_interface* m_storage;
};
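
// no-op callback handed to the block_cache constructor in TEST_SETUP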
static void nop() {}
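
// with asserts enabled, disk_io_job carries an in_use flag; TEST_SETUP marks
// its stack-allocated jobs as in-use before handing them to the cache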
#if TORRENT_USE_ASSERTS
#define INITIALIZE_JOB(j) j.in_use = true;
#else
#define INITIALIZE_JOB(j)
#endif
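
// common fixture: a block_cache over a file_storage with eight 0x4000-byte
// files and 0x8000-byte pieces, plus a read job (rj) and a write job (wj)
// bound to the stub storage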
#define TEST_SETUP \
	io_service ios; \
	block_cache bc(ios, std::bind(&nop)); \
	aux::session_settings sett; \
	file_storage fs; \
	fs.add_file("a/test0", 0x4000); \
	fs.add_file("a/test1", 0x4000); \
	fs.add_file("a/test2", 0x4000); \
	fs.add_file("a/test3", 0x4000); \
	fs.add_file("a/test4", 0x4000); \
	fs.add_file("a/test5", 0x4000); \
	fs.add_file("a/test6", 0x4000); \
	fs.add_file("a/test7", 0x4000); \
	fs.set_piece_length(0x8000); \
	fs.set_num_pieces(5); \
	std::shared_ptr<storage_interface> pm \
		= std::make_shared<test_storage_impl>(fs); \
	allocator alloc(bc, pm.get()); \
	bc.set_settings(sett); \
	pm->m_settings = &sett; \
	disk_io_job rj; \
	disk_io_job wj; \
	INITIALIZE_JOB(rj) \
	INITIALIZE_JOB(wj) \
	rj.storage = pm; \
	wj.storage = pm; \
	cached_piece_entry* pe = nullptr; \
	int ret = 0; \
	iovec_t iov; \
	(void)iov; \
	(void)ret; \
	(void)pe
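
// stage a dirty write of block b of piece p: allocate a 0x4000-byte cache
// buffer and add it to the cache as a dirty block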
#define WRITE_BLOCK(p, b) \
	wj.flags = disk_io_job::in_progress; \
	wj.action = job_action_t::write; \
	wj.d.io.offset = (b) * 0x4000; \
	wj.d.io.buffer_size = 0x4000; \
	wj.piece = piece_index_t(p); \
	wj.argument = disk_buffer_holder(alloc, bc.allocate_buffer("write-test"), 0x4000); \
	pe = bc.add_dirty_block(&wj)
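
// try to serve block b of piece p straight from the cache; ret < 0 means a
// cache miss (the r parameter is currently unused)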
#define READ_BLOCK(p, b, r) \
	rj.action = job_action_t::read; \
	rj.d.io.offset = (b) * 0x4000; \
	rj.d.io.buffer_size = 0x4000; \
	rj.piece = piece_index_t(p); \
	rj.storage = pm; \
	rj.argument = disk_buffer_holder(alloc, nullptr, 0); \
	ret = bc.try_read(&rj, alloc)
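
// simulate flushing the listed blocks to disk: mark them pending, take
// flushing references, then report them as flushed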
#define FLUSH(flushing) \
	for (int i = 0; i < int(sizeof(flushing)/sizeof((flushing)[0])); ++i) \
	{ \
		pe->blocks[(flushing)[i]].pending = true; \
		bc.inc_block_refcount(pe, 0, block_cache::ref_flushing); \
	} \
	bc.blocks_flushed(pe, flushing, sizeof(flushing)/sizeof((flushing)[0]))
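
// allocate piece p in the ARC MRU (read_lru1) list and insert block b as
// read-cache data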
#define INSERT(p, b) \
	wj.piece = piece_index_t(p); \
	pe = bc.allocate_piece(&wj, cached_piece_entry::read_lru1); \
	ret = bc.allocate_iovec(iov); \
	TEST_EQUAL(ret, 0); \
	bc.insert_blocks(pe, b, iov, &wj)
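
// write a single dirty block, verify the counters, read it back (hit),
// release the reference, then probe a block that was never written (miss)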
void test_write()
{
	TEST_SETUP;

	// write block (0,0)
	WRITE_BLOCK(0, 0);

	counters c;
	bc.update_stats_counters(c);
	TEST_EQUAL(c[counters::write_cache_blocks], 1);
	TEST_EQUAL(c[counters::read_cache_blocks], 0);
	TEST_EQUAL(c[counters::pinned_blocks], 0);
	TEST_EQUAL(c[counters::arc_mru_size], 0);
	TEST_EQUAL(c[counters::arc_mru_ghost_size], 0);
	TEST_EQUAL(c[counters::arc_mfu_size], 0);
	TEST_EQUAL(c[counters::arc_mfu_ghost_size], 0);
	TEST_EQUAL(c[counters::arc_write_size], 1);
	TEST_EQUAL(c[counters::arc_volatile_size], 0);

	// try to read it back
	READ_BLOCK(0, 0, 1);
	TEST_EQUAL(bc.pinned_blocks(), 1);
	bc.update_stats_counters(c);
	TEST_EQUAL(c[counters::pinned_blocks], 1);

	// it's supposed to be a cache hit
	TEST_CHECK(ret >= 0);

	// return the reference to the buffer we just read
	rj.argument = remove_flags_t{};

	TEST_EQUAL(bc.pinned_blocks(), 0);
	bc.update_stats_counters(c);
	TEST_EQUAL(c[counters::pinned_blocks], 0);

	// try to read block (1, 0)
	READ_BLOCK(1, 0, 1);

	// that's supposed to be a cache miss
	TEST_CHECK(ret < 0);
	TEST_EQUAL(bc.pinned_blocks(), 0);
	bc.update_stats_counters(c);
	TEST_EQUAL(c[counters::pinned_blocks], 0);

	rj.argument = remove_flags_t{};

	tailqueue<disk_io_job> jobs;
	bc.clear(jobs);
}
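
// write a dirty block and simulate flushing it to disk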
void test_flush()
{
	TEST_SETUP;

	// write block (0,0)
	WRITE_BLOCK(0, 0);

	// pretend to flush to disk
	int flushing[1] = {0};
	FLUSH(flushing);

	tailqueue<disk_io_job> jobs;
	bc.clear(jobs);
}
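
// insert a read block directly into the cache and verify it lands in the
// ARC MRU (LRU1) list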
void test_insert()
{
	TEST_SETUP;

	INSERT(0, 0);

	counters c;
	bc.update_stats_counters(c);
	TEST_EQUAL(c[counters::write_cache_blocks], 0);
	TEST_EQUAL(c[counters::read_cache_blocks], 1);
	TEST_EQUAL(c[counters::pinned_blocks], 0);
	TEST_EQUAL(c[counters::arc_mru_size], 1);
	TEST_EQUAL(c[counters::arc_mru_ghost_size], 0);
	TEST_EQUAL(c[counters::arc_mfu_size], 0);
	TEST_EQUAL(c[counters::arc_mfu_ghost_size], 0);
	TEST_EQUAL(c[counters::arc_write_size], 0);
	TEST_EQUAL(c[counters::arc_volatile_size], 0);

	tailqueue<disk_io_job> jobs;
	bc.clear(jobs);
}
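
// while a piece holds a piece_refcount, evicting it only frees its buffers;
// once the refcount is dropped it is evicted into the MRU ghost list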
void test_evict()
{
	TEST_SETUP;

	INSERT(0, 0);

	counters c;
	bc.update_stats_counters(c);
	TEST_EQUAL(c[counters::write_cache_blocks], 0);
	TEST_EQUAL(c[counters::read_cache_blocks], 1);
	TEST_EQUAL(c[counters::pinned_blocks], 0);
	TEST_EQUAL(c[counters::arc_mru_size], 1);
	TEST_EQUAL(c[counters::arc_mru_ghost_size], 0);
	TEST_EQUAL(c[counters::arc_mfu_size], 0);
	TEST_EQUAL(c[counters::arc_mfu_ghost_size], 0);
	TEST_EQUAL(c[counters::arc_write_size], 0);
	TEST_EQUAL(c[counters::arc_volatile_size], 0);

	tailqueue<disk_io_job> jobs;
	// holding a piece_refcount should keep the piece from being evicted;
	// evict_piece() should just free its buffers
	++pe->piece_refcount;
	bc.evict_piece(pe, jobs, block_cache::allow_ghost);

	bc.update_stats_counters(c);
	TEST_EQUAL(c[counters::write_cache_blocks], 0);
	TEST_EQUAL(c[counters::read_cache_blocks], 0);
	TEST_EQUAL(c[counters::pinned_blocks], 0);
	TEST_EQUAL(c[counters::arc_mru_size], 1);
	TEST_EQUAL(c[counters::arc_mru_ghost_size], 0);
	TEST_EQUAL(c[counters::arc_mfu_size], 0);
	TEST_EQUAL(c[counters::arc_mfu_ghost_size], 0);
	TEST_EQUAL(c[counters::arc_write_size], 0);
	TEST_EQUAL(c[counters::arc_volatile_size], 0);

	--pe->piece_refcount;
	bc.evict_piece(pe, jobs, block_cache::allow_ghost);

	bc.update_stats_counters(c);
	TEST_EQUAL(c[counters::write_cache_blocks], 0);
	TEST_EQUAL(c[counters::read_cache_blocks], 0);
	TEST_EQUAL(c[counters::pinned_blocks], 0);
	TEST_EQUAL(c[counters::arc_mru_size], 0);
	TEST_EQUAL(c[counters::arc_mru_ghost_size], 1);
	TEST_EQUAL(c[counters::arc_mfu_size], 0);
	TEST_EQUAL(c[counters::arc_mfu_ghost_size], 0);
	TEST_EQUAL(c[counters::arc_write_size], 0);
	TEST_EQUAL(c[counters::arc_volatile_size], 0);

	bc.clear(jobs);
}

// have two different requestors read a block and
// make sure it moves into the MFU list
void test_arc_promote()
{
	TEST_SETUP;

	INSERT(0, 0);

	counters c;
	bc.update_stats_counters(c);
	TEST_EQUAL(c[counters::write_cache_blocks], 0);
	TEST_EQUAL(c[counters::read_cache_blocks], 1);
	TEST_EQUAL(c[counters::pinned_blocks], 0);
	TEST_EQUAL(c[counters::arc_mru_size], 1);
	TEST_EQUAL(c[counters::arc_mru_ghost_size], 0);
	TEST_EQUAL(c[counters::arc_mfu_size], 0);
	TEST_EQUAL(c[counters::arc_mfu_ghost_size], 0);
	TEST_EQUAL(c[counters::arc_write_size], 0);
	TEST_EQUAL(c[counters::arc_volatile_size], 0);

	READ_BLOCK(0, 0, 1);
	TEST_EQUAL(bc.pinned_blocks(), 1);
	bc.update_stats_counters(c);
	TEST_EQUAL(c[counters::pinned_blocks], 1);

	// it's supposed to be a cache hit
	TEST_CHECK(ret >= 0);
	// return the reference to the buffer we just read
	rj.argument = remove_flags_t{};

	bc.update_stats_counters(c);
	TEST_EQUAL(c[counters::write_cache_blocks], 0);
	TEST_EQUAL(c[counters::read_cache_blocks], 1);
	TEST_EQUAL(c[counters::pinned_blocks], 0);
	TEST_EQUAL(c[counters::arc_mru_size], 1);
	TEST_EQUAL(c[counters::arc_mru_ghost_size], 0);
	TEST_EQUAL(c[counters::arc_mfu_size], 0);
	TEST_EQUAL(c[counters::arc_mfu_ghost_size], 0);
	TEST_EQUAL(c[counters::arc_write_size], 0);
	TEST_EQUAL(c[counters::arc_volatile_size], 0);

	READ_BLOCK(0, 0, 2);
	TEST_EQUAL(bc.pinned_blocks(), 1);
	bc.update_stats_counters(c);
	TEST_EQUAL(c[counters::pinned_blocks], 1);

	// it's supposed to be a cache hit
	TEST_CHECK(ret >= 0);
	// return the reference to the buffer we just read
	rj.argument = remove_flags_t{};

	bc.update_stats_counters(c);
	TEST_EQUAL(c[counters::write_cache_blocks], 0);
	TEST_EQUAL(c[counters::read_cache_blocks], 1);
	TEST_EQUAL(c[counters::pinned_blocks], 0);
	TEST_EQUAL(c[counters::arc_mru_size], 0);
	TEST_EQUAL(c[counters::arc_mru_ghost_size], 0);
	TEST_EQUAL(c[counters::arc_mfu_size], 1);
	TEST_EQUAL(c[counters::arc_mfu_ghost_size], 0);
	TEST_EQUAL(c[counters::arc_write_size], 0);
	TEST_EQUAL(c[counters::arc_volatile_size], 0);

	tailqueue<disk_io_job> jobs;
	bc.clear(jobs);
}
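
// evict a piece into the MRU ghost list and verify that a subsequent
// cache hit promotes it back into the live MRU list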
void test_arc_unghost()
{
	TEST_SETUP;

	INSERT(0, 0);

	counters c;
	bc.update_stats_counters(c);
	TEST_EQUAL(c[counters::write_cache_blocks], 0);
	TEST_EQUAL(c[counters::read_cache_blocks], 1);
	TEST_EQUAL(c[counters::pinned_blocks], 0);
	TEST_EQUAL(c[counters::arc_mru_size], 1);
	TEST_EQUAL(c[counters::arc_mru_ghost_size], 0);
	TEST_EQUAL(c[counters::arc_mfu_size], 0);
	TEST_EQUAL(c[counters::arc_mfu_ghost_size], 0);
	TEST_EQUAL(c[counters::arc_write_size], 0);
	TEST_EQUAL(c[counters::arc_volatile_size], 0);

	tailqueue<disk_io_job> jobs;
	bc.evict_piece(pe, jobs, block_cache::allow_ghost);

	bc.update_stats_counters(c);
	TEST_EQUAL(c[counters::write_cache_blocks], 0);
	TEST_EQUAL(c[counters::read_cache_blocks], 0);
	TEST_EQUAL(c[counters::pinned_blocks], 0);
	TEST_EQUAL(c[counters::arc_mru_size], 0);
	TEST_EQUAL(c[counters::arc_mru_ghost_size], 1);
	TEST_EQUAL(c[counters::arc_mfu_size], 0);
	TEST_EQUAL(c[counters::arc_mfu_ghost_size], 0);
	TEST_EQUAL(c[counters::arc_write_size], 0);
	TEST_EQUAL(c[counters::arc_volatile_size], 0);

	// the block is now a ghost. If we cache-hit it,
	// it should be promoted back to the main list
	bc.cache_hit(pe, 0, false);

	bc.update_stats_counters(c);
	TEST_EQUAL(c[counters::write_cache_blocks], 0);
	// we didn't actually read in any blocks, so the cache size
	// is still 0
	TEST_EQUAL(c[counters::read_cache_blocks], 0);
	TEST_EQUAL(c[counters::pinned_blocks], 0);
	TEST_EQUAL(c[counters::arc_mru_size], 1);
	TEST_EQUAL(c[counters::arc_mru_ghost_size], 0);
	TEST_EQUAL(c[counters::arc_mfu_size], 0);
	TEST_EQUAL(c[counters::arc_mfu_ghost_size], 0);
	TEST_EQUAL(c[counters::arc_write_size], 0);
	TEST_EQUAL(c[counters::arc_volatile_size], 0);

	bc.clear(jobs);
}
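
// exercise the allocate_iovec() / free_iovec() round trip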
void test_iovec()
{
	TEST_SETUP;

	ret = bc.allocate_iovec(iov);
	bc.free_iovec(iov);
}
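
// issue a read that is not aligned to a block boundary and spans two
// cached blocks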
void test_unaligned_read()
{
	TEST_SETUP;

	INSERT(0, 0);
	INSERT(0, 1);

	rj.action = job_action_t::read;
	rj.d.io.offset = 0x2000;
	rj.d.io.buffer_size = 0x4000;
	rj.piece = piece_index_t(0);
	rj.storage = pm;
	rj.argument = disk_buffer_holder(alloc, nullptr, 0);
	ret = bc.try_read(&rj, alloc);

	// an unaligned read copies the data into a new buffer rather than
	// returning references to the cached blocks, so nothing stays pinned
	TEST_EQUAL(bc.pinned_blocks(), 0);
	counters c;
	bc.update_stats_counters(c);
	TEST_EQUAL(c[counters::pinned_blocks], 0);

	// it's supposed to be a cache hit
	TEST_CHECK(ret >= 0);
	// return the reference to the buffer we just read
	rj.argument = remove_flags_t{};

	tailqueue<disk_io_job> jobs;
	bc.clear(jobs);
}

} // anonymous namespace

TORRENT_TEST(block_cache)
{
	test_write();
	test_flush();
	test_insert();
	test_evict();
	test_arc_promote();
	test_arc_unghost();
	test_iovec();
	test_unaligned_read();

	// TODO: test try_evict_blocks
	// TODO: test evicting volatile pieces, to see them be removed
	// TODO: test evicting dirty pieces
	// TODO: test free_piece
	// TODO: test abort_dirty
	// TODO: test unaligned reads
}
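
// insert a piece, then mark it for eviction and verify that the cache
// drops it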
TORRENT_TEST(delete_piece)
{
	TEST_SETUP;

	TEST_CHECK(bc.num_pieces() == 0);

	INSERT(0, 0);

	TEST_CHECK(bc.num_pieces() == 1);

	rj.action = job_action_t::read;
	rj.d.io.offset = 0x2000;
	rj.d.io.buffer_size = 0x4000;
	rj.piece = piece_index_t(0);
	rj.storage = pm;
	rj.argument = remove_flags_t{};
	ret = bc.try_read(&rj, alloc);

	cached_piece_entry* pe_ = bc.find_piece(pm.get(), piece_index_t(0));
	bc.mark_for_eviction(pe_, block_cache::disallow_ghost);

	TEST_CHECK(bc.num_pieces() == 0);
}
|