storage optimization: introduced another allocation mode, sparse. There are now three storage modes: allocate, compact and sparse. Unless compact mode is used, the memory structures used to keep track of piece placement are no longer allocated. Sparse storage mode is now the default.

Arvid Norberg 2007-10-08 20:01:36 +00:00
parent e7a40f4a08
commit 4d19f7ff0f
12 changed files with 479 additions and 400 deletions
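
For callers, the visible change is that the boolean compact_mode parameter of session::add_torrent (and the related internal interfaces) is replaced by a storage_mode_t value, with storage_mode_sparse as the default. A minimal sketch of client code against the new signature; the metadata file name and save path are illustrative assumptions, and loading the .torrent via bdecode mirrors what the bundled client_test does:

    #include <fstream>
    #include <iterator>
    #include "libtorrent/entry.hpp"
    #include "libtorrent/bencode.hpp"
    #include "libtorrent/torrent_info.hpp"
    #include "libtorrent/session.hpp"

    int main()
    {
        using namespace libtorrent;

        // load the torrent metadata (file name is an assumption for this sketch)
        std::ifstream in("example.torrent", std::ios_base::binary);
        in.unsetf(std::ios_base::skipws);
        entry e = bdecode(std::istream_iterator<char>(in)
            , std::istream_iterator<char>());
        boost::intrusive_ptr<torrent_info> ti(new torrent_info(e));

        session ses;
        // storage_mode_sparse is the new default; pass storage_mode_compact or
        // storage_mode_allocate explicitly to keep one of the older behaviors
        torrent_handle h = ses.add_torrent(ti, "./save", entry()
            , storage_mode_compact);
        (void)h;
        return 0;
    }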

View File

@ -442,7 +442,7 @@ void add_torrent(libtorrent::session& ses
catch (boost::filesystem::filesystem_error&) {}
torrent_handle h = ses.add_torrent(t, save_path, resume_data
, compact_mode, false);
, compact_mode ? storage_mode_compact : storage_mode_sparse, false);
handles.insert(std::make_pair(
monitored_dir?std::string(torrent):std::string(), h));
@ -481,7 +481,8 @@ void scan_dir(path const& dir_path
// the file has been added to the dir, start
// downloading it.
add_torrent(ses, handles, file, preferred_ratio, compact_mode
add_torrent(ses, handles, file, preferred_ratio, compact_mode ? storage_mode_compact
: storage_mode_sparse
, save_path, true);
valid.insert(file);
}
@ -805,7 +806,8 @@ int main(int ac, char* av[])
sha1_hash info_hash = boost::lexical_cast<sha1_hash>(what[1]);
torrent_handle h = ses.add_torrent(std::string(what[2]).c_str()
, info_hash, 0, save_path, entry(), compact_allocation_mode);
, info_hash, 0, save_path, entry(), compact_allocation_mode ? storage_mode_compact
: storage_mode_sparse);
handles.insert(std::make_pair(std::string(), h));
h.set_max_connections(60);
@ -816,7 +818,7 @@ int main(int ac, char* av[])
}
// if it's a torrent file, open it as usual
add_torrent(ses, handles, i->c_str(), preferred_ratio
, compact_allocation_mode, save_path, false);
, compact_allocation_mode ? storage_mode_compact : storage_mode_sparse, save_path, false);
}
catch (std::exception& e)
{

View File

@ -254,7 +254,7 @@ namespace libtorrent
boost::intrusive_ptr<torrent_info> ti
, fs::path const& save_path
, entry const& resume_data
, bool compact_mode
, storage_mode_t storage_mode
, storage_constructor_type sc
, bool paused
, void* userdata);
@ -265,7 +265,7 @@ namespace libtorrent
, char const* name
, fs::path const& save_path
, entry const& resume_data
, bool compact_mode
, storage_mode_t storage_mode
, storage_constructor_type sc
, bool paused
, void* userdata);

View File

@ -115,7 +115,7 @@ namespace libtorrent
: m_impl(impl) {}
boost::shared_ptr<aux::session_impl> m_impl;
};
class TORRENT_EXPORT session: public boost::noncopyable, aux::eh_initializer
{
public:
@ -140,7 +140,7 @@ namespace libtorrent
torrent_info const& ti
, fs::path const& save_path
, entry const& resume_data = entry()
, bool compact_mode = true
, storage_mode_t storage_mode = storage_mode_sparse
, bool paused = false
, storage_constructor_type sc = default_storage_constructor) TORRENT_DEPRECATED;
@ -148,7 +148,7 @@ namespace libtorrent
boost::intrusive_ptr<torrent_info> ti
, fs::path const& save_path
, entry const& resume_data = entry()
, bool compact_mode = true
, storage_mode_t storage_mode = storage_mode_sparse
, bool paused = false
, storage_constructor_type sc = default_storage_constructor
, void* userdata = 0);
@ -159,7 +159,7 @@ namespace libtorrent
, char const* name
, fs::path const& save_path
, entry const& resume_data = entry()
, bool compact_mode = true
, storage_mode_t storage_mode = storage_mode_sparse
, bool paused = false
, storage_constructor_type sc = default_storage_constructor
, void* userdata = 0);

View File

@ -71,6 +71,13 @@ namespace libtorrent
struct file_pool;
struct disk_io_job;
enum storage_mode_t
{
storage_mode_allocate = 0,
storage_mode_sparse,
storage_mode_compact
};
#if defined(_WIN32) && defined(UNICODE)
TORRENT_EXPORT std::wstring safe_convert(std::string const& s);
@ -180,7 +187,7 @@ namespace libtorrent
~piece_manager();
bool check_fastresume(aux::piece_checker_data& d
, std::vector<bool>& pieces, int& num_pieces, bool compact_mode);
, std::vector<bool>& pieces, int& num_pieces, storage_mode_t storage_mode);
std::pair<bool, float> check_files(std::vector<bool>& pieces
, int& num_pieces, boost::recursive_mutex& mutex);
@ -191,8 +198,8 @@ namespace libtorrent
bool verify_resume_data(entry& rd, std::string& error);
bool is_allocating() const
{ return m_state == state_allocating; }
{ return m_state == state_expand_pieces; }
void mark_failed(int index);
unsigned long piece_crc(
@ -200,8 +207,9 @@ namespace libtorrent
, int block_size
, piece_picker::block_info const* bi);
int slot_for_piece(int piece_index) const;
int slot_for(int piece) const;
int piece_for(int slot) const;
void async_read(
peer_request const& r
, boost::function<void(int, disk_io_job const&)> const& handler
@ -231,7 +239,7 @@ namespace libtorrent
void export_piece_map(std::vector<int>& pieces) const;
bool compact_allocation() const
{ return m_compact_mode; }
{ return m_storage_mode == storage_mode_compact; }
#ifndef NDEBUG
std::string name() const { return m_info->name(); }
@ -261,6 +269,7 @@ namespace libtorrent
, int offset
, int size);
void switch_to_full_mode();
sha1_hash hash_for_piece_impl(int piece);
void release_files_impl();
@ -276,16 +285,7 @@ namespace libtorrent
#endif
boost::scoped_ptr<storage_interface> m_storage;
// if this is true, pieces are always allocated at the
// lowest possible slot index. If it is false, pieces
// are always written to their final place immediately
bool m_compact_mode;
// if this is true, pieces that haven't been downloaded
// will be filled with zeroes. Not filling with zeroes
// will not work in some cases (where a seek cannot pass
// the end of the file).
bool m_fill_mode;
storage_mode_t m_storage_mode;
// a bitmask representing the pieces we have
std::vector<bool> m_have_piece;
@ -329,10 +329,21 @@ namespace libtorrent
state_create_files,
// checking the files
state_full_check,
// allocating files (in non-compact mode)
state_allocating
// move pieces to their final position
state_expand_pieces
} m_state;
int m_current_slot;
// used during check. If any piece is found
// that is not in its final position, this
// is set to true
bool m_out_of_place;
// used to move pieces while expanding
// the storage from compact allocation
// to full allocation
std::vector<char> m_scratch_buffer;
std::vector<char> m_scratch_buffer2;
// the piece that is in the scratch buffer
int m_scratch_piece;
// this is saved in case we need to instantiate a new
// storage (used when remapping files)

View File

@ -101,7 +101,7 @@ namespace libtorrent
, boost::intrusive_ptr<torrent_info> tf
, fs::path const& save_path
, tcp::endpoint const& net_interface
, bool compact_mode
, storage_mode_t m_storage_mode
, int block_size
, storage_constructor_type sc
, bool paused);
@ -116,7 +116,7 @@ namespace libtorrent
, char const* name
, fs::path const& save_path
, tcp::endpoint const& net_interface
, bool compact_mode
, storage_mode_t m_storage_mode
, int block_size
, storage_constructor_type sc
, bool paused);
@ -751,7 +751,7 @@ namespace libtorrent
fs::path m_save_path;
// determines the storage state for this torrent.
const bool m_compact_mode;
storage_mode_t m_storage_mode;
// defaults to 16 kiB, but can be set by the user
// when creating the torrent

View File

@ -52,6 +52,7 @@ POSSIBILITY OF SUCH DAMAGE.
#include "libtorrent/torrent_info.hpp"
#include "libtorrent/time.hpp"
#include "libtorrent/config.hpp"
#include "libtorrent/storage.hpp"
namespace libtorrent
{
@ -106,7 +107,7 @@ namespace libtorrent
, num_connections(0)
, uploads_limit(0)
, connections_limit(0)
, compact_mode(false)
, storage_mode(storage_mode_sparse)
{}
enum state_t
@ -216,7 +217,7 @@ namespace libtorrent
// true if the torrent is saved in compact mode
// false if it is saved in full allocation mode
bool compact_mode;
storage_mode_t storage_mode;
};
struct TORRENT_EXPORT block_info
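
In torrent_status the compact_mode flag is likewise replaced by a storage_mode field. A small sketch of how status-reporting code would read it after this commit (the torrent_handle is assumed to come from add_torrent, as in the earlier sketch):

    #include <iostream>
    #include "libtorrent/torrent_handle.hpp"
    #include "libtorrent/storage.hpp"

    void print_storage_mode(libtorrent::torrent_handle const& h)
    {
        libtorrent::torrent_status st = h.status();
        if (st.storage_mode == libtorrent::storage_mode_compact)
            std::cout << "compact allocation\n";
        else if (st.storage_mode == libtorrent::storage_mode_allocate)
            std::cout << "full allocation\n";
        else
            std::cout << "sparse allocation (the new default)\n";
    }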

View File

@ -186,28 +186,28 @@ namespace libtorrent
torrent_info const& ti
, fs::path const& save_path
, entry const& resume_data
, bool compact_mode
, storage_mode_t storage_mode
, bool paused
, storage_constructor_type sc)
{
TORRENT_ASSERT(!ti.m_half_metadata);
boost::intrusive_ptr<torrent_info> tip(new torrent_info(ti));
return m_impl->add_torrent(tip, save_path, resume_data
, compact_mode, sc, paused, 0);
, storage_mode, sc, paused, 0);
}
torrent_handle session::add_torrent(
boost::intrusive_ptr<torrent_info> ti
, fs::path const& save_path
, entry const& resume_data
, bool compact_mode
, storage_mode_t storage_mode
, bool paused
, storage_constructor_type sc
, void* userdata)
{
TORRENT_ASSERT(!ti->m_half_metadata);
return m_impl->add_torrent(ti, save_path, resume_data
, compact_mode, sc, paused, userdata);
, storage_mode, sc, paused, userdata);
}
torrent_handle session::add_torrent(
@ -216,13 +216,13 @@ namespace libtorrent
, char const* name
, fs::path const& save_path
, entry const& e
, bool compact_mode
, storage_mode_t storage_mode
, bool paused
, storage_constructor_type sc
, void* userdata)
{
return m_impl->add_torrent(tracker_url, info_hash, name, save_path, e
, compact_mode, sc, paused, userdata);
, storage_mode, sc, paused, userdata);
}
void session::remove_torrent(const torrent_handle& h)

View File

@ -1623,7 +1623,7 @@ namespace detail
boost::intrusive_ptr<torrent_info> ti
, fs::path const& save_path
, entry const& resume_data
, bool compact_mode
, storage_mode_t storage_mode
, storage_constructor_type sc
, bool paused
, void* userdata)
@ -1655,7 +1655,7 @@ namespace detail
// the thread
boost::shared_ptr<torrent> torrent_ptr(
new torrent(*this, m_checker_impl, ti, save_path
, m_listen_interface, compact_mode, 16 * 1024
, m_listen_interface, storage_mode, 16 * 1024
, sc, paused));
torrent_ptr->start();
@ -1701,7 +1701,7 @@ namespace detail
, char const* name
, fs::path const& save_path
, entry const&
, bool compact_mode
, storage_mode_t storage_mode
, storage_constructor_type sc
, bool paused
, void* userdata)
@ -1735,7 +1735,7 @@ namespace detail
// the thread
boost::shared_ptr<torrent> torrent_ptr(
new torrent(*this, m_checker_impl, tracker_url, info_hash, name
, save_path, m_listen_interface, compact_mode, 16 * 1024
, save_path, m_listen_interface, storage_mode, 16 * 1024
, sc, paused));
torrent_ptr->start();

View File

@ -447,7 +447,7 @@ namespace libtorrent
}
// if the file is empty, just create it. But also make sure
// the directory exits.
// the directory exists.
if (file_iter->size == 0)
{
file(m_save_path / file_iter->path, file::out);
@ -931,107 +931,6 @@ namespace libtorrent
return new storage(ti, path, fp);
}
bool supports_sparse_files(fs::path const& p)
{
TORRENT_ASSERT(p.is_complete());
#if defined(_WIN32)
// assume windows API is available
DWORD max_component_len = 0;
DWORD volume_flags = 0;
std::string root_device = p.root_name() + "\\";
#if defined(UNICODE)
std::wstring wph(safe_convert(root_device));
bool ret = ::GetVolumeInformation(wph.c_str(), 0
, 0, 0, &max_component_len, &volume_flags, 0, 0);
#else
bool ret = ::GetVolumeInformation(root_device.c_str(), 0
, 0, 0, &max_component_len, &volume_flags, 0, 0);
#endif
if (!ret) return false;
if (volume_flags & FILE_SUPPORTS_SPARSE_FILES)
return true;
#endif
#if defined(__APPLE__) || defined(__linux__) || defined(__FreeBSD__)
// find the last existing directory of the save path
fs::path query_path = p;
while (!query_path.empty() && !exists(query_path))
query_path = query_path.branch_path();
#endif
#if defined(__APPLE__)
struct statfs fsinfo;
int ret = statfs(query_path.native_directory_string().c_str(), &fsinfo);
if (ret != 0) return false;
attrlist request;
request.bitmapcount = ATTR_BIT_MAP_COUNT;
request.reserved = 0;
request.commonattr = 0;
request.volattr = ATTR_VOL_CAPABILITIES;
request.dirattr = 0;
request.fileattr = 0;
request.forkattr = 0;
struct vol_capabilities_attr_buf
{
unsigned long length;
vol_capabilities_attr_t info;
} vol_cap;
ret = getattrlist(fsinfo.f_mntonname, &request, &vol_cap
, sizeof(vol_cap), 0);
if (ret != 0) return false;
if (vol_cap.info.capabilities[VOL_CAPABILITIES_FORMAT]
& (VOL_CAP_FMT_SPARSE_FILES | VOL_CAP_FMT_ZERO_RUNS))
{
return true;
}
// workaround for bugs in Mac OS X where zero run is not reported
if (!strcmp(fsinfo.f_fstypename, "hfs")
|| !strcmp(fsinfo.f_fstypename, "ufs"))
return true;
return false;
#endif
#if defined(__linux__) || defined(__FreeBSD__)
struct statfs buf;
int err = statfs(query_path.native_directory_string().c_str(), &buf);
if (err == 0)
{
switch (buf.f_type)
{
case 0x5346544e: // NTFS
case 0xEF51: // EXT2 OLD
case 0xEF53: // EXT2 and EXT3
case 0x00011954: // UFS
case 0x52654973: // ReiserFS
case 0x52345362: // Reiser4
case 0x58465342: // XFS
case 0x65735546: // NTFS-3G
case 0x19540119: // UFS2
return true;
}
}
#ifndef NDEBUG
else
{
std::cerr << "statfs returned " << err << std::endl;
std::cerr << "errno: " << errno << std::endl;
std::cerr << "path: " << query_path.native_directory_string() << std::endl;
}
#endif
#endif
// TODO: POSIX implementation
return false;
}
// -- piece_manager -----------------------------------------------------
piece_manager::piece_manager(
@ -1042,15 +941,16 @@ namespace libtorrent
, disk_io_thread& io
, storage_constructor_type sc)
: m_storage(sc(ti, save_path, fp))
, m_compact_mode(false)
, m_fill_mode(true)
, m_storage_mode(storage_mode_sparse)
, m_info(ti)
, m_save_path(complete(save_path))
, m_current_slot(0)
, m_out_of_place(false)
, m_scratch_piece(-1)
, m_storage_constructor(sc)
, m_io_thread(io)
, m_torrent(torrent)
{
m_fill_mode = !supports_sparse_files(save_path);
}
piece_manager::~piece_manager()
@ -1158,7 +1058,7 @@ namespace libtorrent
m_piece_hasher.erase(i);
}
int slot = m_piece_to_slot[piece];
int slot = slot_for(piece);
TORRENT_ASSERT(slot != has_no_slot);
return m_storage->hash_for_slot(slot, ph, m_info->piece_size(piece));
}
@ -1206,11 +1106,10 @@ namespace libtorrent
INVARIANT_CHECK;
if (m_storage_mode != storage_mode_compact) return;
TORRENT_ASSERT(piece_index >= 0 && piece_index < (int)m_piece_to_slot.size());
TORRENT_ASSERT(m_piece_to_slot[piece_index] >= 0);
int slot_index = m_piece_to_slot[piece_index];
TORRENT_ASSERT(slot_index >= 0);
m_slot_to_piece[slot_index] = unassigned;
@ -1218,12 +1117,6 @@ namespace libtorrent
m_free_slots.push_back(slot_index);
}
int piece_manager::slot_for_piece(int piece_index) const
{
TORRENT_ASSERT(piece_index >= 0 && piece_index < m_info->num_pieces());
return m_piece_to_slot[piece_index];
}
unsigned long piece_manager::piece_crc(
int slot_index
, int block_size
@ -1275,11 +1168,7 @@ namespace libtorrent
TORRENT_ASSERT(buf);
TORRENT_ASSERT(offset >= 0);
TORRENT_ASSERT(size > 0);
TORRENT_ASSERT(piece_index >= 0 && piece_index < (int)m_piece_to_slot.size());
TORRENT_ASSERT(m_piece_to_slot[piece_index] >= 0
&& m_piece_to_slot[piece_index] < (int)m_slot_to_piece.size());
int slot = m_piece_to_slot[piece_index];
TORRENT_ASSERT(slot >= 0 && slot < (int)m_slot_to_piece.size());
int slot = slot_for(piece_index);
return m_storage->read(buf, slot, offset, size);
}
@ -1292,7 +1181,7 @@ namespace libtorrent
TORRENT_ASSERT(buf);
TORRENT_ASSERT(offset >= 0);
TORRENT_ASSERT(size > 0);
TORRENT_ASSERT(piece_index >= 0 && piece_index < (int)m_piece_to_slot.size());
TORRENT_ASSERT(piece_index >= 0 && piece_index < m_info->num_pieces());
if (offset == 0)
{
@ -1317,7 +1206,6 @@ namespace libtorrent
}
int slot = allocate_slot_for_piece(piece_index);
TORRENT_ASSERT(slot >= 0 && slot < (int)m_slot_to_piece.size());
m_storage->write(buf, slot, offset, size);
}
@ -1426,7 +1314,8 @@ namespace libtorrent
// that piece as unassigned, since this slot
// is the correct place for the piece.
m_slot_to_piece[other_slot] = unassigned;
m_free_slots.push_back(other_slot);
if (m_storage_mode == storage_mode_compact)
m_free_slots.push_back(other_slot);
}
TORRENT_ASSERT(m_piece_to_slot[piece_index] != current_slot);
TORRENT_ASSERT(m_piece_to_slot[piece_index] >= 0);
@ -1485,7 +1374,7 @@ namespace libtorrent
bool piece_manager::check_fastresume(
aux::piece_checker_data& data
, std::vector<bool>& pieces
, int& num_pieces, bool compact_mode)
, int& num_pieces, storage_mode_t storage_mode)
{
boost::recursive_mutex::scoped_lock lock(m_mutex);
@ -1493,7 +1382,7 @@ namespace libtorrent
TORRENT_ASSERT(m_info->piece_length() > 0);
m_compact_mode = compact_mode;
m_storage_mode = storage_mode;
// This will corrupt the storage
// use while debugging to find
@ -1503,9 +1392,13 @@ namespace libtorrent
m_piece_to_slot.resize(m_info->num_pieces(), has_no_slot);
m_slot_to_piece.resize(m_info->num_pieces(), unallocated);
m_free_slots.clear();
m_unallocated_slots.clear();
TORRENT_ASSERT(m_free_slots.empty());
TORRENT_ASSERT(m_unallocated_slots.empty());
// assume no piece is out of place (i.e. in a slot
// other than the one it should be in)
bool out_of_place = false;
pieces.clear();
pieces.resize(m_info->num_pieces(), false);
num_pieces = 0;
@ -1513,13 +1406,14 @@ namespace libtorrent
// if we have fast-resume info
// use it instead of doing the actual checking
if (!data.piece_map.empty()
&& data.piece_map.size() <= m_slot_to_piece.size())
&& int(data.piece_map.size()) <= m_info->num_pieces())
{
for (int i = 0; i < (int)data.piece_map.size(); ++i)
{
m_slot_to_piece[i] = data.piece_map[i];
if (data.piece_map[i] >= 0)
{
if (data.piece_map[i] != i) out_of_place = true;
m_piece_to_slot[data.piece_map[i]] = i;
int found_piece = data.piece_map[i];
@ -1537,27 +1431,53 @@ namespace libtorrent
}
else if (data.piece_map[i] == unassigned)
{
m_free_slots.push_back(i);
if (m_storage_mode == storage_mode_compact)
m_free_slots.push_back(i);
}
else
{
TORRENT_ASSERT(data.piece_map[i] == unallocated);
m_unallocated_slots.push_back(i);
if (m_storage_mode == storage_mode_compact)
m_unallocated_slots.push_back(i);
}
}
m_unallocated_slots.reserve(int(pieces.size() - data.piece_map.size()));
for (int i = (int)data.piece_map.size(); i < (int)pieces.size(); ++i)
if (m_storage_mode == storage_mode_compact)
{
m_unallocated_slots.push_back(i);
m_unallocated_slots.reserve(int(m_info->num_pieces() - data.piece_map.size()));
for (int i = (int)data.piece_map.size(); i < (int)m_info->num_pieces(); ++i)
{
m_unallocated_slots.push_back(i);
}
if (m_unallocated_slots.empty())
{
switch_to_full_mode();
}
}
else
{
if (!out_of_place)
{
// if no piece is out of place
// since we're in full allocation mode, we can
// forget the piece allocation tables
std::vector<int>().swap(m_piece_to_slot);
std::vector<int>().swap(m_slot_to_piece);
m_state = state_create_files;
return false;
}
else
{
// in this case we're in full allocation mode, but
// we're resuming a compact allocated storage
m_state = state_expand_pieces;
m_current_slot = 0;
return false;
}
}
if (m_unallocated_slots.empty())
m_state = state_create_files;
else if (m_compact_mode)
m_state = state_create_files;
else
m_state = state_allocating;
m_state = state_create_files;
return false;
}
@ -1572,18 +1492,13 @@ namespace libtorrent
| |
| v
| +------------+
| | full_check |
| +------------+
| |
| v
| +------------+
|->| allocating |
| +------------+
| |
| v
| +--------------+
|->| create_files |
| +------------+ +---------------+
| | full_check |-->| expand_pieces |
| +------------+ +---------------+
| | |
| v |
| +--------------+ |
+->| create_files | <------+
+--------------+
|
v
@ -1602,67 +1517,97 @@ namespace libtorrent
std::pair<bool, float> piece_manager::check_files(
std::vector<bool>& pieces, int& num_pieces, boost::recursive_mutex& mutex)
{
#ifndef NDEBUG
boost::recursive_mutex::scoped_lock l_(mutex);
TORRENT_ASSERT(num_pieces == std::count(pieces.begin(), pieces.end(), true));
if (m_state == state_allocating)
{
if (m_compact_mode || m_unallocated_slots.empty())
{
m_state = state_create_files;
return std::make_pair(false, 1.f);
}
if (int(m_unallocated_slots.size()) == m_info->num_pieces()
&& !m_fill_mode)
{
// if there is not a single file on disk, just
// create the files
m_state = state_create_files;
return std::make_pair(false, 1.f);
}
// if we're not in compact mode, make sure the
// pieces are spread out and placed at their
// final position.
TORRENT_ASSERT(!m_unallocated_slots.empty());
if (!m_fill_mode)
{
// if we're not filling the allocation
// just make sure we move the current pieces
// into place, and just skip all other
// allocation
// allocate_slots returns true if it had to
// move any data
allocate_slots(m_unallocated_slots.size(), true);
}
else
{
allocate_slots(1);
}
return std::make_pair(false, 1.f - (float)m_unallocated_slots.size()
/ (float)m_slot_to_piece.size());
}
l_.unlock();
#endif
if (m_state == state_create_files)
{
m_storage->initialize(!m_fill_mode && !m_compact_mode);
if (!m_unallocated_slots.empty() && !m_compact_mode)
{
TORRENT_ASSERT(!m_fill_mode);
std::vector<int>().swap(m_unallocated_slots);
std::fill(m_slot_to_piece.begin(), m_slot_to_piece.end(), int(unassigned));
m_free_slots.resize(m_info->num_pieces());
for (int i = 0; i < m_info->num_pieces(); ++i)
m_free_slots[i] = i;
}
m_storage->initialize(m_storage_mode == storage_mode_allocate);
m_state = state_finished;
return std::make_pair(true, 1.f);
}
if (m_state == state_expand_pieces)
{
INVARIANT_CHECK;
if (m_scratch_piece >= 0)
{
int piece = m_scratch_piece;
int other_piece = m_slot_to_piece[piece];
m_scratch_piece = -1;
if (other_piece >= 0)
{
if (m_scratch_buffer2.empty())
m_scratch_buffer2.resize(m_info->piece_length());
m_storage->read(&m_scratch_buffer2[0], piece, 0, m_info->piece_size(other_piece));
m_scratch_piece = other_piece;
m_piece_to_slot[other_piece] = unassigned;
}
// the slot where this piece belongs is
// free. Just move the piece there.
m_storage->write(&m_scratch_buffer[0], piece, 0, m_info->piece_size(piece));
m_piece_to_slot[piece] = piece;
m_slot_to_piece[piece] = piece;
if (other_piece >= 0)
m_scratch_buffer.swap(m_scratch_buffer2);
return std::make_pair(false, (float)m_current_slot / m_info->num_pieces());
}
while (m_current_slot < m_info->num_pieces()
&& (m_slot_to_piece[m_current_slot] == m_current_slot
|| m_slot_to_piece[m_current_slot] < 0))
{
++m_current_slot;
}
if (m_current_slot == m_info->num_pieces())
{
m_state = state_create_files;
std::vector<char>().swap(m_scratch_buffer);
std::vector<char>().swap(m_scratch_buffer2);
if (m_storage_mode != storage_mode_compact)
{
std::vector<int>().swap(m_piece_to_slot);
std::vector<int>().swap(m_slot_to_piece);
}
return std::make_pair(false, 1.f);
}
int piece = m_slot_to_piece[m_current_slot];
TORRENT_ASSERT(piece >= 0);
int other_piece = m_slot_to_piece[piece];
if (other_piece >= 0)
{
// there is another piece in the slot
// where this one goes. Store it in the scratch
// buffer until next iteration.
if (m_scratch_buffer.empty())
m_scratch_buffer.resize(m_info->piece_length());
m_storage->read(&m_scratch_buffer[0], piece, 0, m_info->piece_size(other_piece));
m_scratch_piece = other_piece;
m_piece_to_slot[other_piece] = unassigned;
}
// the slot where this piece belongs is
// free. Just move the piece there.
m_storage->move_slot(m_current_slot, piece);
m_piece_to_slot[piece] = piece;
m_slot_to_piece[m_current_slot] = unassigned;
m_slot_to_piece[piece] = piece;
return std::make_pair(false, (float)m_current_slot / m_info->num_pieces());
}
TORRENT_ASSERT(m_state == state_full_check);
// ------------------------
@ -1674,12 +1619,13 @@ namespace libtorrent
// initialization for the full check
if (m_hash_to_piece.empty())
{
m_current_slot = 0;
for (int i = 0; i < m_info->num_pieces(); ++i)
{
m_hash_to_piece.insert(std::make_pair(m_info->hash_for_piece(i), i));
}
boost::recursive_mutex::scoped_lock l(mutex);
std::fill(pieces.begin(), pieces.end(), false);
num_pieces = 0;
}
m_piece_data.resize(int(m_info->piece_length()));
@ -1694,6 +1640,10 @@ namespace libtorrent
int piece_index = identify_data(m_piece_data, m_current_slot
, pieces, num_pieces, m_hash_to_piece, mutex);
if (piece_index != m_current_slot
&& piece_index >= 0)
m_out_of_place = true;
TORRENT_ASSERT(num_pieces == std::count(pieces.begin(), pieces.end(), true));
TORRENT_ASSERT(piece_index == unassigned || piece_index >= 0);
@ -1745,8 +1695,11 @@ namespace libtorrent
std::vector<int>::iterator i =
std::find(m_free_slots.begin(), m_free_slots.end(), other_slot);
TORRENT_ASSERT(i != m_free_slots.end());
m_free_slots.erase(i);
m_free_slots.push_back(m_current_slot);
if (m_storage_mode == storage_mode_compact)
{
m_free_slots.erase(i);
m_free_slots.push_back(m_current_slot);
}
}
if (other_piece >= 0)
@ -1770,7 +1723,8 @@ namespace libtorrent
m_slot_to_piece[other_slot] = piece_index;
m_piece_to_slot[other_piece] = m_current_slot;
if (piece_index == unassigned)
if (piece_index == unassigned
&& m_storage_mode == storage_mode_compact)
m_free_slots.push_back(other_slot);
if (piece_index >= 0)
@ -1845,8 +1799,11 @@ namespace libtorrent
std::vector<int>::iterator i =
std::find(m_free_slots.begin(), m_free_slots.end(), slot1);
TORRENT_ASSERT(i != m_free_slots.end());
m_free_slots.erase(i);
m_free_slots.push_back(slot2);
if (m_storage_mode == storage_mode_compact)
{
m_free_slots.erase(i);
m_free_slots.push_back(slot2);
}
}
if (piece1 >= 0)
@ -1873,7 +1830,7 @@ namespace libtorrent
// the slot was identified as piece 'piece_index'
if (piece_index != unassigned)
m_piece_to_slot[piece_index] = m_current_slot;
else
else if (m_storage_mode == storage_mode_compact)
m_free_slots.push_back(m_current_slot);
m_slot_to_piece[m_current_slot] = piece_index;
@ -1899,10 +1856,13 @@ namespace libtorrent
(file_offset - current_offset + m_info->piece_length() - 1)
/ m_info->piece_length());
for (int i = m_current_slot; i < m_current_slot + skip_blocks; ++i)
if (m_storage_mode == storage_mode_compact)
{
TORRENT_ASSERT(m_slot_to_piece[i] == unallocated);
m_unallocated_slots.push_back(i);
for (int i = m_current_slot; i < m_current_slot + skip_blocks; ++i)
{
TORRENT_ASSERT(m_slot_to_piece[i] == unallocated);
m_unallocated_slots.push_back(i);
}
}
// current slot will increase by one at the end of the for-loop too
@ -1910,15 +1870,46 @@ namespace libtorrent
}
++m_current_slot;
if (m_current_slot >= m_info->num_pieces())
if (m_current_slot >= m_info->num_pieces())
{
TORRENT_ASSERT(m_current_slot == m_info->num_pieces());
// clear the memory we've been using
std::vector<char>().swap(m_piece_data);
std::multimap<sha1_hash, int>().swap(m_hash_to_piece);
m_state = state_allocating;
if (m_storage_mode != storage_mode_compact)
{
if (!m_out_of_place)
{
// if no piece is out of place
// since we're in full allocation mode, we can
// forget the piece allocation tables
std::vector<int>().swap(m_piece_to_slot);
std::vector<int>().swap(m_slot_to_piece);
m_state = state_create_files;
return std::make_pair(false, 1.f);
}
else
{
// in this case we're in full allocation mode, but
// we're resuming a compact allocated storage
m_state = state_expand_pieces;
m_current_slot = 0;
return std::make_pair(false, 0.f);
}
}
else if (m_unallocated_slots.empty())
{
switch_to_full_mode();
}
m_state = state_create_files;
#ifndef NDEBUG
boost::recursive_mutex::scoped_lock l(mutex);
TORRENT_ASSERT(num_pieces == std::count(pieces.begin(), pieces.end(), true));
#endif
return std::make_pair(false, 1.f);
}
@ -1927,10 +1918,26 @@ namespace libtorrent
return std::make_pair(false, (float)m_current_slot / m_info->num_pieces());
}
void piece_manager::switch_to_full_mode()
{
TORRENT_ASSERT(m_storage_mode == storage_mode_compact);
TORRENT_ASSERT(m_unallocated_slots.empty());
// we have allocated all slots, switch to
// full allocation mode in order to free
// some unnecessary memory.
m_storage_mode = storage_mode_sparse;
std::vector<int>().swap(m_unallocated_slots);
std::vector<int>().swap(m_free_slots);
std::vector<int>().swap(m_piece_to_slot);
std::vector<int>().swap(m_slot_to_piece);
}
int piece_manager::allocate_slot_for_piece(int piece_index)
{
boost::recursive_mutex::scoped_lock lock(m_mutex);
if (m_storage_mode != storage_mode_compact) return piece_index;
// INVARIANT_CHECK;
TORRENT_ASSERT(piece_index >= 0);
@ -2030,26 +2037,27 @@ namespace libtorrent
debug_log();
#endif
}
TORRENT_ASSERT(slot_index >= 0);
TORRENT_ASSERT(slot_index < (int)m_slot_to_piece.size());
if (m_unallocated_slots.empty())
{
switch_to_full_mode();
}
return slot_index;
}
bool piece_manager::allocate_slots(int num_slots, bool abort_on_disk)
{
TORRENT_ASSERT(num_slots > 0);
boost::recursive_mutex::scoped_lock lock(m_mutex);
TORRENT_ASSERT(num_slots > 0);
// INVARIANT_CHECK;
TORRENT_ASSERT(!m_unallocated_slots.empty());
TORRENT_ASSERT(m_storage_mode == storage_mode_compact);
const int stack_buffer_size = 16*1024;
char zeroes[stack_buffer_size];
memset(zeroes, 0, stack_buffer_size);
bool written = false;
for (int i = 0; i < num_slots && !m_unallocated_slots.empty(); ++i)
@ -2069,134 +2077,160 @@ namespace libtorrent
m_piece_to_slot[pos] = pos;
written = true;
}
else if (m_fill_mode)
{
int piece_size = int(m_info->piece_size(pos));
int offset = 0;
for (; piece_size > 0; piece_size -= stack_buffer_size
, offset += stack_buffer_size)
{
m_storage->write(zeroes, pos, offset
, (std::min)(piece_size, stack_buffer_size));
}
written = true;
}
m_unallocated_slots.erase(m_unallocated_slots.begin());
m_slot_to_piece[new_free_slot] = unassigned;
m_free_slots.push_back(new_free_slot);
if (abort_on_disk && written) return true;
if (abort_on_disk && written) break;
}
TORRENT_ASSERT(m_free_slots.size() > 0);
return written;
}
int piece_manager::slot_for(int piece) const
{
if (m_storage_mode != storage_mode_compact) return piece;
TORRENT_ASSERT(piece < int(m_piece_to_slot.size()));
TORRENT_ASSERT(piece >= 0);
return m_piece_to_slot[piece];
}
int piece_manager::piece_for(int slot) const
{
if (m_storage_mode != storage_mode_compact) return slot;
TORRENT_ASSERT(slot < int(m_slot_to_piece.size()));
TORRENT_ASSERT(slot >= 0);
return m_slot_to_piece[slot];
}
#ifndef NDEBUG
void piece_manager::check_invariant() const
{
boost::recursive_mutex::scoped_lock lock(m_mutex);
if (m_piece_to_slot.empty()) return;
TORRENT_ASSERT((int)m_piece_to_slot.size() == m_info->num_pieces());
TORRENT_ASSERT((int)m_slot_to_piece.size() == m_info->num_pieces());
for (std::vector<int>::const_iterator i = m_free_slots.begin();
i != m_free_slots.end(); ++i)
if (m_unallocated_slots.empty() && m_state == state_finished)
{
TORRENT_ASSERT(*i < (int)m_slot_to_piece.size());
TORRENT_ASSERT(*i >= 0);
TORRENT_ASSERT(m_slot_to_piece[*i] == unassigned);
TORRENT_ASSERT(std::find(i+1, m_free_slots.end(), *i)
== m_free_slots.end());
TORRENT_ASSERT(m_storage_mode != storage_mode_compact);
}
for (std::vector<int>::const_iterator i = m_unallocated_slots.begin();
i != m_unallocated_slots.end(); ++i)
if (m_storage_mode != storage_mode_compact)
{
TORRENT_ASSERT(*i < (int)m_slot_to_piece.size());
TORRENT_ASSERT(*i >= 0);
TORRENT_ASSERT(m_slot_to_piece[*i] == unallocated);
TORRENT_ASSERT(std::find(i+1, m_unallocated_slots.end(), *i)
== m_unallocated_slots.end());
TORRENT_ASSERT(m_unallocated_slots.empty());
TORRENT_ASSERT(m_free_slots.empty());
}
for (int i = 0; i < m_info->num_pieces(); ++i)
if (m_storage_mode != storage_mode_compact
&& m_state != state_expand_pieces
&& m_state != state_full_check)
{
// Check domain of piece_to_slot's elements
if (m_piece_to_slot[i] != has_no_slot)
TORRENT_ASSERT(m_piece_to_slot.empty());
TORRENT_ASSERT(m_slot_to_piece.empty());
}
else
{
if (m_piece_to_slot.empty()) return;
TORRENT_ASSERT((int)m_piece_to_slot.size() == m_info->num_pieces());
TORRENT_ASSERT((int)m_slot_to_piece.size() == m_info->num_pieces());
for (std::vector<int>::const_iterator i = m_free_slots.begin();
i != m_free_slots.end(); ++i)
{
TORRENT_ASSERT(m_piece_to_slot[i] >= 0);
TORRENT_ASSERT(m_piece_to_slot[i] < (int)m_slot_to_piece.size());
TORRENT_ASSERT(*i < (int)m_slot_to_piece.size());
TORRENT_ASSERT(*i >= 0);
TORRENT_ASSERT(m_slot_to_piece[*i] == unassigned);
TORRENT_ASSERT(std::find(i+1, m_free_slots.end(), *i)
== m_free_slots.end());
}
// Check domain of slot_to_piece's elements
if (m_slot_to_piece[i] != unallocated
&& m_slot_to_piece[i] != unassigned)
for (std::vector<int>::const_iterator i = m_unallocated_slots.begin();
i != m_unallocated_slots.end(); ++i)
{
TORRENT_ASSERT(m_slot_to_piece[i] >= 0);
TORRENT_ASSERT(m_slot_to_piece[i] < (int)m_piece_to_slot.size());
TORRENT_ASSERT(*i < (int)m_slot_to_piece.size());
TORRENT_ASSERT(*i >= 0);
TORRENT_ASSERT(m_slot_to_piece[*i] == unallocated);
TORRENT_ASSERT(std::find(i+1, m_unallocated_slots.end(), *i)
== m_unallocated_slots.end());
}
// do more detailed checks on piece_to_slot
if (m_piece_to_slot[i] >= 0)
for (int i = 0; i < m_info->num_pieces(); ++i)
{
TORRENT_ASSERT(m_slot_to_piece[m_piece_to_slot[i]] == i);
if (m_piece_to_slot[i] != i)
// Check domain of piece_to_slot's elements
if (m_piece_to_slot[i] != has_no_slot)
{
TORRENT_ASSERT(m_slot_to_piece[i] == unallocated);
TORRENT_ASSERT(m_piece_to_slot[i] >= 0);
TORRENT_ASSERT(m_piece_to_slot[i] < (int)m_slot_to_piece.size());
}
}
else
{
TORRENT_ASSERT(m_piece_to_slot[i] == has_no_slot);
}
// do more detailed checks on slot_to_piece
// Check domain of slot_to_piece's elements
if (m_slot_to_piece[i] != unallocated
&& m_slot_to_piece[i] != unassigned)
{
TORRENT_ASSERT(m_slot_to_piece[i] >= 0);
TORRENT_ASSERT(m_slot_to_piece[i] < (int)m_piece_to_slot.size());
}
if (m_slot_to_piece[i] >= 0)
{
TORRENT_ASSERT(m_slot_to_piece[i] < (int)m_piece_to_slot.size());
TORRENT_ASSERT(m_piece_to_slot[m_slot_to_piece[i]] == i);
// do more detailed checks on piece_to_slot
if (m_piece_to_slot[i] >= 0)
{
TORRENT_ASSERT(m_slot_to_piece[m_piece_to_slot[i]] == i);
if (m_piece_to_slot[i] != i)
{
TORRENT_ASSERT(m_slot_to_piece[i] == unallocated);
}
}
else
{
TORRENT_ASSERT(m_piece_to_slot[i] == has_no_slot);
}
// do more detailed checks on slot_to_piece
if (m_slot_to_piece[i] >= 0)
{
TORRENT_ASSERT(m_slot_to_piece[i] < (int)m_piece_to_slot.size());
TORRENT_ASSERT(m_piece_to_slot[m_slot_to_piece[i]] == i);
#ifdef TORRENT_STORAGE_DEBUG
TORRENT_ASSERT(
std::find(
m_unallocated_slots.begin()
, m_unallocated_slots.end()
, i) == m_unallocated_slots.end()
);
TORRENT_ASSERT(
std::find(
m_free_slots.begin()
, m_free_slots.end()
, i) == m_free_slots.end()
);
TORRENT_ASSERT(
std::find(
m_unallocated_slots.begin()
, m_unallocated_slots.end()
, i) == m_unallocated_slots.end()
);
TORRENT_ASSERT(
std::find(
m_free_slots.begin()
, m_free_slots.end()
, i) == m_free_slots.end()
);
#endif
}
else if (m_slot_to_piece[i] == unallocated)
{
}
else if (m_slot_to_piece[i] == unallocated)
{
#ifdef TORRENT_STORAGE_DEBUG
TORRENT_ASSERT(m_unallocated_slots.empty()
|| (std::find(
m_unallocated_slots.begin()
, m_unallocated_slots.end()
, i) != m_unallocated_slots.end())
);
TORRENT_ASSERT(m_unallocated_slots.empty()
|| (std::find(
m_unallocated_slots.begin()
, m_unallocated_slots.end()
, i) != m_unallocated_slots.end())
);
#endif
}
else if (m_slot_to_piece[i] == unassigned)
{
}
else if (m_slot_to_piece[i] == unassigned)
{
#ifdef TORRENT_STORAGE_DEBUG
TORRENT_ASSERT(
std::find(
m_free_slots.begin()
, m_free_slots.end()
, i) != m_free_slots.end()
);
TORRENT_ASSERT(
std::find(
m_free_slots.begin()
, m_free_slots.end()
, i) != m_free_slots.end()
);
#endif
}
else
{
TORRENT_ASSERT(false && "m_slot_to_piece[i] is invalid");
}
else
{
TORRENT_ASSERT(false && "m_slot_to_piece[i] is invalid");
}
}
}
}
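
The new state_expand_pieces pass above takes a storage that was laid out in compact mode and moves every piece into its final slot, parking at most one displaced piece in a scratch buffer at a time. A standalone toy illustration of that idea, operating only on an in-memory slot table (the real piece_manager additionally reads and writes the piece data through m_storage and keeps m_piece_to_slot in sync); it assumes each piece occurs in at most one slot:

    #include <vector>

    // slot_to_piece[s] holds the piece stored in slot s, or -1 if the slot is
    // empty. After the call every piece p sits in slot p, which is the layout
    // the sparse and allocate modes rely on.
    void expand_pieces(std::vector<int>& slot_to_piece)
    {
        const int unassigned = -1;
        for (int slot = 0; slot < int(slot_to_piece.size()); ++slot)
        {
            int piece = slot_to_piece[slot];
            if (piece == unassigned || piece == slot) continue;

            // empty this slot and follow the chain of displaced pieces,
            // holding one piece at a time in 'scratch' (the scratch buffer
            // in the real code)
            slot_to_piece[slot] = unassigned;
            int scratch = piece;
            while (scratch != unassigned)
            {
                int target = scratch;            // this piece belongs in slot 'target'
                scratch = slot_to_piece[target]; // park whatever occupies it now
                slot_to_piece[target] = target;  // piece is now in its final slot
            }
        }
    }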

View File

@ -154,7 +154,7 @@ namespace libtorrent
, boost::intrusive_ptr<torrent_info> tf
, fs::path const& save_path
, tcp::endpoint const& net_interface
, bool compact_mode
, storage_mode_t storage_mode
, int block_size
, storage_constructor_type sc
, bool paused)
@ -195,7 +195,7 @@ namespace libtorrent
, m_total_redundant_bytes(0)
, m_net_interface(net_interface.address(), 0)
, m_save_path(complete(save_path))
, m_compact_mode(compact_mode)
, m_storage_mode(storage_mode)
, m_default_block_size(block_size)
, m_connections_initialized(true)
, m_settings(ses.settings())
@ -215,7 +215,7 @@ namespace libtorrent
, char const* name
, fs::path const& save_path
, tcp::endpoint const& net_interface
, bool compact_mode
, storage_mode_t storage_mode
, int block_size
, storage_constructor_type sc
, bool paused)
@ -255,7 +255,7 @@ namespace libtorrent
, m_total_redundant_bytes(0)
, m_net_interface(net_interface.address(), 0)
, m_save_path(complete(save_path))
, m_compact_mode(compact_mode)
, m_storage_mode(storage_mode)
, m_default_block_size(block_size)
, m_connections_initialized(false)
, m_settings(ses.settings())
@ -2218,7 +2218,7 @@ namespace libtorrent
TORRENT_ASSERT(m_storage);
TORRENT_ASSERT(m_owning_storage.get());
done = m_storage->check_fastresume(data, m_have_pieces, m_num_pieces
, m_compact_mode);
, m_storage_mode);
}
catch (std::exception& e)
{
@ -2768,7 +2768,7 @@ namespace libtorrent
!boost::bind(&peer_connection::is_connecting
, boost::bind(&std::map<tcp::endpoint,peer_connection*>::value_type::second, _1)));
st.compact_mode = m_compact_mode;
st.storage_mode = m_storage_mode;
st.num_complete = m_complete;
st.num_incomplete = m_incomplete;

View File

@ -722,10 +722,10 @@ namespace libtorrent
}
piece_struct["bitmask"] = bitmask;
TORRENT_ASSERT(t->filesystem().slot_for_piece(i->index) >= 0);
TORRENT_ASSERT(t->filesystem().slot_for(i->index) >= 0);
unsigned long adler
= t->filesystem().piece_crc(
t->filesystem().slot_for_piece(i->index)
t->filesystem().slot_for(i->index)
, t->block_size()
, i->info);

View File

@ -19,11 +19,14 @@ const int piece_size = 16;
void on_read_piece(int ret, disk_io_job const& j, char const* data, int size)
{
std::cerr << "on_read_piece piece: " << j.piece << std::endl;
TEST_CHECK(ret == size);
TEST_CHECK(std::equal(j.buffer, j.buffer + ret, data));
}
void run_storage_tests(boost::intrusive_ptr<torrent_info> info, bool compact_allocation = true)
void run_storage_tests(boost::intrusive_ptr<torrent_info> info
, path const& test_path
, libtorrent::storage_mode_t storage_mode)
{
const int half = piece_size / 2;
@ -45,7 +48,7 @@ void run_storage_tests(boost::intrusive_ptr<torrent_info> info, bool compact_all
info->create_torrent();
create_directory(initial_path() / "temp_storage");
create_directory(test_path / "temp_storage");
int num_pieces = (1 + 612 + 17 + piece_size - 1) / piece_size;
TEST_CHECK(info->num_pieces() == num_pieces);
@ -55,7 +58,7 @@ void run_storage_tests(boost::intrusive_ptr<torrent_info> info, bool compact_all
{ // avoid having two storages use the same files
file_pool fp;
boost::scoped_ptr<storage_interface> s(
default_storage_constructor(info, initial_path(), fp));
default_storage_constructor(info, test_path, fp));
// write piece 1 (in slot 0)
s->write(piece1, 0, 0, half);
@ -85,14 +88,14 @@ void run_storage_tests(boost::intrusive_ptr<torrent_info> info, bool compact_all
disk_io_thread io;
boost::shared_ptr<int> dummy(new int);
boost::intrusive_ptr<piece_manager> pm = new piece_manager(dummy, info
, initial_path(), fp, io, default_storage_constructor);
, test_path, fp, io, default_storage_constructor);
boost::mutex lock;
libtorrent::aux::piece_checker_data d;
std::vector<bool> pieces;
num_pieces = 0;
TEST_CHECK(pm->check_fastresume(d, pieces, num_pieces
, compact_allocation) == false);
, storage_mode) == false);
bool finished = false;
float progress;
num_pieces = 0;
@ -105,15 +108,15 @@ void run_storage_tests(boost::intrusive_ptr<torrent_info> info, bool compact_all
boost::function<void(int, disk_io_job const&)> none;
TEST_CHECK(exists("temp_storage"));
pm->async_move_storage("temp_storage2", none);
TEST_CHECK(exists(test_path / "temp_storage"));
pm->async_move_storage(test_path / "temp_storage2", none);
test_sleep(2000);
TEST_CHECK(!exists("temp_storage"));
TEST_CHECK(exists("temp_storage2/temp_storage"));
pm->async_move_storage(".", none);
TEST_CHECK(!exists(test_path / "temp_storage"));
TEST_CHECK(exists(test_path / "temp_storage2/temp_storage"));
pm->async_move_storage(test_path , none);
test_sleep(2000);
TEST_CHECK(!exists("temp_storage2/temp_storage"));
remove_all("temp_storage2");
TEST_CHECK(!exists(test_path / "temp_storage2/temp_storage"));
remove_all(test_path / "temp_storage2");
peer_request r;
r.piece = 0;
@ -129,8 +132,10 @@ void run_storage_tests(boost::intrusive_ptr<torrent_info> info, bool compact_all
}
}
int test_main()
void run_test(path const& test_path)
{
std::cerr << "\n=== " << test_path << " ===\n" << std::endl;
boost::intrusive_ptr<torrent_info> info(new torrent_info());
info->set_piece_size(piece_size);
info->add_file("temp_storage/test1.tmp", 17);
@ -139,16 +144,18 @@ int test_main()
info->add_file("temp_storage/test4.tmp", 0);
info->add_file("temp_storage/test5.tmp", 1);
run_storage_tests(info);
std::cerr << "=== test 1 ===" << std::endl;
run_storage_tests(info, test_path, storage_mode_compact);
// make sure the files have the correct size
std::cerr << file_size(initial_path() / "temp_storage" / "test1.tmp") << std::endl;
TEST_CHECK(file_size(initial_path() / "temp_storage" / "test1.tmp") == 17);
std::cerr << file_size(initial_path() / "temp_storage" / "test2.tmp") << std::endl;
TEST_CHECK(file_size(initial_path() / "temp_storage" / "test2.tmp") == 31);
TEST_CHECK(exists("temp_storage/test3.tmp"));
TEST_CHECK(exists("temp_storage/test4.tmp"));
remove_all(initial_path() / "temp_storage");
std::cerr << file_size(test_path / "temp_storage" / "test1.tmp") << std::endl;
TEST_CHECK(file_size(test_path / "temp_storage" / "test1.tmp") == 17);
std::cerr << file_size(test_path / "temp_storage" / "test2.tmp") << std::endl;
TEST_CHECK(file_size(test_path / "temp_storage" / "test2.tmp") == 31);
TEST_CHECK(exists(test_path / "temp_storage/test3.tmp"));
TEST_CHECK(exists(test_path / "temp_storage/test4.tmp"));
remove_all(test_path / "temp_storage");
// ==============================================
@ -158,12 +165,14 @@ int test_main()
bool ret = info->remap_files(map);
TEST_CHECK(ret);
run_storage_tests(info, false);
std::cerr << "=== test 2 ===" << std::endl;
std::cerr << file_size(initial_path() / "temp_storage" / "test.tmp") << std::endl;
TEST_CHECK(file_size(initial_path() / "temp_storage" / "test.tmp") == 17 + 612 + 1);
run_storage_tests(info, test_path, storage_mode_compact);
remove_all(initial_path() / "temp_storage");
std::cerr << file_size(test_path / "temp_storage" / "test.tmp") << std::endl;
TEST_CHECK(file_size(test_path / "temp_storage" / "test.tmp") == 48);
remove_all(test_path / "temp_storage");
// ==============================================
@ -171,23 +180,45 @@ int test_main()
info->set_piece_size(piece_size);
info->add_file("temp_storage/test1.tmp", 17 + 612 + 1);
run_storage_tests(info);
std::cerr << "=== test 3 ===" << std::endl;
run_storage_tests(info, test_path, storage_mode_compact);
// 48 = piece_size * 3
TEST_CHECK(file_size(initial_path() / "temp_storage" / "test1.tmp") == 48);
remove_all(initial_path() / "temp_storage");
TEST_CHECK(file_size(test_path / "temp_storage" / "test1.tmp") == 48);
remove_all(test_path / "temp_storage");
// ==============================================
// make sure full allocation mode actually allocates the files
// and creates the directories
run_storage_tests(info, false);
std::cerr << "=== test 4 ===" << std::endl;
std::cerr << file_size(initial_path() / "temp_storage" / "test1.tmp") << std::endl;
TEST_CHECK(file_size(initial_path() / "temp_storage" / "test1.tmp") == 17 + 612 + 1);
run_storage_tests(info, test_path, storage_mode_allocate);
remove_all(initial_path() / "temp_storage");
std::cerr << file_size(test_path / "temp_storage" / "test1.tmp") << std::endl;
TEST_CHECK(file_size(test_path / "temp_storage" / "test1.tmp") == 17 + 612 + 1);
remove_all(test_path / "temp_storage");
}
int test_main()
{
std::vector<path> test_paths;
char* env = std::getenv("TORRENT_TEST_PATHS");
if (env == 0)
{
test_paths.push_back(initial_path());
}
else
{
char* p = std::strtok(env, ";");
while (p != 0)
{
test_paths.push_back(complete(p));
p = std::strtok(0, ";");
}
}
std::for_each(test_paths.begin(), test_paths.end(), bind(&run_test, _1));
return 0;
}