added option to use a single block buffer when hashing, instead of buffers for the entire piece

This commit is contained in:
Arvid Norberg 2009-05-03 19:09:06 +00:00
parent bb9815713b
commit f2ec6b5adb
4 changed files with 54 additions and 11 deletions

docs/manual.rst

@@ -3375,6 +3375,8 @@ that will be sent to the tracker. The user-agent is a good way to identify your
 	int recv_socket_buffer_size;
 	int send_socket_buffer_size;
+	bool optimize_hashing_for_speed;
 };
 ``user_agent`` this is the client identification to the tracker.
@@ -3729,6 +3731,17 @@ the OS default (i.e. don't change the buffer sizes). The socket buffer
 sizes are changed using setsockopt() with SOL_SOCKET/SO_RCVBUF and
 SO_SNDBUF.
+``optimize_hashing_for_speed`` chooses between two ways of reading back
+piece data from disk when it's complete and needs to be verified against
+the piece hash. This happens if some blocks were flushed to the disk
+out of order. Everything that is flushed in order is hashed as it goes
+along. Optimizing for speed will allocate space to fit the entire
+remaining, unhashed part of the piece, read the data into it in a single
+call and hash it. This is the default. If ``optimize_hashing_for_speed``
+is false, a single block buffer (16 kB) is allocated, and the unhashed
+parts of the piece are read into it and hashed one block at a time. This
+is appropriate on memory constrained systems.
 pe_settings
 ===========

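For example, a memory constrained client can clear the flag through ``session_settings``. A minimal sketch (``settings()`` and ``set_settings()`` are the session's existing settings accessors):

#include <libtorrent/session.hpp>

int main()
{
	libtorrent::session ses;
	libtorrent::session_settings s = ses.settings();
	// hash completed pieces in a single reused 16 kB block buffer
	s.optimize_hashing_for_speed = false;
	ses.set_settings(s);
}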
examples/client_test.cpp

@@ -714,6 +714,7 @@ int main(int argc, char* argv[])
 	settings.user_agent = "client_test/" LIBTORRENT_VERSION;
 	settings.auto_upload_slots_rate_based = true;
 	settings.announce_to_all_trackers = true;
+	settings.optimize_hashing_for_speed = false;
 	std::deque<std::string> events;

include/libtorrent/session_settings.hpp

@@ -163,6 +163,7 @@ namespace libtorrent
 		, max_rejects(50)
 		, recv_socket_buffer_size(0)
 		, send_socket_buffer_size(0)
+		, optimize_hashing_for_speed(true)
 	{}
 	// this is the user agent that will be sent to the tracker
@@ -541,6 +542,11 @@ namespace libtorrent
 		// 0 means OS default
 		int recv_socket_buffer_size;
 		int send_socket_buffer_size;
+		// if this is set to false, the hashing will be
+		// optimized for memory usage instead of the
+		// number of read operations
+		bool optimize_hashing_for_speed;
 	};
 #ifndef TORRENT_DISABLE_DHT

src/storage.cpp

@@ -488,19 +488,42 @@ namespace libtorrent
 			if (disk_pool()) block_size = disk_pool()->block_size();
 			int size = slot_size;
 			int num_blocks = (size + block_size - 1) / block_size;
-			file::iovec_t* bufs = TORRENT_ALLOCA(file::iovec_t, num_blocks);
-			for (int i = 0; i < num_blocks; ++i)
-			{
-				bufs[i].iov_base = disk_pool()->allocate_buffer("hash temp");
-				bufs[i].iov_len = (std::min)(block_size, size);
-				size -= bufs[i].iov_len;
-			}
-			readv(bufs, slot, ph.offset, num_blocks);
-			for (int i = 0; i < num_blocks; ++i)
+			// when we optimize for speed we allocate all the buffers we
+			// need for the rest of the piece, and read it all in one call
+			// and then hash it. When optimizing for memory usage, we read
+			// one block at a time and hash it. This ends up only using a
+			// single buffer
+			if (settings().optimize_hashing_for_speed)
 			{
-				ph.h.update((char const*)bufs[i].iov_base, bufs[i].iov_len);
-				disk_pool()->free_buffer((char*)bufs[i].iov_base);
+				file::iovec_t* bufs = TORRENT_ALLOCA(file::iovec_t, num_blocks);
+				for (int i = 0; i < num_blocks; ++i)
+				{
+					bufs[i].iov_base = disk_pool()->allocate_buffer("hash temp");
+					bufs[i].iov_len = (std::min)(block_size, size);
+					size -= bufs[i].iov_len;
+				}
+				readv(bufs, slot, ph.offset, num_blocks);
+				for (int i = 0; i < num_blocks; ++i)
+				{
+					ph.h.update((char const*)bufs[i].iov_base, bufs[i].iov_len);
+					disk_pool()->free_buffer((char*)bufs[i].iov_base);
+				}
 			}
+			else
+			{
+				file::iovec_t buf;
+				disk_buffer_holder holder(*disk_pool(), disk_pool()->allocate_buffer("hash temp"));
+				buf.iov_base = holder.get();
+				for (int i = 0; i < num_blocks; ++i)
+				{
+					buf.iov_len = (std::min)(block_size, size);
+					readv(&buf, slot, ph.offset, 1);
+					ph.h.update((char const*)buf.iov_base, buf.iov_len);
+					ph.offset += buf.iov_len;
+					size -= buf.iov_len;
+				}
+			}
 			if (error()) return sha1_hash(0);
 		}
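
The memory optimized path added here boils down to reusing one block sized buffer for the whole piece. A simplified standalone sketch of that idea (not the libtorrent code above; ``read_block()`` is a hypothetical disk read and ``hasher`` stands in for the SHA-1 wrapper):

#include <algorithm>
#include <vector>

struct hasher
{
	// stand-in for the real SHA-1 update
	void update(char const* data, int len) {}
};

// hypothetical: fill buf with len bytes of piece data starting at offset
void read_block(int offset, char* buf, int len) {}

void hash_remaining(hasher& h, int offset, int size, int block_size)
{
	// one block sized buffer is reused for every read, so peak memory
	// stays at block_size (16 kB) no matter how much is left to hash
	std::vector<char> buf(block_size);
	while (size > 0)
	{
		int len = (std::min)(block_size, size);
		read_block(offset, &buf[0], len);
		h.update(&buf[0], len);
		offset += len;
		size -= len;
	}
}

Peak allocation stays at one block regardless of piece size, at the cost of one read call per block instead of a single readv() covering the remaining piece.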