added option to use a single block-sized buffer when hashing, instead of a buffer for the entire piece
This commit is contained in:
parent
bb9815713b
commit
f2ec6b5adb
|
@ -3375,6 +3375,8 @@ that will be sent to the tracker. The user-agent is a good way to identify your
|
||||||
|
|
||||||
int recv_socket_buffer_size;
|
int recv_socket_buffer_size;
|
||||||
int send_socket_buffer_size;
|
int send_socket_buffer_size;
|
||||||
|
|
||||||
|
bool optimize_hashing_for_speed;
|
||||||
};
|
};
|
||||||
|
|
||||||
``user_agent`` this is the client identification to the tracker.
|
``user_agent`` this is the client identification to the tracker.
|
||||||
|
@ -3729,6 +3731,17 @@ the OS default (i.e. don't change the buffer sizes). The socket buffer
|
||||||
sizes are changed using setsockopt() with SOL_SOCKET/SO_RCVBUF and
|
sizes are changed using setsockopt() with SOL_SOCKET/SO_RCVBUF and
|
||||||
SO_SNDBUF.
|
SO_SNDBUF.
|
||||||
|
|
||||||
|
``optimize_hashing_for_speed`` chooses between two ways of reading back
|
||||||
|
piece data from disk when it's complete and needs to be verified against
|
||||||
|
the piece hash. This happens if some blocks were flushed to the disk
|
||||||
|
out of order. Everything that is flushed in order is hashed as it goes
|
||||||
|
along. Optimizing for speed will allocate space to fit all the
|
||||||
|
remaining, unhashed part of the piece, read the data into it in a single
|
||||||
|
call and hash it. This is the default. If ``optimize_hashing_for_speed``
|
||||||
|
is false, a single block will be allocated (16 kB), and the unhashed parts
|
||||||
|
of the piece are read, one at a time, and hashed in this single block. This
|
||||||
|
is appropriate on systems that are memory constrained.
|
||||||
|
|
||||||
|
|
||||||
pe_settings
|
pe_settings
|
||||||
===========
|
===========
|
||||||
|
|
|
@ -714,6 +714,7 @@ int main(int argc, char* argv[])
|
||||||
settings.user_agent = "client_test/" LIBTORRENT_VERSION;
|
settings.user_agent = "client_test/" LIBTORRENT_VERSION;
|
||||||
settings.auto_upload_slots_rate_based = true;
|
settings.auto_upload_slots_rate_based = true;
|
||||||
settings.announce_to_all_trackers = true;
|
settings.announce_to_all_trackers = true;
|
||||||
|
settings.optimize_hashing_for_speed = false;
|
||||||
|
|
||||||
std::deque<std::string> events;
|
std::deque<std::string> events;
|
||||||
|
|
||||||
|
|
|
@ -163,6 +163,7 @@ namespace libtorrent
|
||||||
, max_rejects(50)
|
, max_rejects(50)
|
||||||
, recv_socket_buffer_size(0)
|
, recv_socket_buffer_size(0)
|
||||||
, send_socket_buffer_size(0)
|
, send_socket_buffer_size(0)
|
||||||
|
, optimize_hashing_for_speed(true)
|
||||||
{}
|
{}
|
||||||
|
|
||||||
// this is the user agent that will be sent to the tracker
|
// this is the user agent that will be sent to the tracker
|
||||||
|
@ -541,6 +542,11 @@ namespace libtorrent
|
||||||
// 0 means OS default
|
// 0 means OS default
|
||||||
int recv_socket_buffer_size;
|
int recv_socket_buffer_size;
|
||||||
int send_socket_buffer_size;
|
int send_socket_buffer_size;
|
||||||
|
|
||||||
|
// if this is set to false, the hashing will be
|
||||||
|
// optimized for memory usage instead of the
|
||||||
|
// number of read operations
|
||||||
|
bool optimize_hashing_for_speed;
|
||||||
};
|
};
|
||||||
|
|
||||||
#ifndef TORRENT_DISABLE_DHT
|
#ifndef TORRENT_DISABLE_DHT
|
||||||
|
|
|
@ -488,6 +488,14 @@ namespace libtorrent
|
||||||
if (disk_pool()) block_size = disk_pool()->block_size();
|
if (disk_pool()) block_size = disk_pool()->block_size();
|
||||||
int size = slot_size;
|
int size = slot_size;
|
||||||
int num_blocks = (size + block_size - 1) / block_size;
|
int num_blocks = (size + block_size - 1) / block_size;
|
||||||
|
|
||||||
|
// when we optimize for speed we allocate all the buffers we
|
||||||
|
// need for the rest of the piece, and read it all in one call
|
||||||
|
// and then hash it. When optimizing for memory usage, we read
|
||||||
|
// one block at a time and hash it. This ends up only using a
|
||||||
|
// single buffer
|
||||||
|
if (settings().optimize_hashing_for_speed)
|
||||||
|
{
|
||||||
file::iovec_t* bufs = TORRENT_ALLOCA(file::iovec_t, num_blocks);
|
file::iovec_t* bufs = TORRENT_ALLOCA(file::iovec_t, num_blocks);
|
||||||
for (int i = 0; i < num_blocks; ++i)
|
for (int i = 0; i < num_blocks; ++i)
|
||||||
{
|
{
|
||||||
|
@ -502,6 +510,21 @@ namespace libtorrent
|
||||||
ph.h.update((char const*)bufs[i].iov_base, bufs[i].iov_len);
|
ph.h.update((char const*)bufs[i].iov_base, bufs[i].iov_len);
|
||||||
disk_pool()->free_buffer((char*)bufs[i].iov_base);
|
disk_pool()->free_buffer((char*)bufs[i].iov_base);
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
file::iovec_t buf;
|
||||||
|
disk_buffer_holder holder(*disk_pool(), disk_pool()->allocate_buffer("hash temp"));
|
||||||
|
buf.iov_base = holder.get();
|
||||||
|
for (int i = 0; i < num_blocks; ++i)
|
||||||
|
{
|
||||||
|
buf.iov_len = (std::min)(block_size, size);
|
||||||
|
readv(&buf, slot, ph.offset, 1);
|
||||||
|
ph.h.update((char const*)buf.iov_base, buf.iov_len);
|
||||||
|
ph.offset += buf.iov_len;
|
||||||
|
size -= buf.iov_len;
|
||||||
|
}
|
||||||
|
}
|
||||||
if (error()) return sha1_hash(0);
|
if (error()) return sha1_hash(0);
|
||||||
}
|
}
|
||||||
return ph.h.final();
|
return ph.h.final();
|
||||||
|
|
Loading…
Reference in New Issue