forked from premiere/premiere-libtorrent
removed unnecessary parentheses around min/max, added some const modifier (#2926)
commit 9b1e79660f
parent a7406de2b8
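
The change is mechanical but worth a note: calls spelled (std::min)(a, b) guard against the function-like min/max macros that <windows.h> defines unless NOMINMAX is set; wrapping the function name in parentheses keeps the preprocessor from expanding it. Dropping the parentheses assumes those macros are never in scope in the affected translation units (for instance because the build defines NOMINMAX). A minimal, self-contained sketch of the difference, not libtorrent code:

// Why (std::min) was parenthesized: on Windows, <windows.h> defines
// function-like min/max macros unless NOMINMAX is defined, and a bare
// std::min(8, n) would be rewritten by the preprocessor. (std::min)(8, n)
// never matches, because the macro name is not followed directly by '('.
#include <algorithm>
#include <cstdio>

int main()
{
	int const n = 3;
	int const a = (std::min)(8, n); // macro-proof spelling (old style)
	int const b = std::min(8, n);   // plain spelling (new style)
	std::printf("%d %d\n", a, b);   // both print 3
}

The const additions (e.g. int const blocks_in_piece, int const queue_size) only constrain the parameters inside the function definitions; top-level const on a parameter does not change the function's signature for callers.
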
@@ -1665,7 +1665,7 @@ COLUMN OPTIONS
 , "%3d [%3d, %d] %s%s\x1b[K\n"
 , bucket, n.num_nodes, n.num_replacements
 , progress_bar + (128 - n.num_nodes)
-, short_progress_bar + (8 - (std::min)(8, n.num_replacements)));
+, short_progress_bar + (8 - std::min(8, n.num_replacements)));
 out += str;
 pos += 1;
 }
@@ -629,7 +629,7 @@ struct peer_conn
 }
 else
 {
-block = (std::min)(start / 0x4000, block);
+block = std::min(start / 0x4000, block);
 if (block == 0)
 {
 pieces.push_back(current_piece);
@@ -802,7 +802,7 @@ void generate_torrent(std::vector<char>& buf, int num_pieces, int num_files
 char b[100];
 std::snprintf(b, sizeof(b), "%s/stress_test%d", torrent_name, i);
 ++i;
-fs.add_file(b, (std::min)(s, std::int64_t(file_size)));
+fs.add_file(b, std::min(s, file_size));
 s -= file_size;
 file_size += 200;
 }
@@ -122,7 +122,7 @@ int main(int argc, char* argv[]) try
 for (lt::file_index_t i(0); i < st.end_file(); ++i)
 {
 auto const first = st.map_file(i, 0, 0).piece;
-auto const last = st.map_file(i, (std::max)(std::int64_t(st.file_size(i))-1, std::int64_t(0)), 0).piece;
+auto const last = st.map_file(i, std::max(std::int64_t(st.file_size(i)) - 1, std::int64_t(0)), 0).piece;
 auto const flags = st.file_flags(i);
 std::stringstream file_hash;
 if (!st.hash(i).is_all_zeros())
@@ -544,4 +544,3 @@ one_more:
 fputs(buf, stdout);
 #endif
 }
-
@@ -115,9 +115,9 @@ void session_view::render()
 , color(to_string(int(m_cnt[0][m_queued_reads_idx]), 3), col_red).c_str()
 , color(to_string(int(m_cnt[0][m_queued_writes_idx]), 3), col_green).c_str()
 , int((m_cnt[0][m_blocks_written_idx] - m_cnt[0][m_write_ops_idx]) * 100
-/ (std::max)(std::int64_t(1), m_cnt[0][m_blocks_written_idx]))
+/ std::max(std::int64_t(1), m_cnt[0][m_blocks_written_idx]))
 , int(m_cnt[0][m_cache_hit_idx] * 100
-/ (std::max)(std::int64_t(1), m_cnt[0][m_num_blocks_read_idx]))
+/ std::max(std::int64_t(1), m_cnt[0][m_num_blocks_read_idx]))
 , add_suffix(m_cnt[0][m_writes_cache_idx] * 16 * 1024).c_str()
 , add_suffix(m_cnt[0][m_reads_cache_idx] * 16 * 1024).c_str()
 , add_suffix(m_cnt[0][m_blocks_in_use_idx] * 16 * 1024).c_str()
@@ -296,7 +296,7 @@ void print_routing_table(std::vector<lt::dht_routing_bucket> const& rt)
 std::printf("%3d [%3d, %d] %s%s\n"
 , bucket, i->num_nodes, i->num_replacements
 , progress_bar + (128 - i->num_nodes)
-, short_progress_bar + (8 - (std::min)(8, i->num_replacements)));
+, short_progress_bar + (8 - std::min(8, i->num_replacements)));
 }
 }
 
@@ -1239,8 +1239,8 @@ void block_cache::move_to_ghost(cached_piece_entry* pe)
 ghost_list->push_back(pe);
 }
 
-int block_cache::pad_job(disk_io_job const* j, int blocks_in_piece
-, int read_ahead) const
+int block_cache::pad_job(disk_io_job const* j, int const blocks_in_piece
+, int const read_ahead) const
 {
 int block_offset = j->d.io.offset & (default_block_size - 1);
 int start = j->d.io.offset / default_block_size;
@@ -1249,7 +1249,7 @@ int block_cache::pad_job(disk_io_job const* j, int blocks_in_piece
 // take the read-ahead into account
 // make sure to not overflow in this case
 if (read_ahead == INT_MAX) end = blocks_in_piece;
-else end = (std::min)(blocks_in_piece, (std::max)(start + read_ahead, end));
+else end = std::min(blocks_in_piece, std::max(start + read_ahead, end));
 
 return end - start;
 }
@@ -1493,8 +1493,8 @@ void block_cache::set_settings(aux::session_settings const& sett)
 // assumption is that there are about 128 blocks per piece,
 // and there are two ghost lists, so divide by 2.
 
-m_ghost_size = (std::max)(8, sett.get_int(settings_pack::cache_size)
-/ (std::max)(sett.get_int(settings_pack::read_cache_line_size), 4) / 2);
+m_ghost_size = std::max(8, sett.get_int(settings_pack::cache_size)
+/ std::max(sett.get_int(settings_pack::read_cache_line_size), 4) / 2);
 
 m_max_volatile_blocks = sett.get_int(settings_pack::cache_size_volatile);
 disk_buffer_pool::set_settings(sett);
@@ -242,8 +242,8 @@ namespace libtorrent {
 d1 *= lhs->get_priority(peer_connection::upload_channel);
 d2 *= rhs->get_priority(peer_connection::upload_channel);
 
-d1 = d1 * 1000 / (std::max)(std::int64_t(1), u1);
-d2 = d2 * 1000 / (std::max)(std::int64_t(1), u2);
+d1 = d1 * 1000 / std::max(std::int64_t(1), u1);
+d2 = d2 * 1000 / std::max(std::int64_t(1), u2);
 if (d1 > d2) return true;
 if (d1 < d2) return false;
 
@@ -255,8 +255,8 @@ namespace libtorrent {
 } // anonymous namespace
 
 int unchoke_sort(std::vector<peer_connection*>& peers
-, int max_upload_rate
-, time_duration unchoke_interval
+, int const max_upload_rate
+, time_duration const unchoke_interval
 , aux::session_settings const& sett)
 {
 #if TORRENT_USE_ASSERTS
@@ -269,7 +269,7 @@ namespace libtorrent {
 
 int upload_slots = sett.get_int(settings_pack::unchoke_slots_limit);
 if (upload_slots < 0)
-upload_slots = (std::numeric_limits<int>::max)();
+upload_slots = std::numeric_limits<int>::max();
 
 // ==== BitTyrant ====
 //
@@ -382,28 +382,28 @@ namespace libtorrent {
 int const pieces = sett.get_int(settings_pack::seeding_piece_quota);
 
 std::partial_sort(peers.begin(), peers.begin()
-+ (std::min)(upload_slots, int(peers.size())), peers.end()
++ std::min(upload_slots, int(peers.size())), peers.end()
 , std::bind(&unchoke_compare_rr, _1, _2, pieces));
 }
 else if (sett.get_int(settings_pack::seed_choking_algorithm)
 == settings_pack::fastest_upload)
 {
 std::partial_sort(peers.begin(), peers.begin()
-+ (std::min)(upload_slots, int(peers.size())), peers.end()
++ std::min(upload_slots, int(peers.size())), peers.end()
 , std::bind(&unchoke_compare_fastest_upload, _1, _2));
 }
 else if (sett.get_int(settings_pack::seed_choking_algorithm)
 == settings_pack::anti_leech)
 {
 std::partial_sort(peers.begin(), peers.begin()
-+ (std::min)(upload_slots, int(peers.size())), peers.end()
++ std::min(upload_slots, int(peers.size())), peers.end()
 , std::bind(&unchoke_compare_anti_leech, _1, _2));
 }
 else
 {
 int const pieces = sett.get_int(settings_pack::seeding_piece_quota);
 std::partial_sort(peers.begin(), peers.begin()
-+ (std::min)(upload_slots, int(peers.size())), peers.end()
++ std::min(upload_slots, int(peers.size())), peers.end()
 , std::bind(&unchoke_compare_rr, _1, _2, pieces));
 
 TORRENT_ASSERT_FAIL();
@@ -72,7 +72,7 @@ namespace libtorrent {
 , std::function<void()> const& trigger_trim)
 : m_in_use(0)
 , m_max_use(64)
-, m_low_watermark((std::max)(m_max_use - 32, 0))
+, m_low_watermark(std::max(m_max_use - 32, 0))
 , m_trigger_cache_trim(trigger_trim)
 , m_exceeded_max_size(false)
 , m_ios(ios)
@@ -136,7 +136,7 @@ namespace libtorrent {
 return m_threads.front().get_id();
 }
 
-void disk_io_thread_pool::job_queued(int queue_size)
+void disk_io_thread_pool::job_queued(int const queue_size)
 {
 // this check is not strictly necessary
 // but do it to avoid acquiring the mutex in the trivial case
@@ -147,9 +147,9 @@ namespace libtorrent {
 // reduce the number of threads requested to stop if we're going to need
 // them for these new jobs
 int to_exit = m_threads_to_exit;
-while (to_exit > (std::max)(0, m_num_idle_threads - queue_size) &&
+while (to_exit > std::max(0, m_num_idle_threads - queue_size) &&
 !m_threads_to_exit.compare_exchange_weak(to_exit
-, (std::max)(0, m_num_idle_threads - queue_size)));
+, std::max(0, m_num_idle_threads - queue_size)));
 
 // now start threads until we either have enough to service
 // all queued jobs without blocking or hit the max
@@ -192,7 +192,7 @@ namespace libtorrent {
 if (min_idle <= 0) return;
 // stop either the minimum number of idle threads or the number of threads
 // which must be stopped to get below the max, whichever is larger
-int const to_stop = (std::max)(min_idle, int(m_threads.size()) - m_max_threads);
+int const to_stop = std::max(min_idle, int(m_threads.size()) - m_max_threads);
 stop_threads(to_stop);
 }
 
@@ -166,12 +166,12 @@ namespace {
 // sure we wait for all of them, partially in sequence
 DWORD wait_for_multiple_objects(int num_handles, HANDLE* h)
 {
-int batch_size = (std::min)(num_handles, MAXIMUM_WAIT_OBJECTS);
+int batch_size = std::min(num_handles, MAXIMUM_WAIT_OBJECTS);
 while (WaitForMultipleObjects(batch_size, h, TRUE, INFINITE) != WAIT_FAILED)
 {
 h += batch_size;
 num_handles -= batch_size;
-batch_size = (std::min)(num_handles, MAXIMUM_WAIT_OBJECTS);
+batch_size = std::min(num_handles, MAXIMUM_WAIT_OBJECTS);
 if (batch_size <= 0) return WAIT_OBJECT_0;
 }
 return WAIT_FAILED;
@@ -266,7 +266,7 @@ restart_response:
 // we're done once we reach the end of the headers
 // if (!m_method.empty()) m_finished = true;
 // the HTTP header should always be < 2 GB
-TORRENT_ASSERT(m_recv_pos < (std::numeric_limits<int>::max)());
+TORRENT_ASSERT(m_recv_pos < std::numeric_limits<int>::max());
 m_body_start_pos = int(m_recv_pos);
 break;
 }
@@ -363,7 +363,7 @@ restart_response:
 std::int64_t payload = m_cur_chunk_end - m_recv_pos;
 if (payload > 0)
 {
-TORRENT_ASSERT(payload < (std::numeric_limits<int>::max)());
+TORRENT_ASSERT(payload < std::numeric_limits<int>::max());
 m_recv_pos += payload;
 std::get<0>(ret) += int(payload);
 incoming -= int(payload);
@@ -429,7 +429,7 @@ restart_response:
 && m_content_length >= 0)
 {
 TORRENT_ASSERT(m_content_length - m_recv_pos + m_body_start_pos
-< (std::numeric_limits<int>::max)());
+< std::numeric_limits<int>::max());
 incoming = int(m_content_length - m_recv_pos + m_body_start_pos);
 }
 
@@ -64,7 +64,7 @@ int distance_exp(node_id const& n1, node_id const& n2)
 // TODO: it's a little bit weird to return 159 - leading zeroes. It should
 // probably be 160 - leading zeroes, but all other code in here is tuned to
 // this expectation now, and it doesn't really matter (other than complexity)
-return (std::max)(159 - distance(n1, n2).count_leading_zeroes(), 0);
+return std::max(159 - distance(n1, n2).count_leading_zeroes(), 0);
 }
 
 int min_distance_exp(node_id const& n1, std::vector<node_id> const& ids)
@@ -285,7 +285,7 @@ routing_table::table_t::iterator routing_table::find_bucket(node_id const& id)
 ++num_buckets;
 }
 
-int bucket_index = (std::min)(159 - distance_exp(m_id, id), num_buckets - 1);
+int bucket_index = std::min(159 - distance_exp(m_id, id), num_buckets - 1);
 TORRENT_ASSERT(bucket_index < int(m_buckets.size()));
 TORRENT_ASSERT(bucket_index >= 0);
 
@@ -447,7 +447,7 @@ time_duration rpc_manager::tick()
 std::for_each(timeouts.begin(), timeouts.end(), std::bind(&observer::timeout, _1));
 std::for_each(short_timeouts.begin(), short_timeouts.end(), std::bind(&observer::short_timeout, _1));
 
-return (std::max)(ret, duration_cast<time_duration>(milliseconds(200)));
+return std::max(ret, duration_cast<time_duration>(milliseconds(200)));
 }
 
 void rpc_manager::add_our_id(entry& e)
@@ -524,7 +524,7 @@ bool traversal_algorithm::add_requests()
 o->flags |= observer::flag_queried;
 if (invoke(*i))
 {
-TORRENT_ASSERT(m_invoke_count < (std::numeric_limits<std::int8_t>::max)());
+TORRENT_ASSERT(m_invoke_count < std::numeric_limits<std::int8_t>::max());
 ++m_invoke_count;
 ++outstanding;
 }
@@ -580,13 +580,13 @@ void traversal_algorithm::status(dht_lookup& l)
 l.target = m_target;
 
 int last_sent = INT_MAX;
-time_point now = aux::time_now();
+time_point const now = aux::time_now();
 for (auto const& r : m_results)
 {
 observer const& o = *r;
 if (o.flags & observer::flag_queried)
 {
-last_sent = (std::min)(last_sent, int(total_seconds(now - o.sent())));
+last_sent = std::min(last_sent, int(total_seconds(now - o.sent())));
 if (o.has_short_timeout()) ++l.first_timeout;
 continue;
 }
@@ -527,7 +527,7 @@ namespace libtorrent {
 {
 char const* slash = std::strrchr(f.c_str(), '/');
 #ifdef TORRENT_WINDOWS
-slash = (std::max)((char const*)std::strrchr(f.c_str(), '\\'), slash);
+slash = std::max((char const*)std::strrchr(f.c_str(), '\\'), slash);
 #endif
 char const* ext = std::strrchr(f.c_str(), '.');
 // if we don't have an extension, just return f
@@ -69,8 +69,8 @@ namespace libtorrent {
 label = pci->label;
 set_upload_limit(pci->upload_limit);
 set_download_limit(pci->download_limit);
-priority[peer_connection::upload_channel] = (std::max)(1, (std::min)(255, pci->upload_priority));
-priority[peer_connection::download_channel] = (std::max)(1, (std::min)(255, pci->download_priority));
+priority[peer_connection::upload_channel] = std::max(1, std::min(255, pci->upload_priority));
+priority[peer_connection::download_channel] = std::max(1, std::min(255, pci->download_priority));
 }
 
 peer_class_t peer_class_pool::new_peer_class(std::string label)
@@ -3654,7 +3654,7 @@ namespace libtorrent {
 
 int const block_offset = block.block_index * t->block_size();
 int const block_size
-= (std::min)(t->torrent_file().piece_size(block.piece_index)-block_offset,
+= std::min(t->torrent_file().piece_size(block.piece_index) - block_offset,
 t->block_size());
 TORRENT_ASSERT(block_size > 0);
 TORRENT_ASSERT(block_size <= t->block_size());
@@ -221,7 +221,7 @@ namespace libtorrent {
 TORRENT_ASSERT(downloading_iter == m_downloads[download_state].end()
 || downloading_iter->index != piece);
 TORRENT_ASSERT(block_index >= 0);
-TORRENT_ASSERT(block_index < (std::numeric_limits<std::uint16_t>::max)());
+TORRENT_ASSERT(block_index < std::numeric_limits<std::uint16_t>::max());
 ret.info_idx = std::uint16_t(block_index);
 TORRENT_ASSERT(int(ret.info_idx) * m_blocks_per_piece
 + m_blocks_per_piece <= int(m_block_info.size()));
@@ -1809,7 +1809,7 @@ namespace {
 , int const num_blocks)
 {
 if (src.empty()) return num_blocks;
-int const to_copy = (std::min)(int(src.size()), num_blocks);
+int const to_copy = std::min(int(src.size()), num_blocks);
 
 dst.insert(dst.end(), src.begin(), src.begin() + to_copy);
 src.erase(src.begin(), src.begin() + to_copy);
@@ -2611,7 +2611,7 @@ get_out:
 #if TORRENT_USE_INVARIANT_CHECKS
 verify_pick(interesting_blocks, pieces);
 #endif
-return (std::max)(num_blocks, 0);
+return std::max(num_blocks, 0);
 }
 
 int piece_picker::add_blocks_downloading(downloading_piece const& dp
@@ -72,7 +72,7 @@ namespace libtorrent {
 if (getrlimit(RLIMIT_NOFILE, &rl) == 0)
 {
 if (rl.rlim_cur == rlim_infinity)
-return (std::numeric_limits<int>::max)();
+return std::numeric_limits<int>::max();
 
 return rl.rlim_cur <= std::numeric_limits<int>::max()
 ? int(rl.rlim_cur) : std::numeric_limits<int>::max();
@@ -3221,7 +3221,7 @@ namespace aux {
 
 m_created += hours(4);
 
-const int four_hours = 60 * 60 * 4;
+constexpr int four_hours = 60 * 60 * 4;
 for (auto& i : m_torrents)
 {
 i.second->step_session_time(four_hours);
@@ -3345,8 +3345,8 @@ namespace aux {
 // TODO: this should apply to all bandwidth channels
 if (m_settings.get_bool(settings_pack::rate_limit_ip_overhead))
 {
-int up_limit = upload_rate_limit(m_global_class);
-int down_limit = download_rate_limit(m_global_class);
+int const up_limit = upload_rate_limit(m_global_class);
+int const down_limit = download_rate_limit(m_global_class);
 
 if (down_limit > 0
 && m_stat.download_ip_overhead() >= down_limit
@@ -3365,8 +3365,8 @@ namespace aux {
 }
 }
 
-m_peak_up_rate = (std::max)(m_stat.upload_rate(), m_peak_up_rate);
-m_peak_down_rate = (std::max)(m_stat.download_rate(), m_peak_down_rate);
+m_peak_up_rate = std::max(m_stat.upload_rate(), m_peak_up_rate);
+m_peak_down_rate = std::max(m_stat.download_rate(), m_peak_down_rate);
 
 m_stat.second_tick(tick_interval_ms);
 
@@ -3382,7 +3382,7 @@ namespace aux {
 {
 aux::vector<torrent*>& want_scrape = m_torrent_lists[torrent_want_scrape];
 m_auto_scrape_time_scaler = m_settings.get_int(settings_pack::auto_scrape_interval)
-/ (std::max)(1, int(want_scrape.size()));
+/ std::max(1, int(want_scrape.size()));
 if (m_auto_scrape_time_scaler < m_settings.get_int(settings_pack::auto_scrape_min_interval))
 m_auto_scrape_time_scaler = m_settings.get_int(settings_pack::auto_scrape_min_interval);
 
@@ -3477,7 +3477,7 @@ namespace aux {
 || t->max_connections() < 6)
 continue;
 
-int const peers_to_disconnect = (std::min)((std::max)(t->num_peers()
+int const peers_to_disconnect = std::min(std::max(t->num_peers()
 * m_settings.get_int(settings_pack::peer_turnover) / 100, 1)
 , t->num_connect_candidates());
 t->disconnect_peers(peers_to_disconnect, errors::optimistic_disconnect);
@@ -3754,7 +3754,7 @@ namespace aux {
 int session_impl::get_int_setting(int n) const
 {
 int const v = settings().get_int(n);
-if (v < 0) return (std::numeric_limits<int>::max)();
+if (v < 0) return std::numeric_limits<int>::max();
 return v;
 }
 
@@ -3795,17 +3795,17 @@ namespace aux {
 // make sure the remaining torrents are paused, but their order is not
 // relevant
 std::partial_sort(checking.begin(), checking.begin() +
-(std::min)(checking_limit, int(checking.size())), checking.end()
+std::min(checking_limit, int(checking.size())), checking.end()
 , [](torrent const* lhs, torrent const* rhs)
 { return lhs->sequence_number() < rhs->sequence_number(); });
 
 std::partial_sort(downloaders.begin(), downloaders.begin() +
-(std::min)(hard_limit, int(downloaders.size())), downloaders.end()
+std::min(hard_limit, int(downloaders.size())), downloaders.end()
 , [](torrent const* lhs, torrent const* rhs)
 { return lhs->sequence_number() < rhs->sequence_number(); });
 
 std::partial_sort(seeds.begin(), seeds.begin() +
-(std::min)(hard_limit, int(seeds.size())), seeds.end()
+std::min(hard_limit, int(seeds.size())), seeds.end()
 , [this](torrent const* lhs, torrent const* rhs)
 { return lhs->seed_rank(m_settings) > rhs->seed_rank(m_settings); });
 }
@@ -3867,7 +3867,7 @@ namespace aux {
 for (auto& e : plugins)
 {
 uint64_t const priority = e->get_unchoke_priority(peer_connection_handle(*peer.peer));
-peer.ext_priority = (std::min)(priority, peer.ext_priority);
+peer.ext_priority = std::min(priority, peer.ext_priority);
 }
 }
 return peer.ext_priority;
@@ -6911,7 +6911,7 @@ namespace aux {
 
 if (m_settings.get_int(settings_pack::unchoke_slots_limit) < 0
 && m_settings.get_int(settings_pack::choking_algorithm) == settings_pack::fixed_slots_choker)
-TORRENT_ASSERT(m_stats_counters[counters::num_unchoke_slots] == (std::numeric_limits<int>::max)());
+TORRENT_ASSERT(m_stats_counters[counters::num_unchoke_slots] == std::numeric_limits<int>::max());
 
 for (torrent_list_index_t l{}; l != m_torrent_lists.end_index(); ++l)
 {
@@ -87,7 +87,7 @@ namespace {
 if (i == m_block_hashes.end() || i->first.piece_index != p) return;
 
 int size = m_torrent.torrent_file().piece_size(p);
-peer_request r = {p, 0, (std::min)(16 * 1024, size)};
+peer_request r = {p, 0, std::min(16 * 1024, size)};
 piece_block pb(p, 0);
 while (size > 0)
 {
@@ -108,7 +108,7 @@ namespace {
 
 r.start += 16 * 1024;
 size -= 16 * 1024;
-r.length = (std::min)(16 * 1024, size);
+r.length = std::min(16 * 1024, size);
 ++pb.block_index;
 }
 
@@ -125,7 +125,7 @@ namespace {
 }
 }
 
-void on_piece_failed(piece_index_t p) override
+void on_piece_failed(piece_index_t const p) override
 {
 // The piece failed the hash check. Record
 // the CRC and origin peer of every block
@@ -138,7 +138,7 @@ namespace {
 m_torrent.picker().get_downloaders(downloaders, p);
 
 int size = m_torrent.torrent_file().piece_size(p);
-peer_request r = {p, 0, (std::min)(16*1024, size)};
+peer_request r = {p, 0, std::min(16*1024, size)};
 piece_block pb(p, 0);
 for (auto const& i : downloaders)
 {
@@ -156,7 +156,7 @@ namespace {
 
 r.start += 16*1024;
 size -= 16*1024;
-r.length = (std::min)(16*1024, size);
+r.length = std::min(16*1024, size);
 ++pb.block_index;
 }
 TORRENT_ASSERT(size <= 0);
@@ -259,7 +259,7 @@ namespace libtorrent {namespace {
 
 int offset = piece * 16 * 1024;
 metadata = m_tp.metadata().data() + offset;
-metadata_piece_size = (std::min)(
+metadata_piece_size = std::min(
 m_tp.get_metadata_size() - offset, 16 * 1024);
 TORRENT_ASSERT(metadata_piece_size > 0);
 TORRENT_ASSERT(offset >= 0);
@@ -394,7 +394,7 @@ namespace libtorrent {namespace {
 break;
 case metadata_dont_have:
 {
-m_request_limit = (std::max)(aux::time_now() + minutes(1), m_request_limit);
+m_request_limit = std::max(aux::time_now() + minutes(1), m_request_limit);
 auto const i = std::find(m_sent_requests.begin()
 , m_sent_requests.end(), piece);
 // unwanted piece?
@@ -471,7 +471,7 @@ void peer_conn::on_message(error_code const& ec, size_t bytes_transferred)
 }
 else
 {
-block = (std::min)(start / 0x4000, block);
+block = std::min(start / 0x4000, block);
 if (block == 0)
 {
 pieces.push_back(current_piece);
@@ -196,7 +196,7 @@ void test_equal_connections(int num, int limit)
 run_test(v, manager);
 
 float sum = 0.f;
-float err = (std::max)(limit / num * 0.3f, 1000.f);
+float const err = std::max(limit / num * 0.3f, 1000.f);
 for (connections_t::iterator i = v.begin()
 , end(v.end()); i != end; ++i)
 {
@@ -307,7 +307,7 @@ void test_torrents(int num, int limit1, int limit2, int global_limit)
 
 if (global_limit > 0 && global_limit < limit1 + limit2)
 {
-limit1 = (std::min)(limit1, global_limit / 2);
+limit1 = std::min(limit1, global_limit / 2);
 limit2 = global_limit - limit1;
 }
 float sum = 0.f;
@@ -1732,7 +1732,7 @@ void test_routing_table(address(&rand_addr)())
 std::generate(tmp.begin(), tmp.end(), random_byte);
 table.find_node(tmp, temp, 0, bucket_size * 2);
 std::printf("returned: %d\n", int(temp.size()));
-TEST_EQUAL(int(temp.size()), (std::min)(bucket_size * 2, int(nodes.size())));
+TEST_EQUAL(int(temp.size()), std::min(bucket_size * 2, int(nodes.size())));
 
 std::sort(nodes.begin(), nodes.end(), std::bind(&compare_ref
 , std::bind(&node_entry::id, _1)
@@ -840,7 +840,7 @@ void test_zero_file_prio(bool test_deprecated = false)
 rd["file-format"] = "libtorrent resume file";
 rd["file-version"] = 1;
 rd["info-hash"] = ti->info_hash().to_string();
-rd["blocks per piece"] = (std::max)(1, ti->piece_length() / 0x4000);
+rd["blocks per piece"] = std::max(1, ti->piece_length() / 0x4000);
 
 entry::list_type& file_prio = rd["file_priority"].list();
 for (int i = 0; i < 100; ++i)
@@ -965,7 +965,7 @@ void test_seed_mode(test_mode_t const flags)
 rd["file-format"] = "libtorrent resume file";
 rd["file-version"] = 1;
 rd["info-hash"] = ti->info_hash().to_string();
-rd["blocks per piece"] = (std::max)(1, ti->piece_length() / 0x4000);
+rd["blocks per piece"] = std::max(1, ti->piece_length() / 0x4000);
 
 if (flags & test_mode::file_prio)
 {
@@ -297,7 +297,7 @@ TORRENT_TEST(extract_peer_hostname)
 peer_entry result = extract_peer("d2:ip11:example.com4:porti1ee"
 , error_code(), true);
 TEST_EQUAL(result.hostname, "example.com");
-TEST_EQUAL(result.pid, (peer_id::min)());
+TEST_EQUAL(result.pid, peer_id::min());
 TEST_EQUAL(result.port, 1);
 }
 