merged changes from RC_1_0

This commit is contained in:
Arvid Norberg 2014-11-08 16:58:18 +00:00
parent 601f0dc434
commit aae56c991c
26 changed files with 1274 additions and 929 deletions

View File

@ -35,6 +35,9 @@
1.0.3 release
* tweak flag_override_resume_data semantics to make more sense (breaks
backwards compatibility of edge-cases)
* improve DHT bootstrapping and periodic refresh
* improve DHT maintenance performance (by pinging instead of full lookups)
* fix bug in DHT routing table node-id prefix optimization
* fix incorrect behavior of flag_use_resume_save_path
@ -123,6 +126,8 @@
* fix uTP edge case where udp socket buffer fills up
* fix nagle implementation in uTP
* fix bug in error handling in protocol encryption
0.16.18 release
* fix uninitialized values in DHT DOS mitigation

View File

@ -142,9 +142,9 @@ data for the torrent. For more information, see the <tt class="docutils literal"
<p>Declared in &quot;<a class="reference external" href="../include/libtorrent/add_torrent_params.hpp">libtorrent/add_torrent_params.hpp</a>&quot;</p>
<table border="1" class="docutils">
<colgroup>
<col width="42%" />
<col width="41%" />
<col width="5%" />
<col width="53%" />
<col width="54%" />
</colgroup>
<thead valign="bottom">
<tr><th class="head">name</th>
@ -172,15 +172,16 @@ in there will override the seed mode you set here.</p>
</tr>
<tr><td>flag_override_resume_data</td>
<td>2</td>
<td><p class="first">If <tt class="docutils literal">flag_override_resume_data</tt> is set, the <tt class="docutils literal">paused</tt>,
<tt class="docutils literal">auto_managed</tt> and <tt class="docutils literal">save_path</tt> of the torrent are not loaded
from the resume data, but the states requested by the flags in
<tt class="docutils literal">add_torrent_params</tt> will override them.</p>
<p class="last">If you pass in resume data, the paused state of the torrent when
the resume data was saved will override the paused state you pass
in here. You can override this by setting
<tt class="docutils literal">flag_override_resume_data</tt>.</p>
</td>
<td>If <tt class="docutils literal">flag_override_resume_data</tt> is set, flags set for this torrent
in this <tt class="docutils literal">add_torrent_params</tt> object will take precedence over
whatever states are saved in the resume data. For instance, the
<tt class="docutils literal">paused</tt>, <tt class="docutils literal">auto_managed</tt>, <tt class="docutils literal">sequential_download</tt>, <tt class="docutils literal">seed_mode</tt>,
<tt class="docutils literal">super_seeding</tt>, <tt class="docutils literal">max_uploads</tt>, <tt class="docutils literal">max_connections</tt>,
<tt class="docutils literal">upload_limit</tt> and <tt class="docutils literal">download_limit</tt> are all affected by this
flag. The intention of this flag is to have any field in
<a class="reference external" href="reference-Session.html#add_torrent_params">add_torrent_params</a> configuring the torrent override the corresponding
configuration from the resume file, with the one exception of save
resume data, which has its own flag (for historic reasons).</td>
</tr>
<tr><td>flag_upload_mode</td>
<td>4</td>
@ -438,151 +439,14 @@ struct cache_status
<strong>cache_status</strong> ();
std::vector&lt;cached_piece_info&gt; pieces;
int write_cache_size;
int read_cache_size;
int pinned_blocks;
mutable int total_used_buffers;
int average_read_time;
int average_write_time;
int average_hash_time;
int average_job_time;
int cumulative_job_time;
int cumulative_read_time;
int cumulative_write_time;
int cumulative_hash_time;
int total_read_back;
int read_queue_size;
int blocked_jobs;
int queued_jobs;
int peak_queued;
int pending_jobs;
int num_jobs;
int num_read_jobs;
int num_write_jobs;
int arc_mru_size;
int arc_mru_ghost_size;
int arc_mfu_size;
int arc_mfu_ghost_size;
int arc_write_size;
int arc_volatile_size;
int num_writing_threads;
int num_fence_jobs[disk_io_job::num_job_ids];
};
</pre>
<a name="cache_status()"></a><div class="section" id="id24">
<a name="cache_status()"></a><div class="section" id="id25">
<h2>cache_status()</h2>
<pre class="literal-block">
<strong>cache_status</strong> ();
</pre>
<p>initializes all counters to 0</p>
<a name="write_cache_size"></a><dl class="docutils">
<dt>write_cache_size</dt>
<dd>the number of blocks in the cache used for write cache</dd>
</dl>
<a name="read_cache_size"></a><dl class="docutils">
<dt>read_cache_size</dt>
<dd>the number of 16KiB blocks in the read cache.</dd>
</dl>
<a name="pinned_blocks"></a><dl class="docutils">
<dt>pinned_blocks</dt>
<dd>the number of blocks with a refcount &gt; 0, i.e.
they may not be evicted</dd>
</dl>
<a name="total_used_buffers"></a><dl class="docutils">
<dt>total_used_buffers</dt>
<dd>the total number of buffers currently in use.
This includes the read/write disk cache as well as send and receive buffers
used in peer connections.</dd>
</dl>
<a name="average_read_time"></a><dl class="docutils">
<dt>average_read_time</dt>
<dd>the time read jobs take on average to complete
(not including the time in the queue), in microseconds. This only measures
read cache misses.</dd>
</dl>
<a name="average_write_time"></a><dl class="docutils">
<dt>average_write_time</dt>
<dd>the time write jobs take to complete, on average,
in microseconds. This does not include the time the job sits in the disk job
queue or in the write cache, only blocks that are flushed to disk.</dd>
</dl>
<a name="average_hash_time"></a>
<a name="average_job_time"></a><dl class="docutils">
<dt>average_hash_time average_job_time</dt>
<dd>the time hash jobs take to complete on average, in
microseconds. Hash jobs include running SHA-1 on the data (which for the most
part is done incrementally) and sometimes reading back parts of the piece. It
also includes checking files without valid resume data.</dd>
</dl>
<a name="cumulative_job_time"></a>
<a name="cumulative_read_time"></a>
<a name="cumulative_write_time"></a>
<a name="cumulative_hash_time"></a><dl class="docutils">
<dt>cumulative_job_time cumulative_read_time cumulative_write_time cumulative_hash_time</dt>
<dd>the number of milliseconds spent in all disk jobs, and specific ones
since the start of the <a class="reference external" href="reference-Session.html#session">session</a>. Times are specified in milliseconds</dd>
</dl>
<a name="total_read_back"></a><dl class="docutils">
<dt>total_read_back</dt>
<dd>the number of blocks that had to be read back from disk because
they were flushed before the SHA-1 hash got to hash them. If this
is large, a larger cache could significantly improve performance</dd>
</dl>
<a name="read_queue_size"></a><dl class="docutils">
<dt>read_queue_size</dt>
<dd>number of read jobs in the disk job queue</dd>
</dl>
<a name="blocked_jobs"></a><dl class="docutils">
<dt>blocked_jobs</dt>
<dd>number of jobs blocked because of a fence</dd>
</dl>
<a name="queued_jobs"></a><dl class="docutils">
<dt>queued_jobs</dt>
<dd>number of jobs waiting to be issued (m_to_issue)
average over 30 seconds</dd>
</dl>
<a name="peak_queued"></a><dl class="docutils">
<dt>peak_queued</dt>
<dd>largest ever seen number of queued jobs</dd>
</dl>
<a name="pending_jobs"></a><dl class="docutils">
<dt>pending_jobs</dt>
<dd>number of jobs waiting to complete (m_pending)
average over 30 seconds</dd>
</dl>
<a name="num_jobs"></a><dl class="docutils">
<dt>num_jobs</dt>
<dd>total number of disk job objects allocated right now</dd>
</dl>
<a name="num_read_jobs"></a><dl class="docutils">
<dt>num_read_jobs</dt>
<dd>total number of disk read job objects allocated right now</dd>
</dl>
<a name="num_write_jobs"></a><dl class="docutils">
<dt>num_write_jobs</dt>
<dd>total number of disk write job objects allocated right now</dd>
</dl>
<a name="arc_mru_size"></a>
<a name="arc_mru_ghost_size"></a>
<a name="arc_mfu_size"></a>
<a name="arc_mfu_ghost_size"></a>
<a name="arc_write_size"></a>
<a name="arc_volatile_size"></a><dl class="docutils">
<dt>arc_mru_size arc_mru_ghost_size arc_mfu_size arc_mfu_ghost_size arc_write_size arc_volatile_size</dt>
<dd>ARC cache stats. All of these counters are in number of pieces
not blocks. A piece does not necessarily correspond to a certain
number of blocks. The pieces in the ghost list never have any
blocks in them</dd>
</dl>
<a name="num_writing_threads"></a><dl class="docutils">
<dt>num_writing_threads</dt>
<dd>the number of threads currently writing to disk</dd>
</dl>
<a name="num_fence_jobs[disk_io_job"></a><dl class="docutils">
<dt>num_fence_jobs[disk_io_job</dt>
<dd>counts only fence jobs that are currently blocking jobs,
not fences that are themselves blocked</dd>
</dl>
<a name="stats_metric"></a></div> <a name="stats_metric"></a></div>
</div> </div>
<div class="section" id="stats-metric"> <div class="section" id="stats-metric">
@ -1656,7 +1520,6 @@ struct dht_routing_bucket
{ {
int num_nodes; int num_nodes;
int num_replacements; int num_replacements;
int last_active;
}; };
</pre> </pre>
<a name="num_nodes"></a> <a name="num_nodes"></a>
@ -1665,10 +1528,6 @@ struct dht_routing_bucket
<dd>the total number of nodes and replacement nodes <dd>the total number of nodes and replacement nodes
in the routing table</dd> in the routing table</dd>
</dl> </dl>
<a name="last_active"></a><dl class="docutils">
<dt>last_active</dt>
<dd>number of seconds since last activity</dd>
</dl>
<a name="utp_status"></a></div> <a name="utp_status"></a></div>
<div class="section" id="utp-status"> <div class="section" id="utp-status">
<h1>utp_status</h1> <h1>utp_status</h1>

File diff suppressed because one or more lines are too long

View File

@ -1756,7 +1756,7 @@ int main(int argc, char* argv[])
, "%3d [%3d, %d] %s%s\n" , "%3d [%3d, %d] %s%s\n"
, bucket, i->num_nodes, i->num_replacements , bucket, i->num_nodes, i->num_replacements
, progress_bar + (128 - i->num_nodes) , progress_bar + (128 - i->num_nodes)
, "--------" + (8 - i->num_replacements)); , "--------" + (8 - (std::min)(8, i->num_replacements)));
out += str; out += str;
} }

View File

@ -147,15 +147,16 @@ namespace libtorrent
// in there will override the seed mode you set here.
flag_seed_mode = 0x001,
// If ``flag_override_resume_data`` is set, the ``paused``,
// ``auto_managed`` and ``save_path`` of the torrent are not loaded
// from the resume data, but the states requested by the flags in
// ``add_torrent_params`` will override them.
//
// If you pass in resume data, the paused state of the torrent when
// the resume data was saved will override the paused state you pass
// in here. You can override this by setting
// ``flag_override_resume_data``.
// If ``flag_override_resume_data`` is set, flags set for this torrent
// in this ``add_torrent_params`` object will take precedence over
// whatever states are saved in the resume data. For instance, the
// ``paused``, ``auto_managed``, ``sequential_download``, ``seed_mode``,
// ``super_seeding``, ``max_uploads``, ``max_connections``,
// ``upload_limit`` and ``download_limit`` are all affected by this
// flag. The intention of this flag is to have any field in
// add_torrent_params configuring the torrent override the corresponding
// configuration from the resume file, with the one exception of save
// resume data, which has its own flag (for historic reasons).
flag_override_resume_data = 0x002,
// If ``flag_upload_mode`` is set, the torrent will be initialized in
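
For illustration, a minimal usage sketch of the new semantics (file names are hypothetical; the point is that the paused state requested here wins over the one recorded in the resume data):

#include <libtorrent/session.hpp>
#include <libtorrent/add_torrent_params.hpp>
#include <libtorrent/torrent_info.hpp>
#include <fstream>
#include <iterator>

int main()
{
	using namespace libtorrent;
	session ses;
	error_code ec;
	add_torrent_params p;
	p.ti = new torrent_info("test.torrent", ec); // hypothetical torrent file
	p.save_path = "./downloads";
	// load previously saved resume data, if any (hypothetical file name)
	std::ifstream f("test.fastresume", std::ios_base::binary);
	p.resume_data.assign(std::istreambuf_iterator<char>(f)
		, std::istreambuf_iterator<char>());
	// with flag_override_resume_data set, the paused state requested here
	// takes precedence over the paused state saved in the resume data
	p.flags |= add_torrent_params::flag_paused
		| add_torrent_params::flag_override_resume_data;
	ses.add_torrent(p, ec);
	return 0;
}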

View File

@ -223,7 +223,7 @@ public:
node_id const& nid() const { return m_id; }
boost::tuple<int, int> size() const { return m_table.size(); }
boost::tuple<int, int, int> size() const { return m_table.size(); }
size_type num_global_nodes() const
{ return m_table.num_global_nodes(); }

View File

@ -63,7 +63,9 @@ int TORRENT_EXTRA_EXPORT distance_exp(node_id const& n1, node_id const& n2);
node_id TORRENT_EXTRA_EXPORT generate_id(address const& external_ip);
node_id TORRENT_EXTRA_EXPORT generate_random_id();
bool TORRENT_EXTRA_EXPORT verify_random_id(node_id const& nid);
void TORRENT_EXTRA_EXPORT make_id_secret(node_id& in);
node_id TORRENT_EXTRA_EXPORT generate_secret_id();
bool TORRENT_EXTRA_EXPORT verify_secret_id(node_id const& nid);
node_id TORRENT_EXTRA_EXPORT generate_id_impl(address const& ip_, boost::uint32_t r);
bool TORRENT_EXTRA_EXPORT verify_id(node_id const& nid, address const& source_ip);

View File

@ -43,6 +43,8 @@ namespace libtorrent { namespace dht
class routing_table;
class rpc_manager;
// TODO: 3 collapse this class into the bootstrap class (or maybe the other
// way around)
class refresh : public get_peers
{
public:
@ -68,6 +70,8 @@
virtual char const* name() const;
void trim_seed_nodes();
protected:
virtual void done();

View File

@ -104,6 +104,13 @@ public:
router_iterator router_begin() const { return m_router_nodes.begin(); }
router_iterator router_end() const { return m_router_nodes.end(); }
enum add_node_status_t {
failed_to_add = 0,
node_added,
need_bucket_split
};
add_node_status_t add_node_impl(node_entry e);
bool add_node(node_entry e);
// this function is called every time the node sees
@ -147,7 +154,11 @@
int bucket_size() const { return m_bucket_size; }
boost::tuple<int, int> size() const;
// returns the number of nodes in the main buckets, number of nodes in the
// replacement buckets and the number of nodes in the main buckets that have
// been pinged and confirmed up
boost::tuple<int, int, int> size() const;
size_type num_global_nodes() const;
// the number of bits down we have full buckets
@ -155,9 +166,6 @@ public:
// we have
int depth() const;
// returns true if there are no working nodes
// in the routing table
bool need_bootstrap() const;
int num_active_buckets() const { return m_buckets.size(); }
void replacement_cache(bucket_t& nodes) const;
@ -202,14 +210,6 @@
// it's mutable because it's updated by depth(), which is const
mutable int m_depth;
// the last time need_bootstrap() returned true
mutable ptime m_last_bootstrap;
// the last time the routing table was refreshed.
// this is used to stagger buckets needing refresh
// to be at least 45 seconds apart.
mutable ptime m_last_refresh;
// the last time we refreshed our own bucket
// refreshed every 15 minutes
mutable ptime m_last_self_refresh;

View File

@ -698,8 +698,8 @@ namespace libtorrent
void announce_with_tracker(boost::uint8_t e
= tracker_request::none
, address const& bind_interface = address_v4::any());
int seconds_since_last_scrape() const { return m_ses.session_time() - m_last_scrape; }
int seconds_since_last_scrape() const
{ return m_last_scrape == INT16_MIN ? -1 : m_ses.session_time() - m_last_scrape; }
#ifndef TORRENT_DISABLE_DHT
void dht_announce();
#endif
@ -1583,17 +1583,19 @@ namespace libtorrent
// ----
// the timestamp of the last piece passed for this torrent
// specified in session_time
boost::uint16_t m_last_download;
// the timestamp of the last piece passed for this torrent specified in
// session_time. This is signed because it must be able to represent time
// before the session started
boost::int16_t m_last_download;
// the number of peer connections to seeds. This should be the same as
// counting the peer connections that say true for is_seed()
boost::uint16_t m_num_seeds;
// the timestamp of the last byte uploaded from this torrent
// specified in session_time
boost::uint16_t m_last_upload;
// the timestamp of the last byte uploaded from this torrent specified in
// session_time. This is signed because it must be able to represent time
// before the session started
boost::int16_t m_last_upload;
// this is a second count-down to when we should tick the
// storage for this torrent. Ticking the storage is used
@ -1636,10 +1638,10 @@
// is optional and may be 0xffffff
unsigned int m_downloaded:24;
// the timestamp of the last scrape request to
// one of the trackers in this torrent
// specified in session_time
boost::uint16_t m_last_scrape;
// the timestamp of the last scrape request to one of the trackers in
// this torrent specified in session_time. This is signed because it must
// be able to represent time before the session started
boost::int16_t m_last_scrape;
// ----
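
Aside: the INT16_MIN sentinel pattern introduced above, shown in isolation. This is a sketch, not libtorrent's actual helper:

#include <boost/cstdint.hpp>
#include <stdint.h> // for INT16_MIN
#include <algorithm>

// sketch: session_time() is a seconds counter starting at 0 when the
// session starts. Timestamps loaded from resume data may predate the
// session, hence the signed type; INT16_MIN means "never happened"
int seconds_since(boost::int16_t last_event, int session_time_now)
{
	if (last_event == INT16_MIN) return -1; // event never happened
	// clamp so that events far in the past don't produce nonsense,
	// mirroring what a clamped_subtract() helper would do
	return (std::max)(0, session_time_now - last_event);
}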

View File

@ -2569,7 +2569,7 @@ namespace libtorrent
if (is_disconnecting()) return; if (is_disconnecting()) return;
// read dh key, generate shared secret // read dh key, generate shared secret
if (m_dh_key_exchange->compute_secret(recv_buffer.begin) == -1) if (m_dh_key_exchange->compute_secret(recv_buffer.begin) != 0)
{ {
disconnect(errors::no_memory, op_encryption); disconnect(errors::no_memory, op_encryption);
return; return;

View File

@ -328,7 +328,7 @@ namespace libtorrent { namespace dht
{
first = false;
pc << "\n\n ***** starting log at " << time_now_string() << " *****\n\n"
<< "minute:active nodes:passive nodes"
<< "minute:active nodes:passive nodes:confirmed nodes"
":ping replies sent:ping queries recvd"
":ping replies bytes sent:ping queries bytes recvd"
":find_node replies sent:find_node queries recv"
@ -348,10 +348,12 @@ namespace libtorrent { namespace dht
int active;
int passive;
boost::tie(active, passive) = m_dht.size();
int confirmed;
boost::tie(active, passive, confirmed) = m_dht.size();
pc << (m_counter * tick_period)
<< "\t" << active
<< "\t" << passive;
<< "\t" << passive
<< "\t" << confirmed;
for (int i = 0; i < 5; ++i)
pc << "\t" << (m_replies_sent[i] / float(tick_period))
<< "\t" << (m_queries_received[i] / float(tick_period))

View File

@ -178,7 +178,10 @@ std::string node_impl::generate_token(udp::endpoint const& addr, char const* inf
void node_impl::bootstrap(std::vector<udp::endpoint> const& nodes
, find_data::nodes_callback const& f)
{
boost::intrusive_ptr<dht::bootstrap> r(new dht::bootstrap(*this, m_id, f));
node_id target = m_id;
make_id_secret(target);
boost::intrusive_ptr<dht::bootstrap> r(new dht::bootstrap(*this, target, f));
m_last_self_refresh = time_now();
#ifdef TORRENT_DHT_VERBOSE_LOGGING
@ -194,6 +197,9 @@ void node_impl::bootstrap(std::vector<udp::endpoint> const& nodes
r->add_entry(node_id(0), *i, observer::flag_initial);
}
// make us start as far away from our node ID as possible
r->trim_seed_nodes();
#ifdef TORRENT_DHT_VERBOSE_LOGGING
TORRENT_LOG(node) << "bootstrapping with " << count << " nodes";
#endif
@ -454,7 +460,9 @@ void node_impl::tick()
ptime now = time_now();
if (m_last_self_refresh + minutes(10) < now)
{
boost::intrusive_ptr<dht::refresh> r(new dht::refresh(*this, m_id
node_id target = m_id;
make_id_secret(target);
boost::intrusive_ptr<dht::bootstrap> r(new dht::bootstrap(*this, target
, boost::bind(&nop)));
r->start();
m_last_self_refresh = now;
@ -478,7 +486,7 @@ void node_impl::send_single_refresh(udp::endpoint const& ep, int bucket
// TODO: 2 it would be nice to have a bias towards node-id prefixes that
// are missing in the bucket
node_id mask = generate_prefix_mask(bucket + 1);
node_id target = generate_random_id() & ~mask;
node_id target = generate_secret_id() & ~mask;
target |= m_id & mask;
// create a dummy traversal_algorithm

View File

@ -151,25 +151,36 @@ node_id generate_id_impl(address const& ip_, boost::uint32_t r)
static boost::uint32_t secret = 0;
node_id generate_random_id()
{
char r[20];
for (int i = 0; i < 20; ++i) r[i] = random() & 0xff;
node_id ret = hasher(r, 20).final();
if (secret == 0) secret = (random() % 0xfffffffe) + 1;
// generate the last 4 bytes as a "signature" of the previous 4 bytes. This
// lets us verify whether a hash came from this function or not in the future.
hasher h((char*)&secret, 4);
h.update((char*)&ret[20-8], 4);
sha1_hash secret_hash = h.final();
memcpy(&ret[20-4], &secret_hash[0], 4);
return ret;
}
bool verify_random_id(node_id const& nid)
void make_id_secret(node_id& in)
{
if (secret == 0) secret = (random() % 0xfffffffe) + 1;
boost::uint32_t rand = random();
// generate the last 4 bytes as a "signature" of the previous 4 bytes. This
// lets us verify whether a hash came from this function or not in the future.
hasher h((char*)&secret, 4);
h.update((char*)&rand, 4);
sha1_hash secret_hash = h.final();
memcpy(&in[20-4], &secret_hash[0], 4);
memcpy(&in[20-8], &rand, 4);
}
node_id generate_random_id()
{
char r[20];
for (int i = 0; i < 20; ++i) r[i] = random() & 0xff;
return hasher(r, 20).final();
}
node_id generate_secret_id()
{
node_id ret = generate_random_id();
make_id_secret(ret);
return ret;
}
bool verify_secret_id(node_id const& nid)
{
if (secret == 0) return false;
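
The body of verify_secret_id() is cut off above. Inferred from make_id_secret(), it would recompute the signature from the same static secret and compare; a sketch, not necessarily the exact truncated body:

bool verify_secret_id_sketch(node_id const& nid)
{
	if (secret == 0) return false;
	// re-derive the 4-byte signature from the static secret and the 4
	// random bytes stored at offset 12, then compare against offset 16
	hasher h((char*)&secret, 4);
	h.update((char const*)&nid[20-8], 4);
	sha1_hash secret_hash = h.final();
	return memcmp(&nid[20-4], &secret_hash[0], 4) == 0; // needs <cstring>
}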

View File

@ -92,6 +92,15 @@ bootstrap::bootstrap(
char const* bootstrap::name() const { return "bootstrap"; }
void bootstrap::trim_seed_nodes()
{
// when we're bootstrapping, we want to start as far away from our ID as
// possible, to cover as much as possible of the ID space. So, remove all
// nodes except for the 32 that are farthest away from us
if (m_results.size() > 32)
m_results.erase(m_results.begin(), m_results.end() - 32);
}
void bootstrap::done()
{
#ifdef TORRENT_DHT_VERBOSE_LOGGING

View File

@ -77,8 +77,6 @@ routing_table::routing_table(node_id const& id, int bucket_size
: m_settings(settings)
, m_id(id)
, m_depth(0)
, m_last_bootstrap(time_now())
, m_last_refresh(min_time())
, m_last_self_refresh(min_time())
, m_bucket_size(bucket_size)
{
@ -97,7 +95,8 @@ int routing_table::bucket_limit(int bucket) const
void routing_table::status(session_status& s) const
{
boost::tie(s.dht_nodes, s.dht_node_cache) = size();
int ignore;
boost::tie(s.dht_nodes, s.dht_node_cache, ignore) = size();
s.dht_global_nodes = num_global_nodes();
for (table_t::const_iterator i = m_buckets.begin()
@ -113,17 +112,24 @@ void routing_table::status(session_status& s) const
}
}
boost::tuple<int, int> routing_table::size() const
boost::tuple<int, int, int> routing_table::size() const
{
int nodes = 0;
int replacements = 0;
int confirmed = 0;
for (table_t::const_iterator i = m_buckets.begin()
, end(m_buckets.end()); i != end; ++i)
{
nodes += i->live_nodes.size();
for (bucket_t::const_iterator k = i->live_nodes.begin()
, end(i->live_nodes.end()); k != end; ++k)
{
if (k->confirmed()) ++confirmed;
}
replacements += i->replacements.size();
}
return boost::make_tuple(nodes, replacements);
return boost::make_tuple(nodes, replacements, confirmed);
}
size_type routing_table::num_global_nodes() const
@ -441,16 +447,37 @@ void routing_table::remove_node(node_entry* n
}
bool routing_table::add_node(node_entry e)
{
add_node_status_t s = add_node_impl(e);
if (s == failed_to_add) return false;
if (s == node_added) return true;
while (s == need_bucket_split)
{
split_bucket();
// if the new bucket still has too many nodes in it, we need to keep
// splitting
if (m_buckets.back().live_nodes.size() > bucket_limit(m_buckets.size()-1))
continue;
s = add_node_impl(e);
if (s == failed_to_add) return false;
if (s == node_added) return true;
}
return false;
}
routing_table::add_node_status_t routing_table::add_node_impl(node_entry e)
{
INVARIANT_CHECK;
// if we already have this (IP,port), don't do anything
if (m_router_nodes.find(e.ep()) != m_router_nodes.end()) return false;
if (m_router_nodes.find(e.ep()) != m_router_nodes.end())
return failed_to_add;
bool ret = need_bootstrap();
// don't add ourself
if (e.id == m_id) return ret;
if (e.id == m_id) return failed_to_add;
// do we already have this IP in the table?
if (m_ips.count(e.addr().to_v4().to_bytes()) > 0)
@ -476,7 +503,7 @@ bool routing_table::add_node(node_entry e)
TORRENT_LOG(table) << "ignoring node (duplicate IP): "
<< e.id << " " << e.addr();
#endif
return ret;
return failed_to_add;
}
}
else if (existing && existing->id == e.id)
@ -486,7 +513,7 @@ bool routing_table::add_node(node_entry e)
existing->timeout_count = 0;
existing->update_rtt(e.rtt);
existing->last_queried = e.last_queried;
return ret;
return node_added;
}
else if (existing)
{
@ -514,14 +541,15 @@ bool routing_table::add_node(node_entry e)
{
// a new IP address just claimed this node-ID
// ignore it
if (j->addr() != e.addr() || j->port() != e.port()) return ret;
if (j->addr() != e.addr() || j->port() != e.port())
return failed_to_add;
// we already have the node in our bucket
TORRENT_ASSERT(j->id == e.id && j->ep() == e.ep());
j->timeout_count = 0;
j->update_rtt(e.rtt);
// TORRENT_LOG(table) << "updating node: " << i->id << " " << i->addr();
return ret;
return node_added;
}
// if this node exists in the replacement bucket. update it and
@ -533,7 +561,9 @@ bool routing_table::add_node(node_entry e)
{
// a new IP address just claimed this node-ID
// ignore it
if (j->addr() != e.addr() || j->port() != e.port()) return ret;
if (j->addr() != e.addr() || j->port() != e.port())
return failed_to_add;
TORRENT_ASSERT(j->id == e.id && j->ep() == e.ep());
j->timeout_count = 0;
j->update_rtt(e.rtt);
@ -556,7 +586,7 @@ bool routing_table::add_node(node_entry e)
<< " existing node: " << " existing node: "
<< j->id << " " << j->addr(); << j->id << " " << j->addr();
#endif #endif
return ret; return failed_to_add;
} }
j = std::find_if(rb.begin(), rb.end(), boost::bind(&compare_ip_cidr, _1, e)); j = std::find_if(rb.begin(), rb.end(), boost::bind(&compare_ip_cidr, _1, e));
@ -568,7 +598,7 @@ bool routing_table::add_node(node_entry e)
<< " existing node: " << " existing node: "
<< j->id << " " << j->addr(); << j->id << " " << j->addr();
#endif #endif
return ret; return failed_to_add;
} }
} }
@ -579,7 +609,7 @@ bool routing_table::add_node(node_entry e)
b.push_back(e);
m_ips.insert(e.addr().to_v4().to_bytes());
// TORRENT_LOG(table) << "inserting node: " << e.id << " " << e.addr();
return ret;
return node_added;
}
// if there is no room, we look for nodes that are not 'pinged', // if there is no room, we look for nodes that are not 'pinged',
@ -613,7 +643,7 @@ bool routing_table::add_node(node_entry e)
*j = e;
m_ips.insert(e.addr().to_v4().to_bytes());
// TORRENT_LOG(table) << "replacing unpinged node: " << e.id << " " << e.addr();
return ret;
return node_added;
}
// A node is considered stale if it has failed at least one // A node is considered stale if it has failed at least one
@ -635,7 +665,7 @@ bool routing_table::add_node(node_entry e)
*j = e;
m_ips.insert(e.addr().to_v4().to_bytes());
// TORRENT_LOG(table) << "replacing stale node: " << e.id << " " << e.addr();
return ret;
return node_added;
}
// in order to provide as few lookups as possible before finding // in order to provide as few lookups as possible before finding
@ -746,7 +776,7 @@ bool routing_table::add_node(node_entry e)
TORRENT_LOG(table) << "replacing node with higher RTT: " << e.id TORRENT_LOG(table) << "replacing node with higher RTT: " << e.id
<< " " << e.addr(); << " " << e.addr();
#endif #endif
return ret; return node_added;
} }
// in order to keep lookup times small, prefer nodes with low RTTs // in order to keep lookup times small, prefer nodes with low RTTs
@ -771,7 +801,7 @@ bool routing_table::add_node(node_entry e)
// if the IP address matches, it's the same node
// make sure it's marked as pinged
if (j->ep() == e.ep()) j->set_pinged();
return ret;
return node_added;
}
if ((int)rb.size() >= m_bucket_size)
@ -789,29 +819,10 @@ bool routing_table::add_node(node_entry e)
rb.push_back(e);
m_ips.insert(e.addr().to_v4().to_bytes());
// TORRENT_LOG(table) << "inserting node in replacement cache: " << e.id << " " << e.addr();
return ret;
return node_added;
}
split_bucket();
return need_bucket_split;
// now insert the new node in the appropriate bucket
i = find_bucket(e.id);
int dst_bucket = std::distance(m_buckets.begin(), i);
bucket_t& nb = i->live_nodes;
bucket_t& nrb = i->replacements;
if (int(nb.size()) < bucket_limit(dst_bucket))
nb.push_back(e);
else if (int(nrb.size()) < m_bucket_size)
nrb.push_back(e);
else
nb.push_back(e); // trigger another split
m_ips.insert(e.addr().to_v4().to_bytes());
while (int(m_buckets.back().live_nodes.size()) > bucket_limit(m_buckets.size() - 1))
split_bucket();
return ret;
}
void routing_table::split_bucket()
@ -846,6 +857,18 @@ void routing_table::split_bucket()
j = b.erase(j);
}
if (b.size() > bucket_size_limit)
{
// TODO: 3 move the lowest priority nodes to the replacement bucket
for (bucket_t::iterator i = b.begin() + bucket_size_limit
, end(b.end()); i != end; ++i)
{
rb.push_back(*i);
}
b.resize(bucket_size_limit);
}
// split the replacement bucket as well. If the live bucket
// is not full anymore, also move the replacement entries
// into the main bucket
@ -865,10 +888,8 @@ void routing_table::split_bucket()
// this entry belongs in the new bucket
if (int(new_bucket.size()) < new_bucket_size)
new_bucket.push_back(*j);
else if (int(new_replacement_bucket.size()) < m_bucket_size)
new_replacement_bucket.push_back(*j);
else
erase_one(m_ips, j->addr().to_v4().to_bytes());
new_replacement_bucket.push_back(*j);
}
j = rb.erase(j);
}
@ -996,24 +1017,6 @@ bool routing_table::node_seen(node_id const& id, udp::endpoint ep, int rtt)
return add_node(node_entry(id, ep, rtt, true));
}
bool routing_table::need_bootstrap() const
{
ptime now = time_now();
if (now - seconds(30) < m_last_bootstrap) return false;
for (table_t::const_iterator i = m_buckets.begin()
, end(m_buckets.end()); i != end; ++i)
{
for (bucket_t::const_iterator j = i->live_nodes.begin()
, end(i->live_nodes.end()); j != end; ++j)
{
if (j->confirmed()) return false;
}
}
m_last_bootstrap = now;
return true;
}
// fills the vector with the k nodes from our buckets that
// are nearest to the given id.
void routing_table::find_node(node_id const& target

View File

@ -556,6 +556,60 @@ namespace libtorrent
return line_len;
}
void escape_string(std::string& ret, char const* str, int len)
{
for (int i = 0; i < len; ++i)
{
if (str[i] >= 32 && str[i] < 127)
{
ret += str[i];
}
else
{
char tmp[5];
snprintf(tmp, sizeof(tmp), "\\x%02x", (unsigned char)str[i]);
ret += tmp;
}
}
}
void print_string(std::string& ret, char const* str, int len, bool single_line)
{
bool printable = true;
for (int i = 0; i < len; ++i)
{
char c = str[i];
if (c >= 32 && c < 127) continue;
printable = false;
break;
}
ret += "'";
if (printable)
{
if (single_line && len > 30)
{
ret.append(str, 14);
ret += "...";
ret.append(str + len-14, 14);
}
else
ret.append(str, len);
ret += "'";
return;
}
if (single_line && len > 20)
{
escape_string(ret, str, 9);
ret += "...";
escape_string(ret, str + len - 9, 9);
}
else
{
escape_string(ret, str, len);
}
ret += "'";
}
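
For reference, what the extracted helpers produce on hypothetical inputs. Binary bytes are now \xNN-escaped with printable characters kept, instead of the previous all-hex dump:

// hypothetical usage of the helpers above
std::string out;
print_string(out, "hello", 5, true);  // out == "'hello'"
out.clear();
print_string(out, "a\x01z", 3, true); // out == "'a\\x01z'" (0x01 escaped,
                                      // printable characters preserved)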
std::string print_entry(lazy_entry const& e, bool single_line, int indent)
{
char indent_str[200];
@ -576,56 +630,7 @@
}
case lazy_entry::string_t:
{
bool printable = true;
print_string(ret, e.string_ptr(), e.string_length(), single_line);
char const* str = e.string_ptr();
for (int i = 0; i < e.string_length(); ++i)
{
char c = str[i];
if (c >= 32 && c < 127) continue;
printable = false;
break;
}
ret += "'";
if (printable)
{
if (single_line && e.string_length() > 30)
{
ret.append(e.string_ptr(), 14);
ret += "...";
ret.append(e.string_ptr() + e.string_length()-14, 14);
}
else
ret.append(e.string_ptr(), e.string_length());
ret += "'";
return ret;
}
if (single_line && e.string_length() > 20)
{
for (int i = 0; i < 9; ++i)
{
char tmp[5];
snprintf(tmp, sizeof(tmp), "%02x", (unsigned char)str[i]);
ret += tmp;
}
ret += "...";
for (int i = e.string_length() - 9
, len(e.string_length()); i < len; ++i)
{
char tmp[5];
snprintf(tmp, sizeof(tmp), "%02x", (unsigned char)str[i]);
ret += tmp;
}
}
else
{
for (int i = 0; i < e.string_length(); ++i)
{
char tmp[5];
snprintf(tmp, sizeof(tmp), "%02x", (unsigned char)str[i]);
ret += tmp;
}
}
ret += "'";
return ret;
}
case lazy_entry::list_t:
@ -654,9 +659,8 @@ namespace libtorrent
{
if (i == 0 && one_liner) ret += " ";
std::pair<std::string, lazy_entry const*> ent = e.dict_at(i);
ret += "'";
ret += ent.first;
ret += "': ";
print_string(ret, ent.first.c_str(), ent.first.size(), true);
ret += ": ";
ret += print_entry(*ent.second, single_line, indent + 2);
if (i < e.dict_size() - 1) ret += (one_liner?", ":indent_str);
else ret += (one_liner?" ":indent_str+1);

View File

@ -225,6 +225,7 @@ get_out:
}
get_out:
// TODO: 3 clean this up using destructors instead
if (prime) gcry_mpi_release(prime);
if (remote_key) gcry_mpi_release(remote_key);
if (secret) gcry_mpi_release(secret);

View File

@ -1223,9 +1223,9 @@ namespace libtorrent
#endif
#ifndef TORRENT_DISABLE_DHT
if (dht::verify_random_id(ih))
if (dht::verify_secret_id(ih))
{
// this means the hash was generated from our generate_random_id()
// this means the hash was generated from our generate_secret_id()
// as part of DHT traffic. The fact that we got an incoming
// connection on this info-hash, means the other end, making this
// connection fished it out of the DHT chatter. That's suspicious.

View File

@ -223,16 +223,16 @@ namespace libtorrent
, m_deleted(false)
, m_pinned(p.flags & add_torrent_params::flag_pinned)
, m_should_be_loaded(true)
, m_last_download(0)
, m_last_download(INT16_MIN)
, m_num_seeds(0)
, m_last_upload(0)
, m_last_upload(INT16_MIN)
, m_storage_tick(0)
, m_auto_managed(p.flags & add_torrent_params::flag_auto_managed)
, m_current_gauge_state(no_gauge_state)
, m_moving_storage(false)
, m_inactive(false)
, m_downloaded(0xffffff)
, m_last_scrape(0)
, m_last_scrape(INT16_MIN)
, m_progress_ppm(0)
, m_use_resume_save_path(p.flags & add_torrent_params::flag_use_resume_save_path)
{
@ -739,26 +739,21 @@ namespace libtorrent
#if defined TORRENT_VERBOSE_LOGGING || defined TORRENT_LOGGING
debug_log("starting torrent");
#endif
TORRENT_ASSERT(!m_picker);
if (!m_seed_mode)
{
std::vector<boost::uint64_t>().swap(m_file_progress);
if (m_resume_data)
{
int pos;
error_code ec;
if (lazy_bdecode(&m_resume_data->buf[0], &m_resume_data->buf[0]
+ m_resume_data->buf.size(), m_resume_data->entry, ec, &pos) != 0)
{
m_resume_data.reset();
#if defined TORRENT_VERBOSE_LOGGING || defined TORRENT_LOGGING || defined TORRENT_ERROR_LOGGING
debug_log("resume data rejected: %s pos: %d", ec.message().c_str(), pos);
#endif
if (m_ses.alerts().should_post<fastresume_rejected_alert>())
m_ses.alerts().post_alert(fastresume_rejected_alert(get_handle(), ec, "", 0));
}
}
}
std::vector<boost::uint64_t>().swap(m_file_progress);
if (m_resume_data)
{
int pos;
error_code ec;
if (lazy_bdecode(&m_resume_data->buf[0], &m_resume_data->buf[0]
+ m_resume_data->buf.size(), m_resume_data->entry, ec, &pos) != 0)
{
m_resume_data.reset();
#if defined TORRENT_VERBOSE_LOGGING || defined TORRENT_LOGGING || defined TORRENT_ERROR_LOGGING
debug_log("resume data rejected: %s pos: %d", ec.message().c_str(), pos);
#endif
if (m_ses.alerts().should_post<fastresume_rejected_alert>())
m_ses.alerts().post_alert(fastresume_rejected_alert(get_handle(), ec, "", 0));
}
}
@ -1726,68 +1721,6 @@ namespace libtorrent
return;
}
// Chicken-and-egg: need to load resume data to get last save_path
// before constructing m_owning_storage, but need storage before
// loading resume data. So peek ahead in this case.
// only do this if the user is willing to have the resume data
// settings override the settings set in add_torrent_params
if (m_use_resume_save_path
&& m_resume_data
&& m_resume_data->entry.type() == lazy_entry::dict_t)
{
std::string p = m_resume_data->entry.dict_find_string_value("save_path");
if (!p.empty()) m_save_path = p;
}
construct_storage();
if (m_share_mode && valid_metadata())
{
// in share mode, all pieces have their priorities initialized to 0
m_file_priority.clear();
m_file_priority.resize(m_torrent_file->num_files(), 0);
}
if (!m_connections_initialized)
{
m_connections_initialized = true;
// all peer connections have to initialize themselves now that the metadata
// is available
// copy the peer list since peers may disconnect and invalidate
// m_connections as we initialize them
std::vector<peer_connection*> peers = m_connections;
for (torrent::peer_iterator i = peers.begin();
i != peers.end(); ++i)
{
peer_connection* pc = *i;
if (pc->is_disconnecting()) continue;
pc->on_metadata_impl();
if (pc->is_disconnecting()) continue;
pc->init();
}
}
// in case file priorities were passed in via the add_torrent_params
// and also in the case of share mode, we need to update the priorities
update_piece_priorities();
std::vector<web_seed_entry> const& web_seeds = m_torrent_file->web_seeds();
m_web_seeds.insert(m_web_seeds.end(), web_seeds.begin(), web_seeds.end());
if (m_seed_mode)
{
m_have_all = true;
m_ses.get_io_service().post(boost::bind(&torrent::files_checked, shared_from_this()));
m_resume_data.reset();
#if TORRENT_USE_ASSERTS
m_resume_data_loaded = true;
#endif
update_gauge();
return;
}
set_state(torrent_status::checking_resume_data);
if (m_resume_data && m_resume_data->entry.type() == lazy_entry::dict_t)
{
int ev = 0;
@ -1825,6 +1758,76 @@ namespace libtorrent
m_resume_data_loaded = true;
#endif
construct_storage();
if (!m_seed_mode && m_resume_data)
{
lazy_entry const* piece_priority = m_resume_data->entry.dict_find_string("piece_priority");
if (piece_priority && piece_priority->string_length()
== m_torrent_file->num_pieces())
{
char const* p = piece_priority->string_ptr();
for (int i = 0; i < piece_priority->string_length(); ++i)
{
int prio = p[i];
if (!has_picker() && prio == 1) continue;
need_picker();
m_picker->set_piece_priority(i, p[i]);
update_gauge();
}
}
}
if (m_share_mode && valid_metadata())
{
// in share mode, all pieces have their priorities initialized to 0
m_file_priority.clear();
m_file_priority.resize(m_torrent_file->num_files(), 0);
}
if (!m_connections_initialized)
{
m_connections_initialized = true;
// all peer connections have to initialize themselves now that the metadata
// is available
// copy the peer list since peers may disconnect and invalidate
// m_connections as we initialize them
std::vector<peer_connection*> peers = m_connections;
for (torrent::peer_iterator i = peers.begin();
i != peers.end(); ++i)
{
peer_connection* pc = *i;
if (pc->is_disconnecting()) continue;
pc->on_metadata_impl();
if (pc->is_disconnecting()) continue;
pc->init();
}
}
// in case file priorities were passed in via the add_torrent_params
// and also in the case of share mode, we need to update the priorities
update_piece_priorities();
std::vector<web_seed_entry> const& web_seeds = m_torrent_file->web_seeds();
m_web_seeds.insert(m_web_seeds.end(), web_seeds.begin(), web_seeds.end());
set_state(torrent_status::checking_resume_data);
#if TORRENT_USE_ASSERTS
m_resume_data_loaded = true;
#endif
if (m_seed_mode)
{
m_have_all = true;
m_ses.get_io_service().post(boost::bind(&torrent::files_checked, shared_from_this()));
m_resume_data.reset();
update_gauge();
return;
}
set_state(torrent_status::checking_resume_data);
int num_pad_files = 0;
TORRENT_ASSERT(block_size() > 0);
file_storage const& fs = m_torrent_file->files();
@ -6505,17 +6508,63 @@ namespace libtorrent
m_complete = rd.dict_find_int_value("num_complete", 0xffffff);
m_incomplete = rd.dict_find_int_value("num_incomplete", 0xffffff);
m_downloaded = rd.dict_find_int_value("num_downloaded", 0xffffff);
set_upload_limit(rd.dict_find_int_value("upload_rate_limit", -1));
set_download_limit(rd.dict_find_int_value("download_rate_limit", -1));
set_max_connections(rd.dict_find_int_value("max_connections", -1));
set_max_uploads(rd.dict_find_int_value("max_uploads", -1));
m_seed_mode = rd.dict_find_int_value("seed_mode", 0) && m_torrent_file->is_valid();
if (m_seed_mode)
{
m_verified.resize(m_torrent_file->num_pieces(), false);
m_verifying.resize(m_torrent_file->num_pieces(), false);
}
super_seeding(rd.dict_find_int_value("super_seeding", 0));
if (!m_override_resume_data)
{
int up_limit_ = rd.dict_find_int_value("upload_rate_limit", -1);
if (up_limit_ != -1) set_upload_limit(up_limit_);
int down_limit_ = rd.dict_find_int_value("download_rate_limit", -1);
if (down_limit_ != -1) set_download_limit(down_limit_);
int max_connections_ = rd.dict_find_int_value("max_connections", -1);
if (max_connections_ != -1) set_max_connections(max_connections_);
int max_uploads_ = rd.dict_find_int_value("max_uploads", -1);
if (max_uploads_ != -1) set_max_uploads(max_uploads_);
int seed_mode_ = rd.dict_find_int_value("seed_mode", -1);
if (seed_mode_ != -1) m_seed_mode = seed_mode_ && m_torrent_file->is_valid();
int super_seeding_ = rd.dict_find_int_value("super_seeding", -1);
if (super_seeding_ != -1) super_seeding(super_seeding_);
int auto_managed_ = rd.dict_find_int_value("auto_managed", -1);
if (auto_managed_ != -1) m_auto_managed = auto_managed_;
int sequential_ = rd.dict_find_int_value("sequential_download", -1);
if (sequential_ != -1) set_sequential_download(sequential_);
int paused_ = rd.dict_find_int_value("paused", -1);
if (paused_ != -1)
{
set_allow_peers(!paused_);
m_announce_to_dht = !paused_;
m_announce_to_trackers = !paused_;
m_announce_to_lsd = !paused_;
update_gauge();
update_want_peers();
update_want_scrape();
}
int dht_ = rd.dict_find_int_value("announce_to_dht", -1);
if (dht_ != -1) m_announce_to_dht = dht_;
int lsd_ = rd.dict_find_int_value("announce_to_lsd", -1);
if (lsd_ != -1) m_announce_to_lsd = lsd_;
int track_ = rd.dict_find_int_value("announce_to_trackers", -1);
if (track_ != -1) m_announce_to_trackers = track_;
}
if (m_seed_mode)
m_verified.resize(m_torrent_file->num_pieces(), false);
int now = m_ses.session_time();
int tmp = rd.dict_find_int_value("last_scrape", -1);
m_last_scrape = tmp == -1 ? INT16_MIN : now - tmp;
tmp = rd.dict_find_int_value("last_download", -1);
m_last_download = tmp == -1 ? INT16_MIN : now - tmp;
tmp = rd.dict_find_int_value("last_upload", -1);
m_last_upload = tmp == -1 ? INT16_MIN : now - tmp;
if (m_use_resume_save_path)
{
@ -6557,79 +6606,34 @@ namespace libtorrent
if (m_completed_time != 0 && m_completed_time < m_added_time)
m_completed_time = m_added_time;
lazy_entry const* file_priority = rd.dict_find_list("file_priority");
if (file_priority && file_priority->list_size()
== m_torrent_file->num_files())
{
int num_files = m_torrent_file->num_files();
m_file_priority.resize(num_files);
for (int i = 0; i < num_files; ++i)
m_file_priority[i] = file_priority->list_int_value_at(i, 1);
// unallocated slots are assumed to be priority 1, so cut off any
// trailing ones
int end_range = num_files - 1;
for (; end_range >= 0; --end_range) if (m_file_priority[end_range] != 1) break;
m_file_priority.resize(end_range + 1);
// initialize pad files to priority 0
file_storage const& fs = m_torrent_file->files();
for (int i = 0; i < (std::min)(fs.num_files(), end_range + 1); ++i)
{
if (!fs.pad_file_at(i)) continue;
m_file_priority[i] = 0;
}
update_piece_priorities();
}
if (!m_seed_mode && !m_override_resume_data)
{
lazy_entry const* file_priority = rd.dict_find_list("file_priority");
if (file_priority && file_priority->list_size()
== m_torrent_file->num_files())
{
int num_files = m_torrent_file->num_files();
m_file_priority.resize(num_files);
for (int i = 0; i < num_files; ++i)
m_file_priority[i] = file_priority->list_int_value_at(i, 1);
// unallocated slots are assumed to be priority 1, so cut off any
// trailing ones
int end_range = num_files - 1;
for (; end_range >= 0; --end_range) if (m_file_priority[end_range] != 1) break;
m_file_priority.resize(end_range + 1);
// initialize pad files to priority 0
file_storage const& fs = m_torrent_file->files();
for (int i = 0; i < (std::min)(fs.num_files(), end_range + 1); ++i)
{
if (!fs.pad_file_at(i)) continue;
m_file_priority[i] = 0;
}
}
update_piece_priorities();
}
lazy_entry const* piece_priority = rd.dict_find_string("piece_priority");
if (piece_priority && piece_priority->string_length()
== m_torrent_file->num_pieces())
{
char const* p = piece_priority->string_ptr();
for (int i = 0; i < piece_priority->string_length(); ++i)
{
int prio = p[i];
if (!has_picker() && prio == 1) continue;
need_picker();
m_picker->set_piece_priority(i, p[i]);
update_gauge();
}
}
if (!m_override_resume_data)
{
int auto_managed_ = rd.dict_find_int_value("auto_managed", -1);
if (auto_managed_ != -1) m_auto_managed = auto_managed_;
update_gauge();
}
int sequential_ = rd.dict_find_int_value("sequential_download", -1);
if (sequential_ != -1) set_sequential_download(sequential_);
if (!m_override_resume_data)
{
int paused_ = rd.dict_find_int_value("paused", -1);
if (paused_ != -1)
{
set_allow_peers(!paused_);
m_announce_to_dht = !paused_;
m_announce_to_trackers = !paused_;
m_announce_to_lsd = !paused_;
update_gauge();
update_want_peers();
update_want_scrape();
}
int dht_ = rd.dict_find_int_value("announce_to_dht", -1);
if (dht_ != -1) m_announce_to_dht = dht_;
int lsd_ = rd.dict_find_int_value("announce_to_lsd", -1);
if (lsd_ != -1) m_announce_to_lsd = lsd_;
int track_ = rd.dict_find_int_value("announce_to_trackers", -1);
if (track_ != -1) m_announce_to_trackers = track_;
}
lazy_entry const* trackers = rd.dict_find_list("trackers");
if (trackers)
{
@ -6873,7 +6877,7 @@ namespace libtorrent
{
std::memset(&pieces[0], m_have_all, pieces.size());
}
else
else if (has_picker())
{
for (int i = 0, end(pieces.size()); i < end; ++i)
pieces[i] = m_picker->have_piece(i) ? 1 : 0;
@ -11191,8 +11195,8 @@ namespace libtorrent
st->added_time = m_added_time;
st->completed_time = m_completed_time;
st->last_scrape = m_last_scrape == 0 ? -1
: m_ses.session_time() - m_last_scrape;
st->last_scrape = m_last_scrape == INT16_MIN ? -1
: clamped_subtract(m_ses.session_time(), m_last_scrape);
st->share_mode = m_share_mode;
st->upload_mode = m_upload_mode;
@ -11223,10 +11227,10 @@ namespace libtorrent
st->finished_time = finished_time();
st->active_time = active_time();
st->seeding_time = seeding_time();
st->time_since_upload = m_last_upload == 0 ? -1
: m_ses.session_time() - m_last_upload;
st->time_since_download = m_last_download == 0 ? -1
: m_ses.session_time() - m_last_download;
st->time_since_upload = m_last_upload == INT16_MIN ? -1
: clamped_subtract(m_ses.session_time(), m_last_upload);
st->time_since_download = m_last_download == INT16_MIN ? -1
: clamped_subtract(m_ses.session_time(), m_last_download);
st->storage_mode = (storage_mode_t)m_storage_mode;

View File

@ -90,6 +90,7 @@ feature launcher : none valgrind : composite ;
feature.compose <launcher>valgrind : <testing.launcher>"valgrind --tool=memcheck -v --num-callers=20 --read-var-info=yes --track-origins=yes --error-exitcode=222 --suppressions=valgrind_suppressions.txt" <valgrind>on ;
test-suite libtorrent :
[ run test_resume.cpp ]
[ run test_sliding_average.cpp ]
[ run test_socket_io.cpp ]
[ run test_random.cpp ]

View File

@ -37,6 +37,7 @@ test_programs = \
test_packet_buffer \
test_settings_pack \
test_read_piece \
test_resume \
test_rss \
test_ssl \
test_storage \

View File

@ -620,11 +620,15 @@ int test_main()
// ====== test node ID testing =====
{
node_id rnd = generate_random_id();
TEST_CHECK(verify_random_id(rnd));
rnd[19] ^= 0x55;
TEST_CHECK(!verify_random_id(rnd));
node_id rnd = generate_secret_id();
TEST_CHECK(verify_secret_id(rnd));
rnd[19] ^= 0x55;
TEST_CHECK(!verify_secret_id(rnd));
rnd = generate_random_id();
make_id_secret(rnd);
TEST_CHECK(verify_secret_id(rnd));
}
// ====== test node ID enforcement ======

test/test_resume.cpp Normal file (297 lines)
View File

@ -0,0 +1,297 @@
/*
Copyright (c) 2014, Arvid Norberg
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#include "libtorrent/session.hpp"
#include "libtorrent/add_torrent_params.hpp"
#include "libtorrent/torrent_info.hpp"
#include "libtorrent/random.hpp"
#include "libtorrent/create_torrent.hpp"
#include <boost/make_shared.hpp>
#include "test.hpp"
#include "setup_transfer.hpp"
using namespace libtorrent;
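// these tests exercise how the flags in add_torrent_params interact with
// conflicting values loaded from resume data when a torrent is added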
boost::shared_ptr<torrent_info> generate_torrent()
{
file_storage fs;
fs.add_file("test_resume/tmp1", 128 * 1024 * 10);
libtorrent::create_torrent t(fs, 128 * 1024, 6);
t.add_tracker("http://torrent_file_tracker.com/announce");
int num = t.num_pieces();
TEST_CHECK(num > 0);
for (int i = 0; i < num; ++i)
{
sha1_hash ph;
for (int k = 0; k < 20; ++k) ph[k] = libtorrent::random();
t.set_hash(i, ph);
}
std::vector<char> buf;
bencode(std::back_inserter(buf), t.generate());
return boost::make_shared<torrent_info>(&buf[0], buf.size());
}
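// build a bencoded resume file for this torrent, with each field set to a
// distinct sentinel value (1337, 1338, ...) so the tests can tell whether a
// given field came from the resume data or from add_torrent_params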
std::vector<char> generate_resume_data(torrent_info* ti)
{
entry rd;
rd["file-format"] = "libtorrent resume file";
rd["file-version"] = 1;
rd["info-hash"] = ti->info_hash().to_string();
rd["blocks per piece"] = (std::max)(1, ti->piece_length() / 0x4000);
rd["pieces"] = std::string(ti->num_pieces(), '\0');
rd["total_uploaded"] = 1337;
rd["total_downloaded"] = 1338;
rd["active_time"] = 1339;
rd["seeding_time"] = 1340;
rd["num_seeds"] = 1341;
rd["num_downloaders"] = 1342;
rd["upload_rate_limit"] = 1343;
rd["download_rate_limit"] = 1344;
rd["max_connections"] = 1345;
rd["max_uploads"] = 1346;
rd["seed_mode"] = 0;
rd["super_seeding"] = 0;
rd["added_time"] = 1347;
rd["completed_time"] = 1348;
rd["last_scrape"] = 1349;
rd["last_download"] = 1350;
rd["last_upload"] = 1351;
rd["finished_time"] = 1352;
entry::list_type& file_prio = rd["file_priority"].list();
file_prio.push_back(entry(1));
rd["piece_priority"] = std::string(ti->num_pieces(), '\x01');
rd["auto_managed"] = 0;
rd["sequential_download"] = 0;
rd["paused"] = 0;
entry::list_type& trackers = rd["trackers"].list();
trackers.push_back(entry(entry::list_t));
trackers.back().list().push_back(entry("http://resume_data_tracker.com/announce"));
entry::list_type& url_list = rd["url-list"].list();
url_list.push_back(entry("http://resume_data_url_seed.com"));
entry::list_type& httpseeds = rd["httpseeds"].list();
httpseeds.push_back(entry("http://resume_data_http_seed.com"));
rd["save_path"] = "/resume_data save_path";
std::vector<char> ret;
bencode(back_inserter(ret), rd);
return ret;
}
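// add the torrent with the given flags, passing both the resume data above
// and add_torrent_params fields that deliberately conflict with it, and
// return the resulting status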
torrent_status test_resume_flags(int flags)
{
session ses;
boost::shared_ptr<torrent_info> ti = generate_torrent();
add_torrent_params p;
p.ti = ti;
p.flags = flags;
p.save_path = "/add_torrent_params save_path";
p.trackers.push_back("http://add_torrent_params_tracker.com/announce");
p.url_seeds.push_back("http://add_torrent_params_url_seed.com");
std::vector<char> rd = generate_resume_data(ti.get());
p.resume_data.swap(rd);
p.max_uploads = 1;
p.max_connections = 2;
p.upload_limit = 3;
p.download_limit = 4;
p.file_priorities.push_back(2);
torrent_handle h = ses.add_torrent(p);
torrent_status s = h.status();
TEST_EQUAL(s.info_hash, ti->info_hash());
return s;
}
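// these statistics fields are expected to come from the resume data in every
// flag combination exercised below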
void default_tests(torrent_status const& s)
{
TEST_EQUAL(s.last_scrape, 1349);
TEST_EQUAL(s.time_since_download, 1350);
TEST_EQUAL(s.time_since_upload, 1351);
TEST_EQUAL(s.active_time, 1339);
TEST_EQUAL(s.finished_time, 1352);
TEST_EQUAL(s.seeding_time, 1340);
TEST_EQUAL(s.added_time, 1347);
TEST_EQUAL(s.completed_time, 1348);
}
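// each case adds the torrent with a different flag combination and checks,
// field by field, whether the resume data or add_torrent_params won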
int test_main()
{
torrent_status s;
fprintf(stderr, "flags: 0\n");
s = test_resume_flags(0);
default_tests(s);
TEST_EQUAL(s.save_path, "/add_torrent_params save_path");
TEST_EQUAL(s.sequential_download, false);
TEST_EQUAL(s.paused, false);
TEST_EQUAL(s.auto_managed, false);
TEST_EQUAL(s.seed_mode, false);
TEST_EQUAL(s.super_seeding, false);
TEST_EQUAL(s.share_mode, false);
TEST_EQUAL(s.upload_mode, false);
TEST_EQUAL(s.ip_filter_applies, false);
TEST_EQUAL(s.connections_limit, 1345);
TEST_EQUAL(s.uploads_limit, 1346);
fprintf(stderr, "flags: use_resume_save_path\n");
s = test_resume_flags(add_torrent_params::flag_use_resume_save_path);
default_tests(s);
TEST_EQUAL(s.save_path, "/resume_data save_path");
TEST_EQUAL(s.sequential_download, false);
TEST_EQUAL(s.paused, false);
TEST_EQUAL(s.auto_managed, false);
TEST_EQUAL(s.seed_mode, false);
TEST_EQUAL(s.super_seeding, false);
TEST_EQUAL(s.share_mode, false);
TEST_EQUAL(s.upload_mode, false);
TEST_EQUAL(s.ip_filter_applies, false);
TEST_EQUAL(s.connections_limit, 1345);
TEST_EQUAL(s.uploads_limit, 1346);
fprintf(stderr, "flags: override_resume_data\n");
s = test_resume_flags(add_torrent_params::flag_override_resume_data
| add_torrent_params::flag_paused);
default_tests(s);
TEST_EQUAL(s.save_path, "/add_torrent_params save_path");
TEST_EQUAL(s.sequential_download, false);
TEST_EQUAL(s.paused, true);
TEST_EQUAL(s.auto_managed, false);
TEST_EQUAL(s.seed_mode, false);
TEST_EQUAL(s.super_seeding, false);
TEST_EQUAL(s.share_mode, false);
TEST_EQUAL(s.upload_mode, false);
TEST_EQUAL(s.ip_filter_applies, false);
TEST_EQUAL(s.connections_limit, 2);
TEST_EQUAL(s.uploads_limit, 1);
fprintf(stderr, "flags: seed_mode\n");
s = test_resume_flags(add_torrent_params::flag_override_resume_data
| add_torrent_params::flag_seed_mode);
default_tests(s);
TEST_EQUAL(s.save_path, "/add_torrent_params save_path");
TEST_EQUAL(s.sequential_download, false);
TEST_EQUAL(s.paused, false);
TEST_EQUAL(s.auto_managed, false);
TEST_EQUAL(s.seed_mode, true);
TEST_EQUAL(s.super_seeding, false);
TEST_EQUAL(s.share_mode, false);
TEST_EQUAL(s.upload_mode, false);
TEST_EQUAL(s.ip_filter_applies, false);
TEST_EQUAL(s.connections_limit, 2);
TEST_EQUAL(s.uploads_limit, 1);
fprintf(stderr, "flags: upload_mode\n");
s = test_resume_flags(add_torrent_params::flag_upload_mode);
default_tests(s);
TEST_EQUAL(s.save_path, "/add_torrent_params save_path");
TEST_EQUAL(s.sequential_download, false);
TEST_EQUAL(s.paused, false);
TEST_EQUAL(s.auto_managed, false);
TEST_EQUAL(s.seed_mode, false);
TEST_EQUAL(s.super_seeding, false);
TEST_EQUAL(s.share_mode, false);
TEST_EQUAL(s.upload_mode, true);
TEST_EQUAL(s.ip_filter_applies, false);
TEST_EQUAL(s.connections_limit, 1345);
TEST_EQUAL(s.uploads_limit, 1346);
fprintf(stderr, "flags: share_mode\n");
s = test_resume_flags(add_torrent_params::flag_override_resume_data
| add_torrent_params::flag_share_mode);
default_tests(s);
TEST_EQUAL(s.save_path, "/add_torrent_params save_path");
TEST_EQUAL(s.sequential_download, false);
TEST_EQUAL(s.paused, false);
TEST_EQUAL(s.auto_managed, false);
TEST_EQUAL(s.seed_mode, false);
TEST_EQUAL(s.super_seeding, false);
TEST_EQUAL(s.share_mode, true);
TEST_EQUAL(s.upload_mode, false);
TEST_EQUAL(s.ip_filter_applies, false);
TEST_EQUAL(s.connections_limit, 2);
TEST_EQUAL(s.uploads_limit, 1);
// resume data overrides the auto-managed flag
fprintf(stderr, "flags: auto_managed\n");
s = test_resume_flags(add_torrent_params::flag_auto_managed);
default_tests(s);
TEST_EQUAL(s.save_path, "/add_torrent_params save_path");
TEST_EQUAL(s.sequential_download, false);
TEST_EQUAL(s.paused, false);
TEST_EQUAL(s.auto_managed, false);
TEST_EQUAL(s.seed_mode, false);
TEST_EQUAL(s.super_seeding, false);
TEST_EQUAL(s.share_mode, false);
TEST_EQUAL(s.upload_mode, false);
TEST_EQUAL(s.ip_filter_applies, false);
TEST_EQUAL(s.connections_limit, 1345);
TEST_EQUAL(s.uploads_limit, 1346);
// resume data overrides the paused flag
fprintf(stderr, "flags: paused\n");
s = test_resume_flags(add_torrent_params::flag_paused);
default_tests(s);
TEST_EQUAL(s.save_path, "/add_torrent_params save_path");
TEST_EQUAL(s.sequential_download, false);
TEST_EQUAL(s.paused, false);
TEST_EQUAL(s.auto_managed, false);
TEST_EQUAL(s.seed_mode, false);
TEST_EQUAL(s.super_seeding, false);
TEST_EQUAL(s.share_mode, false);
TEST_EQUAL(s.upload_mode, false);
TEST_EQUAL(s.ip_filter_applies, false);
TEST_EQUAL(s.connections_limit, 1345);
TEST_EQUAL(s.uploads_limit, 1346);
// TODO: 2 test all other resume flags here too. This would require returning
// more than just the torrent_status from test_resume_flags. Also http seeds
// and trackers for instance
return 0;
}
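
Taken together, these cases pin down the revised semantics: fields saved in the resume data win by default, and flag_override_resume_data makes the values set on add_torrent_params win instead. A minimal usage sketch under that reading (the helper name, save path, and the loading of ti/resume are illustrative, not part of this commit):

	#include "libtorrent/session.hpp"
	#include "libtorrent/add_torrent_params.hpp"
	#include <vector>

	// add a torrent paused even if the saved resume data says it was running.
	// ti and resume are assumed to have been loaded elsewhere
	void add_paused(libtorrent::session& ses
		, boost::shared_ptr<libtorrent::torrent_info> ti
		, std::vector<char>& resume)
	{
		using libtorrent::add_torrent_params;
		add_torrent_params p;
		p.ti = ti;
		p.save_path = "./downloads";
		p.resume_data.swap(resume);
		// without flag_override_resume_data, the paused state stored in
		// the resume data would take precedence over flag_paused
		p.flags |= add_torrent_params::flag_override_resume_data;
		p.flags |= add_torrent_params::flag_paused;
		// clear auto-managed, or the queuing logic may unpause the torrent
		p.flags &= ~boost::uint64_t(add_torrent_params::flag_auto_managed);
		ses.add_torrent(p);
	}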


@@ -34,8 +34,8 @@ searches = []

def convert_timestamp(t):
	parts = t.split('.')
-	posix = time.strptime(parts[0], '%H:%M:%S')
-	return (posix.tm_hour * 3600 + posix.tm_min * 60 + posix.tm_sec) * 1000 + int(parts[1])
+	hms = parts[0].split(':')
+	return (int(hms[0]) * 3600 + int(hms[1]) * 60 + int(hms[2])) * 1000 + int(parts[1])

last_incoming = ''


@@ -42,7 +42,7 @@ replot
	out.close()
	gnuplot_scripts += [name]

-gen_stats_gnuplot('dht_routing_table_size', 'nodes', ['active nodes','passive nodes'])
+gen_stats_gnuplot('dht_routing_table_size', 'nodes', ['active nodes','passive nodes', 'confirmed nodes'])
gen_stats_gnuplot('dht_tracker_table_size', '', ['num torrents', 'num peers'])
gen_stats_gnuplot('dht_announces', 'messages per minute', ['announces per min', 'failed announces per min'])
gen_stats_gnuplot('dht_clients', 'messages per minute', ['total msgs per min', 'az msgs per min', 'ut msgs per min', 'lt msgs per min', 'mp msgs per min', 'gr msgs per min'])