premiere-libtorrent/src/torrent.cpp


/*
Copyright (c) 2003, Arvid Norberg
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#include <ctime>
#include <iostream>
#include <fstream>
#include <iomanip>
#include <iterator>
#include <algorithm>
#include <set>
#include <cctype>
#include <numeric>
#ifdef _MSC_VER
#pragma warning(push, 1)
#endif
#include <boost/lexical_cast.hpp>
#include <boost/filesystem/convenience.hpp>
#include <boost/bind.hpp>
#include <boost/thread/mutex.hpp>
#ifdef _MSC_VER
#pragma warning(pop)
#endif
#include "libtorrent/torrent_handle.hpp"
#include "libtorrent/session.hpp"
#include "libtorrent/torrent_info.hpp"
#include "libtorrent/tracker_manager.hpp"
#include "libtorrent/bencode.hpp"
#include "libtorrent/hasher.hpp"
#include "libtorrent/entry.hpp"
#include "libtorrent/peer.hpp"
#include "libtorrent/bt_peer_connection.hpp"
#include "libtorrent/web_peer_connection.hpp"
#include "libtorrent/peer_id.hpp"
#include "libtorrent/alert.hpp"
#include "libtorrent/identify_client.hpp"
#include "libtorrent/alert_types.hpp"
#include "libtorrent/extensions.hpp"
#include "libtorrent/aux_/session_impl.hpp"
using namespace libtorrent;
using namespace boost::posix_time;
using boost::tuples::tuple;
using boost::tuples::get;
using boost::tuples::make_tuple;
using boost::filesystem::complete;
using boost::bind;
using boost::mutex;
using libtorrent::aux::session_impl;
// PROFILING CODE
#ifdef TORRENT_PROFILE
#include <boost/date_time/posix_time/ptime.hpp>
namespace libtorrent
{
namespace
{
using boost::posix_time::ptime;
using boost::posix_time::time_duration;
using boost::posix_time::microsec_clock;
std::vector<std::pair<ptime, std::string> > checkpoints;
}
void add_checkpoint(std::string const& str)
{
checkpoints.push_back(std::make_pair(microsec_clock::universal_time(), str));
}
void print_checkpoints()
{
for (std::vector<std::pair<ptime, std::string> >::iterator i
= checkpoints.begin(); i != checkpoints.end(); ++i)
{
ptime cur = i->first;
if (i + 1 != checkpoints.end())
{
time_duration diff = (i + 1)->first - cur;
std::cout << diff.total_microseconds() << " " << i->second << "\n";
}
else
{
std::cout << " " << i->second << "\n";
}
}
}
}
#endif
namespace
{
enum
{
// wait 60 seconds before retrying a failed tracker
tracker_retry_delay_min = 60
// when tracker_failed_max trackers
// have failed, wait 10 minutes instead
, tracker_retry_delay_max = 10 * 60
, tracker_failed_max = 5
};
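// picks the block size to use for a torrent: the requested default is
// clamped to at least 1 kB, never made larger than the piece size, and
// large pieces are split so that no piece ends up with more than
// piece_picker::max_blocks_per_piece blocks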
int calculate_block_size(const torrent_info& i, int default_block_size)
{
if (default_block_size < 1024) default_block_size = 1024;
// if pieces are too small, adjust the block size
if (i.piece_length() < default_block_size)
{
return static_cast<int>(i.piece_length());
}
// if pieces are too large, adjust the block size
if (i.piece_length() / default_block_size > piece_picker::max_blocks_per_piece)
{
return static_cast<int>(i.piece_length() / piece_picker::max_blocks_per_piece);
}
// otherwise, go with the default
return default_block_size;
}
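// predicate for std::find_if over the session's connection map: matches
// a connection whose remote address equals the given endpoint's address
// and which belongs to the given torrent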
struct find_peer_by_ip
{
find_peer_by_ip(tcp::endpoint const& a, const torrent* t)
: ip(a)
, tor(t)
{ assert(t != 0); }
bool operator()(const session_impl::connection_map::value_type& c) const
{
tcp::endpoint sender = c.first->remote_endpoint();
if (sender.address() != ip.address()) return false;
if (tor != c.second->associated_torrent().lock().get()) return false;
return true;
}
tcp::endpoint const& ip;
torrent const* tor;
};
struct peer_by_id
{
peer_by_id(const peer_id& i): pid(i) {}
bool operator()(const std::pair<tcp::endpoint, peer_connection*>& p) const
{
if (p.second->pid() != pid) return false;
// have a special case for all zeros. We can have any number
// of peers with that pid, since it's used to indicate no pid.
if (std::count(pid.begin(), pid.end(), 0) == 20) return false;
return true;
}
peer_id const& pid;
};
#ifdef TORRENT_LOGGING
void print_legend(boost::shared_ptr<logger> l)
{
(*l) << "1. time, seconds\n"
<< "2. hard send quota, bytes\n"
<< "3. soft send quota, bytes\n"
<< "4. excess bytes sent\n"
<< "5. excess bytes sent last time slice\n"
<< "6. hard receive quota, bytes\n"
<< "7. soft receive quota, bytes\n"
<< "8. excess bytes received\n"
<< "9. excess bytes received last time slice\n"
<< "10. num peers\n"
<< "11. max ul quota limit\n"
<< "12. max dl quota limit\n"
<< "13. bytes sent\n"
<< "14. bytes sent 10 seconds mean\n"
<< "15. bytes received\n"
<< "16. bytes received 10 seconds mean\n"
<< "17. total payload download\n"
<< "18. total web seed payload download\n"
<< "19. total redundant bytes downloaded\n"
<< "\n";
}
#endif
}
namespace libtorrent
{
torrent::torrent(
session_impl& ses
, aux::checker_impl& checker
, torrent_info const& tf
, boost::filesystem::path const& save_path
, tcp::endpoint const& net_interface
, bool compact_mode
, int block_size
, session_settings const& s)
: m_torrent_file(tf)
, m_abort(false)
, m_paused(false)
, m_just_paused(false)
, m_event(tracker_request::started)
, m_block_size(0)
, m_storage(0)
, m_next_request(second_clock::universal_time())
, m_duration(1800)
, m_complete(-1)
, m_incomplete(-1)
, m_host_resolver(ses.m_io_service)
#ifndef TORRENT_DISABLE_DHT
, m_dht_announce_timer(ses.m_io_service)
#endif
, m_policy()
, m_ses(ses)
, m_checker(checker)
, m_picker(0)
, m_trackers(m_torrent_file.trackers())
, m_last_working_tracker(-1)
, m_currently_trying_tracker(0)
, m_failed_trackers(0)
, m_time_scaler(0)
, m_priority(.5)
, m_num_pieces(0)
, m_got_tracker_response(false)
, m_ratio(0.f)
, m_total_failed_bytes(0)
, m_total_redundant_bytes(0)
, m_net_interface(net_interface.address(), 0)
, m_upload_bandwidth_limit(std::numeric_limits<int>::max())
, m_download_bandwidth_limit(std::numeric_limits<int>::max())
, m_excess_ul(0)
, m_excess_dl(0)
, m_soft_ul_limit(10000)
, m_soft_dl_limit(10000)
, m_save_path(complete(save_path))
, m_compact_mode(compact_mode)
, m_default_block_size(block_size)
, m_connections_initialized(true)
, m_settings(s)
{
#ifndef NDEBUG
m_initial_done = 0;
#endif
#ifdef TORRENT_LOGGING
m_log = ses.create_log("torrent_"
+ boost::lexical_cast<std::string>(tf.info_hash())
, m_ses.listen_port(), false);
print_legend(m_log);
m_second_count = 0;
std::fill_n(m_ul_history, 10, 0);
std::fill_n(m_dl_history, 10, 0);
m_peer_log = ses.create_log("torrent_peers_"
+ boost::lexical_cast<std::string>(tf.info_hash())
, m_ses.listen_port(), false);
#endif
INVARIANT_CHECK;
m_uploads_quota.min = 2;
m_connections_quota.min = 2;
// this will be corrected the next time the main session
// distributes resources, i.e. on average in 0.5 seconds
m_connections_quota.given = 100;
m_uploads_quota.max = std::numeric_limits<int>::max();
m_connections_quota.max = std::numeric_limits<int>::max();
m_dl_bandwidth_quota.min = 100;
m_dl_bandwidth_quota.max = resource_request::inf;
if (m_ses.m_download_rate == -1)
{
m_dl_bandwidth_quota.given = resource_request::inf;
}
else
{
m_dl_bandwidth_quota.given = 400;
}
m_ul_bandwidth_quota.min = 100;
m_ul_bandwidth_quota.max = resource_request::inf;
if (m_ses.m_upload_rate == -1)
{
m_ul_bandwidth_quota.given = resource_request::inf;
}
else
{
m_ul_bandwidth_quota.given = 400;
}
m_policy.reset(new policy(this));
init();
#ifndef TORRENT_DISABLE_DHT
if (!tf.priv())
{
m_dht_announce_timer.expires_from_now(seconds(10));
m_dht_announce_timer.async_wait(m_ses.m_strand.wrap(
bind(&torrent::on_dht_announce, this, _1)));
}
#endif
}
torrent::torrent(
session_impl& ses
, aux::checker_impl& checker
, char const* tracker_url
, sha1_hash const& info_hash
, char const* name
, boost::filesystem::path const& save_path
, tcp::endpoint const& net_interface
, bool compact_mode
, int block_size
, session_settings const& s)
: m_torrent_file(info_hash)
, m_abort(false)
, m_paused(false)
, m_just_paused(false)
, m_event(tracker_request::started)
, m_block_size(0)
, m_storage(0)
, m_next_request(second_clock::universal_time())
, m_duration(1800)
, m_complete(-1)
, m_incomplete(-1)
, m_host_resolver(ses.m_io_service)
#ifndef TORRENT_DISABLE_DHT
, m_dht_announce_timer(ses.m_io_service)
#endif
, m_policy()
, m_ses(ses)
, m_checker(checker)
, m_picker(0)
, m_last_working_tracker(-1)
, m_currently_trying_tracker(0)
, m_failed_trackers(0)
, m_time_scaler(0)
, m_priority(.5)
, m_num_pieces(0)
, m_got_tracker_response(false)
, m_ratio(0.f)
, m_total_failed_bytes(0)
, m_total_redundant_bytes(0)
, m_net_interface(net_interface.address(), 0)
, m_upload_bandwidth_limit(std::numeric_limits<int>::max())
, m_download_bandwidth_limit(std::numeric_limits<int>::max())
, m_excess_ul(0)
, m_excess_dl(0)
, m_soft_ul_limit(10000)
, m_soft_dl_limit(10000)
, m_save_path(complete(save_path))
, m_compact_mode(compact_mode)
, m_default_block_size(block_size)
, m_connections_initialized(false)
, m_settings(s)
{
#ifndef NDEBUG
m_initial_done = 0;
#endif
#ifdef TORRENT_LOGGING
m_log = ses.create_log("torrent_"
+ boost::lexical_cast<std::string>(info_hash)
, m_ses.listen_port(), true);
print_legend(m_log);
m_second_count = 0;
std::fill_n(m_ul_history, 10, 0);
std::fill_n(m_dl_history, 10, 0);
#endif
INVARIANT_CHECK;
if (name) m_name.reset(new std::string(name));
m_uploads_quota.min = 2;
m_connections_quota.min = 2;
// this will be corrected the next time the main session
// distributes resources, i.e. on average in 0.5 seconds
m_connections_quota.given = 100;
m_uploads_quota.max = std::numeric_limits<int>::max();
m_connections_quota.max = std::numeric_limits<int>::max();
m_dl_bandwidth_quota.min = 100;
m_dl_bandwidth_quota.max = resource_request::inf;
if (m_ses.m_download_rate == -1)
{
m_dl_bandwidth_quota.given = resource_request::inf;
}
else
{
m_dl_bandwidth_quota.given = 400;
}
m_ul_bandwidth_quota.min = 100;
m_ul_bandwidth_quota.max = resource_request::inf;
if (m_ses.m_upload_rate == -1)
{
m_ul_bandwidth_quota.given = resource_request::inf;
}
else
{
m_ul_bandwidth_quota.given = 400;
}
m_trackers.push_back(announce_entry(tracker_url));
m_policy.reset(new policy(this));
m_torrent_file.add_tracker(tracker_url);
#ifndef TORRENT_DISABLE_DHT
m_dht_announce_timer.expires_from_now(seconds(10));
m_dht_announce_timer.async_wait(m_ses.m_strand.wrap(
bind(&torrent::on_dht_announce, this, _1)));
#endif
}
torrent::~torrent()
{
// Since the torrent is being destructed, all weak references to it
// have been reset, which means its peers would already hold an
// invalidated torrent pointer (so it cannot be verified to be correct).
// The invariant therefore only holds if all connections have been
// closed by the time the torrent is destructed, and they are supposed
// to be closed by then, so we can still do the invariant check.
assert(m_connections.empty());
INVARIANT_CHECK;
if (m_ses.is_aborted())
m_abort = true;
if (!m_connections.empty())
disconnect_all();
}
std::string torrent::name() const
{
if (valid_metadata()) return m_torrent_file.name();
if (m_name) return *m_name;
return "";
}
#ifndef TORRENT_DISABLE_EXTENSIONS
void torrent::add_extension(boost::shared_ptr<torrent_plugin> ext)
{
m_extensions.push_back(ext);
}
#endif
void torrent::init()
{
INVARIANT_CHECK;
assert(m_torrent_file.is_valid());
assert(m_torrent_file.num_files() > 0);
assert(m_torrent_file.total_size() >= 0);
m_have_pieces.resize(m_torrent_file.num_pieces(), false);
m_storage.reset(new piece_manager(m_torrent_file, m_save_path, m_ses.m_files));
m_block_size = calculate_block_size(m_torrent_file, m_default_block_size);
m_picker.reset(new piece_picker(
static_cast<int>(m_torrent_file.piece_length() / m_block_size)
, static_cast<int>((m_torrent_file.total_size()+m_block_size-1)/m_block_size)));
std::vector<std::string> const& url_seeds = m_torrent_file.url_seeds();
std::copy(url_seeds.begin(), url_seeds.end(), std::inserter(m_web_seeds
, m_web_seeds.begin()));
}
void torrent::use_interface(const char* net_interface)
{
INVARIANT_CHECK;
m_net_interface = tcp::endpoint(address::from_string(net_interface), 0);
}
#ifndef TORRENT_DISABLE_DHT
void torrent::on_dht_announce_response_disp(boost::weak_ptr<libtorrent::torrent> t
, std::vector<tcp::endpoint> const& peers)
{
boost::shared_ptr<libtorrent::torrent> tor = t.lock();
if (!tor) return;
tor->on_dht_announce_response(peers);
}
void torrent::on_dht_announce(asio::error_code const& e)
{
if (e) return;
m_dht_announce_timer.expires_from_now(boost::posix_time::minutes(30));
m_dht_announce_timer.async_wait(m_ses.m_strand.wrap(
bind(&torrent::on_dht_announce, this, _1)));
if (!m_ses.m_dht) return;
// TODO: There should be a way to abort an announce operation on the dht.
// when the torrent is destructed
boost::weak_ptr<torrent> self(shared_from_this());
m_ses.m_dht->announce(m_torrent_file.info_hash()
, m_ses.m_listen_interface.port()
, m_ses.m_strand.wrap(bind(&torrent::on_dht_announce_response_disp, self, _1)));
}
void torrent::on_dht_announce_response(std::vector<tcp::endpoint> const& peers)
{
std::for_each(peers.begin(), peers.end(), bind(
&policy::peer_from_tracker, boost::ref(m_policy), _1, peer_id(0)));
}
#endif
// returns true if it is time for this torrent to make another
// tracker request
bool torrent::should_request()
{
INVARIANT_CHECK;
if (m_torrent_file.trackers().empty()) return false;
if (m_just_paused)
{
m_just_paused = false;
return true;
}
return !m_paused &&
m_next_request < second_clock::universal_time();
}
void torrent::tracker_warning(std::string const& msg)
{
INVARIANT_CHECK;
if (m_ses.m_alerts.should_post(alert::warning))
{
m_ses.m_alerts.post_alert(tracker_warning_alert(get_handle(), msg));
}
}
void torrent::tracker_response(
tracker_request const&
, std::vector<peer_entry>& peer_list
, int interval
, int complete
, int incomplete)
{
INVARIANT_CHECK;
session_impl::mutex_t::scoped_lock l(m_ses.m_mutex);
m_failed_trackers = 0;
// announce intervals less than 5 minutes
// are insane.
if (interval < 60 * 5) interval = 60 * 5;
m_last_working_tracker
= prioritize_tracker(m_currently_trying_tracker);
m_currently_trying_tracker = 0;
m_duration = interval;
m_next_request = second_clock::universal_time() + boost::posix_time::seconds(m_duration);
if (complete >= 0) m_complete = complete;
if (incomplete >= 0) m_incomplete = incomplete;
// connect to random peers from the list
std::random_shuffle(peer_list.begin(), peer_list.end());
#if defined(TORRENT_VERBOSE_LOGGING) || defined(TORRENT_LOGGING)
std::stringstream s;
s << "TRACKER RESPONSE:\n"
"interval: " << m_duration << "\n"
"peers:\n";
for (std::vector<peer_entry>::const_iterator i = peer_list.begin();
i != peer_list.end(); ++i)
{
s << " " << std::setfill(' ') << std::setw(16) << i->ip
<< " " << std::setw(5) << std::dec << i->port << " ";
if (!i->pid.is_all_zeros()) s << " " << i->pid << " " << identify_client(i->pid);
s << "\n";
}
debug_log(s.str());
#endif
// for each of the peers we got from the tracker
for (std::vector<peer_entry>::iterator i = peer_list.begin();
i != peer_list.end(); ++i)
{
// don't make connections to ourself
if (i->pid == m_ses.get_peer_id())
continue;
tcp::endpoint a(address::from_string(i->ip), i->port);
if (m_ses.m_ip_filter.access(a.address()) & ip_filter::blocked)
{
#if defined(TORRENT_VERBOSE_LOGGING) || defined(TORRENT_LOGGING)
debug_log("blocked ip from tracker: " + i->ip);
#endif
continue;
}
m_policy->peer_from_tracker(a, i->pid);
}
if (m_ses.m_alerts.should_post(alert::info))
{
std::stringstream s;
s << "Got response from tracker: "
<< m_trackers[m_last_working_tracker].url;
m_ses.m_alerts.post_alert(tracker_reply_alert(
get_handle(), s.str()));
}
m_got_tracker_response = true;
}
size_type torrent::bytes_left() const
{
// if we don't have the metadata yet, we
// cannot tell how big the torrent is.
if (!valid_metadata()) return -1;
return m_torrent_file.total_size()
- quantized_bytes_done();
}
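// number of bytes we have, rounded down to whole pieces. Only complete
// pieces are counted, with a correction for the (possibly shorter)
// last piece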
size_type torrent::quantized_bytes_done() const
{
// INVARIANT_CHECK;
if (!valid_metadata()) return 0;
if (m_torrent_file.num_pieces() == 0)
return 0;
if (is_seed()) return m_torrent_file.total_size();
const int last_piece = m_torrent_file.num_pieces() - 1;
size_type total_done
= m_num_pieces * m_torrent_file.piece_length();
// if we have the last piece, we have to correct
// the amount we have, since the first calculation
// assumed all pieces were of equal size
if (m_have_pieces[last_piece])
{
int corr = m_torrent_file.piece_size(last_piece)
- m_torrent_file.piece_length();
total_done += corr;
}
return total_done;
}
// the first value is the total number of bytes downloaded
// the second value is the number of those bytes that belong to
// pieces that have not been filtered out (i.e. bytes we actually want)
tuple<size_type, size_type> torrent::bytes_done() const
{
INVARIANT_CHECK;
if (!valid_metadata()) return tuple<size_type, size_type>(0,0);
if (m_torrent_file.num_pieces() == 0)
return tuple<size_type, size_type>(0,0);
const int last_piece = m_torrent_file.num_pieces() - 1;
if (is_seed())
return make_tuple(m_torrent_file.total_size()
, m_torrent_file.total_size());
size_type wanted_done = (m_num_pieces - m_picker->num_have_filtered())
* m_torrent_file.piece_length();
size_type total_done
= m_num_pieces * m_torrent_file.piece_length();
assert(m_num_pieces < m_torrent_file.num_pieces());
// if we have the last piece, we have to correct
// the amount we have, since the first calculation
// assumed all pieces were of equal size
if (m_have_pieces[last_piece])
{
int corr = m_torrent_file.piece_size(last_piece)
- m_torrent_file.piece_length();
total_done += corr;
if (!m_picker->is_filtered(last_piece))
wanted_done += corr;
}
assert(total_done <= m_torrent_file.total_size());
assert(wanted_done <= m_torrent_file.total_size());
const std::vector<piece_picker::downloading_piece>& dl_queue
= m_picker->get_download_queue();
const int blocks_per_piece = static_cast<int>(
m_torrent_file.piece_length() / m_block_size);
for (std::vector<piece_picker::downloading_piece>::const_iterator i =
dl_queue.begin(); i != dl_queue.end(); ++i)
{
int corr = 0;
int index = i->index;
assert(!m_have_pieces[index]);
assert(int(i->finished_blocks.count())
< m_picker->blocks_in_piece(index));
#ifndef NDEBUG
for (std::vector<piece_picker::downloading_piece>::const_iterator j = boost::next(i);
j != dl_queue.end(); ++j)
{
assert(j->index != index);
}
#endif
for (int j = 0; j < blocks_per_piece; ++j)
{
assert(i->finished_blocks[j] == 0 || i->finished_blocks[j] == 1);
assert(m_picker->is_finished(piece_block(index, j)) == i->finished_blocks[j]);
corr += i->finished_blocks[j] * m_block_size;
assert(index != last_piece || j < m_picker->blocks_in_last_piece()
|| i->finished_blocks[j] == 0);
}
// correction if this was the last piece
// and if we have the last block
if (i->index == last_piece
&& i->finished_blocks[m_picker->blocks_in_last_piece()-1])
{
corr -= m_block_size;
corr += m_torrent_file.piece_size(last_piece) % m_block_size;
}
total_done += corr;
if (!m_picker->is_filtered(index))
wanted_done += corr;
}
assert(total_done < m_torrent_file.total_size());
assert(wanted_done < m_torrent_file.total_size());
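// add the bytes of blocks that are still in transit. The map is keyed
// on piece_block and keeps the largest byte count reported by any peer
// for that block, so two peers downloading the same block are not
// counted twice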
std::map<piece_block, int> downloading_piece;
for (const_peer_iterator i = begin(); i != end(); ++i)
{
peer_connection* pc = i->second;
boost::optional<piece_block_progress> p
= pc->downloading_piece_progress();
if (p)
{
if (m_have_pieces[p->piece_index])
continue;
piece_block block(p->piece_index, p->block_index);
if (m_picker->is_finished(block))
continue;
std::map<piece_block, int>::iterator dp
= downloading_piece.find(block);
if (dp != downloading_piece.end())
{
if (dp->second < p->bytes_downloaded)
dp->second = p->bytes_downloaded;
}
else
{
downloading_piece[block] = p->bytes_downloaded;
}
#ifndef NDEBUG
assert(p->bytes_downloaded <= p->full_block_bytes);
int last_piece = m_torrent_file.num_pieces() - 1;
if (p->piece_index == last_piece
&& p->block_index == m_torrent_file.piece_size(last_piece) / block_size())
assert(p->full_block_bytes == m_torrent_file.piece_size(last_piece) % block_size());
else
assert(p->full_block_bytes == block_size());
#endif
}
}
for (std::map<piece_block, int>::iterator i = downloading_piece.begin();
i != downloading_piece.end(); ++i)
{
total_done += i->second;
if (!m_picker->is_filtered(i->first.piece_index))
wanted_done += i->second;
}
#ifndef NDEBUG
if (total_done >= m_torrent_file.total_size())
{
std::copy(m_have_pieces.begin(), m_have_pieces.end()
, std::ostream_iterator<bool>(std::cerr, " "));
std::cerr << std::endl;
std::cerr << "num_pieces: " << m_num_pieces << std::endl;
std::cerr << "unfinished:" << std::endl;
for (std::vector<piece_picker::downloading_piece>::const_iterator i =
dl_queue.begin(); i != dl_queue.end(); ++i)
{
std::cerr << " " << i->index << " ";
for (int j = 0; j < blocks_per_piece; ++j)
{
std::cerr << i->finished_blocks[j];
}
std::cerr << std::endl;
}
std::cerr << "downloading pieces:" << std::endl;
for (std::map<piece_block, int>::iterator i = downloading_piece.begin();
i != downloading_piece.end(); ++i)
{
std::cerr << " " << i->first.piece_index << ":" << i->first.block_index
<< " " << i->second << std::endl;
}
}
assert(total_done < m_torrent_file.total_size());
assert(wanted_done < m_torrent_file.total_size());
#endif
assert(total_done >= wanted_done);
return make_tuple(total_done, wanted_done);
}
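// called when a piece fails its hash check: posts an alert, adds the
// piece to m_total_failed_bytes, penalizes every peer that contributed
// to the piece (banning a peer whose trust drops too low, or the sole
// sender), and finally restores the piece in the picker and marks it
// as failed in the storage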
void torrent::piece_failed(int index)
{
// if the last piece fails the peer connection will still
// think that it has received all of it until this function
// resets the download queue. So, we cannot do the
// invariant check here since it assumes:
// (total_done == m_torrent_file.total_size()) => is_seed()
// INVARIANT_CHECK;
assert(m_storage.get());
assert(m_picker.get());
assert(index >= 0);
assert(index < m_torrent_file.num_pieces());
if (m_ses.m_alerts.should_post(alert::info))
{
std::stringstream s;
s << "hash for piece " << index << " failed";
m_ses.m_alerts.post_alert(hash_failed_alert(get_handle(), index, s.str()));
}
// increase the total amount of failed bytes
m_total_failed_bytes += m_torrent_file.piece_size(index);
std::vector<tcp::endpoint> downloaders;
m_picker->get_downloaders(downloaders, index);
// decrease the trust point of all peers that sent
// parts of this piece.
// first, build a set of all peers that participated
std::set<tcp::endpoint> peers;
std::copy(downloaders.begin(), downloaders.end(), std::inserter(peers, peers.begin()));
#ifndef TORRENT_DISABLE_EXTENSIONS
for (extension_list_t::iterator i = m_extensions.begin()
, end(m_extensions.end()); i != end; ++i)
{
try { (*i)->on_piece_failed(index); } catch (std::exception&) {}
}
#endif
for (std::set<tcp::endpoint>::iterator i = peers.begin()
, end(peers.end()); i != end; ++i)
{
peer_iterator p = m_connections.find(*i);
if (p == m_connections.end()) continue;
p->second->received_invalid_data(index);
// either we have received too many failed hashes,
// or this was the only peer that sent us this piece.
// TODO: make this a configurable setting
if (p->second->trust_points() <= -7 || peers.size() == 1)
2003-12-01 22:27:27 +01:00
{
// we don't trust this peer anymore
// ban it.
if (m_ses.m_alerts.should_post(alert::info))
{
m_ses.m_alerts.post_alert(peer_ban_alert(
p->first
, get_handle()
, "banning peer because of too many corrupt pieces"));
}
m_policy->ban_peer(*p->second);
#if defined(TORRENT_VERBOSE_LOGGING)
(*p->second->m_logger) << "*** BANNING PEER 'too many corrupt pieces'\n";
#endif
p->second->disconnect();
}
}
// we have to let the piece_picker know that this piece failed the
// check, so it can restore the piece and mark it as interesting
// for download again.
// TODO: do this more intelligently, and keep track of how much
// garbage (data that failed the hash check) and how much redundant
// data we have downloaded. If some clients have sent more than one
// piece, start by re-downloading the pieces sent by the client
// that has contributed the fewest pieces
m_picker->restore_piece(index);
m_storage->mark_failed(index);
assert(m_have_pieces[index] == false);
}
void torrent::abort()
{
INVARIANT_CHECK;
m_abort = true;
// if the torrent is paused, it doesn't need
// to announce with event=stopped again.
if (!m_paused)
m_event = tracker_request::stopped;
// disconnect all peers and close all
// files belonging to the torrent
disconnect_all();
if (m_storage.get()) m_storage->release_files();
}
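// called when a piece passes its hash check: records that we now have
// the piece, tells the picker and every connection about it, rewards
// the peers that sent the piece, notifies the extensions and, once we
// have become a seed, drops the piece picker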
void torrent::announce_piece(int index)
{
// INVARIANT_CHECK;
assert(index >= 0);
assert(index < m_torrent_file.num_pieces());
std::vector<tcp::endpoint> downloaders;
m_picker->get_downloaders(downloaders, index);
// increase the trust point of all peers that sent
// parts of this piece.
std::set<tcp::endpoint> peers;
std::copy(downloaders.begin(), downloaders.end(), std::inserter(peers, peers.begin()));
if (!m_have_pieces[index])
m_num_pieces++;
m_have_pieces[index] = true;
assert(std::accumulate(m_have_pieces.begin(), m_have_pieces.end(), 0)
== m_num_pieces);
m_picker->we_have(index);
for (peer_iterator i = m_connections.begin(); i != m_connections.end(); ++i)
i->second->announce_piece(index);
for (std::set<tcp::endpoint>::iterator i = peers.begin()
, end(peers.end()); i != end; ++i)
{
peer_iterator p = m_connections.find(*i);
if (p == m_connections.end()) continue;
p->second->received_valid_data(index);
}
#ifndef TORRENT_DISABLE_EXTENSIONS
for (extension_list_t::iterator i = m_extensions.begin()
, end(m_extensions.end()); i != end; ++i)
{
try { (*i)->on_piece_pass(index); } catch (std::exception&) {}
}
#endif
if (is_seed()) m_picker.reset();
}
std::string torrent::tracker_login() const
{
if (m_username.empty() && m_password.empty()) return "";
return m_username + ":" + m_password;
}
void torrent::filter_piece(int index, bool filter)
{
INVARIANT_CHECK;
assert(valid_metadata());
if (is_seed()) return;
// this call is only valid on torrents with metadata
assert(m_picker.get());
assert(index >= 0);
assert(index < m_torrent_file.num_pieces());
// TODO: update peer's interesting-bit
if (filter) m_picker->mark_as_filtered(index);
else m_picker->mark_as_unfiltered(index);
}
void torrent::filter_pieces(std::vector<bool> const& bitmask)
{
INVARIANT_CHECK;
// this call is only valid on torrents with metadata
assert(valid_metadata());
if (is_seed()) return;
assert(m_picker.get());
// TODO: update peer's interesting-bit
std::vector<int> state;
state.reserve(100);
int index = 0;
for (std::vector<bool>::const_iterator i = bitmask.begin()
, end(bitmask.end()); i != end; ++i, ++index)
{
if (m_picker->is_filtered(index) == *i) continue;
if (*i)
m_picker->mark_as_filtered(index);
else
state.push_back(index);
}
for (std::vector<int>::reverse_iterator i = state.rbegin();
i != state.rend(); ++i)
{
m_picker->mark_as_unfiltered(*i);
}
}
bool torrent::is_piece_filtered(int index) const
{
// this call is only valid on torrents with metadata
assert(valid_metadata());
if (is_seed()) return false;
assert(m_picker.get());
assert(index >= 0);
assert(index < m_torrent_file.num_pieces());
return m_picker->is_filtered(index);
}
void torrent::filtered_pieces(std::vector<bool>& bitmask) const
{
INVARIANT_CHECK;
// this call is only valid on torrents with metadata
assert(valid_metadata());
if (is_seed())
{
bitmask.clear();
bitmask.resize(m_torrent_file.num_pieces(), false);
return;
}
assert(m_picker.get());
m_picker->filtered_pieces(bitmask);
}
void torrent::filter_files(std::vector<bool> const& bitmask)
{
INVARIANT_CHECK;
// this call is only valid on torrents with metadata
if (!valid_metadata() || is_seed()) return;
// the bitmask needs to have exactly one bit for every file
// in the torrent
assert((int)bitmask.size() == m_torrent_file.num_files());
size_type position = 0;
if (m_torrent_file.num_pieces())
{
int piece_length = m_torrent_file.piece_length();
// mark all pieces as filtered, then clear the bits for files
// that should be downloaded
std::vector<bool> piece_filter(m_torrent_file.num_pieces(), true);
for (int i = 0; i < (int)bitmask.size(); ++i)
{
size_type start = position;
position += m_torrent_file.file_at(i).size;
// is the file selected for download?
if (!bitmask[i])
{
// mark all pieces of the file as downloadable
int start_piece = int(start / piece_length);
int last_piece = int(position / piece_length);
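// start_piece..last_piece is the inclusive range of pieces this file
// touches. e.g. with a 256 kB piece size, a file occupying bytes
// [300 kB, 700 kB) of the torrent yields start_piece = 1 and
// last_piece = 2, so pieces 1 and 2 are marked downloadable below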
// if one piece spans several files, we might
// come here several times with the same start_piece, last_piece
std::fill(piece_filter.begin() + start_piece, piece_filter.begin()
+ last_piece + 1, false);
}
}
filter_pieces(piece_filter);
}
}
void torrent::replace_trackers(std::vector<announce_entry> const& urls)
{
assert(!urls.empty());
m_trackers = urls;
if (m_currently_trying_tracker >= (int)m_trackers.size())
m_currently_trying_tracker = (int)m_trackers.size()-1;
m_last_working_tracker = -1;
}
tracker_request torrent::generate_tracker_request()
{
INVARIANT_CHECK;
m_next_request
= second_clock::universal_time()
+ boost::posix_time::seconds(tracker_retry_delay_max);
tracker_request req;
req.info_hash = m_torrent_file.info_hash();
req.pid = m_ses.get_peer_id();
req.downloaded = m_stat.total_payload_download();
req.web_downloaded = m_web_stat.total_payload_download();
req.uploaded = m_stat.total_payload_upload();
req.left = bytes_left();
if (req.left == -1) req.left = 16*1024;
req.event = m_event;
if (m_event != tracker_request::stopped)
m_event = tracker_request::none;
req.url = m_trackers[m_currently_trying_tracker].url;
assert(m_connections_quota.given > 0);
req.num_want = std::max(
(m_connections_quota.given
- m_policy->num_peers()), 10);
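// i.e. ask for enough peers to fill the remaining connection quota,
// but never request fewer than 10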
// if we are aborting, we don't want any new peers
if (req.event == tracker_request::stopped)
req.num_want = 0;
// default initialize, these should be set by caller
// before passing the request to the tracker_manager
req.listen_port = 0;
req.key = 0;
return req;
}
void torrent::remove_peer(peer_connection* p) try
{
INVARIANT_CHECK;
assert(p != 0);
peer_iterator i = m_connections.find(p->remote());
if (i == m_connections.end()) return;
if (ready_for_connections())
{
assert(p->associated_torrent().lock().get() == this);
std::vector<int> piece_list;
const std::vector<bool>& pieces = p->get_bitfield();
for (std::vector<bool>::const_iterator i = pieces.begin();
i != pieces.end(); ++i)
{
if (*i) piece_list.push_back(static_cast<int>(i - pieces.begin()));
}
for (std::vector<int>::reverse_iterator i = piece_list.rbegin();
i != piece_list.rend(); ++i)
{
peer_lost(*i);
}
}
m_policy->connection_closed(*p);
m_connections.erase(i);
#ifndef NDEBUG
m_policy->check_invariant();
#endif
}
catch (std::exception& e)
{
#ifndef NDEBUG
std::string err = e.what();
#endif
assert(false);
};
void torrent::connect_to_url_seed(std::string const& url)
{
INVARIANT_CHECK;
#if defined(TORRENT_VERBOSE_LOGGING) || defined(TORRENT_LOGGING)
std::string now(to_simple_string(second_clock::universal_time()));
(*m_ses.m_logger) << now << " resolving: " << url << "\n";
#endif
std::string protocol;
std::string hostname;
int port;
std::string path;
boost::tie(protocol, hostname, port, path)
= parse_url_components(url);
m_resolving_web_seeds.insert(url);
if (m_ses.settings().proxy_ip.empty())
{
tcp::resolver::query q(hostname, boost::lexical_cast<std::string>(port));
m_host_resolver.async_resolve(q, m_ses.m_strand.wrap(
bind(&torrent::on_name_lookup, shared_from_this(), _1, _2, url)));
}
else
{
// use proxy
tcp::resolver::query q(m_ses.settings().proxy_ip
, boost::lexical_cast<std::string>(m_ses.settings().proxy_port));
m_host_resolver.async_resolve(q, m_ses.m_strand.wrap(
bind(&torrent::on_name_lookup, shared_from_this(), _1, _2, url)));
}
}
void torrent::on_name_lookup(asio::error_code const& e, tcp::resolver::iterator host
, std::string url) try
{
session_impl::mutex_t::scoped_lock l(m_ses.m_mutex);
INVARIANT_CHECK;
#if defined(TORRENT_VERBOSE_LOGGING) || defined(TORRENT_LOGGING)
std::string now(to_simple_string(second_clock::universal_time()));
(*m_ses.m_logger) << now << " completed resolve: " << url << "\n";
#endif
std::set<std::string>::iterator i = m_resolving_web_seeds.find(url);
if (i != m_resolving_web_seeds.end()) m_resolving_web_seeds.erase(i);
if (e || host == tcp::resolver::iterator())
{
if (m_ses.m_alerts.should_post(alert::warning))
{
std::stringstream msg;
msg << "HTTP seed hostname lookup failed: " << e.message();
m_ses.m_alerts.post_alert(
url_seed_alert(url, msg.str()));
}
#if defined(TORRENT_VERBOSE_LOGGING) || defined(TORRENT_LOGGING)
(*m_ses.m_logger) << " ** HOSTNAME LOOKUP FAILED!**: " << url << "\n";
#endif
// the name lookup failed for the http host. Don't try
// this host again
remove_url_seed(url);
return;
}
if (m_ses.is_aborted()) return;
tcp::endpoint a(host->endpoint());
if (m_ses.m_ip_filter.access(a.address()) & ip_filter::blocked)
{
// TODO: post alert: "web seed at " + a.address().to_string() + " blocked by ip filter");
return;
}
boost::shared_ptr<stream_socket> s(new stream_socket(m_ses.m_io_service));
boost::intrusive_ptr<peer_connection> c(new web_peer_connection(
m_ses, shared_from_this(), s, a, url));
#if defined(TORRENT_VERBOSE_LOGGING) || defined(TORRENT_LOGGING)
(*m_ses.m_logger) << "add url seed (" << c->m_dl_bandwidth_quota.given << ", "
<< c->m_dl_bandwidth_quota.used << ") ";
(*m_ses.m_logger) << "\n";
#endif
#ifndef NDEBUG
c->m_in_constructor = false;
#endif
try
{
m_ses.m_connection_queue.push_back(c);
assert(m_connections.find(a) == m_connections.end());
#ifndef NDEBUG
m_policy->check_invariant();
#endif
// add the newly connected peer to this torrent's peer list
m_connections.insert(
std::make_pair(a, boost::get_pointer(c)));
#ifndef NDEBUG
m_policy->check_invariant();
#endif
m_ses.process_connection_queue();
}
catch (std::exception& e)
{
#if defined(TORRENT_VERBOSE_LOGGING) || defined(TORRENT_LOGGING)
(*m_ses.m_logger) << " ** HOSTNAME LOOKUP FAILED!**: " << e.what() << "\n";
#endif
// TODO: post an error alert!
std::map<tcp::endpoint, peer_connection*>::iterator i = m_connections.find(a);
if (i != m_connections.end()) m_connections.erase(i);
m_ses.connection_failed(s, a, e.what());
c->disconnect();
}
}
catch (std::exception& exc)
{
assert(false);
};
peer_connection& torrent::connect_to_peer(const tcp::endpoint& a)
{
INVARIANT_CHECK;
if (m_ses.m_ip_filter.access(a.address()) & ip_filter::blocked)
throw protocol_error(a.address().to_string() + " blocked by ip filter");
if (m_connections.find(a) != m_connections.end())
throw protocol_error("already connected to peer");
boost::shared_ptr<stream_socket> s(new stream_socket(m_ses.m_io_service));
boost::intrusive_ptr<peer_connection> c(new bt_peer_connection(
m_ses, shared_from_this(), s, a));
#if defined(TORRENT_VERBOSE_LOGGING) || defined(TORRENT_LOGGING)
(*m_ses.m_logger) << "connect_to_peer (" << c->m_dl_bandwidth_quota.given << ", "
<< c->m_dl_bandwidth_quota.used << ") ";
(*m_ses.m_logger) << "\n";
#endif
#ifndef NDEBUG
c->m_in_constructor = false;
#endif
#ifndef TORRENT_DISABLE_EXTENSIONS
for (extension_list_t::iterator i = m_extensions.begin()
, end(m_extensions.end()); i != end; ++i)
{
boost::shared_ptr<peer_plugin> pp((*i)->new_connection(c.get()));
if (pp) c->add_extension(pp);
}
#endif
try
{
m_ses.m_connection_queue.push_back(c);
assert(m_connections.find(a) == m_connections.end());
#ifndef NDEBUG
m_policy->check_invariant();
#endif
// add the newly connected peer to this torrent's peer list
m_connections.insert(
std::make_pair(a, boost::get_pointer(c)));
#ifndef NDEBUG
m_policy->check_invariant();
#endif
m_ses.process_connection_queue();
}
catch (std::exception& e)
{
// TODO: post an error alert!
std::map<tcp::endpoint, peer_connection*>::iterator i = m_connections.find(a);
if (i != m_connections.end()) m_connections.erase(i);
m_ses.connection_failed(s, a, e.what());
c->disconnect();
throw;
}
if (c->is_disconnecting()) throw protocol_error("failed to connect");
return *c;
}
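// called once the metadata (the info dictionary) has been received,
// e.g. through the metadata extension: parses the info section, hands
// the torrent over to the checker thread's queue (removing it from the
// session's torrent map) and wakes the checker up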
void torrent::set_metadata(entry const& metadata)
{
m_torrent_file.parse_info_section(metadata);
boost::mutex::scoped_lock(m_checker.m_mutex);
boost::shared_ptr<aux::piece_checker_data> d(
new aux::piece_checker_data);
d->torrent_ptr = shared_from_this();
d->save_path = m_save_path;
d->info_hash = m_torrent_file.info_hash();
// add the torrent to the queue to be checked
m_checker.m_torrents.push_back(d);
typedef session_impl::torrent_map torrent_map;
torrent_map::iterator i = m_ses.m_torrents.find(
m_torrent_file.info_hash());
assert(i != m_ses.m_torrents.end());
m_ses.m_torrents.erase(i);
// and notify the thread that it got another
// job in its queue
m_checker.m_cond.notify_one();
if (m_ses.m_alerts.should_post(alert::info))
{
m_ses.m_alerts.post_alert(metadata_received_alert(
get_handle(), "metadata successfully received from swarm"));
}
}
void torrent::attach_peer(peer_connection* p)
{
INVARIANT_CHECK;
assert(p != 0);
assert(!p->is_local());
std::map<tcp::endpoint, peer_connection*>::iterator c
= m_connections.find(p->remote());
if (c != m_connections.end())
{
// we already have a peer_connection to this ip.
// It may currently be waiting to complete a
// connection attempt that might fail, so
// prioritize this incoming connection instead,
// since it has already succeeded.
if (!c->second->is_connecting())
{
throw protocol_error("already connected to peer");
}
c->second->disconnect();
}
if (m_ses.m_connections.find(p->get_socket())
== m_ses.m_connections.end())
{
throw protocol_error("peer is not properly constructed");
}
if (m_ses.is_aborted())
{
throw protocol_error("session is closing");
}
peer_iterator ci = m_connections.insert(
std::make_pair(p->remote(), p)).first;
try
{
// if new_connection throws, we have to remove
// the connection from the list again.
#ifndef TORRENT_DISABLE_EXTENSIONS
for (extension_list_t::iterator i = m_extensions.begin()
, end(m_extensions.end()); i != end; ++i)
{
boost::shared_ptr<peer_plugin> pp((*i)->new_connection(p));
if (pp) p->add_extension(pp);
}
#endif
m_policy->new_connection(*ci->second);
}
catch (std::exception& e)
{
m_connections.erase(ci);
throw;
}
#ifndef NDEBUG
assert(p->remote() == p->get_socket()->remote_endpoint());
#endif
#ifndef NDEBUG
m_policy->check_invariant();
#endif
}
void torrent::disconnect_all()
{
session_impl::mutex_t::scoped_lock l(m_ses.m_mutex);
INVARIANT_CHECK;
while (!m_connections.empty())
{
peer_connection& p = *m_connections.begin()->second;
assert(p.associated_torrent().lock().get() == this);
#if defined(TORRENT_VERBOSE_LOGGING)
if (m_abort)
(*p.m_logger) << "*** CLOSING CONNECTION 'aborting'\n";
else
(*p.m_logger) << "*** CLOSING CONNECTION 'pausing'\n";
#endif
#ifndef NDEBUG
std::size_t size = m_connections.size();
#endif
p.disconnect();
assert(m_connections.size() <= size);
}
}
// called when the torrent is finished (all the pieces we want have been downloaded)
void torrent::finished()
{
INVARIANT_CHECK;
if (alerts().should_post(alert::info))
{
alerts().post_alert(torrent_finished_alert(
get_handle()
, "torrent has finished downloading"));
}
// disconnect all seeds
// TODO: should disconnect all peers that have the pieces we have
// not just seeds
std::vector<peer_connection*> seeds;
for (peer_iterator i = m_connections.begin();
i != m_connections.end(); ++i)
{
assert(i->second->associated_torrent().lock().get() == this);
if (i->second->is_seed())
{
#if defined(TORRENT_VERBOSE_LOGGING)
(*i->second->m_logger) << "*** SEED, CLOSING CONNECTION\n";
#endif
seeds.push_back(i->second);
}
}
std::for_each(seeds.begin(), seeds.end()
, bind(&peer_connection::disconnect, _1));
m_storage->release_files();
}
// called when torrent is complete (all pieces downloaded)
void torrent::completed()
{
INVARIANT_CHECK;
// make the next tracker request
// be a completed-event
m_event = tracker_request::completed;
force_tracker_request();
}
// this will move the tracker with the given index
// to a prioritized position in the list (move it towards
// the beginning) and return the tracker's new index.
int torrent::prioritize_tracker(int index)
{
INVARIANT_CHECK;
assert(index >= 0);
if (index >= (int)m_trackers.size()) return (int)m_trackers.size()-1;
while (index > 0 && m_trackers[index].tier == m_trackers[index-1].tier)
{
std::swap(m_trackers[index].url, m_trackers[index-1].url);
--index;
}
return index;
}
void torrent::try_next_tracker()
{
INVARIANT_CHECK;
using namespace boost::posix_time;
++m_currently_trying_tracker;
if ((unsigned)m_currently_trying_tracker >= m_trackers.size())
{
int delay = tracker_retry_delay_min
+ std::min(m_failed_trackers, (int)tracker_failed_max)
* (tracker_retry_delay_max - tracker_retry_delay_min)
/ tracker_failed_max;
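// the delay grows linearly from tracker_retry_delay_min (60 s) to
// tracker_retry_delay_max (600 s) with the number of failed rounds,
// e.g. with 2 previous failures: 60 + 2 * (600 - 60) / 5 = 276 seconds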
++m_failed_trackers;
// if we've looped the tracker list, wait a bit before retrying
m_currently_trying_tracker = 0;
m_next_request = second_clock::universal_time() + seconds(delay);
}
else
{
// don't delay before trying the next tracker
m_next_request = second_clock::universal_time();
}
}
bool torrent::check_fastresume(aux::piece_checker_data& data)
{
INVARIANT_CHECK;
if (!m_storage.get())
{
// this means we have received the metadata through the
// metadata extension, and we have to initialize
init();
}
assert(m_storage.get());
bool done = true;
try
{
done = m_storage->check_fastresume(data, m_have_pieces, m_num_pieces
, m_compact_mode);
}
catch (std::exception& e)
{
// probably means file permission failure or invalid filename
std::fill(m_have_pieces.begin(), m_have_pieces.end(), false);
m_num_pieces = 0;
if (m_ses.m_alerts.should_post(alert::fatal))
{
m_ses.m_alerts.post_alert(
file_error_alert(
get_handle()
, e.what()));
}
pause();
}
#ifndef NDEBUG
m_initial_done = boost::get<0>(bytes_done());
#endif
return done;
}
std::pair<bool, float> torrent::check_files()
{
INVARIANT_CHECK;
assert(m_storage.get());
std::pair<bool, float> progress(true, 1.f);
try
{
progress = m_storage->check_files(m_have_pieces, m_num_pieces);
}
catch (std::exception& e)
{
// probably means file permission failure or invalid filename
std::fill(m_have_pieces.begin(), m_have_pieces.end(), false);
m_num_pieces = 0;
if (m_ses.m_alerts.should_post(alert::fatal))
{
m_ses.m_alerts.post_alert(
file_error_alert(
get_handle()
, e.what()));
}
pause();
}
#ifndef NDEBUG
m_initial_done = boost::get<0>(bytes_done());
#endif
return progress;
}
void torrent::files_checked(std::vector<piece_picker::downloading_piece> const&
unfinished_pieces)
{
session_impl::mutex_t::scoped_lock l(m_ses.m_mutex);
INVARIANT_CHECK;
if (!is_seed())
{
m_picker->files_checked(m_have_pieces, unfinished_pieces);
}
else
{
m_picker.reset();
}
if (!m_connections_initialized)
{
m_connections_initialized = true;
// all peer connections have to initialize themselves now that the metadata
// is available
typedef std::map<tcp::endpoint, peer_connection*> conn_map;
for (conn_map::iterator i = m_connections.begin()
, end(m_connections.end()); i != end;)
{
try
{
i->second->init();
i->second->on_metadata();
++i;
}
catch (std::exception& e)
{
// the connection failed, close it
conn_map::iterator j = i;
++j;
m_ses.connection_failed(i->second->get_socket()
, i->first, e.what());
i = j;
}
}
}
#ifndef NDEBUG
m_initial_done = boost::get<0>(bytes_done());
#endif
}
alert_manager& torrent::alerts() const
{
return m_ses.m_alerts;
}
boost::filesystem::path torrent::save_path() const
{
return m_save_path;
}
bool torrent::move_storage(boost::filesystem::path const& save_path)
{
INVARIANT_CHECK;
bool ret = true;
if (m_storage.get())
{
ret = m_storage->move_storage(save_path);
m_save_path = m_storage->save_path();
}
else
{
m_save_path = save_path;
}
return ret;
}
piece_manager& torrent::filesystem()
{
INVARIANT_CHECK;
assert(m_storage.get());
return *m_storage;
}
torrent_handle torrent::get_handle() const
{
INVARIANT_CHECK;
return torrent_handle(&m_ses, &m_checker, m_torrent_file.info_hash());
}
session_settings const& torrent::settings() const
{
// INVARIANT_CHECK;
return m_ses.settings();
}
#ifndef NDEBUG
void torrent::check_invariant() const
{
// size_type download = m_stat.total_payload_download();
// size_type done = boost::get<0>(bytes_done());
// assert(download >= done - m_initial_done);
for (const_peer_iterator i = begin(); i != end(); ++i)
{
peer_connection const& p = *i->second;
torrent* associated_torrent = p.associated_torrent().lock().get();
if (associated_torrent != this)
assert(false);
}
if (valid_metadata())
{
assert(int(m_have_pieces.size()) == m_torrent_file.num_pieces());
}
else
{
assert(m_have_pieces.empty());
}
size_type total_done = quantized_bytes_done();
if (is_seed())
assert(total_done == m_torrent_file.total_size());
else
assert(total_done != m_torrent_file.total_size());
// This check is very expensive.
assert(m_num_pieces
== std::count(m_have_pieces.begin(), m_have_pieces.end(), true));
assert(m_priority >= 0.f && m_priority < 1.f);
assert(!valid_metadata() || m_block_size > 0);
assert(!valid_metadata() || (m_torrent_file.piece_length() % m_block_size) == 0);
// if (is_seed()) assert(m_picker.get() == 0);
}
#endif
void torrent::set_sequenced_download_threshold(int threshold)
{
// TODO: if there is no valid metadata yet, save this setting and
// set it once the piece picker is created.
if (valid_metadata() && !is_seed())
picker().set_sequenced_download_threshold(threshold);
}
void torrent::set_max_uploads(int limit)
{
assert(limit >= -1);
if (limit == -1) limit = std::numeric_limits<int>::max();
m_uploads_quota.max = std::max(m_uploads_quota.min, limit);
}
void torrent::set_max_connections(int limit)
{
assert(limit >= -1);
if (limit == -1) limit = std::numeric_limits<int>::max();
m_connections_quota.max = std::max(m_connections_quota.min, limit);
}
void torrent::set_peer_upload_limit(tcp::endpoint ip, int limit)
{
assert(limit >= -1);
peer_connection* p = connection_for(ip);
if (p == 0) return;
p->set_upload_limit(limit);
}
void torrent::set_peer_download_limit(tcp::endpoint ip, int limit)
{
assert(limit >= -1);
peer_connection* p = connection_for(ip);
if (p == 0) return;
p->set_download_limit(limit);
}
void torrent::set_upload_limit(int limit)
{
assert(limit >= -1);
if (limit == -1) limit = std::numeric_limits<int>::max();
if (limit < num_peers() * 10) limit = num_peers() * 10;
m_upload_bandwidth_limit = limit;
}
void torrent::set_download_limit(int limit)
{
assert(limit >= -1);
if (limit == -1) limit = std::numeric_limits<int>::max();
if (limit < num_peers() * 10) limit = num_peers() * 10;
m_download_bandwidth_limit = limit;
}
void torrent::pause()
{
INVARIANT_CHECK;
if (m_paused) return;
#ifndef TORRENT_DISABLE_EXTENSIONS
for (extension_list_t::iterator i = m_extensions.begin()
, end(m_extensions.end()); i != end; ++i)
{
try { if ((*i)->on_pause()) return; } catch (std::exception&) {}
}
#endif
disconnect_all();
m_paused = true;
// tell the tracker that we stopped
m_event = tracker_request::stopped;
m_just_paused = true;
// this will make the storage close all
// files and flush all cached data
if (m_storage.get()) m_storage->release_files();
}
void torrent::resume()
{
INVARIANT_CHECK;
if (!m_paused) return;
#ifndef TORRENT_DISABLE_EXTENSIONS
for (extension_list_t::iterator i = m_extensions.begin()
, end(m_extensions.end()); i != end; ++i)
{
try { if ((*i)->on_resume()) return; } catch (std::exception&) {}
}
#endif
m_paused = false;
// tell the tracker that we're back
m_event = tracker_request::started;
force_tracker_request();
// make pulse be called as soon as possible
m_time_scaler = 0;
}
void torrent::second_tick(stat& accumulator, float tick_interval)
{
INVARIANT_CHECK;
m_connections_quota.used = (int)m_connections.size();
m_uploads_quota.used = m_policy->num_uploads();
m_ul_bandwidth_quota.used = 0;
m_ul_bandwidth_quota.max = 0;
m_ul_bandwidth_quota.min = 0;
m_dl_bandwidth_quota.used = 0;
m_dl_bandwidth_quota.min = 0;
m_dl_bandwidth_quota.max = 0;
#ifndef TORRENT_DISABLE_EXTENSIONS
for (extension_list_t::iterator i = m_extensions.begin()
, end(m_extensions.end()); i != end; ++i)
{
try { (*i)->tick(); } catch (std::exception&) {}
}
#endif
if (m_paused)
{
// let the stats fade out to 0
m_stat.second_tick(tick_interval);
m_web_stat.second_tick(tick_interval);
return;
}
// ---- WEB SEEDS ----
// if we're a seed, we don't need to connect to any web-seed
if (!is_seed() && !m_web_seeds.empty())
{
// keep trying web-seeds if there are any
// first find out which web seeds we are connected to
std::set<std::string> web_seeds;
for (peer_iterator i = m_connections.begin();
i != m_connections.end(); ++i)
{
web_peer_connection* p
= dynamic_cast<web_peer_connection*>(i->second);
if (!p) continue;
web_seeds.insert(p->url());
}
web_seeds.insert(m_resolving_web_seeds.begin(), m_resolving_web_seeds.end());
// from the list of available web seeds, subtract the ones we are
// already connected to.
std::vector<std::string> not_connected_web_seeds;
std::set_difference(m_web_seeds.begin(), m_web_seeds.end(), web_seeds.begin()
, web_seeds.end(), std::back_inserter(not_connected_web_seeds));
// connect to all of those that we aren't connected to
std::for_each(not_connected_web_seeds.begin(), not_connected_web_seeds.end()
, bind(&torrent::connect_to_url_seed, this, _1));
}
for (peer_iterator i = m_connections.begin();
i != m_connections.end(); ++i)
{
peer_connection* p = i->second;
m_stat += p->statistics();
if (dynamic_cast<web_peer_connection*>(p))
{
m_web_stat += p->statistics();
}
// updates the peer connection's ul/dl bandwidth
// resource requests
p->second_tick(tick_interval);
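// sum the per-peer bandwidth quotas into the torrent-wide totals.
// saturated_add is presumably used for the max values so that peers
// with an unlimited (inf) max don't overflow the sum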
m_ul_bandwidth_quota.used += p->m_ul_bandwidth_quota.used;
m_ul_bandwidth_quota.min += p->m_ul_bandwidth_quota.min;
m_dl_bandwidth_quota.used += p->m_dl_bandwidth_quota.used;
m_dl_bandwidth_quota.min += p->m_dl_bandwidth_quota.min;
m_ul_bandwidth_quota.max = saturated_add(
m_ul_bandwidth_quota.max
, p->m_ul_bandwidth_quota.max);
m_dl_bandwidth_quota.max = saturated_add(
m_dl_bandwidth_quota.max
, p->m_dl_bandwidth_quota.max);
}
m_ul_bandwidth_quota.max
= std::min(m_ul_bandwidth_quota.max, m_upload_bandwidth_limit);
if (m_upload_bandwidth_limit == resource_request::inf)
m_ul_bandwidth_quota.max = resource_request::inf;
m_dl_bandwidth_quota.max
= std::min(m_dl_bandwidth_quota.max, m_download_bandwidth_limit);
if (m_download_bandwidth_limit == resource_request::inf)
m_dl_bandwidth_quota.max = resource_request::inf;
accumulator += m_stat;
m_stat.second_tick(tick_interval);
m_web_stat.second_tick(tick_interval);
}
void torrent::distribute_resources(float tick_interval)
{
INVARIANT_CHECK;
m_time_scaler--;
if (m_time_scaler <= 0)
{
m_time_scaler = 10;
m_policy->pulse();
}
assert(m_ul_bandwidth_quota.given >= 0);
assert(m_dl_bandwidth_quota.given >= 0);
int ul_used = 0;
int dl_used = 0;
#ifdef TORRENT_LOGGING
int ul_max = 0;
int dl_max = 0;
#endif
for (peer_iterator i = m_connections.begin();
i != m_connections.end(); ++i)
{
peer_connection* p = i->second;
// bandwidth used in excess of the given amount is carried over
// into the next timeslice; don't count it again here as well
// (that would lead to a spiral of ever-accumulating used-values)
ul_used += std::min(p->m_ul_bandwidth_quota.used, p->m_ul_bandwidth_quota.given);
dl_used += std::min(p->m_dl_bandwidth_quota.used, p->m_dl_bandwidth_quota.given);
#ifdef TORRENT_LOGGING
ul_max = saturated_add(ul_max, p->m_ul_bandwidth_quota.max);
dl_max = saturated_add(dl_max, p->m_dl_bandwidth_quota.max);
#endif
}
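// feedback loop for handing out bandwidth: m_excess_* accumulates
// how much more (or less) the peers actually used than they were
// given. Next tick we hand out the given quota minus 70% of that
// excess, over-allocated by a factor of 1.6, and smooth the result
// into m_soft_*_limit with a simple low-pass filter (10% per tick).
// The constants appear to be hand-tuned heuristics.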
m_excess_ul += ul_used - m_ul_bandwidth_quota.given;
m_excess_dl += dl_used - m_dl_bandwidth_quota.given;
m_excess_ul = std::max(m_excess_ul, -10000);
m_excess_dl = std::max(m_excess_dl, -10000);
int ul_to_distribute = std::max(int((m_ul_bandwidth_quota.given
- m_excess_ul * 0.7f) * 1.6f), 0);
int dl_to_distribute = std::max(int((m_dl_bandwidth_quota.given
- m_excess_dl * 0.7f) * 1.6f), 0);
m_soft_ul_limit = int(m_soft_ul_limit + (ul_to_distribute - m_soft_ul_limit) * 0.1f);
m_soft_dl_limit = int(m_soft_dl_limit + (dl_to_distribute - m_soft_dl_limit) * 0.1f);
ul_to_distribute = m_soft_ul_limit;
dl_to_distribute = m_soft_dl_limit;
#ifdef TORRENT_LOGGING
std::copy(m_ul_history + 1, m_ul_history + debug_bw_history_size, m_ul_history);
m_ul_history[debug_bw_history_size-1] = ul_used;
std::copy(m_dl_history + 1, m_dl_history + debug_bw_history_size, m_dl_history);
m_dl_history[debug_bw_history_size-1] = dl_used;
size_type mean_ul = 0;
size_type mean_dl = 0;
for (int i = 0; i < debug_bw_history_size; ++i)
{
mean_ul += m_ul_history[i];
mean_dl += m_dl_history[i];
}
mean_ul /= debug_bw_history_size;
mean_dl /= debug_bw_history_size;
int ul_leftovers = 0;
int dl_leftovers = 0;
for (peer_iterator i = m_connections.begin();
i != m_connections.end(); ++i)
{
ul_leftovers += i->second->m_ul_bandwidth_quota.leftovers;
dl_leftovers += i->second->m_dl_bandwidth_quota.leftovers;
}
(*m_log)
<< ul_used << "\t"
<< mean_ul << "\t"
<< dl_used << "\t"
<< mean_dl << "\t"
<< m_stat.total_payload_download() << "\t"
<< m_web_stat.total_payload_download() << "\t"
<< m_total_redundant_bytes
<< "\n";
(*m_log)
<< m_second_count++ << "\t"
<< m_ul_bandwidth_quota.given << "\t"
<< ul_to_distribute << "\t"
<< m_excess_ul << "\t"
<< ul_leftovers << "\t"
<< m_dl_bandwidth_quota.given << "\t"
<< dl_to_distribute << "\t"
<< m_excess_dl << "\t"
<< dl_leftovers << "\t"
<< num_peers() << "\t"
<< ul_max << "\t"
<< dl_max << "\t";
(*m_peer_log) << m_second_count << "\t";
for (peer_iterator i = m_connections.begin();
i != m_connections.end(); ++i)
{
int given = i->second->m_dl_bandwidth_quota.given;
(*m_peer_log) << (given == resource_request::inf ? -1 : given)
<< "\t" << i->second->m_dl_bandwidth_quota.used << "\t";
}
for (int i = (int)m_connections.size(); i < 10; ++i)
{
(*m_peer_log) << 0 << "\t" << 0 << "\t";
}
(*m_peer_log) << "\n";
#endif
// distribute allowed upload among the peers
allocate_resources(ul_to_distribute
, m_connections
, &peer_connection::m_ul_bandwidth_quota);
// distribute allowed download among the peers
allocate_resources(dl_to_distribute
, m_connections
, &peer_connection::m_dl_bandwidth_quota);
using boost::bind;
// tell all peers to reset their used quota. This is
// a new second and they can again use up their quota
for (std::map<tcp::endpoint, peer_connection*>::iterator i
= m_connections.begin(); i != m_connections.end(); ++i)
{
i->second->reset_upload_quota();
}
}
bool torrent::verify_piece(int piece_index)
{
// INVARIANT_CHECK;
assert(m_storage.get());
assert(piece_index >= 0);
assert(piece_index < m_torrent_file.num_pieces());
assert(piece_index < (int)m_have_pieces.size());
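// read the entire piece into memory and compare its SHA-1 hash
// against the expected hash from the torrent file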
int size = static_cast<int>(m_torrent_file.piece_size(piece_index));
std::vector<char> buffer(size);
assert(size > 0);
m_storage->read(&buffer[0], piece_index, 0, size);
hasher h;
h.update(&buffer[0], size);
sha1_hash digest = h.final();
return m_torrent_file.hash_for_piece(piece_index) == digest;
}
const tcp::endpoint& torrent::current_tracker() const
{
return m_tracker_address;
}
bool torrent::is_allocating() const
{ return m_storage.get() && m_storage->is_allocating(); }
void torrent::file_progress(std::vector<float>& fp) const
{
assert(valid_metadata());
fp.clear();
fp.resize(m_torrent_file.num_files(), 0.f);
for (int i = 0; i < m_torrent_file.num_files(); ++i)
{
peer_request ret = m_torrent_file.map_file(i, 0, 0);
size_type size = m_torrent_file.file_at(i).size;
// zero sized files are considered
// 100% done all the time
if (size == 0)
{
fp[i] = 1.f;
continue;
}
size_type done = 0;
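// walk all pieces that overlap this file and count the bytes
// belonging to pieces we already have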
while (size > 0)
{
size_type bytes_step = std::min(m_torrent_file.piece_size(ret.piece)
- ret.start, size);
if (m_have_pieces[ret.piece]) done += bytes_step;
++ret.piece;
ret.start = 0;
size -= bytes_step;
}
assert(size == 0);
fp[i] = static_cast<float>(done) / m_torrent_file.file_at(i).size;
}
}
torrent_status torrent::status() const
{
INVARIANT_CHECK;
assert(std::accumulate(
m_have_pieces.begin()
, m_have_pieces.end()
, 0) == m_num_pieces);
torrent_status st;
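// count only peers whose connection attempt has completed,
// i.e. exclude half-open (still connecting) connections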
st.num_peers = (int)std::count_if(m_connections.begin(), m_connections.end(),
boost::bind<bool>(std::logical_not<bool>(), boost::bind(&peer_connection::is_connecting,
boost::bind(&std::map<tcp::endpoint,peer_connection*>::value_type::second, _1))));
st.num_complete = m_complete;
st.num_incomplete = m_incomplete;
st.paused = m_paused;
boost::tie(st.total_done, st.total_wanted_done) = bytes_done();
// payload transfer
st.total_payload_download = m_stat.total_payload_download();
st.total_payload_upload = m_stat.total_payload_upload();
// total transfer
st.total_download = m_stat.total_payload_download()
+ m_stat.total_protocol_download();
st.total_upload = m_stat.total_payload_upload()
+ m_stat.total_protocol_upload();
// failed bytes
st.total_failed_bytes = m_total_failed_bytes;
st.total_redundant_bytes = m_total_redundant_bytes;
// transfer rate
st.download_rate = m_stat.download_rate();
st.upload_rate = m_stat.upload_rate();
st.download_payload_rate = m_stat.download_payload_rate();
st.upload_payload_rate = m_stat.upload_payload_rate();
st.next_announce = next_announce()
- second_clock::universal_time();
if (st.next_announce.is_negative()) st.next_announce
= boost::posix_time::seconds(0);
st.announce_interval = boost::posix_time::seconds(m_duration);
if (m_last_working_tracker >= 0)
{
st.current_tracker
= m_trackers[m_last_working_tracker].url;
}
// if we don't have any metadata, stop here
if (!valid_metadata())
{
if (m_got_tracker_response == false)
st.state = torrent_status::connecting_to_tracker;
else
st.state = torrent_status::downloading_metadata;
// TODO: add a progress member to the torrent that will be used in this case
// and that may be set by a plugin
// if (m_metadata_size == 0) st.progress = 0.f;
// else st.progress = std::min(1.f, m_metadata_progress / (float)m_metadata_size);
st.progress = 0.f;
st.block_size = 0;
return st;
}
st.block_size = block_size();
// fill in status that depends on metadata
st.total_wanted = m_torrent_file.total_size();
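// if any pieces are filtered out, they are not counted as wanted
// bytes. The last piece may be shorter than piece_length(), so it
// is subtracted separately below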
if (m_picker.get() && (m_picker->num_filtered() > 0
|| m_picker->num_have_filtered() > 0))
{
int filtered_pieces = m_picker->num_filtered()
+ m_picker->num_have_filtered();
int last_piece_index = m_torrent_file.num_pieces() - 1;
if (m_picker->is_filtered(last_piece_index))
{
st.total_wanted -= m_torrent_file.piece_size(last_piece_index);
--filtered_pieces;
}
st.total_wanted -= filtered_pieces * m_torrent_file.piece_length();
}
assert(st.total_wanted >= st.total_wanted_done);
if (st.total_wanted == 0) st.progress = 1.f;
else st.progress = st.total_wanted_done
/ static_cast<double>(st.total_wanted);
st.pieces = &m_have_pieces;
st.num_pieces = m_num_pieces;
if (m_got_tracker_response == false)
{
st.state = torrent_status::connecting_to_tracker;
}
else if (is_seed())
{
assert(st.total_done == m_torrent_file.total_size());
st.state = torrent_status::seeding;
}
else if (st.total_wanted_done == st.total_wanted)
{
assert(st.total_done != m_torrent_file.total_size());
st.state = torrent_status::finished;
}
else
{
st.state = torrent_status::downloading;
}
if (m_picker.get())
st.distributed_copies = m_picker->distributed_copies();
else
st.distributed_copies = -1;
return st;
}
int torrent::num_seeds() const
{
INVARIANT_CHECK;
return (int)std::count_if(m_connections.begin(), m_connections.end(),
boost::bind(&peer_connection::is_seed,
boost::bind(&std::map<tcp::endpoint
,peer_connection*>::value_type::second, _1)));
}
void torrent::tracker_request_timed_out(
tracker_request const&)
{
session_impl::mutex_t::scoped_lock l(m_ses.m_mutex);
INVARIANT_CHECK;
#if defined(TORRENT_VERBOSE_LOGGING) || defined(TORRENT_LOGGING)
debug_log("*** tracker timed out");
#endif
if (m_ses.m_alerts.should_post(alert::warning))
{
std::stringstream s;
s << "tracker: \""
<< m_trackers[m_currently_trying_tracker].url
<< "\" timed out";
m_ses.m_alerts.post_alert(tracker_alert(get_handle()
, m_failed_trackers + 1, 0, s.str()));
}
try_next_tracker();
}
// TODO: with some response codes, we should just consider
// the tracker as a failure and not retry
// it anymore
void torrent::tracker_request_error(tracker_request const&
, int response_code, const std::string& str)
{
// take the session lock before running the invariant check,
// matching the order used in tracker_request_timed_out() above
session_impl::mutex_t::scoped_lock l(m_ses.m_mutex);
INVARIANT_CHECK;
#if defined(TORRENT_VERBOSE_LOGGING) || defined(TORRENT_LOGGING)
debug_log(std::string("*** tracker error: ") + str);
#endif
if (m_ses.m_alerts.should_post(alert::warning))
{
std::stringstream s;
s << "tracker: \""
<< m_trackers[m_currently_trying_tracker].url
<< "\" " << str;
m_ses.m_alerts.post_alert(tracker_alert(get_handle()
, m_failed_trackers + 1, response_code, s.str()));
}
try_next_tracker();
}
#if defined(TORRENT_VERBOSE_LOGGING) || defined(TORRENT_LOGGING)
void torrent::debug_log(const std::string& line)
{
(*m_ses.m_logger) << line << "\n";
}
#endif
}