diff --git a/docs/todo.html b/docs/todo.html index e9f787c65..2c8d539cf 100644 --- a/docs/todo.html +++ b/docs/todo.html @@ -127,60 +127,7 @@ return m_connections.size() < m_max_connections && !is_paused() && ((m_state != torrent_status::checking_files -relevance 3src/torrent.cpp:8718with 110 as response codes, we should just consider the tracker as a failure and not retry it anymore

with 110 as response codes, we should just consider -the tracker as a failure and not retry -it anymore

src/torrent.cpp:8718

		TORRENT_ASSERT(m_ses.is_network_thread());
-		TORRENT_ASSERT(b > 0);
-		m_total_failed_bytes += b;
-		m_ses.add_failed_bytes(b);
-//		TORRENT_ASSERT(m_total_redundant_bytes + m_total_failed_bytes
-//			<= m_stat.total_payload_download());
-	}
-
-	int torrent::num_seeds() const
-	{
-		TORRENT_ASSERT(m_ses.is_network_thread());
-		INVARIANT_CHECK;
-
-		int ret = 0;
-		for (std::set<peer_connection*>::const_iterator i = m_connections.begin()
-			, end(m_connections.end()); i != end; ++i)
-			if ((*i)->is_seed()) ++ret;
-		return ret;
-	}
-
-
void torrent::tracker_request_error(tracker_request const& r -
, int response_code, error_code const& ec, const std::string& msg - , int retry_interval) - { - TORRENT_ASSERT(m_ses.is_network_thread()); - - INVARIANT_CHECK; - -#if defined TORRENT_VERBOSE_LOGGING || defined TORRENT_LOGGING || defined TORRENT_ERROR_LOGGING - debug_log("*** tracker error: (%d) %s %s", ec.value(), ec.message().c_str(), msg.c_str()); -#endif - if (r.kind == tracker_request::announce_request) - { - announce_entry* ae = find_tracker(r); - if (ae) - { - ae->failed(settings(), retry_interval); - ae->last_error = ec; - ae->message = msg; - int tracker_index = ae - &m_trackers[0]; -#if defined TORRENT_VERBOSE_LOGGING || defined TORRENT_LOGGING || defined TORRENT_ERROR_LOGGING - debug_log("*** increment tracker fail count [%d]", ae->fails); -#endif - deprioritize_tracker(tracker_index); - } - if (m_ses.m_alerts.should_post<tracker_error_alert>()) - { - m_ses.m_alerts.post_alert(tracker_error_alert(get_handle() - , ae?ae->fails:0, response_code, r.url, ec, msg)); - } - } -
relevance 3src/utp_stream.cpp:412remove the read timeout concept. This should not be necessary

remove the read timeout concept. This should not be necessary

src/utp_stream.cpp:412

	// these are the callbacks made into the utp_stream object
+
relevance 3src/utp_stream.cpp:412remove the read timeout concept. This should not be necessary

remove the read timeout concept. This should not be necessary

src/utp_stream.cpp:412

	// these are the callbacks made into the utp_stream object
 	// on read/write/connect events
 	utp_stream::handler_t m_read_handler;
 	utp_stream::handler_t m_write_handler;
@@ -202,9 +149,7 @@ it anymore

src/torrent.cpp:8718

	ptime m_read_timeout;
 
-
relevance 3src/utp_stream.cpp:415remove the write timeout concept. This should not be necessary

remove the write timeout concept. This should not be necessary

src/utp_stream.cpp:415

	// these are the callbacks made into the utp_stream object
-	// on read/write/connect events
-	utp_stream::handler_t m_read_handler;
+
relevance 3src/utp_stream.cpp:415remove the write timeout concept. This should not be necessary

remove the write timeout concept. This should not be necessary

src/utp_stream.cpp:415

	utp_stream::handler_t m_read_handler;
 	utp_stream::handler_t m_write_handler;
 	utp_stream::connect_handler_t m_connect_handler;
 
@@ -222,6 +167,8 @@ it anymore

src/torrent.cpp:8718

	ptime m_write_timeout;
 
 	// the time when the last packet we sent times out. Including re-sends.
@@ -253,6 +200,58 @@ it anymore

src/torrent.cpp:8718

relevance 3src/utp_stream.cpp:1685this alloca() statement won't necessarily produce correctly aligned memory. do something about that

this alloca() statement won't necessarily produce +correctly aligned memory. do something about that

src/utp_stream.cpp:1685

	bool stack_alloced = false;
+#endif
+
+	// payload size being zero means we're just sending
+	// a force. We should not pick up the nagle packet
+	if (!m_nagle_packet || (payload_size == 0 && force))
+	{
+		// we only need a heap allocation if we have payload and
+		// need to keep the packet around (in the outbuf)
+		if (payload_size) 
+		{
+			p = (packet*)malloc(sizeof(packet) + m_mtu);
+			p->allocated = m_mtu;
+		}
+		else
+		{
+#ifdef TORRENT_DEBUG
+			stack_alloced = true;
+#endif
+			TORRENT_ASSERT(force);
+
p = (packet*)TORRENT_ALLOCA(char, sizeof(packet) + packet_size); +
UTP_LOGV("%8p: allocating %d bytes on the stack\n", this, packet_size); + p->allocated = packet_size; + } + + p->size = packet_size; + p->header_size = packet_size - payload_size; + p->num_transmissions = 0; + p->need_resend = false; + ptr = p->buf; + h = (utp_header*)ptr; + ptr += sizeof(utp_header); + + h->extension = sack ? 1 : 0; + h->connection_id = m_send_id; + // seq_nr is ignored for ST_STATE packets, so it doesn't + // matter that we say this is a sequence number we haven't + // actually sent yet + h->seq_nr = m_seq_nr; + h->type_ver = ((payload_size ? ST_DATA : ST_STATE) << 4) | 1; + + write_payload(p->buf + p->header_size, payload_size); + } + else + { + // pick up the nagle packet and keep adding bytes to it + p = m_nagle_packet; + + ptr = p->buf + sizeof(utp_header); + h = (utp_header*)p->buf; + TORRENT_ASSERT(h->seq_nr == m_seq_nr);
relevance 3src/kademlia/rpc_manager.cpp:36remove this dependency by having the dht observer have its own flags

remove this dependency by having the dht observer have its own flags

src/kademlia/rpc_manager.cpp:36

      contributors may be used to endorse or promote products derived
       from this software without specific prior written permission.
@@ -608,9 +607,9 @@ private:
 #if defined TORRENT_VERBOSE_LOGGING || defined TORRENT_LOGGING || defined TORRENT_ERROR_LOGGING
 int socket_impl_size() { return sizeof(utp_socket_impl); }
 #endif
-
relevance 2src/utp_stream.cpp:1846we might want to do something else here as well, to resend the packet immediately without it being an MTU probe

we might want to do something else here +

relevance 2src/utp_stream.cpp:1848we might want to do something else here as well, to resend the packet immediately without it being an MTU probe

we might want to do something else here as well, to resend the packet immediately without -it being an MTU probe

src/utp_stream.cpp:1846

		, boost::uint32_t(h->timestamp_difference_microseconds), int(p->mtu_probe)
+it being an MTU probe

src/utp_stream.cpp:1848

		, boost::uint32_t(h->timestamp_difference_microseconds), int(p->mtu_probe)
 		, h->extension);
 #endif
 
@@ -810,7 +809,7 @@ are pushed back and items are popped from the front

include/libtorrent/t // each bit represents a piece. a set bit means // the piece has had its hash verified. This -

relevance 2include/libtorrent/torrent_info.hpp:450these strings (m_comment, m_created_by, m_ssl_root_cert) could be lazy_entry* to save memory

these strings (m_comment, m_created_by, m_ssl_root_cert) could be lazy_entry* to save memory

include/libtorrent/torrent_info.hpp:450

		std::vector<announce_entry> m_urls;
+
relevance 2include/libtorrent/torrent_info.hpp:455these strings (m_comment, m_created_by, m_ssl_root_cert) could be lazy_entry* to save memory

these strings (m_comment, m_created_by, m_ssl_root_cert) could be lazy_entry* to save memory

include/libtorrent/torrent_info.hpp:455

		std::vector<announce_entry> m_urls;
 		std::vector<web_seed_entry> m_web_seeds;
 		nodes_t m_nodes;
 
@@ -1464,26 +1463,26 @@ filenames should be preserved!

src/torrent_info.cpp:366

relevance 1src/torrent_info.cpp:387once the filename renaming is removed from here this check can be removed as well

once the filename renaming is removed from here -this check can be removed as well

src/torrent_info.cpp:387

			}
-			return false;
-		}
-	};
+this check can be removed as well

src/torrent_info.cpp:387

				return false;
 
-	bool extract_files(lazy_entry const& list, file_storage& target
-		, std::string const& root_dir, ptrdiff_t info_ptr_diff)
-	{
-		if (list.type() != lazy_entry::list_t) return false;
-		target.reserve(list.list_size());
-		for (int i = 0, end(list.list_size()); i < end; ++i)
-		{
-			lazy_entry const* file_hash = 0;
-			time_t mtime = 0;
-			file_entry e;
-			lazy_entry const* fee = 0;
-			if (!extract_single_file(*list.list_at(i), e, root_dir
-				, &file_hash, &fee, &mtime))
-				return false;
+			int cnt = 0;
+			std::set<std::string, string_less_no_case> files;
 
+			// as long as this file already exists
+			// increase the counter
+			while (!files.insert(e.path).second)
+			{
+				++cnt;
+				char suffix[50];
+				snprintf(suffix, sizeof(suffix), ".%d%s", cnt, extension(e.path).c_str());
+				replace_extension(e.path, suffix);
+			}
+			target.add_file(e, file_hash ? file_hash->string_ptr() + info_ptr_diff : 0);
+
+			// This is a memory optimization! Instead of having
+			// each entry keep a string for its filename, make it
+			// simply point into the info-section buffer
+			internal_file_entry const& fe = *target.rbegin();
 
if (fee && fe.filename() == fee->string_value())
{ // this string pointer does not necessarily point into @@ -2569,26 +2568,26 @@ connections

src/session_impl.cpp:4423

relevance 0src/session_impl.cpp:4457make this bias configurable

make this bias configurable

src/session_impl.cpp:4457

relevance 0src/session_impl.cpp:4458also take average_peers into account, to create a bias for downloading torrents with < average peers

also take average_peers into account, to create a bias for downloading torrents with < average peers

src/session_impl.cpp:4458

		{
-			if (m_boost_connections > max_connections)
-			{
-				m_boost_connections -= max_connections;
-				max_connections = 0;
-			}
-			else
-			{
-				max_connections -= m_boost_connections;
-				m_boost_connections = 0;
-			}
-		}
+
relevance 0src/session_impl.cpp:4457make this bias configurable

make this bias configurable

src/session_impl.cpp:4457

relevance 0src/session_impl.cpp:4458also take average_peers into account, to create a bias for downloading torrents with < average peers

also take average_peers into account, to create a bias for downloading torrents with < average peers

src/session_impl.cpp:4458

				average_peers = num_downloads_peers / num_downloads;
 
-		// this logic is here to smooth out the number of new connection
-		// attempts over time, to prevent connecting a large number of
-		// sockets, wait 10 seconds, and then try again
-		int limit = (std::min)(m_settings.connections_limit - num_connections(), free_slots);
-		if (m_settings.smooth_connects && max_connections > (limit+1) / 2)
-			max_connections = (limit+1) / 2;
+			if (m_next_connect_torrent == m_torrents.end())
+				m_next_connect_torrent = m_torrents.begin();
 
+			int steps_since_last_connect = 0;
+			int num_torrents = int(m_torrents.size());
+			for (;;)
+			{
+				torrent& t = *m_next_connect_torrent->second;
+				if (t.want_more_peers())
+				{
+					TORRENT_ASSERT(t.allows_peers());
+					// have a bias to give more connection attempts
+					// to downloading torrents than seed, and even
+					// more to downloading torrents with less than
+					// average number of connections
+					int num_attempts = 1;
+					if (!t.is_finished())
+					{
 
TORRENT_ASSERT(m_num_active_downloading > 0);
num_attempts += m_num_active_finished / m_num_active_downloading; } @@ -2653,26 +2652,26 @@ connections

src/session_impl.cpp:4423

relevance 0src/session_impl.cpp:4616make configurable

make configurable

src/session_impl.cpp:4616

-#ifdef TORRENT_DEBUG
-			for (std::vector<peer_connection*>::const_iterator i = peers.begin()
-				, end(peers.end()), prev(peers.end()); i != end; ++i)
-			{
-				if (prev != end)
-				{
-					boost::shared_ptr<torrent> t1 = (*prev)->associated_torrent().lock();
-					TORRENT_ASSERT(t1);
-					boost::shared_ptr<torrent> t2 = (*i)->associated_torrent().lock();
-					TORRENT_ASSERT(t2);
-					TORRENT_ASSERT((*prev)->uploaded_since_unchoke() * 1000
-						* (1 + t1->priority()) / total_milliseconds(unchoke_interval)
-						>= (*i)->uploaded_since_unchoke() * 1000
+
relevance 0src/session_impl.cpp:4616make configurable

make configurable

src/session_impl.cpp:4616

						>= (*i)->uploaded_since_unchoke() * 1000
 						* (1 + t2->priority()) / total_milliseconds(unchoke_interval));
 				}
 				prev = i;
 			}
 #endif
 
+			int rate_threshold = 1024;
+
+			for (std::vector<peer_connection*>::const_iterator i = peers.begin()
+				, end(peers.end()); i != end; ++i)
+			{
+				peer_connection const& p = **i;
+				int rate = int(p.uploaded_since_unchoke()
+					* 1000 / total_milliseconds(unchoke_interval));
+
+				if (rate < rate_threshold) break;
+
+				++m_allowed_upload_slots;
+
 
rate_threshold += 1024;
} // allow one optimistic unchoke @@ -2755,25 +2754,25 @@ connections

src/session_impl.cpp:4423

relevance 0src/storage.cpp:358if the read fails, set error and exit immediately

if the read fails, set error and exit immediately

src/storage.cpp:358

			if (m_storage->disk_pool()) block_size = m_storage->disk_pool()->block_size();
-			int size = slot_size;
-			int num_blocks = (size + block_size - 1) / block_size;
-
-			// when we optimize for speed we allocate all the buffers we
-			// need for the rest of the piece, and read it all in one call
-			// and then hash it. When optimizing for memory usage, we read
-			// one block at a time and hash it. This ends up only using a
-			// single buffer
-			if (m_storage->settings().optimize_hashing_for_speed)
+
relevance 0src/storage.cpp:358if the read fails, set error and exit immediately

if the read fails, set error and exit immediately

src/storage.cpp:358

					else
+					{
+						ph.h.update((char const*)bufs[i].iov_base, bufs[i].iov_len);
+						small_piece_size -= bufs[i].iov_len;
+					}
+					ph.offset += bufs[i].iov_len;
+					m_storage->disk_pool()->free_buffer((char*)bufs[i].iov_base);
+				}
+			}
+			else
 			{
-				file::iovec_t* bufs = TORRENT_ALLOCA(file::iovec_t, num_blocks);
+				file::iovec_t buf;
+				disk_buffer_holder holder(*m_storage->disk_pool()
+					, m_storage->disk_pool()->allocate_buffer("hash temp"));
+				buf.iov_base = holder.get();
 				for (int i = 0; i < num_blocks; ++i)
 				{
-					bufs[i].iov_base = m_storage->disk_pool()->allocate_buffer("hash temp");
-					bufs[i].iov_len = (std::min)(block_size, size);
-					size -= bufs[i].iov_len;
-				}
-				num_read = m_storage->readv(bufs, slot, ph.offset, num_blocks);
+					buf.iov_len = (std::min)(block_size, size);
+					int ret = m_storage->readv(&buf, slot, ph.offset, 1);
 					if (ret > 0) num_read += ret;
 
if (small_hash && small_piece_size <= block_size) diff --git a/gen_todo.py b/gen_todo.py index 8dc2fcd3c..5f9673176 100644 --- a/gen_todo.py +++ b/gen_todo.py @@ -64,11 +64,17 @@ for f in files: state = 'context' items[-1]['context'] = ''.join(context) + '
' + html_sanitize(l) + '
'; context_lines = 1 + + context.append(html_sanitize(l)) + if len(context) > 20: context.pop(0) continue if state == 'context': items[-1]['context'] += html_sanitize(l) context_lines += 1 + + context.append(html_sanitize(l)) + if len(context) > 20: context.pop(0) if context_lines > 30: state = '' h.close() diff --git a/src/utp_stream.cpp b/src/utp_stream.cpp index 48abcc4d2..d8088e2e0 100644 --- a/src/utp_stream.cpp +++ b/src/utp_stream.cpp @@ -1682,7 +1682,7 @@ bool utp_socket_impl::send_pkt(int flags) stack_alloced = true; #endif TORRENT_ASSERT(force); - // TODO: 3 this alloca() statement won't necessariky produce + // TODO: 3 this alloca() statement won't necessarily produce // correctly aligned memory. do something about that p = (packet*)TORRENT_ALLOCA(char, sizeof(packet) + packet_size); UTP_LOGV("%8p: allocating %d bytes on the stack\n", this, packet_size);