use of BOOST_FALLTHROUGH, typos and minor cleanup

This commit is contained in:
Alden Torres 2016-07-30 21:53:11 -04:00
parent e368efe535
commit e55a310e79
5 changed files with 15 additions and 20 deletions

View File

@ -41,7 +41,7 @@ namespace libtorrent
// to indicate that it has called all the disk job handlers
// in the current batch. The intention is for the peer
// connections to be able to not issue any sends on their
// sockets until they have recevied all the disk jobs
// sockets until they have received all the disk jobs
// that are ready first. This makes the networking more
// efficient since it can send larger buffers down to the
// kernel per system call.
@ -56,4 +56,3 @@ namespace libtorrent
}
#endif

View File

@ -41,7 +41,7 @@ POSSIBILITY OF SUCH DAMAGE.
#include "libtorrent/file_pool.hpp"
#include "libtorrent/torrent_info.hpp"
#include "libtorrent/platform_util.hpp"
#include <boost/scoped_array.hpp>
#include <functional>
#include <tuple>
#include <set>
@ -304,7 +304,7 @@ namespace libtorrent
// try to keep it from being flushed, since we'll
// need to read it back regardless. Flushing will
// save blocks that can be used to "save" other
// pieces from being fllushed prematurely
// pieces from being flushed prematurely
end = int(p->blocks_in_piece);
}
@ -396,7 +396,7 @@ namespace libtorrent
DLOG("[%d xx] ", i);
}
// TOOD: in this case, the piece should probably not be flushed yet. are there
// TODO: in this case, the piece should probably not be flushed yet. are there
// any more cases where it should?
range_full = false;
@ -513,9 +513,9 @@ namespace libtorrent
}
// iov and flushing are expected to be arrays of at least pe->blocks_in_piece
// items in them. Returns the numner of iovecs written to the iov array.
// items in them. Returns the number of iovecs written to the iov array.
// The same number of block indices are written to the flushing array. These
// are block indices that the respecivec iovec structure refers to, since
// are block indices that the respective iovec structure refers to, since
// we might not be able to flush everything as a single contiguous block,
// the block indices indicates where the block run is broken
// the cache needs to be locked when calling this function
@ -564,7 +564,7 @@ namespace libtorrent
// if we fail to lock the block, it's no longer in the cache
bool locked = m_disk_cache.inc_block_refcount(pe, i, block_cache::ref_flushing);
// it should always suceed, since it's a dirty block, and
// it should always succeed, since it's a dirty block, and
// should never have been marked as volatile
TORRENT_ASSERT(locked);
TORRENT_ASSERT(pe->cache_state != cached_piece_entry::volatile_read_lru);
@ -1522,7 +1522,7 @@ namespace libtorrent
}
// this function checks to see if a read job is a cache hit,
// and if it doesn't have a picece allocated, it allocates
// and if it doesn't have a piece allocated, it allocates
// one and it sets outstanding_read flag and possibly queues
// up the job in the piece read job list
// the cache std::mutex must be held when calling this
@ -2162,9 +2162,9 @@ namespace libtorrent
{
sha1_hash result = pe->hash->h.final();
for (tailqueue_iterator<disk_io_job> i = hash_jobs.iterate(); i.get(); i.next())
for (auto i = hash_jobs.iterate(); i.get(); i.next())
{
disk_io_job* hj = const_cast<disk_io_job*>(i.get());
disk_io_job* hj = i.get();
memcpy(hj->d.piece_hash, result.data(), 20);
hj->ret = 0;
}
@ -3587,4 +3587,3 @@ namespace libtorrent
}
#endif
}

View File

@ -1280,7 +1280,7 @@ namespace libtorrent
if (!m_dirty)
{
// first count how many pieces we're updating. If it's few (less than half)
// we'll just update them one at a time. Othewise we'll just update the counters
// we'll just update them one at a time. Otherwise we'll just update the counters
// and mark the picker as dirty, so we'll rebuild it next time we need it.
// this only matters if we're not already dirty, in which case the fastest
// thing to do is to just update the counters and be done
@ -1377,7 +1377,7 @@ namespace libtorrent
if (!m_dirty)
{
// first count how many pieces we're updating. If it's few (less than half)
// we'll just update them one at a time. Othewise we'll just update the counters
// we'll just update them one at a time. Otherwise we'll just update the counters
// and mark the picker as dirty, so we'll rebuild it next time we need it.
// this only matters if we're not already dirty, in which case the fastest
// thing to do is to just update the counters and be done
@ -1617,7 +1617,7 @@ namespace libtorrent
if (p.priority(this) >= 0) add(index);
}
// this is used to indicate that we succesfully have
// this is used to indicate that we successfully have
// downloaded a piece, and that no further attempts
// to pick that piece should be made. The piece will
// be removed from the available piece list.
@ -1903,7 +1903,7 @@ namespace libtorrent
// only one of rarest_first or sequential can be set
// the return value is a combination of picker_log_alert::picker_flags_t,
// indicating which path throught the picker we took to arrive at the
// indicating which path through the picker we took to arrive at the
// returned block picks.
std::uint32_t piece_picker::pick_pieces(bitfield const& pieces
, std::vector<piece_block>& interesting_blocks, int num_blocks
@ -3716,4 +3716,3 @@ get_out:
}
}

View File

@ -32,7 +32,6 @@ POSSIBILITY OF SUCH DAMAGE.
#include "libtorrent/config.hpp"
#include "libtorrent/random.hpp"
#include "libtorrent/assert.hpp"
#include <random>
@ -72,4 +71,3 @@ namespace libtorrent
}
}

View File

@ -3147,7 +3147,7 @@ bool utp_socket_impl::incoming_packet(span<std::uint8_t const> buf
m_connect_handler = false;
utp_stream::on_connect(m_userdata, m_error, false);
}
// fall through
BOOST_FALLTHROUGH;
}
case UTP_STATE_CONNECTED:
{