forked from premiere/premiere-libtorrent
merged RC_1_1 into master
commit 77ce318a84
@@ -581,8 +581,6 @@ namespace libtorrent
 		char* allocate_disk_buffer(bool& exceeded
 			, boost::shared_ptr<disk_observer> o
 			, char const* category) TORRENT_OVERRIDE;
-		char* async_allocate_disk_buffer(char const* category
-			, boost::function<void(char*)> const& handler) TORRENT_OVERRIDE;
 		void reclaim_block(block_cache_reference ref) TORRENT_OVERRIDE;

 		bool exceeded_cache_use() const

@@ -58,8 +58,6 @@ namespace libtorrent
 		virtual char* allocate_disk_buffer(bool& exceeded
 			, boost::shared_ptr<disk_observer> o
 			, char const* category) = 0;
-		virtual char* async_allocate_disk_buffer(char const* category
-			, boost::function<void(char*)> const& handler) = 0;
 	protected:
 		~buffer_allocator_interface() {}
 	};

@@ -75,11 +75,6 @@ namespace libtorrent
 		bool is_disk_buffer(char* buffer) const;
 #endif

-		// tries to allocate a disk buffer. If the cache is full, this function will
-		// return NULL and call the disk_observer once a buffer becomes available
-		char* async_allocate_buffer(char const* category
-			, boost::function<void(char*)> const& handler);
-
 		char* allocate_buffer(char const* category);
 		char* allocate_buffer(bool& exceeded, boost::shared_ptr<disk_observer> o
 			, char const* category);

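With the async_allocate_buffer() overload removed, the only back-pressure path left in disk_buffer_pool is the disk_observer one kept above: allocate_buffer() reports cache pressure through `exceeded`, and the registered observer is notified later once buffers are freed. Below is a minimal caller-side sketch of that remaining path. `my_observer` and `try_allocate` are hypothetical names for illustration, and the behaviour that allocate_buffer() still hands back a buffer while setting `exceeded` is inferred from the surrounding code rather than shown in this hunk.

// Hypothetical illustration only -- not part of this commit.
#include <boost/shared_ptr.hpp>
#include <boost/make_shared.hpp>
#include "libtorrent/disk_observer.hpp"
#include "libtorrent/disk_buffer_pool.hpp"

struct my_observer : libtorrent::disk_observer
{
	// invoked on the network thread once buffers become available again
	virtual void on_disk() { /* retry whatever needed a buffer */ }
};

char* try_allocate(libtorrent::disk_buffer_pool& pool)
{
	bool exceeded = false;
	boost::shared_ptr<my_observer> obs = boost::make_shared<my_observer>();
	char* buf = pool.allocate_buffer(exceeded, obs, "receive buffer");
	if (exceeded)
	{
		// the pool is over its watermark: stop asking for more buffers
		// and wait for obs->on_disk() before continuing
	}
	return buf;
}
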
@@ -134,12 +129,8 @@ namespace libtorrent
 		// adding up callbacks to this queue. Once the number
 		// of buffers in use drops below the low watermark,
 		// we start calling these functions back
-		// TODO: try to remove the observers, only using the async_allocate handlers
 		std::vector<boost::weak_ptr<disk_observer> > m_observers;

-		// these handlers are executed when a new buffer is available
-		std::vector<handler_t> m_handlers;
-
 		// callback used to tell the cache it needs to free up some blocks
 		boost::function<void()> m_trigger_cache_trim;


@@ -361,7 +361,6 @@ namespace libtorrent
 		void trigger_cache_trim();
 		char* allocate_disk_buffer(bool& exceeded, boost::shared_ptr<disk_observer> o
 			, char const* category) TORRENT_OVERRIDE;
-		char* async_allocate_disk_buffer(char const* category, boost::function<void(char*)> const& handler) TORRENT_OVERRIDE;

 		bool exceeded_cache_use() const
 		{ return m_disk_cache.exceeded_max_size(); }

@@ -67,17 +67,8 @@ namespace libtorrent
 	namespace {

 	// this is posted to the network thread
-	void watermark_callback(std::vector<boost::weak_ptr<disk_observer> >* cbs
-		, std::vector<disk_buffer_pool::handler_t>* handlers)
+	void watermark_callback(std::vector<boost::weak_ptr<disk_observer> >* cbs)
 	{
-		if (handlers)
-		{
-			for (std::vector<disk_buffer_pool::handler_t>::iterator i = handlers->begin()
-				, end(handlers->end()); i != end; ++i)
-				i->callback(i->buffer);
-			delete handlers;
-		}
-
 		if (cbs != NULL)
 		{
 			for (std::vector<boost::weak_ptr<disk_observer> >::iterator i = cbs->begin()

@@ -147,7 +138,7 @@ namespace libtorrent
 		mutex::scoped_lock l(m_pool_mutex);

 		if (m_exceeded_max_size)
-			ret = m_in_use - (std::min)(m_low_watermark, int(m_max_use - (m_observers.size() + m_handlers.size())*2));
+			ret = m_in_use - (std::min)(m_low_watermark, int(m_max_use - m_observers.size()*2));

 		if (m_in_use + num_needed > m_max_use)
 			ret = (std::max)(ret, int(m_in_use + num_needed - m_max_use));

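The only functional change in this hunk is that the eviction target no longer reserves space for queued handlers, only for observers. A small standalone illustration of the arithmetic with made-up numbers follows; the function and parameter names are descriptive stand-ins, and the variable meanings (blocks in use, hard cap, low watermark to drain down to, number of waiting observers) are inferred from context, not stated in the diff.

// Hypothetical illustration of the eviction-count arithmetic above.
#include <algorithm>
#include <cstdio>

int num_to_evict(int in_use, int max_use, int low_watermark
	, int num_observers, int num_needed, bool exceeded_max_size)
{
	int ret = 0;
	if (exceeded_max_size)
		// reserve two blocks per waiting observer below the cap; the removed
		// m_handlers term no longer adds to this reservation
		ret = in_use - (std::min)(low_watermark, max_use - num_observers * 2);
	if (in_use + num_needed > max_use)
		ret = (std::max)(ret, in_use + num_needed - max_use);
	return ret;
}

int main()
{
	// 600 blocks out, cap 512, low watermark 256, 4 observers, 16 more needed:
	// evict max(600 - min(256, 504), 616 - 512) = max(344, 104) = 344 blocks
	std::printf("%d\n", num_to_evict(600, 512, 256, 4, 16, true));
	return 0;
}
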
@@ -169,49 +160,11 @@ namespace libtorrent

 		m_exceeded_max_size = false;

-		// if slice is non-NULL, only some of the handlers got a buffer
-		// back, and the slice should be posted back to the network thread
-		std::vector<handler_t>* slice = NULL;
-
-		for (std::vector<handler_t>::iterator i = m_handlers.begin()
-			, end(m_handlers.end()); i != end; ++i)
-		{
-			i->buffer = allocate_buffer_impl(l, i->category);
-			if (!m_exceeded_max_size || i == end - 1) continue;
-
-			// only some of the handlers got buffers. We need to slice the vector
-			slice = new std::vector<handler_t>();
-			slice->insert(slice->end(), m_handlers.begin(), i + 1);
-			m_handlers.erase(m_handlers.begin(), i + 1);
-			break;
-		}
-
-		if (slice != NULL)
-		{
-			l.unlock();
-			m_ios.post(boost::bind(&watermark_callback
-				, static_cast<std::vector<boost::weak_ptr<disk_observer> >*>(NULL)
-				, slice));
-			return;
-		}
-
-		std::vector<handler_t>* handlers = new std::vector<handler_t>();
-		handlers->swap(m_handlers);
-
-		if (m_exceeded_max_size)
-		{
-			l.unlock();
-			m_ios.post(boost::bind(&watermark_callback
-				, static_cast<std::vector<boost::weak_ptr<disk_observer> >*>(NULL)
-				, handlers));
-			return;
-		}
-
 		std::vector<boost::weak_ptr<disk_observer> >* cbs
 			= new std::vector<boost::weak_ptr<disk_observer> >();
 		m_observers.swap(*cbs);
 		l.unlock();
-		m_ios.post(boost::bind(&watermark_callback, cbs, handlers));
+		m_ios.post(boost::bind(&watermark_callback, cbs));
 	}

 #if TORRENT_USE_ASSERTS

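With the handler slicing gone, this function has exactly one thing to hand back to the network thread: the snapshot of m_observers swapped into `cbs`. The body of watermark_callback() is cut off in the earlier hunk, so the sketch below condenses what the remaining flow amounts to; disk_observer::on_disk() is recalled from libtorrent's disk_observer.hpp rather than shown in this diff, and a simplified stand-in disk_observer is used so the snippet is self-contained.

// Hypothetical condensation -- not a verbatim copy of the merged file.
#include <vector>
#include <boost/weak_ptr.hpp>
#include <boost/shared_ptr.hpp>

struct disk_observer // stand-in for libtorrent::disk_observer
{
	virtual void on_disk() = 0;
	virtual ~disk_observer() {}
};

// posted to the network thread with the observer snapshot; it owns the vector
void watermark_callback(std::vector<boost::weak_ptr<disk_observer> >* cbs)
{
	if (cbs != NULL)
	{
		for (std::vector<boost::weak_ptr<disk_observer> >::iterator i = cbs->begin()
			, end(cbs->end()); i != end; ++i)
		{
			boost::shared_ptr<disk_observer> o = i->lock();
			if (o) o->on_disk(); // observer may have been destroyed meanwhile
		}
		delete cbs;
	}
}

The producer side is what the hunk already shows: m_observers is swapped into a heap-allocated vector, the pool mutex is released, and the vector is posted with m_ios.post(boost::bind(&watermark_callback, cbs)).
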
@@ -251,25 +204,6 @@ namespace libtorrent
 	}
 #endif

-	char* disk_buffer_pool::async_allocate_buffer(char const* category
-		, boost::function<void(char*)> const& handler)
-	{
-		mutex::scoped_lock l(m_pool_mutex);
-		if (m_exceeded_max_size)
-		{
-			m_handlers.push_back(handler_t());
-			handler_t& h = m_handlers.back();
-			h.category = category;
-			h.callback = handler;
-			h.buffer = NULL;
-			return NULL;
-		}
-
-		char* ret = allocate_buffer_impl(l, category);
-
-		return ret;
-	}
-
 	char* disk_buffer_pool::allocate_buffer(char const* category)
 	{
 		mutex::scoped_lock l(m_pool_mutex);

@@ -261,10 +261,6 @@ namespace libtorrent
 		}
 	}

-	char* disk_io_thread::async_allocate_disk_buffer(char const* category
-		, boost::function<void(char*)> const& handler)
-	{ return m_disk_cache.async_allocate_buffer(category, handler); }
-
 	void disk_io_thread::reclaim_block(block_cache_reference ref)
 	{
 		TORRENT_ASSERT(m_magic == 0x1337);

@@ -6289,28 +6289,6 @@ namespace libtorrent
 			// cache space right now

 			if (m_channel_state[download_channel] & peer_info::bw_disk) return false;
-/*
-			// if we already have a disk buffer, we might as well use it
-			// if contiguous recv buffer is true, don't apply this logic, but
-			// actually wait until we try to allocate a buffer and exceed the limit
-			if (m_disk_recv_buffer == NULL
-				&& !m_settings.get_bool(settings_pack::contiguous_recv_buffer))
-			{
-				m_disk_recv_buffer.reset(m_ses.async_allocate_disk_buffer("receive buffer",
-					boost::bind(&peer_connection::on_allocate_disk_buffer, self(), _1, #error buffer_size)));
-
-				if (m_disk_recv_buffer == NULL)
-				{
-					m_counters.inc_stats_counter(counters::num_peers_down_disk);
-					const_cast<peer_connection*>(this)->m_channel_state[download_channel] |= peer_info::bw_disk;
-
-#ifndef TORRENT_DISABLE_LOGGING
-					peer_log(peer_log_alert::info, "DISK", "exceeded disk buffer watermark");
-#endif
-					return false;
-				}
-			}
-*/
 		}

 		return !m_connecting && !m_disconnecting;

@@ -6834,12 +6834,6 @@ namespace aux {
 		return m_disk_thread.allocate_disk_buffer(category);
 	}

-	char* session_impl::async_allocate_disk_buffer(char const* category
-		, boost::function<void(char*)> const& handler)
-	{
-		return m_disk_thread.async_allocate_disk_buffer(category, handler);
-	}
-
 	void session_impl::free_disk_buffer(char* buf)
 	{
 		m_disk_thread.free_disk_buffer(buf);