#!/usr/bin/env python
# Copyright (c) 2016, Arvid Norberg
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the distribution.
# * Neither the name of the author nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# this script can parse and generate reports from the alert log from a
# libtorrent session
import os, sys, time, math

from multiprocessing.pool import ThreadPool

# worker pool used to run gnuplot over the generated scripts in parallel
thread_pool = ThreadPool(8)

# all output (counters.dat, gnuplot scripts, rendered PNGs, index.html)
# is written into this directory
output_dir = 'session_stats_report'

# the alert log to parse is passed as the first command line argument
stat = open(sys.argv[1])
line = stat.readline()

# skip ahead to the stats header line, which names every counter column
while not 'session stats header:' in line:
	if line == '':
		# EOF without a header would previously spin forever; bail out
		sys.stderr.write('no session stats header found in log\n')
		sys.exit(1)
	line = stat.readline()

keys = line.split('session stats header:')[1].strip().split(', ')

try: os.mkdir(output_dir)
except: pass

# extract the raw counter samples into a TSV file gnuplot can read.
# column 1 is a running sample index, the rest follow the order of "keys"
data_out = open(os.path.join(output_dir, 'counters.dat'), 'w+')

idx = 0
for l in stat:
	if not 'session stats (' in l: continue
	data_out.write(("%d\t" % idx) + l.split(' values): ')[1].strip().replace(', ', '\t') + '\n')
	idx += 1
data_out.close()
# graph types understood by gen_report()
line_graph = 0
histogram = 1
stacked = 2
diff = 3

graph_colors = []

# base color pattern: blue, green, red, magenta, cyan, yellow
pattern = [[0, 0, 1], [0, 1, 0], [1, 0, 0], [1, 0, 1], [0, 1, 1], [1, 1, 0]]

def process_color(c, op):
	# lighten (op == 0) or darken (op == 2) an RGB triplet in place,
	# clamping each channel to [0, 255]; op == 1 leaves it untouched.
	# returns the same list for convenience
	for i in range(3):
		if op == 0:
			c[i] = min(255, c[i] + 0xb0)
		if op == 2:
			c[i] = max(0, c[i] - 0x50)
	return c

# build 18 colors: a light, a saturated and a dark variant of each
# pattern entry. the inner loop variable used to shadow the outer "i"
for i in range(0, len(pattern) * 3):
	op = i // len(pattern)
	c = list(pattern[i % len(pattern)])
	for ch in range(3): c[ch] *= 0xff
	c = process_color(c, op)
	c = '#%02x%02x%02x' % (c[0], c[1], c[2])
	graph_colors.append(c)

line_colors = list(graph_colors)
line_colors.reverse()
def _sine_gradient(count):
	# build "count" colors by sweeping sine waves (phase-shifted per
	# channel) through red, green and blue. used to be two copy-pasted
	# loops for 16 and 18 steps
	colors = []
	pi = 3.1415927
	for i in range(0, count):
		f = i / float(count)
		r = max(int(255 * (math.sin(f * pi) + 0.2)), 0)
		g = max(int(255 * (math.sin((f - 0.5) * pi) + 0.2)), 0)
		b = max(int(255 * (math.sin((f + 0.5) * pi) + 0.2)), 0)
		colors.append('#%02x%02x%02x' % (min(r, 255), min(g, 255), min(b, 255)))
	return colors

gradient16_colors = _sine_gradient(16)
gradient18_colors = _sine_gradient(18)

# simple red -> green ramp with a constant blue component
gradient6_colors = []
for i in range(0, 6):
	f = i / 6.
	c = '#%02x%02x%02x' % (min(int(255 * (-2 * f + 2)), 255), min(int(255 * (2 * f)), 255), 100)
	gradient6_colors.append(c)
def plot_fun ( script ) :
2014-07-06 21:18:00 +02:00
ret = os . system ( ' gnuplot " %s " 2>/dev/null ' % script )
if ret != 0 and ret != 256 :
print ' system: %d \n ' % ret
raise Exception ( " abort " )
2012-03-11 06:35:29 +01:00
sys . stdout . write ( ' . ' )
sys . stdout . flush ( )
def to_title(key):
	# turn a counter key such as 'peer.num_peers_connected' into a
	# human-readable graph title
	words = key.replace('_', ' ')
	return words.replace('.', '-')
def gen_report(name, unit, lines, short_unit, generation, log_file, options):
	# generate one gnuplot script that renders the counters named in
	# "lines" (columns of log_file) as a full-size PNG plus a thumbnail.
	# returns the script path, or None if the images are already up to date.
	#   name       - base name for the output files
	#   unit       - y-axis label
	#   lines      - list of counter keys, looked up in the global "keys"
	#   short_unit - unit suffix appended to y-axis tic labels
	#   generation - sequence number baked into the file names
	#   log_file   - the counters.dat file extracted from the alert log
	#   options    - dict: 'type' selects the graph style (line_graph/
	#                histogram/stacked/diff), 'colors' an optional palette,
	#                'allow-negative' skips clamping the y range at 0

	filename = os.path.join(output_dir, '%s_%04d.png' % (name, generation))
	thumb = os.path.join(output_dir, '%s_%04d_thumb.png' % (name, generation))

	# don't re-render a graph unless the logfile has changed
	try:
		dst1 = os.stat(filename)
		dst2 = os.stat(thumb)
		src = os.stat(log_file)
		if dst1.st_mtime > src.st_mtime and dst2.st_mtime > src.st_mtime:
			sys.stdout.write('.')
			return None
	except: pass  # output doesn't exist yet; render it

	script = os.path.join(output_dir, '%s_%04d.gnuplot' % (name, generation))
	out = open(script, 'wb')
	print >>out, "set term png size 1200,700"
	print >>out, 'set output "%s"' % filename
	if not 'allow-negative' in options:
		print >>out, 'set yrange [0:*]'
	print >>out, "set tics nomirror"
	print >>out, "set key box"
	print >>out, "set key left top"

	# pick the color palette for this graph
	colors = graph_colors
	if options['type'] == line_graph:
		colors = line_colors
	try:
		if options['colors'] == 'gradient16':
			colors = gradient16_colors
		elif options['colors'] == 'gradient6':
			colors = gradient6_colors
		if options['colors'] == 'gradient18':
			colors = gradient18_colors
	except: pass  # no 'colors' option; keep the default palette

	if options['type'] == histogram:
		# distribution of a single counter, bucketed into "numbins" bins
		binwidth = options['binwidth']
		numbins = int(options['numbins'])
		print >>out, 'binwidth=%f' % binwidth
		print >>out, 'set boxwidth binwidth'
		print >>out, 'bin(x,width)=width*floor(x/width) + binwidth/2'
		print >>out, 'set xrange [0:%f]' % (binwidth * numbins)
		print >>out, 'set xlabel "%s"' % unit
		print >>out, 'set ylabel "number"'

		k = lines[0]
		try:
			# +2: gnuplot columns are 1-based and column 1 is the sample index
			column = keys.index(k) + 2
		except:
			print '"%s" not found' % k
			return
		print >>out, 'plot "%s" using (bin($%d,binwidth)):(1.0) smooth freq with boxes' % (log_file, column)
		print >>out, ''
		print >>out, ''
		print >>out, ''
	elif options['type'] == stacked:
		# counters drawn as filled areas stacked on top of each other: each
		# curve plots the running sum ("graph") of its own and all previous
		# columns, and the plot expression is built up in reverse so the
		# tallest sum is drawn first and doesn't cover the others
		print >>out, 'set xrange [0:*]'
		print >>out, 'set ylabel "%s"' % unit
		print >>out, 'set xlabel "time (s)"'
		print >>out, 'set format y "%%.1s%%c%s";' % short_unit
		print >>out, 'set style fill solid 1.0 noborder'
		print >>out, 'plot',
		column = 2
		first = True
		graph = ''
		plot_expression = ''
		color = 0
		for k in lines:
			try:
				column = keys.index(k) + 2
			except:
				print '"%s" not found' % k
				continue;
			if not first:
				plot_expression = ', ' + plot_expression
				graph += '+'
			axis = 'x1y1'
			graph += '$%d' % column
			plot_expression = ' "%s" using 1:(%s) title "%s" axes %s with filledcurves x1 lc rgb "%s"' % (log_file, graph, to_title(k), axis, colors[color % len(colors)]) + plot_expression
			first = False
			color += 1
		print >>out, plot_expression
	elif options['type'] == diff:
		# single curve: the first counter minus all the following ones
		# (e.g. requests received minus pieces sent)
		print >>out, 'set xrange [0:*]'
		print >>out, 'set ylabel "%s"' % unit
		print >>out, 'set xlabel "time (s)"'
		print >>out, 'set format y "%%.1s%%c%s";' % short_unit
		column = 2
		first = True
		graph = ''
		title = ''
		for k in lines:
			try:
				column = keys.index(k) + 2
			except:
				print '"%s" not found' % k
				continue;
			if not first:
				graph += '-'
				title += ' - '
			graph += '$%d' % column
			title += to_title(k)
			first = False
		print >>out, 'plot "%s" using 1:(%s) title "%s" with step' % (log_file, graph, title)
	else:
		# plain line graph: one "steps" curve per counter
		print >>out, 'set xrange [0:*]'
		print >>out, 'set ylabel "%s"' % unit
		print >>out, 'set xlabel "time (s)"'
		print >>out, 'set format y "%%.1s%%c%s";' % short_unit
		print >>out, 'plot',
		column = 2
		first = True
		color = 0
		for k in lines:
			try:
				column = keys.index(k) + 2
			except:
				print '"%s" not found' % k
				continue;
			if not first: print >>out, ',',
			axis = 'x1y1'
			print >>out, ' "%s" using 1:%d title "%s" axes %s with steps lc rgb "%s"' % (log_file, column, to_title(k), axis, colors[color % len(colors)]),
			first = False
			color += 1
		print >>out, ''

	# second rendering pass in the same script: replot everything small
	# and undecorated to produce the thumbnail
	print >>out, 'set term png size 150,100'
	print >>out, 'set output "%s"' % thumb
	print >>out, 'set key off'
	print >>out, 'unset tics'
	print >>out, 'set format x ""'
	print >>out, 'set format y ""'
	print >>out, 'set xlabel ""'
	print >>out, 'set ylabel ""'
	print >>out, 'set y2label ""'
	print >>out, 'set rmargin 0'
	print >>out, 'set lmargin 0'
	print >>out, 'set tmargin 0'
	print >>out, 'set bmargin 0'
	print >>out, "replot"

	out.close()
	return script
def gen_html(reports, generations):
	# write index.html into output_dir, one row per report: its name,
	# description and a thumbnail per generation linking to the full PNG.
	# renamed the local from "file" to avoid shadowing the builtin
	out = open(os.path.join(output_dir, 'index.html'), 'w+')

	css = '''img { margin: 0}
#head { display: block }
#graphs { white-space:nowrap; }
h1 { line-height: 1; display: inline }
h2 { line-height: 1; display: inline; font-size: 1em; font-weight: normal};'''

	print >>out, '<html><head><style type="text/css">%s</style></head><body>' % css

	for i in reports:
		# i[0] is the report name, i[3] its description
		print >>out, '<div id="head"><h1>%s </h1><h2>%s</h2><div><div id="graphs">' % (i[0], i[3])
		for g in generations:
			print >>out, '<a href="%s_%04d.png"><img src="%s_%04d_thumb.png"></a>' % (i[0], g, i[0], g)
		print >>out, '</div>'

	print >>out, '</body></html>'
	out.close()
# each report is a tuple:
#   (name, y-axis label, y-axis unit suffix, description,
#    [counter keys], {options})
# the trailing options dict is optional; 'type' selects the graph style
# (line_graph is the default), 'colors' picks a palette and
# 'allow-negative' disables clamping the y range at zero
reports = [
	('torrents', 'num', '', 'number of torrents in different torrent states', [
		'ses.num_downloading_torrents',
		'ses.num_seeding_torrents',
		'ses.num_checking_torrents',
		'ses.num_stopped_torrents',
		'ses.num_upload_only_torrents',
		'ses.num_error_torrents',
		'ses.num_queued_seeding_torrents',
		'ses.num_queued_download_torrents'
	], {'type': stacked}),
	('peers', 'num', '', 'num connected peers', ['peer.num_peers_connected', 'peer.num_peers_half_open'], {'type': stacked}),
	('peers_max', 'num', '', 'num connected peers', ['peer.num_peers_connected', 'peer.num_peers_half_open']),
	('peer_churn', 'num', '', 'connecting and disconnecting peers', ['peer.num_peers_half_open', 'peer.connection_attempts']),
	('new_peers', 'num', '', '', ['peer.incoming_connections', 'peer.connection_attempts']),
	('connection_attempts', 'num', '', '', ['peer.connection_attempt_loops', 'peer.connection_attempts']),
	('pieces', 'num', '', 'number completed pieces', ['ses.num_total_pieces_added', 'ses.num_piece_passed', 'ses.num_piece_failed']),
	('disk_write_queue', 'Bytes', 'B', 'bytes queued up by peers, to be written to disk', ['disk.queued_write_bytes']),
	('peers_requests', 'num', '', 'incoming piece request rate', [
		'peer.piece_requests',
		'peer.max_piece_requests',
		'peer.invalid_piece_requests',
		'peer.choked_piece_requests',
		'peer.cancelled_piece_requests'
	]),
	('peers_upload', 'num', '', 'number of peers by state wrt. uploading', [
		'peer.num_peers_up_disk',
		'peer.num_peers_up_interested',
		'peer.num_peers_up_unchoked_all',
		'peer.num_peers_up_unchoked_optimistic',
		'peer.num_peers_up_unchoked',
		'peer.num_peers_up_requests'
	]),
	('peers_download', 'num', '', 'number of peers by state wrt. downloading', [
		'peer.num_peers_down_interested',
		'peer.num_peers_down_unchoked',
		'peer.num_peers_down_requests',
		'peer.num_peers_down_disk'
	]),
	('peer_errors', 'num', '', 'number of peers by error that disconnected them', [
		'peer.disconnected_peers',
		'peer.eof_peers',
		'peer.connreset_peers',
		'peer.connrefused_peers',
		'peer.connaborted_peers',
		'peer.perm_peers',
		'peer.buffer_peers',
		'peer.unreachable_peers',
		'peer.broken_pipe_peers',
		'peer.addrinuse_peers',
		'peer.no_access_peers',
		'peer.invalid_arg_peers',
		'peer.aborted_peers'
	], {'type': stacked}),
	('peer_errors_incoming', 'num', '', 'number of peers by incoming or outgoing connection', [
		'peer.error_incoming_peers',
		'peer.error_outgoing_peers'
	]),
	('peer_errors_transport', 'num', '', 'number of peers by transport protocol', [
		'peer.error_tcp_peers',
		'peer.error_utp_peers'
	]),
	('peer_errors_encryption', 'num', '', 'number of peers by encryption level', [
		'peer.error_encrypted_peers',
		'peer.error_rc4_peers',
	]),
	('incoming requests', 'num', '', 'incoming 16kiB block requests', ['ses.num_incoming_request']),
	('waste', 'downloaded bytes', 'B', 'proportion of all downloaded bytes that were wasted', [
		'net.recv_failed_bytes',
		'net.recv_redundant_bytes',
		'net.recv_ip_overhead_bytes'
	], {'type': stacked}),
	('waste by source', 'num wasted bytes', 'B', 'what is causing the waste', [
		'ses.waste_piece_timed_out',
		'ses.waste_piece_cancelled',
		'ses.waste_piece_unknown',
		'ses.waste_piece_seed',
		'ses.waste_piece_end_game',
		'ses.waste_piece_closing'
	], {'type': stacked}),
	('disk_time', '% of total disk job time', '%%', 'proportion of time spent by the disk thread', ['disk.disk_read_time', 'disk.disk_write_time', 'disk.disk_hash_time'], {'type': stacked}),
	('disk_cache_hits', 'blocks (16kiB)', '', '', ['disk.num_blocks_read', 'disk.num_blocks_cache_hits'], {'type': stacked}),
	('disk_cache', 'blocks (16kiB)', '', 'disk cache size and usage', ['disk.disk_blocks_in_use', 'disk.read_cache_blocks', 'disk.write_cache_blocks', 'disk.pinned_blocks']),
	('disk_readback', '% of written blocks', '%%', 'portion of written blocks that had to be read back for hash verification', ['disk.num_read_back']),
	('disk_queue', 'number of queued disk jobs', '', 'num disk jobs', ['disk.num_write_jobs', 'disk.num_read_jobs', 'disk.num_jobs', 'disk.queued_disk_jobs', 'disk.blocked_disk_jobs']),
	('disk fences', 'num', '', 'number of jobs currently blocked by a fence job', ['disk.blocked_disk_jobs']),
#	('fence jobs', 'num', '', 'active fence jobs per type', ['move_storage', 'release_files', 'delete_files', 'check_fastresume', 'save_resume_data', 'rename_file', 'stop_torrent', 'file_priority', 'clear_piece'], {'type':stacked}),
	('disk threads', 'num', '', 'number of disk threads currently writing', ['disk.num_writing_threads', 'disk.num_running_threads']),
#	('mixed mode', 'rate', 'B/s', 'rates by transport protocol', ['TCP up rate','TCP down rate','uTP up rate','uTP down rate','TCP up limit','TCP down limit']),
	('connection_type', 'num', '', 'peers by transport protocol', [
		'peer.num_tcp_peers',
		'peer.num_socks5_peers',
		'peer.num_http_proxy_peers',
		'peer.num_utp_peers',
		'peer.num_i2p_peers',
		'peer.num_ssl_peers',
		'peer.num_ssl_socks5_peers',
		'peer.num_ssl_http_proxy_peers',
		'peer.num_ssl_utp_peers'
	]),
#	('uTP delay', 'buffering delay', 's', 'network delays measured by uTP', ['uTP peak send delay','uTP peak recv delay', 'uTP avg send delay', 'uTP avg recv delay']),
#	('uTP send delay histogram', 'buffering delay', 's', 'send delays measured by uTP', ['uTP avg send delay'], {'type': histogram, 'binwidth': 0.05, 'numbins': 100}),
#	('uTP recv delay histogram', 'buffering delay', 's', 'receive delays measured by uTP', ['uTP avg recv delay'], {'type': histogram, 'binwidth': 0.05, 'numbins': 100}),
	('uTP stats', 'num', '', 'number of uTP events', [
		'utp.utp_packet_loss',
		'utp.utp_timeout',
		'utp.utp_packets_in',
		'utp.utp_packets_out',
		'utp.utp_fast_retransmit',
		'utp.utp_packet_resend',
		'utp.utp_samples_above_target',
		'utp.utp_samples_below_target',
		'utp.utp_payload_pkts_in',
		'utp.utp_payload_pkts_out',
		'utp.utp_invalid_pkts_in',
		'utp.utp_redundant_pkts_in'
	], {'type': stacked}),
	('boost.asio messages', 'num events', '', 'number of messages posted', [
		'net.on_read_counter',
		'net.on_write_counter',
		'net.on_tick_counter',
		'net.on_lsd_counter',
		'net.on_lsd_peer_counter',
		'net.on_udp_counter',
		'net.on_accept_counter',
		'net.on_disk_counter'
	], {'type': stacked}),
	('send_buffer_sizes', 'num', '', '', [
		'sock_bufs.socket_send_size3',
		'sock_bufs.socket_send_size4',
		'sock_bufs.socket_send_size5',
		'sock_bufs.socket_send_size6',
		'sock_bufs.socket_send_size7',
		'sock_bufs.socket_send_size8',
		'sock_bufs.socket_send_size9',
		'sock_bufs.socket_send_size10',
		'sock_bufs.socket_send_size11',
		'sock_bufs.socket_send_size12',
		'sock_bufs.socket_send_size13',
		'sock_bufs.socket_send_size14',
		'sock_bufs.socket_send_size15',
		'sock_bufs.socket_send_size16',
		'sock_bufs.socket_send_size17',
		'sock_bufs.socket_send_size18',
		'sock_bufs.socket_send_size19',
		'sock_bufs.socket_send_size20'
	], {'type': stacked, 'colors': 'gradient18'}),
	('recv_buffer_sizes', 'num', '', '', [
		'sock_bufs.socket_recv_size3',
		'sock_bufs.socket_recv_size4',
		'sock_bufs.socket_recv_size5',
		'sock_bufs.socket_recv_size6',
		'sock_bufs.socket_recv_size7',
		'sock_bufs.socket_recv_size8',
		'sock_bufs.socket_recv_size9',
		'sock_bufs.socket_recv_size10',
		'sock_bufs.socket_recv_size11',
		'sock_bufs.socket_recv_size12',
		'sock_bufs.socket_recv_size13',
		'sock_bufs.socket_recv_size14',
		'sock_bufs.socket_recv_size15',
		'sock_bufs.socket_recv_size16',
		'sock_bufs.socket_recv_size17',
		'sock_bufs.socket_recv_size18',
		'sock_bufs.socket_recv_size19',
		'sock_bufs.socket_recv_size20'
	], {'type': stacked, 'colors': 'gradient18'}),
	('ARC', 'num pieces', '', '', [
		'disk.arc_mru_ghost_size',
		'disk.arc_mru_size',
		'disk.arc_volatile_size',
		'disk.arc_mfu_size',
		'disk.arc_mfu_ghost_size'
	], {'allow-negative': True}),
	('torrent churn', 'num torrents', '', '', ['ses.num_loaded_torrents', 'ses.num_pinned_torrents', 'ses.torrent_evicted_counter']),
	('pinned torrents', 'num torrents', '', '', ['ses.num_pinned_torrents']),
	('loaded torrents', 'num torrents', '', '', ['ses.num_loaded_torrents', 'ses.num_pinned_torrents']),
	('request latency', 'us', '', 'latency from receiving requests to sending response', ['disk.request_latency']),
	('incoming messages', 'num', '', 'number of received bittorrent messages, by type', [
		'ses.num_incoming_choke',
		'ses.num_incoming_unchoke',
		'ses.num_incoming_interested',
		'ses.num_incoming_not_interested',
		'ses.num_incoming_have',
		'ses.num_incoming_bitfield',
		'ses.num_incoming_request',
		'ses.num_incoming_piece',
		'ses.num_incoming_cancel',
		'ses.num_incoming_dht_port',
		'ses.num_incoming_suggest',
		'ses.num_incoming_have_all',
		'ses.num_incoming_have_none',
		'ses.num_incoming_reject',
		'ses.num_incoming_allowed_fast',
		'ses.num_incoming_ext_handshake',
		'ses.num_incoming_pex',
		'ses.num_incoming_metadata',
		'ses.num_incoming_extended'
	], {'type': stacked}),
	('outgoing messages', 'num', '', 'number of sent bittorrent messages, by type', [
		'ses.num_outgoing_choke',
		'ses.num_outgoing_unchoke',
		'ses.num_outgoing_interested',
		'ses.num_outgoing_not_interested',
		'ses.num_outgoing_have',
		'ses.num_outgoing_bitfield',
		'ses.num_outgoing_request',
		'ses.num_outgoing_piece',
		'ses.num_outgoing_cancel',
		'ses.num_outgoing_dht_port',
		'ses.num_outgoing_suggest',
		'ses.num_outgoing_have_all',
		'ses.num_outgoing_have_none',
		'ses.num_outgoing_reject',
		'ses.num_outgoing_allowed_fast',
		'ses.num_outgoing_ext_handshake',
		'ses.num_outgoing_pex',
		'ses.num_outgoing_metadata',
		'ses.num_outgoing_extended'
	], {'type': stacked}),
	('request in balance', 'num', '', 'request and piece message balance', [
		'ses.num_incoming_request',
		'ses.num_outgoing_piece',
		'ses.num_outgoing_reject',
	], {'type': diff}),
	('request out balance', 'num', '', 'request and piece message balance', [
		'ses.num_outgoing_request',
		'ses.num_incoming_piece',
		'ses.num_incoming_reject',
	], {'type': diff}),

	#somewhat uninteresting stats
#	('peer_dl_rates', 'num', '', 'peers split into download rate buckets', ['peers down 0', 'peers down 0-2', 'peers down 2-5', 'peers down 5-10', 'peers down 50-100', 'peers down 100-'], {'type':stacked, 'colors':'gradient6'}),
#	('peer_dl_rates2', 'num', '', 'peers split into download rate buckets (only downloading peers)', ['peers down 0-2', 'peers down 2-5', 'peers down 5-10', 'peers down 50-100', 'peers down 100-'], {'type':stacked, 'colors':'gradient6'}),
#	('peer_ul_rates', 'num', '', 'peers split into upload rate buckets', ['peers up 0', 'peers up 0-2', 'peers up 2-5', 'peers up 5-10', 'peers up 50-100', 'peers up 100-'], {'type':stacked, 'colors':'gradient6'}),
#	('peer_ul_rates2', 'num', '', 'peers split into upload rate buckets (only uploading peers)', ['peers up 0-2', 'peers up 2-5', 'peers up 5-10', 'peers up 50-100', 'peers up 100-'], {'type':stacked, 'colors':'gradient6'}),
	('piece_picker_invocations', 'invocations of piece picker', '', '', [
		'picker.reject_piece_picks',
		'picker.unchoke_piece_picks',
		'picker.incoming_redundant_piece_picks',
		'picker.incoming_piece_picks',
		'picker.end_game_piece_picks',
		'picker.snubbed_piece_picks',
		'picker.interesting_piece_picks',
		'picker.hash_fail_piece_picks'
	], {'type': stacked}),
	('piece_picker_loops', 'loops through piece picker', '', '', [
		'picker.piece_picker_partial_loops',
		'picker.piece_picker_suggest_loops',
		'picker.piece_picker_sequential_loops',
		'picker.piece_picker_reverse_rare_loops',
		'picker.piece_picker_rare_loops',
		'picker.piece_picker_rand_start_loops',
		'picker.piece_picker_rand_loops',
		'picker.piece_picker_busy_loops'
	], {'type': stacked}),
#	('picker_full_partials_distribution', 'full pieces', '', '', ['num full partial pieces'], {'type': histogram, 'binwidth': 5, 'numbins': 120}),
#	('picker_partials_distribution', 'partial pieces', '', '', ['num downloading partial pieces'], {'type': histogram, 'binwidth': 5, 'numbins': 120})
]
# main driver: render every report for this (single) generation, then
# run all the gnuplot scripts in parallel and emit the html index
print 'generating graphs'

g = 0
generations = []
scripts = []

# progress bar: gen_report/plot_fun print one '.' per report
print '[%s] %04d\r[' % (' ' * len(reports), g),
for i in reports:
	# the options dict is the optional sixth tuple member
	try: options = i[5]
	except: options = {}
	if not 'type' in options:
		options['type'] = line_graph

	script = gen_report(i[0], i[1], i[4], i[2], g, os.path.join(output_dir, 'counters.dat'), options)
	# gen_report returns None when the images are already up to date
	if script != None: scripts.append(script)

generations.append(g)
g += 1

# run gnuplot on all scripts, in parallel
thread_pool.map(plot_fun, scripts)
scripts = []

print '\ngenerating html'
gen_html(reports, generations)