commit 0f6b4b51e5 (parent 29ce1a3704)

    use proper prefixes in session stats graphs. Makes the results a lot more readable
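The new short_unit argument ends up in a gnuplot "set format y" directive, which makes gnuplot print y-axis tics as an SI-scaled mantissa plus prefix letter instead of a raw number. A minimal sketch of what the script emits (the 'B/s' value is one of the short units added below):

    # Python 2, as in the script: the %% pairs survive Python's string
    # substitution as single % characters for gnuplot to interpret
    short_unit = 'B/s'
    print >>out, "set format y \"%%.0s%%c%s\";" % short_unit
    # written to the gnuplot file as:
    #   set format y "%.0s%cB/s";
    # so a tic at 1500000 renders as roughly "2MB/s" (%s is the scaled
    # mantissa, %c the SI prefix letter: k, M, G, ...)

This is also why the percent-based reports below use '%%' as their short unit: the doubled character has to survive the same % substitution.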
@@ -14,7 +14,7 @@ keys = line.strip().split(':')[1:]
 
 output_dir = 'session_stats_report'
 
-def gen_report(name, unit, lines, generation, log_file):
+def gen_report(name, unit, lines, short_unit, generation, log_file):
 	try:
 		os.mkdir(output_dir)
 	except: pass
@@ -46,6 +46,7 @@ def gen_report(name, unit, lines, generation, log_file):
 	print >>out, "set tics nomirror"
 	print >>out, "set style data lines"
 	print >>out, "set key box"
+	print >>out, "set format y \"%%.0s%%c%s\";" % short_unit
 	print >>out, 'plot',
 	column = 2
 	first = True
@@ -93,7 +94,7 @@ def gen_html(reports, generations):
 	print >>file, '<html><head><style type="text/css">%s</style></head><body>' % css
 
 	for i in reports:
-		print >>file, '<div id="head"><h1>%s </h1><h2>%s</h2><div><div id="graphs">' % (i[0], i[2])
+		print >>file, '<div id="head"><h1>%s </h1><h2>%s</h2><div><div id="graphs">' % (i[0], i[3])
 		for g in generations:
 			print >>file, '<a href="session_stats_%s_%04d.png"><img src="session_stats_%s_%04d_thumb.png"></a>' % (i[0], g, i[0], g)
 		print >>file, '</div>'
@@ -102,32 +103,32 @@ def gen_html(reports, generations):
 	file.close()
 
 reports = [
-	('torrents', 'num', 'number of torrents in different torrent states', ['downloading torrents', 'seeding torrents', 'checking torrents', 'stopped torrents', 'upload-only torrents', 'error torrents']),
-	('peers', 'num', 'num connected peers', ['peers', 'connecting peers', 'connection attempts', 'banned peers', 'max connections']),
-	('connect_candidates', 'num', 'number of peers we know of that we can connect to', ['connect candidates']),
-	('peers_list_size', 'num', 'number of known peers (not necessarily connected)', ['num list peers']),
-	('overall_rates', 'Bytes / second', 'download and upload rates', ['uploaded bytes', 'downloaded bytes', 'upload rate', 'download rate', 'smooth upload rate', 'smooth download rate']),
-	('disk_write_queue', 'Bytes', 'bytes queued up by peers, to be written to disk', ['disk write queued bytes', 'disk queue limit', 'disk queue low watermark']),
-	('peers_upload', 'num', 'number of peers by state wrt. uploading', ['peers up interested', 'peers up unchoked', 'peers up requests', 'peers disk-up', 'peers bw-up', 'max unchoked']),
-	('peers_download', 'num', 'number of peers by state wrt. downloading', ['peers down interesting', 'peers down unchoked', 'peers down requests', 'peers disk-down', 'peers bw-down']),
-	('peer_errors', 'num', 'number of peers by error that disconnected them', ['error peers', 'peer disconnects', 'peers eof', 'peers connection reset', 'connect timeouts', 'uninteresting peers disconnect', 'banned for hash failure']),
-	('waste', '% of all downloaded bytes', 'proportion of all downloaded bytes that were wasted', ['% failed payload bytes', '% wasted payload bytes', '% protocol bytes']),
-	('average_disk_time_absolute', 'microseconds', 'running averages of timings of disk operations', ['disk read time', 'disk write time', 'disk queue time', 'disk hash time', 'disk job time', 'disk sort time']),
-	('disk_time', '% of total disk job time', 'proportion of time spent by the disk thread', ['% read time', '% write time', '% hash time', '% sort time']),
-	('disk_cache_hits', 'blocks (16kiB)', '', ['disk block read', 'read cache hits', 'disk block written', 'disk read back']),
-	('disk_cache', 'blocks (16kiB)', 'disk cache size and usage', ['read disk cache size', 'disk cache size', 'disk buffer allocations', 'cache size']),
-	('disk_readback', '% of written blocks', 'portion of written blocks that had to be read back for hash verification', ['% read back']),
-	('disk_queue', 'number of queued disk jobs', 'queued disk jobs', ['disk queue size', 'disk read queue size', 'read job queue size limit']),
+	('torrents', 'num', '', 'number of torrents in different torrent states', ['downloading torrents', 'seeding torrents', 'checking torrents', 'stopped torrents', 'upload-only torrents', 'error torrents']),
+	('peers', 'num', '', 'num connected peers', ['peers', 'connecting peers', 'connection attempts', 'banned peers', 'max connections']),
+	('connect_candidates', 'num', '', 'number of peers we know of that we can connect to', ['connect candidates']),
+	('peers_list_size', 'num', '', 'number of known peers (not necessarily connected)', ['num list peers']),
+	('overall_rates', 'Bytes / second', 'B/s', 'download and upload rates', ['uploaded bytes', 'downloaded bytes', 'upload rate', 'download rate', 'smooth upload rate', 'smooth download rate']),
+	('disk_write_queue', 'Bytes', 'B', 'bytes queued up by peers, to be written to disk', ['disk write queued bytes', 'disk queue limit', 'disk queue low watermark']),
+	('peers_upload', 'num', '', 'number of peers by state wrt. uploading', ['peers up interested', 'peers up unchoked', 'peers up requests', 'peers disk-up', 'peers bw-up', 'max unchoked']),
+	('peers_download', 'num', '', 'number of peers by state wrt. downloading', ['peers down interesting', 'peers down unchoked', 'peers down requests', 'peers disk-down', 'peers bw-down']),
+	('peer_errors', 'num', '', 'number of peers by error that disconnected them', ['error peers', 'peer disconnects', 'peers eof', 'peers connection reset', 'connect timeouts', 'uninteresting peers disconnect', 'banned for hash failure']),
+	('waste', '% of all downloaded bytes', '%%', 'proportion of all downloaded bytes that were wasted', ['% failed payload bytes', '% wasted payload bytes', '% protocol bytes']),
+	('average_disk_time_absolute', 'microseconds', 'us', 'running averages of timings of disk operations', ['disk read time', 'disk write time', 'disk queue time', 'disk hash time', 'disk job time', 'disk sort time']),
+	('disk_time', '% of total disk job time', '%%', 'proportion of time spent by the disk thread', ['% read time', '% write time', '% hash time', '% sort time']),
+	('disk_cache_hits', 'blocks (16kiB)', '', '', ['disk block read', 'read cache hits', 'disk block written', 'disk read back']),
+	('disk_cache', 'blocks (16kiB)', '', 'disk cache size and usage', ['read disk cache size', 'disk cache size', 'disk buffer allocations', 'cache size']),
+	('disk_readback', '% of written blocks', '%%', 'portion of written blocks that had to be read back for hash verification', ['% read back']),
+	('disk_queue', 'number of queued disk jobs', '', 'queued disk jobs', ['disk queue size', 'disk read queue size', 'read job queue size limit']),
 #	('absolute_waste', 'num', '', ['failed bytes', 'redundant bytes', 'download rate']),
 
 #somewhat uninteresting stats
-	('tick_rate', 'milliseconds between ticks', '', ['tick interval', 'tick residual']),
-	('peer_dl_rates', 'num', 'peers split into download rate buckets', ['peers down 0', 'peers down 0-2', 'peers down 2-5', 'peers down 5-10', 'peers down 50-100', 'peers down 100-']),
-	('peer_dl_rates2', 'num', 'peers split into download rate buckets (only downloading peers)', ['peers down 0-2', 'peers down 2-5', 'peers down 5-10', 'peers down 50-100', 'peers down 100-']),
-	('peer_ul_rates', 'num', 'peers split into upload rate buckets', ['peers up 0', 'peers up 0-2', 'peers up 2-5', 'peers up 5-10', 'peers up 50-100', 'peers up 100-']),
-	('peer_ul_rates2', 'num', 'peers split into upload rate buckets (only uploading peers)', ['peers up 0-2', 'peers up 2-5', 'peers up 5-10', 'peers up 50-100', 'peers up 100-']),
-	('piece_picker_end_game', 'blocks', '', ['end game piece picker blocks', 'piece picker blocks', 'piece picks', 'reject piece picks', 'unchoke piece picks', 'incoming redundant piece picks', 'incoming piece picks', 'end game piece picks', 'snubbed piece picks']),
-	('piece_picker', 'blocks', '', ['piece picks', 'reject piece picks', 'unchoke piece picks', 'incoming redundant piece picks', 'incoming piece picks', 'end game piece picks', 'snubbed piece picks']),
+	('tick_rate', 'milliseconds between ticks', 'ms', '', ['tick interval', 'tick residual']),
+	('peer_dl_rates', 'num', '', 'peers split into download rate buckets', ['peers down 0', 'peers down 0-2', 'peers down 2-5', 'peers down 5-10', 'peers down 50-100', 'peers down 100-']),
+	('peer_dl_rates2', 'num', '', 'peers split into download rate buckets (only downloading peers)', ['peers down 0-2', 'peers down 2-5', 'peers down 5-10', 'peers down 50-100', 'peers down 100-']),
+	('peer_ul_rates', 'num', '', 'peers split into upload rate buckets', ['peers up 0', 'peers up 0-2', 'peers up 2-5', 'peers up 5-10', 'peers up 50-100', 'peers up 100-']),
+	('peer_ul_rates2', 'num', '', 'peers split into upload rate buckets (only uploading peers)', ['peers up 0-2', 'peers up 2-5', 'peers up 5-10', 'peers up 50-100', 'peers up 100-']),
+	('piece_picker_end_game', 'blocks', '', '', ['end game piece picker blocks', 'piece picker blocks', 'piece picks', 'reject piece picks', 'unchoke piece picks', 'incoming redundant piece picks', 'incoming piece picks', 'end game piece picks', 'snubbed piece picks']),
+	('piece_picker', 'blocks', '', '', ['piece picks', 'reject piece picks', 'unchoke piece picks', 'incoming redundant piece picks', 'incoming piece picks', 'end game piece picks', 'snubbed piece picks']),
 ]
 
 print 'generating graphs'
@@ -138,7 +139,7 @@ g = int(log_file_list[1])
 generations = []
 while os.path.exists(os.path.join(log_file_path, log_file)):
 	print '[%s] %04d\r[' % (' ' * len(reports), g),
-	for i in reports: gen_report(i[0], i[1], i[3], g, os.path.join(log_file_path, log_file))
+	for i in reports: gen_report(i[0], i[1], i[4], i[2], g, os.path.join(log_file_path, log_file))
 	print ''
 	generations.append(g)
 	g += 1
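For reference, each entry in reports is now a 5-tuple instead of a 4-tuple. A sketch of the field order and how the main loop unpacks it (the field names here are descriptive labels, not from the script, and the counter list is abbreviated):

    # (name, unit, short_unit, description, counters)
    i = ('overall_rates', 'Bytes / second', 'B/s', 'download and upload rates',
        ['upload rate', 'download rate'])
    # the main loop passes gen_report(i[0], i[1], i[4], i[2], g, log_file),
    # i.e. (name, unit, counters, short_unit, ...), while gen_html reads the
    # page heading from i[3] (the description)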