2015-07-11 07:51:30 +02:00
|
|
|
#!/usr/bin/env python
|
|
|
|
|
2011-05-02 01:43:59 +02:00
|
|
|
import sys
|
|
|
|
import os
|
|
|
|
import resource
|
|
|
|
import shutil
|
|
|
|
import shlex
|
|
|
|
import time
|
|
|
|
import subprocess
|
|
|
|
import random
|
2014-07-06 21:18:00 +02:00
|
|
|
import signal
|
|
|
|
import hashlib
|
2011-05-02 01:43:59 +02:00
|
|
|
|
|
|
|
# this is a disk I/O benchmark script. It runs benchmarks
|
|
|
|
# over different filesystems, different cache sizes and
|
|
|
|
# different number of peers (can be used to find a reasonable
|
|
|
|
# range for unchoke slots).
|
|
|
|
|
|
|
|
# it also measures performance improvements of re-ordering
|
|
|
|
# read requests based on physical location and OS hints
|
|
|
|
# like posix_fadvise(FADV_WILLNEED). It can also be used
|
|
|
|
# for the AIO branch to measure improvements over the
|
|
|
|
# classic thread based disk I/O
|
|
|
|
|
2011-05-16 08:07:44 +02:00
|
|
|
# to set up the test, build the example directory in release
|
2011-05-02 01:43:59 +02:00
|
|
|
# with statistics=on and copy fragmentation_test, client_test
|
2011-07-04 18:57:47 +02:00
|
|
|
# and connection_tester to a directory called 'stage_aio'
|
|
|
|
# and 'stage_syncio' (or make a symbolic link to the bjam
|
|
|
|
# output directory).
|
2011-05-02 01:43:59 +02:00
|
|
|
# make sure gnuplot is installed.
|
|
|
|
|
|
|
|
# the following lists define the space tests will be run in

# variables to test. All these are run on the first
# entry in the filesystem list.

# disk-cache sizes to sweep (presumably in 16 kiB blocks, given the
# "* 16 / 1024" MiB conversion in build_commandline and the
# "5.5 GiB" note below -- TODO confirm)
cache_sizes = [0, 32768, 400000]

# numbers of simultaneous peer connections to sweep
peers = [200, 500, 1000]

# the client builds that can be benchmarked against each other
builds = ['rtorrent', 'utorrent', 'aio', 'syncio']

# the drives are assumed to be mounted under ./<name>
# or have symbolic links to them.
filesystem = ['xfs', 'ext4', 'ext3', 'reiser']
default_fs = filesystem[0]

# the number of peers for the filesystem test. The
# idea is to stress test the filesystem by using a lot
# of peers, since each peer essentially is a separate
# read location on the platter
default_peers = peers[1]

# the amount of cache for the filesystem test
# 5.5 GiB of cache
default_cache = cache_sizes[-1]

# the number of seconds to run each test. It's important that
# this is shorter than what it takes to finish downloading
# the test torrent, since then the average rate will not
# be representative of the peak anymore
# this has to be long enough to download a full copy
# of the test torrent. It's also important for the
# test to be long enough that the warming up of the
# disk cache is not a significant part of the test,
# since download rates will be extremely high while downloading
# into RAM
test_duration = 200 # 700
|
2011-05-02 01:43:59 +02:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# make sure the environment is properly set up: the benchmark opens a
# very large number of sockets and files, so raise the open-file
# descriptor limit. If raising it fails (e.g. the hard limit is lower
# and we're not root), fall back to requiring that it was already
# raised externally via `ulimit -n`.
try:
    if os.name == 'posix':
        resource.setrlimit(resource.RLIMIT_NOFILE, (4000, 5000))
except Exception:
    # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
    # are not swallowed here
    if resource.getrlimit(resource.RLIMIT_NOFILE)[0] < 4000:
        print('please set ulimit -n to at least 4000')
        sys.exit(1)
|
2011-05-02 01:43:59 +02:00
|
|
|
|
2011-07-04 18:57:47 +02:00
|
|
|
def build_stage_dirs():
    """Return the stage directories for the builds under test.

    Note: only builds[2:3] ('aio') is currently enabled.
    """
    return ['stage_%s' % build_name for build_name in builds[2:3]]
|
|
|
|
|
2011-05-02 01:43:59 +02:00
|
|
|
# make sure we have all the binaries available; bail out with a hint
# if any of them is missing from a stage directory
binaries = ['client_test', 'connection_tester', 'fragmentation_test']
for stage in build_stage_dirs():
    for binary in binaries:
        full_path = os.path.join(stage, binary)
        if os.path.exists(full_path):
            continue
        print('make sure "%s" is available in ./%s' % (binary, stage))
        sys.exit(1)
|
2011-05-02 01:43:59 +02:00
|
|
|
|
|
|
|
# every entry in `filesystem` must exist as a directory/mountpoint,
# since it is used as the download directory and is the filesystem
# being benchmarked
for i in filesystem:
    if not os.path.exists(i):
        # error message grammar fixed (was "This is directory/mountpoint
        # is used ... and need to exist")
        print(('the path "%s" does not exist. This directory/mountpoint is ' +
            'used as the download directory and is the filesystem that will be ' +
            'benchmarked, and needs to exist.') % i)
        sys.exit(1)
|
|
|
|
|
|
|
|
# make sure the test torrents exist; generate any that are missing.
# NOTE(review): the size arguments (10000 / 6000) are presumably MiB --
# the intent per the original notes is that test.torrent won't fit in
# physical RAM while test2.torrent will; confirm against
# connection_tester's gen-torrent usage.
for torrent_name, torrent_size, label in [
        ('test.torrent', 10000, 'test torrent'),
        ('test2.torrent', 6000, 'test torrent 2')]:
    if os.path.exists(torrent_name):
        continue
    print('generating %s' % label)
    os.system('./stage_aio/connection_tester gen-torrent %d %s' % (torrent_size, torrent_name))
|
2011-05-02 01:43:59 +02:00
|
|
|
|
|
|
|
# use a new port for each test to make sure they keep working
# this port is incremented for each test run
# (run_test() does `port += 1`; the random base presumably avoids
# colliding with sockets left in TIME_WAIT by earlier runs -- confirm)
port = 10000 + random.randint(0, 40000)
|
|
|
|
|
|
|
|
def clear_caches():
    """Flush and drop the OS disk caches so each test starts cold.

    On Linux this writes '3' to /proc/sys/vm/drop_caches (drops page
    cache plus dentries/inodes; requires root). On macOS it shells out
    to 'purge'. No-op on other platforms.
    """
    if 'linux' in sys.platform:
        os.system('sync')
        # use a context manager so the write is flushed and the handle
        # closed immediately (the original leaked the open file object)
        with open('/proc/sys/vm/drop_caches', 'w') as f:
            f.write('3')
    elif 'darwin' in sys.platform:
        os.system('purge')
|
2011-05-02 01:43:59 +02:00
|
|
|
|
|
|
|
def build_commandline(config, port):
    """Build the shell command line that launches the client under test.

    config: dict produced by build_test_config(); port: listen port.
    For the 'utorrent' and 'rtorrent' builds this also writes their
    session/settings directories as a side effect. Returns the full
    command line as a single string.
    """

    num_peers = config['num-peers']
    torrent_path = config['torrent']

    if config['build'] == 'utorrent':
        # uTorrent is configured through a bencoded settings.dat file.
        # Each write below emits one bencoded key/value pair:
        # '<len>:<key>' is the key string, 'i<num>e' an integer value,
        # '<len>:<str>' a string value. The surrounding 'd' ... 'e'
        # delimit the dictionary.
        try: os.mkdir('utorrent_session')
        except: pass
        cfg = open('utorrent_session/settings.dat', 'w+')

        cfg.write('d')
        cfg.write('20:ul_slots_per_torrenti%de' % num_peers)
        cfg.write('17:conns_per_torrenti%de' % num_peers)
        cfg.write('14:conns_globallyi%de' % num_peers)
        cfg.write('9:bind_porti%de' % port)
        cfg.write('19:dir_active_download%d:%s' % (len(config['save-path']), config['save-path']))
        cfg.write('19:diskio.sparse_filesi1e')
        cfg.write('14:cache.overridei1e')
        # cache-size presumably is in 16 kiB blocks; * 16 / 1024
        # converts it to MiB for uTorrent -- TODO confirm
        cfg.write('19:cache.override_sizei%de' % int(config['cache-size'] * 16 / 1024))
        cfg.write('17:dir_autoload_flagi1e')
        cfg.write('12:dir_autoload8:autoload')
        cfg.write('11:logger_maski4294967295e')
        cfg.write('1:vi0e')
        cfg.write('12:webui.enablei1e')
        cfg.write('19:webui.enable_listeni1e')
        # webui credentials: hashword is sha1(salt + 'admin'), where the
        # salt is the 32 'a' characters written as webui.salt below
        cfg.write('14:webui.hashword20:' + hashlib.sha1('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaadmin').digest())
        cfg.write('10:webui.porti8080e')
        cfg.write('10:webui.salt32:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa')
        cfg.write('14:webui.username5:admin')
        cfg.write('e')
        cfg.close()
        # the torrent is added by dropping it into the autoload directory
        try: os.mkdir('utorrent_session/autoload')
        except: pass
        try: shutil.copy(torrent_path, 'utorrent_session/autoload/')
        except: pass
        return './utorrent-server-v3_0/utserver -logfile session_stats/alerts_log.txt -settingspath utorrent_session'

    if config['build'] == 'rtorrent':
        if os.path.exists('rtorrent_session'):
            # session directory already holds the torrent; resume from it
            add_command = ''
        else:
            try: os.mkdir('rtorrent_session')
            except: pass
            # it seems rtorrent may delete the original torrent when it's being added
            try: shutil.copy(torrent_path, 'rtorrent_session/')
            except: pass
            add_command = '-O load_start_verbose=rtorrent_session/%s ' % torrent_path

        return 'rtorrent -d %s -n -p %d-%d -O max_peers=%d -O max_uploads=%d %s -s rtorrent_session -O max_memory_usage=128000000000' \
            % (config['save-path'], port, port, num_peers, num_peers, add_command)

    # libtorrent builds (aio / syncio) use the client_test example binary
    disable_disk = ''
    if config['disable-disk']: disable_disk = '-0'
    return './stage_%s/client_test -k -N -H -M -B %d -l %d -S %d -T %d -c %d -C %d -s "%s" -p %d -E %d %s -f session_stats/alerts_log.txt %s' \
        % (config['build'], test_duration, num_peers, num_peers, num_peers, num_peers, config['cache-size'], config['save-path'], port, \
        config['hash-threads'], disable_disk, torrent_path)
|
2011-05-02 01:43:59 +02:00
|
|
|
|
|
|
|
def delete_files(files):
    """Best-effort removal of the given paths (plain files or trees).

    Each path is first removed as a file, then as a directory tree.
    Paths that don't exist are silently ignored; anything that still
    exists after both attempts is reported. Never raises.
    """
    for path in files:
        try:
            os.remove(path)
            continue
        except:
            pass
        try:
            shutil.rmtree(path)
            continue
        except:
            pass
        # both attempts failed; only complain if the path is still there
        try:
            if os.path.exists(path):
                print('failed to delete %s' % path)
        except:
            pass
|
2011-05-02 01:43:59 +02:00
|
|
|
|
2014-07-06 21:18:00 +02:00
|
|
|
# typically the schedulers available are 'noop', 'deadline' and 'cfq'
def build_test_config(fs=default_fs, num_peers=default_peers, cache_size=default_cache,
        test='upload', build='aio', profile='', hash_threads=1, torrent='test.torrent',
        disable_disk=False):
    """Assemble the configuration dict consumed by run_test().

    All arguments default to the module-level sweep defaults; the
    'save-path' entry is derived from the filesystem name.
    """
    return {
        'test': test,
        'save-path': os.path.join('./', fs),
        'num-peers': num_peers,
        'cache-size': cache_size,
        'build': build,
        'profile': profile,
        'hash-threads': hash_threads,
        'torrent': torrent,
        'disable-disk': disable_disk,
    }
|
|
|
|
|
2014-07-06 21:18:00 +02:00
|
|
|
def prefix_len(text, prefix):
    """Return how many leading characters of `prefix` match `text`.

    0 if they differ at the first character, len(prefix) if all of
    `prefix` is a prefix of `text`.
    """
    # fix: the previous loop ran over xrange(1, len(prefix)) and so
    # never tested the full-length prefix -- it reported len(prefix)
    # even when the last character did not match
    n = 0
    while n < len(prefix) and text.startswith(prefix[0:n + 1]):
        n += 1
    return n
|
|
|
|
|
|
|
|
def device_name(path):
    """Map `path` to the short name of the block device it lives on.

    Scans the output of `mount` for the mountpoint with the longest
    prefix match against the absolute path, then strips the device to
    its first three characters after the last '/' (e.g. '/dev/sda1'
    -> 'sda').
    """
    mount_proc = subprocess.Popen('mount', stdout=subprocess.PIPE)

    best_match = 0
    best_device = ''
    abs_path = os.path.abspath(path)

    # each mount line looks like: <device> on <mountpoint> ...
    for line in mount_proc.stdout.readlines():
        fields = line.split(' ')
        match = prefix_len(abs_path, fields[2])
        if match > best_match:
            best_match = match
            best_device = fields[0]

    dev = best_device.split('/')[-1][0:3]
    print('device for path: %s -> %s' % (abs_path, dev))
    return dev
|
|
|
|
|
2011-05-02 01:43:59 +02:00
|
|
|
def build_target_folder(config):
    """Return the directory name results for this config are saved under.

    Encodes build, test direction, peer count, cache size, filesystem,
    active I/O scheduler and hash-thread count into the name.
    """
    # the 'upload' test means the testers upload to us, i.e. we download
    if config['test'] == 'upload':
        test = 'download'
    elif config['test'] == 'dual':
        test = 'dual'
    else:
        test = 'seed'

    if 'linux' in sys.platform:
        # the active scheduler is the bracketed entry in the sysfs list
        io_scheduler = open('/sys/block/%s/queue/scheduler' % device_name(config['save-path'])).read().split('[')[1].split(']')[0]
    else:
        io_scheduler = sys.platform

    no_disk = '_no-disk' if config['disable-disk'] else ''

    return 'results_%s_%s_%d_%d_%s_%s_h%d%s' % (config['build'], test, config['num-peers'],
        config['cache-size'], os.path.split(config['save-path'])[1], io_scheduler,
        config['hash-threads'], no_disk)
|
|
|
|
|
|
|
|
def find_library(name):
    """Look for a shared library in the usual system lib directories.

    Returns the first existing full path; falls back to `name` itself
    so the dynamic linker can resolve it from its own search path.
    """
    lib_dirs = ['/usr/lib64/', '/usr/local/lib64/', '/usr/lib/', '/usr/local/lib/']
    for lib_dir in lib_dirs:
        candidate = lib_dir + name
        try:
            if os.path.exists(candidate):
                return candidate
        except:
            pass
    return name
|
|
|
|
|
|
|
|
def find_binary(names):
    """Return the first of `names` found in /usr/bin or /usr/local/bin.

    Names are tried in order (so earlier entries take precedence); when
    none is found, names[0] is returned to be resolved via $PATH.
    """
    bin_dirs = ['/usr/bin/', '/usr/local/bin/']
    for candidate in names:
        for bin_dir in bin_dirs:
            full_path = bin_dir + candidate
            try:
                if os.path.exists(full_path):
                    return full_path
            except:
                pass
    return names[0]
|
2011-05-02 01:43:59 +02:00
|
|
|
|
|
|
|
def run_test(config):
    """Run one benchmark pass described by `config` (see build_test_config).

    Launches the configured client, points connection_tester at it,
    monitors both until the tester finishes (or until test_duration for
    seed tests), then collects logs, stats and profiles into a results
    directory named by build_target_folder(). Increments the global
    `port` for the next run; exits the whole process if either child
    returned a non-zero exit code.
    """
    target_folder = build_target_folder(config)
    if os.path.exists(target_folder):
        # results are keyed by config; delete the folder to force a re-run
        print 'results already exists, skipping test (%s)' % target_folder
        return

    print '\n\n*********************************'
    print '* RUNNING TEST *'
    print '*********************************\n\n'
    print '%s %s' % (config['build'], config['test'])

    # make sure any previous test file is removed
    # don't clean up unless we're running a download-test, so that we leave the test file
    # complete for a seed test.
    delete_files(['utorrent_session/settings.dat', 'utorrent_session/settings.dat.old', 'asserts.log'])
    if config['test'] == 'upload' or config['test'] == 'dual':
        print 'deleting files'
        delete_files([os.path.join(config['save-path'], 'stress_test_file'), '.ses_state', os.path.join(config['save-path'], '.resume'), 'utorrent_session', '.dht_state', 'session_stats', 'rtorrent_session'])

    try: os.mkdir('session_stats')
    except: pass

    # save off the command line for reference
    global port
    cmdline = build_commandline(config, port)
    binary = cmdline.split(' ')[0]
    environment = None
    # profiling hooks: the gperftools CPU/heap profilers are injected
    # via LD_PRELOAD; perf wraps the entire command line instead
    if config['profile'] == 'tcmalloc': environment = {'LD_PRELOAD':find_library('libprofiler.so.0'), 'CPUPROFILE': 'session_stats/cpu_profile.prof'}
    if config['profile'] == 'memory': environment = {'LD_PRELOAD':find_library('libprofiler.so.0'), 'HEAPPROFILE': 'session_stats/heap_profile.prof'}
    if config['profile'] == 'perf': cmdline = 'perf timechart record --call-graph --output=session_stats/perf_profile.prof ' + cmdline
    f = open('session_stats/cmdline.txt', 'w+')
    f.write(cmdline)
    f.close()

    f = open('session_stats/config.txt', 'w+')
    print >>f, config
    f.close()

    # start from a cold OS disk cache for comparable numbers
    print 'clearing disk cache'
    clear_caches()
    print 'OK'
    client_output = open('session_stats/client.output', 'w+')
    client_error = open('session_stats/client.error', 'w+')
    print 'launching: %s' % cmdline
    client = subprocess.Popen(shlex.split(cmdline), stdout=client_output, stdin=subprocess.PIPE, stderr=client_error, env=environment)
    print 'OK'
    # enable disk stats printing ('x' is presumably a client_test
    # keyboard command -- confirm against client_test's input handling)
    if config['build'] != 'rtorrent' and config['build'] != 'utorrent':
        print >>client.stdin, 'x',
    # give the client time to start up and open its listen socket
    time.sleep(4)
    cmdline = './stage_aio/connection_tester %s %d 127.0.0.1 %d %s' % (config['test'], config['num-peers'], port, config['torrent'])
    print 'launching: %s' % cmdline
    tester_output = open('session_stats/tester.output', 'w+')
    tester = subprocess.Popen(shlex.split(cmdline), stdout=tester_output)
    print 'OK'

    time.sleep(2)

    # poll both child processes once a second; upload/dual tests run
    # until the tester terminates, other tests are cut off after
    # test_duration seconds
    print '\n'
    i = 0
    while True:
        time.sleep(1)
        tester.poll()
        if tester.returncode != None:
            print 'tester terminated'
            break
        client.poll()
        if client.returncode != None:
            print 'client terminated'
            break
        print '\r%d / %d' % (i, test_duration),
        sys.stdout.flush()
        i += 1
        if config['test'] != 'upload' and config['test'] != 'dual' and i >= test_duration: break
    print '\n'

    if client.returncode == None:
        try:
            # SIGINT presumably triggers a clean client shutdown -- confirm
            print 'killing client'
            client.send_signal(signal.SIGINT)
        except:
            pass

    time.sleep(10)
    client.wait()
    tester.wait()
    tester_output.close()
    client_output.close()
    terminate = False
    if tester.returncode != 0:
        print 'tester returned %d' % tester.returncode
        terminate = True
    if client.returncode != 0:
        print 'client returned %d' % client.returncode
        terminate = True

    # collect whatever logs the client produced (best effort)
    try: shutil.copy('asserts.log', 'session_stats/')
    except: pass

    try: shutil.move('libtorrent_logs0', 'session_stats/')
    except: pass
    try: shutil.move('libtorrent_logs%s' % port, 'session_stats/')
    except: pass

    # run fragmentation test
    print 'analyzing fragmentation'
    os.system('./stage_aio/fragmentation_test test.torrent %s' % (config['save-path']))
    try: shutil.copy('fragmentation.log', 'session_stats/')
    except: pass

    shutil.copy('fragmentation.gnuplot', 'session_stats/')
    try: shutil.copy('file_access.log', 'session_stats/')
    except: pass

    os.system('filefrag %s >session_stats/filefrag.out' % config['save-path'])
    os.system('filefrag -v %s >session_stats/filefrag_verbose.out' % config['save-path'])

    os.chdir('session_stats')

    # parse session stats
    print 'parsing session log'
    os.system('python ../../parse_session_stats.py *.0000.log')
    os.system('../stage_aio/parse_access_log file_access.log %s' % (os.path.join('..', config['save-path'], 'stress_test_file')))

    os.chdir('..')

    # post-process any recorded profiles into human-readable reports
    if config['profile'] == 'tcmalloc':
        print 'analyzing CPU profile [%s]' % binary
        os.system('%s --pdf %s session_stats/cpu_profile.prof >session_stats/cpu_profile.pdf' % (find_binary(['google-pprof', 'pprof']), binary))
    if config['profile'] == 'memory':
        # heap profiles are numbered .0001.heap, .0002.heap, ...; stop at
        # the first missing one
        for i in xrange(1, 300):
            profile = 'session_stats/heap_profile.prof.%04d.heap' % i
            try: os.stat(profile)
            except: break
            print 'analyzing heap profile [%s] %d' % (binary, i)
            os.system('%s --pdf %s %s >session_stats/heap_profile_%d.pdf' % (find_binary(['google-pprof', 'pprof']), binary, profile, i))
    if config['profile'] == 'perf':
        print 'analyzing CPU profile [%s]' % binary
        os.system('perf timechart --input=session_stats/perf_profile.prof --output=session_stats/profile_timechart.svg')
        os.system('perf report --input=session_stats/perf_profile.prof --threads --show-nr-samples --vmlinux vmlinuz-2.6.38-8-generic.bzip >session_stats/profile.txt')

    # move the results into its final place
    print 'saving results'
    os.rename('session_stats', build_target_folder(config))

    port += 1

    if terminate: sys.exit(1)
|
2011-07-04 18:57:47 +02:00
|
|
|
|
2014-07-06 21:18:00 +02:00
|
|
|
# -------------------------------------------------------------------
# test driver. NOTE(review): the sys.exit(0) calls below act as a
# cut-off -- only the first sweep actually runs; everything after the
# first sys.exit(0) is currently unreachable. Move or remove the exit
# calls to enable the other sweeps.
# -------------------------------------------------------------------

# sweep the number of hashing threads (0..6) with disk I/O disabled
for h in range(0, 7):
    config = build_test_config(num_peers=30, build='aio', test='upload', torrent='test.torrent', hash_threads=h, disable_disk=True)
    run_test(config)
sys.exit(0)

# sweep aio vs. syncio builds over dual/upload/download (unreachable)
for b in ['aio', 'syncio']:
    for test in ['dual', 'upload', 'download']:
        config = build_test_config(build=b, test=test)
        run_test(config)
sys.exit(0)

# sweep all builds over upload/download (unreachable)
for b in builds:
    for test in ['upload', 'download']:
        config = build_test_config(build=b, test=test)
        run_test(config)

# sweep peer counts (unreachable)
for p in peers:
    for test in ['upload', 'download']:
        config = build_test_config(num_peers=p, test=test)
        run_test(config)

# sweep cache sizes (unreachable)
for c in cache_sizes:
    for test in ['upload', 'download']:
        config = build_test_config(cache_size=c, test=test)
        run_test(config)

# sweep filesystems (unreachable)
for fs in filesystem:
    for test in ['upload', 'download']:
        config = build_test_config(fs=fs, test=test)
        run_test(config)