update run_benchmark.py

arvidn 2018-11-24 16:21:29 +01:00 committed by Arvid Norberg
parent a58307733e
commit 7a615695bc
1 changed file with 149 additions and 237 deletions


@@ -1,4 +1,5 @@
 #!/usr/bin/env python
+# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
 from __future__ import print_function
 import sys
@@ -12,36 +13,22 @@ import random
 import signal
 import hashlib
-# this is a disk I/O benchmark script. It runs menchmarks
-# over different filesystems, different cache sizes and
-# different number of peers (can be used to find a reasonable
-# range for unchoke slots).
-# it also measures performance improvements of re-ordering
-# read requests based on physical location and OS hints
-# like posix_fadvice(FADV_WILLNEED). It can also be used
-# for the AIO branch to measure improvements over the
-# classic thread based disk I/O
+# this is a disk I/O benchmark script. It runs benchmarks
+# over different number of peers.
 # to set up the test, build the example directory in release
-# with statistics=on and copy fragmentation_test, client_test
-# and connection_tester to a directory called 'stage_aio'
-# and 'stage_syncio' (or make a symbolic link to the bjam
-# output directory).
+# and stage client_test and connection_tester to the examples directory:
+#
+# bjam -j8 link=static release debug-symbols=on stage_client_test \
+# stage_connection_tester
+#
 # make sure gnuplot is installed.
 # the following lists define the space tests will be run in
-# variables to test. All these are run on the first
-# entry in the filesystem list.
-cache_sizes = [0, 32768, 400000]
-peers = [200, 500, 1000]
-builds = ['rtorrent', 'utorrent', 'aio', 'syncio']
-# the drives are assumed to be mounted under ./<name>
-# or have symbolic links to them.
-filesystem = ['xfs', 'ext4', 'ext3', 'reiser']
-default_fs = filesystem[0]
+peers = [50, 200, 500, 1000]
+# builds = ['rtorrent', 'utorrent', 'libtorrent']
+builds = ['libtorrent']
 # the number of peers for the filesystem test. The
 # idea is to stress test the filesystem by using a lot
@@ -51,7 +38,7 @@ default_peers = peers[1]
 # the amount of cache for the filesystem test
 # 5.5 GiB of cache
-default_cache = cache_sizes[-1]
+default_cache = 400000
 # the number of seconds to run each test. It's important that
 # this is shorter than what it takes to finish downloading
@@ -63,8 +50,9 @@ default_cache = cache_sizes[-1]
 # disk cache is not a significant part of the test,
 # since download rates will be extremely high while downloading
 # into RAM
-test_duration = 200 # 700
+test_duration = 100
+utorrent_version = 'utorrent-server-alpha-v3_3'
 # make sure the environment is properly set up
 try:
@@ -84,7 +72,7 @@ def build_stage_dirs():
 # make sure we have all the binaries available
-binaries = ['client_test', 'connection_tester', 'fragmentation_test']
+binaries = ['client_test', 'connection_tester']
 for b in build_stage_dirs():
     for i in binaries:
         p = os.path.join(b, i)
@@ -92,70 +80,68 @@ for b in build_stage_dirs():
             print('make sure "%s" is available in ./%s' % (i, b))
             sys.exit(1)
-for i in filesystem:
-    if not os.path.exists(i):
-        print(('the path "%s" does not exist. This is directory/mountpoint is ' +
-               'used as the download directory and is the filesystem that will be benchmarked ' +
-               'and need to exist.') % i)
-        sys.exit(1)
 # make sure we have a test torrent
 if not os.path.exists('test.torrent'):
     print('generating test torrent')
     # generate a 100 GB torrent, to make sure it won't all fit in physical RAM
-    os.system('./stage_aio/connection_tester gen-torrent 10000 test.torrent')
-if not os.path.exists('test2.torrent'):
-    print('generating test torrent 2')
-    # generate a 6 GB torrent, to make sure it will fit in physical RAM
-    os.system('./stage_aio/connection_tester gen-torrent 6000 test2.torrent')
+    os.system('./connection_tester gen-torrent -s 100000 -t test.torrent')
 # use a new port for each test to make sure they keep working
 # this port is incremented for each test run
 port = 10000 + random.randint(0, 40000)
+try:
+    os.mkdir('benchmark-dir')
+except Exception:
+    pass
 def clear_caches():
     if 'linux' in sys.platform:
         os.system('sync')
-        open('/proc/sys/vm/drop_caches', 'w').write('3')
+        try:
+            open('/proc/sys/vm/drop_caches', 'w').write('3')
+        except Exception:
+            pass
     elif 'darwin' in sys.platform:
         os.system('purge')
-def build_commandline(config, port):
+def build_utorrent_commandline(config, port):
     num_peers = config['num-peers']
     torrent_path = config['torrent']
-    if config['build'] == 'utorrent':
-        try:
-            os.mkdir('utorrent_session')
-        except Exception:
-            pass
-        cfg = open('utorrent_session/settings.dat', 'w+')
+    target_folder = build_target_folder(config)
+    try:
+        os.mkdir('utorrent_session')
+    except Exception:
+        pass
+    with open('utorrent_session/settings.dat', 'w+') as cfg:
         cfg.write('d')
         cfg.write('20:ul_slots_per_torrenti%de' % num_peers)
         cfg.write('17:conns_per_torrenti%de' % num_peers)
         cfg.write('14:conns_globallyi%de' % num_peers)
         cfg.write('9:bind_porti%de' % port)
-        cfg.write('19:dir_active_download%d:%s' % (len(config['save-path']), config['save-path']))
+        cfg.write('19:dir_active_download%d:%s' % (len(config['save-path']),
+                  config['save-path']))
         cfg.write('19:diskio.sparse_filesi1e')
         cfg.write('14:cache.overridei1e')
-        cfg.write('19:cache.override_sizei%de' % int(config['cache-size'] * 16 / 1024))
+        cfg.write('19:cache.override_sizei%de' % int(config['cache-size'] *
+                  16 / 1024))
         cfg.write('17:dir_autoload_flagi1e')
         cfg.write('12:dir_autoload8:autoload')
         cfg.write('11:logger_maski4294967295e')
         cfg.write('1:vi0e')
         cfg.write('12:webui.enablei1e')
         cfg.write('19:webui.enable_listeni1e')
-        cfg.write('14:webui.hashword20:' + hashlib.sha1('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaadmin').digest())
+        cfg.write('14:webui.hashword20:' + hashlib.sha1(
+            'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaadmin').digest())
         cfg.write('10:webui.porti8080e')
         cfg.write('10:webui.salt32:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa')
         cfg.write('14:webui.username5:admin')
         cfg.write('e')
-        cfg.close()
         try:
             os.mkdir('utorrent_session/autoload')
         except Exception:
@@ -164,38 +150,65 @@ def build_commandline(config, port):
             shutil.copy(torrent_path, 'utorrent_session/autoload/')
         except Exception:
             pass
-        return './utorrent-server-v3_0/utserver -logfile session_stats/alerts_log.txt -settingspath utorrent_session'
-    if config['build'] == 'rtorrent':
-        if os.path.exists('rtorrent_session'):
-            add_command = ''
-        else:
-            try:
-                os.mkdir('rtorrent_session')
-            except Exception:
-                pass
-            # it seems rtorrent may delete the original torrent when it's being added
-            try:
-                shutil.copy(torrent_path, 'rtorrent_session/')
-            except Exception:
-                pass
-            add_command = '-O load_start_verbose=rtorrent_session/%s ' % torrent_path
-        return ('rtorrent -d %s -n -p %d-%d -O max_peers=%d -O max_uploads=%d %s -s '
-                'rtorrent_session -O max_memory_usage=128000000000') % (
-            config['save-path'], port, port, num_peers, num_peers, add_command)
-    disable_disk = ''
-    if config['disable-disk']:
-        disable_disk = '-0'
-    return ('./stage_%s/client_test -k -N -H -M -B %d -l %d -S %d -T %d -c %d -C %d -s "%s" -p %d -E %d %s '
-            '-f session_stats/alerts_log.txt %s') % (
-        config['build'], test_duration, num_peers, num_peers, num_peers, num_peers, config['cache-size'],
-        config['save-path'], port, config['hash-threads'], disable_disk, torrent_path)
+    return './%s/utserver -logfile %s/client.log -settingspath ' % \
+        (utorrent_version, target_folder) + \
+        'utorrent_session'
+def build_rtorrent_commandline(config, port):
+    num_peers = config['num-peers']
+    torrent_path = config['torrent']
+    target_folder = build_target_folder(config)
+    if os.path.exists(target_folder):
+        add_command = ''
+    else:
+        try:
+            os.mkdir(target_folder)
+        except Exception:
+            pass
+        # it seems rtorrent may delete the original torrent when it's being added
+        try:
+            shutil.copy(torrent_path, target_folder)
+        except Exception:
+            pass
+        add_command = '-O load_start_verbose=%s/%s ' % (target_folder, torrent_path)
+    return ('rtorrent -d %s -n -p %d-%d -O max_peers=%d -O max_uploads=%d %s -s '
+            '%s -O max_memory_usage=128000000000') % (
+        config['save-path'], port, port, num_peers, num_peers, add_command, target_folder)
+def build_libtorrent_commandline(config, port):
+    num_peers = config['num-peers']
+    torrent_path = config['torrent']
+    target_folder = build_target_folder(config)
+    return ('./client_test -k -O -F 500 --enable_upnp=0 --enable_natpmp=0 '
+            '--enable_dht=0 --mixed_mode_algorithm=0 --peer_timeout=%d '
+            '--listen_queue_size=%d --unchoke_slots_limit=%d -T %d '
+            '--connections_limit=%d --cache_size=%d -s "%s" '
+            '--listen_interfaces="0.0.0.0:%d" --aio_threads=%d '
+            '-f %s/client.log %s') % (
+        test_duration, num_peers, num_peers, num_peers, num_peers, config['cache-size'],
+        config['save-path'], port, config['disk-threads'], target_folder, torrent_path)
+def build_commandline(config, port):
+    if config['build'] == 'utorrent':
+        return build_utorrent_commandline(config, port)
+    if config['build'] == 'rtorrent':
+        return build_rtorrent_commandline(config, port)
+    if config['build'] == 'libtorrent':
+        return build_libtorrent_commandline(config, port)
 def delete_files(files):
     for i in files:
-        print('deleting %s' % i)
         try:
             os.remove(i)
         except Exception:
@@ -208,72 +221,27 @@ def delete_files(files):
                 except Exception:
                     pass
-# typically the schedulers available are 'noop', 'deadline' and 'cfq'
-def build_test_config(fs=default_fs, num_peers=default_peers, cache_size=default_cache,
-                      test='upload', build='aio', profile='', hash_threads=1, torrent='test.torrent',
-                      disable_disk=False):
-    config = {'test': test, 'save-path': os.path.join('./', fs), 'num-peers': num_peers,
+def build_test_config(num_peers=default_peers, cache_size=default_cache,
+                      test='download', build='libtorrent', profile='', disk_threads=16,
+                      torrent='test.torrent', disable_disk=False):
+    config = {'test': test, 'save-path': os.path.join('.', 'benchmark-dir'), 'num-peers': num_peers,
               'cache-size': cache_size, 'build': build, 'profile': profile,
-              'hash-threads': hash_threads, 'torrent': torrent, 'disable-disk': disable_disk}
+              'disk-threads': disk_threads, 'torrent': torrent, 'disable-disk': disable_disk}
     return config
-def prefix_len(text, prefix):
-    for i in range(1, len(prefix)):
-        if (not text.startswith(prefix[0:i])):
-            return i - 1
-    return len(prefix)
-def device_name(path):
-    mount = subprocess.Popen('mount', stdout=subprocess.PIPE)
-    max_match_len = 0
-    match_device = ''
-    path = os.path.abspath(path)
-    for mp in mount.stdout.readlines():
-        c = mp.split(' ')
-        device = c[0]
-        mountpoint = c[2]
-        prefix = prefix_len(path, mountpoint)
-        if prefix > max_match_len:
-            max_match_len = prefix
-            match_device = device
-    device = match_device
-    device = device.split('/')[-1][0:3]
-    print('device for path: %s -> %s' % (path, device))
-    return device
 def build_target_folder(config):
-    test = 'seed'
-    if config['test'] == 'upload':
-        test = 'download'
-    elif config['test'] == 'dual':
-        test = 'dual'
-    if 'linux' in sys.platform:
-        io_scheduler = open('/sys/block/%s/queue/scheduler' %
-                            device_name(config['save-path'])).read().split('[')[1].split(']')[0]
-    else:
-        io_scheduler = sys.platform
     no_disk = ''
     if config['disable-disk']:
         no_disk = '_no-disk'
-    return 'results_%s_%s_%d_%d_%s_%s_h%d%s' % (config['build'],
-        test,
+    return 'results_%s_%s_%d_%d_%d%s' % (config['build'],
+        config['test'],
         config['num-peers'],
         config['cache-size'],
-        os.path.split(
-            config['save-path'])[1],
-        io_scheduler,
-        config['hash-threads'],
+        config['disk-threads'],
         no_disk)
@@ -303,6 +271,8 @@ def find_binary(names):
 def run_test(config):
+    j = os.path.join
     target_folder = build_target_folder(config)
     if os.path.exists(target_folder):
         print('results already exists, skipping test (%s)' % target_folder)
@@ -317,20 +287,16 @@ def run_test(config):
     # don't clean up unless we're running a download-test, so that we leave the test file
     # complete for a seed test.
     delete_files(['utorrent_session/settings.dat', 'utorrent_session/settings.dat.old', 'asserts.log'])
-    if config['test'] == 'upload' or config['test'] == 'dual':
-        print('deleting files')
-        delete_files([os.path.join(config['save-path'],
-                                   'stress_test_file'),
+    if config['test'] == 'download' or config['test'] == 'dual':
+        delete_files([j(config['save-path'], 'test'),
                       '.ses_state',
-                      os.path.join(config['save-path'],
-                                   '.resume'),
+                      j(config['save-path'], '.resume'),
                       'utorrent_session',
                       '.dht_state',
-                      'session_stats',
                       'rtorrent_session'])
     try:
-        os.mkdir('session_stats')
+        os.mkdir(target_folder)
     except Exception:
         pass
@@ -340,24 +306,25 @@ def run_test(config):
     binary = cmdline.split(' ')[0]
     environment = None
     if config['profile'] == 'tcmalloc':
-        environment = {'LD_PRELOAD': find_library('libprofiler.so.0'), 'CPUPROFILE': 'session_stats/cpu_profile.prof'}
+        environment = {'LD_PRELOAD': find_library('libprofiler.so.0'),
+                       'CPUPROFILE': j(target_folder, 'cpu_profile.prof')}
     if config['profile'] == 'memory':
-        environment = {'LD_PRELOAD': find_library('libprofiler.so.0'), 'HEAPPROFILE': 'session_stats/heap_profile.prof'}
+        environment = {'LD_PRELOAD': find_library('libprofiler.so.0'),
+                       'HEAPPROFILE': j(target_folder, 'heap_profile.prof')}
     if config['profile'] == 'perf':
-        cmdline = 'perf timechart record --call-graph --output=session_stats/perf_profile.prof ' + cmdline
-    f = open('session_stats/cmdline.txt', 'w+')
-    f.write(cmdline)
-    f.close()
-    f = open('session_stats/config.txt', 'w+')
-    print(config, file=f)
-    f.close()
+        cmdline = 'perf record -g --output=' + \
+            j(target_folder, 'perf_profile.prof') + ' ' + cmdline
+    with open(j(target_folder, 'cmdline.txt'), 'w+') as f:
+        f.write(cmdline)
+    with open(j(target_folder, 'config.txt'), 'w+') as f:
+        print(config, file=f)
     print('clearing disk cache')
     clear_caches()
     print('OK')
-    client_output = open('session_stats/client.output', 'w+')
-    client_error = open('session_stats/client.error', 'w+')
+    client_output = open(j(target_folder, 'client.output'), 'w+')
+    client_error = open(j(target_folder, 'client.error'), 'w+')
     print('launching: %s' % cmdline)
     client = subprocess.Popen(
         shlex.split(cmdline),
@@ -367,13 +334,14 @@ def run_test(config):
         env=environment)
     print('OK')
     # enable disk stats printing
-    if config['build'] != 'rtorrent' and config['build'] != 'utorrent':
+    if config['build'] == 'libtorrent':
         print('x', end=' ', file=client.stdin)
     time.sleep(4)
-    cmdline = './stage_aio/connection_tester %s %d 127.0.0.1 %d %s' % (
-        config['test'], config['num-peers'], port, config['torrent'])
+    test_dir = 'upload' if config['test'] == 'download' else 'download' if config['test'] == 'upload' else 'dual'
+    cmdline = './connection_tester %s -c %d -d 127.0.0.1 -p %d -t %s' % (
+        test_dir, config['num-peers'], port, config['torrent'])
     print('launching: %s' % cmdline)
-    tester_output = open('session_stats/tester.output', 'w+')
+    tester_output = open(j(target_folder, 'tester.output'), 'w+')
     tester = subprocess.Popen(shlex.split(cmdline), stdout=tester_output)
     print('OK')
@@ -391,10 +359,13 @@ def run_test(config):
         if client.returncode is not None:
             print('client terminated')
             break
-        print('\r%d / %d' % (i, test_duration), end=' ')
+        print('\r%d / %d\x1b[K' % (i, test_duration), end=' ')
         sys.stdout.flush()
         i += 1
-        if config['test'] != 'upload' and config['test'] != 'dual' and i >= test_duration:
+        # in download- and dual tests, connection_tester will exit once the
+        # client is done downloading. In upload tests, we'll upload for
+        # 'test_duration' number of seconds until we end the test
+        if config['test'] != 'download' and config['test'] != 'dual' and i >= test_duration:
             break
     print('\n')
@@ -419,69 +390,37 @@ def run_test(config):
         terminate = True
     try:
-        shutil.copy('asserts.log', 'session_stats/')
+        shutil.copy('asserts.log', target_folder)
     except Exception:
         pass
-    try:
-        shutil.move('libtorrent_logs0', 'session_stats/')
-    except Exception:
-        pass
-    try:
-        shutil.move('libtorrent_logs%s' % port, 'session_stats/')
-    except Exception:
-        pass
-    # run fragmentation test
-    print('analyzing fragmentation')
-    os.system('./stage_aio/fragmentation_test test.torrent %s' % (config['save-path']))
-    try:
-        shutil.copy('fragmentation.log', 'session_stats/')
-    except Exception:
-        pass
-    shutil.copy('fragmentation.gnuplot', 'session_stats/')
-    try:
-        shutil.copy('file_access.log', 'session_stats/')
-    except Exception:
-        pass
-    os.system('filefrag %s >session_stats/filefrag.out' % config['save-path'])
-    os.system('filefrag -v %s >session_stats/filefrag_verbose.out' % config['save-path'])
-    os.chdir('session_stats')
-    # parse session stats
-    print('parsing session log')
-    os.system('python ../../parse_session_stats.py *.0000.log')
-    os.system('../stage_aio/parse_access_log file_access.log %s' %
-              (os.path.join('..', config['save-path'], 'stress_test_file')))
+    os.chdir(target_folder)
+    if config['build'] == 'libtorrent':
+        # parse session stats
+        print('parsing session log')
+        os.system('python ../../tools/parse_session_stats.py client.log')
     os.chdir('..')
     if config['profile'] == 'tcmalloc':
         print('analyzing CPU profile [%s]' % binary)
-        os.system('%s --pdf %s session_stats/cpu_profile.prof >session_stats/cpu_profile.pdf' %
-                  (find_binary(['google-pprof', 'pprof']), binary))
+        os.system('%s --pdf %s %s/cpu_profile.prof >%s/cpu_profile.pdf' %
+                  (find_binary(['google-pprof', 'pprof']), binary, target_folder, target_folder))
     if config['profile'] == 'memory':
         for i in range(1, 300):
-            profile = 'session_stats/heap_profile.prof.%04d.heap' % i
+            profile = j(target_folder, 'heap_profile.prof.%04d.heap' % i)
            try:
                 os.stat(profile)
             except Exception:
                 break
             print('analyzing heap profile [%s] %d' % (binary, i))
-            os.system('%s --pdf %s %s >session_stats/heap_profile_%d.pdf' %
-                      (find_binary(['google-pprof', 'pprof']), binary, profile, i))
+            os.system('%s --pdf %s %s >%s/heap_profile_%d.pdf' %
+                      (find_binary(['google-pprof', 'pprof']), binary, profile, target_folder, i))
     if config['profile'] == 'perf':
         print('analyzing CPU profile [%s]' % binary)
-        os.system('perf timechart --input=session_stats/perf_profile.prof --output=session_stats/profile_timechart.svg')
-        os.system(('perf report --input=session_stats/perf_profile.prof --threads --show-nr-samples '
-                   '--vmlinux vmlinuz-2.6.38-8-generic.bzip >session_stats/profile.txt'))
-    # move the results into its final place
-    print('saving results')
-    os.rename('session_stats', build_target_folder(config))
+        os.system(('perf report --input=%s/perf_profile.prof --threads --demangle --show-nr-samples '
+                   '>%s/profile.txt' % (target_folder, target_folder)))
     port += 1
@@ -489,39 +428,12 @@ def run_test(config):
         sys.exit(1)
-for h in range(0, 7):
-    config = build_test_config(
-        num_peers=30,
-        build='aio',
-        test='upload',
-        torrent='test.torrent',
-        hash_threads=h,
-        disable_disk=True)
-    run_test(config)
-sys.exit(0)
-for b in ['aio', 'syncio']:
-    for test in ['dual', 'upload', 'download']:
-        config = build_test_config(build=b, test=test)
-        run_test(config)
-sys.exit(0)
 for b in builds:
-    for test in ['upload', 'download']:
-        config = build_test_config(build=b, test=test)
+    for test in ['upload', 'download', 'dual']:
+        config = build_test_config(build=b, test=test, profile='perf')
         run_test(config)
 for p in peers:
-    for test in ['upload', 'download']:
-        config = build_test_config(num_peers=p, test=test)
-        run_test(config)
-for c in cache_sizes:
-    for test in ['upload', 'download']:
-        config = build_test_config(cache_size=c, test=test)
-        run_test(config)
-for fs in filesystem:
-    for test in ['upload', 'download']:
-        config = build_test_config(fs=fs, test=test)
+    for test in ['upload', 'download', 'dual']:
+        config = build_test_config(num_peers=p, test=test, profile='perf')
         run_test(config)
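
A note on the settings.dat writes in build_utorrent_commandline above: the hand-written strings are bencoded dictionary entries, the format uTorrent reads its settings from. Keys and string values are written as <length>:<text>, integers as i<value>e, and the whole file is a single dictionary delimited by d...e. The sketch below only illustrates those encoding rules; the bencode_* helper names are not part of the script, and canonical bencoding additionally sorts dictionary keys, which the script does not do since it writes its entries in a fixed order.

# Minimal bencode helpers (illustrative only; the benchmark script writes
# these byte strings by hand rather than using helpers like these).

def bencode_str(s):
    # a string is encoded as <length>:<bytes>
    return '%d:%s' % (len(s), s)


def bencode_int(n):
    # an integer is encoded as i<value>e
    return 'i%de' % n


def bencode_settings(entries):
    # a settings.dat file is one bencoded dictionary: 'd' <key><value>... 'e'
    out = 'd'
    for key, value in entries:
        out += bencode_str(key)
        out += bencode_int(value) if isinstance(value, int) else bencode_str(value)
    return out + 'e'


# example, matching the form of the cfg.write() calls in the diff:
# bencode_settings([('bind_port', 6881), ('webui.enable', 1)])
# -> 'd9:bind_porti6881e12:webui.enablei1ee'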