From 7a615695bc38be9c2110a50b9c6ca960af98e94f Mon Sep 17 00:00:00 2001 From: arvidn Date: Sat, 24 Nov 2018 16:21:29 +0100 Subject: [PATCH] update run_benchmark.py --- examples/run_benchmarks.py | 386 ++++++++++++++----------------------- 1 file changed, 149 insertions(+), 237 deletions(-) diff --git a/examples/run_benchmarks.py b/examples/run_benchmarks.py index 5c135cdb7..4610b3772 100755 --- a/examples/run_benchmarks.py +++ b/examples/run_benchmarks.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4 from __future__ import print_function import sys @@ -12,36 +13,22 @@ import random import signal import hashlib -# this is a disk I/O benchmark script. It runs menchmarks -# over different filesystems, different cache sizes and -# different number of peers (can be used to find a reasonable -# range for unchoke slots). - -# it also measures performance improvements of re-ordering -# read requests based on physical location and OS hints -# like posix_fadvice(FADV_WILLNEED). It can also be used -# for the AIO branch to measure improvements over the -# classic thread based disk I/O +# this is a disk I/O benchmark script. It runs benchmarks +# over different number of peers. # to set up the test, build the example directory in release -# with statistics=on and copy fragmentation_test, client_test -# and connection_tester to a directory called 'stage_aio' -# and 'stage_syncio' (or make a symbolic link to the bjam -# output directory). +# and stage client_test and connection_tester to the examples directory: +# +# bjam -j8 link=static release debug-symbols=on stage_client_test \ +# stage_connection_tester +# # make sure gnuplot is installed. # the following lists define the space tests will be run in -# variables to test. All these are run on the first -# entry in the filesystem list. -cache_sizes = [0, 32768, 400000] -peers = [200, 500, 1000] -builds = ['rtorrent', 'utorrent', 'aio', 'syncio'] - -# the drives are assumed to be mounted under ./ -# or have symbolic links to them. -filesystem = ['xfs', 'ext4', 'ext3', 'reiser'] -default_fs = filesystem[0] +peers = [50, 200, 500, 1000] +# builds = ['rtorrent', 'utorrent', 'libtorrent'] +builds = ['libtorrent'] # the number of peers for the filesystem test. The # idea is to stress test the filesystem by using a lot @@ -51,7 +38,7 @@ default_peers = peers[1] # the amount of cache for the filesystem test # 5.5 GiB of cache -default_cache = cache_sizes[-1] +default_cache = 400000 # the number of seconds to run each test. It's important that # this is shorter than what it takes to finish downloading @@ -63,8 +50,9 @@ default_cache = cache_sizes[-1] # disk cache is not a significant part of the test, # since download rates will be extremely high while downloading # into RAM -test_duration = 200 # 700 +test_duration = 100 +utorrent_version = 'utorrent-server-alpha-v3_3' # make sure the environment is properly set up try: @@ -84,7 +72,7 @@ def build_stage_dirs(): # make sure we have all the binaries available -binaries = ['client_test', 'connection_tester', 'fragmentation_test'] +binaries = ['client_test', 'connection_tester'] for b in build_stage_dirs(): for i in binaries: p = os.path.join(b, i) @@ -92,110 +80,135 @@ for b in build_stage_dirs(): print('make sure "%s" is available in ./%s' % (i, b)) sys.exit(1) -for i in filesystem: - if not os.path.exists(i): - print(('the path "%s" does not exist. 
This is directory/mountpoint is ' + - 'used as the download directory and is the filesystem that will be benchmarked ' + - 'and need to exist.') % i) - sys.exit(1) - # make sure we have a test torrent if not os.path.exists('test.torrent'): print('generating test torrent') # generate a 100 GB torrent, to make sure it won't all fit in physical RAM - os.system('./stage_aio/connection_tester gen-torrent 10000 test.torrent') - -if not os.path.exists('test2.torrent'): - print('generating test torrent 2') - # generate a 6 GB torrent, to make sure it will fit in physical RAM - os.system('./stage_aio/connection_tester gen-torrent 6000 test2.torrent') + os.system('./connection_tester gen-torrent -s 100000 -t test.torrent') # use a new port for each test to make sure they keep working # this port is incremented for each test run port = 10000 + random.randint(0, 40000) +try: + os.mkdir('benchmark-dir') +except Exception: + pass + def clear_caches(): if 'linux' in sys.platform: os.system('sync') - open('/proc/sys/vm/drop_caches', 'w').write('3') + try: + open('/proc/sys/vm/drop_caches', 'w').write('3') + except Exception: + pass elif 'darwin' in sys.platform: os.system('purge') -def build_commandline(config, port): - +def build_utorrent_commandline(config, port): num_peers = config['num-peers'] torrent_path = config['torrent'] + target_folder = build_target_folder(config) - if config['build'] == 'utorrent': - try: - os.mkdir('utorrent_session') - except Exception: - pass - cfg = open('utorrent_session/settings.dat', 'w+') + try: + os.mkdir('utorrent_session') + except Exception: + pass + with open('utorrent_session/settings.dat', 'w+') as cfg: cfg.write('d') cfg.write('20:ul_slots_per_torrenti%de' % num_peers) cfg.write('17:conns_per_torrenti%de' % num_peers) cfg.write('14:conns_globallyi%de' % num_peers) cfg.write('9:bind_porti%de' % port) - cfg.write('19:dir_active_download%d:%s' % (len(config['save-path']), config['save-path'])) + cfg.write('19:dir_active_download%d:%s' % (len(config['save-path']), + config['save-path'])) cfg.write('19:diskio.sparse_filesi1e') cfg.write('14:cache.overridei1e') - cfg.write('19:cache.override_sizei%de' % int(config['cache-size'] * 16 / 1024)) + cfg.write('19:cache.override_sizei%de' % int(config['cache-size'] * + 16 / 1024)) cfg.write('17:dir_autoload_flagi1e') cfg.write('12:dir_autoload8:autoload') cfg.write('11:logger_maski4294967295e') cfg.write('1:vi0e') cfg.write('12:webui.enablei1e') cfg.write('19:webui.enable_listeni1e') - cfg.write('14:webui.hashword20:' + hashlib.sha1('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaadmin').digest()) + cfg.write('14:webui.hashword20:' + hashlib.sha1( + 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaadmin').digest()) cfg.write('10:webui.porti8080e') cfg.write('10:webui.salt32:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa') cfg.write('14:webui.username5:admin') cfg.write('e') - cfg.close() + + try: + os.mkdir('utorrent_session/autoload') + except Exception: + pass + try: + shutil.copy(torrent_path, 'utorrent_session/autoload/') + except Exception: + pass + return './%s/utserver -logfile %s/client.log -settingspath ' % \ + (utorrent_version, target_folder) + \ + 'utorrent_session' + + +def build_rtorrent_commandline(config, port): + num_peers = config['num-peers'] + torrent_path = config['torrent'] + target_folder = build_target_folder(config) + + if os.path.exists(target_folder): + add_command = '' + else: try: - os.mkdir('utorrent_session/autoload') + os.mkdir(target_folder) except Exception: pass + # it seems rtorrent may delete the original torrent when it's being 
added try: - shutil.copy(torrent_path, 'utorrent_session/autoload/') + shutil.copy(torrent_path, target_folder) except Exception: pass - return './utorrent-server-v3_0/utserver -logfile session_stats/alerts_log.txt -settingspath utorrent_session' + add_command = '-O load_start_verbose=%s/%s ' % (target_folder, torrent_path) + + return ('rtorrent -d %s -n -p %d-%d -O max_peers=%d -O max_uploads=%d %s -s ' + '%s -O max_memory_usage=128000000000') % ( + config['save-path'], port, port, num_peers, num_peers, add_command, target_folder) + + +def build_libtorrent_commandline(config, port): + num_peers = config['num-peers'] + torrent_path = config['torrent'] + target_folder = build_target_folder(config) + + return ('./client_test -k -O -F 500 --enable_upnp=0 --enable_natpmp=0 ' + '--enable_dht=0 --mixed_mode_algorithm=0 --peer_timeout=%d ' + '--listen_queue_size=%d --unchoke_slots_limit=%d -T %d ' + '--connections_limit=%d --cache_size=%d -s "%s" ' + '--listen_interfaces="0.0.0.0:%d" --aio_threads=%d ' + '-f %s/client.log %s') % ( + test_duration, num_peers, num_peers, num_peers, num_peers, config['cache-size'], + config['save-path'], port, config['disk-threads'], target_folder, torrent_path) + + +def build_commandline(config, port): + + if config['build'] == 'utorrent': + return build_utorrent_commandline(config, port) if config['build'] == 'rtorrent': - if os.path.exists('rtorrent_session'): - add_command = '' - else: - try: - os.mkdir('rtorrent_session') - except Exception: - pass - # it seems rtorrent may delete the original torrent when it's being added - try: - shutil.copy(torrent_path, 'rtorrent_session/') - except Exception: - pass - add_command = '-O load_start_verbose=rtorrent_session/%s ' % torrent_path + return build_rtorrent_commandline(config, port) - return ('rtorrent -d %s -n -p %d-%d -O max_peers=%d -O max_uploads=%d %s -s ' - 'rtorrent_session -O max_memory_usage=128000000000') % ( - config['save-path'], port, port, num_peers, num_peers, add_command) - - disable_disk = '' - if config['disable-disk']: - disable_disk = '-0' - return ('./stage_%s/client_test -k -N -H -M -B %d -l %d -S %d -T %d -c %d -C %d -s "%s" -p %d -E %d %s ' - '-f session_stats/alerts_log.txt %s') % ( - config['build'], test_duration, num_peers, num_peers, num_peers, num_peers, config['cache-size'], - config['save-path'], port, config['hash-threads'], disable_disk, torrent_path) + if config['build'] == 'libtorrent': + return build_libtorrent_commandline(config, port) def delete_files(files): for i in files: + print('deleting %s' % i) try: os.remove(i) except Exception: @@ -208,73 +221,28 @@ def delete_files(files): except Exception: pass -# typically the schedulers available are 'noop', 'deadline' and 'cfq' - -def build_test_config(fs=default_fs, num_peers=default_peers, cache_size=default_cache, - test='upload', build='aio', profile='', hash_threads=1, torrent='test.torrent', - disable_disk=False): - config = {'test': test, 'save-path': os.path.join('./', fs), 'num-peers': num_peers, +def build_test_config(num_peers=default_peers, cache_size=default_cache, + test='download', build='libtorrent', profile='', disk_threads=16, + torrent='test.torrent', disable_disk=False): + config = {'test': test, 'save-path': os.path.join('.', 'benchmark-dir'), 'num-peers': num_peers, 'cache-size': cache_size, 'build': build, 'profile': profile, - 'hash-threads': hash_threads, 'torrent': torrent, 'disable-disk': disable_disk} + 'disk-threads': disk_threads, 'torrent': torrent, 'disable-disk': disable_disk} return config -def 
prefix_len(text, prefix): - for i in range(1, len(prefix)): - if (not text.startswith(prefix[0:i])): - return i - 1 - return len(prefix) - - -def device_name(path): - mount = subprocess.Popen('mount', stdout=subprocess.PIPE) - - max_match_len = 0 - match_device = '' - path = os.path.abspath(path) - - for mp in mount.stdout.readlines(): - c = mp.split(' ') - device = c[0] - mountpoint = c[2] - prefix = prefix_len(path, mountpoint) - if prefix > max_match_len: - max_match_len = prefix - match_device = device - - device = match_device - device = device.split('/')[-1][0:3] - print('device for path: %s -> %s' % (path, device)) - return device - - def build_target_folder(config): - test = 'seed' - if config['test'] == 'upload': - test = 'download' - elif config['test'] == 'dual': - test = 'dual' - - if 'linux' in sys.platform: - io_scheduler = open('/sys/block/%s/queue/scheduler' % - device_name(config['save-path'])).read().split('[')[1].split(']')[0] - else: - io_scheduler = sys.platform no_disk = '' if config['disable-disk']: no_disk = '_no-disk' - return 'results_%s_%s_%d_%d_%s_%s_h%d%s' % (config['build'], - test, - config['num-peers'], - config['cache-size'], - os.path.split( - config['save-path'])[1], - io_scheduler, - config['hash-threads'], - no_disk) + return 'results_%s_%s_%d_%d_%d%s' % (config['build'], + config['test'], + config['num-peers'], + config['cache-size'], + config['disk-threads'], + no_disk) def find_library(name): @@ -303,6 +271,8 @@ def find_binary(names): def run_test(config): + j = os.path.join + target_folder = build_target_folder(config) if os.path.exists(target_folder): print('results already exists, skipping test (%s)' % target_folder) @@ -317,20 +287,16 @@ def run_test(config): # don't clean up unless we're running a download-test, so that we leave the test file # complete for a seed test. 
delete_files(['utorrent_session/settings.dat', 'utorrent_session/settings.dat.old', 'asserts.log']) - if config['test'] == 'upload' or config['test'] == 'dual': - print('deleting files') - delete_files([os.path.join(config['save-path'], - 'stress_test_file'), + if config['test'] == 'download' or config['test'] == 'dual': + delete_files([j(config['save-path'], 'test'), '.ses_state', - os.path.join(config['save-path'], - '.resume'), + j(config['save-path'], '.resume'), 'utorrent_session', '.dht_state', - 'session_stats', 'rtorrent_session']) try: - os.mkdir('session_stats') + os.mkdir(target_folder) except Exception: pass @@ -340,24 +306,25 @@ def run_test(config): binary = cmdline.split(' ')[0] environment = None if config['profile'] == 'tcmalloc': - environment = {'LD_PRELOAD': find_library('libprofiler.so.0'), 'CPUPROFILE': 'session_stats/cpu_profile.prof'} + environment = {'LD_PRELOAD': find_library('libprofiler.so.0'), + 'CPUPROFILE': j(target_folder, 'cpu_profile.prof')} if config['profile'] == 'memory': - environment = {'LD_PRELOAD': find_library('libprofiler.so.0'), 'HEAPPROFILE': 'session_stats/heap_profile.prof'} + environment = {'LD_PRELOAD': find_library('libprofiler.so.0'), + 'HEAPPROFILE': j(target_folder, 'heap_profile.prof')} if config['profile'] == 'perf': - cmdline = 'perf timechart record --call-graph --output=session_stats/perf_profile.prof ' + cmdline - f = open('session_stats/cmdline.txt', 'w+') - f.write(cmdline) - f.close() + cmdline = 'perf record -g --output=' + \ + j(target_folder, 'perf_profile.prof') + ' ' + cmdline + with open(j(target_folder, 'cmdline.txt'), 'w+') as f: + f.write(cmdline) - f = open('session_stats/config.txt', 'w+') - print(config, file=f) - f.close() + with open(j(target_folder, 'config.txt'), 'w+') as f: + print(config, file=f) print('clearing disk cache') clear_caches() print('OK') - client_output = open('session_stats/client.output', 'w+') - client_error = open('session_stats/client.error', 'w+') + client_output = open(j(target_folder, 'client.output'), 'w+') + client_error = open(j(target_folder, 'client.error'), 'w+') print('launching: %s' % cmdline) client = subprocess.Popen( shlex.split(cmdline), @@ -367,13 +334,14 @@ def run_test(config): env=environment) print('OK') # enable disk stats printing - if config['build'] != 'rtorrent' and config['build'] != 'utorrent': + if config['build'] == 'libtorrent': print('x', end=' ', file=client.stdin) time.sleep(4) - cmdline = './stage_aio/connection_tester %s %d 127.0.0.1 %d %s' % ( - config['test'], config['num-peers'], port, config['torrent']) + test_dir = 'upload' if config['test'] == 'download' else 'download' if config['test'] == 'upload' else 'dual' + cmdline = './connection_tester %s -c %d -d 127.0.0.1 -p %d -t %s' % ( + test_dir, config['num-peers'], port, config['torrent']) print('launching: %s' % cmdline) - tester_output = open('session_stats/tester.output', 'w+') + tester_output = open(j(target_folder, 'tester.output'), 'w+') tester = subprocess.Popen(shlex.split(cmdline), stdout=tester_output) print('OK') @@ -391,10 +359,13 @@ def run_test(config): if client.returncode is not None: print('client terminated') break - print('\r%d / %d' % (i, test_duration), end=' ') + print('\r%d / %d\x1b[K' % (i, test_duration), end=' ') sys.stdout.flush() i += 1 - if config['test'] != 'upload' and config['test'] != 'dual' and i >= test_duration: + # in download- and dual tests, connection_tester will exit once the + # client is done downloading. 
In upload tests, we'll upload for + # 'test_duration' number of seconds until we end the test + if config['test'] != 'download' and config['test'] != 'dual' and i >= test_duration: break print('\n') @@ -419,69 +390,37 @@ def run_test(config): terminate = True try: - shutil.copy('asserts.log', 'session_stats/') + shutil.copy('asserts.log', target_folder) except Exception: pass - try: - shutil.move('libtorrent_logs0', 'session_stats/') - except Exception: - pass - try: - shutil.move('libtorrent_logs%s' % port, 'session_stats/') - except Exception: - pass + os.chdir(target_folder) - # run fragmentation test - print('analyzing fragmentation') - os.system('./stage_aio/fragmentation_test test.torrent %s' % (config['save-path'])) - try: - shutil.copy('fragmentation.log', 'session_stats/') - except Exception: - pass - - shutil.copy('fragmentation.gnuplot', 'session_stats/') - try: - shutil.copy('file_access.log', 'session_stats/') - except Exception: - pass - - os.system('filefrag %s >session_stats/filefrag.out' % config['save-path']) - os.system('filefrag -v %s >session_stats/filefrag_verbose.out' % config['save-path']) - - os.chdir('session_stats') - - # parse session stats - print('parsing session log') - os.system('python ../../parse_session_stats.py *.0000.log') - os.system('../stage_aio/parse_access_log file_access.log %s' % - (os.path.join('..', config['save-path'], 'stress_test_file'))) + if config['build'] == 'libtorrent': + # parse session stats + print('parsing session log') + os.system('python ../../tools/parse_session_stats.py client.log') os.chdir('..') if config['profile'] == 'tcmalloc': print('analyzing CPU profile [%s]' % binary) - os.system('%s --pdf %s session_stats/cpu_profile.prof >session_stats/cpu_profile.pdf' % - (find_binary(['google-pprof', 'pprof']), binary)) + os.system('%s --pdf %s %s/cpu_profile.prof >%s/cpu_profile.pdf' % + (find_binary(['google-pprof', 'pprof']), binary, target_folder, target_folder)) if config['profile'] == 'memory': for i in range(1, 300): - profile = 'session_stats/heap_profile.prof.%04d.heap' % i + profile = j(target_folder, 'heap_profile.prof.%04d.heap' % i) try: os.stat(profile) except Exception: break print('analyzing heap profile [%s] %d' % (binary, i)) - os.system('%s --pdf %s %s >session_stats/heap_profile_%d.pdf' % - (find_binary(['google-pprof', 'pprof']), binary, profile, i)) + os.system('%s --pdf %s %s >%s/heap_profile_%d.pdf' % + (find_binary(['google-pprof', 'pprof']), binary, profile, target_folder, i)) if config['profile'] == 'perf': print('analyzing CPU profile [%s]' % binary) - os.system('perf timechart --input=session_stats/perf_profile.prof --output=session_stats/profile_timechart.svg') - os.system(('perf report --input=session_stats/perf_profile.prof --threads --show-nr-samples ' - '--vmlinux vmlinuz-2.6.38-8-generic.bzip >session_stats/profile.txt')) - - # move the results into its final place - print('saving results') - os.rename('session_stats', build_target_folder(config)) + os.system(('perf report --input=%s/perf_profile.prof --threads --demangle --show-nr-samples ' + '>%s/profile.txt' % (target_folder, target_folder))) port += 1 @@ -489,39 +428,12 @@ def run_test(config): sys.exit(1) -for h in range(0, 7): - config = build_test_config( - num_peers=30, - build='aio', - test='upload', - torrent='test.torrent', - hash_threads=h, - disable_disk=True) - run_test(config) -sys.exit(0) - -for b in ['aio', 'syncio']: - for test in ['dual', 'upload', 'download']: - config = build_test_config(build=b, test=test) - run_test(config) 
-sys.exit(0) - for b in builds: - for test in ['upload', 'download']: - config = build_test_config(build=b, test=test) + for test in ['upload', 'download', 'dual']: + config = build_test_config(build=b, test=test, profile='perf') run_test(config) for p in peers: - for test in ['upload', 'download']: - config = build_test_config(num_peers=p, test=test) - run_test(config) - -for c in cache_sizes: - for test in ['upload', 'download']: - config = build_test_config(cache_size=c, test=test) - run_test(config) - -for fs in filesystem: - for test in ['upload', 'download']: - config = build_test_config(fs=fs, test=test) + for test in ['upload', 'download', 'dual']: + config = build_test_config(num_peers=p, test=test, profile='perf') run_test(config)
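
To make the parameterized command lines in this patch easier to read, here is a small standalone sketch (not part of the patch itself) that fills in the defaults the updated script uses (libtorrent build, 200 peers, cache_size 400000, 16 disk threads, 100 second test duration) and prints the client_test and connection_tester invocations a single 'download' run would execute. The port is illustrative only, since the script picks a random port per run:

    # sketch: expand the command lines run_benchmarks.py launches for one
    # 'download' test with the patch's defaults (port chosen for illustration)
    test_duration = 100
    num_peers = 200
    cache_size = 400000
    disk_threads = 16
    port = 10000
    save_path = './benchmark-dir'
    torrent = 'test.torrent'
    target_folder = 'results_libtorrent_download_%d_%d_%d' % (
        num_peers, cache_size, disk_threads)

    client_cmd = ('./client_test -k -O -F 500 --enable_upnp=0 --enable_natpmp=0 '
                  '--enable_dht=0 --mixed_mode_algorithm=0 --peer_timeout=%d '
                  '--listen_queue_size=%d --unchoke_slots_limit=%d -T %d '
                  '--connections_limit=%d --cache_size=%d -s "%s" '
                  '--listen_interfaces="0.0.0.0:%d" --aio_threads=%d '
                  '-f %s/client.log %s') % (
        test_duration, num_peers, num_peers, num_peers, num_peers, cache_size,
        save_path, port, disk_threads, target_folder, torrent)

    # for a 'download' test, the tester runs in 'upload' mode, i.e. it seeds
    # the torrent to the client under test
    tester_cmd = './connection_tester upload -c %d -d 127.0.0.1 -p %d -t %s' % (
        num_peers, port, torrent)

    print(client_cmd)
    print(tester_cmd)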
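
A second sketch (also standalone, not part of the patch) enumerates the configurations the main loop at the bottom now runs and the results directory each one writes to, mirroring build_test_config() and build_target_folder() with the defaults from the patch:

    # sketch: list the result directories the updated main loop produces
    peers = [50, 200, 500, 1000]
    builds = ['libtorrent']
    default_peers = peers[1]          # 200
    default_cache = 400000
    default_disk_threads = 16

    def target_folder(build, test, num_peers,
                      cache_size=default_cache,
                      disk_threads=default_disk_threads,
                      disable_disk=False):
        no_disk = '_no-disk' if disable_disk else ''
        return 'results_%s_%s_%d_%d_%d%s' % (
            build, test, num_peers, cache_size, disk_threads, no_disk)

    for b in builds:
        for test in ['upload', 'download', 'dual']:
            print(target_folder(b, test, default_peers))
    for p in peers:
        for test in ['upload', 'download', 'dual']:
            print(target_folder('libtorrent', test, p))

Note that the 200-peer entries from the two loops collide on the same directory name; run_test() skips any configuration whose results directory already exists, so those runs are only executed once.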