diff --git a/src/tools/ftbench/src/tohtml.py b/src/tools/ftbench/src/tohtml.py
index 2f2820c4f..49daec954 100644
--- a/src/tools/ftbench/src/tohtml.py
+++ b/src/tools/ftbench/src/tohtml.py
@@ -39,7 +39,9 @@ FONT_COUNT = 5
 
 WARNING_SAME_COMMIT = "Warning: Baseline and Benchmark have the same commit ID!"
 INFO_1 = "* Average time for single iteration. Smaller values are better."
-INFO_2 = "* If a value in the 'Iterations' column is given as 'x | y', values x and y give the number of iterations in the baseline and the benchmark test, respectively."
+INFO_2 = "* If a value in the 'Iterations' column is given as 'x | y',\
+    values x and y give the number of iterations in the baseline \
+    and the benchmark test, respectively."
 
 
 def main():
@@ -112,7 +114,8 @@ def generate_info_table(html_file, baseline_info, benchmark_info):
     ):
         write_to_html(
             html_file,
-            f'<tr><td>{info}</td><td>{baseline_line.strip()}</td><td>{benchmark_line.strip()}</td></tr>\n'
+            f'<tr><td>{info}</td><td>{baseline_line.strip()}</td>\
+                <td>{benchmark_line.strip()}</td></tr>\n'
         )
     write_to_html(html_file, "</table>")
     write_to_html(html_file, f"<p>{INFO_1}</p>")
@@ -124,7 +127,10 @@ def generate_total_results_table(html_file, baseline_dir, benchmark_dir):
 
     # This dictionary will store aggregated results.
     test_results = {
-        test: {"baseline": 0, "benchmark": 0, "n_baseline": 0, "n_benchmark": 0}
+        test: {"baseline": 0,
+               "benchmark": 0,
+               "n_baseline": 0,
+               "n_benchmark": 0}
         for test in [
            "Load",
            "Load_Advances (Normal)",
@@ -166,11 +172,11 @@
 
         if baseline_line.startswith(" "):
             baseline_match = re.match(
-                r"\s+(.*?)\s+(\d+\.\d+)\s+microseconds\s+(\d+)\s", baseline_line)
+                r"\s+(.*?)\s+(\d+\.\d+)\s+microseconds\s+(\d+)\s",
+                baseline_line)
             benchmark_match = re.match(
                 r"\s+(.*?)\s+(\d+\.\d+)\s+microseconds\s+(\d+)\s",
-                benchmark_line,
-            )
+                benchmark_line)
 
             if baseline_match and benchmark_match:
                 test = baseline_match.group(1).strip()
@@ -229,12 +235,14 @@ def generate_total_results_table(html_file, baseline_dir, benchmark_dir):
             html_file,
             f'<tr><td>{test}</td><td>{n_display}</td>\
                 <td>{baseline:.1f}</td>\
-                <td>{benchmark:.1f}</td><td>{diff:.1f}</td></tr>\n',
+                <td>{benchmark:.1f}</td>\
+                <td>{diff:.1f}</td></tr>\n',
         )
     write_to_html(
         html_file,
-        f'<tr><td>Total duration for all tests:</td><td>{total_time:.0f} s</td></tr>',
+        f'<tr><td>Total duration for all tests:</td>\
+            <td>{total_time:.0f} s</td></tr>',
     )
     write_to_html(html_file, "</table>\n")
 
 
@@ -257,9 +265,9 @@ def generate_results_table(
     write_to_html(
         html_file,
         f'<tr><th>Test</th><th>Iterations</th>\
-                <th>* Baseline (µs)</th>\
-                <th>* Benchmark (µs)</th>\
-                <th>Difference (%)</th></tr>\n'
+            <th>* Baseline (µs)</th>\
+            <th>* Benchmark (µs)</th>\
+            <th>Difference (%)</th></tr>\n'
     )
 
     total_n = total_time = 0
@@ -315,13 +323,17 @@
 
         write_to_html(
             html_file,
-            f'<tr><td>{baseline_match.group(1)}</td><td>{n}</td>\
-                <td>{baseline_value:.1f}</td><td>{benchmark_value:.1f}</td><td>{percentage_diff:.1f}</td></tr>\n',
+            f'<tr><td>{baseline_match.group(1)}</td>\
+                <td>{n}</td>\
+                <td>{baseline_value:.1f}</td>\
+                <td>{benchmark_value:.1f}</td>\
+                <td>{percentage_diff:.1f}</td></tr>\n',
         )
     write_to_html(
         html_file,
-        f'<tr><td>Total duration for the font:</td><td>{total_time:.0f} s</td></tr>\n',
+        f'<tr><td>Total duration for the font:</td>\
+            <td>{total_time:.0f} s</td></tr>\n',
     )