| Message ID | 20201021145859.11201-20-vsementsov@virtuozzo.com |
| --- | --- |
| State | Accepted |
| Commit | aa362403f46848c4377ffa9702008e6a2d5f876e |
| Headers | show |
| Series | preallocate filter | expand |
21.10.2020 17:58, Vladimir Sementsov-Ogievskiy wrote: > @@ -39,21 +43,70 @@ def result_to_text(result): > return 'FAILED' > > > -def results_to_text(results): > - """Return text representation of bench() returned dict.""" > - from tabulate import tabulate > - > +def results_dimension(results): > dim = None > - tab = [[""] + [c['id'] for c in results['envs']]] > for case in results['cases']: > - row = [case['id']] > for env in results['envs']: > res = results['tab'][case['id']][env['id']] > if dim is None: > dim = res['dimension'] > else: > assert dim == res['dimension'] > + > + assert dim in ('iops', 'sec') s/sec/seconds/ > + > + return dim > + > +
On 21.10.20 16:58, Vladimir Sementsov-Ogievskiy wrote: > Performance improvements / degradations are usually discussed in > percentage. Let's make the script calculate it for us. > > Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> > --- > scripts/simplebench/results_to_text.py | 67 +++++++++++++++++++++++--- > 1 file changed, 60 insertions(+), 7 deletions(-) > > diff --git a/scripts/simplebench/results_to_text.py b/scripts/simplebench/results_to_text.py > index 479f7ac1d4..56fdacf7ca 100644 > --- a/scripts/simplebench/results_to_text.py > +++ b/scripts/simplebench/results_to_text.py [...] > + for j in range(0, i): > + env_j = results['envs'][j] > + res_j = case_results[env_j['id']] > + cell += ' ' > + > + if 'average' not in res_j: > + # Failed result > + cell += '--' > + continue > + > + col_j = tab[0][j + 1] if named_columns else '' > + diff_pr = round((res['average'] - res_j['average']) / > + res_j['average'] * 100) > + cell += f' {col_j}{diff_pr:+}%' Contrasting to v6, you added the "cell += ' '" line, dropped a space in the "cell += '--'" line (was: "cell += ' --'"), but kept the space here. I would have assumed that the leading space is dropped here, too. But I don’t quite know, what I should be expecting, so. Anyway, I’ll just leave this here: Reviewed-by: Max Reitz <mreitz@redhat.com> > + row.append(cell) > + tab.append(row) > + > + return f'All results are in {dim}\n\n' + tabulate.tabulate(tab) >
diff --git a/scripts/simplebench/results_to_text.py b/scripts/simplebench/results_to_text.py index 479f7ac1d4..56fdacf7ca 100644 --- a/scripts/simplebench/results_to_text.py +++ b/scripts/simplebench/results_to_text.py @@ -17,6 +17,10 @@ # import math +import tabulate + +# We want leading whitespace for difference row cells (see below) +tabulate.PRESERVE_WHITESPACE = True def format_value(x, stdev): @@ -39,21 +43,70 @@ def result_to_text(result): return 'FAILED' -def results_to_text(results): - """Return text representation of bench() returned dict.""" - from tabulate import tabulate - +def results_dimension(results): dim = None - tab = [[""] + [c['id'] for c in results['envs']]] for case in results['cases']: - row = [case['id']] for env in results['envs']: res = results['tab'][case['id']][env['id']] if dim is None: dim = res['dimension'] else: assert dim == res['dimension'] + + assert dim in ('iops', 'sec') + + return dim + + +def results_to_text(results): + """Return text representation of bench() returned dict.""" + n_columns = len(results['envs']) + named_columns = n_columns > 2 + dim = results_dimension(results) + tab = [] + + if named_columns: + # Environment columns are named A, B, ... + tab.append([''] + [chr(ord('A') + i) for i in range(n_columns)]) + + tab.append([''] + [c['id'] for c in results['envs']]) + + for case in results['cases']: + row = [case['id']] + case_results = results['tab'][case['id']] + for env in results['envs']: + res = case_results[env['id']] row.append(result_to_text(res)) tab.append(row) - return f'All results are in {dim}\n\n' + tabulate(tab) + # Add row of difference between columns. For each column starting from + # B we calculate difference with all previous columns. 
+ row = ['', ''] # case name and first column + for i in range(1, n_columns): + cell = '' + env = results['envs'][i] + res = case_results[env['id']] + + if 'average' not in res: + # Failed result + row.append(cell) + continue + + for j in range(0, i): + env_j = results['envs'][j] + res_j = case_results[env_j['id']] + cell += ' ' + + if 'average' not in res_j: + # Failed result + cell += '--' + continue + + col_j = tab[0][j + 1] if named_columns else '' + diff_pr = round((res['average'] - res_j['average']) / + res_j['average'] * 100) + cell += f' {col_j}{diff_pr:+}%' + row.append(cell) + tab.append(row) + + return f'All results are in {dim}\n\n' + tabulate.tabulate(tab)
Performance improvements / degradations are usually discussed in percentage. Let's make the script calculate it for us. Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> --- scripts/simplebench/results_to_text.py | 67 +++++++++++++++++++++++--- 1 file changed, 60 insertions(+), 7 deletions(-)