Chromium Code Reviews

Unified Diff: scripts/slave/recipe_modules/perf_try/bisect_results_json.py

Issue 1573293002: Change auto_bisect to post results to perf dashboard. (Closed) Base URL: https://chromium.googlesource.com/chromium/tools/build.git@master
Patch Set: update (created 4 years, 11 months ago)
Index: scripts/slave/recipe_modules/perf_try/bisect_results_json.py
diff --git a/scripts/slave/recipe_modules/perf_try/bisect_results_json.py b/scripts/slave/recipe_modules/perf_try/bisect_results_json.py
new file mode 100644
index 0000000000000000000000000000000000000000..e99045ff8ff3c2b13ce5f7924b3966e72730a1f4
--- /dev/null
+++ b/scripts/slave/recipe_modules/perf_try/bisect_results_json.py
@@ -0,0 +1,97 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Note: The Perf Dashboard displays this data, so any changes here should
+# also be reflected on the Perf Dashboard.
+
+_FAILED_INITIAL_CONFIDENCE_ABORT_REASON = (
+    'The metric values for the initial "good" and "bad" revisions '
+    'do not represent a clear regression.')
+
+_DIRECTION_OF_IMPROVEMENT_ABORT_REASON = (
+    'The metric values for the initial "good" and "bad" revisions match the '
+    'expected direction of improvement, and thus likely represent an '
+    'improvement rather than a regression.')
+
+
+def get(api, config, results_without_patch, results_with_patch, labels):
qyearsley 2016/01/11 22:49:43 Might it be potentially clearer if this module is
chrisphan 2016/01/14 00:53:26 Moved this to perf_try/api for consistency. This
+ """Returns the results as a dict."""
+
+ output_with_patch = results_with_patch.get('output')
+ output_without_patch = results_without_patch.get('output')
+ values_with_patch = results_with_patch.get('results').get('values')
+ values_without_patch = results_without_patch.get('results').get('values')
+
+ cloud_links_without_patch = api.parse_cloud_links(output_without_patch)
+ cloud_links_with_patch = api.parse_cloud_links(output_with_patch)
+
+ cloud_link = (cloud_links_without_patch['html'][0]
+ if cloud_links_without_patch['html'] else '')
+
+ results = {
+ 'try_job_id': config.get('try_job_id'),
+ 'status': 'completed', # TODO(chrisphan) Get partial results state.
+ 'buildbot_log_url': '', # TODO(chrisphan) Get this.
+ 'bisect_bot': '', # TODO(chrisphan): Get this.
+ 'command': config.get('command'),
+ 'metric': config.get('metric'),
+ 'cloud_link': cloud_link,
+ }
+
+ if not values_with_patch or not values_without_patch:
+ results['warnings'] = ['No values from test with patch, or none '
+ 'from test without patch.\n Output with patch:\n%s\n\nOutput without '
+ 'patch:\n%s' % (output_with_patch, output_without_patch)]
+ return results
+
+ mean_with_patch = api.m.math_utils.mean(values_with_patch)
+ mean_without_patch = api.m.math_utils.mean(values_without_patch)
+
+ stderr_with_patch = api.m.math_utils.standard_error(values_with_patch)
+ stderr_without_patch = api.m.math_utils.standard_error(
+ values_without_patch)
+
+ profiler_with_patch = cloud_links_with_patch['profiler']
+ profiler_without_patch = cloud_links_without_patch['profiler']
+
+ # Calculate the % difference in the means of the 2 runs.
+ relative_change = None
+ std_err = None
+ if mean_with_patch and values_with_patch:
+ relative_change = api.m.math_utils.relative_change(
+ mean_without_patch, mean_with_patch) * 100
+ std_err = api.m.math_utils.pooled_standard_error(
+ [values_with_patch, values_without_patch])
+
+ if relative_change is not None and std_err is not None:
+ data = [
+ ['Revision', 'Mean', 'Std.Error'],
+ ['Patch', str(mean_with_patch), str(stderr_with_patch)],
+ ['No Patch', str(mean_without_patch), str(stderr_without_patch)]
+ ]
+ results['change'] = relative_change
+ results['std_err'] = std_err
+ results['result'] = _pretty_table(data)
+
+  profiler_links = []
+  if profiler_with_patch and profiler_without_patch:
+    for i in xrange(len(profiler_with_patch)):  # pragma: no cover
+      profiler_links.append({
+          'title': '%s[%d]' % (labels.get('profiler_link1'), i),
+          'link': profiler_with_patch[i]
+      })
+    for i in xrange(len(profiler_without_patch)):  # pragma: no cover
+      profiler_links.append({
+          'title': '%s[%d]' % (labels.get('profiler_link2'), i),
+          'link': profiler_without_patch[i]
+      })
+  results['profiler_links'] = profiler_links
+
+  return results
+
+def _pretty_table(data):
+  results = []
+  for row in data:
+    results.append('%-15s' * len(row) % tuple(row))
+  return '\n'.join(results)
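
The statistics block in get() leans on several math_utils helpers (mean, standard_error, relative_change, pooled_standard_error) whose implementations are not part of this patch. As a reviewing aid, here is a minimal self-contained sketch of what those calls compute, using standard textbook formulas; both the formulas and the sample numbers are assumptions for illustration, not code copied from math_utils.

# Illustration only: assumed formulas for the math_utils helpers used in
# get(); the real math_utils implementations may differ in detail.
import math


def mean(values):
  # Arithmetic mean of a non-empty list of numbers.
  return float(sum(values)) / len(values)


def standard_error(values):
  # Standard error of the mean: sample standard deviation / sqrt(n).
  if len(values) < 2:
    return 0.0
  m = mean(values)
  variance = sum((v - m) ** 2 for v in values) / float(len(values) - 1)
  return math.sqrt(variance / len(values))


def relative_change(before, after):
  # Fractional change from |before| to |after|; get() scales this by 100.
  return abs(after - before) / abs(before)


def pooled_standard_error(sample_sets):
  # Standard error of the difference of means, via the pooled variance of
  # all sample sets.
  sum_of_squares = 0.0
  degrees_of_freedom = 0
  inverse_sizes = 0.0
  for values in sample_sets:
    m = mean(values)
    sum_of_squares += sum((v - m) ** 2 for v in values)
    degrees_of_freedom += len(values) - 1
    inverse_sizes += 1.0 / len(values)
  if degrees_of_freedom == 0:
    return 0.0
  return (math.sqrt(sum_of_squares / degrees_of_freedom) *
          math.sqrt(inverse_sizes))


# Made-up numbers standing in for values_with_patch / values_without_patch.
with_patch = [10.1, 10.3, 9.9, 10.2]
without_patch = [12.0, 11.8, 12.3, 12.1]
print relative_change(mean(without_patch), mean(with_patch)) * 100  # ~16.0
print pooled_standard_error([with_patch, without_patch])            # ~0.13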
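
For completeness, this is roughly what the fixed-width table _pretty_table builds for the 'result' field looks like (invented numbers); each '%-15s' pads a cell to a left-justified 15-character column.

data = [
    ['Revision', 'Mean', 'Std.Error'],
    ['Patch', '10.125', '0.095'],
    ['No Patch', '12.05', '0.067'],
]
print _pretty_table(data)
# Revision       Mean           Std.Error
# Patch          10.125         0.095
# No Patch       12.05          0.067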
