
Unified Diff: scripts/slave/recipe_modules/perf_try/api.py

Issue 1573293002: Change auto_bisect to post results to perf dashboard. (Closed) Base URL: https://chromium.googlesource.com/chromium/tools/build.git@master
Patch Set: update (created 4 years, 11 months ago)
 # Copyright 2015 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

 """API for the perf try job recipe module.

 This API is meant to enable the perf try job recipe on any chromium-supported
 platform for any test that can be run via buildbot, perf or otherwise.
 """

 import re

 from recipe_engine import recipe_api
+from . import bisect_results_json

 PERF_CONFIG_FILE = 'tools/run-perf-test.cfg'
 WEBKIT_PERF_CONFIG_FILE = 'third_party/WebKit/Tools/run-perf-test.cfg'
 PERF_BENCHMARKS_PATH = 'tools/perf/benchmarks'
 PERF_MEASUREMENTS_PATH = 'tools/perf/measurements'
 BUILDBOT_BUILDERNAME = 'BUILDBOT_BUILDERNAME'
 BENCHMARKS_JSON_FILE = 'benchmarks.json'

 CLOUD_RESULTS_LINK = (r'\s(?P<VALUES>http://storage.googleapis.com/'
                       'chromium-telemetry/html-results/results-[a-z0-9-_]+)\s')
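For context on what this pattern captures: it pulls the whitespace-delimited HTML results URL out of the benchmark's stdout. A standalone check, where the sample output line is invented for illustration:

import re

CLOUD_RESULTS_LINK = (r'\s(?P<VALUES>http://storage.googleapis.com/'
                      'chromium-telemetry/html-results/results-[a-z0-9-_]+)\s')

# Invented stdout from a telemetry run; only the URL shape matters here.
sample_output = ('View results at http://storage.googleapis.com/'
                 'chromium-telemetry/html-results/results-2016-01-08_1234 \n')

matches = re.compile(CLOUD_RESULTS_LINK, re.MULTILINE).findall(sample_output)
# matches == ['http://storage.googleapis.com/chromium-telemetry/'
#             'html-results/results-2016-01-08_1234']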
(...skipping 57 matching lines...)
         upload_on_last_run=True,
         results_label='TOT' if r[1] is None else r[1],
         allow_flakes=False)

     labels = {
         'profiler_link1': ('%s - Profiler Data' % 'With Patch'
                            if r[0] is None else r[0]),
         'profiler_link2': ('%s - Profiler Data' % 'Without Patch'
                            if r[1] is None else r[1])
     }
+
+    # TODO(chrisphan): Deprecate this. perf_dashboard.post_bisect below
+    # already outputs data in json format.
     self._compare_and_present_results(
         test_cfg, results_without_patch, results_with_patch, labels)

+    bisect_results = bisect_results_json.get(
+        self, test_cfg, results_without_patch, results_with_patch, labels)
+    self.m.perf_dashboard.set_default_config()
+    self.m.perf_dashboard.post_bisect(bisect_results, halt_on_failure=True)
+
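Note for reviewers tracing the new flow: bisect_results_json.get(...) (added in this CL) assembles a JSON-serializable summary of the with-patch and without-patch runs, which post_bisect then sends to the perf dashboard. The sketch below only illustrates the call sequence; the payload keys are hypothetical stand-ins, since the real schema lives in bisect_results_json.py:

# Hypothetical sketch of the new posting flow; every payload key below is
# illustrative only, the real schema is built in bisect_results_json.py.
def _post_results_sketch(api, test_cfg, results_without_patch,
                         results_with_patch, labels):
  bisect_results = {
      'results_with_patch': results_with_patch,        # hypothetical key
      'results_without_patch': results_without_patch,  # hypothetical key
      'labels': labels,                                # hypothetical key
  }
  api.m.perf_dashboard.set_default_config()
  # halt_on_failure=True presumably fails the step if the POST fails,
  # rather than continuing silently.
  api.m.perf_dashboard.post_bisect(bisect_results, halt_on_failure=True)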
   def run_cq_job(self, update_step, master_dict, files_in_patch):
     """Runs benchmarks affected by a CL on CQ."""
     buildername = self.m.properties['buildername']
     affected_benchmarks = self._get_affected_benchmarks(files_in_patch)
     if not affected_benchmarks:
       step_result = self.m.step('Results', [])
       step_result.presentation.step_text = (
           'There are no modifications to Telemetry benchmarks,'
           ' aborting the try job.')
       return
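_get_affected_benchmarks falls outside the visible hunks; based on the PERF_BENCHMARKS_PATH constant above, a plausible reconstruction (hypothetical, not this module's actual code) maps changed benchmark files to benchmark names:

import os

PERF_BENCHMARKS_PATH = 'tools/perf/benchmarks'

# Hypothetical reconstruction, for orientation only: collect the names of
# benchmark modules touched by the patch.
def _get_affected_benchmarks_sketch(files_in_patch):
  benchmarks = []
  for f in files_in_patch:
    if f.startswith(PERF_BENCHMARKS_PATH):
      benchmarks.append(os.path.splitext(os.path.basename(f))[0])
  return benchmarks

# _get_affected_benchmarks_sketch(['tools/perf/benchmarks/sunspider.py'])
# => ['sunspider']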
(...skipping 171 matching lines...)
         self._get_hash(config.get('good_revision')))

   def _compare_and_present_results(
       self, cfg, results_without_patch, results_with_patch, labels):
     """Parses results and creates Results step."""
     output_with_patch = results_with_patch.get('output')
     output_without_patch = results_without_patch.get('output')
     values_with_patch = results_with_patch.get('results').get('values')
     values_without_patch = results_without_patch.get('results').get('values')

-    cloud_links_without_patch = _parse_cloud_links(output_without_patch)
-    cloud_links_with_patch = _parse_cloud_links(output_with_patch)
+    cloud_links_without_patch = self.parse_cloud_links(output_without_patch)
+    cloud_links_with_patch = self.parse_cloud_links(output_with_patch)

     results_link = (cloud_links_without_patch['html'][0]
                     if cloud_links_without_patch['html'] else '')

     if not values_with_patch or not values_without_patch:
       step_result = self.m.step('Results', [])
       step_result.presentation.step_text = (
           'No values from test with patch, or none from test without patch.\n'
           'Output with patch:\n%s\n\nOutput without patch:\n%s' % (
               output_with_patch, output_without_patch))
(...skipping 48 matching lines...)
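For reference, the accessors at the top of _compare_and_present_results imply this shape for each results dict (all numbers and text invented):

# Minimal fixture satisfying the .get('output') and
# .get('results').get('values') accessors above.
results_with_patch = {
    'output': 'benchmark stdout, including any cloud results links ...',
    'results': {'values': [12.1, 11.9, 12.3]},
}
results_without_patch = {
    'output': 'benchmark stdout ...',
    'results': {'values': [13.0, 13.2, 12.8]},
}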
       step_result.presentation.links.update({
           '%s[%d]' % (
               labels.get('profiler_link1'), i): profiler_with_patch[i]
       })
     for i in xrange(len(profiler_without_patch)):  # pragma: no cover
       step_result.presentation.links.update({
           '%s[%d]' % (
               labels.get('profiler_link2'), i): profiler_without_patch[i]
       })

-def _parse_cloud_links(output):
-  html_results_pattern = re.compile(CLOUD_RESULTS_LINK, re.MULTILINE)
-  profiler_pattern = re.compile(PROFILER_RESULTS_LINK, re.MULTILINE)
-
-  results = {
-      'html': html_results_pattern.findall(output),
-      'profiler': profiler_pattern.findall(output),
-  }
-
-  return results
+  def parse_cloud_links(self, output):
+    html_results_pattern = re.compile(CLOUD_RESULTS_LINK, re.MULTILINE)
+    profiler_pattern = re.compile(PROFILER_RESULTS_LINK, re.MULTILINE)
+
+    results = {
+        'html': html_results_pattern.findall(output),
+        'profiler': profiler_pattern.findall(output),
+    }
+    return results


 def _validate_perf_config(config_contents, required_parameters):
   """Validates the perf config file contents.

   This is used when we're doing a perf try job; the config file is called
   run-perf-test.cfg by default.

   The parameters checked are the required parameters; any additional optional
   parameters won't be checked and validation will still pass.
(...skipping 65 matching lines...)
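For orientation, a run-perf-test.cfg typically defines a config dict along these lines. The exact required keys are enforced in the skipped lines above, so the set shown here is illustrative rather than authoritative:

# Illustrative run-perf-test.cfg contents; the key set is a common example,
# not necessarily the exact required_parameters this module checks.
config = {
    'command': './tools/perf/run_benchmark -v --browser=release sunspider',
    'metric': 'Total/Total',      # hypothetical metric name
    'repeat_count': '20',
    'max_time_minutes': '20',
}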

 def _prepend_src_to_path_in_command(test_cfg):
   command_to_run = []
   for v in test_cfg.get('command').split():
     if v in ['./tools/perf/run_benchmark',
              'tools/perf/run_benchmark',
              'tools\\perf\\run_benchmark']:
       v = 'src/tools/perf/run_benchmark'
     command_to_run.append(v)
   test_cfg.update({'command': ' '.join(command_to_run)})
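Its effect, for reference, assuming the function above is in scope (the command string is invented):

# Minimal demonstration of the in-place rewrite.
test_cfg = {'command': 'tools/perf/run_benchmark -v sunspider'}
_prepend_src_to_path_in_command(test_cfg)
# test_cfg['command'] == 'src/tools/perf/run_benchmark -v sunspider'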
