# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

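"""Utilities for gathering and formatting the results of a bisect job."""
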
import json
import re

# Note: The Perf Dashboard specifically looks for the string
# "Status: Positive" when deciding whether to CC authors on the bug.
_RESULTS_BANNER = """
===== BISECT JOB RESULTS =====
Status: %(status)s

Test Command: %(command)s
Test Metric: %(metric)s
Relative Change: %(change)s
Score: %(score)s
Retested CL with revert: %(retest)s

"""

# When the bisect was aborted without a bisect failure, the following template
# is used.
_ABORT_REASON_TEMPLATE = """
===== BISECTION ABORTED =====
The bisect was aborted because %(abort_reason)s
Please contact the team (see below) if you believe this is in error.

Bug ID: %(bug_id)s

Test Command: %(command)s
Test Metric: %(metric)s
Good revision: %(good_revision)s
Bad revision: %(bad_revision)s

"""

# The perf dashboard specifically looks for the string
# "Author : " to parse out who to cc on a bug. If you change the
# formatting here, please update the perf dashboard as well.
_RESULTS_REVISION_INFO = """
===== SUSPECTED CL(s) =====
Subject : %(subject)s
Author : %(author)s
Commit description:
%(commit_info)s
Commit : %(cl)s
Date : %(cl_date)s

"""

_REVISION_TABLE_TEMPLATE = """
===== TESTED REVISIONS =====
%(table)s

"""

_RESULTS_THANKYOU = """
| O O | Visit http://www.chromium.org/developers/speed-infra/perf-bug-faq
|  X  | for more information on addressing perf regression bugs. For feedback,
| / \\ | file a bug with label Cr-Tests-AutoBisect. Thank you!"""


_WARNINGS_TEMPLATE = """
===== WARNINGS =====
The following warnings were raised by the bisect job:

 * %(warnings)s

"""

_FAILED_INITIAL_CONFIDENCE_ABORT_REASON = (
    'The metric values for the initial "good" and "bad" revisions '
    'do not represent a clear regression.')

_DIRECTION_OF_IMPROVEMENT_ABORT_REASON = (
    'The metric values for the initial "good" and "bad" revisions match the '
    'expected direction of improvement. Thus, they likely represent an '
    'improvement and not a regression.')

_REQUIRED_RESULTS_CONFIDENCE = 95.0


class BisectResults(object):
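  """Gathers the results of a bisect job and formats them for reporting."""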

  def __init__(self, bisector, partial=False):
    """Creates a new results object from a finished or partial bisect job."""
    if not bisector.bisect_over and not partial:
      raise ValueError(
          'Invalid parameter, the bisect must be over by the time the '
          'BisectResults constructor is called')  # pragma: no cover
    self._bisector = bisector
    self.results_confidence = None
    self.abort_reason = None
    self.culprit_cl_hash = None
    self.commit_info = None
    self.culprit_author = None
    self.culprit_subject = None
    self.culprit_date = None
    self.partial = partial
    self._gather_results()

  def as_string(self):
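    """Returns the header, body and footer of the results as one string."""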
    return self._make_header() + self._make_body() + self._make_footer()

  def _make_header(self):
    # Unconditionally include this string at the top of the results since it
    # is used by the dashboard to separate the bisect results from other
    # buildbot output.
    if self.partial:
      return '---partial bisect results start here---\n'
    header = '---bisect results start here---\n'
    if not self.abort_reason:
      header += _RESULTS_BANNER % {
          'status': self.status,
          'command': self.command,
          'metric': self.metric,
          'change': self.relative_change,
          'score': self.results_confidence,
          'retest': 'Not Implemented.'
      }
    else:
      header += _ABORT_REASON_TEMPLATE % {
          'abort_reason': self.abort_reason,
          'bug_id': self.bug_id,
          'command': self.command,
          'metric': self.metric,
          'good_revision': self.good_revision,
          'bad_revision': self.bad_revision
      }
    if self.warnings and not self.partial:
      header += _WARNINGS_TEMPLATE % {'warnings': '\n * '.join(self.warnings)}
    return header

  def _make_body(self):
    body = ''
    if self.culprit_cl_hash:
      body += _RESULTS_REVISION_INFO % {
          'subject': self.culprit_subject,
          'author': self.culprit_author,
          'cl_date': self.culprit_date,
          'commit_info': self.commit_info,
          'cl': self.culprit_cl_hash
      }
    body += self._compose_revisions_table()
    return body.encode('ascii', 'replace')

  def _make_footer(self):
    if self.partial:
      return '----End of partial results----'
    return _RESULTS_THANKYOU

  def _gather_results(self):
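    """Pulls the relevant bisect data from the bisector into this object."""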
    # TODO(robertocn): Add viewcl link here.
    # TODO(robertocn): Merge this into constructor.
    bisector = self._bisector
    config = bisector.bisect_config

    # TODO(robertocn): Add platform here.
    self.relative_change = bisector.relative_change
    self.warnings = bisector.warnings
    self.command = config['command']
    self.metric = config['metric']
    self.bug_id = config.get('bug_id')
    self.good_revision = bisector.good_rev.commit_hash
    self.bad_revision = bisector.bad_rev.commit_hash

    self.is_telemetry = ('tools/perf/run_' in self.command or
                         'tools\\perf\\run_' in self.command)

    if self.is_telemetry:
      self.telemetry_command = re.sub(r'--browser=[^\s]+',
                                      '--browser=<bot-name>',
                                      self.command)

    self.culprit_cl_hash = None
    if bisector.culprit:
      self._set_culprit_attributes(bisector.culprit)
      self.results_confidence = bisector.api.m.math_utils.confidence_score(
          bisector.lkgr.values, bisector.fkbr.values)

    if bisector.failed_initial_confidence:
      self.abort_reason = _FAILED_INITIAL_CONFIDENCE_ABORT_REASON
    elif bisector.failed_direction:
      self.abort_reason = _DIRECTION_OF_IMPROVEMENT_ABORT_REASON

    if self.partial:
      self.status = 'Partial Results only.'
    elif bisector.failed:
      self.status = 'Negative: Failed to bisect.'
    elif self.results_confidence > _REQUIRED_RESULTS_CONFIDENCE:
      self.status = 'Positive: A suspected commit was found.'
      self._bisector.surface_result('CULPRIT_FOUND')
    else:
      self.status = ('Negative: Completed, but no culprit was found with '
                     'high confidence.')
      self._bisector.surface_result('LO_FINAL_CONF')

  def _set_culprit_attributes(self, culprit):
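    """Fills in the culprit CL's hash, subject, author, body and date."""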
    self.culprit_cl_hash = None
    api = self._bisector.api
    if culprit:
      self.culprit_cl_hash = culprit.deps_revision or culprit.commit_hash
      culprit_info = api.query_revision_info(
          self.culprit_cl_hash, culprit.depot_name)
      self.culprit_subject = culprit_info['subject']
      self.culprit_author = (culprit_info['author'] + ', ' +
                             culprit_info['email'])
      self.commit_info = culprit_info['body']
      self.culprit_date = culprit_info['date']

  def _compose_revisions_table(self):
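    """Builds the '===== TESTED REVISIONS =====' section of the results."""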
    def revision_row(r):
      result = [
          r.depot_name,
          r.deps_revision or 'r' + str(r.commit_pos),
          _format_number(r.mean_value),
          _format_number(r.std_dev),
          len(r.values),
          'good' if r.good else 'bad' if r.bad else 'unknown',
          '<-' if self._bisector.culprit == r else '',
      ]
      return map(str, result)

    is_return_code = self._bisector.is_return_code_mode()
    headers_row = [[
        'Depot',
        'Revision',
        'Mean Value' if not is_return_code else 'Exit Code',
        'Std. Dev.',
        'Num Values',
        'Good?',
        '',
    ]]
    revision_rows = [revision_row(r)
                     for r in self._bisector.revisions
                     if r.tested or r.aborted]
    all_rows = headers_row + revision_rows
    return _REVISION_TABLE_TEMPLATE % {'table': pretty_table(all_rows)}


def _format_number(x):
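  """Formats a numeric value for display in the revisions table."""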
  if x is None:
    return 'N/A'
  if isinstance(x, int):
    return str(x)
  return str(round(x, 6))


def pretty_table(data):
251 """Arrange a matrix of strings into an ascii table. | |
252 | |
253 This function was ripped off directly from somewhere in skia. It is | |
254 inefficient and so, should be avoided for large data sets. | |
255 | |
256 Args: | |
257 data (list): A list of lists of strings containing the data to tabulate. It | |
258 is expected to be rectangular. | |
259 | |
260 Returns: A multi-line string containing the data arranged in a tabular manner. | |
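
  Example:
    pretty_table([['a', 'bb'], ['ccc', 'd']]) produces
    'a   bb \nccc d  \n'; each cell is left-justified to the width of the
    widest cell in its column, plus one trailing space.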
261 """ | |
262 result = '' | |
263 column_widths = [0] * len(data[0]) | |
264 for row in data: | |
265 column_widths = [max(longest_len, len(prop)) for | |
266 longest_len, prop in zip(column_widths, row)] | |
267 for row in data: | |
268 is_culprit_row = row[-1] == '<-' | |
269 if is_culprit_row: | |
270 result += '\n' | |
271 for prop, width in zip(row, column_widths): | |
272 result += prop.ljust(width + 1) | |
273 result += '\n' | |
274 if is_culprit_row: | |
275 result += '\n' | |
276 return result | |